Diffstat (limited to 'src/go/plugin')
-rw-r--r--src/go/plugin/go.d/README.md244
-rw-r--r--src/go/plugin/go.d/agent/README.md157
-rw-r--r--src/go/plugin/go.d/agent/agent.go253
-rw-r--r--src/go/plugin/go.d/agent/agent_test.go103
-rw-r--r--src/go/plugin/go.d/agent/confgroup/cache.go93
-rw-r--r--src/go/plugin/go.d/agent/confgroup/cache_test.go134
-rw-r--r--src/go/plugin/go.d/agent/confgroup/config.go178
-rw-r--r--src/go/plugin/go.d/agent/confgroup/config_test.go390
-rw-r--r--src/go/plugin/go.d/agent/confgroup/group.go8
-rw-r--r--src/go/plugin/go.d/agent/confgroup/registry.go23
-rw-r--r--src/go/plugin/go.d/agent/confgroup/registry_test.go44
-rw-r--r--src/go/plugin/go.d/agent/config.go76
-rw-r--r--src/go/plugin/go.d/agent/discovery/cache.go38
-rw-r--r--src/go/plugin/go.d/agent/discovery/config.go29
-rw-r--r--src/go/plugin/go.d/agent/discovery/dummy/config.go24
-rw-r--r--src/go/plugin/go.d/agent/discovery/dummy/discovery.go76
-rw-r--r--src/go/plugin/go.d/agent/discovery/dummy/discovery_test.go109
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/config.go25
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/discovery.go104
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/discovery_test.go25
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/parse.go142
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/parse_test.go431
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/read.go98
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/read_test.go116
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/sim_test.go130
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/watch.go220
-rw-r--r--src/go/plugin/go.d/agent/discovery/file/watch_test.go378
-rw-r--r--src/go/plugin/go.d/agent/discovery/manager.go199
-rw-r--r--src/go/plugin/go.d/agent/discovery/manager_test.go177
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/conffile.go69
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/docker.go241
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/dockerd_test.go162
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/sim_test.go162
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/target.go55
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/config.go34
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes.go268
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go160
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod.go434
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod_test.go648
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service.go209
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service_test.go456
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/sim_test.go137
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go326
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners_test.go169
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/sim_test.go167
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/target.go41
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/model/discoverer.go11
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/model/tags.go87
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/model/tags_test.go3
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/model/target.go15
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/accumulator.go152
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/classify.go132
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/classify_test.go83
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/compose.go157
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/compose_test.go92
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/config.go136
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap.go63
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap_test.go81
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline.go236
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline_test.go303
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/promport.go662
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/selector.go154
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/selector_test.go248
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/pipeline/sim_test.go130
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/sd.go147
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/sd_test.go106
-rw-r--r--src/go/plugin/go.d/agent/discovery/sd/sim_test.go118
-rw-r--r--src/go/plugin/go.d/agent/discovery/sim_test.go67
-rw-r--r--src/go/plugin/go.d/agent/filelock/filelock.go64
-rw-r--r--src/go/plugin/go.d/agent/filelock/filelock_test.go99
-rw-r--r--src/go/plugin/go.d/agent/filestatus/manager.go98
-rw-r--r--src/go/plugin/go.d/agent/filestatus/manager_test.go122
-rw-r--r--src/go/plugin/go.d/agent/filestatus/store.go90
-rw-r--r--src/go/plugin/go.d/agent/filestatus/store_test.go138
-rw-r--r--src/go/plugin/go.d/agent/functions/ext.go30
-rw-r--r--src/go/plugin/go.d/agent/functions/function.go96
-rw-r--r--src/go/plugin/go.d/agent/functions/input.go35
-rw-r--r--src/go/plugin/go.d/agent/functions/manager.go127
-rw-r--r--src/go/plugin/go.d/agent/functions/manager_test.go320
-rw-r--r--src/go/plugin/go.d/agent/hostinfo/hostinfo.go39
-rw-r--r--src/go/plugin/go.d/agent/hostinfo/hostinfo_common.go7
-rw-r--r--src/go/plugin/go.d/agent/hostinfo/hostinfo_linux.go42
-rw-r--r--src/go/plugin/go.d/agent/jobmgr/cache.go181
-rw-r--r--src/go/plugin/go.d/agent/jobmgr/di.go39
-rw-r--r--src/go/plugin/go.d/agent/jobmgr/dyncfg.go852
-rw-r--r--src/go/plugin/go.d/agent/jobmgr/manager.go370
-rw-r--r--src/go/plugin/go.d/agent/jobmgr/manager_test.go1892
-rw-r--r--src/go/plugin/go.d/agent/jobmgr/noop.go21
-rw-r--r--src/go/plugin/go.d/agent/jobmgr/sim_test.go152
-rw-r--r--src/go/plugin/go.d/agent/module/charts.go497
-rw-r--r--src/go/plugin/go.d/agent/module/charts_test.go383
-rw-r--r--src/go/plugin/go.d/agent/module/job.go645
-rw-r--r--src/go/plugin/go.d/agent/module/job_test.go291
-rw-r--r--src/go/plugin/go.d/agent/module/mock.go94
-rw-r--r--src/go/plugin/go.d/agent/module/mock_test.go54
-rw-r--r--src/go/plugin/go.d/agent/module/module.go77
-rw-r--r--src/go/plugin/go.d/agent/module/registry.go52
-rw-r--r--src/go/plugin/go.d/agent/module/registry_test.go34
-rw-r--r--src/go/plugin/go.d/agent/netdataapi/api.go213
-rw-r--r--src/go/plugin/go.d/agent/netdataapi/api_test.go265
-rw-r--r--src/go/plugin/go.d/agent/safewriter/writer.go30
-rw-r--r--src/go/plugin/go.d/agent/setup.go207
-rw-r--r--src/go/plugin/go.d/agent/setup_test.go209
-rw-r--r--src/go/plugin/go.d/agent/testdata/agent-empty.conf0
-rw-r--r--src/go/plugin/go.d/agent/testdata/agent-invalid-syntax.conf7
-rw-r--r--src/go/plugin/go.d/agent/testdata/agent-valid.conf7
-rw-r--r--src/go/plugin/go.d/agent/ticker/ticker.go55
-rw-r--r--src/go/plugin/go.d/agent/ticker/ticket_test.go50
-rw-r--r--src/go/plugin/go.d/agent/vnodes/testdata/config.yaml11
-rw-r--r--src/go/plugin/go.d/agent/vnodes/vnodes.go114
-rw-r--r--src/go/plugin/go.d/agent/vnodes/vnodes_test.go27
-rw-r--r--src/go/plugin/go.d/cli/cli.go42
-rw-r--r--src/go/plugin/go.d/config/go.d.conf123
-rw-r--r--src/go/plugin/go.d/config/go.d/activemq.conf10
-rw-r--r--src/go/plugin/go.d/config/go.d/adaptec_raid.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/ap.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/apache.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/beanstalk.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/bind.conf9
-rw-r--r--src/go/plugin/go.d/config/go.d/cassandra.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/chrony.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/clickhouse.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/cockroachdb.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/consul.conf7
-rw-r--r--src/go/plugin/go.d/config/go.d/coredns.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/couchbase.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/couchdb.conf10
-rw-r--r--src/go/plugin/go.d/config/go.d/dmcache.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/dns_query.conf14
-rw-r--r--src/go/plugin/go.d/config/go.d/dnsdist.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/dnsmasq.conf7
-rw-r--r--src/go/plugin/go.d/config/go.d/dnsmasq_dhcp.conf13
-rw-r--r--src/go/plugin/go.d/config/go.d/docker.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/docker_engine.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/dockerhub.conf9
-rw-r--r--src/go/plugin/go.d/config/go.d/dovecot.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/elasticsearch.conf7
-rw-r--r--src/go/plugin/go.d/config/go.d/envoy.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/example.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/exim.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/fail2ban.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/filecheck.conf16
-rw-r--r--src/go/plugin/go.d/config/go.d/fluentd.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/freeradius.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/gearman.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/geth.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/haproxy.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/hddtemp.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/hdfs.conf9
-rw-r--r--src/go/plugin/go.d/config/go.d/hpssa.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/httpcheck.conf12
-rw-r--r--src/go/plugin/go.d/config/go.d/icecast.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/intelgpu.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/ipfs.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/isc_dhcpd.conf23
-rw-r--r--src/go/plugin/go.d/config/go.d/k8s_kubelet.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/k8s_kubeproxy.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/k8s_state.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/lighttpd.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/litespeed.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/logind.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/logstash.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/lvm.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/megacli.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/memcached.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/mongodb.conf10
-rw-r--r--src/go/plugin/go.d/config/go.d/monit.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/mysql.conf12
-rw-r--r--src/go/plugin/go.d/config/go.d/nginx.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/nginxplus.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/nginxvts.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/nsd.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/ntpd.conf7
-rw-r--r--src/go/plugin/go.d/config/go.d/nvidia_smi.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/nvme.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/openvpn.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/openvpn_status_log.conf9
-rw-r--r--src/go/plugin/go.d/config/go.d/pgbouncer.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/phpdaemon.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/phpfpm.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/pihole.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/pika.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/ping.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/portcheck.conf11
-rw-r--r--src/go/plugin/go.d/config/go.d/postfix.conf12
-rw-r--r--src/go/plugin/go.d/config/go.d/postgres.conf10
-rw-r--r--src/go/plugin/go.d/config/go.d/powerdns.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/powerdns_recursor.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/prometheus.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/proxysql.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/pulsar.conf7
-rw-r--r--src/go/plugin/go.d/config/go.d/puppet.conf7
-rw-r--r--src/go/plugin/go.d/config/go.d/rabbitmq.conf9
-rw-r--r--src/go/plugin/go.d/config/go.d/redis.conf12
-rw-r--r--src/go/plugin/go.d/config/go.d/rethinkdb.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/riakkv.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/rspamd.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/scaleio.conf13
-rw-r--r--src/go/plugin/go.d/config/go.d/sd/docker.conf262
-rw-r--r--src/go/plugin/go.d/config/go.d/sd/net_listeners.conf541
-rw-r--r--src/go/plugin/go.d/config/go.d/sensors.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/smartctl.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/snmp.conf10
-rw-r--r--src/go/plugin/go.d/config/go.d/squid.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/squidlog.conf9
-rw-r--r--src/go/plugin/go.d/config/go.d/storcli.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/supervisord.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/systemdunits.conf16
-rw-r--r--src/go/plugin/go.d/config/go.d/tengine.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/tomcat.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/tor.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/traefik.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/unbound.conf13
-rw-r--r--src/go/plugin/go.d/config/go.d/upsd.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/uwsgi.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/vcsa.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/vernemq.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/vsphere.conf13
-rw-r--r--src/go/plugin/go.d/config/go.d/web_log.conf44
-rw-r--r--src/go/plugin/go.d/config/go.d/whoisquery.conf6
-rw-r--r--src/go/plugin/go.d/config/go.d/windows.conf8
-rw-r--r--src/go/plugin/go.d/config/go.d/wireguard.conf5
-rw-r--r--src/go/plugin/go.d/config/go.d/x509check.conf12
-rw-r--r--src/go/plugin/go.d/config/go.d/zfspool.conf9
-rw-r--r--src/go/plugin/go.d/config/go.d/zookeeper.conf6
-rw-r--r--src/go/plugin/go.d/docs/how-to-write-a-module.md302
-rw-r--r--src/go/plugin/go.d/examples/simple/main.go130
-rwxr-xr-xsrc/go/plugin/go.d/hack/go-build.sh109
-rwxr-xr-xsrc/go/plugin/go.d/hack/go-fmt.sh8
l---------src/go/plugin/go.d/modules/activemq/README.md1
-rw-r--r--src/go/plugin/go.d/modules/activemq/activemq.go138
-rw-r--r--src/go/plugin/go.d/modules/activemq/activemq_test.go340
-rw-r--r--src/go/plugin/go.d/modules/activemq/apiclient.go137
-rw-r--r--src/go/plugin/go.d/modules/activemq/charts.go46
-rw-r--r--src/go/plugin/go.d/modules/activemq/collect.go185
-rw-r--r--src/go/plugin/go.d/modules/activemq/config_schema.json234
-rw-r--r--src/go/plugin/go.d/modules/activemq/init.go32
-rw-r--r--src/go/plugin/go.d/modules/activemq/integrations/activemq.md268
-rw-r--r--src/go/plugin/go.d/modules/activemq/metadata.yaml230
-rw-r--r--src/go/plugin/go.d/modules/activemq/testdata/config.json25
-rw-r--r--src/go/plugin/go.d/modules/activemq/testdata/config.yaml22
l---------src/go/plugin/go.d/modules/adaptecraid/README.md1
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/adaptec.go108
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/adaptec_test.go281
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/charts.go129
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/collect.go28
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/collect_ld.go100
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/collect_pd.go128
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/exec.go50
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/init.go23
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/integrations/adaptec_raid.md229
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/metadata.yaml146
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/testdata/config.yaml2
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-ld-current.txt30
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-ld-old.txt33
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-pd-current.txt216
-rw-r--r--src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-pd-old.txt107
l---------src/go/plugin/go.d/modules/ap/README.md1
-rw-r--r--src/go/plugin/go.d/modules/ap/ap.go113
-rw-r--r--src/go/plugin/go.d/modules/ap/ap_test.go292
-rw-r--r--src/go/plugin/go.d/modules/ap/charts.go147
-rw-r--r--src/go/plugin/go.d/modules/ap/collect.go221
-rw-r--r--src/go/plugin/go.d/modules/ap/config_schema.json47
-rw-r--r--src/go/plugin/go.d/modules/ap/exec.go56
-rw-r--r--src/go/plugin/go.d/modules/ap/init.go37
-rw-r--r--src/go/plugin/go.d/modules/ap/integrations/access_points.md202
-rw-r--r--src/go/plugin/go.d/modules/ap/metadata.yaml141
-rw-r--r--src/go/plugin/go.d/modules/ap/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/ap/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/ap/testdata/iw_dev_ap.txt25
-rw-r--r--src/go/plugin/go.d/modules/ap/testdata/iw_dev_managed.txt11
-rw-r--r--src/go/plugin/go.d/modules/ap/testdata/station_dump.txt58
l---------src/go/plugin/go.d/modules/apache/README.md1
-rw-r--r--src/go/plugin/go.d/modules/apache/apache.go116
-rw-r--r--src/go/plugin/go.d/modules/apache/apache_test.go345
-rw-r--r--src/go/plugin/go.d/modules/apache/charts.go189
-rw-r--r--src/go/plugin/go.d/modules/apache/collect.go163
-rw-r--r--src/go/plugin/go.d/modules/apache/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/apache/init.go25
-rw-r--r--src/go/plugin/go.d/modules/apache/integrations/apache.md273
-rw-r--r--src/go/plugin/go.d/modules/apache/integrations/httpd.md273
-rw-r--r--src/go/plugin/go.d/modules/apache/metadata.yaml302
-rw-r--r--src/go/plugin/go.d/modules/apache/metrics.go61
-rw-r--r--src/go/plugin/go.d/modules/apache/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/apache/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/apache/testdata/extended-status-mpm-event.txt39
-rw-r--r--src/go/plugin/go.d/modules/apache/testdata/extended-status-mpm-prefork.txt48
-rw-r--r--src/go/plugin/go.d/modules/apache/testdata/lighttpd-status.txt6
-rw-r--r--src/go/plugin/go.d/modules/apache/testdata/simple-status-mpm-event.txt24
l---------src/go/plugin/go.d/modules/beanstalk/README.md1
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/beanstalk.go123
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/beanstalk_test.go384
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/charts.go333
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/client.go249
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/collect.go118
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/config_schema.json54
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/init.go29
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/integrations/beanstalk.md253
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/metadata.yaml255
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/testdata/config.json6
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/testdata/config.yaml4
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/testdata/list-tubes.txt3
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/testdata/stats-tube-default.txt16
-rw-r--r--src/go/plugin/go.d/modules/beanstalk/testdata/stats.txt50
-rw-r--r--src/go/plugin/go.d/modules/bind/README.md117
-rw-r--r--src/go/plugin/go.d/modules/bind/bind.go136
-rw-r--r--src/go/plugin/go.d/modules/bind/bind_test.go532
-rw-r--r--src/go/plugin/go.d/modules/bind/charts.go196
-rw-r--r--src/go/plugin/go.d/modules/bind/collect.go200
-rw-r--r--src/go/plugin/go.d/modules/bind/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/bind/init.go37
-rw-r--r--src/go/plugin/go.d/modules/bind/json_client.go82
-rw-r--r--src/go/plugin/go.d/modules/bind/testdata/config.json21
-rw-r--r--src/go/plugin/go.d/modules/bind/testdata/config.yaml18
-rw-r--r--src/go/plugin/go.d/modules/bind/testdata/query-server.json302
-rw-r--r--src/go/plugin/go.d/modules/bind/testdata/query-server.xml470
-rw-r--r--src/go/plugin/go.d/modules/bind/xml3_client.go133
l---------src/go/plugin/go.d/modules/cassandra/README.md1
-rw-r--r--src/go/plugin/go.d/modules/cassandra/cassandra.go118
-rw-r--r--src/go/plugin/go.d/modules/cassandra/cassandra_test.go298
-rw-r--r--src/go/plugin/go.d/modules/cassandra/charts.go461
-rw-r--r--src/go/plugin/go.d/modules/cassandra/collect.go403
-rw-r--r--src/go/plugin/go.d/modules/cassandra/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/cassandra/init.go25
-rw-r--r--src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md313
-rw-r--r--src/go/plugin/go.d/modules/cassandra/jmx_exporter.yaml31
-rw-r--r--src/go/plugin/go.d/modules/cassandra/metadata.yaml410
-rw-r--r--src/go/plugin/go.d/modules/cassandra/metrics.go103
-rw-r--r--src/go/plugin/go.d/modules/cassandra/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/cassandra/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/cassandra/testdata/metrics.txt402
l---------src/go/plugin/go.d/modules/chrony/README.md1
-rw-r--r--src/go/plugin/go.d/modules/chrony/charts.go320
-rw-r--r--src/go/plugin/go.d/modules/chrony/chrony.go112
-rw-r--r--src/go/plugin/go.d/modules/chrony/chrony_test.go326
-rw-r--r--src/go/plugin/go.d/modules/chrony/client.go171
-rw-r--r--src/go/plugin/go.d/modules/chrony/collect.go156
-rw-r--r--src/go/plugin/go.d/modules/chrony/config_schema.json43
-rw-r--r--src/go/plugin/go.d/modules/chrony/init.go14
-rw-r--r--src/go/plugin/go.d/modules/chrony/integrations/chrony.md222
-rw-r--r--src/go/plugin/go.d/modules/chrony/metadata.yaml208
-rw-r--r--src/go/plugin/go.d/modules/chrony/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/chrony/testdata/config.yaml3
l---------src/go/plugin/go.d/modules/clickhouse/README.md1
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/charts.go1005
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/clickhouse.go123
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/clickhouse_test.go315
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/collect.go96
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/collect_system_async_metrics.go61
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/collect_system_disks.go82
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/collect_system_events.go120
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/collect_system_metrics.go75
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/collect_system_parts.go98
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/collect_system_processes.go29
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/init.go21
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md368
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/metadata.yaml624
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/testdata/resp_longest_query_time.csv2
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_async_metrics.csv434
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_disks.csv2
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_events.csv102
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_metrics.csv283
-rw-r--r--src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_parts.csv6
l---------src/go/plugin/go.d/modules/cockroachdb/README.md1
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/charts.go850
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/cockroachdb.go121
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/cockroachdb_test.go333
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/collect.go160
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/init.go25
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/integrations/cockroachdb.md323
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/metadata.yaml620
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/metrics.go376
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/testdata/metrics.txt2952
-rw-r--r--src/go/plugin/go.d/modules/cockroachdb/testdata/non_cockroachdb.txt27
l---------src/go/plugin/go.d/modules/consul/README.md1
-rw-r--r--src/go/plugin/go.d/modules/consul/charts.go739
-rw-r--r--src/go/plugin/go.d/modules/consul/collect.go116
-rw-r--r--src/go/plugin/go.d/modules/consul/collect_autopilot.go62
-rw-r--r--src/go/plugin/go.d/modules/consul/collect_checks.go47
-rw-r--r--src/go/plugin/go.d/modules/consul/collect_config.go71
-rw-r--r--src/go/plugin/go.d/modules/consul/collect_metrics.go205
-rw-r--r--src/go/plugin/go.d/modules/consul/collect_net_rtt.go75
-rw-r--r--src/go/plugin/go.d/modules/consul/config_schema.json193
-rw-r--r--src/go/plugin/go.d/modules/consul/consul.go136
-rw-r--r--src/go/plugin/go.d/modules/consul/consul_test.go721
-rw-r--r--src/go/plugin/go.d/modules/consul/init.go48
-rw-r--r--src/go/plugin/go.d/modules/consul/integrations/consul.md359
-rw-r--r--src/go/plugin/go.d/modules/consul/metadata.yaml599
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/config.json21
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/config.yaml18
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/client_v1-agent-metrics.txt989
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/client_v1-agent-self.json50
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-metrics.txt1255
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt1509
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self.json50
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_cloud-managed.json50
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_disabled_prom.json50
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_with_hostname.json50
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-coordinate-nodes.json59
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-operator-autopilot-health.json48
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.13.2/v1-agent-checks.json68
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-metrics.txt1502
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-self.json71
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json42
-rw-r--r--src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/v1-agent-checks.json68
l---------src/go/plugin/go.d/modules/coredns/README.md1
-rw-r--r--src/go/plugin/go.d/modules/coredns/charts.go326
-rw-r--r--src/go/plugin/go.d/modules/coredns/collect.go713
-rw-r--r--src/go/plugin/go.d/modules/coredns/config_schema.json270
-rw-r--r--src/go/plugin/go.d/modules/coredns/coredns.go141
-rw-r--r--src/go/plugin/go.d/modules/coredns/coredns_test.go573
-rw-r--r--src/go/plugin/go.d/modules/coredns/init.go40
-rw-r--r--src/go/plugin/go.d/modules/coredns/integrations/coredns.md329
-rw-r--r--src/go/plugin/go.d/modules/coredns/metadata.yaml459
-rw-r--r--src/go/plugin/go.d/modules/coredns/metrics.go111
-rw-r--r--src/go/plugin/go.d/modules/coredns/testdata/config.json36
-rw-r--r--src/go/plugin/go.d/modules/coredns/testdata/config.yaml27
-rw-r--r--src/go/plugin/go.d/modules/coredns/testdata/no_version/no_load.txt6
-rw-r--r--src/go/plugin/go.d/modules/coredns/testdata/version169/no_load.txt6
-rw-r--r--src/go/plugin/go.d/modules/coredns/testdata/version169/some_load.txt180
-rw-r--r--src/go/plugin/go.d/modules/coredns/testdata/version170/no_load.txt6
-rw-r--r--src/go/plugin/go.d/modules/coredns/testdata/version170/some_load.txt38
l---------src/go/plugin/go.d/modules/couchbase/README.md1
-rw-r--r--src/go/plugin/go.d/modules/couchbase/charts.go84
-rw-r--r--src/go/plugin/go.d/modules/couchbase/collect.go154
-rw-r--r--src/go/plugin/go.d/modules/couchbase/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/couchbase/couchbase.go122
-rw-r--r--src/go/plugin/go.d/modules/couchbase/couchbase_test.go240
-rw-r--r--src/go/plugin/go.d/modules/couchbase/init.go39
-rw-r--r--src/go/plugin/go.d/modules/couchbase/integrations/couchbase.md247
-rw-r--r--src/go/plugin/go.d/modules/couchbase/metadata.yaml214
-rw-r--r--src/go/plugin/go.d/modules/couchbase/metrics.go33
-rw-r--r--src/go/plugin/go.d/modules/couchbase/testdata/6.6.0/buckets_basic_stats.json422
-rw-r--r--src/go/plugin/go.d/modules/couchbase/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/couchbase/testdata/config.yaml17
l---------src/go/plugin/go.d/modules/couchdb/README.md1
-rw-r--r--src/go/plugin/go.d/modules/couchdb/charts.go228
-rw-r--r--src/go/plugin/go.d/modules/couchdb/collect.go240
-rw-r--r--src/go/plugin/go.d/modules/couchdb/config_schema.json197
-rw-r--r--src/go/plugin/go.d/modules/couchdb/couchdb.go134
-rw-r--r--src/go/plugin/go.d/modules/couchdb/couchdb_test.go464
-rw-r--r--src/go/plugin/go.d/modules/couchdb/init.go66
-rw-r--r--src/go/plugin/go.d/modules/couchdb/integrations/couchdb.md260
-rw-r--r--src/go/plugin/go.d/modules/couchdb/metadata.yaml323
-rw-r--r--src/go/plugin/go.d/modules/couchdb/metrics.go200
-rw-r--r--src/go/plugin/go.d/modules/couchdb/testdata/config.json22
-rw-r--r--src/go/plugin/go.d/modules/couchdb/testdata/config.yaml19
-rw-r--r--src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/active_tasks.json63
-rw-r--r--src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/dbs_info.json52
-rw-r--r--src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/node_stats.json1651
-rw-r--r--src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/node_system.json176
-rw-r--r--src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/root.json16
l---------src/go/plugin/go.d/modules/dmcache/README.md1
-rw-r--r--src/go/plugin/go.d/modules/dmcache/charts.go149
-rw-r--r--src/go/plugin/go.d/modules/dmcache/collect.go173
-rw-r--r--src/go/plugin/go.d/modules/dmcache/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/dmcache/dmcache.go105
-rw-r--r--src/go/plugin/go.d/modules/dmcache/dmcache_test.go253
-rw-r--r--src/go/plugin/go.d/modules/dmcache/exec.go42
-rw-r--r--src/go/plugin/go.d/modules/dmcache/init.go23
-rw-r--r--src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md198
-rw-r--r--src/go/plugin/go.d/modules/dmcache/metadata.yaml131
-rw-r--r--src/go/plugin/go.d/modules/dmcache/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/dmcache/testdata/config.yaml2
l---------src/go/plugin/go.d/modules/dnsdist/README.md1
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/charts.go151
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/collect.go76
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/dnsdist.go121
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/dnsdist_test.go265
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/init.go31
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/integrations/dnsdist.md245
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/metadata.yaml259
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/metrics.go41
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/dnsdist/testdata/v1.5.1/jsonstat.json56
l---------src/go/plugin/go.d/modules/dnsmasq/README.md1
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/charts.go51
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/collect.go139
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/config_schema.json61
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/dnsmasq.go123
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/dnsmasq_test.go278
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/init.go43
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/integrations/dnsmasq.md230
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/metadata.yaml144
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/testdata/config.json6
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq/testdata/config.yaml4
l---------src/go/plugin/go.d/modules/dnsmasq_dhcp/README.md1
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/charts.go111
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/collect.go166
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/config_schema.json50
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp.go111
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp_test.go209
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/init.go21
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md240
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml151
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/parse_configuration.go393
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/config.json6
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/config.yaml4
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.conf77
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/.dnsmasq.conf1
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv4.any10
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv6.any10
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv4.any10
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv6.any10
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/~dnsmasq.conf1
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasq.bak1
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv4.any10
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv6.any3
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasq.other1
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv4.conf10
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv6.conf10
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.leases19
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq2.conf6
-rw-r--r--src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq3.conf4
l---------src/go/plugin/go.d/modules/dnsquery/README.md1
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/charts.go64
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/collect.go73
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/config_schema.json133
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/dnsquery.go121
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/dnsquery_test.go242
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/init.go98
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/integrations/dns_query.md216
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/metadata.yaml142
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/testdata/config.json16
-rw-r--r--src/go/plugin/go.d/modules/dnsquery/testdata/config.yaml11
l---------src/go/plugin/go.d/modules/docker/README.md1
-rw-r--r--src/go/plugin/go.d/modules/docker/charts.go174
-rw-r--r--src/go/plugin/go.d/modules/docker/collect.go200
-rw-r--r--src/go/plugin/go.d/modules/docker/config_schema.json52
-rw-r--r--src/go/plugin/go.d/modules/docker/docker.go127
-rw-r--r--src/go/plugin/go.d/modules/docker/docker_test.go852
-rw-r--r--src/go/plugin/go.d/modules/docker/integrations/docker.md243
-rw-r--r--src/go/plugin/go.d/modules/docker/metadata.yaml190
-rw-r--r--src/go/plugin/go.d/modules/docker/testdata/config.json6
-rw-r--r--src/go/plugin/go.d/modules/docker/testdata/config.yaml4
l---------src/go/plugin/go.d/modules/docker_engine/README.md1
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/charts.go136
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/collect.go212
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/docker_engine.go125
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/docker_engine_test.go372
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/init.go25
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/integrations/docker_engine.md264
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/metadata.yaml263
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/metrics.go74
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/testdata/non-docker-engine.txt0
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/testdata/v17.05.0-ce.txt460
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/testdata/v18.09.3-ce-swarm.txt468
-rw-r--r--src/go/plugin/go.d/modules/docker_engine/testdata/v18.09.3-ce.txt465
l---------src/go/plugin/go.d/modules/dockerhub/README.md1
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/apiclient.go83
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/charts.go90
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/collect.go65
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/config_schema.json203
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/dockerhub.go110
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/dockerhub_test.go159
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/init.go26
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/integrations/docker_hub_repository.md209
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/metadata.yaml190
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/testdata/config.json23
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/testdata/config.yaml19
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/testdata/repo1.txt22
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/testdata/repo2.txt22
-rw-r--r--src/go/plugin/go.d/modules/dockerhub/testdata/repo3.txt22
l---------src/go/plugin/go.d/modules/dovecot/README.md1
-rw-r--r--src/go/plugin/go.d/modules/dovecot/charts.go185
-rw-r--r--src/go/plugin/go.d/modules/dovecot/client.go54
-rw-r--r--src/go/plugin/go.d/modules/dovecot/collect.go89
-rw-r--r--src/go/plugin/go.d/modules/dovecot/config_schema.json47
-rw-r--r--src/go/plugin/go.d/modules/dovecot/dovecot.go101
-rw-r--r--src/go/plugin/go.d/modules/dovecot/dovecot_test.go281
-rw-r--r--src/go/plugin/go.d/modules/dovecot/integrations/dovecot.md244
-rw-r--r--src/go/plugin/go.d/modules/dovecot/metadata.yaml194
-rw-r--r--src/go/plugin/go.d/modules/dovecot/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/dovecot/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/dovecot/testdata/export_global.txt2
l---------src/go/plugin/go.d/modules/elasticsearch/README.md1
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/charts.go845
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/collect.go307
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/config_schema.json218
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/elasticsearch.go136
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/elasticsearch_test.go743
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/init.go27
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/integrations/elasticsearch.md378
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/integrations/opensearch.md378
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/metadata.yaml634
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/metrics.go277
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/testdata/config.json25
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/testdata/config.yaml22
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cat_indices_stats.json50
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cluster_health.json17
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cluster_stats.json377
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/info.json17
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/nodes_local_stats.json867
-rw-r--r--src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/nodes_stats.json2577
l---------src/go/plugin/go.d/modules/envoy/README.md1
-rw-r--r--src/go/plugin/go.d/modules/envoy/charts.go870
-rw-r--r--src/go/plugin/go.d/modules/envoy/collect.go423
-rw-r--r--src/go/plugin/go.d/modules/envoy/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/envoy/envoy.go126
-rw-r--r--src/go/plugin/go.d/modules/envoy/envoy_test.go567
-rw-r--r--src/go/plugin/go.d/modules/envoy/init.go26
-rw-r--r--src/go/plugin/go.d/modules/envoy/integrations/envoy.md306
-rw-r--r--src/go/plugin/go.d/modules/envoy/metadata.yaml538
-rw-r--r--src/go/plugin/go.d/modules/envoy/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/envoy/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/envoy/testdata/consul-dataplane.txt1786
-rw-r--r--src/go/plugin/go.d/modules/envoy/testdata/envoy.txt929
-rw-r--r--src/go/plugin/go.d/modules/example/README.md80
-rw-r--r--src/go/plugin/go.d/modules/example/charts.go59
-rw-r--r--src/go/plugin/go.d/modules/example/collect.go47
-rw-r--r--src/go/plugin/go.d/modules/example/config_schema.json177
-rw-r--r--src/go/plugin/go.d/modules/example/example.go110
-rw-r--r--src/go/plugin/go.d/modules/example/example_test.go351
-rw-r--r--src/go/plugin/go.d/modules/example/init.go63
-rw-r--r--src/go/plugin/go.d/modules/example/testdata/config.json17
-rw-r--r--src/go/plugin/go.d/modules/example/testdata/config.yaml13
l---------src/go/plugin/go.d/modules/exim/README.md1
-rw-r--r--src/go/plugin/go.d/modules/exim/charts.go27
-rw-r--r--src/go/plugin/go.d/modules/exim/collect.go43
-rw-r--r--src/go/plugin/go.d/modules/exim/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/exim/exec.go47
-rw-r--r--src/go/plugin/go.d/modules/exim/exim.go97
-rw-r--r--src/go/plugin/go.d/modules/exim/exim_test.go217
-rw-r--r--src/go/plugin/go.d/modules/exim/init.go23
-rw-r--r--src/go/plugin/go.d/modules/exim/integrations/exim.md191
-rw-r--r--src/go/plugin/go.d/modules/exim/metadata.yaml100
-rw-r--r--src/go/plugin/go.d/modules/exim/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/exim/testdata/config.yaml2
l---------src/go/plugin/go.d/modules/fail2ban/README.md1
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/charts.go75
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/collect.go163
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/exec.go77
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/fail2ban.go112
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/fail2ban_test.go238
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/init.go23
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/integrations/fail2ban.md204
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/metadata.yaml114
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/testdata/config.yaml2
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/testdata/fail2ban-jail-status.txt9
-rw-r--r--src/go/plugin/go.d/modules/fail2ban/testdata/fail2ban-status.txt3
l---------src/go/plugin/go.d/modules/filecheck/README.md1
-rw-r--r--src/go/plugin/go.d/modules/filecheck/cache.go28
-rw-r--r--src/go/plugin/go.d/modules/filecheck/charts.go266
-rw-r--r--src/go/plugin/go.d/modules/filecheck/collect.go40
-rw-r--r--src/go/plugin/go.d/modules/filecheck/collect_dirs.go91
-rw-r--r--src/go/plugin/go.d/modules/filecheck/collect_files.go59
-rw-r--r--src/go/plugin/go.d/modules/filecheck/config_schema.json164
-rw-r--r--src/go/plugin/go.d/modules/filecheck/discover.go43
-rw-r--r--src/go/plugin/go.d/modules/filecheck/filecheck.go128
-rw-r--r--src/go/plugin/go.d/modules/filecheck/filecheck_test.go350
-rw-r--r--src/go/plugin/go.d/modules/filecheck/init.go38
-rw-r--r--src/go/plugin/go.d/modules/filecheck/integrations/files_and_directories.md280
-rw-r--r--src/go/plugin/go.d/modules/filecheck/metadata.yaml198
-rw-r--r--src/go/plugin/go.d/modules/filecheck/testdata/config.json21
-rw-r--r--src/go/plugin/go.d/modules/filecheck/testdata/config.yaml13
-rw-r--r--src/go/plugin/go.d/modules/filecheck/testdata/dir/empty_file.log0
-rw-r--r--src/go/plugin/go.d/modules/filecheck/testdata/dir/file.log61
-rw-r--r--src/go/plugin/go.d/modules/filecheck/testdata/dir/subdir/empty_file.log0
-rw-r--r--src/go/plugin/go.d/modules/filecheck/testdata/empty_file.log0
-rw-r--r--src/go/plugin/go.d/modules/filecheck/testdata/file.log42
l---------src/go/plugin/go.d/modules/fluentd/README.md1
-rw-r--r--src/go/plugin/go.d/modules/fluentd/apiclient.go101
-rw-r--r--src/go/plugin/go.d/modules/fluentd/charts.go37
-rw-r--r--src/go/plugin/go.d/modules/fluentd/collect.go66
-rw-r--r--src/go/plugin/go.d/modules/fluentd/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/fluentd/fluentd.go122
-rw-r--r--src/go/plugin/go.d/modules/fluentd/fluentd_test.go127
-rw-r--r--src/go/plugin/go.d/modules/fluentd/init.go35
-rw-r--r--src/go/plugin/go.d/modules/fluentd/integrations/fluentd.md256
-rw-r--r--src/go/plugin/go.d/modules/fluentd/metadata.yaml192
-rw-r--r--src/go/plugin/go.d/modules/fluentd/testdata/config.json21
-rw-r--r--src/go/plugin/go.d/modules/fluentd/testdata/config.yaml18
-rw-r--r--src/go/plugin/go.d/modules/fluentd/testdata/plugins.json101
l---------src/go/plugin/go.d/modules/freeradius/README.md1
-rw-r--r--src/go/plugin/go.d/modules/freeradius/api/client.go174
-rw-r--r--src/go/plugin/go.d/modules/freeradius/api/client_test.go152
-rw-r--r--src/go/plugin/go.d/modules/freeradius/api/dictionary.go2683
-rw-r--r--src/go/plugin/go.d/modules/freeradius/charts.go139
-rw-r--r--src/go/plugin/go.d/modules/freeradius/collect.go16
-rw-r--r--src/go/plugin/go.d/modules/freeradius/config_schema.json60
-rw-r--r--src/go/plugin/go.d/modules/freeradius/freeradius.go106
-rw-r--r--src/go/plugin/go.d/modules/freeradius/freeradius_test.go204
-rw-r--r--src/go/plugin/go.d/modules/freeradius/init.go20
-rw-r--r--src/go/plugin/go.d/modules/freeradius/integrations/freeradius.md234
-rw-r--r--src/go/plugin/go.d/modules/freeradius/metadata.yaml206
-rw-r--r--src/go/plugin/go.d/modules/freeradius/testdata/config.json7
-rw-r--r--src/go/plugin/go.d/modules/freeradius/testdata/config.yaml5
l---------src/go/plugin/go.d/modules/gearman/README.md1
-rw-r--r--src/go/plugin/go.d/modules/gearman/charts.go158
-rw-r--r--src/go/plugin/go.d/modules/gearman/client.go80
-rw-r--r--src/go/plugin/go.d/modules/gearman/collect.go221
-rw-r--r--src/go/plugin/go.d/modules/gearman/config_schema.json44
-rw-r--r--src/go/plugin/go.d/modules/gearman/gearman.go106
-rw-r--r--src/go/plugin/go.d/modules/gearman/gearman_test.go326
-rw-r--r--src/go/plugin/go.d/modules/gearman/integrations/gearman.md235
-rw-r--r--src/go/plugin/go.d/modules/gearman/metadata.yaml152
-rw-r--r--src/go/plugin/go.d/modules/gearman/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/gearman/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/gearman/testdata/priority-status.txt5
-rw-r--r--src/go/plugin/go.d/modules/gearman/testdata/status.txt5
l---------src/go/plugin/go.d/modules/geth/README.md1
-rw-r--r--src/go/plugin/go.d/modules/geth/charts.go220
-rw-r--r--src/go/plugin/go.d/modules/geth/collect.go92
-rw-r--r--src/go/plugin/go.d/modules/geth/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/geth/geth.go108
-rw-r--r--src/go/plugin/go.d/modules/geth/geth_test.go30
-rw-r--r--src/go/plugin/go.d/modules/geth/init.go24
-rw-r--r--src/go/plugin/go.d/modules/geth/integrations/go-ethereum.md252
-rw-r--r--src/go/plugin/go.d/modules/geth/metadata.yaml291
-rw-r--r--src/go/plugin/go.d/modules/geth/metrics.go54
-rw-r--r--src/go/plugin/go.d/modules/geth/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/geth/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/geth/testdata/metrics_geth.txt1569
l---------src/go/plugin/go.d/modules/haproxy/README.md1
-rw-r--r--src/go/plugin/go.d/modules/haproxy/charts.go112
-rw-r--r--src/go/plugin/go.d/modules/haproxy/collect.go143
-rw-r--r--src/go/plugin/go.d/modules/haproxy/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/haproxy/haproxy.go115
-rw-r--r--src/go/plugin/go.d/modules/haproxy/haproxy_test.go263
-rw-r--r--src/go/plugin/go.d/modules/haproxy/init.go44
-rw-r--r--src/go/plugin/go.d/modules/haproxy/integrations/haproxy.md276
-rw-r--r--src/go/plugin/go.d/modules/haproxy/metadata.yaml231
-rw-r--r--src/go/plugin/go.d/modules/haproxy/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/haproxy/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/haproxy/testdata/v2.3.10/metrics.txt382
l---------src/go/plugin/go.d/modules/hddtemp/README.md1
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/charts.go70
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/client.go44
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/collect.go140
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/config_schema.json44
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/hddtemp.go105
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/hddtemp_test.go321
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/integrations/hdd_temperature.md224
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/metadata.yaml134
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/testdata/hddtemp-all-ok.txt1
-rw-r--r--src/go/plugin/go.d/modules/hddtemp/testdata/hddtemp-all-sleep.txt1
l---------src/go/plugin/go.d/modules/hdfs/README.md1
-rw-r--r--src/go/plugin/go.d/modules/hdfs/charts.go328
-rw-r--r--src/go/plugin/go.d/modules/hdfs/client.go69
-rw-r--r--src/go/plugin/go.d/modules/hdfs/collect.go201
-rw-r--r--src/go/plugin/go.d/modules/hdfs/config_schema.json186
-rw-r--r--src/go/plugin/go.d/modules/hdfs/hdfs.go132
-rw-r--r--src/go/plugin/go.d/modules/hdfs/hdfs_test.go316
-rw-r--r--src/go/plugin/go.d/modules/hdfs/init.go25
-rw-r--r--src/go/plugin/go.d/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md286
-rw-r--r--src/go/plugin/go.d/modules/hdfs/metadata.yaml388
-rw-r--r--src/go/plugin/go.d/modules/hdfs/metrics.go245
-rw-r--r--src/go/plugin/go.d/modules/hdfs/raw_data.go51
-rw-r--r--src/go/plugin/go.d/modules/hdfs/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/hdfs/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/hdfs/testdata/datanode.json165
-rw-r--r--src/go/plugin/go.d/modules/hdfs/testdata/namenode.json132
-rw-r--r--src/go/plugin/go.d/modules/hdfs/testdata/unknownnode.json34
l---------src/go/plugin/go.d/modules/hpssa/README.md1
-rw-r--r--src/go/plugin/go.d/modules/hpssa/charts.go403
-rw-r--r--src/go/plugin/go.d/modules/hpssa/collect.go139
-rw-r--r--src/go/plugin/go.d/modules/hpssa/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/hpssa/exec.go46
-rw-r--r--src/go/plugin/go.d/modules/hpssa/hpssa.go111
-rw-r--r--src/go/plugin/go.d/modules/hpssa/hpssa_test.go430
-rw-r--r--src/go/plugin/go.d/modules/hpssa/init.go23
-rw-r--r--src/go/plugin/go.d/modules/hpssa/integrations/hpe_smart_arrays.md271
-rw-r--r--src/go/plugin/go.d/modules/hpssa/metadata.yaml213
-rw-r--r--src/go/plugin/go.d/modules/hpssa/parse.go364
-rw-r--r--src/go/plugin/go.d/modules/hpssa/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/hpssa/testdata/config.yaml2
-rw-r--r--src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P212_P410i.txt748
-rw-r--r--src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P400ar.txt397
-rw-r--r--src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P400i-unassigned.txt207
l---------src/go/plugin/go.d/modules/httpcheck/README.md1
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/charts.go75
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/collect.go189
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/config_schema.json270
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/cookiejar.go89
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/httpcheck.go156
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/httpcheck_test.go604
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/init.go85
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/integrations/http_endpoints.md364
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/metadata.yaml303
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/metrics.go20
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/testdata/config.json32
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/testdata/config.yaml25
-rw-r--r--src/go/plugin/go.d/modules/httpcheck/testdata/cookie.txt5
l---------src/go/plugin/go.d/modules/icecast/README.md1
-rw-r--r--src/go/plugin/go.d/modules/icecast/charts.go65
-rw-r--r--src/go/plugin/go.d/modules/icecast/collect.go107
-rw-r--r--src/go/plugin/go.d/modules/icecast/config_schema.json177
-rw-r--r--src/go/plugin/go.d/modules/icecast/icecast.go118
-rw-r--r--src/go/plugin/go.d/modules/icecast/icecast_test.go285
-rw-r--r--src/go/plugin/go.d/modules/icecast/integrations/icecast.md226
-rw-r--r--src/go/plugin/go.d/modules/icecast/metadata.yaml169
-rw-r--r--src/go/plugin/go.d/modules/icecast/server_stats.go45
-rw-r--r--src/go/plugin/go.d/modules/icecast/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/icecast/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/icecast/testdata/stats_multi_source.json46
-rw-r--r--src/go/plugin/go.d/modules/icecast/testdata/stats_no_sources.json11
-rw-r--r--src/go/plugin/go.d/modules/icecast/testdata/stats_single_source.json27
-rw-r--r--src/go/plugin/go.d/modules/init.go116
l---------src/go/plugin/go.d/modules/intelgpu/README.md1
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/charts.go92
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/collect.go76
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/config_schema.json33
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/exec.go162
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/init.go21
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/integrations/intel_gpu.md213
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/intelgpu.go108
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/intelgpu_test.go206
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/metadata.yaml119
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/testdata/config.yaml2
-rw-r--r--src/go/plugin/go.d/modules/intelgpu/testdata/igt.json80
l---------src/go/plugin/go.d/modules/ipfs/README.md1
-rw-r--r--src/go/plugin/go.d/modules/ipfs/charts.go105
-rw-r--r--src/go/plugin/go.d/modules/ipfs/collect.go209
-rw-r--r--src/go/plugin/go.d/modules/ipfs/config_schema.json195
-rw-r--r--src/go/plugin/go.d/modules/ipfs/integrations/ipfs.md246
-rw-r--r--src/go/plugin/go.d/modules/ipfs/ipfs.go128
-rw-r--r--src/go/plugin/go.d/modules/ipfs/ipfs_test.go278
-rw-r--r--src/go/plugin/go.d/modules/ipfs/metadata.yaml224
-rw-r--r--src/go/plugin/go.d/modules/ipfs/testdata/api_v0_pin_ls.json8
-rw-r--r--src/go/plugin/go.d/modules/ipfs/testdata/api_v0_stats_bw.json6
-rw-r--r--src/go/plugin/go.d/modules/ipfs/testdata/api_v0_stats_repo.json7
-rw-r--r--src/go/plugin/go.d/modules/ipfs/testdata/api_v0_swarm_peers.json70
-rw-r--r--src/go/plugin/go.d/modules/ipfs/testdata/config.json22
-rw-r--r--src/go/plugin/go.d/modules/ipfs/testdata/config.yaml19
l---------src/go/plugin/go.d/modules/isc_dhcpd/README.md1
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/charts.go57
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/collect.go89
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/config_schema.json70
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/init.go88
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/integrations/isc_dhcp.md228
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd.go121
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd_test.go345
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml135
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/parse.go92
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/testdata/config.json10
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/testdata/config.yaml5
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_empty0
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4370
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_backup39
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_inactive370
-rw-r--r--src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv667
l---------src/go/plugin/go.d/modules/k8s_kubelet/README.md1
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/charts.go236
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/collect.go348
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/init.go35
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/integrations/kubelet.md254
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/kubelet.go122
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/kubelet_test.go213
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/metadata.yaml331
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/metrics.go113
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/testdata/config.json21
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/testdata/config.yaml18
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/testdata/metrics.txt574
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubelet/testdata/token.txt1
l---------src/go/plugin/go.d/modules/k8s_kubeproxy/README.md1
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/charts.go108
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/collect.go146
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/init.go26
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/integrations/kubeproxy.md221
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy.go111
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy_test.go146
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/metadata.yaml227
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/metrics.go54
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/metrics.txt190
l---------src/go/plugin/go.d/modules/k8s_state/README.md1
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/charts.go785
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/client.go64
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/cluster_meta.go84
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/collect.go271
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/config_schema.json25
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/discover_kubernetes.go160
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/discover_node.go105
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/discover_pod.go105
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/init.go15
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/integrations/kubernetes_cluster_state.md253
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/kube_state.go147
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/kube_state_test.go859
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/metadata.yaml356
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/resource.go44
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/state.go165
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/testdata/config.json3
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/testdata/config.yaml1
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/update_node_state.go47
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/update_pod_state.go158
-rw-r--r--src/go/plugin/go.d/modules/k8s_state/update_state.go27
l---------src/go/plugin/go.d/modules/lighttpd/README.md1
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/apiclient.go170
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/charts.go80
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/collect.go25
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/init.go29
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/integrations/lighttpd.md266
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/lighttpd.go104
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/lighttpd_test.go155
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/metadata.yaml231
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/metrics.go33
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/testdata/apache-status.txt39
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/lighttpd/testdata/status.txt6
l---------src/go/plugin/go.d/modules/litespeed/README.md1
-rw-r--r--src/go/plugin/go.d/modules/litespeed/charts.go152
-rw-r--r--src/go/plugin/go.d/modules/litespeed/collect.go119
-rw-r--r--src/go/plugin/go.d/modules/litespeed/config_schema.json37
-rw-r--r--src/go/plugin/go.d/modules/litespeed/integrations/litespeed.md193
-rw-r--r--src/go/plugin/go.d/modules/litespeed/litespeed.go91
-rw-r--r--src/go/plugin/go.d/modules/litespeed/litespeed_test.go164
-rw-r--r--src/go/plugin/go.d/modules/litespeed/metadata.yaml148
-rw-r--r--src/go/plugin/go.d/modules/litespeed/testdata/.rtreport8
-rw-r--r--src/go/plugin/go.d/modules/litespeed/testdata/.rtreport.28
-rw-r--r--src/go/plugin/go.d/modules/litespeed/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/litespeed/testdata/config.yaml2
l---------src/go/plugin/go.d/modules/logind/README.md1
-rw-r--r--src/go/plugin/go.d/modules/logind/charts.go83
-rw-r--r--src/go/plugin/go.d/modules/logind/collect.go130
-rw-r--r--src/go/plugin/go.d/modules/logind/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/logind/connection.go75
-rw-r--r--src/go/plugin/go.d/modules/logind/doc.go3
-rw-r--r--src/go/plugin/go.d/modules/logind/integrations/systemd-logind_users.md170
-rw-r--r--src/go/plugin/go.d/modules/logind/logind.go98
-rw-r--r--src/go/plugin/go.d/modules/logind/logind_test.go350
-rw-r--r--src/go/plugin/go.d/modules/logind/metadata.yaml105
-rw-r--r--src/go/plugin/go.d/modules/logind/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/logind/testdata/config.yaml2
l---------src/go/plugin/go.d/modules/logstash/README.md1
-rw-r--r--src/go/plugin/go.d/modules/logstash/charts.go236
-rw-r--r--src/go/plugin/go.d/modules/logstash/collect.go91
-rw-r--r--src/go/plugin/go.d/modules/logstash/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/logstash/integrations/logstash.md283
-rw-r--r--src/go/plugin/go.d/modules/logstash/logstash.go114
-rw-r--r--src/go/plugin/go.d/modules/logstash/logstash_test.go253
-rw-r--r--src/go/plugin/go.d/modules/logstash/metadata.yaml274
-rw-r--r--src/go/plugin/go.d/modules/logstash/node_stats.go65
-rw-r--r--src/go/plugin/go.d/modules/logstash/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/logstash/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/logstash/testdata/stats.json252
l---------src/go/plugin/go.d/modules/lvm/README.md1
-rw-r--r--src/go/plugin/go.d/modules/lvm/charts.go66
-rw-r--r--src/go/plugin/go.d/modules/lvm/collect.go131
-rw-r--r--src/go/plugin/go.d/modules/lvm/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/lvm/exec.go47
-rw-r--r--src/go/plugin/go.d/modules/lvm/init.go23
-rw-r--r--src/go/plugin/go.d/modules/lvm/integrations/lvm_logical_volumes.md202
-rw-r--r--src/go/plugin/go.d/modules/lvm/lvm.go105
-rw-r--r--src/go/plugin/go.d/modules/lvm/lvm_test.go237
-rw-r--r--src/go/plugin/go.d/modules/lvm/metadata.yaml115
-rw-r--r--src/go/plugin/go.d/modules/lvm/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/lvm/testdata/config.yaml2
-rw-r--r--src/go/plugin/go.d/modules/lvm/testdata/lvs-report-no-thin.json16
-rw-r--r--src/go/plugin/go.d/modules/lvm/testdata/lvs-report.json16
l---------src/go/plugin/go.d/modules/megacli/README.md1
-rw-r--r--src/go/plugin/go.d/modules/megacli/charts.go196
-rw-r--r--src/go/plugin/go.d/modules/megacli/collect.go46
-rw-r--r--src/go/plugin/go.d/modules/megacli/collect_bbu.go141
-rw-r--r--src/go/plugin/go.d/modules/megacli/collect_phys_drives.go129
-rw-r--r--src/go/plugin/go.d/modules/megacli/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/megacli/exec.go50
-rw-r--r--src/go/plugin/go.d/modules/megacli/init.go23
-rw-r--r--src/go/plugin/go.d/modules/megacli/integrations/megacli_megaraid.md250
-rw-r--r--src/go/plugin/go.d/modules/megacli/megacli.go110
-rw-r--r--src/go/plugin/go.d/modules/megacli/megacli_test.go301
-rw-r--r--src/go/plugin/go.d/modules/megacli/metadata.yaml183
-rw-r--r--src/go/plugin/go.d/modules/megacli/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/megacli/testdata/config.yaml2
-rw-r--r--src/go/plugin/go.d/modules/megacli/testdata/mega-bbu-info-old.txt84
-rw-r--r--src/go/plugin/go.d/modules/megacli/testdata/mega-bbu-info-recent.txt74
-rw-r--r--src/go/plugin/go.d/modules/megacli/testdata/mega-phys-drives-info.txt433
l---------src/go/plugin/go.d/modules/memcached/README.md1
-rw-r--r--src/go/plugin/go.d/modules/memcached/charts.go229
-rw-r--r--src/go/plugin/go.d/modules/memcached/client.go45
-rw-r--r--src/go/plugin/go.d/modules/memcached/collect.go121
-rw-r--r--src/go/plugin/go.d/modules/memcached/config_schema.json44
-rw-r--r--src/go/plugin/go.d/modules/memcached/integrations/memcached.md231
-rw-r--r--src/go/plugin/go.d/modules/memcached/memcached.go108
-rw-r--r--src/go/plugin/go.d/modules/memcached/memcached_test.go296
-rw-r--r--src/go/plugin/go.d/modules/memcached/metadata.yaml217
-rw-r--r--src/go/plugin/go.d/modules/memcached/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/memcached/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/memcached/testdata/stats.txt93
l---------src/go/plugin/go.d/modules/mongodb/README.md1
-rw-r--r--src/go/plugin/go.d/modules/mongodb/charts.go1036
-rw-r--r--src/go/plugin/go.d/modules/mongodb/client.go299
-rw-r--r--src/go/plugin/go.d/modules/mongodb/collect.go43
-rw-r--r--src/go/plugin/go.d/modules/mongodb/collect_dbstats.go100
-rw-r--r--src/go/plugin/go.d/modules/mongodb/collect_replsetgetstatus.go113
-rw-r--r--src/go/plugin/go.d/modules/mongodb/collect_serverstatus.go129
-rw-r--r--src/go/plugin/go.d/modules/mongodb/collect_sharding.go102
-rw-r--r--src/go/plugin/go.d/modules/mongodb/config_schema.json105
-rw-r--r--src/go/plugin/go.d/modules/mongodb/documents.go276
-rw-r--r--src/go/plugin/go.d/modules/mongodb/init.go29
-rw-r--r--src/go/plugin/go.d/modules/mongodb/integrations/mongodb.md391
-rw-r--r--src/go/plugin/go.d/modules/mongodb/metadata.yaml580
-rw-r--r--src/go/plugin/go.d/modules/mongodb/mongodb.go128
-rw-r--r--src/go/plugin/go.d/modules/mongodb/mongodb_test.go816
-rw-r--r--src/go/plugin/go.d/modules/mongodb/testdata/config.json13
-rw-r--r--src/go/plugin/go.d/modules/mongodb/testdata/config.yaml8
-rw-r--r--src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/dbStats.json9
-rw-r--r--src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/mongod-serverStatus.json497
-rw-r--r--src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/mongos-serverStatus.json129
-rw-r--r--src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/replSetGetStatus.json27
l---------src/go/plugin/go.d/modules/monit/README.md1
-rw-r--r--src/go/plugin/go.d/modules/monit/charts.go91
-rw-r--r--src/go/plugin/go.d/modules/monit/collect.go117
-rw-r--r--src/go/plugin/go.d/modules/monit/config_schema.json185
-rw-r--r--src/go/plugin/go.d/modules/monit/integrations/monit.md255
-rw-r--r--src/go/plugin/go.d/modules/monit/metadata.yaml193
-rw-r--r--src/go/plugin/go.d/modules/monit/monit.go117
-rw-r--r--src/go/plugin/go.d/modules/monit/monit_test.go371
-rw-r--r--src/go/plugin/go.d/modules/monit/status.go153
-rw-r--r--src/go/plugin/go.d/modules/monit/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/monit/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/monit/testdata/v5.33.0/status.xml688
l---------src/go/plugin/go.d/modules/mysql/README.md1
-rw-r--r--src/go/plugin/go.d/modules/mysql/charts.go1239
-rw-r--r--src/go/plugin/go.d/modules/mysql/collect.go202
-rw-r--r--src/go/plugin/go.d/modules/mysql/collect_global_status.go216
-rw-r--r--src/go/plugin/go.d/modules/mysql/collect_global_vars.go43
-rw-r--r--src/go/plugin/go.d/modules/mysql/collect_process_list.go87
-rw-r--r--src/go/plugin/go.d/modules/mysql/collect_slave_status.go87
-rw-r--r--src/go/plugin/go.d/modules/mysql/collect_user_statistics.go51
-rw-r--r--src/go/plugin/go.d/modules/mysql/collect_version.go62
-rw-r--r--src/go/plugin/go.d/modules/mysql/config_schema.json52
-rw-r--r--src/go/plugin/go.d/modules/mysql/disable_logging.go58
-rw-r--r--src/go/plugin/go.d/modules/mysql/integrations/mariadb.md401
-rw-r--r--src/go/plugin/go.d/modules/mysql/integrations/mysql.md401
-rw-r--r--src/go/plugin/go.d/modules/mysql/integrations/percona_mysql.md401
-rw-r--r--src/go/plugin/go.d/modules/mysql/metadata.yaml802
-rw-r--r--src/go/plugin/go.d/modules/mysql/mycnf.go79
-rw-r--r--src/go/plugin/go.d/modules/mysql/mycnf_test.go100
-rw-r--r--src/go/plugin/go.d/modules/mysql/mysql.go169
-rw-r--r--src/go/plugin/go.d/modules/mysql/mysql_test.go1759
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/config.json6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/config.yaml4
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_status.txt621
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt8
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/process_list.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/version.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt5
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/global_status.txt569
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/global_variables.txt8
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/process_list.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/user_statistics.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/version.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/global_status.txt423
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/global_variables.txt7
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/process_list.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/version.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/global_status.txt490
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/global_variables.txt9
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/process_list.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/replica_status_multi_source.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/version.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/global_status.txt533
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/global_variables.txt9
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/process_list.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/user_statistics.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/version.txt6
-rw-r--r--src/go/plugin/go.d/modules/mysql/testdata/session_variables.txt6
l---------src/go/plugin/go.d/modules/nginx/README.md1
-rw-r--r--src/go/plugin/go.d/modules/nginx/apiclient.go168
-rw-r--r--src/go/plugin/go.d/modules/nginx/charts.go58
-rw-r--r--src/go/plugin/go.d/modules/nginx/collect.go17
-rw-r--r--src/go/plugin/go.d/modules/nginx/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/nginx/integrations/nginx.md267
-rw-r--r--src/go/plugin/go.d/modules/nginx/metadata.yaml226
-rw-r--r--src/go/plugin/go.d/modules/nginx/metrics.go34
-rw-r--r--src/go/plugin/go.d/modules/nginx/nginx.go106
-rw-r--r--src/go/plugin/go.d/modules/nginx/nginx_test.go156
-rw-r--r--src/go/plugin/go.d/modules/nginx/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/nginx/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/nginx/testdata/status.txt4
-rw-r--r--src/go/plugin/go.d/modules/nginx/testdata/tengine-status.txt4
l---------src/go/plugin/go.d/modules/nginxplus/README.md1
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/cache.go172
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/charts.go981
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/collect.go393
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md448
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/metadata.yaml584
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/nginx_http_api.go212
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/nginx_http_api_query.go373
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/nginxplus.go127
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/nginxplus_test.go596
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/404.json9
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/api_versions.json10
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/connections.json6
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_http.json10
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_root.json10
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_stream.json6
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_caches.json40
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_location_zones.json35
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_requests.json4
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_server_zones.json21
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_upstreams.json76
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/nginx.json10
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/resolvers.json36
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/ssl.json16
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/stream_server_zones.json15
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/api-8/stream_upstreams.json48
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/nginxplus/testdata/config.yaml17
l---------src/go/plugin/go.d/modules/nginxvts/README.md1
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/charts.go130
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/collect.go81
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/config_schema.json182
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/init.go47
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md268
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/metadata.yaml264
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/metrics.go53
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/nginxvts.go118
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/nginxvts_test.go266
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/nginxvts/testdata/vts-v0.1.18.json44
l---------src/go/plugin/go.d/modules/nsd/README.md1
-rw-r--r--src/go/plugin/go.d/modules/nsd/charts.go249
-rw-r--r--src/go/plugin/go.d/modules/nsd/collect.go81
-rw-r--r--src/go/plugin/go.d/modules/nsd/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/nsd/exec.go47
-rw-r--r--src/go/plugin/go.d/modules/nsd/init.go23
-rw-r--r--src/go/plugin/go.d/modules/nsd/integrations/nsd.md203
-rw-r--r--src/go/plugin/go.d/modules/nsd/metadata.yaml272
-rw-r--r--src/go/plugin/go.d/modules/nsd/nsd.go97
-rw-r--r--src/go/plugin/go.d/modules/nsd/nsd_test.go337
-rw-r--r--src/go/plugin/go.d/modules/nsd/stats_counters.go123
-rw-r--r--src/go/plugin/go.d/modules/nsd/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/nsd/testdata/config.yaml2
-rw-r--r--src/go/plugin/go.d/modules/nsd/testdata/stats.txt95
l---------src/go/plugin/go.d/modules/ntpd/README.md1
-rw-r--r--src/go/plugin/go.d/modules/ntpd/charts.go346
-rw-r--r--src/go/plugin/go.d/modules/ntpd/client.go89
-rw-r--r--src/go/plugin/go.d/modules/ntpd/collect.go154
-rw-r--r--src/go/plugin/go.d/modules/ntpd/config_schema.json49
-rw-r--r--src/go/plugin/go.d/modules/ntpd/integrations/ntpd.md263
-rw-r--r--src/go/plugin/go.d/modules/ntpd/metadata.yaml260
-rw-r--r--src/go/plugin/go.d/modules/ntpd/ntpd.go127
-rw-r--r--src/go/plugin/go.d/modules/ntpd/ntpd_test.go372
-rw-r--r--src/go/plugin/go.d/modules/ntpd/testdata/config.json6
-rw-r--r--src/go/plugin/go.d/modules/ntpd/testdata/config.yaml4
l---------src/go/plugin/go.d/modules/nvidia_smi/README.md1
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/charts.go370
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/collect.go204
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/config_schema.json56
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/exec.go213
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/gpu_info.go121
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/init.go22
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md232
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/metadata.yaml234
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi.go114
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi_test.go447
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/testdata/a100-sxm4-mig.xml359
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/testdata/config.json6
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/testdata/config.yaml4
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-2080-win.xml776
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-3060.xml917
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-4090-driver-535.xml1082
-rw-r--r--src/go/plugin/go.d/modules/nvidia_smi/testdata/tesla-p100.xml313
l---------src/go/plugin/go.d/modules/nvme/README.md1
-rw-r--r--src/go/plugin/go.d/modules/nvme/charts.go267
-rw-r--r--src/go/plugin/go.d/modules/nvme/collect.go120
-rw-r--r--src/go/plugin/go.d/modules/nvme/config_schema.json36
-rw-r--r--src/go/plugin/go.d/modules/nvme/exec.go94
-rw-r--r--src/go/plugin/go.d/modules/nvme/init.go26
-rw-r--r--src/go/plugin/go.d/modules/nvme/integrations/nvme_devices.md242
-rw-r--r--src/go/plugin/go.d/modules/nvme/metadata.yaml225
-rw-r--r--src/go/plugin/go.d/modules/nvme/nvme.go109
-rw-r--r--src/go/plugin/go.d/modules/nvme/nvme_test.go430
-rw-r--r--src/go/plugin/go.d/modules/nvme/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/nvme/testdata/config.yaml2
-rw-r--r--src/go/plugin/go.d/modules/nvme/testdata/nvme-list-empty.json4
-rw-r--r--src/go/plugin/go.d/modules/nvme/testdata/nvme-list.json30
-rw-r--r--src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log-float.json24
-rw-r--r--src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log-string.json24
-rw-r--r--src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log.json24
l---------src/go/plugin/go.d/modules/openvpn/README.md1
-rw-r--r--src/go/plugin/go.d/modules/openvpn/charts.go62
-rw-r--r--src/go/plugin/go.d/modules/openvpn/client/client.go153
-rw-r--r--src/go/plugin/go.d/modules/openvpn/client/client_test.go103
-rw-r--r--src/go/plugin/go.d/modules/openvpn/client/commands.go38
-rw-r--r--src/go/plugin/go.d/modules/openvpn/client/testdata/load-stats.txt1
-rw-r--r--src/go/plugin/go.d/modules/openvpn/client/testdata/status3.txt77
-rw-r--r--src/go/plugin/go.d/modules/openvpn/client/testdata/version.txt3
-rw-r--r--src/go/plugin/go.d/modules/openvpn/client/types.go28
-rw-r--r--src/go/plugin/go.d/modules/openvpn/collect.go90
-rw-r--r--src/go/plugin/go.d/modules/openvpn/config_schema.json102
-rw-r--r--src/go/plugin/go.d/modules/openvpn/init.go30
-rw-r--r--src/go/plugin/go.d/modules/openvpn/integrations/openvpn.md258
-rw-r--r--src/go/plugin/go.d/modules/openvpn/metadata.yaml177
-rw-r--r--src/go/plugin/go.d/modules/openvpn/openvpn.go128
-rw-r--r--src/go/plugin/go.d/modules/openvpn/openvpn_test.go153
-rw-r--r--src/go/plugin/go.d/modules/openvpn/testdata/config.json13
-rw-r--r--src/go/plugin/go.d/modules/openvpn/testdata/config.yaml8
l---------src/go/plugin/go.d/modules/openvpn_status_log/README.md1
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/charts.go72
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/collect.go65
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/config_schema.json92
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/init.go27
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/integrations/openvpn_status_log.md213
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/metadata.yaml144
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/openvpn.go100
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/openvpn_test.go362
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/parser.go131
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/config.json12
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/config.yaml7
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/empty.txt0
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/static-key.txt8
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version1-no-clients.txt8
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version1.txt12
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version2-no-clients.txt6
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version2.txt10
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version3-no-clients.txt6
-rw-r--r--src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version3.txt10
l---------src/go/plugin/go.d/modules/pgbouncer/README.md1
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/charts.go247
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/collect.go354
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/config_schema.json47
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/init.go12
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/integrations/pgbouncer.md289
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/metadata.yaml239
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/metrics.go47
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/pgbouncer.go115
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/pgbouncer_test.go364
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/config.txt86
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/databases.txt6
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/pools.txt6
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/stats.txt6
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/version.txt3
-rw-r--r--src/go/plugin/go.d/modules/pgbouncer/testdata/v1.7.0/version.txt3
l---------src/go/plugin/go.d/modules/phpdaemon/README.md1
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/charts.go66
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/client.go77
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/collect.go19
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/init.go27
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/integrations/phpdaemon.md333
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/metadata.yaml276
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/metrics.go33
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/phpdaemon.go114
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/phpdaemon_test.go144
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/phpdaemon/testdata/fullstatus.json10
l---------src/go/plugin/go.d/modules/phpfpm/README.md1
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/charts.go84
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/client.go216
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/collect.go72
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/config_schema.json211
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/decode.go132
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/init.go52
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/integrations/php-fpm.md264
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/metadata.yaml230
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/phpfpm.go99
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/phpfpm_test.go272
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/testdata/config.json23
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/testdata/config.yaml20
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/testdata/status-full-no-idle.json63
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/testdata/status-full.json63
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/testdata/status-full.txt59
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/testdata/status.json16
-rw-r--r--src/go/plugin/go.d/modules/phpfpm/testdata/status.txt14
l---------src/go/plugin/go.d/modules/pihole/README.md1
-rw-r--r--src/go/plugin/go.d/modules/pihole/charts.go166
-rw-r--r--src/go/plugin/go.d/modules/pihole/collect.go270
-rw-r--r--src/go/plugin/go.d/modules/pihole/config_schema.json190
-rw-r--r--src/go/plugin/go.d/modules/pihole/init.go78
-rw-r--r--src/go/plugin/go.d/modules/pihole/integrations/pi-hole.md259
-rw-r--r--src/go/plugin/go.d/modules/pihole/metadata.yaml248
-rw-r--r--src/go/plugin/go.d/modules/pihole/metrics.go84
-rw-r--r--src/go/plugin/go.d/modules/pihole/pihole.go129
-rw-r--r--src/go/plugin/go.d/modules/pihole/pihole_test.go278
-rw-r--r--src/go/plugin/go.d/modules/pihole/testdata/config.json21
-rw-r--r--src/go/plugin/go.d/modules/pihole/testdata/config.yaml18
-rw-r--r--src/go/plugin/go.d/modules/pihole/testdata/getForwardDestinations.json7
-rw-r--r--src/go/plugin/go.d/modules/pihole/testdata/getQueryTypes.json11
-rw-r--r--src/go/plugin/go.d/modules/pihole/testdata/setupVars.conf11
-rw-r--r--src/go/plugin/go.d/modules/pihole/testdata/summaryRaw.json27
l---------src/go/plugin/go.d/modules/pika/README.md1
-rw-r--r--src/go/plugin/go.d/modules/pika/charts.go246
-rw-r--r--src/go/plugin/go.d/modules/pika/collect.go71
-rw-r--r--src/go/plugin/go.d/modules/pika/collect_info.go219
-rw-r--r--src/go/plugin/go.d/modules/pika/config_schema.json93
-rw-r--r--src/go/plugin/go.d/modules/pika/init.go47
-rw-r--r--src/go/plugin/go.d/modules/pika/integrations/pika.md256
-rw-r--r--src/go/plugin/go.d/modules/pika/metadata.yaml277
-rw-r--r--src/go/plugin/go.d/modules/pika/pika.go134
-rw-r--r--src/go/plugin/go.d/modules/pika/pika_test.go299
-rw-r--r--src/go/plugin/go.d/modules/pika/testdata/config.json9
-rw-r--r--src/go/plugin/go.d/modules/pika/testdata/config.yaml7
-rw-r--r--src/go/plugin/go.d/modules/pika/testdata/redis/info_all.txt165
-rw-r--r--src/go/plugin/go.d/modules/pika/testdata/v3.4.0/info_all.txt64
l---------src/go/plugin/go.d/modules/ping/README.md1
-rw-r--r--src/go/plugin/go.d/modules/ping/charts.go101
-rw-r--r--src/go/plugin/go.d/modules/ping/collect.go49
-rw-r--r--src/go/plugin/go.d/modules/ping/config_schema.json95
-rw-r--r--src/go/plugin/go.d/modules/ping/init.go39
-rw-r--r--src/go/plugin/go.d/modules/ping/integrations/ping.md271
-rw-r--r--src/go/plugin/go.d/modules/ping/metadata.yaml193
-rw-r--r--src/go/plugin/go.d/modules/ping/ping.go122
-rw-r--r--src/go/plugin/go.d/modules/ping/ping_test.go206
-rw-r--r--src/go/plugin/go.d/modules/ping/prober.go111
-rw-r--r--src/go/plugin/go.d/modules/ping/testdata/config.json11
-rw-r--r--src/go/plugin/go.d/modules/ping/testdata/config.yaml8
l---------src/go/plugin/go.d/modules/portcheck/README.md1
-rw-r--r--src/go/plugin/go.d/modules/portcheck/charts.go75
-rw-r--r--src/go/plugin/go.d/modules/portcheck/collect.go77
-rw-r--r--src/go/plugin/go.d/modules/portcheck/config_schema.json66
-rw-r--r--src/go/plugin/go.d/modules/portcheck/init.go49
-rw-r--r--src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md252
-rw-r--r--src/go/plugin/go.d/modules/portcheck/metadata.yaml162
-rw-r--r--src/go/plugin/go.d/modules/portcheck/portcheck.go101
-rw-r--r--src/go/plugin/go.d/modules/portcheck/portcheck_test.go169
-rw-r--r--src/go/plugin/go.d/modules/portcheck/testdata/config.json8
-rw-r--r--src/go/plugin/go.d/modules/portcheck/testdata/config.yaml5
l---------src/go/plugin/go.d/modules/postfix/README.md1
-rw-r--r--src/go/plugin/go.d/modules/postfix/charts.go44
-rw-r--r--src/go/plugin/go.d/modules/postfix/collect.go71
-rw-r--r--src/go/plugin/go.d/modules/postfix/config_schema.json47
-rw-r--r--src/go/plugin/go.d/modules/postfix/exec.go41
-rw-r--r--src/go/plugin/go.d/modules/postfix/init.go38
-rw-r--r--src/go/plugin/go.d/modules/postfix/integrations/postfix.md195
-rw-r--r--src/go/plugin/go.d/modules/postfix/metadata.yaml106
-rw-r--r--src/go/plugin/go.d/modules/postfix/postfix.go109
-rw-r--r--src/go/plugin/go.d/modules/postfix/postfix_test.go241
-rw-r--r--src/go/plugin/go.d/modules/postfix/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/postfix/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/postfix/testdata/postqueue.txt34
l---------src/go/plugin/go.d/modules/postgres/README.md1
-rw-r--r--src/go/plugin/go.d/modules/postgres/charts.go1400
-rw-r--r--src/go/plugin/go.d/modules/postgres/collect.go273
-rw-r--r--src/go/plugin/go.d/modules/postgres/collect_metrics.go367
-rw-r--r--src/go/plugin/go.d/modules/postgres/config_schema.json141
-rw-r--r--src/go/plugin/go.d/modules/postgres/do_query.go78
-rw-r--r--src/go/plugin/go.d/modules/postgres/do_query_bloat.go73
-rw-r--r--src/go/plugin/go.d/modules/postgres/do_query_columns.go55
-rw-r--r--src/go/plugin/go.d/modules/postgres/do_query_databases.go160
-rw-r--r--src/go/plugin/go.d/modules/postgres/do_query_global.go285
-rw-r--r--src/go/plugin/go.d/modules/postgres/do_query_indexes.go59
-rw-r--r--src/go/plugin/go.d/modules/postgres/do_query_misc.go170
-rw-r--r--src/go/plugin/go.d/modules/postgres/do_query_replication.go94
-rw-r--r--src/go/plugin/go.d/modules/postgres/do_query_tables.go147
-rw-r--r--src/go/plugin/go.d/modules/postgres/init.go24
-rw-r--r--src/go/plugin/go.d/modules/postgres/integrations/postgresql.md417
-rw-r--r--src/go/plugin/go.d/modules/postgres/metadata.yaml750
-rw-r--r--src/go/plugin/go.d/modules/postgres/metrics.go231
-rw-r--r--src/go/plugin/go.d/modules/postgres/postgres.go171
-rw-r--r--src/go/plugin/go.d/modules/postgres/postgres_test.go731
-rw-r--r--src/go/plugin/go.d/modules/postgres/queries.go757
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/config.json14
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/config.yaml10
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/autovacuum_workers.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/bloat_tables.txt12
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/catalog_relations.txt6
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/checkpoints.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_conflicts.txt4
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_locks.txt7
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_size.txt4
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_stats.txt4
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/is_super_user-false.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/is_super_user-true.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/queryable_database_list.txt2
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_slot_files.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt5
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt5
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_connections_state.txt8
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_current_connections.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_version_num.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/settings_max_connections.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/settings_max_locks_held.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt11
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt6
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt6
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/table_columns_stats.txt10
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/txid_wraparound.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/uptime.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_archive_files.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_files.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_writes.txt3
-rw-r--r--src/go/plugin/go.d/modules/postgres/testdata/v14.4/xact_query_running_time.txt10
l---------src/go/plugin/go.d/modules/powerdns/README.md1
-rw-r--r--src/go/plugin/go.d/modules/powerdns/authoritativens.go116
-rw-r--r--src/go/plugin/go.d/modules/powerdns/authoritativens_test.go340
-rw-r--r--src/go/plugin/go.d/modules/powerdns/charts.go66
-rw-r--r--src/go/plugin/go.d/modules/powerdns/collect.go100
-rw-r--r--src/go/plugin/go.d/modules/powerdns/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/powerdns/init.go29
-rw-r--r--src/go/plugin/go.d/modules/powerdns/integrations/powerdns_authoritative_server.md258
-rw-r--r--src/go/plugin/go.d/modules/powerdns/metadata.yaml215
-rw-r--r--src/go/plugin/go.d/modules/powerdns/metrics.go13
-rw-r--r--src/go/plugin/go.d/modules/powerdns/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/powerdns/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/powerdns/testdata/recursor/statistics.json587
-rw-r--r--src/go/plugin/go.d/modules/powerdns/testdata/v4.3.0/statistics.json507
l---------src/go/plugin/go.d/modules/powerdns_recursor/README.md1
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/charts.go98
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/collect.go100
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/init.go29
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/integrations/powerdns_recursor.md261
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/metadata.yaml240
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/metrics.go18
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/recursor.go116
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/recursor_test.go375
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/testdata/authoritative/statistics.json507
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/powerdns_recursor/testdata/v4.3.1/statistics.json587
l---------src/go/plugin/go.d/modules/prometheus/README.md1
-rw-r--r--src/go/plugin/go.d/modules/prometheus/cache.go41
-rw-r--r--src/go/plugin/go.d/modules/prometheus/charts.go329
-rw-r--r--src/go/plugin/go.d/modules/prometheus/collect.go277
-rw-r--r--src/go/plugin/go.d/modules/prometheus/config_schema.json311
-rw-r--r--src/go/plugin/go.d/modules/prometheus/init.go64
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/4d_server.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/8430ft_modem.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/a10_acos_network_devices.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/airthings_waveplus_air_sensor.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/akamai_edge_dns_traffic.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/akamai_global_traffic_management.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/akami_cloudmonitor.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/alamos_fe2_server.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/alibaba_cloud.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/altaro_backup.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/amd_cpu_&_gpu.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/andrews_&_arnold_line_status.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/apache_airflow.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/apache_flink.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/apicast.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/apple_time_machine.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/arm_hwcpipe.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aruba_devices.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/arvancloud_cdn.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/audisto.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/authlog.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_compute_instances.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_spot_instance.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aws_ecs.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aws_health_events.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aws_instance_health.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aws_quota.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aws_s3_buckets.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/aws_sqs.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/azure_ad_app_passwords.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/azure_application.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/azure_elastic_pool_sql.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/azure_resources.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/azure_service_bus.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/azure_sql.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/bigquery.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/bird_routing_daemon.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/blackbox.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/bobcat_miner_300.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/borg_backup.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/bosh.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/bpftrace_variables.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/bungeecord.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cadvisor.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/celery.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md326
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/checkpoint_device.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/chia.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cilium_agent.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cilium_operator.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cilium_proxy.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cisco_aci.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/citrix_netscaler.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/clamav_daemon.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/clamscan_results.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/clash.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry_firehose.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cloudflare_pcap.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cloudwatch.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/clustercontrol_cmon.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/collectd.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/concourse.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/craftbeerpi.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/crowdsec.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/crypto_exchanges.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cryptowatch.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/custom_exporter.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ddwrt_routers.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_ecs_cluster.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_isilon_cluster.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_xtremio_cluster.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/dell_powermax.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/dependency-track.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/digitalocean.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/discourse.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/dmarc.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/dnsbl.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/dutch_electricity_smart_meter.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/dynatrace.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/elgato_key_light_devices..md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/energomera_smart_power_meters.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/eos.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/etcd.md321
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/excel_spreadsheet.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/fastd.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/fortigate_firewall.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/freebsd_nfs.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/freebsd_rctl-racct.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/fritzbox_network_devices.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/frrouting.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/gcp_gce.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/gcp_quota.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/generic_command_line_output.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/generic_storage_enclosure_tool.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/github_api_rate_limit.md326
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/gitlab_runner.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/gobetween.md321
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/google_cloud_platform.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/google_pagespeed.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/google_stackdriver.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/gpsd.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/grafana.md321
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/graylog_server.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/gtp.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/halon.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/hana.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/hashicorp_vault_secrets.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/hasura_graphql_server.md326
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/hdsentinel.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/helium_miner_validator.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/hhvm.md326
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/hitron_cgn_series_cpe.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/hitron_coda_cable_modem.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/homebridge.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/homey.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/honeypot.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/hp_ilo.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/huawei_devices.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/hubble.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ibm_aix_systems_njmon.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ibm_mq.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum_virtualize.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ibm_z_hardware_management_console.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/influxdb.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/iota_full_node.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ipmi_by_soundcloud.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/jarvis_standing_desk.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/jetbrains_floating_license_server.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/jmx.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/jolokia.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/journald.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/kafka.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/kafka_connect.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/kafka_consumer_lag.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/kafka_zookeeper.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/kannel.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/keepalived.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/lagerist_disk_latency.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ldap.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/linode.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/loki.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/lustre_metadata.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/lynis_audit_reports.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/machbase.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/maildir.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/meilisearch.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/meraki_dashboard.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/mesos.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_devices.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_routeros_devices.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/minecraft.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/modbus_protocol.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/mogilefs.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/monnit_sensors_mqtt.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/mosquitto.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/mp707_usb_thermometer.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/mqtt_blackbox.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/mtail.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/naemon.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/nagios.md326
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/nature_remo_e_lite_devices.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/netapp_solidfire.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/netatmo_sensors.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/netflow.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/netmeter.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/new_relic.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/nextcloud_servers.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/nextdns.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/nftables.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/nrpe_daemon.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/nsx-t.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/nvml.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/obs_studio.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/odbc.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/open_vswitch.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/openhab.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/openrc.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/openroadm_devices.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/openstack.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/openvas.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/openweathermap.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/otrs.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/patroni.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/personal_weather_station.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/pgbackrest.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/pgpool-ii.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/philips_hue.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/pimoroni_enviro+.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/pingdom.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/podman.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/powerpal_devices.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/proftpd.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/prometheus_endpoint.md321
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/proxmox_ve.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/radio_thermostat.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/radius.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/rancher.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/raritan_pdu.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/redis_queue.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ripe_atlas.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sabnzbd.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/salicru_eqx_inverter.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sense_energy.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sentry.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/servertech.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/shell_command.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/shelly_humidity_sensor.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sia.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/siemens_s7_plc.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/slurm.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sma_inverters.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/smart_meters_sml.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/smartrg_808ac_cable_modem.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/softether_vpn_server.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/solar_logging_stick.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/solaredge_inverters.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/solis_ginlong_5g_inverters.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sonic_nos.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/spacelift.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/speedify_cli.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sql_database_agnostic.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ssh.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ssl_certificate.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/starlink_spacex.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/statuspage.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/steam.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/storidge.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/stream.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/strongswan.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sunspec_solar_energy.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/suricata.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/synology_activebackup.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/sysload.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/tacacs.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/tado_smart_heating_solution.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/tankerkoenig_api.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/tesla_powerwall.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/tesla_vehicle.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/tesla_wall_connector.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/tp-link_p110.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/traceroute.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/twincat_ads_web_service.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/twitch.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/ubiquiti_ufiber_olt.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/vault_pki.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/vertica.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/vscode.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/warp10.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/xiaomi_mi_flora.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/xmpp_server.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/yourls_url_shortener.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/zerto.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/zulip.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/integrations/zyxel_gs1200-8.md325
-rw-r--r--src/go/plugin/go.d/modules/prometheus/metadata.yaml7866
-rw-r--r--src/go/plugin/go.d/modules/prometheus/prometheus.go144
-rw-r--r--src/go/plugin/go.d/modules/prometheus/prometheus_test.go611
-rw-r--r--src/go/plugin/go.d/modules/prometheus/testdata/config.json42
-rw-r--r--src/go/plugin/go.d/modules/prometheus/testdata/config.yaml33
l---------src/go/plugin/go.d/modules/proxysql/README.md1
-rw-r--r--src/go/plugin/go.d/modules/proxysql/cache.go63
-rw-r--r--src/go/plugin/go.d/modules/proxysql/charts.go726
-rw-r--r--src/go/plugin/go.d/modules/proxysql/collect.go308
-rw-r--r--src/go/plugin/go.d/modules/proxysql/config_schema.json47
-rw-r--r--src/go/plugin/go.d/modules/proxysql/integrations/proxysql.md309
-rw-r--r--src/go/plugin/go.d/modules/proxysql/metadata.yaml430
-rw-r--r--src/go/plugin/go.d/modules/proxysql/proxysql.go114
-rw-r--r--src/go/plugin/go.d/modules/proxysql/proxysql_test.go1240
-rw-r--r--src/go/plugin/go.d/modules/proxysql/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/proxysql/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_memory_metrics.txt21
-rw-r--r--src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_commands_counters.txt56
-rw-r--r--src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_connection_pool .txt11
-rw-r--r--src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_global.txt106
-rw-r--r--src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_users.txt6
-rw-r--r--src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/version.txt5
l---------src/go/plugin/go.d/modules/pulsar/README.md1
-rw-r--r--src/go/plugin/go.d/modules/pulsar/cache.go19
-rw-r--r--src/go/plugin/go.d/modules/pulsar/charts.go664
-rw-r--r--src/go/plugin/go.d/modules/pulsar/collect.go138
-rw-r--r--src/go/plugin/go.d/modules/pulsar/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/pulsar/init.go34
-rw-r--r--src/go/plugin/go.d/modules/pulsar/integrations/apache_pulsar.md314
-rw-r--r--src/go/plugin/go.d/modules/pulsar/metadata.yaml519
-rw-r--r--src/go/plugin/go.d/modules/pulsar/metrics.go116
-rw-r--r--src/go/plugin/go.d/modules/pulsar/pulsar.go137
-rw-r--r--src/go/plugin/go.d/modules/pulsar/pulsar_test.go1024
-rw-r--r--src/go/plugin/go.d/modules/pulsar/testdata/config.json28
-rw-r--r--src/go/plugin/go.d/modules/pulsar/testdata/config.yaml22
-rw-r--r--src/go/plugin/go.d/modules/pulsar/testdata/non-pulsar.txt27
-rw-r--r--src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-namespaces.txt500
-rw-r--r--src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-topics-2.txt748
-rw-r--r--src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-topics.txt748
l---------src/go/plugin/go.d/modules/puppet/README.md1
-rw-r--r--src/go/plugin/go.d/modules/puppet/charts.go93
-rw-r--r--src/go/plugin/go.d/modules/puppet/collect.go75
-rw-r--r--src/go/plugin/go.d/modules/puppet/config_schema.json177
-rw-r--r--src/go/plugin/go.d/modules/puppet/integrations/puppet.md233
-rw-r--r--src/go/plugin/go.d/modules/puppet/metadata.yaml184
-rw-r--r--src/go/plugin/go.d/modules/puppet/puppet.go114
-rw-r--r--src/go/plugin/go.d/modules/puppet/puppet_test.go252
-rw-r--r--src/go/plugin/go.d/modules/puppet/response.go32
-rw-r--r--src/go/plugin/go.d/modules/puppet/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/puppet/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/puppet/testdata/serviceStatusResponse.json497
l---------src/go/plugin/go.d/modules/rabbitmq/README.md1
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/charts.go360
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/collect.go177
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/config_schema.json192
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/integrations/rabbitmq.md300
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/metadata.yaml341
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/metrics.go82
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/rabbitmq.go127
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/rabbitmq_test.go357
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/testdata/config.json21
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/testdata/config.yaml18
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-nodes-node.json453
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-overview.json183
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-queues.json334
-rw-r--r--src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-vhosts.json82
l---------src/go/plugin/go.d/modules/redis/README.md1
-rw-r--r--src/go/plugin/go.d/modules/redis/charts.go383
-rw-r--r--src/go/plugin/go.d/modules/redis/collect.go71
-rw-r--r--src/go/plugin/go.d/modules/redis/collect_info.go258
-rw-r--r--src/go/plugin/go.d/modules/redis/collect_ping_latency.go27
-rw-r--r--src/go/plugin/go.d/modules/redis/config_schema.json123
-rw-r--r--src/go/plugin/go.d/modules/redis/init.go54
-rw-r--r--src/go/plugin/go.d/modules/redis/integrations/redis.md287
-rw-r--r--src/go/plugin/go.d/modules/redis/metadata.yaml343
-rw-r--r--src/go/plugin/go.d/modules/redis/redis.go147
-rw-r--r--src/go/plugin/go.d/modules/redis/redis_test.go418
-rw-r--r--src/go/plugin/go.d/modules/redis/testdata/config.json12
-rw-r--r--src/go/plugin/go.d/modules/redis/testdata/config.yaml10
-rw-r--r--src/go/plugin/go.d/modules/redis/testdata/pika/info_all.txt67
-rw-r--r--src/go/plugin/go.d/modules/redis/testdata/v6.0.9/info_all.txt172
l---------src/go/plugin/go.d/modules/rethinkdb/README.md1
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/charts.go189
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/client.go72
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/collect.go123
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/config_schema.json82
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/integrations/rethinkdb.md257
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/metadata.yaml198
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/rethinkdb.go107
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/rethinkdb_test.go267
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/testdata/config.json7
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/testdata/config.yaml5
-rw-r--r--src/go/plugin/go.d/modules/rethinkdb/testdata/v2.4.4/stats.txt4
l---------src/go/plugin/go.d/modules/riakkv/README.md1
-rw-r--r--src/go/plugin/go.d/modules/riakkv/charts.go461
-rw-r--r--src/go/plugin/go.d/modules/riakkv/collect.go74
-rw-r--r--src/go/plugin/go.d/modules/riakkv/config_schema.json186
-rw-r--r--src/go/plugin/go.d/modules/riakkv/integrations/riak_kv.md283
-rw-r--r--src/go/plugin/go.d/modules/riakkv/metadata.yaml390
-rw-r--r--src/go/plugin/go.d/modules/riakkv/riakkv.go122
-rw-r--r--src/go/plugin/go.d/modules/riakkv/riakkv_test.go265
-rw-r--r--src/go/plugin/go.d/modules/riakkv/stats.go112
-rw-r--r--src/go/plugin/go.d/modules/riakkv/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/riakkv/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/riakkv/testdata/stats.json478
l---------src/go/plugin/go.d/modules/rspamd/README.md1
-rw-r--r--src/go/plugin/go.d/modules/rspamd/charts.go110
-rw-r--r--src/go/plugin/go.d/modules/rspamd/collect.go92
-rw-r--r--src/go/plugin/go.d/modules/rspamd/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/rspamd/integrations/rspamd.md243
-rw-r--r--src/go/plugin/go.d/modules/rspamd/metadata.yaml221
-rw-r--r--src/go/plugin/go.d/modules/rspamd/rspamd.go114
-rw-r--r--src/go/plugin/go.d/modules/rspamd/rspamd_test.go258
-rw-r--r--src/go/plugin/go.d/modules/rspamd/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/rspamd/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/rspamd/testdata/v3.4-stat.json66
l---------src/go/plugin/go.d/modules/scaleio/README.md1
-rw-r--r--src/go/plugin/go.d/modules/scaleio/charts.go465
-rw-r--r--src/go/plugin/go.d/modules/scaleio/client/client.go316
-rw-r--r--src/go/plugin/go.d/modules/scaleio/client/client_test.go142
-rw-r--r--src/go/plugin/go.d/modules/scaleio/client/server.go149
-rw-r--r--src/go/plugin/go.d/modules/scaleio/client/types.go1096
-rw-r--r--src/go/plugin/go.d/modules/scaleio/collect.go58
-rw-r--r--src/go/plugin/go.d/modules/scaleio/collect_sdc.go38
-rw-r--r--src/go/plugin/go.d/modules/scaleio/collect_storage_pool.go41
-rw-r--r--src/go/plugin/go.d/modules/scaleio/collect_system.go250
-rw-r--r--src/go/plugin/go.d/modules/scaleio/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/scaleio/integrations/dell_emc_scaleio.md290
-rw-r--r--src/go/plugin/go.d/modules/scaleio/metadata.yaml399
-rw-r--r--src/go/plugin/go.d/modules/scaleio/metrics.go126
-rw-r--r--src/go/plugin/go.d/modules/scaleio/queries.go111
-rw-r--r--src/go/plugin/go.d/modules/scaleio/scaleio.go129
-rw-r--r--src/go/plugin/go.d/modules/scaleio/scaleio_test.go384
-rw-r--r--src/go/plugin/go.d/modules/scaleio/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/scaleio/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/scaleio/testdata/instances.json1160
-rw-r--r--src/go/plugin/go.d/modules/scaleio/testdata/selected_statistics.json777
l---------src/go/plugin/go.d/modules/sensors/README.md1
-rw-r--r--src/go/plugin/go.d/modules/sensors/charts.go159
-rw-r--r--src/go/plugin/go.d/modules/sensors/collect.go179
-rw-r--r--src/go/plugin/go.d/modules/sensors/config_schema.json47
-rw-r--r--src/go/plugin/go.d/modules/sensors/exec.go41
-rw-r--r--src/go/plugin/go.d/modules/sensors/init.go38
-rw-r--r--src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md215
-rw-r--r--src/go/plugin/go.d/modules/sensors/metadata.yaml157
-rw-r--r--src/go/plugin/go.d/modules/sensors/sensors.go112
-rw-r--r--src/go/plugin/go.d/modules/sensors/sensors_test.go308
-rw-r--r--src/go/plugin/go.d/modules/sensors/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/sensors/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt72
-rw-r--r--src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt81
l---------src/go/plugin/go.d/modules/smartctl/README.md1
-rw-r--r--src/go/plugin/go.d/modules/smartctl/charts.go379
-rw-r--r--src/go/plugin/go.d/modules/smartctl/collect.go214
-rw-r--r--src/go/plugin/go.d/modules/smartctl/config_schema.json140
-rw-r--r--src/go/plugin/go.d/modules/smartctl/exec.go85
-rw-r--r--src/go/plugin/go.d/modules/smartctl/init.go53
-rw-r--r--src/go/plugin/go.d/modules/smartctl/integrations/s.m.a.r.t..md284
-rw-r--r--src/go/plugin/go.d/modules/smartctl/metadata.yaml240
-rw-r--r--src/go/plugin/go.d/modules/smartctl/scan.go119
-rw-r--r--src/go/plugin/go.d/modules/smartctl/smart_device.go119
-rw-r--r--src/go/plugin/go.d/modules/smartctl/smartctl.go148
-rw-r--r--src/go/plugin/go.d/modules/smartctl/smartctl_test.go508
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/config.json14
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/config.yaml9
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/device-nvme0.json112
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/device-nvme1.json113
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/scan.json29
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/type-sat/device-hdd-sda.json601
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/type-sat/device-ssd-sdc.json652
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/type-sat/scan.json35
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/type-scsi/device-sda.json128
-rw-r--r--src/go/plugin/go.d/modules/smartctl/testdata/type-scsi/scan.json29
l---------src/go/plugin/go.d/modules/snmp/README.md1
-rw-r--r--src/go/plugin/go.d/modules/snmp/charts.go309
-rw-r--r--src/go/plugin/go.d/modules/snmp/collect.go395
-rw-r--r--src/go/plugin/go.d/modules/snmp/config.go52
-rw-r--r--src/go/plugin/go.d/modules/snmp/config_schema.json422
-rw-r--r--src/go/plugin/go.d/modules/snmp/init.go175
-rw-r--r--src/go/plugin/go.d/modules/snmp/integrations/snmp_devices.md496
-rw-r--r--src/go/plugin/go.d/modules/snmp/metadata.yaml496
-rw-r--r--src/go/plugin/go.d/modules/snmp/netif.go412
-rw-r--r--src/go/plugin/go.d/modules/snmp/snmp.go155
-rw-r--r--src/go/plugin/go.d/modules/snmp/snmp_test.go754
-rw-r--r--src/go/plugin/go.d/modules/snmp/testdata/config.json47
-rw-r--r--src/go/plugin/go.d/modules/snmp/testdata/config.yaml35
l---------src/go/plugin/go.d/modules/squid/README.md1
-rw-r--r--src/go/plugin/go.d/modules/squid/charts.go81
-rw-r--r--src/go/plugin/go.d/modules/squid/collect.go105
-rw-r--r--src/go/plugin/go.d/modules/squid/config_schema.json177
-rw-r--r--src/go/plugin/go.d/modules/squid/integrations/squid.md227
-rw-r--r--src/go/plugin/go.d/modules/squid/metadata.yaml195
-rw-r--r--src/go/plugin/go.d/modules/squid/squid.go114
-rw-r--r--src/go/plugin/go.d/modules/squid/squid_test.go223
-rw-r--r--src/go/plugin/go.d/modules/squid/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/squid/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/squid/testdata/counters.txt59
l---------src/go/plugin/go.d/modules/squidlog/README.md1
-rw-r--r--src/go/plugin/go.d/modules/squidlog/charts.go368
-rw-r--r--src/go/plugin/go.d/modules/squidlog/collect.go360
-rw-r--r--src/go/plugin/go.d/modules/squidlog/config_schema.json217
-rw-r--r--src/go/plugin/go.d/modules/squidlog/init.go93
-rw-r--r--src/go/plugin/go.d/modules/squidlog/integrations/squid_log_files.md284
-rw-r--r--src/go/plugin/go.d/modules/squidlog/logline.go413
-rw-r--r--src/go/plugin/go.d/modules/squidlog/logline_test.go479
-rw-r--r--src/go/plugin/go.d/modules/squidlog/metadata.yaml315
-rw-r--r--src/go/plugin/go.d/modules/squidlog/metrics.go93
-rw-r--r--src/go/plugin/go.d/modules/squidlog/squidlog.go112
-rw-r--r--src/go/plugin/go.d/modules/squidlog/squidlog_test.go348
-rw-r--r--src/go/plugin/go.d/modules/squidlog/testdata/access.log500
-rw-r--r--src/go/plugin/go.d/modules/squidlog/testdata/config.json27
-rw-r--r--src/go/plugin/go.d/modules/squidlog/testdata/config.yaml19
-rw-r--r--src/go/plugin/go.d/modules/squidlog/testdata/unknown.log1
l---------src/go/plugin/go.d/modules/storcli/README.md1
-rw-r--r--src/go/plugin/go.d/modules/storcli/charts.go241
-rw-r--r--src/go/plugin/go.d/modules/storcli/collect.go45
-rw-r--r--src/go/plugin/go.d/modules/storcli/collect_controllers.go154
-rw-r--r--src/go/plugin/go.d/modules/storcli/collect_drives.go237
-rw-r--r--src/go/plugin/go.d/modules/storcli/config_schema.json35
-rw-r--r--src/go/plugin/go.d/modules/storcli/exec.go50
-rw-r--r--src/go/plugin/go.d/modules/storcli/init.go23
-rw-r--r--src/go/plugin/go.d/modules/storcli/integrations/storecli_raid.md252
-rw-r--r--src/go/plugin/go.d/modules/storcli/metadata.yaml194
-rw-r--r--src/go/plugin/go.d/modules/storcli/storcli.go110
-rw-r--r--src/go/plugin/go.d/modules/storcli/storcli_test.go309
-rw-r--r--src/go/plugin/go.d/modules/storcli/testdata/config.json4
-rw-r--r--src/go/plugin/go.d/modules/storcli/testdata/config.yaml2
-rw-r--r--src/go/plugin/go.d/modules/storcli/testdata/megaraid-controllers-info.json687
-rw-r--r--src/go/plugin/go.d/modules/storcli/testdata/megaraid-drives-info.json495
-rw-r--r--src/go/plugin/go.d/modules/storcli/testdata/mpt3sas-controllers-info.json2260
l---------src/go/plugin/go.d/modules/supervisord/README.md1
-rw-r--r--src/go/plugin/go.d/modules/supervisord/charts.go94
-rw-r--r--src/go/plugin/go.d/modules/supervisord/client.go109
-rw-r--r--src/go/plugin/go.d/modules/supervisord/collect.go174
-rw-r--r--src/go/plugin/go.d/modules/supervisord/config_schema.json87
-rw-r--r--src/go/plugin/go.d/modules/supervisord/init.go30
-rw-r--r--src/go/plugin/go.d/modules/supervisord/integrations/supervisor.md249
-rw-r--r--src/go/plugin/go.d/modules/supervisord/metadata.yaml161
-rw-r--r--src/go/plugin/go.d/modules/supervisord/supervisord.go115
-rw-r--r--src/go/plugin/go.d/modules/supervisord/supervisord_test.go277
-rw-r--r--src/go/plugin/go.d/modules/supervisord/testdata/config.json11
-rw-r--r--src/go/plugin/go.d/modules/supervisord/testdata/config.yaml9
l---------src/go/plugin/go.d/modules/systemdunits/README.md1
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/charts.go118
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/client.go34
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/collect.go88
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/collect_unit_files.go94
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/collect_units.go187
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/config_schema.json122
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/doc.go4
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/init.go29
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/integrations/systemd_units.md324
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/metadata.yaml344
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/systemdunits.go139
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/systemdunits_test.go1156
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/testdata/config.json13
-rw-r--r--src/go/plugin/go.d/modules/systemdunits/testdata/config.yaml9
l---------src/go/plugin/go.d/modules/tengine/README.md1
-rw-r--r--src/go/plugin/go.d/modules/tengine/apiclient.go247
-rw-r--r--src/go/plugin/go.d/modules/tengine/charts.go118
-rw-r--r--src/go/plugin/go.d/modules/tengine/collect.go22
-rw-r--r--src/go/plugin/go.d/modules/tengine/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/tengine/integrations/tengine.md267
-rw-r--r--src/go/plugin/go.d/modules/tengine/metadata.yaml245
-rw-r--r--src/go/plugin/go.d/modules/tengine/metrics.go75
-rw-r--r--src/go/plugin/go.d/modules/tengine/tengine.go110
-rw-r--r--src/go/plugin/go.d/modules/tengine/tengine_test.go147
-rw-r--r--src/go/plugin/go.d/modules/tengine/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/tengine/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/tengine/testdata/status.txt3
l---------src/go/plugin/go.d/modules/tomcat/README.md1
-rw-r--r--src/go/plugin/go.d/modules/tomcat/charts.go196
-rw-r--r--src/go/plugin/go.d/modules/tomcat/collect.go130
-rw-r--r--src/go/plugin/go.d/modules/tomcat/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/tomcat/init.go21
-rw-r--r--src/go/plugin/go.d/modules/tomcat/integrations/tomcat.md275
-rw-r--r--src/go/plugin/go.d/modules/tomcat/metadata.yaml241
-rw-r--r--src/go/plugin/go.d/modules/tomcat/status_response.go51
-rw-r--r--src/go/plugin/go.d/modules/tomcat/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/tomcat/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/tomcat/testdata/server_status.xml54
-rw-r--r--src/go/plugin/go.d/modules/tomcat/tomcat.go120
-rw-r--r--src/go/plugin/go.d/modules/tomcat/tomcat_test.go272
l---------src/go/plugin/go.d/modules/tor/README.md1
-rw-r--r--src/go/plugin/go.d/modules/tor/charts.go43
-rw-r--r--src/go/plugin/go.d/modules/tor/client.go117
-rw-r--r--src/go/plugin/go.d/modules/tor/collect.go65
-rw-r--r--src/go/plugin/go.d/modules/tor/config_schema.json53
-rw-r--r--src/go/plugin/go.d/modules/tor/integrations/tor.md225
-rw-r--r--src/go/plugin/go.d/modules/tor/metadata.yaml135
-rw-r--r--src/go/plugin/go.d/modules/tor/testdata/config.json6
-rw-r--r--src/go/plugin/go.d/modules/tor/testdata/config.yaml4
-rw-r--r--src/go/plugin/go.d/modules/tor/tor.go102
-rw-r--r--src/go/plugin/go.d/modules/tor/tor_test.go328
l---------src/go/plugin/go.d/modules/traefik/README.md1
-rw-r--r--src/go/plugin/go.d/modules/traefik/charts.go73
-rw-r--r--src/go/plugin/go.d/modules/traefik/collect.go214
-rw-r--r--src/go/plugin/go.d/modules/traefik/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/traefik/init.go37
-rw-r--r--src/go/plugin/go.d/modules/traefik/integrations/traefik.md246
-rw-r--r--src/go/plugin/go.d/modules/traefik/metadata.yaml196
-rw-r--r--src/go/plugin/go.d/modules/traefik/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/traefik/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/traefik/testdata/v2.2.1/metrics.txt1170
-rw-r--r--src/go/plugin/go.d/modules/traefik/traefik.go130
-rw-r--r--src/go/plugin/go.d/modules/traefik/traefik_test.go370
l---------src/go/plugin/go.d/modules/unbound/README.md1
-rw-r--r--src/go/plugin/go.d/modules/unbound/charts.go527
-rw-r--r--src/go/plugin/go.d/modules/unbound/collect.go209
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/config.go78
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/config_test.go172
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/parse.go165
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/parse_test.go93
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/infinite_rec.conf85
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/non_existent_glob_include.conf85
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/non_existent_include.conf85
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob.conf82
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob2.conf80
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob3.conf81
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/valid_include.conf82
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/valid_include2.conf81
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/valid_include3.conf81
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel.conf82
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel2.conf81
-rw-r--r--src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel3.conf81
-rw-r--r--src/go/plugin/go.d/modules/unbound/config_schema.json113
-rw-r--r--src/go/plugin/go.d/modules/unbound/init.go106
-rw-r--r--src/go/plugin/go.d/modules/unbound/integrations/unbound.md305
-rw-r--r--src/go/plugin/go.d/modules/unbound/metadata.yaml431
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/config.json12
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/config.yaml10
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/stats/common.txt66
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/stats/extended.txt162
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended1.txt162
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended2.txt162
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended3.txt163
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended1.txt163
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended2.txt156
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended3.txt163
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/unbound.conf85
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/unbound_disabled.conf85
-rw-r--r--src/go/plugin/go.d/modules/unbound/testdata/unbound_empty.conf85
-rw-r--r--src/go/plugin/go.d/modules/unbound/unbound.go126
-rw-r--r--src/go/plugin/go.d/modules/unbound/unbound_test.go1288
l---------src/go/plugin/go.d/modules/upsd/README.md1
-rw-r--r--src/go/plugin/go.d/modules/upsd/charts.go399
-rw-r--r--src/go/plugin/go.d/modules/upsd/client.go168
-rw-r--r--src/go/plugin/go.d/modules/upsd/collect.go180
-rw-r--r--src/go/plugin/go.d/modules/upsd/config_schema.json85
-rw-r--r--src/go/plugin/go.d/modules/upsd/integrations/ups_nut.md246
-rw-r--r--src/go/plugin/go.d/modules/upsd/metadata.yaml264
-rw-r--r--src/go/plugin/go.d/modules/upsd/testdata/config.json7
-rw-r--r--src/go/plugin/go.d/modules/upsd/testdata/config.yaml5
-rw-r--r--src/go/plugin/go.d/modules/upsd/upsd.go115
-rw-r--r--src/go/plugin/go.d/modules/upsd/upsd_test.go446
-rw-r--r--src/go/plugin/go.d/modules/upsd/variables.go39
l---------src/go/plugin/go.d/modules/uwsgi/README.md1
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/charts.go275
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/client.go64
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/collect.go128
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/config_schema.json44
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/init.go3
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/integrations/uwsgi.md248
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/metadata.yaml215
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/testdata/stats.json117
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/testdata/stats_no_workers.json49
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/uwsgi.go98
-rw-r--r--src/go/plugin/go.d/modules/uwsgi/uwsgi_test.go325
l---------src/go/plugin/go.d/modules/vcsa/README.md1
-rw-r--r--src/go/plugin/go.d/modules/vcsa/charts.go138
-rw-r--r--src/go/plugin/go.d/modules/vcsa/client/client.go213
-rw-r--r--src/go/plugin/go.d/modules/vcsa/client/client_test.go288
-rw-r--r--src/go/plugin/go.d/modules/vcsa/collect.go95
-rw-r--r--src/go/plugin/go.d/modules/vcsa/config_schema.json186
-rw-r--r--src/go/plugin/go.d/modules/vcsa/init.go29
-rw-r--r--src/go/plugin/go.d/modules/vcsa/integrations/vcenter_server_appliance.md292
-rw-r--r--src/go/plugin/go.d/modules/vcsa/metadata.yaml346
-rw-r--r--src/go/plugin/go.d/modules/vcsa/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/vcsa/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/vcsa/vcsa.go138
-rw-r--r--src/go/plugin/go.d/modules/vcsa/vcsa_test.go304
l---------src/go/plugin/go.d/modules/vernemq/README.md1
-rw-r--r--src/go/plugin/go.d/modules/vernemq/charts.go911
-rw-r--r--src/go/plugin/go.d/modules/vernemq/collect.go288
-rw-r--r--src/go/plugin/go.d/modules/vernemq/config_schema.json183
-rw-r--r--src/go/plugin/go.d/modules/vernemq/init.go26
-rw-r--r--src/go/plugin/go.d/modules/vernemq/integrations/vernemq.md332
-rw-r--r--src/go/plugin/go.d/modules/vernemq/metadata.yaml670
-rw-r--r--src/go/plugin/go.d/modules/vernemq/metrics.go150
-rw-r--r--src/go/plugin/go.d/modules/vernemq/testdata/config.json20
-rw-r--r--src/go/plugin/go.d/modules/vernemq/testdata/config.yaml17
-rw-r--r--src/go/plugin/go.d/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt416
-rw-r--r--src/go/plugin/go.d/modules/vernemq/testdata/non_vernemq.txt27
-rw-r--r--src/go/plugin/go.d/modules/vernemq/vernemq.go113
-rw-r--r--src/go/plugin/go.d/modules/vernemq/vernemq_test.go578
l---------src/go/plugin/go.d/modules/vsphere/README.md1
-rw-r--r--src/go/plugin/go.d/modules/vsphere/charts.go506
-rw-r--r--src/go/plugin/go.d/modules/vsphere/client/client.go180
-rw-r--r--src/go/plugin/go.d/modules/vsphere/client/client_test.go175
-rw-r--r--src/go/plugin/go.d/modules/vsphere/client/keepalive.go45
-rw-r--r--src/go/plugin/go.d/modules/vsphere/collect.go132
-rw-r--r--src/go/plugin/go.d/modules/vsphere/config_schema.json252
-rw-r--r--src/go/plugin/go.d/modules/vsphere/discover.go31
-rw-r--r--src/go/plugin/go.d/modules/vsphere/discover/build.go180
-rw-r--r--src/go/plugin/go.d/modules/vsphere/discover/discover.go163
-rw-r--r--src/go/plugin/go.d/modules/vsphere/discover/discover_test.go179
-rw-r--r--src/go/plugin/go.d/modules/vsphere/discover/filter.go60
-rw-r--r--src/go/plugin/go.d/modules/vsphere/discover/hierarchy.go100
-rw-r--r--src/go/plugin/go.d/modules/vsphere/discover/metric_lists.go135
-rw-r--r--src/go/plugin/go.d/modules/vsphere/init.go66
-rw-r--r--src/go/plugin/go.d/modules/vsphere/integrations/vmware_vcenter_server.md357
-rw-r--r--src/go/plugin/go.d/modules/vsphere/match/match.go233
-rw-r--r--src/go/plugin/go.d/modules/vsphere/match/match_test.go287
-rw-r--r--src/go/plugin/go.d/modules/vsphere/metadata.yaml439
-rw-r--r--src/go/plugin/go.d/modules/vsphere/metrics.txt328
-rw-r--r--src/go/plugin/go.d/modules/vsphere/resources/resources.go137
-rw-r--r--src/go/plugin/go.d/modules/vsphere/scrape/scrape.go159
-rw-r--r--src/go/plugin/go.d/modules/vsphere/scrape/scrape_test.go70
-rw-r--r--src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller.go33
-rw-r--r--src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller_test.go42
-rw-r--r--src/go/plugin/go.d/modules/vsphere/task.go61
-rw-r--r--src/go/plugin/go.d/modules/vsphere/task_test.go41
-rw-r--r--src/go/plugin/go.d/modules/vsphere/testdata/config.json27
-rw-r--r--src/go/plugin/go.d/modules/vsphere/testdata/config.yaml22
-rw-r--r--src/go/plugin/go.d/modules/vsphere/vsphere.go144
-rw-r--r--src/go/plugin/go.d/modules/vsphere/vsphere_test.go489
l---------src/go/plugin/go.d/modules/weblog/README.md1
-rw-r--r--src/go/plugin/go.d/modules/weblog/charts.go890
-rw-r--r--src/go/plugin/go.d/modules/weblog/collect.go564
-rw-r--r--src/go/plugin/go.d/modules/weblog/config_schema.json453
-rw-r--r--src/go/plugin/go.d/modules/weblog/init.go197
-rw-r--r--src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md405
-rw-r--r--src/go/plugin/go.d/modules/weblog/logline.go617
-rw-r--r--src/go/plugin/go.d/modules/weblog/logline_test.go669
-rw-r--r--src/go/plugin/go.d/modules/weblog/metadata.yaml525
-rw-r--r--src/go/plugin/go.d/modules/weblog/metrics.go188
-rw-r--r--src/go/plugin/go.d/modules/weblog/parser.go167
-rw-r--r--src/go/plugin/go.d/modules/weblog/parser_test.go224
-rw-r--r--src/go/plugin/go.d/modules/weblog/testdata/common.log500
-rw-r--r--src/go/plugin/go.d/modules/weblog/testdata/config.json64
-rw-r--r--src/go/plugin/go.d/modules/weblog/testdata/config.yaml39
-rw-r--r--src/go/plugin/go.d/modules/weblog/testdata/custom.log100
-rw-r--r--src/go/plugin/go.d/modules/weblog/testdata/custom_time_fields.log72
-rw-r--r--src/go/plugin/go.d/modules/weblog/testdata/full.log500
-rw-r--r--src/go/plugin/go.d/modules/weblog/testdata/u_ex221107.log168
-rw-r--r--src/go/plugin/go.d/modules/weblog/weblog.go168
-rw-r--r--src/go/plugin/go.d/modules/weblog/weblog_test.go1502
l---------src/go/plugin/go.d/modules/whoisquery/README.md1
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/charts.go23
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/collect.go23
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/config_schema.json60
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/init.go32
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/integrations/domain_expiration_date.md222
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/metadata.yaml125
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/provider.go95
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/testdata/config.json7
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/testdata/config.yaml5
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/whoisquery.go105
-rw-r--r--src/go/plugin/go.d/modules/whoisquery/whoisquery_test.go149
l---------src/go/plugin/go.d/modules/windows/README.md1
-rw-r--r--src/go/plugin/go.d/modules/windows/charts.go4933
-rw-r--r--src/go/plugin/go.d/modules/windows/collect.go163
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_ad.go100
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_adcs.go70
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_adfs.go119
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_collector.go46
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_cpu.go78
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_exchange.go244
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_hyperv.go288
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_iis.go140
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_logical_disk.go95
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_logon.go24
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_memory.go78
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_mssql.go259
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_net.go90
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_netframework.go531
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_os.go47
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_process.go95
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_service.go56
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_system.go29
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_tcp.go65
-rw-r--r--src/go/plugin/go.d/modules/windows/collect_thermalzone.go45
-rw-r--r--src/go/plugin/go.d/modules/windows/config_schema.json190
-rw-r--r--src/go/plugin/go.d/modules/windows/init.go25
-rw-r--r--src/go/plugin/go.d/modules/windows/integrations/active_directory.md843
-rw-r--r--src/go/plugin/go.d/modules/windows/integrations/hyperv.md843
-rw-r--r--src/go/plugin/go.d/modules/windows/integrations/ms_exchange.md843
-rw-r--r--src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md843
-rw-r--r--src/go/plugin/go.d/modules/windows/integrations/net_framework.md843
-rw-r--r--src/go/plugin/go.d/modules/windows/integrations/windows.md843
-rw-r--r--src/go/plugin/go.d/modules/windows/metadata.yaml2172
-rw-r--r--src/go/plugin/go.d/modules/windows/testdata/config.json21
-rw-r--r--src/go/plugin/go.d/modules/windows/testdata/config.yaml18
-rw-r--r--src/go/plugin/go.d/modules/windows/testdata/v0.20.0/metrics.txt3129
-rw-r--r--src/go/plugin/go.d/modules/windows/windows.go171
-rw-r--r--src/go/plugin/go.d/modules/windows/windows_test.go1100
l---------src/go/plugin/go.d/modules/wireguard/README.md1
-rw-r--r--src/go/plugin/go.d/modules/wireguard/charts.go152
-rw-r--r--src/go/plugin/go.d/modules/wireguard/collect.go109
-rw-r--r--src/go/plugin/go.d/modules/wireguard/config_schema.json25
-rw-r--r--src/go/plugin/go.d/modules/wireguard/integrations/wireguard.md204
-rw-r--r--src/go/plugin/go.d/modules/wireguard/metadata.yaml121
-rw-r--r--src/go/plugin/go.d/modules/wireguard/testdata/config.json3
-rw-r--r--src/go/plugin/go.d/modules/wireguard/testdata/config.yaml1
-rw-r--r--src/go/plugin/go.d/modules/wireguard/wireguard.go106
-rw-r--r--src/go/plugin/go.d/modules/wireguard/wireguard_test.go509
l---------src/go/plugin/go.d/modules/x509check/README.md1
-rw-r--r--src/go/plugin/go.d/modules/x509check/charts.go43
-rw-r--r--src/go/plugin/go.d/modules/x509check/collect.go58
-rw-r--r--src/go/plugin/go.d/modules/x509check/config_schema.json114
-rw-r--r--src/go/plugin/go.d/modules/x509check/init.go38
-rw-r--r--src/go/plugin/go.d/modules/x509check/integrations/x.509_certificate.md260
-rw-r--r--src/go/plugin/go.d/modules/x509check/metadata.yaml172
-rw-r--r--src/go/plugin/go.d/modules/x509check/provider.go131
-rw-r--r--src/go/plugin/go.d/modules/x509check/testdata/config.json12
-rw-r--r--src/go/plugin/go.d/modules/x509check/testdata/config.yaml10
-rw-r--r--src/go/plugin/go.d/modules/x509check/x509check.go111
-rw-r--r--src/go/plugin/go.d/modules/x509check/x509check_test.go177
l---------src/go/plugin/go.d/modules/zfspool/README.md1
-rw-r--r--src/go/plugin/go.d/modules/zfspool/charts.go175
-rw-r--r--src/go/plugin/go.d/modules/zfspool/collect.go27
-rw-r--r--src/go/plugin/go.d/modules/zfspool/collect_zpool_list.go160
-rw-r--r--src/go/plugin/go.d/modules/zfspool/collect_zpool_list_vdev.go138
-rw-r--r--src/go/plugin/go.d/modules/zfspool/config_schema.json47
-rw-r--r--src/go/plugin/go.d/modules/zfspool/exec.go56
-rw-r--r--src/go/plugin/go.d/modules/zfspool/init.go38
-rw-r--r--src/go/plugin/go.d/modules/zfspool/integrations/zfs_pools.md222
-rw-r--r--src/go/plugin/go.d/modules/zfspool/metadata.yaml162
-rw-r--r--src/go/plugin/go.d/modules/zfspool/testdata/config.json5
-rw-r--r--src/go/plugin/go.d/modules/zfspool/testdata/config.yaml3
-rw-r--r--src/go/plugin/go.d/modules/zfspool/testdata/zpool-list-vdev-logs-cache.txt12
-rw-r--r--src/go/plugin/go.d/modules/zfspool/testdata/zpool-list-vdev.txt5
-rw-r--r--src/go/plugin/go.d/modules/zfspool/testdata/zpool-list.txt3
-rw-r--r--src/go/plugin/go.d/modules/zfspool/zfspool.go115
-rw-r--r--src/go/plugin/go.d/modules/zfspool/zfspool_test.go546
l---------src/go/plugin/go.d/modules/zookeeper/README.md1
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/charts.go111
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/collect.go79
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/config_schema.json95
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/fetcher.go74
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/fetcher_test.go49
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/init.go41
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/integrations/zookeeper.md250
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/metadata.yaml202
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/testdata/config.json10
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/testdata/config.yaml8
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/testdata/mntr.txt416
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/testdata/mntr_notinwhitelist.txt1
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/zookeeper.go103
-rw-r--r--src/go/plugin/go.d/modules/zookeeper/zookeeper_test.go174
-rw-r--r--src/go/plugin/go.d/pkg/README.md22
-rw-r--r--src/go/plugin/go.d/pkg/dockerhost/dockerhost.go23
-rw-r--r--src/go/plugin/go.d/pkg/iprange/README.md37
-rw-r--r--src/go/plugin/go.d/pkg/iprange/parse.go138
-rw-r--r--src/go/plugin/go.d/pkg/iprange/parse_test.go258
-rw-r--r--src/go/plugin/go.d/pkg/iprange/pool.go40
-rw-r--r--src/go/plugin/go.d/pkg/iprange/pool_test.go104
-rw-r--r--src/go/plugin/go.d/pkg/iprange/range.go100
-rw-r--r--src/go/plugin/go.d/pkg/iprange/range_test.go200
-rw-r--r--src/go/plugin/go.d/pkg/k8sclient/k8sclient.go71
-rw-r--r--src/go/plugin/go.d/pkg/logs/csv.go195
-rw-r--r--src/go/plugin/go.d/pkg/logs/csv_test.go175
-rw-r--r--src/go/plugin/go.d/pkg/logs/json.go140
-rw-r--r--src/go/plugin/go.d/pkg/logs/json_test.go224
-rw-r--r--src/go/plugin/go.d/pkg/logs/lastline.go65
-rw-r--r--src/go/plugin/go.d/pkg/logs/lastline_test.go54
-rw-r--r--src/go/plugin/go.d/pkg/logs/ltsv.go95
-rw-r--r--src/go/plugin/go.d/pkg/logs/ltsv_test.go125
-rw-r--r--src/go/plugin/go.d/pkg/logs/parser.go65
-rw-r--r--src/go/plugin/go.d/pkg/logs/parser_test.go3
-rw-r--r--src/go/plugin/go.d/pkg/logs/reader.go193
-rw-r--r--src/go/plugin/go.d/pkg/logs/reader_test.go245
-rw-r--r--src/go/plugin/go.d/pkg/logs/regexp.go76
-rw-r--r--src/go/plugin/go.d/pkg/logs/regexp_test.go131
-rw-r--r--src/go/plugin/go.d/pkg/matcher/README.md142
-rw-r--r--src/go/plugin/go.d/pkg/matcher/cache.go56
-rw-r--r--src/go/plugin/go.d/pkg/matcher/cache_test.go53
-rw-r--r--src/go/plugin/go.d/pkg/matcher/doc.go40
-rw-r--r--src/go/plugin/go.d/pkg/matcher/doc_test.go49
-rw-r--r--src/go/plugin/go.d/pkg/matcher/expr.go62
-rw-r--r--src/go/plugin/go.d/pkg/matcher/expr_test.go100
-rw-r--r--src/go/plugin/go.d/pkg/matcher/glob.go265
-rw-r--r--src/go/plugin/go.d/pkg/matcher/glob_test.go97
-rw-r--r--src/go/plugin/go.d/pkg/matcher/logical.go101
-rw-r--r--src/go/plugin/go.d/pkg/matcher/logical_test.go97
-rw-r--r--src/go/plugin/go.d/pkg/matcher/matcher.go149
-rw-r--r--src/go/plugin/go.d/pkg/matcher/matcher_test.go122
-rw-r--r--src/go/plugin/go.d/pkg/matcher/regexp.go60
-rw-r--r--src/go/plugin/go.d/pkg/matcher/regexp_test.go66
-rw-r--r--src/go/plugin/go.d/pkg/matcher/simple_patterns.go65
-rw-r--r--src/go/plugin/go.d/pkg/matcher/simple_patterns_test.go88
-rw-r--r--src/go/plugin/go.d/pkg/matcher/string.go48
-rw-r--r--src/go/plugin/go.d/pkg/matcher/string_test.go62
-rw-r--r--src/go/plugin/go.d/pkg/metrics/counter.go93
-rw-r--r--src/go/plugin/go.d/pkg/metrics/counter_test.go105
-rw-r--r--src/go/plugin/go.d/pkg/metrics/gauge.go103
-rw-r--r--src/go/plugin/go.d/pkg/metrics/gauge_test.go129
-rw-r--r--src/go/plugin/go.d/pkg/metrics/histogram.go171
-rw-r--r--src/go/plugin/go.d/pkg/metrics/histogram_test.go136
-rw-r--r--src/go/plugin/go.d/pkg/metrics/metrics.go12
-rw-r--r--src/go/plugin/go.d/pkg/metrics/summary.go125
-rw-r--r--src/go/plugin/go.d/pkg/metrics/summary_test.go78
-rw-r--r--src/go/plugin/go.d/pkg/metrics/unique_counter.go109
-rw-r--r--src/go/plugin/go.d/pkg/metrics/unique_counter_test.go145
-rw-r--r--src/go/plugin/go.d/pkg/multipath/multipath.go90
-rw-r--r--src/go/plugin/go.d/pkg/multipath/multipath_test.go60
-rw-r--r--src/go/plugin/go.d/pkg/multipath/testdata/data1/test-empty.conf0
-rw-r--r--src/go/plugin/go.d/pkg/multipath/testdata/data1/test.conf1
-rw-r--r--src/go/plugin/go.d/pkg/multipath/testdata/data2/test-empty.conf0
-rw-r--r--src/go/plugin/go.d/pkg/multipath/testdata/data2/test.conf1
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/client.go155
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/client_test.go137
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/metric_family.go116
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/metric_family_test.go356
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/metric_series.go110
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/metric_series_test.go140
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/parse.go414
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/parse_test.go1675
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/selector/README.md102
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/selector/expr.go62
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/selector/expr_test.go231
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/selector/logical.go49
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/selector/logical_test.go226
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/selector/parse.go97
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/selector/parse_test.go117
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/selector/selector.go52
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/selector/selector_test.go11
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/counter-meta.txt11
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/counter-no-meta.txt8
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/gauge-meta.txt11
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/gauge-no-meta.txt8
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/histogram-meta.txt43
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/histogram-no-meta.txt40
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/multiline-help.txt3
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/summary-meta.txt43
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/summary-no-meta.txt40
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/testdata.nometa.txt410
-rw-r--r--src/go/plugin/go.d/pkg/prometheus/testdata/testdata.txt528
-rw-r--r--src/go/plugin/go.d/pkg/socket/client.go106
-rw-r--r--src/go/plugin/go.d/pkg/socket/client_test.go163
-rw-r--r--src/go/plugin/go.d/pkg/socket/servers_test.go139
-rw-r--r--src/go/plugin/go.d/pkg/socket/types.go41
-rw-r--r--src/go/plugin/go.d/pkg/socket/utils.go25
-rw-r--r--src/go/plugin/go.d/pkg/stm/stm.go172
-rw-r--r--src/go/plugin/go.d/pkg/stm/stm_test.go415
-rw-r--r--src/go/plugin/go.d/pkg/tlscfg/config.go77
-rw-r--r--src/go/plugin/go.d/pkg/tlscfg/config_test.go10
-rw-r--r--src/go/plugin/go.d/pkg/web/client.go80
-rw-r--r--src/go/plugin/go.d/pkg/web/client_test.go23
-rw-r--r--src/go/plugin/go.d/pkg/web/doc.go9
-rw-r--r--src/go/plugin/go.d/pkg/web/doc_test.go15
-rw-r--r--src/go/plugin/go.d/pkg/web/duration.go72
-rw-r--r--src/go/plugin/go.d/pkg/web/duration_test.go114
-rw-r--r--src/go/plugin/go.d/pkg/web/request.go105
-rw-r--r--src/go/plugin/go.d/pkg/web/request_test.go208
-rw-r--r--src/go/plugin/go.d/pkg/web/web.go11
2374 files changed, 424974 insertions, 0 deletions
diff --git a/src/go/plugin/go.d/README.md b/src/go/plugin/go.d/README.md
new file mode 100644
index 000000000..28f046ab9
--- /dev/null
+++ b/src/go/plugin/go.d/README.md
@@ -0,0 +1,244 @@
+<!--
+title: go.d.plugin
+description: "go.d.plugin is an external plugin for Netdata, responsible for running individual data collectors written in Go."
+custom_edit_url: "/src/go/plugin/go.d/README.md"
+sidebar_label: "go.d.plugin"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Developers/External plugins/go.d.plugin"
+sidebar_position: 1
+-->
+
+# go.d.plugin
+
+`go.d.plugin` is a [Netdata](https://github.com/netdata/netdata) external plugin. It is an **orchestrator** for data
+collection modules written in Go.
+
+1. It runs as an independent process (`ps fax` shows it).
+2. It is started and stopped automatically by Netdata.
+3. It communicates with Netdata via a unidirectional pipe (sending data to the Netdata daemon).
+4. It supports any number of data collection modules.
+5. It allows each module to have any number of data collection jobs (see the configuration sketch below).
+
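+Each module is driven by a per-module YAML configuration file that can define multiple jobs. The snippet below is a
+minimal sketch of that layout; the module name, file name, and endpoints are illustrative assumptions, not taken from
+this document:
+
+```yaml
+# go.d/example_module.conf (hypothetical module, shown only to illustrate the multi-job layout)
+jobs:
+  - name: local                           # each job needs a unique name within the module
+    url: http://127.0.0.1:8080/status     # endpoint this job collects from
+  - name: remote
+    url: http://203.0.113.10:8080/status
+```
+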
+## Bug reports, feature requests, and questions
+
+All are welcome! We use the [netdata/netdata](https://github.com/netdata/netdata/) repository for bugs, feature requests,
+and questions.
+
+- [GitHub Issues](https://github.com/netdata/netdata/issues/new/choose): report bugs or open a new feature request.
+- [GitHub Discussions](https://github.com/netdata/netdata/discussions): ask a question or suggest a new idea.
+
+## Install
+
+`go.d.plugin` is shipped with Netdata.
+
+### Required Linux capabilities
+
+All capabilities are set automatically during Netdata installation using
+the [official installation method](/packaging/installer/methods/kickstart.md).
+No further action is required. If you used a different installation method and need to set the capabilities manually,
+see the appropriate collector's readme.
+
+| Capability | Required by |
+|:--------------------|:-------------------------------------------------------------------------------------------------------:|
+| CAP_NET_RAW | [Ping](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ping#readme) |
+| CAP_NET_ADMIN | [Wireguard](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/wireguard#readme) |
+| CAP_DAC_READ_SEARCH | [Filecheck](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/filecheck#readme) |
+
+## Available modules
+
+| Name | Monitors |
+|:-------------------------------------------------------------------------------------------------------------------|:-----------------------------:|
+| [adaptec_raid](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/adaptecraid) | Adaptec Hardware RAID |
+| [activemq](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/activemq) | ActiveMQ |
+| [ap](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ap) | Wireless AP |
+| [apache](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/apache) | Apache |
+| [beanstalk](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/beanstalk) | Beanstalk |
+| [bind](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/bind) | ISC Bind |
+| [cassandra](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/cassandra) | Cassandra |
+| [chrony](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/chrony) | Chrony |
+| [clickhouse](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/clickhouse) | ClickHouse |
+| [cockroachdb](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/cockroachdb) | CockroachDB |
+| [consul](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/consul) | Consul |
+| [coredns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/coredns) | CoreDNS |
+| [couchbase](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/couchbase) | Couchbase |
+| [couchdb](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/couchdb) | CouchDB |
+| [dmcache](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dmcache) | DMCache |
+| [dnsdist](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dnsdist) | Dnsdist |
+| [dnsmasq](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dnsmasq) | Dnsmasq DNS Forwarder |
+| [dnsmasq_dhcp](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dnsmasq_dhcp) | Dnsmasq DHCP |
+| [dns_query](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dnsquery) | DNS Query RTT |
+| [docker](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/docker) | Docker Engine |
+| [docker_engine](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/docker_engine) | Docker Engine |
+| [dockerhub](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dockerhub) | Docker Hub |
+| [dovecot](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dovecot) | Dovecot |
+| [elasticsearch](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/elasticsearch) | Elasticsearch/OpenSearch |
+| [envoy](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/envoy) | Envoy |
+| [example](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/example) | - |
+| [exim](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/exim) | Exim |
+| [fail2ban](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/fail2ban) | Fail2Ban Jails |
+| [filecheck](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/filecheck) | Files and Directories |
+| [fluentd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/fluentd) | Fluentd |
+| [freeradius](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/freeradius) | FreeRADIUS |
+| [gearman](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/gearman) | Gearman |
+| [haproxy](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/haproxy) | HAProxy |
+| [hddtemp](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/hddtemp) | Disks temperature |
+| [hdfs](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/hdfs) | HDFS |
+| [hpssa](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/hpssa) | HPE Smart Array |
+| [httpcheck](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/httpcheck) | Any HTTP Endpoint |
+| [icecast](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/icecast) | Icecast |
+| [intelgpu](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/intelgpu) | Intel integrated GPU |
+| [ipfs](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ipfs) | IPFS |
+| [isc_dhcpd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/isc_dhcpd) | ISC DHCP |
+| [k8s_kubelet](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/k8s_kubelet) | Kubelet |
+| [k8s_kubeproxy](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/k8s_kubeproxy) | Kube-proxy |
+| [k8s_state](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/k8s_state) | Kubernetes cluster state |
+| [lighttpd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/lighttpd) | Lighttpd |
+| [litespeed](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/litespeed) | Litespeed |
+| [logind](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/logind) | systemd-logind |
+| [logstash](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/logstash) | Logstash |
+| [lvm](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/lvm) | LVM logical volumes |
+| [megacli](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/megacli) | MegaCli Hardware Raid |
+| [memcached](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/memcached) | Memcached |
+| [mongoDB](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/mongodb) | MongoDB |
+| [monit](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/monit) | Monit |
+| [mysql](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/mysql) | MySQL |
+| [nginx](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginx) | NGINX |
+| [nginxplus](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginxplus) | NGINX Plus |
+| [nginxvts](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginxvts) | NGINX VTS |
+| [nsd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nsd) | NSD (NLnet Labs) |
+| [ntpd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ntpd) | NTP daemon |
+| [nvme](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvme) | NVMe devices |
+| [openvpn](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/openvpn) | OpenVPN |
+| [openvpn_status_log](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/openvpn_status_log) | OpenVPN |
+| [pgbouncer](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pgbouncer) | PgBouncer |
+| [phpdaemon](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/phpdaemon) | phpDaemon |
+| [phpfpm](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/phpfpm) | PHP-FPM |
+| [pihole](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pihole) | Pi-hole |
+| [pika](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pika) | Pika |
+| [ping](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ping) | Any network host |
+| [prometheus](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/prometheus) | Any Prometheus Endpoint |
+| [portcheck](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/portcheck) | Any TCP Endpoint |
+| [postgres](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/postgres) | PostgreSQL |
+| [postfix](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/postfix) | Postfix |
+| [powerdns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/powerdns) | PowerDNS Authoritative Server |
+| [powerdns_recursor](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/powerdns_recursor) | PowerDNS Recursor |
+| [proxysql](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/proxysql) | ProxySQL |
+| [pulsar](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pulsar) | Apache Pulsar |
+| [puppet](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/puppet) | Puppet |
+| [rabbitmq](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/rabbitmq) | RabbitMQ |
+| [redis](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/redis) | Redis |
+| [rethinkdb](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/rethinkdb) | RethinkDB |
+| [riakkv](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/riakkv) | Riak KV |
+| [rspamd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/rspamd) | Rspamd |
+| [scaleio](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/scaleio) | Dell EMC ScaleIO |
+| [sensors](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/sensors) | Hardware Sensors |
+| [snmp](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/snmp) | SNMP |
+| [squid](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/squid) | Squid |
+| [squidlog](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/squidlog) | Squid |
+| [smartctl](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/smartctl) | S.M.A.R.T. Storage Devices |
+| [storcli](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/storcli) | Broadcom Hardware RAID |
+| [supervisord](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/supervisord) | Supervisor |
+| [systemdunits](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/systemdunits) | Systemd unit state |
+| [tengine](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/tengine) | Tengine |
+| [tomcat](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/tomcat) | Tomcat |
+| [tor](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/tor) | Tor |
+| [traefik](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/traefik) | Traefik |
+| [upsd](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/upsd) | UPSd (NUT) |
+| [unbound](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/unbound) | Unbound |
+| [vcsa](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vcsa) | vCenter Server Appliance |
+| [vernemq](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vernemq) | VerneMQ |
+| [vsphere](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vsphere) | VMware vCenter Server |
+| [web_log](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/weblog) | Apache/NGINX |
+| [wireguard](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/wireguard) | WireGuard |
+| [whoisquery](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/whoisquery) | Domain Expiry |
+| [windows](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/windows) | Windows |
+| [x509check](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/x509check) | Digital Certificates |
+| [zfspool](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/zfspool) | ZFS Pools |
+| [zookeeper](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/zookeeper) | ZooKeeper |
+
+## Configuration
+
+Edit the `go.d.conf` configuration file using `edit-config` from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory),
+which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config go.d.conf
+```
+
+Configurations are written in [YAML](https://yaml.org/).
+
+- [plugin configuration](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf)
+- [specific module configuration](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/config/go.d)
+
+### Enable a collector
+
+To enable a collector, edit `go.d.conf`, uncomment the collector in question, and change its value from `no` to `yes`.
+
+For example, to enable the `example` collector, you would update `go.d.conf` from something like:
+
+```yaml
+modules:
+# example: no
+```
+
+to
+
+```yaml
+modules:
+ example: yes
+```
+
+Then [restart Netdata](/docs/netdata-agent/start-stop-restart.md) for the change to take effect.
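+
+For example, on systems that use systemd:
+
+```bash
+sudo systemctl restart netdata
+```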
+
+## Contributing
+
+If you want to contribute to this project, we are humbled. Please take a look at
+our [contributing guidelines](https://github.com/netdata/.github/blob/main/CONTRIBUTING.md) and don't hesitate to
+contact us in our forums.
+
+### How to develop a collector
+
+Read [how to write a Netdata collector in Go](/src/go/plugin/go.d/docs/how-to-write-a-module.md).
+
+## Troubleshooting
+
+Plugin CLI:
+
+```sh
+Usage:
+ orchestrator [OPTIONS] [update every]
+
+Application Options:
+ -m, --modules= module name to run (default: all)
+ -c, --config-dir= config dir to read
+ -w, --watch-path= config path to watch
+ -d, --debug debug mode
+ -v, --version display the version and exit
+
+Help Options:
+ -h, --help Show this help message
+```
+
+To debug a specific module:
+
+```sh
+# become user netdata
+sudo su -s /bin/bash netdata
+
+# run plugin in debug mode
+./go.d.plugin -d -m <module name>
+```
+
+Change `<module name>` to the [module name](#available-modules) you want to debug.
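+
+For example, to debug the `nginx` module:
+
+```sh
+./go.d.plugin -d -m nginx
+```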
+
+## Netdata Community
+
+This repository follows the Netdata Code of Conduct and is part of the Netdata Community.
+
+- [Community Forums](https://community.netdata.cloud)
+- [Netdata Code of Conduct](https://github.com/netdata/.github/blob/main/CODE_OF_CONDUCT.md)
diff --git a/src/go/plugin/go.d/agent/README.md b/src/go/plugin/go.d/agent/README.md
new file mode 100644
index 000000000..9e0654262
--- /dev/null
+++ b/src/go/plugin/go.d/agent/README.md
@@ -0,0 +1,157 @@
+# agent
+
+This library is a tool for writing [netdata](https://github.com/netdata/netdata) plugins.
+
+We strongly believe that custom plugins are very important, and they must be easy to write.
+
+
+Definitions:
+ - orchestrator
+   > plugin orchestrators are external plugins that do not collect any data by themselves. Instead, they support data collection modules written in the language of the orchestrator. Usually the orchestrator provides a higher-level abstraction, making it ideal for writing new data collection modules with a minimum of code.
+
+ - plugin
+   > a plugin is a set of data collection modules.
+
+ - module
+   > a module is a data collector. It collects, processes, and returns the processed data to the orchestrator.
+
+ - job
+   > a job is a module instance with specific settings.
+
+
+The package provides:
+ - CLI parser
+ - plugin orchestrator (loads configurations, creates and serves jobs)
+
+You are responsible only for __creating modules__.
+
+## Custom plugin example
+
+[Yep! So easy!](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/examples/simple/main.go)
+
+## How to write a Module
+
+A module is responsible for **creating charts** and **collecting data**. Implement the Module interface and that is it.
+
+```go
+type Module interface {
+ // Init does initialization.
+ // If it returns false, the job will be disabled.
+ Init() bool
+
+ // Check is called after Init.
+ // If it returns false, the job will be disabled.
+ Check() bool
+
+ // Charts returns the chart definition.
+ // Make sure not to share returned instance.
+ Charts() *Charts
+
+ // Collect collects metrics.
+ Collect() map[string]int64
+
+ // SetLogger sets logger.
+ SetLogger(l *logger.Logger)
+
+ // Cleanup performs cleanup if needed.
+ Cleanup()
+}
+
+// Base is a helper struct. All modules should embed this struct.
+type Base struct {
+ *logger.Logger
+}
+
+// SetLogger sets logger.
+func (b *Base) SetLogger(l *logger.Logger) { b.Logger = l }
+
+```
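+
+For illustration, a bare-bones module that follows the interface above could look like the sketch below. The package name, the chart, and the collected value are made up, and the `module` import path and exact method signatures should be checked against the current `module` package.
+
+```go
+package example
+
+import (
+	"math/rand"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+// Example is a minimal module: one chart, one dimension, random data.
+type Example struct {
+	module.Base // provides SetLogger, as described above
+}
+
+// Init prepares the module; returning false disables the job.
+func (e *Example) Init() bool { return true }
+
+// Check verifies that data can be collected; returning false disables the job.
+func (e *Example) Check() bool { return true }
+
+// Charts returns a fresh copy of the chart definitions.
+func (e *Example) Charts() *module.Charts {
+	return &module.Charts{
+		&module.Chart{ID: "random", Title: "A Random Number", Units: "number", Dims: module.Dims{{ID: "value"}}},
+	}
+}
+
+// Collect returns the collected values keyed by dimension ID.
+func (e *Example) Collect() map[string]int64 {
+	return map[string]int64{"value": rand.Int63n(100)}
+}
+
+// Cleanup releases resources; nothing to release here.
+func (e *Example) Cleanup() {}
+```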
+
+## How to write a Plugin
+
+Since a plugin is a set of modules, all you need to do is:
+ - write module(s)
+ - add module(s) to the plugins [registry](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/plugin/module/registry.go)
+ - start the plugin (see the sketch below)
+
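+A rough `main.go` for such a plugin might look like the following sketch. It assumes a hypothetical `Example` module (such as the one sketched earlier, defined in or imported into this package); `module.DefaultRegistry`, `module.Creator`, and `agent.New`/`Run` come from the agent packages, while the configuration directories are purely illustrative.
+
+```go
+package main
+
+import (
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent"
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func main() {
+	// Register the module under the name used in its configuration file.
+	// Example is the hypothetical module sketched in the previous section.
+	module.DefaultRegistry.Register("example", module.Creator{
+		Create: func() module.Module { return &Example{} },
+	})
+
+	// Create the orchestrator and serve jobs until the plugin is stopped.
+	agent.New(agent.Config{
+		Name:           "example-plugin",                        // plugin name
+		ConfDir:        []string{"/etc/netdata"},                // plugin config dir (illustrative)
+		ModulesConfDir: []string{"/etc/netdata/example-plugin"}, // module config dir (illustrative)
+	}).Run()
+}
+```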
+
+## How to integrate your plugin into Netdata
+
+Three simple steps:
+ - move the plugin to the `plugins.d` dir.
+ - add the plugin configuration file to the `etc/netdata/` dir.
+ - add the module configuration files to the `etc/netdata/<DIR_NAME>/` dir.
+
+Congratulations!
+
+## Configurations
+
+Configurations are written in [YAML](https://yaml.org/).
+
+ - plugin configuration:
+
+```yaml
+
+# Enable/disable the whole plugin.
+enabled: yes
+
+# Default enable/disable value for all modules.
+default_run: yes
+
+# Maximum number of used CPUs. Zero means no limit.
+max_procs: 0
+
+# Enable/disable specific plugin module
+modules:
+# module_name1: yes
+# module_name2: yes
+
+```
+
+ - module configuration:
+
+```yaml
+# [ GLOBAL ]
+update_every: 1
+autodetection_retry: 0
+
+# [ JOBS ]
+jobs:
+ - name: job1
+ param1: value1
+ param2: value2
+
+ - name: job2
+ param1: value1
+ param2: value2
+```
+
+The plugin uses `yaml.Unmarshal` to pass configuration parameters to the module, so make sure to add `yaml` tags to your configuration struct fields.
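+
+For example, a module whose jobs accept `url` and `timeout` parameters (hypothetical names) would declare them like this so that `yaml.Unmarshal` can populate them:
+
+```go
+// Config holds the per-job parameters read from the module configuration file.
+type Config struct {
+	URL     string `yaml:"url"`
+	Timeout int    `yaml:"timeout"`
+}
+```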
+
+## Debug
+
+Plugin CLI:
+```
+Usage:
+ plugin [OPTIONS] [update every]
+
+Application Options:
+ -d, --debug debug mode
+ -m, --modules= modules name (default: all)
+ -c, --config= config dir
+
+Help Options:
+ -h, --help Show this help message
+
+```
+
+Specific module debug:
+```
+# become user netdata
+sudo su -s /bin/bash netdata
+
+# run plugin in debug mode
+./<plugin_name> -d -m <module_name>
+```
+
+Change `<plugin_name>` to your plugin name and `<module_name>` to the module name you want to debug.
diff --git a/src/go/plugin/go.d/agent/agent.go b/src/go/plugin/go.d/agent/agent.go
new file mode 100644
index 000000000..2423e84e0
--- /dev/null
+++ b/src/go/plugin/go.d/agent/agent.go
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package agent
+
+import (
+ "context"
+ "io"
+ "log/slog"
+ "os"
+ "os/signal"
+ "sync"
+ "syscall"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/filelock"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/filestatus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/functions"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/jobmgr"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath"
+
+ "github.com/mattn/go-isatty"
+)
+
+var isTerminal = isatty.IsTerminal(os.Stdout.Fd())
+
+// Config is an Agent configuration.
+type Config struct {
+ Name string
+ ConfDir []string
+ ModulesConfDir []string
+ ModulesConfSDDir []string
+ ModulesConfWatchPath []string
+ VnodesConfDir []string
+ StateFile string
+ LockDir string
+ ModuleRegistry module.Registry
+ RunModule string
+ MinUpdateEvery int
+}
+
+// Agent represents orchestrator.
+type Agent struct {
+ *logger.Logger
+
+ Name string
+ ConfDir multipath.MultiPath
+ ModulesConfDir multipath.MultiPath
+ ModulesConfSDDir multipath.MultiPath
+ ModulesSDConfPath []string
+ VnodesConfDir multipath.MultiPath
+ StateFile string
+ LockDir string
+ RunModule string
+ MinUpdateEvery int
+ ModuleRegistry module.Registry
+ Out io.Writer
+
+ api *netdataapi.API
+}
+
+// New creates a new Agent.
+func New(cfg Config) *Agent {
+ return &Agent{
+ Logger: logger.New().With(
+ slog.String("component", "agent"),
+ ),
+ Name: cfg.Name,
+ ConfDir: cfg.ConfDir,
+ ModulesConfDir: cfg.ModulesConfDir,
+ ModulesConfSDDir: cfg.ModulesConfSDDir,
+ ModulesSDConfPath: cfg.ModulesConfWatchPath,
+ VnodesConfDir: cfg.VnodesConfDir,
+ StateFile: cfg.StateFile,
+ LockDir: cfg.LockDir,
+ RunModule: cfg.RunModule,
+ MinUpdateEvery: cfg.MinUpdateEvery,
+ ModuleRegistry: module.DefaultRegistry,
+ Out: safewriter.Stdout,
+ api: netdataapi.New(safewriter.Stdout),
+ }
+}
+
+// Run starts the Agent.
+func (a *Agent) Run() {
+ go a.keepAlive()
+ serve(a)
+}
+
+func serve(a *Agent) {
+ ch := make(chan os.Signal, 1)
+ signal.Notify(ch, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM)
+ var wg sync.WaitGroup
+
+ var exit bool
+
+ for {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ wg.Add(1)
+ go func() { defer wg.Done(); a.run(ctx) }()
+
+ switch sig := <-ch; sig {
+ case syscall.SIGHUP:
+ a.Infof("received %s signal (%d). Restarting running instance", sig, sig)
+ default:
+ a.Infof("received %s signal (%d). Terminating...", sig, sig)
+ module.DontObsoleteCharts()
+ exit = true
+ }
+
+ cancel()
+
+ func() {
+ timeout := time.Second * 10
+ t := time.NewTimer(timeout)
+ defer t.Stop()
+ done := make(chan struct{})
+
+ go func() { wg.Wait(); close(done) }()
+
+ select {
+ case <-t.C:
+ a.Errorf("stopping all goroutines timed out after %s. Exiting...", timeout)
+ os.Exit(0)
+ case <-done:
+ }
+ }()
+
+ if exit {
+ os.Exit(0)
+ }
+
+ time.Sleep(time.Second)
+ }
+}
+
+func (a *Agent) run(ctx context.Context) {
+ a.Info("instance is started")
+ defer func() { a.Info("instance is stopped") }()
+
+ cfg := a.loadPluginConfig()
+ a.Infof("using config: %s", cfg.String())
+
+ if !cfg.Enabled {
+ a.Info("plugin is disabled in the configuration file, exiting...")
+ if isTerminal {
+ os.Exit(0)
+ }
+ _ = a.api.DISABLE()
+ return
+ }
+
+ enabledModules := a.loadEnabledModules(cfg)
+ if len(enabledModules) == 0 {
+ a.Info("no modules to run")
+ if isTerminal {
+ os.Exit(0)
+ }
+ _ = a.api.DISABLE()
+ return
+ }
+
+ discCfg := a.buildDiscoveryConf(enabledModules)
+
+ discMgr, err := discovery.NewManager(discCfg)
+ if err != nil {
+ a.Error(err)
+ if isTerminal {
+ os.Exit(0)
+ }
+ return
+ }
+
+ fnMgr := functions.NewManager()
+
+ jobMgr := jobmgr.New()
+ jobMgr.PluginName = a.Name
+ jobMgr.Out = a.Out
+ jobMgr.Modules = enabledModules
+ jobMgr.ConfigDefaults = discCfg.Registry
+ jobMgr.FnReg = fnMgr
+
+ if reg := a.setupVnodeRegistry(); reg == nil || reg.Len() == 0 {
+ vnodes.Disabled = true
+ } else {
+ jobMgr.Vnodes = reg
+ }
+
+ if a.LockDir != "" {
+ jobMgr.FileLock = filelock.New(a.LockDir)
+ }
+
+ var fsMgr *filestatus.Manager
+ if !isTerminal && a.StateFile != "" {
+ fsMgr = filestatus.NewManager(a.StateFile)
+ jobMgr.FileStatus = fsMgr
+ if store, err := filestatus.LoadStore(a.StateFile); err != nil {
+ a.Warningf("couldn't load state file: %v", err)
+ } else {
+ jobMgr.FileStatusStore = store
+ }
+ }
+
+ in := make(chan []*confgroup.Group)
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() { defer wg.Done(); fnMgr.Run(ctx) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); jobMgr.Run(ctx, in) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); discMgr.Run(ctx, in) }()
+
+ if fsMgr != nil {
+ wg.Add(1)
+ go func() { defer wg.Done(); fsMgr.Run(ctx) }()
+ }
+
+ wg.Wait()
+ <-ctx.Done()
+}
+
+func (a *Agent) keepAlive() {
+ if isTerminal {
+ return
+ }
+
+ tk := time.NewTicker(time.Second)
+ defer tk.Stop()
+
+ var n int
+ for range tk.C {
+ if err := a.api.EMPTYLINE(); err != nil {
+ a.Infof("keepAlive: %v", err)
+ n++
+ } else {
+ n = 0
+ }
+ if n == 3 {
+ a.Info("too many keepAlive errors. Terminating...")
+ os.Exit(0)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/agent/agent_test.go b/src/go/plugin/go.d/agent/agent_test.go
new file mode 100644
index 000000000..9096b9015
--- /dev/null
+++ b/src/go/plugin/go.d/agent/agent_test.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package agent
+
+import (
+ "bytes"
+ "context"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TODO: tech debt
+func TestNew(t *testing.T) {
+
+}
+
+func TestAgent_Run(t *testing.T) {
+ a := New(Config{Name: "nodyncfg"})
+
+ var buf bytes.Buffer
+ a.Out = safewriter.New(&buf)
+
+ var mux sync.Mutex
+ stats := make(map[string]int)
+ a.ModuleRegistry = prepareRegistry(&mux, stats, "module1", "module2")
+
+ ctx, cancel := context.WithCancel(context.Background())
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() { defer wg.Done(); a.run(ctx) }()
+
+ time.Sleep(time.Second * 2)
+ cancel()
+ wg.Wait()
+
+ assert.Equalf(t, 1, stats["module1_init"], "module1 init")
+ assert.Equalf(t, 1, stats["module2_init"], "module2 init")
+ assert.Equalf(t, 1, stats["module1_check"], "module1 check")
+ assert.Equalf(t, 1, stats["module2_check"], "module2 check")
+ assert.Equalf(t, 1, stats["module1_charts"], "module1 charts")
+ assert.Equalf(t, 1, stats["module2_charts"], "module2 charts")
+ assert.Truef(t, stats["module1_collect"] > 0, "module1 collect")
+ assert.Truef(t, stats["module2_collect"] > 0, "module2 collect")
+ assert.Equalf(t, 1, stats["module1_cleanup"], "module1 cleanup")
+ assert.Equalf(t, 1, stats["module2_cleanup"], "module2 cleanup")
+ assert.True(t, buf.String() != "")
+}
+
+func prepareRegistry(mux *sync.Mutex, stats map[string]int, names ...string) module.Registry {
+ reg := module.Registry{}
+ for _, name := range names {
+ name := name
+ reg.Register(name, module.Creator{
+ Create: func() module.Module {
+ return prepareMockModule(name, mux, stats)
+ },
+ })
+ }
+ return reg
+}
+
+func prepareMockModule(name string, mux *sync.Mutex, stats map[string]int) module.Module {
+ return &module.MockModule{
+ InitFunc: func() error {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_init"]++
+ return nil
+ },
+ CheckFunc: func() error {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_check"]++
+ return nil
+ },
+ ChartsFunc: func() *module.Charts {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_charts"]++
+ return &module.Charts{
+ &module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}},
+ }
+ },
+ CollectFunc: func() map[string]int64 {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_collect"]++
+ return map[string]int64{"id1": 1}
+ },
+ CleanupFunc: func() {
+ mux.Lock()
+ defer mux.Unlock()
+ stats[name+"_cleanup"]++
+ },
+ }
+}
diff --git a/src/go/plugin/go.d/agent/confgroup/cache.go b/src/go/plugin/go.d/agent/confgroup/cache.go
new file mode 100644
index 000000000..8b369e653
--- /dev/null
+++ b/src/go/plugin/go.d/agent/confgroup/cache.go
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package confgroup
+
+func NewCache() *Cache {
+ return &Cache{
+ hashes: make(map[uint64]uint),
+ sources: make(map[string]map[uint64]Config),
+ }
+}
+
+type Cache struct {
+ hashes map[uint64]uint // map[cfgHash]cfgCount
+ sources map[string]map[uint64]Config // map[cfgSource]map[cfgHash]cfg
+}
+
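+// Add applies the group to the cache. It returns configs seen for the first
+// time across all sources (added) and configs that are no longer referenced
+// by any source (removed).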
+func (c *Cache) Add(group *Group) (added, removed []Config) {
+ if group == nil {
+ return nil, nil
+ }
+
+ if len(group.Configs) == 0 {
+ return c.addEmpty(group)
+ }
+
+ return c.addNotEmpty(group)
+}
+
+func (c *Cache) addEmpty(group *Group) (added, removed []Config) {
+ set, ok := c.sources[group.Source]
+ if !ok {
+ return nil, nil
+ }
+
+ for hash, cfg := range set {
+ c.hashes[hash]--
+ if c.hashes[hash] == 0 {
+ removed = append(removed, cfg)
+ }
+ delete(set, hash)
+ }
+
+ delete(c.sources, group.Source)
+
+ return nil, removed
+}
+
+func (c *Cache) addNotEmpty(group *Group) (added, removed []Config) {
+ set, ok := c.sources[group.Source]
+ if !ok {
+ set = make(map[uint64]Config)
+ c.sources[group.Source] = set
+ }
+
+ seen := make(map[uint64]struct{})
+
+ for _, cfg := range group.Configs {
+ hash := cfg.Hash()
+ seen[hash] = struct{}{}
+
+ if _, ok := set[hash]; ok {
+ continue
+ }
+
+ set[hash] = cfg
+ if c.hashes[hash] == 0 {
+ added = append(added, cfg)
+ }
+ c.hashes[hash]++
+ }
+
+ if !ok {
+ return added, nil
+ }
+
+ for hash, cfg := range set {
+ if _, ok := seen[hash]; ok {
+ continue
+ }
+
+ delete(set, hash)
+ c.hashes[hash]--
+ if c.hashes[hash] == 0 {
+ removed = append(removed, cfg)
+ }
+ }
+
+ if len(set) == 0 {
+ delete(c.sources, group.Source)
+ }
+
+ return added, removed
+}
diff --git a/src/go/plugin/go.d/agent/confgroup/cache_test.go b/src/go/plugin/go.d/agent/confgroup/cache_test.go
new file mode 100644
index 000000000..a2bbd4919
--- /dev/null
+++ b/src/go/plugin/go.d/agent/confgroup/cache_test.go
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package confgroup
+
+import (
+ "sort"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestConfigCache_Add(t *testing.T) {
+ tests := map[string]struct {
+ prepareGroups []Group
+ groups []Group
+ expectedAdd []Config
+ expectedRemove []Config
+ }{
+ "new group, new configs": {
+ groups: []Group{
+ prepareGroup("source", prepareCfg("name", "module")),
+ },
+ expectedAdd: []Config{
+ prepareCfg("name", "module"),
+ },
+ },
+ "several equal updates for the same group": {
+ groups: []Group{
+ prepareGroup("source", prepareCfg("name", "module")),
+ prepareGroup("source", prepareCfg("name", "module")),
+ prepareGroup("source", prepareCfg("name", "module")),
+ prepareGroup("source", prepareCfg("name", "module")),
+ prepareGroup("source", prepareCfg("name", "module")),
+ },
+ expectedAdd: []Config{
+ prepareCfg("name", "module"),
+ },
+ },
+ "empty group update for cached group": {
+ prepareGroups: []Group{
+ prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ },
+ groups: []Group{
+ prepareGroup("source"),
+ },
+ expectedRemove: []Config{
+ prepareCfg("name1", "module"),
+ prepareCfg("name2", "module"),
+ },
+ },
+ "changed group update for cached group": {
+ prepareGroups: []Group{
+ prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ },
+ groups: []Group{
+ prepareGroup("source", prepareCfg("name2", "module")),
+ },
+ expectedRemove: []Config{
+ prepareCfg("name1", "module"),
+ },
+ },
+ "empty group update for uncached group": {
+ groups: []Group{
+ prepareGroup("source"),
+ prepareGroup("source"),
+ },
+ },
+ "several updates with different source but same context": {
+ groups: []Group{
+ prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ },
+ expectedAdd: []Config{
+ prepareCfg("name1", "module"),
+ prepareCfg("name2", "module"),
+ },
+ },
+ "have equal configs from 2 sources, get empty group for the 1st source": {
+ prepareGroups: []Group{
+ prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
+ },
+ groups: []Group{
+ prepareGroup("source2"),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ cache := NewCache()
+
+ for _, group := range test.prepareGroups {
+ cache.Add(&group)
+ }
+
+ var added, removed []Config
+ for _, group := range test.groups {
+ a, r := cache.Add(&group)
+ added = append(added, a...)
+ removed = append(removed, r...)
+ }
+
+ sortConfigs(added)
+ sortConfigs(removed)
+ sortConfigs(test.expectedAdd)
+ sortConfigs(test.expectedRemove)
+
+ assert.Equalf(t, test.expectedAdd, added, "added configs")
+ assert.Equalf(t, test.expectedRemove, removed, "removed configs")
+ })
+ }
+}
+
+func prepareGroup(source string, cfgs ...Config) Group {
+ return Group{
+ Configs: cfgs,
+ Source: source,
+ }
+}
+
+func prepareCfg(name, module string) Config {
+ return Config{
+ "name": name,
+ "module": module,
+ }
+}
+
+func sortConfigs(cfgs []Config) {
+ if len(cfgs) == 0 {
+ return
+ }
+ sort.Slice(cfgs, func(i, j int) bool { return cfgs[i].FullName() < cfgs[j].FullName() })
+}
diff --git a/src/go/plugin/go.d/agent/confgroup/config.go b/src/go/plugin/go.d/agent/confgroup/config.go
new file mode 100644
index 000000000..8f0523f1a
--- /dev/null
+++ b/src/go/plugin/go.d/agent/confgroup/config.go
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package confgroup
+
+import (
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/hostinfo"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/ilyam8/hashstructure"
+ "gopkg.in/yaml.v2"
+)
+
+const (
+ keyName = "name"
+ keyModule = "module"
+ keyUpdateEvery = "update_every"
+ keyDetectRetry = "autodetection_retry"
+ keyPriority = "priority"
+ keyLabels = "labels"
+ keyVnode = "vnode"
+
+ ikeySource = "__source__"
+ ikeySourceType = "__source_type__"
+ ikeyProvider = "__provider__"
+)
+
+const (
+ TypeStock = "stock"
+ TypeUser = "user"
+ TypeDiscovered = "discovered"
+ TypeDyncfg = "dyncfg"
+)
+
+type Config map[string]any
+
+func (c Config) HashIncludeMap(_ string, k, _ any) (bool, error) {
+ s := k.(string)
+ return !(strings.HasPrefix(s, "__") || strings.HasSuffix(s, "__")), nil
+}
+
+func (c Config) Set(key string, value any) Config { c[key] = value; return c }
+func (c Config) Get(key string) any { return c[key] }
+
+func (c Config) Name() string { v, _ := c.Get(keyName).(string); return v }
+func (c Config) Module() string { v, _ := c.Get(keyModule).(string); return v }
+func (c Config) FullName() string { return fullName(c.Name(), c.Module()) }
+func (c Config) UpdateEvery() int { v, _ := c.Get(keyUpdateEvery).(int); return v }
+func (c Config) AutoDetectionRetry() int { v, _ := c.Get(keyDetectRetry).(int); return v }
+func (c Config) Priority() int { v, _ := c.Get(keyPriority).(int); return v }
+func (c Config) Labels() map[any]any { v, _ := c.Get(keyLabels).(map[any]any); return v }
+func (c Config) Hash() uint64 { return calcHash(c) }
+func (c Config) Vnode() string { v, _ := c.Get(keyVnode).(string); return v }
+
+func (c Config) SetName(v string) Config { return c.Set(keyName, v) }
+func (c Config) SetModule(v string) Config { return c.Set(keyModule, v) }
+
+func (c Config) UID() string {
+ return fmt.Sprintf("%s_%s_%s_%s_%d", c.SourceType(), c.Provider(), c.Source(), c.FullName(), c.Hash())
+}
+
+func (c Config) Source() string { v, _ := c.Get(ikeySource).(string); return v }
+func (c Config) SourceType() string { v, _ := c.Get(ikeySourceType).(string); return v }
+func (c Config) Provider() string { v, _ := c.Get(ikeyProvider).(string); return v }
+func (c Config) SetSource(v string) Config { return c.Set(ikeySource, v) }
+func (c Config) SetSourceType(v string) Config { return c.Set(ikeySourceType, v) }
+func (c Config) SetProvider(v string) Config { return c.Set(ikeyProvider, v) }
+
+func (c Config) SourceTypePriority() int {
+ switch c.SourceType() {
+ default:
+ return 0
+ case TypeStock:
+ return 2
+ case TypeDiscovered:
+ return 4
+ case TypeUser:
+ return 8
+ case TypeDyncfg:
+ return 16
+ }
+}
+
+func (c Config) Clone() (Config, error) {
+ type plain Config
+ bytes, err := yaml.Marshal((plain)(c))
+ if err != nil {
+ return nil, err
+ }
+ var newConfig Config
+ if err := yaml.Unmarshal(bytes, &newConfig); err != nil {
+ return nil, err
+ }
+ return newConfig, nil
+}
+
+func (c Config) ApplyDefaults(def Default) {
+ if c.UpdateEvery() <= 0 {
+ v := firstPositive(def.UpdateEvery, module.UpdateEvery)
+ c.Set("update_every", v)
+ }
+ if c.AutoDetectionRetry() <= 0 {
+ v := firstPositive(def.AutoDetectionRetry, module.AutoDetectionRetry)
+ c.Set("autodetection_retry", v)
+ }
+ if c.Priority() <= 0 {
+ v := firstPositive(def.Priority, module.Priority)
+ c.Set("priority", v)
+ }
+ if c.UpdateEvery() < def.MinUpdateEvery && def.MinUpdateEvery > 0 {
+ c.Set("update_every", def.MinUpdateEvery)
+ }
+ if c.Name() == "" {
+ c.Set("name", c.Module())
+ } else {
+ c.Set("name", cleanName(jobNameResolveHostname(c.Name())))
+ }
+
+ if v, ok := c.Get("url").(string); ok {
+ c.Set("url", urlResolveHostname(v))
+ }
+}
+
+var reInvalidCharacters = regexp.MustCompile(`\s+|\.+|:+`)
+
+func cleanName(name string) string {
+ return reInvalidCharacters.ReplaceAllString(name, "_")
+}
+
+func fullName(name, module string) string {
+ if name == module {
+ return name
+ }
+ return module + "_" + name
+}
+
+func calcHash(obj any) uint64 {
+ hash, _ := hashstructure.Hash(obj, nil)
+ return hash
+}
+
+func firstPositive(value int, others ...int) int {
+ if value > 0 || len(others) == 0 {
+ return value
+ }
+ return firstPositive(others[0], others[1:]...)
+}
+
+func urlResolveHostname(rawURL string) string {
+ if hostinfo.Hostname == "" || !strings.Contains(rawURL, "hostname") {
+ return rawURL
+ }
+
+ u, err := url.Parse(rawURL)
+ if err != nil || (u.Hostname() != "hostname" && !strings.Contains(u.Hostname(), "hostname.")) {
+ return rawURL
+ }
+
+ u.Host = strings.Replace(u.Host, "hostname", hostinfo.Hostname, 1)
+
+ return u.String()
+}
+
+func jobNameResolveHostname(name string) string {
+ if hostinfo.Hostname == "" || !strings.Contains(name, "hostname") {
+ return name
+ }
+
+ if name != "hostname" && !strings.HasPrefix(name, "hostname.") && !strings.HasPrefix(name, "hostname_") {
+ return name
+ }
+
+ return strings.Replace(name, "hostname", hostinfo.Hostname, 1)
+}
diff --git a/src/go/plugin/go.d/agent/confgroup/config_test.go b/src/go/plugin/go.d/agent/confgroup/config_test.go
new file mode 100644
index 000000000..98c6c3e78
--- /dev/null
+++ b/src/go/plugin/go.d/agent/confgroup/config_test.go
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package confgroup
+
+import (
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestConfig_Name(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ expected interface{}
+ }{
+ "string": {cfg: Config{"name": "name"}, expected: "name"},
+ "empty string": {cfg: Config{"name": ""}, expected: ""},
+ "not string": {cfg: Config{"name": 0}, expected: ""},
+ "not set": {cfg: Config{}, expected: ""},
+ "nil cfg": {expected: ""},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.expected, test.cfg.Name())
+ })
+ }
+}
+
+func TestConfig_Module(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ expected interface{}
+ }{
+ "string": {cfg: Config{"module": "module"}, expected: "module"},
+ "empty string": {cfg: Config{"module": ""}, expected: ""},
+ "not string": {cfg: Config{"module": 0}, expected: ""},
+ "not set": {cfg: Config{}, expected: ""},
+ "nil cfg": {expected: ""},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.expected, test.cfg.Module())
+ })
+ }
+}
+
+func TestConfig_FullName(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ expected interface{}
+ }{
+ "name == module": {cfg: Config{"name": "name", "module": "name"}, expected: "name"},
+ "name != module": {cfg: Config{"name": "name", "module": "module"}, expected: "module_name"},
+ "nil cfg": {expected: ""},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.expected, test.cfg.FullName())
+ })
+ }
+}
+
+func TestConfig_UpdateEvery(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ expected interface{}
+ }{
+ "int": {cfg: Config{"update_every": 1}, expected: 1},
+ "not int": {cfg: Config{"update_every": "1"}, expected: 0},
+ "not set": {cfg: Config{}, expected: 0},
+ "nil cfg": {expected: 0},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.expected, test.cfg.UpdateEvery())
+ })
+ }
+}
+
+func TestConfig_AutoDetectionRetry(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ expected interface{}
+ }{
+ "int": {cfg: Config{"autodetection_retry": 1}, expected: 1},
+ "not int": {cfg: Config{"autodetection_retry": "1"}, expected: 0},
+ "not set": {cfg: Config{}, expected: 0},
+ "nil cfg": {expected: 0},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.expected, test.cfg.AutoDetectionRetry())
+ })
+ }
+}
+
+func TestConfig_Priority(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ expected interface{}
+ }{
+ "int": {cfg: Config{"priority": 1}, expected: 1},
+ "not int": {cfg: Config{"priority": "1"}, expected: 0},
+ "not set": {cfg: Config{}, expected: 0},
+ "nil cfg": {expected: 0},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.expected, test.cfg.Priority())
+ })
+ }
+}
+
+func TestConfig_Hash(t *testing.T) {
+ tests := map[string]struct {
+ one, two Config
+ equal bool
+ }{
+ "same keys, no internal keys": {
+ one: Config{"name": "name"},
+ two: Config{"name": "name"},
+ equal: true,
+ },
+ "same keys, different internal keys": {
+ one: Config{"name": "name", "__key__": 1},
+ two: Config{"name": "name", "__value__": 1},
+ equal: true,
+ },
+ "same keys, same internal keys": {
+ one: Config{"name": "name", "__key__": 1},
+ two: Config{"name": "name", "__key__": 1},
+ equal: true,
+ },
+ "diff keys, no internal keys": {
+ one: Config{"name": "name1"},
+ two: Config{"name": "name2"},
+ equal: false,
+ },
+ "diff keys, different internal keys": {
+ one: Config{"name": "name1", "__key__": 1},
+ two: Config{"name": "name2", "__value__": 1},
+ equal: false,
+ },
+ "diff keys, same internal keys": {
+ one: Config{"name": "name1", "__key__": 1},
+ two: Config{"name": "name2", "__key__": 1},
+ equal: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if test.equal {
+ assert.Equal(t, test.one.Hash(), test.two.Hash())
+ } else {
+ assert.NotEqual(t, test.one.Hash(), test.two.Hash())
+ }
+ })
+ }
+ cfg := Config{"name": "name", "module": "module"}
+ assert.NotZero(t, cfg.Hash())
+}
+
+func TestConfig_SetModule(t *testing.T) {
+ cfg := Config{}
+ cfg.SetModule("name")
+
+ assert.Equal(t, cfg.Module(), "name")
+}
+
+func TestConfig_SetSource(t *testing.T) {
+ cfg := Config{}
+ cfg.SetSource("name")
+
+ assert.Equal(t, cfg.Source(), "name")
+}
+
+func TestConfig_SetProvider(t *testing.T) {
+ cfg := Config{}
+ cfg.SetProvider("name")
+
+ assert.Equal(t, cfg.Provider(), "name")
+}
+
+func TestConfig_Apply(t *testing.T) {
+ const jobDef = 11
+ const applyDef = 22
+ tests := map[string]struct {
+ def Default
+ origCfg Config
+ expectedCfg Config
+ }{
+ "+job +def": {
+ def: Default{
+ UpdateEvery: applyDef,
+ AutoDetectionRetry: applyDef,
+ Priority: applyDef,
+ },
+ origCfg: Config{
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ expectedCfg: Config{
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ },
+ "-job +def": {
+ def: Default{
+ UpdateEvery: applyDef,
+ AutoDetectionRetry: applyDef,
+ Priority: applyDef,
+ },
+ origCfg: Config{
+ "name": "name",
+ "module": "module",
+ },
+ expectedCfg: Config{
+ "name": "name",
+ "module": "module",
+ "update_every": applyDef,
+ "autodetection_retry": applyDef,
+ "priority": applyDef,
+ },
+ },
+ "-job -def (+global)": {
+ def: Default{},
+ origCfg: Config{
+ "name": "name",
+ "module": "module",
+ },
+ expectedCfg: Config{
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ },
+ "adjust update_every (update_every < min update every)": {
+ def: Default{
+ MinUpdateEvery: jobDef + 10,
+ },
+ origCfg: Config{
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ },
+ expectedCfg: Config{
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef + 10,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ },
+ "do not adjust update_every (update_every > min update every)": {
+ def: Default{
+ MinUpdateEvery: 2,
+ },
+ origCfg: Config{
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ },
+ expectedCfg: Config{
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ },
+ "set name to module name if name not set": {
+ def: Default{},
+ origCfg: Config{
+ "module": "module",
+ },
+ expectedCfg: Config{
+ "name": "module",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ },
+ "clean name": {
+ def: Default{},
+ origCfg: Config{
+ "name": "na me",
+ "module": "module",
+ },
+ expectedCfg: Config{
+ "name": "na_me",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ test.origCfg.ApplyDefaults(test.def)
+
+ assert.Equal(t, test.expectedCfg, test.origCfg)
+ })
+ }
+}
+
+func Test_urlResolveHostname(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantChanged bool
+ }{
+ "hostname with suffix": {
+ wantChanged: true,
+ input: "http://hostname.local:80/metrics",
+ },
+ "hostname without suffix": {
+ wantChanged: true,
+ input: "http://hostname:80/metrics",
+ },
+ "no hostname": {
+ wantChanged: false,
+ input: "http://127.0.0.1:80/metrics",
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+
+ if test.wantChanged {
+ assert.NotEqual(t, test.input, urlResolveHostname(test.input))
+ } else {
+ assert.Equal(t, test.input, urlResolveHostname(test.input))
+ }
+ })
+ }
+}
+
+func Test_jobNameResolveHostname(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantChanged bool
+ }{
+ "hostname with dot suffix": {
+ wantChanged: true,
+ input: "hostname.local",
+ },
+ "hostname with underscore suffix": {
+ wantChanged: true,
+ input: "hostname_local",
+ },
+ "hostname without suffix": {
+ wantChanged: true,
+ input: "hostname",
+ },
+ "no hostname": {
+ wantChanged: false,
+ input: "name",
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+
+ if test.wantChanged {
+ assert.NotEqual(t, test.input, jobNameResolveHostname(test.input))
+ } else {
+ assert.Equal(t, test.input, jobNameResolveHostname(test.input))
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/confgroup/group.go b/src/go/plugin/go.d/agent/confgroup/group.go
new file mode 100644
index 000000000..b8e7bd775
--- /dev/null
+++ b/src/go/plugin/go.d/agent/confgroup/group.go
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package confgroup
+
+type Group struct {
+ Configs []Config
+ Source string
+}
diff --git a/src/go/plugin/go.d/agent/confgroup/registry.go b/src/go/plugin/go.d/agent/confgroup/registry.go
new file mode 100644
index 000000000..295a75129
--- /dev/null
+++ b/src/go/plugin/go.d/agent/confgroup/registry.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package confgroup
+
+type Registry map[string]Default
+
+type Default struct {
+ MinUpdateEvery int `yaml:"-"`
+ UpdateEvery int `yaml:"update_every"`
+ AutoDetectionRetry int `yaml:"autodetection_retry"`
+ Priority int `yaml:"priority"`
+}
+
+func (r Registry) Register(name string, def Default) {
+ if name != "" {
+ r[name] = def
+ }
+}
+
+func (r Registry) Lookup(name string) (Default, bool) {
+ def, ok := r[name]
+ return def, ok
+}
diff --git a/src/go/plugin/go.d/agent/confgroup/registry_test.go b/src/go/plugin/go.d/agent/confgroup/registry_test.go
new file mode 100644
index 000000000..a63c0ceb1
--- /dev/null
+++ b/src/go/plugin/go.d/agent/confgroup/registry_test.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package confgroup
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRegistry_Register(t *testing.T) {
+ name := "module"
+ defaults := Default{
+ MinUpdateEvery: 1,
+ UpdateEvery: 1,
+ AutoDetectionRetry: 1,
+ Priority: 1,
+ }
+ expected := Registry{
+ name: defaults,
+ }
+
+ actual := Registry{}
+ actual.Register(name, defaults)
+
+ assert.Equal(t, expected, actual)
+}
+
+func TestRegistry_Lookup(t *testing.T) {
+ name := "module"
+ expected := Default{
+ MinUpdateEvery: 1,
+ UpdateEvery: 1,
+ AutoDetectionRetry: 1,
+ Priority: 1,
+ }
+ reg := Registry{}
+ reg.Register(name, expected)
+
+ actual, ok := reg.Lookup("module")
+
+ assert.True(t, ok)
+ assert.Equal(t, expected, actual)
+}
diff --git a/src/go/plugin/go.d/agent/config.go b/src/go/plugin/go.d/agent/config.go
new file mode 100644
index 000000000..fef68c7e0
--- /dev/null
+++ b/src/go/plugin/go.d/agent/config.go
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package agent
+
+import (
+ "fmt"
+
+ "gopkg.in/yaml.v2"
+)
+
+func defaultConfig() config {
+ return config{
+ Enabled: true,
+ DefaultRun: true,
+ MaxProcs: 0,
+ Modules: nil,
+ }
+}
+
+type config struct {
+ Enabled bool `yaml:"enabled"`
+ DefaultRun bool `yaml:"default_run"`
+ MaxProcs int `yaml:"max_procs"`
+ Modules map[string]bool `yaml:"modules"`
+}
+
+func (c *config) String() string {
+ return fmt.Sprintf("enabled '%v', default_run '%v', max_procs '%d'",
+ c.Enabled, c.DefaultRun, c.MaxProcs)
+}
+
+func (c *config) isExplicitlyEnabled(moduleName string) bool {
+ return c.isEnabled(moduleName, true)
+}
+
+func (c *config) isImplicitlyEnabled(moduleName string) bool {
+ return c.isEnabled(moduleName, false)
+}
+
+func (c *config) isEnabled(moduleName string, explicit bool) bool {
+ if enabled, ok := c.Modules[moduleName]; ok {
+ return enabled
+ }
+ if explicit {
+ return false
+ }
+ return c.DefaultRun
+}
+
+func (c *config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ type plain config
+ if err := unmarshal((*plain)(c)); err != nil {
+ return err
+ }
+
+ var m map[string]interface{}
+ if err := unmarshal(&m); err != nil {
+ return err
+ }
+
+ for key, value := range m {
+ switch key {
+ case "enabled", "default_run", "max_procs", "modules":
+ continue
+ }
+ var b bool
+ if in, err := yaml.Marshal(value); err != nil || yaml.Unmarshal(in, &b) != nil {
+ continue
+ }
+ if c.Modules == nil {
+ c.Modules = make(map[string]bool)
+ }
+ c.Modules[key] = b
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/agent/discovery/cache.go b/src/go/plugin/go.d/agent/discovery/cache.go
new file mode 100644
index 000000000..032ccca38
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/cache.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discovery
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+)
+
+type cache map[string]*confgroup.Group // [Source]
+
+func newCache() *cache {
+ return &cache{}
+}
+
+func (c cache) update(groups []*confgroup.Group) {
+ if len(groups) == 0 {
+ return
+ }
+ for _, group := range groups {
+ if group != nil {
+ c[group.Source] = group
+ }
+ }
+}
+
+func (c cache) reset() {
+ for key := range c {
+ delete(c, key)
+ }
+}
+
+func (c cache) groups() []*confgroup.Group {
+ groups := make([]*confgroup.Group, 0, len(c))
+ for _, group := range c {
+ groups = append(groups, group)
+ }
+ return groups
+}
diff --git a/src/go/plugin/go.d/agent/discovery/config.go b/src/go/plugin/go.d/agent/discovery/config.go
new file mode 100644
index 000000000..258d1b830
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/config.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discovery
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/dummy"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/file"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd"
+)
+
+type Config struct {
+ Registry confgroup.Registry
+ File file.Config
+ Dummy dummy.Config
+ SD sd.Config
+}
+
+func validateConfig(cfg Config) error {
+ if len(cfg.Registry) == 0 {
+ return errors.New("empty config registry")
+ }
+ if len(cfg.File.Read)+len(cfg.File.Watch) == 0 && len(cfg.Dummy.Names) == 0 {
+ return errors.New("discoverers not set")
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/agent/discovery/dummy/config.go b/src/go/plugin/go.d/agent/discovery/dummy/config.go
new file mode 100644
index 000000000..1e8e8f333
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/dummy/config.go
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dummy
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+)
+
+type Config struct {
+ Registry confgroup.Registry
+ Names []string
+}
+
+func validateConfig(cfg Config) error {
+ if len(cfg.Registry) == 0 {
+ return errors.New("empty config registry")
+ }
+ if len(cfg.Names) == 0 {
+ return errors.New("names not set")
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/agent/discovery/dummy/discovery.go b/src/go/plugin/go.d/agent/discovery/dummy/discovery.go
new file mode 100644
index 000000000..6fad0f059
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/dummy/discovery.go
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dummy
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+)
+
+func NewDiscovery(cfg Config) (*Discovery, error) {
+ if err := validateConfig(cfg); err != nil {
+ return nil, fmt.Errorf("config validation: %v", err)
+ }
+ d := &Discovery{
+ Logger: logger.New().With(
+ slog.String("component", "discovery"),
+ slog.String("discoverer", "dummy"),
+ ),
+ reg: cfg.Registry,
+ names: cfg.Names,
+ }
+ return d, nil
+}
+
+type Discovery struct {
+ *logger.Logger
+
+ reg confgroup.Registry
+ names []string
+}
+
+func (d *Discovery) String() string {
+ return d.Name()
+}
+
+func (d *Discovery) Name() string {
+ return "dummy discovery"
+}
+
+func (d *Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+ d.Info("instance is started")
+ defer func() { d.Info("instance is stopped") }()
+
+ select {
+ case <-ctx.Done():
+ case in <- d.groups():
+ }
+
+ close(in)
+}
+
+func (d *Discovery) groups() []*confgroup.Group {
+ group := &confgroup.Group{Source: "internal"}
+
+ for _, name := range d.names {
+ def, ok := d.reg.Lookup(name)
+ if !ok {
+ continue
+ }
+ src := "internal"
+ cfg := confgroup.Config{}
+ cfg.SetModule(name)
+ cfg.SetProvider("dummy")
+ cfg.SetSourceType(confgroup.TypeStock)
+ cfg.SetSource(src)
+ cfg.ApplyDefaults(def)
+
+ group.Configs = append(group.Configs, cfg)
+ }
+
+ return []*confgroup.Group{group}
+}
diff --git a/src/go/plugin/go.d/agent/discovery/dummy/discovery_test.go b/src/go/plugin/go.d/agent/discovery/dummy/discovery_test.go
new file mode 100644
index 000000000..2c908eb66
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/dummy/discovery_test.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dummy
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewDiscovery(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ wantErr bool
+ }{
+ "valid config": {
+ cfg: Config{
+ Registry: confgroup.Registry{"module1": confgroup.Default{}},
+ Names: []string{"module1", "module2"},
+ },
+ },
+ "invalid config, registry not set": {
+ cfg: Config{
+ Names: []string{"module1", "module2"},
+ },
+ wantErr: true,
+ },
+ "invalid config, names not set": {
+ cfg: Config{
+				Registry: confgroup.Registry{"module1": confgroup.Default{}},
+ },
+ wantErr: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ d, err := NewDiscovery(test.cfg)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ assert.NotNil(t, d)
+ }
+ })
+ }
+}
+
+func TestDiscovery_Run(t *testing.T) {
+ expected := []*confgroup.Group{
+ {
+ Source: "internal",
+ Configs: []confgroup.Config{
+ {
+ "name": "module1",
+ "module": "module1",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "dummy",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": "internal",
+ },
+ {
+ "name": "module2",
+ "module": "module2",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "dummy",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": "internal",
+ },
+ },
+ },
+ }
+
+ reg := confgroup.Registry{
+ "module1": {},
+ "module2": {},
+ }
+ cfg := Config{
+ Registry: reg,
+ Names: []string{"module1", "module2"},
+ }
+
+ discovery, err := NewDiscovery(cfg)
+ require.NoError(t, err)
+
+ in := make(chan []*confgroup.Group)
+ timeout := time.Second * 2
+
+ go discovery.Run(context.Background(), in)
+
+ var actual []*confgroup.Group
+ select {
+ case actual = <-in:
+ case <-time.After(timeout):
+ t.Logf("discovery timed out after %s", timeout)
+ }
+ assert.Equal(t, expected, actual)
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/config.go b/src/go/plugin/go.d/agent/discovery/file/config.go
new file mode 100644
index 000000000..3836d201a
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/config.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+)
+
+type Config struct {
+ Registry confgroup.Registry
+ Read []string
+ Watch []string
+}
+
+func validateConfig(cfg Config) error {
+ if len(cfg.Registry) == 0 {
+ return errors.New("empty config registry")
+ }
+ if len(cfg.Read)+len(cfg.Watch) == 0 {
+ return errors.New("discoverers not set")
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/discovery.go b/src/go/plugin/go.d/agent/discovery/file/discovery.go
new file mode 100644
index 000000000..527b1cbbc
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/discovery.go
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "sync"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+)
+
+var log = logger.New().With(
+ slog.String("component", "discovery"),
+ slog.String("discoverer", "file"),
+)
+
+func NewDiscovery(cfg Config) (*Discovery, error) {
+ if err := validateConfig(cfg); err != nil {
+ return nil, fmt.Errorf("file discovery config validation: %v", err)
+ }
+
+ d := Discovery{
+ Logger: log,
+ }
+
+ if err := d.registerDiscoverers(cfg); err != nil {
+ return nil, fmt.Errorf("file discovery initialization: %v", err)
+ }
+
+ return &d, nil
+}
+
+type (
+ Discovery struct {
+ *logger.Logger
+ discoverers []discoverer
+ }
+ discoverer interface {
+ Run(ctx context.Context, in chan<- []*confgroup.Group)
+ }
+)
+
+func (d *Discovery) String() string {
+ return d.Name()
+}
+
+func (d *Discovery) Name() string {
+ return fmt.Sprintf("file discovery: %v", d.discoverers)
+}
+
+func (d *Discovery) registerDiscoverers(cfg Config) error {
+ if len(cfg.Read) != 0 {
+ d.discoverers = append(d.discoverers, NewReader(cfg.Registry, cfg.Read))
+ }
+ if len(cfg.Watch) != 0 {
+ d.discoverers = append(d.discoverers, NewWatcher(cfg.Registry, cfg.Watch))
+ }
+ if len(d.discoverers) == 0 {
+ return errors.New("zero registered discoverers")
+ }
+ return nil
+}
+
+func (d *Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+ d.Info("instance is started")
+ defer func() { d.Info("instance is stopped") }()
+
+ var wg sync.WaitGroup
+
+ for _, dd := range d.discoverers {
+ wg.Add(1)
+ go func(dd discoverer) {
+ defer wg.Done()
+ d.runDiscoverer(ctx, dd, in)
+ }(dd)
+ }
+
+ wg.Wait()
+ <-ctx.Done()
+}
+
+func (d *Discovery) runDiscoverer(ctx context.Context, dd discoverer, in chan<- []*confgroup.Group) {
+ updates := make(chan []*confgroup.Group)
+ go dd.Run(ctx, updates)
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case groups, ok := <-updates:
+ if !ok {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ return
+ case in <- groups:
+ }
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/discovery_test.go b/src/go/plugin/go.d/agent/discovery/file/discovery_test.go
new file mode 100644
index 000000000..2bdb669eb
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/discovery_test.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+// TODO: tech debt
+func TestNewDiscovery(t *testing.T) {
+
+}
+
+// TODO: tech debt
+func TestDiscovery_Run(t *testing.T) {
+
+}
+
+func prepareDiscovery(t *testing.T, cfg Config) *Discovery {
+ d, err := NewDiscovery(cfg)
+ require.NoError(t, err)
+ return d
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/parse.go b/src/go/plugin/go.d/agent/discovery/file/parse.go
new file mode 100644
index 000000000..5fd31f32a
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/parse.go
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+
+ "gopkg.in/yaml.v2"
+)
+
+type format int
+
+const (
+ unknownFormat format = iota
+ unknownEmptyFormat
+ staticFormat
+ sdFormat
+)
+
+func parse(req confgroup.Registry, path string) (*confgroup.Group, error) {
+ bs, err := os.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+ if len(bs) == 0 {
+ return nil, nil
+ }
+
+ switch cfgFormat(bs) {
+ case staticFormat:
+ return parseStaticFormat(req, path, bs)
+ case sdFormat:
+ return parseSDFormat(req, path, bs)
+ case unknownEmptyFormat:
+ return nil, nil
+ default:
+ return nil, fmt.Errorf("unknown file format: '%s'", path)
+ }
+}
+
+func parseStaticFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.Group, error) {
+ name := fileName(path)
+ // TODO: properly handle module renaming
+ // See agent/setup.go buildDiscoveryConf() for details
+ if name == "wmi" {
+ name = "windows"
+ }
+ modDef, ok := reg.Lookup(name)
+ if !ok {
+ return nil, nil
+ }
+
+ var modCfg staticConfig
+ if err := yaml.Unmarshal(bs, &modCfg); err != nil {
+ return nil, err
+ }
+
+ for _, cfg := range modCfg.Jobs {
+ cfg.SetModule(name)
+ def := mergeDef(modCfg.Default, modDef)
+ cfg.ApplyDefaults(def)
+ }
+
+ group := &confgroup.Group{
+ Configs: modCfg.Jobs,
+ Source: path,
+ }
+
+ return group, nil
+}
+
+func parseSDFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.Group, error) {
+ var cfgs sdConfig
+ if err := yaml.Unmarshal(bs, &cfgs); err != nil {
+ return nil, err
+ }
+
+ var i int
+ for _, cfg := range cfgs {
+ if def, ok := reg.Lookup(cfg.Module()); ok && cfg.Module() != "" {
+ cfg.ApplyDefaults(def)
+ cfgs[i] = cfg
+ i++
+ }
+ }
+
+ group := &confgroup.Group{
+ Configs: cfgs[:i],
+ Source: path,
+ }
+
+ return group, nil
+}
+
+func cfgFormat(bs []byte) format {
+ var data interface{}
+ if err := yaml.Unmarshal(bs, &data); err != nil {
+ return unknownFormat
+ }
+ if data == nil {
+ return unknownEmptyFormat
+ }
+
+ type (
+ static = map[any]any
+ sd = []any
+ )
+ switch data.(type) {
+ case static:
+ return staticFormat
+ case sd:
+ return sdFormat
+ default:
+ return unknownFormat
+ }
+}
+
+func mergeDef(a, b confgroup.Default) confgroup.Default {
+ return confgroup.Default{
+ MinUpdateEvery: firstPositive(a.MinUpdateEvery, b.MinUpdateEvery),
+ UpdateEvery: firstPositive(a.UpdateEvery, b.UpdateEvery),
+ AutoDetectionRetry: firstPositive(a.AutoDetectionRetry, b.AutoDetectionRetry),
+ Priority: firstPositive(a.Priority, b.Priority),
+ }
+}
+
+func firstPositive(value int, others ...int) int {
+ if value > 0 || len(others) == 0 {
+ return value
+ }
+ return firstPositive(others[0], others[1:]...)
+}
+
+func fileName(path string) string {
+ _, file := filepath.Split(path)
+ ext := filepath.Ext(path)
+ return file[:len(file)-len(ext)]
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/parse_test.go b/src/go/plugin/go.d/agent/discovery/file/parse_test.go
new file mode 100644
index 000000000..5790f5650
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/parse_test.go
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParse(t *testing.T) {
+ const (
+ jobDef = 11
+ cfgDef = 22
+ modDef = 33
+ )
+ tests := map[string]struct {
+ test func(t *testing.T, tmp *tmpDir)
+ }{
+ "static, default: +job +conf +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := staticConfig{
+ Default: confgroup.Default{
+ UpdateEvery: cfgDef,
+ AutoDetectionRetry: cfgDef,
+ Priority: cfgDef,
+ },
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "static, default: +job +conf +module (merge all)": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ Priority: modDef,
+ },
+ }
+ cfg := staticConfig{
+ Default: confgroup.Default{
+ AutoDetectionRetry: cfgDef,
+ },
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ "update_every": jobDef,
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ "autodetection_retry": cfgDef,
+ "priority": modDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "static, default: -job +conf +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := staticConfig{
+ Default: confgroup.Default{
+ UpdateEvery: cfgDef,
+ AutoDetectionRetry: cfgDef,
+ Priority: cfgDef,
+ },
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": cfgDef,
+ "autodetection_retry": cfgDef,
+ "priority": cfgDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "static, default: -job -conf +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := staticConfig{
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "autodetection_retry": modDef,
+ "priority": modDef,
+ "update_every": modDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "static, default: -job -conf -module (+global)": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := staticConfig{
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "update_every": module.UpdateEvery,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "sd, default: +job +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "sd_module",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "module": "sd_module",
+ "name": "name",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "sd, default: -job +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "sd_module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "sd_module",
+ "update_every": modDef,
+ "autodetection_retry": modDef,
+ "priority": modDef,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "sd, default: -job -module (+global)": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "sd_module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "sd_module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
+ },
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "sd, job has no 'module' or 'module' is empty": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{},
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "conf registry has no module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{},
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "empty file": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+
+ filename := tmp.createFile("empty-*")
+ group, err := parse(reg, filename)
+
+ assert.Nil(t, group)
+ require.NoError(t, err)
+ },
+ },
+ "only comments, unknown empty format": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{}
+
+ filename := tmp.createFile("unknown-empty-format-*")
+ tmp.writeString(filename, "# a comment")
+ group, err := parse(reg, filename)
+
+ assert.Nil(t, group)
+ assert.NoError(t, err)
+ },
+ },
+ "unknown format": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{}
+
+ filename := tmp.createFile("unknown-format-*")
+ tmp.writeYAML(filename, "unknown")
+ group, err := parse(reg, filename)
+
+ assert.Nil(t, group)
+ assert.Error(t, err)
+ },
+ },
+ }
+
+ for name, scenario := range tests {
+ t.Run(name, func(t *testing.T) {
+ tmp := newTmpDir(t, "parse-file-*")
+ defer tmp.cleanup()
+
+ scenario.test(t, tmp)
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/read.go b/src/go/plugin/go.d/agent/discovery/file/read.go
new file mode 100644
index 000000000..3e7869ba7
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/read.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+)
+
+type (
+ staticConfig struct {
+ confgroup.Default `yaml:",inline"`
+ Jobs []confgroup.Config `yaml:"jobs"`
+ }
+ sdConfig []confgroup.Config
+)
+
+func NewReader(reg confgroup.Registry, paths []string) *Reader {
+ return &Reader{
+ Logger: log,
+ reg: reg,
+ paths: paths,
+ }
+}
+
+type Reader struct {
+ *logger.Logger
+
+ reg confgroup.Registry
+ paths []string
+}
+
+func (r *Reader) String() string {
+ return r.Name()
+}
+
+func (r *Reader) Name() string {
+ return "file reader"
+}
+
+func (r *Reader) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+ r.Info("instance is started")
+ defer func() { r.Info("instance is stopped") }()
+
+ select {
+ case <-ctx.Done():
+ case in <- r.groups():
+ }
+
+ close(in)
+}
+
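+// groups expands the configured glob patterns and parses every matching regular file
+// into a config group; files that fail to parse are skipped with a warning.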
+func (r *Reader) groups() (groups []*confgroup.Group) {
+ for _, pattern := range r.paths {
+ matches, err := filepath.Glob(pattern)
+ if err != nil {
+ continue
+ }
+
+ for _, path := range matches {
+ if fi, err := os.Stat(path); err != nil || !fi.Mode().IsRegular() {
+ continue
+ }
+
+ group, err := parse(r.reg, path)
+ if err != nil {
+ r.Warningf("parse '%s': %v", path, err)
+ continue
+ }
+
+ if group == nil {
+ group = &confgroup.Group{Source: path}
+ } else {
+ for _, cfg := range group.Configs {
+ cfg.SetProvider("file reader")
+ cfg.SetSourceType(configSourceType(path))
+ cfg.SetSource(fmt.Sprintf("discoverer=file_reader,file=%s", path))
+ }
+ }
+ groups = append(groups, group)
+ }
+ }
+
+ return groups
+}
+
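+// configSourceType classifies a config file as user-provided or stock based on its path.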
+func configSourceType(path string) string {
+ if strings.Contains(path, "/etc/netdata") {
+ return "user"
+ }
+ return "stock"
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/read_test.go b/src/go/plugin/go.d/agent/discovery/file/read_test.go
new file mode 100644
index 000000000..1bde06c5e
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/read_test.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestReader_String(t *testing.T) {
+ assert.NotEmpty(t, NewReader(confgroup.Registry{}, nil).String())
+}
+
+func TestNewReader(t *testing.T) {
+ tests := map[string]struct {
+ reg confgroup.Registry
+ paths []string
+ }{
+ "empty inputs": {
+ reg: confgroup.Registry{},
+ paths: []string{},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.NotNil(t, NewReader(test.reg, test.paths))
+ })
+ }
+}
+
+func TestReader_Run(t *testing.T) {
+ tests := map[string]struct {
+ createSim func(tmp *tmpDir) discoverySim
+ }{
+ "read multiple files": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ module1 := tmp.join("module1.conf")
+ module2 := tmp.join("module2.conf")
+ module3 := tmp.join("module3.conf")
+
+ tmp.writeYAML(module1, staticConfig{
+ Jobs: []confgroup.Config{{"name": "name"}},
+ })
+ tmp.writeYAML(module2, staticConfig{
+ Jobs: []confgroup.Config{{"name": "name"}},
+ })
+ tmp.writeString(module3, "# a comment")
+
+ reg := confgroup.Registry{
+ "module1": {},
+ "module2": {},
+ "module3": {},
+ }
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Read: []string{module1, module2, module3},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: module1,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module1",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file reader",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_reader,file=%s", module1),
+ },
+ },
+ },
+ {
+ Source: module2,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module2",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file reader",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_reader,file=%s", module2),
+ },
+ },
+ },
+ {
+ Source: module3,
+ },
+ }
+
+ return discoverySim{
+ discovery: discovery,
+ expectedGroups: expected,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tmp := newTmpDir(t, "reader-run-*")
+ defer tmp.cleanup()
+
+ test.createSim(tmp).run(t)
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/sim_test.go b/src/go/plugin/go.d/agent/discovery/file/sim_test.go
new file mode 100644
index 000000000..3219c6892
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/sim_test.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "context"
+ "os"
+ "path/filepath"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+type (
+ discoverySim struct {
+ discovery *Discovery
+ beforeRun func()
+ afterRun func()
+ expectedGroups []*confgroup.Group
+ }
+)
+
+func (sim discoverySim) run(t *testing.T) {
+ t.Helper()
+ require.NotNil(t, sim.discovery)
+
+ if sim.beforeRun != nil {
+ sim.beforeRun()
+ }
+
+ in, out := make(chan []*confgroup.Group), make(chan []*confgroup.Group)
+ go sim.collectGroups(t, in, out)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+ defer cancel()
+ go sim.discovery.Run(ctx, in)
+ time.Sleep(time.Millisecond * 250)
+
+ if sim.afterRun != nil {
+ sim.afterRun()
+ }
+
+ actual := <-out
+
+ sortGroups(actual)
+ sortGroups(sim.expectedGroups)
+
+ assert.Equal(t, sim.expectedGroups, actual)
+}
+
+func (sim discoverySim) collectGroups(t *testing.T, in, out chan []*confgroup.Group) {
+ timeout := time.Second * 5
+ var groups []*confgroup.Group
+loop:
+ for {
+ select {
+ case updates := <-in:
+ if groups = append(groups, updates...); len(groups) >= len(sim.expectedGroups) {
+ break loop
+ }
+ case <-time.After(timeout):
+ t.Logf("discovery %s timed out after %s, got %d groups, expected %d, some events are skipped",
+ sim.discovery.discoverers, timeout, len(groups), len(sim.expectedGroups))
+ break loop
+ }
+ }
+ out <- groups
+}
+
+type tmpDir struct {
+ dir string
+ t *testing.T
+}
+
+func newTmpDir(t *testing.T, pattern string) *tmpDir {
+ pattern = "netdata-go-test-discovery-file-" + pattern
+ dir, err := os.MkdirTemp(os.TempDir(), pattern)
+ require.NoError(t, err)
+ return &tmpDir{dir: dir, t: t}
+}
+
+func (d *tmpDir) cleanup() {
+ assert.NoError(d.t, os.RemoveAll(d.dir))
+}
+
+func (d *tmpDir) join(filename string) string {
+ return filepath.Join(d.dir, filename)
+}
+
+func (d *tmpDir) createFile(pattern string) string {
+ f, err := os.CreateTemp(d.dir, pattern)
+ require.NoError(d.t, err)
+ _ = f.Close()
+ return f.Name()
+}
+
+func (d *tmpDir) removeFile(filename string) {
+ err := os.Remove(filename)
+ require.NoError(d.t, err)
+}
+
+func (d *tmpDir) renameFile(origFilename, newFilename string) {
+ err := os.Rename(origFilename, newFilename)
+ require.NoError(d.t, err)
+}
+
+func (d *tmpDir) writeYAML(filename string, in interface{}) {
+ bs, err := yaml.Marshal(in)
+ require.NoError(d.t, err)
+ err = os.WriteFile(filename, bs, 0644)
+ require.NoError(d.t, err)
+}
+
+func (d *tmpDir) writeString(filename, data string) {
+ err := os.WriteFile(filename, []byte(data), 0644)
+ require.NoError(d.t, err)
+}
+
+func sortGroups(groups []*confgroup.Group) {
+ if len(groups) == 0 {
+ return
+ }
+ sort.Slice(groups, func(i, j int) bool { return groups[i].Source < groups[j].Source })
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/watch.go b/src/go/plugin/go.d/agent/discovery/file/watch.go
new file mode 100644
index 000000000..7adefd261
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/watch.go
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+type (
+ Watcher struct {
+ *logger.Logger
+
+ paths []string
+ reg confgroup.Registry
+ watcher *fsnotify.Watcher
+ cache cache
+ refreshEvery time.Duration
+ }
+ cache map[string]time.Time
+)
+
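+// cache maps a config file path to its last seen modification time, letting refresh
+// skip unchanged files and emit an empty group for files that have disappeared.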
+func (c cache) lookup(path string) (time.Time, bool) { v, ok := c[path]; return v, ok }
+func (c cache) has(path string) bool { _, ok := c.lookup(path); return ok }
+func (c cache) remove(path string) { delete(c, path) }
+func (c cache) put(path string, modTime time.Time) { c[path] = modTime }
+
+func NewWatcher(reg confgroup.Registry, paths []string) *Watcher {
+ d := &Watcher{
+ Logger: log,
+ paths: paths,
+ reg: reg,
+ watcher: nil,
+ cache: make(cache),
+ refreshEvery: time.Minute,
+ }
+ return d
+}
+
+func (w *Watcher) String() string {
+ return w.Name()
+}
+
+func (w *Watcher) Name() string {
+ return "file watcher"
+}
+
+func (w *Watcher) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+ w.Info("instance is started")
+ defer func() { w.Info("instance is stopped") }()
+
+ watcher, err := fsnotify.NewWatcher()
+ if err != nil {
+ w.Errorf("fsnotify watcher initialization: %v", err)
+ return
+ }
+
+ w.watcher = watcher
+ defer w.stop()
+ w.refresh(ctx, in)
+
+ tk := time.NewTicker(w.refreshEvery)
+ defer tk.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-tk.C:
+ w.refresh(ctx, in)
+ case event := <-w.watcher.Events:
+ // TODO: check if event.Has will do
+ if event.Name == "" || isChmodOnly(event) || !w.fileMatches(event.Name) {
+ break
+ }
+ if event.Has(fsnotify.Create) && w.cache.has(event.Name) {
+ // vim "backupcopy=no" case, already collected after Rename event.
+ break
+ }
+ if event.Has(fsnotify.Rename) {
+ // Files are commonly edited with vim. When vim writes a file it first makes a backup;
+ // the "backupcopy" option controls how. The default, "no", renames the original file
+ // and writes a new one. Sleeping briefly is a cheap attempt to avoid sending an empty
+ // group for the renamed (old) file.
+ time.Sleep(time.Millisecond * 100)
+ }
+ w.refresh(ctx, in)
+ case err := <-w.watcher.Errors:
+ if err != nil {
+ w.Warningf("watch: %v", err)
+ }
+ }
+ }
+}
+
+func (w *Watcher) fileMatches(file string) bool {
+ for _, pattern := range w.paths {
+ if ok, _ := filepath.Match(pattern, file); ok {
+ return true
+ }
+ }
+ return false
+}
+
+func (w *Watcher) listFiles() (files []string) {
+ for _, pattern := range w.paths {
+ if matches, err := filepath.Glob(pattern); err == nil {
+ files = append(files, matches...)
+ }
+ }
+ return files
+}
+
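+// refresh re-reads all files matching the configured patterns, parses those whose
+// modification time changed, and sends an empty group for files that were removed.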
+func (w *Watcher) refresh(ctx context.Context, in chan<- []*confgroup.Group) {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ }
+ var groups []*confgroup.Group
+ seen := make(map[string]bool)
+
+ for _, file := range w.listFiles() {
+ fi, err := os.Lstat(file)
+ if err != nil {
+ w.Warningf("lstat '%s': %v", file, err)
+ continue
+ }
+
+ if !fi.Mode().IsRegular() {
+ continue
+ }
+
+ seen[file] = true
+ if v, ok := w.cache.lookup(file); ok && v.Equal(fi.ModTime()) {
+ continue
+ }
+ w.cache.put(file, fi.ModTime())
+
+ if group, err := parse(w.reg, file); err != nil {
+ w.Warningf("parse '%s': %v", file, err)
+ } else if group == nil {
+ groups = append(groups, &confgroup.Group{Source: file})
+ } else {
+ for _, cfg := range group.Configs {
+ cfg.SetProvider("file watcher")
+ cfg.SetSourceType(configSourceType(file))
+ cfg.SetSource(fmt.Sprintf("discoverer=file_watcher,file=%s", file))
+ }
+ groups = append(groups, group)
+ }
+ }
+
+ for name := range w.cache {
+ if seen[name] {
+ continue
+ }
+ w.cache.remove(name)
+ groups = append(groups, &confgroup.Group{Source: name})
+ }
+
+ send(ctx, in, groups)
+
+ w.watchDirs()
+}
+
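+// watchDirs adds the parent directory of every configured path pattern to the fsnotify
+// watcher, so create, rename, and remove events for files under those directories are received.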
+func (w *Watcher) watchDirs() {
+ for _, path := range w.paths {
+ if idx := strings.LastIndex(path, "/"); idx > -1 {
+ path = path[:idx]
+ } else {
+ path = "./"
+ }
+ if err := w.watcher.Add(path); err != nil {
+ w.Errorf("start watching '%s': %v", path, err)
+ }
+ }
+}
+
+func (w *Watcher) stop() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // closing the watcher deadlocks unless all events and errors are drained.
+ go func() {
+ for {
+ select {
+ case <-w.watcher.Errors:
+ case <-w.watcher.Events:
+ case <-ctx.Done():
+ return
+ }
+ }
+ }()
+
+ _ = w.watcher.Close()
+}
+
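+// isChmodOnly reports whether the event carries only the Chmod flag;
+// such events do not change file content and are ignored by the watcher.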
+func isChmodOnly(event fsnotify.Event) bool {
+ return event.Op^fsnotify.Chmod == 0
+}
+
+func send(ctx context.Context, in chan<- []*confgroup.Group, groups []*confgroup.Group) {
+ if len(groups) == 0 {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ case in <- groups:
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/file/watch_test.go b/src/go/plugin/go.d/agent/discovery/file/watch_test.go
new file mode 100644
index 000000000..f29b5d579
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/file/watch_test.go
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package file
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWatcher_String(t *testing.T) {
+ assert.NotEmpty(t, NewWatcher(confgroup.Registry{}, nil).String())
+}
+
+func TestNewWatcher(t *testing.T) {
+ tests := map[string]struct {
+ reg confgroup.Registry
+ paths []string
+ }{
+ "empty inputs": {
+ reg: confgroup.Registry{},
+ paths: []string{},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.NotNil(t, NewWatcher(test.reg, test.paths))
+ })
+ }
+}
+
+func TestWatcher_Run(t *testing.T) {
+ tests := map[string]struct {
+ createSim func(tmp *tmpDir) discoverySim
+ }{
+ "file exists before start": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
+ },
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ },
+ "empty file": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeString(filename, "")
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ },
+ "only comments, no data": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeString(filename, "# a comment")
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ },
+ "add file": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
+ },
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ afterRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ },
+ "remove file": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
+ },
+ },
+ {
+ Source: filename,
+ Configs: nil,
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ afterRun: func() {
+ tmp.removeFile(filename)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ },
+ "change file": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfgOrig := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ cfgChanged := sdConfig{
+ {
+ "name": "name_changed",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
+ },
+ },
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name_changed",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
+ },
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfgOrig)
+ },
+ afterRun: func() {
+ tmp.writeYAML(filename, cfgChanged)
+ time.Sleep(time.Millisecond * 500)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ },
+ "vim 'backupcopy=no' (writing to a file and backup)": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
+ },
+ },
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": "stock",
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
+ },
+ },
+ }
+
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ afterRun: func() {
+ newFilename := filename + ".swp"
+ tmp.renameFile(filename, newFilename)
+ tmp.writeYAML(filename, cfg)
+ tmp.removeFile(newFilename)
+ time.Sleep(time.Millisecond * 500)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tmp := newTmpDir(t, "watch-run-*")
+ defer tmp.cleanup()
+
+ test.createSim(tmp).run(t)
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/manager.go b/src/go/plugin/go.d/agent/discovery/manager.go
new file mode 100644
index 000000000..646616023
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/manager.go
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discovery
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/dummy"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/file"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd"
+)
+
+func NewManager(cfg Config) (*Manager, error) {
+ if err := validateConfig(cfg); err != nil {
+ return nil, fmt.Errorf("discovery manager config validation: %v", err)
+ }
+
+ mgr := &Manager{
+ Logger: logger.New().With(
+ slog.String("component", "discovery manager"),
+ ),
+ send: make(chan struct{}, 1),
+ sendEvery: time.Second * 2, // timeout to aggregate changes
+ discoverers: make([]discoverer, 0),
+ mux: &sync.RWMutex{},
+ cache: newCache(),
+ }
+
+ if err := mgr.registerDiscoverers(cfg); err != nil {
+ return nil, fmt.Errorf("discovery manager initializaion: %v", err)
+ }
+
+ return mgr, nil
+}
+
+type discoverer interface {
+ Run(ctx context.Context, in chan<- []*confgroup.Group)
+}
+
+type Manager struct {
+ *logger.Logger
+ discoverers []discoverer
+ send chan struct{}
+ sendEvery time.Duration
+ mux *sync.RWMutex
+ cache *cache
+}
+
+func (m *Manager) String() string {
+ return fmt.Sprintf("discovery manager: %v", m.discoverers)
+}
+
+func (m *Manager) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+ m.Info("instance is started")
+ defer func() { m.Info("instance is stopped") }()
+
+ var wg sync.WaitGroup
+
+ for _, d := range m.discoverers {
+ wg.Add(1)
+ go func(d discoverer) {
+ defer wg.Done()
+ m.runDiscoverer(ctx, d)
+ }(d)
+ }
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ m.sendLoop(ctx, in)
+ }()
+
+ wg.Wait()
+ <-ctx.Done()
+}
+
+func (m *Manager) registerDiscoverers(cfg Config) error {
+ if len(cfg.File.Read) > 0 || len(cfg.File.Watch) > 0 {
+ cfg.File.Registry = cfg.Registry
+ d, err := file.NewDiscovery(cfg.File)
+ if err != nil {
+ return err
+ }
+ m.discoverers = append(m.discoverers, d)
+ }
+
+ if len(cfg.Dummy.Names) > 0 {
+ cfg.Dummy.Registry = cfg.Registry
+ d, err := dummy.NewDiscovery(cfg.Dummy)
+ if err != nil {
+ return err
+ }
+ m.discoverers = append(m.discoverers, d)
+ }
+
+ if len(cfg.SD.ConfDir) != 0 {
+ cfg.SD.ConfigDefaults = cfg.Registry
+ d, err := sd.NewServiceDiscovery(cfg.SD)
+ if err != nil {
+ return err
+ }
+ m.discoverers = append(m.discoverers, d)
+ }
+
+ if len(m.discoverers) == 0 {
+ return errors.New("zero registered discoverers")
+ }
+
+ m.Infof("registered discoverers: %v", m.discoverers)
+
+ return nil
+}
+
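+// runDiscoverer drains a single discoverer, merging every received batch of groups
+// into the shared cache and signaling sendLoop that there is something to flush.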
+func (m *Manager) runDiscoverer(ctx context.Context, d discoverer) {
+ updates := make(chan []*confgroup.Group)
+ go d.Run(ctx, updates)
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case groups, ok := <-updates:
+ if !ok {
+ return
+ }
+ func() {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ m.cache.update(groups)
+ m.triggerSend()
+ }()
+ }
+ }
+}
+
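+// sendLoop sends the first collected batch as soon as it is available and afterwards
+// flushes accumulated changes every sendEvery, aggregating bursts of discovery events
+// into a single update.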
+func (m *Manager) sendLoop(ctx context.Context, in chan<- []*confgroup.Group) {
+ m.mustSend(ctx, in)
+
+ tk := time.NewTicker(m.sendEvery)
+ defer tk.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-tk.C:
+ select {
+ case <-m.send:
+ m.trySend(in)
+ default:
+ }
+ }
+ }
+}
+
+func (m *Manager) mustSend(ctx context.Context, in chan<- []*confgroup.Group) {
+ select {
+ case <-ctx.Done():
+ return
+ case <-m.send:
+ m.mux.Lock()
+ groups := m.cache.groups()
+ m.cache.reset()
+ m.mux.Unlock()
+
+ select {
+ case <-ctx.Done():
+ case in <- groups:
+ }
+ return
+ }
+}
+
+func (m *Manager) trySend(in chan<- []*confgroup.Group) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ select {
+ case in <- m.cache.groups():
+ m.cache.reset()
+ default:
+ m.triggerSend()
+ }
+}
+
+func (m *Manager) triggerSend() {
+ select {
+ case m.send <- struct{}{}:
+ default:
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/manager_test.go b/src/go/plugin/go.d/agent/discovery/manager_test.go
new file mode 100644
index 000000000..5861b0902
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/manager_test.go
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discovery
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/file"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewManager(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ wantErr bool
+ }{
+ "valid config": {
+ cfg: Config{
+ Registry: confgroup.Registry{"module1": confgroup.Default{}},
+ File: file.Config{Read: []string{"path"}},
+ },
+ },
+ "invalid config, registry not set": {
+ cfg: Config{
+ File: file.Config{Read: []string{"path"}},
+ },
+ wantErr: true,
+ },
+ "invalid config, discoverers not set": {
+ cfg: Config{
+ Registry: confgroup.Registry{"module1": confgroup.Default{}},
+ },
+ wantErr: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mgr, err := NewManager(test.cfg)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ assert.NotNil(t, mgr)
+ }
+ })
+ }
+}
+
+func TestManager_Run(t *testing.T) {
+ tests := map[string]func() discoverySim{
+ "several discoverers, unique groups with delayed collect": func() discoverySim {
+ const numGroups, numCfgs = 2, 2
+ d1 := prepareMockDiscoverer("test1", numGroups, numCfgs)
+ d2 := prepareMockDiscoverer("test2", numGroups, numCfgs)
+ mgr := prepareManager(d1, d2)
+ expected := combineGroups(d1.groups, d2.groups)
+
+ sim := discoverySim{
+ mgr: mgr,
+ collectDelay: mgr.sendEvery + time.Second,
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "several discoverers, unique groups": func() discoverySim {
+ const numGroups, numCfgs = 2, 2
+ d1 := prepareMockDiscoverer("test1", numGroups, numCfgs)
+ d2 := prepareMockDiscoverer("test2", numGroups, numCfgs)
+ mgr := prepareManager(d1, d2)
+ expected := combineGroups(d1.groups, d2.groups)
+ sim := discoverySim{
+ mgr: mgr,
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "several discoverers, same groups": func() discoverySim {
+ const numGroups, numTargets = 2, 2
+ d1 := prepareMockDiscoverer("test1", numGroups, numTargets)
+ mgr := prepareManager(d1, d1)
+ expected := combineGroups(d1.groups)
+
+ sim := discoverySim{
+ mgr: mgr,
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "several discoverers, empty groups": func() discoverySim {
+ const numGroups, numCfgs = 1, 0
+ d1 := prepareMockDiscoverer("test1", numGroups, numCfgs)
+ d2 := prepareMockDiscoverer("test2", numGroups, numCfgs)
+ mgr := prepareManager(d1, d2)
+ expected := combineGroups(d1.groups, d2.groups)
+
+ sim := discoverySim{
+ mgr: mgr,
+ expectedGroups: expected,
+ }
+ return sim
+ },
+ "several discoverers, nil groups": func() discoverySim {
+ const numGroups, numCfgs = 0, 0
+ d1 := prepareMockDiscoverer("test1", numGroups, numCfgs)
+ d2 := prepareMockDiscoverer("test2", numGroups, numCfgs)
+ mgr := prepareManager(d1, d2)
+
+ sim := discoverySim{
+ mgr: mgr,
+ expectedGroups: nil,
+ }
+ return sim
+ },
+ }
+
+ for name, sim := range tests {
+ t.Run(name, func(t *testing.T) { sim().run(t) })
+ }
+}
+
+func prepareMockDiscoverer(source string, groups, configs int) mockDiscoverer {
+ d := mockDiscoverer{}
+
+ for i := 0; i < groups; i++ {
+ group := confgroup.Group{
+ Source: fmt.Sprintf("%s_group_%d", source, i+1),
+ }
+ for j := 0; j < configs; j++ {
+ group.Configs = append(group.Configs,
+ confgroup.Config{"name": fmt.Sprintf("%s_group_%d_target_%d", source, i+1, j+1)})
+ }
+ d.groups = append(d.groups, &group)
+ }
+ return d
+}
+
+func prepareManager(discoverers ...discoverer) *Manager {
+ mgr := &Manager{
+ send: make(chan struct{}, 1),
+ sendEvery: 2 * time.Second,
+ discoverers: discoverers,
+ cache: newCache(),
+ mux: &sync.RWMutex{},
+ }
+ return mgr
+}
+
+type mockDiscoverer struct {
+ groups []*confgroup.Group
+}
+
+func (md mockDiscoverer) Run(ctx context.Context, out chan<- []*confgroup.Group) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case out <- md.groups:
+ return
+ }
+ }
+}
+
+func combineGroups(groups ...[]*confgroup.Group) (combined []*confgroup.Group) {
+ for _, set := range groups {
+ combined = append(combined, set...)
+ }
+ return combined
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/conffile.go b/src/go/plugin/go.d/agent/discovery/sd/conffile.go
new file mode 100644
index 000000000..e08a4021b
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/conffile.go
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sd
+
+import (
+ "context"
+ "os"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath"
+)
+
+type confFile struct {
+ source string
+ content []byte
+}
+
+func newConfFileReader(log *logger.Logger, dir multipath.MultiPath) *confFileReader {
+ return &confFileReader{
+ Logger: log,
+ confDir: dir,
+ confChan: make(chan confFile),
+ }
+}
+
+type confFileReader struct {
+ *logger.Logger
+
+ confDir multipath.MultiPath
+ confChan chan confFile
+}
+
+func (c *confFileReader) run(ctx context.Context) {
+ files, err := c.confDir.FindFiles(".conf")
+ if err != nil {
+ c.Error(err)
+ return
+ }
+
+ if len(files) == 0 {
+ return
+ }
+
+ var confFiles []confFile
+
+ for _, file := range files {
+ bs, err := os.ReadFile(file)
+ if err != nil {
+ c.Error(err)
+ continue
+ }
+ confFiles = append(confFiles, confFile{
+ source: file,
+ content: bs,
+ })
+ }
+
+ for _, conf := range confFiles {
+ select {
+ case <-ctx.Done():
+ case c.confChan <- conf:
+ }
+ }
+}
+
+func (c *confFileReader) configs() chan confFile {
+ return c.confChan
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/docker.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/docker.go
new file mode 100644
index 000000000..1cea014a9
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/docker.go
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerd
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/dockerhost"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/docker/docker/api/types"
+ typesContainer "github.com/docker/docker/api/types/container"
+ docker "github.com/docker/docker/client"
+ "github.com/ilyam8/hashstructure"
+)
+
+func NewDiscoverer(cfg Config) (*Discoverer, error) {
+ tags, err := model.ParseTags(cfg.Tags)
+ if err != nil {
+ return nil, fmt.Errorf("parse tags: %v", err)
+ }
+
+ d := &Discoverer{
+ Logger: logger.New().With(
+ slog.String("component", "service discovery"),
+ slog.String("discoverer", "docker"),
+ ),
+ cfgSource: cfg.Source,
+ newDockerClient: func(addr string) (dockerClient, error) {
+ return docker.NewClientWithOpts(docker.WithHost(addr))
+ },
+ addr: docker.DefaultDockerHost,
+ listInterval: time.Second * 60,
+ timeout: time.Second * 2,
+ seenTggSources: make(map[string]bool),
+ started: make(chan struct{}),
+ }
+
+ if addr := dockerhost.FromEnv(); addr != "" && d.addr == docker.DefaultDockerHost {
+ d.Infof("using docker host from environment: %s ", addr)
+ d.addr = addr
+ }
+
+ d.Tags().Merge(tags)
+
+ if cfg.Timeout.Duration().Seconds() != 0 {
+ d.timeout = cfg.Timeout.Duration()
+ }
+ if cfg.Address != "" {
+ d.addr = cfg.Address
+ }
+
+ return d, nil
+}
+
+type Config struct {
+ Source string
+
+ Tags string `yaml:"tags"`
+ Address string `yaml:"address"`
+ Timeout web.Duration `yaml:"timeout"`
+}
+
+type (
+ Discoverer struct {
+ *logger.Logger
+ model.Base
+
+ dockerClient dockerClient
+ newDockerClient func(addr string) (dockerClient, error)
+ addr string
+
+ cfgSource string
+
+ listInterval time.Duration
+ timeout time.Duration
+ seenTggSources map[string]bool // [targetGroup.Source]
+
+ started chan struct{}
+ }
+ dockerClient interface {
+ NegotiateAPIVersion(context.Context)
+ ContainerList(context.Context, typesContainer.ListOptions) ([]types.Container, error)
+ Close() error
+ }
+)
+
+func (d *Discoverer) String() string {
+ return "sd:docker"
+}
+
+func (d *Discoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) {
+ d.Info("instance is started")
+ defer func() { d.cleanup(); d.Info("instance is stopped") }()
+
+ close(d.started)
+
+ if d.dockerClient == nil {
+ client, err := d.newDockerClient(d.addr)
+ if err != nil {
+ d.Errorf("error on creating docker client: %v", err)
+ return
+ }
+ d.dockerClient = client
+ }
+
+ d.dockerClient.NegotiateAPIVersion(ctx)
+
+ if err := d.listContainers(ctx, in); err != nil {
+ d.Error(err)
+ return
+ }
+
+ tk := time.NewTicker(d.listInterval)
+ defer tk.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-tk.C:
+ if err := d.listContainers(ctx, in); err != nil {
+ d.Warning(err)
+ }
+ }
+ }
+}
+
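+// listContainers queries the Docker daemon for running containers, converts them into
+// target groups, and emits an empty group for every previously seen source that is gone
+// so that stale targets are removed downstream.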
+func (d *Discoverer) listContainers(ctx context.Context, in chan<- []model.TargetGroup) error {
+ listCtx, cancel := context.WithTimeout(ctx, d.timeout)
+ defer cancel()
+
+ containers, err := d.dockerClient.ContainerList(listCtx, typesContainer.ListOptions{})
+ if err != nil {
+ return err
+ }
+
+ var tggs []model.TargetGroup
+ seen := make(map[string]bool)
+
+ for _, cntr := range containers {
+ if tgg := d.buildTargetGroup(cntr); tgg != nil {
+ tggs = append(tggs, tgg)
+ seen[tgg.Source()] = true
+ }
+ }
+
+ for src := range d.seenTggSources {
+ if !seen[src] {
+ tggs = append(tggs, &targetGroup{source: src})
+ }
+ }
+ d.seenTggSources = seen
+
+ select {
+ case <-ctx.Done():
+ case in <- tggs:
+ }
+
+ return nil
+}
+
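+// buildTargetGroup creates one target for every combination of the container's networks
+// and ports; containers without names or network settings produce no group.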
+func (d *Discoverer) buildTargetGroup(cntr types.Container) model.TargetGroup {
+ if len(cntr.Names) == 0 || cntr.NetworkSettings == nil || len(cntr.NetworkSettings.Networks) == 0 {
+ return nil
+ }
+
+ tgg := &targetGroup{
+ source: cntrSource(cntr),
+ }
+ if d.cfgSource != "" {
+ tgg.source += fmt.Sprintf(",%s", d.cfgSource)
+ }
+
+ for netDriver, network := range cntr.NetworkSettings.Networks {
+ // containers with network mode "host" are discovered by the local-listeners discoverer
+ for _, port := range cntr.Ports {
+ tgt := &target{
+ ID: cntr.ID,
+ Name: strings.TrimPrefix(cntr.Names[0], "/"),
+ Image: cntr.Image,
+ Command: cntr.Command,
+ Labels: mapAny(cntr.Labels),
+ PrivatePort: strconv.Itoa(int(port.PrivatePort)),
+ PublicPort: strconv.Itoa(int(port.PublicPort)),
+ PublicPortIP: port.IP,
+ PortProtocol: port.Type,
+ NetworkMode: cntr.HostConfig.NetworkMode,
+ NetworkDriver: netDriver,
+ IPAddress: network.IPAddress,
+ }
+ tgt.Address = net.JoinHostPort(tgt.IPAddress, tgt.PrivatePort)
+
+ hash, err := calcHash(tgt)
+ if err != nil {
+ continue
+ }
+
+ tgt.hash = hash
+ tgt.Tags().Merge(d.Tags())
+
+ tgg.targets = append(tgg.targets, tgt)
+ }
+ }
+
+ return tgg
+}
+
+func (d *Discoverer) cleanup() {
+ if d.dockerClient != nil {
+ _ = d.dockerClient.Close()
+ }
+}
+
+func cntrSource(cntr types.Container) string {
+ name := strings.TrimPrefix(cntr.Names[0], "/")
+ return fmt.Sprintf("discoverer=docker,container=%s,image=%s", name, cntr.Image)
+}
+
+func calcHash(obj any) (uint64, error) {
+ return hashstructure.Hash(obj, nil)
+}
+
+func mapAny(src map[string]string) map[string]any {
+ if src == nil {
+ return nil
+ }
+ m := make(map[string]any, len(src))
+ for k, v := range src {
+ m[k] = v
+ }
+ return m
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/dockerd_test.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/dockerd_test.go
new file mode 100644
index 000000000..630afb0f5
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/dockerd_test.go
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerd
+
+import (
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/docker/docker/api/types"
+ typesNetwork "github.com/docker/docker/api/types/network"
+)
+
+func TestDiscoverer_Discover(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *discoverySim
+ }{
+ "add containers": {
+ createSim: func() *discoverySim {
+ nginx1 := prepareNginxContainer("nginx1")
+ nginx2 := prepareNginxContainer("nginx2")
+
+ sim := &discoverySim{
+ dockerCli: func(cli dockerCli, _ time.Duration) {
+ cli.addContainer(nginx1)
+ cli.addContainer(nginx2)
+ },
+ wantGroups: []model.TargetGroup{
+ &targetGroup{
+ source: cntrSource(nginx1),
+ targets: []model.Target{
+ withHash(&target{
+ ID: nginx1.ID,
+ Name: nginx1.Names[0][1:],
+ Image: nginx1.Image,
+ Command: nginx1.Command,
+ Labels: mapAny(nginx1.Labels),
+ PrivatePort: "80",
+ PublicPort: "8080",
+ PublicPortIP: "0.0.0.0",
+ PortProtocol: "tcp",
+ NetworkMode: "default",
+ NetworkDriver: "bridge",
+ IPAddress: "192.0.2.0",
+ Address: "192.0.2.0:80",
+ }),
+ },
+ },
+ &targetGroup{
+ source: cntrSource(nginx2),
+ targets: []model.Target{
+ withHash(&target{
+ ID: nginx2.ID,
+ Name: nginx2.Names[0][1:],
+ Image: nginx2.Image,
+ Command: nginx2.Command,
+ Labels: mapAny(nginx2.Labels),
+ PrivatePort: "80",
+ PublicPort: "8080",
+ PublicPortIP: "0.0.0.0",
+ PortProtocol: "tcp",
+ NetworkMode: "default",
+ NetworkDriver: "bridge",
+ IPAddress: "192.0.2.0",
+ Address: "192.0.2.0:80",
+ }),
+ },
+ },
+ },
+ }
+ return sim
+ },
+ },
+ "remove containers": {
+ createSim: func() *discoverySim {
+ nginx1 := prepareNginxContainer("nginx1")
+ nginx2 := prepareNginxContainer("nginx2")
+
+ sim := &discoverySim{
+ dockerCli: func(cli dockerCli, interval time.Duration) {
+ cli.addContainer(nginx1)
+ cli.addContainer(nginx2)
+ time.Sleep(interval * 2)
+ cli.removeContainer(nginx1.ID)
+ },
+ wantGroups: []model.TargetGroup{
+ &targetGroup{
+ source: cntrSource(nginx1),
+ targets: nil,
+ },
+ &targetGroup{
+ source: cntrSource(nginx2),
+ targets: []model.Target{
+ withHash(&target{
+ ID: nginx2.ID,
+ Name: nginx2.Names[0][1:],
+ Image: nginx2.Image,
+ Command: nginx2.Command,
+ Labels: mapAny(nginx2.Labels),
+ PrivatePort: "80",
+ PublicPort: "8080",
+ PublicPortIP: "0.0.0.0",
+ PortProtocol: "tcp",
+ NetworkMode: "default",
+ NetworkDriver: "bridge",
+ IPAddress: "192.0.2.0",
+ Address: "192.0.2.0:80",
+ }),
+ },
+ },
+ },
+ }
+ return sim
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func prepareNginxContainer(name string) types.Container {
+ return types.Container{
+ ID: "id-" + name,
+ Names: []string{"/" + name},
+ Image: "nginx-image",
+ ImageID: "nginx-image-id",
+ Command: "nginx-command",
+ Ports: []types.Port{
+ {
+ IP: "0.0.0.0",
+ PrivatePort: 80,
+ PublicPort: 8080,
+ Type: "tcp",
+ },
+ },
+ Labels: map[string]string{"key1": "value1"},
+ HostConfig: struct {
+ NetworkMode string `json:",omitempty"`
+ Annotations map[string]string `json:",omitempty"`
+ }{
+ NetworkMode: "default",
+ },
+ NetworkSettings: &types.SummaryNetworkSettings{
+ Networks: map[string]*typesNetwork.EndpointSettings{
+ "bridge": {IPAddress: "192.0.2.0"},
+ },
+ },
+ }
+}
+
+func withHash(tgt *target) *target {
+ tgt.hash, _ = calcHash(tgt)
+ tags, _ := model.ParseTags("docker")
+ tgt.Tags().Merge(tags)
+ return tgt
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/sim_test.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/sim_test.go
new file mode 100644
index 000000000..fcdbeb894
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/sim_test.go
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerd
+
+import (
+ "context"
+ "sort"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/docker/docker/api/types"
+ typesContainer "github.com/docker/docker/api/types/container"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type dockerCli interface {
+ addContainer(cntr types.Container)
+ removeContainer(id string)
+}
+
+type discoverySim struct {
+ dockerCli func(cli dockerCli, interval time.Duration)
+ wantGroups []model.TargetGroup
+}
+
+func (sim *discoverySim) run(t *testing.T) {
+ d, err := NewDiscoverer(Config{
+ Source: "",
+ Tags: "docker",
+ })
+ require.NoError(t, err)
+
+ mock := newMockDockerd()
+
+ d.newDockerClient = func(addr string) (dockerClient, error) {
+ return mock, nil
+ }
+ d.listInterval = time.Millisecond * 100
+
+ seen := make(map[string]model.TargetGroup)
+ ctx, cancel := context.WithCancel(context.Background())
+ in := make(chan []model.TargetGroup)
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ d.Discover(ctx, in)
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case tggs := <-in:
+ for _, tgg := range tggs {
+ seen[tgg.Source()] = tgg
+ }
+ }
+ }
+ }()
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ wg.Wait()
+ }()
+
+ select {
+ case <-d.started:
+ case <-time.After(time.Second * 3):
+ require.Fail(t, "discovery failed to start")
+ }
+
+ sim.dockerCli(mock, d.listInterval)
+ time.Sleep(time.Second)
+
+ cancel()
+
+ select {
+ case <-done:
+ case <-time.After(time.Second * 3):
+ require.Fail(t, "discovery hasn't finished after cancel")
+ }
+
+ var tggs []model.TargetGroup
+ for _, tgg := range seen {
+ tggs = append(tggs, tgg)
+ }
+
+ sortTargetGroups(tggs)
+ sortTargetGroups(sim.wantGroups)
+
+ wantLen, gotLen := len(sim.wantGroups), len(tggs)
+ assert.Equalf(t, wantLen, gotLen, "different len (want %d got %d)", wantLen, gotLen)
+ assert.Equal(t, sim.wantGroups, tggs)
+
+ assert.True(t, mock.negApiVerCalled, "NegotiateAPIVersion called")
+ assert.True(t, mock.closeCalled, "Close called")
+}
+
+func newMockDockerd() *mockDockerd {
+ return &mockDockerd{
+ containers: make(map[string]types.Container),
+ }
+}
+
+type mockDockerd struct {
+ negApiVerCalled bool
+ closeCalled bool
+ mux sync.Mutex
+ containers map[string]types.Container
+}
+
+func (m *mockDockerd) addContainer(cntr types.Container) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ m.containers[cntr.ID] = cntr
+}
+
+func (m *mockDockerd) removeContainer(id string) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ delete(m.containers, id)
+}
+
+func (m *mockDockerd) ContainerList(_ context.Context, _ typesContainer.ListOptions) ([]types.Container, error) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ var cntrs []types.Container
+ for _, cntr := range m.containers {
+ cntrs = append(cntrs, cntr)
+ }
+
+ return cntrs, nil
+}
+
+func (m *mockDockerd) NegotiateAPIVersion(_ context.Context) {
+ m.negApiVerCalled = true
+}
+
+func (m *mockDockerd) Close() error {
+ m.closeCalled = true
+ return nil
+}
+
+func sortTargetGroups(tggs []model.TargetGroup) {
+ if len(tggs) == 0 {
+ return
+ }
+ sort.Slice(tggs, func(i, j int) bool { return tggs[i].Source() < tggs[j].Source() })
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/target.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/target.go
new file mode 100644
index 000000000..2cf0575b5
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/dockerd/target.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerd
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+)
+
+type targetGroup struct {
+ source string
+ targets []model.Target
+}
+
+func (g *targetGroup) Provider() string { return "sd:docker" }
+func (g *targetGroup) Source() string { return g.source }
+func (g *targetGroup) Targets() []model.Target { return g.targets }
+
+type target struct {
+ model.Base
+
+ hash uint64
+
+ ID string
+ Name string
+ Image string
+ Command string
+ Labels map[string]any
+ PrivatePort string // Port on the container
+ PublicPort string // Port exposed on the host
+ PublicPortIP string // Host IP address that the container's port is mapped to
+ PortProtocol string
+ NetworkMode string
+ NetworkDriver string
+ IPAddress string
+
+ Address string // "IPAddress:PrivatePort"
+}
+
+func (t *target) TUID() string {
+ if t.PublicPort != "" {
+ return fmt.Sprintf("%s_%s_%s_%s_%s_%s",
+ t.Name, t.IPAddress, t.PublicPortIP, t.PortProtocol, t.PublicPort, t.PrivatePort)
+ }
+ if t.PrivatePort != "" {
+ return fmt.Sprintf("%s_%s_%s_%s",
+ t.Name, t.IPAddress, t.PortProtocol, t.PrivatePort)
+ }
+ return fmt.Sprintf("%s_%s", t.Name, t.IPAddress)
+}
+
+func (t *target) Hash() uint64 {
+ return t.hash
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/config.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/config.go
new file mode 100644
index 000000000..15a1e4745
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/config.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+ "errors"
+ "fmt"
+)
+
+type Config struct {
+ APIServer string `yaml:"api_server"` // TODO: not used
+ Role string `yaml:"role"`
+ Tags string `yaml:"tags"`
+ Namespaces []string `yaml:"namespaces"`
+ Selector struct {
+ Label string `yaml:"label"`
+ Field string `yaml:"field"`
+ } `yaml:"selector"`
+ Pod struct {
+ LocalMode bool `yaml:"local_mode"`
+ } `yaml:"pod"`
+}
+
+func validateConfig(cfg Config) error {
+ switch role(cfg.Role) {
+ case rolePod, roleService:
+ default:
+ return fmt.Errorf("unknown role: '%s'", cfg.Role)
+ }
+ if cfg.Tags == "" {
+ return errors.New("'tags' not set")
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes.go
new file mode 100644
index 000000000..439e2b695
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes.go
@@ -0,0 +1,268 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "os"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/k8sclient"
+
+ "github.com/ilyam8/hashstructure"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+)
+
+type role string
+
+const (
+ rolePod role = "pod"
+ roleService role = "service"
+)
+
+const (
+ envNodeName = "MY_NODE_NAME"
+)
+
+var log = logger.New().With(
+ slog.String("component", "service discovery"),
+ slog.String("discoverer", "kubernetes"),
+)
+
+func NewKubeDiscoverer(cfg Config) (*KubeDiscoverer, error) {
+ if err := validateConfig(cfg); err != nil {
+ return nil, fmt.Errorf("config validation: %v", err)
+ }
+
+ tags, err := model.ParseTags(cfg.Tags)
+ if err != nil {
+ return nil, fmt.Errorf("parse tags: %v", err)
+ }
+
+ client, err := k8sclient.New("Netdata/service-td")
+ if err != nil {
+ return nil, fmt.Errorf("create clientset: %v", err)
+ }
+
+ ns := cfg.Namespaces
+ if len(ns) == 0 {
+ ns = []string{corev1.NamespaceAll}
+ }
+
+ selectorField := cfg.Selector.Field
+ if role(cfg.Role) == rolePod && cfg.Pod.LocalMode {
+ name := os.Getenv(envNodeName)
+ if name == "" {
+ return nil, fmt.Errorf("local_mode is enabled, but env '%s' not set", envNodeName)
+ }
+ selectorField = joinSelectors(selectorField, "spec.nodeName="+name)
+ }
+
+ d := &KubeDiscoverer{
+ Logger: log,
+ client: client,
+ tags: tags,
+ role: role(cfg.Role),
+ namespaces: ns,
+ selectorLabel: cfg.Selector.Label,
+ selectorField: selectorField,
+ discoverers: make([]model.Discoverer, 0, len(ns)),
+ started: make(chan struct{}),
+ }
+
+ return d, nil
+}
+
+type KubeDiscoverer struct {
+ *logger.Logger
+
+ client kubernetes.Interface
+
+ tags model.Tags
+ role role
+ namespaces []string
+ selectorLabel string
+ selectorField string
+ discoverers []model.Discoverer
+ started chan struct{}
+}
+
+func (d *KubeDiscoverer) String() string {
+ return "sd:k8s"
+}
+
+const resyncPeriod = 10 * time.Minute
+
+func (d *KubeDiscoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) {
+ d.Info("instance is started")
+ defer d.Info("instance is stopped")
+
+ for _, namespace := range d.namespaces {
+ var dd model.Discoverer
+ switch d.role {
+ case rolePod:
+ dd = d.setupPodDiscoverer(ctx, namespace)
+ case roleService:
+ dd = d.setupServiceDiscoverer(ctx, namespace)
+ default:
+ d.Errorf("unknown role: '%s'", d.role)
+ continue
+ }
+ d.discoverers = append(d.discoverers, dd)
+ }
+
+ if len(d.discoverers) == 0 {
+ d.Error("no discoverers registered")
+ return
+ }
+
+ d.Infof("registered: %v", d.discoverers)
+
+ var wg sync.WaitGroup
+ updates := make(chan []model.TargetGroup)
+
+ for _, disc := range d.discoverers {
+ wg.Add(1)
+ go func(disc model.Discoverer) { defer wg.Done(); disc.Discover(ctx, updates) }(disc)
+ }
+
+ done := make(chan struct{})
+ go func() { defer close(done); wg.Wait() }()
+
+ close(d.started)
+
+ for {
+ select {
+ case <-ctx.Done():
+ select {
+ case <-done:
+ d.Info("all discoverers exited")
+ case <-time.After(time.Second * 5):
+ d.Warning("not all discoverers exited")
+ }
+ return
+ case <-done:
+ d.Info("all discoverers exited")
+ return
+ case tggs := <-updates:
+ select {
+ case <-ctx.Done():
+ case in <- tggs:
+ }
+ }
+ }
+}
+
+func (d *KubeDiscoverer) setupPodDiscoverer(ctx context.Context, ns string) *podDiscoverer {
+ pod := d.client.CoreV1().Pods(ns)
+ podLW := &cache.ListWatch{
+ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+ opts.FieldSelector = d.selectorField
+ opts.LabelSelector = d.selectorLabel
+ return pod.List(ctx, opts)
+ },
+ WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+ opts.FieldSelector = d.selectorField
+ opts.LabelSelector = d.selectorLabel
+ return pod.Watch(ctx, opts)
+ },
+ }
+
+ cmap := d.client.CoreV1().ConfigMaps(ns)
+ cmapLW := &cache.ListWatch{
+ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+ return cmap.List(ctx, opts)
+ },
+ WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+ return cmap.Watch(ctx, opts)
+ },
+ }
+
+ secret := d.client.CoreV1().Secrets(ns)
+ secretLW := &cache.ListWatch{
+ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+ return secret.List(ctx, opts)
+ },
+ WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+ return secret.Watch(ctx, opts)
+ },
+ }
+
+ td := newPodDiscoverer(
+ cache.NewSharedInformer(podLW, &corev1.Pod{}, resyncPeriod),
+ cache.NewSharedInformer(cmapLW, &corev1.ConfigMap{}, resyncPeriod),
+ cache.NewSharedInformer(secretLW, &corev1.Secret{}, resyncPeriod),
+ )
+ td.Tags().Merge(d.tags)
+
+ return td
+}
+
+func (d *KubeDiscoverer) setupServiceDiscoverer(ctx context.Context, namespace string) *serviceDiscoverer {
+ svc := d.client.CoreV1().Services(namespace)
+
+ svcLW := &cache.ListWatch{
+ ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) {
+ opts.FieldSelector = d.selectorField
+ opts.LabelSelector = d.selectorLabel
+ return svc.List(ctx, opts)
+ },
+ WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) {
+ opts.FieldSelector = d.selectorField
+ opts.LabelSelector = d.selectorLabel
+ return svc.Watch(ctx, opts)
+ },
+ }
+
+ inf := cache.NewSharedInformer(svcLW, &corev1.Service{}, resyncPeriod)
+
+ td := newServiceDiscoverer(inf)
+ td.Tags().Merge(d.tags)
+
+ return td
+}
+
+func enqueue(queue *workqueue.Type, obj any) {
+ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+ if err != nil {
+ return
+ }
+ queue.Add(key)
+}
+
+func send(ctx context.Context, in chan<- []model.TargetGroup, tgg model.TargetGroup) {
+ if tgg == nil {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ case in <- []model.TargetGroup{tgg}:
+ }
+}
+
+func calcHash(obj any) (uint64, error) {
+ return hashstructure.Hash(obj, nil)
+}
+
+func joinSelectors(srs ...string) string {
+ var i int
+ for _, v := range srs {
+ if v != "" {
+ srs[i] = v
+ i++
+ }
+ }
+ return strings.Join(srs[:i], ",")
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go
new file mode 100644
index 000000000..ba60a47b4
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/k8sclient"
+
+ "github.com/stretchr/testify/assert"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/fake"
+)
+
+var discoveryTags, _ = model.ParseTags("k8s")
+
+func TestMain(m *testing.M) {
+ _ = os.Setenv(envNodeName, "m01")
+ _ = os.Setenv(k8sclient.EnvFakeClient, "true")
+ code := m.Run()
+ _ = os.Unsetenv(envNodeName)
+ _ = os.Unsetenv(k8sclient.EnvFakeClient)
+ os.Exit(code)
+}
+
+func TestNewKubeDiscoverer(t *testing.T) {
+ tests := map[string]struct {
+ cfg Config
+ wantErr bool
+ }{
+ "pod role config": {
+ wantErr: false,
+ cfg: Config{Role: string(rolePod), Tags: "k8s"},
+ },
+ "service role config": {
+ wantErr: false,
+ cfg: Config{Role: string(roleService), Tags: "k8s"},
+ },
+ "empty config": {
+ wantErr: true,
+ cfg: Config{},
+ },
+ }
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ disc, err := NewKubeDiscoverer(test.cfg)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ assert.Nil(t, disc)
+ } else {
+ assert.NoError(t, err)
+ assert.NotNil(t, disc)
+ }
+ })
+ }
+}
+
+func TestKubeDiscoverer_Discover(t *testing.T) {
+ const prod = "prod"
+ const dev = "dev"
+ prodNamespace := newNamespace(prod)
+ devNamespace := newNamespace(dev)
+
+ tests := map[string]struct {
+ createSim func() discoverySim
+ }{
+ "multiple namespaces pod td": {
+ createSim: func() discoverySim {
+ httpdProd, nginxProd := newHTTPDPod(), newNGINXPod()
+ httpdProd.Namespace = prod
+ nginxProd.Namespace = prod
+
+ httpdDev, nginxDev := newHTTPDPod(), newNGINXPod()
+ httpdDev.Namespace = dev
+ nginxDev.Namespace = dev
+
+ disc, _ := preparePodDiscoverer(
+ []string{prod, dev},
+ prodNamespace, devNamespace, httpdProd, nginxProd, httpdDev, nginxDev)
+
+ return discoverySim{
+ td: disc,
+ sortBeforeVerify: true,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroup(httpdDev),
+ preparePodTargetGroup(nginxDev),
+ preparePodTargetGroup(httpdProd),
+ preparePodTargetGroup(nginxProd),
+ },
+ }
+ },
+ },
+ "multiple namespaces ClusterIP service td": {
+ createSim: func() discoverySim {
+ httpdProd, nginxProd := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ httpdProd.Namespace = prod
+ nginxProd.Namespace = prod
+
+ httpdDev, nginxDev := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ httpdDev.Namespace = dev
+ nginxDev.Namespace = dev
+
+ disc, _ := prepareSvcDiscoverer(
+ []string{prod, dev},
+ prodNamespace, devNamespace, httpdProd, nginxProd, httpdDev, nginxDev)
+
+ return discoverySim{
+ td: disc,
+ sortBeforeVerify: true,
+ wantTargetGroups: []model.TargetGroup{
+ prepareSvcTargetGroup(httpdDev),
+ prepareSvcTargetGroup(nginxDev),
+ prepareSvcTargetGroup(httpdProd),
+ prepareSvcTargetGroup(nginxProd),
+ },
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func prepareDiscoverer(role role, namespaces []string, objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) {
+ client := fake.NewSimpleClientset(objects...)
+ tags, _ := model.ParseTags("k8s")
+ disc := &KubeDiscoverer{
+ tags: tags,
+ role: role,
+ namespaces: namespaces,
+ client: client,
+ discoverers: nil,
+ started: make(chan struct{}),
+ }
+ return disc, client
+}
+
+func newNamespace(name string) *corev1.Namespace {
+ return &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: name}}
+}
+
+func mustCalcHash(obj any) uint64 {
+ hash, err := calcHash(obj)
+ if err != nil {
+ panic(fmt.Sprintf("hash calculation: %v", err))
+ }
+ return hash
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod.go
new file mode 100644
index 000000000..617081742
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod.go
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+)
+
+type podTargetGroup struct {
+ targets []model.Target
+ source string
+}
+
+func (p podTargetGroup) Provider() string { return "sd:k8s:pod" }
+func (p podTargetGroup) Source() string { return p.source }
+func (p podTargetGroup) Targets() []model.Target { return p.targets }
+
+type PodTarget struct {
+ model.Base `hash:"ignore"`
+
+ hash uint64
+ tuid string
+
+ Address string
+ Namespace string
+ Name string
+ Annotations map[string]any
+ Labels map[string]any
+ NodeName string
+ PodIP string
+ ControllerName string
+ ControllerKind string
+ ContName string
+ Image string
+ Env map[string]any
+ Port string
+ PortName string
+ PortProtocol string
+}
+
+func (p PodTarget) Hash() uint64 { return p.hash }
+func (p PodTarget) TUID() string { return p.tuid }
+
+func newPodDiscoverer(pod, cmap, secret cache.SharedInformer) *podDiscoverer {
+
+ if pod == nil || cmap == nil || secret == nil {
+ panic("nil pod or cmap or secret informer")
+ }
+
+ queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "pod"})
+
+ _, _ = pod.AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj any) { enqueue(queue, obj) },
+ UpdateFunc: func(_, obj any) { enqueue(queue, obj) },
+ DeleteFunc: func(obj any) { enqueue(queue, obj) },
+ })
+
+ return &podDiscoverer{
+ Logger: log,
+ podInformer: pod,
+ cmapInformer: cmap,
+ secretInformer: secret,
+ queue: queue,
+ }
+}
+
+type podDiscoverer struct {
+ *logger.Logger
+ model.Base
+
+ podInformer cache.SharedInformer
+ cmapInformer cache.SharedInformer
+ secretInformer cache.SharedInformer
+ queue *workqueue.Type
+}
+
+func (p *podDiscoverer) String() string {
+ return "sd:k8s:pod"
+}
+
+func (p *podDiscoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) {
+ p.Info("instance is started")
+ defer p.Info("instance is stopped")
+ defer p.queue.ShutDown()
+
+ go p.podInformer.Run(ctx.Done())
+ go p.cmapInformer.Run(ctx.Done())
+ go p.secretInformer.Run(ctx.Done())
+
+ if !cache.WaitForCacheSync(ctx.Done(),
+ p.podInformer.HasSynced, p.cmapInformer.HasSynced, p.secretInformer.HasSynced) {
+ p.Error("failed to sync caches")
+ return
+ }
+
+ go p.run(ctx, in)
+
+ <-ctx.Done()
+}
+
+func (p *podDiscoverer) run(ctx context.Context, in chan<- []model.TargetGroup) {
+ for {
+ item, shutdown := p.queue.Get()
+ if shutdown {
+ return
+ }
+ p.handleQueueItem(ctx, in, item)
+ }
+}
+
+func (p *podDiscoverer) handleQueueItem(ctx context.Context, in chan<- []model.TargetGroup, item any) {
+ defer p.queue.Done(item)
+
+ key := item.(string)
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ return
+ }
+
+ obj, ok, err := p.podInformer.GetStore().GetByKey(key)
+ if err != nil {
+ return
+ }
+
+ if !ok {
+ tgg := &podTargetGroup{source: podSourceFromNsName(namespace, name)}
+ send(ctx, in, tgg)
+ return
+ }
+
+ pod, err := toPod(obj)
+ if err != nil {
+ return
+ }
+
+ tgg := p.buildTargetGroup(pod)
+
+ for _, tgt := range tgg.Targets() {
+ tgt.Tags().Merge(p.Tags())
+ }
+
+ send(ctx, in, tgg)
+
+}
+
+func (p *podDiscoverer) buildTargetGroup(pod *corev1.Pod) model.TargetGroup {
+ if pod.Status.PodIP == "" || len(pod.Spec.Containers) == 0 {
+ return &podTargetGroup{
+ source: podSource(pod),
+ }
+ }
+ return &podTargetGroup{
+ source: podSource(pod),
+ targets: p.buildTargets(pod),
+ }
+}
+
+func (p *podDiscoverer) buildTargets(pod *corev1.Pod) (targets []model.Target) {
+ var name, kind string
+ for _, ref := range pod.OwnerReferences {
+ if ref.Controller != nil && *ref.Controller {
+ name = ref.Name
+ kind = ref.Kind
+ break
+ }
+ }
+
+ for _, container := range pod.Spec.Containers {
+ env := p.collectEnv(pod.Namespace, container)
+
+ if len(container.Ports) == 0 {
+ tgt := &PodTarget{
+ tuid: podTUID(pod, container),
+ Address: pod.Status.PodIP,
+ Namespace: pod.Namespace,
+ Name: pod.Name,
+ Annotations: mapAny(pod.Annotations),
+ Labels: mapAny(pod.Labels),
+ NodeName: pod.Spec.NodeName,
+ PodIP: pod.Status.PodIP,
+ ControllerName: name,
+ ControllerKind: kind,
+ ContName: container.Name,
+ Image: container.Image,
+ Env: mapAny(env),
+ }
+ hash, err := calcHash(tgt)
+ if err != nil {
+ continue
+ }
+ tgt.hash = hash
+
+ targets = append(targets, tgt)
+ } else {
+ for _, port := range container.Ports {
+ portNum := strconv.FormatUint(uint64(port.ContainerPort), 10)
+ tgt := &PodTarget{
+ tuid: podTUIDWithPort(pod, container, port),
+ Address: net.JoinHostPort(pod.Status.PodIP, portNum),
+ Namespace: pod.Namespace,
+ Name: pod.Name,
+ Annotations: mapAny(pod.Annotations),
+ Labels: mapAny(pod.Labels),
+ NodeName: pod.Spec.NodeName,
+ PodIP: pod.Status.PodIP,
+ ControllerName: name,
+ ControllerKind: kind,
+ ContName: container.Name,
+ Image: container.Image,
+ Env: mapAny(env),
+ Port: portNum,
+ PortName: port.Name,
+ PortProtocol: string(port.Protocol),
+ }
+ hash, err := calcHash(tgt)
+ if err != nil {
+ continue
+ }
+ tgt.hash = hash
+
+ targets = append(targets, tgt)
+ }
+ }
+ }
+
+ return targets
+}
+
+func (p *podDiscoverer) collectEnv(ns string, container corev1.Container) map[string]string {
+ vars := make(map[string]string)
+
+ // When a key exists in multiple sources,
+ // the value associated with the last source will take precedence.
+ // Values defined by Env with a duplicate key take precedence over EnvFrom.
+ //
+ // Order (https://github.com/kubernetes/kubectl/blob/master/pkg/describe/describe.go)
+ // - envFrom: configMapRef, secretRef
+ // - env: value || valueFrom: fieldRef, resourceFieldRef, secretRef, configMap
+
+ for _, src := range container.EnvFrom {
+ switch {
+ case src.ConfigMapRef != nil:
+ p.envFromConfigMap(vars, ns, src)
+ case src.SecretRef != nil:
+ p.envFromSecret(vars, ns, src)
+ }
+ }
+
+ for _, env := range container.Env {
+ if env.Name == "" || isVar(env.Name) {
+ continue
+ }
+ switch {
+ case env.Value != "":
+ vars[env.Name] = env.Value
+ case env.ValueFrom != nil && env.ValueFrom.SecretKeyRef != nil:
+ p.valueFromSecret(vars, ns, env)
+ case env.ValueFrom != nil && env.ValueFrom.ConfigMapKeyRef != nil:
+ p.valueFromConfigMap(vars, ns, env)
+ }
+ }
+
+ if len(vars) == 0 {
+ return nil
+ }
+ return vars
+}
+
+func (p *podDiscoverer) valueFromConfigMap(vars map[string]string, ns string, env corev1.EnvVar) {
+ if env.ValueFrom.ConfigMapKeyRef.Name == "" || env.ValueFrom.ConfigMapKeyRef.Key == "" {
+ return
+ }
+
+ sr := env.ValueFrom.ConfigMapKeyRef
+ key := ns + "/" + sr.Name
+
+ item, exist, err := p.cmapInformer.GetStore().GetByKey(key)
+ if err != nil || !exist {
+ return
+ }
+
+ cmap, err := toConfigMap(item)
+ if err != nil {
+ return
+ }
+
+ if v, ok := cmap.Data[sr.Key]; ok {
+ vars[env.Name] = v
+ }
+}
+
+func (p *podDiscoverer) valueFromSecret(vars map[string]string, ns string, env corev1.EnvVar) {
+ if env.ValueFrom.SecretKeyRef.Name == "" || env.ValueFrom.SecretKeyRef.Key == "" {
+ return
+ }
+
+ secretKey := env.ValueFrom.SecretKeyRef
+ key := ns + "/" + secretKey.Name
+
+ item, exist, err := p.secretInformer.GetStore().GetByKey(key)
+ if err != nil || !exist {
+ return
+ }
+
+ secret, err := toSecret(item)
+ if err != nil {
+ return
+ }
+
+ if v, ok := secret.Data[secretKey.Key]; ok {
+ vars[env.Name] = string(v)
+ }
+}
+
+func (p *podDiscoverer) envFromConfigMap(vars map[string]string, ns string, src corev1.EnvFromSource) {
+ if src.ConfigMapRef.Name == "" {
+ return
+ }
+
+ key := ns + "/" + src.ConfigMapRef.Name
+ item, exist, err := p.cmapInformer.GetStore().GetByKey(key)
+ if err != nil || !exist {
+ return
+ }
+
+ cmap, err := toConfigMap(item)
+ if err != nil {
+ return
+ }
+
+ for k, v := range cmap.Data {
+ vars[src.Prefix+k] = v
+ }
+}
+
+func (p *podDiscoverer) envFromSecret(vars map[string]string, ns string, src corev1.EnvFromSource) {
+ if src.SecretRef.Name == "" {
+ return
+ }
+
+ key := ns + "/" + src.SecretRef.Name
+ item, exist, err := p.secretInformer.GetStore().GetByKey(key)
+ if err != nil || !exist {
+ return
+ }
+
+ secret, err := toSecret(item)
+ if err != nil {
+ return
+ }
+
+ for k, v := range secret.Data {
+ vars[src.Prefix+k] = string(v)
+ }
+}
+
+func podTUID(pod *corev1.Pod, container corev1.Container) string {
+ return fmt.Sprintf("%s_%s_%s",
+ pod.Namespace,
+ pod.Name,
+ container.Name,
+ )
+}
+
+func podTUIDWithPort(pod *corev1.Pod, container corev1.Container, port corev1.ContainerPort) string {
+ return fmt.Sprintf("%s_%s_%s_%s_%s",
+ pod.Namespace,
+ pod.Name,
+ container.Name,
+ strings.ToLower(string(port.Protocol)),
+ strconv.FormatUint(uint64(port.ContainerPort), 10),
+ )
+}
+
+func podSourceFromNsName(namespace, name string) string {
+ return fmt.Sprintf("discoverer=k8s,kind=pod,namespace=%s,pod_name=%s", namespace, name)
+}
+
+func podSource(pod *corev1.Pod) string {
+ return podSourceFromNsName(pod.Namespace, pod.Name)
+}
+
+func toPod(obj any) (*corev1.Pod, error) {
+ pod, ok := obj.(*corev1.Pod)
+ if !ok {
+ return nil, fmt.Errorf("received unexpected object type: %T", obj)
+ }
+ return pod, nil
+}
+
+func toConfigMap(obj any) (*corev1.ConfigMap, error) {
+ cmap, ok := obj.(*corev1.ConfigMap)
+ if !ok {
+ return nil, fmt.Errorf("received unexpected object type: %T", obj)
+ }
+ return cmap, nil
+}
+
+func toSecret(obj any) (*corev1.Secret, error) {
+ secret, ok := obj.(*corev1.Secret)
+ if !ok {
+ return nil, fmt.Errorf("received unexpected object type: %T", obj)
+ }
+ return secret, nil
+}
+
+func isVar(name string) bool {
+ // Variable references $(VAR_NAME) are expanded using the previously defined
+ // environment variables in the container and any service environment
+ // variables.
+ return strings.IndexByte(name, '$') != -1
+}
+
+func mapAny(src map[string]string) map[string]any {
+ if src == nil {
+ return nil
+ }
+ m := make(map[string]any, len(src))
+ for k, v := range src {
+ m[k] = v
+ }
+ return m
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod_test.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod_test.go
new file mode 100644
index 000000000..838c2413f
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/pod_test.go
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+ "context"
+ "net"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/stretchr/testify/assert"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/tools/cache"
+)
+
+func TestPodTargetGroup_Provider(t *testing.T) {
+ var p podTargetGroup
+ assert.NotEmpty(t, p.Provider())
+}
+
+func TestPodTargetGroup_Source(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ wantSources []string
+ }{
+ "pods with multiple ports": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ disc, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroup(httpd),
+ preparePodTargetGroup(nginx),
+ },
+ }
+ },
+ wantSources: []string{
+ "discoverer=k8s,kind=pod,namespace=default,pod_name=httpd-dd95c4d68-5bkwl",
+ "discoverer=k8s,kind=pod,namespace=default,pod_name=nginx-7cfd77469b-q6kxj",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+
+ var sources []string
+ for _, tgg := range sim.run(t) {
+ sources = append(sources, tgg.Source())
+ }
+
+ assert.Equal(t, test.wantSources, sources)
+ })
+ }
+}
+
+func TestPodTargetGroup_Targets(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ wantTargets int
+ }{
+ "pods with multiple ports": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ discovery, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: discovery,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroup(httpd),
+ preparePodTargetGroup(nginx),
+ },
+ }
+ },
+ wantTargets: 4,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+
+ var targets int
+ for _, tgg := range sim.run(t) {
+ targets += len(tgg.Targets())
+ }
+
+ assert.Equal(t, test.wantTargets, targets)
+ })
+ }
+}
+
+func TestPodTarget_Hash(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ wantHashes []uint64
+ }{
+ "pods with multiple ports": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ discovery, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: discovery,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroup(httpd),
+ preparePodTargetGroup(nginx),
+ },
+ }
+ },
+ wantHashes: []uint64{
+ 12703169414253998055,
+ 13351713096133918928,
+ 8241692333761256175,
+ 11562466355572729519,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+
+ var hashes []uint64
+ for _, tgg := range sim.run(t) {
+ for _, tg := range tgg.Targets() {
+ hashes = append(hashes, tg.Hash())
+ }
+ }
+
+ assert.Equal(t, test.wantHashes, hashes)
+ })
+ }
+}
+
+func TestPodTarget_TUID(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ wantTUID []string
+ }{
+ "pods with multiple ports": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ discovery, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: discovery,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroup(httpd),
+ preparePodTargetGroup(nginx),
+ },
+ }
+ },
+ wantTUID: []string{
+ "default_httpd-dd95c4d68-5bkwl_httpd_tcp_80",
+ "default_httpd-dd95c4d68-5bkwl_httpd_tcp_443",
+ "default_nginx-7cfd77469b-q6kxj_nginx_tcp_80",
+ "default_nginx-7cfd77469b-q6kxj_nginx_tcp_443",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+
+ var tuid []string
+ for _, tgg := range sim.run(t) {
+ for _, tg := range tgg.Targets() {
+ tuid = append(tuid, tg.TUID())
+ }
+ }
+
+ assert.Equal(t, test.wantTUID, tuid)
+ })
+ }
+}
+
+func TestNewPodDiscoverer(t *testing.T) {
+ tests := map[string]struct {
+ podInf cache.SharedInformer
+ cmapInf cache.SharedInformer
+ secretInf cache.SharedInformer
+ wantPanic bool
+ }{
+ "valid informers": {
+ wantPanic: false,
+ podInf: cache.NewSharedInformer(nil, &corev1.Pod{}, resyncPeriod),
+ cmapInf: cache.NewSharedInformer(nil, &corev1.ConfigMap{}, resyncPeriod),
+ secretInf: cache.NewSharedInformer(nil, &corev1.Secret{}, resyncPeriod),
+ },
+ "nil informers": {
+ wantPanic: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ f := func() { newPodDiscoverer(test.podInf, test.cmapInf, test.secretInf) }
+
+ if test.wantPanic {
+ assert.Panics(t, f)
+ } else {
+ assert.NotPanics(t, f)
+ }
+ })
+ }
+}
+
+func TestPodDiscoverer_String(t *testing.T) {
+ var p podDiscoverer
+ assert.NotEmpty(t, p.String())
+}
+
+func TestPodDiscoverer_Discover(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ }{
+ "ADD: pods exist before run": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ td, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: td,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroup(httpd),
+ preparePodTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "ADD: pods exist before run and add after sync": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ disc, client := prepareAllNsPodDiscoverer(httpd)
+ podClient := client.CoreV1().Pods("default")
+
+ return discoverySim{
+ td: disc,
+ runAfterSync: func(ctx context.Context) {
+ _, _ = podClient.Create(ctx, nginx, metav1.CreateOptions{})
+ },
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroup(httpd),
+ preparePodTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "DELETE: remove pods after sync": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ disc, client := prepareAllNsPodDiscoverer(httpd, nginx)
+ podClient := client.CoreV1().Pods("default")
+
+ return discoverySim{
+ td: disc,
+ runAfterSync: func(ctx context.Context) {
+ time.Sleep(time.Millisecond * 50)
+ _ = podClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{})
+ _ = podClient.Delete(ctx, nginx.Name, metav1.DeleteOptions{})
+ },
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroup(httpd),
+ preparePodTargetGroup(nginx),
+ prepareEmptyPodTargetGroup(httpd),
+ prepareEmptyPodTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "DELETE,ADD: remove and add pods after sync": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ disc, client := prepareAllNsPodDiscoverer(httpd)
+ podClient := client.CoreV1().Pods("default")
+
+ return discoverySim{
+ td: disc,
+ runAfterSync: func(ctx context.Context) {
+ time.Sleep(time.Millisecond * 50)
+ _ = podClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{})
+ _, _ = podClient.Create(ctx, nginx, metav1.CreateOptions{})
+ },
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroup(httpd),
+ prepareEmptyPodTargetGroup(httpd),
+ preparePodTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "ADD: pods with empty PodIP": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ httpd.Status.PodIP = ""
+ nginx.Status.PodIP = ""
+ disc, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ prepareEmptyPodTargetGroup(httpd),
+ prepareEmptyPodTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "UPDATE: set pods PodIP after sync": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ httpd.Status.PodIP = ""
+ nginx.Status.PodIP = ""
+ disc, client := prepareAllNsPodDiscoverer(httpd, nginx)
+ podClient := client.CoreV1().Pods("default")
+
+ return discoverySim{
+ td: disc,
+ runAfterSync: func(ctx context.Context) {
+ time.Sleep(time.Millisecond * 50)
+ _, _ = podClient.Update(ctx, newHTTPDPod(), metav1.UpdateOptions{})
+ _, _ = podClient.Update(ctx, newNGINXPod(), metav1.UpdateOptions{})
+ },
+ wantTargetGroups: []model.TargetGroup{
+ prepareEmptyPodTargetGroup(httpd),
+ prepareEmptyPodTargetGroup(nginx),
+ preparePodTargetGroup(newHTTPDPod()),
+ preparePodTargetGroup(newNGINXPod()),
+ },
+ }
+ },
+ },
+ "ADD: pods without containers": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDPod(), newNGINXPod()
+ httpd.Spec.Containers = httpd.Spec.Containers[:0]
+ nginx.Spec.Containers = nginx.Spec.Containers[:0]
+ disc, _ := prepareAllNsPodDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ prepareEmptyPodTargetGroup(httpd),
+ prepareEmptyPodTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "Env: from value": {
+ createSim: func() discoverySim {
+ httpd := newHTTPDPod()
+ mangle := func(c *corev1.Container) {
+ c.Env = []corev1.EnvVar{
+ {Name: "key1", Value: "value1"},
+ }
+ }
+ mangleContainers(httpd.Spec.Containers, mangle)
+ data := map[string]string{"key1": "value1"}
+
+ disc, _ := prepareAllNsPodDiscoverer(httpd)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroupWithEnv(httpd, data),
+ },
+ }
+ },
+ },
+ "Env: from Secret": {
+ createSim: func() discoverySim {
+ httpd := newHTTPDPod()
+ mangle := func(c *corev1.Container) {
+ c.Env = []corev1.EnvVar{
+ {
+ Name: "key1",
+ ValueFrom: &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"},
+ Key: "key1",
+ }},
+ },
+ }
+ }
+ mangleContainers(httpd.Spec.Containers, mangle)
+ data := map[string]string{"key1": "value1"}
+ secret := prepareSecret("my-secret", data)
+
+ disc, _ := prepareAllNsPodDiscoverer(httpd, secret)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroupWithEnv(httpd, data),
+ },
+ }
+ },
+ },
+ "Env: from ConfigMap": {
+ createSim: func() discoverySim {
+ httpd := newHTTPDPod()
+ mangle := func(c *corev1.Container) {
+ c.Env = []corev1.EnvVar{
+ {
+ Name: "key1",
+ ValueFrom: &corev1.EnvVarSource{ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
+ LocalObjectReference: corev1.LocalObjectReference{Name: "my-cmap"},
+ Key: "key1",
+ }},
+ },
+ }
+ }
+ mangleContainers(httpd.Spec.Containers, mangle)
+ data := map[string]string{"key1": "value1"}
+ cmap := prepareConfigMap("my-cmap", data)
+
+ disc, _ := prepareAllNsPodDiscoverer(httpd, cmap)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroupWithEnv(httpd, data),
+ },
+ }
+ },
+ },
+ "EnvFrom: from ConfigMap": {
+ createSim: func() discoverySim {
+ httpd := newHTTPDPod()
+ mangle := func(c *corev1.Container) {
+ c.EnvFrom = []corev1.EnvFromSource{
+ {
+ ConfigMapRef: &corev1.ConfigMapEnvSource{
+ LocalObjectReference: corev1.LocalObjectReference{Name: "my-cmap"}},
+ },
+ }
+ }
+ mangleContainers(httpd.Spec.Containers, mangle)
+ data := map[string]string{"key1": "value1", "key2": "value2"}
+ cmap := prepareConfigMap("my-cmap", data)
+
+ disc, _ := prepareAllNsPodDiscoverer(httpd, cmap)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroupWithEnv(httpd, data),
+ },
+ }
+ },
+ },
+ "EnvFrom: from Secret": {
+ createSim: func() discoverySim {
+ httpd := newHTTPDPod()
+ mangle := func(c *corev1.Container) {
+ c.EnvFrom = []corev1.EnvFromSource{
+ {
+ SecretRef: &corev1.SecretEnvSource{
+ LocalObjectReference: corev1.LocalObjectReference{Name: "my-secret"}},
+ },
+ }
+ }
+ mangleContainers(httpd.Spec.Containers, mangle)
+ data := map[string]string{"key1": "value1", "key2": "value2"}
+ secret := prepareSecret("my-secret", data)
+
+ disc, _ := prepareAllNsPodDiscoverer(httpd, secret)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ preparePodTargetGroupWithEnv(httpd, data),
+ },
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func prepareAllNsPodDiscoverer(objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) {
+ return prepareDiscoverer(rolePod, []string{corev1.NamespaceAll}, objects...)
+}
+
+func preparePodDiscoverer(namespaces []string, objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) {
+ return prepareDiscoverer(rolePod, namespaces, objects...)
+}
+
+func mangleContainers(containers []corev1.Container, mangle func(container *corev1.Container)) {
+ for i := range containers {
+ mangle(&containers[i])
+ }
+}
+
+var controllerTrue = true
+
+func newHTTPDPod() *corev1.Pod {
+ return &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "httpd-dd95c4d68-5bkwl",
+ Namespace: "default",
+ UID: "1cebb6eb-0c1e-495b-8131-8fa3e6668dc8",
+ Annotations: map[string]string{"phase": "prod"},
+ Labels: map[string]string{"app": "httpd", "tier": "frontend"},
+ OwnerReferences: []metav1.OwnerReference{
+ {Name: "netdata-test", Kind: "DaemonSet", Controller: &controllerTrue},
+ },
+ },
+ Spec: corev1.PodSpec{
+ NodeName: "m01",
+ Containers: []corev1.Container{
+ {
+ Name: "httpd",
+ Image: "httpd",
+ Ports: []corev1.ContainerPort{
+ {Name: "http", Protocol: corev1.ProtocolTCP, ContainerPort: 80},
+ {Name: "https", Protocol: corev1.ProtocolTCP, ContainerPort: 443},
+ },
+ },
+ },
+ },
+ Status: corev1.PodStatus{
+ PodIP: "172.17.0.1",
+ },
+ }
+}
+
+func newNGINXPod() *corev1.Pod {
+ return &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "nginx-7cfd77469b-q6kxj",
+ Namespace: "default",
+ UID: "09e883f2-d740-4c5f-970d-02cf02876522",
+ Annotations: map[string]string{"phase": "prod"},
+ Labels: map[string]string{"app": "nginx", "tier": "frontend"},
+ OwnerReferences: []metav1.OwnerReference{
+ {Name: "netdata-test", Kind: "DaemonSet", Controller: &controllerTrue},
+ },
+ },
+ Spec: corev1.PodSpec{
+ NodeName: "m01",
+ Containers: []corev1.Container{
+ {
+ Name: "nginx",
+ Image: "nginx",
+ Ports: []corev1.ContainerPort{
+ {Name: "http", Protocol: corev1.ProtocolTCP, ContainerPort: 80},
+ {Name: "https", Protocol: corev1.ProtocolTCP, ContainerPort: 443},
+ },
+ },
+ },
+ },
+ Status: corev1.PodStatus{
+ PodIP: "172.17.0.2",
+ },
+ }
+}
+
+func prepareConfigMap(name string, data map[string]string) *corev1.ConfigMap {
+ return &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: "default",
+ UID: types.UID("a03b8dc6-dc40-46dc-b571-5030e69d8167" + name),
+ },
+ Data: data,
+ }
+}
+
+func prepareSecret(name string, data map[string]string) *corev1.Secret {
+ secretData := make(map[string][]byte, len(data))
+ for k, v := range data {
+ secretData[k] = []byte(v)
+ }
+ return &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: "default",
+ UID: types.UID("a03b8dc6-dc40-46dc-b571-5030e69d8161" + name),
+ },
+ Data: secretData,
+ }
+}
+
+func prepareEmptyPodTargetGroup(pod *corev1.Pod) *podTargetGroup {
+ return &podTargetGroup{source: podSource(pod)}
+}
+
+func preparePodTargetGroup(pod *corev1.Pod) *podTargetGroup {
+ tgg := prepareEmptyPodTargetGroup(pod)
+
+ for _, container := range pod.Spec.Containers {
+ for _, port := range container.Ports {
+ portNum := strconv.FormatUint(uint64(port.ContainerPort), 10)
+ tgt := &PodTarget{
+ tuid: podTUIDWithPort(pod, container, port),
+ Address: net.JoinHostPort(pod.Status.PodIP, portNum),
+ Namespace: pod.Namespace,
+ Name: pod.Name,
+ Annotations: mapAny(pod.Annotations),
+ Labels: mapAny(pod.Labels),
+ NodeName: pod.Spec.NodeName,
+ PodIP: pod.Status.PodIP,
+ ControllerName: "netdata-test",
+ ControllerKind: "DaemonSet",
+ ContName: container.Name,
+ Image: container.Image,
+ Env: nil,
+ Port: portNum,
+ PortName: port.Name,
+ PortProtocol: string(port.Protocol),
+ }
+ tgt.hash = mustCalcHash(tgt)
+ tgt.Tags().Merge(discoveryTags)
+
+ tgg.targets = append(tgg.targets, tgt)
+ }
+ }
+
+ return tgg
+}
+
+func preparePodTargetGroupWithEnv(pod *corev1.Pod, env map[string]string) *podTargetGroup {
+ tgg := preparePodTargetGroup(pod)
+
+ for _, tgt := range tgg.Targets() {
+ tgt.(*PodTarget).Env = mapAny(env)
+ tgt.(*PodTarget).hash = mustCalcHash(tgt)
+ }
+
+ return tgg
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service.go
new file mode 100644
index 000000000..1d5ae7cd5
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service.go
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+)
+
+type serviceTargetGroup struct {
+ targets []model.Target
+ source string
+}
+
+func (s serviceTargetGroup) Provider() string { return "sd:k8s:service" }
+func (s serviceTargetGroup) Source() string { return s.source }
+func (s serviceTargetGroup) Targets() []model.Target { return s.targets }
+
+type ServiceTarget struct {
+ model.Base `hash:"ignore"`
+
+ hash uint64
+ tuid string
+
+ Address string
+ Namespace string
+ Name string
+ Annotations map[string]any
+ Labels map[string]any
+ Port string
+ PortName string
+ PortProtocol string
+ ClusterIP string
+ ExternalName string
+ Type string
+}
+
+func (s ServiceTarget) Hash() uint64 { return s.hash }
+func (s ServiceTarget) TUID() string { return s.tuid }
+
+type serviceDiscoverer struct {
+ *logger.Logger
+ model.Base
+
+ informer cache.SharedInformer
+ queue *workqueue.Type
+}
+
+func newServiceDiscoverer(inf cache.SharedInformer) *serviceDiscoverer {
+ if inf == nil {
+ panic("nil service informer")
+ }
+
+ queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "service"})
+ _, _ = inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj any) { enqueue(queue, obj) },
+ UpdateFunc: func(_, obj any) { enqueue(queue, obj) },
+ DeleteFunc: func(obj any) { enqueue(queue, obj) },
+ })
+
+ return &serviceDiscoverer{
+ Logger: log,
+ informer: inf,
+ queue: queue,
+ }
+}
+
+func (s *serviceDiscoverer) String() string {
+ return "k8s service"
+}
+
+func (s *serviceDiscoverer) Discover(ctx context.Context, ch chan<- []model.TargetGroup) {
+ s.Info("instance is started")
+ defer s.Info("instance is stopped")
+ defer s.queue.ShutDown()
+
+ go s.informer.Run(ctx.Done())
+
+ if !cache.WaitForCacheSync(ctx.Done(), s.informer.HasSynced) {
+ s.Error("failed to sync caches")
+ return
+ }
+
+ go s.run(ctx, ch)
+
+ <-ctx.Done()
+}
+
+func (s *serviceDiscoverer) run(ctx context.Context, in chan<- []model.TargetGroup) {
+ for {
+ item, shutdown := s.queue.Get()
+ if shutdown {
+ return
+ }
+
+ s.handleQueueItem(ctx, in, item)
+ }
+}
+
+func (s *serviceDiscoverer) handleQueueItem(ctx context.Context, in chan<- []model.TargetGroup, item any) {
+ defer s.queue.Done(item)
+
+ key := item.(string)
+ namespace, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ return
+ }
+
+ obj, exists, err := s.informer.GetStore().GetByKey(key)
+ if err != nil {
+ return
+ }
+
+ if !exists {
+ tgg := &serviceTargetGroup{source: serviceSourceFromNsName(namespace, name)}
+ send(ctx, in, tgg)
+ return
+ }
+
+ svc, err := toService(obj)
+ if err != nil {
+ return
+ }
+
+ tgg := s.buildTargetGroup(svc)
+
+ for _, tgt := range tgg.Targets() {
+ tgt.Tags().Merge(s.Tags())
+ }
+
+ send(ctx, in, tgg)
+}
+
+func (s *serviceDiscoverer) buildTargetGroup(svc *corev1.Service) model.TargetGroup {
+ // TODO: headless service?
+ if svc.Spec.ClusterIP == "" || len(svc.Spec.Ports) == 0 {
+ return &serviceTargetGroup{
+ source: serviceSource(svc),
+ }
+ }
+ return &serviceTargetGroup{
+ source: serviceSource(svc),
+ targets: s.buildTargets(svc),
+ }
+}
+
+func (s *serviceDiscoverer) buildTargets(svc *corev1.Service) (targets []model.Target) {
+ for _, port := range svc.Spec.Ports {
+ portNum := strconv.FormatInt(int64(port.Port), 10)
+ tgt := &ServiceTarget{
+ tuid: serviceTUID(svc, port),
+ Address: net.JoinHostPort(svc.Name+"."+svc.Namespace+".svc", portNum),
+ Namespace: svc.Namespace,
+ Name: svc.Name,
+ Annotations: mapAny(svc.Annotations),
+ Labels: mapAny(svc.Labels),
+ Port: portNum,
+ PortName: port.Name,
+ PortProtocol: string(port.Protocol),
+ ClusterIP: svc.Spec.ClusterIP,
+ ExternalName: svc.Spec.ExternalName,
+ Type: string(svc.Spec.Type),
+ }
+ hash, err := calcHash(tgt)
+ if err != nil {
+ continue
+ }
+ tgt.hash = hash
+
+ targets = append(targets, tgt)
+ }
+
+ return targets
+}
+
+func serviceTUID(svc *corev1.Service, port corev1.ServicePort) string {
+ return fmt.Sprintf("%s_%s_%s_%s",
+ svc.Namespace,
+ svc.Name,
+ strings.ToLower(string(port.Protocol)),
+ strconv.FormatInt(int64(port.Port), 10),
+ )
+}
+
+func serviceSourceFromNsName(namespace, name string) string {
+ return fmt.Sprintf("discoverer=k8s,kind=service,namespace=%s,service_name=%s", namespace, name)
+}
+
+func serviceSource(svc *corev1.Service) string {
+ return serviceSourceFromNsName(svc.Namespace, svc.Name)
+}
+
+func toService(obj any) (*corev1.Service, error) {
+ svc, ok := obj.(*corev1.Service)
+ if !ok {
+ return nil, fmt.Errorf("received unexpected object type: %T", obj)
+ }
+ return svc, nil
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service_test.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service_test.go
new file mode 100644
index 000000000..c3e83e202
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/service_test.go
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+ "context"
+ "net"
+ "strconv"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/stretchr/testify/assert"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/tools/cache"
+)
+
+func TestServiceTargetGroup_Provider(t *testing.T) {
+ var s serviceTargetGroup
+ assert.NotEmpty(t, s.Provider())
+}
+
+func TestServiceTargetGroup_Source(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ wantSources []string
+ }{
+ "ClusterIP svc with multiple ports": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ prepareSvcTargetGroup(httpd),
+ prepareSvcTargetGroup(nginx),
+ },
+ }
+ },
+ wantSources: []string{
+ "discoverer=k8s,kind=service,namespace=default,service_name=httpd-cluster-ip-service",
+ "discoverer=k8s,kind=service,namespace=default,service_name=nginx-cluster-ip-service",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+
+ var sources []string
+ for _, tgg := range sim.run(t) {
+ sources = append(sources, tgg.Source())
+ }
+
+ assert.Equal(t, test.wantSources, sources)
+ })
+ }
+}
+
+func TestServiceTargetGroup_Targets(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ wantTargets int
+ }{
+ "ClusterIP svc with multiple ports": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ prepareSvcTargetGroup(httpd),
+ prepareSvcTargetGroup(nginx),
+ },
+ }
+ },
+ wantTargets: 4,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+
+ var targets int
+ for _, tgg := range sim.run(t) {
+ targets += len(tgg.Targets())
+ }
+
+ assert.Equal(t, test.wantTargets, targets)
+ })
+ }
+}
+
+func TestServiceTarget_Hash(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ wantHashes []uint64
+ }{
+ "ClusterIP svc with multiple ports": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ prepareSvcTargetGroup(httpd),
+ prepareSvcTargetGroup(nginx),
+ },
+ }
+ },
+ wantHashes: []uint64{
+ 17611803477081780974,
+ 6019985892433421258,
+ 4151907287549842238,
+ 5757608926096186119,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+
+ var hashes []uint64
+ for _, tgg := range sim.run(t) {
+ for _, tgt := range tgg.Targets() {
+ hashes = append(hashes, tgt.Hash())
+ }
+ }
+
+ assert.Equal(t, test.wantHashes, hashes)
+ })
+ }
+}
+
+func TestServiceTarget_TUID(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ wantTUID []string
+ }{
+ "ClusterIP svc with multiple ports": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ prepareSvcTargetGroup(httpd),
+ prepareSvcTargetGroup(nginx),
+ },
+ }
+ },
+ wantTUID: []string{
+ "default_httpd-cluster-ip-service_tcp_80",
+ "default_httpd-cluster-ip-service_tcp_443",
+ "default_nginx-cluster-ip-service_tcp_80",
+ "default_nginx-cluster-ip-service_tcp_443",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+
+ var tuid []string
+ for _, tgg := range sim.run(t) {
+ for _, tgt := range tgg.Targets() {
+ tuid = append(tuid, tgt.TUID())
+ }
+ }
+
+ assert.Equal(t, test.wantTUID, tuid)
+ })
+ }
+}
+
+func TestNewServiceDiscoverer(t *testing.T) {
+ tests := map[string]struct {
+ informer cache.SharedInformer
+ wantPanic bool
+ }{
+ "valid informer": {
+ wantPanic: false,
+ informer: cache.NewSharedInformer(nil, &corev1.Service{}, resyncPeriod),
+ },
+ "nil informer": {
+ wantPanic: true,
+ informer: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ f := func() { newServiceDiscoverer(test.informer) }
+
+ if test.wantPanic {
+ assert.Panics(t, f)
+ } else {
+ assert.NotPanics(t, f)
+ }
+ })
+ }
+}
+
+func TestServiceDiscoverer_String(t *testing.T) {
+ var s serviceDiscoverer
+ assert.NotEmpty(t, s.String())
+}
+
+func TestServiceDiscoverer_Discover(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() discoverySim
+ }{
+ "ADD: ClusterIP svc exist before run": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ prepareSvcTargetGroup(httpd),
+ prepareSvcTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "ADD: ClusterIP svc exist before run and add after sync": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ disc, client := prepareAllNsSvcDiscoverer(httpd)
+ svcClient := client.CoreV1().Services("default")
+
+ return discoverySim{
+ td: disc,
+ runAfterSync: func(ctx context.Context) {
+ _, _ = svcClient.Create(ctx, nginx, metav1.CreateOptions{})
+ },
+ wantTargetGroups: []model.TargetGroup{
+ prepareSvcTargetGroup(httpd),
+ prepareSvcTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "DELETE: ClusterIP svc remove after sync": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ disc, client := prepareAllNsSvcDiscoverer(httpd, nginx)
+ svcClient := client.CoreV1().Services("default")
+
+ return discoverySim{
+ td: disc,
+ runAfterSync: func(ctx context.Context) {
+ time.Sleep(time.Millisecond * 50)
+ _ = svcClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{})
+ _ = svcClient.Delete(ctx, nginx.Name, metav1.DeleteOptions{})
+ },
+ wantTargetGroups: []model.TargetGroup{
+ prepareSvcTargetGroup(httpd),
+ prepareSvcTargetGroup(nginx),
+ prepareEmptySvcTargetGroup(httpd),
+ prepareEmptySvcTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "ADD,DELETE: ClusterIP svc remove and add after sync": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ disc, client := prepareAllNsSvcDiscoverer(httpd)
+ svcClient := client.CoreV1().Services("default")
+
+ return discoverySim{
+ td: disc,
+ runAfterSync: func(ctx context.Context) {
+ time.Sleep(time.Millisecond * 50)
+ _ = svcClient.Delete(ctx, httpd.Name, metav1.DeleteOptions{})
+ _, _ = svcClient.Create(ctx, nginx, metav1.CreateOptions{})
+ },
+ wantTargetGroups: []model.TargetGroup{
+ prepareSvcTargetGroup(httpd),
+ prepareEmptySvcTargetGroup(httpd),
+ prepareSvcTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "ADD: Headless svc exist before run": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDHeadlessService(), newNGINXHeadlessService()
+ disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ prepareEmptySvcTargetGroup(httpd),
+ prepareEmptySvcTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ "UPDATE: Headless => ClusterIP svc after sync": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDHeadlessService(), newNGINXHeadlessService()
+ httpdUpd, nginxUpd := *httpd, *nginx
+ httpdUpd.Spec.ClusterIP = "10.100.0.1"
+ nginxUpd.Spec.ClusterIP = "10.100.0.2"
+ disc, client := prepareAllNsSvcDiscoverer(httpd, nginx)
+ svcClient := client.CoreV1().Services("default")
+
+ return discoverySim{
+ td: disc,
+ runAfterSync: func(ctx context.Context) {
+ time.Sleep(time.Millisecond * 50)
+ _, _ = svcClient.Update(ctx, &httpdUpd, metav1.UpdateOptions{})
+ _, _ = svcClient.Update(ctx, &nginxUpd, metav1.UpdateOptions{})
+ },
+ wantTargetGroups: []model.TargetGroup{
+ prepareEmptySvcTargetGroup(httpd),
+ prepareEmptySvcTargetGroup(nginx),
+ prepareSvcTargetGroup(&httpdUpd),
+ prepareSvcTargetGroup(&nginxUpd),
+ },
+ }
+ },
+ },
+ "ADD: ClusterIP svc with zero exposed ports": {
+ createSim: func() discoverySim {
+ httpd, nginx := newHTTPDClusterIPService(), newNGINXClusterIPService()
+ httpd.Spec.Ports = httpd.Spec.Ports[:0]
+ nginx.Spec.Ports = nginx.Spec.Ports[:0]
+ disc, _ := prepareAllNsSvcDiscoverer(httpd, nginx)
+
+ return discoverySim{
+ td: disc,
+ wantTargetGroups: []model.TargetGroup{
+ prepareEmptySvcTargetGroup(httpd),
+ prepareEmptySvcTargetGroup(nginx),
+ },
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func prepareAllNsSvcDiscoverer(objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) {
+ return prepareDiscoverer(roleService, []string{corev1.NamespaceAll}, objects...)
+}
+
+func prepareSvcDiscoverer(namespaces []string, objects ...runtime.Object) (*KubeDiscoverer, kubernetes.Interface) {
+ return prepareDiscoverer(roleService, namespaces, objects...)
+}
+
+func newHTTPDClusterIPService() *corev1.Service {
+ return &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "httpd-cluster-ip-service",
+ Namespace: "default",
+ Annotations: map[string]string{"phase": "prod"},
+ Labels: map[string]string{"app": "httpd", "tier": "frontend"},
+ },
+ Spec: corev1.ServiceSpec{
+ Ports: []corev1.ServicePort{
+ {Name: "http", Protocol: corev1.ProtocolTCP, Port: 80},
+ {Name: "https", Protocol: corev1.ProtocolTCP, Port: 443},
+ },
+ Type: corev1.ServiceTypeClusterIP,
+ ClusterIP: "10.100.0.1",
+ Selector: map[string]string{"app": "httpd", "tier": "frontend"},
+ },
+ }
+}
+
+func newNGINXClusterIPService() *corev1.Service {
+ return &corev1.Service{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "nginx-cluster-ip-service",
+ Namespace: "default",
+ Annotations: map[string]string{"phase": "prod"},
+ Labels: map[string]string{"app": "nginx", "tier": "frontend"},
+ },
+ Spec: corev1.ServiceSpec{
+ Ports: []corev1.ServicePort{
+ {Name: "http", Protocol: corev1.ProtocolTCP, Port: 80},
+ {Name: "https", Protocol: corev1.ProtocolTCP, Port: 443},
+ },
+ Type: corev1.ServiceTypeClusterIP,
+ ClusterIP: "10.100.0.2",
+ Selector: map[string]string{"app": "nginx", "tier": "frontend"},
+ },
+ }
+}
+
+func newHTTPDHeadlessService() *corev1.Service {
+ svc := newHTTPDClusterIPService()
+ svc.Name = "httpd-headless-service"
+ svc.Spec.ClusterIP = ""
+ return svc
+}
+
+func newNGINXHeadlessService() *corev1.Service {
+ svc := newNGINXClusterIPService()
+ svc.Name = "nginx-headless-service"
+ svc.Spec.ClusterIP = ""
+ return svc
+}
+
+func prepareEmptySvcTargetGroup(svc *corev1.Service) *serviceTargetGroup {
+ return &serviceTargetGroup{source: serviceSource(svc)}
+}
+
+func prepareSvcTargetGroup(svc *corev1.Service) *serviceTargetGroup {
+ tgg := prepareEmptySvcTargetGroup(svc)
+
+ for _, port := range svc.Spec.Ports {
+ portNum := strconv.FormatInt(int64(port.Port), 10)
+ tgt := &ServiceTarget{
+ tuid: serviceTUID(svc, port),
+ Address: net.JoinHostPort(svc.Name+"."+svc.Namespace+".svc", portNum),
+ Namespace: svc.Namespace,
+ Name: svc.Name,
+ Annotations: mapAny(svc.Annotations),
+ Labels: mapAny(svc.Labels),
+ Port: portNum,
+ PortName: port.Name,
+ PortProtocol: string(port.Protocol),
+ ClusterIP: svc.Spec.ClusterIP,
+ ExternalName: svc.Spec.ExternalName,
+ Type: string(svc.Spec.Type),
+ }
+ tgt.hash = mustCalcHash(tgt)
+ tgt.Tags().Merge(discoveryTags)
+ tgg.targets = append(tgg.targets, tgt)
+ }
+
+ return tgg
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/sim_test.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/sim_test.go
new file mode 100644
index 000000000..99bdfae54
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/kubernetes/sim_test.go
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package kubernetes
+
+import (
+ "context"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "k8s.io/client-go/tools/cache"
+)
+
+const (
+ startWaitTimeout = time.Second * 3
+ finishWaitTimeout = time.Second * 5
+)
+
+type discoverySim struct {
+ td *KubeDiscoverer
+ runAfterSync func(ctx context.Context)
+ sortBeforeVerify bool
+ wantTargetGroups []model.TargetGroup
+}
+
+func (sim discoverySim) run(t *testing.T) []model.TargetGroup {
+ t.Helper()
+ require.NotNil(t, sim.td)
+ require.NotEmpty(t, sim.wantTargetGroups)
+
+ in, out := make(chan []model.TargetGroup), make(chan []model.TargetGroup)
+ go sim.collectTargetGroups(t, in, out)
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+ defer cancel()
+ go sim.td.Discover(ctx, in)
+
+ select {
+ case <-sim.td.started:
+ case <-time.After(startWaitTimeout):
+ t.Fatalf("td %s failed to start in %s", sim.td.discoverers, startWaitTimeout)
+ }
+
+ synced := cache.WaitForCacheSync(ctx.Done(), sim.td.hasSynced)
+ require.Truef(t, synced, "td %s failed to sync", sim.td.discoverers)
+
+ if sim.runAfterSync != nil {
+ sim.runAfterSync(ctx)
+ }
+
+ groups := <-out
+
+ if sim.sortBeforeVerify {
+ sortTargetGroups(groups)
+ }
+
+ sim.verifyResult(t, groups)
+ return groups
+}
+
+func (sim discoverySim) collectTargetGroups(t *testing.T, in, out chan []model.TargetGroup) {
+ var tggs []model.TargetGroup
+loop:
+ for {
+ select {
+ case inGroups := <-in:
+ if tggs = append(tggs, inGroups...); len(tggs) >= len(sim.wantTargetGroups) {
+ break loop
+ }
+ case <-time.After(finishWaitTimeout):
+ t.Logf("td %s timed out after %s, got %d groups, expected %d, some events are skipped",
+ sim.td.discoverers, finishWaitTimeout, len(tggs), len(sim.wantTargetGroups))
+ break loop
+ }
+ }
+ out <- tggs
+}
+
+func (sim discoverySim) verifyResult(t *testing.T, result []model.TargetGroup) {
+ var expected, actual any
+
+ if len(sim.wantTargetGroups) == len(result) {
+ expected = sim.wantTargetGroups
+ actual = result
+ } else {
+ want := make(map[string]model.TargetGroup)
+ for _, group := range sim.wantTargetGroups {
+ want[group.Source()] = group
+ }
+ got := make(map[string]model.TargetGroup)
+ for _, group := range result {
+ got[group.Source()] = group
+ }
+ expected, actual = want, got
+ }
+
+ assert.Equal(t, expected, actual)
+}
+
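+// hasSynced lets the simulation wait until every informer cache is synced
+// before it collects and verifies the produced target groups.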
+type hasSynced interface {
+ hasSynced() bool
+}
+
+var (
+ _ hasSynced = &KubeDiscoverer{}
+ _ hasSynced = &podDiscoverer{}
+ _ hasSynced = &serviceDiscoverer{}
+)
+
+func (d *KubeDiscoverer) hasSynced() bool {
+ for _, disc := range d.discoverers {
+ v, ok := disc.(hasSynced)
+ if !ok || !v.hasSynced() {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *podDiscoverer) hasSynced() bool {
+ return p.podInformer.HasSynced() && p.cmapInformer.HasSynced() && p.secretInformer.HasSynced()
+}
+
+func (s *serviceDiscoverer) hasSynced() bool {
+ return s.informer.HasSynced()
+}
+
+func sortTargetGroups(tggs []model.TargetGroup) {
+ if len(tggs) == 0 {
+ return
+ }
+ sort.Slice(tggs, func(i, j int) bool { return tggs[i].Source() < tggs[j].Source() })
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go
new file mode 100644
index 000000000..6f536c49e
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners.go
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package netlisteners
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/ilyam8/hashstructure"
+)
+
+var (
+ shortName = "net_listeners"
+ fullName = fmt.Sprintf("sd:%s", shortName)
+)
+
+func NewDiscoverer(cfg Config) (*Discoverer, error) {
+ tags, err := model.ParseTags(cfg.Tags)
+ if err != nil {
+ return nil, fmt.Errorf("parse tags: %v", err)
+ }
+
+ dir := os.Getenv("NETDATA_PLUGINS_DIR")
+ if dir == "" {
+ dir = executable.Directory
+ }
+ if dir == "" {
+ dir, _ = os.Getwd()
+ }
+
+ d := &Discoverer{
+ Logger: logger.New().With(
+ slog.String("component", "service discovery"),
+ slog.String("discoverer", shortName),
+ ),
+ cfgSource: cfg.Source,
+ ll: &localListenersExec{
+ binPath: filepath.Join(dir, "local-listeners"),
+ timeout: time.Second * 5,
+ },
+ interval: time.Minute * 2,
+ expiryTime: time.Minute * 10,
+ cache: make(map[uint64]*cacheItem),
+ started: make(chan struct{}),
+ }
+
+ d.Tags().Merge(tags)
+
+ return d, nil
+}
+
+type Config struct {
+ Source string `yaml:"-"`
+ Tags string `yaml:"tags"`
+}
+
+type (
+ Discoverer struct {
+ *logger.Logger
+ model.Base
+
+ cfgSource string
+
+ interval time.Duration
+ ll localListeners
+
+ expiryTime time.Duration
+ cache map[uint64]*cacheItem // [target.Hash]
+
+ started chan struct{}
+ }
+ cacheItem struct {
+ lastSeenTime time.Time
+ tgt model.Target
+ }
+ localListeners interface {
+ discover(ctx context.Context) ([]byte, error)
+ }
+)
+
+func (d *Discoverer) String() string {
+ return fullName
+}
+
+func (d *Discoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) {
+ d.Info("instance is started")
+ defer func() { d.Info("instance is stopped") }()
+
+ close(d.started)
+
+ if err := d.discoverLocalListeners(ctx, in); err != nil {
+ d.Error(err)
+ return
+ }
+
+ tk := time.NewTicker(d.interval)
+ defer tk.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-tk.C:
+ if err := d.discoverLocalListeners(ctx, in); err != nil {
+ d.Warning(err)
+ return
+ }
+ }
+ }
+}
+
+func (d *Discoverer) discoverLocalListeners(ctx context.Context, in chan<- []model.TargetGroup) error {
+ bs, err := d.ll.discover(ctx)
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ return nil
+ }
+ return err
+ }
+
+ tgts, err := d.parseLocalListeners(bs)
+ if err != nil {
+ return err
+ }
+
+ tggs := d.processTargets(tgts)
+
+ select {
+ case <-ctx.Done():
+ case in <- tggs:
+ }
+
+ return nil
+}
+
+func (d *Discoverer) processTargets(tgts []model.Target) []model.TargetGroup {
+ tgg := &targetGroup{
+ provider: fullName,
+ source: fmt.Sprintf("discoverer=%s,host=localhost", shortName),
+ }
+ if d.cfgSource != "" {
+ tgg.source += fmt.Sprintf(",%s", d.cfgSource)
+ }
+
+ if d.expiryTime.Milliseconds() == 0 {
+ tgg.targets = tgts
+ return []model.TargetGroup{tgg}
+ }
+
+ now := time.Now()
+
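+ // refresh the last-seen timestamp of every target reported in this run;
+ // cached targets not seen for longer than expiryTime are dropped below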
+ for _, tgt := range tgts {
+ hash := tgt.Hash()
+ if _, ok := d.cache[hash]; !ok {
+ d.cache[hash] = &cacheItem{tgt: tgt}
+ }
+ d.cache[hash].lastSeenTime = now
+ }
+
+ for k, v := range d.cache {
+ if now.Sub(v.lastSeenTime) > d.expiryTime {
+ delete(d.cache, k)
+ continue
+ }
+ tgg.targets = append(tgg.targets, v.tgt)
+ }
+
+ return []model.TargetGroup{tgg}
+}
+
+func (d *Discoverer) parseLocalListeners(bs []byte) ([]model.Target, error) {
+ const (
+ local4 = "127.0.0.1"
+ local6 = "::1"
+ )
+
+ var targets []target
+ sc := bufio.NewScanner(bytes.NewReader(bs))
+
+ for sc.Scan() {
+ text := strings.TrimSpace(sc.Text())
+ if text == "" {
+ continue
+ }
+
+ // Protocol|IPAddress|Port|Cmdline
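+ // e.g. "TCP|127.0.0.1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D"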
+ parts := strings.SplitN(text, "|", 4)
+ if len(parts) != 4 {
+ return nil, fmt.Errorf("unexpected data: '%s'", text)
+ }
+
+ tgt := target{
+ Protocol: parts[0],
+ IPAddress: parts[1],
+ Port: parts[2],
+ Comm: extractComm(parts[3]),
+ Cmdline: parts[3],
+ }
+
+ if tgt.Comm == "docker-proxy" {
+ continue
+ }
+
+ if tgt.IPAddress == "0.0.0.0" || strings.HasPrefix(tgt.IPAddress, "127") {
+ tgt.IPAddress = local4
+ } else if tgt.IPAddress == "::" {
+ tgt.IPAddress = local6
+ }
+
+ // quick support for https://github.com/netdata/netdata/pull/17866
+ // TODO: create both ipv4 and ipv6 targets?
+ if tgt.IPAddress == "*" {
+ tgt.IPAddress = local4
+ }
+
+ tgt.Address = net.JoinHostPort(tgt.IPAddress, tgt.Port)
+
+ hash, err := calcHash(tgt)
+ if err != nil {
+ continue
+ }
+
+ tgt.hash = hash
+ tgt.Tags().Merge(d.Tags())
+
+ targets = append(targets, tgt)
+ }
+
+ // order: TCP, TCP6, UDP, UDP6
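+ // within the same protocol and port, localhost addresses sort first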
+ sort.Slice(targets, func(i, j int) bool {
+ tgt1, tgt2 := targets[i], targets[j]
+ if tgt1.Protocol != tgt2.Protocol {
+ return tgt1.Protocol < tgt2.Protocol
+ }
+
+ p1, _ := strconv.Atoi(targets[i].Port)
+ p2, _ := strconv.Atoi(targets[j].Port)
+ if p1 != p2 {
+ return p1 < p2
+ }
+
+ return tgt1.IPAddress == local4 || tgt1.IPAddress == local6
+ })
+
+ seen := make(map[string]bool)
+ tgts := make([]model.Target, len(targets))
+ var n int
+
+ for _, tgt := range targets {
+ tgt := tgt
+
+ proto := strings.TrimSuffix(tgt.Protocol, "6")
+ key := tgt.Protocol + ":" + tgt.Address
+ keyLocal4 := proto + ":" + net.JoinHostPort(local4, tgt.Port)
+ keyLocal6 := proto + "6:" + net.JoinHostPort(local6, tgt.Port)
+
+ // Filter targets that accept conns on any (0.0.0.0) and additionally on each individual network interface (a.b.c.d).
+ // Create a target only for localhost. Assumption: any address always goes first.
+ if seen[key] || seen[keyLocal4] || seen[keyLocal6] {
+ continue
+ }
+ seen[key] = true
+
+ tgts[n] = &tgt
+ n++
+ }
+
+ return tgts[:n], nil
+}
+
+type localListenersExec struct {
+ binPath string
+ timeout time.Duration
+}
+
+func (e *localListenersExec) discover(ctx context.Context) ([]byte, error) {
+ execCtx, cancel := context.WithTimeout(ctx, e.timeout)
+ defer cancel()
+
+ // TCPv4/6 and UDPv4 sockets in LISTEN state
+ // https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/local_listeners.c
+ args := []string{
+ "no-udp6",
+ "no-local",
+ "no-inbound",
+ "no-outbound",
+ "no-namespaces",
+ }
+
+ cmd := exec.CommandContext(execCtx, e.binPath, args...)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on executing '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
+
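+// extractComm returns the process name (comm): the base name of the first
+// field of the command line, with a trailing ':' stripped.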
+func extractComm(cmdLine string) string {
+ if i := strings.IndexByte(cmdLine, ' '); i != -1 {
+ cmdLine = cmdLine[:i]
+ }
+ _, comm := filepath.Split(cmdLine)
+ return strings.TrimSuffix(comm, ":")
+}
+
+func calcHash(obj any) (uint64, error) {
+ return hashstructure.Hash(obj, nil)
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners_test.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners_test.go
new file mode 100644
index 000000000..9b3cae801
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/netlisteners_test.go
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package netlisteners
+
+import (
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+)
+
+func TestDiscoverer_Discover(t *testing.T) {
+ tests := map[string]discoverySim{
+ "add listeners": {
+ listenersCli: func(cli listenersCli, interval, expiry time.Duration) {
+ cli.addListener("UDP|127.0.0.1|323|/usr/sbin/chronyd")
+ cli.addListener("UDP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP6|::|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP6|2001:DB8::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP|127.0.0.1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP|0.0.0.0|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP|192.0.2.1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("UDP|127.0.0.1|53768|/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1")
+ cli.addListener("TCP46|*|80|/usr/sbin/httpd -k start")
+ cli.addListener("TCP6|::|80|/usr/sbin/apache2 -k start")
+ cli.addListener("TCP|0.0.0.0|80|/usr/sbin/apache2 -k start")
+ cli.addListener("TCP|0.0.0.0|8080|/usr/sbin/docker-proxy -proto tcp -host-ip 0.0.0.0 -host-port 8080 -container-ip 172.17.0.4 -container-port 80")
+ cli.addListener("TCP6|::|8080|/usr/sbin/docker-proxy -proto tcp -host-ip :: -host-port 8080 -container-ip 172.17.0.4 -container-port 80")
+ time.Sleep(interval * 2)
+ },
+ wantGroups: []model.TargetGroup{&targetGroup{
+ provider: "sd:net_listeners",
+ source: "discoverer=net_listeners,host=localhost",
+ targets: []model.Target{
+ withHash(&target{
+ Protocol: "UDP",
+ IPAddress: "127.0.0.1",
+ Port: "323",
+ Address: "127.0.0.1:323",
+ Comm: "chronyd",
+ Cmdline: "/usr/sbin/chronyd",
+ }),
+ withHash(&target{
+ Protocol: "TCP46",
+ IPAddress: "127.0.0.1",
+ Port: "80",
+ Address: "127.0.0.1:80",
+ Comm: "httpd",
+ Cmdline: "/usr/sbin/httpd -k start",
+ }),
+ withHash(&target{
+ Protocol: "TCP",
+ IPAddress: "127.0.0.1",
+ Port: "80",
+ Address: "127.0.0.1:80",
+ Comm: "apache2",
+ Cmdline: "/usr/sbin/apache2 -k start",
+ }),
+ withHash(&target{
+ Protocol: "TCP",
+ IPAddress: "127.0.0.1",
+ Port: "8125",
+ Address: "127.0.0.1:8125",
+ Comm: "netdata",
+ Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D",
+ }),
+ withHash(&target{
+ Protocol: "UDP",
+ IPAddress: "127.0.0.1",
+ Port: "53768",
+ Address: "127.0.0.1:53768",
+ Comm: "go.d.plugin",
+ Cmdline: "/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1",
+ }),
+ withHash(&target{
+ Protocol: "UDP6",
+ IPAddress: "::1",
+ Port: "8125",
+ Address: "[::1]:8125",
+ Comm: "netdata",
+ Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D",
+ }),
+ },
+ }},
+ },
+ "remove listeners; not expired": {
+ listenersCli: func(cli listenersCli, interval, expiry time.Duration) {
+ cli.addListener("UDP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP|127.0.0.1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("UDP|127.0.0.1|53768|/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1")
+ time.Sleep(interval * 2)
+ cli.removeListener("UDP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.removeListener("UDP|127.0.0.1|53768|/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1")
+ time.Sleep(interval * 2)
+ },
+ wantGroups: []model.TargetGroup{&targetGroup{
+ provider: "sd:net_listeners",
+ source: "discoverer=net_listeners,host=localhost",
+ targets: []model.Target{
+ withHash(&target{
+ Protocol: "UDP6",
+ IPAddress: "::1",
+ Port: "8125",
+ Address: "[::1]:8125",
+ Comm: "netdata",
+ Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D",
+ }),
+ withHash(&target{
+ Protocol: "TCP",
+ IPAddress: "127.0.0.1",
+ Port: "8125",
+ Address: "127.0.0.1:8125",
+ Comm: "netdata",
+ Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D",
+ }),
+ withHash(&target{
+ Protocol: "UDP",
+ IPAddress: "127.0.0.1",
+ Port: "53768",
+ Address: "127.0.0.1:53768",
+ Comm: "go.d.plugin",
+ Cmdline: "/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1",
+ }),
+ },
+ }},
+ },
+ "remove listeners; expired": {
+ listenersCli: func(cli listenersCli, interval, expiry time.Duration) {
+ cli.addListener("UDP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("TCP|127.0.0.1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.addListener("UDP|127.0.0.1|53768|/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1")
+ time.Sleep(interval * 2)
+ cli.removeListener("UDP6|::1|8125|/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D")
+ cli.removeListener("UDP|127.0.0.1|53768|/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1")
+ time.Sleep(expiry * 2)
+ },
+ wantGroups: []model.TargetGroup{&targetGroup{
+ provider: "sd:net_listeners",
+ source: "discoverer=net_listeners,host=localhost",
+ targets: []model.Target{
+ withHash(&target{
+ Protocol: "TCP",
+ IPAddress: "127.0.0.1",
+ Port: "8125",
+ Address: "127.0.0.1:8125",
+ Comm: "netdata",
+ Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D",
+ }),
+ },
+ }},
+ },
+ }
+
+ for name, sim := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim.run(t)
+ })
+ }
+}
+
+func withHash(l *target) *target {
+ l.hash, _ = calcHash(l)
+ tags, _ := model.ParseTags("netlisteners")
+ l.Tags().Merge(tags)
+ return l
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/sim_test.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/sim_test.go
new file mode 100644
index 000000000..4cb65832d
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/sim_test.go
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package netlisteners
+
+import (
+ "context"
+ "errors"
+ "slices"
+ "sort"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type listenersCli interface {
+ addListener(s string)
+ removeListener(s string)
+}
+
+type discoverySim struct {
+ listenersCli func(cli listenersCli, interval, expiry time.Duration)
+ wantGroups []model.TargetGroup
+}
+
+func (sim *discoverySim) run(t *testing.T) {
+ d, err := NewDiscoverer(Config{
+ Source: "",
+ Tags: "netlisteners",
+ })
+ require.NoError(t, err)
+
+ mock := newMockLocalListenersExec()
+
+ d.ll = mock
+
+ d.interval = time.Millisecond * 100
+ d.expiryTime = time.Second * 1
+
+ seen := make(map[string]model.TargetGroup)
+ ctx, cancel := context.WithCancel(context.Background())
+ in := make(chan []model.TargetGroup)
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ d.Discover(ctx, in)
+ }()
+
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case tggs := <-in:
+ for _, tgg := range tggs {
+ seen[tgg.Source()] = tgg
+ }
+ }
+ }
+ }()
+
+ done := make(chan struct{})
+ go func() {
+ defer close(done)
+ wg.Wait()
+ }()
+
+ select {
+ case <-d.started:
+ case <-time.After(time.Second * 3):
+ require.Fail(t, "discovery failed to start")
+ }
+
+ sim.listenersCli(mock, d.interval, d.expiryTime)
+
+ cancel()
+
+ select {
+ case <-done:
+ case <-time.After(time.Second * 3):
+ require.Fail(t, "discovery hasn't finished after cancel")
+ }
+
+ var tggs []model.TargetGroup
+ for _, tgg := range seen {
+ tggs = append(tggs, tgg)
+ }
+
+ sortTargetGroups(tggs)
+ sortTargetGroups(sim.wantGroups)
+
+ wantLen, gotLen := calcTargets(sim.wantGroups), calcTargets(tggs)
+ assert.Equalf(t, wantLen, gotLen, "different len (want %d got %d)", wantLen, gotLen)
+ assert.Equal(t, sim.wantGroups, tggs)
+}
+
+func newMockLocalListenersExec() *mockLocalListenersExec {
+ return &mockLocalListenersExec{}
+}
+
+type mockLocalListenersExec struct {
+ errResponse bool
+ mux sync.Mutex
+ listeners []string
+}
+
+func (m *mockLocalListenersExec) addListener(s string) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ m.listeners = append(m.listeners, s)
+}
+
+func (m *mockLocalListenersExec) removeListener(s string) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ if i := slices.Index(m.listeners, s); i != -1 {
+ m.listeners = append(m.listeners[:i], m.listeners[i+1:]...)
+ }
+}
+
+func (m *mockLocalListenersExec) discover(context.Context) ([]byte, error) {
+ if m.errResponse {
+ return nil, errors.New("mock discover() error")
+ }
+
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ var buf strings.Builder
+ for _, s := range m.listeners {
+ buf.WriteString(s)
+ buf.WriteByte('\n')
+ }
+
+ return []byte(buf.String()), nil
+}
+
+func calcTargets(tggs []model.TargetGroup) int {
+ var n int
+ for _, tgg := range tggs {
+ n += len(tgg.Targets())
+ }
+ return n
+}
+
+func sortTargetGroups(tggs []model.TargetGroup) {
+ if len(tggs) == 0 {
+ return
+ }
+ sort.Slice(tggs, func(i, j int) bool { return tggs[i].Source() < tggs[j].Source() })
+
+ for idx := range tggs {
+ tgts := tggs[idx].Targets()
+ sort.Slice(tgts, func(i, j int) bool { return tgts[i].Hash() < tgts[j].Hash() })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/target.go b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/target.go
new file mode 100644
index 000000000..9d57d3cc7
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/discoverer/netlisteners/target.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package netlisteners
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+)
+
+type targetGroup struct {
+ provider string
+ source string
+ targets []model.Target
+}
+
+func (g *targetGroup) Provider() string { return g.provider }
+func (g *targetGroup) Source() string { return g.source }
+func (g *targetGroup) Targets() []model.Target { return g.targets }
+
+type target struct {
+ model.Base
+
+ hash uint64
+
+ Protocol string
+ IPAddress string
+ Port string
+ Comm string
+ Cmdline string
+
+ Address string // "IPAddress:Port"
+}
+
+func (t *target) TUID() string { return tuid(t) }
+func (t *target) Hash() uint64 { return t.hash }
+
+func tuid(tgt *target) string {
+ return fmt.Sprintf("%s_%s_%d", strings.ToLower(tgt.Protocol), tgt.Port, tgt.hash)
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/model/discoverer.go b/src/go/plugin/go.d/agent/discovery/sd/model/discoverer.go
new file mode 100644
index 000000000..301322d32
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/model/discoverer.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package model
+
+import (
+ "context"
+)
+
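+// Discoverer pushes discovered target groups to the channel; implementations
+// stop when ctx is done.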
+type Discoverer interface {
+ Discover(ctx context.Context, ch chan<- []TargetGroup)
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/model/tags.go b/src/go/plugin/go.d/agent/discovery/sd/model/tags.go
new file mode 100644
index 000000000..22517d77e
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/model/tags.go
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package model
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+type Base struct {
+ tags Tags
+}
+
+func (b *Base) Tags() Tags {
+ if b.tags == nil {
+ b.tags = NewTags()
+ }
+ return b.tags
+}
+
+type Tags map[string]struct{}
+
+func NewTags() Tags {
+ return Tags{}
+}
+
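+// Merge adds the given tags to the set; a tag prefixed with '-' removes that
+// tag from the set instead of adding it.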
+func (t Tags) Merge(tags Tags) {
+ for tag := range tags {
+ if strings.HasPrefix(tag, "-") {
+ delete(t, tag[1:])
+ } else {
+ t[tag] = struct{}{}
+ }
+ }
+}
+
+func (t Tags) Clone() Tags {
+ ts := NewTags()
+ ts.Merge(t)
+ return ts
+}
+
+func (t Tags) String() string {
+ ts := make([]string, 0, len(t))
+ for key := range t {
+ ts = append(ts, key)
+ }
+ sort.Strings(ts)
+ return fmt.Sprintf("{%s}", strings.Join(ts, ", "))
+}
+
+func ParseTags(line string) (Tags, error) {
+ words := strings.Fields(line)
+ if len(words) == 0 {
+ return NewTags(), nil
+ }
+
+ tags := NewTags()
+ for _, tag := range words {
+ if !isTagWordValid(tag) {
+ return nil, fmt.Errorf("tags '%s' contains tag '%s' with forbidden symbol", line, tag)
+ }
+ tags[tag] = struct{}{}
+ }
+ return tags, nil
+}
+
+func isTagWordValid(word string) bool {
+ // valid:
+ // ^[a-zA-Z][a-zA-Z0-9=_.]*$
+ word = strings.TrimPrefix(word, "-")
+ if len(word) == 0 {
+ return false
+ }
+ for i, b := range word {
+ switch {
+ default:
+ return false
+ case b >= 'a' && b <= 'z':
+ case b >= 'A' && b <= 'Z':
+ case b >= '0' && b <= '9' && i > 0:
+ case (b == '=' || b == '_' || b == '.') && i > 0:
+ }
+ }
+ return true
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/model/tags_test.go b/src/go/plugin/go.d/agent/discovery/sd/model/tags_test.go
new file mode 100644
index 000000000..4f07bcbf6
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/model/tags_test.go
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package model
diff --git a/src/go/plugin/go.d/agent/discovery/sd/model/target.go b/src/go/plugin/go.d/agent/discovery/sd/model/target.go
new file mode 100644
index 000000000..eb2bd9d51
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/model/target.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package model
+
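+// Target is a single discovered endpoint; TargetGroup is a set of targets
+// that share the same provider and source.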
+type Target interface {
+ Hash() uint64
+ Tags() Tags
+ TUID() string
+}
+
+type TargetGroup interface {
+ Targets() []Target
+ Provider() string
+ Source() string
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/accumulator.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/accumulator.go
new file mode 100644
index 000000000..60c901492
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/accumulator.go
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+)
+
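+// accumulator merges target group updates coming from all registered
+// discoverers and forwards the combined snapshot downstream at most once per
+// sendEvery interval.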
+func newAccumulator() *accumulator {
+ return &accumulator{
+ send: make(chan struct{}, 1),
+ sendEvery: time.Second * 2,
+ mux: &sync.Mutex{},
+ tggs: make(map[string]model.TargetGroup),
+ }
+}
+
+type accumulator struct {
+ *logger.Logger
+ discoverers []model.Discoverer
+ send chan struct{}
+ sendEvery time.Duration
+ mux *sync.Mutex
+ tggs map[string]model.TargetGroup
+}
+
+func (a *accumulator) run(ctx context.Context, in chan []model.TargetGroup) {
+ updates := make(chan []model.TargetGroup)
+
+ var wg sync.WaitGroup
+ for _, d := range a.discoverers {
+ wg.Add(1)
+ d := d
+ go func() { defer wg.Done(); a.runDiscoverer(ctx, d, updates) }()
+ }
+
+ done := make(chan struct{})
+ go func() { defer close(done); wg.Wait() }()
+
+ tk := time.NewTicker(a.sendEvery)
+ defer tk.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ select {
+ case <-done:
+ a.Info("all discoverers exited")
+ case <-time.After(time.Second * 3):
+ a.Warning("not all discoverers exited")
+ }
+ a.trySend(in)
+ return
+ case <-done:
+ if !isDone(ctx) {
+ a.Info("all discoverers exited before ctx done")
+ } else {
+ a.Info("all discoverers exited")
+ }
+ a.trySend(in)
+ return
+ case <-tk.C:
+ select {
+ case <-a.send:
+ a.trySend(in)
+ default:
+ }
+ }
+ }
+}
+
+func (a *accumulator) runDiscoverer(ctx context.Context, d model.Discoverer, updates chan []model.TargetGroup) {
+ done := make(chan struct{})
+ go func() { defer close(done); d.Discover(ctx, updates) }()
+
+ for {
+ select {
+ case <-ctx.Done():
+ select {
+ case <-done:
+ case <-time.After(time.Second * 2):
+ a.Warningf("discoverer '%v' didn't exit on ctx done", d)
+ }
+ return
+ case <-done:
+ if !isDone(ctx) {
+ a.Infof("discoverer '%v' exited before ctx done", d)
+ }
+ return
+ case tggs := <-updates:
+ a.mux.Lock()
+ a.groupsUpdate(tggs)
+ a.mux.Unlock()
+ a.triggerSend()
+ }
+ }
+}
+
+func (a *accumulator) trySend(in chan<- []model.TargetGroup) {
+ a.mux.Lock()
+ defer a.mux.Unlock()
+
+ select {
+ case in <- a.groupsList():
+ a.groupsReset()
+ default:
+ a.triggerSend()
+ }
+}
+
+func (a *accumulator) triggerSend() {
+ select {
+ case a.send <- struct{}{}:
+ default:
+ }
+}
+
+func (a *accumulator) groupsUpdate(tggs []model.TargetGroup) {
+ for _, tgg := range tggs {
+ a.tggs[tgg.Source()] = tgg
+ }
+}
+
+func (a *accumulator) groupsReset() {
+ for key := range a.tggs {
+ delete(a.tggs, key)
+ }
+}
+
+func (a *accumulator) groupsList() []model.TargetGroup {
+ tggs := make([]model.TargetGroup, 0, len(a.tggs))
+ for _, tgg := range a.tggs {
+ if tgg != nil {
+ tggs = append(tggs, tgg)
+ }
+ }
+ return tggs
+}
+
+func isDone(ctx context.Context) bool {
+ select {
+ case <-ctx.Done():
+ return true
+ default:
+ return false
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/classify.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/classify.go
new file mode 100644
index 000000000..a7490d2e0
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/classify.go
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+ "text/template"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+)
+
+func newTargetClassificator(cfg []ClassifyRuleConfig) (*targetClassificator, error) {
+ rules, err := newClassifyRules(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ c := &targetClassificator{
+ rules: rules,
+ buf: bytes.Buffer{},
+ }
+
+ return c, nil
+}
+
+type (
+ targetClassificator struct {
+ *logger.Logger
+ rules []*classifyRule
+ buf bytes.Buffer
+ }
+
+ classifyRule struct {
+ name string
+ sr selector
+ tags model.Tags
+ match []*classifyRuleMatch
+ }
+ classifyRuleMatch struct {
+ tags model.Tags
+ expr *template.Template
+ }
+)
+
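+// classify returns the tags accumulated from all matching rules; tags added by
+// earlier rules are visible to the selectors of later rules via a working copy
+// of the target's tags.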
+func (c *targetClassificator) classify(tgt model.Target) model.Tags {
+ tgtTags := tgt.Tags().Clone()
+ var tags model.Tags
+
+ for i, rule := range c.rules {
+ if !rule.sr.matches(tgtTags) {
+ continue
+ }
+
+ for j, match := range rule.match {
+ c.buf.Reset()
+
+ if err := match.expr.Execute(&c.buf, tgt); err != nil {
+ c.Warningf("failed to execute classify rule[%d]->match[%d]->expr on target '%s'", i+1, j+1, tgt.TUID())
+ continue
+ }
+ if strings.TrimSpace(c.buf.String()) != "true" {
+ continue
+ }
+
+ if tags == nil {
+ tags = model.NewTags()
+ }
+
+ tags.Merge(rule.tags)
+ tags.Merge(match.tags)
+ tgtTags.Merge(tags)
+ }
+ }
+
+ return tags
+}
+
+func newClassifyRules(cfg []ClassifyRuleConfig) ([]*classifyRule, error) {
+ var rules []*classifyRule
+
+ fmap := newFuncMap()
+
+ for i, ruleCfg := range cfg {
+ i++
+ rule := classifyRule{name: ruleCfg.Name}
+
+ sr, err := parseSelector(ruleCfg.Selector)
+ if err != nil {
+ return nil, fmt.Errorf("rule '%d': %v", i, err)
+ }
+ rule.sr = sr
+
+ tags, err := model.ParseTags(ruleCfg.Tags)
+ if err != nil {
+ return nil, fmt.Errorf("rule '%d': %v", i, err)
+ }
+ rule.tags = tags
+
+ for j, matchCfg := range ruleCfg.Match {
+ j++
+ var match classifyRuleMatch
+
+ tags, err := model.ParseTags(matchCfg.Tags)
+ if err != nil {
+ return nil, fmt.Errorf("rule '%d/%d': %v", i, j, err)
+ }
+ match.tags = tags
+
+ tmpl, err := parseTemplate(matchCfg.Expr, fmap)
+ if err != nil {
+ return nil, fmt.Errorf("rule '%d/%d': %v", i, j, err)
+ }
+ match.expr = tmpl
+
+ rule.match = append(rule.match, &match)
+ }
+
+ rules = append(rules, &rule)
+ }
+
+ return rules, nil
+}
+
+func parseTemplate(s string, fmap template.FuncMap) (*template.Template, error) {
+ return template.New("root").
+ Option("missingkey=error").
+ Funcs(fmap).
+ Parse(s)
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/classify_test.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/classify_test.go
new file mode 100644
index 000000000..606e3411c
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/classify_test.go
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+func TestTargetClassificator_classify(t *testing.T) {
+ config := `
+- selector: "rule0"
+ tags: "skip"
+ match:
+ - tags: "skip"
+ expr: '{{ glob .Name "*" }}'
+- selector: "!skip rule1"
+ tags: "foo1"
+ match:
+ - tags: "bar1"
+ expr: '{{ glob .Name "mock*1*" }}'
+ - tags: "bar2"
+ expr: '{{ glob .Name "mock*2*" }}'
+- selector: "!skip rule2"
+ tags: "foo2"
+ match:
+ - tags: "bar3"
+ expr: '{{ glob .Name "mock*3*" }}'
+ - tags: "bar4"
+ expr: '{{ glob .Name "mock*4*" }}'
+- selector: "rule3"
+ tags: "foo3"
+ match:
+ - tags: "bar5"
+ expr: '{{ glob .Name "mock*5*" }}'
+ - tags: "bar6"
+ expr: '{{ glob .Name "mock*6*" }}'
+`
+ tests := map[string]struct {
+ target model.Target
+ wantTags model.Tags
+ }{
+ "no rules match": {
+ target: newMockTarget("mock1"),
+ wantTags: nil,
+ },
+ "one rule one match": {
+ target: newMockTarget("mock4", "rule2"),
+ wantTags: mustParseTags("foo2 bar4"),
+ },
+ "one rule two match": {
+ target: newMockTarget("mock56", "rule3"),
+ wantTags: mustParseTags("foo3 bar5 bar6"),
+ },
+ "all rules all matches": {
+ target: newMockTarget("mock123456", "rule1 rule2 rule3"),
+ wantTags: mustParseTags("foo1 foo2 foo3 bar1 bar2 bar3 bar4 bar5 bar6"),
+ },
+ "applying labels after every rule": {
+ target: newMockTarget("mock123456", "rule0 rule1 rule2 rule3"),
+ wantTags: mustParseTags("skip foo3 bar5 bar6"),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ var cfg []ClassifyRuleConfig
+
+ err := yaml.Unmarshal([]byte(config), &cfg)
+ require.NoError(t, err, "yaml unmarshalling of config")
+
+ clr, err := newTargetClassificator(cfg)
+ require.NoError(t, err, "targetClassificator creation")
+
+ assert.Equal(t, test.wantTags, clr.classify(test.target))
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/compose.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/compose.go
new file mode 100644
index 000000000..80830fd6d
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/compose.go
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "text/template"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "gopkg.in/yaml.v2"
+)
+
+func newConfigComposer(cfg []ComposeRuleConfig) (*configComposer, error) {
+ rules, err := newComposeRules(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ c := &configComposer{
+ rules: rules,
+ buf: bytes.Buffer{},
+ }
+
+ return c, nil
+}
+
+type (
+ configComposer struct {
+ *logger.Logger
+ rules []*composeRule
+ buf bytes.Buffer
+ }
+
+ composeRule struct {
+ name string
+ sr selector
+ conf []*composeRuleConf
+ }
+ composeRuleConf struct {
+ sr selector
+ tmpl *template.Template
+ }
+)
+
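+// compose renders the config templates of every matching rule against the
+// target and parses the resulting YAML into job configurations.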
+func (c *configComposer) compose(tgt model.Target) []confgroup.Config {
+ var configs []confgroup.Config
+
+ for i, rule := range c.rules {
+ if !rule.sr.matches(tgt.Tags()) {
+ continue
+ }
+
+ for j, conf := range rule.conf {
+ if !conf.sr.matches(tgt.Tags()) {
+ continue
+ }
+
+ c.buf.Reset()
+
+ if err := conf.tmpl.Execute(&c.buf, tgt); err != nil {
+ c.Warningf("failed to execute rule[%d]->config[%d]->template on target '%s': %v",
+ i+1, j+1, tgt.TUID(), err)
+ continue
+ }
+ if c.buf.Len() == 0 {
+ continue
+ }
+
+ cfgs, err := c.parseTemplateData(c.buf.Bytes())
+ if err != nil {
+ c.Warningf("failed to parse template data: %v", err)
+ continue
+ }
+
+ configs = append(configs, cfgs...)
+ }
+ }
+
+ if len(configs) > 0 {
+ c.Debugf("created %d config(s) for target '%s'", len(configs), tgt.TUID())
+ }
+ return configs
+}
+
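+// parseTemplateData accepts either a single YAML mapping or a YAML list of
+// mappings and returns the corresponding config(s).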
+func (c *configComposer) parseTemplateData(bs []byte) ([]confgroup.Config, error) {
+ var data any
+ if err := yaml.Unmarshal(bs, &data); err != nil {
+ return nil, err
+ }
+
+ type (
+ single = map[any]any
+ multi = []any
+ )
+
+ switch data.(type) {
+ case single:
+ var cfg confgroup.Config
+ if err := yaml.Unmarshal(bs, &cfg); err != nil {
+ return nil, err
+ }
+ return []confgroup.Config{cfg}, nil
+ case multi:
+ var cfgs []confgroup.Config
+ if err := yaml.Unmarshal(bs, &cfgs); err != nil {
+ return nil, err
+ }
+ return cfgs, nil
+ default:
+ return nil, errors.New("unknown config format")
+ }
+}
+
+func newComposeRules(cfg []ComposeRuleConfig) ([]*composeRule, error) {
+ var rules []*composeRule
+
+ fmap := newFuncMap()
+
+ for i, ruleCfg := range cfg {
+ i++
+ rule := composeRule{name: ruleCfg.Name}
+
+ sr, err := parseSelector(ruleCfg.Selector)
+ if err != nil {
+ return nil, fmt.Errorf("rule '%d': %v", i, err)
+ }
+ rule.sr = sr
+
+ for j, confCfg := range ruleCfg.Config {
+ j++
+ var conf composeRuleConf
+
+ sr, err := parseSelector(confCfg.Selector)
+ if err != nil {
+ return nil, fmt.Errorf("rule '%d/%d': %v", i, j, err)
+ }
+ conf.sr = sr
+
+ tmpl, err := parseTemplate(confCfg.Template, fmap)
+ if err != nil {
+ return nil, fmt.Errorf("rule '%d/%d': %v", i, j, err)
+ }
+ conf.tmpl = tmpl
+
+ rule.conf = append(rule.conf, &conf)
+ }
+
+ rules = append(rules, &rule)
+ }
+
+ return rules, nil
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/compose_test.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/compose_test.go
new file mode 100644
index 000000000..1c56bf086
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/compose_test.go
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+func TestConfigComposer_compose(t *testing.T) {
+ config := `
+- selector: "rule1"
+ config:
+ - selector: "bar1"
+ template: |
+ name: {{ .Name }}-1
+ - selector: "bar2"
+ template: |
+ name: {{ .Name }}-2
+- selector: "rule2"
+ config:
+ - selector: "bar3"
+ template: |
+ name: {{ .Name }}-3
+ - selector: "bar4"
+ template: |
+ name: {{ .Name }}-4
+- selector: "rule3"
+ config:
+ - selector: "bar5"
+ template: |
+ name: {{ .Name }}-5
+ - selector: "bar6"
+ template: |
+ - name: {{ .Name }}-6
+ - name: {{ .Name }}-7
+`
+ tests := map[string]struct {
+ target model.Target
+ wantConfigs []confgroup.Config
+ }{
+ "no rules matches": {
+ target: newMockTarget("mock"),
+ wantConfigs: nil,
+ },
+ "one rule one config": {
+ target: newMockTarget("mock", "rule1 bar1"),
+ wantConfigs: []confgroup.Config{
+ {"name": "mock-1"},
+ },
+ },
+ "one rule two config": {
+ target: newMockTarget("mock", "rule2 bar3 bar4"),
+ wantConfigs: []confgroup.Config{
+ {"name": "mock-3"},
+ {"name": "mock-4"},
+ },
+ },
+ "all rules all configs": {
+ target: newMockTarget("mock", "rule1 bar1 bar2 rule2 bar3 bar4 rule3 bar5 bar6"),
+ wantConfigs: []confgroup.Config{
+ {"name": "mock-1"},
+ {"name": "mock-2"},
+ {"name": "mock-3"},
+ {"name": "mock-4"},
+ {"name": "mock-5"},
+ {"name": "mock-6"},
+ {"name": "mock-7"},
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ var cfg []ComposeRuleConfig
+
+ err := yaml.Unmarshal([]byte(config), &cfg)
+ require.NoErrorf(t, err, "yaml unmarshalling of config")
+
+ cmr, err := newConfigComposer(cfg)
+ require.NoErrorf(t, err, "configComposer creation")
+
+ assert.Equal(t, test.wantConfigs, cmr.compose(test.target))
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/config.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/config.go
new file mode 100644
index 000000000..9df7ec59d
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/config.go
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/discoverer/dockerd"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/discoverer/kubernetes"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/discoverer/netlisteners"
+)
+
+type Config struct {
+ Source string `yaml:"-"`
+ ConfigDefaults confgroup.Registry `yaml:"-"`
+
+ Disabled bool `yaml:"disabled"`
+ Name string `yaml:"name"`
+ Discover []DiscoveryConfig `yaml:"discover"`
+ Classify []ClassifyRuleConfig `yaml:"classify"`
+ Compose []ComposeRuleConfig `yaml:"compose"`
+}
+
+type DiscoveryConfig struct {
+ Discoverer string `yaml:"discoverer"`
+ NetListeners netlisteners.Config `yaml:"net_listeners"`
+ Docker dockerd.Config `yaml:"docker"`
+ K8s []kubernetes.Config `yaml:"k8s"`
+}
+
+type ClassifyRuleConfig struct {
+ Name string `yaml:"name"`
+ Selector string `yaml:"selector"` // mandatory
+ Tags string `yaml:"tags"` // mandatory
+ Match []struct {
+ Tags string `yaml:"tags"` // mandatory
+ Expr string `yaml:"expr"` // mandatory
+ } `yaml:"match"` // mandatory, at least 1
+}
+
+type ComposeRuleConfig struct {
+ Name string `yaml:"name"` // optional
+ Selector string `yaml:"selector"` // mandatory
+ Config []struct {
+ Selector string `yaml:"selector"` // mandatory
+ Template string `yaml:"template"` // mandatory
+ } `yaml:"config"` // mandatory, at least 1
+}
+
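+// Illustrative pipeline configuration sketch (field names follow the yaml tags
+// in the structs above; the discoverer choice, selectors, tags and template
+// values are examples only, not taken from the shipped configs):
+//
+//  name: example
+//  discover:
+//    - discoverer: net_listeners
+//      net_listeners:
+//        tags: "unknown"
+//  classify:
+//    - selector: "unknown"
+//      tags: "nginx"
+//      match:
+//        - tags: "nginx"
+//          expr: '{{ glob .Cmdline "*nginx*" }}'
+//  compose:
+//    - selector: "nginx"
+//      config:
+//        - selector: "nginx"
+//          template: |
+//            module: nginx
+//            name: local
+//            url: http://{{ .Address }}
+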
+func validateConfig(cfg Config) error {
+ if cfg.Name == "" {
+ return errors.New("'name' not set")
+ }
+ if err := validateDiscoveryConfig(cfg.Discover); err != nil {
+ return fmt.Errorf("discover config: %v", err)
+ }
+ if err := validateClassifyConfig(cfg.Classify); err != nil {
+ return fmt.Errorf("classify rules: %v", err)
+ }
+ if err := validateComposeConfig(cfg.Compose); err != nil {
+ return fmt.Errorf("compose rules: %v", err)
+ }
+ return nil
+}
+
+func validateDiscoveryConfig(config []DiscoveryConfig) error {
+ if len(config) == 0 {
+ return errors.New("no discoverers, must be at least one")
+ }
+ for _, cfg := range config {
+ switch cfg.Discoverer {
+ case "net_listeners", "docker", "k8s":
+ default:
+ return fmt.Errorf("unknown discoverer: '%s'", cfg.Discoverer)
+ }
+ }
+ return nil
+}
+
+func validateClassifyConfig(rules []ClassifyRuleConfig) error {
+ if len(rules) == 0 {
+ return errors.New("empty config, need least 1 rule")
+ }
+ for i, rule := range rules {
+ i++
+ if rule.Selector == "" {
+ return fmt.Errorf("'rule[%s][%d]->selector' not set", rule.Name, i)
+ }
+ if rule.Tags == "" {
+ return fmt.Errorf("'rule[%s][%d]->tags' not set", rule.Name, i)
+ }
+ if len(rule.Match) == 0 {
+ return fmt.Errorf("'rule[%s][%d]->match' not set, need at least 1 rule match", rule.Name, i)
+ }
+
+ for j, match := range rule.Match {
+ j++
+ if match.Tags == "" {
+ return fmt.Errorf("'rule[%s][%d]->match[%d]->tags' not set", rule.Name, i, j)
+ }
+ if match.Expr == "" {
+ return fmt.Errorf("'rule[%s][%d]->match[%d]->expr' not set", rule.Name, i, j)
+ }
+ }
+ }
+ return nil
+}
+
+func validateComposeConfig(rules []ComposeRuleConfig) error {
+ if len(rules) == 0 {
+ return errors.New("empty config, need least 1 rule")
+ }
+ for i, rule := range rules {
+ i++
+ if rule.Selector == "" {
+ return fmt.Errorf("'rule[%s][%d]->selector' not set", rule.Name, i)
+ }
+
+ if len(rule.Config) == 0 {
+ return fmt.Errorf("'rule[%s][%d]->config' not set", rule.Name, i)
+ }
+
+ for j, conf := range rule.Config {
+ j++
+ if conf.Selector == "" {
+ return fmt.Errorf("'rule[%s][%d]->config[%d]->selector' not set", rule.Name, i, j)
+ }
+ if conf.Template == "" {
+ return fmt.Errorf("'rule[%s][%d]->config[%d]->template' not set", rule.Name, i, j)
+ }
+ }
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap.go
new file mode 100644
index 000000000..5ed188a54
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap.go
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "regexp"
+ "strconv"
+ "text/template"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+ "github.com/Masterminds/sprig/v3"
+ "github.com/bmatcuk/doublestar/v4"
+)
+
+func newFuncMap() template.FuncMap {
+ custom := map[string]interface{}{
+ "match": funcMatchAny,
+ "glob": func(value, pattern string, patterns ...string) bool {
+ return funcMatchAny("glob", value, pattern, patterns...)
+ },
+ "promPort": func(port string) string {
+ v, _ := strconv.Atoi(port)
+ return prometheusPortAllocations[v]
+ },
+ }
+
+ fm := sprig.HermeticTxtFuncMap()
+
+ for name, fn := range custom {
+ fm[name] = fn
+ }
+
+ return fm
+}
+
+func funcMatchAny(typ, value, pattern string, patterns ...string) bool {
+ switch len(patterns) {
+ case 0:
+ return funcMatch(typ, value, pattern)
+ default:
+ return funcMatch(typ, value, pattern) || funcMatchAny(typ, value, patterns[0], patterns[1:]...)
+ }
+}
+
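+// funcMatch matches value against pattern using the given matcher type:
+// "glob" (the default), "sp" (simple patterns), "re" (regexp) or "dstar" (doublestar).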
+func funcMatch(typ string, value, pattern string) bool {
+ switch typ {
+ case "glob", "":
+ m, err := matcher.NewGlobMatcher(pattern)
+ return err == nil && m.MatchString(value)
+ case "sp":
+ m, err := matcher.NewSimplePatternsMatcher(pattern)
+ return err == nil && m.MatchString(value)
+ case "re":
+ ok, err := regexp.MatchString(pattern, value)
+ return err == nil && ok
+ case "dstar":
+ ok, err := doublestar.Match(pattern, value)
+ return err == nil && ok
+ default:
+ return false
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap_test.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap_test.go
new file mode 100644
index 000000000..3de71ef70
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/funcmap_test.go
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_funcMatchAny(t *testing.T) {
+ tests := map[string]struct {
+ typ string
+ patterns []string
+ value string
+ wantMatch bool
+ }{
+ "dstar: one param, matches": {
+ wantMatch: true,
+ typ: "dstar",
+ patterns: []string{"*"},
+ value: "value",
+ },
+ "dstar: one param, matches with *": {
+ wantMatch: true,
+ typ: "dstar",
+ patterns: []string{"**/value"},
+ value: "/one/two/three/value",
+ },
+ "dstar: one param, not matches": {
+ wantMatch: false,
+ typ: "dstar",
+ patterns: []string{"Value"},
+ value: "value",
+ },
+ "dstar: several params, last one matches": {
+ wantMatch: true,
+ typ: "dstar",
+ patterns: []string{"not", "matches", "*"},
+ value: "value",
+ },
+ "dstar: several params, no matches": {
+ wantMatch: false,
+ typ: "dstar",
+ patterns: []string{"not", "matches", "really"},
+ value: "value",
+ },
+ "re: one param, matches": {
+ wantMatch: true,
+ typ: "re",
+ patterns: []string{"^value$"},
+ value: "value",
+ },
+ "re: one param, not matches": {
+ wantMatch: false,
+ typ: "re",
+ patterns: []string{"^Value$"},
+ value: "value",
+ },
+ "re: several params, last one matches": {
+ wantMatch: true,
+ typ: "re",
+ patterns: []string{"not", "matches", "va[lue]{3}"},
+ value: "value",
+ },
+ "re: several params, no matches": {
+ wantMatch: false,
+ typ: "re",
+ patterns: []string{"not", "matches", "val[^l]ue"},
+ value: "value",
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ok := funcMatchAny(test.typ, test.value, test.patterns[0], test.patterns[1:]...)
+
+ assert.Equal(t, test.wantMatch, ok)
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline.go
new file mode 100644
index 000000000..4d391d41e
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline.go
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log/slog"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/discoverer/dockerd"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/discoverer/kubernetes"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/discoverer/netlisteners"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/hostinfo"
+)
+
+func New(cfg Config) (*Pipeline, error) {
+ if err := validateConfig(cfg); err != nil {
+ return nil, err
+ }
+
+ clr, err := newTargetClassificator(cfg.Classify)
+ if err != nil {
+ return nil, fmt.Errorf("classify rules: %v", err)
+ }
+
+ cmr, err := newConfigComposer(cfg.Compose)
+ if err != nil {
+ return nil, fmt.Errorf("compose rules: %v", err)
+ }
+
+ p := &Pipeline{
+ Logger: logger.New().With(
+ slog.String("component", "service discovery"),
+ slog.String("pipeline", cfg.Name),
+ ),
+ configDefaults: cfg.ConfigDefaults,
+ clr: clr,
+ cmr: cmr,
+ accum: newAccumulator(),
+ discoverers: make([]model.Discoverer, 0),
+ configs: make(map[string]map[uint64][]confgroup.Config),
+ }
+ p.accum.Logger = p.Logger
+
+ if err := p.registerDiscoverers(cfg); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+type (
+ Pipeline struct {
+ *logger.Logger
+
+ configDefaults confgroup.Registry
+ discoverers []model.Discoverer
+ accum *accumulator
+ clr classificator
+ cmr composer
+ configs map[string]map[uint64][]confgroup.Config // [targetSource][targetHash]
+ }
+ classificator interface {
+ classify(model.Target) model.Tags
+ }
+ composer interface {
+ compose(model.Target) []confgroup.Config
+ }
+)
+
+func (p *Pipeline) registerDiscoverers(conf Config) error {
+ for _, cfg := range conf.Discover {
+ switch cfg.Discoverer {
+ case "net_listeners":
+ cfg.NetListeners.Source = conf.Source
+ td, err := netlisteners.NewDiscoverer(cfg.NetListeners)
+ if err != nil {
+ return fmt.Errorf("failed to create '%s' discoverer: %v", cfg.Discoverer, err)
+ }
+ p.discoverers = append(p.discoverers, td)
+ case "docker":
+ if hostinfo.IsInsideK8sCluster() {
+ p.Infof("not registering '%s' discoverer: disabled in k8s environment", cfg.Discoverer)
+ continue
+ }
+ cfg.Docker.Source = conf.Source
+ td, err := dockerd.NewDiscoverer(cfg.Docker)
+ if err != nil {
+ return fmt.Errorf("failed to create '%s' discoverer: %v", cfg.Discoverer, err)
+ }
+ p.discoverers = append(p.discoverers, td)
+ case "k8s":
+ for _, k8sCfg := range cfg.K8s {
+ td, err := kubernetes.NewKubeDiscoverer(k8sCfg)
+ if err != nil {
+ return fmt.Errorf("failed to create '%s' discoverer: %v", cfg.Discoverer, err)
+ }
+ p.discoverers = append(p.discoverers, td)
+ }
+ default:
+ return fmt.Errorf("unknown discoverer: '%s'", cfg.Discoverer)
+ }
+ }
+
+ if len(p.discoverers) == 0 {
+ return errors.New("no discoverers registered")
+ }
+
+ return nil
+}
+
+func (p *Pipeline) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+ p.Info("instance is started")
+ defer p.Info("instance is stopped")
+
+ p.accum.discoverers = p.discoverers
+
+ updates := make(chan []model.TargetGroup)
+ done := make(chan struct{})
+
+ go func() { defer close(done); p.accum.run(ctx, updates) }()
+
+ for {
+ select {
+ case <-ctx.Done():
+ select {
+ case <-done:
+ case <-time.After(time.Second * 4):
+ }
+ return
+ case <-done:
+ return
+ case tggs := <-updates:
+ p.Debugf("received %d target groups", len(tggs))
+ if cfggs := p.processGroups(tggs); len(cfggs) > 0 {
+ select {
+ case <-ctx.Done():
+ case in <- cfggs: // FIXME: potentially stale configs if upstream cannot receive (blocking)
+ }
+ }
+ }
+ }
+}
+
+func (p *Pipeline) processGroups(tggs []model.TargetGroup) []*confgroup.Group {
+ var groups []*confgroup.Group
+ // updates come from the accumulator, which ensures that all groups have different sources
+ for _, tgg := range tggs {
+ p.Debugf("processing group '%s' with %d target(s)", tgg.Source(), len(tgg.Targets()))
+ if v := p.processGroup(tgg); v != nil {
+ groups = append(groups, v)
+ }
+ }
+ return groups
+}
+
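+// processGroup returns a config group for the target group's source only when
+// the cached set of configs changed: newly seen targets produced configs,
+// previously composed targets disappeared, or the source no longer reports any
+// targets at all. Otherwise it returns nil.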
+func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group {
+ if len(tgg.Targets()) == 0 {
+ if _, ok := p.configs[tgg.Source()]; !ok {
+ return nil
+ }
+ delete(p.configs, tgg.Source())
+
+ return &confgroup.Group{Source: tgg.Source()}
+ }
+
+ targetsCache, ok := p.configs[tgg.Source()]
+ if !ok {
+ targetsCache = make(map[uint64][]confgroup.Config)
+ p.configs[tgg.Source()] = targetsCache
+ }
+
+ var changed bool
+ seen := make(map[uint64]bool)
+
+ for _, tgt := range tgg.Targets() {
+ if tgt == nil {
+ continue
+ }
+
+ hash := tgt.Hash()
+ seen[hash] = true
+
+ if _, ok := targetsCache[hash]; ok {
+ continue
+ }
+
+ targetsCache[hash] = nil
+
+ if tags := p.clr.classify(tgt); len(tags) > 0 {
+ tgt.Tags().Merge(tags)
+
+ if cfgs := p.cmr.compose(tgt); len(cfgs) > 0 {
+ targetsCache[hash] = cfgs
+ changed = true
+
+ for _, cfg := range cfgs {
+ cfg.SetProvider(tgg.Provider())
+ cfg.SetSource(tgg.Source())
+ cfg.SetSourceType(confgroup.TypeDiscovered)
+ if def, ok := p.configDefaults.Lookup(cfg.Module()); ok {
+ cfg.ApplyDefaults(def)
+ }
+ }
+ }
+ }
+ }
+
+ for hash := range targetsCache {
+ if seen[hash] {
+ continue
+ }
+ if cfgs := targetsCache[hash]; len(cfgs) > 0 {
+ changed = true
+ }
+ delete(targetsCache, hash)
+ }
+
+ if !changed {
+ return nil
+ }
+
+ // TODO: deepcopy?
+ cfgGroup := &confgroup.Group{Source: tgg.Source()}
+
+ for _, cfgs := range targetsCache {
+ cfgGroup.Configs = append(cfgGroup.Configs, cfgs...)
+ }
+
+ return cfgGroup
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline_test.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline_test.go
new file mode 100644
index 000000000..e67b6d7ce
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/pipeline_test.go
@@ -0,0 +1,303 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/ilyam8/hashstructure"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+func Test_defaultConfigs(t *testing.T) {
+ dir := "../../../../config/go.d/sd/"
+ entries, err := os.ReadDir(dir)
+ require.NoError(t, err)
+
+ require.NotEmpty(t, entries)
+
+ for _, e := range entries {
+ if strings.Contains(e.Name(), "prometheus") {
+ continue
+ }
+ file, err := filepath.Abs(filepath.Join(dir, e.Name()))
+ require.NoError(t, err, "abs path")
+
+ bs, err := os.ReadFile(file)
+ require.NoError(t, err, "read config file")
+
+ var cfg Config
+ require.NoError(t, yaml.Unmarshal(bs, &cfg), "unmarshal")
+
+ _, err = New(cfg)
+ require.NoError(t, err, "create pipeline")
+ }
+}
+
+func TestNew(t *testing.T) {
+ tests := map[string]struct {
+ config string
+ wantErr bool
+ }{
+ "fails when config unset": {
+ wantErr: true,
+ config: "",
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+
+ var cfg Config
+ err := yaml.Unmarshal([]byte(test.config), &cfg)
+ require.Nilf(t, err, "cfg unmarshal")
+
+ _, err = New(cfg)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestPipeline_Run(t *testing.T) {
+ const config = `
+classify:
+ - selector: "rule1"
+ tags: "foo1"
+ match:
+ - tags: "bar1"
+ expr: '{{ glob .Name "mock*1*" }}'
+ - tags: "bar2"
+ expr: '{{ glob .Name "mock*2*" }}'
+compose:
+ - selector: "foo1"
+ config:
+ - selector: "bar1"
+ template: |
+ name: {{ .Name }}-foobar1
+ - selector: "bar2"
+ template: |
+ name: {{ .Name }}-foobar2
+`
+ tests := map[string]discoverySim{
+ "new group with no targets": {
+ config: config,
+ discoverers: []model.Discoverer{
+ newMockDiscoverer("",
+ newMockTargetGroup("test"),
+ ),
+ },
+ wantClassifyCalls: 0,
+ wantComposeCalls: 0,
+ wantConfGroups: nil,
+ },
+ "new group with targets": {
+ config: config,
+ discoverers: []model.Discoverer{
+ newMockDiscoverer("rule1",
+ newMockTargetGroup("test", "mock1", "mock2"),
+ ),
+ },
+ wantClassifyCalls: 2,
+ wantComposeCalls: 2,
+ wantConfGroups: []*confgroup.Group{
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
+ },
+ },
+ "existing group with same targets": {
+ config: config,
+ discoverers: []model.Discoverer{
+ newMockDiscoverer("rule1",
+ newMockTargetGroup("test", "mock1", "mock2"),
+ ),
+ newDelayedMockDiscoverer("rule1", 5,
+ newMockTargetGroup("test", "mock1", "mock2"),
+ ),
+ },
+ wantClassifyCalls: 2,
+ wantComposeCalls: 2,
+ wantConfGroups: []*confgroup.Group{
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
+ },
+ },
+ "existing group that previously had targets with no targets": {
+ config: config,
+ discoverers: []model.Discoverer{
+ newMockDiscoverer("rule1",
+ newMockTargetGroup("test", "mock1", "mock2"),
+ ),
+ newDelayedMockDiscoverer("rule1", 5,
+ newMockTargetGroup("test"),
+ ),
+ },
+ wantClassifyCalls: 2,
+ wantComposeCalls: 2,
+ wantConfGroups: []*confgroup.Group{
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
+ prepareDiscoveredGroup(),
+ },
+ },
+ "existing group with old and new targets": {
+ config: config,
+ discoverers: []model.Discoverer{
+ newMockDiscoverer("rule1",
+ newMockTargetGroup("test", "mock1", "mock2"),
+ ),
+ newDelayedMockDiscoverer("rule1", 5,
+ newMockTargetGroup("test", "mock1", "mock2", "mock11", "mock22"),
+ ),
+ },
+ wantClassifyCalls: 4,
+ wantComposeCalls: 4,
+ wantConfGroups: []*confgroup.Group{
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2", "mock11-foobar1", "mock22-foobar2"),
+ },
+ },
+ "existing group with new targets only": {
+ config: config,
+ discoverers: []model.Discoverer{
+ newMockDiscoverer("rule1",
+ newMockTargetGroup("test", "mock1", "mock2"),
+ ),
+ newDelayedMockDiscoverer("rule1", 5,
+ newMockTargetGroup("test", "mock11", "mock22"),
+ ),
+ },
+ wantClassifyCalls: 4,
+ wantComposeCalls: 4,
+ wantConfGroups: []*confgroup.Group{
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
+ prepareDiscoveredGroup("mock11-foobar1", "mock22-foobar2"),
+ },
+ },
+ }
+
+ for name, sim := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim.run(t)
+ })
+ }
+}
+
+func prepareDiscoveredGroup(configNames ...string) *confgroup.Group {
+ var configs []confgroup.Config
+
+ for _, name := range configNames {
+ configs = append(configs, confgroup.Config{}.
+ SetProvider("mock").
+ SetSourceType(confgroup.TypeDiscovered).
+ SetSource("test").
+ SetName(name))
+ }
+
+ return &confgroup.Group{
+ Source: "test",
+ Configs: configs,
+ }
+}
+
+func newMockDiscoverer(tags string, tggs ...model.TargetGroup) *mockDiscoverer {
+ return &mockDiscoverer{
+ tags: mustParseTags(tags),
+ tggs: tggs,
+ }
+}
+
+func newDelayedMockDiscoverer(tags string, delay int, tggs ...model.TargetGroup) *mockDiscoverer {
+ return &mockDiscoverer{
+ tags: mustParseTags(tags),
+ tggs: tggs,
+ delay: time.Duration(delay) * time.Second,
+ }
+}
+
+type mockDiscoverer struct {
+ tggs []model.TargetGroup
+ tags model.Tags
+ delay time.Duration
+}
+
+func (md mockDiscoverer) String() string {
+ return "mock discoverer"
+}
+
+func (md mockDiscoverer) Discover(ctx context.Context, out chan<- []model.TargetGroup) {
+ for _, tgg := range md.tggs {
+ for _, tgt := range tgg.Targets() {
+ tgt.Tags().Merge(md.tags)
+ }
+ }
+
+ select {
+ case <-ctx.Done():
+ case <-time.After(md.delay):
+ select {
+ case <-ctx.Done():
+ case out <- md.tggs:
+ }
+ }
+}
+
+func newMockTargetGroup(source string, targets ...string) *mockTargetGroup {
+ m := &mockTargetGroup{source: source}
+ for _, name := range targets {
+ m.targets = append(m.targets, &mockTarget{Name: name})
+ }
+ return m
+}
+
+type mockTargetGroup struct {
+ targets []model.Target
+ source string
+}
+
+func (mg mockTargetGroup) Targets() []model.Target { return mg.targets }
+func (mg mockTargetGroup) Source() string { return mg.source }
+func (mg mockTargetGroup) Provider() string { return "mock" }
+
+func newMockTarget(name string, tags ...string) *mockTarget {
+ m := &mockTarget{Name: name}
+ v, _ := model.ParseTags(strings.Join(tags, " "))
+ m.Tags().Merge(v)
+ return m
+}
+
+type mockTarget struct {
+ model.Base
+ Name string
+}
+
+func (mt mockTarget) TUID() string { return mt.Name }
+func (mt mockTarget) Hash() uint64 { return mustCalcHash(mt.Name) }
+
+func mustParseTags(line string) model.Tags {
+ v, err := model.ParseTags(line)
+ if err != nil {
+ panic(fmt.Sprintf("mustParseTags: %v", err))
+ }
+ return v
+}
+
+func mustCalcHash(obj any) uint64 {
+ hash, err := hashstructure.Hash(obj, nil)
+ if err != nil {
+ panic(fmt.Sprintf("hash calculation: %v", err))
+ }
+ return hash
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/promport.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/promport.go
new file mode 100644
index 000000000..646e1abb1
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/promport.go
@@ -0,0 +1,662 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
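+// prometheusPortAllocations maps well-known exporter listen ports to exporter
+// names, based on the Prometheus default port allocations list below.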
+// https://github.com/prometheus/prometheus/wiki/Default-port-allocations
+var prometheusPortAllocations = map[int]string{
+ 2019: "caddy",
+ 3000: "grafana",
+ 3100: "loki",
+ 5555: "prometheus-jdbc-exporter",
+ 6060: "crowdsec",
+ 7300: "midonet_agent",
+ 8001: "netbox",
+ 8088: "fawkes",
+ 8089: "prom2teams",
+ 8292: "phabricator_webhook_for_alertmanager",
+ 8404: "ha_proxy_v2_plus",
+ 9042: "rds_exporter",
+ 9087: "telegram_bot_for_alertmanager",
+ 9091: "pushgateway",
+ 9097: "jiralert",
+ 9101: "haproxy_exporter",
+ 9102: "statsd_exporter",
+ 9103: "collectd_exporter",
+ 9104: "mysqld_exporter",
+ 9105: "mesos_exporter",
+ 9106: "cloudwatch_exporter",
+ 9107: "consul_exporter",
+ 9108: "graphite_exporter",
+ 9109: "graphite_exporter",
+ 9110: "blackbox_exporter",
+ 9111: "expvar_exporter",
+ 9112: "promacct_pcap-based_network_traffic_accounting",
+ 9113: "nginx_exporter",
+ 9114: "elasticsearch_exporter",
+ 9115: "blackbox_exporter",
+ 9116: "snmp_exporter",
+ 9117: "apache_exporter",
+ 9118: "jenkins_exporter",
+ 9119: "bind_exporter",
+ 9120: "powerdns_exporter",
+ 9121: "redis_exporter",
+ 9122: "influxdb_exporter",
+ 9123: "rethinkdb_exporter",
+ 9124: "freebsd_sysctl_exporter",
+ 9125: "statsd_exporter",
+ 9126: "new_relic_exporter",
+ 9127: "pgbouncer_exporter",
+ 9128: "ceph_exporter",
+ 9129: "haproxy_log_exporter",
+ 9130: "unifi_poller",
+ 9131: "varnish_exporter",
+ 9132: "airflow_exporter",
+ 9133: "fritz_box_exporter",
+ 9134: "zfs_exporter",
+ 9135: "rtorrent_exporter",
+ 9136: "collins_exporter",
+ 9137: "silicondust_hdhomerun_exporter",
+ 9138: "heka_exporter",
+ 9139: "azure_sql_exporter",
+ 9140: "mirth_exporter",
+ 9141: "zookeeper_exporter",
+ 9142: "big-ip_exporter",
+ 9143: "cloudmonitor_exporter",
+ 9145: "aerospike_exporter",
+ 9146: "icecast_exporter",
+ 9147: "nginx_request_exporter",
+ 9148: "nats_exporter",
+ 9149: "passenger_exporter",
+ 9150: "memcached_exporter",
+ 9151: "varnish_request_exporter",
+ 9152: "command_runner_exporter",
+ 9154: "postfix_exporter",
+ 9155: "vsphere_graphite",
+ 9156: "webdriver_exporter",
+ 9157: "ibm_mq_exporter",
+ 9158: "pingdom_exporter",
+ 9160: "apache_flink_exporter",
+ 9161: "oracle_db_exporter",
+ 9162: "apcupsd_exporter",
+ 9163: "zgres_exporter",
+ 9164: "s6_exporter",
+ 9165: "keepalived_exporter",
+ 9166: "dovecot_exporter",
+ 9167: "unbound_exporter",
+ 9168: "gitlab-monitor",
+ 9169: "lustre_exporter",
+ 9170: "docker_hub_exporter",
+ 9171: "github_exporter",
+ 9172: "script_exporter",
+ 9173: "rancher_exporter",
+ 9174: "docker-cloud_exporter",
+ 9175: "saltstack_exporter",
+ 9176: "openvpn_exporter",
+ 9177: "libvirt_exporter",
+ 9178: "stream_exporter",
+ 9179: "shield_exporter",
+ 9180: "scylladb_exporter",
+ 9181: "openstack_ceilometer_exporter",
+ 9183: "openstack_exporter",
+ 9184: "twitch_exporter",
+ 9185: "kafka_topic_exporter",
+ 9186: "cloud_foundry_firehose_exporter",
+ 9187: "postgresql_exporter",
+ 9188: "crypto_exporter",
+ 9189: "hetzner_cloud_csi_driver_nodes",
+ 9190: "bosh_exporter",
+ 9191: "netflow_exporter",
+ 9192: "ceph_exporter",
+ 9193: "cloud_foundry_exporter",
+ 9194: "bosh_tsdb_exporter",
+ 9195: "maxscale_exporter",
+ 9196: "upnp_internet_gateway_device_exporter",
+ 9198: "logstash_exporter",
+ 9199: "cloudflare_exporter",
+ 9202: "pacemaker_exporter",
+ 9203: "domain_exporter",
+ 9204: "pcsensor_temper_exporter",
+ 9205: "nextcloud_exporter",
+ 9206: "elasticsearch_exporter",
+ 9207: "mysql_exporter",
+ 9208: "kafka_consumer_group_exporter",
+ 9209: "fastnetmon_advanced_exporter",
+ 9210: "netatmo_exporter",
+ 9211: "dnsbl-exporter",
+ 9212: "digitalocean_exporter",
+ 9213: "custom_exporter",
+ 9214: "mqtt_blackbox_exporter",
+ 9215: "prometheus_graphite_bridge",
+ 9216: "mongodb_exporter",
+ 9217: "consul_agent_exporter",
+ 9218: "promql-guard",
+ 9219: "ssl_certificate_exporter",
+ 9220: "netapp_trident_exporter",
+ 9221: "proxmox_ve_exporter",
+ 9222: "aws_ecs_exporter",
+ 9223: "bladepsgi_exporter",
+ 9224: "fluentd_exporter",
+ 9225: "mailexporter",
+ 9226: "allas",
+ 9227: "proc_exporter",
+ 9228: "flussonic_exporter",
+ 9229: "gitlab-workhorse",
+ 9230: "network_ups_tools_exporter",
+ 9231: "solr_exporter",
+ 9232: "osquery_exporter",
+ 9233: "mgmt_exporter",
+ 9234: "mosquitto_exporter",
+ 9235: "gitlab-pages_exporter",
+ 9236: "gitlab_gitaly_exporter",
+ 9237: "sql_exporter",
+ 9238: "uwsgi_exporter",
+ 9239: "surfboard_exporter",
+ 9240: "tinyproxy_exporter",
+ 9241: "arangodb_exporter",
+ 9242: "ceph_radosgw_usage_exporter",
+ 9243: "chef_compliance_exporter",
+ 9244: "moby_container_exporter",
+ 9245: "naemon_nagios_exporter",
+ 9246: "smartpi",
+ 9247: "sphinx_exporter",
+ 9248: "freebsd_gstat_exporter",
+ 9249: "apache_flink_metrics_reporter",
+ 9250: "opentsdb_exporter",
+ 9251: "sensu_exporter",
+ 9252: "gitlab_runner_exporter",
+ 9253: "php-fpm_exporter",
+ 9254: "kafka_burrow_exporter",
+ 9255: "google_stackdriver_exporter",
+ 9256: "td-agent_exporter",
+ 9257: "smart_exporter",
+ 9258: "hello_sense_exporter",
+ 9259: "azure_resources_exporter",
+ 9260: "buildkite_exporter",
+ 9261: "grafana_exporter",
+ 9262: "bloomsky_exporter",
+ 9263: "vmware_guest_exporter",
+ 9264: "nest_exporter",
+ 9265: "weather_exporter",
+ 9266: "openhab_exporter",
+ 9267: "nagios_livestatus_exporter",
+ 9268: "cratedb_remote_remote_read_write_adapter",
+ 9269: "fluent-agent-lite_exporter",
+ 9270: "jmeter_exporter",
+ 9271: "pagespeed_exporter",
+ 9272: "vmware_exporter",
+ 9274: "kubernetes_persistentvolume_disk_usage_exporter",
+ 9275: "nrpe_exporter",
+ 9276: "azure_monitor_exporter",
+ 9277: "mongo_collection_exporter",
+ 9278: "crypto_miner_exporter",
+ 9279: "instaclustr_exporter",
+ 9280: "citrix_netscaler_exporter",
+ 9281: "fastd_exporter",
+ 9282: "freeswitch_exporter",
+ 9283: "ceph_ceph-mgr_prometheus_plugin",
+ 9284: "gobetween",
+ 9285: "database_exporter",
+ 9286: "vdo_compression_and_deduplication_exporter",
+ 9287: "ceph_iscsi_gateway_statistics",
+ 9288: "consrv",
+ 9289: "lovoos_ipmi_exporter",
+ 9290: "soundclouds_ipmi_exporter",
+ 9291: "ibm_z_hmc_exporter",
+ 9292: "netapp_ontap_api_exporter",
+ 9293: "connection_status_exporter",
+ 9294: "miflora_flower_care_exporter",
+ 9295: "freifunk_exporter",
+ 9296: "odbc_exporter",
+ 9297: "machbase_exporter",
+ 9298: "generic_exporter",
+ 9299: "exporter_aggregator",
+ 9301: "squid_exporter",
+ 9302: "faucet_sdn_faucet_exporter",
+ 9303: "faucet_sdn_gauge_exporter",
+ 9304: "logstash_exporter",
+ 9305: "go-ethereum_exporter",
+ 9306: "kyototycoon_exporter",
+ 9307: "audisto_exporter",
+ 9308: "kafka_exporter",
+ 9309: "fluentd_exporter",
+ 9310: "open_vswitch_exporter",
+ 9311: "iota_exporter",
+ 9313: "cloudprober_exporter",
+ 9314: "eris_exporter",
+ 9315: "centrifugo_exporter",
+ 9316: "tado_exporter",
+ 9317: "tellstick_local_exporter",
+ 9318: "conntrack_exporter",
+ 9319: "flexlm_exporter",
+ 9320: "consul_telemetry_exporter",
+ 9321: "spring_boot_actuator_exporter",
+ 9322: "haproxy_abuser_exporter",
+ 9323: "docker_prometheus_metrics",
+ 9324: "bird_routing_daemon_exporter",
+ 9325: "ovirt_exporter",
+ 9326: "junos_exporter",
+ 9327: "s3_exporter",
+ 9328: "openldap_syncrepl_exporter",
+ 9329: "cups_exporter",
+ 9330: "openldap_metrics_exporter",
+ 9331: "influx-spout_prometheus_metrics",
+ 9332: "network_exporter",
+ 9333: "vault_pki_exporter",
+ 9334: "ejabberd_exporter",
+ 9335: "nexsan_exporter",
+ 9336: "mediacom_internet_usage_exporter",
+ 9337: "mqttgateway",
+ 9339: "aws_s3_exporter",
+ 9340: "financial_quotes_exporter",
+ 9341: "slurm_exporter",
+ 9342: "frr_exporter",
+ 9343: "gridserver_exporter",
+ 9344: "mqtt_exporter",
+ 9345: "ruckus_smartzone_exporter",
+ 9346: "ping_exporter",
+ 9347: "junos_exporter",
+ 9348: "bigquery_exporter",
+ 9349: "configurable_elasticsearch_query_exporter",
+ 9350: "thousandeyes_exporter",
+ 9351: "wal-e_wal-g_exporter",
+ 9352: "nature_remo_exporter",
+ 9353: "ceph_exporter",
+ 9354: "deluge_exporter",
+ 9355: "nightwatchjs_exporter",
+ 9356: "pacemaker_exporter",
+ 9357: "p1_exporter",
+ 9358: "performance_counters_exporter",
+ 9359: "sidekiq_prometheus",
+ 9360: "powershell_exporter",
+ 9361: "scaleway_sd_exporter",
+ 9362: "cisco_exporter",
+ // Netdata has a clickhouse collector.
+ // ClickHouse itself exposes messy Prometheus metrics: camelCase names, and instance identifiers appended to metric names instead of labels.
+ //9363: "clickhouse",
+ 9364: "continent8_exporter",
+ 9365: "cumulus_linux_exporter",
+ 9366: "haproxy_stick_table_exporter",
+ 9367: "teamspeak3_exporter",
+ 9368: "ethereum_client_exporter",
+ 9369: "prometheus_pushprox",
+ 9370: "u-bmc",
+ 9371: "conntrack-stats-exporter",
+ 9372: "appmetrics_prometheus",
+ 9373: "gcp_service_discovery",
+ 9374: "smokeping_prober",
+ 9375: "particle_exporter",
+ 9376: "falco",
+ 9377: "cisco_aci_exporter",
+ 9378: "etcd_grpc_proxy_exporter",
+ 9379: "etcd_exporter",
+ 9380: "mythtv_exporter",
+ 9381: "kafka_zookeeper_exporter",
+ 9382: "frrouting_exporter",
+ 9383: "aws_health_exporter",
+ 9384: "aws_sqs_exporter",
+ 9385: "apcupsdexporter",
+ 9386: "tankerkönig_api_exporter",
+ 9387: "sabnzbd_exporter",
+ 9388: "linode_exporter",
+ 9389: "scylla-cluster-tests_exporter",
+ 9390: "kannel_exporter",
+ 9391: "concourse_prometheus_metrics",
+ 9392: "generic_command_line_output_exporter",
+ 9393: "alertmanager_github_webhook_receiver",
+ 9394: "ruby_prometheus_exporter",
+ 9395: "ldap_exporter",
+ 9396: "monerod_exporter",
+ 9397: "comap",
+ 9398: "open_hardware_monitor_exporter",
+ 9399: "prometheus_sql_exporter",
+ 9400: "ripe_atlas_exporter",
+ 9401: "1-wire_exporter",
+ 9402: "google_cloud_platform_exporter",
+ 9403: "zerto_exporter",
+ 9404: "jmx_exporter",
+ 9405: "discourse_exporter",
+ 9406: "hhvm_exporter",
+ 9407: "obs_studio_exporter",
+ 9408: "rds_enhanced_monitoring_exporter",
+ 9409: "ovn-kubernetes_master_exporter",
+ 9410: "ovn-kubernetes_node_exporter",
+ 9411: "softether_exporter",
+ 9412: "sentry_exporter",
+ 9413: "mogilefs_exporter",
+ 9414: "homey_exporter",
+ 9415: "cloudwatch_read_adapter",
+ 9416: "hp_ilo_metrics_exporter",
+ 9417: "ethtool_exporter",
+ 9418: "gearman_exporter",
+ 9419: "rabbitmq_exporter",
+ 9420: "couchbase_exporter",
+ 9421: "apicast",
+ 9422: "jolokia_exporter",
+ 9423: "hp_raid_exporter",
+ 9424: "influxdb_stats_exporter",
+ 9425: "pachyderm_exporter",
+ 9426: "vespa_engine_exporter",
+ 9427: "ping_exporter",
+ 9428: "ssh_exporter",
+ 9429: "uptimerobot_exporter",
+ 9430: "corerad",
+ 9431: "hpfeeds_broker_exporter",
+ 9432: "windows_perflib_exporter",
+ 9433: "knot_exporter",
+ 9434: "opensips_exporter",
+ 9435: "ebpf_exporter",
+ 9436: "mikrotik-exporter",
+ 9437: "dell_emc_isilon_exporter",
+ 9438: "dell_emc_ecs_exporter",
+ 9439: "bitcoind_exporter",
+ 9440: "ravendb_exporter",
+ 9441: "nomad_exporter",
+ 9442: "mcrouter_exporter",
+ 9444: "foundationdb_exporter",
+ 9445: "nvidia_gpu_exporter",
+ 9446: "orange_livebox_dsl_modem_exporter",
+ 9447: "resque_exporter",
+ 9448: "eventstore_exporter",
+ 9449: "omeroserver_exporter",
+ 9450: "habitat_exporter",
+ 9451: "reindexer_exporter",
+ 9452: "freebsd_jail_exporter",
+ 9453: "midonet-kubernetes",
+ 9454: "nvidia_smi_exporter",
+ 9455: "iptables_exporter",
+ 9456: "aws_lambda_exporter",
+ 9457: "files_content_exporter",
+ 9458: "rocketchat_exporter",
+ 9459: "yarn_exporter",
+ 9460: "hana_exporter",
+ 9461: "aws_lambda_read_adapter",
+ 9462: "php_opcache_exporter",
+ 9463: "virgin_media_liberty_global_hub3_exporter",
+ 9464: "opencensus-nodejs_prometheus_exporter",
+ 9465: "hetzner_cloud_k8s_cloud_controller_manager",
+ 9466: "mqtt_push_gateway",
+ 9467: "nginx-prometheus-shiny-exporter",
+ 9468: "nasa-swpc-exporter",
+ 9469: "script_exporter",
+ 9470: "cachet_exporter",
+ 9471: "lxc-exporter",
+ 9472: "hetzner_cloud_csi_driver_controller",
+ 9473: "stellar-core-exporter",
+ 9474: "libvirtd_exporter",
+ 9475: "wgipamd",
+ 9476: "ovn_metrics_exporter",
+ 9477: "csp_violation_report_exporter",
+ 9478: "sentinel_exporter",
+ 9479: "elasticbeat_exporter",
+ 9480: "brigade_exporter",
+ 9481: "drbd9_exporter",
+ 9482: "vector_packet_process_vpp_exporter",
+ 9483: "ibm_app_connect_enterprise_exporter",
+ 9484: "kubedex-exporter",
+ 9485: "emarsys_exporter",
+ 9486: "domoticz_exporter",
+ 9487: "docker_stats_exporter",
+ 9488: "bmw_connected_drive_exporter",
+ 9489: "tezos_node_metrics_exporter",
+ 9490: "exporter_for_docker_libnetwork_plugin_for_ovn",
+ 9491: "docker_container_stats_exporter_docker_ps",
+ 9492: "azure_exporter_monitor_and_usage",
+ 9493: "prosafe_exporter",
+ 9494: "kamailio_exporter",
+ 9495: "ingestor_exporter",
+ 9496: "389ds_ipa_exporter",
+ 9497: "immudb_exporter",
+ 9498: "tp-link_hs110_exporter",
+ 9499: "smartthings_exporter",
+ 9500: "cassandra_exporter",
+ 9501: "hetznercloud_exporter",
+ 9502: "hetzner_exporter",
+ 9503: "scaleway_exporter",
+ 9504: "github_exporter",
+ 9505: "dockerhub_exporter",
+ 9506: "jenkins_exporter",
+ 9507: "owncloud_exporter",
+ 9508: "ccache_exporter",
+ 9509: "hetzner_storagebox_exporter",
+ 9510: "dummy_exporter",
+ 9512: "cloudera_exporter",
+ 9513: "openconfig_streaming_telemetry_exporter",
+ 9514: "app_stores_exporter",
+ 9515: "swarm-exporter",
+ 9516: "prometheus_speedtest_exporter",
+ 9517: "matroschka_prober",
+ 9518: "crypto_stock_exchanges_funds_exporter",
+ 9519: "acurite_exporter",
+ 9520: "swift_health_exporter",
+ 9521: "ruuvi_exporter",
+ 9522: "tftp_exporter",
+ 9523: "3cx_exporter",
+ 9524: "loki_exporter",
+ 9525: "alibaba_cloud_exporter",
+ 9526: "kafka_lag_exporter",
+ 9527: "netgear_cable_modem_exporter",
+ 9528: "total_connect_comfort_exporter",
+ 9529: "octoprint_exporter",
+ 9530: "custom_prometheus_exporter",
+ 9531: "jfrog_artifactory_exporter",
+ 9532: "snyk_exporter",
+ 9533: "network_exporter_for_cisco_api",
+ 9534: "humio_exporter",
+ 9535: "cron_exporter",
+ 9536: "ipsec_exporter",
+ 9537: "cri-o",
+ 9538: "bull_queue",
+ 9539: "modemmanager_exporter",
+ 9540: "emq_exporter",
+ 9541: "smartmon_exporter",
+ 9542: "sakuracloud_exporter",
+ 9543: "kube2iam_exporter",
+ 9544: "pgio_exporter",
+ 9545: "hp_ilo4_exporter",
+ 9546: "pwrstat-exporter",
+ 9547: "patroni_exporter",
+ 9548: "trafficserver_exporter",
+ 9549: "raspberry_exporter",
+ 9550: "rtl_433_exporter",
+ 9551: "hostapd_exporter",
+ 9552: "aws_elastic_beanstalk_exporter",
+ 9553: "apt_exporter",
+ 9554: "acc_server_manager_exporter",
+ 9555: "sona_exporter",
+ 9556: "routinator_exporter",
+ 9557: "mysql_count_exporter",
+ 9558: "systemd_exporter",
+ 9559: "ntp_exporter",
+ 9560: "sql_queries_exporter",
+ 9561: "qbittorrent_exporter",
+ 9562: "ptv_xserver_exporter",
+ 9563: "kibana_exporter",
+ 9564: "purpleair_exporter",
+ 9565: "bminer_exporter",
+ 9566: "rabbitmq_cli_consumer",
+ 9567: "alertsnitch",
+ 9568: "dell_poweredge_ipmi_exporter",
+ 9569: "hvpa_controller",
+ 9570: "vpa_exporter",
+ 9571: "helm_exporter",
+ 9572: "ctld_exporter",
+ 9573: "jkstatus_exporter",
+ 9574: "opentracker_exporter",
+ 9575: "poweradmin_server_monitor_exporter",
+ 9576: "exabgp_exporter",
+ 9578: "aria2_exporter",
+ 9579: "iperf3_exporter",
+ 9580: "azure_service_bus_exporter",
+ 9581: "codenotary_vcn_exporter",
+ 9583: "signatory_a_remote_operation_signer_for_tezos",
+ 9584: "bunnycdn_exporter",
+ 9585: "opvizor_performance_analyzer_process_exporter",
+ 9586: "wireguard_exporter",
+ 9587: "nfs-ganesha_exporter",
+ 9588: "ltsv-tailer_exporter",
+ 9589: "goflow_exporter",
+ 9590: "flow_exporter",
+ 9591: "srcds_exporter",
+ 9592: "gcp_quota_exporter",
+ 9593: "lighthouse_exporter",
+ 9594: "plex_exporter",
+ 9595: "netio_exporter",
+ 9596: "azure_elastic_sql_exporter",
+ 9597: "github_vulnerability_alerts_exporter",
+ 9599: "pirograph_exporter",
+ 9600: "circleci_exporter",
+ 9601: "messagebird_exporter",
+ 9602: "modbus_exporter",
+ 9603: "xen_exporter_using_xenlight",
+ 9604: "xmpp_blackbox_exporter",
+ 9605: "fping-exporter",
+ 9606: "ecr-exporter",
+ 9607: "raspberry_pi_sense_hat_exporter",
+ 9608: "ironic_prometheus_exporter",
+ 9609: "netapp_exporter",
+ 9610: "kubernetes_exporter",
+ 9611: "speedport_exporter",
+ 9612: "opflex-agent_exporter",
+ 9613: "azure_health_exporter",
+ 9614: "nut_upsc_exporter",
+ 9615: "mellanox_mlx5_exporter",
+ 9616: "mailgun_exporter",
+ 9617: "pi-hole_exporter",
+ 9618: "stellar-account-exporter",
+ 9619: "stellar-horizon-exporter",
+ 9620: "rundeck_exporter",
+ 9621: "opennebula_exporter",
+ 9622: "bmc_exporter",
+ 9623: "tc4400_exporter",
+ 9624: "pact_broker_exporter",
+ 9625: "bareos_exporter",
+ 9626: "hockeypuck",
+ 9627: "artifactory_exporter",
+ 9628: "solace_pubsub_plus_exporter",
+ 9629: "prometheus_gitlab_notifier",
+ 9630: "nftables_exporter",
+ 9631: "a_op5_monitor_exporter",
+ 9632: "opflex-server_exporter",
+ 9633: "smartctl_exporter",
+ 9634: "aerospike_ttl_exporter",
+ 9635: "fail2ban_exporter",
+ 9636: "exim4_exporter",
+ 9637: "kubeversion_exporter",
+ 9638: "a_icinga2_exporter",
+ 9639: "scriptable_jmx_exporter",
+ 9640: "logstash_output_exporter",
+ 9641: "coturn_exporter",
+ 9642: "bugsnag_exporter",
+ 9644: "exporter_for_grouped_process",
+ 9645: "burp_exporter",
+ 9646: "locust_exporter",
+ 9647: "docker_exporter",
+ 9648: "ntpmon_exporter",
+ 9649: "logstash_exporter",
+ 9650: "keepalived_exporter",
+ 9651: "storj_exporter",
+ 9652: "praefect_exporter",
+ 9653: "jira_issues_exporter",
+ 9654: "ansible_galaxy_exporter",
+ 9655: "kube-netc_exporter",
+ 9656: "matrix",
+ 9657: "krill_exporter",
+ 9658: "sap_hana_sql_exporter",
+ 9660: "kaiterra_laser_egg_exporter",
+ 9661: "hashpipe_exporter",
+ 9662: "pms5003_particulate_matter_sensor_exporter",
+ 9663: "sap_nwrfc_exporter",
+ 9664: "linux_ha_clusterlabs_exporter",
+ 9665: "senderscore_exporter",
+ 9666: "alertmanager_silences_exporter",
+ 9667: "smtpd_exporter",
+ 9668: "suses_sap_hana_exporter",
+ 9669: "panopticon_native_metrics",
+ 9670: "flare_native_metrics",
+ 9671: "aws_ec2_spot_exporter",
+ 9672: "aircontrol_co2_exporter",
+ 9673: "co2_monitor_exporter",
+ 9674: "google_analytics_exporter",
+ 9675: "docker_swarm_exporter",
+ 9676: "hetzner_traffic_exporter",
+ 9677: "aws_ecs_exporter",
+ 9678: "ircd_user_exporter",
+ 9679: "aws_health_exporter",
+ 9680: "suses_sap_host_exporter",
+ 9681: "myfitnesspal_exporter",
+ 9682: "powder_monkey",
+ 9683: "infiniband_exporter",
+ 9684: "kibana_standalone_exporter",
+ 9685: "eideticom",
+ 9686: "aws_ec2_exporter",
+ 9687: "gitaly_blackbox_exporter",
+ 9689: "lan_server_modbus_exporter",
+ 9690: "tcp_longterm_connection_exporter",
+ 9691: "celery_redis_exporter",
+ 9692: "gcp_gce_exporter",
+ 9693: "sigma_air_manager_exporter",
+ 9694: "per-user_usage_exporter_for_cisco_xe_lnss",
+ 9695: "cifs_exporter",
+ 9696: "jitsi_videobridge_exporter",
+ 9697: "tendermint_blockchain_exporter",
+ 9698: "integrated_dell_remote_access_controller_idrac_exporter",
+ 9699: "pyncette_exporter",
+ 9700: "jitsi_meet_exporter",
+ 9701: "workbook_exporter",
+ 9702: "homeplug_plc_exporter",
+ 9703: "vircadia",
+ 9704: "linux_tc_exporter",
+ 9705: "upc_connect_box_exporter",
+ 9706: "postfix_exporter",
+ 9707: "radarr_exporter",
+ 9708: "sonarr_exporter",
+ 9709: "hadoop_hdfs_fsimage_exporter",
+ 9710: "nut-exporter",
+ 9711: "cloudflare_flan_scan_report_exporter",
+ 9712: "siemens_s7_plc_exporter",
+ 9713: "glusterfs_exporter",
+ 9714: "fritzbox_exporter",
+ 9715: "twincat_ads_web_service_exporter",
+ 9716: "signald_webhook_receiver",
+ 9717: "tplink_easysmart_switch_exporter",
+ 9718: "warp10_exporter",
+ 9719: "pgpool-ii_exporter",
+ 9720: "moodle_db_exporter",
+ 9721: "gtp_exporter",
+ 9722: "miele_exporter",
+ 9724: "freeswitch_exporter",
+ 9725: "sunnyboy_exporter",
+ 9726: "python_rq_exporter",
+ 9727: "ctdb_exporter",
+ 9728: "nginx-rtmp_exporter",
+ 9729: "libvirtd_exporter",
+ 9730: "lynis_exporter",
+ 9731: "nebula_mam_exporter",
+ 9732: "nftables_exporter",
+ 9733: "honeypot_exporter",
+ 9734: "a10-networks_prometheus_exporter",
+ 9735: "webweaver",
+ 9736: "mongodb_query_exporter",
+ 9737: "folding_home_exporter",
+ 9738: "processor_counter_monitor_exporter",
+ 9739: "kafka_consumer_lag_monitoring",
+ 9740: "flightdeck",
+ 9741: "ibm_spectrum_exporter",
+ 9742: "transmission-exporter",
+ 9743: "sma-exporter",
+ 9803: "site24x7_exporter",
+ 9901: "envoy_proxy",
+ 9913: "nginx_vts_exporter",
+ 9943: "filestat_exporter",
+ 9980: "login_exporter",
+ 9983: "sia_exporter",
+ 9984: "couchdb_exporter",
+ 9987: "netapp_solidfire_exporter",
+ 9990: "wildfly_exporter",
+ 16995: "storidge_exporter",
+ 19091: "transmission_exporter",
+ 24231: "fluent_plugin_for_prometheus",
+ 42004: "proxysql_exporter",
+ 44323: "pcp_exporter",
+ 61091: "dcos_exporter",
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/selector.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/selector.go
new file mode 100644
index 000000000..cdd2cf000
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/selector.go
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+)
+
+type selector interface {
+ matches(model.Tags) bool
+}
+
+type (
+ exactSelector string
+ trueSelector struct{}
+ negSelector struct{ selector }
+ orSelector struct{ lhs, rhs selector }
+ andSelector struct{ lhs, rhs selector }
+)
+
+func (s exactSelector) matches(tags model.Tags) bool { _, ok := tags[string(s)]; return ok }
+func (s trueSelector) matches(model.Tags) bool { return true }
+func (s negSelector) matches(tags model.Tags) bool { return !s.selector.matches(tags) }
+func (s orSelector) matches(tags model.Tags) bool { return s.lhs.matches(tags) || s.rhs.matches(tags) }
+func (s andSelector) matches(tags model.Tags) bool { return s.lhs.matches(tags) && s.rhs.matches(tags) }
+
+func (s exactSelector) String() string { return "{" + string(s) + "}" }
+func (s negSelector) String() string { return "{!" + stringify(s.selector) + "}" }
+func (s trueSelector) String() string { return "{*}" }
+func (s orSelector) String() string { return "{" + stringify(s.lhs) + "|" + stringify(s.rhs) + "}" }
+func (s andSelector) String() string { return "{" + stringify(s.lhs) + ", " + stringify(s.rhs) + "}" }
+func stringify(sr selector) string { return strings.Trim(fmt.Sprintf("%s", sr), "{}") }
+
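+// parseSelector parses a tag selector expression: whitespace-separated words are
+// ANDed together, '|' inside a word ORs its parts, a leading '!' negates a word,
+// and '*' (or an empty expression) matches any tags.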
+func parseSelector(line string) (sr selector, err error) {
+ words := strings.Fields(line)
+ if len(words) == 0 {
+ return trueSelector{}, nil
+ }
+
+ var srs []selector
+ for _, word := range words {
+ if idx := strings.IndexByte(word, '|'); idx > 0 {
+ sr, err = parseOrSelectorWord(word)
+ } else {
+ sr, err = parseSingleSelectorWord(word)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("selector '%s' contains selector '%s' with forbidden symbol", line, word)
+ }
+ srs = append(srs, sr)
+ }
+
+ switch len(srs) {
+ case 0:
+ return trueSelector{}, nil
+ case 1:
+ return srs[0], nil
+ default:
+ return newAndSelector(srs[0], srs[1], srs[2:]...), nil
+ }
+}
+
+func parseOrSelectorWord(orWord string) (sr selector, err error) {
+ var srs []selector
+ for _, word := range strings.Split(orWord, "|") {
+ if sr, err = parseSingleSelectorWord(word); err != nil {
+ return nil, err
+ }
+ srs = append(srs, sr)
+ }
+ switch len(srs) {
+ case 0:
+ return trueSelector{}, nil
+ case 1:
+ return srs[0], nil
+ default:
+ return newOrSelector(srs[0], srs[1], srs[2:]...), nil
+ }
+}
+
+func parseSingleSelectorWord(word string) (selector, error) {
+ if len(word) == 0 {
+ return nil, errors.New("empty word")
+ }
+ neg := word[0] == '!'
+ if neg {
+ word = word[1:]
+ }
+ if len(word) == 0 {
+ return nil, errors.New("empty word")
+ }
+ if word != "*" && !isSelectorWordValid(word) {
+ return nil, errors.New("forbidden symbol")
+ }
+
+ var sr selector
+ switch word {
+ case "*":
+ sr = trueSelector{}
+ default:
+ sr = exactSelector(word)
+ }
+ if neg {
+ return negSelector{sr}, nil
+ }
+ return sr, nil
+}
+
+func newAndSelector(lhs, rhs selector, others ...selector) selector {
+ m := andSelector{lhs: lhs, rhs: rhs}
+ switch len(others) {
+ case 0:
+ return m
+ default:
+ return newAndSelector(m, others[0], others[1:]...)
+ }
+}
+
+func newOrSelector(lhs, rhs selector, others ...selector) selector {
+ m := orSelector{lhs: lhs, rhs: rhs}
+ switch len(others) {
+ case 0:
+ return m
+ default:
+ return newOrSelector(m, others[0], others[1:]...)
+ }
+}
+
+func isSelectorWordValid(word string) bool {
+ // valid:
+ // *
+ // ^[a-zA-Z][a-zA-Z0-9=_.]*$
+ if len(word) == 0 {
+ return false
+ }
+ if word == "*" {
+ return true
+ }
+ for i, b := range word {
+ switch {
+ case b >= 'a' && b <= 'z':
+ case b >= 'A' && b <= 'Z':
+ case b >= '0' && b <= '9' && i > 0:
+ case (b == '=' || b == '_' || b == '.') && i > 0:
+ default:
+ return false
+ }
+ }
+ return true
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/selector_test.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/selector_test.go
new file mode 100644
index 000000000..bed2150e2
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/selector_test.go
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "regexp"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var reSrString = regexp.MustCompile(`^{[^{}]+}$`)
+
+func TestTrueSelector_String(t *testing.T) {
+ var sr trueSelector
+ assert.Equal(t, "{*}", sr.String())
+}
+
+func TestExactSelector_String(t *testing.T) {
+ sr := exactSelector("selector")
+
+ assert.True(t, reSrString.MatchString(sr.String()))
+}
+
+func TestNegSelector_String(t *testing.T) {
+ srs := []selector{
+ exactSelector("selector"),
+ negSelector{exactSelector("selector")},
+ orSelector{
+ lhs: exactSelector("selector"),
+ rhs: exactSelector("selector")},
+ orSelector{
+ lhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}},
+ rhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}},
+ },
+ andSelector{
+ lhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}},
+ rhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}},
+ },
+ }
+
+ for i, sr := range srs {
+ neg := negSelector{sr}
+ assert.True(t, reSrString.MatchString(neg.String()), "selector num %d", i+1)
+ }
+}
+
+func TestOrSelector_String(t *testing.T) {
+ sr := orSelector{
+ lhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}},
+ rhs: orSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}},
+ }
+
+ assert.True(t, reSrString.MatchString(sr.String()))
+}
+
+func TestAndSelector_String(t *testing.T) {
+ sr := andSelector{
+ lhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}},
+ rhs: andSelector{lhs: exactSelector("selector"), rhs: negSelector{exactSelector("selector")}},
+ }
+
+ assert.True(t, reSrString.MatchString(sr.String()))
+}
+
+func TestExactSelector_Matches(t *testing.T) {
+ matchTests := struct {
+ tags model.Tags
+ srs []exactSelector
+ }{
+ tags: model.Tags{"a": {}, "b": {}},
+ srs: []exactSelector{
+ "a",
+ "b",
+ },
+ }
+ notMatchTests := struct {
+ tags model.Tags
+ srs []exactSelector
+ }{
+ tags: model.Tags{"a": {}, "b": {}},
+ srs: []exactSelector{
+ "c",
+ "d",
+ },
+ }
+
+ for i, sr := range matchTests.srs {
+ assert.Truef(t, sr.matches(matchTests.tags), "match selector num %d", i+1)
+ }
+ for i, sr := range notMatchTests.srs {
+ assert.Falsef(t, sr.matches(notMatchTests.tags), "not match selector num %d", i+1)
+ }
+}
+
+func TestNegSelector_Matches(t *testing.T) {
+ matchTests := struct {
+ tags model.Tags
+ srs []negSelector
+ }{
+ tags: model.Tags{"a": {}, "b": {}},
+ srs: []negSelector{
+ {exactSelector("c")},
+ {exactSelector("d")},
+ },
+ }
+ notMatchTests := struct {
+ tags model.Tags
+ srs []negSelector
+ }{
+ tags: model.Tags{"a": {}, "b": {}},
+ srs: []negSelector{
+ {exactSelector("a")},
+ {exactSelector("b")},
+ },
+ }
+
+ for i, sr := range matchTests.srs {
+ assert.Truef(t, sr.matches(matchTests.tags), "match selector num %d", i+1)
+ }
+ for i, sr := range notMatchTests.srs {
+ assert.Falsef(t, sr.matches(notMatchTests.tags), "not match selector num %d", i+1)
+ }
+}
+
+func TestOrSelector_Matches(t *testing.T) {
+ matchTests := struct {
+ tags model.Tags
+ srs []orSelector
+ }{
+ tags: model.Tags{"a": {}, "b": {}},
+ srs: []orSelector{
+ {
+ lhs: orSelector{lhs: exactSelector("c"), rhs: exactSelector("d")},
+ rhs: orSelector{lhs: exactSelector("e"), rhs: exactSelector("b")},
+ },
+ },
+ }
+ notMatchTests := struct {
+ tags model.Tags
+ srs []orSelector
+ }{
+ tags: model.Tags{"a": {}, "b": {}},
+ srs: []orSelector{
+ {
+ lhs: orSelector{lhs: exactSelector("c"), rhs: exactSelector("d")},
+ rhs: orSelector{lhs: exactSelector("e"), rhs: exactSelector("f")},
+ },
+ },
+ }
+
+ for i, sr := range matchTests.srs {
+ assert.Truef(t, sr.matches(matchTests.tags), "match selector num %d", i+1)
+ }
+ for i, sr := range notMatchTests.srs {
+ assert.Falsef(t, sr.matches(notMatchTests.tags), "not match selector num %d", i+1)
+ }
+}
+
+func TestAndSelector_Matches(t *testing.T) {
+ matchTests := struct {
+ tags model.Tags
+ srs []andSelector
+ }{
+ tags: model.Tags{"a": {}, "b": {}, "c": {}, "d": {}},
+ srs: []andSelector{
+ {
+ lhs: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")},
+ rhs: andSelector{lhs: exactSelector("c"), rhs: exactSelector("d")},
+ },
+ },
+ }
+ notMatchTests := struct {
+ tags model.Tags
+ srs []andSelector
+ }{
+ tags: model.Tags{"a": {}, "b": {}, "c": {}, "d": {}},
+ srs: []andSelector{
+ {
+ lhs: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")},
+ rhs: andSelector{lhs: exactSelector("c"), rhs: exactSelector("z")},
+ },
+ },
+ }
+
+ for i, sr := range matchTests.srs {
+ assert.Truef(t, sr.matches(matchTests.tags), "match selector num %d", i+1)
+ }
+ for i, sr := range notMatchTests.srs {
+ assert.Falsef(t, sr.matches(notMatchTests.tags), "not match selector num %d", i+1)
+ }
+}
+
+func TestParseSelector(t *testing.T) {
+ tests := map[string]struct {
+ wantSelector selector
+ wantErr bool
+ }{
+ "": {wantSelector: trueSelector{}},
+ "a": {wantSelector: exactSelector("a")},
+ "Z": {wantSelector: exactSelector("Z")},
+ "a_b": {wantSelector: exactSelector("a_b")},
+ "a=b": {wantSelector: exactSelector("a=b")},
+ "!a": {wantSelector: negSelector{exactSelector("a")}},
+ "a b": {wantSelector: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}},
+ "a|b": {wantSelector: orSelector{lhs: exactSelector("a"), rhs: exactSelector("b")}},
+ "*": {wantSelector: trueSelector{}},
+ "!*": {wantSelector: negSelector{trueSelector{}}},
+ "a b !c d|e f": {
+ wantSelector: andSelector{
+ lhs: andSelector{
+ lhs: andSelector{
+ lhs: andSelector{lhs: exactSelector("a"), rhs: exactSelector("b")},
+ rhs: negSelector{exactSelector("c")},
+ },
+ rhs: orSelector{
+ lhs: exactSelector("d"),
+ rhs: exactSelector("e"),
+ },
+ },
+ rhs: exactSelector("f"),
+ },
+ },
+ "!": {wantErr: true},
+ "a !": {wantErr: true},
+ "a!b": {wantErr: true},
+ "0a": {wantErr: true},
+ "a b c*": {wantErr: true},
+ "__": {wantErr: true},
+ "a|b|c*": {wantErr: true},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sr, err := parseSelector(name)
+
+ if test.wantErr {
+ assert.Nil(t, sr)
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, test.wantSelector, sr)
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/pipeline/sim_test.go b/src/go/plugin/go.d/agent/discovery/sd/pipeline/sim_test.go
new file mode 100644
index 000000000..657009478
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/pipeline/sim_test.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pipeline
+
+import (
+ "context"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/model"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+type discoverySim struct {
+ config string
+ discoverers []model.Discoverer
+ wantClassifyCalls int
+ wantComposeCalls int
+ wantConfGroups []*confgroup.Group
+}
+
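+// run wires the sim discoverers into a Pipeline whose classificator and composer
+// are wrapped in call-counting mocks, runs it, and checks the produced config
+// groups and the number of classify/compose calls against the expectations.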
+func (sim discoverySim) run(t *testing.T) {
+ t.Helper()
+
+ var cfg Config
+ err := yaml.Unmarshal([]byte(sim.config), &cfg)
+ require.Nilf(t, err, "cfg unmarshal")
+
+ clr, err := newTargetClassificator(cfg.Classify)
+ require.Nil(t, err, "newTargetClassificator")
+
+ cmr, err := newConfigComposer(cfg.Compose)
+ require.Nil(t, err, "newConfigComposer")
+
+ mockClr := &mockClassificator{clr: clr}
+ mockCmr := &mockComposer{cmr: cmr}
+
+ accum := newAccumulator()
+ accum.sendEvery = time.Second * 2
+
+ pl := &Pipeline{
+ Logger: logger.New(),
+ discoverers: sim.discoverers,
+ accum: accum,
+ clr: mockClr,
+ cmr: mockCmr,
+ configs: make(map[string]map[uint64][]confgroup.Config),
+ }
+
+ pl.accum.Logger = pl.Logger
+ clr.Logger = pl.Logger
+ cmr.Logger = pl.Logger
+
+ groups := sim.collectGroups(t, pl)
+
+ sortConfigGroups(groups)
+ sortConfigGroups(sim.wantConfGroups)
+
+ assert.Equal(t, sim.wantConfGroups, groups)
+ assert.Equalf(t, sim.wantClassifyCalls, mockClr.calls, "classify calls")
+ assert.Equalf(t, sim.wantComposeCalls, mockCmr.calls, "compose calls")
+}
+
+func (sim discoverySim) collectGroups(t *testing.T, pl *Pipeline) []*confgroup.Group {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ in := make(chan []*confgroup.Group)
+ done := make(chan struct{})
+
+ go func() { defer close(done); pl.Run(ctx, in) }()
+
+ timeout := time.Second * 10
+ var groups []*confgroup.Group
+
+ func() {
+ for {
+ select {
+ case inGroups := <-in:
+ groups = append(groups, inGroups...)
+ case <-done:
+ return
+ case <-time.After(timeout):
+ t.Logf("discovery timed out after %s, got %d groups, expected %d, some events are skipped",
+ timeout, len(groups), len(sim.wantConfGroups))
+ return
+ }
+ }
+ }()
+
+ return groups
+}
+
+type mockClassificator struct {
+ calls int
+ clr *targetClassificator
+}
+
+func (m *mockClassificator) classify(tgt model.Target) model.Tags {
+ m.calls++
+ return m.clr.classify(tgt)
+}
+
+type mockComposer struct {
+ calls int
+ cmr *configComposer
+}
+
+func (m *mockComposer) compose(tgt model.Target) []confgroup.Config {
+ m.calls++
+ return m.cmr.compose(tgt)
+}
+
+func sortConfigGroups(groups []*confgroup.Group) {
+ sort.Slice(groups, func(i, j int) bool {
+ return groups[i].Source < groups[j].Source
+ })
+
+ for _, g := range groups {
+ sort.Slice(g.Configs, func(i, j int) bool {
+ return g.Configs[i].Name() < g.Configs[j].Name()
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/sd.go b/src/go/plugin/go.d/agent/discovery/sd/sd.go
new file mode 100644
index 000000000..687ebfba8
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/sd.go
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sd
+
+import (
+ "context"
+ "fmt"
+ "log/slog"
+ "sync"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/pipeline"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath"
+
+ "gopkg.in/yaml.v2"
+)
+
+type Config struct {
+ ConfigDefaults confgroup.Registry
+ ConfDir multipath.MultiPath
+}
+
+func NewServiceDiscovery(cfg Config) (*ServiceDiscovery, error) {
+ log := logger.New().With(
+ slog.String("component", "service discovery"),
+ )
+
+ d := &ServiceDiscovery{
+ Logger: log,
+ confProv: newConfFileReader(log, cfg.ConfDir),
+ configDefaults: cfg.ConfigDefaults,
+ newPipeline: func(config pipeline.Config) (sdPipeline, error) {
+ return pipeline.New(config)
+ },
+ pipelines: make(map[string]func()),
+ }
+
+ return d, nil
+}
+
+type (
+ ServiceDiscovery struct {
+ *logger.Logger
+
+ confProv confFileProvider
+
+ configDefaults confgroup.Registry
+ newPipeline func(config pipeline.Config) (sdPipeline, error)
+ pipelines map[string]func()
+ }
+ sdPipeline interface {
+ Run(ctx context.Context, in chan<- []*confgroup.Group)
+ }
+ confFileProvider interface {
+ run(ctx context.Context)
+ configs() chan confFile
+ }
+)
+
+func (d *ServiceDiscovery) String() string {
+ return "service discovery"
+}
+
+func (d *ServiceDiscovery) Run(ctx context.Context, in chan<- []*confgroup.Group) {
+ d.Info("instance is started")
+ defer func() { d.cleanup(); d.Info("instance is stopped") }()
+
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() { defer wg.Done(); d.confProv.run(ctx) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); d.run(ctx, in) }()
+
+ wg.Wait()
+ <-ctx.Done()
+}
+
+func (d *ServiceDiscovery) run(ctx context.Context, in chan<- []*confgroup.Group) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case cfg := <-d.confProv.configs():
+ if cfg.source == "" {
+ continue
+ }
+ if len(cfg.content) == 0 {
+ d.removePipeline(cfg)
+ } else {
+ d.addPipeline(ctx, cfg, in)
+ }
+ }
+ }
+}
+
+func (d *ServiceDiscovery) removePipeline(conf confFile) {
+ if stop, ok := d.pipelines[conf.source]; ok {
+ d.Infof("received an empty config, stopping the pipeline ('%s')", conf.source)
+ delete(d.pipelines, conf.source)
+ stop()
+ }
+}
+
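+// addPipeline parses the pipeline config, stops a previously started pipeline for
+// the same config source if one exists, and starts a new one, keeping its stop
+// function for later cleanup.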
+func (d *ServiceDiscovery) addPipeline(ctx context.Context, conf confFile, in chan<- []*confgroup.Group) {
+ var cfg pipeline.Config
+
+ if err := yaml.Unmarshal(conf.content, &cfg); err != nil {
+ d.Error(err)
+ return
+ }
+
+ if cfg.Disabled {
+ d.Infof("pipeline config is disabled '%s' (%s)", cfg.Name, cfg.Source)
+ return
+ }
+
+ cfg.Source = fmt.Sprintf("file=%s", conf.source)
+ cfg.ConfigDefaults = d.configDefaults
+
+ pl, err := d.newPipeline(cfg)
+ if err != nil {
+ d.Error(err)
+ return
+ }
+
+ if stop, ok := d.pipelines[conf.source]; ok {
+ stop()
+ }
+
+ var wg sync.WaitGroup
+ plCtx, cancel := context.WithCancel(ctx)
+
+ wg.Add(1)
+ go func() { defer wg.Done(); pl.Run(plCtx, in) }()
+
+ stop := func() { cancel(); wg.Wait() }
+ d.pipelines[conf.source] = stop
+}
+
+func (d *ServiceDiscovery) cleanup() {
+ for _, stop := range d.pipelines {
+ stop()
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/sd_test.go b/src/go/plugin/go.d/agent/discovery/sd/sd_test.go
new file mode 100644
index 000000000..4269bfd3a
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/sd_test.go
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sd
+
+import (
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/pipeline"
+
+ "gopkg.in/yaml.v2"
+)
+
+func TestServiceDiscovery_Run(t *testing.T) {
+ tests := map[string]discoverySim{
+ "add pipeline": {
+ configs: []confFile{
+ prepareConfigFile("source", "name"),
+ },
+ wantPipelines: []*mockPipeline{
+ {name: "name", started: true, stopped: false},
+ },
+ },
+ "add disabled pipeline": {
+ configs: []confFile{
+ prepareDisabledConfigFile("source", "name"),
+ },
+ wantPipelines: nil,
+ },
+ "remove pipeline": {
+ configs: []confFile{
+ prepareConfigFile("source", "name"),
+ prepareEmptyConfigFile("source"),
+ },
+ wantPipelines: []*mockPipeline{
+ {name: "name", started: true, stopped: true},
+ },
+ },
+ "re-add pipeline multiple times": {
+ configs: []confFile{
+ prepareConfigFile("source", "name"),
+ prepareConfigFile("source", "name"),
+ prepareConfigFile("source", "name"),
+ },
+ wantPipelines: []*mockPipeline{
+ {name: "name", started: true, stopped: true},
+ {name: "name", started: true, stopped: true},
+ {name: "name", started: true, stopped: false},
+ },
+ },
+ "restart pipeline": {
+ configs: []confFile{
+ prepareConfigFile("source", "name1"),
+ prepareConfigFile("source", "name2"),
+ },
+ wantPipelines: []*mockPipeline{
+ {name: "name1", started: true, stopped: true},
+ {name: "name2", started: true, stopped: false},
+ },
+ },
+ "invalid pipeline config": {
+ configs: []confFile{
+ prepareConfigFile("source", "invalid"),
+ },
+ wantPipelines: nil,
+ },
+ "invalid config for running pipeline": {
+ configs: []confFile{
+ prepareConfigFile("source", "name"),
+ prepareConfigFile("source", "invalid"),
+ },
+ wantPipelines: []*mockPipeline{
+ {name: "name", started: true, stopped: false},
+ },
+ },
+ }
+
+ for name, sim := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim.run(t)
+ })
+ }
+}
+
+func prepareConfigFile(source, name string) confFile {
+ bs, _ := yaml.Marshal(pipeline.Config{Name: name})
+
+ return confFile{
+ source: source,
+ content: bs,
+ }
+}
+
+func prepareEmptyConfigFile(source string) confFile {
+ return confFile{
+ source: source,
+ }
+}
+
+func prepareDisabledConfigFile(source, name string) confFile {
+ bs, _ := yaml.Marshal(pipeline.Config{Name: name, Disabled: true})
+
+ return confFile{
+ source: source,
+ content: bs,
+ }
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sd/sim_test.go b/src/go/plugin/go.d/agent/discovery/sd/sim_test.go
new file mode 100644
index 000000000..930c40125
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sd/sim_test.go
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sd
+
+import (
+ "context"
+ "errors"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd/pipeline"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var lock = &sync.Mutex{}
+
+type discoverySim struct {
+ configs []confFile
+ wantPipelines []*mockPipeline
+}
+
+func (sim *discoverySim) run(t *testing.T) {
+ fact := &mockFactory{}
+ mgr := &ServiceDiscovery{
+ Logger: logger.New(),
+ newPipeline: func(config pipeline.Config) (sdPipeline, error) {
+ return fact.create(config)
+ },
+ confProv: &mockConfigProvider{
+ confFiles: sim.configs,
+ ch: make(chan confFile),
+ },
+ pipelines: make(map[string]func()),
+ }
+
+ in := make(chan<- []*confgroup.Group)
+ done := make(chan struct{})
+ ctx, cancel := context.WithCancel(context.Background())
+
+ go func() { defer close(done); mgr.Run(ctx, in) }()
+
+ time.Sleep(time.Second * 3)
+
+ lock.Lock()
+ assert.Equalf(t, sim.wantPipelines, fact.pipelines, "before stop")
+ lock.Unlock()
+
+ cancel()
+
+ timeout := time.Second * 5
+
+ select {
+ case <-done:
+ lock.Lock()
+ for _, pl := range fact.pipelines {
+ assert.Truef(t, pl.stopped, "pipeline '%s' is not stopped after cancel()", pl.name)
+ }
+ lock.Unlock()
+ case <-time.After(timeout):
+ t.Errorf("sd failed to exit in %s", timeout)
+ }
+}
+
+type mockConfigProvider struct {
+ confFiles []confFile
+ ch chan confFile
+}
+
+func (m *mockConfigProvider) run(ctx context.Context) {
+ for _, conf := range m.confFiles {
+ select {
+ case <-ctx.Done():
+ return
+ case m.ch <- conf:
+ }
+ }
+ <-ctx.Done()
+}
+
+func (m *mockConfigProvider) configs() chan confFile {
+ return m.ch
+}
+
+type mockFactory struct {
+ pipelines []*mockPipeline
+}
+
+func (m *mockFactory) create(cfg pipeline.Config) (sdPipeline, error) {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if cfg.Name == "invalid" {
+ return nil, errors.New("mock sdPipelineFactory.create() error")
+ }
+
+ pl := mockPipeline{name: cfg.Name}
+ m.pipelines = append(m.pipelines, &pl)
+
+ return &pl, nil
+}
+
+type mockPipeline struct {
+ name string
+ started bool
+ stopped bool
+}
+
+func (m *mockPipeline) Run(ctx context.Context, _ chan<- []*confgroup.Group) {
+ lock.Lock()
+ m.started = true
+ lock.Unlock()
+ defer func() { lock.Lock(); m.stopped = true; lock.Unlock() }()
+ <-ctx.Done()
+}
diff --git a/src/go/plugin/go.d/agent/discovery/sim_test.go b/src/go/plugin/go.d/agent/discovery/sim_test.go
new file mode 100644
index 000000000..b20344c3c
--- /dev/null
+++ b/src/go/plugin/go.d/agent/discovery/sim_test.go
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discovery
+
+import (
+ "context"
+ "sort"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type discoverySim struct {
+ mgr *Manager
+ collectDelay time.Duration
+ expectedGroups []*confgroup.Group
+}
+
+func (sim discoverySim) run(t *testing.T) {
+ t.Helper()
+ require.NotNil(t, sim.mgr)
+
+ in, out := make(chan []*confgroup.Group), make(chan []*confgroup.Group)
+ go sim.collectGroups(t, in, out)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ go sim.mgr.Run(ctx, in)
+
+ actualGroups := <-out
+
+ sortGroups(sim.expectedGroups)
+ sortGroups(actualGroups)
+
+ assert.Equal(t, sim.expectedGroups, actualGroups)
+}
+
+func (sim discoverySim) collectGroups(t *testing.T, in, out chan []*confgroup.Group) {
+ time.Sleep(sim.collectDelay)
+
+ timeout := sim.mgr.sendEvery + time.Second*2
+ var groups []*confgroup.Group
+loop:
+ for {
+ select {
+ case inGroups := <-in:
+ if groups = append(groups, inGroups...); len(groups) >= len(sim.expectedGroups) {
+ break loop
+ }
+ case <-time.After(timeout):
+ t.Logf("discovery %s timed out after %s, got %d groups, expected %d, some events are skipped",
+ sim.mgr.discoverers, timeout, len(groups), len(sim.expectedGroups))
+ break loop
+ }
+ }
+ out <- groups
+}
+
+func sortGroups(groups []*confgroup.Group) {
+ if len(groups) == 0 {
+ return
+ }
+ sort.Slice(groups, func(i, j int) bool { return groups[i].Source < groups[j].Source })
+}
diff --git a/src/go/plugin/go.d/agent/filelock/filelock.go b/src/go/plugin/go.d/agent/filelock/filelock.go
new file mode 100644
index 000000000..f266e0102
--- /dev/null
+++ b/src/go/plugin/go.d/agent/filelock/filelock.go
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filelock
+
+import (
+ "path/filepath"
+
+ "github.com/gofrs/flock"
+)
+
+func New(dir string) *Locker {
+ return &Locker{
+ suffix: ".collector.lock",
+ dir: dir,
+ locks: make(map[string]*flock.Flock),
+ }
+}
+
+type Locker struct {
+ suffix string
+ dir string
+ locks map[string]*flock.Flock
+}
+
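+// Lock tries to acquire a file lock for name. It returns true if this Locker
+// already holds the lock or just acquired it, and false if another process holds it.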
+func (l *Locker) Lock(name string) (bool, error) {
+ filename := l.filename(name)
+
+ if _, ok := l.locks[filename]; ok {
+ return true, nil
+ }
+
+ locker := flock.New(filename)
+
+ ok, err := locker.TryLock()
+ if ok {
+ l.locks[filename] = locker
+ } else {
+ _ = locker.Close()
+ }
+
+ return ok, err
+}
+
+func (l *Locker) Unlock(name string) {
+ filename := l.filename(name)
+
+ locker, ok := l.locks[filename]
+ if !ok {
+ return
+ }
+
+ delete(l.locks, filename)
+
+ _ = locker.Close()
+}
+
+func (l *Locker) isLocked(name string) bool {
+ _, ok := l.locks[l.filename(name)]
+ return ok
+}
+
+func (l *Locker) filename(name string) string {
+ return filepath.Join(l.dir, name+l.suffix)
+}
diff --git a/src/go/plugin/go.d/agent/filelock/filelock_test.go b/src/go/plugin/go.d/agent/filelock/filelock_test.go
new file mode 100644
index 000000000..6ffc794ec
--- /dev/null
+++ b/src/go/plugin/go.d/agent/filelock/filelock_test.go
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filelock
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNew(t *testing.T) {
+ assert.NotNil(t, New(""))
+}
+
+func TestLocker_Lock(t *testing.T) {
+ tests := map[string]func(t *testing.T, dir string){
+ "register a lock": func(t *testing.T, dir string) {
+ reg := New(dir)
+
+ ok, err := reg.Lock("name")
+ assert.True(t, ok)
+ assert.NoError(t, err)
+ },
+ "register the same lock twice": func(t *testing.T, dir string) {
+ reg := New(dir)
+
+ ok, err := reg.Lock("name")
+ require.True(t, ok)
+ require.NoError(t, err)
+
+ ok, err = reg.Lock("name")
+ assert.True(t, ok)
+ assert.NoError(t, err)
+ },
+ "failed to register locked by other process lock": func(t *testing.T, dir string) {
+ reg1 := New(dir)
+ reg2 := New(dir)
+
+ ok, err := reg1.Lock("name")
+ require.True(t, ok)
+ require.NoError(t, err)
+
+ ok, err = reg2.Lock("name")
+ assert.False(t, ok)
+ assert.NoError(t, err)
+ },
+ "failed to register because a directory doesnt exist": func(t *testing.T, dir string) {
+ reg := New(dir + dir)
+
+ ok, err := reg.Lock("name")
+ assert.False(t, ok)
+ assert.Error(t, err)
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dir, err := os.MkdirTemp(os.TempDir(), "netdata-go-test-file-lock-registry")
+ require.NoError(t, err)
+ defer func() { require.NoError(t, os.RemoveAll(dir)) }()
+
+ test(t, dir)
+ })
+ }
+}
+
+func TestLocker_Unlock(t *testing.T) {
+ tests := map[string]func(t *testing.T, dir string){
+ "unregister a lock": func(t *testing.T, dir string) {
+ reg := New(dir)
+
+ ok, err := reg.Lock("name")
+ require.True(t, ok)
+ require.NoError(t, err)
+ reg.Unlock("name")
+
+ assert.False(t, reg.isLocked("name"))
+ },
+ "unregister not registered lock": func(t *testing.T, dir string) {
+ reg := New(dir)
+
+ reg.Unlock("name")
+
+ assert.False(t, reg.isLocked("name"))
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dir, err := os.MkdirTemp(os.TempDir(), "netdata-go-test-file-lock-registry")
+ require.NoError(t, err)
+ defer func() { require.NoError(t, os.RemoveAll(dir)) }()
+
+ test(t, dir)
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/filestatus/manager.go b/src/go/plugin/go.d/agent/filestatus/manager.go
new file mode 100644
index 000000000..03e0dd2fc
--- /dev/null
+++ b/src/go/plugin/go.d/agent/filestatus/manager.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filestatus
+
+import (
+ "context"
+ "log/slog"
+ "os"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+)
+
+func NewManager(path string) *Manager {
+ return &Manager{
+ Logger: logger.New().With(
+ slog.String("component", "filestatus manager"),
+ ),
+ path: path,
+ store: &Store{},
+ flushEvery: time.Second * 5,
+ flushCh: make(chan struct{}, 1),
+ }
+}
+
+type Manager struct {
+ *logger.Logger
+
+ path string
+
+ store *Store
+
+ flushEvery time.Duration
+ flushCh chan struct{}
+}
+
+func (m *Manager) Run(ctx context.Context) {
+ m.Info("instance is started")
+ defer func() { m.Info("instance is stopped") }()
+
+ tk := time.NewTicker(m.flushEvery)
+ defer tk.Stop()
+ defer m.flush()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-tk.C:
+ m.tryFlush()
+ }
+ }
+}
+
+func (m *Manager) Save(cfg confgroup.Config, status string) {
+ if v, ok := m.store.lookup(cfg); !ok || status != v {
+ m.store.add(cfg, status)
+ m.triggerFlush()
+ }
+}
+
+func (m *Manager) Remove(cfg confgroup.Config) {
+ if _, ok := m.store.lookup(cfg); ok {
+ m.store.remove(cfg)
+ m.triggerFlush()
+ }
+}
+
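+// triggerFlush records that the store changed; the next ticker tick in Run picks
+// it up via tryFlush and persists the state to disk.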
+func (m *Manager) triggerFlush() {
+ select {
+ case m.flushCh <- struct{}{}:
+ default:
+ }
+}
+
+func (m *Manager) tryFlush() {
+ select {
+ case <-m.flushCh:
+ m.flush()
+ default:
+ }
+}
+
+func (m *Manager) flush() {
+ bs, err := m.store.bytes()
+ if err != nil {
+ return
+ }
+
+ f, err := os.Create(m.path)
+ if err != nil {
+ return
+ }
+ defer func() { _ = f.Close() }()
+
+ _, _ = f.Write(bs)
+}
diff --git a/src/go/plugin/go.d/agent/filestatus/manager_test.go b/src/go/plugin/go.d/agent/filestatus/manager_test.go
new file mode 100644
index 000000000..1c7b32884
--- /dev/null
+++ b/src/go/plugin/go.d/agent/filestatus/manager_test.go
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filestatus
+
+import (
+ "context"
+ "os"
+ "path"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewManager(t *testing.T) {
+ mgr := NewManager("")
+ assert.NotNil(t, mgr.store)
+}
+
+func TestManager_Run(t *testing.T) {
+ type testAction struct {
+ name string
+ cfg confgroup.Config
+ status string
+ }
+ tests := map[string]struct {
+ actions []testAction
+ wantFile string
+ }{
+ "save": {
+ actions: []testAction{
+ {
+ name: "save", status: "ok",
+ cfg: prepareConfig("module", "module1", "name", "name1"),
+ },
+ {
+ name: "save", status: "ok",
+ cfg: prepareConfig("module", "module2", "name", "name2"),
+ },
+ },
+ wantFile: `
+{
+ "module1": {
+ "name1:5956328514325012774": "ok"
+ },
+ "module2": {
+ "name2:14684454322123948394": "ok"
+ }
+}
+`,
+ },
+ "remove": {
+ actions: []testAction{
+ {
+ name: "save", status: "ok",
+ cfg: prepareConfig("module", "module1", "name", "name1"),
+ },
+ {
+ name: "save", status: "ok",
+ cfg: prepareConfig("module", "module2", "name", "name2"),
+ },
+ {
+ name: "remove",
+ cfg: prepareConfig("module", "module2", "name", "name2"),
+ },
+ },
+ wantFile: `
+{
+ "module1": {
+ "name1:5956328514325012774": "ok"
+ }
+}
+`,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dir, err := os.MkdirTemp(os.TempDir(), "netdata-go-test-filestatus-run")
+ require.NoError(t, err)
+ defer func() { assert.NoError(t, os.RemoveAll(dir)) }()
+
+ filename := path.Join(dir, "filestatus")
+
+ mgr := NewManager(filename)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ done := make(chan struct{})
+ go func() { defer close(done); mgr.Run(ctx) }()
+
+ for _, v := range test.actions {
+ switch v.name {
+ case "save":
+ mgr.Save(v.cfg, v.status)
+ case "remove":
+ mgr.Remove(v.cfg)
+ }
+ }
+
+ cancel()
+
+ timeout := time.Second * 5
+ tk := time.NewTimer(timeout)
+ defer tk.Stop()
+
+ select {
+ case <-done:
+ case <-tk.C:
+ t.Errorf("timed out after %s", timeout)
+ }
+
+ bs, err := os.ReadFile(filename)
+ require.NoError(t, err)
+
+ assert.Equal(t, strings.TrimSpace(test.wantFile), string(bs))
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/agent/filestatus/store.go b/src/go/plugin/go.d/agent/filestatus/store.go
new file mode 100644
index 000000000..3f500dec6
--- /dev/null
+++ b/src/go/plugin/go.d/agent/filestatus/store.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filestatus
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "slices"
+ "sync"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+)
+
+func LoadStore(path string) (*Store, error) {
+ var s Store
+
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = f.Close() }()
+
+ return &s, json.NewDecoder(f).Decode(&s.items)
+}
+
+type Store struct {
+ mux sync.Mutex
+ items map[string]map[string]string // [module][name:hash]status
+}
+
+func (s *Store) Contains(cfg confgroup.Config, statuses ...string) bool {
+ status, ok := s.lookup(cfg)
+ if !ok {
+ return false
+ }
+
+ return slices.Contains(statuses, status)
+}
+
+func (s *Store) lookup(cfg confgroup.Config) (string, bool) {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+
+ jobs, ok := s.items[cfg.Module()]
+ if !ok {
+ return "", false
+ }
+
+ status, ok := jobs[storeJobKey(cfg)]
+
+ return status, ok
+}
+
+func (s *Store) add(cfg confgroup.Config, status string) {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+
+ if s.items == nil {
+ s.items = make(map[string]map[string]string)
+ }
+
+ if s.items[cfg.Module()] == nil {
+ s.items[cfg.Module()] = make(map[string]string)
+ }
+
+ s.items[cfg.Module()][storeJobKey(cfg)] = status
+}
+
+func (s *Store) remove(cfg confgroup.Config) {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+
+ delete(s.items[cfg.Module()], storeJobKey(cfg))
+
+ if len(s.items[cfg.Module()]) == 0 {
+ delete(s.items, cfg.Module())
+ }
+}
+
+func (s *Store) bytes() ([]byte, error) {
+ s.mux.Lock()
+ defer s.mux.Unlock()
+
+ return json.MarshalIndent(s.items, "", " ")
+}
+
+func storeJobKey(cfg confgroup.Config) string {
+ return fmt.Sprintf("%s:%d", cfg.Name(), cfg.Hash())
+}
diff --git a/src/go/plugin/go.d/agent/filestatus/store_test.go b/src/go/plugin/go.d/agent/filestatus/store_test.go
new file mode 100644
index 000000000..d8e18539e
--- /dev/null
+++ b/src/go/plugin/go.d/agent/filestatus/store_test.go
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filestatus
+
+import (
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TODO: tech debt
+func TestLoadStore(t *testing.T) {
+
+}
+
+// TODO: tech debt
+func TestStore_Contains(t *testing.T) {
+
+}
+
+func TestStore_add(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Store
+ input confgroup.Config
+ wantItemsNum int
+ }{
+ "add cfg to the empty store": {
+ prepare: func() *Store {
+ return &Store{}
+ },
+ input: prepareConfig(
+ "module", "modName",
+ "name", "jobName",
+ ),
+ wantItemsNum: 1,
+ },
+ "add cfg that already in the store": {
+ prepare: func() *Store {
+ return &Store{
+ items: map[string]map[string]string{
+ "modName": {"jobName:18299273693089411682": "state"},
+ },
+ }
+ },
+ input: prepareConfig(
+ "module", "modName",
+ "name", "jobName",
+ ),
+ wantItemsNum: 1,
+ },
+ "add cfg with same module, same name, but specific options": {
+ prepare: func() *Store {
+ return &Store{
+ items: map[string]map[string]string{
+ "modName": {"jobName:18299273693089411682": "state"},
+ },
+ }
+ },
+ input: prepareConfig(
+ "module", "modName",
+ "name", "jobName",
+ "opt", "val",
+ ),
+ wantItemsNum: 2,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ s := test.prepare()
+ s.add(test.input, "state")
+ assert.Equal(t, test.wantItemsNum, calcStoreItems(s))
+ })
+ }
+}
+
+func TestStore_remove(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Store
+ input confgroup.Config
+ wantItemsNum int
+ }{
+ "remove cfg from the empty store": {
+ prepare: func() *Store {
+ return &Store{}
+ },
+ input: prepareConfig(
+ "module", "modName",
+ "name", "jobName",
+ ),
+ wantItemsNum: 0,
+ },
+ "remove cfg from the store": {
+ prepare: func() *Store {
+ return &Store{
+ items: map[string]map[string]string{
+ "modName": {
+ "jobName:18299273693089411682": "state",
+ "jobName:18299273693089411683": "state",
+ },
+ },
+ }
+ },
+ input: prepareConfig(
+ "module", "modName",
+ "name", "jobName",
+ ),
+ wantItemsNum: 1,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ s := test.prepare()
+ s.remove(test.input)
+ assert.Equal(t, test.wantItemsNum, calcStoreItems(s))
+ })
+ }
+}
+
+func calcStoreItems(s *Store) (num int) {
+ for _, v := range s.items {
+ for range v {
+ num++
+ }
+ }
+ return num
+}
+
+func prepareConfig(values ...string) confgroup.Config {
+ cfg := confgroup.Config{}
+ for i := 1; i < len(values); i += 2 {
+ cfg[values[i-1]] = values[i]
+ }
+ return cfg
+}
diff --git a/src/go/plugin/go.d/agent/functions/ext.go b/src/go/plugin/go.d/agent/functions/ext.go
new file mode 100644
index 000000000..28c717d88
--- /dev/null
+++ b/src/go/plugin/go.d/agent/functions/ext.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package functions
+
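+// Register installs fn as the handler for the given function name; a nil handler is ignored,
+// and registering an existing name overwrites the previous handler.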
+func (m *Manager) Register(name string, fn func(Function)) {
+ if fn == nil {
+ m.Warningf("not registering '%s': nil function", name)
+ return
+ }
+
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ if _, ok := m.FunctionRegistry[name]; !ok {
+ m.Debugf("registering function '%s'", name)
+ } else {
+ m.Warningf("re-registering function '%s'", name)
+ }
+ m.FunctionRegistry[name] = fn
+}
+
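+// Unregister removes the handler registered under the given name, if any.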
+func (m *Manager) Unregister(name string) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ if _, ok := m.FunctionRegistry[name]; ok {
+ delete(m.FunctionRegistry, name)
+ m.Debugf("unregistering function '%s'", name)
+ }
+}
diff --git a/src/go/plugin/go.d/agent/functions/function.go b/src/go/plugin/go.d/agent/functions/function.go
new file mode 100644
index 000000000..b65d3d713
--- /dev/null
+++ b/src/go/plugin/go.d/agent/functions/function.go
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package functions
+
+import (
+ "bytes"
+ "context"
+ "encoding/csv"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
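+// Function describes a single FUNCTION or FUNCTION_PAYLOAD request received from the Netdata agent.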
+type Function struct {
+ key string
+ UID string
+ Timeout time.Duration
+ Name string
+ Args []string
+ Payload []byte
+ Permissions string
+ Source string
+ ContentType string
+}
+
+func (f *Function) String() string {
+ return fmt.Sprintf("key: '%s', uid: '%s', timeout: '%s', function: '%s', args: '%v', permissions: '%s', source: '%s', contentType: '%s', payload: '%s'",
+ f.key, f.UID, f.Timeout, f.Name, f.Args, f.Permissions, f.Source, f.ContentType, string(f.Payload))
+}
+
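+// parseFunction parses a FUNCTION header line. A csv.Reader with a space delimiter is used so that
+// quoted fields (the command string and the source) are kept as single tokens.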
+func parseFunction(s string) (*Function, error) {
+ r := csv.NewReader(strings.NewReader(s))
+ r.Comma = ' '
+
+ parts, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+
+ // FUNCTION UID Timeout "Name ...Parameters" 0xPermissions "SourceType" [ContentType]
+ if n := len(parts); n != 6 && n != 7 {
+ return nil, fmt.Errorf("unexpected number of words: want 6 or 7, got %d (%v)", n, parts)
+ }
+
+ timeout, err := strconv.ParseInt(parts[2], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+
+ cmd := strings.Split(parts[3], " ")
+
+ fn := &Function{
+ key: parts[0],
+ UID: parts[1],
+ Timeout: time.Duration(timeout) * time.Second,
+ Name: cmd[0],
+ Args: cmd[1:],
+ Permissions: parts[4],
+ Source: parts[5],
+ }
+
+ if len(parts) == 7 {
+ fn.ContentType = parts[6]
+ }
+
+ return fn, nil
+}
+
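+// parseFunctionWithPayload parses the header line and then accumulates the following input lines
+// as the payload until FUNCTION_PAYLOAD_END is read; it returns nil if the context is cancelled
+// or the input channel closes first.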
+func parseFunctionWithPayload(ctx context.Context, s string, in input) (*Function, error) {
+ fn, err := parseFunction(s)
+ if err != nil {
+ return nil, err
+ }
+
+ var buf bytes.Buffer
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil, nil
+ case line, ok := <-in.lines():
+ if !ok {
+ return nil, nil
+ }
+ if line == "FUNCTION_PAYLOAD_END" {
+ fn.Payload = append(fn.Payload, buf.Bytes()...)
+ return fn, nil
+ }
+ if buf.Len() > 0 {
+ buf.WriteString("\n")
+ }
+ buf.WriteString(line)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/agent/functions/input.go b/src/go/plugin/go.d/agent/functions/input.go
new file mode 100644
index 000000000..cb50c54d0
--- /dev/null
+++ b/src/go/plugin/go.d/agent/functions/input.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package functions
+
+import (
+ "bufio"
+ "os"
+)
+
+type input interface {
+ lines() chan string
+}
+
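+// stdinInput is the default input source: a background goroutine that forwards lines read from
+// os.Stdin to the channel returned by lines().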
+var stdinInput = func() input {
+ r := &stdinReader{chLines: make(chan string)}
+ go r.run()
+ return r
+}()
+
+type stdinReader struct {
+ chLines chan string
+}
+
+func (in *stdinReader) run() {
+ sc := bufio.NewScanner(bufio.NewReader(os.Stdin))
+
+ for sc.Scan() {
+ text := sc.Text()
+ in.chLines <- text
+ }
+}
+
+func (in *stdinReader) lines() chan string {
+ return in.chLines
+}
diff --git a/src/go/plugin/go.d/agent/functions/manager.go b/src/go/plugin/go.d/agent/functions/manager.go
new file mode 100644
index 000000000..b7cdecd6a
--- /dev/null
+++ b/src/go/plugin/go.d/agent/functions/manager.go
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package functions
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter"
+)
+
+func NewManager() *Manager {
+ return &Manager{
+ Logger: logger.New().With(
+ slog.String("component", "functions manager"),
+ ),
+ api: netdataapi.New(safewriter.Stdout),
+ input: stdinInput,
+ mux: &sync.Mutex{},
+ FunctionRegistry: make(map[string]func(Function)),
+ }
+}
+
+type Manager struct {
+ *logger.Logger
+
+ api *netdataapi.API
+
+ input input
+
+ mux *sync.Mutex
+ FunctionRegistry map[string]func(Function)
+}
+
+func (m *Manager) Run(ctx context.Context) {
+ m.Info("instance is started")
+ defer func() { m.Info("instance is stopped") }()
+
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() { defer wg.Done(); m.run(ctx) }()
+
+ wg.Wait()
+
+ <-ctx.Done()
+}
+
+func (m *Manager) run(ctx context.Context) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case line, ok := <-m.input.lines():
+ if !ok {
+ return
+ }
+
+ var fn *Function
+ var err error
+
+ // FIXME: if we are waiting for FUNCTION_PAYLOAD_END and a new FUNCTION* appears,
+ // we need to discard the current one and switch to the new one
+ switch {
+ case strings.HasPrefix(line, "FUNCTION "):
+ fn, err = parseFunction(line)
+ case strings.HasPrefix(line, "FUNCTION_PAYLOAD "):
+ fn, err = parseFunctionWithPayload(ctx, line, m.input)
+ case line == "":
+ continue
+ default:
+ m.Warningf("unexpected line: '%s'", line)
+ continue
+ }
+
+ if err != nil {
+ m.Warningf("parse function: %v ('%s')", err, line)
+ continue
+ }
+ if fn == nil {
+ continue
+ }
+
+ function, ok := m.lookupFunction(fn.Name)
+ if !ok {
+ m.Infof("skipping execution of '%s': unregistered function", fn.Name)
+ m.respf(fn, 501, "unregistered function: %s", fn.Name)
+ continue
+ }
+ if function == nil {
+ m.Warningf("skipping execution of '%s': nil function registered", fn.Name)
+ m.respf(fn, 501, "nil function: %s", fn.Name)
+ continue
+ }
+
+ function(*fn)
+ }
+ }
+}
+
+func (m *Manager) lookupFunction(name string) (func(Function), bool) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ f, ok := m.FunctionRegistry[name]
+ return f, ok
+}
+
+func (m *Manager) respf(fn *Function, code int, msgf string, a ...any) {
+ bs, _ := json.Marshal(struct {
+ Status int `json:"status"`
+ Message string `json:"message"`
+ }{
+ Status: code,
+ Message: fmt.Sprintf(msgf, a...),
+ })
+ ts := strconv.FormatInt(time.Now().Unix(), 10)
+ m.api.FUNCRESULT(fn.UID, "application/json", string(bs), strconv.Itoa(code), ts)
+}
diff --git a/src/go/plugin/go.d/agent/functions/manager_test.go b/src/go/plugin/go.d/agent/functions/manager_test.go
new file mode 100644
index 000000000..c19519bc1
--- /dev/null
+++ b/src/go/plugin/go.d/agent/functions/manager_test.go
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package functions
+
+import (
+ "bufio"
+ "context"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewManager(t *testing.T) {
+ mgr := NewManager()
+
+ assert.NotNilf(t, mgr.input, "Input")
+ assert.NotNilf(t, mgr.FunctionRegistry, "FunctionRegistry")
+}
+
+func TestManager_Register(t *testing.T) {
+ type testInputFn struct {
+ name string
+ invalid bool
+ }
+ tests := map[string]struct {
+ input []testInputFn
+ expected []string
+ }{
+ "valid registration": {
+ input: []testInputFn{
+ {name: "fn1"},
+ {name: "fn2"},
+ },
+ expected: []string{"fn1", "fn2"},
+ },
+ "registration with duplicates": {
+ input: []testInputFn{
+ {name: "fn1"},
+ {name: "fn2"},
+ {name: "fn1"},
+ },
+ expected: []string{"fn1", "fn2"},
+ },
+ "registration with nil functions": {
+ input: []testInputFn{
+ {name: "fn1"},
+ {name: "fn2", invalid: true},
+ },
+ expected: []string{"fn1"},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mgr := NewManager()
+
+ for _, v := range test.input {
+ if v.invalid {
+ mgr.Register(v.name, nil)
+ } else {
+ mgr.Register(v.name, func(Function) {})
+ }
+ }
+
+ var got []string
+ for name := range mgr.FunctionRegistry {
+ got = append(got, name)
+ }
+ sort.Strings(got)
+ sort.Strings(test.expected)
+
+ assert.Equal(t, test.expected, got)
+ })
+ }
+}
+
+func TestManager_Run(t *testing.T) {
+ tests := map[string]struct {
+ register []string
+ input string
+ expected []Function
+ }{
+ "valid function: single": {
+ register: []string{"fn1"},
+ input: `
+FUNCTION UID 1 "fn1 arg1 arg2" 0xFFFF "method=api,role=test"
+`,
+ expected: []Function{
+ {
+ key: "FUNCTION",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn1",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "",
+ Payload: nil,
+ },
+ },
+ },
+ "valid function: multiple": {
+ register: []string{"fn1", "fn2"},
+ input: `
+FUNCTION UID 1 "fn1 arg1 arg2" 0xFFFF "method=api,role=test"
+FUNCTION UID 1 "fn2 arg1 arg2" 0xFFFF "method=api,role=test"
+`,
+ expected: []Function{
+ {
+ key: "FUNCTION",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn1",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "",
+ Payload: nil,
+ },
+ {
+ key: "FUNCTION",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn2",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "",
+ Payload: nil,
+ },
+ },
+ },
+ "valid function: single with payload": {
+ register: []string{"fn1", "fn2"},
+ input: `
+FUNCTION_PAYLOAD UID 1 "fn1 arg1 arg2" 0xFFFF "method=api,role=test" application/json
+payload line1
+payload line2
+FUNCTION_PAYLOAD_END
+`,
+ expected: []Function{
+ {
+ key: "FUNCTION_PAYLOAD",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn1",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "application/json",
+ Payload: []byte("payload line1\npayload line2"),
+ },
+ },
+ },
+ "valid function: multiple with payload": {
+ register: []string{"fn1", "fn2"},
+ input: `
+FUNCTION_PAYLOAD UID 1 "fn1 arg1 arg2" 0xFFFF "method=api,role=test" application/json
+payload line1
+payload line2
+FUNCTION_PAYLOAD_END
+
+FUNCTION_PAYLOAD UID 1 "fn2 arg1 arg2" 0xFFFF "method=api,role=test" application/json
+payload line3
+payload line4
+FUNCTION_PAYLOAD_END
+`,
+ expected: []Function{
+ {
+ key: "FUNCTION_PAYLOAD",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn1",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "application/json",
+ Payload: []byte("payload line1\npayload line2"),
+ },
+ {
+ key: "FUNCTION_PAYLOAD",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn2",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "application/json",
+ Payload: []byte("payload line3\npayload line4"),
+ },
+ },
+ },
+ "valid function: multiple with and without payload": {
+ register: []string{"fn1", "fn2", "fn3", "fn4"},
+ input: `
+FUNCTION_PAYLOAD UID 1 "fn1 arg1 arg2" 0xFFFF "method=api,role=test" application/json
+payload line1
+payload line2
+FUNCTION_PAYLOAD_END
+
+FUNCTION UID 1 "fn2 arg1 arg2" 0xFFFF "method=api,role=test"
+FUNCTION UID 1 "fn3 arg1 arg2" 0xFFFF "method=api,role=test"
+
+FUNCTION_PAYLOAD UID 1 "fn4 arg1 arg2" 0xFFFF "method=api,role=test" application/json
+payload line3
+payload line4
+FUNCTION_PAYLOAD_END
+`,
+ expected: []Function{
+ {
+ key: "FUNCTION_PAYLOAD",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn1",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "application/json",
+ Payload: []byte("payload line1\npayload line2"),
+ },
+ {
+ key: "FUNCTION",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn2",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "",
+ Payload: nil,
+ },
+ {
+ key: "FUNCTION",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn3",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "",
+ Payload: nil,
+ },
+ {
+ key: "FUNCTION_PAYLOAD",
+ UID: "UID",
+ Timeout: time.Second,
+ Name: "fn4",
+ Args: []string{"arg1", "arg2"},
+ Permissions: "0xFFFF",
+ Source: "method=api,role=test",
+ ContentType: "application/json",
+ Payload: []byte("payload line3\npayload line4"),
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mgr := NewManager()
+
+ mgr.input = newMockInput(test.input)
+
+ mock := &mockFunctionExecutor{}
+ for _, v := range test.register {
+ mgr.Register(v, mock.execute)
+ }
+
+ testTime := time.Second * 5
+ ctx, cancel := context.WithTimeout(context.Background(), testTime)
+ defer cancel()
+
+ done := make(chan struct{})
+
+ go func() { defer close(done); mgr.Run(ctx) }()
+
+ timeout := testTime + time.Second*2
+ tk := time.NewTimer(timeout)
+ defer tk.Stop()
+
+ select {
+ case <-done:
+ assert.Equal(t, test.expected, mock.executed)
+ case <-tk.C:
+ t.Errorf("timed out after %s", timeout)
+ }
+ })
+ }
+}
+
+type mockFunctionExecutor struct {
+ executed []Function
+}
+
+func (m *mockFunctionExecutor) execute(fn Function) {
+ m.executed = append(m.executed, fn)
+}
+
+func newMockInput(data string) *mockInput {
+ m := &mockInput{chLines: make(chan string)}
+ sc := bufio.NewScanner(strings.NewReader(data))
+ go func() {
+ for sc.Scan() {
+ m.chLines <- sc.Text()
+ }
+ close(m.chLines)
+ }()
+ return m
+}
+
+type mockInput struct {
+ chLines chan string
+}
+
+func (m *mockInput) lines() chan string {
+ return m.chLines
+}
diff --git a/src/go/plugin/go.d/agent/hostinfo/hostinfo.go b/src/go/plugin/go.d/agent/hostinfo/hostinfo.go
new file mode 100644
index 000000000..48508a1c8
--- /dev/null
+++ b/src/go/plugin/go.d/agent/hostinfo/hostinfo.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hostinfo
+
+import (
+ "bytes"
+ "context"
+ "os"
+ "os/exec"
+ "time"
+)
+
+var Hostname = getHostname()
+
+func getHostname() string {
+ path, err := exec.LookPath("hostname")
+ if err != nil {
+ return ""
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
+ defer cancel()
+
+ bs, err := exec.CommandContext(ctx, path).Output()
+ if err != nil {
+ return ""
+ }
+
+ return string(bytes.TrimSpace(bs))
+}
+
+var (
+ envKubeHost = os.Getenv("KUBERNETES_SERVICE_HOST")
+ envKubePort = os.Getenv("KUBERNETES_SERVICE_PORT")
+)
+
+func IsInsideK8sCluster() bool {
+ return envKubeHost != "" && envKubePort != ""
+}
diff --git a/src/go/plugin/go.d/agent/hostinfo/hostinfo_common.go b/src/go/plugin/go.d/agent/hostinfo/hostinfo_common.go
new file mode 100644
index 000000000..69bbf5c78
--- /dev/null
+++ b/src/go/plugin/go.d/agent/hostinfo/hostinfo_common.go
@@ -0,0 +1,7 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build !linux
+
+package hostinfo
+
+var SystemdVersion int
diff --git a/src/go/plugin/go.d/agent/hostinfo/hostinfo_linux.go b/src/go/plugin/go.d/agent/hostinfo/hostinfo_linux.go
new file mode 100644
index 000000000..db2005f00
--- /dev/null
+++ b/src/go/plugin/go.d/agent/hostinfo/hostinfo_linux.go
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+
+package hostinfo
+
+import (
+ "context"
+ "regexp"
+ "strconv"
+
+ "github.com/coreos/go-systemd/v22/dbus"
+)
+
+var SystemdVersion = getSystemdVersion()
+
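+// getSystemdVersion queries the systemd manager's "Version" property over D-Bus and returns the
+// first three-digit number found in it, or 0 if it cannot be determined.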
+func getSystemdVersion() int {
+ var reVersion = regexp.MustCompile(`[0-9][0-9][0-9]`)
+
+ conn, err := dbus.NewWithContext(context.Background())
+ if err != nil {
+ return 0
+ }
+ defer conn.Close()
+
+ version, err := conn.GetManagerProperty("Version")
+ if err != nil {
+ return 0
+ }
+
+ major := reVersion.FindString(version)
+ if major == "" {
+ return 0
+ }
+
+ ver, err := strconv.Atoi(major)
+ if err != nil {
+ return 0
+ }
+
+ return ver
+}
diff --git a/src/go/plugin/go.d/agent/jobmgr/cache.go b/src/go/plugin/go.d/agent/jobmgr/cache.go
new file mode 100644
index 000000000..8ea16ce96
--- /dev/null
+++ b/src/go/plugin/go.d/agent/jobmgr/cache.go
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+ "context"
+ "sync"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func newDiscoveredConfigsCache() *discoveredConfigs {
+ return &discoveredConfigs{
+ items: make(map[string]map[uint64]confgroup.Config),
+ }
+}
+
+func newSeenConfigCache() *seenConfigs {
+ return &seenConfigs{
+ items: make(map[string]*seenConfig),
+ }
+}
+
+func newExposedConfigCache() *exposedConfigs {
+ return &exposedConfigs{
+ items: make(map[string]*seenConfig),
+ }
+}
+
+func newRunningJobsCache() *runningJobs {
+ return &runningJobs{
+ mux: sync.Mutex{},
+ items: make(map[string]*module.Job),
+ }
+}
+
+func newRetryingTasksCache() *retryingTasks {
+ return &retryingTasks{
+ items: make(map[string]*retryTask),
+ }
+}
+
+type (
+ discoveredConfigs struct {
+ // [Source][Hash]
+ items map[string]map[uint64]confgroup.Config
+ }
+
+ seenConfigs struct {
+ // [cfg.UID()]
+ items map[string]*seenConfig
+ }
+ exposedConfigs struct {
+ // [cfg.FullName()]
+ items map[string]*seenConfig
+ }
+ seenConfig struct {
+ cfg confgroup.Config
+ status dyncfgStatus
+ }
+
+ runningJobs struct {
+ mux sync.Mutex
+ // [cfg.FullName()]
+ items map[string]*module.Job
+ }
+
+ retryingTasks struct {
+ // [cfg.UID()]
+ items map[string]*retryTask
+ }
+ retryTask struct {
+ cancel context.CancelFunc
+ }
+)
+
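+// add stores the group's configs under the group's source and reports which configs are new (added)
+// and which disappeared compared to the previous group from the same source (removed).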
+func (c *discoveredConfigs) add(group *confgroup.Group) (added, removed []confgroup.Config) {
+ cfgs, ok := c.items[group.Source]
+ if !ok {
+ if len(group.Configs) == 0 {
+ return nil, nil
+ }
+ cfgs = make(map[uint64]confgroup.Config)
+ c.items[group.Source] = cfgs
+ }
+
+ seen := make(map[uint64]bool)
+
+ for _, cfg := range group.Configs {
+ hash := cfg.Hash()
+ seen[hash] = true
+
+ if _, ok := cfgs[hash]; ok {
+ continue
+ }
+
+ cfgs[hash] = cfg
+ added = append(added, cfg)
+ }
+
+ for hash, cfg := range cfgs {
+ if !seen[hash] {
+ delete(cfgs, hash)
+ removed = append(removed, cfg)
+ }
+ }
+
+ if len(cfgs) == 0 {
+ delete(c.items, group.Source)
+ }
+
+ return added, removed
+}
+
+func (c *seenConfigs) add(sj *seenConfig) {
+ c.items[sj.cfg.UID()] = sj
+}
+func (c *seenConfigs) remove(cfg confgroup.Config) {
+ delete(c.items, cfg.UID())
+}
+func (c *seenConfigs) lookup(cfg confgroup.Config) (*seenConfig, bool) {
+ v, ok := c.items[cfg.UID()]
+ return v, ok
+}
+
+func (c *exposedConfigs) add(sj *seenConfig) {
+ c.items[sj.cfg.FullName()] = sj
+}
+func (c *exposedConfigs) remove(cfg confgroup.Config) {
+ delete(c.items, cfg.FullName())
+}
+func (c *exposedConfigs) lookup(cfg confgroup.Config) (*seenConfig, bool) {
+ v, ok := c.items[cfg.FullName()]
+ return v, ok
+}
+
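+// lookupByName finds an exposed config by module and job name, building the same key as
+// confgroup.Config.FullName() ("<module>_<job>", or just "<job>" when the two are equal).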
+func (c *exposedConfigs) lookupByName(module, job string) (*seenConfig, bool) {
+ key := module + "_" + job
+ if module == job {
+ key = job
+ }
+ v, ok := c.items[key]
+ return v, ok
+}
+
+func (c *runningJobs) lock() {
+ c.mux.Lock()
+}
+func (c *runningJobs) unlock() {
+ c.mux.Unlock()
+}
+func (c *runningJobs) add(fullName string, job *module.Job) {
+ c.items[fullName] = job
+}
+func (c *runningJobs) remove(fullName string) {
+ delete(c.items, fullName)
+}
+func (c *runningJobs) lookup(fullName string) (*module.Job, bool) {
+ j, ok := c.items[fullName]
+ return j, ok
+}
+func (c *runningJobs) forEach(fn func(fullName string, job *module.Job)) {
+ for k, j := range c.items {
+ fn(k, j)
+ }
+}
+
+func (c *retryingTasks) add(cfg confgroup.Config, retry *retryTask) {
+ c.items[cfg.UID()] = retry
+}
+func (c *retryingTasks) remove(cfg confgroup.Config) {
+ if v, ok := c.lookup(cfg); ok {
+ v.cancel()
+ }
+ delete(c.items, cfg.UID())
+}
+func (c *retryingTasks) lookup(cfg confgroup.Config) (*retryTask, bool) {
+ v, ok := c.items[cfg.UID()]
+ return v, ok
+}
diff --git a/src/go/plugin/go.d/agent/jobmgr/di.go b/src/go/plugin/go.d/agent/jobmgr/di.go
new file mode 100644
index 000000000..466fcdf90
--- /dev/null
+++ b/src/go/plugin/go.d/agent/jobmgr/di.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/functions"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes"
+)
+
+type FileLocker interface {
+ Lock(name string) (bool, error)
+ Unlock(name string)
+}
+
+type FileStatus interface {
+ Save(cfg confgroup.Config, state string)
+ Remove(cfg confgroup.Config)
+}
+
+type FileStatusStore interface {
+ Contains(cfg confgroup.Config, states ...string) bool
+}
+
+type Vnodes interface {
+ Lookup(key string) (*vnodes.VirtualNode, bool)
+}
+
+type FunctionRegistry interface {
+ Register(name string, reg func(functions.Function))
+ Unregister(name string)
+}
+
+type dyncfgAPI interface {
+ CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string)
+ CONFIGDELETE(id string)
+ CONFIGSTATUS(id, status string)
+ FUNCRESULT(uid, contentType, payload, code, expireTimestamp string)
+}
diff --git a/src/go/plugin/go.d/agent/jobmgr/dyncfg.go b/src/go/plugin/go.d/agent/jobmgr/dyncfg.go
new file mode 100644
index 000000000..da6d67489
--- /dev/null
+++ b/src/go/plugin/go.d/agent/jobmgr/dyncfg.go
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "log/slog"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+ "unicode"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/functions"
+
+ "gopkg.in/yaml.v2"
+)
+
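+// dyncfgStatus is the lifecycle state of a dyncfg job as reported to Netdata.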
+type dyncfgStatus int
+
+const (
+ _ dyncfgStatus = iota
+ dyncfgAccepted
+ dyncfgRunning
+ dyncfgFailed
+ dyncfgIncomplete
+ dyncfgDisabled
+)
+
+func (s dyncfgStatus) String() string {
+ switch s {
+ case dyncfgAccepted:
+ return "accepted"
+ case dyncfgRunning:
+ return "running"
+ case dyncfgFailed:
+ return "failed"
+ case dyncfgIncomplete:
+ return "incomplete"
+ case dyncfgDisabled:
+ return "disabled"
+ default:
+ return "unknown"
+ }
+}
+
+const (
+ dyncfgIDPrefix = "go.d:collector:"
+ dyncfgPath = "/collectors/jobs"
+)
+
+func dyncfgModID(name string) string {
+ return fmt.Sprintf("%s%s", dyncfgIDPrefix, name)
+}
+func dyncfgJobID(cfg confgroup.Config) string {
+ return fmt.Sprintf("%s%s:%s", dyncfgIDPrefix, cfg.Module(), cfg.Name())
+}
+
+func dyncfgModCmds() string {
+ return "add schema enable disable test userconfig"
+}
+func dyncfgJobCmds(cfg confgroup.Config) string {
+ cmds := "schema get enable disable update restart test userconfig"
+ if isDyncfg(cfg) {
+ cmds += " remove"
+ }
+ return cmds
+}
+
+func (m *Manager) dyncfgModuleCreate(name string) {
+ id := dyncfgModID(name)
+ path := dyncfgPath
+ cmds := dyncfgModCmds()
+ typ := "template"
+ src := "internal"
+ m.api.CONFIGCREATE(id, dyncfgAccepted.String(), typ, path, src, src, cmds)
+}
+
+func (m *Manager) dyncfgJobCreate(cfg confgroup.Config, status dyncfgStatus) {
+ id := dyncfgJobID(cfg)
+ path := dyncfgPath
+ cmds := dyncfgJobCmds(cfg)
+ typ := "job"
+ m.api.CONFIGCREATE(id, status.String(), typ, path, cfg.SourceType(), cfg.Source(), cmds)
+}
+
+func (m *Manager) dyncfgJobRemove(cfg confgroup.Config) {
+ m.api.CONFIGDELETE(dyncfgJobID(cfg))
+}
+
+func (m *Manager) dyncfgJobStatus(cfg confgroup.Config, status dyncfgStatus) {
+ m.api.CONFIGSTATUS(dyncfgJobID(cfg), status.String())
+}
+
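+// dyncfgConfig is the entry point for the "config" function: read-only actions (userconfig, test,
+// schema) are handled inline, while state-changing actions are forwarded to the manager loop via dyncfgCh.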
+func (m *Manager) dyncfgConfig(fn functions.Function) {
+ if len(fn.Args) < 2 {
+ m.Warningf("dyncfg: %s: missing required arguments, want 3 got %d", fn.Name, len(fn.Args))
+ m.dyncfgRespf(fn, 400, "Missing required arguments. Need at least 2, but got %d.", len(fn.Args))
+ return
+ }
+
+ select {
+ case <-m.ctx.Done():
+ m.dyncfgRespf(fn, 503, "Job manager is shutting down.")
+ return
+ default:
+ }
+
+ action := strings.ToLower(fn.Args[1])
+
+ switch action {
+ case "userconfig":
+ m.dyncfgConfigUserconfig(fn)
+ return
+ case "test":
+ m.dyncfgConfigTest(fn)
+ return
+ case "schema":
+ m.dyncfgConfigSchema(fn)
+ return
+ }
+
+ select {
+ case <-m.ctx.Done():
+ m.dyncfgRespf(fn, 503, "Job manager is shutting down.")
+ case m.dyncfgCh <- fn:
+ }
+}
+
+func (m *Manager) dyncfgConfigExec(fn functions.Function) {
+ action := strings.ToLower(fn.Args[1])
+
+ switch action {
+ case "test":
+ m.dyncfgConfigTest(fn)
+ case "schema":
+ m.dyncfgConfigSchema(fn)
+ case "get":
+ m.dyncfgConfigGet(fn)
+ case "restart":
+ m.dyncfgConfigRestart(fn)
+ case "enable":
+ m.dyncfgConfigEnable(fn)
+ case "disable":
+ m.dyncfgConfigDisable(fn)
+ case "add":
+ m.dyncfgConfigAdd(fn)
+ case "remove":
+ m.dyncfgConfigRemove(fn)
+ case "update":
+ m.dyncfgConfigUpdate(fn)
+ default:
+ m.Warningf("dyncfg: function '%s' not implemented", fn.String())
+ m.dyncfgRespf(fn, 501, "Function '%s' is not implemented.", fn.Name)
+ }
+}
+
+func (m *Manager) dyncfgConfigUserconfig(fn functions.Function) {
+ id := fn.Args[0]
+ jn := "test"
+ if len(fn.Args) > 2 {
+ jn = fn.Args[2]
+ }
+
+ mn, ok := extractModuleName(id)
+ if !ok {
+ m.Warningf("dyncfg: userconfig: could not extract module and job from id (%s)", id)
+ m.dyncfgRespf(fn, 400,
+ "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ creator, ok := m.Modules.Lookup(mn)
+ if !ok {
+ m.Warningf("dyncfg: userconfig: module %s not found", mn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn)
+ return
+ }
+
+ if creator.Config == nil || creator.Config() == nil {
+ m.Warningf("dyncfg: userconfig: module %s: configuration not found", mn)
+ m.dyncfgRespf(fn, 500, "Module %s does not provide configuration.", mn)
+ return
+ }
+
+ bs, err := userConfigFromPayload(creator.Config(), jn, fn)
+ if err != nil {
+ m.Warningf("dyncfg: userconfig: module %s: failed to create config from payload: %v", mn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration format. Failed to create configuration from payload: %v.", err)
+ }
+
+ m.dyncfgRespPayloadYAML(fn, string(bs))
+}
+
+func (m *Manager) dyncfgConfigTest(fn functions.Function) {
+ id := fn.Args[0]
+ mn, ok := extractModuleName(id)
+ if !ok {
+ m.Warningf("dyncfg: test: could not extract module and job from id (%s)", id)
+ m.dyncfgRespf(fn, 400,
+ "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ jn := "test"
+ if len(fn.Args) > 2 {
+ jn = fn.Args[2]
+ }
+
+ if err := validateJobName(jn); err != nil {
+ m.Warningf("dyncfg: test: module %s: unacceptable job name '%s': %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Unacceptable job name '%s': %v.", jn, err)
+ return
+ }
+
+ creator, ok := m.Modules.Lookup(mn)
+ if !ok {
+ m.Warningf("dyncfg: test: module %s not found", mn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn)
+ return
+ }
+
+ cfg, err := configFromPayload(fn)
+ if err != nil {
+ m.Warningf("dyncfg: test: module %s: failed to create config from payload: %v", mn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration format. Failed to create configuration from payload: %v.", err)
+ return
+ }
+
+ if cfg.Vnode() != "" {
+ if _, ok := m.Vnodes.Lookup(cfg.Vnode()); !ok {
+ m.Warningf("dyncfg: test: module %s: vnode %s not found", mn, cfg.Vnode())
+ m.dyncfgRespf(fn, 400, "The specified vnode '%s' is not registered.", cfg.Vnode())
+ return
+ }
+ }
+
+ cfg.SetModule(mn)
+ cfg.SetName(jn)
+
+ job := creator.Create()
+
+ if err := applyConfig(cfg, job); err != nil {
+ m.Warningf("dyncfg: test: module %s: failed to apply config: %v", mn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ return
+ }
+
+ job.GetBase().Logger = logger.New().With(
+ slog.String("collector", cfg.Module()),
+ slog.String("job", cfg.Name()),
+ )
+
+ defer job.Cleanup()
+
+ if err := job.Init(); err != nil {
+ m.dyncfgRespf(fn, 422, "Job initialization failed: %v", err)
+ return
+ }
+ if err := job.Check(); err != nil {
+ m.dyncfgRespf(fn, 422, "Job check failed: %v", err)
+ return
+ }
+
+ m.dyncfgRespf(fn, 200, "")
+}
+
+func (m *Manager) dyncfgConfigSchema(fn functions.Function) {
+ id := fn.Args[0]
+ mn, ok := extractModuleName(id)
+ if !ok {
+ m.Warningf("dyncfg: schema: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ mod, ok := m.Modules.Lookup(mn)
+ if !ok {
+ m.Warningf("dyncfg: schema: module %s not found", mn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn)
+ return
+ }
+
+ if mod.JobConfigSchema == "" {
+ m.Warningf("dyncfg: schema: module %s: schema not found", mn)
+ m.dyncfgRespf(fn, 500, "Module %s configuration schema not found.", mn)
+ return
+ }
+
+ m.dyncfgRespPayloadJSON(fn, mod.JobConfigSchema)
+}
+
+func (m *Manager) dyncfgConfigGet(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: get: could not extract module and job from id (%s)", id)
+ m.dyncfgRespf(fn, 400,
+ "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ creator, ok := m.Modules.Lookup(mn)
+ if !ok {
+ m.Warningf("dyncfg: get: module %s not found", mn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: get: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ mod := creator.Create()
+
+ if err := applyConfig(ecfg.cfg, mod); err != nil {
+ m.Warningf("dyncfg: get: module %s job %s failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ return
+ }
+
+ conf := mod.Configuration()
+ if conf == nil {
+ m.Warningf("dyncfg: get: module %s: configuration not found", mn)
+ m.dyncfgRespf(fn, 500, "Module %s does not provide configuration.", mn)
+ return
+ }
+
+ bs, err := json.Marshal(conf)
+ if err != nil {
+ m.Warningf("dyncfg: get: module %s job %s failed to json marshal config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 500, "Failed to convert configuration into JSON: %v.", err)
+ return
+ }
+
+ m.dyncfgRespPayloadJSON(fn, string(bs))
+}
+
+func (m *Manager) dyncfgConfigRestart(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: restart: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: restart: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ job, err := m.createCollectorJob(ecfg.cfg)
+ if err != nil {
+ m.Warningf("dyncfg: restart: module %s job %s: failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ switch ecfg.status {
+ case dyncfgAccepted, dyncfgDisabled:
+ m.Warningf("dyncfg: restart: module %s job %s: restarting not allowed in '%s' state", mn, jn, ecfg.status)
+ m.dyncfgRespf(fn, 405, "Restarting data collection job is not allowed in '%s' state.", ecfg.status)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ case dyncfgRunning:
+ m.FileStatus.Remove(ecfg.cfg)
+ m.FileLock.Unlock(ecfg.cfg.FullName())
+ m.stopRunningJob(ecfg.cfg.FullName())
+ default:
+ }
+
+ if err := job.AutoDetection(); err != nil {
+ job.Cleanup()
+ ecfg.status = dyncfgFailed
+ m.dyncfgRespf(fn, 422, "Job restart failed: %v", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ if ok, err := m.FileLock.Lock(ecfg.cfg.FullName()); !ok && err == nil {
+ job.Cleanup()
+ ecfg.status = dyncfgFailed
+ m.dyncfgRespf(fn, 500, "Job restart failed: cannot filelock.")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ ecfg.status = dyncfgRunning
+
+ if isDyncfg(ecfg.cfg) {
+ m.FileStatus.Save(ecfg.cfg, ecfg.status.String())
+ }
+ m.startRunningJob(job)
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+}
+
+func (m *Manager) dyncfgConfigEnable(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: enable: could not extract module and job from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: enable: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ if ecfg.cfg.FullName() == m.waitCfgOnOff {
+ m.waitCfgOnOff = ""
+ }
+
+ switch ecfg.status {
+ case dyncfgAccepted, dyncfgDisabled, dyncfgFailed:
+ case dyncfgRunning:
+ // non-dyncfg update triggers enable/disable
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ default:
+ m.Warningf("dyncfg: enable: module %s job %s: enabling not allowed in %s state", mn, jn, ecfg.status)
+ m.dyncfgRespf(fn, 405, "Enabling data collection job is not allowed in '%s' state.", ecfg.status)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ job, err := m.createCollectorJob(ecfg.cfg)
+ if err != nil {
+ ecfg.status = dyncfgFailed
+ m.Warningf("dyncfg: enable: module %s job %s: failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ if err := job.AutoDetection(); err != nil {
+ job.Cleanup()
+ ecfg.status = dyncfgFailed
+ m.dyncfgRespf(fn, 200, "Job enable failed: %v.", err)
+
+ if isStock(ecfg.cfg) {
+ m.exposedConfigs.remove(ecfg.cfg)
+ m.dyncfgJobRemove(ecfg.cfg)
+ } else {
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ }
+
+ if job.RetryAutoDetection() && !isDyncfg(ecfg.cfg) {
+ m.Infof("%s[%s] job detection failed, will retry in %d seconds",
+ ecfg.cfg.Module(), ecfg.cfg.Name(), job.AutoDetectionEvery())
+
+ ctx, cancel := context.WithCancel(m.ctx)
+ m.retryingTasks.add(ecfg.cfg, &retryTask{cancel: cancel})
+ go runRetryTask(ctx, m.addCh, ecfg.cfg)
+ }
+ return
+ }
+
+ if ok, err := m.FileLock.Lock(ecfg.cfg.FullName()); !ok && err == nil {
+ job.Cleanup()
+ ecfg.status = dyncfgFailed
+ m.dyncfgRespf(fn, 500, "Job enable failed: can not filelock.")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ ecfg.status = dyncfgRunning
+
+ if isDyncfg(ecfg.cfg) {
+ m.FileStatus.Save(ecfg.cfg, ecfg.status.String())
+ }
+
+ m.startRunningJob(job)
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+
+}
+
+func (m *Manager) dyncfgConfigDisable(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: disable: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: disable: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ if ecfg.cfg.FullName() == m.waitCfgOnOff {
+ m.waitCfgOnOff = ""
+ }
+
+ switch ecfg.status {
+ case dyncfgDisabled:
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ case dyncfgRunning:
+ m.stopRunningJob(ecfg.cfg.FullName())
+ if isDyncfg(ecfg.cfg) {
+ m.FileStatus.Remove(ecfg.cfg)
+ }
+ m.FileLock.Unlock(ecfg.cfg.FullName())
+ default:
+ }
+
+ ecfg.status = dyncfgDisabled
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+}
+
+func (m *Manager) dyncfgConfigAdd(fn functions.Function) {
+ if len(fn.Args) < 3 {
+ m.Warningf("dyncfg: add: missing required arguments, want 3 got %d", len(fn.Args))
+ m.dyncfgRespf(fn, 400, "Missing required arguments. Need at least 3, but got %d.", len(fn.Args))
+ return
+ }
+
+ id := fn.Args[0]
+ jn := fn.Args[2]
+ mn, ok := extractModuleName(id)
+ if !ok {
+ m.Warningf("dyncfg: add: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ if len(fn.Payload) == 0 {
+ m.Warningf("dyncfg: add: module %s job %s missing configuration payload.", mn, jn)
+ m.dyncfgRespf(fn, 400, "Missing configuration payload.")
+ return
+ }
+
+ if err := validateJobName(jn); err != nil {
+ m.Warningf("dyncfg: add: module %s: unacceptable job name '%s': %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Unacceptable job name '%s': %v.", jn, err)
+ return
+ }
+
+ cfg, err := configFromPayload(fn)
+ if err != nil {
+ m.Warningf("dyncfg: add: module %s job %s: failed to create config from payload: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration format. Failed to create configuration from payload: %v.", err)
+ return
+ }
+
+ m.dyncfgSetConfigMeta(cfg, mn, jn)
+
+ if _, err := m.createCollectorJob(cfg); err != nil {
+ m.Warningf("dyncfg: add: module %s job %s: failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ return
+ }
+
+ if ecfg, ok := m.exposedConfigs.lookup(cfg); ok {
+ if scfg, ok := m.seenConfigs.lookup(ecfg.cfg); ok && isDyncfg(scfg.cfg) {
+ m.seenConfigs.remove(ecfg.cfg)
+ }
+ m.exposedConfigs.remove(ecfg.cfg)
+ m.stopRunningJob(ecfg.cfg.FullName())
+ }
+
+ scfg := &seenConfig{cfg: cfg, status: dyncfgAccepted}
+ ecfg := scfg
+ m.seenConfigs.add(scfg)
+ m.exposedConfigs.add(ecfg)
+
+ m.dyncfgRespf(fn, 202, "")
+ m.dyncfgJobCreate(ecfg.cfg, ecfg.status)
+}
+
+func (m *Manager) dyncfgConfigRemove(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: remove: could not extract module and job from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: remove: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ if !isDyncfg(ecfg.cfg) {
+ m.Warningf("dyncfg: remove: module %s job %s: can not remove jobs of type %s", mn, jn, ecfg.cfg.SourceType())
+ m.dyncfgRespf(fn, 405, "Removing jobs of type '%s' is not supported. Only 'dyncfg' jobs can be removed.", ecfg.cfg.SourceType())
+ return
+ }
+
+ m.seenConfigs.remove(ecfg.cfg)
+ m.exposedConfigs.remove(ecfg.cfg)
+ m.stopRunningJob(ecfg.cfg.FullName())
+ m.FileLock.Unlock(ecfg.cfg.FullName())
+ m.FileStatus.Remove(ecfg.cfg)
+
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobRemove(ecfg.cfg)
+}
+
+func (m *Manager) dyncfgConfigUpdate(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: update: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: update: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ cfg, err := configFromPayload(fn)
+ if err != nil {
+ m.Warningf("dyncfg: update: module %s: failed to create config from payload: %v", mn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration format. Failed to create configuration from payload: %v.", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ m.dyncfgSetConfigMeta(cfg, mn, jn)
+
+ if ecfg.status == dyncfgRunning && ecfg.cfg.UID() == cfg.UID() {
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ job, err := m.createCollectorJob(cfg)
+ if err != nil {
+ m.Warningf("dyncfg: update: module %s job %s: failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ if ecfg.status == dyncfgAccepted {
+ m.Warningf("dyncfg: update: module %s job %s: updating not allowed in %s", mn, jn, ecfg.status)
+ m.dyncfgRespf(fn, 403, "Updating data collection job is not allowed in '%s' state.", ecfg.status)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ m.exposedConfigs.remove(ecfg.cfg)
+ m.stopRunningJob(ecfg.cfg.FullName())
+
+ scfg := &seenConfig{cfg: cfg, status: dyncfgAccepted}
+ m.seenConfigs.add(scfg)
+ m.exposedConfigs.add(scfg)
+
+ if isDyncfg(ecfg.cfg) {
+ m.seenConfigs.remove(ecfg.cfg)
+ } else {
+ // needed to update meta. There is no other way, unfortunately, but to send "create"
+ defer m.dyncfgJobCreate(scfg.cfg, scfg.status)
+ }
+
+ if ecfg.status == dyncfgDisabled {
+ scfg.status = dyncfgDisabled
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(cfg, scfg.status)
+ return
+ }
+
+ if err := job.AutoDetection(); err != nil {
+ job.Cleanup()
+ scfg.status = dyncfgFailed
+ m.dyncfgRespf(fn, 200, "Job update failed: %v", err)
+ m.dyncfgJobStatus(scfg.cfg, scfg.status)
+ return
+ }
+
+ if ok, err := m.FileLock.Lock(scfg.cfg.FullName()); !ok && err == nil {
+ job.Cleanup()
+ scfg.status = dyncfgFailed
+ m.dyncfgRespf(fn, 500, "Job update failed: cannot create file lock.")
+ m.dyncfgJobStatus(scfg.cfg, scfg.status)
+ return
+ }
+
+ scfg.status = dyncfgRunning
+ m.startRunningJob(job)
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(scfg.cfg, scfg.status)
+}
+
+func (m *Manager) dyncfgSetConfigMeta(cfg confgroup.Config, module, name string) {
+ cfg.SetProvider("dyncfg")
+ cfg.SetSource(fmt.Sprintf("type=dyncfg,module=%s,job=%s", module, name))
+ cfg.SetSourceType("dyncfg")
+ cfg.SetModule(module)
+ cfg.SetName(name)
+ if def, ok := m.ConfigDefaults.Lookup(module); ok {
+ cfg.ApplyDefaults(def)
+ }
+}
+
+func (m *Manager) dyncfgRespPayloadJSON(fn functions.Function, payload string) {
+ m.dyncfgRespPayload(fn, payload, "application/json")
+}
+
+func (m *Manager) dyncfgRespPayloadYAML(fn functions.Function, payload string) {
+ m.dyncfgRespPayload(fn, payload, "application/yaml")
+}
+
+func (m *Manager) dyncfgRespPayload(fn functions.Function, payload string, contentType string) {
+ ts := strconv.FormatInt(time.Now().Unix(), 10)
+ m.api.FUNCRESULT(fn.UID, contentType, payload, "200", ts)
+}
+
+func (m *Manager) dyncfgRespf(fn functions.Function, code int, msgf string, a ...any) {
+ if fn.UID == "" {
+ return
+ }
+ bs, _ := json.Marshal(struct {
+ Status int `json:"status"`
+ Message string `json:"message"`
+ }{
+ Status: code,
+ Message: fmt.Sprintf(msgf, a...),
+ })
+ ts := strconv.FormatInt(time.Now().Unix(), 10)
+ m.api.FUNCRESULT(fn.UID, "application/json", string(bs), strconv.Itoa(code), ts)
+}
+
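+// userConfigFromPayload unmarshals the dyncfg payload into the module's config type and re-renders
+// it as a go.d user configuration fragment: a "jobs" list with a single entry whose "name" is jobName.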
+func userConfigFromPayload(cfg any, jobName string, fn functions.Function) ([]byte, error) {
+ if v := reflect.ValueOf(cfg); v.Kind() != reflect.Ptr || v.IsNil() {
+ return nil, fmt.Errorf("invalid config: expected a pointer to a struct, got a %s", v.Type())
+ }
+
+ if fn.ContentType == "application/json" {
+ if err := json.Unmarshal(fn.Payload, cfg); err != nil {
+ return nil, err
+ }
+ } else {
+ if err := yaml.Unmarshal(fn.Payload, cfg); err != nil {
+ return nil, err
+ }
+ }
+
+ bs, err := yaml.Marshal(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ var yms yaml.MapSlice
+ if err := yaml.Unmarshal(bs, &yms); err != nil {
+ return nil, err
+ }
+
+ yms = append([]yaml.MapItem{{Key: "name", Value: jobName}}, yms...)
+
+ v := map[string]any{
+ "jobs": []any{yms},
+ }
+
+ bs, err = yaml.Marshal(v)
+ if err != nil {
+ return nil, err
+ }
+
+ return bs, nil
+}
+
+func configFromPayload(fn functions.Function) (confgroup.Config, error) {
+ var cfg confgroup.Config
+
+ if fn.ContentType == "application/json" {
+ if err := json.Unmarshal(fn.Payload, &cfg); err != nil {
+ return nil, err
+ }
+
+ return cfg.Clone()
+ }
+
+ if err := yaml.Unmarshal(fn.Payload, &cfg); err != nil {
+ return nil, err
+ }
+
+ return cfg, nil
+}
+
+func extractModuleJobName(id string) (mn string, jn string, ok bool) {
+ if mn, ok = extractModuleName(id); !ok {
+ return "", "", false
+ }
+ if jn, ok = extractJobName(id); !ok {
+ return "", "", false
+ }
+ return mn, jn, true
+}
+
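+// extractModuleName extracts the module part from a dyncfg ID of the form "go.d:collector:<module>[:<job>]".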
+func extractModuleName(id string) (string, bool) {
+ id = strings.TrimPrefix(id, dyncfgIDPrefix)
+ i := strings.IndexByte(id, ':')
+ if i == -1 {
+ return id, id != ""
+ }
+ return id[:i], true
+}
+
+func extractJobName(id string) (string, bool) {
+ i := strings.LastIndexByte(id, ':')
+ if i == -1 {
+ return "", false
+ }
+ return id[i+1:], true
+}
+
+func validateJobName(jobName string) error {
+ for _, r := range jobName {
+ if unicode.IsSpace(r) {
+ return errors.New("contains spaces")
+ }
+ switch r {
+ case '.', ':':
+ return fmt.Errorf("contains '%c'", r)
+ }
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/agent/jobmgr/manager.go b/src/go/plugin/go.d/agent/jobmgr/manager.go
new file mode 100644
index 000000000..59947be77
--- /dev/null
+++ b/src/go/plugin/go.d/agent/jobmgr/manager.go
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/functions"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/ticker"
+
+ "github.com/mattn/go-isatty"
+ "gopkg.in/yaml.v2"
+)
+
+var isTerminal = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsTerminal(os.Stdin.Fd())
+
+func New() *Manager {
+ mgr := &Manager{
+ Logger: logger.New().With(
+ slog.String("component", "job manager"),
+ ),
+ Out: io.Discard,
+ FileLock: noop{},
+ FileStatus: noop{},
+ FileStatusStore: noop{},
+ Vnodes: noop{},
+ FnReg: noop{},
+
+ discoveredConfigs: newDiscoveredConfigsCache(),
+ seenConfigs: newSeenConfigCache(),
+ exposedConfigs: newExposedConfigCache(),
+ runningJobs: newRunningJobsCache(),
+ retryingTasks: newRetryingTasksCache(),
+
+ started: make(chan struct{}),
+ api: netdataapi.New(safewriter.Stdout),
+ addCh: make(chan confgroup.Config),
+ rmCh: make(chan confgroup.Config),
+ dyncfgCh: make(chan functions.Function),
+ }
+
+ return mgr
+}
+
+type Manager struct {
+ *logger.Logger
+
+ PluginName string
+ Out io.Writer
+ Modules module.Registry
+ ConfigDefaults confgroup.Registry
+
+ FileLock FileLocker
+ FileStatus FileStatus
+ FileStatusStore FileStatusStore
+ Vnodes Vnodes
+ FnReg FunctionRegistry
+
+ discoveredConfigs *discoveredConfigs
+ seenConfigs *seenConfigs
+ exposedConfigs *exposedConfigs
+ retryingTasks *retryingTasks
+ runningJobs *runningJobs
+
+ ctx context.Context
+ started chan struct{}
+ api dyncfgAPI
+ addCh chan confgroup.Config
+ rmCh chan confgroup.Config
+ dyncfgCh chan functions.Function
+
+ waitCfgOnOff string // block processing of discovered configs until "enable"/"disable" is received from Netdata
+}
+
+func (m *Manager) Run(ctx context.Context, in chan []*confgroup.Group) {
+ m.Info("instance is started")
+ defer func() { m.cleanup(); m.Info("instance is stopped") }()
+ m.ctx = ctx
+
+ m.FnReg.Register("config", m.dyncfgConfig)
+
+ for name := range m.Modules {
+ m.dyncfgModuleCreate(name)
+ }
+
+ var wg sync.WaitGroup
+
+ wg.Add(1)
+ go func() { defer wg.Done(); m.runProcessConfGroups(in) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); m.run() }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); m.runNotifyRunningJobs() }()
+
+ close(m.started)
+
+ wg.Wait()
+ <-m.ctx.Done()
+}
+
+func (m *Manager) runProcessConfGroups(in chan []*confgroup.Group) {
+ for {
+ select {
+ case <-m.ctx.Done():
+ return
+ case groups := <-in:
+ for _, gr := range groups {
+ a, r := m.discoveredConfigs.add(gr)
+ m.Debugf("received configs: %d/+%d/-%d ('%s')", len(gr.Configs), len(a), len(r), gr.Source)
+ sendConfigs(m.ctx, m.rmCh, r...)
+ sendConfigs(m.ctx, m.addCh, a...)
+ }
+ }
+ }
+}
+
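+// run is the manager's main processing loop. While waitCfgOnOff is set (waiting for Netdata to
+// enable or disable the just-exposed config), only dyncfg commands are processed; otherwise
+// discovered config additions and removals are handled as well.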
+func (m *Manager) run() {
+ for {
+ if m.waitCfgOnOff != "" {
+ select {
+ case <-m.ctx.Done():
+ return
+ case fn := <-m.dyncfgCh:
+ m.dyncfgConfigExec(fn)
+ }
+ } else {
+ select {
+ case <-m.ctx.Done():
+ return
+ case cfg := <-m.addCh:
+ m.addConfig(cfg)
+ case cfg := <-m.rmCh:
+ m.removeConfig(cfg)
+ case fn := <-m.dyncfgCh:
+ m.dyncfgConfigExec(fn)
+ }
+ }
+ }
+}
+
+func (m *Manager) addConfig(cfg confgroup.Config) {
+ if _, ok := m.Modules.Lookup(cfg.Module()); !ok {
+ return
+ }
+
+ m.retryingTasks.remove(cfg)
+
+ scfg, ok := m.seenConfigs.lookup(cfg)
+ if !ok {
+ scfg = &seenConfig{cfg: cfg}
+ m.seenConfigs.add(scfg)
+ }
+
+ ecfg, ok := m.exposedConfigs.lookup(cfg)
+ if !ok {
+ scfg.status = dyncfgAccepted
+ ecfg = scfg
+ m.exposedConfigs.add(ecfg)
+ } else {
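+ // the job is already exposed: the existing config wins if its source type has higher priority,
+ // or equal priority while it is running; otherwise the newly seen config replaces it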
+ sp, ep := scfg.cfg.SourceTypePriority(), ecfg.cfg.SourceTypePriority()
+ if ep > sp || (ep == sp && ecfg.status == dyncfgRunning) {
+ m.retryingTasks.remove(cfg)
+ return
+ }
+ if ecfg.status == dyncfgRunning {
+ m.stopRunningJob(ecfg.cfg.FullName())
+ m.FileLock.Unlock(ecfg.cfg.FullName())
+ m.FileStatus.Remove(ecfg.cfg)
+ }
+ scfg.status = dyncfgAccepted
+ m.exposedConfigs.add(scfg) // replace existing exposed
+ ecfg = scfg
+ }
+
+ m.dyncfgJobCreate(ecfg.cfg, ecfg.status)
+
+ if isTerminal || m.PluginName == "nodyncfg" { // FIXME: quick fix of TestAgent_Run (agent_test.go)
+ m.dyncfgConfigEnable(functions.Function{Args: []string{dyncfgJobID(ecfg.cfg), "enable"}})
+ } else {
+ m.waitCfgOnOff = ecfg.cfg.FullName()
+ }
+}
+
+func (m *Manager) removeConfig(cfg confgroup.Config) {
+ m.retryingTasks.remove(cfg)
+
+ scfg, ok := m.seenConfigs.lookup(cfg)
+ if !ok {
+ return
+ }
+ m.seenConfigs.remove(cfg)
+
+ ecfg, ok := m.exposedConfigs.lookup(cfg)
+ if !ok || scfg.cfg.UID() != ecfg.cfg.UID() {
+ return
+ }
+
+ m.exposedConfigs.remove(cfg)
+ m.stopRunningJob(cfg.FullName())
+ m.FileLock.Unlock(cfg.FullName())
+ m.FileStatus.Remove(cfg)
+
+ if !isStock(cfg) || ecfg.status == dyncfgRunning {
+ m.dyncfgJobRemove(cfg)
+ }
+}
+
+func (m *Manager) runNotifyRunningJobs() {
+ tk := ticker.New(time.Second)
+ defer tk.Stop()
+
+ for {
+ select {
+ case <-m.ctx.Done():
+ return
+ case clock := <-tk.C:
+ m.runningJobs.lock()
+ m.runningJobs.forEach(func(_ string, job *module.Job) { job.Tick(clock) })
+ m.runningJobs.unlock()
+ }
+ }
+}
+
+func (m *Manager) startRunningJob(job *module.Job) {
+ m.runningJobs.lock()
+ defer m.runningJobs.unlock()
+
+ if job, ok := m.runningJobs.lookup(job.FullName()); ok {
+ job.Stop()
+ }
+
+ go job.Start()
+ m.runningJobs.add(job.FullName(), job)
+}
+
+func (m *Manager) stopRunningJob(name string) {
+ m.runningJobs.lock()
+ defer m.runningJobs.unlock()
+
+ if job, ok := m.runningJobs.lookup(name); ok {
+ job.Stop()
+ m.runningJobs.remove(name)
+ }
+}
+
+func (m *Manager) cleanup() {
+ m.FnReg.Unregister("config")
+
+ m.runningJobs.lock()
+ defer m.runningJobs.unlock()
+
+ m.runningJobs.forEach(func(key string, job *module.Job) {
+ job.Stop()
+ })
+}
+
+func (m *Manager) createCollectorJob(cfg confgroup.Config) (*module.Job, error) {
+ creator, ok := m.Modules[cfg.Module()]
+ if !ok {
+ return nil, fmt.Errorf("can not find %s module", cfg.Module())
+ }
+
+ var vnode struct {
+ guid string
+ hostname string
+ labels map[string]string
+ }
+
+ if cfg.Vnode() != "" {
+ n, ok := m.Vnodes.Lookup(cfg.Vnode())
+ if !ok {
+ return nil, fmt.Errorf("vnode '%s' is not found", cfg.Vnode())
+ }
+
+ vnode.guid = n.GUID
+ vnode.hostname = n.Hostname
+ vnode.labels = n.Labels
+ }
+
+ m.Debugf("creating %s[%s] job, config: %v", cfg.Module(), cfg.Name(), cfg)
+
+ mod := creator.Create()
+
+ if err := applyConfig(cfg, mod); err != nil {
+ return nil, err
+ }
+
+ jobCfg := module.JobConfig{
+ PluginName: m.PluginName,
+ Name: cfg.Name(),
+ ModuleName: cfg.Module(),
+ FullName: cfg.FullName(),
+ UpdateEvery: cfg.UpdateEvery(),
+ AutoDetectEvery: cfg.AutoDetectionRetry(),
+ Priority: cfg.Priority(),
+ Labels: makeLabels(cfg),
+ IsStock: cfg.SourceType() == "stock",
+ Module: mod,
+ Out: m.Out,
+ VnodeGUID: vnode.guid,
+ VnodeHostname: vnode.hostname,
+ VnodeLabels: vnode.labels,
+ }
+
+ job := module.NewJob(jobCfg)
+
+ return job, nil
+}
+
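+// runRetryTask waits for the config's auto-detection retry interval and then re-sends the config
+// to the add channel, unless the context is cancelled first.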
+func runRetryTask(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config) {
+ t := time.NewTimer(time.Second * time.Duration(cfg.AutoDetectionRetry()))
+ defer t.Stop()
+
+ select {
+ case <-ctx.Done():
+ case <-t.C:
+ sendConfigs(ctx, out, cfg)
+ }
+}
+
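+// sendConfigs writes the given configs to the output channel, returning early
+// if the context is cancelled.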
+func sendConfigs(ctx context.Context, out chan<- confgroup.Config, cfgs ...confgroup.Config) {
+ for _, cfg := range cfgs {
+ select {
+ case <-ctx.Done():
+ return
+ case out <- cfg:
+ }
+ }
+}
+
+func isStock(cfg confgroup.Config) bool {
+ return cfg.SourceType() == confgroup.TypeStock
+}
+
+func isDyncfg(cfg confgroup.Config) bool {
+ return cfg.SourceType() == confgroup.TypeDyncfg
+}
+
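+// applyConfig copies the config values into the module by marshalling the
+// config to YAML and unmarshalling the result into the module instance.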
+func applyConfig(cfg confgroup.Config, module any) error {
+ bs, err := yaml.Marshal(cfg)
+ if err != nil {
+ return err
+ }
+ return yaml.Unmarshal(bs, module)
+}
+
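+// makeLabels converts the config labels into a map[string]string, skipping
+// entries whose key or value is not a string.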
+func makeLabels(cfg confgroup.Config) map[string]string {
+ labels := make(map[string]string)
+ for name, value := range cfg.Labels() {
+ n, ok1 := name.(string)
+ v, ok2 := value.(string)
+ if ok1 && ok2 {
+ labels[n] = v
+ }
+ }
+ return labels
+}
diff --git a/src/go/plugin/go.d/agent/jobmgr/manager_test.go b/src/go/plugin/go.d/agent/jobmgr/manager_test.go
new file mode 100644
index 000000000..1b55a8308
--- /dev/null
+++ b/src/go/plugin/go.d/agent/jobmgr/manager_test.go
@@ -0,0 +1,1892 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+ "encoding/json"
+ "fmt"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/functions"
+)
+
+func TestManager_Run(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "stock => ok: add and remove": {
+ createSim: func() *runSim {
+ cfg := prepareStockCfg("success", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, cfg.Source(), cfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+
+ sendConfGroup(in, cfg.Source())
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:success:name create accepted job /collectors/jobs stock 'type=stock,module=success,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:name status running
+
+CONFIG go.d:collector:success:name delete
+`,
+ }
+ },
+ },
+ "stock => nok: add": {
+ createSim: func() *runSim {
+ cfg := prepareStockCfg("fail", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, cfg.Source(), cfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: []confgroup.Config{cfg},
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgFailed},
+ },
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs stock 'type=stock,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name delete
+`,
+ }
+ },
+ },
+ "stock => nok: add and remove": {
+ createSim: func() *runSim {
+ cfg := prepareStockCfg("fail", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, cfg.Source(), cfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+
+ sendConfGroup(in, cfg.Source())
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs stock 'type=stock,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name delete
+`,
+ }
+ },
+ },
+ "user => ok: add and remove": {
+ createSim: func() *runSim {
+ cfg := prepareUserCfg("success", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, cfg.Source(), cfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+
+ sendConfGroup(in, cfg.Source())
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:success:name create accepted job /collectors/jobs user 'type=user,module=success,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:name status running
+
+CONFIG go.d:collector:success:name delete
+ `,
+ }
+ },
+ },
+ "user => nok: add and remove": {
+ createSim: func() *runSim {
+ cfg := prepareUserCfg("fail", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, cfg.Source(), cfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+
+ sendConfGroup(in, cfg.Source())
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs user 'type=user,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name status failed
+
+CONFIG go.d:collector:fail:name delete
+ `,
+ }
+ },
+ },
+ "disc => ok: add and remove": {
+ createSim: func() *runSim {
+ cfg := prepareDiscoveredCfg("success", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, cfg.Source(), cfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+
+ sendConfGroup(in, cfg.Source())
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:success:name create accepted job /collectors/jobs discovered 'type=discovered,module=success,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:name status running
+
+CONFIG go.d:collector:success:name delete
+ `,
+ }
+ },
+ },
+ "disc => nok: add and remove": {
+ createSim: func() *runSim {
+ cfg := prepareDiscoveredCfg("fail", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, cfg.Source(), cfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+
+ sendConfGroup(in, cfg.Source())
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs discovered 'type=discovered,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name status failed
+
+CONFIG go.d:collector:fail:name delete
+ `,
+ }
+ },
+ },
+ "non-dyncfg => nok: diff src, diff name: add": {
+ createSim: func() *runSim {
+ stockCfg := prepareStockCfg("fail", "stock")
+ discCfg := prepareDiscoveredCfg("fail", "discovered")
+ userCfg := prepareUserCfg("fail", "user")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, stockCfg.Source(), stockCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(stockCfg), "enable"},
+ })
+
+ sendConfGroup(in, discCfg.Source(), discCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(discCfg), "enable"},
+ })
+
+ sendConfGroup(in, userCfg.Source(), userCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-enable",
+ Args: []string{dyncfgJobID(userCfg), "enable"},
+ })
+ },
+ wantDiscovered: []confgroup.Config{
+ stockCfg,
+ userCfg,
+ discCfg,
+ },
+ wantSeen: []seenConfig{
+ {cfg: stockCfg, status: dyncfgFailed},
+ {cfg: discCfg, status: dyncfgFailed},
+ {cfg: userCfg, status: dyncfgFailed},
+ },
+ wantExposed: []seenConfig{
+ {cfg: discCfg, status: dyncfgFailed},
+ {cfg: userCfg, status: dyncfgFailed},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:fail:stock create accepted job /collectors/jobs stock 'type=stock,module=fail,job=stock' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:stock delete
+
+CONFIG go.d:collector:fail:discovered create accepted job /collectors/jobs discovered 'type=discovered,module=fail,job=discovered' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:discovered status failed
+
+CONFIG go.d:collector:fail:user create accepted job /collectors/jobs user 'type=user,module=fail,job=user' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 3-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:user status failed
+ `,
+ }
+ },
+ },
+ "non-dyncfg => nok: diff src,src prio asc,same name: add": {
+ createSim: func() *runSim {
+ stockCfg := prepareStockCfg("fail", "name")
+ discCfg := prepareDiscoveredCfg("fail", "name")
+ userCfg := prepareUserCfg("fail", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, stockCfg.Source(), stockCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(stockCfg), "enable"},
+ })
+
+ sendConfGroup(in, discCfg.Source(), discCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(discCfg), "enable"},
+ })
+
+ sendConfGroup(in, userCfg.Source(), userCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-enable",
+ Args: []string{dyncfgJobID(userCfg), "enable"},
+ })
+ },
+ wantDiscovered: []confgroup.Config{
+ stockCfg,
+ userCfg,
+ discCfg,
+ },
+ wantSeen: []seenConfig{
+ {cfg: stockCfg, status: dyncfgFailed},
+ {cfg: discCfg, status: dyncfgFailed},
+ {cfg: userCfg, status: dyncfgFailed},
+ },
+ wantExposed: []seenConfig{
+ {cfg: userCfg, status: dyncfgFailed},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs stock 'type=stock,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name delete
+
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs discovered 'type=discovered,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name status failed
+
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs user 'type=user,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 3-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name status failed
+ `,
+ }
+ },
+ },
+ "non-dyncfg => nok: diff src,src prio asc,same name: add and remove": {
+ createSim: func() *runSim {
+ stockCfg := prepareStockCfg("fail", "name")
+ discCfg := prepareDiscoveredCfg("fail", "name")
+ userCfg := prepareUserCfg("fail", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, stockCfg.Source(), stockCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(stockCfg), "enable"},
+ })
+
+ sendConfGroup(in, discCfg.Source(), discCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(discCfg), "enable"},
+ })
+
+ sendConfGroup(in, userCfg.Source(), userCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-enable",
+ Args: []string{dyncfgJobID(userCfg), "enable"},
+ })
+
+ sendConfGroup(in, stockCfg.Source())
+ sendConfGroup(in, discCfg.Source())
+ sendConfGroup(in, userCfg.Source())
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs stock 'type=stock,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name delete
+
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs discovered 'type=discovered,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name status failed
+
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs user 'type=user,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 3-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name status failed
+
+CONFIG go.d:collector:fail:name delete
+ `,
+ }
+ },
+ },
+ "non-dyncfg => nok: diff src,src prio desc,same name: add": {
+ createSim: func() *runSim {
+ userCfg := prepareUserCfg("fail", "name")
+ discCfg := prepareDiscoveredCfg("fail", "name")
+ stockCfg := prepareStockCfg("fail", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, userCfg.Source(), userCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(userCfg), "enable"},
+ })
+
+ sendConfGroup(in, discCfg.Source(), discCfg)
+ sendConfGroup(in, stockCfg.Source(), stockCfg)
+ },
+ wantDiscovered: []confgroup.Config{
+ stockCfg,
+ userCfg,
+ discCfg,
+ },
+ wantSeen: []seenConfig{
+ {cfg: userCfg, status: dyncfgFailed},
+ {cfg: discCfg},
+ {cfg: stockCfg},
+ },
+ wantExposed: []seenConfig{
+ {cfg: userCfg, status: dyncfgFailed},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs user 'type=user,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name status failed
+ `,
+ }
+ },
+ },
+ "non-dyncfg => nok: diff src,src prio desc,same name: add and remove": {
+ createSim: func() *runSim {
+ userCfg := prepareUserCfg("fail", "name")
+ discCfg := prepareDiscoveredCfg("fail", "name")
+ stockCfg := prepareStockCfg("fail", "name")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, userCfg.Source(), userCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(userCfg), "enable"},
+ })
+
+ sendConfGroup(in, discCfg.Source(), discCfg)
+ sendConfGroup(in, stockCfg.Source(), stockCfg)
+
+ sendConfGroup(in, userCfg.Source())
+ sendConfGroup(in, discCfg.Source())
+ sendConfGroup(in, stockCfg.Source())
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:fail:name create accepted job /collectors/jobs user 'type=user,module=fail,job=name' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:name status failed
+
+CONFIG go.d:collector:fail:name delete
+ `,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Get(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[get] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-get",
+ Args: []string{dyncfgJobID(cfg), "get"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-get 404 application/json
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[get] existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "1").
+ Set("option_int", 1)
+ bs, _ := json.Marshal(cfg)
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: bs,
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-get",
+ Args: []string{dyncfgJobID(cfg), "get"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-get 200 application/json
+{"option_str":"1","option_int":1}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Userconfig(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[userconfig] existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-userconfig",
+ Args: []string{dyncfgJobID(cfg), "userconfig"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-userconfig 200 application/yaml
+jobs:
+- name: test
+ option_one: one
+ option_two: 2
+
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[userconfig] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success!", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-userconfig",
+ Args: []string{dyncfgJobID(cfg), "userconfig"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-userconfig 404 application/json
+{"status":404,"message":"The specified module 'success!' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Add(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[add] dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+`,
+ }
+ },
+ },
+ "[add] dyncfg:nok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=fail,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+`,
+ }
+ },
+ },
+ "[add] dyncfg:ok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Enable(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[enable] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-enable 404 application/json
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[enable] dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantRunning: []string{cfg.FullName()},
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+`,
+ }
+ },
+ },
+ "[enable] dyncfg:ok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantRunning: []string{cfg.FullName()},
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+
+FUNCTION_RESULT_BEGIN 3-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+`,
+ }
+ },
+ },
+ "[enable] dyncfg:nok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgFailed},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgFailed},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=fail,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test status failed
+`,
+ }
+ },
+ },
+ "[enable] dyncfg:nok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgFailed},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgFailed},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=fail,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test status failed
+
+FUNCTION_RESULT_BEGIN 3-enable 200 application/json
+{"status":200,"message":"Job enable failed: mock failed init."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test status failed
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Disable(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[disable] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-disable 404 application/json
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[disable] dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status disabled
+`,
+ }
+ },
+ },
+ "[disable] dyncfg:ok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status disabled
+
+FUNCTION_RESULT_BEGIN 3-disable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status disabled
+`,
+ }
+ },
+ },
+ "[disable] dyncfg:nok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=fail,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test status disabled
+`,
+ }
+ },
+ },
+ "[disable] dyncfg:nok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=fail,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test status disabled
+
+FUNCTION_RESULT_BEGIN 3-disable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:test status disabled
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Restart(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[restart] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-restart 404 application/json
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[restart] not enabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-restart 405 application/json
+{"status":405,"message":"Restarting data collection job is not allowed in 'accepted' state."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status accepted
+`,
+ }
+ },
+ },
+ "[restart] enabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantRunning: []string{cfg.FullName()},
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+
+FUNCTION_RESULT_BEGIN 3-restart 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+`,
+ }
+ },
+ },
+ "[restart] disabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status disabled
+
+FUNCTION_RESULT_BEGIN 3-restart 405 application/json
+{"status":405,"message":"Restarting data collection job is not allowed in 'disabled' state."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status disabled
+`,
+ }
+ },
+ },
+ "[restart] enabled dyncfg:ok multiple times": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "4-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantRunning: []string{cfg.FullName()},
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+
+FUNCTION_RESULT_BEGIN 3-restart 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+
+FUNCTION_RESULT_BEGIN 4-restart 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Remove(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[remove] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-remove",
+ Args: []string{dyncfgJobID(cfg), "remove"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-remove 404 application/json
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[remove] non-dyncfg": {
+ createSim: func() *runSim {
+ stockCfg := prepareStockCfg("success", "stock")
+ userCfg := prepareUserCfg("success", "user")
+ discCfg := prepareDiscoveredCfg("success", "discovered")
+
+ return &runSim{
+ do: func(mgr *Manager, in chan []*confgroup.Group) {
+ sendConfGroup(in, stockCfg.Source(), stockCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(stockCfg), "enable"},
+ })
+
+ sendConfGroup(in, userCfg.Source(), userCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(userCfg), "enable"},
+ })
+
+ sendConfGroup(in, discCfg.Source(), discCfg)
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-enable",
+ Args: []string{dyncfgJobID(discCfg), "enable"},
+ })
+
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-remove",
+ Args: []string{dyncfgJobID(stockCfg), "remove"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-remove",
+ Args: []string{dyncfgJobID(userCfg), "remove"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-remove",
+ Args: []string{dyncfgJobID(discCfg), "remove"},
+ })
+ },
+ wantDiscovered: []confgroup.Config{
+ stockCfg,
+ userCfg,
+ discCfg,
+ },
+ wantSeen: []seenConfig{
+ {cfg: stockCfg, status: dyncfgRunning},
+ {cfg: userCfg, status: dyncfgRunning},
+ {cfg: discCfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: stockCfg, status: dyncfgRunning},
+ {cfg: userCfg, status: dyncfgRunning},
+ {cfg: discCfg, status: dyncfgRunning},
+ },
+ wantRunning: []string{stockCfg.FullName(), userCfg.FullName(), discCfg.FullName()},
+ wantDyncfg: `
+CONFIG go.d:collector:success:stock create accepted job /collectors/jobs stock 'type=stock,module=success,job=stock' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:stock status running
+
+CONFIG go.d:collector:success:user create accepted job /collectors/jobs user 'type=user,module=success,job=user' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:user status running
+
+CONFIG go.d:collector:success:discovered create accepted job /collectors/jobs discovered 'type=discovered,module=success,job=discovered' 'schema get enable disable update restart test userconfig' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 3-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:discovered status running
+
+FUNCTION_RESULT_BEGIN 1-remove 405 application/json
+{"status":405,"message":"Removing jobs of type 'stock' is not supported. Only 'dyncfg' jobs can be removed."}
+FUNCTION_RESULT_END
+
+FUNCTION_RESULT_BEGIN 2-remove 405 application/json
+{"status":405,"message":"Removing jobs of type 'user' is not supported. Only 'dyncfg' jobs can be removed."}
+FUNCTION_RESULT_END
+
+FUNCTION_RESULT_BEGIN 3-remove 405 application/json
+{"status":405,"message":"Removing jobs of type 'discovered' is not supported. Only 'dyncfg' jobs can be removed."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[remove] not enabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-remove",
+ Args: []string{dyncfgJobID(cfg), "remove"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-remove 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test delete
+`,
+ }
+ },
+ },
+ "[remove] enabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-remove",
+ Args: []string{dyncfgJobID(cfg), "remove"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+
+FUNCTION_RESULT_BEGIN 3-remove 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test delete
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Update(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[update] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-update",
+ Args: []string{dyncfgJobID(cfg), "update"},
+ Payload: []byte("{}"),
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-update 404 application/json
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[update] enabled dyncfg:ok with dyncfg:ok": {
+ createSim: func() *runSim {
+ origCfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "1")
+ updCfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "2")
+ origBs, _ := json.Marshal(origCfg)
+ updBs, _ := json.Marshal(updCfg)
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(origCfg.Module()), "add", origCfg.Name()},
+ Payload: origBs,
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(origCfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-update",
+ Args: []string{dyncfgJobID(origCfg), "update"},
+ Payload: updBs,
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: updCfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: updCfg, status: dyncfgRunning},
+ },
+ wantRunning: []string{updCfg.FullName()},
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+
+FUNCTION_RESULT_BEGIN 3-update 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status running
+`,
+ }
+ },
+ },
+ "[update] disabled dyncfg:ok with dyncfg:ok": {
+ createSim: func() *runSim {
+ origCfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "1")
+ updCfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "2")
+ origBs, _ := json.Marshal(origCfg)
+ updBs, _ := json.Marshal(updCfg)
+
+ return &runSim{
+ do: func(mgr *Manager, _ chan []*confgroup.Group) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(origCfg.Module()), "add", origCfg.Name()},
+ Payload: origBs,
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(origCfg), "disable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-update",
+ Args: []string{dyncfgJobID(origCfg), "update"},
+ Payload: updBs,
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: updCfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: updCfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+
+FUNCTION_RESULT_BEGIN 1-add 202 application/json
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test create accepted job /collectors/jobs dyncfg 'type=dyncfg,module=success,job=test' 'schema get enable disable update restart test userconfig remove' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status disabled
+
+FUNCTION_RESULT_BEGIN 3-update 200 application/json
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:test status disabled
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
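+// sendConfGroup sends a group with the given configs from src, followed by an
+// empty group from a placeholder source.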
+func sendConfGroup(in chan []*confgroup.Group, src string, configs ...confgroup.Config) {
+ in <- prepareCfgGroups(src, configs...)
+ in <- prepareCfgGroups("_")
+}
+
+func prepareCfgGroups(src string, configs ...confgroup.Config) []*confgroup.Group {
+ return []*confgroup.Group{{Configs: configs, Source: src}}
+}
+
+func prepareStockCfg(module, job string) confgroup.Config {
+ return confgroup.Config{}.
+ SetSourceType(confgroup.TypeStock).
+ SetProvider("test").
+ SetSource(fmt.Sprintf("type=stock,module=%s,job=%s", module, job)).
+ SetModule(module).
+ SetName(job)
+}
+
+func prepareUserCfg(module, job string) confgroup.Config {
+ return confgroup.Config{}.
+ SetSourceType(confgroup.TypeUser).
+ SetProvider("test").
+ SetSource(fmt.Sprintf("type=user,module=%s,job=%s", module, job)).
+ SetModule(module).
+ SetName(job)
+}
+
+func prepareDiscoveredCfg(module, job string) confgroup.Config {
+ return confgroup.Config{}.
+ SetSourceType(confgroup.TypeDiscovered).
+ SetProvider("test").
+ SetSource(fmt.Sprintf("type=discovered,module=%s,job=%s", module, job)).
+ SetModule(module).
+ SetName(job)
+}
+
+func prepareDyncfgCfg(module, job string) confgroup.Config {
+ return confgroup.Config{}.
+ SetSourceType(confgroup.TypeDyncfg).
+ SetProvider("dyncfg").
+ SetSource(fmt.Sprintf("type=dyncfg,module=%s,job=%s", module, job)).
+ SetModule(module).
+ SetName(job)
+}
diff --git a/src/go/plugin/go.d/agent/jobmgr/noop.go b/src/go/plugin/go.d/agent/jobmgr/noop.go
new file mode 100644
index 000000000..adeacf906
--- /dev/null
+++ b/src/go/plugin/go.d/agent/jobmgr/noop.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/functions"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes"
+)
+
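+// noop provides no-op implementations of the Manager's file lock, file status,
+// vnode lookup and function registration dependencies.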
+type noop struct{}
+
+func (n noop) Lock(string) (bool, error) { return true, nil }
+func (n noop) Unlock(string) {}
+func (n noop) Save(confgroup.Config, string) {}
+func (n noop) Remove(confgroup.Config) {}
+func (n noop) Contains(confgroup.Config, ...string) bool { return false }
+func (n noop) Lookup(string) (*vnodes.VirtualNode, bool) { return nil, false }
+func (n noop) Register(name string, reg func(functions.Function)) {}
+func (n noop) Unregister(name string) {}
diff --git a/src/go/plugin/go.d/agent/jobmgr/sim_test.go b/src/go/plugin/go.d/agent/jobmgr/sim_test.go
new file mode 100644
index 000000000..9fe67175a
--- /dev/null
+++ b/src/go/plugin/go.d/agent/jobmgr/sim_test.go
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
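+// runSim describes a single Manager simulation: do drives the manager, and the
+// want* fields describe the expected discovered/seen/exposed/running state and
+// dyncfg output after shutdown.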
+type runSim struct {
+ do func(mgr *Manager, in chan []*confgroup.Group)
+
+ wantDiscovered []confgroup.Config
+ wantSeen []seenConfig
+ wantExposed []seenConfig
+ wantRunning []string
+ wantDyncfg string
+}
+
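+// run starts the manager, executes the do callback, cancels the context and
+// then verifies the produced dyncfg output and the manager's internal state.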
+func (s *runSim) run(t *testing.T) {
+ t.Helper()
+
+ require.NotNil(t, s.do, "s.do is nil")
+
+ var buf bytes.Buffer
+ mgr := New()
+ mgr.api = netdataapi.New(safewriter.New(&buf))
+ mgr.Modules = prepareMockRegistry()
+
+ done := make(chan struct{})
+ grpCh := make(chan []*confgroup.Group)
+ ctx, cancel := context.WithCancel(context.Background())
+
+ go func() { defer close(done); defer close(grpCh); mgr.Run(ctx, grpCh) }()
+
+ timeout := time.Second * 5
+
+ select {
+ case <-mgr.started:
+ case <-time.After(timeout):
+ t.Errorf("failed to start work in %s", timeout)
+ }
+
+ s.do(mgr, grpCh)
+ cancel()
+
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ t.Errorf("failed to finish work in %s", timeout)
+ }
+
+ var lines []string
+ for _, s := range strings.Split(buf.String(), "\n") {
+ if strings.HasPrefix(s, "CONFIG") && strings.Contains(s, " template ") {
+ continue
+ }
+ if strings.HasPrefix(s, "FUNCTION_RESULT_BEGIN") {
+ parts := strings.Fields(s)
+ s = strings.Join(parts[:len(parts)-1], " ") // remove timestamp
+ }
+ lines = append(lines, s)
+ }
+ wantDyncfg, gotDyncfg := strings.TrimSpace(s.wantDyncfg), strings.TrimSpace(strings.Join(lines, "\n"))
+
+ //fmt.Println(gotDyncfg)
+
+ assert.Equal(t, wantDyncfg, gotDyncfg, "dyncfg commands")
+
+ var n int
+ for _, cfgs := range mgr.discoveredConfigs.items {
+ n += len(cfgs)
+ }
+
+ wantLen, gotLen := len(s.wantDiscovered), n
+ require.Equalf(t, wantLen, gotLen, "discoveredConfigs: different len (want %d got %d)", wantLen, gotLen)
+
+ for _, cfg := range s.wantDiscovered {
+ cfgs, ok := mgr.discoveredConfigs.items[cfg.Source()]
+ require.Truef(t, ok, "discoveredConfigs: source %s is not found", cfg.Source())
+ _, ok = cfgs[cfg.Hash()]
+ require.Truef(t, ok, "discoveredConfigs: source %s config %d is not found", cfg.Source(), cfg.Hash())
+ }
+
+ wantLen, gotLen = len(s.wantSeen), len(mgr.seenConfigs.items)
+ require.Equalf(t, wantLen, gotLen, "seenConfigs: different len (want %d got %d)", wantLen, gotLen)
+
+ for _, scfg := range s.wantSeen {
+ v, ok := mgr.seenConfigs.lookup(scfg.cfg)
+ require.Truef(t, ok, "seenConfigs: config '%s' is not found", scfg.cfg.UID())
+ require.Truef(t, scfg.status == v.status, "seenConfigs: wrong status, want %s got %s", scfg.status, v.status)
+ }
+
+ wantLen, gotLen = len(s.wantExposed), len(mgr.exposedConfigs.items)
+ require.Equalf(t, wantLen, gotLen, "exposedConfigs: different len (want %d got %d)", wantLen, gotLen)
+
+ for _, scfg := range s.wantExposed {
+ v, ok := mgr.exposedConfigs.lookup(scfg.cfg)
+ require.Truef(t, ok && scfg.cfg.UID() == v.cfg.UID(), "exposedConfigs: config '%s' is not found", scfg.cfg.UID())
+ require.Truef(t, scfg.status == v.status, "exposedConfigs: wrong status, want %s got %s", scfg.status, v.status)
+ }
+
+ wantLen, gotLen = len(s.wantRunning), len(mgr.runningJobs.items)
+ require.Equalf(t, wantLen, gotLen, "runningJobs: different len (want %d got %d)", wantLen, gotLen)
+ for _, name := range s.wantRunning {
+ _, ok := mgr.runningJobs.lookup(name)
+ require.Truef(t, ok, "runningJobs: job '%s' is not found", name)
+ }
+}
+
+func prepareMockRegistry() module.Registry {
+ reg := module.Registry{}
+ type config struct {
+ OptionOne string `yaml:"option_one" json:"option_one"`
+ OptionTwo int64 `yaml:"option_two" json:"option_two"`
+ }
+
+ reg.Register("success", module.Creator{
+ JobConfigSchema: module.MockConfigSchema,
+ Create: func() module.Module {
+ return &module.MockModule{
+ ChartsFunc: func() *module.Charts {
+ return &module.Charts{&module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}}}
+ },
+ CollectFunc: func() map[string]int64 { return map[string]int64{"id1": 1} },
+ }
+ },
+ Config: func() any {
+ return &config{OptionOne: "one", OptionTwo: 2}
+ },
+ })
+ reg.Register("fail", module.Creator{
+ Create: func() module.Module {
+ return &module.MockModule{
+ InitFunc: func() error { return errors.New("mock failed init") },
+ }
+ },
+ })
+
+ return reg
+}
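
For orientation only (not part of the patch): a structural sketch of how a test can drive the runSim harness above. It assumes confgroup.Group exposes Source and Configs as elsewhere in the agent, and the placeholder expectations would have to be filled in for the harness assertions to pass.

    func TestManager_Run_discoveredConfig(t *testing.T) {
        // Build a discovered config the same way the helpers above do.
        cfg := confgroup.Config{}.
            SetSourceType(confgroup.TypeDiscovered).
            SetProvider("test").
            SetSource("type=discovered,module=success,job=sim").
            SetModule("success").
            SetName("sim")

        sim := runSim{
            do: func(mgr *Manager, in chan []*confgroup.Group) {
                // Send one config group; the manager is expected to create and start a "success" job.
                in <- []*confgroup.Group{{Source: cfg.Source(), Configs: []confgroup.Config{cfg}}}
            },
            wantDiscovered: []confgroup.Config{cfg},
            // wantSeen, wantExposed, wantRunning and wantDyncfg are omitted in this sketch;
            // run() compares lengths and statuses, so a real test must spell out the full
            // expected manager state, including the dyncfg CONFIG/STATUS output.
        }
        sim.run(t)
    }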
diff --git a/src/go/plugin/go.d/agent/module/charts.go b/src/go/plugin/go.d/agent/module/charts.go
new file mode 100644
index 000000000..b60b3bac1
--- /dev/null
+++ b/src/go/plugin/go.d/agent/module/charts.go
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+ "unicode"
+
+ "github.com/stretchr/testify/assert"
+)
+
+type (
+ ChartType string
+ DimAlgo string
+)
+
+const (
+ // Line chart type.
+ Line ChartType = "line"
+ // Area chart type.
+ Area ChartType = "area"
+ // Stacked chart type.
+ Stacked ChartType = "stacked"
+
+ // Absolute dimension algorithm.
+ // The value is drawn as-is (interpolated to the second boundary).
+ Absolute DimAlgo = "absolute"
+ // Incremental dimension algorithm.
+ // The value increases over time; the difference from the last value is presented in the chart,
+ // and the server interpolates it to calculate a per-second figure.
+ Incremental DimAlgo = "incremental"
+ // PercentOfAbsolute dimension algorithm.
+ // The percent of this value compared to the total of all dimensions.
+ PercentOfAbsolute DimAlgo = "percentage-of-absolute-row"
+ // PercentOfIncremental dimension algorithm.
+ // The percent of this value compared to the incremental total of all dimensions
+ PercentOfIncremental DimAlgo = "percentage-of-incremental-row"
+)
+
+const (
+ // Not documented.
+ // https://github.com/netdata/netdata/blob/cc2586de697702f86a3c34e60e23652dd4ddcb42/database/rrd.h#L204
+
+ LabelSourceAuto = 1 << 0
+ LabelSourceConf = 1 << 1
+ LabelSourceK8s = 1 << 2
+)
+
+func (d DimAlgo) String() string {
+ switch d {
+ case Absolute, Incremental, PercentOfAbsolute, PercentOfIncremental:
+ return string(d)
+ }
+ return string(Absolute)
+}
+
+func (c ChartType) String() string {
+ switch c {
+ case Line, Area, Stacked:
+ return string(c)
+ }
+ return string(Line)
+}
+
+type (
+ // Charts is a collection of Charts.
+ Charts []*Chart
+
+ // Opts represents chart options.
+ Opts struct {
+ Obsolete bool
+ Detail bool
+ StoreFirst bool
+ Hidden bool
+ }
+
+ // Chart represents a chart.
+ // For the full description please visit https://docs.netdata.cloud/collectors/plugins.d/#chart
+ Chart struct {
+ // typeID is the unique identification of the chart; if not specified,
+ // the orchestrator uses the job full name + chart ID as typeID (default behaviour).
+ typ string
+ id string
+
+ OverModule string
+ IDSep bool
+ ID string
+ OverID string
+ Title string
+ Units string
+ Fam string
+ Ctx string
+ Type ChartType
+ Priority int
+ Opts
+
+ Labels []Label
+ Dims Dims
+ Vars Vars
+
+ Retries int
+
+ remove bool
+ // created flag is used to indicate whether the chart needs to be created by the orchestrator.
+ created bool
+ // updated flag is used to indicate whether the chart was updated on last data collection interval.
+ updated bool
+
+ // ignore flag is used to indicate that the chart shouldn't be sent to Netdata (plugins.d).
+ ignore bool
+ }
+
+ Label struct {
+ Key string
+ Value string
+ Source int
+ }
+
+ // DimOpts represents dimension options.
+ DimOpts struct {
+ Obsolete bool
+ Hidden bool
+ NoReset bool
+ NoOverflow bool
+ }
+
+ // Dim represents a chart dimension.
+ // For detailed description please visit https://docs.netdata.cloud/collectors/plugins.d/#dimension.
+ Dim struct {
+ ID string
+ Name string
+ Algo DimAlgo
+ Mul int
+ Div int
+ DimOpts
+
+ remove bool
+ }
+
+ // Var represents a chart variable.
+ // For detailed description please visit https://docs.netdata.cloud/collectors/plugins.d/#variable
+ Var struct {
+ ID string
+ Name string
+ Value int64
+ }
+
+ // Dims is a collection of dims.
+ Dims []*Dim
+ // Vars is a collection of vars.
+ Vars []*Var
+)
+
+func (o Opts) String() string {
+ var b strings.Builder
+ if o.Detail {
+ b.WriteString(" detail")
+ }
+ if o.Hidden {
+ b.WriteString(" hidden")
+ }
+ if o.Obsolete {
+ b.WriteString(" obsolete")
+ }
+ if o.StoreFirst {
+ b.WriteString(" store_first")
+ }
+
+ if len(b.String()) == 0 {
+ return ""
+ }
+ return b.String()[1:]
+}
+
+func (o DimOpts) String() string {
+ var b strings.Builder
+ if o.Hidden {
+ b.WriteString(" hidden")
+ }
+ if o.NoOverflow {
+ b.WriteString(" nooverflow")
+ }
+ if o.NoReset {
+ b.WriteString(" noreset")
+ }
+ if o.Obsolete {
+ b.WriteString(" obsolete")
+ }
+
+ if len(b.String()) == 0 {
+ return ""
+ }
+ return b.String()[1:]
+}
+
+// Add adds (appends) a variable number of Charts.
+func (c *Charts) Add(charts ...*Chart) error {
+ for _, chart := range charts {
+ err := checkChart(chart)
+ if err != nil {
+ return fmt.Errorf("error on adding chart '%s' : %s", chart.ID, err)
+ }
+ if chart := c.Get(chart.ID); chart != nil && !chart.remove {
+ return fmt.Errorf("error on adding chart : '%s' is already in charts", chart.ID)
+ }
+ *c = append(*c, chart)
+ }
+
+ return nil
+}
+
+// Get returns the chart by ID.
+func (c Charts) Get(chartID string) *Chart {
+ idx := c.index(chartID)
+ if idx == -1 {
+ return nil
+ }
+ return c[idx]
+}
+
+ // Has returns true if Charts contains the chart with the given ID, false otherwise.
+func (c Charts) Has(chartID string) bool {
+ return c.index(chartID) != -1
+}
+
+// Remove removes the chart from Charts by ID.
+ // Avoid using it at runtime.
+func (c *Charts) Remove(chartID string) error {
+ idx := c.index(chartID)
+ if idx == -1 {
+ return fmt.Errorf("error on removing chart : '%s' is not in charts", chartID)
+ }
+ copy((*c)[idx:], (*c)[idx+1:])
+ (*c)[len(*c)-1] = nil
+ *c = (*c)[:len(*c)-1]
+ return nil
+}
+
+ // Copy returns a deep copy of Charts.
+func (c Charts) Copy() *Charts {
+ charts := Charts{}
+ for idx := range c {
+ charts = append(charts, c[idx].Copy())
+ }
+ return &charts
+}
+
+func (c Charts) index(chartID string) int {
+ for idx := range c {
+ if c[idx].ID == chartID {
+ return idx
+ }
+ }
+ return -1
+}
+
+// MarkNotCreated changes 'created' chart flag to false.
+ // Use it to add a dimension at runtime.
+func (c *Chart) MarkNotCreated() {
+ c.created = false
+}
+
+// MarkRemove sets 'remove' flag and Obsolete option to true.
+ // Use it to remove a chart at runtime.
+func (c *Chart) MarkRemove() {
+ c.Obsolete = true
+ c.remove = true
+}
+
+// MarkDimRemove sets 'remove' flag, Obsolete and optionally Hidden options to true.
+ // Use it to remove a dimension at runtime.
+func (c *Chart) MarkDimRemove(dimID string, hide bool) error {
+ if !c.HasDim(dimID) {
+ return fmt.Errorf("chart '%s' has no '%s' dimension", c.ID, dimID)
+ }
+ dim := c.GetDim(dimID)
+ dim.Obsolete = true
+ if hide {
+ dim.Hidden = true
+ }
+ dim.remove = true
+ return nil
+}
+
+ // AddDim adds a new dimension to the chart dimensions.
+func (c *Chart) AddDim(newDim *Dim) error {
+ err := checkDim(newDim)
+ if err != nil {
+ return fmt.Errorf("error on adding dim to chart '%s' : %s", c.ID, err)
+ }
+ if c.HasDim(newDim.ID) {
+ return fmt.Errorf("error on adding dim : '%s' is already in chart '%s' dims", newDim.ID, c.ID)
+ }
+ c.Dims = append(c.Dims, newDim)
+
+ return nil
+}
+
+ // AddVar adds a new variable to the chart variables.
+func (c *Chart) AddVar(newVar *Var) error {
+ err := checkVar(newVar)
+ if err != nil {
+ return fmt.Errorf("error on adding var to chart '%s' : %s", c.ID, err)
+ }
+ if c.indexVar(newVar.ID) != -1 {
+ return fmt.Errorf("error on adding var : '%s' is already in chart '%s' vars", newVar.ID, c.ID)
+ }
+ c.Vars = append(c.Vars, newVar)
+
+ return nil
+}
+
+// GetDim returns dimension by ID.
+func (c *Chart) GetDim(dimID string) *Dim {
+ idx := c.indexDim(dimID)
+ if idx == -1 {
+ return nil
+ }
+ return c.Dims[idx]
+}
+
+// RemoveDim removes dimension by ID.
+ // Avoid using it at runtime.
+func (c *Chart) RemoveDim(dimID string) error {
+ idx := c.indexDim(dimID)
+ if idx == -1 {
+ return fmt.Errorf("error on removing dim : '%s' isn't in chart '%s'", dimID, c.ID)
+ }
+ c.Dims = append(c.Dims[:idx], c.Dims[idx+1:]...)
+
+ return nil
+}
+
+// HasDim returns true if the chart contains dimension with the given ID, false otherwise.
+func (c Chart) HasDim(dimID string) bool {
+ return c.indexDim(dimID) != -1
+}
+
+// Copy returns a deep copy of the chart.
+func (c Chart) Copy() *Chart {
+ chart := c
+ chart.Dims = Dims{}
+ chart.Vars = Vars{}
+
+ for idx := range c.Dims {
+ chart.Dims = append(chart.Dims, c.Dims[idx].copy())
+ }
+ for idx := range c.Vars {
+ chart.Vars = append(chart.Vars, c.Vars[idx].copy())
+ }
+
+ return &chart
+}
+
+func (c Chart) indexDim(dimID string) int {
+ for idx := range c.Dims {
+ if c.Dims[idx].ID == dimID {
+ return idx
+ }
+ }
+ return -1
+}
+
+func (c Chart) indexVar(varID string) int {
+ for idx := range c.Vars {
+ if c.Vars[idx].ID == varID {
+ return idx
+ }
+ }
+ return -1
+}
+
+func (d Dim) copy() *Dim {
+ return &d
+}
+
+func (v Var) copy() *Var {
+ return &v
+}
+
+func checkCharts(charts ...*Chart) error {
+ for _, chart := range charts {
+ err := checkChart(chart)
+ if err != nil {
+ return fmt.Errorf("chart '%s' : %v", chart.ID, err)
+ }
+ }
+ return nil
+}
+
+func checkChart(chart *Chart) error {
+ if chart.ID == "" {
+ return errors.New("empty ID")
+ }
+
+ if chart.Title == "" {
+ return errors.New("empty Title")
+ }
+
+ if chart.Units == "" {
+ return errors.New("empty Units")
+ }
+
+ if id := checkID(chart.ID); id != -1 {
+ return fmt.Errorf("unacceptable symbol in ID : '%c'", id)
+ }
+
+ set := make(map[string]bool)
+
+ for _, d := range chart.Dims {
+ err := checkDim(d)
+ if err != nil {
+ return err
+ }
+ if set[d.ID] {
+ return fmt.Errorf("duplicate dim '%s'", d.ID)
+ }
+ set[d.ID] = true
+ }
+
+ set = make(map[string]bool)
+
+ for _, v := range chart.Vars {
+ if err := checkVar(v); err != nil {
+ return err
+ }
+ if set[v.ID] {
+ return fmt.Errorf("duplicate var '%s'", v.ID)
+ }
+ set[v.ID] = true
+ }
+ return nil
+}
+
+func checkDim(d *Dim) error {
+ if d.ID == "" {
+ return errors.New("empty dim ID")
+ }
+ if id := checkID(d.ID); id != -1 && (d.Name == "" || checkID(d.Name) != -1) {
+ return fmt.Errorf("unacceptable symbol in dim ID '%s' : '%c'", d.ID, id)
+ }
+ return nil
+}
+
+func checkVar(v *Var) error {
+ if v.ID == "" {
+ return errors.New("empty var ID")
+ }
+ if id := checkID(v.ID); id != -1 {
+ return fmt.Errorf("unacceptable symbol in var ID '%s' : '%c'", v.ID, id)
+ }
+ return nil
+}
+
+func checkID(id string) int {
+ for _, r := range id {
+ if unicode.IsSpace(r) {
+ return int(r)
+ }
+ }
+ return -1
+}
+
+func TestMetricsHasAllChartsDims(t *testing.T, charts *Charts, mx map[string]int64) {
+ for _, chart := range *charts {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "missing data for dimension '%s' in chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "missing data for variable '%s' in chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func TestMetricsHasAllChartsDimsSkip(t *testing.T, charts *Charts, mx map[string]int64, skip func(chart *Chart) bool) {
+ for _, chart := range *charts {
+ if chart.Obsolete || (skip != nil && skip(chart)) {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "missing data for dimension '%s' in chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "missing data for variable '%s' in chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
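
To illustrate the API above (a sketch outside the patch, with made-up chart and dimension names): a collector declares its charts once and, when a new dimension shows up during data collection, adds it and marks the chart as not created so the orchestrator re-sends the chart definition.

    package example

    import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"

    var charts = module.Charts{
        {
            ID:    "requests",
            Title: "Requests",
            Units: "requests/s",
            Fam:   "requests",
            Ctx:   "example.requests",
            Dims: module.Dims{
                {ID: "success", Algo: module.Incremental},
            },
        },
    }

    // addFailedDim adds a dimension discovered during data collection.
    func addFailedDim(chart *module.Chart) {
        if chart.HasDim("failed") {
            return
        }
        _ = chart.AddDim(&module.Dim{ID: "failed", Algo: module.Incremental})
        chart.MarkNotCreated() // the orchestrator will re-create the chart with the new dimension
    }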
diff --git a/src/go/plugin/go.d/agent/module/charts_test.go b/src/go/plugin/go.d/agent/module/charts_test.go
new file mode 100644
index 000000000..b0dcf806f
--- /dev/null
+++ b/src/go/plugin/go.d/agent/module/charts_test.go
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func createTestChart(id string) *Chart {
+ return &Chart{
+ ID: id,
+ Title: "Title",
+ Units: "units",
+ Fam: "family",
+ Ctx: "context",
+ Type: Line,
+ Dims: Dims{
+ {ID: "dim1", Algo: Absolute},
+ },
+ Vars: Vars{
+ {ID: "var1", Value: 1},
+ },
+ }
+}
+
+func TestDimAlgo_String(t *testing.T) {
+ cases := []struct {
+ expected string
+ actual fmt.Stringer
+ }{
+ {"absolute", Absolute},
+ {"incremental", Incremental},
+ {"percentage-of-absolute-row", PercentOfAbsolute},
+ {"percentage-of-incremental-row", PercentOfIncremental},
+ {"absolute", DimAlgo("wrong")},
+ }
+
+ for _, v := range cases {
+ assert.Equal(t, v.expected, v.actual.String())
+ }
+}
+
+func TestChartType_String(t *testing.T) {
+ cases := []struct {
+ expected string
+ actual fmt.Stringer
+ }{
+ {"line", Line},
+ {"area", Area},
+ {"stacked", Stacked},
+ {"line", ChartType("wrong")},
+ }
+
+ for _, v := range cases {
+ assert.Equal(t, v.expected, v.actual.String())
+ }
+}
+
+func TestOpts_String(t *testing.T) {
+ cases := []struct {
+ expected string
+ actual fmt.Stringer
+ }{
+ {"", Opts{}},
+ {
+ "detail hidden obsolete store_first",
+ Opts{Detail: true, Hidden: true, Obsolete: true, StoreFirst: true},
+ },
+ {
+ "detail hidden obsolete store_first",
+ Opts{Detail: true, Hidden: true, Obsolete: true, StoreFirst: true},
+ },
+ }
+
+ for _, v := range cases {
+ assert.Equal(t, v.expected, v.actual.String())
+ }
+}
+
+func TestDimOpts_String(t *testing.T) {
+ cases := []struct {
+ expected string
+ actual fmt.Stringer
+ }{
+ {"", DimOpts{}},
+ {
+ "hidden nooverflow noreset obsolete",
+ DimOpts{Hidden: true, NoOverflow: true, NoReset: true, Obsolete: true},
+ },
+ {
+ "hidden obsolete",
+ DimOpts{Hidden: true, NoOverflow: false, NoReset: false, Obsolete: true},
+ },
+ }
+
+ for _, v := range cases {
+ assert.Equal(t, v.expected, v.actual.String())
+ }
+}
+
+func TestCharts_Copy(t *testing.T) {
+ orig := &Charts{
+ createTestChart("1"),
+ createTestChart("2"),
+ }
+ copied := orig.Copy()
+
+ require.False(t, orig == copied, "Charts copy points to the same address")
+ require.Len(t, *orig, len(*copied))
+
+ for idx := range *orig {
+ compareCharts(t, (*orig)[idx], (*copied)[idx])
+ }
+}
+
+func TestChart_Copy(t *testing.T) {
+ orig := createTestChart("1")
+
+ compareCharts(t, orig, orig.Copy())
+}
+
+func TestCharts_Add(t *testing.T) {
+ charts := Charts{}
+ chart1 := createTestChart("1")
+ chart2 := createTestChart("2")
+ chart3 := createTestChart("")
+
+ // OK case
+ assert.NoError(t, charts.Add(
+ chart1,
+ chart2,
+ ))
+ assert.Len(t, charts, 2)
+
+ // NG case
+ assert.Error(t, charts.Add(
+ chart3,
+ chart1,
+ chart2,
+ ))
+ assert.Len(t, charts, 2)
+
+ assert.True(t, charts[0] == chart1)
+ assert.True(t, charts[1] == chart2)
+}
+
+func TestCharts_Add_SameID(t *testing.T) {
+ charts := Charts{}
+ chart1 := createTestChart("1")
+ chart2 := createTestChart("1")
+
+ assert.NoError(t, charts.Add(chart1))
+ assert.Error(t, charts.Add(chart2))
+ assert.Len(t, charts, 1)
+
+ charts = Charts{}
+ chart1 = createTestChart("1")
+ chart2 = createTestChart("1")
+
+ assert.NoError(t, charts.Add(chart1))
+ chart1.MarkRemove()
+ assert.NoError(t, charts.Add(chart2))
+ assert.Len(t, charts, 2)
+}
+
+func TestCharts_Get(t *testing.T) {
+ chart := createTestChart("1")
+ charts := Charts{
+ chart,
+ }
+
+ // OK case
+ assert.True(t, chart == charts.Get("1"))
+ // NG case
+ assert.Nil(t, charts.Get("2"))
+}
+
+func TestCharts_Has(t *testing.T) {
+ chart := createTestChart("1")
+ charts := &Charts{
+ chart,
+ }
+
+ // OK case
+ assert.True(t, charts.Has("1"))
+ // NG case
+ assert.False(t, charts.Has("2"))
+}
+
+func TestCharts_Remove(t *testing.T) {
+ chart := createTestChart("1")
+ charts := &Charts{
+ chart,
+ }
+
+ // OK case
+ assert.NoError(t, charts.Remove("1"))
+ assert.Len(t, *charts, 0)
+
+ // NG case
+ assert.Error(t, charts.Remove("2"))
+}
+
+func TestChart_AddDim(t *testing.T) {
+ chart := createTestChart("1")
+ dim := &Dim{ID: "dim2"}
+
+ // OK case
+ assert.NoError(t, chart.AddDim(dim))
+ assert.Len(t, chart.Dims, 2)
+
+ // NG case
+ assert.Error(t, chart.AddDim(dim))
+ assert.Len(t, chart.Dims, 2)
+}
+
+func TestChart_AddVar(t *testing.T) {
+ chart := createTestChart("1")
+ variable := &Var{ID: "var2"}
+
+ // OK case
+ assert.NoError(t, chart.AddVar(variable))
+ assert.Len(t, chart.Vars, 2)
+
+ // NG case
+ assert.Error(t, chart.AddVar(variable))
+ assert.Len(t, chart.Vars, 2)
+}
+
+func TestChart_GetDim(t *testing.T) {
+ chart := &Chart{
+ Dims: Dims{
+ {ID: "1"},
+ {ID: "2"},
+ },
+ }
+
+ // OK case
+ assert.True(t, chart.GetDim("1") != nil && chart.GetDim("1").ID == "1")
+
+ // NG case
+ assert.Nil(t, chart.GetDim("3"))
+}
+
+func TestChart_RemoveDim(t *testing.T) {
+ chart := createTestChart("1")
+
+ // OK case
+ assert.NoError(t, chart.RemoveDim("dim1"))
+ assert.Len(t, chart.Dims, 0)
+
+ // NG case
+ assert.Error(t, chart.RemoveDim("dim2"))
+}
+
+func TestChart_HasDim(t *testing.T) {
+ chart := createTestChart("1")
+
+ // OK case
+ assert.True(t, chart.HasDim("dim1"))
+ // NG case
+ assert.False(t, chart.HasDim("dim2"))
+}
+
+func TestChart_MarkNotCreated(t *testing.T) {
+ chart := createTestChart("1")
+
+ chart.MarkNotCreated()
+ assert.False(t, chart.created)
+}
+
+func TestChart_MarkRemove(t *testing.T) {
+ chart := createTestChart("1")
+
+ chart.MarkRemove()
+ assert.True(t, chart.remove)
+ assert.True(t, chart.Obsolete)
+}
+
+func TestChart_MarkDimRemove(t *testing.T) {
+ chart := createTestChart("1")
+
+ assert.Error(t, chart.MarkDimRemove("dim99", false))
+ assert.NoError(t, chart.MarkDimRemove("dim1", true))
+ assert.True(t, chart.GetDim("dim1").Obsolete)
+ assert.True(t, chart.GetDim("dim1").Hidden)
+ assert.True(t, chart.GetDim("dim1").remove)
+}
+
+func TestChart_check(t *testing.T) {
+ // OK case
+ chart := createTestChart("1")
+ assert.NoError(t, checkChart(chart))
+
+ // NG case
+ chart = createTestChart("1")
+ chart.ID = ""
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.ID = "invalid id"
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.Title = ""
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.Units = ""
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.Dims = Dims{
+ {ID: "1"},
+ {ID: "1"},
+ }
+ assert.Error(t, checkChart(chart))
+
+ chart = createTestChart("1")
+ chart.Vars = Vars{
+ {ID: "1"},
+ {ID: "1"},
+ }
+ assert.Error(t, checkChart(chart))
+}
+
+func TestDim_check(t *testing.T) {
+ // OK case
+ dim := &Dim{ID: "id"}
+ assert.NoError(t, checkDim(dim))
+
+ // NG case
+ dim = &Dim{ID: "id"}
+ dim.ID = ""
+ assert.Error(t, checkDim(dim))
+
+ dim = &Dim{ID: "id"}
+ dim.ID = "invalid id"
+ assert.Error(t, checkDim(dim))
+
+ dim = &Dim{ID: "i d", Name: "id"}
+ assert.NoError(t, checkDim(dim))
+}
+
+func TestVar_check(t *testing.T) {
+ // OK case
+ v := &Var{ID: "id"}
+ assert.NoError(t, checkVar(v))
+
+ // NG case
+ v = &Var{ID: "id"}
+ v.ID = ""
+ assert.Error(t, checkVar(v))
+
+ v = &Var{ID: "id"}
+ v.ID = "invalid id"
+ assert.Error(t, checkVar(v))
+}
+
+func compareCharts(t *testing.T, orig, copied *Chart) {
+ // 1. compare chart pointers
+ // 2. compare Dims, Vars length
+ // 3. compare Dims, Vars pointers
+
+ assert.False(t, orig == copied, "Chart copy points to the same address")
+
+ require.Len(t, orig.Dims, len(copied.Dims))
+ require.Len(t, orig.Vars, len(copied.Vars))
+
+ for idx := range (*orig).Dims {
+ assert.False(t, orig.Dims[idx] == copied.Dims[idx], "Chart copy dim points to the same address")
+ assert.Equal(t, orig.Dims[idx], copied.Dims[idx], "Chart copy dim isn't equal to orig")
+ }
+
+ for idx := range (*orig).Vars {
+ assert.False(t, orig.Vars[idx] == copied.Vars[idx], "Chart copy var points to the same address")
+ assert.Equal(t, orig.Vars[idx], copied.Vars[idx], "Chart copy var isn't equal to orig")
+ }
+}
diff --git a/src/go/plugin/go.d/agent/module/job.go b/src/go/plugin/go.d/agent/module/job.go
new file mode 100644
index 000000000..67fae8aa2
--- /dev/null
+++ b/src/go/plugin/go.d/agent/module/job.go
@@ -0,0 +1,645 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "log/slog"
+ "os"
+ "regexp"
+ "runtime/debug"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes"
+)
+
+var obsoleteLock = &sync.Mutex{}
+var obsoleteCharts = true
+
+func DontObsoleteCharts() {
+ obsoleteLock.Lock()
+ obsoleteCharts = false
+ obsoleteLock.Unlock()
+}
+
+func shouldObsoleteCharts() bool {
+ obsoleteLock.Lock()
+ defer obsoleteLock.Unlock()
+ return obsoleteCharts
+}
+
+var reSpace = regexp.MustCompile(`\s+`)
+
+var ndInternalMonitoringDisabled = os.Getenv("NETDATA_INTERNALS_MONITORING") == "NO"
+
+func newRuntimeChart(pluginName string) *Chart {
+ // this is needed to keep the same name as we had before https://github.com/netdata/netdata/go/plugins/plugin/go.d/issues/650
+ ctxName := pluginName
+ if ctxName == "go.d" {
+ ctxName = "go"
+ }
+ ctxName = reSpace.ReplaceAllString(ctxName, "_")
+ return &Chart{
+ typ: "netdata",
+ Title: "Execution time",
+ Units: "ms",
+ Fam: pluginName,
+ Ctx: fmt.Sprintf("netdata.%s_plugin_execution_time", ctxName),
+ Priority: 145000,
+ Dims: Dims{
+ {ID: "time"},
+ },
+ }
+}
+
+type JobConfig struct {
+ PluginName string
+ Name string
+ ModuleName string
+ FullName string
+ Module Module
+ Labels map[string]string
+ Out io.Writer
+ UpdateEvery int
+ AutoDetectEvery int
+ Priority int
+ IsStock bool
+
+ VnodeGUID string
+ VnodeHostname string
+ VnodeLabels map[string]string
+}
+
+const (
+ penaltyStep = 5
+ maxPenalty = 600
+ infTries = -1
+)
+
+func NewJob(cfg JobConfig) *Job {
+ var buf bytes.Buffer
+
+ if cfg.UpdateEvery == 0 {
+ cfg.UpdateEvery = 1
+ }
+
+ j := &Job{
+ AutoDetectEvery: cfg.AutoDetectEvery,
+ AutoDetectTries: infTries,
+
+ pluginName: cfg.PluginName,
+ name: cfg.Name,
+ moduleName: cfg.ModuleName,
+ fullName: cfg.FullName,
+ updateEvery: cfg.UpdateEvery,
+ priority: cfg.Priority,
+ isStock: cfg.IsStock,
+ module: cfg.Module,
+ labels: cfg.Labels,
+ out: cfg.Out,
+ runChart: newRuntimeChart(cfg.PluginName),
+ stop: make(chan struct{}),
+ tick: make(chan int),
+ buf: &buf,
+ api: netdataapi.New(&buf),
+
+ vnodeGUID: cfg.VnodeGUID,
+ vnodeHostname: cfg.VnodeHostname,
+ vnodeLabels: cfg.VnodeLabels,
+ }
+
+ log := logger.New().With(
+ slog.String("collector", j.ModuleName()),
+ slog.String("job", j.Name()),
+ )
+
+ j.Logger = log
+ if j.module != nil {
+ j.module.GetBase().Logger = log
+ }
+
+ return j
+}
+
+// Job represents a job. It's a module wrapper.
+type Job struct {
+ pluginName string
+ name string
+ moduleName string
+ fullName string
+
+ updateEvery int
+ AutoDetectEvery int
+ AutoDetectTries int
+ priority int
+ labels map[string]string
+
+ *logger.Logger
+
+ isStock bool
+
+ module Module
+
+ initialized bool
+ panicked bool
+
+ runChart *Chart
+ charts *Charts
+ tick chan int
+ out io.Writer
+ buf *bytes.Buffer
+ api *netdataapi.API
+
+ retries int
+ prevRun time.Time
+
+ stop chan struct{}
+
+ vnodeCreated bool
+ vnodeGUID string
+ vnodeHostname string
+ vnodeLabels map[string]string
+}
+
+// NetdataChartIDMaxLength is the chart ID max length. See RRD_ID_LENGTH_MAX in the netdata source code.
+const NetdataChartIDMaxLength = 1200
+
+// FullName returns job full name.
+func (j *Job) FullName() string {
+ return j.fullName
+}
+
+// ModuleName returns job module name.
+func (j *Job) ModuleName() string {
+ return j.moduleName
+}
+
+// Name returns job name.
+func (j *Job) Name() string {
+ return j.name
+}
+
+// Panicked returns 'panicked' flag value.
+func (j *Job) Panicked() bool {
+ return j.panicked
+}
+
+// AutoDetectionEvery returns value of AutoDetectEvery.
+func (j *Job) AutoDetectionEvery() int {
+ return j.AutoDetectEvery
+}
+
+ // RetryAutoDetection reports whether auto-detection should be retried.
+func (j *Job) RetryAutoDetection() bool {
+ return j.AutoDetectEvery > 0 && (j.AutoDetectTries == infTries || j.AutoDetectTries > 0)
+}
+
+func (j *Job) Configuration() any {
+ return j.module.Configuration()
+}
+
+// AutoDetection invokes init, check and postCheck. It handles panic.
+func (j *Job) AutoDetection() (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("panic %v", r)
+ j.panicked = true
+ j.disableAutoDetection()
+
+ j.Errorf("PANIC %v", r)
+ if logger.Level.Enabled(slog.LevelDebug) {
+ j.Errorf("STACK: %s", debug.Stack())
+ }
+ }
+ if err != nil {
+ j.module.Cleanup()
+ }
+ }()
+
+ if j.isStock {
+ j.Mute()
+ }
+
+ if err = j.init(); err != nil {
+ j.Error("init failed")
+ j.Unmute()
+ j.disableAutoDetection()
+ return err
+ }
+
+ if err = j.check(); err != nil {
+ j.Error("check failed")
+ j.Unmute()
+ return err
+ }
+
+ j.Unmute()
+ j.Info("check success")
+
+ if err = j.postCheck(); err != nil {
+ j.Error("postCheck failed")
+ j.disableAutoDetection()
+ return err
+ }
+
+ return nil
+}
+
+ // Tick sends the clock tick to the job.
+func (j *Job) Tick(clock int) {
+ select {
+ case j.tick <- clock:
+ default:
+ j.Debug("skip the tick due to previous run hasn't been finished")
+ }
+}
+
+// Start starts job main loop.
+func (j *Job) Start() {
+ j.Infof("started, data collection interval %ds", j.updateEvery)
+ defer func() { j.Info("stopped") }()
+
+LOOP:
+ for {
+ select {
+ case <-j.stop:
+ break LOOP
+ case t := <-j.tick:
+ if t%(j.updateEvery+j.penalty()) == 0 {
+ j.runOnce()
+ }
+ }
+ }
+ j.module.Cleanup()
+ j.Cleanup()
+ j.stop <- struct{}{}
+}
+
+// Stop stops job main loop. It blocks until the job is stopped.
+func (j *Job) Stop() {
+ // TODO: should have blocking and non blocking stop
+ j.stop <- struct{}{}
+ <-j.stop
+}
+
+func (j *Job) disableAutoDetection() {
+ j.AutoDetectEvery = 0
+}
+
+func (j *Job) Cleanup() {
+ j.buf.Reset()
+ if !shouldObsoleteCharts() {
+ return
+ }
+
+ if !vnodes.Disabled {
+ if !j.vnodeCreated && j.vnodeGUID != "" {
+ _ = j.api.HOSTINFO(j.vnodeGUID, j.vnodeHostname, j.vnodeLabels)
+ j.vnodeCreated = true
+ }
+ _ = j.api.HOST(j.vnodeGUID)
+ }
+
+ if j.runChart.created {
+ j.runChart.MarkRemove()
+ j.createChart(j.runChart)
+ }
+ if j.charts != nil {
+ for _, chart := range *j.charts {
+ if chart.created {
+ chart.MarkRemove()
+ j.createChart(chart)
+ }
+ }
+ }
+
+ if j.buf.Len() > 0 {
+ _, _ = io.Copy(j.out, j.buf)
+ }
+}
+
+func (j *Job) init() error {
+ if j.initialized {
+ return nil
+ }
+
+ if err := j.module.Init(); err != nil {
+ return err
+ }
+
+ j.initialized = true
+
+ return nil
+}
+
+func (j *Job) check() error {
+ if err := j.module.Check(); err != nil {
+ if j.AutoDetectTries != infTries {
+ j.AutoDetectTries--
+ }
+ return err
+ }
+ return nil
+}
+
+func (j *Job) postCheck() error {
+ if j.charts = j.module.Charts(); j.charts == nil {
+ j.Error("nil charts")
+ return errors.New("nil charts")
+ }
+ if err := checkCharts(*j.charts...); err != nil {
+ j.Errorf("charts check: %v", err)
+ return err
+ }
+ return nil
+}
+
+func (j *Job) runOnce() {
+ curTime := time.Now()
+ sinceLastRun := calcSinceLastRun(curTime, j.prevRun)
+ j.prevRun = curTime
+
+ metrics := j.collect()
+
+ if j.panicked {
+ return
+ }
+
+ if j.processMetrics(metrics, curTime, sinceLastRun) {
+ j.retries = 0
+ } else {
+ j.retries++
+ }
+
+ _, _ = io.Copy(j.out, j.buf)
+ j.buf.Reset()
+}
+
+func (j *Job) collect() (result map[string]int64) {
+ j.panicked = false
+ defer func() {
+ if r := recover(); r != nil {
+ j.panicked = true
+ j.Errorf("PANIC: %v", r)
+ if logger.Level.Enabled(slog.LevelDebug) {
+ j.Errorf("STACK: %s", debug.Stack())
+ }
+ }
+ }()
+ return j.module.Collect()
+}
+
+func (j *Job) processMetrics(metrics map[string]int64, startTime time.Time, sinceLastRun int) bool {
+ if !vnodes.Disabled {
+ if !j.vnodeCreated && j.vnodeGUID != "" {
+ _ = j.api.HOSTINFO(j.vnodeGUID, j.vnodeHostname, j.vnodeLabels)
+ j.vnodeCreated = true
+ }
+
+ _ = j.api.HOST(j.vnodeGUID)
+ }
+
+ if !ndInternalMonitoringDisabled && !j.runChart.created {
+ j.runChart.ID = fmt.Sprintf("execution_time_of_%s", j.FullName())
+ j.createChart(j.runChart)
+ }
+
+ elapsed := int64(durationTo(time.Since(startTime), time.Millisecond))
+
+ var i, updated int
+ for _, chart := range *j.charts {
+ if !chart.created {
+ typeID := fmt.Sprintf("%s.%s", j.FullName(), chart.ID)
+ if len(typeID) >= NetdataChartIDMaxLength {
+ j.Warningf("chart 'type.id' length (%d) >= max allowed (%d), the chart is ignored (%s)",
+ len(typeID), NetdataChartIDMaxLength, typeID)
+ chart.ignore = true
+ }
+ j.createChart(chart)
+ }
+ if chart.remove {
+ continue
+ }
+ (*j.charts)[i] = chart
+ i++
+ if len(metrics) == 0 || chart.Obsolete {
+ continue
+ }
+ if j.updateChart(chart, metrics, sinceLastRun) {
+ updated++
+ }
+ }
+ *j.charts = (*j.charts)[:i]
+
+ if updated == 0 {
+ return false
+ }
+ if !ndInternalMonitoringDisabled {
+ j.updateChart(j.runChart, map[string]int64{"time": elapsed}, sinceLastRun)
+ }
+
+ return true
+}
+
+func (j *Job) createChart(chart *Chart) {
+ defer func() { chart.created = true }()
+ if chart.ignore {
+ return
+ }
+
+ if chart.Priority == 0 {
+ chart.Priority = j.priority
+ j.priority++
+ }
+ _ = j.api.CHART(
+ getChartType(chart, j),
+ getChartID(chart),
+ chart.OverID,
+ chart.Title,
+ chart.Units,
+ chart.Fam,
+ chart.Ctx,
+ chart.Type.String(),
+ chart.Priority,
+ j.updateEvery,
+ chart.Opts.String(),
+ j.pluginName,
+ j.moduleName,
+ )
+
+ if chart.Obsolete {
+ _ = j.api.EMPTYLINE()
+ return
+ }
+
+ seen := make(map[string]bool)
+ for _, l := range chart.Labels {
+ if l.Key != "" {
+ seen[l.Key] = true
+ ls := l.Source
+ // the default should be auto
+ // https://github.com/netdata/netdata/blob/cc2586de697702f86a3c34e60e23652dd4ddcb42/database/rrd.h#L205
+ if ls == 0 {
+ ls = LabelSourceAuto
+ }
+ _ = j.api.CLABEL(l.Key, l.Value, ls)
+ }
+ }
+ for k, v := range j.labels {
+ if !seen[k] {
+ _ = j.api.CLABEL(k, v, LabelSourceConf)
+ }
+ }
+ _ = j.api.CLABEL("_collect_job", j.Name(), LabelSourceAuto)
+ _ = j.api.CLABELCOMMIT()
+
+ for _, dim := range chart.Dims {
+ _ = j.api.DIMENSION(
+ firstNotEmpty(dim.Name, dim.ID),
+ dim.Name,
+ dim.Algo.String(),
+ handleZero(dim.Mul),
+ handleZero(dim.Div),
+ dim.DimOpts.String(),
+ )
+ }
+ for _, v := range chart.Vars {
+ if v.Name != "" {
+ _ = j.api.VARIABLE(v.Name, v.Value)
+ } else {
+ _ = j.api.VARIABLE(v.ID, v.Value)
+ }
+ }
+ _ = j.api.EMPTYLINE()
+}
+
+func (j *Job) updateChart(chart *Chart, collected map[string]int64, sinceLastRun int) bool {
+ if chart.ignore {
+ dims := chart.Dims[:0]
+ for _, dim := range chart.Dims {
+ if !dim.remove {
+ dims = append(dims, dim)
+ }
+ }
+ chart.Dims = dims
+ return false
+ }
+
+ if !chart.updated {
+ sinceLastRun = 0
+ }
+
+ _ = j.api.BEGIN(
+ getChartType(chart, j),
+ getChartID(chart),
+ sinceLastRun,
+ )
+ var i, updated int
+ for _, dim := range chart.Dims {
+ if dim.remove {
+ continue
+ }
+ chart.Dims[i] = dim
+ i++
+ if v, ok := collected[dim.ID]; !ok {
+ _ = j.api.SETEMPTY(firstNotEmpty(dim.Name, dim.ID))
+ } else {
+ _ = j.api.SET(firstNotEmpty(dim.Name, dim.ID), v)
+ updated++
+ }
+ }
+ chart.Dims = chart.Dims[:i]
+
+ for _, vr := range chart.Vars {
+ if v, ok := collected[vr.ID]; ok {
+ if vr.Name != "" {
+ _ = j.api.VARIABLE(vr.Name, v)
+ } else {
+ _ = j.api.VARIABLE(vr.ID, v)
+ }
+ }
+
+ }
+ _ = j.api.END()
+
+ if chart.updated = updated > 0; chart.updated {
+ chart.Retries = 0
+ } else {
+ chart.Retries++
+ }
+ return chart.updated
+}
+
+func (j *Job) penalty() int {
+ v := j.retries / penaltyStep * penaltyStep * j.updateEvery / 2
+ if v > maxPenalty {
+ return maxPenalty
+ }
+ return v
+}
+
+func getChartType(chart *Chart, j *Job) string {
+ if chart.typ != "" {
+ return chart.typ
+ }
+ if !chart.IDSep {
+ chart.typ = j.FullName()
+ } else if i := strings.IndexByte(chart.ID, '.'); i != -1 {
+ chart.typ = j.FullName() + "_" + chart.ID[:i]
+ } else {
+ chart.typ = j.FullName()
+ }
+ if chart.OverModule != "" {
+ if v := strings.TrimPrefix(chart.typ, j.ModuleName()); v != chart.typ {
+ chart.typ = chart.OverModule + v
+ }
+ }
+ return chart.typ
+}
+
+func getChartID(chart *Chart) string {
+ if chart.id != "" {
+ return chart.id
+ }
+ if !chart.IDSep {
+ return chart.ID
+ }
+ if i := strings.IndexByte(chart.ID, '.'); i != -1 {
+ chart.id = chart.ID[i+1:]
+ } else {
+ chart.id = chart.ID
+ }
+ return chart.id
+}
+
+func calcSinceLastRun(curTime, prevRun time.Time) int {
+ if prevRun.IsZero() {
+ return 0
+ }
+ return int((curTime.UnixNano() - prevRun.UnixNano()) / 1000)
+}
+
+func durationTo(duration time.Duration, to time.Duration) int {
+ return int(int64(duration) / (int64(to) / int64(time.Nanosecond)))
+}
+
+func firstNotEmpty(val1, val2 string) string {
+ if val1 != "" {
+ return val1
+ }
+ return val2
+}
+
+func handleZero(v int) int {
+ if v == 0 {
+ return 1
+ }
+ return v
+}
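
A worked example of the back-off implemented by penalty() above (a sketch that restates the formula with its constants, penaltyStep=5 and maxPenalty=600):

    // penaltyExample mirrors Job.penalty() for illustration.
    func penaltyExample(retries, updateEvery int) int {
        const penaltyStep, maxPenalty = 5, 600
        v := retries / penaltyStep * penaltyStep * updateEvery / 2
        if v > maxPenalty {
            return maxPenalty
        }
        return v
    }

    // With updateEvery=1: 4 empty collections add no delay (4/5*5/2 = 0),
    // while 12 empty collections give penaltyExample(12, 1) == 5, so the job
    // runs every 1+5 = 6 ticks; the extra delay is capped at 600.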
diff --git a/src/go/plugin/go.d/agent/module/job_test.go b/src/go/plugin/go.d/agent/module/job_test.go
new file mode 100644
index 000000000..c87f840d5
--- /dev/null
+++ b/src/go/plugin/go.d/agent/module/job_test.go
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+const (
+ pluginName = "plugin"
+ modName = "module"
+ jobName = "job"
+)
+
+func newTestJob() *Job {
+ return NewJob(
+ JobConfig{
+ PluginName: pluginName,
+ Name: jobName,
+ ModuleName: modName,
+ FullName: modName + "_" + jobName,
+ Module: nil,
+ Out: io.Discard,
+ UpdateEvery: 0,
+ AutoDetectEvery: 0,
+ Priority: 0,
+ },
+ )
+}
+
+func TestNewJob(t *testing.T) {
+ assert.IsType(t, (*Job)(nil), newTestJob())
+}
+
+func TestJob_FullName(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.FullName(), fmt.Sprintf("%s_%s", modName, jobName))
+}
+
+func TestJob_ModuleName(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.ModuleName(), modName)
+}
+
+func TestJob_Name(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.Name(), jobName)
+}
+
+func TestJob_Panicked(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.Panicked(), job.panicked)
+ job.panicked = true
+ assert.Equal(t, job.Panicked(), job.panicked)
+}
+
+func TestJob_AutoDetectionEvery(t *testing.T) {
+ job := newTestJob()
+
+ assert.Equal(t, job.AutoDetectionEvery(), job.AutoDetectEvery)
+}
+
+func TestJob_RetryAutoDetection(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() error {
+ return nil
+ },
+ CheckFunc: func() error { return errors.New("check error") },
+ ChartsFunc: func() *Charts {
+ return &Charts{}
+ },
+ }
+ job.module = m
+ job.AutoDetectEvery = 1
+
+ assert.True(t, job.RetryAutoDetection())
+ assert.Equal(t, infTries, job.AutoDetectTries)
+ for i := 0; i < 1000; i++ {
+ _ = job.check()
+ }
+ assert.True(t, job.RetryAutoDetection())
+ assert.Equal(t, infTries, job.AutoDetectTries)
+
+ job.AutoDetectTries = 10
+ for i := 0; i < 10; i++ {
+ _ = job.check()
+ }
+ assert.False(t, job.RetryAutoDetection())
+ assert.Equal(t, 0, job.AutoDetectTries)
+}
+
+func TestJob_AutoDetection(t *testing.T) {
+ job := newTestJob()
+ var v int
+ m := &MockModule{
+ InitFunc: func() error {
+ v++
+ return nil
+ },
+ CheckFunc: func() error {
+ v++
+ return nil
+ },
+ ChartsFunc: func() *Charts {
+ v++
+ return &Charts{}
+ },
+ }
+ job.module = m
+
+ assert.NoError(t, job.AutoDetection())
+ assert.Equal(t, 3, v)
+}
+
+func TestJob_AutoDetection_FailInit(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() error {
+ return errors.New("init error")
+ },
+ }
+ job.module = m
+
+ assert.Error(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_FailCheck(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() error {
+ return nil
+ },
+ CheckFunc: func() error {
+ return errors.New("check error")
+ },
+ }
+ job.module = m
+
+ assert.Error(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_FailPostCheck(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() error {
+ return nil
+ },
+ CheckFunc: func() error {
+ return nil
+ },
+ ChartsFunc: func() *Charts {
+ return nil
+ },
+ }
+ job.module = m
+
+ assert.Error(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_PanicInit(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() error {
+ panic("panic in Init")
+ },
+ }
+ job.module = m
+
+ assert.Error(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_PanicCheck(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() error {
+ return nil
+ },
+ CheckFunc: func() error {
+ panic("panic in Check")
+ },
+ }
+ job.module = m
+
+ assert.Error(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_AutoDetection_PanicPostCheck(t *testing.T) {
+ job := newTestJob()
+ m := &MockModule{
+ InitFunc: func() error {
+ return nil
+ },
+ CheckFunc: func() error {
+ return nil
+ },
+ ChartsFunc: func() *Charts {
+ panic("panic in PostCheck")
+ },
+ }
+ job.module = m
+
+ assert.Error(t, job.AutoDetection())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_Start(t *testing.T) {
+ m := &MockModule{
+ ChartsFunc: func() *Charts {
+ return &Charts{
+ &Chart{
+ ID: "id",
+ Title: "title",
+ Units: "units",
+ Dims: Dims{
+ {ID: "id1"},
+ {ID: "id2"},
+ },
+ },
+ }
+ },
+ CollectFunc: func() map[string]int64 {
+ return map[string]int64{
+ "id1": 1,
+ "id2": 2,
+ }
+ },
+ }
+ job := newTestJob()
+ job.module = m
+ job.charts = job.module.Charts()
+ job.updateEvery = 1
+
+ go func() {
+ for i := 1; i < 3; i++ {
+ job.Tick(i)
+ time.Sleep(time.Second)
+ }
+ job.Stop()
+ }()
+
+ job.Start()
+
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_MainLoop_Panic(t *testing.T) {
+ m := &MockModule{
+ CollectFunc: func() map[string]int64 {
+ panic("panic in Collect")
+ },
+ }
+ job := newTestJob()
+ job.module = m
+ job.updateEvery = 1
+
+ go func() {
+ for i := 1; i < 3; i++ {
+ time.Sleep(time.Second)
+ job.Tick(i)
+ }
+ job.Stop()
+ }()
+
+ job.Start()
+
+ assert.True(t, job.Panicked())
+ assert.True(t, m.CleanupDone)
+}
+
+func TestJob_Tick(t *testing.T) {
+ job := newTestJob()
+ for i := 0; i < 3; i++ {
+ job.Tick(i)
+ }
+}
diff --git a/src/go/plugin/go.d/agent/module/mock.go b/src/go/plugin/go.d/agent/module/mock.go
new file mode 100644
index 000000000..f83c7dbcc
--- /dev/null
+++ b/src/go/plugin/go.d/agent/module/mock.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import "errors"
+
+const MockConfigSchema = `
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "option_str": {
+ "type": "string",
+ "description": "Option string value"
+ },
+ "option_int": {
+ "type": "integer",
+ "description": "Option integer value"
+ }
+ },
+ "required": [
+ "option_str",
+ "option_int"
+ ]
+}
+`
+
+type MockConfiguration struct {
+ OptionStr string `yaml:"option_str" json:"option_str"`
+ OptionInt int `yaml:"option_int" json:"option_int"`
+}
+
+ // MockModule is a mock implementation of the Module interface, used in tests.
+type MockModule struct {
+ Base
+
+ Config MockConfiguration `yaml:",inline" json:""`
+
+ FailOnInit bool
+
+ InitFunc func() error
+ CheckFunc func() error
+ ChartsFunc func() *Charts
+ CollectFunc func() map[string]int64
+ CleanupFunc func()
+ CleanupDone bool
+}
+
+// Init invokes InitFunc.
+func (m *MockModule) Init() error {
+ if m.FailOnInit {
+ return errors.New("mock init error")
+ }
+ if m.InitFunc == nil {
+ return nil
+ }
+ return m.InitFunc()
+}
+
+// Check invokes CheckFunc.
+func (m *MockModule) Check() error {
+ if m.CheckFunc == nil {
+ return nil
+ }
+ return m.CheckFunc()
+}
+
+// Charts invokes ChartsFunc.
+func (m *MockModule) Charts() *Charts {
+ if m.ChartsFunc == nil {
+ return nil
+ }
+ return m.ChartsFunc()
+}
+
+ // Collect invokes CollectFunc.
+func (m *MockModule) Collect() map[string]int64 {
+ if m.CollectFunc == nil {
+ return nil
+ }
+ return m.CollectFunc()
+}
+
+ // Cleanup invokes CleanupFunc if set and sets CleanupDone to true.
+func (m *MockModule) Cleanup() {
+ if m.CleanupFunc != nil {
+ m.CleanupFunc()
+ }
+ m.CleanupDone = true
+}
+
+func (m *MockModule) Configuration() any {
+ return m.Config
+}
diff --git a/src/go/plugin/go.d/agent/module/mock_test.go b/src/go/plugin/go.d/agent/module/mock_test.go
new file mode 100644
index 000000000..d7521911f
--- /dev/null
+++ b/src/go/plugin/go.d/agent/module/mock_test.go
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestMockModule_Init(t *testing.T) {
+ m := &MockModule{}
+
+ assert.NoError(t, m.Init())
+ m.InitFunc = func() error { return nil }
+ assert.NoError(t, m.Init())
+}
+
+func TestMockModule_Check(t *testing.T) {
+ m := &MockModule{}
+
+ assert.NoError(t, m.Check())
+ m.CheckFunc = func() error { return nil }
+ assert.NoError(t, m.Check())
+}
+
+func TestMockModule_Charts(t *testing.T) {
+ m := &MockModule{}
+ c := &Charts{}
+
+ assert.Nil(t, m.Charts())
+ m.ChartsFunc = func() *Charts { return c }
+ assert.True(t, c == m.Charts())
+}
+
+func TestMockModule_Collect(t *testing.T) {
+ m := &MockModule{}
+ d := map[string]int64{
+ "1": 1,
+ }
+
+ assert.Nil(t, m.Collect())
+ m.CollectFunc = func() map[string]int64 { return d }
+ assert.Equal(t, d, m.Collect())
+}
+
+func TestMockModule_Cleanup(t *testing.T) {
+ m := &MockModule{}
+ require.False(t, m.CleanupDone)
+
+ m.Cleanup()
+ assert.True(t, m.CleanupDone)
+}
diff --git a/src/go/plugin/go.d/agent/module/module.go b/src/go/plugin/go.d/agent/module/module.go
new file mode 100644
index 000000000..13e20f2ae
--- /dev/null
+++ b/src/go/plugin/go.d/agent/module/module.go
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+// Module is an interface that represents a module.
+type Module interface {
+ // Init does initialization.
+ // If it returns error, the job will be disabled.
+ Init() error
+
+ // Check is called after Init.
+ // If it returns error, the job will be disabled.
+ Check() error
+
+ // Charts returns the chart definition.
+ Charts() *Charts
+
+ // Collect collects metrics.
+ Collect() map[string]int64
+
+ // Cleanup performs the module cleanup.
+ Cleanup()
+
+ GetBase() *Base
+
+ Configuration() any
+}
+
+// Base is a helper struct. All modules should embed this struct.
+type Base struct {
+ *logger.Logger
+}
+
+func (b *Base) GetBase() *Base { return b }
+
+func TestConfigurationSerialize(t *testing.T, mod Module, cfgJSON, cfgYAML []byte) {
+ t.Helper()
+ tests := map[string]struct {
+ config []byte
+ unmarshal func(in []byte, out interface{}) (err error)
+ marshal func(in interface{}) (out []byte, err error)
+ }{
+ "json": {config: cfgJSON, marshal: json.Marshal, unmarshal: json.Unmarshal},
+ "yaml": {config: cfgYAML, marshal: yaml.Marshal, unmarshal: yaml.Unmarshal},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+
+ require.NoError(t, test.unmarshal(test.config, mod), "unmarshal test->mod")
+ bs, err := test.marshal(mod.Configuration())
+ require.NoError(t, err, "marshal mod config")
+
+ var want map[string]any
+ var got map[string]any
+
+ require.NoError(t, test.unmarshal(test.config, &want), "unmarshal test->map")
+ require.NoError(t, test.unmarshal(bs, &got), "unmarshal mod->map")
+
+ require.NotNil(t, want, "want map")
+ require.NotNil(t, got, "got map")
+
+ assert.Equal(t, want, got)
+ })
+ }
+}
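
For reference, a minimal collector that satisfies the Module interface above (a sketch, not part of the patch; the type name and config fields are illustrative):

    package example

    import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"

    type Config struct {
        UpdateEvery int `yaml:"update_every" json:"update_every"`
    }

    // Example embeds module.Base, which supplies GetBase() and the job logger.
    type Example struct {
        module.Base
        Config Config `yaml:",inline" json:""`
    }

    func (e *Example) Init() error  { return nil }
    func (e *Example) Check() error { return nil }

    func (e *Example) Charts() *module.Charts {
        return &module.Charts{
            {ID: "random", Title: "Random", Units: "num", Ctx: "example.random",
                Dims: module.Dims{{ID: "random"}}},
        }
    }

    func (e *Example) Collect() map[string]int64 { return map[string]int64{"random": 1} }
    func (e *Example) Cleanup()                  {}
    func (e *Example) Configuration() any        { return e.Config }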
diff --git a/src/go/plugin/go.d/agent/module/registry.go b/src/go/plugin/go.d/agent/module/registry.go
new file mode 100644
index 000000000..1d2aa9477
--- /dev/null
+++ b/src/go/plugin/go.d/agent/module/registry.go
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import "fmt"
+
+const (
+ UpdateEvery = 1
+ AutoDetectionRetry = 0
+ Priority = 70000
+)
+
+// Defaults is a set of module default parameters.
+type Defaults struct {
+ UpdateEvery int
+ AutoDetectionRetry int
+ Priority int
+ Disabled bool
+}
+
+type (
+ // Creator is a Job builder.
+ Creator struct {
+ Defaults
+ Create func() Module
+ JobConfigSchema string
+ Config func() any
+ }
+ // Registry is a collection of Creators.
+ Registry map[string]Creator
+)
+
+ // DefaultRegistry is the default Registry used by Register.
+var DefaultRegistry = Registry{}
+
+// Register registers a module in the DefaultRegistry.
+func Register(name string, creator Creator) {
+ DefaultRegistry.Register(name, creator)
+}
+
+// Register registers a module.
+func (r Registry) Register(name string, creator Creator) {
+ if _, ok := r[name]; ok {
+ panic(fmt.Sprintf("%s is already in registry", name))
+ }
+ r[name] = creator
+}
+
+func (r Registry) Lookup(name string) (Creator, bool) {
+ v, ok := r[name]
+ return v, ok
+}
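
Collectors register themselves with the default registry from an init function. A sketch reusing the Example type from the module.go note above (the "example" name and schema literal are illustrative, not part of the patch):

    package example

    import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"

    func init() {
        // Panics at startup if another collector already registered the same name.
        module.Register("example", module.Creator{
            Defaults:        module.Defaults{UpdateEvery: module.UpdateEvery, Priority: module.Priority},
            JobConfigSchema: `{"type": "object"}`,
            Create:          func() module.Module { return &Example{} },
            Config:          func() any { return &Config{UpdateEvery: module.UpdateEvery} },
        })
    }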
diff --git a/src/go/plugin/go.d/agent/module/registry_test.go b/src/go/plugin/go.d/agent/module/registry_test.go
new file mode 100644
index 000000000..c9f31105a
--- /dev/null
+++ b/src/go/plugin/go.d/agent/module/registry_test.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package module
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRegister(t *testing.T) {
+ modName := "modName"
+ registry := make(Registry)
+
+ // OK case
+ assert.NotPanics(
+ t,
+ func() {
+ registry.Register(modName, Creator{})
+ })
+
+ _, exist := registry[modName]
+
+ require.True(t, exist)
+
+ // Panic case
+ assert.Panics(
+ t,
+ func() {
+ registry.Register(modName, Creator{})
+ })
+
+}
diff --git a/src/go/plugin/go.d/agent/netdataapi/api.go b/src/go/plugin/go.d/agent/netdataapi/api.go
new file mode 100644
index 000000000..4f2b7a9b5
--- /dev/null
+++ b/src/go/plugin/go.d/agent/netdataapi/api.go
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package netdataapi
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+type (
+ // API implements Netdata external plugins API.
+ // https://learn.netdata.cloud/docs/agent/collectors/plugins.d#the-output-of-the-plugin
+ API struct {
+ io.Writer
+ }
+)
+
+const quotes = "' '"
+
+var (
+ end = []byte("END\n\n")
+ clabelCommit = []byte("CLABEL_COMMIT\n")
+ newLine = []byte("\n")
+)
+
+func New(w io.Writer) *API { return &API{w} }
+
+ // CHART creates or updates a chart.
+func (a *API) CHART(
+ typeID string,
+ ID string,
+ name string,
+ title string,
+ units string,
+ family string,
+ context string,
+ chartType string,
+ priority int,
+ updateEvery int,
+ options string,
+ plugin string,
+ module string) error {
+ _, err := a.Write([]byte("CHART " + "'" +
+ typeID + "." + ID + quotes +
+ name + quotes +
+ title + quotes +
+ units + quotes +
+ family + quotes +
+ context + quotes +
+ chartType + quotes +
+ strconv.Itoa(priority) + quotes +
+ strconv.Itoa(updateEvery) + quotes +
+ options + quotes +
+ plugin + quotes +
+ module + "'\n"))
+ return err
+}
+
+ // DIMENSION adds or updates a dimension to the chart just created.
+func (a *API) DIMENSION(
+ ID string,
+ name string,
+ algorithm string,
+ multiplier int,
+ divisor int,
+ options string) error {
+ _, err := a.Write([]byte("DIMENSION '" +
+ ID + quotes +
+ name + quotes +
+ algorithm + quotes +
+ strconv.Itoa(multiplier) + quotes +
+ strconv.Itoa(divisor) + quotes +
+ options + "'\n"))
+ return err
+}
+
+ // CLABEL adds or updates a label to the chart.
+func (a *API) CLABEL(key, value string, source int) error {
+ _, err := a.Write([]byte("CLABEL '" +
+ key + quotes +
+ value + quotes +
+ strconv.Itoa(source) + "'\n"))
+ return err
+}
+
+// CLABELCOMMIT adds labels to the chart. Should be called after one or more CLABEL.
+func (a *API) CLABELCOMMIT() error {
+ _, err := a.Write(clabelCommit)
+ return err
+}
+
+// BEGIN initializes data collection for a chart.
+func (a *API) BEGIN(typeID string, ID string, msSince int) (err error) {
+ if msSince > 0 {
+ _, err = a.Write([]byte("BEGIN " + "'" + typeID + "." + ID + "' " + strconv.Itoa(msSince) + "\n"))
+ } else {
+ _, err = a.Write([]byte("BEGIN " + "'" + typeID + "." + ID + "'\n"))
+ }
+ return err
+}
+
+// SET sets the value of a dimension for the initialized chart.
+func (a *API) SET(ID string, value int64) error {
+ _, err := a.Write([]byte("SET '" + ID + "' = " + strconv.FormatInt(value, 10) + "\n"))
+ return err
+}
+
+// SETEMPTY sets the empty value of a dimension for the initialized chart.
+func (a *API) SETEMPTY(ID string) error {
+ _, err := a.Write([]byte("SET '" + ID + "' = \n"))
+ return err
+}
+
+// VARIABLE sets the value of a CHART scope variable for the initialized chart.
+func (a *API) VARIABLE(ID string, value int64) error {
+ _, err := a.Write([]byte("VARIABLE CHART '" + ID + "' = " + strconv.FormatInt(value, 10) + "\n"))
+ return err
+}
+
+// END completes data collection for the initialized chart.
+func (a *API) END() error {
+ _, err := a.Write(end)
+ return err
+}
+
+// DISABLE disables this plugin. This will prevent Netdata from restarting the plugin.
+func (a *API) DISABLE() error {
+ _, err := a.Write([]byte("DISABLE\n"))
+ return err
+}
+
+// EMPTYLINE writes an empty line.
+func (a *API) EMPTYLINE() error {
+ _, err := a.Write(newLine)
+ return err
+}
+
+func (a *API) HOSTINFO(guid, hostname string, labels map[string]string) error {
+ if err := a.HOSTDEFINE(guid, hostname); err != nil {
+ return err
+ }
+ for k, v := range labels {
+ if err := a.HOSTLABEL(k, v); err != nil {
+ return err
+ }
+ }
+ return a.HOSTDEFINEEND()
+}
+
+func (a *API) HOSTDEFINE(guid, hostname string) error {
+ _, err := fmt.Fprintf(a, "HOST_DEFINE '%s' '%s'\n", guid, hostname)
+ return err
+}
+
+func (a *API) HOSTLABEL(name, value string) error {
+ _, err := fmt.Fprintf(a, "HOST_LABEL '%s' '%s'\n", name, value)
+ return err
+}
+
+func (a *API) HOSTDEFINEEND() error {
+ _, err := fmt.Fprintf(a, "HOST_DEFINE_END\n\n")
+ return err
+}
+
+func (a *API) HOST(guid string) error {
+ _, err := a.Write([]byte("HOST " + "'" +
+ guid + "'\n\n"))
+ return err
+}
+
+func (a *API) FUNCRESULT(uid, contentType, payload, code, expireTimestamp string) {
+ var buf bytes.Buffer
+
+ buf.WriteString("FUNCTION_RESULT_BEGIN " +
+ uid + " " +
+ code + " " +
+ contentType + " " +
+ expireTimestamp + "\n",
+ )
+
+ if payload != "" {
+ buf.WriteString(payload + "\n")
+ }
+
+ buf.WriteString("FUNCTION_RESULT_END\n\n")
+
+ _, _ = buf.WriteTo(a)
+}
+
+func (a *API) CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string) {
+ // https://learn.netdata.cloud/docs/contributing/external-plugins/#config
+
+ _, _ = a.Write([]byte("CONFIG " +
+ id + " " +
+ "create" + " " +
+ status + " " +
+ configType + " " +
+ path + " " +
+ sourceType + " '" +
+ source + "' '" +
+ supportedCommands + "' 0x0000 0x0000\n\n",
+ ))
+}
+
+func (a *API) CONFIGDELETE(id string) {
+ _, _ = a.Write([]byte("CONFIG " + id + " delete\n\n"))
+}
+
+func (a *API) CONFIGSTATUS(id, status string) {
+ _, _ = a.Write([]byte("CONFIG " + id + " status " + status + "\n\n"))
+}
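
Put together, one collection cycle emitted through this API produces plugins.d text like the following (a sketch with illustrative chart and dimension names):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/netdataapi"
    )

    func main() {
        buf := &bytes.Buffer{}
        api := netdataapi.New(buf)

        _ = api.BEGIN("example_job", "requests", 1000000) // time since the previous collection, as computed in job.go
        _ = api.SET("success", 10)
        _ = api.SETEMPTY("failed") // dimension with no data this cycle
        _ = api.END()

        fmt.Print(buf.String())
        // BEGIN 'example_job.requests' 1000000
        // SET 'success' = 10
        // SET 'failed' = 
        // END
    }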
diff --git a/src/go/plugin/go.d/agent/netdataapi/api_test.go b/src/go/plugin/go.d/agent/netdataapi/api_test.go
new file mode 100644
index 000000000..e5087839b
--- /dev/null
+++ b/src/go/plugin/go.d/agent/netdataapi/api_test.go
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package netdataapi
+
+import (
+ "bytes"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestAPI_CHART(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.CHART(
+ "",
+ "id",
+ "name",
+ "title",
+ "units",
+ "family",
+ "context",
+ "line",
+ 1,
+ 1,
+ "",
+ "plugin",
+ "module",
+ )
+
+ assert.Equal(
+ t,
+ "CHART '.id' 'name' 'title' 'units' 'family' 'context' 'line' '1' '1' '' 'plugin' 'module'\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_DIMENSION(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.DIMENSION(
+ "id",
+ "name",
+ "absolute",
+ 1,
+ 1,
+ "",
+ )
+
+ assert.Equal(
+ t,
+ "DIMENSION 'id' 'name' 'absolute' '1' '1' ''\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_BEGIN(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.BEGIN(
+ "typeID",
+ "id",
+ 0,
+ )
+
+ assert.Equal(
+ t,
+ "BEGIN 'typeID.id'\n",
+ buf.String(),
+ )
+
+ buf.Reset()
+
+ _ = a.BEGIN(
+ "typeID",
+ "id",
+ 1,
+ )
+
+ assert.Equal(
+ t,
+ "BEGIN 'typeID.id' 1\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_SET(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.SET("id", 100)
+
+ assert.Equal(
+ t,
+ "SET 'id' = 100\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_SETEMPTY(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.SETEMPTY("id")
+
+ assert.Equal(
+ t,
+ "SET 'id' = \n",
+ buf.String(),
+ )
+}
+
+func TestAPI_VARIABLE(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.VARIABLE("id", 100)
+
+ assert.Equal(
+ t,
+ "VARIABLE CHART 'id' = 100\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_END(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.END()
+
+ assert.Equal(
+ t,
+ "END\n\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_CLABEL(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.CLABEL("key", "value", 1)
+
+ assert.Equal(
+ t,
+ "CLABEL 'key' 'value' '1'\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_CLABELCOMMIT(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.CLABELCOMMIT()
+
+ assert.Equal(
+ t,
+ "CLABEL_COMMIT\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_DISABLE(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.DISABLE()
+
+ assert.Equal(
+ t,
+ "DISABLE\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_EMPTYLINE(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.EMPTYLINE()
+
+ assert.Equal(
+ t,
+ "\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_HOST(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.HOST("guid")
+
+ assert.Equal(
+ t,
+ "HOST 'guid'\n\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_HOSTDEFINE(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.HOSTDEFINE("guid", "hostname")
+
+ assert.Equal(
+ t,
+ "HOST_DEFINE 'guid' 'hostname'\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_HOSTLABEL(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.HOSTLABEL("name", "value")
+
+ assert.Equal(
+ t,
+ "HOST_LABEL 'name' 'value'\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_HOSTDEFINEEND(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.HOSTDEFINEEND()
+
+ assert.Equal(
+ t,
+ "HOST_DEFINE_END\n\n",
+ buf.String(),
+ )
+}
+
+func TestAPI_HOSTINFO(t *testing.T) {
+ buf := &bytes.Buffer{}
+ a := API{Writer: buf}
+
+ _ = a.HOSTINFO("guid", "hostname", map[string]string{"label1": "value1"})
+
+ assert.Equal(
+ t,
+ `HOST_DEFINE 'guid' 'hostname'
+HOST_LABEL 'label1' 'value1'
+HOST_DEFINE_END
+
+`,
+ buf.String(),
+ )
+}
+
+func TestAPI_FUNCRESULT(t *testing.T) {
+
+}
diff --git a/src/go/plugin/go.d/agent/safewriter/writer.go b/src/go/plugin/go.d/agent/safewriter/writer.go
new file mode 100644
index 000000000..533c1055d
--- /dev/null
+++ b/src/go/plugin/go.d/agent/safewriter/writer.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package safewriter
+
+import (
+ "io"
+ "os"
+ "sync"
+)
+
+var Stdout = New(os.Stdout)
+
+func New(w io.Writer) io.Writer {
+ return &writer{
+ mx: &sync.Mutex{},
+ w: w,
+ }
+}
+
+type writer struct {
+ mx *sync.Mutex
+ w io.Writer
+}
+
+func (w *writer) Write(p []byte) (n int, err error) {
+ w.mx.Lock()
+ n, err = w.w.Write(p)
+ w.mx.Unlock()
+ return n, err
+}
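
safewriter serializes access to a shared io.Writer with a mutex, so concurrent collectors writing protocol lines to the same destination (normally safewriter.Stdout) cannot interleave a single Write call. A minimal sketch of concurrent use against a buffer:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/safewriter"
)

func main() {
	buf := &bytes.Buffer{}
	w := safewriter.New(buf) // every Write below goes through one mutex

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			// Each Write is atomic with respect to the others, so chart blocks never interleave.
			_, _ = w.Write([]byte(fmt.Sprintf("BEGIN 'chart%d'\nEND\n\n", i)))
		}(i)
	}
	wg.Wait()

	fmt.Print(buf.String())
}
```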
diff --git a/src/go/plugin/go.d/agent/setup.go b/src/go/plugin/go.d/agent/setup.go
new file mode 100644
index 000000000..12da59380
--- /dev/null
+++ b/src/go/plugin/go.d/agent/setup.go
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package agent
+
+import (
+ "io"
+ "os"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/confgroup"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/dummy"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/file"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/discovery/sd"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/hostinfo"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes"
+
+ "gopkg.in/yaml.v2"
+)
+
+func (a *Agent) loadPluginConfig() config {
+ a.Info("loading config file")
+
+ if len(a.ConfDir) == 0 {
+ a.Info("config dir not provided, will use defaults")
+ return defaultConfig()
+ }
+
+ cfgPath := a.Name + ".conf"
+ a.Debugf("looking for '%s' in %v", cfgPath, a.ConfDir)
+
+ path, err := a.ConfDir.Find(cfgPath)
+ if err != nil || path == "" {
+ a.Warning("couldn't find config, will use defaults")
+ return defaultConfig()
+ }
+ a.Infof("found '%s", path)
+
+ cfg := defaultConfig()
+ if err := loadYAML(&cfg, path); err != nil {
+ a.Warningf("couldn't load config '%s': %v, will use defaults", path, err)
+ return defaultConfig()
+ }
+ a.Info("config successfully loaded")
+ return cfg
+}
+
+func (a *Agent) loadEnabledModules(cfg config) module.Registry {
+ a.Info("loading modules")
+
+ all := a.RunModule == "all" || a.RunModule == ""
+ enabled := module.Registry{}
+
+ for name, creator := range a.ModuleRegistry {
+ if !all && a.RunModule != name {
+ continue
+ }
+ if all {
+ // Known issue: go.d/logind high CPU usage on Alma Linux 8 (https://github.com/netdata/netdata/issues/15930)
+ if !cfg.isExplicitlyEnabled(name) && (creator.Disabled || name == "logind" && hostinfo.SystemdVersion == 239) {
+ a.Infof("'%s' module disabled by default, should be explicitly enabled in the config", name)
+ continue
+ }
+ if !cfg.isImplicitlyEnabled(name) {
+ a.Infof("'%s' module disabled in the config file", name)
+ continue
+ }
+ }
+ enabled[name] = creator
+ }
+
+ a.Infof("enabled/registered modules: %d/%d", len(enabled), len(a.ModuleRegistry))
+
+ return enabled
+}
+
+func (a *Agent) buildDiscoveryConf(enabled module.Registry) discovery.Config {
+ a.Info("building discovery config")
+
+ reg := confgroup.Registry{}
+ for name, creator := range enabled {
+ reg.Register(name, confgroup.Default{
+ MinUpdateEvery: a.MinUpdateEvery,
+ UpdateEvery: creator.UpdateEvery,
+ AutoDetectionRetry: creator.AutoDetectionRetry,
+ Priority: creator.Priority,
+ })
+ }
+
+ var readPaths, dummyPaths []string
+
+ if len(a.ModulesConfDir) == 0 {
+ if hostinfo.IsInsideK8sCluster() {
+ return discovery.Config{Registry: reg}
+ }
+ a.Info("modules conf dir not provided, will use default config for all enabled modules")
+ for name := range enabled {
+ dummyPaths = append(dummyPaths, name)
+ }
+ return discovery.Config{
+ Registry: reg,
+ Dummy: dummy.Config{Names: dummyPaths},
+ }
+ }
+
+ for name := range enabled {
+ // TODO: properly handle module renaming
+ // We need to announce this change in Netdata v1.39.0 release notes and then remove this workaround.
+ // This is just a quick fix for wmi=>windows. We need to prefer user wmi.conf over windows.conf
+ // 2nd part of this fix is in /agent/job/discovery/file/parse.go parseStaticFormat()
+ if name == "windows" {
+ cfgName := "wmi.conf"
+ a.Debugf("looking for '%s' in %v", cfgName, a.ModulesConfDir)
+
+ path, err := a.ModulesConfDir.Find(cfgName)
+
+ if err == nil && strings.Contains(path, "etc/netdata") {
+ a.Infof("found '%s", path)
+ readPaths = append(readPaths, path)
+ continue
+ }
+ }
+
+ cfgName := name + ".conf"
+ a.Debugf("looking for '%s' in %v", cfgName, a.ModulesConfDir)
+
+ path, err := a.ModulesConfDir.Find(cfgName)
+ if hostinfo.IsInsideK8sCluster() {
+ if err != nil {
+ a.Infof("not found '%s', won't use default (reading stock configs is disabled in k8s)", cfgName)
+ continue
+ } else if isStockConfig(path) {
+ a.Infof("found '%s', but won't load it (reading stock configs is disabled in k8s)", cfgName)
+ continue
+ }
+ }
+ if err != nil {
+ a.Infof("couldn't find '%s' module config, will use default config", name)
+ dummyPaths = append(dummyPaths, name)
+ } else {
+ a.Debugf("found '%s", path)
+ readPaths = append(readPaths, path)
+ }
+ }
+
+ a.Infof("dummy/read/watch paths: %d/%d/%d", len(dummyPaths), len(readPaths), len(a.ModulesSDConfPath))
+
+ return discovery.Config{
+ Registry: reg,
+ File: file.Config{
+ Read: readPaths,
+ Watch: a.ModulesSDConfPath,
+ },
+ Dummy: dummy.Config{
+ Names: dummyPaths,
+ },
+ SD: sd.Config{
+ ConfDir: a.ModulesConfSDDir,
+ },
+ }
+}
+
+func (a *Agent) setupVnodeRegistry() *vnodes.Vnodes {
+ a.Debugf("looking for 'vnodes/' in %v", a.VnodesConfDir)
+
+ if len(a.VnodesConfDir) == 0 {
+ return nil
+ }
+
+ dirPath, err := a.VnodesConfDir.Find("vnodes/")
+ if err != nil || dirPath == "" {
+ return nil
+ }
+
+ reg := vnodes.New(dirPath)
+ a.Infof("found '%s' (%d vhosts)", dirPath, reg.Len())
+
+ return reg
+}
+
+func loadYAML(conf any, path string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = f.Close() }()
+
+ if err = yaml.NewDecoder(f).Decode(conf); err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ return nil
+}
+
+var (
+ envNDStockConfigDir = os.Getenv("NETDATA_STOCK_CONFIG_DIR")
+)
+
+func isStockConfig(path string) bool {
+ if envNDStockConfigDir == "" {
+ return false
+ }
+ return strings.HasPrefix(path, envNDStockConfigDir)
+}
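
Note that loadYAML above treats io.EOF from the decoder as success, which is why an empty config file (like testdata/agent-empty.conf added below) falls back to defaults instead of failing. A small standalone sketch of the same pattern, using a hypothetical config struct:

```go
package main

import (
	"fmt"
	"io"
	"os"

	"gopkg.in/yaml.v2"
)

// pluginConfig is a hypothetical stand-in for the agent's config struct.
type pluginConfig struct {
	Enabled    bool `yaml:"enabled"`
	DefaultRun bool `yaml:"default_run"`
}

func main() {
	f, err := os.CreateTemp("", "agent-empty-*.conf") // an empty file, like agent-empty.conf
	if err != nil {
		panic(err)
	}
	defer func() { _ = f.Close(); _ = os.Remove(f.Name()) }()

	cfg := pluginConfig{Enabled: true, DefaultRun: true} // defaults

	// Same pattern as loadYAML above: io.EOF means "empty file" and is not an error,
	// so the defaults stay in place.
	if err := yaml.NewDecoder(f).Decode(&cfg); err != nil && err != io.EOF {
		fmt.Println("couldn't load config:", err)
		return
	}
	fmt.Printf("%+v\n", cfg) // {Enabled:true DefaultRun:true}
}
```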
diff --git a/src/go/plugin/go.d/agent/setup_test.go b/src/go/plugin/go.d/agent/setup_test.go
new file mode 100644
index 000000000..148b822cf
--- /dev/null
+++ b/src/go/plugin/go.d/agent/setup_test.go
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package agent
+
+import (
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
+)
+
+func TestConfig_UnmarshalYAML(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantCfg config
+ }{
+ "valid configuration": {
+ input: "enabled: yes\ndefault_run: yes\nmodules:\n module1: yes\n module2: yes",
+ wantCfg: config{
+ Enabled: true,
+ DefaultRun: true,
+ Modules: map[string]bool{
+ "module1": true,
+ "module2": true,
+ },
+ },
+ },
+ "valid configuration with broken modules section": {
+ input: "enabled: yes\ndefault_run: yes\nmodules:\nmodule1: yes\nmodule2: yes",
+ wantCfg: config{
+ Enabled: true,
+ DefaultRun: true,
+ Modules: map[string]bool{
+ "module1": true,
+ "module2": true,
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ var cfg config
+ err := yaml.Unmarshal([]byte(test.input), &cfg)
+ require.NoError(t, err)
+ assert.Equal(t, test.wantCfg, cfg)
+ })
+ }
+}
+
+func TestAgent_loadConfig(t *testing.T) {
+ tests := map[string]struct {
+ agent Agent
+ wantCfg config
+ }{
+ "valid config file": {
+ agent: Agent{
+ Name: "agent-valid",
+ ConfDir: []string{"testdata"},
+ },
+ wantCfg: config{
+ Enabled: true,
+ DefaultRun: true,
+ MaxProcs: 1,
+ Modules: map[string]bool{
+ "module1": true,
+ "module2": true,
+ },
+ },
+ },
+ "no config path provided": {
+ agent: Agent{},
+ wantCfg: defaultConfig(),
+ },
+ "config file not found": {
+ agent: Agent{
+ Name: "agent",
+ ConfDir: []string{"testdata/not-exist"},
+ },
+ wantCfg: defaultConfig(),
+ },
+ "empty config file": {
+ agent: Agent{
+ Name: "agent-empty",
+ ConfDir: []string{"testdata"},
+ },
+ wantCfg: defaultConfig(),
+ },
+ "invalid syntax config file": {
+ agent: Agent{
+ Name: "agent-invalid-syntax",
+ ConfDir: []string{"testdata"},
+ },
+ wantCfg: defaultConfig(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.wantCfg, test.agent.loadPluginConfig())
+ })
+ }
+}
+
+func TestAgent_loadEnabledModules(t *testing.T) {
+ tests := map[string]struct {
+ agent Agent
+ cfg config
+ wantModules module.Registry
+ }{
+ "load all, module disabled by default but explicitly enabled": {
+ agent: Agent{
+ ModuleRegistry: module.Registry{
+ "module1": module.Creator{Defaults: module.Defaults{Disabled: true}},
+ },
+ },
+ cfg: config{
+ Modules: map[string]bool{"module1": true},
+ },
+ wantModules: module.Registry{
+ "module1": module.Creator{Defaults: module.Defaults{Disabled: true}},
+ },
+ },
+ "load all, module disabled by default and not explicitly enabled": {
+ agent: Agent{
+ ModuleRegistry: module.Registry{
+ "module1": module.Creator{Defaults: module.Defaults{Disabled: true}},
+ },
+ },
+ wantModules: module.Registry{},
+ },
+ "load all, module in config modules (default_run=true)": {
+ agent: Agent{
+ ModuleRegistry: module.Registry{
+ "module1": module.Creator{},
+ },
+ },
+ cfg: config{
+ Modules: map[string]bool{"module1": true},
+ DefaultRun: true,
+ },
+ wantModules: module.Registry{
+ "module1": module.Creator{},
+ },
+ },
+ "load all, module not in config modules (default_run=true)": {
+ agent: Agent{
+ ModuleRegistry: module.Registry{"module1": module.Creator{}},
+ },
+ cfg: config{
+ DefaultRun: true,
+ },
+ wantModules: module.Registry{"module1": module.Creator{}},
+ },
+ "load all, module in config modules (default_run=false)": {
+ agent: Agent{
+ ModuleRegistry: module.Registry{
+ "module1": module.Creator{},
+ },
+ },
+ cfg: config{
+ Modules: map[string]bool{"module1": true},
+ },
+ wantModules: module.Registry{
+ "module1": module.Creator{},
+ },
+ },
+ "load all, module not in config modules (default_run=false)": {
+ agent: Agent{
+ ModuleRegistry: module.Registry{
+ "module1": module.Creator{},
+ },
+ },
+ wantModules: module.Registry{},
+ },
+ "load specific, module exist in registry": {
+ agent: Agent{
+ RunModule: "module1",
+ ModuleRegistry: module.Registry{
+ "module1": module.Creator{},
+ },
+ },
+ wantModules: module.Registry{
+ "module1": module.Creator{},
+ },
+ },
+ "load specific, module doesnt exist in registry": {
+ agent: Agent{
+ RunModule: "module3",
+ ModuleRegistry: module.Registry{},
+ },
+ wantModules: module.Registry{},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.wantModules, test.agent.loadEnabledModules(test.cfg))
+ })
+ }
+}
+
+// TODO: tech debt
+func TestAgent_buildDiscoveryConf(t *testing.T) {
+
+}
diff --git a/src/go/plugin/go.d/agent/testdata/agent-empty.conf b/src/go/plugin/go.d/agent/testdata/agent-empty.conf
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/go/plugin/go.d/agent/testdata/agent-empty.conf
diff --git a/src/go/plugin/go.d/agent/testdata/agent-invalid-syntax.conf b/src/go/plugin/go.d/agent/testdata/agent-invalid-syntax.conf
new file mode 100644
index 000000000..c4a0b914c
--- /dev/null
+++ b/src/go/plugin/go.d/agent/testdata/agent-invalid-syntax.conf
@@ -0,0 +1,7 @@
+- enabled: yes
+default_run: yes
+max_procs: 1
+
+modules:
+ module1: yes
+ module2: yes
diff --git a/src/go/plugin/go.d/agent/testdata/agent-valid.conf b/src/go/plugin/go.d/agent/testdata/agent-valid.conf
new file mode 100644
index 000000000..ec5e1d06e
--- /dev/null
+++ b/src/go/plugin/go.d/agent/testdata/agent-valid.conf
@@ -0,0 +1,7 @@
+enabled: yes
+default_run: yes
+max_procs: 1
+
+modules:
+ module1: yes
+ module2: yes
diff --git a/src/go/plugin/go.d/agent/ticker/ticker.go b/src/go/plugin/go.d/agent/ticker/ticker.go
new file mode 100644
index 000000000..e4228fe4c
--- /dev/null
+++ b/src/go/plugin/go.d/agent/ticker/ticker.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ticker
+
+import "time"
+
+type (
+ // Ticker holds a channel that delivers ticks of a clock at intervals.
+ // The ticks are aligned to interval boundaries.
+ Ticker struct {
+ C <-chan int
+ done chan struct{}
+ loops int
+ interval time.Duration
+ }
+)
+
+// New returns a new Ticker containing a channel that delivers the tick counter with a period specified by the interval argument.
+// Ticks are aligned to interval boundaries; it adjusts the intervals or drops ticks to make up for slow receivers.
+// The interval must be greater than zero. Stop the Ticker to release associated resources.
+func New(interval time.Duration) *Ticker {
+ ticker := &Ticker{
+ interval: interval,
+ done: make(chan struct{}, 1),
+ }
+ ticker.start()
+ return ticker
+}
+
+func (t *Ticker) start() {
+ ch := make(chan int)
+ t.C = ch
+ go func() {
+ LOOP:
+ for {
+ now := time.Now()
+ nextRun := now.Truncate(t.interval).Add(t.interval)
+
+ time.Sleep(nextRun.Sub(now))
+ select {
+ case <-t.done:
+ close(ch)
+ break LOOP
+ case ch <- t.loops:
+ t.loops++
+ }
+ }
+ }()
+}
+
+// Stop turns off a Ticker. After Stop, no more ticks will be sent.
+// The ticker goroutine closes the channel on the next interval boundary after Stop is called.
+func (t *Ticker) Stop() {
+ t.done <- struct{}{}
+}
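
Unlike time.Ticker, this Ticker aligns ticks to interval boundaries (e.g. whole seconds) and delivers the loop counter on C. A minimal usage sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/ticker"
)

func main() {
	tk := ticker.New(time.Second)
	defer tk.Stop()

	for i := 0; i < 3; i++ {
		loops := <-tk.C // the value received is the tick counter, not a timestamp
		fmt.Printf("tick %d at %s\n", loops, time.Now().Format("15:04:05.000"))
	}
}
```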
diff --git a/src/go/plugin/go.d/agent/ticker/ticket_test.go b/src/go/plugin/go.d/agent/ticker/ticket_test.go
new file mode 100644
index 000000000..193085365
--- /dev/null
+++ b/src/go/plugin/go.d/agent/ticker/ticket_test.go
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ticker
+
+import (
+ "testing"
+ "time"
+)
+
+// TODO: often fails on Circle CI (~200-240)
+var allowedDelta = 500 * time.Millisecond
+
+func TestTickerParallel(t *testing.T) {
+ for i := 0; i < 100; i++ {
+ i := i
+ go func() {
+ time.Sleep(time.Second / 100 * time.Duration(i))
+ TestTicker(t)
+ }()
+ }
+ time.Sleep(4 * time.Second)
+}
+
+func TestTicker(t *testing.T) {
+ tk := New(time.Second)
+ defer tk.Stop()
+ prev := time.Now()
+ for i := 0; i < 3; i++ {
+ <-tk.C
+ now := time.Now()
+ diff := abs(now.Round(time.Second).Sub(now))
+ if diff >= allowedDelta {
+ t.Errorf("Ticker is not aligned: expect delta < %v but was: %v (%s)", allowedDelta, diff, now.Format(time.RFC3339Nano))
+ }
+ if i > 0 {
+ dt := now.Sub(prev)
+ if abs(dt-time.Second) >= allowedDelta {
+ t.Errorf("Ticker interval: expect delta < %v ns but was: %v", allowedDelta, abs(dt-time.Second))
+ }
+ }
+ prev = now
+ }
+}
+
+func abs(a time.Duration) time.Duration {
+ if a < 0 {
+ return -a
+ }
+ return a
+}
diff --git a/src/go/plugin/go.d/agent/vnodes/testdata/config.yaml b/src/go/plugin/go.d/agent/vnodes/testdata/config.yaml
new file mode 100644
index 000000000..db256d32f
--- /dev/null
+++ b/src/go/plugin/go.d/agent/vnodes/testdata/config.yaml
@@ -0,0 +1,11 @@
+- hostname: first
+ guid: 4ea21e84-93b4-418b-b83e-79397610cd6e
+ labels:
+ area: "41"
+ level: "82"
+
+- hostname: second
+ guid: 9486b0e1-b391-4d9a-bd88-5c703183f9b6
+ labels:
+ area: "51"
+ level: "92"
diff --git a/src/go/plugin/go.d/agent/vnodes/vnodes.go b/src/go/plugin/go.d/agent/vnodes/vnodes.go
new file mode 100644
index 000000000..9272f1514
--- /dev/null
+++ b/src/go/plugin/go.d/agent/vnodes/vnodes.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vnodes
+
+import (
+ "io"
+ "io/fs"
+ "log/slog"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+
+ "gopkg.in/yaml.v2"
+)
+
+var Disabled = false // TODO: remove after Netdata v1.39.0. Fix for "from source" stable-channel installations.
+
+func New(confDir string) *Vnodes {
+ vn := &Vnodes{
+ Logger: logger.New().With(
+ slog.String("component", "vnodes"),
+ ),
+
+ confDir: confDir,
+ vnodes: make(map[string]*VirtualNode),
+ }
+
+ vn.readConfDir()
+
+ return vn
+}
+
+type (
+ Vnodes struct {
+ *logger.Logger
+
+ confDir string
+ vnodes map[string]*VirtualNode
+ }
+ VirtualNode struct {
+ GUID string `yaml:"guid"`
+ Hostname string `yaml:"hostname"`
+ Labels map[string]string `yaml:"labels"`
+ }
+)
+
+func (vn *Vnodes) Lookup(key string) (*VirtualNode, bool) {
+ v, ok := vn.vnodes[key]
+ return v, ok
+}
+
+func (vn *Vnodes) Len() int {
+ return len(vn.vnodes)
+}
+
+func (vn *Vnodes) readConfDir() {
+ _ = filepath.WalkDir(vn.confDir, func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ vn.Warning(err)
+ return nil
+ }
+
+ if !d.Type().IsRegular() || !isConfigFile(path) {
+ return nil
+ }
+
+ var cfg []VirtualNode
+ if err := loadConfigFile(&cfg, path); err != nil {
+ vn.Warning(err)
+ return nil
+ }
+
+ for _, v := range cfg {
+ if v.Hostname == "" || v.GUID == "" {
+ vn.Warningf("skipping virtual node '%+v': some required fields are missing (%s)", v, path)
+ continue
+ }
+ if _, ok := vn.vnodes[v.Hostname]; ok {
+ vn.Warningf("skipping virtual node '%+v': duplicate node (%s)", v, path)
+ continue
+ }
+
+ v := v
+ vn.Debugf("adding virtual node'%+v' (%s)", v, path)
+ vn.vnodes[v.Hostname] = &v
+ }
+
+ return nil
+ })
+}
+
+func isConfigFile(path string) bool {
+ switch filepath.Ext(path) {
+ case ".yaml", ".yml", ".conf":
+ return true
+ default:
+ return false
+ }
+}
+
+func loadConfigFile(conf interface{}, path string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = f.Close() }()
+
+ if err := yaml.NewDecoder(f).Decode(conf); err != nil && err != io.EOF {
+ return err
+ }
+
+ return nil
+}
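
vnodes walks the configured directory, loads every .yaml/.yml/.conf file, and indexes virtual nodes by hostname. A minimal lookup sketch against a hypothetical conf dir laid out like the testdata above:

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/vnodes"
)

func main() {
	// Hypothetical conf dir containing YAML files shaped like testdata/config.yaml above.
	reg := vnodes.New("/etc/netdata/vnodes")

	if node, ok := reg.Lookup("first"); ok {
		fmt.Println(node.GUID, node.Labels["area"])
	} else {
		fmt.Println("virtual node 'first' not found")
	}
	fmt.Println("registered virtual nodes:", reg.Len())
}
```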
diff --git a/src/go/plugin/go.d/agent/vnodes/vnodes_test.go b/src/go/plugin/go.d/agent/vnodes/vnodes_test.go
new file mode 100644
index 000000000..fc2c2ef35
--- /dev/null
+++ b/src/go/plugin/go.d/agent/vnodes/vnodes_test.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vnodes
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNew(t *testing.T) {
+ assert.NotNil(t, New("testdata"))
+ assert.NotNil(t, New("not_exist"))
+}
+
+func TestVnodes_Lookup(t *testing.T) {
+ req := New("testdata")
+
+ _, ok := req.Lookup("first")
+ assert.True(t, ok)
+
+ _, ok = req.Lookup("second")
+ assert.True(t, ok)
+
+ _, ok = req.Lookup("third")
+ assert.False(t, ok)
+}
diff --git a/src/go/plugin/go.d/cli/cli.go b/src/go/plugin/go.d/cli/cli.go
new file mode 100644
index 000000000..646bdf121
--- /dev/null
+++ b/src/go/plugin/go.d/cli/cli.go
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cli
+
+import (
+ "strconv"
+
+ "github.com/jessevdk/go-flags"
+)
+
+// Option defines command line options.
+type Option struct {
+ UpdateEvery int
+ Module string `short:"m" long:"modules" description:"module name to run" default:"all"`
+ ConfDir []string `short:"c" long:"config-dir" description:"config dir to read"`
+ WatchPath []string `short:"w" long:"watch-path" description:"config path to watch"`
+ Debug bool `short:"d" long:"debug" description:"debug mode"`
+ Version bool `short:"v" long:"version" description:"display the version and exit"`
+}
+
+// Parse returns the parsed command-line flags in an Option struct.
+func Parse(args []string) (*Option, error) {
+ opt := &Option{
+ UpdateEvery: 1,
+ }
+ parser := flags.NewParser(opt, flags.Default)
+ parser.Name = "orchestrator"
+ parser.Usage = "[OPTIONS] [update every]"
+
+ rest, err := parser.ParseArgs(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(rest) > 1 {
+ if opt.UpdateEvery, err = strconv.Atoi(rest[1]); err != nil {
+ return nil, err
+ }
+ }
+
+ return opt, nil
+}
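
Parse handles the flags plus an optional trailing positional "update every"; the rest[1] indexing suggests the caller is expected to pass the full os.Args slice (program name included). A hedged sketch under that assumption:

```go
package main

import (
	"fmt"
	"os"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/cli"
)

func main() {
	// Equivalent to invoking: orchestrator -d -m mysql 5
	opt, err := cli.Parse([]string{"orchestrator", "-d", "-m", "mysql", "5"})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(opt.Module, opt.Debug, opt.UpdateEvery) // mysql true 5
}
```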
diff --git a/src/go/plugin/go.d/config/go.d.conf b/src/go/plugin/go.d/config/go.d.conf
new file mode 100644
index 000000000..198bcd086
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d.conf
@@ -0,0 +1,123 @@
+# netdata go.d.plugin configuration
+#
+# This file is in YAML format.
+
+# Enable/disable the whole go.d.plugin.
+enabled: yes
+
+# Default enable/disable value for modules not explicitly listed in the 'modules' section.
+default_run: yes
+
+# Maximum number of used CPUs. Zero means no limit.
+max_procs: 0
+
+# Enable/disable specific go.d.plugin modules.
+# If you want to change any value, you need to uncomment it first.
+# IMPORTANT: Do not remove the leading spaces, just remove the # symbol. There should be a space before the module name.
+modules:
+# adaptec_raid: yes
+# activemq: yes
+# ap: yes
+# apache: yes
+# beanstalk: yes
+# bind: yes
+# chrony: yes
+# clickhouse: yes
+# cockroachdb: yes
+# consul: yes
+# coredns: yes
+# couchbase: yes
+# couchdb: yes
+# dmcache: yes
+# dnsdist: yes
+# dnsmasq: yes
+# dnsmasq_dhcp: yes
+# dns_query: yes
+# docker: yes
+# docker_engine: yes
+# dockerhub: yes
+# dovecot: yes
+# elasticsearch: yes
+# envoy: yes
+# example: no
+# exim: yes
+# fail2ban: yes
+# filecheck: yes
+# fluentd: yes
+# freeradius: yes
+# gearman: yes
+# haproxy: yes
+# hddtemp: yes
+# hdfs: yes
+# hpssa: yes
+# httpcheck: yes
+# icecast: yes
+# intelgpu: yes
+# ipfs: yes
+# isc_dhcpd: yes
+# k8s_kubelet: yes
+# k8s_kubeproxy: yes
+# lighttpd: yes
+# litespeed: yes
+# logind: yes
+# logstash: yes
+# lvm: yes
+# megacli: yes
+# memcached: yes
+# mongodb: yes
+# monit: yes
+# mysql: yes
+# nginx: yes
+# nginxplus: yes
+# nginxvts: yes
+# nsd: yes
+# ntpd: yes
+# nvme: yes
+# nvidia_smi: no
+# openvpn: no
+# openvpn_status_log: yes
+# ping: yes
+# pgbouncer: yes
+# phpdaemon: yes
+# phpfpm: yes
+# pihole: yes
+# pika: yes
+# portcheck: yes
+# postgres: yes
+# postfix: yes
+# powerdns: yes
+# powerdns_recursor: yes
+# prometheus: yes
+# pulsar: yes
+# puppet: yes
+# rabbitmq: yes
+# redis: yes
+# rethinkdb: yes
+# riakkv: yes
+# rspamd: yes
+# scaleio: yes
+# sensors: yes
+# snmp: yes
+# squid: yes
+# squidlog: yes
+# smartctl: yes
+# storcli: yes
+# supervisord: yes
+# systemdunits: yes
+# tengine: yes
+# tomcat: yes
+# tor: yes
+# traefik: yes
+# upsd: yes
+# unbound: yes
+# uwsgi: yes
+# vernemq: yes
+# vcsa: yes
+# vsphere: yes
+# web_log: yes
+# wireguard: yes
+# whoisquery: yes
+# windows: yes
+# x509check: yes
+# zfspool: yes
+# zookeeper: yes
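
A hedged sketch (not part of this change) of how 'enabled', 'default_run' and the 'modules' map above likely combine; the actual logic lives in the agent's config.go, which is not part of this hunk:

```go
package main

import "fmt"

// pluginCfg is a hypothetical stand-in for the parsed go.d.conf.
type pluginCfg struct {
	Enabled    bool
	DefaultRun bool
	Modules    map[string]bool
}

// shouldRun mirrors the apparent semantics: an explicit entry in 'modules' wins,
// otherwise 'default_run' decides.
func shouldRun(cfg pluginCfg, module string) bool {
	if !cfg.Enabled {
		return false
	}
	if v, ok := cfg.Modules[module]; ok {
		return v
	}
	return cfg.DefaultRun
}

func main() {
	cfg := pluginCfg{Enabled: true, DefaultRun: true, Modules: map[string]bool{"example": false}}
	fmt.Println(shouldRun(cfg, "apache"))  // true: not listed, default_run is yes
	fmt.Println(shouldRun(cfg, "example")) // false: explicitly disabled
}
```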
diff --git a/src/go/plugin/go.d/config/go.d/activemq.conf b/src/go/plugin/go.d/config/go.d/activemq.conf
new file mode 100644
index 000000000..9bae9cc56
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/activemq.conf
@@ -0,0 +1,10 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/activemq#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:8161
+# webadmin: admin
+# - name: remote
+# url: http://203.0.113.1:8161
+# webadmin: admin
diff --git a/src/go/plugin/go.d/config/go.d/adaptec_raid.conf b/src/go/plugin/go.d/config/go.d/adaptec_raid.conf
new file mode 100644
index 000000000..eafbd0303
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/adaptec_raid.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/adaptecraid#readme
+
+jobs:
+ - name: adaptec_raid
diff --git a/src/go/plugin/go.d/config/go.d/ap.conf b/src/go/plugin/go.d/config/go.d/ap.conf
new file mode 100644
index 000000000..ef8f2d9f8
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/ap.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ap#readme
+
+jobs:
+ - name: local
+ binary_path: /usr/sbin/iw
diff --git a/src/go/plugin/go.d/config/go.d/apache.conf b/src/go/plugin/go.d/config/go.d/apache.conf
new file mode 100644
index 000000000..86f4a75c4
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/apache.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/apache#readme
+
+#jobs:
+# - name: local
+# url: http://localhost/server-status?auto
diff --git a/src/go/plugin/go.d/config/go.d/beanstalk.conf b/src/go/plugin/go.d/config/go.d/beanstalk.conf
new file mode 100644
index 000000000..45e2254b8
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/beanstalk.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/beanstalk#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:11300
diff --git a/src/go/plugin/go.d/config/go.d/bind.conf b/src/go/plugin/go.d/config/go.d/bind.conf
new file mode 100644
index 000000000..9e970e60e
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/bind.conf
@@ -0,0 +1,9 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/bind#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8653/json/v1
+#
+# - name: local
+# url: http://127.0.0.1:8653/xml/v3
diff --git a/src/go/plugin/go.d/config/go.d/cassandra.conf b/src/go/plugin/go.d/config/go.d/cassandra.conf
new file mode 100644
index 000000000..93283ee6c
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/cassandra.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/cassandra#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:7072/metrics
diff --git a/src/go/plugin/go.d/config/go.d/chrony.conf b/src/go/plugin/go.d/config/go.d/chrony.conf
new file mode 100644
index 000000000..099ba3583
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/chrony.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/chrony#readme
+
+#jobs:
+# - name: local
+# address: '127.0.0.1:323'
diff --git a/src/go/plugin/go.d/config/go.d/clickhouse.conf b/src/go/plugin/go.d/config/go.d/clickhouse.conf
new file mode 100644
index 000000000..4f416138b
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/clickhouse.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/clickhouse#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8123
diff --git a/src/go/plugin/go.d/config/go.d/cockroachdb.conf b/src/go/plugin/go.d/config/go.d/cockroachdb.conf
new file mode 100644
index 000000000..8d04dbfe0
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/cockroachdb.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/cockroachdb#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:8080/_status/vars
diff --git a/src/go/plugin/go.d/config/go.d/consul.conf b/src/go/plugin/go.d/config/go.d/consul.conf
new file mode 100644
index 000000000..624b9a6d4
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/consul.conf
@@ -0,0 +1,7 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/consul#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:8500
+# acl_token: ""
diff --git a/src/go/plugin/go.d/config/go.d/coredns.conf b/src/go/plugin/go.d/config/go.d/coredns.conf
new file mode 100644
index 000000000..9b9d6ef9a
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/coredns.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/coredns#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:9153/metrics
diff --git a/src/go/plugin/go.d/config/go.d/couchbase.conf b/src/go/plugin/go.d/config/go.d/couchbase.conf
new file mode 100644
index 000000000..aec5c342c
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/couchbase.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/couchbase#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8091
+# username: admin
+# password: password
diff --git a/src/go/plugin/go.d/config/go.d/couchdb.conf b/src/go/plugin/go.d/config/go.d/couchdb.conf
new file mode 100644
index 000000000..5b62ad191
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/couchdb.conf
@@ -0,0 +1,10 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/couchdb#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:5984
+# node: node@host
+# username: admin
+# password: password
+# databases: my-db
diff --git a/src/go/plugin/go.d/config/go.d/dmcache.conf b/src/go/plugin/go.d/config/go.d/dmcache.conf
new file mode 100644
index 000000000..8b39726cb
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/dmcache.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dmcache#readme
+
+jobs:
+ - name: dmcache
diff --git a/src/go/plugin/go.d/config/go.d/dns_query.conf b/src/go/plugin/go.d/config/go.d/dns_query.conf
new file mode 100644
index 000000000..ca24265bf
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/dns_query.conf
@@ -0,0 +1,14 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dnsquery#readme
+
+#jobs:
+# - name: example
+# record_types:
+# - A
+# domains:
+# - google.com
+# - github.com
+# - reddit.com
+# servers:
+# - 8.8.8.8
+# - 8.8.4.4
diff --git a/src/go/plugin/go.d/config/go.d/dnsdist.conf b/src/go/plugin/go.d/config/go.d/dnsdist.conf
new file mode 100644
index 000000000..cc991e018
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/dnsdist.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dnsdist#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8083
+# headers:
+# X-API-Key: 'dnsdist-api-key' # static pre-shared authentication key for access to the REST API (api-key).
diff --git a/src/go/plugin/go.d/config/go.d/dnsmasq.conf b/src/go/plugin/go.d/config/go.d/dnsmasq.conf
new file mode 100644
index 000000000..3b9b3d326
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/dnsmasq.conf
@@ -0,0 +1,7 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dnsmasq#readme
+
+#jobs:
+# - name: local
+# protocol: udp
+# address: '127.0.0.1:53'
diff --git a/src/go/plugin/go.d/config/go.d/dnsmasq_dhcp.conf b/src/go/plugin/go.d/config/go.d/dnsmasq_dhcp.conf
new file mode 100644
index 000000000..1f51415dc
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/dnsmasq_dhcp.conf
@@ -0,0 +1,13 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dnsmasq_dhcp#readme
+
+jobs:
+ - name: dnsmasq_dhcp
+ leases_path: /var/lib/misc/dnsmasq.leases
+ conf_path: /etc/dnsmasq.conf
+ conf_dir: /etc/dnsmasq.d
+
+ - name: dnsmasq_dhcp
+ leases_path: /etc/pihole/dhcp.leases
+ conf_path: /etc/dnsmasq.conf
+ conf_dir: /etc/dnsmasq.d
diff --git a/src/go/plugin/go.d/config/go.d/docker.conf b/src/go/plugin/go.d/config/go.d/docker.conf
new file mode 100644
index 000000000..084373f74
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/docker.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/docker#readme
+
+jobs:
+ - name: local
+ address: 'unix:///var/run/docker.sock'
+ timeout: 2
+ collect_container_size: no
diff --git a/src/go/plugin/go.d/config/go.d/docker_engine.conf b/src/go/plugin/go.d/config/go.d/docker_engine.conf
new file mode 100644
index 000000000..ba7342a77
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/docker_engine.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/docker_engine#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:9323/metrics
diff --git a/src/go/plugin/go.d/config/go.d/dockerhub.conf b/src/go/plugin/go.d/config/go.d/dockerhub.conf
new file mode 100644
index 000000000..96b29e26b
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/dockerhub.conf
@@ -0,0 +1,9 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dockerhub#readme
+
+#jobs:
+# - name: dockerhub
+# repositories:
+# - user1/name1
+# - user2/name2
+# - user3/name3
diff --git a/src/go/plugin/go.d/config/go.d/dovecot.conf b/src/go/plugin/go.d/config/go.d/dovecot.conf
new file mode 100644
index 000000000..5dd31bd7d
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/dovecot.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/dovecot#readme
+
+jobs:
+ - name: local
+ address: unix:///var/run/dovecot/old-stats
diff --git a/src/go/plugin/go.d/config/go.d/elasticsearch.conf b/src/go/plugin/go.d/config/go.d/elasticsearch.conf
new file mode 100644
index 000000000..26ff2c9cd
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/elasticsearch.conf
@@ -0,0 +1,7 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/elasticsearch#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:9200
+# cluster_mode: no
diff --git a/src/go/plugin/go.d/config/go.d/envoy.conf b/src/go/plugin/go.d/config/go.d/envoy.conf
new file mode 100644
index 000000000..fc30a3502
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/envoy.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/envoy#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:9901/stats/prometheus
diff --git a/src/go/plugin/go.d/config/go.d/example.conf b/src/go/plugin/go.d/config/go.d/example.conf
new file mode 100644
index 000000000..f92669a68
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/example.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/example#readme
+
+jobs:
+ - name: example
diff --git a/src/go/plugin/go.d/config/go.d/exim.conf b/src/go/plugin/go.d/config/go.d/exim.conf
new file mode 100644
index 000000000..db8813152
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/exim.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/exim#readme
+
+jobs:
+ - name: exim
diff --git a/src/go/plugin/go.d/config/go.d/fail2ban.conf b/src/go/plugin/go.d/config/go.d/fail2ban.conf
new file mode 100644
index 000000000..ac3d126b7
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/fail2ban.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/fail2ban#readme
+
+jobs:
+ - name: fail2ban
diff --git a/src/go/plugin/go.d/config/go.d/filecheck.conf b/src/go/plugin/go.d/config/go.d/filecheck.conf
new file mode 100644
index 000000000..ed33675ef
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/filecheck.conf
@@ -0,0 +1,16 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/filecheck#readme
+
+#jobs:
+# - name: files_example
+# files:
+# include:
+# - '/path/to/file1'
+# - '/path/to/file2'
+#
+# - name: dirs_example
+# dirs:
+# collect_dir_size: yes
+# include:
+# - '/path/to/dir1'
+# - '/path/to/dir2'
diff --git a/src/go/plugin/go.d/config/go.d/fluentd.conf b/src/go/plugin/go.d/config/go.d/fluentd.conf
new file mode 100644
index 000000000..a75dde619
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/fluentd.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/fluentd#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:24220
diff --git a/src/go/plugin/go.d/config/go.d/freeradius.conf b/src/go/plugin/go.d/config/go.d/freeradius.conf
new file mode 100644
index 000000000..ba8b066d0
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/freeradius.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/freeradius#readme
+
+#jobs:
+# - name: local
+# address: localhost
+# port: 18121
+# secret: adminsecret
diff --git a/src/go/plugin/go.d/config/go.d/gearman.conf b/src/go/plugin/go.d/config/go.d/gearman.conf
new file mode 100644
index 000000000..b816f27d1
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/gearman.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/gearman#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:4730
diff --git a/src/go/plugin/go.d/config/go.d/geth.conf b/src/go/plugin/go.d/config/go.d/geth.conf
new file mode 100644
index 000000000..e09fc055e
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/geth.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/geth#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:6060/debug/metrics/prometheus
diff --git a/src/go/plugin/go.d/config/go.d/haproxy.conf b/src/go/plugin/go.d/config/go.d/haproxy.conf
new file mode 100644
index 000000000..f2f8011e1
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/haproxy.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/haproxy#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8404/metrics
diff --git a/src/go/plugin/go.d/config/go.d/hddtemp.conf b/src/go/plugin/go.d/config/go.d/hddtemp.conf
new file mode 100644
index 000000000..6a9830a8d
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/hddtemp.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/hddtemp#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:7634
diff --git a/src/go/plugin/go.d/config/go.d/hdfs.conf b/src/go/plugin/go.d/config/go.d/hdfs.conf
new file mode 100644
index 000000000..93a6d24b0
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/hdfs.conf
@@ -0,0 +1,9 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/hdfs#readme
+
+#jobs:
+# - name: namenode
+# url: http://127.0.0.1:9870/jmx
+#
+# - name: datanode
+# url: http://127.0.0.1:9864/jmx
diff --git a/src/go/plugin/go.d/config/go.d/hpssa.conf b/src/go/plugin/go.d/config/go.d/hpssa.conf
new file mode 100644
index 000000000..6638b6166
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/hpssa.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/hpssa#readme
+
+jobs:
+ - name: hpssa
diff --git a/src/go/plugin/go.d/config/go.d/httpcheck.conf b/src/go/plugin/go.d/config/go.d/httpcheck.conf
new file mode 100644
index 000000000..6aba8dca2
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/httpcheck.conf
@@ -0,0 +1,12 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/httpcheck#readme
+
+#jobs:
+# - name: jira
+# url: https://jira.localdomain/
+#
+# - name: cool_website
+# url: http://cool.website:8080/home
+# status_accepted: [200, 204]
+# response_match: <title>My cool website!<\/title>
+# timeout: 2
diff --git a/src/go/plugin/go.d/config/go.d/icecast.conf b/src/go/plugin/go.d/config/go.d/icecast.conf
new file mode 100644
index 000000000..aba3e1d2c
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/icecast.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/icecast#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:8000
diff --git a/src/go/plugin/go.d/config/go.d/intelgpu.conf b/src/go/plugin/go.d/config/go.d/intelgpu.conf
new file mode 100644
index 000000000..a8b3144f2
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/intelgpu.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/intelgpu#readme
+
+jobs:
+ - name: intelgpu
diff --git a/src/go/plugin/go.d/config/go.d/ipfs.conf b/src/go/plugin/go.d/config/go.d/ipfs.conf
new file mode 100644
index 000000000..127006de5
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/ipfs.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ipfs#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:5001
diff --git a/src/go/plugin/go.d/config/go.d/isc_dhcpd.conf b/src/go/plugin/go.d/config/go.d/isc_dhcpd.conf
new file mode 100644
index 000000000..17a577bb4
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/isc_dhcpd.conf
@@ -0,0 +1,23 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/isc_dhcpd#readme
+
+#jobs:
+# - name: ipv4_example
+# leases_path: '/path/to/dhcpd.leases_ipv4'
+# pools:
+# - name: office
+# networks: '192.0.2.1-192.0.2.254'
+# - name: wifi
+# networks: '198.51.100.0/24'
+# - name: dmz
+# networks: '203.0.113.0/255.255.255.0'
+#
+# - name: ipv6_example
+# leases_path: '/path/to/dhcpd.leases_ipv6'
+# pools:
+# - name: office
+# networks: '2001:db8::/64'
+# - name: wifi
+# networks: '2001:db8:0:1::/64'
+# - name: dmz
+# networks: '2001:db8:0:2::/64'
diff --git a/src/go/plugin/go.d/config/go.d/k8s_kubelet.conf b/src/go/plugin/go.d/config/go.d/k8s_kubelet.conf
new file mode 100644
index 000000000..1c0f8cd1f
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/k8s_kubelet.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/k8s_kubelet#readme
+
+#jobs:
+# - url: http://127.0.0.1:10255/metrics
diff --git a/src/go/plugin/go.d/config/go.d/k8s_kubeproxy.conf b/src/go/plugin/go.d/config/go.d/k8s_kubeproxy.conf
new file mode 100644
index 000000000..a0b9ee240
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/k8s_kubeproxy.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/k8s_kubeproxy#readme
+
+#jobs:
+# - url: http://127.0.0.1:10249/metrics
diff --git a/src/go/plugin/go.d/config/go.d/k8s_state.conf b/src/go/plugin/go.d/config/go.d/k8s_state.conf
new file mode 100644
index 000000000..fd1c305e0
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/k8s_state.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/k8s_state#readme
+
+jobs:
+ - name: k8s_state
diff --git a/src/go/plugin/go.d/config/go.d/lighttpd.conf b/src/go/plugin/go.d/config/go.d/lighttpd.conf
new file mode 100644
index 000000000..51866bfb7
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/lighttpd.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/lighttpd#readme
+
+#jobs:
+# - name: local
+# url: http://localhost/server-status?auto
diff --git a/src/go/plugin/go.d/config/go.d/litespeed.conf b/src/go/plugin/go.d/config/go.d/litespeed.conf
new file mode 100644
index 000000000..c525ff0ac
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/litespeed.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/litespeed#readme
+
+jobs:
+ - name: local
+ reports_dir: /tmp/lshttpd/
diff --git a/src/go/plugin/go.d/config/go.d/logind.conf b/src/go/plugin/go.d/config/go.d/logind.conf
new file mode 100644
index 000000000..219b37ae2
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/logind.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/logind#readme
+
+jobs:
+ - name: logind
diff --git a/src/go/plugin/go.d/config/go.d/logstash.conf b/src/go/plugin/go.d/config/go.d/logstash.conf
new file mode 100644
index 000000000..c67819e13
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/logstash.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/logstash#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:9600
diff --git a/src/go/plugin/go.d/config/go.d/lvm.conf b/src/go/plugin/go.d/config/go.d/lvm.conf
new file mode 100644
index 000000000..54da37b1a
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/lvm.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/lvm#readme
+
+jobs:
+ - name: lvm
diff --git a/src/go/plugin/go.d/config/go.d/megacli.conf b/src/go/plugin/go.d/config/go.d/megacli.conf
new file mode 100644
index 000000000..8d26763b7
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/megacli.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/megacli#readme
+
+jobs:
+ - name: megacli
diff --git a/src/go/plugin/go.d/config/go.d/memcached.conf b/src/go/plugin/go.d/config/go.d/memcached.conf
new file mode 100644
index 000000000..60603be28
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/memcached.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/memcached#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:11211
diff --git a/src/go/plugin/go.d/config/go.d/mongodb.conf b/src/go/plugin/go.d/config/go.d/mongodb.conf
new file mode 100644
index 000000000..ae41e4c73
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/mongodb.conf
@@ -0,0 +1,10 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/mongodb#readme
+
+#jobs:
+# - name: local
+# uri: 'mongodb://localhost:27017'
+# timeout: 2
+# databases:
+# include:
+# - "* *"
diff --git a/src/go/plugin/go.d/config/go.d/monit.conf b/src/go/plugin/go.d/config/go.d/monit.conf
new file mode 100644
index 000000000..e7768d618
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/monit.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/monit#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:2812
diff --git a/src/go/plugin/go.d/config/go.d/mysql.conf b/src/go/plugin/go.d/config/go.d/mysql.conf
new file mode 100644
index 000000000..bdba6df76
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/mysql.conf
@@ -0,0 +1,12 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/mysql#readme
+
+#jobs:
+# - name: local
+# dsn: netdata@unix(/var/run/mysqld/mysql.sock)/
+#
+# - name: local
+# dsn: netdata@unix(/var/lib/mysql/mysql.sock)/
+#
+# - name: local
+# dsn: netdata@unix(/tmp/mysql.sock)/
diff --git a/src/go/plugin/go.d/config/go.d/nginx.conf b/src/go/plugin/go.d/config/go.d/nginx.conf
new file mode 100644
index 000000000..03b56d238
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/nginx.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginx#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1/basic_status
diff --git a/src/go/plugin/go.d/config/go.d/nginxplus.conf b/src/go/plugin/go.d/config/go.d/nginxplus.conf
new file mode 100644
index 000000000..f0c022853
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/nginxplus.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginxplus#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1
diff --git a/src/go/plugin/go.d/config/go.d/nginxvts.conf b/src/go/plugin/go.d/config/go.d/nginxvts.conf
new file mode 100644
index 000000000..1b4ea7f1e
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/nginxvts.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nginxvts#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1/status/format/json
diff --git a/src/go/plugin/go.d/config/go.d/nsd.conf b/src/go/plugin/go.d/config/go.d/nsd.conf
new file mode 100644
index 000000000..b3c0a7868
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/nsd.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nsd#readme
+
+jobs:
+ - name: nsd
diff --git a/src/go/plugin/go.d/config/go.d/ntpd.conf b/src/go/plugin/go.d/config/go.d/ntpd.conf
new file mode 100644
index 000000000..d607450a5
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/ntpd.conf
@@ -0,0 +1,7 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ntpd#readme
+
+#jobs:
+# - name: local
+# address: '127.0.0.1:123'
+# collect_peers: no
diff --git a/src/go/plugin/go.d/config/go.d/nvidia_smi.conf b/src/go/plugin/go.d/config/go.d/nvidia_smi.conf
new file mode 100644
index 000000000..4c1e01a40
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/nvidia_smi.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvidia_smi#readme
+
+jobs:
+ - name: nvidia_smi
+ use_csv_format: no
diff --git a/src/go/plugin/go.d/config/go.d/nvme.conf b/src/go/plugin/go.d/config/go.d/nvme.conf
new file mode 100644
index 000000000..ef0146265
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/nvme.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvme#readme
+
+jobs:
+ - name: nvme
diff --git a/src/go/plugin/go.d/config/go.d/openvpn.conf b/src/go/plugin/go.d/config/go.d/openvpn.conf
new file mode 100644
index 000000000..0bc65018e
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/openvpn.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/openvpn#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:7505
diff --git a/src/go/plugin/go.d/config/go.d/openvpn_status_log.conf b/src/go/plugin/go.d/config/go.d/openvpn_status_log.conf
new file mode 100644
index 000000000..ae401780c
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/openvpn_status_log.conf
@@ -0,0 +1,9 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/openvpn_status_log#readme
+
+jobs:
+ - name: local
+ log_path: '/var/log/openvpn/status.log'
+ #per_user_stats:
+ # includes:
+ # - "* *"
diff --git a/src/go/plugin/go.d/config/go.d/pgbouncer.conf b/src/go/plugin/go.d/config/go.d/pgbouncer.conf
new file mode 100644
index 000000000..fdc067d77
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/pgbouncer.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pgbouncer#readme
+
+jobs:
+ - name: local
+ dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'
+# - name: local
+# dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'
diff --git a/src/go/plugin/go.d/config/go.d/phpdaemon.conf b/src/go/plugin/go.d/config/go.d/phpdaemon.conf
new file mode 100644
index 000000000..2bd8c1398
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/phpdaemon.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/phpdaemon#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8509/FullStatus
diff --git a/src/go/plugin/go.d/config/go.d/phpfpm.conf b/src/go/plugin/go.d/config/go.d/phpfpm.conf
new file mode 100644
index 000000000..a159a5e40
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/phpfpm.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/phpfpm#readme
+
+#jobs:
+# - name: local
+# url: http://localhost/status?full&json
diff --git a/src/go/plugin/go.d/config/go.d/pihole.conf b/src/go/plugin/go.d/config/go.d/pihole.conf
new file mode 100644
index 000000000..3ff57d9ae
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/pihole.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pihole#readme
+
+#jobs:
+# - name: pihole
+# url: http://127.0.0.1
+# - name: pihole
+# url: http://pi.hole
diff --git a/src/go/plugin/go.d/config/go.d/pika.conf b/src/go/plugin/go.d/config/go.d/pika.conf
new file mode 100644
index 000000000..9f23d8609
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/pika.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pika#readme
+
+#jobs:
+# - name: local
+# address: 'redis://@127.0.0.1:9221'
diff --git a/src/go/plugin/go.d/config/go.d/ping.conf b/src/go/plugin/go.d/config/go.d/ping.conf
new file mode 100644
index 000000000..b87719ced
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/ping.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ping#readme
+
+#jobs:
+# - name: example
+# hosts:
+# - 192.0.2.0
+# - 192.0.2.1
diff --git a/src/go/plugin/go.d/config/go.d/portcheck.conf b/src/go/plugin/go.d/config/go.d/portcheck.conf
new file mode 100644
index 000000000..0800c9eeb
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/portcheck.conf
@@ -0,0 +1,11 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/portcheck#readme
+
+#jobs:
+# - name: job1
+# host: 10.0.0.1
+# ports: [23, 80, 8080]
+#
+# - name: job2
+# host: 10.0.0.2
+# ports: [22, 19999]
diff --git a/src/go/plugin/go.d/config/go.d/postfix.conf b/src/go/plugin/go.d/config/go.d/postfix.conf
new file mode 100644
index 000000000..5eda59658
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/postfix.conf
@@ -0,0 +1,12 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/postfix#readme
+
+jobs:
+ - name: local
+ binary_path: /usr/sbin/postqueue
+
+ - name: local
+ binary_path: /usr/local/sbin/postqueue # FreeBSD
+
+ - name: local
+ binary_path: postqueue
diff --git a/src/go/plugin/go.d/config/go.d/postgres.conf b/src/go/plugin/go.d/config/go.d/postgres.conf
new file mode 100644
index 000000000..8911d82b7
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/postgres.conf
@@ -0,0 +1,10 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/postgres#readme
+
+#jobs:
+# - name: local
+# dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'
+# #collect_databases_matching: '*'
+#
+# - name: local
+# dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
diff --git a/src/go/plugin/go.d/config/go.d/powerdns.conf b/src/go/plugin/go.d/config/go.d/powerdns.conf
new file mode 100644
index 000000000..dd543c8a8
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/powerdns.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/powerdns#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8081
+# headers:
+# X-API-KEY: secret # static pre-shared authentication key for access to the REST API (api-key).
diff --git a/src/go/plugin/go.d/config/go.d/powerdns_recursor.conf b/src/go/plugin/go.d/config/go.d/powerdns_recursor.conf
new file mode 100644
index 000000000..19f044c6c
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/powerdns_recursor.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/powerdns_recursor#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8081
diff --git a/src/go/plugin/go.d/config/go.d/prometheus.conf b/src/go/plugin/go.d/config/go.d/prometheus.conf
new file mode 100644
index 000000000..ef051dff6
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/prometheus.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/prometheus#readme
+
+#jobs:
+# - name: node_exporter_local
+# url: 'http://127.0.0.1:9100/metrics'
diff --git a/src/go/plugin/go.d/config/go.d/proxysql.conf b/src/go/plugin/go.d/config/go.d/proxysql.conf
new file mode 100644
index 000000000..d97bf3285
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/proxysql.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/proxysql#readme
+
+#jobs:
+# - name: local
+# dsn: stats:stats@tcp(127.0.0.1:6032)/
diff --git a/src/go/plugin/go.d/config/go.d/pulsar.conf b/src/go/plugin/go.d/config/go.d/pulsar.conf
new file mode 100644
index 000000000..5dea6ade3
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/pulsar.conf
@@ -0,0 +1,7 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/pulsar#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8080/metrics
+
diff --git a/src/go/plugin/go.d/config/go.d/puppet.conf b/src/go/plugin/go.d/config/go.d/puppet.conf
new file mode 100644
index 000000000..09e64b7d0
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/puppet.conf
@@ -0,0 +1,7 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/puppet#readme
+
+#jobs:
+# - name: local
+# url: https://127.0.0.1:8140
+# tls_skip_verify: yes
\ No newline at end of file
diff --git a/src/go/plugin/go.d/config/go.d/rabbitmq.conf b/src/go/plugin/go.d/config/go.d/rabbitmq.conf
new file mode 100644
index 000000000..e64a75662
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/rabbitmq.conf
@@ -0,0 +1,9 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/rabbitmq#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:15672
+# username: guest
+# password: guest
+# collect_queues_metrics: no
diff --git a/src/go/plugin/go.d/config/go.d/redis.conf b/src/go/plugin/go.d/config/go.d/redis.conf
new file mode 100644
index 000000000..8910b1547
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/redis.conf
@@ -0,0 +1,12 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/redis#readme
+
+jobs:
+ - name: local
+ address: 'unix://@/tmp/redis.sock'
+
+ - name: local
+ address: 'unix://@/var/run/redis/redis.sock'
+
+ - name: local
+ address: 'unix://@/var/lib/redis/redis.sock'
diff --git a/src/go/plugin/go.d/config/go.d/rethinkdb.conf b/src/go/plugin/go.d/config/go.d/rethinkdb.conf
new file mode 100644
index 000000000..7d0502aca
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/rethinkdb.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/rethinkdb#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:28015
diff --git a/src/go/plugin/go.d/config/go.d/riakkv.conf b/src/go/plugin/go.d/config/go.d/riakkv.conf
new file mode 100644
index 000000000..35f3b468f
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/riakkv.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/riakkv#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8098/stats
diff --git a/src/go/plugin/go.d/config/go.d/rspamd.conf b/src/go/plugin/go.d/config/go.d/rspamd.conf
new file mode 100644
index 000000000..f4db129ca
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/rspamd.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/rspamd#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:11334
diff --git a/src/go/plugin/go.d/config/go.d/scaleio.conf b/src/go/plugin/go.d/config/go.d/scaleio.conf
new file mode 100644
index 000000000..9db85cc4d
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/scaleio.conf
@@ -0,0 +1,13 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/scaleio#readme
+
+#jobs:
+# - name : local
+# url : https://127.0.0.1
+# username : admin
+# password : password
+
+# - name : remote
+# url : https://100.64.0.1
+# username : admin
+# password : password
diff --git a/src/go/plugin/go.d/config/go.d/sd/docker.conf b/src/go/plugin/go.d/config/go.d/sd/docker.conf
new file mode 100644
index 000000000..c93fbef87
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/sd/docker.conf
@@ -0,0 +1,262 @@
+disabled: no
+
+name: 'docker'
+
+discover:
+ - discoverer: docker
+ docker:
+ tags: "unknown"
+ address: "unix:///var/run/docker.sock"
+
+classify:
+ - name: "Skip"
+ selector: "unknown"
+ tags: "skip"
+ match:
+ - tags: "skip"
+ expr: |
+ {{ $netNOK := eq .NetworkMode "host" -}}
+ {{ $protoNOK := not (eq .PortProtocol "tcp") -}}
+ {{ $portNOK := empty .PrivatePort -}}
+ {{ $addrNOK := or (empty .IPAddress) (glob .PublicPortIP "*:*") -}}
+ {{ or $netNOK $protoNOK $portNOK $addrNOK }}
+ - name: "Applications"
+ selector: "!skip unknown"
+ tags: "-unknown app"
+ match:
+ - tags: "apache"
+ expr: '{{ match "sp" .Image "httpd httpd:* */apache */apache:* */apache2 */apache2:*" }}'
+ - tags: "beanstalk"
+ expr: '{{ match "sp" .Image "*/beanstalkd */beanstalkd:*" }}'
+ - tags: "cockroachdb"
+ expr: '{{ match "sp" .Image "cockroachdb/cockroach cockroachdb/cockroach:*" }}'
+ - tags: "consul"
+ expr: '{{ match "sp" .Image "consul consul:* */consul */consul:*" }}'
+ - tags: "coredns"
+ expr: '{{ match "sp" .Image "*/coredns */coredns:*" }}'
+ - tags: "couchbase"
+ expr: '{{ or (eq .PrivatePort "8091") (match "sp" .Image "couchbase couchbase:*") }}'
+ - tags: "couchdb"
+ expr: '{{ or (eq .PrivatePort "5984") (match "sp" .Image "couchdb couchdb:*") }}'
+ - tags: "dovecot"
+ expr: '{{ or (eq .PrivatePort "24242") (match "sp" .Image "*/dovecot */dovecot:*") }}'
+ - tags: "elasticsearch"
+ expr: '{{ or (eq .PrivatePort "9200") (match "sp" .Image "elasticsearch elasticsearch:* */elasticsearch */elasticsearch:* */opensearch */opensearch:*") }}'
+ - tags: "gearman"
+ expr: '{{ and (eq .PrivatePort "4730") (match "sp" .Image "*/gearmand */gearmand:*") }}'
+ - tags: "ipfs"
+ expr: '{{ and (eq .PrivatePort "5001") (match "sp" .Image "ipfs/kubo ipfs/kubo:*") }}'
+ - tags: "lighttpd"
+ expr: '{{ match "sp" .Image "*/lighttpd */lighttpd:*" }}'
+ - tags: "memcached"
+ expr: '{{ or (eq .PrivatePort "11211") (match "sp" .Image "memcached memcached:* */memcached */memcached:*") }}'
+ - tags: "mongodb"
+ expr: '{{ or (eq .PrivatePort "27017") (match "sp" .Image "mongo mongo:* */mongodb */mongodb:* */mongodb-community-server */mongodb-community-server:*") }}'
+ - tags: "mysql"
+ expr: '{{ or (eq .PrivatePort "3306") (match "sp" .Image "mysql mysql:* */mysql */mysql:* mariadb mariadb:* */mariadb */mariadb:* percona percona:* */percona-mysql */percona-mysql:*") }}'
+ - tags: "nginx"
+ expr: '{{ match "sp" .Image "nginx nginx:*" }}'
+ - tags: "pgbouncer"
+ expr: '{{ or (eq .PrivatePort "6432") (match "sp" .Image "*/pgbouncer */pgbouncer:*") }}'
+ - tags: "pika"
+ expr: '{{ match "sp" .Image "pikadb/pika pikadb/pika:*" }}'
+ - tags: "postgres"
+ expr: '{{ or (eq .PrivatePort "5432") (match "sp" .Image "postgres postgres:* */postgres */postgres:* */postgresql */postgresql:*") }}'
+ - tags: "proxysql"
+ expr: '{{ or (eq .PrivatePort "6032") (match "sp" .Image "*/proxysql */proxysql:*") }}'
+ - tags: "puppet"
+ expr: '{{ or (eq .PrivatePort "8140") (match "sp" .Image "puppet/puppetserver puppet/puppetserver:*") }}'
+ - tags: "rabbitmq"
+ expr: '{{ or (eq .PrivatePort "15672") (match "sp" .Image "rabbitmq rabbitmq:* */rabbitmq */rabbitmq:*") }}'
+ - tags: "redis"
+ expr: '{{ or (eq .PrivatePort "6379") (match "sp" .Image "redis redis:* */redis */redis:*") }}'
+ - tags: "rethinkdb"
+ expr: '{{ and (eq .PrivatePort "28015") (match "sp" .Image "rethinkdb rethinkdb:* */rethinkdb */rethinkdb:*") }}'
+ - tags: "squid"
+ expr: '{{ match "sp" .Image "*/squid */squid:*" }}'
+ - tags: "tengine"
+ expr: '{{ match "sp" .Image "*/tengine */tengine:*" }}'
+ - tags: "tor"
+ expr: '{{ and (eq .PrivatePort "9051") (match "sp" .Image "*/tor */tor:*") }}'
+ - tags: "tomcat"
+ expr: '{{ match "sp" .Image "tomcat tomcat:* */tomcat */tomcat:*" }}'
+ - tags: "vernemq"
+ expr: '{{ match "sp" .Image "*/vernemq */vernemq:*" }}'
+ - tags: "zookeeper"
+ expr: '{{ or (eq .PrivatePort "2181") (match "sp" .Image "*/zookeeper */zookeeper:*") }}'
+compose:
+ - name: "Applications"
+ selector: "app"
+ config:
+ - selector: "apache"
+ template: |
+ module: apache
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/server-status?auto
+ - selector: "beanstalk"
+ template: |
+ module: beanstalk
+ name: docker_{{.Name}}
+ address: {{.Address}}
+ - selector: "cockroachdb"
+ template: |
+ module: cockroachdb
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/_status/vars
+ - selector: "consul"
+ template: |
+ module: consul
+ name: docker_{{.Name}}
+ url: http://{{.Address}}
+ - selector: "coredns"
+ template: |
+ module: coredns
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/metrics
+ - selector: "coredns"
+ template: |
+ module: coredns
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/metrics
+ - selector: "couchbase"
+ template: |
+ module: couchbase
+ name: docker_{{.Name}}
+ url: http://{{.Address}}
+ - selector: "couchdb"
+ template: |
+ module: couchdb
+ name: docker_{{.Name}}
+ url: http://{{.Address}}
+ - selector: "dovecot"
+ template: |
+ module: dovecot
+ name: docker_{{.Name}}
+ address: {{.Address}}
+ - selector: "elasticsearch"
+ template: |
+ module: elasticsearch
+ name: docker_{{.Name}}
+ {{ if glob .Image "*elastic*" -}}
+ url: http://{{.Address}}
+ {{ else -}}
+ url: https://{{.Address}}
+ tls_skip_verify: yes
+ username: admin
+ password: admin
+ {{ end -}}
+ - selector: "gearman"
+ template: |
+ module: gearman
+ name: docker_{{.Name}}
+ address: {{.Address}}
+ - selector: "ipfs"
+ template: |
+ module: ipfs
+ name: docker_{{.Name}}
+ url: http://{{.Address}}
+ - selector: "lighttpd"
+ template: |
+ module: lighttpd
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/server-status?auto
+ - selector: "memcached"
+ template: |
+ module: memcached
+ name: docker_{{.Name}}
+ address: {{.Address}}
+ - selector: "mongodb"
+ template: |
+ module: mongodb
+ name: docker_{{.Name}}
+ uri: mongodb://{{.Address}}
+ - selector: "mysql"
+ template: |
+ module: mysql
+ name: docker_{{.Name}}
+ dsn: netdata@tcp({{.Address}})/
+ - selector: "nginx"
+ template: |
+ - module: nginx
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/stub_status
+ - module: nginx
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/basic_status
+ - module: nginx
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/nginx_status
+ - module: nginx
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/status
+ - selector: "pgbouncer"
+ template: |
+ module: pgbouncer
+ name: docker_{{.Name}}
+ dsn: postgres://netdata:postgres@{{.Address}}/pgbouncer
+ - selector: "pika"
+ template: |
+ module: pika
+ name: docker_{{.Name}}
+ address: redis://@{{.Address}}
+ - selector: "rethinkdb"
+ template: |
+ module: rethinkdb
+ name: docker_{{.Name}}
+ address: {{.Address}}
+ - selector: "postgres"
+ template: |
+ module: postgres
+ name: docker_{{.Name}}
+ dsn: postgres://netdata:postgres@{{.Address}}/postgres
+ - selector: "proxysql"
+ template: |
+ module: proxysql
+ name: docker_{{.Name}}
+ dsn: stats:stats@tcp({{.Address}})/
+ - selector: "puppet"
+ template: |
+ module: puppet
+ name: docker_{{.Name}}
+ url: https://{{.Address}}
+ tls_skip_verify: yes
+ - selector: "rabbitmq"
+ template: |
+ module: rabbitmq
+ name: docker_{{.Name}}
+ url: http://{{.Address}}
+ - selector: "redis"
+ template: |
+ module: redis
+ name: docker_{{.Name}}
+ address: redis://@{{.Address}}
+ - selector: "squid"
+ template: |
+ module: squid
+ name: docker_{{.Name}}
+ url: http://{{.Address}}
+ - selector: "tengine"
+ template: |
+ module: tengine
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/us
+ - selector: "tomcat"
+ template: |
+ module: tomcat
+ name: docker_{{.Name}}
+ url: http://{{.Address}}
+ - selector: "tor"
+ template: |
+ module: tor
+ name: docker_{{.Name}}
+ address: {{.Address}}
+ - selector: "vernemq"
+ template: |
+ module: vernemq
+ name: docker_{{.Name}}
+ url: http://{{.Address}}/metrics
+ - selector: "zookeeper"
+ template: |
+ module: zookeeper
+ name: docker_{{.Name}}
+ address: {{.Address}}
diff --git a/src/go/plugin/go.d/config/go.d/sd/net_listeners.conf b/src/go/plugin/go.d/config/go.d/sd/net_listeners.conf
new file mode 100644
index 000000000..4462fc112
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/sd/net_listeners.conf
@@ -0,0 +1,541 @@
+disabled: no
+
+name: 'network listeners'
+
+discover:
+ - discoverer: net_listeners
+ net_listeners:
+ tags: "unknown"
+
+classify:
+ - name: "Applications"
+ selector: "unknown"
+ tags: "-unknown app"
+ match:
+ - tags: "activemq"
+ expr: '{{ and (eq .Port "8161") (eq .Comm "activemq") }}'
+ - tags: "apache"
+ expr: '{{ and (eq .Port "80" "8080") (eq .Comm "apache" "apache2" "httpd") }}'
+ - tags: "beanstalk"
+ expr: '{{ or (eq .Port "11300") (eq .Comm "beanstalkd") }}'
+ - tags: "bind"
+ expr: '{{ and (eq .Port "8653") (eq .Comm "bind" "named") }}'
+ - tags: "cassandra"
+ expr: '{{ and (eq .Port "7072") (glob .Cmdline "*cassandra*") }}'
+ - tags: "chrony"
+ expr: '{{ and (eq .Port "323") (eq .Comm "chronyd") }}'
+ - tags: "clickhouse"
+ expr: '{{ and (eq .Port "8123") (eq .Comm "clickhouse-server") }}'
+ - tags: "cockroachdb"
+ expr: '{{ and (eq .Port "8080") (eq .Comm "cockroach") }}'
+ - tags: "consul"
+ expr: '{{ and (eq .Port "8500") (eq .Comm "consul") }}'
+ - tags: "coredns"
+ expr: '{{ and (eq .Port "9153") (eq .Comm "coredns") }}'
+ - tags: "couchbase"
+ expr: '{{ or (eq .Port "8091") (glob .Cmdline "*couchbase*") }}'
+ - tags: "couchdb"
+ expr: '{{ or (eq .Port "5984") (glob .Cmdline "*couchdb*") }}'
+ - tags: "dnsdist"
+ expr: '{{ and (eq .Port "8083") (eq .Comm "dnsdist") }}'
+ - tags: "dnsmasq"
+ expr: '{{ and (eq .Protocol "UDP") (eq .Port "53") (eq .Comm "dnsmasq") }}'
+ - tags: "docker_engine"
+ expr: '{{ and (eq .Port "9323") (eq .Comm "dockerd") }}'
+ - tags: "dovecot"
+ expr: '{{ and (eq .Port "24242") (eq .Comm "dovecot") }}'
+ - tags: "elasticsearch"
+ expr: '{{ or (eq .Port "9200") (glob .Cmdline "*elasticsearch*" "*opensearch*") }}'
+ - tags: "envoy"
+ expr: '{{ and (eq .Port "9901") (eq .Comm "envoy") }}'
+ - tags: "fluentd"
+ expr: '{{ and (eq .Port "24220") (glob .Cmdline "*fluentd*") }}'
+ - tags: "freeradius"
+ expr: '{{ and (eq .Port "18121") (eq .Comm "freeradius") }}'
+ - tags: "gearman"
+ expr: '{{ or (eq .Port "4730") (eq .Comm "gearmand") }}'
+ - tags: "geth"
+ expr: '{{ and (eq .Port "6060") (eq .Comm "geth") }}'
+ - tags: "haproxy"
+ expr: '{{ and (eq .Port "8404") (eq .Comm "haproxy") }}'
+ - tags: "hddtemp"
+ expr: '{{ and (eq .Port "7634") (eq .Comm "hddtemp") }}'
+ - tags: "hdfs_namenode"
+ expr: '{{ and (eq .Port "9870") (eq .Comm "hadoop") }}'
+ - tags: "hdfs_datanode"
+ expr: '{{ and (eq .Port "9864") (eq .Comm "hadoop") }}'
+ - tags: "icecast"
+ expr: '{{ and (eq .Port "8000") (eq .Comm "icecast") }}'
+ - tags: "ipfs"
+ expr: '{{ and (eq .Port "5001") (eq .Comm "ipfs") }}'
+ - tags: "kubelet"
+ expr: '{{ and (eq .Port "10250" "10255") (eq .Comm "kubelet") }}'
+ - tags: "kubeproxy"
+ expr: '{{ and (eq .Port "10249") (eq .Comm "kube-proxy") }}'
+ - tags: "lighttpd"
+ expr: '{{ and (eq .Port "80" "8080") (eq .Comm "lighttpd") }}'
+ - tags: "logstash"
+ expr: '{{ and (eq .Port "9600") (glob .Cmdline "*logstash*") }}'
+ - tags: "memcached"
+ expr: '{{ or (eq .Port "11211") (eq .Comm "memcached") }}'
+ - tags: "mongodb"
+ expr: '{{ or (eq .Port "27017") (eq .Comm "mongod") }}'
+ - tags: "monit"
+ expr: '{{ or (eq .Port "2812") (eq .Comm "monit") }}'
+ - tags: "mysql"
+ expr: '{{ or (eq .Port "3306") (eq .Comm "mysqld" "mariadbd") }}'
+ - tags: "nginx"
+ expr: '{{ and (eq .Port "80" "8080") (eq .Comm "nginx") }}'
+ - tags: "ntpd"
+ expr: '{{ or (eq .Port "123") (eq .Comm "ntpd") }}'
+ - tags: "openvpn"
+ expr: '{{ and (eq .Port "7505") (eq .Comm "openvpn") }}'
+ - tags: "pgbouncer"
+ expr: '{{ or (eq .Port "6432") (eq .Comm "pgbouncer") }}'
+ - tags: "pihole"
+ expr: '{{ and (eq .Port "53") (eq .Comm "pihole-FTL") }}'
+ - tags: "pika"
+ expr: '{{ and (eq .Port "9221") (eq .Comm "pika") }}'
+ - tags: "postgres"
+ expr: '{{ or (eq .Port "5432") (eq .Comm "postgres") }}'
+ - tags: "powerdns"
+ expr: '{{ and (eq .Port "8081") (eq .Comm "pdns_server") }}'
+ - tags: "powerdns_recursor"
+ expr: '{{ and (eq .Port "8081") (eq .Comm "pdns_recursor") }}'
+ - tags: "proxysql"
+ expr: '{{ or (eq .Port "6032") (eq .Comm "proxysql") }}'
+ - tags: "puppet"
+ expr: '{{ or (eq .Port "8140") (glob .Cmdline "*puppet-server*") }}'
+ - tags: "rabbitmq"
+ expr: '{{ or (eq .Port "15672") (glob .Cmdline "*rabbitmq*") }}'
+ - tags: "redis"
+ expr: '{{ or (eq .Port "6379") (eq .Comm "redis-server") }}'
+ - tags: "rethinkdb"
+ expr: '{{ and (eq .Port "28015") (eq .Comm "rethinkdb") }}'
+ - tags: "riak"
+ expr: '{{ and (eq .Port "8098") (glob .Cmdline "*riak*") }}'
+ - tags: "rspamd"
+ expr: '{{ and (eq .Port "11334") (eq .Comm "rspamd") }}'
+ - tags: "squid"
+ expr: '{{ and (eq .Port "3128") (eq .Comm "squid") }}'
+ - tags: "supervisord"
+ expr: '{{ and (eq .Port "9001") (eq .Comm "supervisord") }}'
+ - tags: "tomcat"
+ expr: '{{ and (eq .Port "8080") (glob .Cmdline "*tomcat*") }}'
+ - tags: "tor"
+ expr: '{{ and (eq .Port "9051") (eq .Comm "tor") }}'
+ - tags: "traefik"
+ expr: '{{ and (eq .Port "80" "8080") (eq .Comm "traefik") }}'
+ - tags: "unbound"
+ expr: '{{ and (eq .Port "8953") (eq .Comm "unbound") }}'
+ - tags: "upsd"
+ expr: '{{ or (eq .Port "3493") (eq .Comm "upsd") }}'
+ - tags: "uwsgi"
+ expr: '{{ and (eq .Port "1717") (eq .Comm "uwsgi") }}'
+ - tags: "vernemq"
+ expr: '{{ and (eq .Port "8888") (glob .Cmdline "*vernemq*") }}'
+ - tags: "zookeeper"
+ expr: '{{ or (eq .Port "2181" "2182") (glob .Cmdline "*zookeeper*") }}'
+ - name: "Prometheus exporters"
+ selector: "unknown"
+ tags: "-unknown exporter"
+ match:
+ - tags: "exporter"
+ expr: '{{ and (not (empty (promPort .Port))) (not (eq .Comm "docker-proxy")) }}'
+compose:
+ - name: "Applications"
+ selector: "app"
+ config:
+ - selector: "activemq"
+ template: |
+ module: activemq
+ name: local
+ url: http://{{.Address}}
+ webadmin: admin
+ - selector: "apache"
+ template: |
+ module: apache
+ name: local
+ url: http://{{.Address}}/server-status?auto
+ - selector: "beanstalk"
+ template: |
+ module: beanstalk
+ name: local
+ address: {{.Address}}
+ - selector: "bind"
+ template: |
+ module: bind
+ name: local
+ url: http://{{.Address}}/json/v1
+ - selector: "cassandra"
+ template: |
+ module: cassandra
+ name: local
+ url: http://{{.Address}}/metrics
+ - selector: "chrony"
+ template: |
+ module: chrony
+ name: local
+ address: {{.Address}}
+ - selector: "clickhouse"
+ template: |
+ module: clickhouse
+ name: local
+ url: http://{{.Address}}
+ - selector: "cockroachdb"
+ template: |
+ module: cockroachdb
+ name: local
+ url: http://{{.Address}}/_status/vars
+ - selector: "consul"
+ template: |
+ module: consul
+ name: local
+ url: http://{{.Address}}
+ - selector: "coredns"
+ template: |
+ module: coredns
+ name: local
+ url: http://{{.Address}}/metrics
+ - selector: "couchbase"
+ template: |
+ module: couchbase
+ name: local
+ url: http://{{.Address}}
+ - selector: "couchdb"
+ template: |
+ module: couchdb
+ name: local
+ url: http://{{.Address}}
+ node: '_local'
+ - selector: "dnsdist"
+ template: |
+ module: dnsdist
+ name: local
+ url: http://{{.Address}}
+ headers:
+ X-API-Key: 'dnsdist-api-key'
+ - selector: "dnsmasq"
+ template: |
+ module: dnsmasq
+ name: local
+ protocol: udp
+ address: {{.Address}}
+ - selector: "docker_engine"
+ template: |
+ module: docker_engine
+ name: local
+ url: http://{{.Address}}/metrics
+ - selector: "dovecot"
+ template: |
+ module: dovecot
+ name: local
+ address: {{.Address}}
+ - selector: "elasticsearch"
+ template: |
+ module: elasticsearch
+ name: local
+ {{ if glob .Cmdline "*elastic*" -}}
+ url: http://{{.Address}}
+ {{ else -}}
+ url: https://{{.Address}}
+ tls_skip_verify: yes
+ username: admin
+ password: admin
+ {{ end -}}
+ - selector: "envoy"
+ template: |
+ module: envoy
+ name: local
+ url: http://{{.Address}}/stats/prometheus
+ - selector: "envoy"
+ template: |
+ module: envoy
+ name: local
+ url: http://{{.Address}}/stats/prometheus
+ - selector: "fluentd"
+ template: |
+ module: fluentd
+ name: local
+ url: http://{{.Address}}
+ - selector: "freeradius"
+ template: |
+ module: freeradius
+ name: local
+ address: {{.IPAddress}}
+ port: {{.Port}}
+ secret: adminsecret
+ - selector: "gearman"
+ template: |
+ module: gearman
+ name: local
+ address: {{.Address}}
+ - selector: "geth"
+ template: |
+ module: geth
+ name: local
+ url: http://{{.Address}}/debug/metrics/prometheus
+ - selector: "haproxy"
+ template: |
+ module: haproxy
+ name: local
+ url: http://{{.Address}}/metrics
+ - selector: "hddtemp"
+ template: |
+ module: hddtemp
+ name: local
+ address: {{.Address}}
+ - selector: "hdfs_namenode"
+ template: |
+ module: hdfs
+ name: namenode_local
+ url: http://{{.Address}}/jmx
+ - selector: "hdfs_datanode"
+ template: |
+ module: hdfs
+ name: datanode_local
+ url: http://{{.Address}}/jmx
+ - selector: "icecast"
+ template: |
+ module: icecast
+ name: local
+ url: http://{{.Address}}
+ - selector: "ipfs"
+ template: |
+ module: ipfs
+ name: local
+ url: http://{{.Address}}
+ - selector: "kubelet"
+ template: |
+ module: k8s_kubelet
+ name: local
+ {{- if eq .Port "10255" }}
+ url: http://{{.Address}}/metrics
+ {{- else }}
+ url: https://{{.Address}}/metrics
+ tls_skip_verify: yes
+ {{- end }}
+ - selector: "kubeproxy"
+ template: |
+ module: k8s_kubeproxy
+ name: local
+ url: http://{{.Address}}/metrics
+ - selector: "lighttpd"
+ template: |
+ module: lighttpd
+ name: local
+ url: http://{{.Address}}/server-status?auto
+ - selector: "logstash"
+ template: |
+ module: logstash
+ name: local
+ url: http://{{.Address}}
+ - selector: "memcached"
+ template: |
+ module: memcached
+ name: local
+ address: {{.Address}}
+ - selector: "mongodb"
+ template: |
+ module: mongodb
+ name: local
+ uri: mongodb://{{.Address}}
+ - selector: "monit"
+ template: |
+ module: monit
+ name: local
+ url: http://{{.Address}}
+ username: admin
+ password: monit
+ - selector: "mysql"
+ template: |
+ - module: mysql
+ name: local
+ dsn: netdata@unix(/var/run/mysqld/mysqld.sock)/
+ - module: mysql
+ name: local
+ dsn: netdata@tcp({{.Address}})/
+ - selector: "nginx"
+ template: |
+ - module: nginx
+ name: local
+ url: http://{{.Address}}/stub_status
+ - module: nginx
+ name: local
+ url: http://{{.Address}}/basic_status
+ - module: nginx
+ name: local
+ url: http://{{.Address}}/nginx_status
+ - module: nginx
+ name: local
+ url: http://{{.Address}}/status
+ - selector: "ntpd"
+ template: |
+ module: ntpd
+ name: local
+ address: {{.Address}}
+ collect_peers: no
+ - selector: "openvpn"
+ template: |
+ module: openvpn
+ name: local
+ address: {{.Address}}
+ - selector: "pgbouncer"
+ template: |
+ module: pgbouncer
+ name: local
+ dsn: postgres://netdata:postgres@{{.Address}}/pgbouncer
+ - selector: "pihole"
+ template: |
+ module: pihole
+ name: local
+ url: http://{{.Address}}
+ - selector: "pika"
+ template: |
+ module: pika
+ name: local
+ address: redis://@{{.IPAddress}}:{{.Port}}
+ - selector: "rethinkdb"
+ template: |
+ module: rethinkdb
+ name: local
+ address: {{.Address}}
+ - selector: "riak"
+ template: |
+ module: riakkv
+ name: local
+ url: http://{{.Address}}/stats
+ - selector: "rspamd"
+ template: |
+ module: rspamd
+ name: local
+ url: http://{{.Address}}
+ - selector: "postgres"
+ template: |
+ - module: postgres
+ name: local
+ dsn: 'host=/var/run/postgresql dbname=postgres user=postgres'
+ - module: postgres
+ name: local
+ dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'
+ - module: postgres
+ name: local
+ dsn: postgresql://netdata@{{.Address}}/postgres
+ - selector: "powerdns"
+ template: |
+ module: powerdns
+ name: local
+ url: http://{{.Address}}
+ headers:
+ X-API-KEY: secret
+ - selector: "powerdns_recursor"
+ template: |
+ module: powerdns_recursor
+ name: local
+ url: http://{{.Address}}
+ headers:
+ X-API-KEY: secret
+ - selector: "proxysql"
+ template: |
+ module: proxysql
+ name: local
+ dsn: stats:stats@tcp({{.Address}})/
+ - selector: "puppet"
+ template: |
+ module: puppet
+ name: local
+ url: https://{{.Address}}
+ tls_skip_verify: yes
+ - selector: "rabbitmq"
+ template: |
+ module: rabbitmq
+ name: local
+ url: http://{{.Address}}
+ username: guest
+ password: guest
+ collect_queues_metrics: no
+ - selector: "redis"
+ template: |
+ module: redis
+ name: local
+ address: redis://@{{.Address}}
+ - selector: "squid"
+ template: |
+ module: squid
+ name: local
+ url: http://{{.Address}}
+ - selector: "supervisord"
+ template: |
+ module: supervisord
+ name: local
+ url: http://{{.Address}}/RPC2
+ - selector: "traefik"
+ template: |
+ module: traefik
+ name: local
+ url: http://{{.Address}}/metrics
+ - selector: "tomcat"
+ template: |
+ module: tomcat
+ name: local
+ url: http://{{.Address}}
+ - selector: "tor"
+ template: |
+ module: tor
+ name: local
+ address: {{.Address}}
+ - selector: "unbound"
+ template: |
+ module: unbound
+ name: local
+ address: {{.Address}}
+ - selector: "upsd"
+ template: |
+ module: upsd
+ name: local
+ address: {{.Address}}
+ - selector: "uwsgi"
+ template: |
+ module: uwsgi
+ name: local
+ address: {{.Address}}
+ - selector: "vernemq"
+ template: |
+ module: vernemq
+ name: local
+ url: http://{{.Address}}/metrics
+ - selector: "zookeeper"
+ template: |
+ module: zookeeper
+ name: local
+ address: {{.Address}}
+
+ - name: "Prometheus exporters generic"
+ selector: "exporter"
+ config:
+ - selector: "exporter"
+ template: |
+ {{ $name := promPort .Port -}}
+ module: prometheus
+ name: {{$name}}_local
+ url: http://{{.Address}}/metrics
+ {{ if eq $name "caddy" -}}
+ expected_prefix: 'caddy_'
+ {{ else if eq $name "openethereum" -}}
+ expected_prefix: 'blockchaincache_'
+ {{ else if eq $name "crowdsec" -}}
+ expected_prefix: 'cs_'
+ {{ else if eq $name "netbox" -}}
+ expected_prefix: 'django_'
+ {{ else if eq $name "traefik" -}}
+ expected_prefix: 'traefik_'
+ {{ else if eq $name "pushgateway" -}}
+ expected_prefix: 'pushgateway_'
+ selector:
+ allow:
+ - pushgateway_*
+ {{ else if eq $name "wireguard_exporter" -}}
+ expected_prefix: 'wireguard_exporter'
+ {{ else if eq $name "clickhouse" -}}
+ max_time_series: 3000
+ {{ end -}}
diff --git a/src/go/plugin/go.d/config/go.d/sensors.conf b/src/go/plugin/go.d/config/go.d/sensors.conf
new file mode 100644
index 000000000..d1b4c4f14
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/sensors.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/sensors#readme
+
+jobs:
+ - name: sensors
+ binary_path: /usr/bin/sensors
diff --git a/src/go/plugin/go.d/config/go.d/smartctl.conf b/src/go/plugin/go.d/config/go.d/smartctl.conf
new file mode 100644
index 000000000..7f8ca5ada
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/smartctl.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/smartctl#readme
+
+jobs:
+ - name: smartctl
diff --git a/src/go/plugin/go.d/config/go.d/snmp.conf b/src/go/plugin/go.d/config/go.d/snmp.conf
new file mode 100644
index 000000000..395fb0f01
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/snmp.conf
@@ -0,0 +1,10 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/snmp#readme
+
+#jobs:
+# - name: switch
+# update_every: 10
+# hostname: "192.0.2.1"
+# community: public
+# options:
+# version: 2
diff --git a/src/go/plugin/go.d/config/go.d/squid.conf b/src/go/plugin/go.d/config/go.d/squid.conf
new file mode 100644
index 000000000..21c711d38
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/squid.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/squid#readme
+
+#jobs:
+# - name: local
+# url: http://localhost:3128
diff --git a/src/go/plugin/go.d/config/go.d/squidlog.conf b/src/go/plugin/go.d/config/go.d/squidlog.conf
new file mode 100644
index 000000000..4c85e3849
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/squidlog.conf
@@ -0,0 +1,9 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/squidlog#readme
+
+jobs:
+ - name: squidlog
+ path: /var/log/squid/access.log
+
+ - name: squidlog
+ path: /var/log/squid3/access.log
diff --git a/src/go/plugin/go.d/config/go.d/storcli.conf b/src/go/plugin/go.d/config/go.d/storcli.conf
new file mode 100644
index 000000000..704f7579d
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/storcli.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/storcli#readme
+
+jobs:
+ - name: storcli
diff --git a/src/go/plugin/go.d/config/go.d/supervisord.conf b/src/go/plugin/go.d/config/go.d/supervisord.conf
new file mode 100644
index 000000000..5d3969b7d
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/supervisord.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/supervisord#readme
+
+#jobs:
+# - name: local
+# url: 'http://127.0.0.1:9001/RPC2'
diff --git a/src/go/plugin/go.d/config/go.d/systemdunits.conf b/src/go/plugin/go.d/config/go.d/systemdunits.conf
new file mode 100644
index 000000000..7aefd37ea
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/systemdunits.conf
@@ -0,0 +1,16 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/systemdunits#readme
+
+jobs:
+ - name: service-units
+ include:
+ - '*.service'
+
+# - name: my-specific-service-unit
+# include:
+# - 'my-specific.service'
+#
+
+# - name: socket-units
+# include:
+# - '*.socket'
diff --git a/src/go/plugin/go.d/config/go.d/tengine.conf b/src/go/plugin/go.d/config/go.d/tengine.conf
new file mode 100644
index 000000000..aefaf2ac7
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/tengine.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/tengine#readme
+
+#jobs:
+# - name: local
+# url: http://localhost/us
diff --git a/src/go/plugin/go.d/config/go.d/tomcat.conf b/src/go/plugin/go.d/config/go.d/tomcat.conf
new file mode 100644
index 000000000..cae77e862
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/tomcat.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/tomcat#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8080
diff --git a/src/go/plugin/go.d/config/go.d/tor.conf b/src/go/plugin/go.d/config/go.d/tor.conf
new file mode 100644
index 000000000..7aa949d96
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/tor.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/tor#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:9051
diff --git a/src/go/plugin/go.d/config/go.d/traefik.conf b/src/go/plugin/go.d/config/go.d/traefik.conf
new file mode 100644
index 000000000..8c005db01
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/traefik.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/traefik#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8082/metrics
diff --git a/src/go/plugin/go.d/config/go.d/unbound.conf b/src/go/plugin/go.d/config/go.d/unbound.conf
new file mode 100644
index 000000000..06552bfd9
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/unbound.conf
@@ -0,0 +1,13 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/unbound#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:8953
+# timeout: 1
+# conf_path: /etc/unbound/unbound.conf
+# cumulative_stats: no
+# use_tls: yes
+# tls_skip_verify: yes
+# tls_cert: /etc/unbound/unbound_control.pem
+# tls_key: /etc/unbound/unbound_control.key
diff --git a/src/go/plugin/go.d/config/go.d/upsd.conf b/src/go/plugin/go.d/config/go.d/upsd.conf
new file mode 100644
index 000000000..6f7b31090
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/upsd.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/upsd#readme
+
+#jobs:
+# - name: upsd
+# address: 127.0.0.1:3493
diff --git a/src/go/plugin/go.d/config/go.d/uwsgi.conf b/src/go/plugin/go.d/config/go.d/uwsgi.conf
new file mode 100644
index 000000000..f31891804
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/uwsgi.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/uwsgi#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:1717
diff --git a/src/go/plugin/go.d/config/go.d/vcsa.conf b/src/go/plugin/go.d/config/go.d/vcsa.conf
new file mode 100644
index 000000000..39ee86d91
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/vcsa.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vcsa#readme
+
+#jobs:
+# - name : vcsa1
+# url : https://203.0.113.0
+# username : admin@vsphere.local
+# password : password
diff --git a/src/go/plugin/go.d/config/go.d/vernemq.conf b/src/go/plugin/go.d/config/go.d/vernemq.conf
new file mode 100644
index 000000000..c954074f8
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/vernemq.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vernemq#readme
+
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8888/metrics
diff --git a/src/go/plugin/go.d/config/go.d/vsphere.conf b/src/go/plugin/go.d/config/go.d/vsphere.conf
new file mode 100644
index 000000000..cbc58a354
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/vsphere.conf
@@ -0,0 +1,13 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/vsphere#readme
+
+#jobs:
+# - name : vcenter1
+# url : https://203.0.113.0
+# username : admin@vsphere.local
+# password : password
+#
+# - name : vcenter2
+# url : https://203.0.113.10
+# username : admin@vsphere.local
+# password : password
diff --git a/src/go/plugin/go.d/config/go.d/web_log.conf b/src/go/plugin/go.d/config/go.d/web_log.conf
new file mode 100644
index 000000000..502fece49
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/web_log.conf
@@ -0,0 +1,44 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/web_log#readme
+
+jobs:
+ # NGINX
+ # debian, arch
+ - name: nginx
+ path: /var/log/nginx/access.log
+
+ # gentoo
+ - name: nginx
+ path: /var/log/nginx/localhost.access_log
+
+ # APACHE
+ # debian
+ - name: apache
+ path: /var/log/apache2/access.log
+
+ # gentoo
+ - name: apache
+ path: /var/log/apache2/access_log
+
+ # arch
+ - name: apache
+ path: /var/log/httpd/access_log
+
+ # debian
+ - name: apache_vhosts
+ path: /var/log/apache2/other_vhosts_access.log
+
+ # GUNICORN
+ - name: gunicorn
+ path: /var/log/gunicorn/access.log
+
+ - name: gunicorn
+ path: /var/log/gunicorn/gunicorn-access.log
+
+ # IIS
+ # This configuration assumes you are running netdata on WSL
+ - name: iis
+ path: /mnt/c/inetpub/logs/LogFiles/W3SVC1/u_ex*.log
+ log_type: csv
+ csv_config:
+ format: '- - $host $request_method $request_uri - $server_port - $remote_addr - - $status - - $request_time'
diff --git a/src/go/plugin/go.d/config/go.d/whoisquery.conf b/src/go/plugin/go.d/config/go.d/whoisquery.conf
new file mode 100644
index 000000000..41f7232da
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/whoisquery.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/whoisquery#readme
+
+#jobs:
+# - name: example
+# source: example.org
diff --git a/src/go/plugin/go.d/config/go.d/windows.conf b/src/go/plugin/go.d/config/go.d/windows.conf
new file mode 100644
index 000000000..4671c20bc
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/windows.conf
@@ -0,0 +1,8 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/windows#readme
+
+#jobs:
+# - name: win_server1
+# url: http://10.0.0.1:9182/metrics
+# - name: win_server2
+# url: http://10.0.0.2:9182/metrics
diff --git a/src/go/plugin/go.d/config/go.d/wireguard.conf b/src/go/plugin/go.d/config/go.d/wireguard.conf
new file mode 100644
index 000000000..07ed61d06
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/wireguard.conf
@@ -0,0 +1,5 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/wireguard#readme
+
+jobs:
+ - name: wireguard
diff --git a/src/go/plugin/go.d/config/go.d/x509check.conf b/src/go/plugin/go.d/config/go.d/x509check.conf
new file mode 100644
index 000000000..5231b1052
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/x509check.conf
@@ -0,0 +1,12 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/x509check#readme
+
+#jobs:
+# - name: my_site_cert
+# source: https://my_site.org:443
+#
+# - name: my_file_cert
+# source: file:///home/me/cert.pem
+#
+# - name: my_smtp_cert
+# source: smtp://smtp.my_mail.org:587
diff --git a/src/go/plugin/go.d/config/go.d/zfspool.conf b/src/go/plugin/go.d/config/go.d/zfspool.conf
new file mode 100644
index 000000000..e961d1971
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/zfspool.conf
@@ -0,0 +1,9 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/zfspool#readme
+
+jobs:
+ - name: zfspool
+ binary_path: /usr/bin/zpool
+
+ - name: zfspool
+ binary_path: /sbin/zpool # FreeBSD
diff --git a/src/go/plugin/go.d/config/go.d/zookeeper.conf b/src/go/plugin/go.d/config/go.d/zookeeper.conf
new file mode 100644
index 000000000..f200c7893
--- /dev/null
+++ b/src/go/plugin/go.d/config/go.d/zookeeper.conf
@@ -0,0 +1,6 @@
+## All available configuration options, their descriptions and default values:
+## https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/zookeeper#readme
+
+#jobs:
+# - name: local
+# address: 127.0.0.1:2181
diff --git a/src/go/plugin/go.d/docs/how-to-write-a-module.md b/src/go/plugin/go.d/docs/how-to-write-a-module.md
new file mode 100644
index 000000000..bf7d3bc6d
--- /dev/null
+++ b/src/go/plugin/go.d/docs/how-to-write-a-module.md
@@ -0,0 +1,302 @@
+<!--
+title: "How to write a Netdata collector in Go"
+description: "This guide will walk you through the technical implementation of writing a new Netdata collector in Golang, with tips on interfaces, structure, configuration files, and more."
+custom_edit_url: "/src/go/plugin/go.d/docs/how-to-write-a-module.md"
+sidebar_label: "How to write a Netdata collector in Go"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Developers/External plugins/go.d.plugin"
+sidebar_position: 20
+-->
+
+# How to write a Netdata collector in Go
+
+## Prerequisites
+
+- Take a look at our [contributing guidelines](https://github.com/netdata/.github/blob/main/CONTRIBUTING.md).
+- [Fork](https://docs.github.com/en/github/getting-started-with-github/fork-a-repo) this repository to your personal
+ GitHub account.
+- [Clone](https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/cloning-a-repository)
+ the **forked** repository locally (e.g. `git clone https://github.com/odyslam/go.d.plugin`).
+- Using a terminal, `cd` into the directory (e.g. `cd go.d.plugin`).
+
+## Write and test a simple collector
+
+> :exclamation: You can skip most of these steps if you first experiment directly with the existing
+> [example module](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/example), which
+> will give you an idea of how things work.
+
+Let's assume you want to write a collector named `example2`.
+
+The steps are:
+
+- Add the source code
+ to [`modules/example2/`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules).
+ - [module interface](#module-interface).
+ - [suggested module layout](#module-layout).
+ - [helper packages](#helper-packages).
+- Add the configuration
+ to [`config/go.d/example2.conf`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/config/go.d).
+- Add the module
+ to [`config/go.d.conf`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf).
+- Import the module
+ in [`modules/init.go`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/init.go).
+- Update
+ the [`available modules list`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d#available-modules).
+- To build the plugin, run `make` from the plugin root dir. This will create a new `go.d.plugin` binary that includes your newly
+ developed collector. It will be placed into the `bin` directory (e.g. `go.d.plugin/bin`).
+- Run it in debug mode: `bin/godplugin -d -m <MODULE_NAME>`. This will output the `STDOUT` of the collector, the same
+ output that is sent to the Netdata Agent and is transformed into charts. You can read more about this collector API in
+ our [documentation](/src/collectors/plugins.d/README.md#external-plugins-api).
+- If you want to test the collector with the actual Netdata Agent, you need to replace the `go.d.plugin` binary that
+ exists in the Netdata Agent installation directory with the one you just compiled. Once
+ you restart the Netdata Agent, it will detect and run it, creating all the charts. Do not remove the default `go.d.plugin` binary; instead, rename it to `go.d.plugin.old` so that the Agent doesn't run it, and rename it back once you are done.
+- Run `make clean` when you are done with testing.
+
+## Module Interface
+
+Every module should implement the following interface:
+
+```
+type Module interface {
+ Init() bool
+ Check() bool
+ Charts() *Charts
+ Collect() map[string]int64
+ Cleanup()
+}
+```
+
+### Init method
+
+- `Init` does module initialization.
+- If it returns `false`, the job will be disabled.
+
+We suggest using the following template:
+
+```
+// example.go
+
+func (e *Example) Init() bool {
+ err := e.validateConfig()
+ if err != nil {
+ e.Errorf("config validation: %v", err)
+ return false
+ }
+
+ someValue, err := e.initSomeValue()
+ if err != nil {
+ e.Errorf("someValue init: %v", err)
+ return false
+ }
+ e.someValue = someValue
+
+ // ...
+ return true
+}
+```
+
+Move specific initialization methods into the `init.go` file. See the [suggested module layout](#module-layout).
+
+### Check method
+
+- `Check` returns whether the job is able to collect metrics.
+- Called after `Init` and only if `Init` returned `true`.
+- If it returns `false`, the job will be disabled.
+
+The simplest way to implement `Check` is to see if we are getting any metrics from `Collect`. Many modules use this
+approach.
+
+```
+// example.go
+
+func (e *Example) Check() bool {
+ return len(e.Collect()) > 0
+}
+```
+
+### Charts method
+
+:exclamation: A Netdata module produces [`charts`](/src/collectors/plugins.d/README.md#chart), not
+raw metrics.
+
+Use the [`agent/module`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/agent/module/charts.go)
+package to create them; it contains the charts and dimensions structs.
+
+- `Charts` returns
+ the [charts](/src/collectors/plugins.d/README.md#chart) (`*module.Charts`).
+- Called after `Check` and only if `Check` returned `true`.
+- If it returns `nil`, the job will be disabled.
+- :warning: Make sure not to share the returned value between module instances (jobs).
+
+Usually, charts are initialized in `Init`, and the `Charts` method just returns the charts instance:
+
+```
+// example.go
+
+func (e *Example) Charts() *Charts {
+ return e.charts
+}
+```
+
+### Collect method
+
+- `Collect` collects metrics.
+- Called only if `Check` returned `true`.
+- Called every `update_every` seconds.
+- The `map[string]int64` keys are chart dimension IDs.
+
+We suggest using the following template:
+
+```
+// example.go
+
+func (e *Example) Collect() map[string]int64 {
+ ms, err := e.collect()
+ if err != nil {
+ e.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+```
+
+Move the metrics collection logic into the `collect.go` file. See the [suggested module layout](#module-layout).
+
+### Cleanup method
+
+- `Cleanup` performs the job cleanup/teardown.
+- Called if `Init` or `Check` fails, or when we want to stop the job after `Collect`.
+
+If you have nothing to clean up:
+
+```
+// example.go
+
+func (Example) Cleanup() {}
+```
+
+## Module Layout
+
+The general idea is not to put everything in a single file.
+
+We recommend using one file per logical area. This approach makes it easier to maintain the module.
+
+Suggested minimal layout:
+
+| Filename | Contains |
+|---------------------------------------------------|--------------------------------------------------------|
+| [`module_name.go`](#file-module_namego) | Module configuration, implementation and registration. |
+| [`charts.go`](#file-chartsgo) | Charts, charts templates and constructor functions. |
+| [`init.go`](#file-initgo) | Initialization methods. |
+| [`collect.go`](#file-collectgo) | Metrics collection implementation. |
+| [`module_name_test.go`](#file-module_name_testgo) | Public methods/functions tests. |
+| [`testdata/`](#file-module_name_testgo) | Files containing sample data. |
+
+### File `module_name.go`
+
+> :exclamation: See the
+> example [`example.go`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/example/example.go).
+
+Don't overload this file with the implementation details.
+
+Usually it contains only the following (a minimal registration sketch is shown after the list):
+
+- module registration.
+- module configuration.
+- [module interface implementation](#module-interface).
+
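+A minimal registration sketch, assuming a collector named `example2` (all names below are illustrative placeholders;
+the `Module` interface methods are expected to live in the other files of the package, as described in the layout above):
+
+```
+// example2.go (illustrative sketch)
+
+package example2
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+// Register the module under the name used in config/go.d.conf and config/go.d/example2.conf.
+func init() {
+    module.Register("example2", module.Creator{
+        Create: func() module.Module { return New() },
+    })
+}
+
+// Example2 holds the module configuration and runtime state.
+type Example2 struct {
+    module.Base // provides logging methods (Errorf, Debugf, ...)
+
+    charts *module.Charts
+}
+
+// New returns an Example2 instance with default settings.
+func New() *Example2 {
+    return &Example2{}
+}
+```
+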
+### File `charts.go`
+
+> :exclamation: See the
+> example: [`charts.go`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/example/charts.go).
+
+Put charts, charts templates and charts constructor functions in this file.
+
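+A minimal sketch of a chart definition, continuing the illustrative `example2` module (the chart and dimension IDs
+are placeholders; the dimension IDs must match the keys of the map returned by `Collect`):
+
+```
+// charts.go (illustrative sketch)
+
+package example2
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+// newCharts returns a fresh charts instance, so module instances (jobs) never share chart state.
+func newCharts() *module.Charts {
+    return &module.Charts{
+        {
+            ID:    "random",
+            Title: "A Random Number",
+            Units: "random",
+            Fam:   "random",
+            Dims: module.Dims{
+                {ID: "random0", Name: "random 0"},
+                {ID: "random1", Name: "random 1"},
+            },
+        },
+    }
+}
+```
+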
+### File `init.go`
+
+> :exclamation: See the
+> example: [`init.go`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/example/init.go).
+
+All the module initialization details should go in this file.
+
+- make a function for each value that needs to be initialized.
+- a function should return the value(s), not implicitly set or change fields in the main struct.
+
+```
+// init.go
+
+// Prefer this approach.
+func (e Example) initSomeValue() (someValue, error) {
+ // ...
+ return someValue, nil
+}
+
+// This approach is OK too, but we recommend not using it.
+func (e *Example) initSomeValue() error {
+ // ...
+ e.someValue = someValue
+ return nil
+}
+```
+
+### File `collect.go`
+
+> :exclamation: See the
+> example: [`collect.go`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/example/collect.go).
+
+This file is the entry point for the metrics collection.
+
+Feel free to split it into several files if you think it makes the code more readable.
+
+Use the `collect_` prefix for the filenames: `collect_this.go`, `collect_that.go`, etc.
+
+```
+// collect.go
+
+func (e *Example) collect() (map[string]int64, error) {
+ collected := make(map[string]int64)
+ // ...
+ // ...
+ // ...
+ return collected, nil
+}
+```
+
+### File `module_name_test.go`
+
+> :exclamation: See the
+> example: [`example_test.go`](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/modules/example/example_test.go).
+
+> If you have no experience in testing, we recommend starting
+> with the [testing package documentation](https://golang.org/pkg/testing/).
+
+> We use the `assert` and `require` packages from the [github.com/stretchr/testify](https://github.com/stretchr/testify)
+> library;
+> check [their documentation](https://pkg.go.dev/github.com/stretchr/testify).
+
+Testing is mandatory.
+
+- test only public functions and methods (`New`, `Init`, `Check`, `Charts`, `Cleanup`, `Collect`).
+- do not create a test function per case; use [table-driven tests](https://github.com/golang/go/wiki/TableDrivenTests).
+  Prefer `map[string]struct{ ... }` over `[]struct{ ... }` (a sketch follows this list).
+- use helper functions _to prepare_ test cases to keep them clean and readable.
+
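+A minimal table-driven test sketch, following the interface described above (the `Config` struct, its `UpdateEvery`
+field, and the failing case are illustrative placeholders, not part of any real module):
+
+```
+// example2_test.go (illustrative sketch)
+
+package example2
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+)
+
+func TestExample2_Init(t *testing.T) {
+    // Map keys double as test case names; Config and UpdateEvery are assumed
+    // fields of the illustrative example2 module.
+    tests := map[string]struct {
+        config   Config
+        wantFail bool
+    }{
+        "default config":       {config: Config{}},
+        "invalid update_every": {config: Config{UpdateEvery: -1}, wantFail: true},
+    }
+
+    for name, test := range tests {
+        t.Run(name, func(t *testing.T) {
+            e := New()
+            e.Config = test.config
+
+            if test.wantFail {
+                assert.False(t, e.Init())
+            } else {
+                assert.True(t, e.Init())
+            }
+        })
+    }
+}
+```
+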
+### Directory `testdata/`
+
+Put files with sample data in this directory if you need any. Its name should
+be [`testdata`](https://golang.org/cmd/go/#hdr-Package_lists_and_patterns).
+
+> Directory and file names that begin with "." or "_" are ignored by the go tool, as are directories named "testdata".
+
+## Helper packages
+
+There are [some helper packages](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg) for
+writing a module.
+
diff --git a/src/go/plugin/go.d/examples/simple/main.go b/src/go/plugin/go.d/examples/simple/main.go
new file mode 100644
index 000000000..215e91f14
--- /dev/null
+++ b/src/go/plugin/go.d/examples/simple/main.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package main
+
+import (
+ "errors"
+ "fmt"
+ "log/slog"
+ "math/rand"
+ "os"
+ "path"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/cli"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath"
+
+ "github.com/jessevdk/go-flags"
+)
+
+var version = "v0.0.1-example"
+
+type example struct {
+ module.Base
+}
+
+func (e *example) Cleanup() {}
+
+func (e *example) Init() error { return nil }
+
+func (e *example) Check() error { return nil }
+
+func (e *example) Charts() *module.Charts {
+ return &module.Charts{
+ {
+ ID: "random",
+ Title: "A Random Number", Units: "random", Fam: "random",
+ Dims: module.Dims{
+ {ID: "random0", Name: "random 0"},
+ {ID: "random1", Name: "random 1"},
+ },
+ },
+ }
+}
+func (e *example) Configuration() any { return nil }
+
+func (e *example) Collect() map[string]int64 {
+ return map[string]int64{
+ "random0": rand.Int63n(100),
+ "random1": rand.Int63n(100),
+ }
+}
+
+var (
+ cd, _ = os.Getwd()
+ name = "goplugin"
+ userDir = os.Getenv("NETDATA_USER_CONFIG_DIR")
+ stockDir = os.Getenv("NETDATA_STOCK_CONFIG_DIR")
+)
+
+func confDir(dirs []string) (mpath multipath.MultiPath) {
+ if len(dirs) > 0 {
+ return dirs
+ }
+ if userDir != "" && stockDir != "" {
+ return multipath.New(
+ userDir,
+ stockDir,
+ )
+ }
+ return multipath.New(
+ path.Join(cd, "/../../../../etc/netdata"),
+ path.Join(cd, "/../../../../usr/lib/netdata/conf.d"),
+ )
+}
+
+func modulesConfDir(dirs []string) multipath.MultiPath {
+ if len(dirs) > 0 {
+ return dirs
+ }
+ if userDir != "" && stockDir != "" {
+ return multipath.New(
+ path.Join(userDir, name),
+ path.Join(stockDir, name),
+ )
+ }
+ return multipath.New(
+ path.Join(cd, "/../../../../etc/netdata", name),
+ path.Join(cd, "/../../../../usr/lib/netdata/conf.d", name),
+ )
+}
+
+func main() {
+ opt := parseCLI()
+
+ if opt.Debug {
+ logger.Level.Set(slog.LevelDebug)
+ }
+ if opt.Version {
+ fmt.Println(version)
+ os.Exit(0)
+ }
+
+ module.Register("example", module.Creator{
+ Create: func() module.Module { return &example{} }},
+ )
+
+ p := agent.New(agent.Config{
+ Name: name,
+ ConfDir: confDir(opt.ConfDir),
+ ModulesConfDir: modulesConfDir(opt.ConfDir),
+ ModulesConfWatchPath: opt.WatchPath,
+ RunModule: opt.Module,
+ MinUpdateEvery: opt.UpdateEvery,
+ })
+
+ p.Run()
+}
+
+func parseCLI() *cli.Option {
+ opt, err := cli.Parse(os.Args)
+	if err != nil {
+		var flagsErr *flags.Error
+		if errors.As(err, &flagsErr) && errors.Is(flagsErr.Type, flags.ErrHelp) {
+			os.Exit(0)
+		} else {
+			os.Exit(1)
+		}
+	}
+ return opt
+}
diff --git a/src/go/plugin/go.d/hack/go-build.sh b/src/go/plugin/go.d/hack/go-build.sh
new file mode 100755
index 000000000..0b451f9c5
--- /dev/null
+++ b/src/go/plugin/go.d/hack/go-build.sh
@@ -0,0 +1,109 @@
+#!/usr/bin/env bash
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -e
+
+PLATFORMS=(
+ darwin/amd64
+ darwin/arm64
+ freebsd/386
+ freebsd/amd64
+ freebsd/arm
+ freebsd/arm64
+ linux/386
+ linux/amd64
+ linux/arm
+ linux/arm64
+ linux/ppc64
+ linux/ppc64le
+ linux/mips
+ linux/mipsle
+ linux/mips64
+ linux/mips64le
+)
+
+getos() {
+ local IFS=/ && read -ra array <<<"$1" && echo "${array[0]}"
+}
+
+getarch() {
+ local IFS=/ && read -ra array <<<"$1" && echo "${array[1]}"
+}
+
+WHICH="$1"
+
+VERSION="${TRAVIS_TAG:-$(git describe --tags --always --dirty)}"
+
+GOLDFLAGS=${GLDFLAGS:-}
+GOLDFLAGS="$GOLDFLAGS -w -s -X github.com/netdata/netdata/go/plugins/pkg/buildinfo.Version=$VERSION"
+
+build() {
+ echo "Building ${GOOS}/${GOARCH}"
+ CGO_ENABLED=0 GOOS="$1" GOARCH="$2" go build -ldflags "${GOLDFLAGS}" -o "$3" "github.com/netdata/netdata/go/plugins/cmd/godplugin"
+}
+
+create_config_archives() {
+ mkdir -p bin
+ tar -zcvf "bin/config.tar.gz" -C config .
+ tar -zcvf "bin/go.d.plugin-config-${VERSION}.tar.gz" -C config .
+}
+
+create_vendor_archives() {
+ mkdir -p bin
+ go mod vendor
+ tar -zc --transform "s:^:go.d.plugin-${VERSION#v}/:" -f "bin/vendor.tar.gz" vendor
+ tar -zc --transform "s:^:go.d.plugin-${VERSION#v}/:" -f "bin/go.d.plugin-vendor-${VERSION}.tar.gz" vendor
+}
+
+build_all_platforms() {
+ for PLATFORM in "${PLATFORMS[@]}"; do
+ GOOS=$(getos "$PLATFORM")
+ GOARCH=$(getarch "$PLATFORM")
+ FILE="bin/go.d.plugin-${VERSION}.${GOOS}-${GOARCH}"
+
+ build "$GOOS" "$GOARCH" "$FILE"
+
+ ARCHIVE="${FILE}.tar.gz"
+ tar -C bin -cvzf "${ARCHIVE}" "${FILE/bin\//}"
+ rm "${FILE}"
+ done
+}
+
+build_specific_platform() {
+ GOOS=$(getos "$1")
+ GOARCH=$(getarch "$1")
+ : "${GOARCH:=amd64}"
+
+ build "$GOOS" "$GOARCH" bin/godplugin
+}
+
+build_current_platform() {
+ eval "$(go env | grep -e "GOHOSTOS" -e "GOHOSTARCH")"
+ GOOS=${GOOS:-$GOHOSTOS}
+ GOARCH=${GOARCH:-$GOHOSTARCH}
+
+ build "$GOOS" "$GOARCH" bin/godplugin
+}
+
+if [[ "$WHICH" == "configs" ]]; then
+ echo "Creating config archives for version: $VERSION"
+ create_config_archives
+ exit 0
+fi
+
+if [[ "$WHICH" == "vendor" ]]; then
+ echo "Creating vendor archives for version: $VERSION"
+ create_vendor_archives
+ exit 0
+fi
+
+echo "Building binaries for version: $VERSION"
+
+if [[ "$WHICH" == "all" ]]; then
+ build_all_platforms
+elif [[ -n "$WHICH" ]]; then
+ build_specific_platform "$WHICH"
+else
+ build_current_platform
+fi
diff --git a/src/go/plugin/go.d/hack/go-fmt.sh b/src/go/plugin/go.d/hack/go-fmt.sh
new file mode 100755
index 000000000..fcc9e2d57
--- /dev/null
+++ b/src/go/plugin/go.d/hack/go-fmt.sh
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+for TARGET in "${@}"; do
+ find "${TARGET}" -name '*.go' -exec gofmt -s -w {} \+
+done
+git diff --exit-code
diff --git a/src/go/plugin/go.d/modules/activemq/README.md b/src/go/plugin/go.d/modules/activemq/README.md
new file mode 120000
index 000000000..de893d1d0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/README.md
@@ -0,0 +1 @@
+integrations/activemq.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/activemq/activemq.go b/src/go/plugin/go.d/modules/activemq/activemq.go
new file mode 100644
index 000000000..bf47be72a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/activemq.go
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package activemq
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("activemq", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *ActiveMQ {
+ return &ActiveMQ{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8161",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ Webadmin: "admin",
+ MaxQueues: 50,
+ MaxTopics: 50,
+ },
+ charts: &Charts{},
+ activeQueues: make(map[string]bool),
+ activeTopics: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ Webadmin string `yaml:"webadmin,omitempty" json:"webadmin"`
+ MaxQueues int `yaml:"max_queues" json:"max_queues"`
+ MaxTopics int `yaml:"max_topics" json:"max_topics"`
+ QueuesFilter string `yaml:"queues_filter,omitempty" json:"queues_filter"`
+ TopicsFilter string `yaml:"topics_filter,omitempty" json:"topics_filter"`
+}
+
+type ActiveMQ struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ apiClient *apiClient
+
+ activeQueues map[string]bool
+ activeTopics map[string]bool
+ queuesFilter matcher.Matcher
+ topicsFilter matcher.Matcher
+}
+
+func (a *ActiveMQ) Configuration() any {
+ return a.Config
+}
+
+func (a *ActiveMQ) Init() error {
+ if err := a.validateConfig(); err != nil {
+ a.Errorf("config validation: %v", err)
+ return err
+ }
+
+ qf, err := a.initQueuesFiler()
+ if err != nil {
+ a.Error(err)
+ return err
+ }
+ a.queuesFilter = qf
+
+ tf, err := a.initTopicsFilter()
+ if err != nil {
+ a.Error(err)
+ return err
+ }
+ a.topicsFilter = tf
+
+ client, err := web.NewHTTPClient(a.Client)
+ if err != nil {
+ a.Error(err)
+ return err
+ }
+
+ a.apiClient = newAPIClient(client, a.Request, a.Webadmin)
+
+ return nil
+}
+
+func (a *ActiveMQ) Check() error {
+ mx, err := a.collect()
+ if err != nil {
+ a.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
+}
+
+func (a *ActiveMQ) Charts() *Charts {
+ return a.charts
+}
+
+func (a *ActiveMQ) Cleanup() {
+ if a.apiClient != nil && a.apiClient.httpClient != nil {
+ a.apiClient.httpClient.CloseIdleConnections()
+ }
+}
+
+func (a *ActiveMQ) Collect() map[string]int64 {
+ mx, err := a.collect()
+
+ if err != nil {
+ a.Error(err)
+ return nil
+ }
+
+ return mx
+}
diff --git a/src/go/plugin/go.d/modules/activemq/activemq_test.go b/src/go/plugin/go.d/modules/activemq/activemq_test.go
new file mode 100644
index 000000000..e2640f440
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/activemq_test.go
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package activemq
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+		require.NotNil(t, data, name)
+	}
+}
+
+func TestActiveMQ_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &ActiveMQ{}, dataConfigJSON, dataConfigYAML)
+}
+
+var (
+ queuesData = []string{
+ `<queues>
+<queue name="sandra">
+<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/>
+<feed>
+<atom>queueBrowse/sandra?view=rss&amp;feedType=atom_1.0</atom>
+<rss>queueBrowse/sandra?view=rss&amp;feedType=rss_2.0</rss>
+</feed>
+</queue>
+<queue name="Test">
+<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/>
+<feed>
+<atom>queueBrowse/Test?view=rss&amp;feedType=atom_1.0</atom>
+<rss>queueBrowse/Test?view=rss&amp;feedType=rss_2.0</rss>
+</feed>
+</queue>
+</queues>`,
+ `<queues>
+<queue name="sandra">
+<stats size="2" consumerCount="2" enqueueCount="3" dequeueCount="2"/>
+<feed>
+<atom>queueBrowse/sandra?view=rss&amp;feedType=atom_1.0</atom>
+<rss>queueBrowse/sandra?view=rss&amp;feedType=rss_2.0</rss>
+</feed>
+</queue>
+<queue name="Test">
+<stats size="2" consumerCount="2" enqueueCount="3" dequeueCount="2"/>
+<feed>
+<atom>queueBrowse/Test?view=rss&amp;feedType=atom_1.0</atom>
+<rss>queueBrowse/Test?view=rss&amp;feedType=rss_2.0</rss>
+</feed>
+</queue>
+<queue name="Test2">
+<stats size="0" consumerCount="0" enqueueCount="0" dequeueCount="0"/>
+<feed>
+<atom>queueBrowse/Test?view=rss&amp;feedType=atom_1.0</atom>
+<rss>queueBrowse/Test?view=rss&amp;feedType=rss_2.0</rss>
+</feed>
+</queue>
+</queues>`,
+ `<queues>
+<queue name="sandra">
+<stats size="3" consumerCount="3" enqueueCount="4" dequeueCount="3"/>
+<feed>
+<atom>queueBrowse/sandra?view=rss&amp;feedType=atom_1.0</atom>
+<rss>queueBrowse/sandra?view=rss&amp;feedType=rss_2.0</rss>
+</feed>
+</queue>
+<queue name="Test">
+<stats size="3" consumerCount="3" enqueueCount="4" dequeueCount="3"/>
+<feed>
+<atom>queueBrowse/Test?view=rss&amp;feedType=atom_1.0</atom>
+<rss>queueBrowse/Test?view=rss&amp;feedType=rss_2.0</rss>
+</feed>
+</queue>
+</queues>`,
+ }
+
+ topicsData = []string{
+ `<topics>
+<topic name="ActiveMQ.Advisory.MasterBroker ">
+<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
+</topic>
+<topic name="AAA ">
+<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/>
+</topic>
+<topic name="ActiveMQ.Advisory.Topic ">
+<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
+</topic>
+<topic name="ActiveMQ.Advisory.Queue ">
+<stats size="0" consumerCount="0" enqueueCount="2" dequeueCount="0"/>
+</topic>
+<topic name="AAAA ">
+<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/>
+</topic>
+</topics>`,
+ `<topics>
+<topic name="ActiveMQ.Advisory.MasterBroker ">
+<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
+</topic>
+<topic name="AAA ">
+<stats size="2" consumerCount="2" enqueueCount="3" dequeueCount="2"/>
+</topic>
+<topic name="ActiveMQ.Advisory.Topic ">
+<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
+</topic>
+<topic name="ActiveMQ.Advisory.Queue ">
+<stats size="0" consumerCount="0" enqueueCount="2" dequeueCount="0"/>
+</topic>
+<topic name="AAAA ">
+<stats size="2" consumerCount="2" enqueueCount="3" dequeueCount="2"/>
+</topic>
+<topic name="BBB ">
+<stats size="1" consumerCount="1" enqueueCount="2" dequeueCount="1"/>
+</topic>
+</topics>`,
+ `<topics>
+<topic name="ActiveMQ.Advisory.MasterBroker ">
+<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
+</topic>
+<topic name="AAA ">
+<stats size="3" consumerCount="3" enqueueCount="4" dequeueCount="3"/>
+</topic>
+<topic name="ActiveMQ.Advisory.Topic ">
+<stats size="0" consumerCount="0" enqueueCount="1" dequeueCount="0"/>
+</topic>
+<topic name="ActiveMQ.Advisory.Queue ">
+<stats size="0" consumerCount="0" enqueueCount="2" dequeueCount="0"/>
+</topic>
+<topic name="AAAA ">
+<stats size="3" consumerCount="3" enqueueCount="4" dequeueCount="3"/>
+</topic>
+</topics>`,
+ }
+)
+
+func TestActiveMQ_Init(t *testing.T) {
+ job := New()
+
+ // NG case
+ job.Webadmin = ""
+ assert.Error(t, job.Init())
+
+ // OK case
+ job.Webadmin = "webadmin"
+ assert.NoError(t, job.Init())
+ assert.NotNil(t, job.apiClient)
+}
+
+func TestActiveMQ_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/webadmin/xml/queues.jsp":
+ _, _ = w.Write([]byte(queuesData[0]))
+ case "/webadmin/xml/topics.jsp":
+ _, _ = w.Write([]byte(topicsData[0]))
+ }
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.HTTP.Request = web.Request{URL: ts.URL}
+ job.Webadmin = "webadmin"
+
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+}
+
+func TestActiveMQ_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestActiveMQ_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestActiveMQ_Collect(t *testing.T) {
+ var collectNum int
+ getQueues := func() string { return queuesData[collectNum] }
+ getTopics := func() string { return topicsData[collectNum] }
+
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/webadmin/xml/queues.jsp":
+ _, _ = w.Write([]byte(getQueues()))
+ case "/webadmin/xml/topics.jsp":
+ _, _ = w.Write([]byte(getTopics()))
+ }
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.HTTP.Request = web.Request{URL: ts.URL}
+ job.Webadmin = "webadmin"
+
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ cases := []struct {
+ expected map[string]int64
+ numQueues int
+ numTopics int
+ numCharts int
+ }{
+ {
+ expected: map[string]int64{
+ "queues_sandra_consumers": 1,
+ "queues_sandra_dequeued": 1,
+ "queues_Test_enqueued": 2,
+ "queues_Test_unprocessed": 1,
+ "topics_AAA_dequeued": 1,
+ "topics_AAAA_unprocessed": 1,
+ "queues_Test_dequeued": 1,
+ "topics_AAA_enqueued": 2,
+ "topics_AAA_unprocessed": 1,
+ "topics_AAAA_consumers": 1,
+ "topics_AAAA_dequeued": 1,
+ "queues_Test_consumers": 1,
+ "queues_sandra_enqueued": 2,
+ "queues_sandra_unprocessed": 1,
+ "topics_AAA_consumers": 1,
+ "topics_AAAA_enqueued": 2,
+ },
+ numQueues: 2,
+ numTopics: 2,
+ numCharts: 12,
+ },
+ {
+ expected: map[string]int64{
+ "queues_sandra_enqueued": 3,
+ "queues_Test_enqueued": 3,
+ "queues_Test_unprocessed": 1,
+ "queues_Test2_dequeued": 0,
+ "topics_BBB_enqueued": 2,
+ "queues_sandra_dequeued": 2,
+ "queues_sandra_unprocessed": 1,
+ "queues_Test2_enqueued": 0,
+ "topics_AAAA_enqueued": 3,
+ "topics_AAAA_dequeued": 2,
+ "topics_BBB_unprocessed": 1,
+ "topics_AAA_dequeued": 2,
+ "topics_AAAA_unprocessed": 1,
+ "queues_Test_consumers": 2,
+ "queues_Test_dequeued": 2,
+ "queues_Test2_consumers": 0,
+ "queues_Test2_unprocessed": 0,
+ "topics_AAA_consumers": 2,
+ "topics_AAA_enqueued": 3,
+ "topics_BBB_dequeued": 1,
+ "queues_sandra_consumers": 2,
+ "topics_AAA_unprocessed": 1,
+ "topics_AAAA_consumers": 2,
+ "topics_BBB_consumers": 1,
+ },
+ numQueues: 3,
+ numTopics: 3,
+ numCharts: 18,
+ },
+ {
+ expected: map[string]int64{
+ "queues_sandra_unprocessed": 1,
+ "queues_Test_unprocessed": 1,
+ "queues_sandra_consumers": 3,
+ "topics_AAAA_enqueued": 4,
+ "queues_sandra_dequeued": 3,
+ "queues_Test_consumers": 3,
+ "queues_Test_enqueued": 4,
+ "queues_Test_dequeued": 3,
+ "topics_AAA_consumers": 3,
+ "topics_AAA_unprocessed": 1,
+ "topics_AAAA_consumers": 3,
+ "topics_AAAA_unprocessed": 1,
+ "queues_sandra_enqueued": 4,
+ "topics_AAA_enqueued": 4,
+ "topics_AAA_dequeued": 3,
+ "topics_AAAA_dequeued": 3,
+ },
+ numQueues: 2,
+ numTopics: 2,
+ numCharts: 18,
+ },
+ }
+
+ for _, c := range cases {
+ require.Equal(t, c.expected, job.Collect())
+ assert.Len(t, job.activeQueues, c.numQueues)
+ assert.Len(t, job.activeTopics, c.numTopics)
+ assert.Len(t, *job.charts, c.numCharts)
+ collectNum++
+ }
+}
+
+func TestActiveMQ_404(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(404)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.Webadmin = "webadmin"
+ job.HTTP.Request = web.Request{URL: ts.URL}
+
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestActiveMQ_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye!"))
+ }))
+ defer ts.Close()
+
+ mod := New()
+ mod.Webadmin = "webadmin"
+ mod.HTTP.Request = web.Request{URL: ts.URL}
+
+ require.NoError(t, mod.Init())
+ assert.Error(t, mod.Check())
+}
diff --git a/src/go/plugin/go.d/modules/activemq/apiclient.go b/src/go/plugin/go.d/modules/activemq/apiclient.go
new file mode 100644
index 000000000..7f99c9bad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/apiclient.go
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package activemq
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+type topics struct {
+ XMLName xml.Name `xml:"topics"`
+ Items []topic `xml:"topic"`
+}
+
+type topic struct {
+ XMLName xml.Name `xml:"topic"`
+ Name string `xml:"name,attr"`
+ Stats stats `xml:"stats"`
+}
+
+type queues struct {
+ XMLName xml.Name `xml:"queues"`
+ Items []queue `xml:"queue"`
+}
+
+type queue struct {
+ XMLName xml.Name `xml:"queue"`
+ Name string `xml:"name,attr"`
+ Stats stats `xml:"stats"`
+}
+
+type stats struct {
+ XMLName xml.Name `xml:"stats"`
+ Size int64 `xml:"size,attr"`
+ ConsumerCount int64 `xml:"consumerCount,attr"`
+ EnqueueCount int64 `xml:"enqueueCount,attr"`
+ DequeueCount int64 `xml:"dequeueCount,attr"`
+}
+
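+// pathStats is the Web Console XML statistics page path, e.g. "/admin/xml/queues.jsp" with the default webadmin root.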
+const pathStats = "/%s/xml/%s.jsp"
+
+func newAPIClient(client *http.Client, request web.Request, webadmin string) *apiClient {
+ return &apiClient{
+ httpClient: client,
+ request: request,
+ webadmin: webadmin,
+ }
+}
+
+type apiClient struct {
+ httpClient *http.Client
+ request web.Request
+ webadmin string
+}
+
+func (a *apiClient) getQueues() (*queues, error) {
+ req, err := a.createRequest(fmt.Sprintf(pathStats, a.webadmin, keyQueues))
+ if err != nil {
+ return nil, fmt.Errorf("error on creating request '%s' : %v", a.request.URL, err)
+ }
+
+ resp, err := a.doRequestOK(req)
+
+ defer closeBody(resp)
+
+ if err != nil {
+ return nil, err
+ }
+
+ var queues queues
+
+ if err := xml.NewDecoder(resp.Body).Decode(&queues); err != nil {
+ return nil, fmt.Errorf("error on decoding resp from %s : %s", req.URL, err)
+ }
+
+ return &queues, nil
+}
+
+func (a *apiClient) getTopics() (*topics, error) {
+ req, err := a.createRequest(fmt.Sprintf(pathStats, a.webadmin, keyTopics))
+ if err != nil {
+ return nil, fmt.Errorf("error on creating request '%s' : %v", a.request.URL, err)
+ }
+
+ resp, err := a.doRequestOK(req)
+
+ defer closeBody(resp)
+
+ if err != nil {
+ return nil, err
+ }
+
+ var topics topics
+
+ if err := xml.NewDecoder(resp.Body).Decode(&topics); err != nil {
+ return nil, fmt.Errorf("error on decoding resp from %s : %s", req.URL, err)
+ }
+
+ return &topics, nil
+}
+
+func (a *apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
+ resp, err := a.httpClient.Do(req)
+ if err != nil {
+ return resp, fmt.Errorf("error on request to %s : %v", req.URL, err)
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+ }
+
+ return resp, err
+}
+
+func (a *apiClient) createRequest(urlPath string) (*http.Request, error) {
+ req := a.request.Copy()
+ u, err := url.Parse(req.URL)
+ if err != nil {
+ return nil, err
+ }
+ u.Path = path.Join(u.Path, urlPath)
+ req.URL = u.String()
+ return web.NewHTTPRequest(req)
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/activemq/charts.go b/src/go/plugin/go.d/modules/activemq/charts.go
new file mode 100644
index 000000000..a169da01a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/charts.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package activemq
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+)
+
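+// charts are templates: the "%s_%s" placeholders in chart and dim IDs are filled with the type
+// (queues/topics) and the sanitized queue/topic name by addQueueTopicCharts in collect.go.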
+var charts = Charts{
+ {
+ ID: "%s_%s_messages",
+ Title: "%s Messages",
+ Units: "messages/s",
+ Fam: "",
+ Ctx: "activemq.messages",
+ Dims: Dims{
+ {ID: "%s_%s_enqueued", Name: "enqueued", Algo: module.Incremental},
+ {ID: "%s_%s_dequeued", Name: "dequeued", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "%s_%s_unprocessed_messages",
+ Title: "%s Unprocessed Messages",
+ Units: "messages",
+ Fam: "",
+ Ctx: "activemq.unprocessed_messages",
+ Dims: Dims{
+ {ID: "%s_%s_unprocessed", Name: "unprocessed"},
+ },
+ },
+ {
+ ID: "%s_%s_consumers",
+ Title: "%s Consumers",
+ Units: "consumers",
+ Fam: "",
+ Ctx: "activemq.consumers",
+ Dims: Dims{
+ {ID: "%s_%s_consumers", Name: "consumers"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/activemq/collect.go b/src/go/plugin/go.d/modules/activemq/collect.go
new file mode 100644
index 000000000..0dbaf5544
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/collect.go
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package activemq
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ keyQueues = "queues"
+ keyTopics = "topics"
+ keyAdvisory = "Advisory"
+)
+
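+// nameReplacer sanitizes queue/topic names ("." -> "_", spaces removed) before they are used in metric keys and chart IDs.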
+var nameReplacer = strings.NewReplacer(".", "_", " ", "")
+
+func (a *ActiveMQ) collect() (map[string]int64, error) {
+ metrics := make(map[string]int64)
+
+ var (
+ queues *queues
+ topics *topics
+ err error
+ )
+
+ if queues, err = a.apiClient.getQueues(); err != nil {
+ return nil, err
+ }
+
+ if topics, err = a.apiClient.getTopics(); err != nil {
+ return nil, err
+ }
+
+ a.processQueues(queues, metrics)
+ a.processTopics(topics, metrics)
+
+ return metrics, nil
+}
+
+func (a *ActiveMQ) processQueues(queues *queues, metrics map[string]int64) {
+ var (
+ count = len(a.activeQueues)
+ updated = make(map[string]bool)
+ unp int
+ )
+
+ for _, q := range queues.Items {
+ if strings.Contains(q.Name, keyAdvisory) {
+ continue
+ }
+
+ if !a.activeQueues[q.Name] {
+ if a.MaxQueues != 0 && count > a.MaxQueues {
+ unp++
+ continue
+ }
+
+ if !a.filterQueues(q.Name) {
+ continue
+ }
+
+ a.activeQueues[q.Name] = true
+ a.addQueueTopicCharts(q.Name, keyQueues)
+ }
+
+ rname := nameReplacer.Replace(q.Name)
+
+ metrics["queues_"+rname+"_consumers"] = q.Stats.ConsumerCount
+ metrics["queues_"+rname+"_enqueued"] = q.Stats.EnqueueCount
+ metrics["queues_"+rname+"_dequeued"] = q.Stats.DequeueCount
+ metrics["queues_"+rname+"_unprocessed"] = q.Stats.EnqueueCount - q.Stats.DequeueCount
+
+ updated[q.Name] = true
+ }
+
+ for name := range a.activeQueues {
+ if !updated[name] {
+ delete(a.activeQueues, name)
+ a.removeQueueTopicCharts(name, keyQueues)
+ }
+ }
+
+ if unp > 0 {
+ a.Debugf("%d queues were unprocessed due to max_queues limit (%d)", unp, a.MaxQueues)
+ }
+}
+
+func (a *ActiveMQ) processTopics(topics *topics, metrics map[string]int64) {
+ var (
+ count = len(a.activeTopics)
+ updated = make(map[string]bool)
+ unp int
+ )
+
+ for _, t := range topics.Items {
+ if strings.Contains(t.Name, keyAdvisory) {
+ continue
+ }
+
+ if !a.activeTopics[t.Name] {
+ if a.MaxTopics != 0 && count > a.MaxTopics {
+ unp++
+ continue
+ }
+
+ if !a.filterTopics(t.Name) {
+ continue
+ }
+
+ a.activeTopics[t.Name] = true
+ a.addQueueTopicCharts(t.Name, keyTopics)
+ }
+
+ rname := nameReplacer.Replace(t.Name)
+
+ metrics["topics_"+rname+"_consumers"] = t.Stats.ConsumerCount
+ metrics["topics_"+rname+"_enqueued"] = t.Stats.EnqueueCount
+ metrics["topics_"+rname+"_dequeued"] = t.Stats.DequeueCount
+ metrics["topics_"+rname+"_unprocessed"] = t.Stats.EnqueueCount - t.Stats.DequeueCount
+
+ updated[t.Name] = true
+ }
+
+ for name := range a.activeTopics {
+ if !updated[name] {
+ // TODO: delete after timeout?
+ delete(a.activeTopics, name)
+ a.removeQueueTopicCharts(name, keyTopics)
+ }
+ }
+
+ if unp > 0 {
+ a.Debugf("%d topics were unprocessed due to max_topics limit (%d)", unp, a.MaxTopics)
+ }
+}
+
+func (a *ActiveMQ) filterQueues(line string) bool {
+ if a.queuesFilter == nil {
+ return true
+ }
+ return a.queuesFilter.MatchString(line)
+}
+
+func (a *ActiveMQ) filterTopics(line string) bool {
+ if a.topicsFilter == nil {
+ return true
+ }
+ return a.topicsFilter.MatchString(line)
+}
+
+func (a *ActiveMQ) addQueueTopicCharts(name, typ string) {
+ rname := nameReplacer.Replace(name)
+
+ charts := charts.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, typ, rname)
+ chart.Title = fmt.Sprintf(chart.Title, name)
+ chart.Fam = typ
+
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, typ, rname)
+ }
+ }
+
+	_ = a.charts.Add(*charts...)
+}
+
+func (a *ActiveMQ) removeQueueTopicCharts(name, typ string) {
+ rname := nameReplacer.Replace(name)
+
+ chart := a.charts.Get(fmt.Sprintf("%s_%s_messages", typ, rname))
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+
+ chart = a.charts.Get(fmt.Sprintf("%s_%s_unprocessed_messages", typ, rname))
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+
+ chart = a.charts.Get(fmt.Sprintf("%s_%s_consumers", typ, rname))
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+}
diff --git a/src/go/plugin/go.d/modules/activemq/config_schema.json b/src/go/plugin/go.d/modules/activemq/config_schema.json
new file mode 100644
index 000000000..df71bcadf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/config_schema.json
@@ -0,0 +1,234 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ActiveMQ collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the ActiveMQ [Web Console](https://activemq.apache.org/components/classic/documentation/web-console).",
+ "type": "string",
+ "default": "http://127.0.0.1:8161",
+ "format": "uri"
+ },
+ "webadmin": {
+ "title": "Webadmin path",
+ "description": "Webadmin root path.",
+ "type": "string",
+ "default": "admin"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "max_queues": {
+ "title": "Queue limit",
+ "description": "The maximum number of concurrently collected queues. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 50
+ },
+ "queues_filter": {
+ "title": "Queue selector",
+ "description": "Collect queues whose names match the specified [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme).",
+ "type": "string",
+ "minimum": 1,
+ "default": "*"
+ },
+ "max_topics": {
+ "title": "Topic limit",
+ "description": "The maximum number of concurrently collected topics. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 50
+ },
+ "topics_filter": {
+ "title": "Topic selector",
+ "description": "Collect topics whose names match the specified [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme).",
+ "type": "string",
+ "minimum": 1,
+ "default": "*"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url",
+ "webadmin"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "webadmin",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Filtering",
+ "fields": [
+ "max_queues",
+ "queues_filter",
+ "max_topics",
+ "topics_filter"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "queues_filter": {
+ "ui:help": "Use `*` to collect all queues. To exclude all queues from collection, use `!*`."
+ },
+ "topics_filter": {
+ "ui:help": "Use `*` to collect all topics. To exclude all topics from collection, use `!*`."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/activemq/init.go b/src/go/plugin/go.d/modules/activemq/init.go
new file mode 100644
index 000000000..e48dacad5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/init.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package activemq
+
+import (
+	"errors"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+func (a *ActiveMQ) validateConfig() error {
+ if a.URL == "" {
+ return errors.New("url not set")
+ }
+ if a.Webadmin == "" {
+		return errors.New("webadmin root path not set")
+ }
+ return nil
+}
+
+func (a *ActiveMQ) initQueuesFiler() (matcher.Matcher, error) {
+ if a.QueuesFilter == "" {
+ return matcher.TRUE(), nil
+ }
+ return matcher.NewSimplePatternsMatcher(a.QueuesFilter)
+}
+
+func (a *ActiveMQ) initTopicsFilter() (matcher.Matcher, error) {
+ if a.TopicsFilter == "" {
+ return matcher.TRUE(), nil
+ }
+ return matcher.NewSimplePatternsMatcher(a.TopicsFilter)
+}
diff --git a/src/go/plugin/go.d/modules/activemq/integrations/activemq.md b/src/go/plugin/go.d/modules/activemq/integrations/activemq.md
new file mode 100644
index 000000000..fc215bfb9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/integrations/activemq.md
@@ -0,0 +1,268 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/activemq/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/activemq/metadata.yaml"
+sidebar_label: "ActiveMQ"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ActiveMQ
+
+
+<img src="https://netdata.cloud/img/activemq.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: activemq
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors ActiveMQ queues and topics.
+
+It collects metrics by sending HTTP requests to the Web Console API.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This collector discovers instances running on the local host that provide metrics on port 8161.
+On startup, it tries to collect metrics from:
+
+- http://localhost:8161
+
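+That URL maps to the Web Console XML statistics pages the collector requests (with the default `admin` webadmin root). A quick reachability check (illustrative; adjust host, port, and credentials to your setup):
+
+```bash
+# add -u <user>:<password> if the Web Console requires authentication
+curl http://localhost:8161/admin/xml/queues.jsp
+curl http://localhost:8161/admin/xml/topics.jsp
+```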
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ActiveMQ instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| activemq.messages | enqueued, dequeued | messages/s |
+| activemq.unprocessed_messages | unprocessed | messages |
+| activemq.consumers | consumers | consumers |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/activemq.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/activemq.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://localhost:8161 | yes |
+| webadmin | Webadmin root path. | admin | yes |
+| max_queues | Maximum number of concurrently collected queues. | 50 | no |
+| max_topics | Maximum number of concurrently collected topics. | 50 | no |
+| queues_filter | Queues filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |
+| topics_filter | Topics filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| timeout | HTTP request timeout. | 1 | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8161
+ webadmin: admin
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8161
+ webadmin: admin
+ username: foo
+ password: bar
+
+```
+</details>
+
+##### Filters and limits
+
+Using filters and limits for queues and topics.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8161
+ webadmin: admin
+ max_queues: 100
+ max_topics: 100
+ queues_filter: '!sandr* *'
+ topics_filter: '!sandr* *'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8161
+ webadmin: admin
+
+ - name: remote
+ url: http://192.0.2.1:8161
+ webadmin: admin
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `activemq` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m activemq
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `activemq` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep activemq
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep activemq /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep activemq
+```
+
+
diff --git a/src/go/plugin/go.d/modules/activemq/metadata.yaml b/src/go/plugin/go.d/modules/activemq/metadata.yaml
new file mode 100644
index 000000000..5bbb0e5a2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/metadata.yaml
@@ -0,0 +1,230 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-activemq
+ module_name: activemq
+ plugin_name: go.d.plugin
+ monitored_instance:
+ categories:
+ - data-collection.message-brokers
+ icon_filename: activemq.png
+ name: ActiveMQ
+ link: https://activemq.apache.org/
+ alternative_monitored_instances: []
+ keywords:
+ - message broker
+ most_popular: false
+ info_provided_to_referring_integrations:
+ description: ""
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: go.d.plugin
+ module_name: httpcheck
+ - plugin_name: apps.plugin
+ module_name: apps
+ overview:
+ data_collection:
+ metrics_description: This collector monitors ActiveMQ queues and topics.
+ method_description: It collects metrics by sending HTTP requests to the Web Console API.
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ This collector discovers instances running on the local host that provide metrics on port 8161.
+ On startup, it tries to collect metrics from:
+
+ - http://localhost:8161
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/activemq.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://localhost:8161
+ required: true
+ - name: webadmin
+ description: Webadmin root path.
+ default_value: admin
+ required: true
+ - name: max_queues
+ description: Maximum number of concurrently collected queues.
+ default_value: 50
+ required: false
+ - name: max_topics
+ description: Maximum number of concurrently collected topics.
+ default_value: 50
+ required: false
+ - name: queues_filter
+ description: |
+ Queues filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).
+ default_value: ""
+ required: false
+ - name: topics_filter
+ description: |
+ Topics filter. Syntax is [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).
+ default_value: ""
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8161
+ webadmin: admin
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8161
+ webadmin: admin
+ username: foo
+ password: bar
+ - name: Filters and limits
+ description: Using filters and limits for queues and topics.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8161
+ webadmin: admin
+ max_queues: 100
+ max_topics: 100
+ queues_filter: '!sandr* *'
+ topics_filter: '!sandr* *'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8161
+ webadmin: admin
+
+ - name: remote
+ url: http://192.0.2.1:8161
+ webadmin: admin
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: activemq.messages
+ availability: []
+ description: Messages
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: enqueued
+ - name: dequeued
+ - name: activemq.unprocessed_messages
+ availability: []
+ description: Unprocessed Messages
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: unprocessed
+ - name: activemq.consumers
+ availability: []
+ description: Consumers
+ unit: consumers
+ chart_type: line
+ dimensions:
+ - name: consumers
diff --git a/src/go/plugin/go.d/modules/activemq/testdata/config.json b/src/go/plugin/go.d/modules/activemq/testdata/config.json
new file mode 100644
index 000000000..13327dd3f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/testdata/config.json
@@ -0,0 +1,25 @@
+{
+ "update_every": 123,
+ "webadmin": "ok",
+ "max_queues": 123,
+ "max_topics": 123,
+ "queues_filter": "ok",
+ "topics_filter": "ok",
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/activemq/testdata/config.yaml b/src/go/plugin/go.d/modules/activemq/testdata/config.yaml
new file mode 100644
index 000000000..dbb4232e9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/activemq/testdata/config.yaml
@@ -0,0 +1,22 @@
+update_every: 123
+webadmin: "ok"
+max_queues: 123
+max_topics: 123
+queues_filter: "ok"
+topics_filter: "ok"
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/adaptecraid/README.md b/src/go/plugin/go.d/modules/adaptecraid/README.md
new file mode 120000
index 000000000..0a1566188
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/README.md
@@ -0,0 +1 @@
+integrations/adaptec_raid.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/adaptecraid/adaptec.go b/src/go/plugin/go.d/modules/adaptecraid/adaptec.go
new file mode 100644
index 000000000..264390e10
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/adaptec.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package adaptecraid
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("adaptec_raid", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *AdaptecRaid {
+ return &AdaptecRaid{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ lds: make(map[string]bool),
+ pds: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ AdaptecRaid struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec arcconfCli
+
+ lds map[string]bool
+ pds map[string]bool
+ }
+ arcconfCli interface {
+ logicalDevicesInfo() ([]byte, error)
+ physicalDevicesInfo() ([]byte, error)
+ }
+)
+
+func (a *AdaptecRaid) Configuration() any {
+ return a.Config
+}
+
+func (a *AdaptecRaid) Init() error {
+ arcconfExec, err := a.initArcconfCliExec()
+ if err != nil {
+ a.Errorf("arcconf exec initialization: %v", err)
+ return err
+ }
+ a.exec = arcconfExec
+
+ return nil
+}
+
+func (a *AdaptecRaid) Check() error {
+ mx, err := a.collect()
+ if err != nil {
+ a.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (a *AdaptecRaid) Charts() *module.Charts {
+ return a.charts
+}
+
+func (a *AdaptecRaid) Collect() map[string]int64 {
+ mx, err := a.collect()
+ if err != nil {
+ a.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (a *AdaptecRaid) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/adaptec_test.go b/src/go/plugin/go.d/modules/adaptecraid/adaptec_test.go
new file mode 100644
index 000000000..9abe5c984
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/adaptec_test.go
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package adaptecraid
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataLogicalDevicesOld, _ = os.ReadFile("testdata/getconfig-ld-old.txt")
+ dataPhysicalDevicesOld, _ = os.ReadFile("testdata/getconfig-pd-old.txt")
+ dataLogicalDevicesCurrent, _ = os.ReadFile("testdata/getconfig-ld-current.txt")
+ dataPhysicalDevicesCurrent, _ = os.ReadFile("testdata/getconfig-pd-current.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataLogicalDevicesOld": dataLogicalDevicesOld,
+ "dataPhysicalDevicesOld": dataPhysicalDevicesOld,
+ "dataLogicalDevicesCurrent": dataLogicalDevicesCurrent,
+ "dataPhysicalDevicesCurrent": dataPhysicalDevicesCurrent,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestAdaptecRaid_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &AdaptecRaid{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestAdaptecRaid_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if 'ndsudo' not found": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ adaptec := New()
+
+ if test.wantFail {
+ assert.Error(t, adaptec.Init())
+ } else {
+ assert.NoError(t, adaptec.Init())
+ }
+ })
+ }
+}
+
+func TestAdaptecRaid_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *AdaptecRaid
+ }{
+ "not initialized exec": {
+ prepare: func() *AdaptecRaid {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *AdaptecRaid {
+ adaptec := New()
+ adaptec.exec = prepareMockOkCurrent()
+ _ = adaptec.Check()
+ return adaptec
+ },
+ },
+ "after collect": {
+ prepare: func() *AdaptecRaid {
+ adaptec := New()
+ adaptec.exec = prepareMockOkCurrent()
+ _ = adaptec.Collect()
+ return adaptec
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ adaptec := test.prepare()
+
+ assert.NotPanics(t, adaptec.Cleanup)
+ })
+ }
+}
+
+func TestAdaptecRaid_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestAdaptecRaid_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockArcconfExec
+ wantFail bool
+ }{
+ "success case old data": {
+ wantFail: false,
+ prepareMock: prepareMockOkOld,
+ },
+ "success case current data": {
+ wantFail: false,
+ prepareMock: prepareMockOkCurrent,
+ },
+ "err on exec": {
+ wantFail: true,
+ prepareMock: prepareMockErr,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ adaptec := New()
+ mock := test.prepareMock()
+ adaptec.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, adaptec.Check())
+ } else {
+ assert.NoError(t, adaptec.Check())
+ }
+ })
+ }
+}
+
+func TestAdaptecRaid_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockArcconfExec
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success case old data": {
+ prepareMock: prepareMockOkOld,
+ wantCharts: len(ldChartsTmpl)*1 + (len(pdChartsTmpl)-1)*4,
+ wantMetrics: map[string]int64{
+ "ld_0_health_state_critical": 0,
+ "ld_0_health_state_ok": 1,
+ "pd_0_health_state_critical": 0,
+ "pd_0_health_state_ok": 1,
+ "pd_0_smart_warnings": 0,
+ "pd_1_health_state_critical": 0,
+ "pd_1_health_state_ok": 1,
+ "pd_1_smart_warnings": 0,
+ "pd_2_health_state_critical": 0,
+ "pd_2_health_state_ok": 1,
+ "pd_2_smart_warnings": 0,
+ "pd_3_health_state_critical": 0,
+ "pd_3_health_state_ok": 1,
+ "pd_3_smart_warnings": 0,
+ },
+ },
+ "success case current data": {
+ prepareMock: prepareMockOkCurrent,
+ wantCharts: len(ldChartsTmpl)*1 + (len(pdChartsTmpl)-1)*6,
+ wantMetrics: map[string]int64{
+ "ld_0_health_state_critical": 0,
+ "ld_0_health_state_ok": 1,
+ "pd_0_health_state_critical": 0,
+ "pd_0_health_state_ok": 1,
+ "pd_0_smart_warnings": 0,
+ "pd_1_health_state_critical": 0,
+ "pd_1_health_state_ok": 1,
+ "pd_1_smart_warnings": 0,
+ "pd_2_health_state_critical": 0,
+ "pd_2_health_state_ok": 1,
+ "pd_2_smart_warnings": 0,
+ "pd_3_health_state_critical": 0,
+ "pd_3_health_state_ok": 1,
+ "pd_3_smart_warnings": 0,
+ "pd_4_health_state_critical": 0,
+ "pd_4_health_state_ok": 1,
+ "pd_4_smart_warnings": 0,
+ "pd_5_health_state_critical": 0,
+ "pd_5_health_state_ok": 1,
+ "pd_5_smart_warnings": 0,
+ },
+ },
+ "err on exec": {
+ prepareMock: prepareMockErr,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response": {
+			prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ adaptec := New()
+ mock := test.prepareMock()
+ adaptec.exec = mock
+
+ mx := adaptec.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ assert.Len(t, *adaptec.Charts(), test.wantCharts)
+ })
+ }
+}
+
+func prepareMockOkOld() *mockArcconfExec {
+ return &mockArcconfExec{
+ ldData: dataLogicalDevicesOld,
+ pdData: dataPhysicalDevicesOld,
+ }
+}
+
+func prepareMockOkCurrent() *mockArcconfExec {
+ return &mockArcconfExec{
+ ldData: dataLogicalDevicesCurrent,
+ pdData: dataPhysicalDevicesCurrent,
+ }
+}
+
+func prepareMockErr() *mockArcconfExec {
+ return &mockArcconfExec{
+ errOnInfo: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockArcconfExec {
+ resp := []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`)
+ return &mockArcconfExec{
+ ldData: resp,
+ pdData: resp,
+ }
+}
+
+func prepareMockEmptyResponse() *mockArcconfExec {
+ return &mockArcconfExec{}
+}
+
+type mockArcconfExec struct {
+ errOnInfo bool
+ ldData []byte
+ pdData []byte
+}
+
+func (m *mockArcconfExec) logicalDevicesInfo() ([]byte, error) {
+ if m.errOnInfo {
+ return nil, errors.New("mock.logicalDevicesInfo() error")
+ }
+ return m.ldData, nil
+}
+
+func (m *mockArcconfExec) physicalDevicesInfo() ([]byte, error) {
+ if m.errOnInfo {
+ return nil, errors.New("mock.physicalDevicesInfo() error")
+ }
+ return m.pdData, nil
+}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/charts.go b/src/go/plugin/go.d/modules/adaptecraid/charts.go
new file mode 100644
index 000000000..65be20199
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/charts.go
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package adaptecraid
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioLDStatus = module.Priority + iota
+
+ prioPDState
+ prioPDSmartWarnings
+ prioPDSmartTemperature
+)
+
+var ldChartsTmpl = module.Charts{
+ ldStatusChartTmpl.Copy(),
+}
+
+var (
+ ldStatusChartTmpl = module.Chart{
+ ID: "logical_device_%s_status",
+ Title: "Logical Device status",
+ Units: "status",
+ Fam: "ld health",
+ Ctx: "adaptecraid.logical_device_status",
+ Type: module.Line,
+ Priority: prioLDStatus,
+ Dims: module.Dims{
+ {ID: "ld_%s_health_state_ok", Name: "ok"},
+ {ID: "ld_%s_health_state_critical", Name: "critical"},
+ },
+ }
+)
+
+var pdChartsTmpl = module.Charts{
+ pdStateChartTmpl.Copy(),
+ pdSmartWarningChartTmpl.Copy(),
+ pdTemperatureChartTmpl.Copy(),
+}
+
+var (
+ pdStateChartTmpl = module.Chart{
+ ID: "physical_device_%s_state",
+ Title: "Physical Device state",
+ Units: "state",
+ Fam: "pd health",
+ Ctx: "adaptecraid.physical_device_state",
+ Type: module.Line,
+ Priority: prioPDState,
+ Dims: module.Dims{
+ {ID: "pd_%s_health_state_ok", Name: "ok"},
+ {ID: "pd_%s_health_state_critical", Name: "critical"},
+ },
+ }
+ pdSmartWarningChartTmpl = module.Chart{
+ ID: "physical_device_%s_smart_warnings",
+ Title: "Physical Device SMART warnings",
+ Units: "warnings",
+ Fam: "pd smart",
+ Ctx: "adaptecraid.physical_device_smart_warnings",
+ Type: module.Line,
+ Priority: prioPDSmartWarnings,
+ Dims: module.Dims{
+ {ID: "pd_%s_smart_warnings", Name: "smart"},
+ },
+ }
+ pdTemperatureChartTmpl = module.Chart{
+ ID: "physical_device_%s_temperature",
+ Title: "Physical Device temperature",
+ Units: "Celsius",
+ Fam: "pd temperature",
+ Ctx: "adaptecraid.physical_device_temperature",
+ Type: module.Line,
+ Priority: prioPDSmartTemperature,
+ Dims: module.Dims{
+ {ID: "pd_%s_temperature", Name: "temperature"},
+ },
+ }
+)
+
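+// addLogicalDeviceCharts instantiates the LD chart templates for a specific logical device by
+// substituting its number into the chart and dimension IDs and attaching identifying labels.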
+func (a *AdaptecRaid) addLogicalDeviceCharts(ld *logicalDevice) {
+ charts := ldChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, ld.number)
+ chart.Labels = []module.Label{
+ {Key: "ld_number", Value: ld.number},
+ {Key: "ld_name", Value: ld.name},
+ {Key: "raid_level", Value: ld.raidLevel},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, ld.number)
+ }
+ }
+
+ if err := a.Charts().Add(*charts...); err != nil {
+ a.Warning(err)
+ }
+}
+
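+// addPhysicalDeviceCharts instantiates the PD chart templates for a specific physical device.
+// The temperature chart is dropped when the controller reports a non-numeric value (e.g. "Not Supported").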
+func (a *AdaptecRaid) addPhysicalDeviceCharts(pd *physicalDevice) {
+ charts := pdChartsTmpl.Copy()
+
+ if _, err := strconv.ParseInt(pd.temperature, 10, 64); err != nil {
+ _ = charts.Remove(pdTemperatureChartTmpl.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, pd.number)
+ chart.Labels = []module.Label{
+ {Key: "pd_number", Value: pd.number},
+ {Key: "location", Value: pd.location},
+ {Key: "vendor", Value: pd.vendor},
+ {Key: "model", Value: pd.model},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, pd.number)
+ }
+ }
+
+ if err := a.Charts().Add(*charts...); err != nil {
+ a.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/collect.go b/src/go/plugin/go.d/modules/adaptecraid/collect.go
new file mode 100644
index 000000000..b4439ba8e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/collect.go
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package adaptecraid
+
+import (
+ "strings"
+)
+
+func (a *AdaptecRaid) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := a.collectLogicalDevices(mx); err != nil {
+ return nil, err
+ }
+ if err := a.collectPhysicalDevices(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
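+// getColonSepValue returns the value part of an "Attribute : Value" line from arcconf output,
+// e.g. getColonSepValue("Status of Logical Device : Optimal") returns "Optimal".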
+func getColonSepValue(line string) string {
+ i := strings.IndexByte(line, ':')
+ if i == -1 {
+ return ""
+ }
+ return strings.TrimSpace(line[i+1:])
+}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/collect_ld.go b/src/go/plugin/go.d/modules/adaptecraid/collect_ld.go
new file mode 100644
index 000000000..180f97490
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/collect_ld.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package adaptecraid
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+)
+
+type logicalDevice struct {
+ number string
+ name string
+ raidLevel string
+ status string
+ failedStripes string
+}
+
+func (a *AdaptecRaid) collectLogicalDevices(mx map[string]int64) error {
+ bs, err := a.exec.logicalDevicesInfo()
+ if err != nil {
+ return err
+ }
+
+ devices, err := parseLogicDevInfo(bs)
+ if err != nil {
+ return err
+ }
+
+ if len(devices) == 0 {
+ return errors.New("no logical devices found")
+ }
+
+ for _, ld := range devices {
+ if !a.lds[ld.number] {
+ a.lds[ld.number] = true
+ a.addLogicalDeviceCharts(ld)
+ }
+
+ px := fmt.Sprintf("ld_%s_", ld.number)
+
+		// The full set of possible status values is not documented; any status other than a known-good one is treated as critical.
+ mx[px+"health_state_ok"] = 0
+ mx[px+"health_state_critical"] = 0
+ if isOkLDStatus(ld) {
+ mx[px+"health_state_ok"] = 1
+ } else {
+ mx[px+"health_state_critical"] = 1
+ }
+ }
+
+ return nil
+}
+
+func isOkLDStatus(ld *logicalDevice) bool {
+ // https://github.com/thomas-krenn/check_adaptec_raid/blob/a104fd88deede87df4f07403b44394bffb30c5c3/check_adaptec_raid#L340
+ return ld.status == "Optimal"
+}
+
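+// parseLogicDevInfo parses `arcconf GETCONFIG 1 LD` output: each logical device starts with a
+// "Logical [Dd]evice number N" line followed by indented "Attribute : Value" lines.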
+func parseLogicDevInfo(bs []byte) (map[string]*logicalDevice, error) {
+ devices := make(map[string]*logicalDevice)
+
+ var ld *logicalDevice
+
+ sc := bufio.NewScanner(bytes.NewReader(bs))
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+
+ if strings.HasPrefix(line, "Logical device number") ||
+ strings.HasPrefix(line, "Logical Device number") {
+ parts := strings.Fields(line)
+ num := parts[len(parts)-1]
+ ld = &logicalDevice{number: num}
+ devices[num] = ld
+ continue
+ }
+
+ if ld == nil {
+ continue
+ }
+
+ switch {
+ case strings.HasPrefix(line, "Logical device name"),
+ strings.HasPrefix(line, "Logical Device name"):
+ ld.name = getColonSepValue(line)
+ case strings.HasPrefix(line, "RAID level"):
+ ld.raidLevel = getColonSepValue(line)
+ case strings.HasPrefix(line, "Status of logical device"),
+ strings.HasPrefix(line, "Status of Logical Device"):
+ ld.status = getColonSepValue(line)
+ case strings.HasPrefix(line, "Failed stripes"):
+ ld.failedStripes = getColonSepValue(line)
+ }
+ }
+
+ return devices, nil
+}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/collect_pd.go b/src/go/plugin/go.d/modules/adaptecraid/collect_pd.go
new file mode 100644
index 000000000..272266b47
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/collect_pd.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package adaptecraid
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type physicalDevice struct {
+ number string
+ state string
+ location string
+ vendor string
+ model string
+ smart string
+ smartWarnings string
+ powerState string
+ temperature string
+}
+
+func (a *AdaptecRaid) collectPhysicalDevices(mx map[string]int64) error {
+ bs, err := a.exec.physicalDevicesInfo()
+ if err != nil {
+ return err
+ }
+
+ devices, err := parsePhysDevInfo(bs)
+ if err != nil {
+ return err
+ }
+
+ if len(devices) == 0 {
+ return errors.New("no physical devices found")
+ }
+
+ for _, pd := range devices {
+ if !a.pds[pd.number] {
+ a.pds[pd.number] = true
+ a.addPhysicalDeviceCharts(pd)
+ }
+
+ px := fmt.Sprintf("pd_%s_", pd.number)
+
+		// The full set of possible state values is not documented; any state other than a known-good one is treated as critical.
+ mx[px+"health_state_ok"] = 0
+ mx[px+"health_state_critical"] = 0
+ if isOkPDState(pd) {
+ mx[px+"health_state_ok"] = 1
+ } else {
+ mx[px+"health_state_critical"] = 1
+ }
+
+ if v, err := strconv.ParseInt(pd.smartWarnings, 10, 64); err == nil {
+ mx[px+"smart_warnings"] = v
+ }
+ if v, err := strconv.ParseInt(pd.temperature, 10, 64); err == nil {
+ mx[px+"temperature"] = v
+ }
+ }
+
+ return nil
+}
+
+func isOkPDState(pd *physicalDevice) bool {
+ // https://github.com/thomas-krenn/check_adaptec_raid/blob/a104fd88deede87df4f07403b44394bffb30c5c3/check_adaptec_raid#L455
+ switch pd.state {
+ case "Online",
+ "Global Hot-Spare",
+ "Dedicated Hot-Spare",
+ "Pooled Hot-Spare",
+ "Hot Spare",
+ "Ready",
+ "Online (JBOD)",
+ "Raw (Pass Through)":
+ return true
+ }
+ return false
+}
+
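+// parsePhysDevInfo parses `arcconf GETCONFIG 1 PD` output: each physical device starts with a
+// "Device #N" line followed by indented "Attribute : Value" lines.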
+func parsePhysDevInfo(bs []byte) (map[string]*physicalDevice, error) {
+ devices := make(map[string]*physicalDevice)
+
+ var pd *physicalDevice
+
+ sc := bufio.NewScanner(bytes.NewReader(bs))
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+
+ if strings.HasPrefix(line, "Device #") {
+ num := strings.TrimPrefix(line, "Device #")
+ pd = &physicalDevice{number: num}
+ devices[num] = pd
+ continue
+ }
+
+ if pd == nil {
+ continue
+ }
+
+ switch {
+ case strings.HasPrefix(line, "State"):
+ pd.state = getColonSepValue(line)
+ case strings.HasPrefix(line, "Reported Location"):
+ pd.location = getColonSepValue(line)
+ case strings.HasPrefix(line, "Vendor"):
+ pd.vendor = getColonSepValue(line)
+ case strings.HasPrefix(line, "Model"):
+ pd.model = getColonSepValue(line)
+ case strings.HasPrefix(line, "S.M.A.R.T. warnings"):
+ pd.smartWarnings = getColonSepValue(line)
+ case strings.HasPrefix(line, "S.M.A.R.T."):
+ pd.smart = getColonSepValue(line)
+ case strings.HasPrefix(line, "Power State"):
+ pd.powerState = getColonSepValue(line)
+ case strings.HasPrefix(line, "Temperature"):
+ v := getColonSepValue(line) // '42 C/ 107 F' or 'Not Supported'
+ pd.temperature = strings.Fields(v)[0]
+ }
+ }
+
+ return devices, nil
+}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/config_schema.json b/src/go/plugin/go.d/modules/adaptecraid/config_schema.json
new file mode 100644
index 000000000..ad54f1585
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Adaptec RAID collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/exec.go b/src/go/plugin/go.d/modules/adaptecraid/exec.go
new file mode 100644
index 000000000..0577e6234
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/exec.go
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package adaptecraid
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newArcconfCliExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *arcconfCliExec {
+ return &arcconfCliExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ }
+}
+
+type arcconfCliExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+}
+
+func (e *arcconfCliExec) logicalDevicesInfo() ([]byte, error) {
+ return e.execute("arcconf-ld-info")
+}
+
+func (e *arcconfCliExec) physicalDevicesInfo() ([]byte, error) {
+ return e.execute("arcconf-pd-info")
+}
+
+func (e *arcconfCliExec) execute(args ...string) ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.ndsudoPath, args...)
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/init.go b/src/go/plugin/go.d/modules/adaptecraid/init.go
new file mode 100644
index 000000000..de8acc273
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/init.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package adaptecraid
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (a *AdaptecRaid) initArcconfCliExec() (arcconfCli, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ arcconfExec := newArcconfCliExec(ndsudoPath, a.Timeout.Duration(), a.Logger)
+
+ return arcconfExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/integrations/adaptec_raid.md b/src/go/plugin/go.d/modules/adaptecraid/integrations/adaptec_raid.md
new file mode 100644
index 000000000..a38207ffb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/integrations/adaptec_raid.md
@@ -0,0 +1,229 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/adaptecraid/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/adaptecraid/metadata.yaml"
+sidebar_label: "Adaptec RAID"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Adaptec RAID
+
+
+<img src="https://netdata.cloud/img/adaptec.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: adaptec_raid
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitors the health of Adaptec Hardware RAID by tracking the status of logical and physical devices in your storage system.
+It relies on the `arcconf` CLI tool but avoids directly executing the binary.
+Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+Executed commands:
+- `arcconf GETCONFIG 1 LD`
+- `arcconf GETCONFIG 1 PD`
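+
+The collector does not invoke `arcconf` itself; it asks `ndsudo` to run the allow-listed
+`arcconf-ld-info` and `arcconf-pd-info` helpers, which wrap the commands above. As a rough
+sketch (the plugins directory may differ on your system), the equivalent manual invocation
+would be:
+
+```bash
+# hypothetical manual check, assuming the default plugins directory
+/usr/libexec/netdata/plugins.d/ndsudo arcconf-ld-info   # wraps 'arcconf GETCONFIG 1 LD'
+/usr/libexec/netdata/plugins.d/ndsudo arcconf-pd-info   # wraps 'arcconf GETCONFIG 1 PD'
+```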
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per logical device
+
+These metrics refer to the Logical Device (LD).
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| ld_number | Logical device index number |
+| ld_name | Logical device name |
+| raid_level | RAID level |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| adaptecraid.logical_device_status | ok, critical | status |
+
+### Per physical device
+
+These metrics refer to the Physical Device (PD).
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| pd_number | Physical device index number |
+| location | Physical device location (e.g. Connector 0, Device 1) |
+| vendor | Physical device vendor |
+| model | Physical device model |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| adaptecraid.physical_device_state | ok, critical | status |
+| adaptecraid.physical_device_smart_warnings | smart | warnings |
+| adaptecraid.physical_device_temperature | temperature | Celsius |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ adaptec_raid_ld_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.logical_device_status | Adaptec RAID logical device (number ${label:ld_number} name ${label:ld_name}) health status is critical |
+| [ adaptec_raid_pd_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf) | adaptecraid.physical_device_state | Adaptec RAID physical device (number ${label:pd_number} location ${label:location}) health state is critical |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/adaptec_raid.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/adaptec_raid.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | arcconf binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: adaptec_raid
+ update_every: 5 # Collect Adaptec Hardware RAID statistics every 5 seconds
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `adaptec_raid` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m adaptec_raid
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `adaptec_raid` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep adaptec_raid
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep adaptec_raid /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep adaptec_raid
+```
+
+
diff --git a/src/go/plugin/go.d/modules/adaptecraid/metadata.yaml b/src/go/plugin/go.d/modules/adaptecraid/metadata.yaml
new file mode 100644
index 000000000..e573994f5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/metadata.yaml
@@ -0,0 +1,146 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-adaptecraid
+ plugin_name: go.d.plugin
+ module_name: adaptec_raid
+ monitored_instance:
+ name: Adaptec RAID
+ link: "https://www.microchip.com/en-us/products/storage"
+ icon_filename: "adaptec.svg"
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - storage
+ - raid-controller
+ - manage-disks
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Monitors the health of Adaptec Hardware RAID by tracking the status of logical and physical devices in your storage system.
+ It relies on the `arcconf` CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+ Executed commands:
+ - `arcconf GETCONFIG 1 LD`
+ - `arcconf GETCONFIG 1 PD`
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/adaptec_raid.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: arcconf binary execution timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: adaptec_raid
+ update_every: 5 # Collect Adaptec Hardware RAID statistics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: adaptec_raid_ld_health_status
+ metric: adaptecraid.logical_device_status
+ info: Adaptec RAID logical device (number ${label:ld_number} name ${label:ld_name}) health status is critical
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf
+ - name: adaptec_raid_pd_health_state
+ metric: adaptecraid.physical_device_state
+ info: Adaptec RAID physical device (number ${label:pd_number} location ${label:location}) health state is critical
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/adaptec_raid.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: logical device
+ description: These metrics refer to the Logical Device (LD).
+ labels:
+ - name: ld_number
+ description: Logical device index number
+ - name: ld_name
+ description: Logical device name
+ - name: raid_level
+ description: RAID level
+ metrics:
+ - name: adaptecraid.logical_device_status
+ description: Logical Device status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: critical
+ - name: physical device
+ description: These metrics refer to the Physical Device (PD).
+ labels:
+ - name: pd_number
+ description: Physical device index number
+ - name: location
+ description: Physical device location (e.g. Connector 0, Device 1)
+ - name: vendor
+ description: Physical device vendor
+ - name: model
+ description: Physical device model
+ metrics:
+ - name: adaptecraid.physical_device_state
+ description: Physical Device state
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: critical
+ - name: adaptecraid.physical_device_smart_warnings
+ description: Physical Device SMART warnings
+ unit: warnings
+ chart_type: line
+ dimensions:
+ - name: smart
+ - name: adaptecraid.physical_device_temperature
+ description: Physical Device temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/adaptecraid/testdata/config.json b/src/go/plugin/go.d/modules/adaptecraid/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/adaptecraid/testdata/config.yaml b/src/go/plugin/go.d/modules/adaptecraid/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-ld-current.txt b/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-ld-current.txt
new file mode 100644
index 000000000..b5a14b665
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-ld-current.txt
@@ -0,0 +1,30 @@
+Logical device information
+----------------------------------------------------------------------
+Logical Device number 0
+ Logical Device name : LogicalDrv 0
+ Block Size of member drives : 512 Bytes
+ RAID level : 10
+ Unique Identifier : 488046B2
+ Status of Logical Device : Optimal
+ Additional details : Quick initialized
+ Size : 915446 MB
+ Parity space : 915456 MB
+ Stripe-unit size : 256 KB
+ Interface Type : Serial ATA
+ Device Type : HDD
+ Read-cache setting : Enabled
+ Read-cache status : On
+ Write-cache setting : Enabled
+ Write-cache status : On
+ Partitioned : Yes
+ Protected by Hot-Spare : No
+ Bootable : Yes
+ Failed stripes : No
+ Power settings : Disabled
+ --------------------------------------------------------
+ Logical Device segment information
+ --------------------------------------------------------
+ Group 0, Segment 0 : Present (457862MB, SATA, SSD, Connector:0, Device:0) 7CS009RP
+ Group 0, Segment 1 : Present (457862MB, SATA, SSD, Connector:0, Device:1) 7CS009RQ
+ Group 1, Segment 0 : Present (457862MB, SATA, SSD, Connector:0, Device:2) 7CS00AAD
+ Group 1, Segment 1 : Present (457862MB, SATA, SSD, Connector:0, Device:3) 7CS00AAH
diff --git a/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-ld-old.txt b/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-ld-old.txt
new file mode 100644
index 000000000..0c3b46917
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-ld-old.txt
@@ -0,0 +1,33 @@
+Controllers found: 1
+----------------------------------------------------------------------
+Logical device information
+----------------------------------------------------------------------
+Logical device number 0
+ Logical device name : LogicalDrv 0
+ Block Size of member drives : 512 Bytes
+ RAID level : 10
+ Unique Identifier : 488046B2
+ Status of logical device : Optimal
+ Size : 915446 MB
+ Parity space : 915456 MB
+ Stripe-unit size : 256 KB
+ Read-cache setting : Enabled
+ Read-cache status : On
+ Write-cache setting : Enabled
+ Write-cache status : On
+ Partitioned : Yes
+ Protected by Hot-Spare : No
+ Bootable : Yes
+ Failed stripes : No
+ Power settings : Disabled
+ --------------------------------------------------------
+ Logical device segment information
+ --------------------------------------------------------
+ Group 0, Segment 0 : Present (Controller:1,Connector:0,Device:0) 7CS009RP
+ Group 0, Segment 1 : Present (Controller:1,Connector:0,Device:1) 7CS009RQ
+ Group 1, Segment 0 : Present (Controller:1,Connector:0,Device:2) 7CS00AAD
+ Group 1, Segment 1 : Present (Controller:1,Connector:0,Device:3) 7CS00AAH
+
+
+
+Command completed successfully.
diff --git a/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-pd-current.txt b/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-pd-current.txt
new file mode 100644
index 000000000..62beff83c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-pd-current.txt
@@ -0,0 +1,216 @@
+Controllers found: 1
+----------------------------------------------------------------------
+Physical Device information
+----------------------------------------------------------------------
+ Device #0
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SAS 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,1(1:0)
+ Reported Location : Connector 0, Device 1
+ Vendor : NETAPP
+ Model : X422_HCOBE600A10
+ Firmware : NA00
+ Reserved Size : 956312 KB
+ Used Size : 571392 MB
+ Unused Size : 64 KB
+ Total Size : 572325 MB
+ Write Cache : Enabled (write-back)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full rpm,Powered off
+ SSD : No
+ Temperature : Not Supported
+ ----------------------------------------------------------------
+ Device Phy Information
+ ----------------------------------------------------------------
+ Phy #0
+ PHY Identifier : 0
+ SAS Address : 5000
+ Attached PHY Identifier : 2
+ Attached SAS Address : 5000
+ Phy #1
+ PHY Identifier : 1
+ SAS Address : 5000
+
+ Device #1
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SAS 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,2(2:0)
+ Reported Location : Connector 0, Device 2
+ Vendor : NETAPP
+ Model : X422_HCOBE600A10
+ Firmware : NA02
+ Reserved Size : 956312 KB
+ Used Size : 571392 MB
+ Unused Size : 64 KB
+ Total Size : 572325 MB
+ Write Cache : Enabled (write-back)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full rpm,Powered off
+ SSD : No
+ Temperature : Not Supported
+ ----------------------------------------------------------------
+ Device Phy Information
+ ----------------------------------------------------------------
+ Phy #0
+ PHY Identifier : 0
+ SAS Address : 5000
+ Attached PHY Identifier : 1
+ Attached SAS Address : 5000
+ Phy #1
+ PHY Identifier : 1
+ SAS Address : 5000
+
+ Device #2
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SAS 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,4(4:0)
+ Reported Location : Connector 1, Device 0
+ Vendor : NETAPP
+ Model : X422_HCOBD600A10
+ Firmware : NA05
+ Reserved Size : 956312 KB
+ Used Size : 571392 MB
+ Unused Size : 64 KB
+ Total Size : 572325 MB
+ Write Cache : Enabled (write-back)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full rpm,Powered off
+ SSD : No
+ Temperature : Not Supported
+ ----------------------------------------------------------------
+ Device Phy Information
+ ----------------------------------------------------------------
+ Phy #0
+ PHY Identifier : 0
+ SAS Address : 5000
+ Attached PHY Identifier : 7
+ Attached SAS Address : 5000
+ Phy #1
+ PHY Identifier : 1
+ SAS Address : 5000
+
+ Device #3
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SAS 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,5(5:0)
+ Reported Location : Connector 1, Device 1
+ Vendor : NETAPP
+ Model : X422_HCOBD600A10
+ Firmware : NA05
+ Reserved Size : 956312 KB
+ Used Size : 571392 MB
+ Unused Size : 64 KB
+ Total Size : 572325 MB
+ Write Cache : Enabled (write-back)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full rpm,Powered off
+ SSD : No
+ Temperature : Not Supported
+ ----------------------------------------------------------------
+ Device Phy Information
+ ----------------------------------------------------------------
+ Phy #0
+ PHY Identifier : 0
+ SAS Address : 5000
+ Attached PHY Identifier : 6
+ Attached SAS Address : 5000
+ Phy #1
+ PHY Identifier : 1
+ SAS Address : 5000
+
+ Device #4
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SAS 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,6(6:0)
+ Reported Location : Connector 1, Device 2
+ Vendor : NETAPP
+ Model : X422_HCOBD600A10
+ Firmware : NA05
+ Reserved Size : 956312 KB
+ Used Size : 571392 MB
+ Unused Size : 64 KB
+ Total Size : 572325 MB
+ Write Cache : Enabled (write-back)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full rpm,Powered off
+ SSD : No
+ Temperature : Not Supported
+ ----------------------------------------------------------------
+ Device Phy Information
+ ----------------------------------------------------------------
+ Phy #0
+ PHY Identifier : 0
+ SAS Address : 5000
+ Attached PHY Identifier : 5
+ Attached SAS Address : 5000
+ Phy #1
+ PHY Identifier : 1
+ SAS Address : 5000
+
+ Device #5
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SAS 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,7(7:0)
+ Reported Location : Connector 1, Device 3
+ Vendor : NETAPP
+ Model : X422_HCOBD600A10
+ Firmware : NA05
+ Reserved Size : 956312 KB
+ Used Size : 571392 MB
+ Unused Size : 64 KB
+ Total Size : 572325 MB
+ Write Cache : Enabled (write-back)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full rpm,Powered off
+ SSD : No
+ Temperature : Not Supported
+ ----------------------------------------------------------------
+ Device Phy Information
+ ----------------------------------------------------------------
+ Phy #0
+ PHY Identifier : 0
+ SAS Address : 5000
+ Attached PHY Identifier : 4
+ Attached SAS Address : 5000
+ PHY Identifier : 1
+ SAS Address : 5000
+
+
+
+Command completed successfully.
diff --git a/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-pd-old.txt b/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-pd-old.txt
new file mode 100644
index 000000000..2114df6be
--- /dev/null
+++ b/src/go/plugin/go.d/modules/adaptecraid/testdata/getconfig-pd-old.txt
@@ -0,0 +1,107 @@
+Controllers found: 1
+----------------------------------------------------------------------
+Physical Device information
+----------------------------------------------------------------------
+ Device #0
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SATA 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,0(0:0)
+ Reported Location : Connector 0, Device 0
+ Vendor : ATA
+ Model : XF1230-1A0480
+ Firmware : ST200354
+ Serial number : 7CS009RP
+ World-wide name : 5000C500813BF05B
+ Reserved Size : 138008 KB
+ Used Size : 457728 MB
+ Unused Size : 64 KB
+ Total Size : 457862 MB
+ Write Cache : Disabled (write-through)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full power,Powered off
+ SSD : Yes
+ NCQ status : Enabled
+ Device #1
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SATA 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,1(1:0)
+ Reported Location : Connector 0, Device 1
+ Vendor : ATA
+ Model : XF1230-1A0480
+ Firmware : ST200354
+ Serial number : 7CS009RQ
+ World-wide name : 5000C500813BF05C
+ Reserved Size : 138008 KB
+ Used Size : 457728 MB
+ Unused Size : 64 KB
+ Total Size : 457862 MB
+ Write Cache : Disabled (write-through)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full power,Powered off
+ SSD : Yes
+ NCQ status : Enabled
+ Device #2
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SATA 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,2(2:0)
+ Reported Location : Connector 0, Device 2
+ Vendor : ATA
+ Model : XF1230-1A0480
+ Firmware : ST200354
+ Serial number : 7CS00AAD
+ World-wide name : 5000C500813BF320
+ Reserved Size : 138008 KB
+ Used Size : 457728 MB
+ Unused Size : 64 KB
+ Total Size : 457862 MB
+ Write Cache : Disabled (write-through)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full power,Powered off
+ SSD : Yes
+ NCQ status : Enabled
+ Device #3
+ Device is a Hard drive
+ State : Online
+ Block Size : 512 Bytes
+ Supported : Yes
+ Transfer Speed : SATA 6.0 Gb/s
+ Reported Channel,Device(T:L) : 0,3(3:0)
+ Reported Location : Connector 0, Device 3
+ Vendor : ATA
+ Model : XF1230-1A0480
+ Firmware : ST200354
+ Serial number : 7CS00AAH
+ World-wide name : 5000C500813BF324
+ Reserved Size : 138008 KB
+ Used Size : 457728 MB
+ Unused Size : 64 KB
+ Total Size : 457862 MB
+ Write Cache : Disabled (write-through)
+ FRU : None
+ S.M.A.R.T. : No
+ S.M.A.R.T. warnings : 0
+ Power State : Full rpm
+ Supported Power States : Full power,Powered off
+ SSD : Yes
+ NCQ status : Enabled
+
+
+Command completed successfully.
diff --git a/src/go/plugin/go.d/modules/ap/README.md b/src/go/plugin/go.d/modules/ap/README.md
new file mode 120000
index 000000000..5b6e75130
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/README.md
@@ -0,0 +1 @@
+integrations/access_points.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/ap/ap.go b/src/go/plugin/go.d/modules/ap/ap.go
new file mode 100644
index 000000000..93dd06d08
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/ap.go
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ap
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("ap", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *AP {
+ return &AP{
+ Config: Config{
+ BinaryPath: "/usr/sbin/iw",
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ seenIfaces: make(map[string]*iwInterface),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ BinaryPath string `yaml:"binary_path,omitempty" json:"binary_path"`
+}
+
+type (
+ AP struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec iwBinary
+
+ seenIfaces map[string]*iwInterface
+ }
+ iwBinary interface {
+ devices() ([]byte, error)
+ stationStatistics(ifaceName string) ([]byte, error)
+ }
+)
+
+func (a *AP) Configuration() any {
+ return a.Config
+}
+
+func (a *AP) Init() error {
+ if err := a.validateConfig(); err != nil {
+ a.Errorf("config validation: %s", err)
+ return err
+ }
+
+ iw, err := a.initIwExec()
+ if err != nil {
+ a.Errorf("iw dev exec initialization: %v", err)
+ return err
+ }
+ a.exec = iw
+
+ return nil
+}
+
+func (a *AP) Check() error {
+ mx, err := a.collect()
+ if err != nil {
+ a.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (a *AP) Charts() *module.Charts {
+ return a.charts
+}
+
+func (a *AP) Collect() map[string]int64 {
+ mx, err := a.collect()
+ if err != nil {
+ a.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (a *AP) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/ap/ap_test.go b/src/go/plugin/go.d/modules/ap/ap_test.go
new file mode 100644
index 000000000..237e00e9e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/ap_test.go
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ap
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataIwDevManaged, _ = os.ReadFile("testdata/iw_dev_managed.txt")
+
+ dataIwDevAP, _ = os.ReadFile("testdata/iw_dev_ap.txt")
+ dataIwStationDump, _ = os.ReadFile("testdata/station_dump.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataIwDevManaged": dataIwDevManaged,
+ "dataIwDevAP": dataIwDevAP,
+ "dataIwStationDump": dataIwStationDump,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestAP_Configuration(t *testing.T) {
+ module.TestConfigurationSerialize(t, &AP{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestAP_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if 'binary_path' is not set": {
+ wantFail: true,
+ config: Config{
+ BinaryPath: "",
+ },
+ },
+ "fails if failed to find binary": {
+ wantFail: true,
+ config: Config{
+ BinaryPath: "iw!!!",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pf := New()
+ pf.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, pf.Init())
+ } else {
+ assert.NoError(t, pf.Init())
+ }
+ })
+ }
+}
+
+func TestAP_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *AP
+ }{
+ "not initialized exec": {
+ prepare: func() *AP {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *AP {
+ ap := New()
+ ap.exec = prepareMockOk()
+ _ = ap.Check()
+ return ap
+ },
+ },
+ "after collect": {
+ prepare: func() *AP {
+ ap := New()
+ ap.exec = prepareMockOk()
+ _ = ap.Collect()
+ return ap
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pf := test.prepare()
+
+ assert.NotPanics(t, pf.Cleanup)
+ })
+ }
+}
+
+func TestAP_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestAP_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockIwExec
+ wantFail bool
+ }{
+ "success case": {
+ wantFail: false,
+ prepareMock: prepareMockOk,
+ },
+ "no ap devices": {
+ wantFail: true,
+ prepareMock: prepareMockNoAPDevices,
+ },
+ "error on devices call": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnDevices,
+ },
+ "error on station stats call": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnStationStats,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ap := New()
+ ap.exec = test.prepareMock()
+
+ if test.wantFail {
+ assert.Error(t, ap.Check())
+ } else {
+ assert.NoError(t, ap.Check())
+ }
+ })
+ }
+}
+
+func TestAP_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockIwExec
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success case": {
+ prepareMock: prepareMockOk,
+ wantCharts: len(apChartsTmpl) * 2,
+ wantMetrics: map[string]int64{
+ "ap_wlp1s0_testing_average_signal": -34000,
+ "ap_wlp1s0_testing_bitrate_receive": 65500,
+ "ap_wlp1s0_testing_bitrate_transmit": 65000,
+ "ap_wlp1s0_testing_bw_received": 95117,
+ "ap_wlp1s0_testing_bw_sent": 8270,
+ "ap_wlp1s0_testing_clients": 2,
+ "ap_wlp1s0_testing_issues_failures": 1,
+ "ap_wlp1s0_testing_issues_retries": 1,
+ "ap_wlp1s0_testing_packets_received": 2531,
+ "ap_wlp1s0_testing_packets_sent": 38,
+ "ap_wlp1s1_testing_average_signal": -34000,
+ "ap_wlp1s1_testing_bitrate_receive": 65500,
+ "ap_wlp1s1_testing_bitrate_transmit": 65000,
+ "ap_wlp1s1_testing_bw_received": 95117,
+ "ap_wlp1s1_testing_bw_sent": 8270,
+ "ap_wlp1s1_testing_clients": 2,
+ "ap_wlp1s1_testing_issues_failures": 1,
+ "ap_wlp1s1_testing_issues_retries": 1,
+ "ap_wlp1s1_testing_packets_received": 2531,
+ "ap_wlp1s1_testing_packets_sent": 38,
+ },
+ },
+ "no ap devices": {
+ prepareMock: prepareMockNoAPDevices,
+ wantMetrics: nil,
+ },
+ "error on devices call": {
+ prepareMock: prepareMockErrOnDevices,
+ wantMetrics: nil,
+ },
+ "error on statis stats call": {
+ prepareMock: prepareMockErrOnStationStats,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ap := New()
+ ap.exec = test.prepareMock()
+
+ mx := ap.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ assert.Equal(t, test.wantCharts, len(*ap.Charts()), "Charts")
+ testMetricsHasAllChartsDims(t, ap, mx)
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, ap *AP, mx map[string]int64) {
+ for _, chart := range *ap.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareMockOk() *mockIwExec {
+ return &mockIwExec{
+ devicesData: dataIwDevAP,
+ stationStatsData: dataIwStationDump,
+ }
+}
+
+func prepareMockNoAPDevices() *mockIwExec {
+ return &mockIwExec{
+ devicesData: dataIwDevManaged,
+ }
+}
+
+func prepareMockErrOnDevices() *mockIwExec {
+ return &mockIwExec{
+ errOnDevices: true,
+ }
+}
+
+func prepareMockErrOnStationStats() *mockIwExec {
+ return &mockIwExec{
+ devicesData: dataIwDevAP,
+ errOnStationStats: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockIwExec {
+ return &mockIwExec{
+ devicesData: []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`),
+ }
+}
+
+type mockIwExec struct {
+ errOnDevices bool
+ errOnStationStats bool
+ devicesData []byte
+ stationStatsData []byte
+}
+
+func (m *mockIwExec) devices() ([]byte, error) {
+ if m.errOnDevices {
+ return nil, errors.New("mock.devices() error")
+ }
+
+ return m.devicesData, nil
+}
+
+func (m *mockIwExec) stationStatistics(_ string) ([]byte, error) {
+ if m.errOnStationStats {
+ return nil, errors.New("mock.stationStatistics() error")
+ }
+ return m.stationStatsData, nil
+}
diff --git a/src/go/plugin/go.d/modules/ap/charts.go b/src/go/plugin/go.d/modules/ap/charts.go
new file mode 100644
index 000000000..b8c51c433
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/charts.go
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ap
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioClients = module.Priority + iota
+ prioBandwidth
+ prioPackets
+ prioIssues
+ prioSignal
+ prioBitrate
+)
+
+var apChartsTmpl = module.Charts{
+ apClientsChartTmpl.Copy(),
+ apBandwidthChartTmpl.Copy(),
+ apPacketsChartTmpl.Copy(),
+ apIssuesChartTmpl.Copy(),
+ apSignalChartTmpl.Copy(),
+ apBitrateChartTmpl.Copy(),
+}
+
+var (
+ apClientsChartTmpl = module.Chart{
+ ID: "ap_%s_%s_clients",
+ Title: "Connected clients",
+ Fam: "clients",
+ Units: "clients",
+ Ctx: "ap.clients",
+ Type: module.Line,
+ Priority: prioClients,
+ Dims: module.Dims{
+ {ID: "ap_%s_%s_clients", Name: "clients"},
+ },
+ }
+
+ apBandwidthChartTmpl = module.Chart{
+ ID: "ap_%s_%s_bandwidth",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "traffic",
+ Ctx: "ap.net",
+ Type: module.Area,
+ Priority: prioBandwidth,
+ Dims: module.Dims{
+ {ID: "ap_%s_%s_bw_received", Name: "received", Algo: module.Incremental, Mul: 8, Div: 1000},
+ {ID: "ap_%s_%s_bw_sent", Name: "sent", Algo: module.Incremental, Mul: -8, Div: 1000},
+ },
+ }
+
+ apPacketsChartTmpl = module.Chart{
+ ID: "ap_%s_%s_packets",
+ Title: "Packets",
+ Fam: "packets",
+ Units: "packets/s",
+ Ctx: "ap.packets",
+ Type: module.Line,
+ Priority: prioPackets,
+ Dims: module.Dims{
+ {ID: "ap_%s_%s_packets_received", Name: "received", Algo: module.Incremental},
+ {ID: "ap_%s_%s_packets_sent", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+
+ apIssuesChartTmpl = module.Chart{
+ ID: "ap_%s_%s_issues",
+ Title: "Transmit issues",
+ Fam: "issues",
+ Units: "issues/s",
+ Ctx: "ap.issues",
+ Type: module.Line,
+ Priority: prioIssues,
+ Dims: module.Dims{
+ {ID: "ap_%s_%s_issues_retries", Name: "tx retries", Algo: module.Incremental},
+ {ID: "ap_%s_%s_issues_failures", Name: "tx failures", Algo: module.Incremental, Mul: -1},
+ },
+ }
+
+ apSignalChartTmpl = module.Chart{
+ ID: "ap_%s_%s_signal",
+ Title: "Average Signal",
+ Units: "dBm",
+ Fam: "signal",
+ Ctx: "ap.signal",
+ Type: module.Line,
+ Priority: prioSignal,
+ Dims: module.Dims{
+ {ID: "ap_%s_%s_average_signal", Name: "average signal", Div: precision},
+ },
+ }
+
+ apBitrateChartTmpl = module.Chart{
+ ID: "ap_%s_%s_bitrate",
+ Title: "Bitrate",
+ Units: "Mbps",
+ Fam: "bitrate",
+ Ctx: "ap.bitrate",
+ Type: module.Line,
+ Priority: prioBitrate,
+ Dims: module.Dims{
+ {ID: "ap_%s_%s_bitrate_receive", Name: "receive", Div: precision},
+ {ID: "ap_%s_%s_bitrate_transmit", Name: "transmit", Mul: -1, Div: precision},
+ },
+ }
+)
+
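+// addInterfaceCharts instantiates the per-interface chart templates. Chart IDs use the sanitized
+// SSID (cleanSSID), while dimension IDs keep the raw SSID so they match the metric keys built in collect().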
+func (a *AP) addInterfaceCharts(dev *iwInterface) {
+ charts := apChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, dev.name, cleanSSID(dev.ssid))
+ chart.Labels = []module.Label{
+ {Key: "device", Value: dev.name},
+ {Key: "ssid", Value: dev.ssid},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, dev.name, dev.ssid)
+ }
+ }
+
+ if err := a.Charts().Add(*charts...); err != nil {
+ a.Warning(err)
+ }
+
+}
+
+func (a *AP) removeInterfaceCharts(dev *iwInterface) {
+ px := fmt.Sprintf("ap_%s_%s_", dev.name, cleanSSID(dev.ssid))
+ for _, chart := range *a.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func cleanSSID(ssid string) string {
+ r := strings.NewReplacer(" ", "_", ".", "_")
+ return r.Replace(ssid)
+}
diff --git a/src/go/plugin/go.d/modules/ap/collect.go b/src/go/plugin/go.d/modules/ap/collect.go
new file mode 100644
index 000000000..ba32f3ef7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/collect.go
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ap
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+const precision = 1000
+
+type iwInterface struct {
+ name string
+ ssid string
+ typ string
+}
+
+type stationStats struct {
+ clients int64
+ rxBytes int64
+ rxPackets int64
+ txBytes int64
+ txPackets int64
+ txRetries int64
+ txFailed int64
+ signalAvg int64
+ txBitrate float64
+ rxBitrate float64
+}
+
+func (a *AP) collect() (map[string]int64, error) {
+ bs, err := a.exec.devices()
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: call this periodically, not on every data collection
+ apInterfaces, err := parseIwDevices(bs)
+ if err != nil {
+ return nil, fmt.Errorf("parsing AP interfaces: %v", err)
+ }
+
+ if len(apInterfaces) == 0 {
+ return nil, errors.New("no type AP interfaces found")
+ }
+
+ mx := make(map[string]int64)
+ seen := make(map[string]bool)
+
+ for _, iface := range apInterfaces {
+ bs, err = a.exec.stationStatistics(iface.name)
+ if err != nil {
+ return nil, fmt.Errorf("getting station statistics for %s: %v", iface, err)
+ }
+
+ stats, err := parseIwStationStatistics(bs)
+ if err != nil {
+ return nil, fmt.Errorf("parsing station statistics for %s: %v", iface, err)
+ }
+
+ key := fmt.Sprintf("%s-%s", iface.name, iface.ssid)
+
+ seen[key] = true
+
+ if _, ok := a.seenIfaces[key]; !ok {
+ a.seenIfaces[key] = iface
+ a.addInterfaceCharts(iface)
+ }
+
+ px := fmt.Sprintf("ap_%s_%s_", iface.name, iface.ssid)
+
+ mx[px+"clients"] = stats.clients
+ mx[px+"bw_received"] = stats.rxBytes
+ mx[px+"bw_sent"] = stats.txBytes
+ mx[px+"packets_received"] = stats.rxPackets
+ mx[px+"packets_sent"] = stats.txPackets
+ mx[px+"issues_retries"] = stats.txRetries
+ mx[px+"issues_failures"] = stats.txFailed
+ mx[px+"average_signal"], mx[px+"bitrate_receive"], mx[px+"bitrate_transmit"] = 0, 0, 0
+ if clients := float64(stats.clients); clients > 0 {
+ mx[px+"average_signal"] = int64(float64(stats.signalAvg) / clients * precision)
+ mx[px+"bitrate_receive"] = int64(stats.rxBitrate / clients * precision)
+ mx[px+"bitrate_transmit"] = int64(stats.txBitrate / clients * precision)
+ }
+ }
+
+ for key, iface := range a.seenIfaces {
+ if !seen[key] {
+ delete(a.seenIfaces, key)
+ a.removeInterfaceCharts(iface)
+ }
+ }
+
+ return mx, nil
+}
+
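+// parseIwDevices parses `iw dev` output and returns only the interfaces whose type is "AP".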
+func parseIwDevices(resp []byte) ([]*iwInterface, error) {
+ ifaces := make(map[string]*iwInterface)
+ var iface *iwInterface
+
+ sc := bufio.NewScanner(bytes.NewReader(resp))
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+
+ switch {
+ case strings.HasPrefix(line, "Interface"):
+ parts := strings.Fields(line)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid interface line: '%s'", line)
+ }
+ name := parts[1]
+ if _, ok := ifaces[name]; !ok {
+ iface = &iwInterface{name: name}
+ ifaces[name] = iface
+ }
+ case strings.HasPrefix(line, "ssid") && iface != nil:
+ parts := strings.Fields(line)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid ssid line: '%s'", line)
+ }
+ iface.ssid = parts[1]
+ case strings.HasPrefix(line, "type") && iface != nil:
+ parts := strings.Fields(line)
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("invalid type line: '%s'", line)
+ }
+ iface.typ = parts[1]
+ }
+ }
+
+ var apIfaces []*iwInterface
+
+ for _, iface := range ifaces {
+ if strings.ToLower(iface.typ) == "ap" {
+ apIfaces = append(apIfaces, iface)
+ }
+ }
+
+ return apIfaces, nil
+}
+
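+// parseIwStationStatistics aggregates `iw <iface> station dump` output across all connected
+// stations: byte/packet/retry/failure counters are summed, while per-station signal and bitrate
+// values are summed here and averaged by client count in collect().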
+func parseIwStationStatistics(resp []byte) (*stationStats, error) {
+ var stats stationStats
+
+ sc := bufio.NewScanner(bytes.NewReader(resp))
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+
+ var v float64
+ var err error
+
+ switch {
+ case strings.HasPrefix(line, "Station"):
+ stats.clients++
+ case strings.HasPrefix(line, "rx bytes:"):
+ if v, err = get3rdValue(line); err == nil {
+ stats.rxBytes += int64(v)
+ }
+ case strings.HasPrefix(line, "rx packets:"):
+ if v, err = get3rdValue(line); err == nil {
+ stats.rxPackets += int64(v)
+ }
+ case strings.HasPrefix(line, "tx bytes:"):
+ if v, err = get3rdValue(line); err == nil {
+ stats.txBytes += int64(v)
+ }
+ case strings.HasPrefix(line, "tx packets:"):
+ if v, err = get3rdValue(line); err == nil {
+ stats.txPackets += int64(v)
+ }
+ case strings.HasPrefix(line, "tx retries:"):
+ if v, err = get3rdValue(line); err == nil {
+ stats.txRetries += int64(v)
+ }
+ case strings.HasPrefix(line, "tx failed:"):
+ if v, err = get3rdValue(line); err == nil {
+ stats.txFailed += int64(v)
+ }
+ case strings.HasPrefix(line, "signal avg:"):
+ if v, err = get3rdValue(line); err == nil {
+ stats.signalAvg += int64(v)
+ }
+ case strings.HasPrefix(line, "tx bitrate:"):
+ if v, err = get3rdValue(line); err == nil {
+ stats.txBitrate += v
+ }
+ case strings.HasPrefix(line, "rx bitrate:"):
+ if v, err = get3rdValue(line); err == nil {
+ stats.rxBitrate += v
+ }
+ default:
+ continue
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("parsing line '%s': %v", line, err)
+ }
+ }
+
+ return &stats, nil
+}
+
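+// get3rdValue returns the third whitespace-separated field of a line as a float,
+// e.g. get3rdValue("rx bitrate:     65.5 MBit/s") returns 65.5; a "-" value yields 0.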
+func get3rdValue(line string) (float64, error) {
+ parts := strings.Fields(line)
+ if len(parts) < 3 {
+ return 0.0, errors.New("invalid format")
+ }
+
+ v := parts[2]
+
+ if v == "-" {
+ return 0.0, nil
+ }
+ return strconv.ParseFloat(v, 64)
+}
diff --git a/src/go/plugin/go.d/modules/ap/config_schema.json b/src/go/plugin/go.d/modules/ap/config_schema.json
new file mode 100644
index 000000000..4566247f1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/config_schema.json
@@ -0,0 +1,47 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Access Point collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "binary_path": {
+ "title": "Binary path",
+ "description": "Path to the `iw` binary.",
+ "type": "string",
+ "default": "/usr/sbin/iw"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "required": [
+ "binary_path"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "binary_path": {
+ "ui:help": "If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/ap/exec.go b/src/go/plugin/go.d/modules/ap/exec.go
new file mode 100644
index 000000000..8c25f6777
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/exec.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ap
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newIwExec(binPath string, timeout time.Duration) *iwCliExec {
+ return &iwCliExec{
+ binPath: binPath,
+ timeout: timeout,
+ }
+}
+
+type iwCliExec struct {
+ *logger.Logger
+
+ binPath string
+ timeout time.Duration
+}
+
+func (e *iwCliExec) devices() ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.binPath, "dev")
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
+
+func (e *iwCliExec) stationStatistics(ifaceName string) ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.binPath, ifaceName, "station", "dump")
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/ap/init.go b/src/go/plugin/go.d/modules/ap/init.go
new file mode 100644
index 000000000..6031f6caa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/init.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ap
+
+import (
+ "errors"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func (a *AP) validateConfig() error {
+ if a.BinaryPath == "" {
+ return errors.New("no iw binary path specified")
+ }
+ return nil
+}
+
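+// initIwExec resolves the configured `iw` binary path (relative paths are looked
+// up in PATH), verifies that the file exists, and returns the command executor.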
+func (a *AP) initIwExec() (iwBinary, error) {
+ binPath := a.BinaryPath
+
+ if !strings.HasPrefix(binPath, "/") {
+ path, err := exec.LookPath(binPath)
+ if err != nil {
+ return nil, err
+ }
+ binPath = path
+ }
+
+ if _, err := os.Stat(binPath); err != nil {
+ return nil, err
+ }
+
+ iw := newIwExec(binPath, a.Timeout.Duration())
+
+ return iw, nil
+}
diff --git a/src/go/plugin/go.d/modules/ap/integrations/access_points.md b/src/go/plugin/go.d/modules/ap/integrations/access_points.md
new file mode 100644
index 000000000..fa2134ed1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/integrations/access_points.md
@@ -0,0 +1,202 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/ap/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/ap/metadata.yaml"
+sidebar_label: "Access Points"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Linux Systems/Network"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Access Points
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: ap
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors various wireless access point metrics like connected clients, bandwidth, packets, transmit issues, signal strength, and bitrate for each device and its associated SSID.
+
+
+This collector uses the `iw` command-line utility to monitor wireless interfaces operating in access point (AP) mode on the local machine. It starts by running `iw dev`, which lists all wireless interfaces. Then, for each interface identified as an access point (type AP), it executes the `iw INTERFACE station dump` command to gather per-station metrics.
+
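+To see the raw data the collector parses, you can run the same commands manually (the interface name below is just an example):
+
+```bash
+iw dev                  # list wireless interfaces; AP-mode interfaces show "type AP"
+iw wlp1s0 station dump  # per-station counters for an AP-mode interface
+```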
+
+This collector is only supported on the following platforms:
+
+- Linux
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector auto-detects wireless interfaces operating in access point (AP) mode on your Linux machine.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per wireless device
+
+These metrics refer to the wireless interface (access point).
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | Wireless interface name |
+| ssid | SSID |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ap.clients | clients | clients |
+| ap.net | received, sent | kilobits/s |
+| ap.packets | received, sent | packets/s |
+| ap.issues | retries, failures | issues/s |
+| ap.signal | average signal | dBm |
+| ap.bitrate | receive, transmit | Mbps |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### `iw` utility.
+
+Make sure the `iw` utility is installed.
+
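+On most Linux distributions it is available as the `iw` package, for example:
+
+```bash
+sudo apt-get install iw   # Debian/Ubuntu
+sudo dnf install iw       # Fedora/RHEL
+```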
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/ap.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/ap.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| binary_path | Path to the `iw` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/iw | yes |
+| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom binary path
+
+The executable is not in the directories specified in the PATH environment variable.
+
+```yaml
+jobs:
+ - name: custom_iw
+ binary_path: /usr/local/sbin/iw
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `ap` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m ap
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `ap` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep ap
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep ap /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep ap
+```
+
+
diff --git a/src/go/plugin/go.d/modules/ap/metadata.yaml b/src/go/plugin/go.d/modules/ap/metadata.yaml
new file mode 100644
index 000000000..848684d30
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/metadata.yaml
@@ -0,0 +1,141 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ plugin_name: go.d.plugin
+ module_name: ap
+ monitored_instance:
+ name: Access Points
+ link: ""
+ categories:
+ - data-collection.linux-systems.network-metrics
+ icon_filename: "network-wired.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - ap
+ - access
+ - point
+ - wireless
+ - network
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors various wireless access point metrics like connected clients, bandwidth, packets, transmit issues, signal strength, and bitrate for each device and its associated SSID.
+ method_description: >
+ This collector uses the `iw` command-line utility to monitor wireless interfaces operating in access point (AP) mode on the local machine.
+ It starts by running `iw dev`, which lists all wireless interfaces.
+ Then, for each interface identified as an access point (type AP), the `iw INTERFACE station dump` command is executed to gather per-station metrics.
+ supported_platforms:
+ include: [Linux]
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "The plugin is able to auto-detect any access points on your Linux machine."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "`iw` utility."
+ description: "Make sure the `iw` utility is installed."
+ configuration:
+ file:
+ name: go.d/ap.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: binary_path
+ description: Path to the `iw` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable.
+ default_value: /usr/sbin/iw
+ required: true
+ - name: timeout
+ description: Timeout for executing the binary, specified in seconds.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: ""
+ enabled: false
+ list:
+ - name: Custom binary path
+ description: The executable is not in the directories specified in the PATH environment variable.
+ config: |
+ jobs:
+ - name: custom_iw
+ binary_path: /usr/local/sbin/iw
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: wireless device
+ description: "These metrics refer to the entire monitored application."
+ labels:
+ - name: device
+ description: Wireless interface name
+ - name: ssid
+ description: SSID
+ metrics:
+ - name: ap.clients
+ description: Connected clients
+ unit: "clients"
+ chart_type: line
+ dimensions:
+ - name: clients
+ - name: ap.net
+ description: Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: ap.packets
+ description: Packets
+ unit: "packets/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: ap.issues
+ description: Transmit Issues
+ unit: "issues/s"
+ chart_type: line
+ dimensions:
+ - name: retries
+ - name: failures
+ - name: ap.signal
+ description: Average Signal
+ unit: "dBm"
+ chart_type: line
+ dimensions:
+ - name: average signal
+ - name: ap.bitrate
+ description: Bitrate
+ unit: "Mbps"
+ chart_type: line
+ dimensions:
+ - name: receive
+ - name: transmit
diff --git a/src/go/plugin/go.d/modules/ap/testdata/config.json b/src/go/plugin/go.d/modules/ap/testdata/config.json
new file mode 100644
index 000000000..095713193
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "binary_path": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/ap/testdata/config.yaml b/src/go/plugin/go.d/modules/ap/testdata/config.yaml
new file mode 100644
index 000000000..baf3bcd0b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+timeout: 123.123
+binary_path: "ok"
diff --git a/src/go/plugin/go.d/modules/ap/testdata/iw_dev_ap.txt b/src/go/plugin/go.d/modules/ap/testdata/iw_dev_ap.txt
new file mode 100644
index 000000000..0b1e40779
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/testdata/iw_dev_ap.txt
@@ -0,0 +1,25 @@
+phy#0
+ Interface wlp1s0
+ ifindex 2
+ wdev 0x1
+ addr 28:cd:c4:b8:63:cb
+ ssid testing
+ type AP
+ channel 1 (2412 MHz), width: 20 MHz, center1: 2412 MHz
+ txpower 20.00 dBm
+ multicast TXQ:
+ qsz-byt qsz-pkt flows drops marks overlmt hashcol tx-bytes tx-packets
+ 0 0 2 0 0 0 0 16447 226
+
+phy#1
+ Interface wlp1s1
+ ifindex 3
+ wdev 0x1
+ addr 28:cd:c4:b8:63:cc
+ ssid testing
+ type AP
+ channel 1 (2412 MHz), width: 20 MHz, center1: 2412 MHz
+ txpower 20.00 dBm
+ multicast TXQ:
+ qsz-byt qsz-pkt flows drops marks overlmt hashcol tx-bytes tx-packets
+ 0 0 2 0 0 0 0 16447 226
diff --git a/src/go/plugin/go.d/modules/ap/testdata/iw_dev_managed.txt b/src/go/plugin/go.d/modules/ap/testdata/iw_dev_managed.txt
new file mode 100644
index 000000000..5bb09a85f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/testdata/iw_dev_managed.txt
@@ -0,0 +1,11 @@
+phy#0
+ Interface wlp1s0
+ ifindex 2
+ wdev 0x1
+ addr 28:cd:c4:b8:63:cb
+ type managed
+ channel 4 (2427 MHz), width: 20 MHz, center1: 2427 MHz
+ txpower 20.00 dBm
+ multicast TXQ:
+ qsz-byt qsz-pkt flows drops marks overlmt hashcol tx-bytes tx-packets
+ 0 0 0 0 0 0 0 0 0
diff --git a/src/go/plugin/go.d/modules/ap/testdata/station_dump.txt b/src/go/plugin/go.d/modules/ap/testdata/station_dump.txt
new file mode 100644
index 000000000..683a6818d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ap/testdata/station_dump.txt
@@ -0,0 +1,58 @@
+Station 7e:0d:a5:a6:91:2b (on wlp1s0)
+ inactive time: 58264 ms
+ rx bytes: 89675
+ rx packets: 2446
+ tx bytes: 6918
+ tx packets: 30
+ tx retries: 1
+ tx failed: 1
+ rx drop misc: 0
+ signal: -44 [-51, -44] dBm
+ signal avg: -38 [-39, -39] dBm
+ tx bitrate: 65.0 MBit/s MCS 7
+ tx duration: 0 us
+ rx bitrate: 130.0 MBit/s MCS 15
+ rx duration: 0 us
+ authorized: yes
+ authenticated: yes
+ associated: yes
+ preamble: short
+ WMM/WME: yes
+ MFP: no
+ TDLS peer: no
+ DTIM period: 2
+ beacon interval:100
+ short slot time:yes
+ connected time: 796 seconds
+ associated at [boottime]: 12650.576s
+ associated at: 1720705279930 ms
+ current time: 1720706075344 ms
+Station fa:50:db:c1:1c:18 (on wlp1s0)
+ inactive time: 93 ms
+ rx bytes: 5442
+ rx packets: 85
+ tx bytes: 1352
+ tx packets: 8
+ tx retries: 0
+ tx failed: 0
+ rx drop misc: 0
+ signal: -31 [-31, -39] dBm
+ signal avg: -30 [-30, -38] dBm
+ tx bitrate: 65.0 MBit/s MCS 7
+ tx duration: 0 us
+ rx bitrate: 1.0 MBit/s
+ rx duration: 0 us
+ authorized: yes
+ authenticated: yes
+ associated: yes
+ preamble: short
+ WMM/WME: yes
+ MFP: no
+ TDLS peer: no
+ DTIM period: 2
+ beacon interval:100
+ short slot time:yes
+ connected time: 6 seconds
+ associated at [boottime]: 13440.167s
+ associated at: 1720706069520 ms
+ current time: 1720706075344 ms
diff --git a/src/go/plugin/go.d/modules/apache/README.md b/src/go/plugin/go.d/modules/apache/README.md
new file mode 120000
index 000000000..066ee4162
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/README.md
@@ -0,0 +1 @@
+integrations/apache.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/apache/apache.go b/src/go/plugin/go.d/modules/apache/apache.go
new file mode 100644
index 000000000..d0869353d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/apache.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package apache
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("apache", module.Creator{
+ Create: func() module.Module { return New() },
+ JobConfigSchema: configSchema,
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Apache {
+ return &Apache{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1/server-status?auto",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: &module.Charts{},
+ once: &sync.Once{},
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Apache struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ once *sync.Once
+}
+
+func (a *Apache) Configuration() any {
+ return a.Config
+}
+
+func (a *Apache) Init() error {
+ if err := a.validateConfig(); err != nil {
+ a.Errorf("config validation: %v", err)
+ return err
+ }
+
+ httpClient, err := a.initHTTPClient()
+ if err != nil {
+ a.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ a.httpClient = httpClient
+
+ a.Debugf("using URL %s", a.URL)
+ a.Debugf("using timeout: %s", a.Timeout)
+
+ return nil
+}
+
+func (a *Apache) Check() error {
+ mx, err := a.collect()
+ if err != nil {
+ a.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (a *Apache) Charts() *module.Charts {
+ return a.charts
+}
+
+func (a *Apache) Collect() map[string]int64 {
+ mx, err := a.collect()
+ if err != nil {
+ a.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (a *Apache) Cleanup() {
+ if a.httpClient != nil {
+ a.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/apache/apache_test.go b/src/go/plugin/go.d/modules/apache/apache_test.go
new file mode 100644
index 000000000..64fa6ed96
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/apache_test.go
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package apache
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataSimpleStatusMPMEvent, _ = os.ReadFile("testdata/simple-status-mpm-event.txt")
+ dataExtendedStatusMPMEvent, _ = os.ReadFile("testdata/extended-status-mpm-event.txt")
+ dataExtendedStatusMPMPrefork, _ = os.ReadFile("testdata/extended-status-mpm-prefork.txt")
+ dataLighttpdStatus, _ = os.ReadFile("testdata/lighttpd-status.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataSimpleStatusMPMEvent": dataSimpleStatusMPMEvent,
+ "dataExtendedStatusMPMEvent": dataExtendedStatusMPMEvent,
+ "dataExtendedStatusMPMPrefork": dataExtendedStatusMPMPrefork,
+ "dataLighttpdStatus": dataLighttpdStatus,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestApache_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Apache{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestApache_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ "fail when URL has no wantMetrics suffix": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: "http://127.0.0.1:38001"},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ apache := New()
+ apache.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, apache.Init())
+ } else {
+ assert.NoError(t, apache.Init())
+ }
+ })
+ }
+}
+
+func TestApache_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (apache *Apache, cleanup func())
+ }{
+ "success on simple status MPM Event": {
+ wantFail: false,
+ prepare: caseMPMEventSimpleStatus,
+ },
+ "success on extended status MPM Event": {
+ wantFail: false,
+ prepare: caseMPMEventExtendedStatus,
+ },
+ "success on extended status MPM Prefork": {
+ wantFail: false,
+ prepare: caseMPMPreforkExtendedStatus,
+ },
+ "fail on Lighttpd response": {
+ wantFail: true,
+ prepare: caseLighttpdResponse,
+ },
+ "fail on invalid data response": {
+ wantFail: true,
+ prepare: caseInvalidDataResponse,
+ },
+ "fail on connection refused": {
+ wantFail: true,
+ prepare: caseConnectionRefused,
+ },
+ "fail on 404 response": {
+ wantFail: true,
+ prepare: case404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ apache, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, apache.Check())
+ } else {
+ assert.NoError(t, apache.Check())
+ }
+ })
+ }
+}
+
+func TestApache_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestApache_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (apache *Apache, cleanup func())
+ wantNumOfCharts int
+ wantMetrics map[string]int64
+ }{
+ "success on simple status MPM Event": {
+ prepare: caseMPMEventSimpleStatus,
+ wantNumOfCharts: len(baseCharts),
+ wantMetrics: map[string]int64{
+ "busy_workers": 1,
+ "conns_async_closing": 0,
+ "conns_async_keep_alive": 0,
+ "conns_async_writing": 0,
+ "conns_total": 0,
+ "idle_workers": 74,
+ "scoreboard_closing": 0,
+ "scoreboard_dns_lookup": 0,
+ "scoreboard_finishing": 0,
+ "scoreboard_idle_cleanup": 0,
+ "scoreboard_keepalive": 0,
+ "scoreboard_logging": 0,
+ "scoreboard_open": 325,
+ "scoreboard_reading": 0,
+ "scoreboard_sending": 1,
+ "scoreboard_starting": 0,
+ "scoreboard_waiting": 74,
+ },
+ },
+ "success on extended status MPM Event": {
+ prepare: caseMPMEventExtendedStatus,
+ wantNumOfCharts: len(baseCharts) + len(extendedCharts),
+ wantMetrics: map[string]int64{
+ "busy_workers": 1,
+ "bytes_per_req": 136533000,
+ "bytes_per_sec": 4800000,
+ "conns_async_closing": 0,
+ "conns_async_keep_alive": 0,
+ "conns_async_writing": 0,
+ "conns_total": 0,
+ "idle_workers": 99,
+ "req_per_sec": 3515,
+ "scoreboard_closing": 0,
+ "scoreboard_dns_lookup": 0,
+ "scoreboard_finishing": 0,
+ "scoreboard_idle_cleanup": 0,
+ "scoreboard_keepalive": 0,
+ "scoreboard_logging": 0,
+ "scoreboard_open": 300,
+ "scoreboard_reading": 0,
+ "scoreboard_sending": 1,
+ "scoreboard_starting": 0,
+ "scoreboard_waiting": 99,
+ "total_accesses": 9,
+ "total_kBytes": 12,
+ "uptime": 256,
+ },
+ },
+ "success on extended status MPM Prefork": {
+ prepare: caseMPMPreforkExtendedStatus,
+ wantNumOfCharts: len(baseCharts) + len(extendedCharts) - 2,
+ wantMetrics: map[string]int64{
+ "busy_workers": 70,
+ "bytes_per_req": 3617880000,
+ "bytes_per_sec": 614250000000,
+ "idle_workers": 1037,
+ "req_per_sec": 16978100,
+ "scoreboard_closing": 0,
+ "scoreboard_dns_lookup": 0,
+ "scoreboard_finishing": 0,
+ "scoreboard_idle_cleanup": 0,
+ "scoreboard_keepalive": 0,
+ "scoreboard_logging": 0,
+ "scoreboard_open": 3,
+ "scoreboard_reading": 0,
+ "scoreboard_sending": 0,
+ "scoreboard_starting": 0,
+ "scoreboard_waiting": 3,
+ "total_accesses": 120358784,
+ "total_kBytes": 4252382776,
+ "uptime": 708904,
+ },
+ },
+ "fail on Lighttpd response": {
+ prepare: caseLighttpdResponse,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on invalid data response": {
+ prepare: caseInvalidDataResponse,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on connection refused": {
+ prepare: caseConnectionRefused,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on 404 response": {
+ prepare: case404,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ apache, cleanup := test.prepare(t)
+ defer cleanup()
+
+ _ = apache.Check()
+
+ collected := apache.Collect()
+
+ require.Equal(t, test.wantMetrics, collected)
+ assert.Equal(t, test.wantNumOfCharts, len(*apache.Charts()))
+ })
+ }
+}
+
+func caseMPMEventSimpleStatus(t *testing.T) (*Apache, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataSimpleStatusMPMEvent)
+ }))
+ apache := New()
+ apache.URL = srv.URL + "/server-status?auto"
+ require.NoError(t, apache.Init())
+
+ return apache, srv.Close
+}
+
+func caseMPMEventExtendedStatus(t *testing.T) (*Apache, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataExtendedStatusMPMEvent)
+ }))
+ apache := New()
+ apache.URL = srv.URL + "/server-status?auto"
+ require.NoError(t, apache.Init())
+
+ return apache, srv.Close
+}
+
+func caseMPMPreforkExtendedStatus(t *testing.T) (*Apache, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataExtendedStatusMPMPrefork)
+ }))
+ apache := New()
+ apache.URL = srv.URL + "/server-status?auto"
+ require.NoError(t, apache.Init())
+
+ return apache, srv.Close
+}
+
+func caseLighttpdResponse(t *testing.T) (*Apache, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataLighttpdStatus)
+ }))
+ apache := New()
+ apache.URL = srv.URL + "/server-status?auto"
+ require.NoError(t, apache.Init())
+
+ return apache, srv.Close
+}
+
+func caseInvalidDataResponse(t *testing.T) (*Apache, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ apache := New()
+ apache.URL = srv.URL + "/server-status?auto"
+ require.NoError(t, apache.Init())
+
+ return apache, srv.Close
+}
+
+func caseConnectionRefused(t *testing.T) (*Apache, func()) {
+ t.Helper()
+ apache := New()
+ apache.URL = "http://127.0.0.1:65001/server-status?auto"
+ require.NoError(t, apache.Init())
+
+ return apache, func() {}
+}
+
+func case404(t *testing.T) (*Apache, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ apache := New()
+ apache.URL = srv.URL + "/server-status?auto"
+ require.NoError(t, apache.Init())
+
+ return apache, srv.Close
+}
diff --git a/src/go/plugin/go.d/modules/apache/charts.go b/src/go/plugin/go.d/modules/apache/charts.go
new file mode 100644
index 000000000..ad83112d2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/charts.go
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package apache
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+const (
+ prioRequests = module.Priority + iota
+ prioConnection
+ prioConnsAsync
+ prioScoreboard
+ prioNet
+ prioWorkers
+ prioReqPerSec
+ prioBytesPerSec
+ prioBytesPerReq
+ prioUptime
+)
+
+var baseCharts = module.Charts{
+ chartConnections.Copy(),
+ chartConnsAsync.Copy(),
+ chartWorkers.Copy(),
+ chartScoreboard.Copy(),
+}
+
+var extendedCharts = module.Charts{
+ chartRequests.Copy(),
+ chartBandwidth.Copy(),
+ chartReqPerSec.Copy(),
+ chartBytesPerSec.Copy(),
+ chartBytesPerReq.Copy(),
+ chartUptime.Copy(),
+}
+
+func newCharts(s *serverStatus) *module.Charts {
+ charts := baseCharts.Copy()
+
+ // ServerMPM: prefork
+ if s.Connections.Total == nil {
+ _ = charts.Remove(chartConnections.ID)
+ }
+ if s.Connections.Async.KeepAlive == nil {
+ _ = charts.Remove(chartConnsAsync.ID)
+ }
+
+ if s.Total.Accesses != nil {
+ _ = charts.Add(*extendedCharts.Copy()...)
+ }
+
+ return charts
+}
+
+// simple status
+var (
+ chartConnections = module.Chart{
+ ID: "connections",
+ Title: "Connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "apache.connections",
+ Priority: prioConnection,
+ Dims: module.Dims{
+ {ID: "conns_total", Name: "connections"},
+ },
+ }
+ chartConnsAsync = module.Chart{
+ ID: "conns_async",
+ Title: "Async Connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "apache.conns_async",
+ Type: module.Stacked,
+ Priority: prioConnsAsync,
+ Dims: module.Dims{
+ {ID: "conns_async_keep_alive", Name: "keepalive"},
+ {ID: "conns_async_closing", Name: "closing"},
+ {ID: "conns_async_writing", Name: "writing"},
+ },
+ }
+ chartWorkers = module.Chart{
+ ID: "workers",
+ Title: "Workers Threads",
+ Units: "workers",
+ Fam: "workers",
+ Ctx: "apache.workers",
+ Type: module.Stacked,
+ Priority: prioWorkers,
+ Dims: module.Dims{
+ {ID: "idle_workers", Name: "idle"},
+ {ID: "busy_workers", Name: "busy"},
+ },
+ }
+ chartScoreboard = module.Chart{
+ ID: "scoreboard",
+ Title: "Scoreboard",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "apache.scoreboard",
+ Priority: prioScoreboard,
+ Dims: module.Dims{
+ {ID: "scoreboard_waiting", Name: "waiting"},
+ {ID: "scoreboard_starting", Name: "starting"},
+ {ID: "scoreboard_reading", Name: "reading"},
+ {ID: "scoreboard_sending", Name: "sending"},
+ {ID: "scoreboard_keepalive", Name: "keepalive"},
+ {ID: "scoreboard_dns_lookup", Name: "dns_lookup"},
+ {ID: "scoreboard_closing", Name: "closing"},
+ {ID: "scoreboard_logging", Name: "logging"},
+ {ID: "scoreboard_finishing", Name: "finishing"},
+ {ID: "scoreboard_idle_cleanup", Name: "idle_cleanup"},
+ {ID: "scoreboard_open", Name: "open"},
+ },
+ }
+)
+
+// extended status
+var (
+ chartRequests = module.Chart{
+ ID: "requests",
+ Title: "Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "apache.requests",
+ Priority: prioRequests,
+ Dims: module.Dims{
+ {ID: "total_accesses", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ chartBandwidth = module.Chart{
+ ID: "net",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "bandwidth",
+ Ctx: "apache.net",
+ Type: module.Area,
+ Priority: prioNet,
+ Dims: module.Dims{
+ {ID: "total_kBytes", Name: "sent", Algo: module.Incremental, Mul: 8},
+ },
+ }
+ chartReqPerSec = module.Chart{
+ ID: "reqpersec",
+ Title: "Lifetime Average Number Of Requests Per Second",
+ Units: "requests/s",
+ Fam: "statistics",
+ Ctx: "apache.reqpersec",
+ Type: module.Area,
+ Priority: prioReqPerSec,
+ Dims: module.Dims{
+ {ID: "req_per_sec", Name: "requests", Div: 100000},
+ },
+ }
+ chartBytesPerSec = module.Chart{
+ ID: "bytespersec",
+ Title: "Lifetime Average Number Of Bytes Served Per Second",
+ Units: "KiB/s",
+ Fam: "statistics",
+ Ctx: "apache.bytespersec",
+ Type: module.Area,
+ Priority: prioBytesPerSec,
+ Dims: module.Dims{
+ {ID: "bytes_per_sec", Name: "served", Mul: 8, Div: 1024 * 100000},
+ },
+ }
+ chartBytesPerReq = module.Chart{
+ ID: "bytesperreq",
+ Title: "Lifetime Average Response Size",
+ Units: "KiB",
+ Fam: "statistics",
+ Ctx: "apache.bytesperreq",
+ Type: module.Area,
+ Priority: prioBytesPerReq,
+ Dims: module.Dims{
+ {ID: "bytes_per_req", Name: "size", Div: 1024 * 100000},
+ },
+ }
+ chartUptime = module.Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "availability",
+ Ctx: "apache.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "uptime"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/apache/collect.go b/src/go/plugin/go.d/modules/apache/collect.go
new file mode 100644
index 000000000..79de7722a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/collect.go
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package apache
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (a *Apache) collect() (map[string]int64, error) {
+ status, err := a.scrapeStatus()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := stm.ToMap(status)
+ if len(mx) == 0 {
+ return nil, fmt.Errorf("nothing was collected from %s", a.URL)
+ }
+
+ a.once.Do(func() { a.charts = newCharts(status) })
+
+ return mx, nil
+}
+
+func (a *Apache) scrapeStatus() (*serverStatus, error) {
+ req, err := web.NewHTTPRequest(a.Request)
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := a.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ return parseResponse(resp.Body)
+}
+
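+// parseResponse reads the machine-readable status page line by line as "key: value"
+// pairs, for example (abridged):
+//
+//	BusyWorkers: 1
+//	IdleWorkers: 74
+//	ConnsTotal: 0
+//	Scoreboard: _W_____........
+//
+// Keys reported only by Lighttpd ("BusyServers"/"IdleServers") abort parsing with an error.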
+func parseResponse(r io.Reader) (*serverStatus, error) {
+ s := bufio.NewScanner(r)
+ var status serverStatus
+
+ for s.Scan() {
+ parts := strings.Split(s.Text(), ":")
+ if len(parts) != 2 {
+ continue
+ }
+
+ key, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
+
+ switch key {
+ default:
+ case "BusyServers", "IdleServers":
+ return nil, fmt.Errorf("found '%s', Lighttpd data", key)
+ case "BusyWorkers":
+ status.Workers.Busy = parseInt(value)
+ case "IdleWorkers":
+ status.Workers.Idle = parseInt(value)
+ case "ConnsTotal":
+ status.Connections.Total = parseInt(value)
+ case "ConnsAsyncWriting":
+ status.Connections.Async.Writing = parseInt(value)
+ case "ConnsAsyncKeepAlive":
+ status.Connections.Async.KeepAlive = parseInt(value)
+ case "ConnsAsyncClosing":
+ status.Connections.Async.Closing = parseInt(value)
+ case "Total Accesses":
+ status.Total.Accesses = parseInt(value)
+ case "Total kBytes":
+ status.Total.KBytes = parseInt(value)
+ case "Uptime":
+ status.Uptime = parseInt(value)
+ case "ReqPerSec":
+ status.Averages.ReqPerSec = parseFloat(value)
+ case "BytesPerSec":
+ status.Averages.BytesPerSec = parseFloat(value)
+ case "BytesPerReq":
+ status.Averages.BytesPerReq = parseFloat(value)
+ case "Scoreboard":
+ status.Scoreboard = parseScoreboard(value)
+ }
+ }
+
+ return &status, nil
+}
+
+func parseScoreboard(line string) *scoreboard {
+ // “_” Waiting for Connection
+ // “S” Starting up
+ // “R” Reading Request
+ // “W” Sending Reply
+ // “K” Keepalive (read)
+ // “D” DNS Lookup
+ // “C” Closing connection
+ // “L” Logging
+ // “G” Gracefully finishing
+ // “I” Idle cleanup of worker
+ // “.” Open slot with no current process
+ var sb scoreboard
+ for _, s := range strings.Split(line, "") {
+ switch s {
+ case "_":
+ sb.Waiting++
+ case "S":
+ sb.Starting++
+ case "R":
+ sb.Reading++
+ case "W":
+ sb.Sending++
+ case "K":
+ sb.KeepAlive++
+ case "D":
+ sb.DNSLookup++
+ case "C":
+ sb.Closing++
+ case "L":
+ sb.Logging++
+ case "G":
+ sb.Finishing++
+ case "I":
+ sb.IdleCleanup++
+ case ".":
+ sb.Open++
+ }
+ }
+ return &sb
+}
+
+func parseInt(value string) *int64 {
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return nil
+ }
+ return &v
+}
+
+func parseFloat(value string) *float64 {
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return nil
+ }
+ return &v
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/apache/config_schema.json b/src/go/plugin/go.d/modules/apache/config_schema.json
new file mode 100644
index 000000000..b92363e93
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Apache collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Apache machine readable [status page](https://httpd.apache.org/docs/2.4/mod/mod_status.html).",
+ "type": "string",
+ "default": "http://127.0.0.1/server-status?auto",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/apache/init.go b/src/go/plugin/go.d/modules/apache/init.go
new file mode 100644
index 000000000..e13186f01
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/init.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package apache
+
+import (
+ "errors"
+ "net/http"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (a *Apache) validateConfig() error {
+ if a.URL == "" {
+ return errors.New("url not set")
+ }
+ if !strings.HasSuffix(a.URL, "?auto") {
+ return errors.New("invalid URL, should ends in '?auto'")
+ }
+ return nil
+}
+
+func (a *Apache) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(a.Client)
+}
diff --git a/src/go/plugin/go.d/modules/apache/integrations/apache.md b/src/go/plugin/go.d/modules/apache/integrations/apache.md
new file mode 100644
index 000000000..ec9f88883
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/integrations/apache.md
@@ -0,0 +1,273 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/apache/integrations/apache.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/apache/metadata.yaml"
+sidebar_label: "Apache"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Apache
+
+
+<img src="https://netdata.cloud/img/apache.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: apache
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.
+
+
+It sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html),
+which is a built-in location that provides metrics about the Apache server.
+
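+You can verify that the status endpoint is reachable and inspect its machine-readable output with a plain HTTP request (adjust the URL to your setup):
+
+```bash
+curl "http://127.0.0.1/server-status?auto"
+```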
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Apache instances running on localhost that are listening on port 80.
+On startup, it tries to collect metrics from:
+
+- http://localhost/server-status?auto
+- http://127.0.0.1/server-status?auto
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+Metrics marked as *Extended* are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.
+
+
+### Per Apache instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | Basic | Extended |
+|:------|:----------|:----|:---:|:---:|
+| apache.connections | connections | connections | • | • |
+| apache.conns_async | keepalive, closing, writing | connections | • | • |
+| apache.workers | idle, busy | workers | • | • |
+| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | • | • |
+| apache.requests | requests | requests/s | | • |
+| apache.net | sent | kilobits/s | | • |
+| apache.reqpersec | requests | requests/s | | • |
+| apache.bytespersec | served | KiB/s | | • |
+| apache.bytesperreq | size | KiB | | • |
+| apache.uptime | uptime | seconds | | • |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable Apache status support
+
+- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).
+- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/apache.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/apache.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/server-status?auto | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Apache with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1/server-status?auto
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+
+ - name: remote
+ url: http://192.0.2.1/server-status?auto
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m apache
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `apache` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep apache
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep apache /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep apache
+```
+
+
diff --git a/src/go/plugin/go.d/modules/apache/integrations/httpd.md b/src/go/plugin/go.d/modules/apache/integrations/httpd.md
new file mode 100644
index 000000000..258365180
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/integrations/httpd.md
@@ -0,0 +1,273 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/apache/integrations/httpd.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/apache/metadata.yaml"
+sidebar_label: "HTTPD"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HTTPD
+
+
+<img src="https://netdata.cloud/img/apache.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: apache
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.
+
+
+It sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html),
+which is a built-in location that provides metrics about the Apache server.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Apache instances running on localhost that are listening on port 80.
+On startup, it tries to collect metrics from:
+
+- http://localhost/server-status?auto
+- http://127.0.0.1/server-status?auto
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+Metrics marked as *Extended* are available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.
+
+
+### Per Apache instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | Basic | Extended |
+|:------|:----------|:----|:---:|:---:|
+| apache.connections | connections | connections | • | • |
+| apache.conns_async | keepalive, closing, writing | connections | • | • |
+| apache.workers | idle, busy | workers | • | • |
+| apache.scoreboard | waiting, starting, reading, sending, keepalive, dns_lookup, closing, logging, finishing, idle_cleanup, open | connections | • | • |
+| apache.requests | requests | requests/s | | • |
+| apache.net | sent | kilobits/s | | • |
+| apache.reqpersec | requests | requests/s | | • |
+| apache.bytespersec | served | KiB/s | | • |
+| apache.bytesperreq | size | KiB | | • |
+| apache.uptime | uptime | seconds | | • |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable Apache status support
+
+- Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).
+- Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/apache.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/apache.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/server-status?auto | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Apache with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1/server-status?auto
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+
+ - name: remote
+ url: http://192.0.2.1/server-status?auto
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m apache
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `apache` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep apache
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep apache /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep apache
+```
+
+
diff --git a/src/go/plugin/go.d/modules/apache/metadata.yaml b/src/go/plugin/go.d/modules/apache/metadata.yaml
new file mode 100644
index 000000000..bfab73fcf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/metadata.yaml
@@ -0,0 +1,302 @@
+plugin_name: go.d.plugin
+modules:
+ - &module
+ meta: &meta
+ id: collector-go.d.plugin-apache
+ plugin_name: go.d.plugin
+ module_name: apache
+ monitored_instance:
+ name: Apache
+ link: https://httpd.apache.org/
+ icon_filename: apache.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - webserver
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: go.d.plugin
+ module_name: weblog
+ - plugin_name: go.d.plugin
+ module_name: httpcheck
+ - plugin_name: apps.plugin
+ module_name: apps
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the activity and performance of Apache servers, and collects metrics such as the number of connections, workers, requests and more.
+ method_description: |
+ It sends HTTP requests to the Apache location [server-status](https://httpd.apache.org/docs/2.4/mod/mod_status.html),
+ which is a built-in location that provides metrics about the Apache server.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Apache instances running on localhost that are listening on port 80.
+ On startup, it tries to collect metrics from:
+
+ - http://localhost/server-status?auto
+ - http://127.0.0.1/server-status?auto
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable Apache status support
+ description: |
+ - Enable and configure [status_module](https://httpd.apache.org/docs/2.4/mod/mod_status.html).
+ - Ensure that you have [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/mod_status.html#troubleshoot) set on (enabled by default since Apache v2.3.6).
+ configuration:
+ file:
+ name: go.d/apache.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1/server-status?auto
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: Apache with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1/server-status?auto
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+
+ - name: remote
+ url: http://192.0.2.1/server-status?auto
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: |
+ All metrics available only if [ExtendedStatus](https://httpd.apache.org/docs/2.4/mod/core.html#extendedstatus) is on.
+ availability:
+ - Basic
+ - Extended
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: apache.connections
+ availability:
+ - Basic
+ - Extended
+ description: Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: apache.conns_async
+ availability:
+ - Basic
+ - Extended
+ description: Active Connections
+ unit: connections
+ chart_type: stacked
+ dimensions:
+ - name: keepalive
+ - name: closing
+ - name: writing
+ - name: apache.workers
+ availability:
+ - Basic
+ - Extended
+ description: Workers Threads
+ unit: workers
+ chart_type: stacked
+ dimensions:
+ - name: idle
+ - name: busy
+ - name: apache.scoreboard
+ availability:
+ - Basic
+ - Extended
+ description: Scoreboard
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: waiting
+ - name: starting
+ - name: reading
+ - name: sending
+ - name: keepalive
+ - name: dns_lookup
+ - name: closing
+ - name: logging
+ - name: finishing
+ - name: idle_cleanup
+ - name: open
+ - name: apache.requests
+ availability:
+ - Extended
+ description: Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: apache.net
+ availability:
+ - Extended
+ description: Bandwidth
+ unit: kilobit/s
+ chart_type: area
+ dimensions:
+ - name: sent
+ - name: apache.reqpersec
+ availability:
+ - Extended
+ description: Lifetime Average Number Of Requests Per Second
+ unit: requests/s
+ chart_type: area
+ dimensions:
+ - name: requests
+ - name: apache.bytespersec
+ availability:
+ - Extended
+ description: Lifetime Average Number Of Bytes Served Per Second
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: served
+ - name: apache.bytesperreq
+ availability:
+ - Extended
+ description: Lifetime Average Response Size
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: size
+ - name: apache.uptime
+ availability:
+ - Extended
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-httpd
+ monitored_instance:
+ name: HTTPD
+ link: https://httpd.apache.org/
+ icon_filename: apache.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
diff --git a/src/go/plugin/go.d/modules/apache/metrics.go b/src/go/plugin/go.d/modules/apache/metrics.go
new file mode 100644
index 000000000..953bd42c3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/metrics.go
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package apache
+
+type (
+ serverStatus struct {
+ // ExtendedStatus
+ Total struct {
+ // Total number of accesses.
+ Accesses *int64 `stm:"accesses"`
+			// Total amount of data served, in kilobytes.
+ // This metric reflects the bytes that should have been served,
+ // which is not necessarily equal to the bytes actually (successfully) served.
+ KBytes *int64 `stm:"kBytes"`
+ } `stm:"total"`
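+		// The stm tags below use the "name,multiplier,divisor" form: these float averages are
+		// multiplied by 100000 so their fractional part survives the conversion to int64 metric values.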
+ Averages struct {
+			// Average number of requests per second.
+ ReqPerSec *float64 `stm:"req_per_sec,100000,1"`
+ // Average number of bytes served per second.
+ BytesPerSec *float64 `stm:"bytes_per_sec,100000,1"`
+ // Average number of bytes per request.
+ BytesPerReq *float64 `stm:"bytes_per_req,100000,1"`
+ } `stm:""`
+ Uptime *int64 `stm:"uptime"`
+
+ Workers struct {
+ // Total number of busy worker threads/processes.
+ // A worker is considered “busy” if it is in any of the following states:
+ // reading, writing, keep-alive, logging, closing, or gracefully finishing.
+ Busy *int64 `stm:"busy_workers"`
+ // Total number of idle worker threads/processes.
+ // An “idle” worker is not in any of the busy states.
+ Idle *int64 `stm:"idle_workers"`
+ } `stm:""`
+ Connections struct {
+ Total *int64 `stm:"total"`
+ Async struct {
+ // Number of async connections in writing state (only applicable to event MPM).
+ Writing *int64 `stm:"writing"`
+ // Number of async connections in keep-alive state (only applicable to event MPM).
+ KeepAlive *int64 `stm:"keep_alive"`
+ // Number of async connections in closing state (only applicable to event MPM).
+ Closing *int64 `stm:"closing"`
+ } `stm:"async"`
+ } `stm:"conns"`
+ Scoreboard *scoreboard `stm:"scoreboard"`
+ }
+ scoreboard struct {
+ Waiting int64 `stm:"waiting"`
+ Starting int64 `stm:"starting"`
+ Reading int64 `stm:"reading"`
+ Sending int64 `stm:"sending"`
+ KeepAlive int64 `stm:"keepalive"`
+ DNSLookup int64 `stm:"dns_lookup"`
+ Closing int64 `stm:"closing"`
+ Logging int64 `stm:"logging"`
+ Finishing int64 `stm:"finishing"`
+ IdleCleanup int64 `stm:"idle_cleanup"`
+ Open int64 `stm:"open"`
+ }
+)
diff --git a/src/go/plugin/go.d/modules/apache/testdata/config.json b/src/go/plugin/go.d/modules/apache/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/apache/testdata/config.yaml b/src/go/plugin/go.d/modules/apache/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/apache/testdata/extended-status-mpm-event.txt b/src/go/plugin/go.d/modules/apache/testdata/extended-status-mpm-event.txt
new file mode 100644
index 000000000..136b69363
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/testdata/extended-status-mpm-event.txt
@@ -0,0 +1,39 @@
+127.0.0.1
+ServerVersion: Apache/2.4.37 (Unix)
+ServerMPM: event
+Server Built: Oct 23 2018 18:27:46
+CurrentTime: Sunday, 13-Jan-2019 20:39:30 MSK
+RestartTime: Sunday, 13-Jan-2019 20:35:13 MSK
+ParentServerConfigGeneration: 1
+ParentServerMPMGeneration: 0
+ServerUptimeSeconds: 256
+ServerUptime: 4 minutes 16 seconds
+Load1: 1.02
+Load5: 1.30
+Load15: 1.41
+Total Accesses: 9
+Total kBytes: 12
+Total Duration: 1
+CPUUser: 0
+CPUSystem: .01
+CPUChildrenUser: 0
+CPUChildrenSystem: 0
+CPULoad: .00390625
+Uptime: 256
+ReqPerSec: .0351563
+BytesPerSec: 48
+BytesPerReq: 1365.33
+DurationPerReq: .111111
+BusyWorkers: 1
+IdleWorkers: 99
+Processes: 4
+Stopping: 0
+BusyWorkers: 1
+IdleWorkers: 99
+ConnsTotal: 0
+ConnsAsyncWriting: 0
+ConnsAsyncKeepAlive: 0
+ConnsAsyncClosing: 0
+Scoreboard: ____________________________________________________________W_______________________________________............................................................................................................................................................................................................................................................................................................
+Using GnuTLS version: 3.6.5
+Built against GnuTLS version: 3.5.19 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/apache/testdata/extended-status-mpm-prefork.txt b/src/go/plugin/go.d/modules/apache/testdata/extended-status-mpm-prefork.txt
new file mode 100644
index 000000000..eeafb4983
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/testdata/extended-status-mpm-prefork.txt
@@ -0,0 +1,48 @@
+some.host.name
+ServerVersion: Apache/2.4.53 (Unix) OpenSSL/1.1.1
+ServerMPM: prefork
+Server Built: Apr 6 2022 16:30:59
+CurrentTime: Monday, 18-Apr-2022 11:52:39 CEST
+RestartTime: Sunday, 10-Apr-2022 06:57:34 CEST
+ParentServerConfigGeneration: 9
+ParentServerMPMGeneration: 8
+ServerUptimeSeconds: 708904
+ServerUptime: 8 days 4 hours 55 minutes 4 seconds
+Load1: 7.18
+Load5: 7.29
+Load15: 8.25
+Total Accesses: 120358784
+Total kBytes: 4252382776
+Total Duration: 35583107177
+CPUUser: 4549.96
+CPUSystem: 4142.92
+CPUChildrenUser: 776666
+CPUChildrenSystem: 609619
+CPULoad: 196.78
+Uptime: 708904
+ReqPerSec: 169.781
+BytesPerSec: 6142500
+BytesPerReq: 36178.8
+DurationPerReq: 295.642
+BusyWorkers: 70
+IdleWorkers: 1037
+Scoreboard: ___...
+TLSSessionCacheStatus
+CacheType: SHMCB
+CacheSharedMemory: 512000
+CacheCurrentEntries: 1969
+CacheSubcaches: 32
+CacheIndexesPerSubcaches: 88
+CacheTimeLeftOldestAvg: 295
+CacheTimeLeftOldestMin: 295
+CacheTimeLeftOldestMax: 296
+CacheIndexUsage: 69%
+CacheUsage: 99%
+CacheStoreCount: 22984008
+CacheReplaceCount: 0
+CacheExpireCount: 0
+CacheDiscardCount: 22976594
+CacheRetrieveHitCount: 5501
+CacheRetrieveMissCount: 4630
+CacheRemoveHitCount: 5491
+CacheRemoveMissCount: 51 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/apache/testdata/lighttpd-status.txt b/src/go/plugin/go.d/modules/apache/testdata/lighttpd-status.txt
new file mode 100644
index 000000000..07d8e06e8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/testdata/lighttpd-status.txt
@@ -0,0 +1,6 @@
+Total Accesses: 12
+Total kBytes: 4
+Uptime: 11
+BusyServers: 3
+IdleServers: 125
+Scoreboard: khr_____________________________________________________________________________________________________________________________ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/apache/testdata/simple-status-mpm-event.txt b/src/go/plugin/go.d/modules/apache/testdata/simple-status-mpm-event.txt
new file mode 100644
index 000000000..8093eacf9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/apache/testdata/simple-status-mpm-event.txt
@@ -0,0 +1,24 @@
+127.0.0.1
+ServerVersion: Apache/2.4.37 (Unix)
+ServerMPM: event
+Server Built: Oct 23 2018 18:27:46
+CurrentTime: Sunday, 13-Jan-2019 21:43:56 MSK
+RestartTime: Sunday, 13-Jan-2019 21:43:53 MSK
+ParentServerConfigGeneration: 1
+ParentServerMPMGeneration: 0
+ServerUptimeSeconds: 2
+ServerUptime: 2 seconds
+Load1: 0.77
+Load5: 0.93
+Load15: 1.03
+BusyWorkers: 1
+IdleWorkers: 74
+Processes: 3
+Stopping: 0
+BusyWorkers: 1
+IdleWorkers: 74
+ConnsTotal: 0
+ConnsAsyncWriting: 0
+ConnsAsyncKeepAlive: 0
+ConnsAsyncClosing: 0
+Scoreboard: ________________________________________________________________W__________..................................................................................................................................................................................................................................................................................................................................... \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/beanstalk/README.md b/src/go/plugin/go.d/modules/beanstalk/README.md
new file mode 120000
index 000000000..4efe13889
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/README.md
@@ -0,0 +1 @@
+integrations/beanstalk.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/beanstalk/beanstalk.go b/src/go/plugin/go.d/modules/beanstalk/beanstalk.go
new file mode 100644
index 000000000..f37cbeda4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/beanstalk.go
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package beanstalk
+
+import (
+ _ "embed"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("beanstalk", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Beanstalk {
+ return &Beanstalk{
+ Config: Config{
+ Address: "127.0.0.1:11300",
+ Timeout: web.Duration(time.Second * 1),
+ TubeSelector: "*",
+ },
+
+ charts: statsCharts.Copy(),
+ newConn: newBeanstalkConn,
+ discoverTubesEvery: time.Minute * 1,
+ tubeSr: matcher.TRUE(),
+ seenTubes: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ TubeSelector string `yaml:"tube_selector,omitempty" json:"tube_selector"`
+}
+
+type Beanstalk struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ newConn func(Config, *logger.Logger) beanstalkConn
+ conn beanstalkConn
+
+ discoverTubesEvery time.Duration
+ lastDiscoverTubesTime time.Time
+ discoveredTubes []string
+ tubeSr matcher.Matcher
+ seenTubes map[string]bool
+}
+
+func (b *Beanstalk) Configuration() any {
+ return b.Config
+}
+
+func (b *Beanstalk) Init() error {
+ if err := b.validateConfig(); err != nil {
+ return fmt.Errorf("config validation: %v", err)
+ }
+
+ sr, err := b.initTubeSelector()
+ if err != nil {
+ return fmt.Errorf("failed to init tube selector: %v", err)
+ }
+ b.tubeSr = sr
+
+ return nil
+}
+
+func (b *Beanstalk) Check() error {
+ mx, err := b.collect()
+ if err != nil {
+ b.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (b *Beanstalk) Charts() *module.Charts {
+ return b.charts
+}
+
+func (b *Beanstalk) Collect() map[string]int64 {
+ mx, err := b.collect()
+ if err != nil {
+ b.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (b *Beanstalk) Cleanup() {
+ if b.conn != nil {
+ if err := b.conn.disconnect(); err != nil {
+ b.Warningf("error on disconnect: %s", err)
+ }
+ b.conn = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/beanstalk/beanstalk_test.go b/src/go/plugin/go.d/modules/beanstalk/beanstalk_test.go
new file mode 100644
index 000000000..da1fcaf08
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/beanstalk_test.go
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package beanstalk
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStats, _ = os.ReadFile("testdata/stats.txt")
+ dataListTubes, _ = os.ReadFile("testdata/list-tubes.txt")
+ dataStatsTubeDefault, _ = os.ReadFile("testdata/stats-tube-default.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStats": dataStats,
+ "dataListTubes": dataListTubes,
+ "dataStatsTubeDefault": dataStatsTubeDefault,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestBeanstalk_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Beanstalk{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestBeanstalk_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success with default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if address not set": {
+ wantFail: true,
+ config: func() Config {
+ conf := New().Config
+ conf.Address = ""
+ return conf
+ }(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ beans := New()
+ beans.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, beans.Init())
+ } else {
+ assert.NoError(t, beans.Init())
+ }
+ })
+ }
+}
+
+func TestBeanstalk_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestBeanstalk_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (*Beanstalk, *mockBeanstalkDaemon)
+ wantFail bool
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: prepareCaseOk,
+ },
+ "fails on unexpected response": {
+ wantFail: true,
+ prepare: prepareCaseUnexpectedResponse,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ beanstalk, daemon := test.prepare()
+
+ defer func() {
+ assert.NoError(t, daemon.Close(), "daemon.Close()")
+ }()
+ go func() {
+ assert.NoError(t, daemon.Run(), "daemon.Run()")
+ }()
+
+ select {
+ case <-daemon.started:
+ case <-time.After(time.Second * 3):
+ t.Errorf("mock beanstalk daemon start timed out")
+ }
+
+ require.NoError(t, beanstalk.Init())
+
+ if test.wantFail {
+ assert.Error(t, beanstalk.Check())
+ } else {
+ assert.NoError(t, beanstalk.Check())
+ }
+
+ beanstalk.Cleanup()
+
+ select {
+ case <-daemon.stopped:
+ case <-time.After(time.Second * 3):
+ t.Errorf("mock beanstalk daemon stop timed out")
+ }
+ })
+ }
+}
+
+func TestBeanstalk_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (*Beanstalk, *mockBeanstalkDaemon)
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success on valid response": {
+ prepare: prepareCaseOk,
+ wantMetrics: map[string]int64{
+ "binlog-records-migrated": 0,
+ "binlog-records-written": 0,
+ "cmd-bury": 0,
+ "cmd-delete": 0,
+ "cmd-ignore": 0,
+ "cmd-kick": 0,
+ "cmd-list-tube-used": 0,
+ "cmd-list-tubes": 317,
+ "cmd-list-tubes-watched": 0,
+ "cmd-pause-tube": 0,
+ "cmd-peek": 0,
+ "cmd-peek-buried": 0,
+ "cmd-peek-delayed": 0,
+ "cmd-peek-ready": 0,
+ "cmd-put": 0,
+ "cmd-release": 0,
+ "cmd-reserve": 0,
+ "cmd-reserve-with-timeout": 0,
+ "cmd-stats": 23619,
+ "cmd-stats-job": 0,
+ "cmd-stats-tube": 18964,
+ "cmd-touch": 0,
+ "cmd-use": 0,
+ "cmd-watch": 0,
+ "current-connections": 2,
+ "current-jobs-buried": 0,
+ "current-jobs-delayed": 0,
+ "current-jobs-ready": 0,
+ "current-jobs-reserved": 0,
+ "current-jobs-urgent": 0,
+ "current-producers": 0,
+ "current-tubes": 1,
+ "current-waiting": 0,
+ "current-workers": 0,
+ "job-timeouts": 0,
+ "rusage-stime": 3922,
+ "rusage-utime": 1602,
+ "total-connections": 72,
+ "total-jobs": 0,
+ "tube_default_cmd-delete": 0,
+ "tube_default_cmd-pause-tube": 0,
+ "tube_default_current-jobs-buried": 0,
+ "tube_default_current-jobs-delayed": 0,
+ "tube_default_current-jobs-ready": 0,
+ "tube_default_current-jobs-reserved": 0,
+ "tube_default_current-jobs-urgent": 0,
+ "tube_default_current-using": 2,
+ "tube_default_current-waiting": 0,
+ "tube_default_current-watching": 2,
+ "tube_default_pause": 0,
+ "tube_default_pause-time-left": 0,
+ "tube_default_total-jobs": 0,
+ "uptime": 105881,
+ },
+ wantCharts: len(statsCharts) + len(tubeChartsTmpl)*1,
+ },
+ "fails on unexpected response": {
+ prepare: prepareCaseUnexpectedResponse,
+ wantCharts: len(statsCharts),
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ wantCharts: len(statsCharts),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ beanstalk, daemon := test.prepare()
+
+ defer func() {
+ assert.NoError(t, daemon.Close(), "daemon.Close()")
+ }()
+ go func() {
+ assert.NoError(t, daemon.Run(), "daemon.Run()")
+ }()
+
+ select {
+ case <-daemon.started:
+ case <-time.After(time.Second * 3):
+ t.Errorf("mock beanstalk daemon start timed out")
+ }
+
+ require.NoError(t, beanstalk.Init())
+
+ mx := beanstalk.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ assert.Equal(t, test.wantCharts, len(*beanstalk.Charts()), "want charts")
+
+ if len(test.wantMetrics) > 0 {
+ module.TestMetricsHasAllChartsDims(t, beanstalk.Charts(), mx)
+ }
+
+ beanstalk.Cleanup()
+
+ select {
+ case <-daemon.stopped:
+ case <-time.After(time.Second * 3):
+ t.Errorf("mock beanstalk daemon stop timed out")
+ }
+ })
+ }
+}
+
+func prepareCaseOk() (*Beanstalk, *mockBeanstalkDaemon) {
+ daemon := &mockBeanstalkDaemon{
+ addr: "127.0.0.1:65001",
+ started: make(chan struct{}),
+ stopped: make(chan struct{}),
+ dataStats: dataStats,
+ dataListTubes: dataListTubes,
+ dataStatsTube: dataStatsTubeDefault,
+ }
+
+ beanstalk := New()
+ beanstalk.Address = daemon.addr
+
+ return beanstalk, daemon
+}
+
+func prepareCaseUnexpectedResponse() (*Beanstalk, *mockBeanstalkDaemon) {
+ daemon := &mockBeanstalkDaemon{
+ addr: "127.0.0.1:65001",
+ started: make(chan struct{}),
+ stopped: make(chan struct{}),
+ dataStats: []byte("INTERNAL_ERROR\n"),
+ dataListTubes: []byte("INTERNAL_ERROR\n"),
+ dataStatsTube: []byte("INTERNAL_ERROR\n"),
+ }
+
+ beanstalk := New()
+ beanstalk.Address = daemon.addr
+
+ return beanstalk, daemon
+}
+
+func prepareCaseConnectionRefused() (*Beanstalk, *mockBeanstalkDaemon) {
+ ch := make(chan struct{})
+ close(ch)
+ daemon := &mockBeanstalkDaemon{
+ addr: "127.0.0.1:65001",
+ dontStart: true,
+ started: ch,
+ stopped: ch,
+ }
+
+ beanstalk := New()
+ beanstalk.Address = daemon.addr
+
+ return beanstalk, daemon
+}
+
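+// mockBeanstalkDaemon is a minimal TCP server used by the tests above: it accepts a single
+// connection and answers the stats, list-tubes and stats-tube commands with canned payloads.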
+type mockBeanstalkDaemon struct {
+ addr string
+ srv net.Listener
+ started chan struct{}
+ stopped chan struct{}
+ dontStart bool
+
+ dataStats []byte
+ dataListTubes []byte
+ dataStatsTube []byte
+}
+
+func (m *mockBeanstalkDaemon) Run() error {
+ if m.dontStart {
+ return nil
+ }
+
+ srv, err := net.Listen("tcp", m.addr)
+ if err != nil {
+ return err
+ }
+
+ m.srv = srv
+
+ close(m.started)
+ defer close(m.stopped)
+
+ return m.handleConnections()
+}
+
+func (m *mockBeanstalkDaemon) Close() error {
+ if m.srv != nil {
+ err := m.srv.Close()
+ m.srv = nil
+ return err
+ }
+ return nil
+}
+
+func (m *mockBeanstalkDaemon) handleConnections() error {
+ conn, err := m.srv.Accept()
+ if err != nil || conn == nil {
+ return errors.New("could not accept connection")
+ }
+ return m.handleConnection(conn)
+}
+
+func (m *mockBeanstalkDaemon) handleConnection(conn net.Conn) error {
+ defer func() { _ = conn.Close() }()
+
+ rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
+ var line string
+ var err error
+
+ for {
+ if line, err = rw.ReadString('\n'); err != nil {
+ return fmt.Errorf("error reading from connection: %v", err)
+ }
+
+ line = strings.TrimSpace(line)
+
+ cmd, param, _ := strings.Cut(line, " ")
+
+ switch cmd {
+ case cmdQuit:
+ return nil
+ case cmdStats:
+ _, err = rw.Write(m.dataStats)
+ case cmdListTubes:
+ _, err = rw.Write(m.dataListTubes)
+ case cmdStatsTube:
+ if param == "default" {
+ _, err = rw.Write(m.dataStatsTube)
+ } else {
+ _, err = rw.WriteString("NOT_FOUND\n")
+ }
+ default:
+ return fmt.Errorf("unexpected command: %s", line)
+ }
+ _ = rw.Flush()
+ if err != nil {
+ return err
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/beanstalk/charts.go b/src/go/plugin/go.d/modules/beanstalk/charts.go
new file mode 100644
index 000000000..fb2f22628
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/charts.go
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package beanstalk
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioCurrentJobs = module.Priority + iota
+ prioJobsRate
+ prioJobsTimeouts
+
+ prioCurrentTubes
+
+ prioCommandsRate
+
+ prioCurrentConnections
+ prioConnectionsRate
+
+ prioBinlogRecords
+
+ prioCpuUsage
+
+ prioUptime
+
+ prioTubeCurrentJobs
+ prioTubeJobsRate
+
+ prioTubeCommands
+
+ prioTubeCurrentConnections
+
+ prioTubePauseTime
+)
+
+var (
+ statsCharts = module.Charts{
+ currentJobs.Copy(),
+ jobsRateChart.Copy(),
+ jobsTimeoutsChart.Copy(),
+
+ currentTubesChart.Copy(),
+
+ commandsRateChart.Copy(),
+
+ currentConnectionsChart.Copy(),
+ connectionsRateChart.Copy(),
+
+ binlogRecordsChart.Copy(),
+
+ cpuUsageChart.Copy(),
+
+ uptimeChart.Copy(),
+ }
+
+ currentJobs = module.Chart{
+ ID: "current_jobs",
+ Title: "Current Jobs",
+ Units: "jobs",
+ Fam: "jobs",
+ Ctx: "beanstalk.current_jobs",
+ Type: module.Stacked,
+ Priority: prioCurrentJobs,
+ Dims: module.Dims{
+ {ID: "current-jobs-ready", Name: "ready"},
+ {ID: "current-jobs-buried", Name: "buried"},
+ {ID: "current-jobs-urgent", Name: "urgent"},
+ {ID: "current-jobs-delayed", Name: "delayed"},
+ {ID: "current-jobs-reserved", Name: "reserved"},
+ },
+ }
+ jobsRateChart = module.Chart{
+ ID: "jobs_rate",
+ Title: "Jobs Rate",
+ Units: "jobs/s",
+ Fam: "jobs",
+ Ctx: "beanstalk.jobs_rate",
+ Type: module.Line,
+ Priority: prioJobsRate,
+ Dims: module.Dims{
+ {ID: "total-jobs", Name: "created", Algo: module.Incremental},
+ },
+ }
+ jobsTimeoutsChart = module.Chart{
+ ID: "jobs_timeouts",
+ Title: "Timed Out Jobs",
+ Units: "jobs/s",
+ Fam: "jobs",
+ Ctx: "beanstalk.jobs_timeouts",
+ Type: module.Line,
+ Priority: prioJobsTimeouts,
+ Dims: module.Dims{
+ {ID: "job-timeouts", Name: "timeouts", Algo: module.Incremental},
+ },
+ }
+
+ currentTubesChart = module.Chart{
+ ID: "current_tubes",
+ Title: "Current Tubes",
+ Units: "tubes",
+ Fam: "tubes",
+ Ctx: "beanstalk.current_tubes",
+ Type: module.Line,
+ Priority: prioCurrentTubes,
+ Dims: module.Dims{
+ {ID: "current-tubes", Name: "tubes"},
+ },
+ }
+
+ commandsRateChart = module.Chart{
+ ID: "commands_rate",
+ Title: "Commands Rate",
+ Units: "commands/s",
+ Fam: "commands",
+ Ctx: "beanstalk.commands_rate",
+ Type: module.Stacked,
+ Priority: prioCommandsRate,
+ Dims: module.Dims{
+ {ID: "cmd-put", Name: "put", Algo: module.Incremental},
+ {ID: "cmd-peek", Name: "peek", Algo: module.Incremental},
+ {ID: "cmd-peek-ready", Name: "peek-ready", Algo: module.Incremental},
+ {ID: "cmd-peek-delayed", Name: "peek-delayed", Algo: module.Incremental},
+ {ID: "cmd-peek-buried", Name: "peek-buried", Algo: module.Incremental},
+ {ID: "cmd-reserve", Name: "reserve", Algo: module.Incremental},
+ {ID: "cmd-reserve-with-timeout", Name: "reserve-with-timeout", Algo: module.Incremental},
+ {ID: "cmd-touch", Name: "touch", Algo: module.Incremental},
+ {ID: "cmd-use", Name: "use", Algo: module.Incremental},
+ {ID: "cmd-watch", Name: "watch", Algo: module.Incremental},
+ {ID: "cmd-ignore", Name: "ignore", Algo: module.Incremental},
+ {ID: "cmd-delete", Name: "delete", Algo: module.Incremental},
+ {ID: "cmd-release", Name: "release", Algo: module.Incremental},
+ {ID: "cmd-bury", Name: "bury", Algo: module.Incremental},
+ {ID: "cmd-kick", Name: "kick", Algo: module.Incremental},
+ {ID: "cmd-stats", Name: "stats", Algo: module.Incremental},
+ {ID: "cmd-stats-job", Name: "stats-job", Algo: module.Incremental},
+ {ID: "cmd-stats-tube", Name: "stats-tube", Algo: module.Incremental},
+ {ID: "cmd-list-tubes", Name: "list-tubes", Algo: module.Incremental},
+ {ID: "cmd-list-tube-used", Name: "list-tube-used", Algo: module.Incremental},
+ {ID: "cmd-list-tubes-watched", Name: "list-tubes-watched", Algo: module.Incremental},
+ {ID: "cmd-pause-tube", Name: "pause-tube", Algo: module.Incremental},
+ },
+ }
+
+ currentConnectionsChart = module.Chart{
+ ID: "current_connections",
+ Title: "Current Connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "beanstalk.current_connections",
+ Type: module.Line,
+ Priority: prioCurrentConnections,
+ Dims: module.Dims{
+ {ID: "current-connections", Name: "open"},
+ {ID: "current-producers", Name: "producers"},
+ {ID: "current-workers", Name: "workers"},
+ {ID: "current-waiting", Name: "waiting"},
+ },
+ }
+ connectionsRateChart = module.Chart{
+ ID: "connections_rate",
+ Title: "Connections Rate",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "beanstalk.connections_rate",
+ Type: module.Line,
+ Priority: prioConnectionsRate,
+ Dims: module.Dims{
+ {ID: "total-connections", Name: "created", Algo: module.Incremental},
+ },
+ }
+
+ binlogRecordsChart = module.Chart{
+ ID: "binlog_records",
+ Title: "Binlog Records",
+ Units: "records/s",
+ Fam: "binlog",
+ Ctx: "beanstalk.binlog_records",
+ Type: module.Line,
+ Priority: prioBinlogRecords,
+ Dims: module.Dims{
+ {ID: "binlog-records-written", Name: "written", Algo: module.Incremental},
+ {ID: "binlog-records-migrated", Name: "migrated", Algo: module.Incremental},
+ },
+ }
+
+ cpuUsageChart = module.Chart{
+ ID: "cpu_usage",
+ Title: "CPU Usage",
+ Units: "percent",
+ Fam: "cpu usage",
+ Ctx: "beanstalk.cpu_usage",
+ Type: module.Stacked,
+ Priority: prioCpuUsage,
+ Dims: module.Dims{
+ {ID: "rusage-utime", Name: "user", Algo: module.Incremental, Mul: 100, Div: 1000},
+ {ID: "rusage-stime", Name: "system", Algo: module.Incremental, Mul: 100, Div: 1000},
+ },
+ }
+
+ uptimeChart = module.Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "beanstalk.uptime",
+ Type: module.Line,
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "uptime"},
+ },
+ }
+)
+
+var (
+ tubeChartsTmpl = module.Charts{
+ tubeCurrentJobsChartTmpl.Copy(),
+ tubeJobsRateChartTmpl.Copy(),
+
+ tubeCommandsRateChartTmpl.Copy(),
+
+ tubeCurrentConnectionsChartTmpl.Copy(),
+
+ tubePauseTimeChartTmpl.Copy(),
+ }
+
+ tubeCurrentJobsChartTmpl = module.Chart{
+ ID: "tube_%s_current_jobs",
+ Title: "Tube Current Jobs",
+ Units: "jobs",
+ Fam: "tube jobs",
+ Ctx: "beanstalk.tube_current_jobs",
+ Type: module.Stacked,
+ Priority: prioTubeCurrentJobs,
+ Dims: module.Dims{
+ {ID: "tube_%s_current-jobs-ready", Name: "ready"},
+ {ID: "tube_%s_current-jobs-buried", Name: "buried"},
+ {ID: "tube_%s_current-jobs-urgent", Name: "urgent"},
+ {ID: "tube_%s_current-jobs-delayed", Name: "delayed"},
+ {ID: "tube_%s_current-jobs-reserved", Name: "reserved"},
+ },
+ }
+ tubeJobsRateChartTmpl = module.Chart{
+ ID: "tube_%s_jobs_rate",
+ Title: "Tube Jobs Rate",
+ Units: "jobs/s",
+ Fam: "tube jobs",
+ Ctx: "beanstalk.tube_jobs_rate",
+ Type: module.Line,
+ Priority: prioTubeJobsRate,
+ Dims: module.Dims{
+ {ID: "tube_%s_total-jobs", Name: "created", Algo: module.Incremental},
+ },
+ }
+ tubeCommandsRateChartTmpl = module.Chart{
+ ID: "tube_%s_commands_rate",
+ Title: "Tube Commands",
+ Units: "commands/s",
+ Fam: "tube commands",
+ Ctx: "beanstalk.tube_commands_rate",
+ Type: module.Stacked,
+ Priority: prioTubeCommands,
+ Dims: module.Dims{
+ {ID: "tube_%s_cmd-delete", Name: "delete", Algo: module.Incremental},
+ {ID: "tube_%s_cmd-pause-tube", Name: "pause-tube", Algo: module.Incremental},
+ },
+ }
+ tubeCurrentConnectionsChartTmpl = module.Chart{
+ ID: "tube_%s_current_connections",
+ Title: "Tube Current Connections",
+ Units: "connections",
+ Fam: "tube connections",
+ Ctx: "beanstalk.tube_current_connections",
+ Type: module.Stacked,
+ Priority: prioTubeCurrentConnections,
+ Dims: module.Dims{
+ {ID: "tube_%s_current-using", Name: "using"},
+ {ID: "tube_%s_current-waiting", Name: "waiting"},
+ {ID: "tube_%s_current-watching", Name: "watching"},
+ },
+ }
+ tubePauseTimeChartTmpl = module.Chart{
+ ID: "tube_%s_pause_time",
+ Title: "Tube Pause Time",
+ Units: "seconds",
+ Fam: "tube pause",
+ Ctx: "beanstalk.tube_pause",
+ Type: module.Line,
+ Priority: prioTubePauseTime,
+ Dims: module.Dims{
+ {ID: "tube_%s_pause", Name: "since"},
+ {ID: "tube_%s_pause-time-left", Name: "left"},
+ },
+ }
+)
+
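+// addTubeCharts instantiates the per-tube chart templates for a newly discovered tube,
+// filling the tube name into the chart and dimension IDs and attaching a "tube_name" label.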
+func (b *Beanstalk) addTubeCharts(name string) {
+ charts := tubeChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanTubeName(name))
+ chart.Labels = []module.Label{
+ {Key: "tube_name", Value: name},
+ }
+
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := b.Charts().Add(*charts...); err != nil {
+ b.Warning(err)
+ }
+}
+
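+// removeTubeCharts marks every chart that belongs to the given tube for removal.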
+func (b *Beanstalk) removeTubeCharts(name string) {
+ px := fmt.Sprintf("tube_%s_", cleanTubeName(name))
+
+ for _, chart := range *b.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func cleanTubeName(name string) string {
+ r := strings.NewReplacer(" ", "_", ".", "_", ",", "_")
+ return r.Replace(name)
+}
diff --git a/src/go/plugin/go.d/modules/beanstalk/client.go b/src/go/plugin/go.d/modules/beanstalk/client.go
new file mode 100644
index 000000000..66a8b1cef
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/client.go
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package beanstalk
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+
+ "gopkg.in/yaml.v2"
+)
+
+type beanstalkConn interface {
+ connect() error
+ disconnect() error
+ queryStats() (*beanstalkdStats, error)
+ queryListTubes() ([]string, error)
+ queryStatsTube(string) (*tubeStats, error)
+}
+
+// https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L553
+type beanstalkdStats struct {
+ CurrentJobsUrgent int64 `yaml:"current-jobs-urgent" stm:"current-jobs-urgent"`
+ CurrentJobsReady int64 `yaml:"current-jobs-ready" stm:"current-jobs-ready"`
+ CurrentJobsReserved int64 `yaml:"current-jobs-reserved" stm:"current-jobs-reserved"`
+ CurrentJobsDelayed int64 `yaml:"current-jobs-delayed" stm:"current-jobs-delayed"`
+ CurrentJobsBuried int64 `yaml:"current-jobs-buried" stm:"current-jobs-buried"`
+ CmdPut int64 `yaml:"cmd-put" stm:"cmd-put"`
+ CmdPeek int64 `yaml:"cmd-peek" stm:"cmd-peek"`
+ CmdPeekReady int64 `yaml:"cmd-peek-ready" stm:"cmd-peek-ready"`
+ CmdPeekDelayed int64 `yaml:"cmd-peek-delayed" stm:"cmd-peek-delayed"`
+ CmdPeekBuried int64 `yaml:"cmd-peek-buried" stm:"cmd-peek-buried"`
+ CmdReserve int64 `yaml:"cmd-reserve" stm:"cmd-reserve"`
+ CmdReserveWithTimeout int64 `yaml:"cmd-reserve-with-timeout" stm:"cmd-reserve-with-timeout"`
+ CmdTouch int64 `yaml:"cmd-touch" stm:"cmd-touch"`
+ CmdUse int64 `yaml:"cmd-use" stm:"cmd-use"`
+ CmdWatch int64 `yaml:"cmd-watch" stm:"cmd-watch"`
+ CmdIgnore int64 `yaml:"cmd-ignore" stm:"cmd-ignore"`
+ CmdDelete int64 `yaml:"cmd-delete" stm:"cmd-delete"`
+ CmdRelease int64 `yaml:"cmd-release" stm:"cmd-release"`
+ CmdBury int64 `yaml:"cmd-bury" stm:"cmd-bury"`
+ CmdKick int64 `yaml:"cmd-kick" stm:"cmd-kick"`
+ CmdStats int64 `yaml:"cmd-stats" stm:"cmd-stats"`
+ CmdStatsJob int64 `yaml:"cmd-stats-job" stm:"cmd-stats-job"`
+ CmdStatsTube int64 `yaml:"cmd-stats-tube" stm:"cmd-stats-tube"`
+ CmdListTubes int64 `yaml:"cmd-list-tubes" stm:"cmd-list-tubes"`
+ CmdListTubeUsed int64 `yaml:"cmd-list-tube-used" stm:"cmd-list-tube-used"`
+ CmdListTubesWatched int64 `yaml:"cmd-list-tubes-watched" stm:"cmd-list-tubes-watched"`
+ CmdPauseTube int64 `yaml:"cmd-pause-tube" stm:"cmd-pause-tube"`
+ JobTimeouts int64 `yaml:"job-timeouts" stm:"job-timeouts"`
+ TotalJobs int64 `yaml:"total-jobs" stm:"total-jobs"`
+ CurrentTubes int64 `yaml:"current-tubes" stm:"current-tubes"`
+ CurrentConnections int64 `yaml:"current-connections" stm:"current-connections"`
+ CurrentProducers int64 `yaml:"current-producers" stm:"current-producers"`
+ CurrentWorkers int64 `yaml:"current-workers" stm:"current-workers"`
+ CurrentWaiting int64 `yaml:"current-waiting" stm:"current-waiting"`
+ TotalConnections int64 `yaml:"total-connections" stm:"total-connections"`
+ RusageUtime float64 `yaml:"rusage-utime" stm:"rusage-utime,1000,1"`
+ RusageStime float64 `yaml:"rusage-stime" stm:"rusage-stime,1000,1"`
+ Uptime int64 `yaml:"uptime" stm:"uptime"`
+ BinlogRecordsWritten int64 `yaml:"binlog-records-written" stm:"binlog-records-written"`
+ BinlogRecordsMigrated int64 `yaml:"binlog-records-migrated" stm:"binlog-records-migrated"`
+}
+
+// https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L497
+type tubeStats struct {
+ Name string `yaml:"name"`
+ CurrentJobsUrgent int64 `yaml:"current-jobs-urgent" stm:"current-jobs-urgent"`
+ CurrentJobsReady int64 `yaml:"current-jobs-ready" stm:"current-jobs-ready"`
+ CurrentJobsReserved int64 `yaml:"current-jobs-reserved" stm:"current-jobs-reserved"`
+ CurrentJobsDelayed int64 `yaml:"current-jobs-delayed" stm:"current-jobs-delayed"`
+ CurrentJobsBuried int64 `yaml:"current-jobs-buried" stm:"current-jobs-buried"`
+ TotalJobs int64 `yaml:"total-jobs" stm:"total-jobs"`
+ CurrentUsing int64 `yaml:"current-using" stm:"current-using"`
+ CurrentWaiting int64 `yaml:"current-waiting" stm:"current-waiting"`
+ CurrentWatching int64 `yaml:"current-watching" stm:"current-watching"`
+ Pause float64 `yaml:"pause" stm:"pause"`
+ CmdDelete int64 `yaml:"cmd-delete" stm:"cmd-delete"`
+ CmdPauseTube int64 `yaml:"cmd-pause-tube" stm:"cmd-pause-tube"`
+ PauseTimeLeft float64 `yaml:"pause-time-left" stm:"pause-time-left"`
+}
+
+func newBeanstalkConn(conf Config, log *logger.Logger) beanstalkConn {
+ return &beanstalkClient{
+ Logger: log,
+ client: socket.New(socket.Config{
+ Address: conf.Address,
+ ConnectTimeout: conf.Timeout.Duration(),
+ ReadTimeout: conf.Timeout.Duration(),
+ WriteTimeout: conf.Timeout.Duration(),
+ TLSConf: nil,
+ }),
+ }
+}
+
+const (
+ cmdQuit = "quit"
+ cmdStats = "stats"
+ cmdListTubes = "list-tubes"
+ cmdStatsTube = "stats-tube"
+)
+
+type beanstalkClient struct {
+ *logger.Logger
+
+ client socket.Client
+}
+
+func (c *beanstalkClient) connect() error {
+ return c.client.Connect()
+}
+
+func (c *beanstalkClient) disconnect() error {
+ _, _, _ = c.query(cmdQuit)
+ return c.client.Disconnect()
+}
+
+func (c *beanstalkClient) queryStats() (*beanstalkdStats, error) {
+ cmd := cmdStats
+
+ resp, data, err := c.query(cmd)
+ if err != nil {
+ return nil, err
+ }
+ if resp != "OK" {
+ return nil, fmt.Errorf("command '%s' bad response: %s", cmd, resp)
+ }
+
+ var stats beanstalkdStats
+
+ if err := yaml.Unmarshal(data, &stats); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
+func (c *beanstalkClient) queryListTubes() ([]string, error) {
+ cmd := cmdListTubes
+
+ resp, data, err := c.query(cmd)
+ if err != nil {
+ return nil, err
+ }
+ if resp != "OK" {
+ return nil, fmt.Errorf("command '%s' bad response: %s", cmd, resp)
+ }
+
+ var tubes []string
+
+ if err := yaml.Unmarshal(data, &tubes); err != nil {
+ return nil, err
+ }
+
+ return tubes, nil
+}
+
+func (c *beanstalkClient) queryStatsTube(tubeName string) (*tubeStats, error) {
+ cmd := fmt.Sprintf("%s %s", cmdStatsTube, tubeName)
+
+ resp, data, err := c.query(cmd)
+ if err != nil {
+ return nil, err
+ }
+ if resp == "NOT_FOUND" {
+ return nil, nil
+ }
+ if resp != "OK" {
+ return nil, fmt.Errorf("command '%s' bad response: %s", cmd, resp)
+ }
+
+ var stats tubeStats
+ if err := yaml.Unmarshal(data, &stats); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
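+// query sends a single command and reads the reply line by line: the first line is parsed
+// for the status word and, on "OK", the advertised body size; the body is then accumulated
+// until roughly that many bytes have been read or the line limit is hit.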
+func (c *beanstalkClient) query(command string) (string, []byte, error) {
+ var resp string
+ var length int
+ var body []byte
+ var err error
+
+ c.Debugf("executing command: %s", command)
+
+ const limitReadLines = 1000
+ var num int
+
+ clientErr := c.client.Command(command+"\r\n", func(line []byte) bool {
+ if resp == "" {
+ s := string(line)
+ c.Debugf("command '%s' response: '%s'", command, s)
+
+ resp, length, err = parseResponseLine(s)
+ if err != nil {
+ err = fmt.Errorf("command '%s' line '%s': %v", command, s, err)
+ }
+ return err == nil && resp == "OK"
+ }
+
+ if num++; num >= limitReadLines {
+ err = fmt.Errorf("command '%s': read line limit exceeded (%d)", command, limitReadLines)
+ return false
+ }
+
+ body = append(body, line...)
+ body = append(body, '\n')
+
+ return len(body) < length
+ })
+ if clientErr != nil {
+ return "", nil, fmt.Errorf("command '%s' client error: %v", command, clientErr)
+ }
+ if err != nil {
+ return "", nil, err
+ }
+
+ return resp, body, nil
+}
+
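+// parseResponseLine splits a beanstalkd status line into the status word and, for "OK"
+// replies, the advertised body length in bytes (e.g. "OK 902" yields "OK" and 902).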
+func parseResponseLine(line string) (string, int, error) {
+ parts := strings.Fields(line)
+ if len(parts) == 0 {
+ return "", 0, errors.New("empty response")
+ }
+
+ resp := parts[0]
+
+ if resp != "OK" {
+ return resp, 0, nil
+ }
+
+ if len(parts) < 2 {
+ return "", 0, errors.New("missing bytes count")
+ }
+
+ length, err := strconv.Atoi(parts[1])
+ if err != nil {
+ return "", 0, errors.New("invalid bytes count")
+ }
+
+ return resp, length, nil
+}
diff --git a/src/go/plugin/go.d/modules/beanstalk/collect.go b/src/go/plugin/go.d/modules/beanstalk/collect.go
new file mode 100644
index 000000000..f85b24028
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/collect.go
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package beanstalk
+
+import (
+ "fmt"
+ "slices"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+func (b *Beanstalk) collect() (map[string]int64, error) {
+ if b.conn == nil {
+ conn, err := b.establishConn()
+ if err != nil {
+ return nil, err
+ }
+ b.conn = conn
+ }
+
+ mx := make(map[string]int64)
+
+ if err := b.collectStats(mx); err != nil {
+ b.Cleanup()
+ return nil, err
+ }
+ if err := b.collectTubesStats(mx); err != nil {
+ return mx, err
+ }
+
+ return mx, nil
+}
+
+func (b *Beanstalk) collectStats(mx map[string]int64) error {
+ stats, err := b.conn.queryStats()
+ if err != nil {
+ return err
+ }
+ for k, v := range stm.ToMap(stats) {
+ mx[k] = v
+ }
+ return nil
+}
+
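+// collectTubesStats refreshes the cached tube list at most once per discoverTubesEvery,
+// queries stats-tube for every matching tube, and adds or removes per-tube charts as
+// tubes appear and disappear between collections.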
+func (b *Beanstalk) collectTubesStats(mx map[string]int64) error {
+ now := time.Now()
+
+ if now.Sub(b.lastDiscoverTubesTime) > b.discoverTubesEvery {
+ tubes, err := b.conn.queryListTubes()
+ if err != nil {
+ return err
+ }
+
+ b.Debugf("discovered tubes (%d): %v", len(tubes), tubes)
+ v := slices.DeleteFunc(tubes, func(s string) bool { return !b.tubeSr.MatchString(s) })
+ if len(tubes) != len(v) {
+ b.Debugf("discovered tubes after filtering (%d): %v", len(v), v)
+ }
+
+ b.discoveredTubes = v
+ b.lastDiscoverTubesTime = now
+ }
+
+ seen := make(map[string]bool)
+
+ for i, tube := range b.discoveredTubes {
+ if tube == "" {
+ continue
+ }
+
+ stats, err := b.conn.queryStatsTube(tube)
+ if err != nil {
+ return err
+ }
+
+ if stats == nil {
+ b.Infof("tube '%s' stats object not found (tube does not exist)", tube)
+ b.discoveredTubes[i] = ""
+ continue
+ }
+ if stats.Name == "" {
+ b.Debugf("tube '%s' stats object has an empty name, ignoring it", tube)
+ b.discoveredTubes[i] = ""
+ continue
+ }
+
+ seen[stats.Name] = true
+ if !b.seenTubes[stats.Name] {
+ b.seenTubes[stats.Name] = true
+ b.addTubeCharts(stats.Name)
+ }
+
+ px := fmt.Sprintf("tube_%s_", stats.Name)
+ for k, v := range stm.ToMap(stats) {
+ mx[px+k] = v
+ }
+ }
+
+ for tube := range b.seenTubes {
+ if !seen[tube] {
+ delete(b.seenTubes, tube)
+ b.removeTubeCharts(tube)
+ }
+ }
+
+ return nil
+}
+
+func (b *Beanstalk) establishConn() (beanstalkConn, error) {
+ conn := b.newConn(b.Config, b.Logger)
+
+ if err := conn.connect(); err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
diff --git a/src/go/plugin/go.d/modules/beanstalk/config_schema.json b/src/go/plugin/go.d/modules/beanstalk/config_schema.json
new file mode 100644
index 000000000..aa600ac03
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/config_schema.json
@@ -0,0 +1,54 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Beanstalk collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the Beanstalk service listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:11300"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "tube_selector": {
+ "title": "Tube selector",
+ "description": "Specifies a [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme) for which Beanstalk tubes Netdata will collect statistics. Only tubes whose names match the provided pattern will be included.",
+ "type": "string",
+ "default": "*"
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "tube_selector": {
+ "ui:help": "Leave blank or use `*` to collect data for all tubes."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/beanstalk/init.go b/src/go/plugin/go.d/modules/beanstalk/init.go
new file mode 100644
index 000000000..50916b3a7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/init.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package beanstalk
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+func (b *Beanstalk) validateConfig() error {
+ if b.Address == "" {
+ return errors.New("beanstalk address is required")
+ }
+ return nil
+}
+
+func (b *Beanstalk) initTubeSelector() (matcher.Matcher, error) {
+ if b.TubeSelector == "" {
+ return matcher.TRUE(), nil
+ }
+
+ m, err := matcher.NewSimplePatternsMatcher(b.TubeSelector)
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
diff --git a/src/go/plugin/go.d/modules/beanstalk/integrations/beanstalk.md b/src/go/plugin/go.d/modules/beanstalk/integrations/beanstalk.md
new file mode 100644
index 000000000..c8efd988a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/integrations/beanstalk.md
@@ -0,0 +1,253 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/beanstalk/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/beanstalk/metadata.yaml"
+sidebar_label: "Beanstalk"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Beanstalk
+
+
+<img src="https://netdata.cloud/img/beanstalk.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: beanstalk
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Beanstalk server performance and provides detailed statistics for each tube.
+
+
+Using the [beanstalkd protocol](https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt), it communicates with the Beanstalk daemon to gather essential metrics that help you understand the server's performance and activity.
+Executed commands:
+
+- [stats](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L553).
+- [list-tubes](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L688).
+- [stats-tube](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L497).
+
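+Each command is sent as a single text line; the daemon answers with a status line such as `OK <bytes>` followed by a YAML body. For illustration, an abridged `stats` body looks roughly like this (the values are placeholders):
+
+```yaml
+---
+current-jobs-ready: 0
+current-connections: 2
+total-connections: 72
+uptime: 105881
+```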
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Beanstalk instances running on localhost that are listening on port 11300.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Beanstalk instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| beanstalk.current_jobs | ready, buried, urgent, delayed, reserved | jobs |
+| beanstalk.jobs_rate | created | jobs/s |
+| beanstalk.jobs_timeouts | timeouts | jobs/s |
+| beanstalk.current_tubes | tubes | tubes |
+| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, reserve-with-timeout, touch, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |
+| beanstalk.current_connections | open, producers, workers, waiting | connections |
+| beanstalk.connections_rate | created | connections/s |
+| beanstalk.binlog_records | written, migrated | records/s |
+| beanstalk.cpu_usage | user, system | percent |
+| beanstalk.uptime | uptime | seconds |
+
+### Per tube
+
+Metrics related to Beanstalk tubes. This set of metrics is provided for each tube.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| tube_name | Tube name. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| beanstalk.tube_current_jobs | ready, buried, urgent, delayed, reserved | jobs |
+| beanstalk.tube_jobs_rate | created | jobs/s |
+| beanstalk.tube_commands_rate | delete, pause-tube | commands/s |
+| beanstalk.tube_current_connections | using, waiting, watching | connections |
+| beanstalk.tube_pause_time | since, left | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/beanstalk.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/beanstalk.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | The IP address and port where the Beanstalk service listens for connections. | 127.0.0.1:11300 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
+| tube_selector | Specifies a [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme) for which Beanstalk tubes Netdata will collect statistics. | * | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 127.0.0.1:11300
+
+```
+</details>
+
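+##### Tube filtering
+
+An illustrative configuration that restricts per-tube statistics to tubes matching a [simple pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme). The tube names used in the pattern below are hypothetical.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 127.0.0.1:11300
+    tube_selector: "mail-* payments"
+
+```
+</details>
+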
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:11300
+
+ - name: remote
+ address: 203.0.113.0:11300
+
+```
+</details>
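+
+##### Tube filtering
+
+An illustrative example (not part of the shipped configuration) that uses `tube_selector` to collect statistics only for selected tubes; the tube names here are hypothetical.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    address: 127.0.0.1:11300
+    tube_selector: 'default email-*'
+
+```
+</details>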
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `beanstalk` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m beanstalk
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `beanstalk` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep beanstalk
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep beanstalk /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep beanstalk
+```
+
+
diff --git a/src/go/plugin/go.d/modules/beanstalk/metadata.yaml b/src/go/plugin/go.d/modules/beanstalk/metadata.yaml
new file mode 100644
index 000000000..60aaf77e5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/metadata.yaml
@@ -0,0 +1,255 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-beanstalk
+ plugin_name: go.d.plugin
+ module_name: beanstalk
+ monitored_instance:
+ name: Beanstalk
+ link: https://beanstalkd.github.io/
+ categories:
+ - data-collection.message-brokers
+ icon_filename: "beanstalk.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - beanstalk
+ - beanstalkd
+ - message
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Beanstalk server performance and provides detailed statistics for each tube.
+ method_description: |
+ Using the [beanstalkd protocol](https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt), it communicates with the Beanstalk daemon to gather essential metrics that help understand the server's performance and activity.
+ Executed commands:
+
+ - [stats](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L553).
+ - [list-tubes](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L688).
+ - [stats-tube](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L497).
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Beanstalk instances running on localhost that are listening on port 11300.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/beanstalk.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: The IP address and port where the Beanstalk service listens for connections.
+ default_value: 127.0.0.1:11300
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
+ - name: tube_selector
+ description: "Specifies a [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme) for which Beanstalk tubes Netdata will collect statistics."
+ default_value: "*"
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: Config
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:11300
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:11300
+
+ - name: remote
+ address: 203.0.113.0:11300
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: beanstalk_server_buried_jobs
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf
+ metric: beanstalk.current_jobs
+ info: number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs.
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: beanstalk.current_jobs
+ description: Current Jobs
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: ready
+ - name: buried
+ - name: urgent
+ - name: delayed
+ - name: reserved
+ - name: beanstalk.jobs_rate
+ description: Jobs Rate
+ unit: "jobs/s"
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: beanstalk.jobs_timeouts
+ description: Timed Out Jobs
+ unit: "jobs/s"
+ chart_type: line
+ dimensions:
+ - name: timeouts
+ - name: beanstalk.current_tubes
+ description: Current Tubes
+ unit: "tubes"
+ chart_type: line
+ dimensions:
+ - name: tubes
+ - name: beanstalk.commands_rate
+ description: Commands Rate
+ unit: "commands/s"
+ chart_type: stacked
+ dimensions:
+ - name: put
+ - name: peek
+ - name: peek-ready
+ - name: peek-delayed
+ - name: peek-buried
+ - name: reserve
+ - name: reserve-with-timeout
+ - name: touch
+ - name: use
+ - name: watch
+ - name: ignore
+ - name: delete
+ - name: bury
+ - name: kick
+ - name: stats
+ - name: stats-job
+ - name: stats-tube
+ - name: list-tubes
+ - name: list-tube-used
+ - name: list-tubes-watched
+ - name: pause-tube
+ - name: beanstalk.current_connections
+ description: Current Connections
+ unit: "connections"
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: producers
+ - name: workers
+ - name: waiting
+ - name: beanstalk.connections_rate
+ description: Connections Rate
+ unit: "connections/s"
+ chart_type: area
+ dimensions:
+ - name: created
+ - name: beanstalk.binlog_records
+ description: Binlog Records
+ unit: "records/s"
+ chart_type: line
+ dimensions:
+ - name: written
+ - name: migrated
+ - name: beanstalk.cpu_usage
+              description: CPU Usage
+ unit: "percent"
+ chart_type: stacked
+ dimensions:
+ - name: user
+ - name: system
+ - name: beanstalk.uptime
+              description: Uptime
+ unit: "seconds"
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: tube
+ description: "Metrics related to Beanstalk tubes. This set of metrics is provided for each tube."
+ labels:
+ - name: tube_name
+ description: Tube name.
+ metrics:
+ - name: beanstalk.tube_current_jobs
+ description: Tube Current Jobs
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: ready
+ - name: buried
+ - name: urgent
+ - name: delayed
+ - name: reserved
+ - name: beanstalk.tube_jobs_rate
+ description: Tube Jobs Rate
+ unit: "jobs/s"
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: beanstalk.tube_commands_rate
+ description: Tube Commands
+ unit: "commands/s"
+ chart_type: stacked
+ dimensions:
+ - name: delete
+ - name: pause-tube
+ - name: beanstalk.tube_current_connections
+ description: Tube Current Connections
+ unit: "connections"
+ chart_type: stacked
+ dimensions:
+ - name: using
+ - name: waiting
+ - name: watching
+ - name: beanstalk.tube_pause_time
+ description: Tube Pause Time
+ unit: "seconds"
+ chart_type: line
+ dimensions:
+ - name: since
+ - name: left
diff --git a/src/go/plugin/go.d/modules/beanstalk/testdata/config.json b/src/go/plugin/go.d/modules/beanstalk/testdata/config.json
new file mode 100644
index 000000000..c8da279a8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "tube_selector": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/beanstalk/testdata/config.yaml b/src/go/plugin/go.d/modules/beanstalk/testdata/config.yaml
new file mode 100644
index 000000000..7fe212a96
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+tube_selector: "ok"
diff --git a/src/go/plugin/go.d/modules/beanstalk/testdata/list-tubes.txt b/src/go/plugin/go.d/modules/beanstalk/testdata/list-tubes.txt
new file mode 100644
index 000000000..4fec61ef1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/testdata/list-tubes.txt
@@ -0,0 +1,3 @@
+OK 14
+---
+- default
diff --git a/src/go/plugin/go.d/modules/beanstalk/testdata/stats-tube-default.txt b/src/go/plugin/go.d/modules/beanstalk/testdata/stats-tube-default.txt
new file mode 100644
index 000000000..888ff3da4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/testdata/stats-tube-default.txt
@@ -0,0 +1,16 @@
+OK 265
+---
+name: default
+current-jobs-urgent: 0
+current-jobs-ready: 0
+current-jobs-reserved: 0
+current-jobs-delayed: 0
+current-jobs-buried: 0
+total-jobs: 0
+current-using: 2
+current-watching: 2
+current-waiting: 0
+cmd-delete: 0
+cmd-pause-tube: 0
+pause: 0
+pause-time-left: 0
diff --git a/src/go/plugin/go.d/modules/beanstalk/testdata/stats.txt b/src/go/plugin/go.d/modules/beanstalk/testdata/stats.txt
new file mode 100644
index 000000000..69b06e4c5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/beanstalk/testdata/stats.txt
@@ -0,0 +1,50 @@
+OK 913
+---
+current-jobs-urgent: 0
+current-jobs-ready: 0
+current-jobs-reserved: 0
+current-jobs-delayed: 0
+current-jobs-buried: 0
+cmd-put: 0
+cmd-peek: 0
+cmd-peek-ready: 0
+cmd-peek-delayed: 0
+cmd-peek-buried: 0
+cmd-reserve: 0
+cmd-reserve-with-timeout: 0
+cmd-delete: 0
+cmd-release: 0
+cmd-use: 0
+cmd-watch: 0
+cmd-ignore: 0
+cmd-bury: 0
+cmd-kick: 0
+cmd-touch: 0
+cmd-stats: 23619
+cmd-stats-job: 0
+cmd-stats-tube: 18964
+cmd-list-tubes: 317
+cmd-list-tube-used: 0
+cmd-list-tubes-watched: 0
+cmd-pause-tube: 0
+job-timeouts: 0
+total-jobs: 0
+max-job-size: 65535
+current-tubes: 1
+current-connections: 2
+current-producers: 0
+current-workers: 0
+current-waiting: 0
+total-connections: 72
+pid: 1
+version: 1.10
+rusage-utime: 1.602079
+rusage-stime: 3.922748
+uptime: 105881
+binlog-oldest-index: 0
+binlog-current-index: 0
+binlog-records-migrated: 0
+binlog-records-written: 0
+binlog-max-size: 10485760
+id: 5a0667a881cd05e0
+hostname: c6796814b94b
diff --git a/src/go/plugin/go.d/modules/bind/README.md b/src/go/plugin/go.d/modules/bind/README.md
new file mode 100644
index 000000000..90906ac21
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/README.md
@@ -0,0 +1,117 @@
+<!--
+title: "Bind9 monitoring with Netdata"
+description: "Monitor the health and performance of Bind9 DNS servers with zero configuration, per-second metric granularity, and interactive visualizations."
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/bind/README.md"
+sidebar_label: "Bind9"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Integrations/Monitor/Webapps"
+-->
+
+# Bind9 collector
+
+[`Bind9`](https://www.isc.org/bind/) (or named) is a very flexible, full-featured DNS system.
+
+This module will monitor one or more `Bind9` servers, depending on your configuration.
+
+## Requirements
+
+- `bind` version 9.9+ with configured `statistics-channels`
+
+For detailed information on how to get your bind installation ready, please refer to the following articles:
+
+- [bind statistics channel developer comments](http://jpmens.net/2013/03/18/json-in-bind-9-s-statistics-server/)
+- [bind documentation](https://ftp.isc.org/isc/bind/9.10.3/doc/arm/Bv9ARM.ch06.html#statistics)
+- [bind Knowledge Base article AA-01123](https://kb.isc.org/article/AA-01123/0).
+
+Normally, you will need something like this in your `named.conf.options`:
+
+```
+statistics-channels {
+ inet 127.0.0.1 port 8653 allow { 127.0.0.1; };
+ inet ::1 port 8653 allow { ::1; };
+};
+```
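+
+After reloading `named`, you can quickly check that the statistics channel responds (a sketch assuming the port from the example above):
+
+```bash
+curl http://127.0.0.1:8653/json/v1/server
+```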
+
+## Charts
+
+It produces the following charts:
+
+- Global Received Requests by IP version (IPv4, IPv6) in `requests/s`
+- Global Successful Queries in `queries/s`
+- Global Recursive Clients in `clients`
+- Global Queries by IP Protocol (TCP, UDP) in `queries/s`
+- Global Queries Analysis in `queries/s`
+- Global Received Updates in `updates/s`
+- Global Query Failures in `failures/s`
+- Global Query Failures Analysis in `failures/s`
+- Global Server Statistics in `operations/s`
+- Global Incoming Requests by OpCode in `requests/s`
+- Global Incoming Requests by Query Type in `requests/s`
+
+Per View Statistics (the following set will be added for each bind view):
+
+- Resolver Active Queries in `queries`
+- Resolver Statistics in `operations/s`
+- Resolver Round Trip Time in `queries/s`
+- Resolver Requests by Query Type in `requests/s`
+- Resolver Cache Hits in `operations/s`
+
+## Configuration
+
+Edit the `go.d/bind.conf` configuration file using `edit-config` from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config go.d/bind.conf
+```
+
+Only the `url` option is required. Here is an example for several servers:
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8653/json/v1
+
+ - name: local
+ url: http://127.0.0.1:8653/xml/v3
+
+ - name: remote
+ url: http://203.0.113.10:8653/xml/v3
+
+ - name: local_with_views
+ url: http://127.0.0.1:8653/json/v1
+ permit_view: '!_* *'
+```
+
+View filter syntax: [simple patterns](https://docs.netdata.cloud/libnetdata/simple_pattern/).
+
+For all available options, please see the module
+[configuration file](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d/bind.conf).
+
+## Troubleshooting
+
+To troubleshoot issues with the `bind` collector, run the `go.d.plugin` with the debug option enabled. The output should
+give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m bind
+ ```
+
+
diff --git a/src/go/plugin/go.d/modules/bind/bind.go b/src/go/plugin/go.d/modules/bind/bind.go
new file mode 100644
index 000000000..6087f6f74
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/bind.go
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package bind
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("bind", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Bind {
+ return &Bind{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8653/json/v1",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: &Charts{},
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ PermitView string `yaml:"permit_view,omitempty" json:"permit_view"`
+}
+
+type (
+ Bind struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ httpClient *http.Client
+ bindAPIClient
+
+ permitView matcher.Matcher
+ }
+
+ bindAPIClient interface {
+ serverStats() (*serverStats, error)
+ }
+)
+
+func (b *Bind) Configuration() any {
+ return b.Config
+}
+
+func (b *Bind) Init() error {
+ if err := b.validateConfig(); err != nil {
+ b.Errorf("config verification: %v", err)
+ return err
+ }
+
+ pvm, err := b.initPermitViewMatcher()
+ if err != nil {
+ b.Error(err)
+ return err
+ }
+ if pvm != nil {
+ b.permitView = pvm
+ }
+
+ httpClient, err := web.NewHTTPClient(b.Client)
+ if err != nil {
+ b.Errorf("creating http client : %v", err)
+ return err
+ }
+ b.httpClient = httpClient
+
+ bindClient, err := b.initBindApiClient(httpClient)
+ if err != nil {
+ b.Error(err)
+ return err
+ }
+ b.bindAPIClient = bindClient
+
+ return nil
+}
+
+func (b *Bind) Check() error {
+ mx, err := b.collect()
+ if err != nil {
+ b.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+
+ }
+ return nil
+}
+
+func (b *Bind) Charts() *Charts {
+ return b.charts
+}
+
+func (b *Bind) Collect() map[string]int64 {
+ mx, err := b.collect()
+
+ if err != nil {
+ b.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (b *Bind) Cleanup() {
+ if b.httpClient != nil {
+ b.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/bind/bind_test.go b/src/go/plugin/go.d/modules/bind/bind_test.go
new file mode 100644
index 000000000..d1ce5c2b6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/bind_test.go
@@ -0,0 +1,532 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package bind
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataServerStatsJSON, _ = os.ReadFile("testdata/query-server.json")
+ dataServerStatsXML, _ = os.ReadFile("testdata/query-server.xml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataServerStatsJSON": dataServerStatsJSON,
+ "dataServerStatsXML": dataServerStatsXML,
+ } {
+ require.NotNil(t, data, name)
+
+ }
+}
+
+func TestBind_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Bind{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestBind_Cleanup(t *testing.T) { New().Cleanup() }
+
+func TestBind_Init(t *testing.T) {
+ // OK
+ job := New()
+ assert.NoError(t, job.Init())
+ assert.NotNil(t, job.bindAPIClient)
+
+ //NG
+ job = New()
+ job.URL = ""
+ assert.Error(t, job.Init())
+}
+
+func TestBind_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/json/v1/server" {
+ _, _ = w.Write(dataServerStatsJSON)
+ }
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/json/v1"
+
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+}
+
+func TestBind_CheckNG(t *testing.T) {
+ job := New()
+
+ job.URL = "http://127.0.0.1:38001/xml/v3"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestBind_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestBind_CollectJSON(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/json/v1/server" {
+ _, _ = w.Write(dataServerStatsJSON)
+ }
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/json/v1"
+ job.PermitView = "*"
+
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "_default_Queryv4": 4503685324,
+ "_default_NSEC": 53193,
+ "_default_NSEC3PARAM": 993,
+ "_default_ANY": 5149356,
+ "QryFORMERR": 8,
+ "CookieMatch": 125065,
+ "A6": 538255,
+ "MAILA": 44,
+ "ExpireOpt": 195,
+ "CNAME": 534171,
+ "TYPE115": 285,
+ "_default_RESERVED0": 19,
+ "_default_ClientCookieOut": 3790767469,
+ "_default_CookieClientOk": 297765763,
+ "QryFailure": 225786697,
+ "TYPE127": 1,
+ "_default_GlueFetchv4": 110619519,
+ "_default_Queryv6": 291939086,
+ "UPDATE": 18836,
+ "RESERVED0": 13705,
+ "_default_CacheHits": 405229520524,
+ "Requestv6": 155,
+ "QryTCP": 4226324,
+ "RESERVED15": 0,
+ "QUERY": 36766967932,
+ "EUI64": 627,
+ "_default_NXDOMAIN": 1245990908,
+ "_default_REFUSED": 106664780,
+ "_default_EUI64": 2087,
+ "QrySERVFAIL": 219515158,
+ "QryRecursion": 3666523564,
+ "MX": 1483690,
+ "DNSKEY": 143483,
+ "_default_TYPE115": 112,
+ "_default_Others": 813,
+ "_default_CacheMisses": 127371,
+ "RateDropped": 219,
+ "NAPTR": 109959,
+ "NSEC": 81,
+ "AAAA": 3304112238,
+ "_default_QryRTT500": 2071767970,
+ "_default_TYPE127": 2,
+ "_default_A6": 556692,
+ "QryAuthAns": 440508475,
+ "RecursClients": 74,
+ "XfrRej": 97,
+ "LOC": 52,
+ "CookieIn": 1217208,
+ "RRSIG": 25192,
+ "_default_LOC": 21,
+ "ReqBadEDNSVer": 450,
+ "MG": 4,
+ "_default_GlueFetchv6": 121100044,
+ "_default_HINFO": 1,
+ "IQUERY": 199,
+ "_default_BadCookieRcode": 14779,
+ "AuthQryRej": 148023,
+ "QrySuccess": 28766465065,
+ "SRV": 27637747,
+ "TYPE223": 2,
+ "CookieNew": 1058677,
+ "_default_QryRTT10": 628295,
+ "_default_ServerCookieOut": 364811250,
+ "RESERVED11": 3,
+ "_default_CookieIn": 298084581,
+ "_default_DS": 973892,
+ "_bind_CacheHits": 0,
+ "STATUS": 35546,
+ "TLSA": 297,
+ "_default_SERVFAIL": 6523360,
+ "_default_GlueFetchv4Fail": 3949012,
+ "_default_NULL": 3548,
+ "UpdateRej": 15661,
+ "RESERVED10": 5,
+ "_default_EDNS0Fail": 3982564,
+ "_default_DLV": 20418,
+ "ANY": 298451299,
+ "_default_GlueFetchv6Fail": 91728801,
+ "_default_RP": 134,
+ "_default_AAAA": 817525939,
+ "X25": 2,
+ "NS": 5537956,
+ "_default_NumFetch": 100,
+ "_default_DNSKEY": 182224,
+ "QryUDP": 36455909449,
+ "QryReferral": 1152155,
+ "QryNXDOMAIN": 5902446156,
+ "TruncatedResp": 25882799,
+ "DNAME": 1,
+ "DLV": 37676,
+ "_default_FORMERR": 3827518,
+ "_default_RRSIG": 191628,
+ "RecQryRej": 225638588,
+ "QryDropped": 52141050,
+ "Response": 36426730232,
+ "RESERVED14": 0,
+ "_default_SPF": 16521,
+ "_default_DNAME": 6,
+ "Requestv4": 36767496594,
+ "CookieNoMatch": 33466,
+ "RESERVED9": 0,
+ "_default_QryRTT800": 2709649,
+ "_default_QryRTT1600": 455315,
+ "_default_OtherError": 1426431,
+ "_default_MX": 1575795,
+ "QryNoauthAns": 35538538399,
+ "NSIDOpt": 81,
+ "ReqTCP": 4234792,
+ "SOA": 3860272,
+ "RESERVED8": 0,
+ "RESERVED13": 8,
+ "MAILB": 42,
+ "AXFR": 105,
+ "QryNxrrset": 1308983498,
+ "SPF": 2872,
+ "PTR": 693769261,
+ "_default_Responsev4": 4169576370,
+ "_default_QryRTT100": 2086168894,
+ "_default_Retry": 783763680,
+ "_default_SRV": 3848459,
+ "QryDuplicate": 288617636,
+ "ECSOpt": 8742938,
+ "A": 32327037206,
+ "DS": 1687895,
+ "RESERVED12": 1,
+ "_default_QryRTT1600+": 27639,
+ "_default_TXT": 43595113,
+ "_default_CDS": 251,
+ "RESERVED6": 7401,
+ "RESERVED3": 2,
+ "_default_Truncated": 14015078,
+ "_default_NextItem": 1788902,
+ "_default_Responsev6": 151,
+ "_default_QueryTimeout": 335575100,
+ "_default_A": 3673673090,
+ "ReqEdns0": 532104182,
+ "OtherOpt": 3425542,
+ "NULL": 3604,
+ "HINFO": 9,
+ "_default_SOA": 1326766,
+ "_default_NAPTR": 30685,
+ "_default_PTR": 208067284,
+ "_default_CNAME": 38153754,
+ "RespEDNS0": 527991455,
+ "RESERVED7": 0,
+ "TXT": 100045556,
+ "_default_Lame": 1975334,
+ "_bind_CacheMisses": 509,
+ "IXFR": 33,
+ "_default_NS": 675609,
+ "_default_AFSDB": 5,
+ "NOTIFY": 390443,
+ "Others": 74006,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+ assert.Len(t, *job.charts, 17)
+}
+
+func TestBind_CollectXML3(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path == "/xml/v3/server" {
+ _, _ = w.Write(dataServerStatsXML)
+ }
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.PermitView = "*"
+ job.URL = ts.URL + "/xml/v3"
+
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "_bind_CookieClientOk": 0,
+ "_bind_ValNegOk": 0,
+ "_bind_GlueFetchv4Fail": 0,
+ "_bind_ValFail": 0,
+ "RateSlipped": 0,
+ "_default_ValFail": 0,
+ "_default_TYPE127": 2,
+ "TLSA": 299,
+ "_default_FORMERR": 3831796,
+ "_default_ValNegOk": 0,
+ "_default_RRSIG": 191877,
+ "_default_CacheHits": 405816752908,
+ "CookieBadTime": 0,
+ "RESERVED14": 0,
+ "_default_SPF": 16563,
+ "RESERVED3": 2,
+ "NS": 5545011,
+ "QrySERVFAIL": 219790234,
+ "UPDATE": 18839,
+ "_default_NAPTR": 30706,
+ "RESERVED13": 8,
+ "_default_CookieIn": 298556974,
+ "_bind_Retry": 0,
+ "_default_SOA": 1327966,
+ "_bind_Truncated": 0,
+ "RESERVED6": 7401,
+ "_default_CookieClientOk": 298237641,
+ "_default_QueryTimeout": 336165169,
+ "SPF": 2887,
+ "_default_DNAME": 6,
+ "_bind_Lame": 0,
+ "QryUDP": 36511992002,
+ "NOTIFY": 390521,
+ "DNAME": 1,
+ "DS": 1688561,
+ "_default_OtherError": 1464741,
+ "_default_Retry": 784916992,
+ "_default_TXT": 43650696,
+ "QryBADCOOKIE": 0,
+ "RespEDNS0": 528451140,
+ "TXT": 100195931,
+ "OtherOpt": 3431439,
+ "_default_HINFO": 1,
+ "RESERVED0": 13705,
+ "_bind_CacheHits": 0,
+ "ReqTCP": 4241537,
+ "RespTSIG": 0,
+ "RESERVED11": 3,
+ "_default_QryRTT100": 2087797539,
+ "_default_REFUSED": 106782830,
+ "_bind_SERVFAIL": 0,
+ "X25": 2,
+ "_default_RP": 134,
+ "QryDuplicate": 289518897,
+ "CookieNoMatch": 34013,
+ "_default_BadCookieRcode": 15399,
+ "_default_CacheMisses": 127371,
+ "_bind_Mismatch": 0,
+ "_default_ServerCookieOut": 365308714,
+ "_bind_QryRTT500": 0,
+ "RPZRewrites": 0,
+ "A": 32377004350,
+ "_default_NextItem": 1790135,
+ "_default_MX": 1576150,
+ "_bind_REFUSED": 0,
+ "_bind_ZoneQuota": 0,
+ "_default_ServerQuota": 0,
+ "_default_ANY": 5149916,
+ "_default_EUI64": 2087,
+ "_default_QueryCurUDP": 0,
+ "RESERVED7": 0,
+ "IXFR": 33,
+ "_default_Queryv4": 4509791268,
+ "_default_GlueFetchv4": 110749701,
+ "_default_TYPE115": 112,
+ "_bind_QueryAbort": 0,
+ "UpdateReqFwd": 0,
+ "_default_NSEC3PARAM": 995,
+ "_bind_NextItem": 0,
+ "RecursClients": 64,
+ "QryReferral": 1152178,
+ "QryFORMERR": 8,
+ "CookieIn": 1220424,
+ "NSIDOpt": 81,
+ "MAILA": 44,
+ "TYPE223": 2,
+ "RRSIG": 25193,
+ "UpdateBadPrereq": 0,
+ "UpdateRej": 15661,
+ "QryAuthAns": 440885288,
+ "_default_PTR": 208337408,
+ "_default_Others": 813,
+ "_default_NS": 676773,
+ "_bind_GlueFetchv4": 0,
+ "QryNoauthAns": 35593104164,
+ "QryRecursion": 3671792792,
+ "_default_ClientCookieOut": 3795901994,
+ "_bind_BadEDNSVersion": 0,
+ "ReqEdns0": 532586114,
+ "RateDropped": 230,
+ "_default_ValOk": 0,
+ "CNAME": 535141,
+ "AuthQryRej": 148159,
+ "RESERVED10": 5,
+ "_default_QueryCurTCP": 0,
+ "_bind_Queryv4": 0,
+ "_bind_CacheMisses": 509,
+ "ExpireOpt": 195,
+ "XfrRej": 97,
+ "_default_DNSKEY": 182399,
+ "RecQryRej": 225832466,
+ "NSEC": 81,
+ "_default_Responsev4": 4175093103,
+ "_bind_ValOk": 0,
+ "_bind_QueryCurTCP": 0,
+ "Requestv4": 36823884979,
+ "DNSKEY": 143600,
+ "_default_LOC": 21,
+ "UpdateRespFwd": 0,
+ "AXFR": 105,
+ "_bind_CookieIn": 0,
+ "_default_QryRTT1600": 455849,
+ "_bind_BadCookieRcode": 0,
+ "QryNXDOMAIN": 5911582433,
+ "ReqSIG0": 0,
+ "QUERY": 36823356081,
+ "NULL": 3606,
+ "_default_Lame": 1979599,
+ "_default_DS": 974240,
+ "SRV": 27709732,
+ "_bind_QuerySockFail": 0,
+ "MG": 4,
+ "_default_QryRTT800": 2712733,
+ "_bind_QryRTT1600+": 0,
+ "DNS64": 0,
+ "_default_Truncated": 14028716,
+ "_default_QryRTT10": 629577,
+ "_default_SERVFAIL": 6533579,
+ "_default_AFSDB": 5,
+ "STATUS": 35585,
+ "Response": 36482142477,
+ "KeyTagOpt": 0,
+ "_default_Mismatch": 0,
+ "Requestv6": 156,
+ "LOC": 52,
+ "_bind_NXDOMAIN": 0,
+ "PTR": 694347710,
+ "_default_NSEC": 53712,
+ "_bind_QryRTT100": 0,
+ "RESERVED8": 0,
+ "DLV": 37712,
+ "HINFO": 9,
+ "_default_AAAA": 818803359,
+ "QryNXRedirRLookup": 0,
+ "TYPE127": 1,
+ "_default_EDNS0Fail": 3987571,
+ "_default_CDS": 251,
+ "_bind_ServerCookieOut": 0,
+ "_bind_QueryCurUDP": 0,
+ "_bind_GlueFetchv6Fail": 0,
+ "UpdateFail": 0,
+ "_default_ZoneQuota": 0,
+ "_default_QuerySockFail": 0,
+ "_default_GlueFetchv6Fail": 91852240,
+ "RespSIG0": 0,
+ "_default_GlueFetchv4Fail": 3964627,
+ "_bind_Responsev6": 0,
+ "_default_GlueFetchv6": 121268854,
+ "_default_Queryv6": 292282376,
+ "TruncatedResp": 25899017,
+ "ReqTSIG": 0,
+ "_default_BadEDNSVersion": 0,
+ "_bind_NumFetch": 0,
+ "RESERVED12": 1,
+ "_default_Responsev6": 152,
+ "_default_SRV": 3855156,
+ "ANY": 298567781,
+ "_default_CNAME": 38213966,
+ "_bind_ClientCookieOut": 0,
+ "NAPTR": 109998,
+ "_default_QryRTT500": 2075608518,
+ "_default_A6": 558874,
+ "_bind_OtherError": 0,
+ "CookieMatch": 125340,
+ "_default_QryRTT1600+": 27681,
+ "_default_DLV": 20468,
+ "_default_NULL": 3554,
+ "_bind_Queryv6": 0,
+ "_bind_QueryTimeout": 0,
+ "_bind_ValAttempt": 0,
+ "RESERVED9": 0,
+ "A6": 539773,
+ "MX": 1484497,
+ "QrySuccess": 28810069822,
+ "XfrReqDone": 0,
+ "RESERVED15": 0,
+ "MAILB": 42,
+ "Others": 74007,
+ "_bind_ServerQuota": 0,
+ "_bind_EDNS0Fail": 0,
+ "QryNxrrset": 1311185019,
+ "QryFailure": 225980711,
+ "ReqBadSIG": 0,
+ "UpdateFwdFail": 0,
+ "ECSOpt": 8743959,
+ "QryDropped": 52215943,
+ "EUI64": 627,
+ "_default_ValAttempt": 0,
+ "_default_A": 3678445415,
+ "_bind_QryRTT800": 0,
+ "_default_NXDOMAIN": 1247746765,
+ "_default_RESERVED0": 19,
+ "_default_NumFetch": 62,
+ "_bind_Responsev4": 0,
+ "_bind_QryRTT1600": 0,
+ "CookieNew": 1061071,
+ "ReqBadEDNSVer": 450,
+ "TYPE115": 285,
+ "_bind_FORMERR": 0,
+ "SOA": 3863889,
+ "_bind_QryRTT10": 0,
+ "CookieBadSize": 0,
+ "_bind_GlueFetchv6": 0,
+ "QryNXRedir": 0,
+ "AAAA": 3309600766,
+ "_default_QueryAbort": 0,
+ "QryTCP": 4233061,
+ "UpdateDone": 0,
+ "IQUERY": 199,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+ assert.Len(t, *job.charts, 20)
+}
+
+func TestBind_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { _, _ = w.Write([]byte("hello and goodbye")) }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/json/v1"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestBind_404(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(404) }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/json/v1"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
diff --git a/src/go/plugin/go.d/modules/bind/charts.go b/src/go/plugin/go.d/modules/bind/charts.go
new file mode 100644
index 000000000..2fa868daf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/charts.go
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package bind
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+type (
+ // Charts is an alias for module.Charts.
+ Charts = module.Charts
+ // Chart is an alias for module.Chart.
+ Chart = module.Chart
+ // Dims is an alias for module.Dims.
+ Dims = module.Dims
+ // Dim is an alias for module.Dim.
+ Dim = module.Dim
+)
+
+const (
+ // TODO: add to orchestrator module
+ basePriority = 70000
+
+ keyReceivedRequests = "received_requests"
+ keyQueriesSuccess = "queries_success"
+ keyRecursiveClients = "recursive_clients"
+ keyProtocolsQueries = "protocols_queries"
+ keyQueriesAnalysis = "queries_analysis"
+ keyReceivedUpdates = "received_updates"
+ keyQueryFailures = "query_failures"
+ keyQueryFailuresDetail = "query_failures_detail"
+ keyNSStats = "nsstats"
+ keyInOpCodes = "in_opcodes"
+ keyInQTypes = "in_qtypes"
+ keyInSockStats = "in_sockstats"
+
+ keyResolverStats = "view_resolver_stats_%s"
+ keyResolverRTT = "view_resolver_rtt_%s"
+ keyResolverInQTypes = "view_resolver_qtypes_%s"
+ keyResolverCacheHits = "view_resolver_cachehits_%s"
+ keyResolverNumFetch = "view_resolver_numfetch_%s"
+)
+
+var charts = map[string]Chart{
+ keyRecursiveClients: {
+ ID: keyRecursiveClients,
+ Title: "Global Recursive Clients",
+ Units: "clients",
+ Fam: "clients",
+ Ctx: "bind.recursive_clients",
+ Priority: basePriority + 1,
+ },
+ keyReceivedRequests: {
+ ID: keyReceivedRequests,
+ Title: "Global Received Requests by IP version",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "bind.requests",
+ Type: module.Stacked,
+ Priority: basePriority + 2,
+ },
+ keyQueriesSuccess: {
+ ID: keyQueriesSuccess,
+ Title: "Global Successful Queries",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "bind.queries_success",
+ Priority: basePriority + 3,
+ },
+ keyProtocolsQueries: {
+ ID: keyProtocolsQueries,
+ Title: "Global Queries by IP Protocol",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "bind.protocol_queries",
+ Type: module.Stacked,
+ Priority: basePriority + 4,
+ },
+ keyQueriesAnalysis: {
+ ID: keyQueriesAnalysis,
+ Title: "Global Queries Analysis",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "bind.global_queries",
+ Type: module.Stacked,
+ Priority: basePriority + 5,
+ },
+ keyReceivedUpdates: {
+ ID: keyReceivedUpdates,
+ Title: "Global Received Updates",
+ Units: "updates/s",
+ Fam: "updates",
+ Ctx: "bind.global_updates",
+ Type: module.Stacked,
+ Priority: basePriority + 6,
+ },
+ keyQueryFailures: {
+ ID: keyQueryFailures,
+ Title: "Global Query Failures",
+ Units: "failures/s",
+ Fam: "failures",
+ Ctx: "bind.global_failures",
+ Priority: basePriority + 7,
+ },
+ keyQueryFailuresDetail: {
+ ID: keyQueryFailuresDetail,
+ Title: "Global Query Failures Analysis",
+ Units: "failures/s",
+ Fam: "failures",
+ Ctx: "bind.global_failures_detail",
+ Type: module.Stacked,
+ Priority: basePriority + 8,
+ },
+ keyNSStats: {
+ ID: keyNSStats,
+ Title: "Global Server Statistics",
+ Units: "operations/s",
+ Fam: "other",
+ Ctx: "bind.nsstats",
+ Priority: basePriority + 9,
+ },
+ keyInOpCodes: {
+ ID: keyInOpCodes,
+ Title: "Incoming Requests by OpCode",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "bind.in_opcodes",
+ Type: module.Stacked,
+ Priority: basePriority + 10,
+ },
+ keyInQTypes: {
+ ID: keyInQTypes,
+ Title: "Incoming Requests by Query Type",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "bind.in_qtypes",
+ Type: module.Stacked,
+ Priority: basePriority + 11,
+ },
+ keyInSockStats: {
+ ID: keyInSockStats,
+ Title: "Socket Statistics",
+ Units: "operations/s",
+ Fam: "sockets",
+ Ctx: "bind.in_sockstats",
+ Priority: basePriority + 12,
+ },
+
+ keyResolverRTT: {
+ ID: keyResolverRTT,
+ Title: "Resolver Round Trip Time",
+ Units: "queries/s",
+ Fam: "view %s",
+ Ctx: "bind.resolver_rtt",
+ Type: module.Stacked,
+ Priority: basePriority + 22,
+ },
+ keyResolverStats: {
+ ID: keyResolverStats,
+ Title: "Resolver Statistics",
+ Units: "operations/s",
+ Fam: "view %s",
+ Ctx: "bind.resolver_stats",
+ Priority: basePriority + 23,
+ },
+ keyResolverInQTypes: {
+ ID: keyResolverInQTypes,
+ Title: "Resolver Requests by Query Type",
+ Units: "requests/s",
+ Fam: "view %s",
+ Ctx: "bind.resolver_qtypes",
+ Type: module.Stacked,
+ Priority: basePriority + 24,
+ },
+ keyResolverNumFetch: {
+ ID: keyResolverNumFetch,
+ Title: "Resolver Active Queries",
+ Units: "queries",
+ Fam: "view %s",
+ Ctx: "bind.resolver_active_queries",
+ Priority: basePriority + 25,
+ },
+ keyResolverCacheHits: {
+ ID: keyResolverCacheHits,
+ Title: "Resolver Cache Hits",
+ Units: "operations/s",
+ Fam: "view %s",
+ Ctx: "bind.resolver_cachehits",
+ Type: module.Area,
+ Priority: basePriority + 26,
+ Dims: Dims{
+ {ID: "%s_CacheHits", Name: "hits", Algo: module.Incremental},
+ {ID: "%s_CacheMisses", Name: "misses", Algo: module.Incremental, Mul: -1},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/bind/collect.go b/src/go/plugin/go.d/modules/bind/collect.go
new file mode 100644
index 000000000..4f38f3909
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/collect.go
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package bind
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (b *Bind) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ s, err := b.serverStats()
+ if err != nil {
+ return nil, err
+ }
+ b.collectServerStats(mx, s)
+
+ return mx, nil
+}
+
+func (b *Bind) collectServerStats(metrics map[string]int64, stats *serverStats) {
+ var chart *Chart
+
+ for k, v := range stats.NSStats {
+ var (
+ algo = module.Incremental
+ dimName = k
+ chartID string
+ )
+ switch {
+ default:
+ continue
+ case k == "RecursClients":
+ dimName = "clients"
+ chartID = keyRecursiveClients
+ algo = module.Absolute
+ case k == "Requestv4":
+ dimName = "IPv4"
+ chartID = keyReceivedRequests
+ case k == "Requestv6":
+ dimName = "IPv6"
+ chartID = keyReceivedRequests
+ case k == "QryFailure":
+ dimName = "failures"
+ chartID = keyQueryFailures
+ case k == "QryUDP":
+ dimName = "UDP"
+ chartID = keyProtocolsQueries
+ case k == "QryTCP":
+ dimName = "TCP"
+ chartID = keyProtocolsQueries
+ case k == "QrySuccess":
+ dimName = "queries"
+ chartID = keyQueriesSuccess
+ case strings.HasSuffix(k, "QryRej"):
+ chartID = keyQueryFailuresDetail
+ case strings.HasPrefix(k, "Qry"):
+ chartID = keyQueriesAnalysis
+ case strings.HasPrefix(k, "Update"):
+ chartID = keyReceivedUpdates
+ }
+
+ if !b.charts.Has(chartID) {
+ _ = b.charts.Add(charts[chartID].Copy())
+ }
+
+ chart = b.charts.Get(chartID)
+
+ if !chart.HasDim(k) {
+ _ = chart.AddDim(&Dim{ID: k, Name: dimName, Algo: algo})
+ chart.MarkNotCreated()
+ }
+
+ delete(stats.NSStats, k)
+ metrics[k] = v
+ }
+
+ for _, v := range []struct {
+ item map[string]int64
+ chartID string
+ }{
+ {item: stats.NSStats, chartID: keyNSStats},
+ {item: stats.OpCodes, chartID: keyInOpCodes},
+ {item: stats.QTypes, chartID: keyInQTypes},
+ {item: stats.SockStats, chartID: keyInSockStats},
+ } {
+ if len(v.item) == 0 {
+ continue
+ }
+
+ if !b.charts.Has(v.chartID) {
+ _ = b.charts.Add(charts[v.chartID].Copy())
+ }
+
+ chart = b.charts.Get(v.chartID)
+
+ for key, val := range v.item {
+ if !chart.HasDim(key) {
+ _ = chart.AddDim(&Dim{ID: key, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+
+ metrics[key] = val
+ }
+ }
+
+ if !(b.permitView != nil && len(stats.Views) > 0) {
+ return
+ }
+
+ for name, view := range stats.Views {
+ if !b.permitView.MatchString(name) {
+ continue
+ }
+ r := view.Resolver
+
+ delete(r.Stats, "BucketSize")
+
+ for key, val := range r.Stats {
+ var (
+ algo = module.Incremental
+ dimName = key
+ chartKey string
+ )
+
+ switch {
+ default:
+ chartKey = keyResolverStats
+ case key == "NumFetch":
+ chartKey = keyResolverNumFetch
+ dimName = "queries"
+ algo = module.Absolute
+ case strings.HasPrefix(key, "QryRTT"):
+ // TODO: not ordered
+ chartKey = keyResolverRTT
+ }
+
+ chartID := fmt.Sprintf(chartKey, name)
+
+ if !b.charts.Has(chartID) {
+ chart = charts[chartKey].Copy()
+ chart.ID = chartID
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ _ = b.charts.Add(chart)
+ }
+
+ chart = b.charts.Get(chartID)
+ dimID := fmt.Sprintf("%s_%s", name, key)
+
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: dimName, Algo: algo})
+ chart.MarkNotCreated()
+ }
+
+ metrics[dimID] = val
+ }
+
+ if len(r.QTypes) > 0 {
+ chartID := fmt.Sprintf(keyResolverInQTypes, name)
+
+ if !b.charts.Has(chartID) {
+ chart = charts[keyResolverInQTypes].Copy()
+ chart.ID = chartID
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ _ = b.charts.Add(chart)
+ }
+
+ chart = b.charts.Get(chartID)
+
+ for key, val := range r.QTypes {
+ dimID := fmt.Sprintf("%s_%s", name, key)
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: key, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ metrics[dimID] = val
+ }
+ }
+
+ if len(r.CacheStats) > 0 {
+ chartID := fmt.Sprintf(keyResolverCacheHits, name)
+
+ if !b.charts.Has(chartID) {
+ chart = charts[keyResolverCacheHits].Copy()
+ chart.ID = chartID
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ _ = b.charts.Add(chart)
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ metrics[name+"_CacheHits"] = r.CacheStats["CacheHits"]
+ metrics[name+"_CacheMisses"] = r.CacheStats["CacheMisses"]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/bind/config_schema.json b/src/go/plugin/go.d/modules/bind/config_schema.json
new file mode 100644
index 000000000..29bb739ea
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Bind collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Bind [statistics endpoint](https://kb.isc.org/docs/monitoring-recommendations-for-bind-9#bind-9-http-statistics-channel).",
+ "type": "string",
+ "default": "http://127.0.0.1:8653/json/v1",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/bind/init.go b/src/go/plugin/go.d/modules/bind/init.go
new file mode 100644
index 000000000..fe533b974
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/init.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package bind
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+func (b *Bind) validateConfig() error {
+ if b.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (b *Bind) initPermitViewMatcher() (matcher.Matcher, error) {
+ if b.PermitView == "" {
+ return nil, nil
+ }
+ return matcher.NewSimplePatternsMatcher(b.PermitView)
+}
+
+func (b *Bind) initBindApiClient(httpClient *http.Client) (bindAPIClient, error) {
+ switch {
+ case strings.HasSuffix(b.URL, "/xml/v3"): // BIND 9.9+
+ return newXML3Client(httpClient, b.Request), nil
+ case strings.HasSuffix(b.URL, "/json/v1"): // BIND 9.10+
+ return newJSONClient(httpClient, b.Request), nil
+ default:
+ return nil, fmt.Errorf("URL %s is wrong, supported endpoints: `/xml/v3`, `/json/v1`", b.URL)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/bind/json_client.go b/src/go/plugin/go.d/modules/bind/json_client.go
new file mode 100644
index 000000000..04eecdb04
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/json_client.go
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package bind
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+type serverStats = jsonServerStats
+
+type jsonServerStats struct {
+ OpCodes map[string]int64
+ QTypes map[string]int64
+ NSStats map[string]int64
+ SockStats map[string]int64
+ Views map[string]jsonView
+}
+
+type jsonView struct {
+ Resolver jsonViewResolver
+}
+
+type jsonViewResolver struct {
+ Stats map[string]int64
+ QTypes map[string]int64
+ CacheStats map[string]int64
+}
+
+func newJSONClient(client *http.Client, request web.Request) *jsonClient {
+ return &jsonClient{httpClient: client, request: request}
+}
+
+type jsonClient struct {
+ httpClient *http.Client
+ request web.Request
+}
+
+func (c jsonClient) serverStats() (*serverStats, error) {
+ req := c.request.Copy()
+ u, err := url.Parse(req.URL)
+ if err != nil {
+ return nil, fmt.Errorf("error on parsing URL: %v", err)
+ }
+
+ u.Path = path.Join(u.Path, "/server")
+ req.URL = u.String()
+
+ httpReq, err := web.NewHTTPRequest(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating HTTP request: %v", err)
+ }
+
+ resp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ return nil, fmt.Errorf("error on request : %v", err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("%s returned HTTP status %d", httpReq.URL, resp.StatusCode)
+ }
+
+ stats := &jsonServerStats{}
+ if err = json.NewDecoder(resp.Body).Decode(stats); err != nil {
+ return nil, fmt.Errorf("error on decoding response from %s : %v", httpReq.URL, err)
+ }
+ return stats, nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/bind/testdata/config.json b/src/go/plugin/go.d/modules/bind/testdata/config.json
new file mode 100644
index 000000000..145df9ff4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "permit_view": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/bind/testdata/config.yaml b/src/go/plugin/go.d/modules/bind/testdata/config.yaml
new file mode 100644
index 000000000..cc0a33b74
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+permit_view: "ok"
diff --git a/src/go/plugin/go.d/modules/bind/testdata/query-server.json b/src/go/plugin/go.d/modules/bind/testdata/query-server.json
new file mode 100644
index 000000000..885a4e28e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/testdata/query-server.json
@@ -0,0 +1,302 @@
+{
+ "json-stats-version":"1.2",
+ "boot-time":"2018-04-26T08:27:05.582Z",
+ "config-time":"2019-02-05T21:24:44.108Z",
+ "current-time":"2019-02-06T07:01:27.538Z",
+ "version":"9.11.3-1~bpo9+1-Debian",
+ "opcodes":{
+ "QUERY":36766967932,
+ "IQUERY":199,
+ "STATUS":35546,
+ "RESERVED3":2,
+ "NOTIFY":390443,
+ "UPDATE":18836,
+ "RESERVED6":7401,
+ "RESERVED7":0,
+ "RESERVED8":0,
+ "RESERVED9":0,
+ "RESERVED10":5,
+ "RESERVED11":3,
+ "RESERVED12":1,
+ "RESERVED13":8,
+ "RESERVED14":0,
+ "RESERVED15":0
+ },
+ "rcodes":{
+ "NOERROR":30078966646,
+ "FORMERR":0,
+ "SERVFAIL":219515158,
+ "NXDOMAIN":5902446156,
+ "NOTIMP":0,
+ "REFUSED":225802272,
+ "YXDOMAIN":0,
+ "YXRRSET":0,
+ "NXRRSET":0,
+ "NOTAUTH":0,
+ "NOTZONE":0,
+ "RESERVED11":0,
+ "RESERVED12":0,
+ "RESERVED13":0,
+ "RESERVED14":0,
+ "RESERVED15":0,
+ "BADVERS":0,
+ "17":0,
+ "18":0,
+ "19":0,
+ "20":0,
+ "21":0,
+ "22":0,
+ "BADCOOKIE":0
+ },
+ "qtypes":{
+ "RESERVED0":13705,
+ "A":32327037206,
+ "NS":5537956,
+ "CNAME":534171,
+ "SOA":3860272,
+ "MG":4,
+ "NULL":3604,
+ "PTR":693769261,
+ "HINFO":9,
+ "MX":1483690,
+ "TXT":100045556,
+ "X25":2,
+ "AAAA":3304112238,
+ "LOC":52,
+ "SRV":27637747,
+ "NAPTR":109959,
+ "A6":538255,
+ "DNAME":1,
+ "DS":1687895,
+ "RRSIG":25192,
+ "NSEC":81,
+ "DNSKEY":143483,
+ "TLSA":297,
+ "SPF":2872,
+ "EUI64":627,
+ "TYPE115":285,
+ "TYPE127":1,
+ "TYPE223":2,
+ "IXFR":33,
+ "AXFR":105,
+ "MAILB":42,
+ "MAILA":44,
+ "ANY":298451299,
+ "DLV":37676,
+ "Others":74006
+ },
+ "nsstats":{
+ "Requestv4":36767496594,
+ "Requestv6":155,
+ "ReqEdns0":532104182,
+ "ReqBadEDNSVer":450,
+ "ReqTCP":4234792,
+ "AuthQryRej":148023,
+ "RecQryRej":225638588,
+ "XfrRej":97,
+ "UpdateRej":15661,
+ "Response":36426730232,
+ "TruncatedResp":25882799,
+ "RespEDNS0":527991455,
+ "QrySuccess":28766465065,
+ "QryAuthAns":440508475,
+ "QryNoauthAns":35538538399,
+ "QryReferral":1152155,
+ "QryNxrrset":1308983498,
+ "QrySERVFAIL":219515158,
+ "QryFORMERR":8,
+ "QryNXDOMAIN":5902446156,
+ "QryRecursion":3666523564,
+ "QryDuplicate":288617636,
+ "QryDropped":52141050,
+ "QryFailure":225786697,
+ "RecursClients":74,
+ "RateDropped":219,
+ "QryUDP":36455909449,
+ "QryTCP":4226324,
+ "NSIDOpt":81,
+ "ExpireOpt":195,
+ "OtherOpt":3425542,
+ "CookieIn":1217208,
+ "CookieNew":1058677,
+ "CookieNoMatch":33466,
+ "CookieMatch":125065,
+ "ECSOpt":8742938
+ },
+ "zonestats":{
+ "NotifyOutv4":992661,
+ "NotifyOutv6":691098,
+ "NotifyInv4":376341,
+ "NotifyRej":1,
+ "SOAOutv4":129981,
+ "AXFRReqv4":2044,
+ "IXFRReqv4":22794,
+ "XfrSuccess":50,
+ "XfrFail":25132
+ },
+ "resstats":{
+ "Mismatch":20050151,
+ "QuerySockFail":341338,
+ "QueryCurUDP":91
+ },
+ "views":{
+ "_default":{
+ "resolver":{
+ "stats":{
+ "Queryv4":4503685324,
+ "Queryv6":291939086,
+ "Responsev4":4169576370,
+ "Responsev6":151,
+ "NXDOMAIN":1245990908,
+ "SERVFAIL":6523360,
+ "FORMERR":3827518,
+ "OtherError":1426431,
+ "EDNS0Fail":3982564,
+ "Truncated":14015078,
+ "Lame":1975334,
+ "Retry":783763680,
+ "QueryTimeout":335575100,
+ "GlueFetchv4":110619519,
+ "GlueFetchv6":121100044,
+ "GlueFetchv4Fail":3949012,
+ "GlueFetchv6Fail":91728801,
+ "QryRTT10":628295,
+ "QryRTT100":2086168894,
+ "QryRTT500":2071767970,
+ "QryRTT800":2709649,
+ "QryRTT1600":455315,
+ "QryRTT1600+":27639,
+ "NumFetch":100,
+ "BucketSize":31,
+ "REFUSED":106664780,
+ "ClientCookieOut":3790767469,
+ "ServerCookieOut":364811250,
+ "CookieIn":298084581,
+ "CookieClientOk":297765763,
+ "BadCookieRcode":14779,
+ "NextItem":1788902
+ },
+ "qtypes":{
+ "RESERVED0":19,
+ "A":3673673090,
+ "NS":675609,
+ "CNAME":38153754,
+ "SOA":1326766,
+ "NULL":3548,
+ "PTR":208067284,
+ "HINFO":1,
+ "MX":1575795,
+ "TXT":43595113,
+ "RP":134,
+ "AFSDB":5,
+ "AAAA":817525939,
+ "LOC":21,
+ "SRV":3848459,
+ "NAPTR":30685,
+ "A6":556692,
+ "DNAME":6,
+ "DS":973892,
+ "RRSIG":191628,
+ "NSEC":53193,
+ "DNSKEY":182224,
+ "NSEC3PARAM":993,
+ "CDS":251,
+ "SPF":16521,
+ "EUI64":2087,
+ "TYPE115":112,
+ "TYPE127":2,
+ "ANY":5149356,
+ "DLV":20418,
+ "Others":813
+ },
+ "cache":{
+ "A":169353,
+ "NS":307028,
+ "CNAME":37960,
+ "SOA":16,
+ "PTR":76913,
+ "MX":91,
+ "TXT":12499,
+ "AAAA":15550,
+ "SRV":42,
+ "DNAME":5,
+ "DS":3300,
+ "RRSIG":26832,
+ "NSEC":18379,
+ "DNSKEY":62,
+ "NSEC3PARAM":1,
+ "SPF":3,
+ "Others":1,
+ "!A":247,
+ "!NS":28,
+ "!SOA":6,
+ "!PTR":7,
+ "!MX":3,
+ "!TXT":247,
+ "!AAAA":22631,
+ "!SRV":72,
+ "!NAPTR":1,
+ "!A6":51,
+ "!DS":16,
+ "!SPF":1,
+ "NXDOMAIN":205872,
+ "#RRSIG":1,
+ "#NSEC":1
+ },
+ "cachestats":{
+ "CacheHits":405229520524,
+ "CacheMisses":127371,
+ "QueryHits":171622440929,
+ "QueryMisses":5114505254,
+ "DeleteLRU":0,
+ "DeleteTTL":1673818609,
+ "CacheNodes":839357,
+ "CacheBuckets":532479,
+ "TreeMemTotal":1438467514974,
+ "TreeMemInUse":489426131,
+ "TreeMemMax":820437431,
+ "HeapMemTotal":455163904,
+ "HeapMemInUse":10855424,
+ "HeapMemMax":11527168
+ },
+ "adb":{
+ "nentries":6143,
+ "entriescnt":47619,
+ "nnames":6143,
+ "namescnt":46743
+ }
+ }
+ },
+ "_bind":{
+ "resolver":{
+ "stats":{
+ "BucketSize":31
+ },
+ "qtypes":{
+ },
+ "cache":{
+ },
+ "cachestats":{
+ "CacheHits":0,
+ "CacheMisses":509,
+ "QueryHits":0,
+ "QueryMisses":509,
+ "DeleteLRU":0,
+ "DeleteTTL":0,
+ "CacheNodes":0,
+ "CacheBuckets":64,
+ "TreeMemTotal":287792,
+ "TreeMemInUse":29952,
+ "TreeMemMax":29952,
+ "HeapMemTotal":262144,
+ "HeapMemInUse":1024,
+ "HeapMemMax":1024
+ },
+ "adb":{
+ "nentries":1021,
+ "nnames":1021
+ }
+ }
+ }
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/bind/testdata/query-server.xml b/src/go/plugin/go.d/modules/bind/testdata/query-server.xml
new file mode 100644
index 000000000..515cdeaba
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/testdata/query-server.xml
@@ -0,0 +1,470 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="/bind9.xsl"?>
+<statistics version="3.8">
+ <server>
+ <boot-time>2018-04-26T08:27:05.582Z</boot-time>
+ <config-time>2019-02-06T12:25:35.919Z</config-time>
+ <current-time>2019-02-06T12:29:31.168Z</current-time>
+ <version>9.11.3-1~bpo9+1-Debian</version>
+ <counters type="opcode">
+ <counter name="QUERY">36823356081</counter>
+ <counter name="IQUERY">199</counter>
+ <counter name="STATUS">35585</counter>
+ <counter name="RESERVED3">2</counter>
+ <counter name="NOTIFY">390521</counter>
+ <counter name="UPDATE">18839</counter>
+ <counter name="RESERVED6">7401</counter>
+ <counter name="RESERVED7">0</counter>
+ <counter name="RESERVED8">0</counter>
+ <counter name="RESERVED9">0</counter>
+ <counter name="RESERVED10">5</counter>
+ <counter name="RESERVED11">3</counter>
+ <counter name="RESERVED12">1</counter>
+ <counter name="RESERVED13">8</counter>
+ <counter name="RESERVED14">0</counter>
+ <counter name="RESERVED15">0</counter>
+ </counters>
+ <counters type="rcode">
+ <counter name="NOERROR">30124773524</counter>
+ <counter name="FORMERR">0</counter>
+ <counter name="SERVFAIL">219790234</counter>
+ <counter name="NXDOMAIN">5911582433</counter>
+ <counter name="NOTIMP">0</counter>
+ <counter name="REFUSED">225996286</counter>
+ <counter name="YXDOMAIN">0</counter>
+ <counter name="YXRRSET">0</counter>
+ <counter name="NXRRSET">0</counter>
+ <counter name="NOTAUTH">0</counter>
+ <counter name="NOTZONE">0</counter>
+ <counter name="RESERVED11">0</counter>
+ <counter name="RESERVED12">0</counter>
+ <counter name="RESERVED13">0</counter>
+ <counter name="RESERVED14">0</counter>
+ <counter name="RESERVED15">0</counter>
+ <counter name="BADVERS">0</counter>
+ <counter name="17">0</counter>
+ <counter name="18">0</counter>
+ <counter name="19">0</counter>
+ <counter name="20">0</counter>
+ <counter name="21">0</counter>
+ <counter name="22">0</counter>
+ <counter name="BADCOOKIE">0</counter>
+ </counters>
+ <counters type="qtype">
+ <counter name="RESERVED0">13705</counter>
+ <counter name="A">32377004350</counter>
+ <counter name="NS">5545011</counter>
+ <counter name="CNAME">535141</counter>
+ <counter name="SOA">3863889</counter>
+ <counter name="MG">4</counter>
+ <counter name="NULL">3606</counter>
+ <counter name="PTR">694347710</counter>
+ <counter name="HINFO">9</counter>
+ <counter name="MX">1484497</counter>
+ <counter name="TXT">100195931</counter>
+ <counter name="X25">2</counter>
+ <counter name="AAAA">3309600766</counter>
+ <counter name="LOC">52</counter>
+ <counter name="SRV">27709732</counter>
+ <counter name="NAPTR">109998</counter>
+ <counter name="A6">539773</counter>
+ <counter name="DNAME">1</counter>
+ <counter name="DS">1688561</counter>
+ <counter name="RRSIG">25193</counter>
+ <counter name="NSEC">81</counter>
+ <counter name="DNSKEY">143600</counter>
+ <counter name="TLSA">299</counter>
+ <counter name="SPF">2887</counter>
+ <counter name="EUI64">627</counter>
+ <counter name="TYPE115">285</counter>
+ <counter name="TYPE127">1</counter>
+ <counter name="TYPE223">2</counter>
+ <counter name="IXFR">33</counter>
+ <counter name="AXFR">105</counter>
+ <counter name="MAILB">42</counter>
+ <counter name="MAILA">44</counter>
+ <counter name="ANY">298567781</counter>
+ <counter name="DLV">37712</counter>
+ <counter name="Others">74007</counter>
+ </counters>
+ <counters type="nsstat">
+ <counter name="Requestv4">36823884979</counter>
+ <counter name="Requestv6">156</counter>
+ <counter name="ReqEdns0">532586114</counter>
+ <counter name="ReqBadEDNSVer">450</counter>
+ <counter name="ReqTSIG">0</counter>
+ <counter name="ReqSIG0">0</counter>
+ <counter name="ReqBadSIG">0</counter>
+ <counter name="ReqTCP">4241537</counter>
+ <counter name="AuthQryRej">148159</counter>
+ <counter name="RecQryRej">225832466</counter>
+ <counter name="XfrRej">97</counter>
+ <counter name="UpdateRej">15661</counter>
+ <counter name="Response">36482142477</counter>
+ <counter name="TruncatedResp">25899017</counter>
+ <counter name="RespEDNS0">528451140</counter>
+ <counter name="RespTSIG">0</counter>
+ <counter name="RespSIG0">0</counter>
+ <counter name="QrySuccess">28810069822</counter>
+ <counter name="QryAuthAns">440885288</counter>
+ <counter name="QryNoauthAns">35593104164</counter>
+ <counter name="QryReferral">1152178</counter>
+ <counter name="QryNxrrset">1311185019</counter>
+ <counter name="QrySERVFAIL">219790234</counter>
+ <counter name="QryFORMERR">8</counter>
+ <counter name="QryNXDOMAIN">5911582433</counter>
+ <counter name="QryRecursion">3671792792</counter>
+ <counter name="QryDuplicate">289518897</counter>
+ <counter name="QryDropped">52215943</counter>
+ <counter name="QryFailure">225980711</counter>
+ <counter name="XfrReqDone">0</counter>
+ <counter name="UpdateReqFwd">0</counter>
+ <counter name="UpdateRespFwd">0</counter>
+ <counter name="UpdateFwdFail">0</counter>
+ <counter name="UpdateDone">0</counter>
+ <counter name="UpdateFail">0</counter>
+ <counter name="UpdateBadPrereq">0</counter>
+ <counter name="RecursClients">64</counter>
+ <counter name="DNS64">0</counter>
+ <counter name="RateDropped">230</counter>
+ <counter name="RateSlipped">0</counter>
+ <counter name="RPZRewrites">0</counter>
+ <counter name="QryUDP">36511992002</counter>
+ <counter name="QryTCP">4233061</counter>
+ <counter name="NSIDOpt">81</counter>
+ <counter name="ExpireOpt">195</counter>
+ <counter name="OtherOpt">3431439</counter>
+ <counter name="CookieIn">1220424</counter>
+ <counter name="CookieNew">1061071</counter>
+ <counter name="CookieBadSize">0</counter>
+ <counter name="CookieBadTime">0</counter>
+ <counter name="CookieNoMatch">34013</counter>
+ <counter name="CookieMatch">125340</counter>
+ <counter name="ECSOpt">8743959</counter>
+ <counter name="QryNXRedir">0</counter>
+ <counter name="QryNXRedirRLookup">0</counter>
+ <counter name="QryBADCOOKIE">0</counter>
+ <counter name="KeyTagOpt">0</counter>
+ </counters>
+ <counters type="zonestat">
+ <counter name="NotifyOutv4">992895</counter>
+ <counter name="NotifyOutv6">691254</counter>
+ <counter name="NotifyInv4">376354</counter>
+ <counter name="NotifyInv6">0</counter>
+ <counter name="NotifyRej">1</counter>
+ <counter name="SOAOutv4">130105</counter>
+ <counter name="SOAOutv6">0</counter>
+ <counter name="AXFRReqv4">2047</counter>
+ <counter name="AXFRReqv6">0</counter>
+ <counter name="IXFRReqv4">22814</counter>
+ <counter name="IXFRReqv6">0</counter>
+ <counter name="XfrSuccess">50</counter>
+ <counter name="XfrFail">25155</counter>
+ </counters>
+ <counters type="resstat">
+ <counter name="Mismatch">20059475</counter>
+ <counter name="QuerySockFail">341338</counter>
+ <counter name="QueryCurUDP">58</counter>
+ <counter name="QueryCurTCP">1</counter>
+ </counters>
+ </server>
+ <views>
+ <view name="_default">
+ <counters type="resqtype">
+ <counter name="RESERVED0">19</counter>
+ <counter name="A">3678445415</counter>
+ <counter name="NS">676773</counter>
+ <counter name="CNAME">38213966</counter>
+ <counter name="SOA">1327966</counter>
+ <counter name="NULL">3554</counter>
+ <counter name="PTR">208337408</counter>
+ <counter name="HINFO">1</counter>
+ <counter name="MX">1576150</counter>
+ <counter name="TXT">43650696</counter>
+ <counter name="RP">134</counter>
+ <counter name="AFSDB">5</counter>
+ <counter name="AAAA">818803359</counter>
+ <counter name="LOC">21</counter>
+ <counter name="SRV">3855156</counter>
+ <counter name="NAPTR">30706</counter>
+ <counter name="A6">558874</counter>
+ <counter name="DNAME">6</counter>
+ <counter name="DS">974240</counter>
+ <counter name="RRSIG">191877</counter>
+ <counter name="NSEC">53712</counter>
+ <counter name="DNSKEY">182399</counter>
+ <counter name="NSEC3PARAM">995</counter>
+ <counter name="CDS">251</counter>
+ <counter name="SPF">16563</counter>
+ <counter name="EUI64">2087</counter>
+ <counter name="TYPE115">112</counter>
+ <counter name="TYPE127">2</counter>
+ <counter name="ANY">5149916</counter>
+ <counter name="DLV">20468</counter>
+ <counter name="Others">813</counter>
+ </counters>
+ <counters type="resstats">
+ <counter name="Queryv4">4509791268</counter>
+ <counter name="Queryv6">292282376</counter>
+ <counter name="Responsev4">4175093103</counter>
+ <counter name="Responsev6">152</counter>
+ <counter name="NXDOMAIN">1247746765</counter>
+ <counter name="SERVFAIL">6533579</counter>
+ <counter name="FORMERR">3831796</counter>
+ <counter name="OtherError">1464741</counter>
+ <counter name="EDNS0Fail">3987571</counter>
+ <counter name="Mismatch">0</counter>
+ <counter name="Truncated">14028716</counter>
+ <counter name="Lame">1979599</counter>
+ <counter name="Retry">784916992</counter>
+ <counter name="QueryAbort">0</counter>
+ <counter name="QuerySockFail">0</counter>
+ <counter name="QueryCurUDP">0</counter>
+ <counter name="QueryCurTCP">0</counter>
+ <counter name="QueryTimeout">336165169</counter>
+ <counter name="GlueFetchv4">110749701</counter>
+ <counter name="GlueFetchv6">121268854</counter>
+ <counter name="GlueFetchv4Fail">3964627</counter>
+ <counter name="GlueFetchv6Fail">91852240</counter>
+ <counter name="ValAttempt">0</counter>
+ <counter name="ValOk">0</counter>
+ <counter name="ValNegOk">0</counter>
+ <counter name="ValFail">0</counter>
+ <counter name="QryRTT10">629577</counter>
+ <counter name="QryRTT100">2087797539</counter>
+ <counter name="QryRTT500">2075608518</counter>
+ <counter name="QryRTT800">2712733</counter>
+ <counter name="QryRTT1600">455849</counter>
+ <counter name="QryRTT1600+">27681</counter>
+ <counter name="NumFetch">62</counter>
+ <counter name="BucketSize">31</counter>
+ <counter name="REFUSED">106782830</counter>
+ <counter name="ClientCookieOut">3795901994</counter>
+ <counter name="ServerCookieOut">365308714</counter>
+ <counter name="CookieIn">298556974</counter>
+ <counter name="CookieClientOk">298237641</counter>
+ <counter name="BadEDNSVersion">0</counter>
+ <counter name="BadCookieRcode">15399</counter>
+ <counter name="ZoneQuota">0</counter>
+ <counter name="ServerQuota">0</counter>
+ <counter name="NextItem">1790135</counter>
+ </counters>
+ <cache name="_default">
+ <rrset>
+ <name>A</name>
+ <counter>192185</counter>
+ </rrset>
+ <rrset>
+ <name>NS</name>
+ <counter>326554</counter>
+ </rrset>
+ <rrset>
+ <name>CNAME</name>
+ <counter>41900</counter>
+ </rrset>
+ <rrset>
+ <name>SOA</name>
+ <counter>15</counter>
+ </rrset>
+ <rrset>
+ <name>PTR</name>
+ <counter>82398</counter>
+ </rrset>
+ <rrset>
+ <name>MX</name>
+ <counter>80</counter>
+ </rrset>
+ <rrset>
+ <name>TXT</name>
+ <counter>11952</counter>
+ </rrset>
+ <rrset>
+ <name>AAAA</name>
+ <counter>16361</counter>
+ </rrset>
+ <rrset>
+ <name>SRV</name>
+ <counter>55</counter>
+ </rrset>
+ <rrset>
+ <name>NAPTR</name>
+ <counter>1</counter>
+ </rrset>
+ <rrset>
+ <name>DNAME</name>
+ <counter>1</counter>
+ </rrset>
+ <rrset>
+ <name>DS</name>
+ <counter>3760</counter>
+ </rrset>
+ <rrset>
+ <name>RRSIG</name>
+ <counter>28542</counter>
+ </rrset>
+ <rrset>
+ <name>NSEC</name>
+ <counter>19250</counter>
+ </rrset>
+ <rrset>
+ <name>DNSKEY</name>
+ <counter>57</counter>
+ </rrset>
+ <rrset>
+ <name>NSEC3PARAM</name>
+ <counter>1</counter>
+ </rrset>
+ <rrset>
+ <name>SPF</name>
+ <counter>4</counter>
+ </rrset>
+ <rrset>
+ <name>Others</name>
+ <counter>2</counter>
+ </rrset>
+ <rrset>
+ <name>!A</name>
+ <counter>287</counter>
+ </rrset>
+ <rrset>
+ <name>!NS</name>
+ <counter>42</counter>
+ </rrset>
+ <rrset>
+ <name>!SOA</name>
+ <counter>10</counter>
+ </rrset>
+ <rrset>
+ <name>!PTR</name>
+ <counter>6</counter>
+ </rrset>
+ <rrset>
+ <name>!MX</name>
+ <counter>2</counter>
+ </rrset>
+ <rrset>
+ <name>!TXT</name>
+ <counter>280</counter>
+ </rrset>
+ <rrset>
+ <name>!AAAA</name>
+ <counter>27381</counter>
+ </rrset>
+ <rrset>
+ <name>!SRV</name>
+ <counter>81</counter>
+ </rrset>
+ <rrset>
+ <name>!NAPTR</name>
+ <counter>2</counter>
+ </rrset>
+ <rrset>
+ <name>!A6</name>
+ <counter>38</counter>
+ </rrset>
+ <rrset>
+ <name>!DS</name>
+ <counter>20</counter>
+ </rrset>
+ <rrset>
+ <name>NXDOMAIN</name>
+ <counter>315286</counter>
+ </rrset>
+ </cache>
+ <counters type="adbstat">
+ <counter name="nentries">2039</counter>
+ <counter name="entriescnt">14535</counter>
+ <counter name="nnames">2039</counter>
+ <counter name="namescnt">12286</counter>
+ </counters>
+ <counters type="cachestats">
+ <counter name="CacheHits">405816752908</counter>
+ <counter name="CacheMisses">127371</counter>
+ <counter name="QueryHits">171876840110</counter>
+ <counter name="QueryMisses">5120854081</counter>
+ <counter name="DeleteLRU">0</counter>
+ <counter name="DeleteTTL">1675820766</counter>
+ <counter name="CacheNodes">1000477</counter>
+ <counter name="CacheBuckets">532479</counter>
+ <counter name="TreeMemTotal">1440529356195</counter>
+ <counter name="TreeMemInUse">642752571</counter>
+ <counter name="TreeMemMax">820437431</counter>
+ <counter name="HeapMemTotal">455163904</counter>
+ <counter name="HeapMemInUse">10855424</counter>
+ <counter name="HeapMemMax">11527168</counter>
+ </counters>
+ </view>
+ <view name="_bind">
+ <counters type="resqtype" />
+ <counters type="resstats">
+ <counter name="Queryv4">0</counter>
+ <counter name="Queryv6">0</counter>
+ <counter name="Responsev4">0</counter>
+ <counter name="Responsev6">0</counter>
+ <counter name="NXDOMAIN">0</counter>
+ <counter name="SERVFAIL">0</counter>
+ <counter name="FORMERR">0</counter>
+ <counter name="OtherError">0</counter>
+ <counter name="EDNS0Fail">0</counter>
+ <counter name="Mismatch">0</counter>
+ <counter name="Truncated">0</counter>
+ <counter name="Lame">0</counter>
+ <counter name="Retry">0</counter>
+ <counter name="QueryAbort">0</counter>
+ <counter name="QuerySockFail">0</counter>
+ <counter name="QueryCurUDP">0</counter>
+ <counter name="QueryCurTCP">0</counter>
+ <counter name="QueryTimeout">0</counter>
+ <counter name="GlueFetchv4">0</counter>
+ <counter name="GlueFetchv6">0</counter>
+ <counter name="GlueFetchv4Fail">0</counter>
+ <counter name="GlueFetchv6Fail">0</counter>
+ <counter name="ValAttempt">0</counter>
+ <counter name="ValOk">0</counter>
+ <counter name="ValNegOk">0</counter>
+ <counter name="ValFail">0</counter>
+ <counter name="QryRTT10">0</counter>
+ <counter name="QryRTT100">0</counter>
+ <counter name="QryRTT500">0</counter>
+ <counter name="QryRTT800">0</counter>
+ <counter name="QryRTT1600">0</counter>
+ <counter name="QryRTT1600+">0</counter>
+ <counter name="NumFetch">0</counter>
+ <counter name="BucketSize">31</counter>
+ <counter name="REFUSED">0</counter>
+ <counter name="ClientCookieOut">0</counter>
+ <counter name="ServerCookieOut">0</counter>
+ <counter name="CookieIn">0</counter>
+ <counter name="CookieClientOk">0</counter>
+ <counter name="BadEDNSVersion">0</counter>
+ <counter name="BadCookieRcode">0</counter>
+ <counter name="ZoneQuota">0</counter>
+ <counter name="ServerQuota">0</counter>
+ <counter name="NextItem">0</counter>
+ </counters>
+ <cache name="_bind" />
+ <counters type="adbstat">
+ <counter name="nentries">1021</counter>
+ <counter name="entriescnt">0</counter>
+ <counter name="nnames">1021</counter>
+ <counter name="namescnt">0</counter>
+ </counters>
+ <counters type="cachestats">
+ <counter name="CacheHits">0</counter>
+ <counter name="CacheMisses">509</counter>
+ <counter name="QueryHits">0</counter>
+ <counter name="QueryMisses">509</counter>
+ <counter name="DeleteLRU">0</counter>
+ <counter name="DeleteTTL">0</counter>
+ <counter name="CacheNodes">0</counter>
+ <counter name="CacheBuckets">64</counter>
+ <counter name="TreeMemTotal">287792</counter>
+ <counter name="TreeMemInUse">29952</counter>
+ <counter name="TreeMemMax">29952</counter>
+ <counter name="HeapMemTotal">262144</counter>
+ <counter name="HeapMemInUse">1024</counter>
+ <counter name="HeapMemMax">1024</counter>
+ </counters>
+ </view>
+ </views>
+</statistics> \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/bind/xml3_client.go b/src/go/plugin/go.d/modules/bind/xml3_client.go
new file mode 100644
index 000000000..c48d1af31
--- /dev/null
+++ b/src/go/plugin/go.d/modules/bind/xml3_client.go
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package bind
+
+import (
+ "encoding/xml"
+ "fmt"
+ "net/http"
+ "net/url"
+ "path"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+type xml3Stats struct {
+ Server xml3Server `xml:"server"`
+ Views []xml3View `xml:"views>view"`
+}
+
+type xml3Server struct {
+ CounterGroups []xml3CounterGroup `xml:"counters"`
+}
+
+type xml3CounterGroup struct {
+ Type string `xml:"type,attr"`
+ Counters []struct {
+ Name string `xml:"name,attr"`
+ Value int64 `xml:",chardata"`
+ } `xml:"counter"`
+}
+
+type xml3View struct {
+ Name string `xml:"name,attr"`
+ CounterGroups []xml3CounterGroup `xml:"counters"`
+}
+
+func newXML3Client(client *http.Client, request web.Request) *xml3Client {
+ return &xml3Client{httpClient: client, request: request}
+}
+
+type xml3Client struct {
+ httpClient *http.Client
+ request web.Request
+}
+
+func (c xml3Client) serverStats() (*serverStats, error) {
+ req := c.request.Copy()
+ u, err := url.Parse(req.URL)
+ if err != nil {
+ return nil, fmt.Errorf("error on parsing URL: %v", err)
+ }
+
+ u.Path = path.Join(u.Path, "/server")
+ req.URL = u.String()
+
+ httpReq, err := web.NewHTTPRequest(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating HTTP request: %v", err)
+ }
+
+ resp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+		return nil, fmt.Errorf("error on request: %v", err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("%s returned HTTP status %d", httpReq.URL, resp.StatusCode)
+ }
+
+ stats := xml3Stats{}
+ if err = xml.NewDecoder(resp.Body).Decode(&stats); err != nil {
+		return nil, fmt.Errorf("error on decoding response from %s: %v", httpReq.URL, err)
+ }
+ return convertXML(stats), nil
+}
+
+func convertXML(xmlStats xml3Stats) *serverStats {
+ stats := serverStats{
+ OpCodes: make(map[string]int64),
+ NSStats: make(map[string]int64),
+ QTypes: make(map[string]int64),
+ SockStats: make(map[string]int64),
+ Views: make(map[string]jsonView),
+ }
+
+ var m map[string]int64
+
+ for _, group := range xmlStats.Server.CounterGroups {
+ switch group.Type {
+ default:
+ continue
+ case "opcode":
+ m = stats.OpCodes
+ case "qtype":
+ m = stats.QTypes
+ case "nsstat":
+ m = stats.NSStats
+ case "sockstat":
+ m = stats.SockStats
+ }
+
+ for _, v := range group.Counters {
+ m[v.Name] = v.Value
+ }
+ }
+
+ for _, view := range xmlStats.Views {
+ stats.Views[view.Name] = jsonView{
+ Resolver: jsonViewResolver{
+ Stats: make(map[string]int64),
+ QTypes: make(map[string]int64),
+ CacheStats: make(map[string]int64),
+ },
+ }
+ for _, viewGroup := range view.CounterGroups {
+ switch viewGroup.Type {
+ default:
+ continue
+ case "resqtype":
+ m = stats.Views[view.Name].Resolver.QTypes
+ case "resstats":
+ m = stats.Views[view.Name].Resolver.Stats
+ case "cachestats":
+ m = stats.Views[view.Name].Resolver.CacheStats
+ }
+ for _, viewCounter := range viewGroup.Counters {
+ m[viewCounter.Name] = viewCounter.Value
+ }
+ }
+ }
+ return &stats
+}
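The xml3CounterGroup struct above relies on encoding/xml attribute and chardata struct tags to read each counter element's name attribute and numeric text content. A minimal, self-contained sketch of the same tag shapes, decoding a trimmed counters group taken from the query-server.xml fixture (illustrative only, not the module's client):

package main

import (
	"encoding/xml"
	"fmt"
)

// counterGroup mirrors the tag shapes used by xml3CounterGroup above.
type counterGroup struct {
	Type     string `xml:"type,attr"`
	Counters []struct {
		Name  string `xml:"name,attr"`
		Value int64  `xml:",chardata"`
	} `xml:"counter"`
}

func main() {
	// Trimmed excerpt of the "opcode" counters group from query-server.xml above.
	data := []byte(`<counters type="opcode">
  <counter name="QUERY">36823356081</counter>
  <counter name="NOTIFY">390521</counter>
</counters>`)

	var group counterGroup
	if err := xml.Unmarshal(data, &group); err != nil {
		panic(err)
	}
	fmt.Println(group.Type, group.Counters[0].Name, group.Counters[0].Value) // opcode QUERY 36823356081
}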
diff --git a/src/go/plugin/go.d/modules/cassandra/README.md b/src/go/plugin/go.d/modules/cassandra/README.md
new file mode 120000
index 000000000..99b5b9da5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/README.md
@@ -0,0 +1 @@
+integrations/cassandra.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/cassandra/cassandra.go b/src/go/plugin/go.d/modules/cassandra/cassandra.go
new file mode 100644
index 000000000..5352703df
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/cassandra.go
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cassandra
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("cassandra", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Cassandra {
+ return &Cassandra{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:7072/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 5),
+ },
+ },
+ },
+ charts: baseCharts.Copy(),
+ validateMetrics: true,
+ mx: newCassandraMetrics(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Cassandra struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prom prometheus.Prometheus
+
+ validateMetrics bool
+
+ mx *cassandraMetrics
+}
+
+func (c *Cassandra) Configuration() any {
+ return c.Config
+}
+
+func (c *Cassandra) Init() error {
+ if err := c.validateConfig(); err != nil {
+ c.Errorf("error on validating config: %v", err)
+ return err
+ }
+
+ prom, err := c.initPrometheusClient()
+ if err != nil {
+ c.Errorf("error on init prometheus client: %v", err)
+ return err
+ }
+ c.prom = prom
+
+ return nil
+}
+
+func (c *Cassandra) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
+}
+
+func (c *Cassandra) Charts() *module.Charts {
+ return c.charts
+}
+
+func (c *Cassandra) Collect() map[string]int64 {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (c *Cassandra) Cleanup() {
+ if c.prom != nil && c.prom.HTTPClient() != nil {
+ c.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/cassandra/cassandra_test.go b/src/go/plugin/go.d/modules/cassandra/cassandra_test.go
new file mode 100644
index 000000000..0b6af9362
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/cassandra_test.go
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cassandra
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataExpectedMetrics, _ = os.ReadFile("testdata/metrics.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataExpectedMetrics": dataExpectedMetrics,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestCassandra_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Cassandra{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNew(t *testing.T) {
+ assert.IsType(t, (*Cassandra)(nil), New())
+}
+
+func TestCassandra_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success if 'url' is set": {
+ config: Config{
+ HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:7072"}}},
+ },
+ "success on default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if 'url' is unset": {
+ wantFail: true,
+ config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ c := New()
+ c.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, c.Init())
+ } else {
+ assert.NoError(t, c.Init())
+ }
+ })
+ }
+}
+
+func TestCassandra_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (c *Cassandra, cleanup func())
+ wantFail bool
+ }{
+ "success on valid response": {
+ prepare: prepareCassandra,
+ },
+ "fails if endpoint returns invalid data": {
+ wantFail: true,
+ prepare: prepareCassandraInvalidData,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCassandraConnectionRefused,
+ },
+ "fails on 404 response": {
+ wantFail: true,
+ prepare: prepareCassandraResponse404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ c, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, c.Init())
+
+ if test.wantFail {
+ assert.Error(t, c.Check())
+ } else {
+ assert.NoError(t, c.Check())
+ }
+ })
+ }
+}
+
+func TestCassandra_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestCassandra_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (c *Cassandra, cleanup func())
+ wantCollected map[string]int64
+ }{
+ "success on valid response": {
+ prepare: prepareCassandra,
+ wantCollected: map[string]int64{
+ "client_request_failures_reads": 0,
+ "client_request_failures_writes": 0,
+ "client_request_latency_reads": 333316,
+ "client_request_latency_writes": 331841,
+ "client_request_read_latency_p50": 61,
+ "client_request_read_latency_p75": 88,
+ "client_request_read_latency_p95": 126,
+ "client_request_read_latency_p98": 182,
+ "client_request_read_latency_p99": 219,
+ "client_request_read_latency_p999": 454,
+ "client_request_timeouts_reads": 0,
+ "client_request_timeouts_writes": 0,
+ "client_request_total_latency_reads": 23688998,
+ "client_request_total_latency_writes": 14253267,
+ "client_request_unavailables_reads": 0,
+ "client_request_unavailables_writes": 0,
+ "client_request_write_latency_p50": 35,
+ "client_request_write_latency_p75": 61,
+ "client_request_write_latency_p95": 105,
+ "client_request_write_latency_p98": 126,
+ "client_request_write_latency_p99": 152,
+ "client_request_write_latency_p999": 315,
+ "compaction_bytes_compacted": 2532,
+ "compaction_completed_tasks": 1078,
+ "compaction_pending_tasks": 0,
+ "dropped_messages": 0,
+ "jvm_gc_cms_count": 1,
+ "jvm_gc_cms_time": 59,
+ "jvm_gc_parnew_count": 218,
+ "jvm_gc_parnew_time": 1617,
+ "jvm_memory_heap_used": 1134866288,
+ "jvm_memory_nonheap_used": 96565696,
+ "key_cache_hit_ratio": 87273,
+ "key_cache_hits": 1336427,
+ "key_cache_misses": 194890,
+ "key_cache_size": 196559936,
+ "key_cache_utilization": 20828,
+ "row_cache_hit_ratio": 0,
+ "row_cache_hits": 0,
+ "row_cache_misses": 0,
+ "row_cache_size": 0,
+ "row_cache_utilization": 0,
+ "storage_exceptions": 0,
+ "storage_load": 858272986,
+ "thread_pool_CacheCleanupExecutor_active_tasks": 0,
+ "thread_pool_CacheCleanupExecutor_blocked_tasks": 0,
+ "thread_pool_CacheCleanupExecutor_pending_tasks": 0,
+ "thread_pool_CacheCleanupExecutor_total_blocked_tasks": 0,
+ "thread_pool_CompactionExecutor_active_tasks": 0,
+ "thread_pool_CompactionExecutor_blocked_tasks": 0,
+ "thread_pool_CompactionExecutor_pending_tasks": 0,
+ "thread_pool_CompactionExecutor_total_blocked_tasks": 0,
+ "thread_pool_GossipStage_active_tasks": 0,
+ "thread_pool_GossipStage_blocked_tasks": 0,
+ "thread_pool_GossipStage_pending_tasks": 0,
+ "thread_pool_GossipStage_total_blocked_tasks": 0,
+ "thread_pool_HintsDispatcher_active_tasks": 0,
+ "thread_pool_HintsDispatcher_blocked_tasks": 0,
+ "thread_pool_HintsDispatcher_pending_tasks": 0,
+ "thread_pool_HintsDispatcher_total_blocked_tasks": 0,
+ "thread_pool_MemtableFlushWriter_active_tasks": 0,
+ "thread_pool_MemtableFlushWriter_blocked_tasks": 0,
+ "thread_pool_MemtableFlushWriter_pending_tasks": 0,
+ "thread_pool_MemtableFlushWriter_total_blocked_tasks": 0,
+ "thread_pool_MemtablePostFlush_active_tasks": 0,
+ "thread_pool_MemtablePostFlush_blocked_tasks": 0,
+ "thread_pool_MemtablePostFlush_pending_tasks": 0,
+ "thread_pool_MemtablePostFlush_total_blocked_tasks": 0,
+ "thread_pool_MemtableReclaimMemory_active_tasks": 0,
+ "thread_pool_MemtableReclaimMemory_blocked_tasks": 0,
+ "thread_pool_MemtableReclaimMemory_pending_tasks": 0,
+ "thread_pool_MemtableReclaimMemory_total_blocked_tasks": 0,
+ "thread_pool_MutationStage_active_tasks": 0,
+ "thread_pool_MutationStage_blocked_tasks": 0,
+ "thread_pool_MutationStage_pending_tasks": 0,
+ "thread_pool_MutationStage_total_blocked_tasks": 0,
+ "thread_pool_Native-Transport-Requests_active_tasks": 0,
+ "thread_pool_Native-Transport-Requests_blocked_tasks": 0,
+ "thread_pool_Native-Transport-Requests_pending_tasks": 0,
+ "thread_pool_Native-Transport-Requests_total_blocked_tasks": 0,
+ "thread_pool_PendingRangeCalculator_active_tasks": 0,
+ "thread_pool_PendingRangeCalculator_blocked_tasks": 0,
+ "thread_pool_PendingRangeCalculator_pending_tasks": 0,
+ "thread_pool_PendingRangeCalculator_total_blocked_tasks": 0,
+ "thread_pool_PerDiskMemtableFlushWriter_0_active_tasks": 0,
+ "thread_pool_PerDiskMemtableFlushWriter_0_blocked_tasks": 0,
+ "thread_pool_PerDiskMemtableFlushWriter_0_pending_tasks": 0,
+ "thread_pool_PerDiskMemtableFlushWriter_0_total_blocked_tasks": 0,
+ "thread_pool_ReadStage_active_tasks": 0,
+ "thread_pool_ReadStage_blocked_tasks": 0,
+ "thread_pool_ReadStage_pending_tasks": 0,
+ "thread_pool_ReadStage_total_blocked_tasks": 0,
+ "thread_pool_Sampler_active_tasks": 0,
+ "thread_pool_Sampler_blocked_tasks": 0,
+ "thread_pool_Sampler_pending_tasks": 0,
+ "thread_pool_Sampler_total_blocked_tasks": 0,
+ "thread_pool_SecondaryIndexManagement_active_tasks": 0,
+ "thread_pool_SecondaryIndexManagement_blocked_tasks": 0,
+ "thread_pool_SecondaryIndexManagement_pending_tasks": 0,
+ "thread_pool_SecondaryIndexManagement_total_blocked_tasks": 0,
+ "thread_pool_ValidationExecutor_active_tasks": 0,
+ "thread_pool_ValidationExecutor_blocked_tasks": 0,
+ "thread_pool_ValidationExecutor_pending_tasks": 0,
+ "thread_pool_ValidationExecutor_total_blocked_tasks": 0,
+ "thread_pool_ViewBuildExecutor_active_tasks": 0,
+ "thread_pool_ViewBuildExecutor_blocked_tasks": 0,
+ "thread_pool_ViewBuildExecutor_pending_tasks": 0,
+ "thread_pool_ViewBuildExecutor_total_blocked_tasks": 0,
+ },
+ },
+ "fails if endpoint returns invalid data": {
+ prepare: prepareCassandraInvalidData,
+ },
+ "fails on connection refused": {
+ prepare: prepareCassandraConnectionRefused,
+ },
+ "fails on 404 response": {
+ prepare: prepareCassandraResponse404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ c, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, c.Init())
+
+ mx := c.Collect()
+
+ assert.Equal(t, test.wantCollected, mx)
+ })
+ }
+}
+
+func prepareCassandra() (c *Cassandra, cleanup func()) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataExpectedMetrics)
+ }))
+
+ c = New()
+ c.URL = ts.URL
+ return c, ts.Close
+}
+
+func prepareCassandraInvalidData() (c *Cassandra, cleanup func()) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ c = New()
+ c.URL = ts.URL
+ return c, ts.Close
+}
+
+func prepareCassandraConnectionRefused() (c *Cassandra, cleanup func()) {
+ c = New()
+ c.URL = "http://127.0.0.1:38001"
+ return c, func() {}
+}
+
+func prepareCassandraResponse404() (c *Cassandra, cleanup func()) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+
+ c = New()
+ c.URL = ts.URL
+ return c, ts.Close
+}
diff --git a/src/go/plugin/go.d/modules/cassandra/charts.go b/src/go/plugin/go.d/modules/cassandra/charts.go
new file mode 100644
index 000000000..a909c7ba0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/charts.go
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cassandra
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioClientRequestsRate = module.Priority + iota
+
+ prioClientRequestReadLatency
+ prioClientRequestWriteLatency
+ prioClientRequestsLatency
+
+ prioKeyCacheHitRatio
+ prioRowCacheHitRatio
+ prioKeyCacheHitRate
+ prioRowCacheHitRate
+ prioKeyCacheUtilization
+ prioRowCacheUtilization
+ prioKeyCacheSize
+ prioRowCacheSize
+
+ prioStorageLiveDiskSpaceUsed
+
+ prioCompactionCompletedTasksRate
+ prioCompactionPendingTasksCount
+ prioCompactionBytesCompactedRate
+
+ prioThreadPoolActiveTasksCount
+ prioThreadPoolPendingTasksCount
+ prioThreadPoolBlockedTasksCount
+ prioThreadPoolBlockedTasksRate
+
+ prioJVMMemoryUsed
+ prioJVMGCCount
+ prioJVMGCTime
+
+ prioDroppedMessagesRate
+ prioRequestsTimeoutsRate
+ prioRequestsUnavailablesRate
+ prioRequestsFailuresRate
+ prioStorageExceptionsRate
+)
+
+var baseCharts = module.Charts{
+ chartClientRequestsRate.Copy(),
+
+ chartClientRequestsLatency.Copy(),
+ chartClientRequestReadLatencyHistogram.Copy(),
+ chartClientRequestWriteLatencyHistogram.Copy(),
+
+ chartKeyCacheHitRatio.Copy(),
+ chartRowCacheHitRatio.Copy(),
+ chartKeyCacheHitRate.Copy(),
+ chartRowCacheHitRate.Copy(),
+ chartKeyCacheUtilization.Copy(),
+ chartRowCacheUtilization.Copy(),
+ chartKeyCacheSize.Copy(),
+ chartRowCacheSize.Copy(),
+
+ chartStorageLiveDiskSpaceUsed.Copy(),
+
+ chartCompactionCompletedTasksRate.Copy(),
+ chartCompactionPendingTasksCount.Copy(),
+ chartCompactionBytesCompactedRate.Copy(),
+
+ chartJVMMemoryUsed.Copy(),
+ chartJVMGCRate.Copy(),
+ chartJVMGCTime.Copy(),
+
+ chartDroppedMessagesRate.Copy(),
+ chartClientRequestTimeoutsRate.Copy(),
+ chartClientRequestUnavailablesRate.Copy(),
+ chartClientRequestFailuresRate.Copy(),
+ chartStorageExceptionsRate.Copy(),
+}
+
+var (
+ chartClientRequestsRate = module.Chart{
+ ID: "client_requests_rate",
+ Title: "Client requests rate",
+ Units: "requests/s",
+ Fam: "throughput",
+ Ctx: "cassandra.client_requests_rate",
+ Priority: prioClientRequestsRate,
+ Dims: module.Dims{
+ {ID: "client_request_latency_reads", Name: "read", Algo: module.Incremental},
+ {ID: "client_request_latency_writes", Name: "write", Algo: module.Incremental, Mul: -1},
+ },
+ }
+)
+
+var (
+ chartClientRequestReadLatencyHistogram = module.Chart{
+ ID: "client_request_read_latency_histogram",
+ Title: "Client request read latency histogram",
+ Units: "seconds",
+ Fam: "latency",
+ Ctx: "cassandra.client_request_read_latency_histogram",
+ Priority: prioClientRequestReadLatency,
+ Dims: module.Dims{
+ {ID: "client_request_read_latency_p50", Name: "p50", Div: 1e6},
+ {ID: "client_request_read_latency_p75", Name: "p75", Div: 1e6},
+ {ID: "client_request_read_latency_p95", Name: "p95", Div: 1e6},
+ {ID: "client_request_read_latency_p98", Name: "p98", Div: 1e6},
+ {ID: "client_request_read_latency_p99", Name: "p99", Div: 1e6},
+ {ID: "client_request_read_latency_p999", Name: "p999", Div: 1e6},
+ },
+ }
+ chartClientRequestWriteLatencyHistogram = module.Chart{
+ ID: "client_request_write_latency_histogram",
+ Title: "Client request write latency histogram",
+ Units: "seconds",
+ Fam: "latency",
+ Ctx: "cassandra.client_request_write_latency_histogram",
+ Priority: prioClientRequestWriteLatency,
+ Dims: module.Dims{
+ {ID: "client_request_write_latency_p50", Name: "p50", Div: 1e6},
+ {ID: "client_request_write_latency_p75", Name: "p75", Div: 1e6},
+ {ID: "client_request_write_latency_p95", Name: "p95", Div: 1e6},
+ {ID: "client_request_write_latency_p98", Name: "p98", Div: 1e6},
+ {ID: "client_request_write_latency_p99", Name: "p99", Div: 1e6},
+ {ID: "client_request_write_latency_p999", Name: "p999", Div: 1e6},
+ },
+ }
+ chartClientRequestsLatency = module.Chart{
+ ID: "client_requests_latency",
+ Title: "Client requests total latency",
+ Units: "seconds",
+ Fam: "latency",
+ Ctx: "cassandra.client_requests_latency",
+ Priority: prioClientRequestsLatency,
+ Dims: module.Dims{
+ {ID: "client_request_total_latency_reads", Name: "read", Algo: module.Incremental, Div: 1e6},
+ {ID: "client_request_total_latency_writes", Name: "write", Algo: module.Incremental, Div: 1e6},
+ },
+ }
+)
+
+var (
+ chartKeyCacheHitRatio = module.Chart{
+ ID: "key_cache_hit_ratio",
+ Title: "Key cache hit ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "cassandra.key_cache_hit_ratio",
+ Priority: prioKeyCacheHitRatio,
+ Dims: module.Dims{
+ {ID: "key_cache_hit_ratio", Name: "hit_ratio", Div: 1000},
+ },
+ }
+ chartKeyCacheHitRate = module.Chart{
+ ID: "key_cache_hit_rate",
+ Title: "Key cache hit rate",
+ Units: "events/s",
+ Fam: "cache",
+ Ctx: "cassandra.key_cache_hit_rate",
+ Priority: prioKeyCacheHitRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "key_cache_hits", Name: "hits", Algo: module.Incremental},
+ {ID: "key_cache_misses", Name: "misses", Algo: module.Incremental},
+ },
+ }
+ chartKeyCacheUtilization = module.Chart{
+ ID: "key_cache_utilization",
+ Title: "Key cache utilization",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "cassandra.key_cache_utilization",
+ Priority: prioKeyCacheUtilization,
+ Dims: module.Dims{
+ {ID: "key_cache_utilization", Name: "used", Div: 1000},
+ },
+ }
+ chartKeyCacheSize = module.Chart{
+ ID: "key_cache_size",
+ Title: "Key cache size",
+ Units: "bytes",
+ Fam: "cache",
+ Ctx: "cassandra.key_cache_size",
+ Priority: prioKeyCacheSize,
+ Dims: module.Dims{
+ {ID: "key_cache_size", Name: "size"},
+ },
+ }
+
+ chartRowCacheHitRatio = module.Chart{
+ ID: "row_cache_hit_ratio",
+ Title: "Row cache hit ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "cassandra.row_cache_hit_ratio",
+ Priority: prioRowCacheHitRatio,
+ Dims: module.Dims{
+ {ID: "row_cache_hit_ratio", Name: "hit_ratio", Div: 1000},
+ },
+ }
+ chartRowCacheHitRate = module.Chart{
+ ID: "row_cache_hit_rate",
+ Title: "Row cache hit rate",
+ Units: "events/s",
+ Fam: "cache",
+ Ctx: "cassandra.row_cache_hit_rate",
+ Priority: prioRowCacheHitRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "row_cache_hits", Name: "hits", Algo: module.Incremental},
+ {ID: "row_cache_misses", Name: "misses", Algo: module.Incremental},
+ },
+ }
+ chartRowCacheUtilization = module.Chart{
+ ID: "row_cache_utilization",
+ Title: "Row cache utilization",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "cassandra.row_cache_utilization",
+ Priority: prioRowCacheUtilization,
+ Dims: module.Dims{
+ {ID: "row_cache_utilization", Name: "used", Div: 1000},
+ },
+ }
+ chartRowCacheSize = module.Chart{
+ ID: "row_cache_size",
+ Title: "Row cache size",
+ Units: "bytes",
+ Fam: "cache",
+ Ctx: "cassandra.row_cache_size",
+ Priority: prioRowCacheSize,
+ Dims: module.Dims{
+ {ID: "row_cache_size", Name: "size"},
+ },
+ }
+)
+
+var (
+ chartStorageLiveDiskSpaceUsed = module.Chart{
+ ID: "storage_live_disk_space_used",
+ Title: "Disk space used by live data",
+ Units: "bytes",
+ Fam: "disk usage",
+ Ctx: "cassandra.storage_live_disk_space_used",
+ Priority: prioStorageLiveDiskSpaceUsed,
+ Dims: module.Dims{
+ {ID: "storage_load", Name: "used"},
+ },
+ }
+)
+
+var (
+ chartCompactionCompletedTasksRate = module.Chart{
+ ID: "compaction_completed_tasks_rate",
+ Title: "Completed compactions rate",
+ Units: "tasks/s",
+ Fam: "compaction",
+ Ctx: "cassandra.compaction_completed_tasks_rate",
+ Priority: prioCompactionCompletedTasksRate,
+ Dims: module.Dims{
+ {ID: "compaction_completed_tasks", Name: "completed", Algo: module.Incremental},
+ },
+ }
+ chartCompactionPendingTasksCount = module.Chart{
+ ID: "compaction_pending_tasks_count",
+ Title: "Pending compactions",
+ Units: "tasks",
+ Fam: "compaction",
+ Ctx: "cassandra.compaction_pending_tasks_count",
+ Priority: prioCompactionPendingTasksCount,
+ Dims: module.Dims{
+ {ID: "compaction_pending_tasks", Name: "pending"},
+ },
+ }
+ chartCompactionBytesCompactedRate = module.Chart{
+ ID: "compaction_compacted_rate",
+ Title: "Compaction data rate",
+ Units: "bytes/s",
+ Fam: "compaction",
+ Ctx: "cassandra.compaction_compacted_rate",
+ Priority: prioCompactionBytesCompactedRate,
+ Dims: module.Dims{
+ {ID: "compaction_bytes_compacted", Name: "compacted", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartsTmplThreadPool = module.Charts{
+ chartTmplThreadPoolActiveTasksCount.Copy(),
+ chartTmplThreadPoolPendingTasksCount.Copy(),
+ chartTmplThreadPoolBlockedTasksCount.Copy(),
+ chartTmplThreadPoolBlockedTasksRate.Copy(),
+ }
+
+ chartTmplThreadPoolActiveTasksCount = module.Chart{
+ ID: "thread_pool_%s_active_tasks_count",
+ Title: "Active tasks",
+ Units: "tasks",
+ Fam: "thread pools",
+ Ctx: "cassandra.thread_pool_active_tasks_count",
+ Priority: prioThreadPoolActiveTasksCount,
+ Dims: module.Dims{
+ {ID: "thread_pool_%s_active_tasks", Name: "active"},
+ },
+ }
+ chartTmplThreadPoolPendingTasksCount = module.Chart{
+ ID: "thread_pool_%s_pending_tasks_count",
+ Title: "Pending tasks",
+ Units: "tasks",
+ Fam: "thread pools",
+ Ctx: "cassandra.thread_pool_pending_tasks_count",
+ Priority: prioThreadPoolPendingTasksCount,
+ Dims: module.Dims{
+ {ID: "thread_pool_%s_pending_tasks", Name: "pending"},
+ },
+ }
+ chartTmplThreadPoolBlockedTasksCount = module.Chart{
+ ID: "thread_pool_%s_blocked_tasks_count",
+ Title: "Blocked tasks",
+ Units: "tasks",
+ Fam: "thread pools",
+ Ctx: "cassandra.thread_pool_blocked_tasks_count",
+ Priority: prioThreadPoolBlockedTasksCount,
+ Dims: module.Dims{
+ {ID: "thread_pool_%s_blocked_tasks", Name: "blocked"},
+ },
+ }
+ chartTmplThreadPoolBlockedTasksRate = module.Chart{
+ ID: "thread_pool_%s_blocked_tasks_rate",
+ Title: "Blocked tasks rate",
+ Units: "tasks/s",
+ Fam: "thread pools",
+ Ctx: "cassandra.thread_pool_blocked_tasks_rate",
+ Priority: prioThreadPoolBlockedTasksRate,
+ Dims: module.Dims{
+ {ID: "thread_pool_%s_total_blocked_tasks", Name: "blocked", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartJVMMemoryUsed = module.Chart{
+ ID: "jvm_memory_used",
+ Title: "Memory used",
+ Units: "bytes",
+ Fam: "jvm runtime",
+ Ctx: "cassandra.jvm_memory_used",
+ Priority: prioJVMMemoryUsed,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "jvm_memory_heap_used", Name: "heap"},
+ {ID: "jvm_memory_nonheap_used", Name: "nonheap"},
+ },
+ }
+ chartJVMGCRate = module.Chart{
+ ID: "jvm_gc_rate",
+ Title: "Garbage collections rate",
+ Units: "gc/s",
+ Fam: "jvm runtime",
+ Ctx: "cassandra.jvm_gc_rate",
+ Priority: prioJVMGCCount,
+ Dims: module.Dims{
+ {ID: "jvm_gc_parnew_count", Name: "parnew", Algo: module.Incremental},
+ {ID: "jvm_gc_cms_count", Name: "cms", Algo: module.Incremental},
+ },
+ }
+ chartJVMGCTime = module.Chart{
+ ID: "jvm_gc_time",
+ Title: "Garbage collection time",
+ Units: "seconds",
+ Fam: "jvm runtime",
+ Ctx: "cassandra.jvm_gc_time",
+ Priority: prioJVMGCTime,
+ Dims: module.Dims{
+ {ID: "jvm_gc_parnew_time", Name: "parnew", Algo: module.Incremental, Div: 1e9},
+ {ID: "jvm_gc_cms_time", Name: "cms", Algo: module.Incremental, Div: 1e9},
+ },
+ }
+)
+
+var (
+ chartDroppedMessagesRate = module.Chart{
+ ID: "dropped_messages_rate",
+ Title: "Dropped messages rate",
+ Units: "messages/s",
+ Fam: "errors",
+ Ctx: "cassandra.dropped_messages_rate",
+ Priority: prioDroppedMessagesRate,
+ Dims: module.Dims{
+ {ID: "dropped_messages", Name: "dropped"},
+ },
+ }
+ chartClientRequestTimeoutsRate = module.Chart{
+ ID: "client_requests_timeouts_rate",
+ Title: "Client requests timeouts rate",
+ Units: "timeouts/s",
+ Fam: "errors",
+ Ctx: "cassandra.client_requests_timeouts_rate",
+ Priority: prioRequestsTimeoutsRate,
+ Dims: module.Dims{
+ {ID: "client_request_timeouts_reads", Name: "read", Algo: module.Incremental},
+ {ID: "client_request_timeouts_writes", Name: "write", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartClientRequestUnavailablesRate = module.Chart{
+ ID: "client_requests_unavailables_rate",
+ Title: "Client requests unavailable exceptions rate",
+ Units: "exceptions/s",
+ Fam: "errors",
+ Ctx: "cassandra.client_requests_unavailables_rate",
+ Priority: prioRequestsUnavailablesRate,
+ Dims: module.Dims{
+ {ID: "client_request_unavailables_reads", Name: "read", Algo: module.Incremental},
+ {ID: "client_request_unavailables_writes", Name: "write", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartClientRequestFailuresRate = module.Chart{
+ ID: "client_requests_failures_rate",
+ Title: "Client requests failures rate",
+ Units: "failures/s",
+ Fam: "errors",
+ Ctx: "cassandra.client_requests_failures_rate",
+ Priority: prioRequestsFailuresRate,
+ Dims: module.Dims{
+ {ID: "client_request_failures_reads", Name: "read", Algo: module.Incremental},
+ {ID: "client_request_failures_writes", Name: "write", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartStorageExceptionsRate = module.Chart{
+ ID: "storage_exceptions_rate",
+ Title: "Storage exceptions rate",
+ Units: "exceptions/s",
+ Fam: "errors",
+ Ctx: "cassandra.storage_exceptions_rate",
+ Priority: prioStorageExceptionsRate,
+ Dims: module.Dims{
+ {ID: "storage_exceptions", Name: "storage", Algo: module.Incremental},
+ },
+ }
+)
+
+func (c *Cassandra) addThreadPoolCharts(pool *threadPoolMetrics) {
+ charts := chartsTmplThreadPool.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, pool.name)
+ chart.Labels = []module.Label{
+ {Key: "thread_pool", Value: pool.name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, pool.name)
+ }
+ }
+
+ if err := c.Charts().Add(*charts...); err != nil {
+ c.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/cassandra/collect.go b/src/go/plugin/go.d/modules/cassandra/collect.go
new file mode 100644
index 000000000..08cdfbe94
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/collect.go
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cassandra
+
+import (
+	"errors"
+	"strings"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ suffixCount = "_count"
+ suffixValue = "_value"
+)
+
+func (c *Cassandra) collect() (map[string]int64, error) {
+ pms, err := c.prom.ScrapeSeries()
+ if err != nil {
+ return nil, err
+ }
+
+ if c.validateMetrics {
+ if !isCassandraMetrics(pms) {
+ return nil, errors.New("collected metrics aren't Cassandra metrics")
+ }
+ c.validateMetrics = false
+ }
+
+ mx := make(map[string]int64)
+
+ c.resetMetrics()
+ c.collectMetrics(pms)
+ c.processMetric(mx)
+
+ return mx, nil
+}
+
+func (c *Cassandra) resetMetrics() {
+ cm := newCassandraMetrics()
+ for key, p := range c.mx.threadPools {
+ cm.threadPools[key] = &threadPoolMetrics{
+ name: p.name,
+ hasCharts: p.hasCharts,
+ }
+ }
+ c.mx = cm
+}
+
+func (c *Cassandra) processMetric(mx map[string]int64) {
+ c.mx.clientReqTotalLatencyReads.write(mx, "client_request_total_latency_reads")
+ c.mx.clientReqTotalLatencyWrites.write(mx, "client_request_total_latency_writes")
+ c.mx.clientReqLatencyReads.write(mx, "client_request_latency_reads")
+ c.mx.clientReqLatencyWrites.write(mx, "client_request_latency_writes")
+ c.mx.clientReqTimeoutsReads.write(mx, "client_request_timeouts_reads")
+ c.mx.clientReqTimeoutsWrites.write(mx, "client_request_timeouts_writes")
+ c.mx.clientReqUnavailablesReads.write(mx, "client_request_unavailables_reads")
+ c.mx.clientReqUnavailablesWrites.write(mx, "client_request_unavailables_writes")
+ c.mx.clientReqFailuresReads.write(mx, "client_request_failures_reads")
+ c.mx.clientReqFailuresWrites.write(mx, "client_request_failures_writes")
+
+ c.mx.clientReqReadLatencyP50.write(mx, "client_request_read_latency_p50")
+ c.mx.clientReqReadLatencyP75.write(mx, "client_request_read_latency_p75")
+ c.mx.clientReqReadLatencyP95.write(mx, "client_request_read_latency_p95")
+ c.mx.clientReqReadLatencyP98.write(mx, "client_request_read_latency_p98")
+ c.mx.clientReqReadLatencyP99.write(mx, "client_request_read_latency_p99")
+ c.mx.clientReqReadLatencyP999.write(mx, "client_request_read_latency_p999")
+ c.mx.clientReqWriteLatencyP50.write(mx, "client_request_write_latency_p50")
+ c.mx.clientReqWriteLatencyP75.write(mx, "client_request_write_latency_p75")
+ c.mx.clientReqWriteLatencyP95.write(mx, "client_request_write_latency_p95")
+ c.mx.clientReqWriteLatencyP98.write(mx, "client_request_write_latency_p98")
+ c.mx.clientReqWriteLatencyP99.write(mx, "client_request_write_latency_p99")
+ c.mx.clientReqWriteLatencyP999.write(mx, "client_request_write_latency_p999")
+
+ c.mx.rowCacheHits.write(mx, "row_cache_hits")
+ c.mx.rowCacheMisses.write(mx, "row_cache_misses")
+ c.mx.rowCacheSize.write(mx, "row_cache_size")
+ if c.mx.rowCacheHits.isSet && c.mx.rowCacheMisses.isSet {
+ if s := c.mx.rowCacheHits.value + c.mx.rowCacheMisses.value; s > 0 {
+ mx["row_cache_hit_ratio"] = int64((c.mx.rowCacheHits.value * 100 / s) * 1000)
+ } else {
+ mx["row_cache_hit_ratio"] = 0
+ }
+ }
+ if c.mx.rowCacheCapacity.isSet && c.mx.rowCacheSize.isSet {
+ if s := c.mx.rowCacheCapacity.value; s > 0 {
+ mx["row_cache_utilization"] = int64((c.mx.rowCacheSize.value * 100 / s) * 1000)
+ } else {
+ mx["row_cache_utilization"] = 0
+ }
+ }
+
+ c.mx.keyCacheHits.write(mx, "key_cache_hits")
+ c.mx.keyCacheMisses.write(mx, "key_cache_misses")
+ c.mx.keyCacheSize.write(mx, "key_cache_size")
+ if c.mx.keyCacheHits.isSet && c.mx.keyCacheMisses.isSet {
+ if s := c.mx.keyCacheHits.value + c.mx.keyCacheMisses.value; s > 0 {
+ mx["key_cache_hit_ratio"] = int64((c.mx.keyCacheHits.value * 100 / s) * 1000)
+ } else {
+ mx["key_cache_hit_ratio"] = 0
+ }
+ }
+ if c.mx.keyCacheCapacity.isSet && c.mx.keyCacheSize.isSet {
+ if s := c.mx.keyCacheCapacity.value; s > 0 {
+ mx["key_cache_utilization"] = int64((c.mx.keyCacheSize.value * 100 / s) * 1000)
+ } else {
+ mx["key_cache_utilization"] = 0
+ }
+ }
+
+ c.mx.droppedMessages.write1k(mx, "dropped_messages")
+
+ c.mx.storageLoad.write(mx, "storage_load")
+ c.mx.storageExceptions.write(mx, "storage_exceptions")
+
+ c.mx.compactionBytesCompacted.write(mx, "compaction_bytes_compacted")
+ c.mx.compactionPendingTasks.write(mx, "compaction_pending_tasks")
+ c.mx.compactionCompletedTasks.write(mx, "compaction_completed_tasks")
+
+ c.mx.jvmMemoryHeapUsed.write(mx, "jvm_memory_heap_used")
+ c.mx.jvmMemoryNonHeapUsed.write(mx, "jvm_memory_nonheap_used")
+ c.mx.jvmGCParNewCount.write(mx, "jvm_gc_parnew_count")
+ c.mx.jvmGCParNewTime.write1k(mx, "jvm_gc_parnew_time")
+ c.mx.jvmGCCMSCount.write(mx, "jvm_gc_cms_count")
+ c.mx.jvmGCCMSTime.write1k(mx, "jvm_gc_cms_time")
+
+ for _, p := range c.mx.threadPools {
+ if !p.hasCharts {
+ p.hasCharts = true
+ c.addThreadPoolCharts(p)
+ }
+
+ px := "thread_pool_" + p.name + "_"
+ p.activeTasks.write(mx, px+"active_tasks")
+ p.pendingTasks.write(mx, px+"pending_tasks")
+ p.blockedTasks.write(mx, px+"blocked_tasks")
+ p.totalBlockedTasks.write(mx, px+"total_blocked_tasks")
+ }
+}
+
+func (c *Cassandra) collectMetrics(pms prometheus.Series) {
+ c.collectClientRequestMetrics(pms)
+ c.collectDroppedMessagesMetrics(pms)
+ c.collectThreadPoolsMetrics(pms)
+ c.collectStorageMetrics(pms)
+ c.collectCacheMetrics(pms)
+ c.collectJVMMetrics(pms)
+ c.collectCompactionMetrics(pms)
+}
+
+func (c *Cassandra) collectClientRequestMetrics(pms prometheus.Series) {
+ const metric = "org_apache_cassandra_metrics_clientrequest"
+
+ var rw struct{ read, write *metricValue }
+ for _, pm := range pms.FindByName(metric + suffixCount) {
+ name := pm.Labels.Get("name")
+ scope := pm.Labels.Get("scope")
+
+ switch name {
+ case "TotalLatency":
+ rw.read, rw.write = &c.mx.clientReqTotalLatencyReads, &c.mx.clientReqTotalLatencyWrites
+ case "Latency":
+ rw.read, rw.write = &c.mx.clientReqLatencyReads, &c.mx.clientReqLatencyWrites
+ case "Timeouts":
+ rw.read, rw.write = &c.mx.clientReqTimeoutsReads, &c.mx.clientReqTimeoutsWrites
+ case "Unavailables":
+ rw.read, rw.write = &c.mx.clientReqUnavailablesReads, &c.mx.clientReqUnavailablesWrites
+ case "Failures":
+ rw.read, rw.write = &c.mx.clientReqFailuresReads, &c.mx.clientReqFailuresWrites
+ default:
+ continue
+ }
+
+ switch scope {
+ case "Read":
+ rw.read.add(pm.Value)
+ case "Write":
+ rw.write.add(pm.Value)
+ }
+ }
+
+ rw = struct{ read, write *metricValue }{}
+
+ for _, pm := range pms.FindByNames(
+ metric+"_50thpercentile",
+ metric+"_75thpercentile",
+ metric+"_95thpercentile",
+ metric+"_98thpercentile",
+ metric+"_99thpercentile",
+ metric+"_999thpercentile",
+ ) {
+ name := pm.Labels.Get("name")
+ scope := pm.Labels.Get("scope")
+
+ if name != "Latency" {
+ continue
+ }
+
+ switch {
+ case strings.HasSuffix(pm.Name(), "_50thpercentile"):
+ rw.read, rw.write = &c.mx.clientReqReadLatencyP50, &c.mx.clientReqWriteLatencyP50
+ case strings.HasSuffix(pm.Name(), "_75thpercentile"):
+ rw.read, rw.write = &c.mx.clientReqReadLatencyP75, &c.mx.clientReqWriteLatencyP75
+ case strings.HasSuffix(pm.Name(), "_95thpercentile"):
+ rw.read, rw.write = &c.mx.clientReqReadLatencyP95, &c.mx.clientReqWriteLatencyP95
+ case strings.HasSuffix(pm.Name(), "_98thpercentile"):
+ rw.read, rw.write = &c.mx.clientReqReadLatencyP98, &c.mx.clientReqWriteLatencyP98
+ case strings.HasSuffix(pm.Name(), "_99thpercentile"):
+ rw.read, rw.write = &c.mx.clientReqReadLatencyP99, &c.mx.clientReqWriteLatencyP99
+ case strings.HasSuffix(pm.Name(), "_999thpercentile"):
+ rw.read, rw.write = &c.mx.clientReqReadLatencyP999, &c.mx.clientReqWriteLatencyP999
+ default:
+ continue
+ }
+
+ switch scope {
+ case "Read":
+ rw.read.add(pm.Value)
+ case "Write":
+ rw.write.add(pm.Value)
+ }
+ }
+}
+
+func (c *Cassandra) collectCacheMetrics(pms prometheus.Series) {
+ const metric = "org_apache_cassandra_metrics_cache"
+
+ var hm struct{ hits, misses *metricValue }
+ for _, pm := range pms.FindByName(metric + suffixCount) {
+ name := pm.Labels.Get("name")
+ scope := pm.Labels.Get("scope")
+
+ switch scope {
+ case "KeyCache":
+ hm.hits, hm.misses = &c.mx.keyCacheHits, &c.mx.keyCacheMisses
+ case "RowCache":
+ hm.hits, hm.misses = &c.mx.rowCacheHits, &c.mx.rowCacheMisses
+ default:
+ continue
+ }
+
+ switch name {
+ case "Hits":
+ hm.hits.add(pm.Value)
+ case "Misses":
+ hm.misses.add(pm.Value)
+ }
+ }
+
+ var cs struct{ cap, size *metricValue }
+ for _, pm := range pms.FindByName(metric + suffixValue) {
+ name := pm.Labels.Get("name")
+ scope := pm.Labels.Get("scope")
+
+ switch scope {
+ case "KeyCache":
+ cs.cap, cs.size = &c.mx.keyCacheCapacity, &c.mx.keyCacheSize
+ case "RowCache":
+ cs.cap, cs.size = &c.mx.rowCacheCapacity, &c.mx.rowCacheSize
+ default:
+ continue
+ }
+
+ switch name {
+ case "Capacity":
+ cs.cap.add(pm.Value)
+ case "Size":
+ cs.size.add(pm.Value)
+ }
+ }
+}
+
+func (c *Cassandra) collectThreadPoolsMetrics(pms prometheus.Series) {
+ const metric = "org_apache_cassandra_metrics_threadpools"
+
+ for _, pm := range pms.FindByName(metric + suffixValue) {
+ name := pm.Labels.Get("name")
+ scope := pm.Labels.Get("scope")
+ pool := c.getThreadPoolMetrics(scope)
+
+ switch name {
+ case "ActiveTasks":
+ pool.activeTasks.add(pm.Value)
+ case "PendingTasks":
+ pool.pendingTasks.add(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metric + suffixCount) {
+ name := pm.Labels.Get("name")
+ scope := pm.Labels.Get("scope")
+ pool := c.getThreadPoolMetrics(scope)
+
+ switch name {
+ case "CompletedTasks":
+ pool.totalBlockedTasks.add(pm.Value)
+ case "TotalBlockedTasks":
+ pool.totalBlockedTasks.add(pm.Value)
+ case "CurrentlyBlockedTasks":
+ pool.blockedTasks.add(pm.Value)
+ }
+ }
+}
+
+func (c *Cassandra) collectStorageMetrics(pms prometheus.Series) {
+ const metric = "org_apache_cassandra_metrics_storage"
+
+ for _, pm := range pms.FindByName(metric + suffixCount) {
+ name := pm.Labels.Get("name")
+
+ switch name {
+ case "Load":
+ c.mx.storageLoad.add(pm.Value)
+ case "Exceptions":
+ c.mx.storageExceptions.add(pm.Value)
+ }
+ }
+}
+
+func (c *Cassandra) collectDroppedMessagesMetrics(pms prometheus.Series) {
+ const metric = "org_apache_cassandra_metrics_droppedmessage"
+
+ for _, pm := range pms.FindByName(metric + suffixCount) {
+ c.mx.droppedMessages.add(pm.Value)
+ }
+}
+
+func (c *Cassandra) collectJVMMetrics(pms prometheus.Series) {
+ const metricMemUsed = "jvm_memory_bytes_used"
+ const metricGC = "jvm_gc_collection_seconds"
+
+ for _, pm := range pms.FindByName(metricMemUsed) {
+ area := pm.Labels.Get("area")
+
+ switch area {
+ case "heap":
+ c.mx.jvmMemoryHeapUsed.add(pm.Value)
+ case "nonheap":
+ c.mx.jvmMemoryNonHeapUsed.add(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricGC + suffixCount) {
+ gc := pm.Labels.Get("gc")
+
+ switch gc {
+ case "ParNew":
+ c.mx.jvmGCParNewCount.add(pm.Value)
+ case "ConcurrentMarkSweep":
+ c.mx.jvmGCCMSCount.add(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricGC + "_sum") {
+ gc := pm.Labels.Get("gc")
+
+ switch gc {
+ case "ParNew":
+ c.mx.jvmGCParNewTime.add(pm.Value)
+ case "ConcurrentMarkSweep":
+ c.mx.jvmGCCMSTime.add(pm.Value)
+ }
+ }
+}
+
+func (c *Cassandra) collectCompactionMetrics(pms prometheus.Series) {
+ const metric = "org_apache_cassandra_metrics_compaction"
+
+ for _, pm := range pms.FindByName(metric + suffixValue) {
+ name := pm.Labels.Get("name")
+
+ switch name {
+ case "CompletedTasks":
+ c.mx.compactionCompletedTasks.add(pm.Value)
+ case "PendingTasks":
+ c.mx.compactionPendingTasks.add(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metric + suffixCount) {
+ name := pm.Labels.Get("name")
+
+ switch name {
+ case "BytesCompacted":
+ c.mx.compactionBytesCompacted.add(pm.Value)
+ }
+ }
+}
+
+func (c *Cassandra) getThreadPoolMetrics(name string) *threadPoolMetrics {
+ pool, ok := c.mx.threadPools[name]
+ if !ok {
+ pool = &threadPoolMetrics{name: name}
+ c.mx.threadPools[name] = pool
+ }
+ return pool
+}
+
+func isCassandraMetrics(pms prometheus.Series) bool {
+ for _, pm := range pms {
+ if strings.HasPrefix(pm.Name(), "org_apache_cassandra_metrics") {
+ return true
+ }
+ }
+ return false
+}
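The hit-ratio and utilization values produced in processMetric above are stored as fixed-point integers: the percentage is multiplied by 1000, and the corresponding chart dimensions in charts.go divide by 1000 to render three decimal places. A minimal sketch of that arithmetic, using the key cache hits/misses values from the expected map in cassandra_test.go:

package main

import "fmt"

func main() {
	// Key cache values from the test expectations above.
	hits, misses := 1336427.0, 194890.0

	// Same fixed-point scaling as processMetric: percent * 1000, truncated to int64.
	ratio := int64((hits * 100 / (hits + misses)) * 1000)

	fmt.Println(ratio) // 87273, rendered as 87.273% after the chart dimension's Div: 1000
}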
diff --git a/src/go/plugin/go.d/modules/cassandra/config_schema.json b/src/go/plugin/go.d/modules/cassandra/config_schema.json
new file mode 100644
index 000000000..c4ca5f4f9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Cassandra collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Cassandra [JMX exporter](https://github.com/prometheus/jmx_exporter) metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:7072/metrics",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 5
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/cassandra/init.go b/src/go/plugin/go.d/modules/cassandra/init.go
new file mode 100644
index 000000000..1a74fdf9b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/init.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cassandra
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (c *Cassandra) validateConfig() error {
+ if c.URL == "" {
+ return errors.New("'url' is not set")
+ }
+ return nil
+}
+
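+// initPrometheusClient builds an HTTP client from the job configuration and wraps it in a Prometheus text-format scraper.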
+func (c *Cassandra) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(c.Client)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.New(client, c.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md b/src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md
new file mode 100644
index 000000000..61c4d1439
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/integrations/cassandra.md
@@ -0,0 +1,313 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/cassandra/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/cassandra/metadata.yaml"
+sidebar_label: "Cassandra"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Cassandra
+
+
+<img src="https://netdata.cloud/img/cassandra.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: cassandra
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector gathers metrics about client requests, cache hits, and much more, and also provides metrics for each thread pool.
+
+
+The [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This collector discovers instances running on the local host that provide metrics on port 7072.
+
+On startup, it tries to collect metrics from:
+
+- http://127.0.0.1:7072/metrics
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Cassandra instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cassandra.client_requests_rate | read, write | requests/s |
+| cassandra.client_request_read_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |
+| cassandra.client_request_write_latency_histogram | p50, p75, p95, p98, p99, p999 | seconds |
+| cassandra.client_requests_latency | read, write | seconds |
+| cassandra.row_cache_hit_ratio | hit_ratio | percentage |
+| cassandra.row_cache_hit_rate | hits, misses | events/s |
+| cassandra.row_cache_utilization | used | percentage |
+| cassandra.row_cache_size | size | bytes |
+| cassandra.key_cache_hit_ratio | hit_ratio | percentage |
+| cassandra.key_cache_hit_rate | hits, misses | events/s |
+| cassandra.key_cache_utilization | used | percentage |
+| cassandra.key_cache_size | size | bytes |
+| cassandra.storage_live_disk_space_used | used | bytes |
+| cassandra.compaction_completed_tasks_rate | completed | tasks/s |
+| cassandra.compaction_pending_tasks_count | pending | tasks |
+| cassandra.compaction_compacted_rate | compacted | bytes/s |
+| cassandra.jvm_memory_used | heap, nonheap | bytes |
+| cassandra.jvm_gc_rate | parnew, cms | gc/s |
+| cassandra.jvm_gc_time | parnew, cms | seconds |
+| cassandra.dropped_messages_rate | dropped | messages/s |
+| cassandra.client_requests_timeouts_rate | read, write | timeout/s |
+| cassandra.client_requests_unavailables_rate | read, write | exceptions/s |
+| cassandra.client_requests_failures_rate | read, write | failures/s |
+| cassandra.storage_exceptions_rate | storage | exceptions/s |
+
+### Per thread pool
+
+Metrics related to Cassandra's thread pools. Each thread pool provides its own set of the following metrics.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| thread_pool | thread pool name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cassandra.thread_pool_active_tasks_count | active | tasks |
+| cassandra.thread_pool_pending_tasks_count | pending | tasks |
+| cassandra.thread_pool_blocked_tasks_count | blocked | tasks |
+| cassandra.thread_pool_blocked_tasks_rate | blocked | tasks/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure Cassandra with Prometheus JMX Exporter
+
+To configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):
+
+> **Note**: paths can differ depending on your setup.
+
+- Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file
+ and install it in a directory where Cassandra can access it.
+- Add
+ the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)
+ file to `/etc/cassandra`.
+- Add the following line to `/etc/cassandra/cassandra-env.sh`:
+  ```
+  JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml"
+  ```
+- Restart the Cassandra service, then verify the exporter responds as shown below.
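+
+To quickly check that the exporter is reachable (assuming the default port 7072 on the local node), request the metrics endpoint and look for the Cassandra metric prefix:
+
+```bash
+# should print at least one org_apache_cassandra_metrics_* line
+curl -s http://127.0.0.1:7072/metrics | grep -m 1 org_apache_cassandra_metrics
+```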
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/cassandra.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/cassandra.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:7072/metrics | yes |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| timeout | HTTP request timeout. | 2 | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:7072/metrics
+
+```
+##### HTTP authentication
+
+Local server with basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:7072/metrics
+ username: foo
+ password: bar
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Local server with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:7072/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:7072/metrics
+
+ - name: remote
+ url: http://192.0.2.1:7072/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `cassandra` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m cassandra
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `cassandra` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep cassandra
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep cassandra /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep cassandra
+```
+
+
diff --git a/src/go/plugin/go.d/modules/cassandra/jmx_exporter.yaml b/src/go/plugin/go.d/modules/cassandra/jmx_exporter.yaml
new file mode 100644
index 000000000..983f6f9b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/jmx_exporter.yaml
@@ -0,0 +1,31 @@
+lowercaseOutputLabelNames: true
+lowercaseOutputName: true
+whitelistObjectNames: ["org.apache.cassandra.metrics:*"]
+blacklistObjectNames:
+ - "org.apache.cassandra.metrics:type=ColumnFamily,*"
+ - "org.apache.cassandra.metrics:type=Table,*"
+rules:
+ # Throughput and Latency
+ - pattern: org.apache.cassandra.metrics<type=(ClientRequest), scope=(Write|Read), name=(TotalLatency|Latency|Timeouts|Unavailables|Failures)><>(Count)
+ - pattern: org.apache.cassandra.metrics<type=(ClientRequest), scope=(Write|Read), name=(Latency)><>(\S*Percentile)
+
+ # Dropped messages
+ - pattern: org.apache.cassandra.metrics<type=(DroppedMessage), scope=(\S*), name=(Dropped)><>(Count)
+
+ # Cache
+ - pattern: org.apache.cassandra.metrics<type=Cache, scope=(KeyCache|RowCache), name=(Hits|Misses)><>(Count)
+ - pattern: org.apache.cassandra.metrics<type=Cache, scope=(KeyCache|RowCache), name=(Capacity|Size)><>(Value)
+
+ # Storage
+ - pattern: org.apache.cassandra.metrics<type=(Storage), name=(Load|Exceptions)><>(Count)
+
+ # Tables
+ # - pattern: org.apache.cassandra.metrics<type=(Table), keyspace=(\S*), scope=(\S*), name=(TotalDiskSpaceUsed)><>(Count)
+
+ # Compaction
+ - pattern: org.apache.cassandra.metrics<type=(Compaction), name=(CompletedTasks|PendingTasks)><>(Value)
+ - pattern: org.apache.cassandra.metrics<type=(Compaction), name=(BytesCompacted)><>(Count)
+
+ # Thread Pools
+ - pattern: org.apache.cassandra.metrics<type=(ThreadPools), path=(\S*), scope=(\S*), name=(ActiveTasks|PendingTasks)><>(Value)
+ - pattern: org.apache.cassandra.metrics<type=(ThreadPools), path=(\S*), scope=(\S*), name=(CurrentlyBlockedTasks|TotalBlockedTasks)><>(Count)
diff --git a/src/go/plugin/go.d/modules/cassandra/metadata.yaml b/src/go/plugin/go.d/modules/cassandra/metadata.yaml
new file mode 100644
index 000000000..ef9458c03
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/metadata.yaml
@@ -0,0 +1,410 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-cassandra
+ module_name: cassandra
+ plugin_name: go.d.plugin
+ monitored_instance:
+ categories:
+ - data-collection.database-servers
+ icon_filename: cassandra.svg
+ name: Cassandra
+ link: https://cassandra.apache.org/_/index.html
+ alternative_monitored_instances: []
+ keywords:
+ - nosql
+ - dbms
+ - db
+ - database
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+          This collector gathers metrics about client requests, cache hits, and much more, and also provides metrics for each thread pool.
+ method_description: |
+ The [JMX Exporter](https://github.com/prometheus/jmx_exporter) is used to fetch metrics from a Cassandra instance and make them available at an endpoint like `http://127.0.0.1:7072/metrics`.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ This collector discovers instances running on the local host that provide metrics on port 7072.
+
+ On startup, it tries to collect metrics from:
+
+ - http://127.0.0.1:7072/metrics
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Configure Cassandra with Prometheus JMX Exporter
+ description: |
+ To configure Cassandra with the [JMX Exporter](https://github.com/prometheus/jmx_exporter):
+
+            > **Note**: paths can differ depending on your setup.
+
+            - Download the latest [jmx_exporter](https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/) jar file
+ and install it in a directory where Cassandra can access it.
+ - Add
+ the [jmx_exporter.yaml](https://raw.githubusercontent.com/netdata/go.d.plugin/master/modules/cassandra/jmx_exporter.yaml)
+ file to `/etc/cassandra`.
+            - Add the following line to `/etc/cassandra/cassandra-env.sh`:
+              ```
+              JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS -javaagent:/opt/jmx_exporter/jmx_exporter.jar=7072:/etc/cassandra/jmx_exporter.yaml"
+              ```
+            - Restart the Cassandra service.
+ configuration:
+ file:
+ name: go.d/cassandra.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:7072/metrics
+ required: true
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 2
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:7072/metrics
+ - name: HTTP authentication
+ description: Local server with basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:7072/metrics
+ username: foo
+ password: bar
+ - name: HTTPS with self-signed certificate
+ description: Local server with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:7072/metrics
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:7072/metrics
+
+ - name: remote
+ url: http://192.0.2.1:7072/metrics
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: cassandra.client_requests_rate
+ availability: []
+ description: Client requests rate
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: cassandra.client_request_read_latency_histogram
+ availability: []
+ description: Client request read latency histogram
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: p50
+ - name: p75
+ - name: p95
+ - name: p98
+ - name: p99
+ - name: p999
+ - name: cassandra.client_request_write_latency_histogram
+ availability: []
+ description: Client request write latency histogram
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: p50
+ - name: p75
+ - name: p95
+ - name: p98
+ - name: p99
+ - name: p999
+ - name: cassandra.client_requests_latency
+ availability: []
+ description: Client requests total latency
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: cassandra.row_cache_hit_ratio
+ availability: []
+                description: Row cache hit ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: hit_ratio
+ - name: cassandra.row_cache_hit_rate
+ availability: []
+                description: Row cache hit rate
+ unit: events/s
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: cassandra.row_cache_utilization
+ availability: []
+                description: Row cache utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: cassandra.row_cache_size
+ availability: []
+                description: Row cache size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: cassandra.key_cache_hit_ratio
+ availability: []
+                description: Key cache hit ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: hit_ratio
+ - name: cassandra.key_cache_hit_rate
+ availability: []
+                description: Key cache hit rate
+ unit: events/s
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: cassandra.key_cache_utilization
+ availability: []
+                description: Key cache utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: cassandra.key_cache_size
+ availability: []
+                description: Key cache size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: cassandra.storage_live_disk_space_used
+ availability: []
+ description: Disk space used by live data
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: cassandra.compaction_completed_tasks_rate
+ availability: []
+ description: Completed compactions rate
+ unit: tasks/s
+ chart_type: line
+ dimensions:
+ - name: completed
+ - name: cassandra.compaction_pending_tasks_count
+ availability: []
+ description: Pending compactions
+ unit: tasks
+ chart_type: line
+ dimensions:
+ - name: pending
+ - name: cassandra.compaction_compacted_rate
+ availability: []
+ description: Compaction data rate
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: compacted
+ - name: cassandra.jvm_memory_used
+ availability: []
+ description: Memory used
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: heap
+ - name: nonheap
+ - name: cassandra.jvm_gc_rate
+ availability: []
+ description: Garbage collections rate
+ unit: gc/s
+ chart_type: line
+ dimensions:
+ - name: parnew
+ - name: cms
+ - name: cassandra.jvm_gc_time
+ availability: []
+ description: Garbage collection time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: parnew
+ - name: cms
+ - name: cassandra.dropped_messages_rate
+ availability: []
+ description: Dropped messages rate
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: dropped
+ - name: cassandra.client_requests_timeouts_rate
+ availability: []
+ description: Client requests timeouts rate
+ unit: timeout/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: cassandra.client_requests_unavailables_rate
+ availability: []
+ description: Client requests unavailable exceptions rate
+ unit: exceptions/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: cassandra.client_requests_failures_rate
+ availability: []
+ description: Client requests failures rate
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: cassandra.storage_exceptions_rate
+ availability: []
+ description: Storage exceptions rate
+ unit: exceptions/s
+ chart_type: line
+ dimensions:
+ - name: storage
+ - name: thread pool
+ description: Metrics related to Cassandra's thread pools. Each thread pool provides its own set of the following metrics.
+ labels:
+ - name: thread_pool
+ description: thread pool name
+ metrics:
+ - name: cassandra.thread_pool_active_tasks_count
+ availability: []
+ description: Active tasks
+ unit: tasks
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: cassandra.thread_pool_pending_tasks_count
+ availability: []
+ description: Pending tasks
+ unit: tasks
+ chart_type: line
+ dimensions:
+ - name: pending
+ - name: cassandra.thread_pool_blocked_tasks_count
+ availability: []
+ description: Blocked tasks
+ unit: tasks
+ chart_type: line
+ dimensions:
+ - name: blocked
+ - name: cassandra.thread_pool_blocked_tasks_rate
+ availability: []
+ description: Blocked tasks rate
+ unit: tasks/s
+ chart_type: line
+ dimensions:
+ - name: blocked
diff --git a/src/go/plugin/go.d/modules/cassandra/metrics.go b/src/go/plugin/go.d/modules/cassandra/metrics.go
new file mode 100644
index 000000000..6533c694c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/metrics.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cassandra
+
+// https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#table-metrics
+// https://www.datadoghq.com/blog/how-to-collect-cassandra-metrics/
+// https://docs.opennms.com/horizon/29/deployment/time-series-storage/newts/cassandra-jmx.html
+
+func newCassandraMetrics() *cassandraMetrics {
+ return &cassandraMetrics{
+ threadPools: make(map[string]*threadPoolMetrics),
+ }
+}
+
+type cassandraMetrics struct {
+ clientReqTotalLatencyReads metricValue
+ clientReqTotalLatencyWrites metricValue
+ clientReqLatencyReads metricValue
+ clientReqLatencyWrites metricValue
+ clientReqTimeoutsReads metricValue
+ clientReqTimeoutsWrites metricValue
+ clientReqUnavailablesReads metricValue
+ clientReqUnavailablesWrites metricValue
+ clientReqFailuresReads metricValue
+ clientReqFailuresWrites metricValue
+
+ clientReqReadLatencyP50 metricValue
+ clientReqReadLatencyP75 metricValue
+ clientReqReadLatencyP95 metricValue
+ clientReqReadLatencyP98 metricValue
+ clientReqReadLatencyP99 metricValue
+ clientReqReadLatencyP999 metricValue
+ clientReqWriteLatencyP50 metricValue
+ clientReqWriteLatencyP75 metricValue
+ clientReqWriteLatencyP95 metricValue
+ clientReqWriteLatencyP98 metricValue
+ clientReqWriteLatencyP99 metricValue
+ clientReqWriteLatencyP999 metricValue
+
+ rowCacheHits metricValue
+ rowCacheMisses metricValue
+ rowCacheCapacity metricValue
+ rowCacheSize metricValue
+ keyCacheHits metricValue
+ keyCacheMisses metricValue
+ keyCacheCapacity metricValue
+ keyCacheSize metricValue
+
+ // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#dropped-metrics
+ droppedMessages metricValue
+
+ // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#storage-metrics
+ storageLoad metricValue
+ storageExceptions metricValue
+
+ // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#compaction-metrics
+ compactionBytesCompacted metricValue
+ compactionPendingTasks metricValue
+ compactionCompletedTasks metricValue
+
+ // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#memory
+ jvmMemoryHeapUsed metricValue
+ jvmMemoryNonHeapUsed metricValue
+ // https://cassandra.apache.org/doc/latest/cassandra/operating/metrics.html#garbagecollector
+ jvmGCParNewCount metricValue
+ jvmGCParNewTime metricValue
+ jvmGCCMSCount metricValue
+ jvmGCCMSTime metricValue
+
+ threadPools map[string]*threadPoolMetrics
+}
+
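+// threadPoolMetrics holds the task counters of a single thread pool, keyed in cassandraMetrics.threadPools by the pool's scope label.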
+type threadPoolMetrics struct {
+ name string
+ hasCharts bool
+
+ activeTasks metricValue
+ pendingTasks metricValue
+ blockedTasks metricValue
+ totalBlockedTasks metricValue
+}
+
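+// metricValue accumulates samples within one collection cycle; the write helpers emit a value only if at least one sample was added.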
+type metricValue struct {
+ isSet bool
+ value float64
+}
+
+func (mv *metricValue) add(v float64) {
+ mv.isSet = true
+ mv.value += v
+}
+
+func (mv *metricValue) write(mx map[string]int64, key string) {
+ if mv.isSet {
+ mx[key] = int64(mv.value)
+ }
+}
+
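+// write1k writes the value multiplied by 1000 so fractional values (e.g. latency in seconds) survive the int64 conversion.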
+func (mv *metricValue) write1k(mx map[string]int64, key string) {
+ if mv.isSet {
+ mx[key] = int64(mv.value * 1000)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/cassandra/testdata/config.json b/src/go/plugin/go.d/modules/cassandra/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/cassandra/testdata/config.yaml b/src/go/plugin/go.d/modules/cassandra/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/cassandra/testdata/metrics.txt b/src/go/plugin/go.d/modules/cassandra/testdata/metrics.txt
new file mode 100644
index 000000000..663a68080
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cassandra/testdata/metrics.txt
@@ -0,0 +1,402 @@
+# HELP jvm_threads_current Current thread count of a JVM
+# TYPE jvm_threads_current gauge
+jvm_threads_current 93.0
+# HELP jvm_threads_daemon Daemon thread count of a JVM
+# TYPE jvm_threads_daemon gauge
+jvm_threads_daemon 82.0
+# HELP jvm_threads_peak Peak thread count of a JVM
+# TYPE jvm_threads_peak gauge
+jvm_threads_peak 94.0
+# HELP jvm_threads_started_total Started thread count of a JVM
+# TYPE jvm_threads_started_total counter
+jvm_threads_started_total 1860.0
+# HELP jvm_threads_deadlocked Cycles of JVM-threads that are in deadlock waiting to acquire object monitors or ownable synchronizers
+# TYPE jvm_threads_deadlocked gauge
+jvm_threads_deadlocked 0.0
+# HELP jvm_threads_deadlocked_monitor Cycles of JVM-threads that are in deadlock waiting to acquire object monitors
+# TYPE jvm_threads_deadlocked_monitor gauge
+jvm_threads_deadlocked_monitor 0.0
+# HELP jvm_threads_state Current count of threads by state
+# TYPE jvm_threads_state gauge
+jvm_threads_state{state="NEW",} 0.0
+jvm_threads_state{state="TERMINATED",} 0.0
+jvm_threads_state{state="RUNNABLE",} 16.0
+jvm_threads_state{state="BLOCKED",} 0.0
+jvm_threads_state{state="WAITING",} 46.0
+jvm_threads_state{state="TIMED_WAITING",} 31.0
+jvm_threads_state{state="UNKNOWN",} 0.0
+# HELP jvm_memory_pool_allocated_bytes_total Total bytes allocated in a given JVM memory pool. Only updated after GC, not continuously.
+# TYPE jvm_memory_pool_allocated_bytes_total counter
+jvm_memory_pool_allocated_bytes_total{pool="Par Survivor Space",} 1.52801872E8
+jvm_memory_pool_allocated_bytes_total{pool="CMS Old Gen",} 8.55035344E8
+jvm_memory_pool_allocated_bytes_total{pool="CodeHeap 'profiled nmethods'",} 2.4841216E7
+jvm_memory_pool_allocated_bytes_total{pool="CodeHeap 'non-profiled nmethods'",} 1.3023104E7
+jvm_memory_pool_allocated_bytes_total{pool="Compressed Class Space",} 6640584.0
+jvm_memory_pool_allocated_bytes_total{pool="Metaspace",} 5.3862968E7
+jvm_memory_pool_allocated_bytes_total{pool="Par Eden Space",} 7.3147804328E10
+jvm_memory_pool_allocated_bytes_total{pool="CodeHeap 'non-nmethods'",} 1530112.0
+# HELP jvm_gc_collection_seconds Time spent in a given JVM garbage collector in seconds.
+# TYPE jvm_gc_collection_seconds summary
+jvm_gc_collection_seconds_count{gc="ParNew",} 218.0
+jvm_gc_collection_seconds_sum{gc="ParNew",} 1.617
+jvm_gc_collection_seconds_count{gc="ConcurrentMarkSweep",} 1.0
+jvm_gc_collection_seconds_sum{gc="ConcurrentMarkSweep",} 0.059
+# HELP jvm_classes_currently_loaded The number of classes that are currently loaded in the JVM
+# TYPE jvm_classes_currently_loaded gauge
+jvm_classes_currently_loaded 9663.0
+# HELP jvm_classes_loaded_total The total number of classes that have been loaded since the JVM has started execution
+# TYPE jvm_classes_loaded_total counter
+jvm_classes_loaded_total 9663.0
+# HELP jvm_classes_unloaded_total The total number of classes that have been unloaded since the JVM has started execution
+# TYPE jvm_classes_unloaded_total counter
+jvm_classes_unloaded_total 0.0
+# HELP jmx_config_reload_success_total Number of times configuration have successfully been reloaded.
+# TYPE jmx_config_reload_success_total counter
+jmx_config_reload_success_total 0.0
+# HELP jmx_config_reload_failure_total Number of times configuration have failed to be reloaded.
+# TYPE jmx_config_reload_failure_total counter
+jmx_config_reload_failure_total 0.0
+# HELP org_apache_cassandra_metrics_clientrequest_50thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=50thPercentile
+# TYPE org_apache_cassandra_metrics_clientrequest_50thpercentile untyped
+org_apache_cassandra_metrics_clientrequest_50thpercentile{scope="Read",name="Latency",} 61.214
+org_apache_cassandra_metrics_clientrequest_50thpercentile{scope="Write",name="Latency",} 35.425000000000004
+# HELP org_apache_cassandra_metrics_clientrequest_95thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=95thPercentile
+# TYPE org_apache_cassandra_metrics_clientrequest_95thpercentile untyped
+org_apache_cassandra_metrics_clientrequest_95thpercentile{scope="Read",name="Latency",} 126.934
+org_apache_cassandra_metrics_clientrequest_95thpercentile{scope="Write",name="Latency",} 105.778
+# HELP org_apache_cassandra_metrics_cache_count Attribute exposed for management org.apache.cassandra.metrics:name=Misses,type=Cache,attribute=Count
+# TYPE org_apache_cassandra_metrics_cache_count untyped
+org_apache_cassandra_metrics_cache_count{scope="KeyCache",name="Misses",} 194890.0
+org_apache_cassandra_metrics_cache_count{scope="KeyCache",name="Hits",} 1336427.0
+org_apache_cassandra_metrics_cache_count{scope="RowCache",name="Hits",} 0.0
+org_apache_cassandra_metrics_cache_count{scope="RowCache",name="Misses",} 0.0
+# HELP org_apache_cassandra_metrics_storage_count Attribute exposed for management org.apache.cassandra.metrics:name=Exceptions,type=Storage,attribute=Count
+# TYPE org_apache_cassandra_metrics_storage_count untyped
+org_apache_cassandra_metrics_storage_count{name="Exceptions",} 0.0
+org_apache_cassandra_metrics_storage_count{name="Load",} 8.58272986E8
+# HELP org_apache_cassandra_metrics_compaction_count Attribute exposed for management org.apache.cassandra.metrics:name=BytesCompacted,type=Compaction,attribute=Count
+# TYPE org_apache_cassandra_metrics_compaction_count untyped
+org_apache_cassandra_metrics_compaction_count{name="BytesCompacted",} 2532.0
+# HELP org_apache_cassandra_metrics_clientrequest_count Attribute exposed for management org.apache.cassandra.metrics:name=Timeouts,type=ClientRequest,attribute=Count
+# TYPE org_apache_cassandra_metrics_clientrequest_count untyped
+org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="Timeouts",} 0.0
+org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="Latency",} 333316.0
+org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="Unavailables",} 0.0
+org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="TotalLatency",} 1.4253267E7
+org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="Timeouts",} 0.0
+org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="Failures",} 0.0
+org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="Latency",} 331841.0
+org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="Failures",} 0.0
+org_apache_cassandra_metrics_clientrequest_count{scope="Read",name="TotalLatency",} 2.3688998E7
+org_apache_cassandra_metrics_clientrequest_count{scope="Write",name="Unavailables",} 0.0
+# HELP org_apache_cassandra_metrics_cache_value Attribute exposed for management org.apache.cassandra.metrics:name=Size,type=Cache,attribute=Value
+# TYPE org_apache_cassandra_metrics_cache_value untyped
+org_apache_cassandra_metrics_cache_value{scope="RowCache",name="Size",} 0.0
+org_apache_cassandra_metrics_cache_value{scope="KeyCache",name="Size",} 1.96559936E8
+org_apache_cassandra_metrics_cache_value{scope="RowCache",name="Capacity",} 0.0
+org_apache_cassandra_metrics_cache_value{scope="KeyCache",name="Capacity",} 9.437184E8
+# HELP org_apache_cassandra_metrics_clientrequest_75thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=75thPercentile
+# TYPE org_apache_cassandra_metrics_clientrequest_75thpercentile untyped
+org_apache_cassandra_metrics_clientrequest_75thpercentile{scope="Read",name="Latency",} 88.148
+org_apache_cassandra_metrics_clientrequest_75thpercentile{scope="Write",name="Latency",} 61.214
+# HELP org_apache_cassandra_metrics_clientrequest_999thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=999thPercentile
+# TYPE org_apache_cassandra_metrics_clientrequest_999thpercentile untyped
+org_apache_cassandra_metrics_clientrequest_999thpercentile{scope="Read",name="Latency",} 454.826
+org_apache_cassandra_metrics_clientrequest_999thpercentile{scope="Write",name="Latency",} 315.85200000000003
+# HELP org_apache_cassandra_metrics_clientrequest_99thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=99thPercentile
+# TYPE org_apache_cassandra_metrics_clientrequest_99thpercentile untyped
+org_apache_cassandra_metrics_clientrequest_99thpercentile{scope="Read",name="Latency",} 219.342
+org_apache_cassandra_metrics_clientrequest_99thpercentile{scope="Write",name="Latency",} 152.321
+# HELP org_apache_cassandra_metrics_threadpools_value Attribute exposed for management org.apache.cassandra.metrics:name=ActiveTasks,type=ThreadPools,attribute=Value
+# TYPE org_apache_cassandra_metrics_threadpools_value untyped
+org_apache_cassandra_metrics_threadpools_value{path="transport",scope="Native-Transport-Requests",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="HintsDispatcher",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="transport",scope="Native-Transport-Requests",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtableFlushWriter",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="CompactionExecutor",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="Sampler",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtableReclaimMemory",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="ViewBuildExecutor",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtableReclaimMemory",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="PerDiskMemtableFlushWriter_0",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtablePostFlush",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="SecondaryIndexManagement",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="ValidationExecutor",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="Sampler",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtableFlushWriter",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="ValidationExecutor",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="HintsDispatcher",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="SecondaryIndexManagement",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="request",scope="MutationStage",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="request",scope="ReadStage",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="GossipStage",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="CacheCleanupExecutor",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="CompactionExecutor",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="request",scope="MutationStage",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="PendingRangeCalculator",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="CacheCleanupExecutor",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="MemtablePostFlush",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="ViewBuildExecutor",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="request",scope="ReadStage",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="PerDiskMemtableFlushWriter_0",name="PendingTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="PendingRangeCalculator",name="ActiveTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_value{path="internal",scope="GossipStage",name="ActiveTasks",} 0.0
+# HELP org_apache_cassandra_metrics_droppedmessage_count Attribute exposed for management org.apache.cassandra.metrics:name=Dropped,type=DroppedMessage,attribute=Count
+# TYPE org_apache_cassandra_metrics_droppedmessage_count untyped
+org_apache_cassandra_metrics_droppedmessage_count{scope="FINALIZE_PROPOSE_MSG",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="COUNTER_MUTATION_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_REMOVE",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="MUTATION_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_PULL_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="READ_REPAIR",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="_TEST_2",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_STORE_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_COMMIT_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_PROPOSE_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="RANGE_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SNAPSHOT_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_VERSION_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PING_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SNAPSHOT_MSG",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="VALIDATION_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="GOSSIP_DIGEST_SYN",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="HINT_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="FINALIZE_PROMISE_MSG",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="GOSSIP_SHUTDOWN",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_PROPOSE_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="RANGE_SLICE",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="REPAIR_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="COUNTER_MUTATION_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="MUTATION",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="_TRACE",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PING_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="CLEANUP_MSG",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_STORE",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="REQUEST_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_STORE_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="ECHO_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="READ_REPAIR_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="READ_REPAIR_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="STATUS_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="REPLICATION_DONE_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="REQUEST_RESPONSE",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PREPARE_CONSISTENT_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="_SAMPLE",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_VERSION_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="FAILURE_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="GOSSIP_DIGEST_ACK2",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SYNC_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="TRUNCATE_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="COUNTER_MUTATION",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="UNUSED_CUSTOM_VERB",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="READ_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PREPARE_MSG",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_REMOVE_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="ECHO_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="FAILED_SESSION_MSG",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_PREPARE_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="STATUS_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="_TEST_1",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="HINT",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_PUSH_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PREPARE_CONSISTENT_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="REPLICATION_DONE_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="MUTATION_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_PULL_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="READ_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="GOSSIP_DIGEST_ACK",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="FINALIZE_COMMIT_MSG",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="RANGE_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SYNC_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="INTERNAL_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="TRUNCATE_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_COMMIT_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="READ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="BATCH_REMOVE_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="HINT_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SNAPSHOT_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="PAXOS_PREPARE_REQ",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="VALIDATION_RSP",name="Dropped",} 0.0
+org_apache_cassandra_metrics_droppedmessage_count{scope="SCHEMA_PUSH_RSP",name="Dropped",} 0.0
+# HELP org_apache_cassandra_metrics_clientrequest_98thpercentile Attribute exposed for management org.apache.cassandra.metrics:name=Latency,type=ClientRequest,attribute=98thPercentile
+# TYPE org_apache_cassandra_metrics_clientrequest_98thpercentile untyped
+org_apache_cassandra_metrics_clientrequest_98thpercentile{scope="Read",name="Latency",} 182.785
+org_apache_cassandra_metrics_clientrequest_98thpercentile{scope="Write",name="Latency",} 126.934
+# HELP org_apache_cassandra_metrics_threadpools_count Attribute exposed for management org.apache.cassandra.metrics:name=TotalBlockedTasks,type=ThreadPools,attribute=Count
+# TYPE org_apache_cassandra_metrics_threadpools_count untyped
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="HintsDispatcher",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="request",scope="MutationStage",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="Sampler",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="GossipStage",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtableFlushWriter",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="Sampler",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="PerDiskMemtableFlushWriter_0",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtableFlushWriter",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="CacheCleanupExecutor",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="ValidationExecutor",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtableReclaimMemory",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="GossipStage",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtablePostFlush",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="ViewBuildExecutor",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="CacheCleanupExecutor",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="PendingRangeCalculator",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtableReclaimMemory",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="HintsDispatcher",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="CompactionExecutor",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="transport",scope="Native-Transport-Requests",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="transport",scope="Native-Transport-Requests",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="SecondaryIndexManagement",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="MemtablePostFlush",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="request",scope="MutationStage",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="ValidationExecutor",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="PerDiskMemtableFlushWriter_0",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="request",scope="ReadStage",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="ViewBuildExecutor",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="SecondaryIndexManagement",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="PendingRangeCalculator",name="CurrentlyBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="internal",scope="CompactionExecutor",name="TotalBlockedTasks",} 0.0
+org_apache_cassandra_metrics_threadpools_count{path="request",scope="ReadStage",name="TotalBlockedTasks",} 0.0
+# HELP org_apache_cassandra_metrics_compaction_value Attribute exposed for management org.apache.cassandra.metrics:name=CompletedTasks,type=Compaction,attribute=Value
+# TYPE org_apache_cassandra_metrics_compaction_value untyped
+org_apache_cassandra_metrics_compaction_value{name="CompletedTasks",} 1078.0
+org_apache_cassandra_metrics_compaction_value{name="PendingTasks",} 0.0
+# HELP jmx_scrape_duration_seconds Time this JMX scrape took, in seconds.
+# TYPE jmx_scrape_duration_seconds gauge
+jmx_scrape_duration_seconds 0.102931999
+# HELP jmx_scrape_error Non-zero if this scrape failed.
+# TYPE jmx_scrape_error gauge
+jmx_scrape_error 0.0
+# HELP jmx_scrape_cached_beans Number of beans with their matching rule cached
+# TYPE jmx_scrape_cached_beans gauge
+jmx_scrape_cached_beans 0.0
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 155.0
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.666810482687E9
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 213.0
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 100000.0
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 5.105344512E9
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 3.464957952E9
+# HELP jvm_memory_objects_pending_finalization The number of objects waiting in the finalizer queue.
+# TYPE jvm_memory_objects_pending_finalization gauge
+jvm_memory_objects_pending_finalization 0.0
+# HELP jvm_memory_bytes_used Used bytes of a given JVM memory area.
+# TYPE jvm_memory_bytes_used gauge
+jvm_memory_bytes_used{area="heap",} 1.134866288E9
+jvm_memory_bytes_used{area="nonheap",} 9.6565696E7
+# HELP jvm_memory_bytes_committed Committed (bytes) of a given JVM memory area.
+# TYPE jvm_memory_bytes_committed gauge
+jvm_memory_bytes_committed{area="heap",} 2.0447232E9
+jvm_memory_bytes_committed{area="nonheap",} 1.01838848E8
+# HELP jvm_memory_bytes_max Max (bytes) of a given JVM memory area.
+# TYPE jvm_memory_bytes_max gauge
+jvm_memory_bytes_max{area="heap",} 2.0447232E9
+jvm_memory_bytes_max{area="nonheap",} -1.0
+# HELP jvm_memory_bytes_init Initial bytes of a given JVM memory area.
+# TYPE jvm_memory_bytes_init gauge
+jvm_memory_bytes_init{area="heap",} 2.08666624E9
+jvm_memory_bytes_init{area="nonheap",} 7667712.0
+# HELP jvm_memory_pool_bytes_used Used bytes of a given JVM memory pool.
+# TYPE jvm_memory_pool_bytes_used gauge
+jvm_memory_pool_bytes_used{pool="CodeHeap 'non-nmethods'",} 1443712.0
+jvm_memory_pool_bytes_used{pool="Metaspace",} 5.386508E7
+jvm_memory_pool_bytes_used{pool="CodeHeap 'profiled nmethods'",} 2.2212992E7
+jvm_memory_pool_bytes_used{pool="Compressed Class Space",} 6640584.0
+jvm_memory_pool_bytes_used{pool="Par Eden Space",} 2.6869912E8
+jvm_memory_pool_bytes_used{pool="Par Survivor Space",} 1.1131824E7
+jvm_memory_pool_bytes_used{pool="CodeHeap 'non-profiled nmethods'",} 1.2403328E7
+jvm_memory_pool_bytes_used{pool="CMS Old Gen",} 8.55035344E8
+# HELP jvm_memory_pool_bytes_committed Committed bytes of a given JVM memory pool.
+# TYPE jvm_memory_pool_bytes_committed gauge
+jvm_memory_pool_bytes_committed{pool="CodeHeap 'non-nmethods'",} 2555904.0
+jvm_memory_pool_bytes_committed{pool="Metaspace",} 5.574656E7
+jvm_memory_pool_bytes_committed{pool="CodeHeap 'profiled nmethods'",} 2.3724032E7
+jvm_memory_pool_bytes_committed{pool="Compressed Class Space",} 7360512.0
+jvm_memory_pool_bytes_committed{pool="Par Eden Space",} 3.3554432E8
+jvm_memory_pool_bytes_committed{pool="Par Survivor Space",} 4.194304E7
+jvm_memory_pool_bytes_committed{pool="CodeHeap 'non-profiled nmethods'",} 1.245184E7
+jvm_memory_pool_bytes_committed{pool="CMS Old Gen",} 1.66723584E9
+# HELP jvm_memory_pool_bytes_max Max bytes of a given JVM memory pool.
+# TYPE jvm_memory_pool_bytes_max gauge
+jvm_memory_pool_bytes_max{pool="CodeHeap 'non-nmethods'",} 5832704.0
+jvm_memory_pool_bytes_max{pool="Metaspace",} -1.0
+jvm_memory_pool_bytes_max{pool="CodeHeap 'profiled nmethods'",} 1.22912768E8
+jvm_memory_pool_bytes_max{pool="Compressed Class Space",} 1.073741824E9
+jvm_memory_pool_bytes_max{pool="Par Eden Space",} 3.3554432E8
+jvm_memory_pool_bytes_max{pool="Par Survivor Space",} 4.194304E7
+jvm_memory_pool_bytes_max{pool="CodeHeap 'non-profiled nmethods'",} 1.22912768E8
+jvm_memory_pool_bytes_max{pool="CMS Old Gen",} 1.66723584E9
+# HELP jvm_memory_pool_bytes_init Initial bytes of a given JVM memory pool.
+# TYPE jvm_memory_pool_bytes_init gauge
+jvm_memory_pool_bytes_init{pool="CodeHeap 'non-nmethods'",} 2555904.0
+jvm_memory_pool_bytes_init{pool="Metaspace",} 0.0
+jvm_memory_pool_bytes_init{pool="CodeHeap 'profiled nmethods'",} 2555904.0
+jvm_memory_pool_bytes_init{pool="Compressed Class Space",} 0.0
+jvm_memory_pool_bytes_init{pool="Par Eden Space",} 3.3554432E8
+jvm_memory_pool_bytes_init{pool="Par Survivor Space",} 4.194304E7
+jvm_memory_pool_bytes_init{pool="CodeHeap 'non-profiled nmethods'",} 2555904.0
+jvm_memory_pool_bytes_init{pool="CMS Old Gen",} 1.66723584E9
+# HELP jvm_memory_pool_collection_used_bytes Used bytes after last collection of a given JVM memory pool.
+# TYPE jvm_memory_pool_collection_used_bytes gauge
+jvm_memory_pool_collection_used_bytes{pool="Par Eden Space",} 0.0
+jvm_memory_pool_collection_used_bytes{pool="Par Survivor Space",} 1.1131824E7
+jvm_memory_pool_collection_used_bytes{pool="CMS Old Gen",} 0.0
+# HELP jvm_memory_pool_collection_committed_bytes Committed after last collection bytes of a given JVM memory pool.
+# TYPE jvm_memory_pool_collection_committed_bytes gauge
+jvm_memory_pool_collection_committed_bytes{pool="Par Eden Space",} 3.3554432E8
+jvm_memory_pool_collection_committed_bytes{pool="Par Survivor Space",} 4.194304E7
+jvm_memory_pool_collection_committed_bytes{pool="CMS Old Gen",} 1.66723584E9
+# HELP jvm_memory_pool_collection_max_bytes Max bytes after last collection of a given JVM memory pool.
+# TYPE jvm_memory_pool_collection_max_bytes gauge
+jvm_memory_pool_collection_max_bytes{pool="Par Eden Space",} 3.3554432E8
+jvm_memory_pool_collection_max_bytes{pool="Par Survivor Space",} 4.194304E7
+jvm_memory_pool_collection_max_bytes{pool="CMS Old Gen",} 1.66723584E9
+# HELP jvm_memory_pool_collection_init_bytes Initial after last collection bytes of a given JVM memory pool.
+# TYPE jvm_memory_pool_collection_init_bytes gauge
+jvm_memory_pool_collection_init_bytes{pool="Par Eden Space",} 3.3554432E8
+jvm_memory_pool_collection_init_bytes{pool="Par Survivor Space",} 4.194304E7
+jvm_memory_pool_collection_init_bytes{pool="CMS Old Gen",} 1.66723584E9
+# HELP jvm_info VM version info
+# TYPE jvm_info gauge
+jvm_info{runtime="OpenJDK Runtime Environment",vendor="Debian",version="11.0.16+8-post-Debian-1deb11u1",} 1.0
+# HELP jvm_buffer_pool_used_bytes Used bytes of a given JVM buffer pool.
+# TYPE jvm_buffer_pool_used_bytes gauge
+jvm_buffer_pool_used_bytes{pool="mapped",} 9.20360582E8
+jvm_buffer_pool_used_bytes{pool="direct",} 5.1679788E7
+# HELP jvm_buffer_pool_capacity_bytes Bytes capacity of a given JVM buffer pool.
+# TYPE jvm_buffer_pool_capacity_bytes gauge
+jvm_buffer_pool_capacity_bytes{pool="mapped",} 9.20360582E8
+jvm_buffer_pool_capacity_bytes{pool="direct",} 5.1679786E7
+# HELP jvm_buffer_pool_used_buffers Used buffers of a given JVM buffer pool.
+# TYPE jvm_buffer_pool_used_buffers gauge
+jvm_buffer_pool_used_buffers{pool="mapped",} 74.0
+jvm_buffer_pool_used_buffers{pool="direct",} 34.0
+# HELP jmx_exporter_build_info A metric with a constant '1' value labeled with the version of the JMX exporter.
+# TYPE jmx_exporter_build_info gauge
+jmx_exporter_build_info{version="0.17.2",name="jmx_prometheus_javaagent",} 1.0
+# HELP jmx_config_reload_failure_created Number of times configuration have failed to be reloaded.
+# TYPE jmx_config_reload_failure_created gauge
+jmx_config_reload_failure_created 1.666810482756E9
+# HELP jmx_config_reload_success_created Number of times configuration have successfully been reloaded.
+# TYPE jmx_config_reload_success_created gauge
+jmx_config_reload_success_created 1.666810482755E9
+# HELP jvm_memory_pool_allocated_bytes_created Total bytes allocated in a given JVM memory pool. Only updated after GC, not continuously.
+# TYPE jvm_memory_pool_allocated_bytes_created gauge
+jvm_memory_pool_allocated_bytes_created{pool="Par Survivor Space",} 1.666810483789E9
+jvm_memory_pool_allocated_bytes_created{pool="CMS Old Gen",} 1.666810484715E9
+jvm_memory_pool_allocated_bytes_created{pool="CodeHeap 'profiled nmethods'",} 1.666810483788E9
+jvm_memory_pool_allocated_bytes_created{pool="CodeHeap 'non-profiled nmethods'",} 1.666810483789E9
+jvm_memory_pool_allocated_bytes_created{pool="Compressed Class Space",} 1.666810483789E9
+jvm_memory_pool_allocated_bytes_created{pool="Metaspace",} 1.666810483789E9
+jvm_memory_pool_allocated_bytes_created{pool="Par Eden Space",} 1.666810483789E9
+jvm_memory_pool_allocated_bytes_created{pool="CodeHeap 'non-nmethods'",} 1.666810483789E9 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/chrony/README.md b/src/go/plugin/go.d/modules/chrony/README.md
new file mode 120000
index 000000000..4a58f3733
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/README.md
@@ -0,0 +1 @@
+integrations/chrony.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/chrony/charts.go b/src/go/plugin/go.d/modules/chrony/charts.go
new file mode 100644
index 000000000..37a6fa3e6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/charts.go
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package chrony
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
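+// Chart priorities determine the order in which charts appear on the dashboard (lower value renders first).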
+const (
+ prioStratum = module.Priority + iota
+ prioCurrentCorrection
+ prioRootDelay
+ prioRootDispersion
+ prioLastOffset
+ prioRmsOffset
+ prioFrequency
+ prioResidualFrequency
+ prioSkew
+ prioUpdateInterval
+ prioRefMeasurementTime
+ prioLeapStatus
+ prioActivity
+ //prioNTPPackets
+ //prioCommandPackets
+ //prioNKEConnections
+ //prioClientLogRecords
+)
+
+var charts = module.Charts{
+ stratumChart.Copy(),
+
+ currentCorrectionChart.Copy(),
+
+ rootDelayChart.Copy(),
+ rootDispersionChart.Copy(),
+
+ lastOffsetChart.Copy(),
+ rmsOffsetChart.Copy(),
+
+ frequencyChart.Copy(),
+ residualFrequencyChart.Copy(),
+
+ skewChart.Copy(),
+
+ updateIntervalChart.Copy(),
+ refMeasurementTimeChart.Copy(),
+
+ leapStatusChart.Copy(),
+
+ activityChart.Copy(),
+}
+
+// Tracking charts
+var (
+ stratumChart = module.Chart{
+ ID: "stratum",
+ Title: "Distance to the reference clock",
+ Units: "level",
+ Fam: "stratum",
+ Ctx: "chrony.stratum",
+ Priority: prioStratum,
+ Dims: module.Dims{
+ {ID: "stratum", Name: "stratum"},
+ },
+ }
+
+ currentCorrectionChart = module.Chart{
+ ID: "current_correction",
+ Title: "Current correction",
+ Units: "seconds",
+ Fam: "correction",
+ Ctx: "chrony.current_correction",
+ Priority: prioCurrentCorrection,
+ Dims: module.Dims{
+ {ID: "current_correction", Div: scaleFactor},
+ },
+ }
+
+ rootDelayChart = module.Chart{
+ ID: "root_delay",
+ Title: "Network path delay to stratum-1",
+ Units: "seconds",
+ Fam: "root",
+ Ctx: "chrony.root_delay",
+ Priority: prioRootDelay,
+ Dims: module.Dims{
+ {ID: "root_delay", Div: scaleFactor},
+ },
+ }
+ rootDispersionChart = module.Chart{
+ ID: "root_dispersion",
+ Title: "Dispersion accumulated back to stratum-1",
+ Units: "seconds",
+ Fam: "root",
+ Ctx: "chrony.root_dispersion",
+ Priority: prioRootDispersion,
+ Dims: module.Dims{
+ {ID: "root_dispersion", Div: scaleFactor},
+ },
+ }
+
+ lastOffsetChart = module.Chart{
+ ID: "last_offset",
+ Title: "Offset on the last clock update",
+ Units: "seconds",
+ Fam: "offset",
+ Ctx: "chrony.last_offset",
+ Priority: prioLastOffset,
+ Dims: module.Dims{
+ {ID: "last_offset", Name: "offset", Div: scaleFactor},
+ },
+ }
+ rmsOffsetChart = module.Chart{
+ ID: "rms_offset",
+ Title: "Long-term average of the offset value",
+ Units: "seconds",
+ Fam: "offset",
+ Ctx: "chrony.rms_offset",
+ Priority: prioRmsOffset,
+ Dims: module.Dims{
+ {ID: "rms_offset", Name: "offset", Div: scaleFactor},
+ },
+ }
+
+ frequencyChart = module.Chart{
+ ID: "frequency",
+ Title: "Frequency",
+ Units: "ppm",
+ Fam: "frequency",
+ Ctx: "chrony.frequency",
+ Priority: prioFrequency,
+ Dims: module.Dims{
+ {ID: "frequency", Div: scaleFactor},
+ },
+ }
+ residualFrequencyChart = module.Chart{
+ ID: "residual_frequency",
+ Title: "Residual frequency",
+ Units: "ppm",
+ Fam: "frequency",
+ Ctx: "chrony.residual_frequency",
+ Priority: prioResidualFrequency,
+ Dims: module.Dims{
+ {ID: "residual_frequency", Div: scaleFactor},
+ },
+ }
+
+ skewChart = module.Chart{
+ ID: "skew",
+ Title: "Skew",
+ Units: "ppm",
+ Fam: "frequency",
+ Ctx: "chrony.skew",
+ Priority: prioSkew,
+ Dims: module.Dims{
+ {ID: "skew", Div: scaleFactor},
+ },
+ }
+
+ updateIntervalChart = module.Chart{
+ ID: "update_interval",
+ Title: "Interval between the last two clock updates",
+ Units: "seconds",
+ Fam: "updates",
+ Ctx: "chrony.update_interval",
+ Priority: prioUpdateInterval,
+ Dims: module.Dims{
+ {ID: "update_interval", Div: scaleFactor},
+ },
+ }
+ refMeasurementTimeChart = module.Chart{
+ ID: "ref_measurement_time",
+ Title: "Time since the last measurement",
+ Units: "seconds",
+ Fam: "updates",
+ Ctx: "chrony.ref_measurement_time",
+ Priority: prioRefMeasurementTime,
+ Dims: module.Dims{
+ {ID: "ref_measurement_time"},
+ },
+ }
+
+ leapStatusChart = module.Chart{
+ ID: "leap_status",
+ Title: "Leap status",
+ Units: "status",
+ Fam: "leap status",
+ Ctx: "chrony.leap_status",
+ Priority: prioLeapStatus,
+ Dims: module.Dims{
+ {ID: "leap_status_normal", Name: "normal"},
+ {ID: "leap_status_insert_second", Name: "insert_second"},
+ {ID: "leap_status_delete_second", Name: "delete_second"},
+ {ID: "leap_status_unsynchronised", Name: "unsynchronised"},
+ },
+ }
+)
+
+// Activity charts
+var (
+ activityChart = module.Chart{
+ ID: "activity",
+ Title: "Peers activity",
+ Units: "sources",
+ Fam: "activity",
+ Ctx: "chrony.activity",
+ Type: module.Stacked,
+ Priority: prioActivity,
+ Dims: module.Dims{
+ {ID: "online_sources", Name: "online"},
+ {ID: "offline_sources", Name: "offline"},
+ {ID: "burst_online_sources", Name: "burst_online"},
+ {ID: "burst_offline_sources", Name: "burst_offline"},
+ {ID: "unresolved_sources", Name: "unresolved"},
+ },
+ }
+)
+
+//var serverStatsVer1Charts = module.Charts{
+// ntpPacketsChart.Copy(),
+// commandPacketsChart.Copy(),
+// clientLogRecordsChart.Copy(),
+//}
+//
+//var serverStatsVer2Charts = module.Charts{
+// ntpPacketsChart.Copy(),
+// commandPacketsChart.Copy(),
+// clientLogRecordsChart.Copy(),
+// nkeConnectionChart.Copy(),
+//}
+//
+//var serverStatsVer3Charts = module.Charts{
+// ntpPacketsChart.Copy(),
+// commandPacketsChart.Copy(),
+// clientLogRecordsChart.Copy(),
+// nkeConnectionChart.Copy(),
+//}
+//
+//var serverStatsVer4Charts = module.Charts{
+// ntpPacketsChart.Copy(),
+// commandPacketsChart.Copy(),
+// clientLogRecordsChart.Copy(),
+// nkeConnectionChart.Copy(),
+//}
+
+// ServerStats charts
+//var (
+// ntpPacketsChart = module.Chart{
+// ID: "ntp_packets",
+// Title: "NTP packets",
+// Units: "packets/s",
+// Fam: "client requests",
+// Ctx: "chrony.ntp_packets",
+// Type: module.Stacked,
+// Priority: prioNTPPackets,
+// Dims: module.Dims{
+// {ID: "ntp_packets_received", Name: "received", Algo: module.Incremental},
+// {ID: "ntp_packets_dropped", Name: "dropped", Algo: module.Incremental},
+// },
+// }
+// commandPacketsChart = module.Chart{
+// ID: "command_packets",
+// Title: "Command packets",
+// Units: "packets/s",
+// Fam: "client requests",
+// Ctx: "chrony.command_packets",
+// Type: module.Stacked,
+// Priority: prioCommandPackets,
+// Dims: module.Dims{
+// {ID: "command_packets_received", Name: "received", Algo: module.Incremental},
+// {ID: "command_packets_dropped", Name: "dropped", Algo: module.Incremental},
+// },
+// }
+// nkeConnectionChart = module.Chart{
+// ID: "nke_connections",
+// Title: "NTS-KE connections",
+// Units: "connections/s",
+// Fam: "client requests",
+// Ctx: "chrony.nke_connections",
+// Type: module.Stacked,
+// Priority: prioNKEConnections,
+// Dims: module.Dims{
+// {ID: "nke_connections_accepted", Name: "accepted", Algo: module.Incremental},
+// {ID: "nke_connections_dropped", Name: "dropped", Algo: module.Incremental},
+// },
+// }
+// clientLogRecordsChart = module.Chart{
+// ID: "client_log_records",
+// Title: "Client log records",
+// Units: "records/s",
+// Fam: "client requests",
+// Ctx: "chrony.client_log_records",
+// Type: module.Stacked,
+// Priority: prioClientLogRecords,
+// Dims: module.Dims{
+// {ID: "client_log_records_dropped", Name: "dropped", Algo: module.Incremental},
+// },
+// }
+//)
+
+//func (c *Chrony) addServerStatsCharts(stats *serverStats) {
+// var err error
+//
+// switch {
+// case stats.v1 != nil:
+// err = c.Charts().Add(*serverStatsVer1Charts.Copy()...)
+// case stats.v2 != nil:
+// err = c.Charts().Add(*serverStatsVer2Charts.Copy()...)
+// case stats.v3 != nil:
+// err = c.Charts().Add(*serverStatsVer3Charts.Copy()...)
+// case stats.v4 != nil:
+// err = c.Charts().Add(*serverStatsVer4Charts.Copy()...)
+// default:
+// err = errors.New("unknown stats chart")
+// }
+//
+// if err != nil {
+// c.Warning(err)
+// }
+//}
diff --git a/src/go/plugin/go.d/modules/chrony/chrony.go b/src/go/plugin/go.d/modules/chrony/chrony.go
new file mode 100644
index 000000000..0bdd3183c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/chrony.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package chrony
+
+import (
+ _ "embed"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/facebook/time/ntp/chrony"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("chrony", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Chrony {
+ return &Chrony{
+ Config: Config{
+ Address: "127.0.0.1:323",
+ Timeout: web.Duration(time.Second),
+ },
+ charts: charts.Copy(),
+ addStatsChartsOnce: &sync.Once{},
+ newClient: newChronyClient,
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ Chrony struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
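+ // addStatsChartsOnce guards registration of the server-stats charts, which are currently disabled (see the commented-out code in charts.go and collect.go).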
+ addStatsChartsOnce *sync.Once
+
+ client chronyClient
+ newClient func(c Config) (chronyClient, error)
+ }
+ chronyClient interface {
+ Tracking() (*chrony.ReplyTracking, error)
+ Activity() (*chrony.ReplyActivity, error)
+ ServerStats() (*serverStats, error)
+ Close()
+ }
+)
+
+func (c *Chrony) Configuration() any {
+ return c.Config
+}
+
+func (c *Chrony) Init() error {
+ if err := c.validateConfig(); err != nil {
+ c.Errorf("config validation: %v", err)
+ return err
+ }
+
+ return nil
+}
+
+func (c *Chrony) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (c *Chrony) Charts() *module.Charts {
+ return c.charts
+}
+
+func (c *Chrony) Collect() map[string]int64 {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (c *Chrony) Cleanup() {
+ if c.client != nil {
+ c.client.Close()
+ c.client = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/chrony/chrony_test.go b/src/go/plugin/go.d/modules/chrony/chrony_test.go
new file mode 100644
index 000000000..407724e75
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/chrony_test.go
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package chrony
+
+import (
+ "errors"
+ "net"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/facebook/time/ntp/chrony"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestChrony_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Chrony{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestChrony_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default config": {
+ config: New().Config,
+ },
+ "unset 'address'": {
+ wantFail: true,
+ config: Config{
+ Address: "",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ c := New()
+ c.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, c.Init())
+ } else {
+ assert.NoError(t, c.Init())
+ }
+ })
+ }
+}
+
+func TestChrony_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Chrony
+ wantFail bool
+ }{
+ "tracking: success, activity: success": {
+ wantFail: false,
+ prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{}) },
+ },
+ "tracking: success, activity: fail": {
+ wantFail: true,
+ prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnActivity: true}) },
+ },
+ "tracking: fail, activity: success": {
+ wantFail: true,
+ prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnTracking: true}) },
+ },
+ "tracking: fail, activity: fail": {
+ wantFail: true,
+ prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnTracking: true, errOnActivity: true}) },
+ },
+ "fail on creating client": {
+ wantFail: true,
+ prepare: func() *Chrony { return prepareChronyWithMock(nil) },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ c := test.prepare()
+
+ require.NoError(t, c.Init())
+
+ if test.wantFail {
+ assert.Error(t, c.Check())
+ } else {
+ assert.NoError(t, c.Check())
+ }
+ })
+ }
+}
+
+func TestChrony_Charts(t *testing.T) {
+ assert.Equal(t, len(charts), len(*New().Charts()))
+}
+
+func TestChrony_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(c *Chrony)
+ wantClose bool
+ }{
+ "after New": {
+ wantClose: false,
+ prepare: func(c *Chrony) {},
+ },
+ "after Init": {
+ wantClose: false,
+ prepare: func(c *Chrony) { _ = c.Init() },
+ },
+ "after Check": {
+ wantClose: true,
+ prepare: func(c *Chrony) { _ = c.Init(); _ = c.Check() },
+ },
+ "after Collect": {
+ wantClose: true,
+ prepare: func(c *Chrony) { _ = c.Init(); _ = c.Collect() },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ m := &mockClient{}
+ c := prepareChronyWithMock(m)
+ test.prepare(c)
+
+ require.NotPanics(t, c.Cleanup)
+
+ if test.wantClose {
+ assert.True(t, m.closeCalled)
+ } else {
+ assert.False(t, m.closeCalled)
+ }
+ })
+ }
+}
+
+func TestChrony_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Chrony
+ expected map[string]int64
+ }{
+ "tracking: success, activity: success": {
+ prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{}) },
+ expected: map[string]int64{
+ "burst_offline_sources": 3,
+ "burst_online_sources": 4,
+ "current_correction": 154872,
+ "frequency": 51051185607,
+ "last_offset": 3095,
+ "leap_status_delete_second": 0,
+ "leap_status_insert_second": 1,
+ "leap_status_normal": 0,
+ "leap_status_unsynchronised": 0,
+ "offline_sources": 2,
+ "online_sources": 8,
+ "ref_measurement_time": 63793323616,
+ "residual_frequency": -571789,
+ "rms_offset": 130089,
+ "root_delay": 59576179,
+ "root_dispersion": 1089275,
+ "skew": 41821926,
+ "stratum": 4,
+ "unresolved_sources": 1,
+ "update_interval": 1044219238281,
+ },
+ },
+ "tracking: success, activity: fail": {
+ prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnActivity: true}) },
+ expected: map[string]int64{
+ "current_correction": 154872,
+ "frequency": 51051185607,
+ "last_offset": 3095,
+ "leap_status_delete_second": 0,
+ "leap_status_insert_second": 1,
+ "leap_status_normal": 0,
+ "leap_status_unsynchronised": 0,
+ "ref_measurement_time": 63793323586,
+ "residual_frequency": -571789,
+ "rms_offset": 130089,
+ "root_delay": 59576179,
+ "root_dispersion": 1089275,
+ "skew": 41821926,
+ "stratum": 4,
+ "update_interval": 1044219238281,
+ },
+ },
+ "tracking: fail, activity: success": {
+ prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnTracking: true}) },
+ expected: nil,
+ },
+ "tracking: fail, activity: fail": {
+ prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnTracking: true, errOnActivity: true}) },
+ expected: nil,
+ },
+ "fail on creating client": {
+ prepare: func() *Chrony { return prepareChronyWithMock(nil) },
+ expected: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ c := test.prepare()
+
+ require.NoError(t, c.Init())
+ _ = c.Check()
+
+ collected := c.Collect()
+ copyRefMeasurementTime(collected, test.expected)
+
+ assert.Equal(t, test.expected, collected)
+ })
+ }
+}
+
+func prepareChronyWithMock(m *mockClient) *Chrony {
+ c := New()
+ if m == nil {
+ c.newClient = func(_ Config) (chronyClient, error) { return nil, errors.New("mock.newClient error") }
+ } else {
+ c.newClient = func(_ Config) (chronyClient, error) { return m, nil }
+ }
+ return c
+}
+
+type mockClient struct {
+ errOnTracking bool
+ errOnActivity bool
+ errOnServerStats bool
+ closeCalled bool
+}
+
+func (m *mockClient) Tracking() (*chrony.ReplyTracking, error) {
+ if m.errOnTracking {
+ return nil, errors.New("mockClient.Tracking call error")
+ }
+ reply := chrony.ReplyTracking{
+ Tracking: chrony.Tracking{
+ RefID: 2728380539,
+ IPAddr: net.ParseIP("192.0.2.0"),
+ Stratum: 4,
+ LeapStatus: 1,
+ RefTime: time.Time{},
+ CurrentCorrection: 0.00015487267228309065,
+ LastOffset: 3.0953951863921247e-06,
+ RMSOffset: 0.00013008920359425247,
+ FreqPPM: -51.051185607910156,
+ ResidFreqPPM: -0.0005717896274290979,
+ SkewPPM: 0.0418219268321991,
+ RootDelay: 0.05957617983222008,
+ RootDispersion: 0.0010892755817621946,
+ LastUpdateInterval: 1044.21923828125,
+ },
+ }
+ return &reply, nil
+}
+
+func (m *mockClient) Activity() (*chrony.ReplyActivity, error) {
+ if m.errOnActivity {
+ return nil, errors.New("mockClient.Activity call error")
+ }
+ reply := chrony.ReplyActivity{
+ Activity: chrony.Activity{
+ Online: 8,
+ Offline: 2,
+ BurstOnline: 4,
+ BurstOffline: 3,
+ Unresolved: 1,
+ },
+ }
+ return &reply, nil
+}
+
+func (m *mockClient) ServerStats() (*serverStats, error) {
+ if m.errOnServerStats {
+ return nil, errors.New("mockClient.ServerStats call error")
+ }
+
+ reply := serverStats{
+ v3: &chrony.ServerStats3{
+ NTPHits: 10,
+ NKEHits: 10,
+ CMDHits: 10,
+ NTPDrops: 1,
+ NKEDrops: 1,
+ CMDDrops: 1,
+ LogDrops: 1,
+ NTPAuthHits: 10,
+ NTPInterleavedHits: 10,
+ NTPTimestamps: 0,
+ NTPSpanSeconds: 0,
+ },
+ }
+
+ return &reply, nil
+}
+
+func (m *mockClient) Close() {
+ m.closeCalled = true
+}
+
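+// copyRefMeasurementTime copies the expected "ref_measurement_time" value over the collected one:
+// the metric is derived from time.Now() at collection time, so comparing it verbatim would make the test flaky.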
+func copyRefMeasurementTime(dst, src map[string]int64) {
+ if _, ok := dst["ref_measurement_time"]; !ok {
+ return
+ }
+ if _, ok := src["ref_measurement_time"]; !ok {
+ return
+ }
+ dst["ref_measurement_time"] = src["ref_measurement_time"]
+}
diff --git a/src/go/plugin/go.d/modules/chrony/client.go b/src/go/plugin/go.d/modules/chrony/client.go
new file mode 100644
index 000000000..233e78f19
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/client.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package chrony
+
+import (
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/facebook/time/ntp/chrony"
+)
+
+func newChronyClient(c Config) (chronyClient, error) {
+ conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration())
+ if err != nil {
+ return nil, err
+ }
+
+ client := &simpleClient{
+ conn: conn,
+ client: &chrony.Client{Connection: &connWithTimeout{
+ Conn: conn,
+ timeout: c.Timeout.Duration(),
+ }},
+ }
+
+ return client, nil
+}
+
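+// connWithTimeout wraps net.Conn and refreshes the read/write deadline before every operation,
+// so a silent chronyd (e.g. a dropped UDP reply) cannot block collection past the configured timeout.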
+type connWithTimeout struct {
+ net.Conn
+ timeout time.Duration
+}
+
+func (c *connWithTimeout) Read(p []byte) (n int, err error) {
+ if err := c.Conn.SetReadDeadline(c.deadline()); err != nil {
+ return 0, err
+ }
+ return c.Conn.Read(p)
+}
+
+func (c *connWithTimeout) Write(p []byte) (n int, err error) {
+ if err := c.Conn.SetWriteDeadline(c.deadline()); err != nil {
+ return 0, err
+ }
+ return c.Conn.Write(p)
+}
+
+func (c *connWithTimeout) deadline() time.Time {
+ return time.Now().Add(c.timeout)
+}
+
+type simpleClient struct {
+ conn net.Conn
+ client *chrony.Client
+}
+
+func (sc *simpleClient) Tracking() (*chrony.ReplyTracking, error) {
+ req := chrony.NewTrackingPacket()
+
+ reply, err := sc.client.Communicate(req)
+ if err != nil {
+ return nil, err
+ }
+
+ tracking, ok := reply.(*chrony.ReplyTracking)
+ if !ok {
+ return nil, fmt.Errorf("unexpected reply type, want=%T, got=%T", &chrony.ReplyTracking{}, reply)
+ }
+ return tracking, nil
+}
+
+func (sc *simpleClient) Activity() (*chrony.ReplyActivity, error) {
+ req := chrony.NewActivityPacket()
+
+ reply, err := sc.client.Communicate(req)
+ if err != nil {
+ return nil, err
+ }
+
+ activity, ok := reply.(*chrony.ReplyActivity)
+ if !ok {
+ return nil, fmt.Errorf("unexpected reply type, want=%T, got=%T", &chrony.ReplyActivity{}, reply)
+ }
+ return activity, nil
+}
+
+type serverStats struct {
+ v1 *chrony.ServerStats
+ v2 *chrony.ServerStats2
+ v3 *chrony.ServerStats3
+ v4 *chrony.ServerStats4
+}
+
+func (sc *simpleClient) ServerStats() (*serverStats, error) {
+ req := chrony.NewServerStatsPacket()
+
+ reply, err := sc.client.Communicate(req)
+ if err != nil {
+ return nil, err
+ }
+
+ var stats serverStats
+
+ switch v := reply.(type) {
+ case *chrony.ReplyServerStats:
+ stats.v1 = &chrony.ServerStats{
+ NTPHits: v.NTPHits,
+ CMDHits: v.CMDHits,
+ NTPDrops: v.NTPDrops,
+ CMDDrops: v.CMDDrops,
+ LogDrops: v.LogDrops,
+ }
+ case *chrony.ReplyServerStats2:
+ stats.v2 = &chrony.ServerStats2{
+ NTPHits: v.NTPHits,
+ NKEHits: v.NKEHits,
+ CMDHits: v.CMDHits,
+ NTPDrops: v.NTPDrops,
+ NKEDrops: v.NKEDrops,
+ CMDDrops: v.CMDDrops,
+ LogDrops: v.LogDrops,
+ NTPAuthHits: v.NTPAuthHits,
+ }
+ case *chrony.ReplyServerStats3:
+ stats.v3 = &chrony.ServerStats3{
+ NTPHits: v.NTPHits,
+ NKEHits: v.NKEHits,
+ CMDHits: v.CMDHits,
+ NTPDrops: v.NTPDrops,
+ NKEDrops: v.NKEDrops,
+ CMDDrops: v.CMDDrops,
+ LogDrops: v.LogDrops,
+ NTPAuthHits: v.NTPAuthHits,
+ NTPInterleavedHits: v.NTPInterleavedHits,
+ NTPTimestamps: v.NTPTimestamps,
+ NTPSpanSeconds: v.NTPSpanSeconds,
+ }
+ case *chrony.ReplyServerStats4:
+ stats.v4 = &chrony.ServerStats4{
+ NTPHits: v.NTPHits,
+ NKEHits: v.NKEHits,
+ CMDHits: v.CMDHits,
+ NTPDrops: v.NTPDrops,
+ NKEDrops: v.NKEDrops,
+ CMDDrops: v.CMDDrops,
+ LogDrops: v.LogDrops,
+ NTPAuthHits: v.NTPAuthHits,
+ NTPInterleavedHits: v.NTPInterleavedHits,
+ NTPTimestamps: v.NTPTimestamps,
+ NTPSpanSeconds: v.NTPSpanSeconds,
+ NTPDaemonRxtimestamps: v.NTPDaemonRxtimestamps,
+ NTPDaemonTxtimestamps: v.NTPDaemonTxtimestamps,
+ NTPKernelRxtimestamps: v.NTPKernelRxtimestamps,
+ NTPKernelTxtimestamps: v.NTPKernelTxtimestamps,
+ NTPHwRxTimestamps: v.NTPHwRxTimestamps,
+ NTPHwTxTimestamps: v.NTPHwTxTimestamps,
+ }
+ default:
+ return nil, fmt.Errorf("unexpected reply type, want=ReplyServerStats, got=%T", reply)
+ }
+
+ return &stats, nil
+}
+
+func (sc *simpleClient) Close() {
+ if sc.conn != nil {
+ _ = sc.conn.Close()
+ sc.conn = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/chrony/collect.go b/src/go/plugin/go.d/modules/chrony/collect.go
new file mode 100644
index 000000000..1a3a286fc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/collect.go
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package chrony
+
+import (
+ "fmt"
+ "time"
+)
+
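+// scaleFactor converts chronyd's floating-point values (seconds, ppm) into integer nano-units
+// so they can be stored as int64 metrics; the charts divide by the same factor on display.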
+const scaleFactor = 1000000000
+
+func (c *Chrony) collect() (map[string]int64, error) {
+ if c.client == nil {
+ client, err := c.newClient(c.Config)
+ if err != nil {
+ return nil, err
+ }
+ c.client = client
+ }
+
+ mx := make(map[string]int64)
+
+ if err := c.collectTracking(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectActivity(mx); err != nil {
+ return mx, err
+ }
+ //if strings.HasPrefix(c.Address, "/") {
+ // TODO: Allowed only through the Unix domain socket (requires "_chrony" group membership).
+ // See https://github.com/facebook/time/blob/18207c5d8ddc7242e8d4192985898b6dbe66932c/cmd/ntpcheck/checker/chrony.go#L38
+ // ^^ For some reason this doesn't work: chronyd doesn't respond. Additional configuration may be needed.
+ //if err := c.collectServerStats(mx); err != nil {
+ // return mx, err
+ //}
+ //}
+
+ return mx, nil
+}
+
+const (
+ // https://github.com/mlichvar/chrony/blob/7daf34675a5a2487895c74d1578241ca91a4eb70/ntp.h#L70-L75
+ leapStatusNormal = 0
+ leapStatusInsertSecond = 1
+ leapStatusDeleteSecond = 2
+ leapStatusUnsynchronised = 3
+)
+
+func (c *Chrony) collectTracking(mx map[string]int64) error {
+ reply, err := c.client.Tracking()
+ if err != nil {
+ return fmt.Errorf("error on collecting tracking: %v", err)
+ }
+
+ mx["stratum"] = int64(reply.Stratum)
+ mx["leap_status_normal"] = boolToInt(reply.LeapStatus == leapStatusNormal)
+ mx["leap_status_insert_second"] = boolToInt(reply.LeapStatus == leapStatusInsertSecond)
+ mx["leap_status_delete_second"] = boolToInt(reply.LeapStatus == leapStatusDeleteSecond)
+ mx["leap_status_unsynchronised"] = boolToInt(reply.LeapStatus == leapStatusUnsynchronised)
+ mx["root_delay"] = int64(reply.RootDelay * scaleFactor)
+ mx["root_dispersion"] = int64(reply.RootDispersion * scaleFactor)
+ mx["skew"] = int64(reply.SkewPPM * scaleFactor)
+ mx["last_offset"] = int64(reply.LastOffset * scaleFactor)
+ mx["rms_offset"] = int64(reply.RMSOffset * scaleFactor)
+ mx["update_interval"] = int64(reply.LastUpdateInterval * scaleFactor)
+ // handle chrony restarts
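+ // (chronyd reports the Unix epoch as RefTime until the first synchronisation after a restart, which would otherwise yield a bogus huge value)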
+ if reply.RefTime.Year() != 1970 {
+ mx["ref_measurement_time"] = time.Now().Unix() - reply.RefTime.Unix()
+ }
+ mx["residual_frequency"] = int64(reply.ResidFreqPPM * scaleFactor)
+ // https://github.com/mlichvar/chrony/blob/5b04f3ca902e5d10aa5948fb7587d30b43941049/client.c#L1706
+ mx["current_correction"] = abs(int64(reply.CurrentCorrection * scaleFactor))
+ mx["frequency"] = abs(int64(reply.FreqPPM * scaleFactor))
+
+ return nil
+}
+
+func (c *Chrony) collectActivity(mx map[string]int64) error {
+ reply, err := c.client.Activity()
+ if err != nil {
+ return fmt.Errorf("error on collecting activity: %v", err)
+ }
+
+ mx["online_sources"] = int64(reply.Online)
+ mx["offline_sources"] = int64(reply.Offline)
+ mx["burst_online_sources"] = int64(reply.BurstOnline)
+ mx["burst_offline_sources"] = int64(reply.BurstOffline)
+ mx["unresolved_sources"] = int64(reply.Unresolved)
+
+ return nil
+}
+
+//func (c *Chrony) collectServerStats(mx map[string]int64) error {
+// stats, err := c.client.ServerStats()
+// if err != nil {
+// return fmt.Errorf("error on collecting server stats: %v", err)
+// }
+//
+// switch {
+// case stats.v4 != nil:
+// mx["ntp_packets_received"] = int64(stats.v4.NTPHits)
+// mx["ntp_packets_dropped"] = int64(stats.v4.NTPDrops)
+// mx["command_packets_received"] = int64(stats.v4.CMDHits)
+// mx["command_packets_dropped"] = int64(stats.v4.CMDDrops)
+// mx["client_log_records_dropped"] = int64(stats.v4.LogDrops)
+// mx["nke_connections_accepted"] = int64(stats.v4.NKEHits)
+// mx["nke_connections_dropped"] = int64(stats.v4.NKEDrops)
+// mx["authenticated_ntp_packets"] = int64(stats.v4.NTPAuthHits)
+// mx["interleaved_ntp_packets"] = int64(stats.v4.NTPInterleavedHits)
+// case stats.v3 != nil:
+// mx["ntp_packets_received"] = int64(stats.v3.NTPHits)
+// mx["ntp_packets_dropped"] = int64(stats.v3.NTPDrops)
+// mx["command_packets_received"] = int64(stats.v3.CMDHits)
+// mx["command_packets_dropped"] = int64(stats.v3.CMDDrops)
+// mx["client_log_records_dropped"] = int64(stats.v3.LogDrops)
+// mx["nke_connections_accepted"] = int64(stats.v3.NKEHits)
+// mx["nke_connections_dropped"] = int64(stats.v3.NKEDrops)
+// mx["authenticated_ntp_packets"] = int64(stats.v3.NTPAuthHits)
+// mx["interleaved_ntp_packets"] = int64(stats.v3.NTPInterleavedHits)
+// case stats.v2 != nil:
+// mx["ntp_packets_received"] = int64(stats.v2.NTPHits)
+// mx["ntp_packets_dropped"] = int64(stats.v2.NTPDrops)
+// mx["command_packets_received"] = int64(stats.v2.CMDHits)
+// mx["command_packets_dropped"] = int64(stats.v2.CMDDrops)
+// mx["client_log_records_dropped"] = int64(stats.v2.LogDrops)
+// mx["nke_connections_accepted"] = int64(stats.v2.NKEHits)
+// mx["nke_connections_dropped"] = int64(stats.v2.NKEDrops)
+// mx["authenticated_ntp_packets"] = int64(stats.v2.NTPAuthHits)
+// case stats.v1 != nil:
+// mx["ntp_packets_received"] = int64(stats.v1.NTPHits)
+// mx["ntp_packets_dropped"] = int64(stats.v1.NTPDrops)
+// mx["command_packets_received"] = int64(stats.v1.CMDHits)
+// mx["command_packets_dropped"] = int64(stats.v1.CMDDrops)
+// mx["client_log_records_dropped"] = int64(stats.v1.LogDrops)
+// default:
+// return errors.New("invalid server stats reply")
+// }
+//
+// //c.addStatsChartsOnce.Do(func() { c.addServerStatsCharts(stats) })
+//
+// return nil
+//}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
+
+func abs(v int64) int64 {
+ if v < 0 {
+ return -v
+ }
+ return v
+}
diff --git a/src/go/plugin/go.d/modules/chrony/config_schema.json b/src/go/plugin/go.d/modules/chrony/config_schema.json
new file mode 100644
index 000000000..5de10a822
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/config_schema.json
@@ -0,0 +1,43 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Chrony collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where Chrony daemon listens for incoming connections.",
+ "type": "string",
+ "default": "127.0.0.1:323"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "default": 1
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/chrony/init.go b/src/go/plugin/go.d/modules/chrony/init.go
new file mode 100644
index 000000000..828112c9d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/init.go
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package chrony
+
+import (
+ "errors"
+)
+
+func (c *Chrony) validateConfig() error {
+ if c.Address == "" {
+ return errors.New("empty 'address'")
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/chrony/integrations/chrony.md b/src/go/plugin/go.d/modules/chrony/integrations/chrony.md
new file mode 100644
index 000000000..e9b9454d9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/integrations/chrony.md
@@ -0,0 +1,222 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/chrony/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/chrony/metadata.yaml"
+sidebar_label: "Chrony"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/System Clock and NTP"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Chrony
+
+
+<img src="https://netdata.cloud/img/chrony.jpg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: chrony
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the system's clock performance and its peers' activity status.
+
+It collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.
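+
+For reference, a minimal sketch of that exchange, using the same `github.com/facebook/time/ntp/chrony` client library this module relies on (a hypothetical standalone program, error handling and deadlines trimmed):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/facebook/time/ntp/chrony"
+)
+
+func main() {
+	// chronyd answers monitoring requests over UDP on port 323 by default.
+	conn, err := net.DialTimeout("udp", "127.0.0.1:323", time.Second)
+	if err != nil {
+		panic(err)
+	}
+	defer conn.Close()
+
+	// The collector additionally wraps the connection with read/write deadlines; omitted here for brevity.
+	client := &chrony.Client{Connection: conn}
+
+	// Ask for the tracking report (stratum, offsets, frequency, ...).
+	reply, err := client.Communicate(chrony.NewTrackingPacket())
+	if err != nil {
+		panic(err)
+	}
+	if tracking, ok := reply.(*chrony.ReplyTracking); ok {
+		fmt.Printf("stratum=%d root_delay=%.6fs\n", tracking.Stratum, tracking.RootDelay)
+	}
+}
+```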
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This collector discovers the Chrony instance running on the local host and listening on port 323.
+On startup, it tries to collect metrics from:
+
+- 127.0.0.1:323
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Chrony instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| chrony.stratum | stratum | level |
+| chrony.current_correction | current_correction | seconds |
+| chrony.root_delay | root_delay | seconds |
+| chrony.root_dispersion | root_dispersion | seconds |
+| chrony.last_offset | offset | seconds |
+| chrony.rms_offset | offset | seconds |
+| chrony.frequency | frequency | ppm |
+| chrony.residual_frequency | residual_frequency | ppm |
+| chrony.skew | skew | ppm |
+| chrony.update_interval | update_interval | seconds |
+| chrony.ref_measurement_time | ref_measurement_time | seconds |
+| chrony.leap_status | normal, insert_second, delete_second, unsynchronised | status |
+| chrony.activity | online, offline, burst_online, burst_offline, unresolved | sources |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/chrony.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/chrony.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Server address. The format is IP:PORT. | 127.0.0.1:323 | yes |
+| timeout | Connection timeout. Zero means no timeout. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:323
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:323
+
+ - name: remote
+ address: 192.0.2.1:323
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `chrony` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m chrony
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `chrony` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep chrony
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep chrony /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep chrony
+```
+
+
diff --git a/src/go/plugin/go.d/modules/chrony/metadata.yaml b/src/go/plugin/go.d/modules/chrony/metadata.yaml
new file mode 100644
index 000000000..18f9152e6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/metadata.yaml
@@ -0,0 +1,208 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-chrony
+ module_name: chrony
+ plugin_name: go.d.plugin
+ monitored_instance:
+ categories:
+ - data-collection.system-clock-and-ntp
+ icon_filename: chrony.jpg
+ name: Chrony
+ link: https://chrony.tuxfamily.org/
+ alternative_monitored_instances: []
+ keywords: []
+ info_provided_to_referring_integrations:
+ description: ""
+ related_resources:
+ integrations:
+ list: []
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: This collector monitors the system's clock performance and its peers' activity status
+ method_description: It collects metrics by sending UDP packets to chronyd using the Chrony communication protocol v6.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ This collector discovers the Chrony instance running on the local host and listening on port 323.
+ On startup, it tries to collect metrics from:
+
+ - 127.0.0.1:323
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/chrony.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: Server address. The format is IP:PORT.
+ default_value: 127.0.0.1:323
+ required: true
+ - name: timeout
+ description: Connection timeout. Zero means no timeout.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:323
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:323
+
+ - name: remote
+ address: 192.0.2.1:323
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: chrony.stratum
+ availability: []
+ description: Distance to the reference clock
+ unit: level
+ chart_type: line
+ dimensions:
+ - name: stratum
+ - name: chrony.current_correction
+ availability: []
+ description: Current correction
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: current_correction
+ - name: chrony.root_delay
+ availability: []
+ description: Network path delay to stratum-1
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: root_delay
+ - name: chrony.root_dispersion
+ availability: []
+ description: Dispersion accumulated back to stratum-1
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: root_dispersion
+ - name: chrony.last_offset
+ availability: []
+ description: Offset on the last clock update
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: offset
+ - name: chrony.rms_offset
+ availability: []
+ description: Long-term average of the offset value
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: offset
+ - name: chrony.frequency
+ availability: []
+ description: Frequency
+ unit: ppm
+ chart_type: line
+ dimensions:
+ - name: frequency
+ - name: chrony.residual_frequency
+ availability: []
+ description: Residual frequency
+ unit: ppm
+ chart_type: line
+ dimensions:
+ - name: residual_frequency
+ - name: chrony.skew
+ availability: []
+ description: Skew
+ unit: ppm
+ chart_type: line
+ dimensions:
+ - name: skew
+ - name: chrony.update_interval
+ availability: []
+ description: Interval between the last two clock updates
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: update_interval
+ - name: chrony.ref_measurement_time
+ availability: []
+ description: Time since the last measurement
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: ref_measurement_time
+ - name: chrony.leap_status
+ availability: []
+ description: Leap status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: normal
+ - name: insert_second
+ - name: delete_second
+ - name: unsynchronised
+ - name: chrony.activity
+ availability: []
+ description: Peers activity
+ unit: sources
+ chart_type: stacked
+ dimensions:
+ - name: online
+ - name: offline
+ - name: burst_online
+ - name: burst_offline
+ - name: unresolved
diff --git a/src/go/plugin/go.d/modules/chrony/testdata/config.json b/src/go/plugin/go.d/modules/chrony/testdata/config.json
new file mode 100644
index 000000000..e86834720
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/chrony/testdata/config.yaml b/src/go/plugin/go.d/modules/chrony/testdata/config.yaml
new file mode 100644
index 000000000..1b81d09eb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/chrony/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/clickhouse/README.md b/src/go/plugin/go.d/modules/clickhouse/README.md
new file mode 120000
index 000000000..078a1eee2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/README.md
@@ -0,0 +1 @@
+integrations/clickhouse.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/clickhouse/charts.go b/src/go/plugin/go.d/modules/clickhouse/charts.go
new file mode 100644
index 000000000..dcae16008
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/charts.go
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioConnections = module.Priority + iota
+
+ prioSlowReads
+ prioReadBackoff
+
+ prioMemoryUsage
+
+ prioDiskSpaceUsage
+
+ prioRunningQueries
+ prioQueriesPreempted
+ prioQueries
+ prioSelectQueries
+ prioInsertQueries
+ prioQueriesMemoryLimitExceeded
+
+ prioLongestRunningQueryTime
+ prioQueriesLatency
+ prioSelectQueriesLatency
+ prioInsertQueriesLatency
+
+ prioIO
+ prioIOPS
+ prioIOErrors
+ prioIOSeeks
+ prioIOFileOpens
+
+ prioDatabaseTableSize
+ prioDatabaseTableParts
+ prioDatabaseTableRows
+
+ prioReplicatedPartsCurrentActivity
+ prioReplicasMaxAbsoluteDelay
+ prioReadOnlyReplica
+ prioReplicatedDataLoss
+ prioReplicatedPartFetches
+ prioReplicatedPartFetchesOfMerged
+ prioReplicatedPartMerges
+
+ prioInsertedBytes
+ prioInsertedRows
+ prioRejectedInserts
+ prioDelayedInserts
+ prioDelayedInsertsThrottleTime
+
+ prioSelectedBytes
+ prioSelectedRows
+ prioSelectedParts
+ prioSelectedRanges
+ prioSelectedMarks
+
+ prioMerges
+ prioMergesLatency
+ prioMergedUncompressedBytes
+ prioMergedRows
+
+ prioMergeTreeDataWriterRows
+ prioMergeTreeDataWriterUncompressedBytes
+ prioMergeTreeDataWriterCompressedBytes
+
+ prioUncompressedCacheRequests
+ prioMarkCacheRequests
+
+ prioMaxPartCountForPartition
+ prioParts
+
+ prioDistributedSend
+ prioDistributedConnectionTries
+ prioDistributedConnectionFailTry
+ prioDistributedConnectionFailAtAll
+
+ prioDistributedFilesToInsert
+ prioDistributedRejectedInserts
+ prioDistributedDelayedInserts
+ prioDistributedDelayedInsertsMilliseconds
+ prioDistributedSyncInsertionTimeoutExceeded
+ prioDistributedAsyncInsertionFailures
+
+ prioUptime
+)
+
+var chCharts = module.Charts{
+ chartConnections.Copy(),
+
+ chartMemoryUsage.Copy(),
+
+ chartSlowReads.Copy(),
+ chartReadBackoff.Copy(),
+
+ chartRunningQueries.Copy(),
+ chartQueries.Copy(),
+ chartSelectQueries.Copy(),
+ chartInsertQueries.Copy(),
+ chartQueriesPreempted.Copy(),
+ chartQueriesMemoryLimitExceeded.Copy(),
+
+ chartLongestRunningQueryTime.Copy(),
+ chartQueriesLatency.Copy(),
+ chartSelectQueriesLatency.Copy(),
+ chartInsertQueriesLatency.Copy(),
+
+ chartFileDescriptorIO.Copy(),
+ chartFileDescriptorIOPS.Copy(),
+ chartFileDescriptorIOErrors.Copy(),
+ chartIOSeeks.Copy(),
+ chartIOFileOpens.Copy(),
+
+ chartReplicatedPartsActivity.Copy(),
+ chartReplicasMaxAbsoluteDelay.Copy(),
+ chartReadonlyReplica.Copy(),
+ chartReplicatedDataLoss.Copy(),
+ chartReplicatedPartFetches.Copy(),
+ chartReplicatedPartMerges.Copy(),
+ chartReplicatedPartFetchesOfMerged.Copy(),
+
+ chartInsertedRows.Copy(),
+ chartInsertedBytes.Copy(),
+ chartRejectedInserts.Copy(),
+ chartDelayedInserts.Copy(),
+ chartDelayedInsertsThrottleTime.Copy(),
+
+ chartSelectedRows.Copy(),
+ chartSelectedBytes.Copy(),
+ chartSelectedParts.Copy(),
+ chartSelectedRanges.Copy(),
+ chartSelectedMarks.Copy(),
+
+ chartMerges.Copy(),
+ chartMergesLatency.Copy(),
+ chartMergedUncompressedBytes.Copy(),
+ chartMergedRows.Copy(),
+
+ chartMergeTreeDataWriterInsertedRows.Copy(),
+ chartMergeTreeDataWriterUncompressedBytes.Copy(),
+ chartMergeTreeDataWriterCompressedBytes.Copy(),
+
+ chartUncompressedCacheRequests.Copy(),
+ chartMarkCacheRequests.Copy(),
+
+ chartMaxPartCountForPartition.Copy(),
+ chartPartsCount.Copy(),
+
+ chartDistributedConnections.Copy(),
+ chartDistributedConnectionAttempts.Copy(),
+ chartDistributedConnectionFailRetries.Copy(),
+ chartDistributedConnectionFailExhaustedRetries.Copy(),
+
+ chartDistributedFilesToInsert.Copy(),
+ chartDistributedRejectedInserts.Copy(),
+ chartDistributedDelayedInserts.Copy(),
+ chartDistributedDelayedInsertsLatency.Copy(),
+ chartDistributedSyncInsertionTimeoutExceeded.Copy(),
+ chartDistributedAsyncInsertionFailures.Copy(),
+
+ chartUptime.Copy(),
+}
+
+var (
+ chartConnections = module.Chart{
+ ID: "connections",
+ Title: "Connections",
+ Units: "connections",
+ Fam: "conns",
+ Ctx: "clickhouse.connections",
+ Priority: prioConnections,
+ Dims: module.Dims{
+ {ID: "metrics_TCPConnection", Name: "tcp"},
+ {ID: "metrics_HTTPConnection", Name: "http"},
+ {ID: "metrics_MySQLConnection", Name: "mysql"},
+ {ID: "metrics_PostgreSQLConnection", Name: "postgresql"},
+ {ID: "metrics_InterserverConnection", Name: "interserver"},
+ },
+ }
+)
+
+var (
+ chartSlowReads = module.Chart{
+ ID: "slow_reads",
+ Title: "Slow reads from a file",
+ Units: "reads/s",
+ Fam: "slow reads",
+ Ctx: "clickhouse.slow_reads",
+ Priority: prioSlowReads,
+ Dims: module.Dims{
+ {ID: "events_SlowRead", Name: "slow"},
+ },
+ }
+ chartReadBackoff = module.Chart{
+ ID: "read_backoff",
+ Title: "Read backoff events",
+ Units: "events/s",
+ Fam: "slow reads",
+ Ctx: "clickhouse.read_backoff",
+ Priority: prioReadBackoff,
+ Dims: module.Dims{
+ {ID: "events_ReadBackoff", Name: "read_backoff"},
+ },
+ }
+)
+
+var (
+ chartMemoryUsage = module.Chart{
+ ID: "memory_usage",
+ Title: "Memory usage",
+ Units: "bytes",
+ Fam: "mem",
+ Ctx: "clickhouse.memory_usage",
+ Priority: prioMemoryUsage,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "metrics_MemoryTracking", Name: "used"},
+ },
+ }
+)
+
+var diskChartsTmpl = module.Charts{
+ diskSpaceUsageChartTmpl.Copy(),
+}
+
+var (
+ diskSpaceUsageChartTmpl = module.Chart{
+ ID: "disk_%s_space_usage",
+ Title: "Disk space usage",
+ Units: "bytes",
+ Fam: "disk space",
+ Ctx: "clickhouse.disk_space_usage",
+ Type: module.Stacked,
+ Priority: prioDiskSpaceUsage,
+ Dims: module.Dims{
+ {ID: "disk_%s_free_space_bytes", Name: "free"},
+ {ID: "disk_%s_used_space_bytes", Name: "used"},
+ },
+ }
+)
+
+var (
+ chartRunningQueries = module.Chart{
+ ID: "running_queries",
+ Title: "Running queries",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "clickhouse.running_queries",
+ Priority: prioRunningQueries,
+ Dims: module.Dims{
+ {ID: "metrics_Query", Name: "running"},
+ },
+ }
+ chartQueriesPreempted = module.Chart{
+ ID: "queries_preempted",
+ Title: "Queries waiting due to priority",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "clickhouse.queries_preempted",
+ Priority: prioQueriesPreempted,
+ Dims: module.Dims{
+ {ID: "metrics_QueryPreempted", Name: "preempted"},
+ },
+ }
+ chartQueries = module.Chart{
+ ID: "queries",
+ Title: "Queries",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "clickhouse.queries",
+ Priority: prioQueries,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "events_SuccessfulQuery", Name: "successful", Algo: module.Incremental},
+ {ID: "events_FailedQuery", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartSelectQueries = module.Chart{
+ ID: "select_queries",
+ Title: "Select queries",
+ Units: "selects/s",
+ Fam: "queries",
+ Ctx: "clickhouse.select_queries",
+ Priority: prioSelectQueries,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "events_SuccessfulSelectQuery", Name: "successful", Algo: module.Incremental},
+ {ID: "events_FailedSelectQuery", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartInsertQueries = module.Chart{
+ ID: "insert_queries",
+ Title: "Insert queries",
+ Units: "inserts/s",
+ Fam: "queries",
+ Ctx: "clickhouse.insert_queries",
+ Priority: prioInsertQueries,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "events_SuccessfulInsertQuery", Name: "successful", Algo: module.Incremental},
+ {ID: "events_FailedInsertQuery", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartQueriesMemoryLimitExceeded = module.Chart{
+ ID: "queries_memory_limit_exceeded",
+ Title: "Memory limit exceeded for query",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "clickhouse.queries_memory_limit_exceeded",
+ Priority: prioQueriesMemoryLimitExceeded,
+ Dims: module.Dims{
+ {ID: "events_QueryMemoryLimitExceeded", Name: "mem_limit_exceeded"},
+ },
+ }
+)
+
+var (
+ chartLongestRunningQueryTime = module.Chart{
+ ID: "longest_running_query_time",
+ Title: "Longest running query time",
+ Units: "seconds",
+ Fam: "query latency",
+ Ctx: "clickhouse.longest_running_query_time",
+ Priority: prioLongestRunningQueryTime,
+ Dims: module.Dims{
+ {ID: "LongestRunningQueryTime", Name: "longest_query_time", Div: precision},
+ },
+ }
+ chartQueriesLatency = module.Chart{
+ ID: "queries_latency",
+ Title: "Queries latency",
+ Units: "microseconds",
+ Fam: "query latency",
+ Ctx: "clickhouse.queries_latency",
+ Priority: prioQueriesLatency,
+ Dims: module.Dims{
+ {ID: "events_QueryTimeMicroseconds", Name: "queries_time", Algo: module.Incremental},
+ },
+ }
+ chartSelectQueriesLatency = module.Chart{
+ ID: "select_queries_latency",
+ Title: "Select queries latency",
+ Units: "microseconds",
+ Fam: "query latency",
+ Ctx: "clickhouse.select_queries_latency",
+ Priority: prioSelectQueriesLatency,
+ Dims: module.Dims{
+ {ID: "events_SelectQueryTimeMicroseconds", Name: "selects_time", Algo: module.Incremental},
+ },
+ }
+ chartInsertQueriesLatency = module.Chart{
+ ID: "insert_queries_latency",
+ Title: "Insert queries latency",
+ Units: "microseconds",
+ Fam: "query latency",
+ Ctx: "clickhouse.insert_queries_latency",
+ Priority: prioInsertQueriesLatency,
+ Dims: module.Dims{
+ {ID: "events_InsertQueryTimeMicroseconds", Name: "inserts_time", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartFileDescriptorIO = module.Chart{
+ ID: "file_descriptor_io",
+ Title: "Read and written data",
+ Units: "bytes/s",
+ Fam: "io",
+ Ctx: "clickhouse.io",
+ Priority: prioIO,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "events_ReadBufferFromFileDescriptorReadBytes", Name: "reads", Algo: module.Incremental},
+ {ID: "events_WriteBufferFromFileDescriptorWriteBytes", Name: "writes", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ chartFileDescriptorIOPS = module.Chart{
+ ID: "file_descriptor_iops",
+ Title: "Read and write operations",
+ Units: "ops/s",
+ Fam: "io",
+ Ctx: "clickhouse.iops",
+ Priority: prioIOPS,
+ Dims: module.Dims{
+ {ID: "events_ReadBufferFromFileDescriptorRead", Name: "reads", Algo: module.Incremental},
+ {ID: "events_WriteBufferFromFileDescriptorWrite", Name: "writes", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ chartFileDescriptorIOErrors = module.Chart{
+ ID: "file_descriptor_io_errors",
+ Title: "Read and write errors",
+ Units: "errors/s",
+ Fam: "io",
+ Ctx: "clickhouse.io_errors",
+ Priority: prioIOErrors,
+ Dims: module.Dims{
+ {ID: "events_ReadBufferFromFileDescriptorReadFailed", Name: "read", Algo: module.Incremental},
+ {ID: "events_WriteBufferFromFileDescriptorWriteFailed", Name: "write", Algo: module.Incremental},
+ },
+ }
+ chartIOSeeks = module.Chart{
+ ID: "io_seeks",
+ Title: "lseek function calls",
+ Units: "ops/s",
+ Fam: "io",
+ Ctx: "clickhouse.io_seeks",
+ Priority: prioIOSeeks,
+ Dims: module.Dims{
+ {ID: "events_Seek", Name: "lseek", Algo: module.Incremental},
+ },
+ }
+ chartIOFileOpens = module.Chart{
+ ID: "io_file_opens",
+ Title: "File opens",
+ Units: "ops/s",
+ Fam: "io",
+ Ctx: "clickhouse.io_file_opens",
+ Priority: prioIOFileOpens,
+ Dims: module.Dims{
+ {ID: "events_FileOpen", Name: "file_open", Algo: module.Incremental},
+ },
+ }
+)
+
+var tableChartsTmpl = module.Charts{
+ tableSizeChartTmpl.Copy(),
+ tablePartsChartTmpl.Copy(),
+ tableRowsChartTmpl.Copy(),
+}
+
+var (
+ tableSizeChartTmpl = module.Chart{
+ ID: "table_%s_database_%s_size",
+ Title: "Table size",
+ Units: "bytes",
+ Fam: "tables",
+ Ctx: "clickhouse.database_table_size",
+ Type: module.Area,
+ Priority: prioDatabaseTableSize,
+ Dims: module.Dims{
+ {ID: "table_%s_database_%s_size_bytes", Name: "size"},
+ },
+ }
+ tablePartsChartTmpl = module.Chart{
+ ID: "table_%s_database_%s_parts",
+ Title: "Table parts",
+ Units: "parts",
+ Fam: "tables",
+ Ctx: "clickhouse.database_table_parts",
+ Priority: prioDatabaseTableParts,
+ Dims: module.Dims{
+ {ID: "table_%s_database_%s_parts", Name: "parts"},
+ },
+ }
+ tableRowsChartTmpl = module.Chart{
+ ID: "table_%s_database_%s_rows",
+ Title: "Table rows",
+ Units: "rows",
+ Fam: "tables",
+ Ctx: "clickhouse.database_table_rows",
+ Priority: prioDatabaseTableRows,
+ Dims: module.Dims{
+ {ID: "table_%s_database_%s_rows", Name: "rows"},
+ },
+ }
+)
+
+var (
+ chartReplicatedPartsActivity = module.Chart{
+ ID: "replicated_parts_activity",
+ Title: "Replicated parts current activity",
+ Units: "parts",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_parts_current_activity",
+ Priority: prioReplicatedPartsCurrentActivity,
+ Dims: module.Dims{
+ {ID: "metrics_ReplicatedFetch", Name: "fetch"},
+ {ID: "metrics_ReplicatedSend", Name: "send"},
+ {ID: "metrics_ReplicatedChecks", Name: "check"},
+ },
+ }
+ chartReplicasMaxAbsoluteDelay = module.Chart{
+ ID: "replicas_max_absolute_delay",
+ Title: "Replicas max absolute delay",
+ Units: "seconds",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicas_max_absolute_delay",
+ Priority: prioReplicasMaxAbsoluteDelay,
+ Dims: module.Dims{
+ {ID: "async_metrics_ReplicasMaxAbsoluteDelay", Name: "replication_delay", Div: precision},
+ },
+ }
+ chartReadonlyReplica = module.Chart{
+ ID: "readonly_replica",
+ Title: "Replicated tables in readonly state",
+ Units: "tables",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_readonly_tables",
+ Priority: prioReadOnlyReplica,
+ Dims: module.Dims{
+ {ID: "metrics_ReadonlyReplica", Name: "read_only"},
+ },
+ }
+ chartReplicatedDataLoss = module.Chart{
+ ID: "replicated_data_loss",
+ Title: "Replicated data loss",
+ Units: "events/s",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_data_loss",
+ Priority: prioReplicatedDataLoss,
+ Dims: module.Dims{
+ {ID: "events_ReplicatedDataLoss", Name: "data_loss", Algo: module.Incremental},
+ },
+ }
+ chartReplicatedPartFetches = module.Chart{
+ ID: "replicated_part_fetches",
+ Title: "Replicated part fetches",
+ Units: "fetches/s",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_part_fetches",
+ Priority: prioReplicatedPartFetches,
+ Dims: module.Dims{
+ {ID: "events_ReplicatedPartFetches", Name: "successful", Algo: module.Incremental},
+ {ID: "events_ReplicatedPartFailedFetches", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartReplicatedPartFetchesOfMerged = module.Chart{
+ ID: "replicated_part_fetches_of_merged",
+ Title: "Replicated part fetches of merged",
+ Units: "fetches/s",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_part_fetches_of_merged",
+ Priority: prioReplicatedPartFetchesOfMerged,
+ Dims: module.Dims{
+ {ID: "events_ReplicatedPartFetchesOfMerged", Name: "merged", Algo: module.Incremental},
+ },
+ }
+ chartReplicatedPartMerges = module.Chart{
+ ID: "replicated_part_merges",
+ Title: "Replicated part merges",
+ Units: "merges/s",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_part_merges",
+ Priority: prioReplicatedPartMerges,
+ Dims: module.Dims{
+ {ID: "events_ReplicatedPartMerges", Name: "merges", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartInsertedBytes = module.Chart{
+ ID: "inserted_bytes",
+ Title: "Inserted data",
+ Units: "bytes/s",
+ Fam: "inserts",
+ Ctx: "clickhouse.inserted_bytes",
+ Priority: prioInsertedBytes,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "events_InsertedBytes", Name: "inserted", Algo: module.Incremental},
+ },
+ }
+ chartInsertedRows = module.Chart{
+ ID: "inserted_rows",
+ Title: "Inserted rows",
+ Units: "rows/s",
+ Fam: "inserts",
+ Ctx: "clickhouse.inserted_rows",
+ Priority: prioInsertedRows,
+ Dims: module.Dims{
+ {ID: "events_InsertedRows", Name: "inserted", Algo: module.Incremental},
+ },
+ }
+ chartRejectedInserts = module.Chart{
+ ID: "rejected_inserts",
+ Title: "Rejected inserts",
+ Units: "inserts/s",
+ Fam: "inserts",
+ Ctx: "clickhouse.rejected_inserts",
+ Priority: prioRejectedInserts,
+ Dims: module.Dims{
+ {ID: "events_RejectedInserts", Name: "rejected", Algo: module.Incremental},
+ },
+ }
+ chartDelayedInserts = module.Chart{
+ ID: "delayed_inserts",
+ Title: "Delayed inserts",
+ Units: "inserts/s",
+ Fam: "inserts",
+ Ctx: "clickhouse.delayed_inserts",
+ Priority: prioDelayedInserts,
+ Dims: module.Dims{
+ {ID: "events_DelayedInserts", Name: "delayed", Algo: module.Incremental},
+ },
+ }
+ chartDelayedInsertsThrottleTime = module.Chart{
+ ID: "delayed_inserts_throttle_time",
+ Title: "Delayed inserts throttle time",
+ Units: "milliseconds",
+ Fam: "inserts",
+ Ctx: "clickhouse.delayed_inserts_throttle_time",
+ Priority: prioDelayedInsertsThrottleTime,
+ Dims: module.Dims{
+ {ID: "events_DelayedInsertsMilliseconds", Name: "delayed_inserts_throttle_time", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartSelectedBytes = module.Chart{
+ ID: "selected_bytes",
+ Title: "Selected data",
+ Units: "bytes/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_bytes",
+ Priority: prioSelectedBytes,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "events_SelectedBytes", Name: "selected", Algo: module.Incremental},
+ },
+ }
+ chartSelectedRows = module.Chart{
+ ID: "selected_rows",
+ Title: "Selected rows",
+ Units: "rows/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_rows",
+ Priority: prioSelectedRows,
+ Dims: module.Dims{
+ {ID: "events_SelectedRows", Name: "selected", Algo: module.Incremental},
+ },
+ }
+ chartSelectedParts = module.Chart{
+ ID: "selected_parts",
+ Title: "Selected parts",
+ Units: "parts/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_parts",
+ Priority: prioSelectedParts,
+ Dims: module.Dims{
+ {ID: "events_SelectedParts", Name: "selected", Algo: module.Incremental},
+ },
+ }
+ chartSelectedRanges = module.Chart{
+ ID: "selected_ranges",
+ Title: "Selected ranges",
+ Units: "ranges/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_ranges",
+ Priority: prioSelectedRanges,
+ Dims: module.Dims{
+ {ID: "events_SelectedRanges", Name: "selected", Algo: module.Incremental},
+ },
+ }
+ chartSelectedMarks = module.Chart{
+ ID: "selected_marks",
+ Title: "Selected marks",
+ Units: "marks/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_marks",
+ Priority: prioSelectedMarks,
+ Dims: module.Dims{
+ {ID: "events_SelectedMarks", Name: "selected", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartMerges = module.Chart{
+ ID: "merges",
+ Title: "Merge operations",
+ Units: "ops/s",
+ Fam: "merges",
+ Ctx: "clickhouse.merges",
+ Priority: prioMerges,
+ Dims: module.Dims{
+ {ID: "events_Merge", Name: "merge", Algo: module.Incremental},
+ },
+ }
+ chartMergesLatency = module.Chart{
+ ID: "merges_latency",
+ Title: "Time spent for background merges",
+ Units: "milliseconds",
+ Fam: "merges",
+ Ctx: "clickhouse.merges_latency",
+ Priority: prioMergesLatency,
+ Dims: module.Dims{
+ {ID: "events_MergesTimeMilliseconds", Name: "merges_time", Algo: module.Incremental},
+ },
+ }
+ chartMergedUncompressedBytes = module.Chart{
+ ID: "merged_uncompressed_bytes",
+ Title: "Uncompressed data read for background merges",
+ Units: "bytes/s",
+ Fam: "merges",
+ Ctx: "clickhouse.merged_uncompressed_bytes",
+ Priority: prioMergedUncompressedBytes,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "events_MergedUncompressedBytes", Name: "merged_uncompressed", Algo: module.Incremental},
+ },
+ }
+ chartMergedRows = module.Chart{
+ ID: "merged_rows",
+ Title: "Merged rows",
+ Units: "rows/s",
+ Fam: "merges",
+ Ctx: "clickhouse.merged_rows",
+ Priority: prioMergedRows,
+ Dims: module.Dims{
+ {ID: "events_MergedRows", Name: "merged", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartMergeTreeDataWriterInsertedRows = module.Chart{
+ ID: "merge_tree_data_writer_inserted_rows",
+ Title: "Rows INSERTed to MergeTree tables",
+ Units: "rows/s",
+ Fam: "merge tree",
+ Ctx: "clickhouse.merge_tree_data_writer_inserted_rows",
+ Priority: prioMergeTreeDataWriterRows,
+ Dims: module.Dims{
+ {ID: "events_MergeTreeDataWriterRows", Name: "inserted", Algo: module.Incremental},
+ },
+ }
+ chartMergeTreeDataWriterUncompressedBytes = module.Chart{
+ ID: "merge_tree_data_writer_uncompressed_bytes",
+ Title: "Data INSERTed to MergeTree tables",
+ Units: "bytes/s",
+ Fam: "merge tree",
+ Ctx: "clickhouse.merge_tree_data_writer_uncompressed_bytes",
+ Type: module.Area,
+ Priority: prioMergeTreeDataWriterUncompressedBytes,
+ Dims: module.Dims{
+ {ID: "events_MergeTreeDataWriterUncompressedBytes", Name: "inserted", Algo: module.Incremental},
+ },
+ }
+ chartMergeTreeDataWriterCompressedBytes = module.Chart{
+ ID: "merge_tree_data_writer_compressed_bytes",
+ Title: "Data written to disk for data INSERTed to MergeTree tables",
+ Units: "bytes/s",
+ Fam: "merge tree",
+ Ctx: "clickhouse.merge_tree_data_writer_compressed_bytes",
+ Type: module.Area,
+ Priority: prioMergeTreeDataWriterCompressedBytes,
+ Dims: module.Dims{
+ {ID: "events_MergeTreeDataWriterCompressedBytes", Name: "written", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartUncompressedCacheRequests = module.Chart{
+ ID: "uncompressed_cache_requests",
+ Title: "Uncompressed cache requests",
+ Units: "requests/s",
+ Fam: "cache",
+ Ctx: "clickhouse.uncompressed_cache_requests",
+ Priority: prioUncompressedCacheRequests,
+ Dims: module.Dims{
+ {ID: "events_UncompressedCacheHits", Name: "hits", Algo: module.Incremental},
+ {ID: "events_UncompressedCacheMisses", Name: "misses", Algo: module.Incremental},
+ },
+ }
+ chartMarkCacheRequests = module.Chart{
+ ID: "mark_cache_requests",
+ Title: "Mark cache requests",
+ Units: "requests/s",
+ Fam: "cache",
+ Ctx: "clickhouse.mark_cache_requests",
+ Priority: prioMarkCacheRequests,
+ Dims: module.Dims{
+ {ID: "events_MarkCacheHits", Name: "hits", Algo: module.Incremental},
+ {ID: "events_MarkCacheMisses", Name: "misses", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartMaxPartCountForPartition = module.Chart{
+ ID: "max_part_count_for_partition",
+ Title: "Max part count for partition",
+ Units: "parts",
+ Fam: "parts",
+ Ctx: "clickhouse.max_part_count_for_partition",
+ Priority: prioMaxPartCountForPartition,
+ Dims: module.Dims{
+ {ID: "async_metrics_MaxPartCountForPartition", Name: "max_parts_partition"},
+ },
+ }
+ chartPartsCount = module.Chart{
+ ID: "parts_count",
+ Title: "Parts",
+ Units: "parts",
+ Fam: "parts",
+ Ctx: "clickhouse.parts_count",
+ Priority: prioParts,
+ Dims: module.Dims{
+ {ID: "metrics_PartsTemporary", Name: "temporary"},
+ {ID: "metrics_PartsPreActive", Name: "pre_active"},
+ {ID: "metrics_PartsActive", Name: "active"},
+ {ID: "metrics_PartsDeleting", Name: "deleting"},
+ {ID: "metrics_PartsDeleteOnDestroy", Name: "delete_on_destroy"},
+ {ID: "metrics_PartsOutdated", Name: "outdated"},
+ {ID: "metrics_PartsWide", Name: "wide"},
+ {ID: "metrics_PartsCompact", Name: "compact"},
+ },
+ }
+)
+
+var (
+ chartDistributedConnections = module.Chart{
+ ID: "distributes_connections",
+		Title:    "Active distributed connections",
+ Units: "connections",
+ Fam: "distributed conns",
+ Ctx: "clickhouse.distributed_connections",
+ Priority: prioDistributedSend,
+ Dims: module.Dims{
+ {ID: "metrics_DistributedSend", Name: "active"},
+ },
+ }
+ chartDistributedConnectionAttempts = module.Chart{
+ ID: "distributes_connections_attempts",
+ Title: "Distributed connection attempts",
+ Units: "attempts/s",
+ Fam: "distributed conns",
+ Ctx: "clickhouse.distributed_connections_attempts",
+ Priority: prioDistributedConnectionTries,
+ Dims: module.Dims{
+ {ID: "events_DistributedConnectionTries", Name: "connection", Algo: module.Incremental},
+ },
+ }
+ chartDistributedConnectionFailRetries = module.Chart{
+ ID: "distributes_connections_fail_retries",
+ Title: "Distributed connection fails with retry",
+ Units: "fails/s",
+ Fam: "distributed conns",
+ Ctx: "clickhouse.distributed_connections_fail_retries",
+ Priority: prioDistributedConnectionFailTry,
+ Dims: module.Dims{
+ {ID: "events_DistributedConnectionFailTry", Name: "connection_retry", Algo: module.Incremental},
+ },
+ }
+ chartDistributedConnectionFailExhaustedRetries = module.Chart{
+ ID: "distributes_connections_fail_exhausted_retries",
+ Title: "Distributed connection fails after all retries finished",
+ Units: "fails/s",
+ Fam: "distributed conns",
+ Ctx: "clickhouse.distributed_connections_fail_exhausted_retries",
+ Priority: prioDistributedConnectionFailAtAll,
+ Dims: module.Dims{
+ {ID: "events_DistributedConnectionFailAtAll", Name: "connection_retry_exhausted", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartDistributedFilesToInsert = module.Chart{
+ ID: "distributes_files_to_insert",
+ Title: "Pending files to process for asynchronous insertion into Distributed tables",
+ Units: "files",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_files_to_insert",
+ Priority: prioDistributedFilesToInsert,
+ Dims: module.Dims{
+ {ID: "metrics_DistributedFilesToInsert", Name: "pending_insertions"},
+ },
+ }
+ chartDistributedRejectedInserts = module.Chart{
+ ID: "distributes_rejected_inserts",
+ Title: "Rejected INSERTs to a Distributed table",
+ Units: "inserts/s",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_rejected_inserts",
+ Priority: prioDistributedRejectedInserts,
+ Dims: module.Dims{
+ {ID: "events_DistributedRejectedInserts", Name: "rejected", Algo: module.Incremental},
+ },
+ }
+ chartDistributedDelayedInserts = module.Chart{
+ ID: "distributes_delayed_inserts",
+ Title: "Delayed INSERTs to a Distributed table",
+ Units: "inserts/s",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_delayed_inserts",
+ Priority: prioDistributedDelayedInserts,
+ Dims: module.Dims{
+ {ID: "events_DistributedDelayedInserts", Name: "delayed", Algo: module.Incremental},
+ },
+ }
+ chartDistributedDelayedInsertsLatency = module.Chart{
+ ID: "distributes_delayed_inserts_latency",
+ Title: "Time spent while the INSERT of a block to a Distributed table was throttled",
+ Units: "milliseconds",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_delayed_inserts_latency",
+ Priority: prioDistributedDelayedInsertsMilliseconds,
+ Dims: module.Dims{
+ {ID: "events_DistributedDelayedInsertsMilliseconds", Name: "delayed_time", Algo: module.Incremental},
+ },
+ }
+ chartDistributedSyncInsertionTimeoutExceeded = module.Chart{
+ ID: "distributes_sync_insertion_timeout_exceeded",
+		Title:    "Distributed table sync insertion timeouts",
+ Units: "timeouts/s",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_sync_insertion_timeout_exceeded",
+ Priority: prioDistributedSyncInsertionTimeoutExceeded,
+ Dims: module.Dims{
+ {ID: "events_DistributedSyncInsertionTimeoutExceeded", Name: "sync_insertion", Algo: module.Incremental},
+ },
+ }
+ chartDistributedAsyncInsertionFailures = module.Chart{
+ ID: "distributes_async_insertions_failures",
+ Title: "Distributed table async insertion failures",
+ Units: "failures/s",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_async_insertions_failures",
+ Priority: prioDistributedAsyncInsertionFailures,
+ Dims: module.Dims{
+ {ID: "events_DistributedAsyncInsertionFailures", Name: "async_insertions", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartUptime = module.Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "clickhouse.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "async_metrics_Uptime", Name: "uptime"},
+ },
+ }
+)
+
+func (c *ClickHouse) addDiskCharts(disk *seenDisk) {
+ charts := diskChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, disk.disk)
+ chart.Labels = []module.Label{
+ {Key: "disk_name", Value: disk.disk},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, disk.disk)
+ }
+ }
+
+ if err := c.Charts().Add(*charts...); err != nil {
+ c.Warning(err)
+ }
+}
+
+func (c *ClickHouse) removeDiskCharts(disk *seenDisk) {
+ px := fmt.Sprintf("disk_%s_", disk.disk)
+ c.removeCharts(px)
+}
+
+func (c *ClickHouse) addTableCharts(table *seenTable) {
+ charts := tableChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, table.table, table.db)
+ chart.Labels = []module.Label{
+ {Key: "database", Value: table.db},
+ {Key: "table", Value: table.table},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, table.table, table.db)
+ }
+ }
+
+ if err := c.Charts().Add(*charts...); err != nil {
+ c.Warning(err)
+ }
+}
+
+func (c *ClickHouse) removeTableCharts(table *seenTable) {
+ px := fmt.Sprintf("table_%s_database_%s_", table.table, table.db)
+ c.removeCharts(px)
+}
+
+func (c *ClickHouse) removeCharts(prefix string) {
+ for _, chart := range *c.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/clickhouse.go b/src/go/plugin/go.d/modules/clickhouse/clickhouse.go
new file mode 100644
index 000000000..3e34f7261
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/clickhouse.go
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("clickhouse", module.Creator{
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ JobConfigSchema: configSchema,
+ })
+}
+
+func New() *ClickHouse {
+ return &ClickHouse{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8123",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: chCharts.Copy(),
+ seenDisks: make(map[string]*seenDisk),
+ seenDbTables: make(map[string]*seenTable),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type (
+ ClickHouse struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ seenDisks map[string]*seenDisk
+ seenDbTables map[string]*seenTable
+ }
+ seenDisk struct{ disk string }
+ seenTable struct{ db, table string }
+)
+
+func (c *ClickHouse) Configuration() any {
+ return c.Config
+}
+
+func (c *ClickHouse) Init() error {
+ if err := c.validateConfig(); err != nil {
+ c.Errorf("config validation: %v", err)
+ return err
+ }
+
+ httpClient, err := c.initHTTPClient()
+ if err != nil {
+ c.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ c.httpClient = httpClient
+
+ c.Debugf("using URL %s", c.URL)
+ c.Debugf("using timeout: %s", c.Timeout)
+
+ return nil
+}
+
+func (c *ClickHouse) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (c *ClickHouse) Charts() *module.Charts {
+ return c.charts
+}
+
+func (c *ClickHouse) Collect() map[string]int64 {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (c *ClickHouse) Cleanup() {
+ if c.httpClient != nil {
+ c.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/clickhouse_test.go b/src/go/plugin/go.d/modules/clickhouse/clickhouse_test.go
new file mode 100644
index 000000000..c3defbda7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/clickhouse_test.go
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataRespSystemAsyncMetrics, _ = os.ReadFile("testdata/resp_system_async_metrics.csv")
+ dataRespSystemMetrics, _ = os.ReadFile("testdata/resp_system_metrics.csv")
+ dataRespSystemEvents, _ = os.ReadFile("testdata/resp_system_events.csv")
+ dataRespSystemParts, _ = os.ReadFile("testdata/resp_system_parts.csv")
+ dataRespSystemDisks, _ = os.ReadFile("testdata/resp_system_disks.csv")
+ dataRespLongestQueryTime, _ = os.ReadFile("testdata/resp_longest_query_time.csv")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataRespSystemAsyncMetrics": dataRespSystemAsyncMetrics,
+ "dataRespSystemMetrics": dataRespSystemMetrics,
+ "dataRespSystemEvents": dataRespSystemEvents,
+ "dataRespSystemParts": dataRespSystemParts,
+ "dataRespSystemDisks": dataRespSystemDisks,
+ "dataRespLongestQueryTime": dataRespLongestQueryTime,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestClickHouse_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &ClickHouse{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestClickHouse_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ click := New()
+ click.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, click.Init())
+ } else {
+ assert.NoError(t, click.Init())
+ }
+ })
+ }
+}
+
+func TestClickHouse_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestClickHouse_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (*ClickHouse, func())
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: prepareCaseOk,
+ },
+ "fails on unexpected response": {
+ wantFail: true,
+ prepare: prepareCaseUnexpectedResponse,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ click, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, click.Check())
+ } else {
+ assert.NoError(t, click.Check())
+ }
+ })
+ }
+}
+
+func TestClickHouse_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (*ClickHouse, func())
+ wantMetrics map[string]int64
+ }{
+ "success on valid response": {
+ prepare: prepareCaseOk,
+ wantMetrics: map[string]int64{
+ "LongestRunningQueryTime": 73,
+ "async_metrics_MaxPartCountForPartition": 7,
+ "async_metrics_ReplicasMaxAbsoluteDelay": 0,
+ "async_metrics_Uptime": 64380,
+ "disk_default_free_space_bytes": 165494767616,
+ "disk_default_used_space_bytes": 45184565248,
+ "events_DelayedInserts": 0,
+ "events_DelayedInsertsMilliseconds": 0,
+ "events_DistributedAsyncInsertionFailures": 0,
+ "events_DistributedConnectionFailAtAll": 0,
+ "events_DistributedConnectionFailTry": 0,
+ "events_DistributedConnectionTries": 0,
+ "events_DistributedDelayedInserts": 0,
+ "events_DistributedDelayedInsertsMilliseconds": 0,
+ "events_DistributedRejectedInserts": 0,
+ "events_DistributedSyncInsertionTimeoutExceeded": 0,
+ "events_FailedInsertQuery": 0,
+ "events_FailedQuery": 0,
+ "events_FailedSelectQuery": 0,
+ "events_FileOpen": 1568962,
+ "events_InsertQuery": 0,
+ "events_InsertQueryTimeMicroseconds": 0,
+ "events_InsertedBytes": 0,
+ "events_InsertedRows": 0,
+ "events_MarkCacheHits": 0,
+ "events_MarkCacheMisses": 0,
+ "events_Merge": 0,
+ "events_MergeTreeDataWriterCompressedBytes": 0,
+ "events_MergeTreeDataWriterRows": 0,
+ "events_MergeTreeDataWriterUncompressedBytes": 0,
+ "events_MergedRows": 0,
+ "events_MergedUncompressedBytes": 0,
+ "events_MergesTimeMilliseconds": 0,
+ "events_Query": 0,
+ "events_QueryMemoryLimitExceeded": 0,
+ "events_QueryPreempted": 0,
+ "events_QueryTimeMicroseconds": 0,
+ "events_ReadBackoff": 0,
+ "events_ReadBufferFromFileDescriptorRead": 0,
+ "events_ReadBufferFromFileDescriptorReadBytes": 0,
+ "events_ReadBufferFromFileDescriptorReadFailed": 0,
+ "events_RejectedInserts": 0,
+ "events_ReplicatedDataLoss": 0,
+ "events_ReplicatedPartFailedFetches": 0,
+ "events_ReplicatedPartFetches": 0,
+ "events_ReplicatedPartFetchesOfMerged": 0,
+ "events_ReplicatedPartMerges": 0,
+ "events_Seek": 0,
+ "events_SelectQuery": 0,
+ "events_SelectQueryTimeMicroseconds": 0,
+ "events_SelectedBytes": 0,
+ "events_SelectedMarks": 0,
+ "events_SelectedParts": 0,
+ "events_SelectedRanges": 0,
+ "events_SelectedRows": 0,
+ "events_SlowRead": 0,
+ "events_SuccessfulInsertQuery": 0,
+ "events_SuccessfulQuery": 0,
+ "events_SuccessfulSelectQuery": 0,
+ "events_UncompressedCacheHits": 0,
+ "events_UncompressedCacheMisses": 0,
+ "events_WriteBufferFromFileDescriptorWrite": 0,
+ "events_WriteBufferFromFileDescriptorWriteBytes": 0,
+ "events_WriteBufferFromFileDescriptorWriteFailed": 0,
+ "metrics_DistributedFilesToInsert": 0,
+ "metrics_DistributedSend": 0,
+ "metrics_HTTPConnection": 0,
+ "metrics_InterserverConnection": 0,
+ "metrics_MemoryTracking": 1270999152,
+ "metrics_MySQLConnection": 0,
+ "metrics_PartsActive": 25,
+ "metrics_PartsCompact": 233,
+ "metrics_PartsDeleteOnDestroy": 0,
+ "metrics_PartsDeleting": 0,
+ "metrics_PartsOutdated": 284,
+ "metrics_PartsPreActive": 0,
+ "metrics_PartsTemporary": 0,
+ "metrics_PartsWide": 76,
+ "metrics_PostgreSQLConnection": 0,
+ "metrics_Query": 1,
+ "metrics_QueryPreempted": 0,
+ "metrics_ReadonlyReplica": 0,
+ "metrics_ReplicatedChecks": 0,
+ "metrics_ReplicatedFetch": 0,
+ "metrics_ReplicatedSend": 0,
+ "metrics_TCPConnection": 1,
+ "table_asynchronous_metric_log_database_system_parts": 6,
+ "table_asynchronous_metric_log_database_system_rows": 70377261,
+ "table_asynchronous_metric_log_database_system_size_bytes": 19113663,
+ "table_metric_log_database_system_parts": 6,
+ "table_metric_log_database_system_rows": 162718,
+ "table_metric_log_database_system_size_bytes": 18302533,
+ "table_processors_profile_log_database_system_parts": 5,
+ "table_processors_profile_log_database_system_rows": 20107,
+ "table_processors_profile_log_database_system_size_bytes": 391629,
+ "table_query_log_database_system_parts": 5,
+ "table_query_log_database_system_rows": 761,
+ "table_query_log_database_system_size_bytes": 196403,
+ "table_trace_log_database_system_parts": 8,
+ "table_trace_log_database_system_rows": 1733076,
+ "table_trace_log_database_system_size_bytes": 28695023,
+ },
+ },
+ "fails on unexpected response": {
+ prepare: prepareCaseUnexpectedResponse,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ click, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := click.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ testMetricsHasAllChartsDims(t, click, mx)
+ }
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, click *ClickHouse, mx map[string]int64) {
+ for _, chart := range *click.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ }
+}
+
+func prepareCaseOk(t *testing.T) (*ClickHouse, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Query().Get("query") {
+ case querySystemEvents:
+ _, _ = w.Write(dataRespSystemEvents)
+ case querySystemMetrics:
+ _, _ = w.Write(dataRespSystemMetrics)
+ case querySystemAsyncMetrics:
+ _, _ = w.Write(dataRespSystemAsyncMetrics)
+ case querySystemParts:
+ _, _ = w.Write(dataRespSystemParts)
+ case querySystemDisks:
+ _, _ = w.Write(dataRespSystemDisks)
+ case queryLongestQueryTime:
+ _, _ = w.Write(dataRespLongestQueryTime)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ click := New()
+ click.URL = srv.URL
+ require.NoError(t, click.Init())
+
+ return click, srv.Close
+}
+
+func prepareCaseUnexpectedResponse(t *testing.T) (*ClickHouse, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ click := New()
+ click.URL = srv.URL
+ require.NoError(t, click.Init())
+
+ return click, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*ClickHouse, func()) {
+ t.Helper()
+ click := New()
+ click.URL = "http://127.0.0.1:65001/stat"
+ require.NoError(t, click.Init())
+
+ return click, func() {}
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/collect.go b/src/go/plugin/go.d/modules/clickhouse/collect.go
new file mode 100644
index 000000000..8bb756528
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/collect.go
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "encoding/csv"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "slices"
+)
+
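+// precision is the multiplier applied to fractional values (e.g. elapsed seconds)
+// before they are stored in the int64 metrics map, preserving three decimal places.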
+const precision = 1000
+
+func (c *ClickHouse) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := c.collectSystemEvents(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectSystemMetrics(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectSystemAsyncMetrics(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectSystemParts(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectSystemDisks(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectLongestRunningQueryTime(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
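+// doOKDecodeCSV executes the request, verifies a 200 OK response and streams the
+// CSVWithNames body through readCSVResponseData, calling assign for every parsed field.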
+func (c *ClickHouse) doOKDecodeCSV(req *http.Request, assign func(column, value string, lineEnd bool)) error {
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ return readCSVResponseData(resp.Body, assign)
+}
+
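+// readCSVResponseData parses a CSVWithNames payload: the first record is taken as the
+// header, and assign is called for each field of every subsequent record with its
+// column name, value, and a flag marking the last field of the record.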
+func readCSVResponseData(reader io.Reader, assign func(column, value string, lineEnd bool)) error {
+ r := csv.NewReader(reader)
+ r.ReuseRecord = true
+
+ var columns []string
+
+ for {
+ record, err := r.Read()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ if len(columns) == 0 {
+ columns = slices.Clone(record)
+ continue
+ }
+
+ if len(columns) != len(record) {
+ return fmt.Errorf("column count mismatch: %d vs %d", len(columns), len(record))
+ }
+
+ for i, l := 0, len(record); i < l; i++ {
+ assign(columns[i], record[i], i == l-1)
+ }
+ }
+
+ return nil
+}
+
+func makeURLQuery(q string) string {
+ return url.Values{"query": {q}}.Encode()
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_async_metrics.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_async_metrics.go
new file mode 100644
index 000000000..79b7e0ffd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_async_metrics.go
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "errors"
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const querySystemAsyncMetrics = `
+SELECT
+ metric,
+ value
+FROM
+ system.asynchronous_metrics
+WHERE
+ metric LIKE 'Uptime'
+ OR metric LIKE 'MaxPartCountForPartition'
+ OR metric LIKE 'ReplicasMaxAbsoluteDelay' FORMAT CSVWithNames
+`
+
+func (c *ClickHouse) collectSystemAsyncMetrics(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemAsyncMetrics)
+
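+	// want lists the async metrics of interest and the multiplier applied before
+	// storing them as int64 (ReplicasMaxAbsoluteDelay keeps fractions via precision).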
+ want := map[string]float64{
+ "Uptime": 1,
+ "MaxPartCountForPartition": 1,
+ "ReplicasMaxAbsoluteDelay": precision,
+ }
+
+ px := "async_metrics_"
+ var metric string
+ var n int
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "metric":
+ metric = value
+ case "value":
+ mul, ok := want[metric]
+ if !ok {
+ return
+ }
+ n++
+ if v, err := strconv.ParseFloat(value, 64); err == nil {
+ mx[px+metric] = int64(v * mul)
+ }
+ }
+ })
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return errors.New("no system async metrics data returned")
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_disks.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_disks.go
new file mode 100644
index 000000000..4b9829bf6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_disks.go
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const querySystemDisks = `
+SELECT
+ name,
+ sum(free_space) as free_space,
+ sum(total_space) as total_space
+FROM
+ system.disks
+GROUP BY
+ name FORMAT CSVWithNames
+`
+
+type diskStats struct {
+ name string
+ totalBytes int64
+ freeBytes int64
+}
+
+func (c *ClickHouse) collectSystemDisks(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemDisks)
+
+ seen := make(map[string]*diskStats)
+
+ getDisk := func(name string) *diskStats {
+ s, ok := seen[name]
+ if !ok {
+ s = &diskStats{name: name}
+ seen[name] = s
+ }
+ return s
+ }
+
+ var name string
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "name":
+ name = value
+ case "free_space":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getDisk(name).freeBytes = v
+ case "total_space":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getDisk(name).totalBytes = v
+ }
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, disk := range seen {
+ if _, ok := c.seenDisks[disk.name]; !ok {
+ v := &seenDisk{disk: disk.name}
+ c.seenDisks[disk.name] = v
+ c.addDiskCharts(v)
+ }
+
+ px := "disk_" + disk.name + "_"
+
+ mx[px+"free_space_bytes"] = disk.freeBytes
+ mx[px+"used_space_bytes"] = disk.totalBytes - disk.freeBytes
+ }
+
+ for k, v := range c.seenDisks {
+ if _, ok := seen[k]; !ok {
+ delete(c.seenDisks, k)
+ c.removeDiskCharts(v)
+ }
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_events.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_events.go
new file mode 100644
index 000000000..de3c33a1e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_events.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "errors"
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const querySystemEvents = `
+SELECT
+ event,
+ value
+FROM
+ system.events FORMAT CSVWithNames
+`
+
+func (c *ClickHouse) collectSystemEvents(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemEvents)
+
+ px := "events_"
+ var event string
+ var n int
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "event":
+ event = value
+ case "value":
+ if !wantSystemEvents[event] {
+ return
+ }
+ n++
+ if v, err := strconv.ParseInt(value, 10, 64); err == nil {
+ mx[px+event] = v
+ }
+ }
+ })
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return errors.New("no system events data returned")
+ }
+
+ // CH doesn't expose events with 0 values
+ for k := range wantSystemEvents {
+ k = px + k
+ if _, ok := mx[k]; !ok {
+ mx[k] = 0
+ }
+ }
+
+ mx["events_SuccessfulQuery"] = mx["events_Query"] - mx["events_FailedQuery"]
+ mx["events_SuccessfulSelectQuery"] = mx["events_SelectQuery"] - mx["events_FailedSelectQuery"]
+ mx["events_SuccessfulInsertQuery"] = mx["events_InsertQuery"] - mx["events_FailedInsertQuery"]
+
+ return nil
+}
+
+var wantSystemEvents = map[string]bool{
+ "SlowRead": true,
+ "ReadBackoff": true,
+ "Query": true,
+ "FailedQuery": true,
+ "QueryTimeMicroseconds": true,
+ "SelectQuery": true,
+ "FailedSelectQuery": true,
+ "SelectQueryTimeMicroseconds": true,
+ "InsertQuery": true,
+ "FailedInsertQuery": true,
+ "InsertQueryTimeMicroseconds": true,
+ "QueryPreempted": true,
+ "QueryMemoryLimitExceeded": true,
+ "InsertedRows": true,
+ "InsertedBytes": true,
+ "DelayedInserts": true,
+ "DelayedInsertsMilliseconds": true,
+ "RejectedInserts": true,
+ "SelectedRows": true,
+ "SelectedBytes": true,
+ "SelectedParts": true,
+ "SelectedRanges": true,
+ "SelectedMarks": true,
+ "Merge": true,
+ "MergedRows": true,
+ "MergedUncompressedBytes": true,
+ "MergesTimeMilliseconds": true,
+ "MergeTreeDataWriterRows": true,
+ "MergeTreeDataWriterUncompressedBytes": true,
+ "MergeTreeDataWriterCompressedBytes": true,
+ "UncompressedCacheHits": true,
+ "UncompressedCacheMisses": true,
+ "MarkCacheHits": true,
+ "MarkCacheMisses": true,
+ "Seek": true,
+ "FileOpen": true,
+ "ReadBufferFromFileDescriptorReadBytes": true,
+ "WriteBufferFromFileDescriptorWriteBytes": true,
+ "ReadBufferFromFileDescriptorRead": true,
+ "WriteBufferFromFileDescriptorWrite": true,
+ "ReadBufferFromFileDescriptorReadFailed": true,
+ "WriteBufferFromFileDescriptorWriteFailed": true,
+ "DistributedConnectionTries": true,
+ "DistributedConnectionFailTry": true,
+ "DistributedConnectionFailAtAll": true,
+ "DistributedRejectedInserts": true,
+ "DistributedDelayedInserts": true,
+ "DistributedDelayedInsertsMilliseconds": true,
+ "DistributedSyncInsertionTimeoutExceeded": true,
+ "DistributedAsyncInsertionFailures": true,
+ "ReplicatedDataLoss": true,
+ "ReplicatedPartFetches": true,
+ "ReplicatedPartFailedFetches": true,
+ "ReplicatedPartMerges": true,
+ "ReplicatedPartFetchesOfMerged": true,
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_metrics.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_metrics.go
new file mode 100644
index 000000000..26891f808
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_metrics.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "errors"
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const querySystemMetrics = `
+SELECT
+ metric,
+ value
+FROM
+ system.metrics FORMAT CSVWithNames
+`
+
+func (c *ClickHouse) collectSystemMetrics(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemMetrics)
+
+ px := "metrics_"
+ var metric string
+ var n int
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "metric":
+ metric = value
+ case "value":
+ if !wantSystemMetrics[metric] {
+ return
+ }
+ n++
+ if v, err := strconv.ParseInt(value, 10, 64); err == nil {
+ mx[px+metric] = v
+ }
+ }
+ })
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return errors.New("no system metrics data returned")
+ }
+
+ return nil
+}
+
+var wantSystemMetrics = map[string]bool{
+ "Query": true,
+ "TCPConnection": true,
+ "HTTPConnection": true,
+ "MySQLConnection": true,
+ "PostgreSQLConnection": true,
+ "InterserverConnection": true,
+ "MemoryTracking": true,
+ "QueryPreempted": true,
+ "ReplicatedFetch": true,
+ "ReplicatedSend": true,
+ "ReplicatedChecks": true,
+ "ReadonlyReplica": true,
+ "PartsTemporary": true,
+ "PartsPreActive": true,
+ "PartsActive": true,
+ "PartsDeleting": true,
+ "PartsDeleteOnDestroy": true,
+ "PartsOutdated": true,
+ "PartsWide": true,
+ "PartsCompact": true,
+ "DistributedSend": true,
+ "DistributedFilesToInsert": true,
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_parts.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_parts.go
new file mode 100644
index 000000000..3e9dc6ac2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_parts.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const querySystemParts = `
+SELECT
+ database,
+ table,
+ sum(bytes) as bytes,
+ count() as parts,
+ sum(rows) as rows
+FROM
+ system.parts
+WHERE
+ active = 1
+GROUP BY
+ database,
+ table FORMAT CSVWithNames
+`
+
+type tableStats struct {
+ database string
+ table string
+ bytes int64
+ parts int64
+ rows int64
+}
+
+func (c *ClickHouse) collectSystemParts(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemParts)
+
+ seen := make(map[string]*tableStats)
+
+ getTable := func(db, table string) *tableStats {
+ k := table + db
+ s, ok := seen[k]
+ if !ok {
+ s = &tableStats{database: db, table: table}
+ seen[k] = s
+ }
+ return s
+ }
+
+ var database, table string
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "database":
+ database = value
+ case "table":
+ table = value
+ case "bytes":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getTable(database, table).bytes = v
+ case "parts":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getTable(database, table).parts = v
+ case "rows":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getTable(database, table).rows = v
+ }
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, table := range seen {
+ k := table.table + table.database
+ if _, ok := c.seenDbTables[k]; !ok {
+ v := &seenTable{db: table.database, table: table.table}
+ c.seenDbTables[k] = v
+ c.addTableCharts(v)
+ }
+
+ px := fmt.Sprintf("table_%s_database_%s_", table.table, table.database)
+
+ mx[px+"size_bytes"] = table.bytes
+ mx[px+"parts"] = table.parts
+ mx[px+"rows"] = table.rows
+ }
+
+ for k, v := range c.seenDbTables {
+ if _, ok := seen[k]; !ok {
+ delete(c.seenDbTables, k)
+ c.removeTableCharts(v)
+ }
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/collect_system_processes.go b/src/go/plugin/go.d/modules/clickhouse/collect_system_processes.go
new file mode 100644
index 000000000..53698ea6c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/collect_system_processes.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const queryLongestQueryTime = `
+SELECT
+ toString(max(elapsed)) as value
+FROM
+ system.processes FORMAT CSVWithNames
+`
+
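+// collectLongestRunningQueryTime stores the longest elapsed query time reported by
+// system.processes, scaled by precision so fractional seconds survive the int64 conversion.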
+func (c *ClickHouse) collectLongestRunningQueryTime(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(queryLongestQueryTime)
+
+ return c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ if column == "value" {
+ if v, err := strconv.ParseFloat(value, 64); err == nil {
+ mx["LongestRunningQueryTime"] = int64(v * precision)
+ }
+ }
+ })
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/config_schema.json b/src/go/plugin/go.d/modules/clickhouse/config_schema.json
new file mode 100644
index 000000000..8b0129ece
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ClickHouse collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the ClickHouse server.",
+ "type": "string",
+ "default": "http://127.0.0.1:8123",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/init.go b/src/go/plugin/go.d/modules/clickhouse/init.go
new file mode 100644
index 000000000..4b8ce3e4f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/init.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (c *ClickHouse) validateConfig() error {
+ if c.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (c *ClickHouse) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(c.Client)
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md b/src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md
new file mode 100644
index 000000000..c4f1384c0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/integrations/clickhouse.md
@@ -0,0 +1,368 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/clickhouse/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/clickhouse/metadata.yaml"
+sidebar_label: "ClickHouse"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ClickHouse
+
+
+<img src="https://netdata.cloud/img/clickhouse.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: clickhouse
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector retrieves performance data from ClickHouse for connections, queries, resources, replication, IO, and data operations (inserts, selects, merges) using HTTP requests and ClickHouse system tables. It monitors your ClickHouse server's health and activity.
+
+
+It sends HTTP requests to the ClickHouse [HTTP interface](https://clickhouse.com/docs/en/interfaces/http), executing SELECT queries to retrieve data from various system tables.
+Specifically, it collects metrics from the following tables (a request sketch follows the list):
+
+- system.metrics
+- system.asynchronous_metrics
+- system.events
+- system.disks
+- system.parts
+- system.processes
+
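+For illustration, a minimal, self-contained sketch of the kind of request described above, assuming the default endpoint `http://127.0.0.1:8123`, no authentication, and a query against `system.metrics` (this program is hypothetical and not part of the collector):
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Default ClickHouse HTTP endpoint assumed here.
+	q := url.Values{"query": {"SELECT metric, value FROM system.metrics FORMAT CSVWithNames"}}
+
+	resp, err := http.Get("http://127.0.0.1:8123/?" + q.Encode())
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, _ := io.ReadAll(resp.Body)
+	fmt.Print(string(body)) // CSV header row followed by metric,value lines
+}
+```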
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects ClickHouse instances running on localhost that are listening on port 8123.
+On startup, it tries to collect metrics from:
+
+- http://127.0.0.1:8123
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ClickHouse instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| clickhouse.connections | tcp, http, mysql, postgresql, interserver | connections |
+| clickhouse.slow_reads | slow | reads/s |
+| clickhouse.read_backoff | read_backoff | events/s |
+| clickhouse.memory_usage | used | bytes |
+| clickhouse.running_queries | running | queries |
+| clickhouse.queries_preempted | preempted | queries |
+| clickhouse.queries | successful, failed | queries/s |
+| clickhouse.select_queries | successful, failed | selects/s |
+| clickhouse.insert_queries | successful, failed | inserts/s |
+| clickhouse.queries_memory_limit_exceeded | mem_limit_exceeded | queries/s |
+| clickhouse.longest_running_query_time | longest_query_time | seconds |
+| clickhouse.queries_latency | queries_time | microseconds |
+| clickhouse.select_queries_latency | selects_time | microseconds |
+| clickhouse.insert_queries_latency | inserts_time | microseconds |
+| clickhouse.io | reads, writes | bytes/s |
+| clickhouse.iops | reads, writes | ops/s |
+| clickhouse.io_errors | read, write | errors/s |
+| clickhouse.io_seeks | lseek | ops/s |
+| clickhouse.io_file_opens | file_open | ops/s |
+| clickhouse.replicated_parts_current_activity | fetch, send, check | parts |
+| clickhouse.replicas_max_absolute_delay | replication_delay | seconds |
+| clickhouse.replicated_readonly_tables | read_only | tables |
+| clickhouse.replicated_data_loss | data_loss | events |
+| clickhouse.replicated_part_fetches | successful, failed | fetches/s |
+| clickhouse.inserted_rows | inserted | rows/s |
+| clickhouse.inserted_bytes | inserted | bytes/s |
+| clickhouse.rejected_inserts | rejected | inserts/s |
+| clickhouse.delayed_inserts | delayed | inserts/s |
+| clickhouse.delayed_inserts_throttle_time | delayed_inserts_throttle_time | milliseconds |
+| clickhouse.selected_bytes | selected | bytes/s |
+| clickhouse.selected_rows | selected | rows/s |
+| clickhouse.selected_parts | selected | parts/s |
+| clickhouse.selected_ranges | selected | ranges/s |
+| clickhouse.selected_marks | selected | marks/s |
+| clickhouse.merges | merge | ops/s |
+| clickhouse.merges_latency | merges_time | milliseconds |
+| clickhouse.merged_uncompressed_bytes | merged_uncompressed | bytes/s |
+| clickhouse.merged_rows | merged | rows/s |
+| clickhouse.merge_tree_data_writer_inserted_rows | inserted | rows/s |
+| clickhouse.merge_tree_data_writer_uncompressed_bytes | inserted | bytes/s |
+| clickhouse.merge_tree_data_writer_compressed_bytes | written | bytes/s |
+| clickhouse.uncompressed_cache_requests | hits, misses | requests/s |
+| clickhouse.mark_cache_requests | hits, misses | requests/s |
+| clickhouse.max_part_count_for_partition | max_parts_partition | parts |
+| clickhouse.parts_count | temporary, pre_active, active, deleting, delete_on_destroy, outdated, wide, compact | parts |
+| clickhouse.distributed_connections | active | connections |
+| clickhouse.distributed_connections_attempts | connection | attempts/s |
+| clickhouse.distributed_connections_fail_retries | connection_retry | fails/s |
+| clickhouse.distributed_connections_fail_exhausted_retries | connection_retry_exhausted | fails/s |
+| clickhouse.distributed_files_to_insert | pending_insertions | files |
+| clickhouse.distributed_rejected_inserts | rejected | inserts/s |
+| clickhouse.distributed_delayed_inserts | delayed | inserts/s |
+| clickhouse.distributed_delayed_inserts_latency | delayed_time | milliseconds |
+| clickhouse.distributed_sync_insertion_timeout_exceeded | sync_insertion | timeouts/s |
+| clickhouse.distributed_async_insertions_failures | async_insertions | failures/s |
+| clickhouse.uptime | uptime | seconds |
+
+### Per disk
+
+These metrics refer to the Disk.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| disk_name | Name of the disk as defined in the [server configuration](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes_configure). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| clickhouse.disk_space_usage | free, used | bytes |
+
+### Per table
+
+These metrics refer to the Database Table.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | Name of the database. |
+| table | Name of the table. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| clickhouse.database_table_size | size | bytes |
+| clickhouse.database_table_parts | parts | parts |
+| clickhouse.database_table_rows | rows | rows |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ clickhouse_restarted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.uptime | ClickHouse has recently been restarted |
+| [ clickhouse_queries_preempted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.queries_preempted | ClickHouse has queries that are stopped and waiting due to priority setting |
+| [ clickhouse_long_running_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.longest_running_query_time | ClickHouse has a long-running query exceeding the threshold |
+| [ clickhouse_rejected_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.rejected_inserts | ClickHouse has INSERT queries that are rejected due to high number of active data parts for partition in a MergeTree |
+| [ clickhouse_delayed_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.delayed_inserts | ClickHouse has INSERT queries that are throttled due to high number of active data parts for partition in a MergeTree |
+| [ clickhouse_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicas_max_absolute_delay | ClickHouse is experiencing replication lag greater than 5 minutes |
+| [ clickhouse_replicated_readonly_tables ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicated_readonly_tables | ClickHouse has replicated tables in readonly state due to ZooKeeper session loss/startup without ZooKeeper configured |
+| [ clickhouse_max_part_count_for_partition ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.max_part_count_for_partition | ClickHouse high number of parts per partition |
+| [ clickhouse_distributed_connections_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_connections_fail_exhausted_retries | ClickHouse has failed distributed connections after exhausting all retry attempts |
+| [ clickhouse_distributed_files_to_insert ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_files_to_insert | ClickHouse high number of pending files to process for asynchronous insertion into Distributed tables |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/clickhouse.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/clickhouse.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8123 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+ClickHouse with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:8123
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+
+ - name: remote
+ url: http://192.0.2.1:8123
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `clickhouse` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m clickhouse
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `clickhouse` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep clickhouse
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep clickhouse /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep clickhouse
+```
+
+
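To make the collection mechanism described in the Overview above concrete, here is a small illustrative Go program that queries the ClickHouse HTTP interface in the same way: it sends a SELECT against a system table and reads back CSV. The specific query, output format, and endpoint shown are assumptions for demonstration, not the collector's exact requests.

```go
// Illustrative only: one SELECT against system.metrics over the HTTP
// interface on port 8123, returned as CSV with a header row.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
	"time"
)

func main() {
	query := "SELECT metric, value FROM system.metrics FORMAT CSVWithNames"
	endpoint := "http://127.0.0.1:8123/?query=" + url.QueryEscape(query)

	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get(endpoint)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// Prints a "metric","value" header followed by one row per metric.
	fmt.Print(string(body))
}
```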
diff --git a/src/go/plugin/go.d/modules/clickhouse/metadata.yaml b/src/go/plugin/go.d/modules/clickhouse/metadata.yaml
new file mode 100644
index 000000000..e9a6b9152
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/metadata.yaml
@@ -0,0 +1,624 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-clickhouse
+ plugin_name: go.d.plugin
+ module_name: clickhouse
+ monitored_instance:
+ name: ClickHouse
+ link: https://clickhouse.com/
+ icon_filename: clickhouse.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - database
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector retrieves performance data from ClickHouse for connections, queries, resources, replication, IO, and data operations (inserts, selects, merges) using HTTP requests and ClickHouse system tables. It monitors your ClickHouse server's health and activity.
+ method_description: |
+ It sends HTTP requests to the ClickHouse [HTTP interface](https://clickhouse.com/docs/en/interfaces/http), executing SELECT queries to retrieve data from various system tables.
+ Specifically, it collects metrics from the following tables:
+
+ - system.metrics
+ - system.asynchronous_metrics
+ - system.events
+ - system.disks
+ - system.parts
+ - system.processes
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects ClickHouse instances running on localhost that are listening on port 8123.
+ On startup, it tries to collect metrics from:
+
+ - http://127.0.0.1:8123
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/clickhouse.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8123
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: ClickHouse with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:8123
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+
+ - name: remote
+ url: http://192.0.2.1:8123
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: clickhouse_restarted
+ metric: clickhouse.uptime
+ info: ClickHouse has recently been restarted
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_queries_preempted
+ metric: clickhouse.queries_preempted
+ info: ClickHouse has queries that are stopped and waiting due to priority setting
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_long_running_query
+ metric: clickhouse.longest_running_query_time
+ info: ClickHouse has a long-running query exceeding the threshold
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_rejected_inserts
+ metric: clickhouse.rejected_inserts
+ info: ClickHouse has INSERT queries that are rejected due to high number of active data parts for partition in a MergeTree
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_delayed_inserts
+ metric: clickhouse.delayed_inserts
+ info: ClickHouse has INSERT queries that are throttled due to high number of active data parts for partition in a MergeTree
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_replication_lag
+ metric: clickhouse.replicas_max_absolute_delay
+ info: ClickHouse is experiencing replication lag greater than 5 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_replicated_readonly_tables
+ metric: clickhouse.replicated_readonly_tables
+ info: ClickHouse has replicated tables in readonly state due to ZooKeeper session loss/startup without ZooKeeper configured
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_max_part_count_for_partition
+ metric: clickhouse.max_part_count_for_partition
+ info: ClickHouse high number of parts per partition
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_distributed_connections_failures
+ metric: clickhouse.distributed_connections_fail_exhausted_retries
+ info: ClickHouse has failed distributed connections after exhausting all retry attempts
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_distributed_files_to_insert
+ metric: clickhouse.distributed_files_to_insert
+ info: ClickHouse high number of pending files to process for asynchronous insertion into Distributed tables
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: clickhouse.connections
+ description: Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: tcp
+ - name: http
+ - name: mysql
+ - name: postgresql
+ - name: interserver
+ - name: clickhouse.slow_reads
+ description: Slow reads from a file
+ unit: reads/s
+ chart_type: line
+ dimensions:
+ - name: slow
+ - name: clickhouse.read_backoff
+ description: Read backoff events
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: read_backoff
+ - name: clickhouse.memory_usage
+ description: Memory usage
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: clickhouse.running_queries
+ description: Running queries
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: running
+ - name: clickhouse.queries_preempted
+ description: Queries waiting due to priority
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: preempted
+ - name: clickhouse.queries
+ description: Queries
+ unit: queries/s
+ chart_type: stacked
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: clickhouse.select_queries
+ description: Select queries
+ unit: selects/s
+ chart_type: stacked
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: clickhouse.insert_queries
+ description: Insert queries
+ unit: inserts/s
+ chart_type: stacked
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: clickhouse.queries_memory_limit_exceeded
+ description: Memory limit exceeded for query
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: mem_limit_exceeded
+ - name: clickhouse.longest_running_query_time
+ description: Longest running query time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: longest_query_time
+ - name: clickhouse.queries_latency
+ description: Queries latency
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: queries_time
+ - name: clickhouse.select_queries_latency
+ description: Select queries latency
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: selects_time
+ - name: clickhouse.insert_queries_latency
+ description: Insert queries latency
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: inserts_time
+ - name: clickhouse.io
+ description: Read and written data
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: clickhouse.iops
+ description: Read and write operations
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: clickhouse.io_errors
+ description: Read and write errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: clickhouse.io_seeks
+ description: lseek function calls
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: lseek
+ - name: clickhouse.io_file_opens
+ description: File opens
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: file_open
+ - name: clickhouse.replicated_parts_current_activity
+ description: Replicated parts current activity
+ unit: parts
+ chart_type: line
+ dimensions:
+ - name: fetch
+ - name: send
+ - name: check
+ - name: clickhouse.replicas_max_absolute_delay
+ description: Replicas max absolute delay
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: replication_delay
+ - name: clickhouse.replicated_readonly_tables
+ description: Replicated tables in readonly state
+ unit: tables
+ chart_type: line
+ dimensions:
+ - name: read_only
+ - name: clickhouse.replicated_data_loss
+ description: Replicated data loss
+ unit: events
+ chart_type: line
+ dimensions:
+ - name: data_loss
+ - name: clickhouse.replicated_part_fetches
+ description: Replicated part fetches
+ unit: fetches/s
+ chart_type: line
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: clickhouse.inserted_rows
+ description: Inserted rows
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: inserted
+ - name: clickhouse.inserted_bytes
+ description: Inserted data
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: inserted
+ - name: clickhouse.rejected_inserts
+ description: Rejected inserts
+ unit: inserts/s
+ chart_type: line
+ dimensions:
+ - name: rejected
+ - name: clickhouse.delayed_inserts
+ description: Delayed inserts
+ unit: inserts/s
+ chart_type: line
+ dimensions:
+ - name: delayed
+ - name: clickhouse.delayed_inserts_throttle_time
+ description: Delayed inserts throttle time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: delayed_inserts_throttle_time
+ - name: clickhouse.selected_bytes
+ description: Selected data
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: selected
+ - name: clickhouse.selected_rows
+ description: Selected rows
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: selected
+ - name: clickhouse.selected_parts
+ description: Selected parts
+ unit: parts/s
+ chart_type: line
+ dimensions:
+ - name: selected
+ - name: clickhouse.selected_ranges
+ description: Selected ranges
+ unit: ranges/s
+ chart_type: line
+ dimensions:
+ - name: selected
+ - name: clickhouse.selected_marks
+ description: Selected marks
+ unit: marks/s
+ chart_type: line
+ dimensions:
+ - name: selected
+ - name: clickhouse.merges
+ description: Merge operations
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: merge
+ - name: clickhouse.merges_latency
+ description: Time spent for background merges
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: merges_time
+ - name: clickhouse.merged_uncompressed_bytes
+ description: Uncompressed data read for background merges
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: merged_uncompressed
+ - name: clickhouse.merged_rows
+ description: Merged rows
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: merged
+ - name: clickhouse.merge_tree_data_writer_inserted_rows
+ description: Rows INSERTed to MergeTree tables
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: inserted
+ - name: clickhouse.merge_tree_data_writer_uncompressed_bytes
+ description: Data INSERTed to MergeTree tables
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: inserted
+ - name: clickhouse.merge_tree_data_writer_compressed_bytes
+ description: Data written to disk for data INSERTed to MergeTree tables
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: written
+ - name: clickhouse.uncompressed_cache_requests
+ description: Uncompressed cache requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: clickhouse.mark_cache_requests
+ description: Mark cache requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: clickhouse.max_part_count_for_partition
+ description: Max part count for partition
+ unit: parts
+ chart_type: line
+ dimensions:
+ - name: max_parts_partition
+ - name: clickhouse.parts_count
+ description: Parts
+ unit: parts
+ chart_type: line
+ dimensions:
+ - name: temporary
+ - name: pre_active
+ - name: active
+ - name: deleting
+ - name: delete_on_destroy
+ - name: outdated
+ - name: wide
+ - name: compact
+ - name: clickhouse.distributed_connections
+ description: Active distributed connection
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: clickhouse.distributed_connections_attempts
+ description: Distributed connection attempts
+ unit: attempts/s
+ chart_type: line
+ dimensions:
+ - name: connection
+ - name: clickhouse.distributed_connections_fail_retries
+ description: Distributed connection fails with retry
+ unit: fails/s
+ chart_type: line
+ dimensions:
+ - name: connection_retry
+ - name: clickhouse.distributed_connections_fail_exhausted_retries
+ description: Distributed connection fails after all retries finished
+ unit: fails/s
+ chart_type: line
+ dimensions:
+ - name: connection_retry_exhausted
+ - name: clickhouse.distributed_files_to_insert
+ description: Pending files to process for asynchronous insertion into Distributed tables
+ unit: files
+ chart_type: line
+ dimensions:
+ - name: pending_insertions
+ - name: clickhouse.distributed_rejected_inserts
+ description: Rejected INSERTs to a Distributed table
+ unit: inserts/s
+ chart_type: line
+ dimensions:
+ - name: rejected
+ - name: clickhouse.distributed_delayed_inserts
+ description: Delayed INSERTs to a Distributed table
+ unit: inserts/s
+ chart_type: line
+ dimensions:
+ - name: delayed
+ - name: clickhouse.distributed_delayed_inserts_latency
+ description: Time spent while the INSERT of a block to a Distributed table was throttled
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: delayed_time
+ - name: clickhouse.distributed_sync_insertion_timeout_exceeded
+ description: Distributed table sync insertions timeouts
+ unit: timeouts/s
+ chart_type: line
+ dimensions:
+ - name: sync_insertion
+ - name: clickhouse.distributed_async_insertions_failures
+ description: Distributed table async insertion failures
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: async_insertions
+ - name: clickhouse.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: disk
+ description: These metrics refer to the Disk.
+ labels:
+ - name: disk_name
+ description: Name of the disk as defined in the [server configuration](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes_configure).
+ metrics:
+ - name: clickhouse.disk_space_usage
+ description: Disk space usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: table
+ description: These metrics refer to the Database Table.
+ labels:
+ - name: database
+ description: Name of the database.
+ - name: table
+ description: Name of the table.
+ metrics:
+ - name: clickhouse.database_table_size
+ description: Table size
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: size
+ - name: clickhouse.database_table_parts
+ description: Table parts
+ unit: parts
+ chart_type: line
+ dimensions:
+ - name: parts
+ - name: clickhouse.database_table_rows
+ description: Table rows
+ unit: rows
+ chart_type: line
+ dimensions:
+ - name: rows
diff --git a/src/go/plugin/go.d/modules/clickhouse/testdata/config.json b/src/go/plugin/go.d/modules/clickhouse/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/clickhouse/testdata/config.yaml b/src/go/plugin/go.d/modules/clickhouse/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
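The two fixtures above (testdata/config.json and testdata/config.yaml) encode the same configuration document. A common way such fixtures are used is a round-trip test that decodes both and checks they produce the same struct; the sketch below assumes a `Config` type with matching json/yaml tags and is not taken from this patch.

```go
// Sketch under assumptions: Config is the module's configuration struct
// (name and tags assumed here), and both fixtures must decode to
// identical values.
package clickhouse

import (
	"encoding/json"
	"os"
	"reflect"
	"testing"

	"gopkg.in/yaml.v3"
)

func TestConfigFixturesDecodeEqually(t *testing.T) {
	readFile := func(path string) []byte {
		data, err := os.ReadFile(path)
		if err != nil {
			t.Fatal(err)
		}
		return data
	}

	var fromJSON, fromYAML Config
	if err := json.Unmarshal(readFile("testdata/config.json"), &fromJSON); err != nil {
		t.Fatal(err)
	}
	if err := yaml.Unmarshal(readFile("testdata/config.yaml"), &fromYAML); err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(fromJSON, fromYAML) {
		t.Fatalf("fixtures decoded differently:\njson: %+v\nyaml: %+v", fromJSON, fromYAML)
	}
}
```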
diff --git a/src/go/plugin/go.d/modules/clickhouse/testdata/resp_longest_query_time.csv b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_longest_query_time.csv
new file mode 100644
index 000000000..85119aa6f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_longest_query_time.csv
@@ -0,0 +1,2 @@
+"value"
+"0.0738"
diff --git a/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_async_metrics.csv b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_async_metrics.csv
new file mode 100644
index 000000000..7c9da4f46
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_async_metrics.csv
@@ -0,0 +1,434 @@
+"metric","value"
+"AsynchronousMetricsCalculationTimeSpent",0.003263409
+"NumberOfDetachedByUserParts",0
+"NumberOfDetachedParts",0
+"AsynchronousHeavyMetricsCalculationTimeSpent",1.6e-7
+"TotalPrimaryKeyBytesInMemoryAllocated",342740
+"TotalPartsOfMergeTreeTablesSystem",26
+"TotalRowsOfMergeTreeTablesSystem",72275588
+"TotalBytesOfMergeTreeTablesSystem",66552963
+"TotalRowsOfMergeTreeTables",72275588
+"TotalBytesOfMergeTreeTables",66552963
+"NumberOfDatabases",4
+"MaxPartCountForPartition",7
+"ReplicasSumMergesInQueue",0
+"ReplicasSumInsertsInQueue",0
+"ReplicasSumQueueSize",0
+"ReplicasMaxInsertsInQueue",0
+"ReplicasMaxQueueSize",0
+"DiskUnreserved_default",165498056704
+"DiskAvailable_default",165498056704
+"DiskUsed_default",45181276160
+"FilesystemLogsPathAvailableINodes",12209636
+"FilesystemLogsPathUsedBytes",45181276160
+"FilesystemLogsPathAvailableBytes",165498056704
+"FilesystemLogsPathTotalBytes",210679332864
+"FilesystemMainPathUsedINodes",847436
+"FilesystemMainPathAvailableBytes",165498056704
+"AsynchronousHeavyMetricsUpdateInterval",120.000112
+"HashTableStatsCacheMisses",35
+"OSNiceTimeCPU2",0
+"Uptime",64380.292003818
+"FilesystemCacheBytes",0
+"OSSoftIrqTimeCPU0",0.0199978602289555
+"QueryCacheEntries",0
+"QueryCacheBytes",0
+"IndexMarkCacheBytes",0
+"OSNiceTimeCPU10",0
+"UncompressedCacheCells",0
+"UncompressedCacheBytes",0
+"IndexUncompressedCacheCells",0
+"HTTPThreads",1
+"InterserverThreads",0
+"PageCacheBytes",0
+"OSSoftIrqTimeCPU3",0
+"VMNumMaps",3034
+"BlockWriteTime_sr0",0
+"NetworkSendDrop_veth22b9458",0
+"NetworkSendBytes_veth22b9458",0
+"CPUFrequencyMHz_11",2000
+"NetworkReceiveDrop_veth22b9458",0
+"OSSystemTimeCPU13",0
+"NetworkReceiveBytes_veth22b9458",0
+"BlockWriteOps_dm-0",0
+"NetworkSendErrors_vethe1fd940",0
+"OSGuestNiceTimeCPU9",0
+"NetworkSendPackets_vethe1fd940",0
+"BlockReadMerges_dm-0",0
+"NetworkSendBytes_vethe1fd940",0
+"NetworkReceiveDrop_vethe1fd940",0
+"OSUserTimeCPU5",0
+"NetworkSendDrop_veth0cdb608",0
+"NetworkReceiveErrors_veth0cdb608",0
+"CPUFrequencyMHz_2",2000
+"OSGuestNiceTime",0
+"NetworkReceivePackets_veth0cdb608",0
+"CompiledExpressionCacheCount",5
+"NetworkSendDrop_veth8415c5c",0
+"NetworkReceiveErrors_veth8415c5c",0
+"OSUserTimeCPU9",0
+"NetworkReceivePackets_veth8415c5c",0
+"NetworkReceiveBytes_veth8415c5c",0
+"NetworkSendPackets_vethfa2b7f2",0
+"NetworkSendErrors_vethb608e1b",0
+"OSNiceTimeCPU8",0
+"NetworkReceiveDrop_vethb608e1b",0
+"OSIOWaitTimeCPU7",0
+"NetworkReceiveErrors_vethe1fd940",0
+"NetworkReceiveErrors_vethb608e1b",0
+"CompiledExpressionCacheBytes",65536
+"NetworkSendDrop_vethfa2b7f2",0
+"IndexUncompressedCacheBytes",0
+"CPUFrequencyMHz_12",2000
+"NetworkSendBytes_vethfa2b7f2",0
+"NetworkReceiveDrop_vethfa2b7f2",0
+"NetworkSendErrors_veth0cdb608",0
+"OSSystemTimeCPU7",0
+"NetworkReceivePackets_vethfa2b7f2",0
+"OSThreadsTotal",1432
+"NetworkSendBytes_docker0",0
+"NetworkReceiveDrop_docker0",0
+"OSIOWaitTimeCPU2",0
+"NetworkReceiveBytes_vethb608e1b",0
+"OSIOWaitTimeCPU10",0
+"NetworkReceiveBytes_vethfa2b7f2",0
+"jemalloc.epoch",64382
+"NetworkReceiveErrors_docker0",0
+"OSGuestNiceTimeCPU6",0
+"NetworkSendDrop_br-392a8d0e2863",0
+"OSStealTimeCPU13",0
+"NetworkSendErrors_br-392a8d0e2863",0
+"NetworkSendBytes_br-392a8d0e2863",0
+"NetworkReceiveBytes_veth0cdb608",0
+"NetworkReceiveBytes_br-392a8d0e2863",0
+"NetworkSendDrop_dummy0",0
+"jemalloc.retained",94481276928
+"NetworkSendPackets_dummy0",0
+"NetworkSendBytes_dummy0",0
+"OSStealTimeCPU3",0
+"NetworkReceiveErrors_dummy0",0
+"NetworkReceivePackets_dummy0",0
+"NetworkReceiveBytes_dummy0",0
+"NetworkReceiveBytes_vethe1fd940",0
+"OSUserTimeCPU6",0
+"NetworkSendDrop_ens18",0
+"NetworkSendErrors_ens18",0
+"OSNiceTimeNormalized",0
+"NetworkReceiveErrors_ens18",0
+"BlockQueueTime_sr0",0
+"FilesystemCacheFiles",0
+"BlockActiveTime_dm-0",0
+"BlockActiveTime_sr0",0
+"BlockInFlightOps_sr0",0
+"BlockDiscardTime_sr0",0
+"OSGuestNiceTimeCPU1",0
+"BlockReadBytes_sr0",0
+"BlockDiscardMerges_sr0",0
+"BlockReadMerges_sr0",0
+"OSSoftIrqTimeCPU1",0
+"BlockDiscardTime_dm-0",0
+"OSUserTimeCPU7",0.00999893011447775
+"BlockWriteBytes_dm-0",0
+"BlockReadBytes_dm-0",0
+"OSContextSwitches",5242
+"FilesystemMainPathAvailableINodes",12209636
+"BlockDiscardOps_dm-0",0
+"BlockWriteTime_dm-0",0
+"BlockQueueTime_sda",0
+"OSIrqTimeCPU11",0
+"BlockActiveTime_sda",0
+"OSIrqTimeCPU4",0
+"NetworkSendPackets_ens18",3
+"BlockDiscardTime_sda",0
+"Jitter",0.000107
+"BlockReadBytes_sda",0
+"BlockDiscardMerges_sda",0
+"NetworkSendErrors_veth8415c5c",0
+"BlockReadMerges_sda",0
+"NetworkSendPackets_br-392a8d0e2863",0
+"BlockDiscardOps_sda",0
+"BlockWriteOps_sda",0
+"OSSystemTimeCPU8",0
+"CPUFrequencyMHz_15",2000
+"CPUFrequencyMHz_14",2000
+"OSNiceTimeCPU9",0
+"NetworkReceivePackets_veth22b9458",0
+"BlockWriteOps_sr0",0
+"BlockWriteTime_sda",0
+"OSGuestTimeCPU6",0
+"NetworkReceiveBytes_ens18",442
+"CPUFrequencyMHz_10",2000
+"BlockReadOps_sr0",0
+"CPUFrequencyMHz_7",2000
+"CPUFrequencyMHz_13",2000
+"CPUFrequencyMHz_6",2000
+"CPUFrequencyMHz_5",2000
+"BlockDiscardOps_sr0",0
+"NetworkReceiveErrors_vethfa2b7f2",0
+"CPUFrequencyMHz_4",2000
+"CPUFrequencyMHz_3",2000
+"CPUFrequencyMHz_1",2000
+"OSNiceTimeCPU6",0
+"NetworkSendPackets_vethb608e1b",0
+"CPUFrequencyMHz_0",2000
+"OSGuestNiceTimeCPU0",0
+"OSMemoryFreePlusCached",28217331712
+"TCPThreads",1
+"OSStealTimeCPU6",0
+"OSMemoryCached",9402826752
+"OSMemoryBuffers",1473716224
+"OSMemoryAvailable",29864460288
+"MemoryDataAndStack",101636190208
+"OSUserTimeCPU11",0
+"ReplicasMaxMergesInQueue",0
+"OSGuestTimeNormalized",0
+"OSSoftIrqTimeNormalized",0.0012498662643097187
+"OSGuestNiceTimeCPU13",0
+"OSIrqTimeNormalized",0
+"OSUserTimeCPU4",0
+"OSIOWaitTimeNormalized",0
+"OSIdleTimeNormalized",0.9892691482011424
+"OSSystemTimeNormalized",0.004999465057238875
+"BlockWriteBytes_sr0",0
+"NetworkReceivePackets_docker0",0
+"OSUserTimeNormalized",0.0024997325286194375
+"OSSystemTime",0.079991440915822
+"NetworkReceivePackets_br-392a8d0e2863",0
+"OSProcessesCreated",7
+"OSInterrupts",2971
+"BlockWriteMerges_sr0",0
+"OSProcessesRunning",7
+"BlockInFlightOps_sda",0
+"CPUFrequencyMHz_9",2000
+"OSGuestTimeCPU5",0
+"OSNiceTimeCPU13",0
+"TotalPartsOfMergeTreeTables",26
+"OSGuestTimeCPU15",0
+"OSStealTimeCPU15",0
+"OSIrqTimeCPU15",0
+"OSGuestTimeCPU10",0
+"OSStealTimeCPU1",0
+"OSSystemTimeCPU15",0
+"OSNiceTimeCPU15",0
+"PageCachePinnedBytes",0
+"OSUserTimeCPU15",0
+"OSSoftIrqTimeCPU14",0
+"OSIdleTimeCPU14",0.999893011447775
+"OSIdleTimeCPU1",0.9898940813332973
+"OSSoftIrqTimeCPU13",0
+"OSStealTimeCPU12",0
+"OSIdleTimeCPU13",0.9898940813332973
+"NetworkSendBytes_veth0cdb608",0
+"MemoryResidentMax",15751659520
+"OSIdleTimeCPU15",0.999893011447775
+"OSGuestNiceTimeCPU5",0
+"OSGuestNiceTimeCPU12",0
+"OSIrqTime",0
+"OSGuestTimeCPU9",0
+"OSGuestTimeCPU12",0
+"OSSoftIrqTimeCPU12",0
+"OSIdleTime",15.828306371218279
+"OSSoftIrqTimeCPU7",0
+"OSUserTimeCPU2",0
+"OSIdleTimeCPU12",0.9898940813332973
+"OSGuestTimeCPU2",0
+"OSSystemTimeCPU12",0.00999893011447775
+"OSSystemTimeCPU10",0.00999893011447775
+"OSGuestTimeCPU11",0
+"OSStealTimeCPU11",0
+"OSSoftIrqTimeCPU11",0
+"OSIOWaitTimeCPU11",0
+"BlockQueueTime_dm-0",0
+"OSIdleTimeCPU11",0.999893011447775
+"OSIrqTimeCPU8",0
+"OSNiceTimeCPU11",0
+"OSStealTimeCPU10",0
+"NetworkReceivePackets_vethb608e1b",0
+"NetworkReceiveErrors_br-392a8d0e2863",0
+"OSNiceTimeCPU14",0
+"NetworkSendPackets_veth22b9458",0
+"OSSystemTimeCPU2",0
+"OSSoftIrqTimeCPU10",0
+"ReplicasMaxRelativeDelay",0
+"OSIrqTimeCPU10",0
+"MMapCacheCells",0
+"OSIrqTimeCPU9",0
+"NetworkReceiveDrop_veth0cdb608",0
+"OSIrqTimeCPU1",0
+"OSIrqTimeCPU14",0
+"OSIrqTimeCPU13",0
+"NetworkSendDrop_docker0",0
+"OSIdleTimeCPU10",0.9798951512188194
+"OSGuestTimeCPU14",0
+"OSUserTimeCPU13",0
+"OSIOWaitTimeCPU14",0
+"OSStealTimeCPU9",0
+"NetworkReceiveErrors_veth22b9458",0
+"NetworkReceiveDrop_dummy0",0
+"OSSoftIrqTimeCPU9",0
+"OSIOWaitTimeCPU9",0
+"OSIdleTimeCPU9",0.999893011447775
+"OSGuestTimeCPU3",0
+"OSIOWaitTimeCPU13",0
+"BlockReadTime_sr0",0
+"OSSoftIrqTimeCPU15",0
+"OSIOWaitTimeCPU12",0
+"OSGuestTimeCPU8",0
+"OSStealTimeCPU8",0
+"OSUserTimeCPU0",0
+"OSIdleTimeCPU8",0.9798951512188194
+"OSGuestNiceTimeCPU4",0
+"OSUserTimeCPU8",0.00999893011447775
+"OSSystemTimeCPU9",0
+"PrometheusThreads",1
+"NetworkSendErrors_veth22b9458",0
+"OSMemoryTotal",33652854784
+"OSGuestNiceTimeCPU7",0
+"OSStealTimeCPU7",0
+"BlockReadOps_dm-0",0
+"OSStealTimeCPU14",0
+"OSGuestNiceTimeCPU8",0
+"NetworkReceivePackets_vethe1fd940",0
+"OSIdleTimeCPU7",0.9898940813332973
+"OSSoftIrqTimeCPU8",0
+"OSNiceTimeCPU7",0
+"OSIOWaitTimeCPU8",0
+"OSIOWaitTimeCPU6",0
+"NetworkSendBytes_vethb608e1b",0
+"NetworkSendBytes_ens18",714
+"OSIdleTimeCPU3",0.9898940813332973
+"OSGuestNiceTimeCPU11",0
+"jemalloc.background_thread.num_threads",0
+"BlockReadTime_sda",0
+"OSSoftIrqTimeCPU6",0
+"OSSoftIrqTimeCPU2",0
+"OSGuestNiceTimeCPU2",0
+"OSUserTimeCPU12",0
+"LoadAverage5",0.22
+"OSGuestNiceTimeCPU10",0
+"jemalloc.background_thread.num_runs",0
+"FilesystemLogsPathUsedINodes",847436
+"OSIrqTimeCPU6",0
+"OSIrqTimeCPU7",0
+"HashTableStatsCacheHits",124
+"OSNiceTimeCPU0",0
+"CPUFrequencyMHz_8",2000
+"MarkCacheBytes",76560
+"FilesystemMainPathTotalINodes",13057072
+"NetworkSendPackets_veth0cdb608",0
+"NetworkReceiveDrop_br-392a8d0e2863",0
+"OSStealTimeNormalized",0
+"NetworkReceiveDrop_veth8415c5c",0
+"BlockDiscardBytes_sr0",0
+"OSSystemTimeCPU5",0.00999893011447775
+"OSUptime",1444727.52
+"FilesystemLogsPathTotalINodes",13057072
+"NetworkSendPackets_veth8415c5c",0
+"OSStealTimeCPU4",0
+"OSSoftIrqTimeCPU4",0
+"NetworkSendErrors_vethfa2b7f2",0
+"NetworkSendBytes_veth8415c5c",0
+"MemoryShared",333160448
+"OSGuestTimeCPU4",0
+"jemalloc.metadata",265969056
+"OSGuestNiceTimeCPU15",0
+"NumberOfTablesSystem",100
+"OSSystemTimeCPU11",0
+"OSIdleTimeCPU4",0.9898940813332973
+"IndexMarkCacheFiles",0
+"OSIOWaitTimeCPU15",0
+"OSNiceTimeCPU4",0
+"jemalloc.arenas.all.pactive",113928
+"OSUserTimeCPU1",0
+"NetworkReceivePackets_ens18",3
+"OSIrqTimeCPU3",0
+"OSIOWaitTimeCPU3",0
+"OSUserTime",0.039995720457911
+"OSIdleTimeCPU5",0.9898940813332973
+"OSOpenFiles",3008
+"OSIrqTimeCPU5",0
+"PostgreSQLThreads",0
+"OSUserTimeCPU10",0
+"TotalPrimaryKeyBytesInMemory",74202
+"OSIrqTimeCPU2",0
+"jemalloc.mapped",1036132352
+"BlockWriteMerges_dm-0",0
+"OSMemoryFreeWithoutCached",18814504960
+"DiskTotal_default",210679332864
+"OSGuestNiceTimeCPU3",0
+"OSIrqTimeCPU12",0
+"OSIOWaitTimeCPU5",0
+"OSNiceTimeCPU1",0
+"NetworkReceiveDrop_ens18",0
+"jemalloc.active",466649088
+"OSNiceTimeCPU12",0
+"OSGuestNiceTimeNormalized",0
+"OSSystemTimeCPU14",0
+"OSSoftIrqTimeCPU5",0
+"BlockReadTime_dm-0",0
+"MemoryResident",1279381504
+"BlockDiscardMerges_dm-0",0
+"OSGuestTimeCPU0",0
+"NetworkSendDrop_vethe1fd940",0
+"MemoryVirtual",102527639552
+"OSSystemTimeCPU3",0.00999893011447775
+"OSNiceTimeCPU3",0
+"FilesystemMainPathUsedBytes",45181276160
+"OSUserTimeCPU14",0
+"OSSoftIrqTime",0.0199978602289555
+"jemalloc.metadata_thp",0
+"OSStealTimeCPU0",0
+"OSSystemTimeCPU0",0
+"OSGuestNiceTimeCPU14",0
+"OSStealTimeCPU2",0
+"OSSystemTimeCPU1",0
+"BlockInFlightOps_dm-0",0
+"OSSystemTimeCPU6",0.02999679034343325
+"VMMaxMapCount",262144
+"NetworkSendPackets_docker0",0
+"OSUserTimeCPU3",0.00999893011447775
+"OSGuestTime",0
+"OSNiceTimeCPU5",0
+"OSIOWaitTimeCPU1",0
+"OSIOWaitTime",0
+"jemalloc.arenas.all.pmuzzy",0
+"NetworkSendErrors_docker0",0
+"OSNiceTime",0
+"LoadAverage15",0.24
+"OSIdleTimeCPU0",0.9898940813332973
+"MarkCacheFiles",54
+"OSIOWaitTimeCPU4",0
+"BlockWriteMerges_sda",0
+"NumberOfTables",128
+"OSIrqTimeCPU0",0
+"jemalloc.arenas.all.muzzy_purged",0
+"NetworkSendDrop_vethb608e1b",0
+"OSProcessesBlocked",0
+"jemalloc.allocated",412954608
+"NetworkSendErrors_dummy0",0
+"ReplicasMaxAbsoluteDelay",0
+"OSThreadsRunnable",8
+"OSStealTimeCPU5",0
+"LoadAverage1",0.18
+"OSGuestTimeCPU7",0
+"BlockDiscardBytes_sda",0
+"jemalloc.arenas.all.pdirty",60698
+"OSIdleTimeCPU6",0.9698962211043417
+"OSSystemTimeCPU4",0
+"OSStealTime",0
+"MemoryCode",283267072
+"AsynchronousMetricsUpdateInterval",1.000107
+"NetworkReceiveBytes_docker0",0
+"BlockDiscardBytes_dm-0",0
+"jemalloc.arenas.all.dirty_purged",363559728
+"HashTableStatsCacheEntries",9
+"MySQLThreads",0
+"FilesystemMainPathTotalBytes",210679332864
+"jemalloc.resident",960614400
+"OSGuestTimeCPU13",0
+"jemalloc.background_thread.run_intervals",0
+"BlockReadOps_sda",0
+"OSIOWaitTimeCPU0",0
+"OSIdleTimeCPU2",0.9898940813332973
+"BlockWriteBytes_sda",0
+"OSGuestTimeCPU1",0
diff --git a/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_disks.csv b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_disks.csv
new file mode 100644
index 000000000..42751e54e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_disks.csv
@@ -0,0 +1,2 @@
+"name","free_space","total_space"
+"default",165494767616,210679332864
diff --git a/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_events.csv b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_events.csv
new file mode 100644
index 000000000..546e7e7e0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_events.csv
@@ -0,0 +1,102 @@
+"event","value"
+"Query", 94
+"SelectQuery", 94
+"InitialQuery", 94
+"QueriesWithSubqueries", 174
+"SelectQueriesWithSubqueries", 174
+"FailedQuery", 4
+"FailedSelectQuery", 4
+"QueryTimeMicroseconds", 870752
+"SelectQueryTimeMicroseconds", 870752
+"FileOpen",1568962
+"Seek", 27585
+"ReadBufferFromFileDescriptorRead", 8578903
+"ReadBufferFromFileDescriptorReadBytes", 29425788980
+"WriteBufferFromFileDescriptorWrite", 1413953
+"WriteBufferFromFileDescriptorWriteBytes", 19290175707
+"ReadCompressedBytes", 17765217623
+"CompressedReadBufferBlocks", 22044981
+"CompressedReadBufferBytes", 639439855526
+"OpenedFileCacheHits", 138
+"OpenedFileCacheMisses",346495
+"OpenedFileCacheMicroseconds", 307462
+"IOBufferAllocs", 4887901
+"IOBufferAllocBytes", 1142983723070
+"ArenaAllocChunks", 196
+"ArenaAllocBytes", 1556480
+"FunctionExecute", 8055228
+"TableFunctionExecute", 41
+"MarkCacheHits", 229
+"MarkCacheMisses", 54
+"CreatedReadBufferOrdinary",346633
+"DiskReadElapsedMicroseconds", 64340828296
+"DiskWriteElapsedMicroseconds", 33066436
+"NetworkReceiveElapsedMicroseconds", 17548931930
+"NetworkSendElapsedMicroseconds", 3313835
+"NetworkReceiveBytes", 2121500
+"NetworkSendBytes", 5492910494
+"InsertedRows", 28386169
+"InsertedBytes", 1104809236
+"CompileFunction", 5
+"CompileExpressionsMicroseconds",134487
+"CompileExpressionsBytes", 65536
+"ExternalProcessingFilesTotal", 605
+"SelectedParts", 68
+"SelectedRanges", 68
+"SelectedMarks", 836
+"SelectedRows", 29975411
+"SelectedBytes", 1127243995
+"WaitMarksLoadMicroseconds", 2823449
+"LoadedMarksCount", 31080118
+"LoadedMarksMemoryBytes",48212328
+"Merge", 3929781
+"MergedRows", 30485884752
+"MergedUncompressedBytes", 641315961578
+"MergesTimeMilliseconds", 4586438
+"MergeTreeDataWriterRows", 28386169
+"MergeTreeDataWriterUncompressedBytes", 1104809236
+"MergeTreeDataWriterCompressedBytes", 1127957207
+"MergeTreeDataWriterBlocks", 25840
+"MergeTreeDataWriterBlocksAlreadySorted", 16671
+"MergeTreeDataWriterSortingBlocksMicroseconds",7881192
+"MergeTreeDataWriterMergingBlocksMicroseconds", 30988
+"InsertedCompactParts", 25840
+"MergedIntoWideParts", 11349
+"MergedIntoCompactParts", 3132
+"ContextLock", 1727098
+"ContextLockWaitMicroseconds", 4763
+"RWLockAcquiredReadLocks", 322474
+"RWLockReadersWaitMilliseconds", 660
+"PartsLockHoldMicroseconds", 23499086
+"PartsLockWaitMicroseconds",13947
+"RealTimeMicroseconds", 5484454350
+"UserTimeMicroseconds", 4588286964
+"SystemTimeMicroseconds", 890356522
+"MemoryAllocatorPurge", 2
+"MemoryAllocatorPurgeTimeMicroseconds", 135392
+"SoftPageFaults", 71033079
+"OSCPUWaitMicroseconds", 1028066
+"OSCPUVirtualTimeMicroseconds", 5475763006
+"OSReadBytes", 8192
+"OSWriteBytes",20749721600
+"OSReadChars", 17832770560
+"OSWriteChars", 17947151360
+"QueryProfilerRuns", 7153
+"ThreadPoolReaderPageCacheHit", 428
+"ThreadPoolReaderPageCacheHitBytes", 8568656
+"ThreadPoolReaderPageCacheHitElapsedMicroseconds", 11635
+"SynchronousReadWaitMicroseconds", 11965
+"MainConfigLoads", 1
+"AggregationOptimizedEqualRangesOfKeys", 17
+"ServerStartupMilliseconds",463
+"AsyncLoaderWaitMicroseconds", 65377
+"LogTrace", 285068
+"LogDebug", 171106
+"LogInfo", 47
+"LogWarning", 2
+"LogError", 52
+"InterfaceHTTPSendBytes", 37853
+"InterfaceHTTPReceiveBytes", 72018
+"InterfaceNativeReceiveBytes", 11646
+"InterfacePrometheusSendBytes",3817434
+"InterfacePrometheusReceiveBytes", 2037836
diff --git a/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_metrics.csv b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_metrics.csv
new file mode 100644
index 000000000..d5ecc29a7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_metrics.csv
@@ -0,0 +1,283 @@
+"metric","value"
+"Query",1
+"Merge",0
+"Move",0
+"PartMutation",0
+"ReplicatedFetch",0
+"ReplicatedSend",0
+"ReplicatedChecks",0
+"BackgroundMergesAndMutationsPoolTask",0
+"BackgroundMergesAndMutationsPoolSize",64
+"BackgroundFetchesPoolTask",0
+"BackgroundFetchesPoolSize",32
+"BackgroundCommonPoolTask",0
+"BackgroundCommonPoolSize",16
+"BackgroundMovePoolTask",0
+"BackgroundMovePoolSize",16
+"BackgroundSchedulePoolTask",0
+"BackgroundSchedulePoolSize",512
+"BackgroundBufferFlushSchedulePoolTask",0
+"BackgroundBufferFlushSchedulePoolSize",16
+"BackgroundDistributedSchedulePoolTask",0
+"BackgroundDistributedSchedulePoolSize",16
+"BackgroundMessageBrokerSchedulePoolTask",0
+"BackgroundMessageBrokerSchedulePoolSize",16
+"CacheDictionaryUpdateQueueBatches",0
+"CacheDictionaryUpdateQueueKeys",0
+"DiskSpaceReservedForMerge",0
+"DistributedSend",0
+"QueryPreempted",0
+"TCPConnection",1
+"MySQLConnection",0
+"HTTPConnection",0
+"InterserverConnection",0
+"PostgreSQLConnection",0
+"OpenFileForRead",14
+"OpenFileForWrite",0
+"TotalTemporaryFiles",0
+"TemporaryFilesForSort",0
+"TemporaryFilesForAggregation",0
+"TemporaryFilesForJoin",0
+"TemporaryFilesUnknown",0
+"Read",2
+"RemoteRead",0
+"Write",0
+"NetworkReceive",0
+"NetworkSend",0
+"SendScalars",0
+"SendExternalTables",0
+"QueryThread",0
+"ReadonlyReplica",0
+"MemoryTracking",1270999152
+"MergesMutationsMemoryTracking",0
+"EphemeralNode",0
+"ZooKeeperSession",0
+"ZooKeeperWatch",0
+"ZooKeeperRequest",0
+"DelayedInserts",0
+"ContextLockWait",0
+"StorageBufferRows",0
+"StorageBufferBytes",0
+"DictCacheRequests",0
+"Revision",54486
+"VersionInteger",24005001
+"RWLockWaitingReaders",0
+"RWLockWaitingWriters",0
+"RWLockActiveReaders",1
+"RWLockActiveWriters",0
+"GlobalThread",714
+"GlobalThreadActive",651
+"GlobalThreadScheduled",651
+"LocalThread",0
+"LocalThreadActive",0
+"LocalThreadScheduled",0
+"MergeTreeDataSelectExecutorThreads",0
+"MergeTreeDataSelectExecutorThreadsActive",0
+"MergeTreeDataSelectExecutorThreadsScheduled",0
+"BackupsThreads",0
+"BackupsThreadsActive",0
+"BackupsThreadsScheduled",0
+"RestoreThreads",0
+"RestoreThreadsActive",0
+"RestoreThreadsScheduled",0
+"MarksLoaderThreads",0
+"MarksLoaderThreadsActive",0
+"MarksLoaderThreadsScheduled",0
+"IOPrefetchThreads",0
+"IOPrefetchThreadsActive",0
+"IOPrefetchThreadsScheduled",0
+"IOWriterThreads",0
+"IOWriterThreadsActive",0
+"IOWriterThreadsScheduled",0
+"IOThreads",0
+"IOThreadsActive",0
+"IOThreadsScheduled",0
+"ThreadPoolRemoteFSReaderThreads",0
+"ThreadPoolRemoteFSReaderThreadsActive",0
+"ThreadPoolRemoteFSReaderThreadsScheduled",0
+"ThreadPoolFSReaderThreads",0
+"ThreadPoolFSReaderThreadsActive",0
+"ThreadPoolFSReaderThreadsScheduled",0
+"BackupsIOThreads",0
+"BackupsIOThreadsActive",0
+"BackupsIOThreadsScheduled",0
+"DiskObjectStorageAsyncThreads",0
+"DiskObjectStorageAsyncThreadsActive",0
+"StorageHiveThreads",0
+"StorageHiveThreadsActive",0
+"StorageHiveThreadsScheduled",0
+"TablesLoaderBackgroundThreads",0
+"TablesLoaderBackgroundThreadsActive",0
+"TablesLoaderBackgroundThreadsScheduled",0
+"TablesLoaderForegroundThreads",0
+"TablesLoaderForegroundThreadsActive",0
+"TablesLoaderForegroundThreadsScheduled",0
+"DatabaseOnDiskThreads",0
+"DatabaseOnDiskThreadsActive",0
+"DatabaseOnDiskThreadsScheduled",0
+"DatabaseCatalogThreads",0
+"DatabaseCatalogThreadsActive",0
+"DatabaseCatalogThreadsScheduled",0
+"DestroyAggregatesThreads",0
+"DestroyAggregatesThreadsActive",0
+"DestroyAggregatesThreadsScheduled",0
+"HashedDictionaryThreads",0
+"HashedDictionaryThreadsActive",0
+"HashedDictionaryThreadsScheduled",0
+"CacheDictionaryThreads",0
+"CacheDictionaryThreadsActive",0
+"CacheDictionaryThreadsScheduled",0
+"ParallelFormattingOutputFormatThreads",0
+"ParallelFormattingOutputFormatThreadsActive",0
+"ParallelFormattingOutputFormatThreadsScheduled",0
+"ParallelParsingInputFormatThreads",0
+"ParallelParsingInputFormatThreadsActive",0
+"ParallelParsingInputFormatThreadsScheduled",0
+"MergeTreeBackgroundExecutorThreads",48
+"MergeTreeBackgroundExecutorThreadsActive",48
+"MergeTreeBackgroundExecutorThreadsScheduled",48
+"AsynchronousInsertThreads",0
+"AsynchronousInsertThreadsActive",0
+"AsynchronousInsertThreadsScheduled",0
+"AsynchronousInsertQueueSize",0
+"AsynchronousInsertQueueBytes",0
+"StartupSystemTablesThreads",0
+"StartupSystemTablesThreadsActive",0
+"StartupSystemTablesThreadsScheduled",0
+"AggregatorThreads",0
+"AggregatorThreadsActive",0
+"AggregatorThreadsScheduled",0
+"DDLWorkerThreads",0
+"DDLWorkerThreadsActive",0
+"DDLWorkerThreadsScheduled",0
+"StorageDistributedThreads",0
+"StorageDistributedThreadsActive",0
+"StorageDistributedThreadsScheduled",0
+"DistributedInsertThreads",0
+"DistributedInsertThreadsActive",0
+"DistributedInsertThreadsScheduled",0
+"StorageS3Threads",0
+"StorageS3ThreadsActive",0
+"StorageS3ThreadsScheduled",0
+"ObjectStorageS3Threads",0
+"ObjectStorageS3ThreadsActive",0
+"ObjectStorageS3ThreadsScheduled",0
+"ObjectStorageAzureThreads",0
+"ObjectStorageAzureThreadsActive",0
+"ObjectStorageAzureThreadsScheduled",0
+"MergeTreePartsLoaderThreads",0
+"MergeTreePartsLoaderThreadsActive",0
+"MergeTreePartsLoaderThreadsScheduled",0
+"MergeTreeOutdatedPartsLoaderThreads",0
+"MergeTreeOutdatedPartsLoaderThreadsActive",0
+"MergeTreeOutdatedPartsLoaderThreadsScheduled",0
+"MergeTreeUnexpectedPartsLoaderThreads",0
+"MergeTreeUnexpectedPartsLoaderThreadsActive",0
+"MergeTreeUnexpectedPartsLoaderThreadsScheduled",0
+"MergeTreePartsCleanerThreads",0
+"MergeTreePartsCleanerThreadsActive",0
+"MergeTreePartsCleanerThreadsScheduled",0
+"DatabaseReplicatedCreateTablesThreads",0
+"DatabaseReplicatedCreateTablesThreadsActive",0
+"DatabaseReplicatedCreateTablesThreadsScheduled",0
+"IDiskCopierThreads",0
+"IDiskCopierThreadsActive",0
+"IDiskCopierThreadsScheduled",0
+"SystemReplicasThreads",0
+"SystemReplicasThreadsActive",0
+"SystemReplicasThreadsScheduled",0
+"RestartReplicaThreads",0
+"RestartReplicaThreadsActive",0
+"RestartReplicaThreadsScheduled",0
+"QueryPipelineExecutorThreads",0
+"QueryPipelineExecutorThreadsActive",0
+"QueryPipelineExecutorThreadsScheduled",0
+"ParquetDecoderThreads",0
+"ParquetDecoderThreadsActive",0
+"ParquetDecoderThreadsScheduled",0
+"ParquetEncoderThreads",0
+"ParquetEncoderThreadsActive",0
+"ParquetEncoderThreadsScheduled",0
+"DWARFReaderThreads",0
+"DWARFReaderThreadsActive",0
+"DWARFReaderThreadsScheduled",0
+"OutdatedPartsLoadingThreads",0
+"OutdatedPartsLoadingThreadsActive",0
+"OutdatedPartsLoadingThreadsScheduled",0
+"DistributedBytesToInsert",0
+"BrokenDistributedBytesToInsert",0
+"DistributedFilesToInsert",0
+"BrokenDistributedFilesToInsert",0
+"TablesToDropQueueSize",0
+"MaxDDLEntryID",0
+"MaxPushedDDLEntryID",0
+"PartsTemporary",0
+"PartsPreCommitted",0
+"PartsCommitted",25
+"PartsPreActive",0
+"PartsActive",25
+"AttachedDatabase",5
+"AttachedTable",128
+"PartsOutdated",284
+"PartsDeleting",0
+"PartsDeleteOnDestroy",0
+"PartsWide",76
+"PartsCompact",233
+"MMappedFiles",2
+"MMappedFileBytes",498055184
+"AsynchronousReadWait",0
+"PendingAsyncInsert",0
+"KafkaConsumers",0
+"KafkaConsumersWithAssignment",0
+"KafkaProducers",0
+"KafkaLibrdkafkaThreads",0
+"KafkaBackgroundReads",0
+"KafkaConsumersInUse",0
+"KafkaWrites",0
+"KafkaAssignedPartitions",0
+"FilesystemCacheReadBuffers",0
+"CacheFileSegments",0
+"CacheDetachedFileSegments",0
+"FilesystemCacheSize",0
+"FilesystemCacheSizeLimit",0
+"FilesystemCacheElements",0
+"FilesystemCacheDownloadQueueElements",0
+"FilesystemCacheDelayedCleanupElements",0
+"FilesystemCacheHoldFileSegments",0
+"AsyncInsertCacheSize",0
+"S3Requests",0
+"KeeperAliveConnections",0
+"KeeperOutstandingRequets",0
+"ThreadsInOvercommitTracker",0
+"IOUringPendingEvents",0
+"IOUringInFlightEvents",0
+"ReadTaskRequestsSent",0
+"MergeTreeReadTaskRequestsSent",0
+"MergeTreeAllRangesAnnouncementsSent",0
+"CreatedTimersInQueryProfiler",166
+"ActiveTimersInQueryProfiler",2
+"RefreshableViews",0
+"RefreshingViews",0
+"StorageBufferFlushThreads",0
+"StorageBufferFlushThreadsActive",0
+"StorageBufferFlushThreadsScheduled",0
+"SharedMergeTreeThreads",0
+"SharedMergeTreeThreadsActive",0
+"SharedMergeTreeThreadsScheduled",0
+"SharedMergeTreeFetch",0
+"CacheWarmerBytesInProgress",0
+"DistrCacheOpenedConnections",0
+"DistrCacheUsedConnections",0
+"DistrCacheReadRequests",0
+"DistrCacheWriteRequests",0
+"DistrCacheServerConnections",0
+"StorageConnectionsStored",0
+"StorageConnectionsTotal",0
+"DiskConnectionsStored",0
+"DiskConnectionsTotal",0
+"HTTPConnectionsStored",0
+"HTTPConnectionsTotal",0
+"AddressesActive",0
+"AddressesBanned",0
+"FilteringMarksWithPrimaryKey",0
+"FilteringMarksWithSecondaryKeys",0
diff --git a/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_parts.csv b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_parts.csv
new file mode 100644
index 000000000..6ade3324a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/clickhouse/testdata/resp_system_parts.csv
@@ -0,0 +1,6 @@
+"database","table","bytes","parts","rows"
+"system","processors_profile_log",391629,5,20107
+"system","metric_log",18302533,6,162718
+"system","query_log",196403,5,761
+"system","asynchronous_metric_log",19113663,6,70377261
+"system","trace_log",28695023,8,1733076
diff --git a/src/go/plugin/go.d/modules/cockroachdb/README.md b/src/go/plugin/go.d/modules/cockroachdb/README.md
new file mode 120000
index 000000000..a8130f262
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/README.md
@@ -0,0 +1 @@
+integrations/cockroachdb.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/cockroachdb/charts.go b/src/go/plugin/go.d/modules/cockroachdb/charts.go
new file mode 100644
index 000000000..2adfc5f9d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/charts.go
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cockroachdb
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ Charts = module.Charts
+ Chart = module.Chart
+ Dims = module.Dims
+ Vars = module.Vars
+)
+
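+// charts is the base chart set for the collector; New() hands every job its own copy via charts.Copy().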
+var charts = Charts{
+ chartProcessCPUCombinedPercent.Copy(),
+ chartProcessCPUPercent.Copy(),
+ chartProcessCPUUsage.Copy(),
+ chartProcessMemory.Copy(),
+ chartProcessFDUsage.Copy(),
+ chartProcessUptime.Copy(),
+
+ chartHostDiskBandwidth.Copy(),
+ chartHostDiskOperations.Copy(),
+ chartHostDiskIOPS.Copy(),
+ chartHostNetworkBandwidth.Copy(),
+ chartHostNetworkPackets.Copy(),
+
+ chartLiveNodes.Copy(),
+ chartHeartBeats.Copy(),
+
+ chartCapacity.Copy(),
+ chartCapacityUsability.Copy(),
+ chartCapacityUsable.Copy(),
+ chartCapacityUsedPercentage.Copy(),
+
+ chartSQLConnections.Copy(),
+ chartSQLTraffic.Copy(),
+ chartSQLStatementsTotal.Copy(),
+ chartSQLErrors.Copy(),
+ chartSQLStartedDDLStatements.Copy(),
+ chartSQLExecutedDDLStatements.Copy(),
+ chartSQLStartedDMLStatements.Copy(),
+ chartSQLExecutedDMLStatements.Copy(),
+ chartSQLStartedTCLStatements.Copy(),
+ chartSQLExecutedTCLStatements.Copy(),
+ chartSQLActiveDistQueries.Copy(),
+ chartSQLActiveFlowsForDistQueries.Copy(),
+
+ chartUsedLiveData.Copy(),
+ chartLogicalData.Copy(),
+ chartLogicalDataCount.Copy(),
+
+ chartKVTransactions.Copy(),
+ chartKVTransactionsRestarts.Copy(),
+
+ chartRanges.Copy(),
+ chartRangesWithProblems.Copy(),
+ chartRangesEvents.Copy(),
+ chartRangesSnapshotEvents.Copy(),
+
+ chartRocksDBReadAmplification.Copy(),
+ chartRocksDBTableOperations.Copy(),
+ chartRocksDBCacheUsage.Copy(),
+ chartRocksDBCacheOperations.Copy(),
+ chartRocksDBCacheHitRate.Copy(),
+ chartRocksDBSSTables.Copy(),
+
+ chartReplicas.Copy(),
+ chartReplicasQuiescence.Copy(),
+ chartReplicasLeaders.Copy(),
+ chartReplicasLeaseHolder.Copy(),
+
+ chartQueuesProcessingFailures.Copy(),
+
+ chartRebalancingQueries.Copy(),
+ chartRebalancingWrites.Copy(),
+
+ chartTimeSeriesWrittenSamples.Copy(),
+ chartTimeSeriesWriteErrors.Copy(),
+ chartTimeSeriesWrittenBytes.Copy(),
+
+ chartSlowRequests.Copy(),
+
+ chartGoroutines.Copy(),
+ chartGoCgoHeapMemory.Copy(),
+ chartCGoCalls.Copy(),
+ chartGCRuns.Copy(),
+ chartGCPauseTime.Copy(),
+}
+
+// Process
+var (
+ chartProcessCPUCombinedPercent = Chart{
+ ID: "process_cpu_time_combined_percentage",
+ Title: "Combined CPU Time Percentage, Normalized 0-1 by Number of Cores",
+ Units: "percentage",
+ Fam: "process",
+ Ctx: "cockroachdb.process_cpu_time_combined_percentage",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricSysCPUCombinedPercentNormalized, Name: "used", Div: precision},
+ },
+ }
+ chartProcessCPUPercent = Chart{
+ ID: "process_cpu_time_percentage",
+ Title: "CPU Time Percentage",
+ Units: "percentage",
+ Fam: "process",
+ Ctx: "cockroachdb.process_cpu_time_percentage",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricSysCPUUserPercent, Name: "user", Div: precision},
+ {ID: metricSysCPUSysPercent, Name: "sys", Div: precision},
+ },
+ }
+ chartProcessCPUUsage = Chart{
+ ID: "process_cpu_time",
+ Title: "CPU Time",
+ Units: "ms",
+ Fam: "process",
+ Ctx: "cockroachdb.process_cpu_time",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricSysCPUUserNs, Name: "user", Algo: module.Incremental, Div: 1e6},
+ {ID: metricSysCPUSysNs, Name: "sys", Algo: module.Incremental, Div: 1e6},
+ },
+ }
+ chartProcessMemory = Chart{
+ ID: "process_memory",
+ Title: "Memory Usage",
+ Units: "KiB",
+ Fam: "process",
+ Ctx: "cockroachdb.process_memory",
+ Dims: Dims{
+ {ID: metricSysRSS, Name: "rss", Div: 1024},
+ },
+ }
+ chartProcessFDUsage = Chart{
+ ID: "process_file_descriptors",
+ Title: "File Descriptors",
+ Units: "fd",
+ Fam: "process",
+ Ctx: "cockroachdb.process_file_descriptors",
+ Dims: Dims{
+ {ID: metricSysFDOpen, Name: "open"},
+ },
+ Vars: Vars{
+ {ID: metricSysFDSoftLimit},
+ },
+ }
+ chartProcessUptime = Chart{
+ ID: "process_uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "process",
+ Ctx: "cockroachdb.process_uptime",
+ Dims: Dims{
+ {ID: metricSysUptime, Name: "uptime"},
+ },
+ }
+)
+
+// Host
+var (
+ chartHostDiskBandwidth = Chart{
+ ID: "host_disk_bandwidth",
+ Title: "Host Disk Cumulative Bandwidth",
+ Units: "KiB",
+ Fam: "host",
+ Ctx: "cockroachdb.host_disk_bandwidth",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricSysHostDiskReadBytes, Name: "read", Div: 1024, Algo: module.Incremental},
+ {ID: metricSysHostDiskWriteBytes, Name: "write", Div: -1024, Algo: module.Incremental},
+ },
+ }
+ chartHostDiskOperations = Chart{
+ ID: "host_disk_operations",
+ Title: "Host Disk Cumulative Operations",
+ Units: "operations",
+ Fam: "host",
+ Ctx: "cockroachdb.host_disk_operations",
+ Dims: Dims{
+ {ID: metricSysHostDiskReadCount, Name: "reads", Algo: module.Incremental},
+ {ID: metricSysHostDiskWriteCount, Name: "writes", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ chartHostDiskIOPS = Chart{
+ ID: "host_disk_iops_in_progress",
+ Title: "Host Disk Cumulative IOPS In Progress",
+ Units: "iops",
+ Fam: "host",
+ Ctx: "cockroachdb.host_disk_iops_in_progress",
+ Dims: Dims{
+ {ID: metricSysHostDiskIOPSInProgress, Name: "in progress"},
+ },
+ }
+ chartHostNetworkBandwidth = Chart{
+ ID: "host_network_bandwidth",
+ Title: "Host Network Cumulative Bandwidth",
+ Units: "kilobits",
+ Fam: "host",
+ Ctx: "cockroachdb.host_network_bandwidth",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricSysHostNetRecvBytes, Name: "received", Div: 1000, Algo: module.Incremental},
+ {ID: metricSysHostNetSendBytes, Name: "sent", Div: -1000, Algo: module.Incremental},
+ },
+ }
+ chartHostNetworkPackets = Chart{
+ ID: "host_network_packets",
+ Title: "Host Network Cumulative Packets",
+ Units: "packets",
+ Fam: "host",
+ Ctx: "cockroachdb.host_network_packets",
+ Dims: Dims{
+ {ID: metricSysHostNetRecvPackets, Name: "received", Algo: module.Incremental},
+ {ID: metricSysHostNetSendPackets, Name: "sent", Mul: -1, Algo: module.Incremental},
+ },
+ }
+)
+
+// Liveness
+var (
+ chartLiveNodes = Chart{
+ ID: "live_nodes",
+ Title: "Live Nodes in the Cluster",
+ Units: "nodes",
+ Fam: "liveness",
+ Ctx: "cockroachdb.live_nodes",
+ Dims: Dims{
+ {ID: metricLiveNodes, Name: "live nodes"},
+ },
+ }
+ chartHeartBeats = Chart{
+ ID: "node_liveness_heartbeats",
+ Title: "Node Liveness Heartbeats",
+ Units: "heartbeats",
+ Fam: "liveness",
+ Ctx: "cockroachdb.node_liveness_heartbeats",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricHeartBeatSuccesses, Name: "successful", Algo: module.Incremental},
+ {ID: metricHeartBeatFailures, Name: "failed", Algo: module.Incremental},
+ },
+ }
+)
+
+// Capacity
+var (
+ chartCapacity = Chart{
+ ID: "total_storage_capacity",
+ Title: "Total Storage Capacity",
+ Units: "KiB",
+ Fam: "capacity",
+ Ctx: "cockroachdb.total_storage_capacity",
+ Dims: Dims{
+ {ID: metricCapacity, Name: "total", Div: 1024},
+ },
+ }
+ chartCapacityUsability = Chart{
+ ID: "storage_capacity_usability",
+ Title: "Storage Capacity Usability",
+ Units: "KiB",
+ Fam: "capacity",
+ Ctx: "cockroachdb.storage_capacity_usability",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricCapacityUsable, Name: "usable", Div: 1024},
+ {ID: metricCapacityUnusable, Name: "unusable", Div: 1024},
+ },
+ }
+ chartCapacityUsable = Chart{
+ ID: "storage_usable_capacity",
+ Title: "Storage Usable Capacity",
+ Units: "KiB",
+ Fam: "capacity",
+ Ctx: "cockroachdb.storage_usable_capacity",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricCapacityAvailable, Name: "available", Div: 1024},
+ {ID: metricCapacityUsed, Name: "used", Div: 1024},
+ },
+ }
+ chartCapacityUsedPercentage = Chart{
+ ID: "storage_used_capacity_percentage",
+ Title: "Storage Used Capacity Utilization",
+ Units: "percentage",
+ Fam: "capacity",
+ Ctx: "cockroachdb.storage_used_capacity_percentage",
+ Dims: Dims{
+ {ID: metricCapacityUsedPercentage, Name: "total", Div: precision},
+ {ID: metricCapacityUsableUsedPercentage, Name: "usable", Div: precision},
+ },
+ }
+)
+
+// SQL
+var (
+ chartSQLConnections = Chart{
+ ID: "sql_connections",
+ Title: "Active SQL Connections",
+ Units: "connections",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_connections",
+ Dims: Dims{
+ {ID: metricSQLConnections, Name: "active"},
+ },
+ }
+ chartSQLTraffic = Chart{
+ ID: "sql_bandwidth",
+ Title: "SQL Bandwidth",
+ Units: "KiB",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_bandwidth",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricSQLBytesIn, Name: "received", Div: 1024, Algo: module.Incremental},
+ {ID: metricSQLBytesOut, Name: "sent", Div: -1024, Algo: module.Incremental},
+ },
+ }
+ chartSQLStatementsTotal = Chart{
+ ID: "sql_statements_total",
+ Title: "SQL Statements Total",
+ Units: "statements",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_statements_total",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricSQLQueryStartedCount, Name: "started", Algo: module.Incremental},
+ {ID: metricSQLQueryCount, Name: "executed", Algo: module.Incremental},
+ },
+ }
+ chartSQLErrors = Chart{
+ ID: "sql_errors",
+ Title: "SQL Statements and Transaction Errors",
+ Units: "errors",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_errors",
+ Dims: Dims{
+ {ID: metricSQLFailureCount, Name: "statement", Algo: module.Incremental},
+ {ID: metricSQLTXNAbortCount, Name: "transaction", Algo: module.Incremental},
+ },
+ }
+ chartSQLStartedDDLStatements = Chart{
+ ID: "sql_started_ddl_statements",
+ Title: "SQL Started DDL Statements",
+ Units: "statements",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_started_ddl_statements",
+ Dims: Dims{
+ {ID: metricSQLDDLStartedCount, Name: "DDL"},
+ },
+ }
+ chartSQLExecutedDDLStatements = Chart{
+ ID: "sql_executed_ddl_statements",
+ Title: "SQL Executed DDL Statements",
+ Units: "statements",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_executed_ddl_statements",
+ Dims: Dims{
+ {ID: metricSQLDDLCount, Name: "DDL"},
+ },
+ }
+ chartSQLStartedDMLStatements = Chart{
+ ID: "sql_started_dml_statements",
+ Title: "SQL Started DML Statements",
+ Units: "statements",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_started_dml_statements",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricSQLSelectStartedCount, Name: "SELECT", Algo: module.Incremental},
+ {ID: metricSQLUpdateStartedCount, Name: "UPDATE", Algo: module.Incremental},
+ {ID: metricSQLInsertStartedCount, Name: "INSERT", Algo: module.Incremental},
+ {ID: metricSQLDeleteStartedCount, Name: "DELETE", Algo: module.Incremental},
+ },
+ }
+ chartSQLExecutedDMLStatements = Chart{
+ ID: "sql_executed_dml_statements",
+ Title: "SQL Executed DML Statements",
+ Units: "statements",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_executed_dml_statements",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricSQLSelectCount, Name: "SELECT", Algo: module.Incremental},
+ {ID: metricSQLUpdateCount, Name: "UPDATE", Algo: module.Incremental},
+ {ID: metricSQLInsertCount, Name: "INSERT", Algo: module.Incremental},
+ {ID: metricSQLDeleteCount, Name: "DELETE", Algo: module.Incremental},
+ },
+ }
+ chartSQLStartedTCLStatements = Chart{
+ ID: "sql_started_tcl_statements",
+ Title: "SQL Started TCL Statements",
+ Units: "statements",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_started_tcl_statements",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricSQLTXNBeginStartedCount, Name: "BEGIN", Algo: module.Incremental},
+ {ID: metricSQLTXNCommitStartedCount, Name: "COMMIT", Algo: module.Incremental},
+ {ID: metricSQLTXNRollbackStartedCount, Name: "ROLLBACK", Algo: module.Incremental},
+ {ID: metricSQLSavepointStartedCount, Name: "SAVEPOINT", Algo: module.Incremental},
+ {ID: metricSQLRestartSavepointStartedCount, Name: "SAVEPOINT cockroach_restart", Algo: module.Incremental},
+ {ID: metricSQLRestartSavepointReleaseStartedCount, Name: "RELEASE SAVEPOINT cockroach_restart", Algo: module.Incremental},
+ {ID: metricSQLRestartSavepointRollbackStartedCount, Name: "ROLLBACK TO SAVEPOINT cockroach_restart", Algo: module.Incremental},
+ },
+ }
+ chartSQLExecutedTCLStatements = Chart{
+ ID: "sql_executed_tcl_statements",
+ Title: "SQL Executed TCL Statements",
+ Units: "statements",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_executed_tcl_statements",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricSQLTXNBeginCount, Name: "BEGIN", Algo: module.Incremental},
+ {ID: metricSQLTXNCommitCount, Name: "COMMIT", Algo: module.Incremental},
+ {ID: metricSQLTXNRollbackCount, Name: "ROLLBACK", Algo: module.Incremental},
+ {ID: metricSQLSavepointCount, Name: "SAVEPOINT", Algo: module.Incremental},
+ {ID: metricSQLRestartSavepointCount, Name: "SAVEPOINT cockroach_restart", Algo: module.Incremental},
+ {ID: metricSQLRestartSavepointReleaseCount, Name: "RELEASE SAVEPOINT cockroach_restart", Algo: module.Incremental},
+ {ID: metricSQLRestartSavepointRollbackCount, Name: "ROLLBACK TO SAVEPOINT cockroach_restart", Algo: module.Incremental},
+ },
+ }
+ chartSQLActiveDistQueries = Chart{
+ ID: "sql_active_distributed_queries",
+ Title: "Active Distributed SQL Queries",
+ Units: "queries",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_active_distributed_queries",
+ Dims: Dims{
+ {ID: metricSQLDistSQLQueriesActive, Name: "active"},
+ },
+ }
+ chartSQLActiveFlowsForDistQueries = Chart{
+ ID: "sql_distributed_flows",
+ Title: "Distributed SQL Flows",
+ Units: "flows",
+ Fam: "sql",
+ Ctx: "cockroachdb.sql_distributed_flows",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricSQLDistSQLFlowsActive, Name: "active"},
+ {ID: metricSQLDistSQLFlowsQueued, Name: "queued"},
+ },
+ }
+)
+
+// Storage
+var (
+ chartUsedLiveData = Chart{
+ ID: "live_bytes",
+ Title: "Used Live Data",
+ Units: "KiB",
+ Fam: "storage",
+ Ctx: "cockroachdb.live_bytes",
+ Dims: Dims{
+ {ID: metricLiveBytes, Name: "applications", Div: 1024},
+ {ID: metricSysBytes, Name: "system", Div: 1024},
+ },
+ }
+ chartLogicalData = Chart{
+ ID: "logical_data",
+ Title: "Logical Data",
+ Units: "KiB",
+ Fam: "storage",
+ Ctx: "cockroachdb.logical_data",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricKeyBytes, Name: "keys", Div: 1024},
+ {ID: metricValBytes, Name: "values", Div: 1024},
+ },
+ }
+ chartLogicalDataCount = Chart{
+ ID: "logical_data_count",
+ Title: "Logical Data Count",
+ Units: "num",
+ Fam: "storage",
+ Ctx: "cockroachdb.logical_data_count",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricKeyCount, Name: "keys"},
+ {ID: metricValCount, Name: "values"},
+ },
+ }
+)
+
+// KV Transactions
+var (
+ chartKVTransactions = Chart{
+ ID: "kv_transactions",
+ Title: "KV Transactions",
+ Units: "transactions",
+ Fam: "kv transactions",
+ Ctx: "cockroachdb.kv_transactions",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricTxnCommits, Name: "committed", Algo: module.Incremental},
+ {ID: metricTxnCommits1PC, Name: "fast-path_committed", Algo: module.Incremental},
+ {ID: metricTxnAborts, Name: "aborted", Algo: module.Incremental},
+ },
+ }
+ chartKVTransactionsRestarts = Chart{
+ ID: "kv_transaction_restarts",
+ Title: "KV Transaction Restarts",
+ Units: "restarts",
+ Fam: "kv transactions",
+ Ctx: "cockroachdb.kv_transaction_restarts",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricTxnRestartsWriteTooOld, Name: "write too old", Algo: module.Incremental},
+ {ID: metricTxnRestartsWriteTooOldMulti, Name: "write too old (multiple)", Algo: module.Incremental},
+ {ID: metricTxnRestartsSerializable, Name: "forwarded timestamp (iso=serializable)", Algo: module.Incremental},
+ {ID: metricTxnRestartsPossibleReplay, Name: "possible replay", Algo: module.Incremental},
+ {ID: metricTxnRestartsAsyncWriteFailure, Name: "async consensus failure", Algo: module.Incremental},
+ {ID: metricTxnRestartsReadWithInUncertainty, Name: "read within uncertainty interval", Algo: module.Incremental},
+ {ID: metricTxnRestartsTxnAborted, Name: "aborted", Algo: module.Incremental},
+ {ID: metricTxnRestartsTxnPush, Name: "push failure", Algo: module.Incremental},
+ {ID: metricTxnRestartsUnknown, Name: "unknown", Algo: module.Incremental},
+ },
+ }
+)
+
+// Ranges
+var (
+ chartRanges = Chart{
+ ID: "ranges",
+ Title: "Ranges",
+ Units: "ranges",
+ Fam: "ranges",
+ Ctx: "cockroachdb.ranges",
+ Dims: Dims{
+ {ID: metricRanges, Name: "ranges"},
+ },
+ }
+ chartRangesWithProblems = Chart{
+ ID: "ranges_replication_problem",
+ Title: "Ranges Replication Problems",
+ Units: "ranges",
+ Fam: "ranges",
+ Ctx: "cockroachdb.ranges_replication_problem",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricRangesUnavailable, Name: "unavailable"},
+ {ID: metricRangesUnderReplicated, Name: "under_replicated"},
+ {ID: metricRangesOverReplicated, Name: "over_replicated"},
+ },
+ }
+ chartRangesEvents = Chart{
+ ID: "range_events",
+ Title: "Range Events",
+ Units: "events",
+ Fam: "ranges",
+ Ctx: "cockroachdb.range_events",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricRangeSplits, Name: "split", Algo: module.Incremental},
+ {ID: metricRangeAdds, Name: "add", Algo: module.Incremental},
+ {ID: metricRangeRemoves, Name: "remove", Algo: module.Incremental},
+ {ID: metricRangeMerges, Name: "merge", Algo: module.Incremental},
+ },
+ }
+ chartRangesSnapshotEvents = Chart{
+ ID: "range_snapshot_events",
+ Title: "Range Snapshot Events",
+ Units: "events",
+ Fam: "ranges",
+ Ctx: "cockroachdb.range_snapshot_events",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricRangeSnapshotsGenerated, Name: "generated", Algo: module.Incremental},
+ {ID: metricRangeSnapshotsNormalApplied, Name: "applied (raft-initiated)", Algo: module.Incremental},
+ {ID: metricRangeSnapshotsLearnerApplied, Name: "applied (learner)", Algo: module.Incremental},
+ {ID: metricRangeSnapshotsPreemptiveApplied, Name: "applied (preemptive)", Algo: module.Incremental},
+ },
+ }
+)
+
+// RocksDB
+var (
+ chartRocksDBReadAmplification = Chart{
+ ID: "rocksdb_read_amplification",
+ Title: "RocksDB Read Amplification",
+ Units: "reads/query",
+ Fam: "rocksdb",
+ Ctx: "cockroachdb.rocksdb_read_amplification",
+ Dims: Dims{
+ {ID: metricRocksDBReadAmplification, Name: "reads"},
+ },
+ }
+ chartRocksDBTableOperations = Chart{
+ ID: "rocksdb_table_operations",
+ Title: "RocksDB Table Operations",
+ Units: "operations",
+ Fam: "rocksdb",
+ Ctx: "cockroachdb.rocksdb_table_operations",
+ Dims: Dims{
+ {ID: metricRocksDBCompactions, Name: "compactions", Algo: module.Incremental},
+ {ID: metricRocksDBFlushes, Name: "flushes", Algo: module.Incremental},
+ },
+ }
+ chartRocksDBCacheUsage = Chart{
+ ID: "rocksdb_cache_usage",
+ Title: "RocksDB Block Cache Usage",
+ Units: "KiB",
+ Fam: "rocksdb",
+ Ctx: "cockroachdb.rocksdb_cache_usage",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricRocksDBBlockCacheUsage, Name: "used", Div: 1024},
+ },
+ }
+ chartRocksDBCacheOperations = Chart{
+ ID: "rocksdb_cache_operations",
+ Title: "RocksDB Block Cache Operations",
+ Units: "operations",
+ Fam: "rocksdb",
+ Ctx: "cockroachdb.rocksdb_cache_operations",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricRocksDBBlockCacheHits, Name: "hits", Algo: module.Incremental},
+ {ID: metricRocksDBBlockCacheMisses, Name: "misses", Algo: module.Incremental},
+ },
+ }
+ chartRocksDBCacheHitRate = Chart{
+ ID: "rocksdb_cache_hit_rate",
+ Title: "RocksDB Block Cache Hit Rate",
+ Units: "percentage",
+ Fam: "rocksdb",
+ Ctx: "cockroachdb.rocksdb_cache_hit_rate",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricRocksDBBlockCacheHitRate, Name: "hit rate"},
+ },
+ }
+ chartRocksDBSSTables = Chart{
+ ID: "rocksdb_sstables",
+ Title: "RocksDB SSTables",
+ Units: "sstables",
+ Fam: "rocksdb",
+ Ctx: "cockroachdb.rocksdb_sstables",
+ Dims: Dims{
+ {ID: metricRocksDBNumSSTables, Name: "sstables"},
+ },
+ }
+)
+
+// Replicas
+var (
+ chartReplicas = Chart{
+ ID: "replicas",
+ Title: "Number of Replicas",
+ Units: "replicas",
+ Fam: "replication",
+ Ctx: "cockroachdb.replicas",
+ Dims: Dims{
+ {ID: metricReplicas, Name: "replicas"},
+ },
+ }
+ chartReplicasQuiescence = Chart{
+ ID: "replicas_quiescence",
+ Title: "Replicas Quiescence",
+ Units: "replicas",
+ Fam: "replication",
+ Ctx: "cockroachdb.replicas_quiescence",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricReplicasQuiescent, Name: "quiescent"},
+ {ID: metricReplicasActive, Name: "active"},
+ },
+ }
+ chartReplicasLeaders = Chart{
+ ID: "replicas_leaders",
+ Title: "Number of Raft Leaders",
+ Units: "replicas",
+ Fam: "replication",
+ Ctx: "cockroachdb.replicas_leaders",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricReplicasLeaders, Name: "leaders"},
+ {ID: metricReplicasLeadersNotLeaseholders, Name: "not leaseholders"},
+ },
+ }
+ chartReplicasLeaseHolder = Chart{
+ ID: "replicas_leaseholders",
+ Title: "Number of Leaseholders",
+ Units: "leaseholders",
+ Fam: "replication",
+ Ctx: "cockroachdb.replicas_leaseholders",
+ Dims: Dims{
+ {ID: metricReplicasLeaseholders, Name: "leaseholders"},
+ },
+ }
+)
+
+// Queues
+var (
+ chartQueuesProcessingFailures = Chart{
+ ID: "queue_processing_failures",
+ Title: "Queues Processing Failures",
+ Units: "failures",
+ Fam: "queues",
+ Ctx: "cockroachdb.queue_processing_failures",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricQueueGCProcessFailure, Name: "gc", Algo: module.Incremental},
+ {ID: metricQueueReplicaGCProcessFailure, Name: "replica gc", Algo: module.Incremental},
+ {ID: metricQueueReplicateProcessFailure, Name: "replication", Algo: module.Incremental},
+ {ID: metricQueueSplitProcessFailure, Name: "split", Algo: module.Incremental},
+ {ID: metricQueueConsistencyProcessFailure, Name: "consistency", Algo: module.Incremental},
+ {ID: metricQueueRaftLogProcessFailure, Name: "raft log", Algo: module.Incremental},
+ {ID: metricQueueRaftSnapshotProcessFailure, Name: "raft snapshot", Algo: module.Incremental},
+ {ID: metricQueueTSMaintenanceProcessFailure, Name: "time series maintenance", Algo: module.Incremental},
+ },
+ }
+)
+
+// Rebalancing
+var (
+ chartRebalancingQueries = Chart{
+ ID: "rebalancing_queries",
+ Title: "Rebalancing Average Queries",
+ Units: "queries/s",
+ Fam: "rebalancing",
+ Ctx: "cockroachdb.rebalancing_queries",
+ Dims: Dims{
+ {ID: metricRebalancingQueriesPerSecond, Name: "avg", Div: precision},
+ },
+ }
+ chartRebalancingWrites = Chart{
+ ID: "rebalancing_writes",
+ Title: "Rebalancing Average Writes",
+ Units: "writes/s",
+ Fam: "rebalancing",
+ Ctx: "cockroachdb.rebalancing_writes",
+ Dims: Dims{
+ {ID: metricRebalancingWritesPerSecond, Name: "avg", Div: precision},
+ },
+ }
+)
+
+// Time Series
+var (
+ chartTimeSeriesWrittenSamples = Chart{
+ ID: "timeseries_samples",
+ Title: "Time Series Written Samples",
+ Units: "samples",
+ Fam: "time series",
+ Ctx: "cockroachdb.timeseries_samples",
+ Dims: Dims{
+ {ID: metricTimeSeriesWriteSamples, Name: "written", Algo: module.Incremental},
+ },
+ }
+ chartTimeSeriesWriteErrors = Chart{
+ ID: "timeseries_write_errors",
+ Title: "Time Series Write Errors",
+ Units: "errors",
+ Fam: "time series",
+ Ctx: "cockroachdb.timeseries_write_errors",
+ Dims: Dims{
+ {ID: metricTimeSeriesWriteErrors, Name: "write", Algo: module.Incremental},
+ },
+ }
+ chartTimeSeriesWrittenBytes = Chart{
+ ID: "timeseries_write_bytes",
+ Title: "Time Series Bytes Written",
+ Units: "KiB",
+ Fam: "time series",
+ Ctx: "cockroachdb.timeseries_write_bytes",
+ Dims: Dims{
+ {ID: metricTimeSeriesWriteBytes, Name: "written", Algo: module.Incremental},
+ },
+ }
+)
+
+// Slow Requests
+var (
+ chartSlowRequests = Chart{
+ ID: "slow_requests",
+ Title: "Slow Requests",
+ Units: "requests",
+ Fam: "slow requests",
+ Ctx: "cockroachdb.slow_requests",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricRequestsSlowLatch, Name: "acquiring latches"},
+ {ID: metricRequestsSlowLease, Name: "acquiring lease"},
+ {ID: metricRequestsSlowRaft, Name: "in raft"},
+ },
+ }
+)
+
+// Go/Cgo
+var (
+ chartGoCgoHeapMemory = Chart{
+ ID: "code_heap_memory_usage",
+ Title: "Heap Memory Usage",
+ Units: "KiB",
+ Fam: "go/cgo",
+ Ctx: "cockroachdb.code_heap_memory_usage",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricSysGoAllocBytes, Name: "go", Div: 1024},
+ {ID: metricSysCGoAllocBytes, Name: "cgo", Div: 1024},
+ },
+ }
+ chartGoroutines = Chart{
+ ID: "goroutines_count",
+ Title: "Number of Goroutines",
+ Units: "goroutines",
+ Fam: "go/cgo",
+ Ctx: "cockroachdb.goroutines",
+ Dims: Dims{
+ {ID: metricSysGoroutines, Name: "goroutines"},
+ },
+ }
+ chartGCRuns = Chart{
+ ID: "gc_count",
+ Title: "GC Runs",
+ Units: "invokes",
+ Fam: "go/cgo",
+ Ctx: "cockroachdb.gc_count",
+ Dims: Dims{
+ {ID: metricSysGCCount, Name: "gc", Algo: module.Incremental},
+ },
+ }
+ chartGCPauseTime = Chart{
+ ID: "gc_pause",
+ Title: "GC Pause Time",
+ Units: "us",
+ Fam: "go/cgo",
+ Ctx: "cockroachdb.gc_pause",
+ Dims: Dims{
+ {ID: metricSysGCPauseNs, Name: "pause", Algo: module.Incremental, Div: 1e3},
+ },
+ }
+ chartCGoCalls = Chart{
+ ID: "cgo_calls",
+ Title: "Cgo Calls",
+ Units: "calls",
+ Fam: "go/cgo",
+ Ctx: "cockroachdb.cgo_calls",
+ Dims: Dims{
+ {ID: metricSysCGoCalls, Name: "cgo", Algo: module.Incremental},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/cockroachdb/cockroachdb.go b/src/go/plugin/go.d/modules/cockroachdb/cockroachdb.go
new file mode 100644
index 000000000..32d13fa78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/cockroachdb.go
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cockroachdb
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+// dbSamplingInterval mirrors CockroachDB's DefaultMetricsSampleInterval, which is hard coded to 10 seconds:
+// https://github.com/cockroachdb/cockroach/blob/d5ffbf76fb4c4ef802836529188e4628476879bd/pkg/server/config.go#L56-L58
+const dbSamplingInterval = 10
+
+func init() {
+ module.Register("cockroachdb", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: dbSamplingInterval,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
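+// New creates a CockroachDB collector instance with the default configuration.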
+func New() *CockroachDB {
+ return &CockroachDB{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8080/_status/vars",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type CockroachDB struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ prom prometheus.Prometheus
+}
+
+func (c *CockroachDB) Configuration() any {
+ return c.Config
+}
+
+func (c *CockroachDB) Init() error {
+ if err := c.validateConfig(); err != nil {
+ c.Errorf("error on validating config: %v", err)
+ return err
+ }
+
+ prom, err := c.initPrometheusClient()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+ c.prom = prom
+
+ if c.UpdateEvery < dbSamplingInterval {
+ c.Warningf("'update_every'(%d) is lower then CockroachDB default sampling interval (%d)",
+ c.UpdateEvery, dbSamplingInterval)
+ }
+
+ return nil
+}
+
+func (c *CockroachDB) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (c *CockroachDB) Charts() *Charts {
+ return c.charts
+}
+
+func (c *CockroachDB) Collect() map[string]int64 {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (c *CockroachDB) Cleanup() {
+ if c.prom != nil && c.prom.HTTPClient() != nil {
+ c.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/cockroachdb/cockroachdb_test.go b/src/go/plugin/go.d/modules/cockroachdb/cockroachdb_test.go
new file mode 100644
index 000000000..886b65fab
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/cockroachdb_test.go
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cockroachdb
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataExpectedMetrics, _ = os.ReadFile("testdata/metrics.txt")
+ dataUnexpectedMetrics, _ = os.ReadFile("testdata/non_cockroachdb.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataExpectedMetrics": dataExpectedMetrics,
+ "dataUnexpectedMetrics": dataUnexpectedMetrics,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestCockroachDB_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &CockroachDB{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNew(t *testing.T) {
+ assert.Implements(t, (*module.Module)(nil), New())
+}
+
+func TestCockroachDB_Init(t *testing.T) {
+ cdb := prepareCockroachDB()
+
+ assert.NoError(t, cdb.Init())
+}
+
+func TestCockroachDB_Init_ReturnsErrorIfConfigURLIsNotSet(t *testing.T) {
+ cdb := prepareCockroachDB()
+ cdb.URL = ""
+
+ assert.Error(t, cdb.Init())
+}
+
+func TestCockroachDB_Init_ReturnsErrorIfClientWrongTLSCA(t *testing.T) {
+ cdb := prepareCockroachDB()
+ cdb.Client.TLSConfig.TLSCA = "testdata/tls"
+
+ assert.Error(t, cdb.Init())
+}
+
+func TestCockroachDB_Check(t *testing.T) {
+ cdb, srv := prepareClientServer(t)
+ defer srv.Close()
+
+ assert.NoError(t, cdb.Check())
+}
+
+func TestCockroachDB_Check_ReturnsErrorIfConnectionRefused(t *testing.T) {
+ cdb := New()
+ cdb.URL = "http://127.0.0.1:38001/metrics"
+ require.NoError(t, cdb.Init())
+
+ assert.Error(t, cdb.Check())
+}
+
+func TestCockroachDB_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestCockroachDB_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestCockroachDB_Collect(t *testing.T) {
+ cdb, srv := prepareClientServer(t)
+ defer srv.Close()
+
+ expected := map[string]int64{
+ "capacity": 64202351837184,
+ "capacity_available": 40402062147584,
+ "capacity_unusable": 23800157791684,
+ "capacity_usable": 40402194045500,
+ "capacity_usable_used_percent": 0,
+ "capacity_used": 131897916,
+ "capacity_used_percent": 37070,
+ "keybytes": 6730852,
+ "keycount": 119307,
+ "livebytes": 81979227,
+ "liveness_heartbeatfailures": 2,
+ "liveness_heartbeatsuccesses": 2720,
+ "liveness_livenodes": 3,
+ "queue_consistency_process_failure": 0,
+ "queue_gc_process_failure": 0,
+ "queue_raftlog_process_failure": 0,
+ "queue_raftsnapshot_process_failure": 0,
+ "queue_replicagc_process_failure": 0,
+ "queue_replicate_process_failure": 0,
+ "queue_split_process_failure": 0,
+ "queue_tsmaintenance_process_failure": 0,
+ "range_adds": 0,
+ "range_merges": 0,
+ "range_removes": 0,
+ "range_snapshots_generated": 0,
+ "range_snapshots_learner_applied": 0,
+ "range_snapshots_normal_applied": 0,
+ "range_snapshots_preemptive_applied": 0,
+ "range_splits": 0,
+ "ranges": 34,
+ "ranges_overreplicated": 0,
+ "ranges_unavailable": 0,
+ "ranges_underreplicated": 0,
+ "rebalancing_queriespersecond": 801,
+ "rebalancing_writespersecond": 213023,
+ "replicas": 34,
+ "replicas_active": 0,
+ "replicas_leaders": 7,
+ "replicas_leaders_not_leaseholders": 0,
+ "replicas_leaseholders": 7,
+ "replicas_quiescent": 34,
+ "requests_slow_latch": 0,
+ "requests_slow_lease": 0,
+ "requests_slow_raft": 0,
+ "rocksdb_block_cache_hit_rate": 92104,
+ "rocksdb_block_cache_hits": 94825,
+ "rocksdb_block_cache_misses": 8129,
+ "rocksdb_block_cache_usage": 39397184,
+ "rocksdb_compactions": 7,
+ "rocksdb_flushes": 13,
+ "rocksdb_num_sstables": 8,
+ "rocksdb_read_amplification": 1,
+ "sql_bytesin": 0,
+ "sql_bytesout": 0,
+ "sql_conns": 0,
+ "sql_ddl_count": 0,
+ "sql_ddl_started_count": 0,
+ "sql_delete_count": 0,
+ "sql_delete_started_count": 0,
+ "sql_distsql_flows_active": 0,
+ "sql_distsql_flows_queued": 0,
+ "sql_distsql_queries_active": 0,
+ "sql_failure_count": 0,
+ "sql_insert_count": 0,
+ "sql_insert_started_count": 0,
+ "sql_misc_count": 0,
+ "sql_misc_started_count": 0,
+ "sql_query_count": 0,
+ "sql_query_started_count": 0,
+ "sql_restart_savepoint_count": 0,
+ "sql_restart_savepoint_release_count": 0,
+ "sql_restart_savepoint_release_started_count": 0,
+ "sql_restart_savepoint_rollback_count": 0,
+ "sql_restart_savepoint_rollback_started_count": 0,
+ "sql_restart_savepoint_started_count": 0,
+ "sql_savepoint_count": 0,
+ "sql_savepoint_started_count": 0,
+ "sql_select_count": 0,
+ "sql_select_started_count": 0,
+ "sql_txn_abort_count": 0,
+ "sql_txn_begin_count": 0,
+ "sql_txn_begin_started_count": 0,
+ "sql_txn_commit_count": 0,
+ "sql_txn_commit_started_count": 0,
+ "sql_txn_rollback_count": 0,
+ "sql_txn_rollback_started_count": 0,
+ "sql_update_count": 0,
+ "sql_update_started_count": 0,
+ "sys_cgo_allocbytes": 63363512,
+ "sys_cgocalls": 577778,
+ "sys_cpu_combined_percent_normalized": 851,
+ "sys_cpu_sys_ns": 154420000000,
+ "sys_cpu_sys_percent": 1403,
+ "sys_cpu_user_ns": 227620000000,
+ "sys_cpu_user_percent": 2004,
+ "sys_fd_open": 47,
+ "sys_fd_softlimit": 1048576,
+ "sys_gc_count": 279,
+ "sys_gc_pause_ns": 60700450,
+ "sys_go_allocbytes": 106576224,
+ "sys_goroutines": 235,
+ "sys_host_disk_iopsinprogress": 0,
+ "sys_host_disk_read_bytes": 43319296,
+ "sys_host_disk_read_count": 1176,
+ "sys_host_disk_write_bytes": 942080,
+ "sys_host_disk_write_count": 106,
+ "sys_host_net_recv_bytes": 234392325,
+ "sys_host_net_recv_packets": 593876,
+ "sys_host_net_send_bytes": 461746036,
+ "sys_host_net_send_packets": 644128,
+ "sys_rss": 314691584,
+ "sys_uptime": 12224,
+ "sysbytes": 13327,
+ "timeseries_write_bytes": 82810041,
+ "timeseries_write_errors": 0,
+ "timeseries_write_samples": 845784,
+ "txn_aborts": 1,
+ "txn_commits": 7472,
+ "txn_commits1PC": 3206,
+ "txn_restarts_asyncwritefailure": 0,
+ "txn_restarts_possiblereplay": 0,
+ "txn_restarts_readwithinuncertainty": 0,
+ "txn_restarts_serializable": 0,
+ "txn_restarts_txnaborted": 0,
+ "txn_restarts_txnpush": 0,
+ "txn_restarts_unknown": 0,
+ "txn_restarts_writetooold": 0,
+ "txn_restarts_writetoooldmulti": 0,
+ "valbytes": 75527718,
+ "valcount": 124081,
+ }
+
+ collected := cdb.Collect()
+ assert.Equal(t, expected, collected)
+ testCharts(t, cdb, collected)
+}
+
+func TestCockroachDB_Collect_ReturnsNilIfNotCockroachDBMetrics(t *testing.T) {
+ cdb, srv := prepareClientServerNotCockroachDBMetricResponse(t)
+ defer srv.Close()
+
+ assert.Nil(t, cdb.Collect())
+}
+
+func TestCockroachDB_Collect_ReturnsNilIfConnectionRefused(t *testing.T) {
+ cdb := prepareCockroachDB()
+ require.NoError(t, cdb.Init())
+
+ assert.Nil(t, cdb.Collect())
+}
+
+func TestCockroachDB_Collect_ReturnsNilIfReceiveInvalidResponse(t *testing.T) {
+ cdb, ts := prepareClientServerInvalidDataResponse(t)
+ defer ts.Close()
+
+ assert.Nil(t, cdb.Collect())
+}
+
+func TestCockroachDB_Collect_ReturnsNilIfReceiveResponse404(t *testing.T) {
+ cdb, ts := prepareClientServerResponse404(t)
+ defer ts.Close()
+
+ assert.Nil(t, cdb.Collect())
+}
+
+func testCharts(t *testing.T, cdb *CockroachDB, collected map[string]int64) {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, cdb, collected)
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, c *CockroachDB, collected map[string]int64) {
+ for _, chart := range *c.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareCockroachDB() *CockroachDB {
+ cdb := New()
+ cdb.URL = "http://127.0.0.1:38001/metrics"
+ return cdb
+}
+
+func prepareClientServer(t *testing.T) (*CockroachDB, *httptest.Server) {
+ t.Helper()
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataExpectedMetrics)
+ }))
+
+ cdb := New()
+ cdb.URL = ts.URL
+ require.NoError(t, cdb.Init())
+
+ return cdb, ts
+}
+
+func prepareClientServerNotCockroachDBMetricResponse(t *testing.T) (*CockroachDB, *httptest.Server) {
+ t.Helper()
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataUnexpectedMetrics)
+ }))
+
+ cdb := New()
+ cdb.URL = ts.URL
+ require.NoError(t, cdb.Init())
+
+ return cdb, ts
+}
+
+func prepareClientServerInvalidDataResponse(t *testing.T) (*CockroachDB, *httptest.Server) {
+ t.Helper()
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ cdb := New()
+ cdb.URL = ts.URL
+ require.NoError(t, cdb.Init())
+
+ return cdb, ts
+}
+
+func prepareClientServerResponse404(t *testing.T) (*CockroachDB, *httptest.Server) {
+ t.Helper()
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+
+ cdb := New()
+ cdb.URL = ts.URL
+ require.NoError(t, cdb.Init())
+ return cdb, ts
+}
diff --git a/src/go/plugin/go.d/modules/cockroachdb/collect.go b/src/go/plugin/go.d/modules/cockroachdb/collect.go
new file mode 100644
index 000000000..9ba255132
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/collect.go
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cockroachdb
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
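+// validCockroachDBMetrics reports whether the scraped series look like CockroachDB output,
+// using a CockroachDB-specific metric as a sanity check.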
+func validCockroachDBMetrics(scraped prometheus.Series) bool {
+ return scraped.FindByName("sql_restart_savepoint_count_internal").Len() > 0
+}
+
+func (c *CockroachDB) collect() (map[string]int64, error) {
+ scraped, err := c.prom.ScrapeSeries()
+ if err != nil {
+ return nil, err
+ }
+
+ if !validCockroachDBMetrics(scraped) {
+ return nil, errors.New("returned metrics aren't CockroachDB metrics")
+ }
+
+ mx := collectScraped(scraped, metrics)
+ calcUsableCapacity(mx)
+ calcUnusableCapacity(mx)
+ calcTotalCapacityUsedPercentage(mx)
+ calcUsableCapacityUsedPercentage(mx)
+ calcRocksDBCacheHitRate(mx)
+ calcActiveReplicas(mx)
+ calcCPUUsagePercent(mx)
+
+ return stm.ToMap(mx), nil
+}
+
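+// precision scales fractional values so they survive the conversion to integer metrics (see stm.ToMap).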
+const precision = 1000
+
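+// collectScraped sums every sample of each requested metric, scaling float-valued gauges by precision.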
+func collectScraped(scraped prometheus.Series, metricList []string) map[string]float64 {
+ mx := make(map[string]float64)
+ for _, name := range metricList {
+ for _, m := range scraped.FindByName(name) {
+ if isMetricFloat(name) {
+ mx[name] += m.Value * precision
+ } else {
+ mx[name] += m.Value
+ }
+ }
+ }
+ return mx
+}
+
+func calcUsableCapacity(mx map[string]float64) {
+ if !hasAll(mx, metricCapacityAvailable, metricCapacityUsed) {
+ return
+ }
+ available := mx[metricCapacityAvailable]
+ used := mx[metricCapacityUsed]
+
+ mx[metricCapacityUsable] = available + used
+}
+
+func calcUnusableCapacity(mx map[string]float64) {
+ if !hasAll(mx, metricCapacity, metricCapacityAvailable, metricCapacityUsed) {
+ return
+ }
+ total := mx[metricCapacity]
+ available := mx[metricCapacityAvailable]
+ used := mx[metricCapacityUsed]
+
+ mx[metricCapacityUnusable] = total - (available + used)
+}
+
+func calcTotalCapacityUsedPercentage(mx map[string]float64) {
+ if !hasAll(mx, metricCapacity, metricCapacityUnusable, metricCapacityUsed) {
+ return
+ }
+ total := mx[metricCapacity]
+ unusable := mx[metricCapacityUnusable]
+ used := mx[metricCapacityUsed]
+
+ if total == 0 {
+ mx[metricCapacityUsedPercentage] = 0
+ } else {
+ mx[metricCapacityUsedPercentage] = (unusable + used) / total * 100 * precision
+ }
+}
+
+func calcUsableCapacityUsedPercentage(mx map[string]float64) {
+ if !hasAll(mx, metricCapacityUsable, metricCapacityUsed) {
+ return
+ }
+ usable := mx[metricCapacityUsable]
+ used := mx[metricCapacityUsed]
+
+ if usable == 0 {
+ mx[metricCapacityUsableUsedPercentage] = 0
+ } else {
+ mx[metricCapacityUsableUsedPercentage] = used / usable * 100 * precision
+ }
+}
+
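+// calcRocksDBCacheHitRate computes the block cache hit rate as hits/(hits+misses), guarding against division by zero.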
+func calcRocksDBCacheHitRate(mx map[string]float64) {
+ if !hasAll(mx, metricRocksDBBlockCacheHits, metricRocksDBBlockCacheMisses) {
+ return
+ }
+ hits := mx[metricRocksDBBlockCacheHits]
+ misses := mx[metricRocksDBBlockCacheMisses]
+
+ if sum := hits + misses; sum == 0 {
+ mx[metricRocksDBBlockCacheHitRate] = 0
+ } else {
+ mx[metricRocksDBBlockCacheHitRate] = hits / sum * 100 * precision
+ }
+}
+
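+// calcActiveReplicas derives the number of active replicas as total replicas minus quiescent replicas.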
+func calcActiveReplicas(mx map[string]float64) {
+ if !hasAll(mx, metricReplicasQuiescent) {
+ return
+ }
+ total := mx[metricReplicas]
+ quiescent := mx[metricReplicasQuiescent]
+
+ mx[metricReplicasActive] = total - quiescent
+}
+
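+// calcCPUUsagePercent converts the fractional (0-1) CPU usage values reported by CockroachDB into percentages.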
+func calcCPUUsagePercent(mx map[string]float64) {
+ if hasAll(mx, metricSysCPUUserPercent) {
+ mx[metricSysCPUUserPercent] *= 100
+ }
+ if hasAll(mx, metricSysCPUSysPercent) {
+ mx[metricSysCPUSysPercent] *= 100
+ }
+ if hasAll(mx, metricSysCPUCombinedPercentNormalized) {
+ mx[metricSysCPUCombinedPercentNormalized] *= 100
+ }
+}
+
+func isMetricFloat(name string) bool {
+ // only float-valued metrics need scaling by precision (see NewGaugeFloat64 in the cockroach repo), e.g.:
+ // - CPUUserPercent, CPUSysPercent, CPUCombinedPercentNorm, AverageQueriesPerSecond, AverageWritesPerSecond
+ switch name {
+ case metricSysCPUUserPercent,
+ metricSysCPUSysPercent,
+ metricSysCPUCombinedPercentNormalized,
+ metricRebalancingQueriesPerSecond,
+ metricRebalancingWritesPerSecond:
+ return true
+ }
+ return false
+}
+
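+// hasAll recursively reports whether every given key is present in mx.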
+func hasAll(mx map[string]float64, key string, rest ...string) bool {
+ _, ok := mx[key]
+ if len(rest) == 0 {
+ return ok
+ }
+ return ok && hasAll(mx, rest[0], rest[1:]...)
+}
diff --git a/src/go/plugin/go.d/modules/cockroachdb/config_schema.json b/src/go/plugin/go.d/modules/cockroachdb/config_schema.json
new file mode 100644
index 000000000..51b94f6a6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "CockroachDB collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the CockroachDB [Prometheus endpoint](https://www.cockroachlabs.com/docs/stable/monitoring-and-alerting#prometheus-endpoint).",
+ "type": "string",
+ "default": "http://127.0.0.1:8080/_status/vars",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/cockroachdb/init.go b/src/go/plugin/go.d/modules/cockroachdb/init.go
new file mode 100644
index 000000000..7558e9952
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/init.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cockroachdb
+
+import (
+ "errors"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
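+// validateConfig checks that the mandatory settings (currently only the URL) are present.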
+func (c *CockroachDB) validateConfig() error {
+ if c.URL == "" {
+ return errors.New("URL is not set")
+ }
+ return nil
+}
+
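+// initPrometheusClient builds the Prometheus scraper from the configured HTTP client and request settings.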
+func (c *CockroachDB) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(c.Client)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.New(client, c.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/cockroachdb/integrations/cockroachdb.md b/src/go/plugin/go.d/modules/cockroachdb/integrations/cockroachdb.md
new file mode 100644
index 000000000..52e27a87e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/integrations/cockroachdb.md
@@ -0,0 +1,323 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/cockroachdb/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/cockroachdb/metadata.yaml"
+sidebar_label: "CockroachDB"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# CockroachDB
+
+
+<img src="https://netdata.cloud/img/cockroachdb.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: cockroachdb
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors CockroachDB servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per CockroachDB instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| cockroachdb.process_cpu_time_combined_percentage | used | percentage |
+| cockroachdb.process_cpu_time_percentage | user, sys | percentage |
+| cockroachdb.process_cpu_time | user, sys | ms |
+| cockroachdb.process_memory | rss | KiB |
+| cockroachdb.process_file_descriptors | open | fd |
+| cockroachdb.process_uptime | uptime | seconds |
+| cockroachdb.host_disk_bandwidth | read, write | KiB |
+| cockroachdb.host_disk_operations | reads, writes | operations |
+| cockroachdb.host_disk_iops_in_progress | in_progress | iops |
+| cockroachdb.host_network_bandwidth | received, sent | kilobits |
+| cockroachdb.host_network_packets | received, sent | packets |
+| cockroachdb.live_nodes | live_nodes | nodes |
+| cockroachdb.node_liveness_heartbeats | successful, failed | heartbeats |
+| cockroachdb.total_storage_capacity | total | KiB |
+| cockroachdb.storage_capacity_usability | usable, unusable | KiB |
+| cockroachdb.storage_usable_capacity | available, used | KiB |
+| cockroachdb.storage_used_capacity_percentage | total, usable | percentage |
+| cockroachdb.sql_connections | active | connections |
+| cockroachdb.sql_bandwidth | received, sent | KiB |
+| cockroachdb.sql_statements_total | started, executed | statements |
+| cockroachdb.sql_errors | statement, transaction | errors |
+| cockroachdb.sql_started_ddl_statements | ddl | statements |
+| cockroachdb.sql_executed_ddl_statements | ddl | statements |
+| cockroachdb.sql_started_dml_statements | select, update, delete, insert | statements |
+| cockroachdb.sql_executed_dml_statements | select, update, delete, insert | statements |
+| cockroachdb.sql_started_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |
+| cockroachdb.sql_executed_tcl_statements | begin, commit, rollback, savepoint, savepoint_cockroach_restart, release_savepoint_cockroach_restart, rollback_to_savepoint_cockroach_restart | statements |
+| cockroachdb.sql_active_distributed_queries | active | queries |
+| cockroachdb.sql_distributed_flows | active, queued | flows |
+| cockroachdb.live_bytes | applications, system | KiB |
+| cockroachdb.logical_data | keys, values | KiB |
+| cockroachdb.logical_data_count | keys, values | num |
+| cockroachdb.kv_transactions | committed, fast-path_committed, aborted | transactions |
+| cockroachdb.kv_transaction_restarts | write_too_old, write_too_old_multiple, forwarded_timestamp, possible_replay, async_consensus_failure, read_within_uncertainty_interval, aborted, push_failure, unknown | restarts |
+| cockroachdb.ranges | ranges | ranges |
+| cockroachdb.ranges_replication_problem | unavailable, under_replicated, over_replicated | ranges |
+| cockroachdb.range_events | split, add, remove, merge | events |
+| cockroachdb.range_snapshot_events | generated, applied_raft_initiated, applied_learner, applied_preemptive | events |
+| cockroachdb.rocksdb_read_amplification | reads | reads/query |
+| cockroachdb.rocksdb_table_operations | compactions, flushes | operations |
+| cockroachdb.rocksdb_cache_usage | used | KiB |
+| cockroachdb.rocksdb_cache_operations | hits, misses | operations |
+| cockroachdb.rocksdb_cache_hit_rate | hit_rate | percentage |
+| cockroachdb.rocksdb_sstables | sstables | sstables |
+| cockroachdb.replicas | replicas | replicas |
+| cockroachdb.replicas_quiescence | quiescent, active | replicas |
+| cockroachdb.replicas_leaders | leaders, not_leaseholders | replicas |
+| cockroachdb.replicas_leaseholders | leaseholders | leaseholders |
+| cockroachdb.queue_processing_failures | gc, replica_gc, replication, split, consistency, raft_log, raft_snapshot, time_series_maintenance | failures |
+| cockroachdb.rebalancing_queries | avg | queries/s |
+| cockroachdb.rebalancing_writes | avg | writes/s |
+| cockroachdb.timeseries_samples | written | samples |
+| cockroachdb.timeseries_write_errors | write | errors |
+| cockroachdb.timeseries_write_bytes | written | KiB |
+| cockroachdb.slow_requests | acquiring_latches, acquiring_lease, in_raft | requests |
+| cockroachdb.code_heap_memory_usage | go, cgo | KiB |
+| cockroachdb.goroutines | goroutines | goroutines |
+| cockroachdb.gc_count | gc | invokes |
+| cockroachdb.gc_pause | pause | us |
+| cockroachdb.cgo_calls | cgo | calls |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ cockroachdb_used_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage capacity utilization |
+| [ cockroachdb_used_usable_storage_capacity ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.storage_used_capacity_percentage | storage usable space utilization |
+| [ cockroachdb_unavailable_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than needed for quorum |
+| [ cockroachdb_underreplicated_ranges ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.ranges_replication_problem | number of ranges with fewer live replicas than the replication target |
+| [ cockroachdb_open_file_descriptors_limit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf) | cockroachdb.process_file_descriptors | open file descriptors utilization (against softlimit) |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/cockroachdb.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/cockroachdb.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8080/_status/vars | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
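+
+For reference, a minimal sketch of `go.d/cockroachdb.conf` with the global options set at the top level (illustrative values; per-job settings override the globals):
+
+```yaml
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:8080/_status/vars
+```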
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080/_status/vars
+
+```
+</details>
+
+##### HTTP authentication
+
+Local server with basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080/_status/vars
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+CockroachDB with HTTPS enabled and a self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:8080/_status/vars
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080/_status/vars
+
+ - name: remote
+ url: http://203.0.113.10:8080/_status/vars
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `cockroachdb` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m cockroachdb
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `cockroachdb` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep cockroachdb
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep cockroachdb /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep cockroachdb
+```
+
+
diff --git a/src/go/plugin/go.d/modules/cockroachdb/metadata.yaml b/src/go/plugin/go.d/modules/cockroachdb/metadata.yaml
new file mode 100644
index 000000000..522f200ac
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/metadata.yaml
@@ -0,0 +1,620 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-cockroachdb
+ plugin_name: go.d.plugin
+ module_name: cockroachdb
+ monitored_instance:
+ name: CockroachDB
+ link: https://www.cockroachlabs.com/
+ icon_filename: cockroachdb.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - cockroachdb
+ - databases
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors CockroachDB servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/cockroachdb.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8080/_status/vars
+ required: true
+ - name: timeout
+              description: HTTP request timeout (in seconds).
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080/_status/vars
+ - name: HTTP authentication
+ description: Local server with basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080/_status/vars
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+                description: CockroachDB with HTTPS enabled and a self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:8080/_status/vars
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080/_status/vars
+
+ - name: remote
+ url: http://203.0.113.10:8080/_status/vars
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: cockroachdb_used_storage_capacity
+ metric: cockroachdb.storage_used_capacity_percentage
+ info: storage capacity utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf
+ - name: cockroachdb_used_usable_storage_capacity
+ metric: cockroachdb.storage_used_capacity_percentage
+ info: storage usable space utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf
+ - name: cockroachdb_unavailable_ranges
+ metric: cockroachdb.ranges_replication_problem
+ info: number of ranges with fewer live replicas than needed for quorum
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf
+ - name: cockroachdb_underreplicated_ranges
+ metric: cockroachdb.ranges_replication_problem
+ info: number of ranges with fewer live replicas than the replication target
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf
+ - name: cockroachdb_open_file_descriptors_limit
+ metric: cockroachdb.process_file_descriptors
+ info: "open file descriptors utilization (against softlimit)"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/cockroachdb.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: cockroachdb.process_cpu_time_combined_percentage
+ description: Combined CPU Time Percentage, Normalized 0-1 by Number of Cores
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: cockroachdb.process_cpu_time_percentage
+ description: CPU Time Percentage
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: user
+ - name: sys
+ - name: cockroachdb.process_cpu_time
+ description: CPU Time
+ unit: ms
+ chart_type: stacked
+ dimensions:
+ - name: user
+ - name: sys
+ - name: cockroachdb.process_memory
+ description: Memory Usage
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: rss
+ - name: cockroachdb.process_file_descriptors
+ description: File Descriptors
+ unit: fd
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: cockroachdb.process_uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: cockroachdb.host_disk_bandwidth
+ description: Host Disk Cumulative Bandwidth
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: cockroachdb.host_disk_operations
+ description: Host Disk Cumulative Operations
+ unit: operations
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: cockroachdb.host_disk_iops_in_progress
+ description: Host Disk Cumulative IOPS In Progress
+ unit: iops
+ chart_type: line
+ dimensions:
+ - name: in_progress
+ - name: cockroachdb.host_network_bandwidth
+ description: Host Network Cumulative Bandwidth
+ unit: kilobits
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: cockroachdb.host_network_packets
+ description: Host Network Cumulative Packets
+ unit: packets
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: cockroachdb.live_nodes
+ description: Live Nodes in the Cluster
+ unit: nodes
+ chart_type: line
+ dimensions:
+ - name: live_nodes
+ - name: cockroachdb.node_liveness_heartbeats
+ description: Node Liveness Heartbeats
+ unit: heartbeats
+ chart_type: stacked
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: cockroachdb.total_storage_capacity
+ description: Total Storage Capacity
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: cockroachdb.storage_capacity_usability
+ description: Storage Capacity Usability
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: usable
+ - name: unusable
+ - name: cockroachdb.storage_usable_capacity
+ description: Storage Usable Capacity
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: cockroachdb.storage_used_capacity_percentage
+ description: Storage Used Capacity Utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: usable
+ - name: cockroachdb.sql_connections
+ description: Active SQL Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: cockroachdb.sql_bandwidth
+ description: SQL Bandwidth
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: cockroachdb.sql_statements_total
+ description: SQL Statements Total
+ unit: statements
+ chart_type: area
+ dimensions:
+ - name: started
+ - name: executed
+ - name: cockroachdb.sql_errors
+ description: SQL Statements and Transaction Errors
+ unit: errors
+ chart_type: line
+ dimensions:
+ - name: statement
+ - name: transaction
+ - name: cockroachdb.sql_started_ddl_statements
+ description: SQL Started DDL Statements
+ unit: statements
+ chart_type: line
+ dimensions:
+ - name: ddl
+ - name: cockroachdb.sql_executed_ddl_statements
+ description: SQL Executed DDL Statements
+ unit: statements
+ chart_type: line
+ dimensions:
+ - name: ddl
+ - name: cockroachdb.sql_started_dml_statements
+ description: SQL Started DML Statements
+ unit: statements
+ chart_type: stacked
+ dimensions:
+ - name: select
+ - name: update
+ - name: delete
+ - name: insert
+ - name: cockroachdb.sql_executed_dml_statements
+ description: SQL Executed DML Statements
+ unit: statements
+ chart_type: stacked
+ dimensions:
+ - name: select
+ - name: update
+ - name: delete
+ - name: insert
+ - name: cockroachdb.sql_started_tcl_statements
+ description: SQL Started TCL Statements
+ unit: statements
+ chart_type: stacked
+ dimensions:
+ - name: begin
+ - name: commit
+ - name: rollback
+ - name: savepoint
+ - name: savepoint_cockroach_restart
+ - name: release_savepoint_cockroach_restart
+ - name: rollback_to_savepoint_cockroach_restart
+ - name: cockroachdb.sql_executed_tcl_statements
+ description: SQL Executed TCL Statements
+ unit: statements
+ chart_type: stacked
+ dimensions:
+ - name: begin
+ - name: commit
+ - name: rollback
+ - name: savepoint
+ - name: savepoint_cockroach_restart
+ - name: release_savepoint_cockroach_restart
+ - name: rollback_to_savepoint_cockroach_restart
+ - name: cockroachdb.sql_active_distributed_queries
+ description: Active Distributed SQL Queries
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: cockroachdb.sql_distributed_flows
+ description: Distributed SQL Flows
+ unit: flows
+ chart_type: stacked
+ dimensions:
+ - name: active
+ - name: queued
+ - name: cockroachdb.live_bytes
+ description: Used Live Data
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: applications
+ - name: system
+ - name: cockroachdb.logical_data
+ description: Logical Data
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: keys
+ - name: values
+ - name: cockroachdb.logical_data_count
+ description: Logical Data Count
+ unit: num
+ chart_type: stacked
+ dimensions:
+ - name: keys
+ - name: values
+ - name: cockroachdb.kv_transactions
+ description: KV Transactions
+ unit: transactions
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: fast-path_committed
+ - name: aborted
+ - name: cockroachdb.kv_transaction_restarts
+ description: KV Transaction Restarts
+ unit: restarts
+ chart_type: stacked
+ dimensions:
+ - name: write_too_old
+ - name: write_too_old_multiple
+ - name: forwarded_timestamp
+ - name: possible_reply
+ - name: async_consensus_failure
+ - name: read_within_uncertainty_interval
+ - name: aborted
+ - name: push_failure
+ - name: unknown
+ - name: cockroachdb.ranges
+ description: Ranges
+ unit: ranges
+ chart_type: line
+ dimensions:
+ - name: ranges
+ - name: cockroachdb.ranges_replication_problem
+ description: Ranges Replication Problems
+ unit: ranges
+ chart_type: stacked
+ dimensions:
+ - name: unavailable
+ - name: under_replicated
+ - name: over_replicated
+ - name: cockroachdb.range_events
+ description: Range Events
+ unit: events
+ chart_type: stacked
+ dimensions:
+ - name: split
+ - name: add
+ - name: remove
+ - name: merge
+ - name: cockroachdb.range_snapshot_events
+ description: Range Snapshot Events
+ unit: events
+ chart_type: stacked
+ dimensions:
+ - name: generated
+ - name: applied_raft_initiated
+ - name: applied_learner
+ - name: applied_preemptive
+ - name: cockroachdb.rocksdb_read_amplification
+ description: RocksDB Read Amplification
+ unit: reads/query
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: cockroachdb.rocksdb_table_operations
+ description: RocksDB Table Operations
+ unit: operations
+ chart_type: line
+ dimensions:
+ - name: compactions
+ - name: flushes
+ - name: cockroachdb.rocksdb_cache_usage
+ description: RocksDB Block Cache Usage
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: cockroachdb.rocksdb_cache_operations
+ description: RocksDB Block Cache Operations
+ unit: operations
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: cockroachdb.rocksdb_cache_hit_rate
+ description: RocksDB Block Cache Hit Rate
+ unit: percentage
+ chart_type: area
+ dimensions:
+ - name: hit_rate
+ - name: cockroachdb.rocksdb_sstables
+ description: RocksDB SSTables
+ unit: sstables
+ chart_type: line
+ dimensions:
+ - name: sstables
+ - name: cockroachdb.replicas
+ description: Number of Replicas
+ unit: replicas
+ chart_type: line
+ dimensions:
+ - name: replicas
+ - name: cockroachdb.replicas_quiescence
+ description: Replicas Quiescence
+ unit: replicas
+ chart_type: stacked
+ dimensions:
+ - name: quiescent
+ - name: active
+ - name: cockroachdb.replicas_leaders
+ description: Number of Raft Leaders
+ unit: replicas
+ chart_type: area
+ dimensions:
+ - name: leaders
+ - name: not_leaseholders
+ - name: cockroachdb.replicas_leaseholders
+ description: Number of Leaseholders
+ unit: leaseholders
+ chart_type: line
+ dimensions:
+ - name: leaseholders
+ - name: cockroachdb.queue_processing_failures
+ description: Queues Processing Failures
+ unit: failures
+ chart_type: stacked
+ dimensions:
+ - name: gc
+ - name: replica_gc
+ - name: replication
+ - name: split
+ - name: consistency
+ - name: raft_log
+ - name: raft_snapshot
+ - name: time_series_maintenance
+ - name: cockroachdb.rebalancing_queries
+ description: Rebalancing Average Queries
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: avg
+ - name: cockroachdb.rebalancing_writes
+ description: Rebalancing Average Writes
+ unit: writes/s
+ chart_type: line
+ dimensions:
+ - name: avg
+ - name: cockroachdb.timeseries_samples
+ description: Time Series Written Samples
+ unit: samples
+ chart_type: line
+ dimensions:
+ - name: written
+ - name: cockroachdb.timeseries_write_errors
+ description: Time Series Write Errors
+ unit: errors
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: cockroachdb.timeseries_write_bytes
+ description: Time Series Bytes Written
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: written
+ - name: cockroachdb.slow_requests
+ description: Slow Requests
+ unit: requests
+ chart_type: stacked
+ dimensions:
+ - name: acquiring_latches
+ - name: acquiring_lease
+ - name: in_raft
+ - name: cockroachdb.code_heap_memory_usage
+ description: Heap Memory Usage
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: go
+ - name: cgo
+ - name: cockroachdb.goroutines
+ description: Number of Goroutines
+ unit: goroutines
+ chart_type: line
+ dimensions:
+ - name: goroutines
+ - name: cockroachdb.gc_count
+ description: GC Runs
+ unit: invokes
+ chart_type: line
+ dimensions:
+ - name: gc
+ - name: cockroachdb.gc_pause
+ description: GC Pause Time
+ unit: us
+ chart_type: line
+ dimensions:
+ - name: pause
+ - name: cockroachdb.cgo_calls
+ description: Cgo Calls
+ unit: calls
+ chart_type: line
+ dimensions:
+ - name: cgo
diff --git a/src/go/plugin/go.d/modules/cockroachdb/metrics.go b/src/go/plugin/go.d/modules/cockroachdb/metrics.go
new file mode 100644
index 000000000..fabd25499
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/metrics.go
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cockroachdb
+
+// Architecture Overview
+// https://www.cockroachlabs.com/docs/stable/architecture/overview.html
+
+// Web Dashboards
+// https://github.com/cockroachdb/cockroach/tree/master/pkg/ui/src/views/cluster/containers/nodeGraphs/dashboards
+
+// Process
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/server/status/runtime.go
+ metricSysCPUUserNs = "sys_cpu_user_ns"
+ metricSysCPUSysNs = "sys_cpu_sys_ns"
+ metricSysCPUUserPercent = "sys_cpu_user_percent"
+ metricSysCPUSysPercent = "sys_cpu_sys_percent"
+ metricSysCPUCombinedPercentNormalized = "sys_cpu_combined_percent_normalized"
+ metricSysRSS = "sys_rss"
+ metricSysFDOpen = "sys_fd_open"
+ metricSysFDSoftLimit = "sys_fd_softlimit"
+ metricSysUptime = "sys_uptime"
+)
+
+// Host Disk/Network Cumulative
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/server/status/runtime.go
+ metricSysHostDiskReadBytes = "sys_host_disk_read_bytes"
+ metricSysHostDiskWriteBytes = "sys_host_disk_write_bytes"
+ metricSysHostDiskReadCount = "sys_host_disk_read_count"
+ metricSysHostDiskWriteCount = "sys_host_disk_write_count"
+ metricSysHostDiskIOPSInProgress = "sys_host_disk_iopsinprogress"
+ metricSysHostNetSendBytes = "sys_host_net_send_bytes"
+ metricSysHostNetRecvBytes = "sys_host_net_recv_bytes"
+ metricSysHostNetSendPackets = "sys_host_net_send_packets"
+ metricSysHostNetRecvPackets = "sys_host_net_recv_packets"
+)
+
+// Liveness
+const (
+	// https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/node_liveness.go
+ metricLiveNodes = "liveness_livenodes"
+ metricHeartBeatSuccesses = "liveness_heartbeatsuccesses"
+ metricHeartBeatFailures = "liveness_heartbeatfailures"
+)
+
+// Capacity
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go
+ metricCapacity = "capacity"
+ metricCapacityAvailable = "capacity_available"
+ metricCapacityUsed = "capacity_used"
+ //metricCapacityReserved = "capacity_reserved"
+)
+
+// SQL
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/pgwire/server.go
+ metricSQLConnections = "sql_conns"
+ metricSQLBytesIn = "sql_bytesin"
+ metricSQLBytesOut = "sql_bytesout"
+
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/exec_util.go
+ // Started Statements
+ metricSQLQueryStartedCount = "sql_query_started_count" // Cumulative (Statements + Transaction Statements)
+ metricSQLSelectStartedCount = "sql_select_started_count"
+ metricSQLUpdateStartedCount = "sql_update_started_count"
+ metricSQLInsertStartedCount = "sql_insert_started_count"
+ metricSQLDeleteStartedCount = "sql_delete_started_count"
+ metricSQLSavepointStartedCount = "sql_savepoint_started_count"
+ metricSQLRestartSavepointStartedCount = "sql_restart_savepoint_started_count"
+ metricSQLRestartSavepointReleaseStartedCount = "sql_restart_savepoint_release_started_count"
+ metricSQLRestartSavepointRollbackStartedCount = "sql_restart_savepoint_rollback_started_count"
+ metricSQLDDLStartedCount = "sql_ddl_started_count"
+ metricSQLMiscStartedCount = "sql_misc_started_count"
+ // Started Transaction Statements
+ metricSQLTXNBeginStartedCount = "sql_txn_begin_started_count"
+ metricSQLTXNCommitStartedCount = "sql_txn_commit_started_count"
+ metricSQLTXNRollbackStartedCount = "sql_txn_rollback_started_count"
+
+ // Executed Statements
+ metricSQLQueryCount = "sql_query_count" // Cumulative (Statements + Transaction Statements)
+ metricSQLSelectCount = "sql_select_count"
+ metricSQLUpdateCount = "sql_update_count"
+ metricSQLInsertCount = "sql_insert_count"
+ metricSQLDeleteCount = "sql_delete_count"
+ metricSQLSavepointCount = "sql_savepoint_count"
+ metricSQLRestartSavepointCount = "sql_restart_savepoint_count"
+ metricSQLRestartSavepointReleaseCount = "sql_restart_savepoint_release_count"
+ metricSQLRestartSavepointRollbackCount = "sql_restart_savepoint_rollback_count"
+ metricSQLDDLCount = "sql_ddl_count"
+ metricSQLMiscCount = "sql_misc_count"
+ // Executed Transaction statements
+ metricSQLTXNBeginCount = "sql_txn_begin_count"
+ metricSQLTXNCommitCount = "sql_txn_commit_count"
+ metricSQLTXNRollbackCount = "sql_txn_rollback_count"
+
+ // Statements Resulted In An Error
+ metricSQLFailureCount = "sql_failure_count"
+ // Transaction Resulted In Abort Errors
+ metricSQLTXNAbortCount = "sql_txn_abort_count"
+
+ // Distributed SQL
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/sql/execinfra/metrics.go
+ metricSQLDistSQLQueriesActive = "sql_distsql_queries_active"
+ metricSQLDistSQLFlowsActive = "sql_distsql_flows_active"
+ metricSQLDistSQLFlowsQueued = "sql_distsql_flows_queued"
+)
+
+// Storage
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go
+ metricLiveBytes = "livebytes"
+ metricSysBytes = "sysbytes"
+ metricKeyBytes = "keybytes"
+ metricValBytes = "valbytes"
+ metricKeyCount = "keycount"
+ metricValCount = "valcount"
+)
+
+// KV Transactions
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/kv/txn_metrics.go
+ metricTxnCommits = "txn_commits"
+ metricTxnCommits1PC = "txn_commits1PC"
+ metricTxnAborts = "txn_aborts"
+ metricTxnRestartsWriteTooOld = "txn_restarts_writetooold"
+ metricTxnRestartsWriteTooOldMulti = "txn_restarts_writetoooldmulti"
+ metricTxnRestartsSerializable = "txn_restarts_serializable"
+ metricTxnRestartsPossibleReplay = "txn_restarts_possiblereplay"
+ metricTxnRestartsAsyncWriteFailure = "txn_restarts_asyncwritefailure"
+ metricTxnRestartsReadWithInUncertainty = "txn_restarts_readwithinuncertainty"
+ metricTxnRestartsTxnAborted = "txn_restarts_txnaborted"
+ metricTxnRestartsTxnPush = "txn_restarts_txnpush"
+ metricTxnRestartsUnknown = "txn_restarts_unknown"
+)
+
+// Ranges
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go
+ metricRanges = "ranges"
+ metricRangesUnavailable = "ranges_unavailable"
+ metricRangesUnderReplicated = "ranges_underreplicated"
+ metricRangesOverReplicated = "ranges_overreplicated"
+ // Range Events Metrics
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go
+ metricRangeSplits = "range_splits"
+ metricRangeAdds = "range_adds"
+ metricRangeRemoves = "range_removes"
+ metricRangeMerges = "range_merges"
+ metricRangeSnapshotsGenerated = "range_snapshots_generated"
+ metricRangeSnapshotsPreemptiveApplied = "range_snapshots_preemptive_applied"
+ metricRangeSnapshotsLearnerApplied = "range_snapshots_learner_applied"
+ metricRangeSnapshotsNormalApplied = "range_snapshots_normal_applied"
+)
+
+// RocksDB
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go
+ metricRocksDBReadAmplification = "rocksdb_read_amplification"
+ metricRocksDBNumSSTables = "rocksdb_num_sstables"
+ metricRocksDBBlockCacheUsage = "rocksdb_block_cache_usage"
+ metricRocksDBBlockCacheHits = "rocksdb_block_cache_hits"
+ metricRocksDBBlockCacheMisses = "rocksdb_block_cache_misses"
+ metricRocksDBCompactions = "rocksdb_compactions"
+ metricRocksDBFlushes = "rocksdb_flushes"
+)
+
+// Replication
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go
+ metricReplicas = "replicas"
+ // metricReplicasReserved = "replicas_reserved"
+ metricReplicasLeaders = "replicas_leaders"
+ metricReplicasLeadersNotLeaseholders = "replicas_leaders_not_leaseholders"
+ metricReplicasLeaseholders = "replicas_leaseholders"
+ metricReplicasQuiescent = "replicas_quiescent"
+)
+
+// Queues
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go
+ metricQueueGCProcessFailure = "queue_gc_process_failure"
+ metricQueueReplicaGCProcessFailure = "queue_replicagc_process_failure"
+ metricQueueReplicateProcessFailure = "queue_replicate_process_failure"
+ metricQueueSplitProcessFailure = "queue_split_process_failure"
+ metricQueueConsistencyProcessFailure = "queue_consistency_process_failure"
+ metricQueueRaftLogProcessFailure = "queue_raftlog_process_failure"
+ metricQueueRaftSnapshotProcessFailure = "queue_raftsnapshot_process_failure"
+ metricQueueTSMaintenanceProcessFailure = "queue_tsmaintenance_process_failure"
+)
+
+// Rebalancing
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go
+ metricRebalancingQueriesPerSecond = "rebalancing_queriespersecond"
+ metricRebalancingWritesPerSecond = "rebalancing_writespersecond"
+)
+
+// Slow Requests
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/storage/metrics.go
+ metricRequestsSlowLease = "requests_slow_lease"
+ metricRequestsSlowLatch = "requests_slow_latch"
+ metricRequestsSlowRaft = "requests_slow_raft"
+)
+
+// Time Series
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/ts/metrics.go
+ metricTimeSeriesWriteSamples = "timeseries_write_samples"
+ metricTimeSeriesWriteErrors = "timeseries_write_errors"
+ metricTimeSeriesWriteBytes = "timeseries_write_bytes"
+)
+
+// Go/Cgo
+const (
+ // https://github.com/cockroachdb/cockroach/blob/master/pkg/server/status/runtime.go
+ metricSysGoAllocBytes = "sys_go_allocbytes"
+ metricSysCGoAllocBytes = "sys_cgo_allocbytes"
+ metricSysCGoCalls = "sys_cgocalls"
+ metricSysGoroutines = "sys_goroutines"
+ metricSysGCCount = "sys_gc_count"
+ metricSysGCPauseNs = "sys_gc_pause_ns"
+)
+
+const (
+ // Calculated Metrics
+ metricCapacityUsable = "capacity_usable"
+ metricCapacityUnusable = "capacity_unusable"
+ metricCapacityUsedPercentage = "capacity_used_percent"
+ metricCapacityUsableUsedPercentage = "capacity_usable_used_percent"
+ metricRocksDBBlockCacheHitRate = "rocksdb_block_cache_hit_rate"
+ metricReplicasActive = "replicas_active"
+)
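+
+// Editorial sketch, not part of the upstream file: one plausible way the
+// calculated capacity metrics above could be derived from the scraped values.
+// The actual derivation lives elsewhere in this module; the function name and
+// exact formulas here are illustrative assumptions only.
+func calcCapacityMetricsSketch(mx map[string]float64) {
+	total := mx[metricCapacity]
+	used := mx[metricCapacityUsed]
+	usable := mx[metricCapacityAvailable] + used
+	mx[metricCapacityUsable] = usable
+	mx[metricCapacityUnusable] = total - usable
+	if total > 0 {
+		mx[metricCapacityUsedPercentage] = used / total * 100
+	}
+	if usable > 0 {
+		mx[metricCapacityUsableUsedPercentage] = used / usable * 100
+	}
+}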
+
+var metrics = []string{
+ metricSysCPUUserNs,
+ metricSysCPUSysNs,
+ metricSysCPUUserPercent,
+ metricSysCPUSysPercent,
+ metricSysCPUCombinedPercentNormalized,
+ metricSysRSS,
+ metricSysFDOpen,
+ metricSysFDSoftLimit,
+ metricSysUptime,
+
+ metricSysHostDiskReadBytes,
+ metricSysHostDiskWriteBytes,
+ metricSysHostDiskReadCount,
+ metricSysHostDiskWriteCount,
+ metricSysHostDiskIOPSInProgress,
+ metricSysHostNetSendBytes,
+ metricSysHostNetRecvBytes,
+ metricSysHostNetSendPackets,
+ metricSysHostNetRecvPackets,
+
+ metricLiveNodes,
+ metricHeartBeatSuccesses,
+ metricHeartBeatFailures,
+
+ metricCapacity,
+ metricCapacityAvailable,
+ metricCapacityUsed,
+
+ metricSQLConnections,
+ metricSQLBytesIn,
+ metricSQLBytesOut,
+ metricSQLQueryStartedCount,
+ metricSQLSelectStartedCount,
+ metricSQLUpdateStartedCount,
+ metricSQLInsertStartedCount,
+ metricSQLDeleteStartedCount,
+ metricSQLSavepointStartedCount,
+ metricSQLRestartSavepointStartedCount,
+ metricSQLRestartSavepointReleaseStartedCount,
+ metricSQLRestartSavepointRollbackStartedCount,
+ metricSQLDDLStartedCount,
+ metricSQLMiscStartedCount,
+ metricSQLTXNBeginStartedCount,
+ metricSQLTXNCommitStartedCount,
+ metricSQLTXNRollbackStartedCount,
+ metricSQLQueryCount,
+ metricSQLSelectCount,
+ metricSQLUpdateCount,
+ metricSQLInsertCount,
+ metricSQLDeleteCount,
+ metricSQLSavepointCount,
+ metricSQLRestartSavepointCount,
+ metricSQLRestartSavepointReleaseCount,
+ metricSQLRestartSavepointRollbackCount,
+ metricSQLDDLCount,
+ metricSQLMiscCount,
+ metricSQLTXNBeginCount,
+ metricSQLTXNCommitCount,
+ metricSQLTXNRollbackCount,
+ metricSQLFailureCount,
+ metricSQLTXNAbortCount,
+ metricSQLDistSQLQueriesActive,
+ metricSQLDistSQLFlowsActive,
+ metricSQLDistSQLFlowsQueued,
+
+ metricLiveBytes,
+ metricSysBytes,
+ metricKeyBytes,
+ metricValBytes,
+ metricKeyCount,
+ metricValCount,
+
+ metricTxnCommits,
+ metricTxnCommits1PC,
+ metricTxnAborts,
+ metricTxnRestartsWriteTooOld,
+ metricTxnRestartsWriteTooOldMulti,
+ metricTxnRestartsSerializable,
+ metricTxnRestartsPossibleReplay,
+ metricTxnRestartsAsyncWriteFailure,
+ metricTxnRestartsReadWithInUncertainty,
+ metricTxnRestartsTxnAborted,
+ metricTxnRestartsTxnPush,
+ metricTxnRestartsUnknown,
+
+ metricRanges,
+ metricRangesUnavailable,
+ metricRangesUnderReplicated,
+ metricRangesOverReplicated,
+ metricRangeSplits,
+ metricRangeAdds,
+ metricRangeRemoves,
+ metricRangeMerges,
+ metricRangeSnapshotsGenerated,
+ metricRangeSnapshotsPreemptiveApplied,
+ metricRangeSnapshotsLearnerApplied,
+ metricRangeSnapshotsNormalApplied,
+
+ metricRocksDBReadAmplification,
+ metricRocksDBNumSSTables,
+ metricRocksDBBlockCacheUsage,
+ metricRocksDBBlockCacheHits,
+ metricRocksDBBlockCacheMisses,
+ metricRocksDBCompactions,
+ metricRocksDBFlushes,
+
+ metricReplicas,
+ metricReplicasLeaders,
+ metricReplicasLeadersNotLeaseholders,
+ metricReplicasLeaseholders,
+ metricReplicasQuiescent,
+
+ metricQueueGCProcessFailure,
+ metricQueueReplicaGCProcessFailure,
+ metricQueueReplicateProcessFailure,
+ metricQueueSplitProcessFailure,
+ metricQueueConsistencyProcessFailure,
+ metricQueueRaftLogProcessFailure,
+ metricQueueRaftSnapshotProcessFailure,
+ metricQueueTSMaintenanceProcessFailure,
+
+ metricRebalancingQueriesPerSecond,
+ metricRebalancingWritesPerSecond,
+
+ metricTimeSeriesWriteSamples,
+ metricTimeSeriesWriteErrors,
+ metricTimeSeriesWriteBytes,
+
+ metricRequestsSlowLease,
+ metricRequestsSlowLatch,
+ metricRequestsSlowRaft,
+
+ metricSysGoAllocBytes,
+ metricSysCGoAllocBytes,
+ metricSysCGoCalls,
+ metricSysGoroutines,
+ metricSysGCCount,
+ metricSysGCPauseNs,
+}
diff --git a/src/go/plugin/go.d/modules/cockroachdb/testdata/config.json b/src/go/plugin/go.d/modules/cockroachdb/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/cockroachdb/testdata/config.yaml b/src/go/plugin/go.d/modules/cockroachdb/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/cockroachdb/testdata/metrics.txt b/src/go/plugin/go.d/modules/cockroachdb/testdata/metrics.txt
new file mode 100644
index 000000000..ca537e101
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/testdata/metrics.txt
@@ -0,0 +1,2952 @@
+# HELP sql_distsql_flows_active Number of distributed SQL flows currently active
+# TYPE sql_distsql_flows_active gauge
+sql_distsql_flows_active 0.0
+# HELP queue_consistency_process_failure Number of replicas which failed processing in the consistency checker queue
+# TYPE queue_consistency_process_failure counter
+queue_consistency_process_failure{store="1"} 0.0
+# HELP queue_replicate_process_success Number of replicas successfully processed by the replicate queue
+# TYPE queue_replicate_process_success counter
+queue_replicate_process_success{store="1"} 0.0
+# HELP distsender_batches Number of batches processed
+# TYPE distsender_batches counter
+distsender_batches 56336.0
+# HELP changefeed_table_metadata_nanos Time blocked while verifying table metadata histories
+# TYPE changefeed_table_metadata_nanos counter
+changefeed_table_metadata_nanos 0.0
+# HELP sql_update_started_count Number of SQL UPDATE statements started
+# TYPE sql_update_started_count counter
+sql_update_started_count 0.0
+# HELP raft_process_handleready_latency Latency histogram for handling a Raft ready
+# TYPE raft_process_handleready_latency histogram
+raft_process_handleready_latency_bucket{store="1",le="671.0"} 3.0
+raft_process_handleready_latency_bucket{store="1",le="703.0"} 4.0
+raft_process_handleready_latency_bucket{store="1",le="735.0"} 5.0
+raft_process_handleready_latency_bucket{store="1",le="767.0"} 11.0
+raft_process_handleready_latency_bucket{store="1",le="799.0"} 14.0
+raft_process_handleready_latency_bucket{store="1",le="831.0"} 19.0
+raft_process_handleready_latency_bucket{store="1",le="863.0"} 27.0
+raft_process_handleready_latency_bucket{store="1",le="895.0"} 34.0
+raft_process_handleready_latency_bucket{store="1",le="927.0"} 48.0
+raft_process_handleready_latency_bucket{store="1",le="959.0"} 70.0
+raft_process_handleready_latency_bucket{store="1",le="991.0"} 85.0
+raft_process_handleready_latency_bucket{store="1",le="1023.0"} 110.0
+raft_process_handleready_latency_bucket{store="1",le="1087.0"} 153.0
+raft_process_handleready_latency_bucket{store="1",le="1151.0"} 222.0
+raft_process_handleready_latency_bucket{store="1",le="1215.0"} 326.0
+raft_process_handleready_latency_bucket{store="1",le="1279.0"} 439.0
+raft_process_handleready_latency_bucket{store="1",le="1343.0"} 537.0
+raft_process_handleready_latency_bucket{store="1",le="1407.0"} 649.0
+raft_process_handleready_latency_bucket{store="1",le="1471.0"} 784.0
+raft_process_handleready_latency_bucket{store="1",le="1535.0"} 889.0
+raft_process_handleready_latency_bucket{store="1",le="1599.0"} 996.0
+raft_process_handleready_latency_bucket{store="1",le="1663.0"} 1078.0
+raft_process_handleready_latency_bucket{store="1",le="1727.0"} 1153.0
+raft_process_handleready_latency_bucket{store="1",le="1791.0"} 1228.0
+raft_process_handleready_latency_bucket{store="1",le="1855.0"} 1301.0
+raft_process_handleready_latency_bucket{store="1",le="1919.0"} 1370.0
+raft_process_handleready_latency_bucket{store="1",le="1983.0"} 1434.0
+raft_process_handleready_latency_bucket{store="1",le="2047.0"} 1493.0
+raft_process_handleready_latency_bucket{store="1",le="2175.0"} 1605.0
+raft_process_handleready_latency_bucket{store="1",le="2303.0"} 1693.0
+raft_process_handleready_latency_bucket{store="1",le="2431.0"} 1746.0
+raft_process_handleready_latency_bucket{store="1",le="2559.0"} 1806.0
+raft_process_handleready_latency_bucket{store="1",le="2687.0"} 1861.0
+raft_process_handleready_latency_bucket{store="1",le="2815.0"} 1922.0
+raft_process_handleready_latency_bucket{store="1",le="2943.0"} 1977.0
+raft_process_handleready_latency_bucket{store="1",le="3071.0"} 2031.0
+raft_process_handleready_latency_bucket{store="1",le="3199.0"} 2087.0
+raft_process_handleready_latency_bucket{store="1",le="3327.0"} 2138.0
+raft_process_handleready_latency_bucket{store="1",le="3455.0"} 2215.0
+raft_process_handleready_latency_bucket{store="1",le="3583.0"} 2284.0
+raft_process_handleready_latency_bucket{store="1",le="3711.0"} 2365.0
+raft_process_handleready_latency_bucket{store="1",le="3839.0"} 2471.0
+raft_process_handleready_latency_bucket{store="1",le="3967.0"} 2571.0
+raft_process_handleready_latency_bucket{store="1",le="4095.0"} 2680.0
+raft_process_handleready_latency_bucket{store="1",le="4351.0"} 2916.0
+raft_process_handleready_latency_bucket{store="1",le="4607.0"} 3225.0
+raft_process_handleready_latency_bucket{store="1",le="4863.0"} 3662.0
+raft_process_handleready_latency_bucket{store="1",le="5119.0"} 4195.0
+raft_process_handleready_latency_bucket{store="1",le="5375.0"} 4922.0
+raft_process_handleready_latency_bucket{store="1",le="5631.0"} 5692.0
+raft_process_handleready_latency_bucket{store="1",le="5887.0"} 6311.0
+raft_process_handleready_latency_bucket{store="1",le="6143.0"} 6798.0
+raft_process_handleready_latency_bucket{store="1",le="6399.0"} 7181.0
+raft_process_handleready_latency_bucket{store="1",le="6655.0"} 7432.0
+raft_process_handleready_latency_bucket{store="1",le="6911.0"} 7638.0
+raft_process_handleready_latency_bucket{store="1",le="7167.0"} 7763.0
+raft_process_handleready_latency_bucket{store="1",le="7423.0"} 7843.0
+raft_process_handleready_latency_bucket{store="1",le="7679.0"} 7910.0
+raft_process_handleready_latency_bucket{store="1",le="7935.0"} 7961.0
+raft_process_handleready_latency_bucket{store="1",le="8191.0"} 8011.0
+raft_process_handleready_latency_bucket{store="1",le="8703.0"} 8058.0
+raft_process_handleready_latency_bucket{store="1",le="9215.0"} 8111.0
+raft_process_handleready_latency_bucket{store="1",le="9727.0"} 8151.0
+raft_process_handleready_latency_bucket{store="1",le="10239.0"} 8182.0
+raft_process_handleready_latency_bucket{store="1",le="10751.0"} 8213.0
+raft_process_handleready_latency_bucket{store="1",le="11263.0"} 8235.0
+raft_process_handleready_latency_bucket{store="1",le="11775.0"} 8266.0
+raft_process_handleready_latency_bucket{store="1",le="12287.0"} 8290.0
+raft_process_handleready_latency_bucket{store="1",le="12799.0"} 8316.0
+raft_process_handleready_latency_bucket{store="1",le="13311.0"} 8330.0
+raft_process_handleready_latency_bucket{store="1",le="13823.0"} 8347.0
+raft_process_handleready_latency_bucket{store="1",le="14335.0"} 8374.0
+raft_process_handleready_latency_bucket{store="1",le="14847.0"} 8398.0
+raft_process_handleready_latency_bucket{store="1",le="15359.0"} 8427.0
+raft_process_handleready_latency_bucket{store="1",le="15871.0"} 8450.0
+raft_process_handleready_latency_bucket{store="1",le="16383.0"} 8476.0
+raft_process_handleready_latency_bucket{store="1",le="17407.0"} 8518.0
+raft_process_handleready_latency_bucket{store="1",le="18431.0"} 8561.0
+raft_process_handleready_latency_bucket{store="1",le="19455.0"} 8585.0
+raft_process_handleready_latency_bucket{store="1",le="20479.0"} 8605.0
+raft_process_handleready_latency_bucket{store="1",le="21503.0"} 8630.0
+raft_process_handleready_latency_bucket{store="1",le="22527.0"} 8652.0
+raft_process_handleready_latency_bucket{store="1",le="23551.0"} 8664.0
+raft_process_handleready_latency_bucket{store="1",le="24575.0"} 8673.0
+raft_process_handleready_latency_bucket{store="1",le="25599.0"} 8681.0
+raft_process_handleready_latency_bucket{store="1",le="26623.0"} 8692.0
+raft_process_handleready_latency_bucket{store="1",le="27647.0"} 8696.0
+raft_process_handleready_latency_bucket{store="1",le="28671.0"} 8704.0
+raft_process_handleready_latency_bucket{store="1",le="29695.0"} 8713.0
+raft_process_handleready_latency_bucket{store="1",le="30719.0"} 8727.0
+raft_process_handleready_latency_bucket{store="1",le="31743.0"} 8734.0
+raft_process_handleready_latency_bucket{store="1",le="32767.0"} 8744.0
+raft_process_handleready_latency_bucket{store="1",le="34815.0"} 8764.0
+raft_process_handleready_latency_bucket{store="1",le="36863.0"} 8776.0
+raft_process_handleready_latency_bucket{store="1",le="38911.0"} 8788.0
+raft_process_handleready_latency_bucket{store="1",le="40959.0"} 8796.0
+raft_process_handleready_latency_bucket{store="1",le="43007.0"} 8802.0
+raft_process_handleready_latency_bucket{store="1",le="45055.0"} 8812.0
+raft_process_handleready_latency_bucket{store="1",le="47103.0"} 8822.0
+raft_process_handleready_latency_bucket{store="1",le="49151.0"} 8828.0
+raft_process_handleready_latency_bucket{store="1",le="51199.0"} 8832.0
+raft_process_handleready_latency_bucket{store="1",le="53247.0"} 8836.0
+raft_process_handleready_latency_bucket{store="1",le="55295.0"} 8841.0
+raft_process_handleready_latency_bucket{store="1",le="57343.0"} 8844.0
+raft_process_handleready_latency_bucket{store="1",le="59391.0"} 8849.0
+raft_process_handleready_latency_bucket{store="1",le="61439.0"} 8857.0
+raft_process_handleready_latency_bucket{store="1",le="63487.0"} 8866.0
+raft_process_handleready_latency_bucket{store="1",le="65535.0"} 8871.0
+raft_process_handleready_latency_bucket{store="1",le="69631.0"} 8884.0
+raft_process_handleready_latency_bucket{store="1",le="73727.0"} 8894.0
+raft_process_handleready_latency_bucket{store="1",le="77823.0"} 8904.0
+raft_process_handleready_latency_bucket{store="1",le="81919.0"} 8909.0
+raft_process_handleready_latency_bucket{store="1",le="86015.0"} 8916.0
+raft_process_handleready_latency_bucket{store="1",le="90111.0"} 8926.0
+raft_process_handleready_latency_bucket{store="1",le="94207.0"} 8929.0
+raft_process_handleready_latency_bucket{store="1",le="98303.0"} 8930.0
+raft_process_handleready_latency_bucket{store="1",le="102399.0"} 8935.0
+raft_process_handleready_latency_bucket{store="1",le="106495.0"} 8940.0
+raft_process_handleready_latency_bucket{store="1",le="110591.0"} 8941.0
+raft_process_handleready_latency_bucket{store="1",le="114687.0"} 8943.0
+raft_process_handleready_latency_bucket{store="1",le="118783.0"} 8947.0
+raft_process_handleready_latency_bucket{store="1",le="122879.0"} 8948.0
+raft_process_handleready_latency_bucket{store="1",le="126975.0"} 8951.0
+raft_process_handleready_latency_bucket{store="1",le="131071.0"} 8952.0
+raft_process_handleready_latency_bucket{store="1",le="139263.0"} 8954.0
+raft_process_handleready_latency_bucket{store="1",le="147455.0"} 8959.0
+raft_process_handleready_latency_bucket{store="1",le="155647.0"} 8961.0
+raft_process_handleready_latency_bucket{store="1",le="163839.0"} 8962.0
+raft_process_handleready_latency_bucket{store="1",le="172031.0"} 8964.0
+raft_process_handleready_latency_bucket{store="1",le="188415.0"} 8965.0
+raft_process_handleready_latency_bucket{store="1",le="196607.0"} 8968.0
+raft_process_handleready_latency_bucket{store="1",le="204799.0"} 8969.0
+raft_process_handleready_latency_bucket{store="1",le="221183.0"} 8971.0
+raft_process_handleready_latency_bucket{store="1",le="237567.0"} 8972.0
+raft_process_handleready_latency_bucket{store="1",le="245759.0"} 8973.0
+raft_process_handleready_latency_bucket{store="1",le="253951.0"} 8974.0
+raft_process_handleready_latency_bucket{store="1",le="294911.0"} 8975.0
+raft_process_handleready_latency_bucket{store="1",le="311295.0"} 8976.0
+raft_process_handleready_latency_bucket{store="1",le="327679.0"} 8981.0
+raft_process_handleready_latency_bucket{store="1",le="344063.0"} 8984.0
+raft_process_handleready_latency_bucket{store="1",le="360447.0"} 8989.0
+raft_process_handleready_latency_bucket{store="1",le="376831.0"} 8998.0
+raft_process_handleready_latency_bucket{store="1",le="393215.0"} 9013.0
+raft_process_handleready_latency_bucket{store="1",le="409599.0"} 9040.0
+raft_process_handleready_latency_bucket{store="1",le="425983.0"} 9074.0
+raft_process_handleready_latency_bucket{store="1",le="442367.0"} 9111.0
+raft_process_handleready_latency_bucket{store="1",le="458751.0"} 9167.0
+raft_process_handleready_latency_bucket{store="1",le="475135.0"} 9254.0
+raft_process_handleready_latency_bucket{store="1",le="491519.0"} 9336.0
+raft_process_handleready_latency_bucket{store="1",le="507903.0"} 9426.0
+raft_process_handleready_latency_bucket{store="1",le="524287.0"} 9528.0
+raft_process_handleready_latency_bucket{store="1",le="557055.0"} 9797.0
+raft_process_handleready_latency_bucket{store="1",le="589823.0"} 10152.0
+raft_process_handleready_latency_bucket{store="1",le="622591.0"} 10535.0
+raft_process_handleready_latency_bucket{store="1",le="655359.0"} 11015.0
+raft_process_handleready_latency_bucket{store="1",le="688127.0"} 11550.0
+raft_process_handleready_latency_bucket{store="1",le="720895.0"} 12107.0
+raft_process_handleready_latency_bucket{store="1",le="753663.0"} 12736.0
+raft_process_handleready_latency_bucket{store="1",le="786431.0"} 13366.0
+raft_process_handleready_latency_bucket{store="1",le="819199.0"} 14043.0
+raft_process_handleready_latency_bucket{store="1",le="851967.0"} 14742.0
+raft_process_handleready_latency_bucket{store="1",le="884735.0"} 15425.0
+raft_process_handleready_latency_bucket{store="1",le="917503.0"} 16120.0
+raft_process_handleready_latency_bucket{store="1",le="950271.0"} 16774.0
+raft_process_handleready_latency_bucket{store="1",le="983039.0"} 17410.0
+raft_process_handleready_latency_bucket{store="1",le="1.015807e+06"} 18030.0
+raft_process_handleready_latency_bucket{store="1",le="1.048575e+06"} 18574.0
+raft_process_handleready_latency_bucket{store="1",le="1.114111e+06"} 19559.0
+raft_process_handleready_latency_bucket{store="1",le="1.179647e+06"} 20407.0
+raft_process_handleready_latency_bucket{store="1",le="1.245183e+06"} 21059.0
+raft_process_handleready_latency_bucket{store="1",le="1.310719e+06"} 21649.0
+raft_process_handleready_latency_bucket{store="1",le="1.376255e+06"} 22120.0
+raft_process_handleready_latency_bucket{store="1",le="1.441791e+06"} 22513.0
+raft_process_handleready_latency_bucket{store="1",le="1.507327e+06"} 22863.0
+raft_process_handleready_latency_bucket{store="1",le="1.572863e+06"} 23168.0
+raft_process_handleready_latency_bucket{store="1",le="1.638399e+06"} 23475.0
+raft_process_handleready_latency_bucket{store="1",le="1.703935e+06"} 23751.0
+raft_process_handleready_latency_bucket{store="1",le="1.769471e+06"} 24004.0
+raft_process_handleready_latency_bucket{store="1",le="1.835007e+06"} 24246.0
+raft_process_handleready_latency_bucket{store="1",le="1.900543e+06"} 24494.0
+raft_process_handleready_latency_bucket{store="1",le="1.966079e+06"} 24695.0
+raft_process_handleready_latency_bucket{store="1",le="2.031615e+06"} 24883.0
+raft_process_handleready_latency_bucket{store="1",le="2.097151e+06"} 25036.0
+raft_process_handleready_latency_bucket{store="1",le="2.228223e+06"} 25278.0
+raft_process_handleready_latency_bucket{store="1",le="2.359295e+06"} 25461.0
+raft_process_handleready_latency_bucket{store="1",le="2.490367e+06"} 25606.0
+raft_process_handleready_latency_bucket{store="1",le="2.621439e+06"} 25691.0
+raft_process_handleready_latency_bucket{store="1",le="2.752511e+06"} 25765.0
+raft_process_handleready_latency_bucket{store="1",le="2.883583e+06"} 25826.0
+raft_process_handleready_latency_bucket{store="1",le="3.014655e+06"} 25873.0
+raft_process_handleready_latency_bucket{store="1",le="3.145727e+06"} 25909.0
+raft_process_handleready_latency_bucket{store="1",le="3.276799e+06"} 25943.0
+raft_process_handleready_latency_bucket{store="1",le="3.407871e+06"} 25964.0
+raft_process_handleready_latency_bucket{store="1",le="3.538943e+06"} 25992.0
+raft_process_handleready_latency_bucket{store="1",le="3.670015e+06"} 26012.0
+raft_process_handleready_latency_bucket{store="1",le="3.801087e+06"} 26027.0
+raft_process_handleready_latency_bucket{store="1",le="3.932159e+06"} 26042.0
+raft_process_handleready_latency_bucket{store="1",le="4.063231e+06"} 26052.0
+raft_process_handleready_latency_bucket{store="1",le="4.194303e+06"} 26057.0
+raft_process_handleready_latency_bucket{store="1",le="4.456447e+06"} 26062.0
+raft_process_handleready_latency_bucket{store="1",le="4.718591e+06"} 26073.0
+raft_process_handleready_latency_bucket{store="1",le="4.980735e+06"} 26081.0
+raft_process_handleready_latency_bucket{store="1",le="5.242879e+06"} 26090.0
+raft_process_handleready_latency_bucket{store="1",le="5.505023e+06"} 26097.0
+raft_process_handleready_latency_bucket{store="1",le="5.767167e+06"} 26105.0
+raft_process_handleready_latency_bucket{store="1",le="6.029311e+06"} 26107.0
+raft_process_handleready_latency_bucket{store="1",le="6.291455e+06"} 26111.0
+raft_process_handleready_latency_bucket{store="1",le="6.553599e+06"} 26114.0
+raft_process_handleready_latency_bucket{store="1",le="6.815743e+06"} 26115.0
+raft_process_handleready_latency_bucket{store="1",le="7.077887e+06"} 26118.0
+raft_process_handleready_latency_bucket{store="1",le="7.340031e+06"} 26119.0
+raft_process_handleready_latency_bucket{store="1",le="7.602175e+06"} 26121.0
+raft_process_handleready_latency_bucket{store="1",le="7.864319e+06"} 26122.0
+raft_process_handleready_latency_bucket{store="1",le="8.126463e+06"} 26124.0
+raft_process_handleready_latency_bucket{store="1",le="8.388607e+06"} 26127.0
+raft_process_handleready_latency_bucket{store="1",le="9.437183e+06"} 26133.0
+raft_process_handleready_latency_bucket{store="1",le="9.961471e+06"} 26134.0
+raft_process_handleready_latency_bucket{store="1",le="1.0485759e+07"} 26135.0
+raft_process_handleready_latency_bucket{store="1",le="1.1010047e+07"} 26136.0
+raft_process_handleready_latency_bucket{store="1",le="1.2058623e+07"} 26137.0
+raft_process_handleready_latency_bucket{store="1",le="1.2582911e+07"} 26138.0
+raft_process_handleready_latency_bucket{store="1",le="1.3631487e+07"} 26139.0
+raft_process_handleready_latency_bucket{store="1",le="2.5165823e+07"} 26140.0
+raft_process_handleready_latency_bucket{store="1",le="3.1457279e+07"} 26141.0
+raft_process_handleready_latency_bucket{store="1",le="3.7748735e+07"} 26142.0
+raft_process_handleready_latency_bucket{store="1",le="4.1943039e+07"} 26143.0
+raft_process_handleready_latency_bucket{store="1",le="4.8234495e+07"} 26144.0
+raft_process_handleready_latency_bucket{store="1",le="9.05969663e+08"} 26145.0
+raft_process_handleready_latency_bucket{store="1",le="9.73078527e+08"} 26146.0
+raft_process_handleready_latency_bucket{store="1",le="1.006632959e+09"} 26147.0
+raft_process_handleready_latency_bucket{store="1",le="1.040187391e+09"} 26148.0
+raft_process_handleready_latency_bucket{store="1",le="1.0200547327e+10"} 26149.0
+raft_process_handleready_latency_bucket{store="1",le="+Inf"} 26149.0
+raft_process_handleready_latency_sum{store="1"} 3.4720430875e+10
+raft_process_handleready_latency_count{store="1"} 26149.0
+# HELP txn_parallelcommits Number of KV transaction parallel commit attempts
+# TYPE txn_parallelcommits counter
+txn_parallelcommits 517.0
+# HELP txn_restarts_readwithinuncertainty Number of restarts due to reading a new value within the uncertainty interval
+# TYPE txn_restarts_readwithinuncertainty counter
+txn_restarts_readwithinuncertainty 0.0
+# HELP sys_host_net_send_packets Packets sent on all network interfaces since this process started
+# TYPE sys_host_net_send_packets gauge
+sys_host_net_send_packets 644128.0
+# HELP queue_merge_processingnanos Nanoseconds spent processing replicas in the merge queue
+# TYPE queue_merge_processingnanos counter
+queue_merge_processingnanos{store="1"} 0.0
+# HELP queue_raftlog_pending Number of pending replicas in the Raft log queue
+# TYPE queue_raftlog_pending gauge
+queue_raftlog_pending{store="1"} 0.0
+# HELP queue_split_processingnanos Nanoseconds spent processing replicas in the split queue
+# TYPE queue_split_processingnanos counter
+queue_split_processingnanos{store="1"} 0.0
+# HELP txnrecovery_attempts_total Number of transaction recovery attempts executed
+# TYPE txnrecovery_attempts_total counter
+txnrecovery_attempts_total{store="1"} 0.0
+# HELP gossip_connections_outgoing Number of active outgoing gossip connections
+# TYPE gossip_connections_outgoing gauge
+gossip_connections_outgoing 2.0
+# HELP sql_mem_sql_max Memory usage per sql statement for sql
+# TYPE sql_mem_sql_max histogram
+sql_mem_sql_max_bucket{le="+Inf"} 0.0
+sql_mem_sql_max_sum 0.0
+sql_mem_sql_max_count 0.0
+sql_mem_sql_max_bucket{le="+Inf"} 0.0
+sql_mem_sql_max_sum 0.0
+sql_mem_sql_max_count 0.0
+# HELP intents_resolve_attempts Count of (point or range) intent commit evaluation attempts
+# TYPE intents_resolve_attempts counter
+intents_resolve_attempts{store="1"} 4.0
+# HELP raft_rcvd_snap Number of MsgSnap messages received by this store
+# TYPE raft_rcvd_snap counter
+raft_rcvd_snap{store="1"} 0.0
+# HELP queue_raftlog_process_failure Number of replicas which failed processing in the Raft log queue
+# TYPE queue_raftlog_process_failure counter
+queue_raftlog_process_failure{store="1"} 0.0
+# HELP queue_gc_info_resolvetotal Number of attempted intent resolutions
+# TYPE queue_gc_info_resolvetotal counter
+queue_gc_info_resolvetotal{store="1"} 0.0
+# HELP sys_gc_pause_percent Current GC pause percentage
+# TYPE sys_gc_pause_percent gauge
+sys_gc_pause_percent 2.582156232137188e-06
+# HELP exec_error Number of batch KV requests that failed to execute on this node
+# TYPE exec_error counter
+exec_error 18.0
+# HELP rocksdb_read_amplification Number of disk reads per query
+# TYPE rocksdb_read_amplification gauge
+rocksdb_read_amplification{store="1"} 1.0
+# HELP raft_rcvd_timeoutnow Number of MsgTimeoutNow messages received by this store
+# TYPE raft_rcvd_timeoutnow counter
+raft_rcvd_timeoutnow{store="1"} 2.0
+# HELP queue_raftsnapshot_processingnanos Nanoseconds spent processing replicas in the Raft repair queue
+# TYPE queue_raftsnapshot_processingnanos counter
+queue_raftsnapshot_processingnanos{store="1"} 0.0
+# HELP queue_replicagc_process_success Number of replicas successfully processed by the replica GC queue
+# TYPE queue_replicagc_process_success counter
+queue_replicagc_process_success{store="1"} 9.0
+# HELP sql_mem_internal_session_current Current sql session memory usage for internal
+# TYPE sql_mem_internal_session_current gauge
+sql_mem_internal_session_current 0.0
+# HELP distsender_errors_notleaseholder Number of NotLeaseHolderErrors encountered
+# TYPE distsender_errors_notleaseholder counter
+distsender_errors_notleaseholder 15.0
+# HELP timeseries_write_errors Total errors encountered while attempting to write metrics to disk
+# TYPE timeseries_write_errors counter
+timeseries_write_errors 0.0
+# HELP sys_cgocalls Total number of cgo calls
+# TYPE sys_cgocalls gauge
+sys_cgocalls 577778.0
+# HELP exec_latency Latency of batch KV requests executed on this node
+# TYPE exec_latency histogram
+exec_latency_bucket{le="32767.0"} 1.0
+exec_latency_bucket{le="38911.0"} 2.0
+exec_latency_bucket{le="40959.0"} 5.0
+exec_latency_bucket{le="43007.0"} 7.0
+exec_latency_bucket{le="45055.0"} 8.0
+exec_latency_bucket{le="47103.0"} 11.0
+exec_latency_bucket{le="49151.0"} 14.0
+exec_latency_bucket{le="51199.0"} 18.0
+exec_latency_bucket{le="53247.0"} 19.0
+exec_latency_bucket{le="55295.0"} 20.0
+exec_latency_bucket{le="57343.0"} 23.0
+exec_latency_bucket{le="59391.0"} 26.0
+exec_latency_bucket{le="63487.0"} 32.0
+exec_latency_bucket{le="65535.0"} 35.0
+exec_latency_bucket{le="69631.0"} 43.0
+exec_latency_bucket{le="73727.0"} 60.0
+exec_latency_bucket{le="77823.0"} 80.0
+exec_latency_bucket{le="81919.0"} 102.0
+exec_latency_bucket{le="86015.0"} 118.0
+exec_latency_bucket{le="90111.0"} 147.0
+exec_latency_bucket{le="94207.0"} 170.0
+exec_latency_bucket{le="98303.0"} 199.0
+exec_latency_bucket{le="102399.0"} 227.0
+exec_latency_bucket{le="106495.0"} 255.0
+exec_latency_bucket{le="110591.0"} 289.0
+exec_latency_bucket{le="114687.0"} 327.0
+exec_latency_bucket{le="118783.0"} 369.0
+exec_latency_bucket{le="122879.0"} 412.0
+exec_latency_bucket{le="126975.0"} 460.0
+exec_latency_bucket{le="131071.0"} 497.0
+exec_latency_bucket{le="139263.0"} 562.0
+exec_latency_bucket{le="147455.0"} 633.0
+exec_latency_bucket{le="155647.0"} 700.0
+exec_latency_bucket{le="163839.0"} 792.0
+exec_latency_bucket{le="172031.0"} 862.0
+exec_latency_bucket{le="180223.0"} 948.0
+exec_latency_bucket{le="188415.0"} 1021.0
+exec_latency_bucket{le="196607.0"} 1065.0
+exec_latency_bucket{le="204799.0"} 1110.0
+exec_latency_bucket{le="212991.0"} 1148.0
+exec_latency_bucket{le="221183.0"} 1186.0
+exec_latency_bucket{le="229375.0"} 1227.0
+exec_latency_bucket{le="237567.0"} 1250.0
+exec_latency_bucket{le="245759.0"} 1280.0
+exec_latency_bucket{le="253951.0"} 1311.0
+exec_latency_bucket{le="262143.0"} 1333.0
+exec_latency_bucket{le="278527.0"} 1366.0
+exec_latency_bucket{le="294911.0"} 1396.0
+exec_latency_bucket{le="311295.0"} 1416.0
+exec_latency_bucket{le="327679.0"} 1439.0
+exec_latency_bucket{le="344063.0"} 1457.0
+exec_latency_bucket{le="360447.0"} 1473.0
+exec_latency_bucket{le="376831.0"} 1483.0
+exec_latency_bucket{le="393215.0"} 1493.0
+exec_latency_bucket{le="409599.0"} 1503.0
+exec_latency_bucket{le="425983.0"} 1514.0
+exec_latency_bucket{le="442367.0"} 1520.0
+exec_latency_bucket{le="458751.0"} 1526.0
+exec_latency_bucket{le="475135.0"} 1533.0
+exec_latency_bucket{le="491519.0"} 1538.0
+exec_latency_bucket{le="507903.0"} 1542.0
+exec_latency_bucket{le="524287.0"} 1549.0
+exec_latency_bucket{le="557055.0"} 1556.0
+exec_latency_bucket{le="589823.0"} 1564.0
+exec_latency_bucket{le="622591.0"} 1568.0
+exec_latency_bucket{le="655359.0"} 1575.0
+exec_latency_bucket{le="688127.0"} 1578.0
+exec_latency_bucket{le="720895.0"} 1583.0
+exec_latency_bucket{le="753663.0"} 1589.0
+exec_latency_bucket{le="786431.0"} 1597.0
+exec_latency_bucket{le="819199.0"} 1599.0
+exec_latency_bucket{le="851967.0"} 1602.0
+exec_latency_bucket{le="884735.0"} 1606.0
+exec_latency_bucket{le="917503.0"} 1608.0
+exec_latency_bucket{le="950271.0"} 1609.0
+exec_latency_bucket{le="983039.0"} 1611.0
+exec_latency_bucket{le="1.015807e+06"} 1612.0
+exec_latency_bucket{le="1.048575e+06"} 1617.0
+exec_latency_bucket{le="1.114111e+06"} 1621.0
+exec_latency_bucket{le="1.179647e+06"} 1623.0
+exec_latency_bucket{le="1.245183e+06"} 1626.0
+exec_latency_bucket{le="1.310719e+06"} 1629.0
+exec_latency_bucket{le="1.376255e+06"} 1634.0
+exec_latency_bucket{le="1.441791e+06"} 1637.0
+exec_latency_bucket{le="1.507327e+06"} 1642.0
+exec_latency_bucket{le="1.572863e+06"} 1649.0
+exec_latency_bucket{le="1.638399e+06"} 1653.0
+exec_latency_bucket{le="1.703935e+06"} 1661.0
+exec_latency_bucket{le="1.769471e+06"} 1675.0
+exec_latency_bucket{le="1.835007e+06"} 1694.0
+exec_latency_bucket{le="1.900543e+06"} 1727.0
+exec_latency_bucket{le="1.966079e+06"} 1761.0
+exec_latency_bucket{le="2.031615e+06"} 1816.0
+exec_latency_bucket{le="2.097151e+06"} 1897.0
+exec_latency_bucket{le="2.228223e+06"} 2127.0
+exec_latency_bucket{le="2.359295e+06"} 2463.0
+exec_latency_bucket{le="2.490367e+06"} 2938.0
+exec_latency_bucket{le="2.621439e+06"} 3489.0
+exec_latency_bucket{le="2.752511e+06"} 4097.0
+exec_latency_bucket{le="2.883583e+06"} 4692.0
+exec_latency_bucket{le="3.014655e+06"} 5308.0
+exec_latency_bucket{le="3.145727e+06"} 5929.0
+exec_latency_bucket{le="3.276799e+06"} 6485.0
+exec_latency_bucket{le="3.407871e+06"} 6942.0
+exec_latency_bucket{le="3.538943e+06"} 7392.0
+exec_latency_bucket{le="3.670015e+06"} 7782.0
+exec_latency_bucket{le="3.801087e+06"} 8065.0
+exec_latency_bucket{le="3.932159e+06"} 8301.0
+exec_latency_bucket{le="4.063231e+06"} 8508.0
+exec_latency_bucket{le="4.194303e+06"} 8676.0
+exec_latency_bucket{le="4.456447e+06"} 8957.0
+exec_latency_bucket{le="4.718591e+06"} 9130.0
+exec_latency_bucket{le="4.980735e+06"} 9277.0
+exec_latency_bucket{le="5.242879e+06"} 9399.0
+exec_latency_bucket{le="5.505023e+06"} 9498.0
+exec_latency_bucket{le="5.767167e+06"} 9586.0
+exec_latency_bucket{le="6.029311e+06"} 9659.0
+exec_latency_bucket{le="6.291455e+06"} 9740.0
+exec_latency_bucket{le="6.553599e+06"} 9795.0
+exec_latency_bucket{le="6.815743e+06"} 9847.0
+exec_latency_bucket{le="7.077887e+06"} 9887.0
+exec_latency_bucket{le="7.340031e+06"} 9907.0
+exec_latency_bucket{le="7.602175e+06"} 9932.0
+exec_latency_bucket{le="7.864319e+06"} 9952.0
+exec_latency_bucket{le="8.126463e+06"} 9967.0
+exec_latency_bucket{le="8.388607e+06"} 9981.0
+exec_latency_bucket{le="8.912895e+06"} 10005.0
+exec_latency_bucket{le="9.437183e+06"} 10017.0
+exec_latency_bucket{le="9.961471e+06"} 10031.0
+exec_latency_bucket{le="1.0485759e+07"} 10035.0
+exec_latency_bucket{le="1.1010047e+07"} 10044.0
+exec_latency_bucket{le="1.1534335e+07"} 10050.0
+exec_latency_bucket{le="1.2058623e+07"} 10056.0
+exec_latency_bucket{le="1.2582911e+07"} 10058.0
+exec_latency_bucket{le="1.3107199e+07"} 10061.0
+exec_latency_bucket{le="1.3631487e+07"} 10065.0
+exec_latency_bucket{le="1.4155775e+07"} 10067.0
+exec_latency_bucket{le="1.5204351e+07"} 10068.0
+exec_latency_bucket{le="1.6252927e+07"} 10069.0
+exec_latency_bucket{le="1.7825791e+07"} 10070.0
+exec_latency_bucket{le="1.8874367e+07"} 10073.0
+exec_latency_bucket{le="1.9922943e+07"} 10074.0
+exec_latency_bucket{le="2.0971519e+07"} 10076.0
+exec_latency_bucket{le="2.8311551e+07"} 10077.0
+exec_latency_bucket{le="3.7748735e+07"} 10078.0
+exec_latency_bucket{le="3.9845887e+07"} 10079.0
+exec_latency_bucket{le="4.4040191e+07"} 10080.0
+exec_latency_bucket{le="1.04857599e+08"} 10081.0
+exec_latency_bucket{le="1.09051903e+08"} 10082.0
+exec_latency_bucket{le="4.19430399e+08"} 10083.0
+exec_latency_bucket{le="7.38197503e+08"} 10084.0
+exec_latency_bucket{le="8.38860799e+08"} 10085.0
+exec_latency_bucket{le="9.05969663e+08"} 10086.0
+exec_latency_bucket{le="9.73078527e+08"} 10087.0
+exec_latency_bucket{le="1.040187391e+09"} 10089.0
+exec_latency_bucket{le="1.342177279e+09"} 10090.0
+exec_latency_bucket{le="3.087007743e+09"} 10091.0
+exec_latency_bucket{le="1.0200547327e+10"} 10092.0
+exec_latency_bucket{le="+Inf"} 10092.0
+exec_latency_sum 5.1143858324e+10
+exec_latency_count 10092.0
+# HELP changefeed_flush_nanos Total time spent flushing all feeds
+# TYPE changefeed_flush_nanos counter
+changefeed_flush_nanos 0.0
+# HELP sql_restart_savepoint_release_started_count Number of `RELEASE SAVEPOINT cockroach_restart` statements started
+# TYPE sql_restart_savepoint_release_started_count counter
+sql_restart_savepoint_release_started_count 0.0
+# HELP sql_select_started_count_internal Number of SQL SELECT statements started (internal queries)
+# TYPE sql_select_started_count_internal counter
+sql_select_started_count_internal 1607.0
+# HELP gossip_bytes_sent Number of sent gossip bytes
+# TYPE gossip_bytes_sent counter
+gossip_bytes_sent 4462.0
+# HELP sql_txn_commit_count_internal Number of SQL transaction COMMIT statements successfully executed (internal queries)
+# TYPE sql_txn_commit_count_internal counter
+sql_txn_commit_count_internal 0.0
+# HELP leases_transfers_success Number of successful lease transfers
+# TYPE leases_transfers_success counter
+leases_transfers_success{store="1"} 0.0
+# HELP compactor_suggestionbytes_skipped Number of logical bytes in suggested compactions which were not compacted
+# TYPE compactor_suggestionbytes_skipped counter
+compactor_suggestionbytes_skipped{store="1"} 0.0
+# HELP sql_savepoint_started_count_internal Number of SQL SAVEPOINT statements started (internal queries)
+# TYPE sql_savepoint_started_count_internal counter
+sql_savepoint_started_count_internal 0.0
+# HELP sql_mem_admin_txn_current Current sql transaction memory usage for admin
+# TYPE sql_mem_admin_txn_current gauge
+sql_mem_admin_txn_current 0.0
+# HELP sql_optimizer_count Number of statements which ran with the cost-based optimizer
+# TYPE sql_optimizer_count counter
+sql_optimizer_count 0.0
+# HELP sql_restart_savepoint_started_count_internal Number of `SAVEPOINT cockroach_restart` statements started (internal queries)
+# TYPE sql_restart_savepoint_started_count_internal counter
+sql_restart_savepoint_started_count_internal 0.0
+# HELP sql_restart_savepoint_rollback_count_internal Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed (internal queries)
+# TYPE sql_restart_savepoint_rollback_count_internal counter
+sql_restart_savepoint_rollback_count_internal 0.0
+# HELP txn_restarts_possiblereplay Number of restarts due to possible replays of command batches at the storage layer
+# TYPE txn_restarts_possiblereplay counter
+txn_restarts_possiblereplay 0.0
+# HELP kv_closed_timestamp_max_behind_nanos Largest latency between realtime and replica max closed timestamp
+# TYPE kv_closed_timestamp_max_behind_nanos gauge
+kv_closed_timestamp_max_behind_nanos{store="1"} 1.2220867530922e+13
+# HELP queue_merge_purgatory Number of replicas in the merge queue's purgatory, waiting to become mergeable
+# TYPE queue_merge_purgatory gauge
+queue_merge_purgatory{store="1"} 0.0
+# HELP sys_cpu_user_ns Total user cpu time
+# TYPE sys_cpu_user_ns gauge
+sys_cpu_user_ns 2.2762e+11
+# HELP sql_service_latency Latency of SQL request execution
+# TYPE sql_service_latency histogram
+sql_service_latency_bucket{le="+Inf"} 0.0
+sql_service_latency_sum 0.0
+sql_service_latency_count 0.0
+# HELP raft_process_tickingnanos Nanoseconds spent in store.processRaft() processing replica.Tick()
+# TYPE raft_process_tickingnanos counter
+raft_process_tickingnanos{store="1"} 5.15943156e+08
+# HELP queue_raftsnapshot_process_failure Number of replicas which failed processing in the Raft repair queue
+# TYPE queue_raftsnapshot_process_failure counter
+queue_raftsnapshot_process_failure{store="1"} 0.0
+# HELP kv_rangefeed_catchup_scan_nanos Time spent in RangeFeed catchup scan
+# TYPE kv_rangefeed_catchup_scan_nanos counter
+kv_rangefeed_catchup_scan_nanos{store="1"} 0.0
+# HELP txn_commits1PC Number of KV transaction one-phase commit attempts
+# TYPE txn_commits1PC counter
+txn_commits1PC 3206.0
+# HELP sql_mem_conns_current Current sql statement memory usage for conns
+# TYPE sql_mem_conns_current gauge
+sql_mem_conns_current 0.0
+# HELP sql_txn_rollback_count Number of SQL transaction ROLLBACK statements successfully executed
+# TYPE sql_txn_rollback_count counter
+sql_txn_rollback_count 0.0
+# HELP sql_query_count_internal Number of SQL queries executed (internal queries)
+# TYPE sql_query_count_internal counter
+sql_query_count_internal 2650.0
+# HELP sql_exec_latency_internal Latency of SQL statement execution (internal queries)
+# TYPE sql_exec_latency_internal histogram
+sql_exec_latency_internal_bucket{le="139263.0"} 1.0
+sql_exec_latency_internal_bucket{le="147455.0"} 3.0
+sql_exec_latency_internal_bucket{le="163839.0"} 4.0
+sql_exec_latency_internal_bucket{le="172031.0"} 8.0
+sql_exec_latency_internal_bucket{le="188415.0"} 9.0
+sql_exec_latency_internal_bucket{le="204799.0"} 10.0
+sql_exec_latency_internal_bucket{le="212991.0"} 12.0
+sql_exec_latency_internal_bucket{le="237567.0"} 13.0
+sql_exec_latency_internal_bucket{le="245759.0"} 17.0
+sql_exec_latency_internal_bucket{le="253951.0"} 19.0
+sql_exec_latency_internal_bucket{le="262143.0"} 24.0
+sql_exec_latency_internal_bucket{le="278527.0"} 40.0
+sql_exec_latency_internal_bucket{le="294911.0"} 71.0
+sql_exec_latency_internal_bucket{le="311295.0"} 86.0
+sql_exec_latency_internal_bucket{le="327679.0"} 107.0
+sql_exec_latency_internal_bucket{le="344063.0"} 138.0
+sql_exec_latency_internal_bucket{le="360447.0"} 169.0
+sql_exec_latency_internal_bucket{le="376831.0"} 202.0
+sql_exec_latency_internal_bucket{le="393215.0"} 244.0
+sql_exec_latency_internal_bucket{le="409599.0"} 272.0
+sql_exec_latency_internal_bucket{le="425983.0"} 289.0
+sql_exec_latency_internal_bucket{le="442367.0"} 312.0
+sql_exec_latency_internal_bucket{le="458751.0"} 330.0
+sql_exec_latency_internal_bucket{le="475135.0"} 345.0
+sql_exec_latency_internal_bucket{le="491519.0"} 356.0
+sql_exec_latency_internal_bucket{le="507903.0"} 367.0
+sql_exec_latency_internal_bucket{le="524287.0"} 374.0
+sql_exec_latency_internal_bucket{le="557055.0"} 385.0
+sql_exec_latency_internal_bucket{le="589823.0"} 399.0
+sql_exec_latency_internal_bucket{le="622591.0"} 409.0
+sql_exec_latency_internal_bucket{le="655359.0"} 416.0
+sql_exec_latency_internal_bucket{le="688127.0"} 424.0
+sql_exec_latency_internal_bucket{le="720895.0"} 431.0
+sql_exec_latency_internal_bucket{le="753663.0"} 437.0
+sql_exec_latency_internal_bucket{le="786431.0"} 442.0
+sql_exec_latency_internal_bucket{le="819199.0"} 446.0
+sql_exec_latency_internal_bucket{le="851967.0"} 447.0
+sql_exec_latency_internal_bucket{le="884735.0"} 455.0
+sql_exec_latency_internal_bucket{le="917503.0"} 459.0
+sql_exec_latency_internal_bucket{le="950271.0"} 463.0
+sql_exec_latency_internal_bucket{le="983039.0"} 470.0
+sql_exec_latency_internal_bucket{le="1.015807e+06"} 472.0
+sql_exec_latency_internal_bucket{le="1.048575e+06"} 479.0
+sql_exec_latency_internal_bucket{le="1.114111e+06"} 491.0
+sql_exec_latency_internal_bucket{le="1.179647e+06"} 502.0
+sql_exec_latency_internal_bucket{le="1.245183e+06"} 518.0
+sql_exec_latency_internal_bucket{le="1.310719e+06"} 532.0
+sql_exec_latency_internal_bucket{le="1.376255e+06"} 551.0
+sql_exec_latency_internal_bucket{le="1.441791e+06"} 567.0
+sql_exec_latency_internal_bucket{le="1.507327e+06"} 583.0
+sql_exec_latency_internal_bucket{le="1.572863e+06"} 598.0
+sql_exec_latency_internal_bucket{le="1.638399e+06"} 617.0
+sql_exec_latency_internal_bucket{le="1.703935e+06"} 634.0
+sql_exec_latency_internal_bucket{le="1.769471e+06"} 659.0
+sql_exec_latency_internal_bucket{le="1.835007e+06"} 676.0
+sql_exec_latency_internal_bucket{le="1.900543e+06"} 714.0
+sql_exec_latency_internal_bucket{le="1.966079e+06"} 754.0
+sql_exec_latency_internal_bucket{le="2.031615e+06"} 791.0
+sql_exec_latency_internal_bucket{le="2.097151e+06"} 840.0
+sql_exec_latency_internal_bucket{le="2.228223e+06"} 937.0
+sql_exec_latency_internal_bucket{le="2.359295e+06"} 1046.0
+sql_exec_latency_internal_bucket{le="2.490367e+06"} 1154.0
+sql_exec_latency_internal_bucket{le="2.621439e+06"} 1254.0
+sql_exec_latency_internal_bucket{le="2.752511e+06"} 1357.0
+sql_exec_latency_internal_bucket{le="2.883583e+06"} 1444.0
+sql_exec_latency_internal_bucket{le="3.014655e+06"} 1534.0
+sql_exec_latency_internal_bucket{le="3.145727e+06"} 1609.0
+sql_exec_latency_internal_bucket{le="3.276799e+06"} 1675.0
+sql_exec_latency_internal_bucket{le="3.407871e+06"} 1738.0
+sql_exec_latency_internal_bucket{le="3.538943e+06"} 1793.0
+sql_exec_latency_internal_bucket{le="3.670015e+06"} 1847.0
+sql_exec_latency_internal_bucket{le="3.801087e+06"} 1896.0
+sql_exec_latency_internal_bucket{le="3.932159e+06"} 1952.0
+sql_exec_latency_internal_bucket{le="4.063231e+06"} 1994.0
+sql_exec_latency_internal_bucket{le="4.194303e+06"} 2040.0
+sql_exec_latency_internal_bucket{le="4.456447e+06"} 2136.0
+sql_exec_latency_internal_bucket{le="4.718591e+06"} 2208.0
+sql_exec_latency_internal_bucket{le="4.980735e+06"} 2261.0
+sql_exec_latency_internal_bucket{le="5.242879e+06"} 2326.0
+sql_exec_latency_internal_bucket{le="5.505023e+06"} 2363.0
+sql_exec_latency_internal_bucket{le="5.767167e+06"} 2389.0
+sql_exec_latency_internal_bucket{le="6.029311e+06"} 2424.0
+sql_exec_latency_internal_bucket{le="6.291455e+06"} 2450.0
+sql_exec_latency_internal_bucket{le="6.553599e+06"} 2481.0
+sql_exec_latency_internal_bucket{le="6.815743e+06"} 2508.0
+sql_exec_latency_internal_bucket{le="7.077887e+06"} 2540.0
+sql_exec_latency_internal_bucket{le="7.340031e+06"} 2549.0
+sql_exec_latency_internal_bucket{le="7.602175e+06"} 2562.0
+sql_exec_latency_internal_bucket{le="7.864319e+06"} 2572.0
+sql_exec_latency_internal_bucket{le="8.126463e+06"} 2577.0
+sql_exec_latency_internal_bucket{le="8.388607e+06"} 2582.0
+sql_exec_latency_internal_bucket{le="8.912895e+06"} 2596.0
+sql_exec_latency_internal_bucket{le="9.437183e+06"} 2608.0
+sql_exec_latency_internal_bucket{le="9.961471e+06"} 2616.0
+sql_exec_latency_internal_bucket{le="1.0485759e+07"} 2621.0
+sql_exec_latency_internal_bucket{le="1.1010047e+07"} 2625.0
+sql_exec_latency_internal_bucket{le="1.1534335e+07"} 2629.0
+sql_exec_latency_internal_bucket{le="1.2058623e+07"} 2630.0
+sql_exec_latency_internal_bucket{le="1.2582911e+07"} 2634.0
+sql_exec_latency_internal_bucket{le="1.4155775e+07"} 2635.0
+sql_exec_latency_internal_bucket{le="1.4680063e+07"} 2638.0
+sql_exec_latency_internal_bucket{le="1.6777215e+07"} 2639.0
+sql_exec_latency_internal_bucket{le="1.7825791e+07"} 2640.0
+sql_exec_latency_internal_bucket{le="1.8874367e+07"} 2642.0
+sql_exec_latency_internal_bucket{le="2.2020095e+07"} 2644.0
+sql_exec_latency_internal_bucket{le="2.3068671e+07"} 2645.0
+sql_exec_latency_internal_bucket{le="2.5165823e+07"} 2646.0
+sql_exec_latency_internal_bucket{le="2.9360127e+07"} 2647.0
+sql_exec_latency_internal_bucket{le="3.5651583e+07"} 2648.0
+sql_exec_latency_internal_bucket{le="4.1943039e+07"} 2649.0
+sql_exec_latency_internal_bucket{le="4.8234495e+07"} 2650.0
+sql_exec_latency_internal_bucket{le="1.25829119e+08"} 2651.0
+sql_exec_latency_internal_bucket{le="1.30023423e+08"} 2652.0
+sql_exec_latency_internal_bucket{le="2.18103807e+08"} 2653.0
+sql_exec_latency_internal_bucket{le="2.26492415e+08"} 2654.0
+sql_exec_latency_internal_bucket{le="5.20093695e+08"} 2655.0
+sql_exec_latency_internal_bucket{le="1.0200547327e+10"} 2656.0
+sql_exec_latency_internal_bucket{le="+Inf"} 2656.0
+sql_exec_latency_internal_sum 1.9847050656e+10
+sql_exec_latency_internal_count 2656.0
+# HELP rebalancing_queriespersecond Number of kv-level requests received per second by the store, averaged over a large time period as used in rebalancing decisions
+# TYPE rebalancing_queriespersecond gauge
+rebalancing_queriespersecond{store="1"} 0.8014446777604269
+# HELP raft_process_applycommitted_latency Latency histogram for applying all committed Raft commands in a Raft ready
+# TYPE raft_process_applycommitted_latency histogram
+raft_process_applycommitted_latency_bucket{store="1",le="59.0"} 4.0
+raft_process_applycommitted_latency_bucket{store="1",le="61.0"} 19.0
+raft_process_applycommitted_latency_bucket{store="1",le="63.0"} 57.0
+raft_process_applycommitted_latency_bucket{store="1",le="67.0"} 261.0
+raft_process_applycommitted_latency_bucket{store="1",le="71.0"} 1674.0
+raft_process_applycommitted_latency_bucket{store="1",le="75.0"} 4513.0
+raft_process_applycommitted_latency_bucket{store="1",le="79.0"} 7653.0
+raft_process_applycommitted_latency_bucket{store="1",le="83.0"} 10075.0
+raft_process_applycommitted_latency_bucket{store="1",le="87.0"} 12079.0
+raft_process_applycommitted_latency_bucket{store="1",le="91.0"} 14825.0
+raft_process_applycommitted_latency_bucket{store="1",le="95.0"} 17083.0
+raft_process_applycommitted_latency_bucket{store="1",le="99.0"} 18993.0
+raft_process_applycommitted_latency_bucket{store="1",le="103.0"} 20504.0
+raft_process_applycommitted_latency_bucket{store="1",le="107.0"} 21540.0
+raft_process_applycommitted_latency_bucket{store="1",le="111.0"} 22621.0
+raft_process_applycommitted_latency_bucket{store="1",le="115.0"} 23464.0
+raft_process_applycommitted_latency_bucket{store="1",le="119.0"} 24266.0
+raft_process_applycommitted_latency_bucket{store="1",le="123.0"} 25183.0
+raft_process_applycommitted_latency_bucket{store="1",le="127.0"} 25896.0
+raft_process_applycommitted_latency_bucket{store="1",le="135.0"} 27600.0
+raft_process_applycommitted_latency_bucket{store="1",le="143.0"} 29871.0
+raft_process_applycommitted_latency_bucket{store="1",le="151.0"} 31645.0
+raft_process_applycommitted_latency_bucket{store="1",le="159.0"} 33100.0
+raft_process_applycommitted_latency_bucket{store="1",le="167.0"} 34182.0
+raft_process_applycommitted_latency_bucket{store="1",le="175.0"} 35102.0
+raft_process_applycommitted_latency_bucket{store="1",le="183.0"} 36118.0
+raft_process_applycommitted_latency_bucket{store="1",le="191.0"} 37125.0
+raft_process_applycommitted_latency_bucket{store="1",le="199.0"} 37989.0
+raft_process_applycommitted_latency_bucket{store="1",le="207.0"} 38819.0
+raft_process_applycommitted_latency_bucket{store="1",le="215.0"} 39480.0
+raft_process_applycommitted_latency_bucket{store="1",le="223.0"} 40029.0
+raft_process_applycommitted_latency_bucket{store="1",le="231.0"} 40456.0
+raft_process_applycommitted_latency_bucket{store="1",le="239.0"} 40788.0
+raft_process_applycommitted_latency_bucket{store="1",le="247.0"} 41080.0
+raft_process_applycommitted_latency_bucket{store="1",le="255.0"} 41298.0
+raft_process_applycommitted_latency_bucket{store="1",le="271.0"} 41598.0
+raft_process_applycommitted_latency_bucket{store="1",le="287.0"} 41781.0
+raft_process_applycommitted_latency_bucket{store="1",le="303.0"} 41898.0
+raft_process_applycommitted_latency_bucket{store="1",le="319.0"} 41964.0
+raft_process_applycommitted_latency_bucket{store="1",le="335.0"} 42029.0
+raft_process_applycommitted_latency_bucket{store="1",le="351.0"} 42086.0
+raft_process_applycommitted_latency_bucket{store="1",le="367.0"} 42128.0
+raft_process_applycommitted_latency_bucket{store="1",le="383.0"} 42159.0
+raft_process_applycommitted_latency_bucket{store="1",le="399.0"} 42182.0
+raft_process_applycommitted_latency_bucket{store="1",le="415.0"} 42212.0
+raft_process_applycommitted_latency_bucket{store="1",le="431.0"} 42231.0
+raft_process_applycommitted_latency_bucket{store="1",le="447.0"} 42255.0
+raft_process_applycommitted_latency_bucket{store="1",le="463.0"} 42274.0
+raft_process_applycommitted_latency_bucket{store="1",le="479.0"} 42284.0
+raft_process_applycommitted_latency_bucket{store="1",le="495.0"} 42299.0
+raft_process_applycommitted_latency_bucket{store="1",le="511.0"} 42308.0
+raft_process_applycommitted_latency_bucket{store="1",le="543.0"} 42324.0
+raft_process_applycommitted_latency_bucket{store="1",le="575.0"} 42335.0
+raft_process_applycommitted_latency_bucket{store="1",le="607.0"} 42347.0
+raft_process_applycommitted_latency_bucket{store="1",le="639.0"} 42353.0
+raft_process_applycommitted_latency_bucket{store="1",le="671.0"} 42361.0
+raft_process_applycommitted_latency_bucket{store="1",le="703.0"} 42365.0
+raft_process_applycommitted_latency_bucket{store="1",le="735.0"} 42369.0
+raft_process_applycommitted_latency_bucket{store="1",le="767.0"} 42375.0
+raft_process_applycommitted_latency_bucket{store="1",le="799.0"} 42381.0
+raft_process_applycommitted_latency_bucket{store="1",le="863.0"} 42386.0
+raft_process_applycommitted_latency_bucket{store="1",le="895.0"} 42390.0
+raft_process_applycommitted_latency_bucket{store="1",le="927.0"} 42397.0
+raft_process_applycommitted_latency_bucket{store="1",le="959.0"} 42405.0
+raft_process_applycommitted_latency_bucket{store="1",le="991.0"} 42412.0
+raft_process_applycommitted_latency_bucket{store="1",le="1023.0"} 42421.0
+raft_process_applycommitted_latency_bucket{store="1",le="1087.0"} 42435.0
+raft_process_applycommitted_latency_bucket{store="1",le="1151.0"} 42442.0
+raft_process_applycommitted_latency_bucket{store="1",le="1215.0"} 42449.0
+raft_process_applycommitted_latency_bucket{store="1",le="1279.0"} 42458.0
+raft_process_applycommitted_latency_bucket{store="1",le="1343.0"} 42461.0
+raft_process_applycommitted_latency_bucket{store="1",le="1407.0"} 42466.0
+raft_process_applycommitted_latency_bucket{store="1",le="1471.0"} 42469.0
+raft_process_applycommitted_latency_bucket{store="1",le="1535.0"} 42472.0
+raft_process_applycommitted_latency_bucket{store="1",le="1599.0"} 42473.0
+raft_process_applycommitted_latency_bucket{store="1",le="1727.0"} 42474.0
+raft_process_applycommitted_latency_bucket{store="1",le="1791.0"} 42486.0
+raft_process_applycommitted_latency_bucket{store="1",le="1855.0"} 42503.0
+raft_process_applycommitted_latency_bucket{store="1",le="1919.0"} 42509.0
+raft_process_applycommitted_latency_bucket{store="1",le="1983.0"} 42514.0
+raft_process_applycommitted_latency_bucket{store="1",le="2047.0"} 42518.0
+raft_process_applycommitted_latency_bucket{store="1",le="2175.0"} 42522.0
+raft_process_applycommitted_latency_bucket{store="1",le="2303.0"} 42526.0
+raft_process_applycommitted_latency_bucket{store="1",le="2431.0"} 42529.0
+raft_process_applycommitted_latency_bucket{store="1",le="2559.0"} 42531.0
+raft_process_applycommitted_latency_bucket{store="1",le="2687.0"} 42533.0
+raft_process_applycommitted_latency_bucket{store="1",le="6911.0"} 42537.0
+raft_process_applycommitted_latency_bucket{store="1",le="7167.0"} 42540.0
+raft_process_applycommitted_latency_bucket{store="1",le="7423.0"} 42548.0
+raft_process_applycommitted_latency_bucket{store="1",le="7679.0"} 42553.0
+raft_process_applycommitted_latency_bucket{store="1",le="7935.0"} 42557.0
+raft_process_applycommitted_latency_bucket{store="1",le="8191.0"} 42562.0
+raft_process_applycommitted_latency_bucket{store="1",le="8703.0"} 42572.0
+raft_process_applycommitted_latency_bucket{store="1",le="9215.0"} 42576.0
+raft_process_applycommitted_latency_bucket{store="1",le="9727.0"} 42583.0
+raft_process_applycommitted_latency_bucket{store="1",le="10239.0"} 42588.0
+raft_process_applycommitted_latency_bucket{store="1",le="10751.0"} 42591.0
+raft_process_applycommitted_latency_bucket{store="1",le="11263.0"} 42594.0
+raft_process_applycommitted_latency_bucket{store="1",le="11775.0"} 42596.0
+raft_process_applycommitted_latency_bucket{store="1",le="12287.0"} 42598.0
+raft_process_applycommitted_latency_bucket{store="1",le="13311.0"} 42600.0
+raft_process_applycommitted_latency_bucket{store="1",le="13823.0"} 42601.0
+raft_process_applycommitted_latency_bucket{store="1",le="14335.0"} 42605.0
+raft_process_applycommitted_latency_bucket{store="1",le="14847.0"} 42608.0
+raft_process_applycommitted_latency_bucket{store="1",le="15359.0"} 42610.0
+raft_process_applycommitted_latency_bucket{store="1",le="15871.0"} 42616.0
+raft_process_applycommitted_latency_bucket{store="1",le="16383.0"} 42620.0
+raft_process_applycommitted_latency_bucket{store="1",le="17407.0"} 42634.0
+raft_process_applycommitted_latency_bucket{store="1",le="18431.0"} 42655.0
+raft_process_applycommitted_latency_bucket{store="1",le="19455.0"} 42678.0
+raft_process_applycommitted_latency_bucket{store="1",le="20479.0"} 42724.0
+raft_process_applycommitted_latency_bucket{store="1",le="21503.0"} 42784.0
+raft_process_applycommitted_latency_bucket{store="1",le="22527.0"} 42869.0
+raft_process_applycommitted_latency_bucket{store="1",le="23551.0"} 42941.0
+raft_process_applycommitted_latency_bucket{store="1",le="24575.0"} 43041.0
+raft_process_applycommitted_latency_bucket{store="1",le="25599.0"} 43163.0
+raft_process_applycommitted_latency_bucket{store="1",le="26623.0"} 43320.0
+raft_process_applycommitted_latency_bucket{store="1",le="27647.0"} 43508.0
+raft_process_applycommitted_latency_bucket{store="1",le="28671.0"} 43746.0
+raft_process_applycommitted_latency_bucket{store="1",le="29695.0"} 44015.0
+raft_process_applycommitted_latency_bucket{store="1",le="30719.0"} 44324.0
+raft_process_applycommitted_latency_bucket{store="1",le="31743.0"} 44711.0
+raft_process_applycommitted_latency_bucket{store="1",le="32767.0"} 45084.0
+raft_process_applycommitted_latency_bucket{store="1",le="34815.0"} 45942.0
+raft_process_applycommitted_latency_bucket{store="1",le="36863.0"} 46940.0
+raft_process_applycommitted_latency_bucket{store="1",le="38911.0"} 47810.0
+raft_process_applycommitted_latency_bucket{store="1",le="40959.0"} 48543.0
+raft_process_applycommitted_latency_bucket{store="1",le="43007.0"} 49172.0
+raft_process_applycommitted_latency_bucket{store="1",le="45055.0"} 49712.0
+raft_process_applycommitted_latency_bucket{store="1",le="47103.0"} 50198.0
+raft_process_applycommitted_latency_bucket{store="1",le="49151.0"} 50691.0
+raft_process_applycommitted_latency_bucket{store="1",le="51199.0"} 51166.0
+raft_process_applycommitted_latency_bucket{store="1",le="53247.0"} 51579.0
+raft_process_applycommitted_latency_bucket{store="1",le="55295.0"} 51966.0
+raft_process_applycommitted_latency_bucket{store="1",le="57343.0"} 52361.0
+raft_process_applycommitted_latency_bucket{store="1",le="59391.0"} 52724.0
+raft_process_applycommitted_latency_bucket{store="1",le="61439.0"} 53065.0
+raft_process_applycommitted_latency_bucket{store="1",le="63487.0"} 53400.0
+raft_process_applycommitted_latency_bucket{store="1",le="65535.0"} 53701.0
+raft_process_applycommitted_latency_bucket{store="1",le="69631.0"} 54333.0
+raft_process_applycommitted_latency_bucket{store="1",le="73727.0"} 54926.0
+raft_process_applycommitted_latency_bucket{store="1",le="77823.0"} 55475.0
+raft_process_applycommitted_latency_bucket{store="1",le="81919.0"} 56020.0
+raft_process_applycommitted_latency_bucket{store="1",le="86015.0"} 56553.0
+raft_process_applycommitted_latency_bucket{store="1",le="90111.0"} 57025.0
+raft_process_applycommitted_latency_bucket{store="1",le="94207.0"} 57449.0
+raft_process_applycommitted_latency_bucket{store="1",le="98303.0"} 57837.0
+raft_process_applycommitted_latency_bucket{store="1",le="102399.0"} 58186.0
+raft_process_applycommitted_latency_bucket{store="1",le="106495.0"} 58530.0
+raft_process_applycommitted_latency_bucket{store="1",le="110591.0"} 58819.0
+raft_process_applycommitted_latency_bucket{store="1",le="114687.0"} 59126.0
+raft_process_applycommitted_latency_bucket{store="1",le="118783.0"} 59396.0
+raft_process_applycommitted_latency_bucket{store="1",le="122879.0"} 59649.0
+raft_process_applycommitted_latency_bucket{store="1",le="126975.0"} 59901.0
+raft_process_applycommitted_latency_bucket{store="1",le="131071.0"} 60181.0
+raft_process_applycommitted_latency_bucket{store="1",le="139263.0"} 60694.0
+raft_process_applycommitted_latency_bucket{store="1",le="147455.0"} 61214.0
+raft_process_applycommitted_latency_bucket{store="1",le="155647.0"} 61746.0
+raft_process_applycommitted_latency_bucket{store="1",le="163839.0"} 62313.0
+raft_process_applycommitted_latency_bucket{store="1",le="172031.0"} 62819.0
+raft_process_applycommitted_latency_bucket{store="1",le="180223.0"} 63287.0
+raft_process_applycommitted_latency_bucket{store="1",le="188415.0"} 63745.0
+raft_process_applycommitted_latency_bucket{store="1",le="196607.0"} 64188.0
+raft_process_applycommitted_latency_bucket{store="1",le="204799.0"} 64599.0
+raft_process_applycommitted_latency_bucket{store="1",le="212991.0"} 65018.0
+raft_process_applycommitted_latency_bucket{store="1",le="221183.0"} 65424.0
+raft_process_applycommitted_latency_bucket{store="1",le="229375.0"} 65764.0
+raft_process_applycommitted_latency_bucket{store="1",le="237567.0"} 66116.0
+raft_process_applycommitted_latency_bucket{store="1",le="245759.0"} 66470.0
+raft_process_applycommitted_latency_bucket{store="1",le="253951.0"} 66796.0
+raft_process_applycommitted_latency_bucket{store="1",le="262143.0"} 67084.0
+raft_process_applycommitted_latency_bucket{store="1",le="278527.0"} 67681.0
+raft_process_applycommitted_latency_bucket{store="1",le="294911.0"} 68244.0
+raft_process_applycommitted_latency_bucket{store="1",le="311295.0"} 68719.0
+raft_process_applycommitted_latency_bucket{store="1",le="327679.0"} 69150.0
+raft_process_applycommitted_latency_bucket{store="1",le="344063.0"} 69558.0
+raft_process_applycommitted_latency_bucket{store="1",le="360447.0"} 69908.0
+raft_process_applycommitted_latency_bucket{store="1",le="376831.0"} 70250.0
+raft_process_applycommitted_latency_bucket{store="1",le="393215.0"} 70600.0
+raft_process_applycommitted_latency_bucket{store="1",le="409599.0"} 70894.0
+raft_process_applycommitted_latency_bucket{store="1",le="425983.0"} 71182.0
+raft_process_applycommitted_latency_bucket{store="1",le="442367.0"} 71428.0
+raft_process_applycommitted_latency_bucket{store="1",le="458751.0"} 71655.0
+raft_process_applycommitted_latency_bucket{store="1",le="475135.0"} 71882.0
+raft_process_applycommitted_latency_bucket{store="1",le="491519.0"} 72080.0
+raft_process_applycommitted_latency_bucket{store="1",le="507903.0"} 72286.0
+raft_process_applycommitted_latency_bucket{store="1",le="524287.0"} 72482.0
+raft_process_applycommitted_latency_bucket{store="1",le="557055.0"} 72854.0
+raft_process_applycommitted_latency_bucket{store="1",le="589823.0"} 73184.0
+raft_process_applycommitted_latency_bucket{store="1",le="622591.0"} 73492.0
+raft_process_applycommitted_latency_bucket{store="1",le="655359.0"} 73791.0
+raft_process_applycommitted_latency_bucket{store="1",le="688127.0"} 74038.0
+raft_process_applycommitted_latency_bucket{store="1",le="720895.0"} 74308.0
+raft_process_applycommitted_latency_bucket{store="1",le="753663.0"} 74528.0
+raft_process_applycommitted_latency_bucket{store="1",le="786431.0"} 74742.0
+raft_process_applycommitted_latency_bucket{store="1",le="819199.0"} 74970.0
+raft_process_applycommitted_latency_bucket{store="1",le="851967.0"} 75213.0
+raft_process_applycommitted_latency_bucket{store="1",le="884735.0"} 75428.0
+raft_process_applycommitted_latency_bucket{store="1",le="917503.0"} 75634.0
+raft_process_applycommitted_latency_bucket{store="1",le="950271.0"} 75848.0
+raft_process_applycommitted_latency_bucket{store="1",le="983039.0"} 76101.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.015807e+06"} 76351.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.048575e+06"} 76569.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.114111e+06"} 76977.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.179647e+06"} 77355.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.245183e+06"} 77726.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.310719e+06"} 78102.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.376255e+06"} 78417.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.441791e+06"} 78707.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.507327e+06"} 78945.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.572863e+06"} 79194.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.638399e+06"} 79448.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.703935e+06"} 79678.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.769471e+06"} 79867.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.835007e+06"} 80072.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.900543e+06"} 80252.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.966079e+06"} 80430.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.031615e+06"} 80607.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.097151e+06"} 80786.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.228223e+06"} 81069.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.359295e+06"} 81293.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.490367e+06"} 81503.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.621439e+06"} 81702.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.752511e+06"} 81864.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.883583e+06"} 82021.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.014655e+06"} 82168.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.145727e+06"} 82302.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.276799e+06"} 82409.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.407871e+06"} 82513.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.538943e+06"} 82615.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.670015e+06"} 82703.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.801087e+06"} 82785.0
+raft_process_applycommitted_latency_bucket{store="1",le="3.932159e+06"} 82869.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.063231e+06"} 82925.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.194303e+06"} 82992.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.456447e+06"} 83101.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.718591e+06"} 83194.0
+raft_process_applycommitted_latency_bucket{store="1",le="4.980735e+06"} 83280.0
+raft_process_applycommitted_latency_bucket{store="1",le="5.242879e+06"} 83342.0
+raft_process_applycommitted_latency_bucket{store="1",le="5.505023e+06"} 83399.0
+raft_process_applycommitted_latency_bucket{store="1",le="5.767167e+06"} 83454.0
+raft_process_applycommitted_latency_bucket{store="1",le="6.029311e+06"} 83489.0
+raft_process_applycommitted_latency_bucket{store="1",le="6.291455e+06"} 83517.0
+raft_process_applycommitted_latency_bucket{store="1",le="6.553599e+06"} 83542.0
+raft_process_applycommitted_latency_bucket{store="1",le="6.815743e+06"} 83569.0
+raft_process_applycommitted_latency_bucket{store="1",le="7.077887e+06"} 83594.0
+raft_process_applycommitted_latency_bucket{store="1",le="7.340031e+06"} 83613.0
+raft_process_applycommitted_latency_bucket{store="1",le="7.602175e+06"} 83635.0
+raft_process_applycommitted_latency_bucket{store="1",le="7.864319e+06"} 83650.0
+raft_process_applycommitted_latency_bucket{store="1",le="8.126463e+06"} 83669.0
+raft_process_applycommitted_latency_bucket{store="1",le="8.388607e+06"} 83677.0
+raft_process_applycommitted_latency_bucket{store="1",le="8.912895e+06"} 83704.0
+raft_process_applycommitted_latency_bucket{store="1",le="9.437183e+06"} 83722.0
+raft_process_applycommitted_latency_bucket{store="1",le="9.961471e+06"} 83729.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.0485759e+07"} 83736.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.1010047e+07"} 83743.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.1534335e+07"} 83749.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.2058623e+07"} 83754.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.2582911e+07"} 83757.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.3107199e+07"} 83759.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.3631487e+07"} 83763.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.4155775e+07"} 83764.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.4680063e+07"} 83766.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.5204351e+07"} 83770.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.6252927e+07"} 83772.0
+raft_process_applycommitted_latency_bucket{store="1",le="1.9922943e+07"} 83773.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.0971519e+07"} 83774.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.5165823e+07"} 83775.0
+raft_process_applycommitted_latency_bucket{store="1",le="2.6214399e+07"} 83776.0
+raft_process_applycommitted_latency_bucket{store="1",le="+Inf"} 83776.0
+raft_process_applycommitted_latency_sum{store="1"} 2.5535355126e+10
+raft_process_applycommitted_latency_count{store="1"} 83776.0
+# HELP queue_replicate_processingnanos Nanoseconds spent processing replicas in the replicate queue
+# TYPE queue_replicate_processingnanos counter
+queue_replicate_processingnanos{store="1"} 0.0
+# HELP changefeed_poll_request_nanos Time spent fetching changes
+# TYPE changefeed_poll_request_nanos histogram
+changefeed_poll_request_nanos_bucket{le="+Inf"} 0.0
+changefeed_poll_request_nanos_sum 0.0
+changefeed_poll_request_nanos_count 0.0
+# HELP txnrecovery_attempts_pending Number of transaction recovery attempts currently in-flight
+# TYPE txnrecovery_attempts_pending gauge
+txnrecovery_attempts_pending{store="1"} 0.0
+# HELP requests_slow_lease Number of requests that have been stuck for a long time acquiring a lease
+# TYPE requests_slow_lease gauge
+requests_slow_lease{store="1"} 0.0
+# HELP sql_savepoint_started_count Number of SQL SAVEPOINT statements started
+# TYPE sql_savepoint_started_count counter
+sql_savepoint_started_count 0.0
+# HELP replicas_leaders Number of raft leaders
+# TYPE replicas_leaders gauge
+replicas_leaders{store="1"} 7.0
+# HELP raftlog_truncated Number of Raft log entries truncated
+# TYPE raftlog_truncated counter
+raftlog_truncated{store="1"} 19128.0
+# HELP queue_raftsnapshot_process_success Number of replicas successfully processed by the Raft repair queue
+# TYPE queue_raftsnapshot_process_success counter
+queue_raftsnapshot_process_success{store="1"} 0.0
+# HELP queue_tsmaintenance_process_success Number of replicas successfully processed by the time series maintenance queue
+# TYPE queue_tsmaintenance_process_success counter
+queue_tsmaintenance_process_success{store="1"} 0.0
+# HELP requests_slow_raft Number of requests that have been stuck for a long time in raft
+# TYPE requests_slow_raft gauge
+requests_slow_raft{store="1"} 0.0
+# HELP addsstable_delay_total Amount by which evaluation of AddSSTable requests was delayed
+# TYPE addsstable_delay_total counter
+addsstable_delay_total{store="1"} 0.0
+# HELP sys_cpu_sys_percent Current system cpu percentage
+# TYPE sys_cpu_sys_percent gauge
+sys_cpu_sys_percent 0.014030189881984257
+# HELP queue_replicate_rebalancereplica Number of replica rebalancer-initiated additions attempted by the replicate queue
+# TYPE queue_replicate_rebalancereplica counter
+queue_replicate_rebalancereplica{store="1"} 0.0
+# HELP gossip_bytes_received Number of received gossip bytes
+# TYPE gossip_bytes_received counter
+gossip_bytes_received 1817.0
+# HELP distsender_rpc_sent_local Number of local RPCs sent
+# TYPE distsender_rpc_sent_local counter
+distsender_rpc_sent_local 4533.0
+# HELP sys_host_net_recv_packets Packets received on all network interfaces since this process started
+# TYPE sys_host_net_recv_packets gauge
+sys_host_net_recv_packets 593876.0
+# HELP changefeed_processing_nanos Time spent processing KV changes into SQL rows
+# TYPE changefeed_processing_nanos counter
+changefeed_processing_nanos 0.0
+# HELP sql_mem_distsql_current Current sql statement memory usage for distsql
+# TYPE sql_mem_distsql_current gauge
+sql_mem_distsql_current 0.0
+# HELP leases_error Number of failed lease requests
+# TYPE leases_error counter
+leases_error{store="1"} 0.0
+# HELP capacity Total storage capacity
+# TYPE capacity gauge
+capacity{store="1"} 6.4202351837184e+13
+# HELP rpc_heartbeats_loops_exited Counter of the number of connection heartbeat loops which have exited with an error
+# TYPE rpc_heartbeats_loops_exited counter
+rpc_heartbeats_loops_exited 0.0
+# HELP sys_host_disk_iopsinprogress IO operations currently in progress on this host
+# TYPE sys_host_disk_iopsinprogress gauge
+sys_host_disk_iopsinprogress 0.0
+# HELP ranges_overreplicated Number of ranges with more live replicas than the replication target
+# TYPE ranges_overreplicated gauge
+ranges_overreplicated{store="1"} 0.0
+# HELP intents_poison_attempts Count of (point or range) poisoning intent abort evaluation attempts
+# TYPE intents_poison_attempts counter
+intents_poison_attempts{store="1"} 0.0
+# HELP sys_goroutines Current number of goroutines
+# TYPE sys_goroutines gauge
+sys_goroutines 235.0
+# HELP raft_enqueued_pending Number of pending outgoing messages in the Raft Transport queue
+# TYPE raft_enqueued_pending gauge
+raft_enqueued_pending{store="1"} 0.0
+# HELP sql_txn_begin_started_count_internal Number of SQL transaction BEGIN statements started (internal queries)
+# TYPE sql_txn_begin_started_count_internal counter
+sql_txn_begin_started_count_internal 0.0
+# HELP txn_commits Number of committed KV transactions (including 1PC)
+# TYPE txn_commits counter
+txn_commits 7472.0
+# HELP liveness_heartbeatsuccesses Number of successful node liveness heartbeats from this node
+# TYPE liveness_heartbeatsuccesses counter
+liveness_heartbeatsuccesses 2720.0
+# HELP sys_cgo_allocbytes Current bytes of memory allocated by cgo
+# TYPE sys_cgo_allocbytes gauge
+sys_cgo_allocbytes 6.3363512e+07
+# HELP sql_distsql_flows_queue_wait Duration of time flows spend waiting in the queue
+# TYPE sql_distsql_flows_queue_wait histogram
+sql_distsql_flows_queue_wait_bucket{le="+Inf"} 0.0
+sql_distsql_flows_queue_wait_sum 0.0
+sql_distsql_flows_queue_wait_count 0.0
+# HELP txnrecovery_failures Number of transaction recovery attempts that failed
+# TYPE txnrecovery_failures counter
+txnrecovery_failures{store="1"} 0.0
+# HELP rpc_heartbeats_nominal Gauge of current connections in the nominal state
+# TYPE rpc_heartbeats_nominal gauge
+rpc_heartbeats_nominal 7.0
+# HELP sys_host_disk_write_count Disk write operations across all disks since this process started
+# TYPE sys_host_disk_write_count gauge
+sys_host_disk_write_count 106.0
+# HELP sys_host_disk_write_time Time spent writing to all disks since this process started
+# TYPE sys_host_disk_write_time gauge
+sys_host_disk_write_time 1.02e+08
+# HELP ranges_underreplicated Number of ranges with fewer live replicas than the replication target
+# TYPE ranges_underreplicated gauge
+ranges_underreplicated{store="1"} 0.0
+# HELP rocksdb_num_sstables Number of rocksdb SSTables
+# TYPE rocksdb_num_sstables gauge
+rocksdb_num_sstables{store="1"} 8.0
+# HELP raft_commandsapplied Count of Raft commands applied
+# TYPE raft_commandsapplied counter
+raft_commandsapplied{store="1"} 0.0
+# HELP raftlog_behind Number of Raft log entries followers on other stores are behind
+# TYPE raftlog_behind gauge
+raftlog_behind{store="1"} 0.0
+# HELP queue_tsmaintenance_pending Number of pending replicas in the time series maintenance queue
+# TYPE queue_tsmaintenance_pending gauge
+queue_tsmaintenance_pending{store="1"} 0.0
+# HELP sql_mem_admin_txn_max Memory usage per sql transaction for admin
+# TYPE sql_mem_admin_txn_max histogram
+sql_mem_admin_txn_max_bucket{le="+Inf"} 0.0
+sql_mem_admin_txn_max_sum 0.0
+sql_mem_admin_txn_max_count 0.0
+# HELP txnwaitqueue_pusher_slow The total number of cases where a pusher waited more than the excessive wait threshold
+# TYPE txnwaitqueue_pusher_slow gauge
+txnwaitqueue_pusher_slow{store="1"} 0.0
+# HELP compactor_compactingnanos Number of nanoseconds spent compacting ranges
+# TYPE compactor_compactingnanos counter
+compactor_compactingnanos{store="1"} 0.0
+# HELP rebalancing_lease_transfers Number of lease transfers motivated by store-level load imbalances
+# TYPE rebalancing_lease_transfers counter
+rebalancing_lease_transfers{store="1"} 0.0
+# HELP requests_slow_latch Number of requests that have been stuck for a long time acquiring latches
+# TYPE requests_slow_latch gauge
+requests_slow_latch{store="1"} 0.0
+# HELP keycount Count of all keys
+# TYPE keycount gauge
+keycount{store="1"} 119307.0
+# HELP addsstable_delay_enginebackpressure Amount by which evaluation of AddSSTable requests was delayed by storage-engine backpressure
+# TYPE addsstable_delay_enginebackpressure counter
+addsstable_delay_enginebackpressure{store="1"} 0.0
+# HELP tscache_skl_write_pages Number of pages in the write timestamp cache
+# TYPE tscache_skl_write_pages gauge
+tscache_skl_write_pages{store="1"} 1.0
+# HELP sql_query_started_count Number of SQL queries started
+# TYPE sql_query_started_count counter
+sql_query_started_count 0.0
+# HELP sys_gc_count Total number of GC runs
+# TYPE sys_gc_count gauge
+sys_gc_count 279.0
+# HELP sys_host_disk_read_time Time spent reading from all disks since this process started
+# TYPE sys_host_disk_read_time gauge
+sys_host_disk_read_time 4.7e+08
+# HELP sql_mem_distsql_max Memory usage per sql statement for distsql
+# TYPE sql_mem_distsql_max histogram
+sql_mem_distsql_max_bucket{le="4011.0"} 86.0
+sql_mem_distsql_max_bucket{le="4311.0"} 107.0
+sql_mem_distsql_max_bucket{le="4615.0"} 126.0
+sql_mem_distsql_max_bucket{le="4967.0"} 127.0
+sql_mem_distsql_max_bucket{le="5503.0"} 129.0
+sql_mem_distsql_max_bucket{le="5803.0"} 130.0
+sql_mem_distsql_max_bucket{le="5831.0"} 132.0
+sql_mem_distsql_max_bucket{le="6127.0"} 133.0
+sql_mem_distsql_max_bucket{le="6423.0"} 134.0
+sql_mem_distsql_max_bucket{le="6431.0"} 135.0
+sql_mem_distsql_max_bucket{le="6727.0"} 136.0
+sql_mem_distsql_max_bucket{le="+Inf"} 136.0
+sql_mem_distsql_max_sum 582308.0
+sql_mem_distsql_max_count 136.0
+# HELP sql_new_conns Counter of the number of sql connections created
+# TYPE sql_new_conns counter
+sql_new_conns 0.0
+# HELP sql_optimizer_plan_cache_misses Number of non-prepared statements for which a cached plan was not used
+# TYPE sql_optimizer_plan_cache_misses counter
+sql_optimizer_plan_cache_misses 0.0
+# HELP raft_rcvd_vote Number of MsgVote messages received by this store
+# TYPE raft_rcvd_vote counter
+raft_rcvd_vote{store="1"} 31.0
+# HELP addsstable_applications Number of SSTable ingestions applied (i.e. applied by Replicas)
+# TYPE addsstable_applications counter
+addsstable_applications{store="1"} 0.0
+# HELP sql_mem_bulk_max Memory usage per sql statement for bulk operations
+# TYPE sql_mem_bulk_max histogram
+sql_mem_bulk_max_bucket{le="+Inf"} 0.0
+sql_mem_bulk_max_sum 0.0
+sql_mem_bulk_max_count 0.0
+# HELP sql_select_started_count Number of SQL SELECT statements started
+# TYPE sql_select_started_count counter
+sql_select_started_count 0.0
+# HELP sql_misc_count Number of other SQL statements successfully executed
+# TYPE sql_misc_count counter
+sql_misc_count 0.0
+# HELP sql_delete_count_internal Number of SQL DELETE statements successfully executed (internal queries)
+# TYPE sql_delete_count_internal counter
+sql_delete_count_internal 505.0
+# HELP sql_savepoint_count_internal Number of SQL SAVEPOINT statements successfully executed (internal queries)
+# TYPE sql_savepoint_count_internal counter
+sql_savepoint_count_internal 0.0
+# HELP ranges_unavailable Number of ranges with fewer live replicas than needed for quorum
+# TYPE ranges_unavailable gauge
+ranges_unavailable{store="1"} 0.0
+# HELP capacity_available Available storage capacity
+# TYPE capacity_available gauge
+capacity_available{store="1"} 4.0402062147584e+13
+# HELP queue_gc_info_transactionspangcstaging Number of GC'able entries corresponding to staging txns
+# TYPE queue_gc_info_transactionspangcstaging counter
+queue_gc_info_transactionspangcstaging{store="1"} 0.0
+# HELP txn_restarts_txnpush Number of restarts due to a transaction push failure
+# TYPE txn_restarts_txnpush counter
+txn_restarts_txnpush 0.0
+# HELP rocksdb_block_cache_misses Count of block cache misses
+# TYPE rocksdb_block_cache_misses gauge
+rocksdb_block_cache_misses{store="1"} 8129.0
+# HELP addsstable_copies number of SSTable ingestions that required copying files during application
+# TYPE addsstable_copies counter
+addsstable_copies{store="1"} 0.0
+# HELP txnwaitqueue_pushee_waiting Number of pushees on the txn wait queue
+# TYPE txnwaitqueue_pushee_waiting gauge
+txnwaitqueue_pushee_waiting{store="1"} 0.0
+# HELP sql_mem_sql_txn_current Current sql transaction memory usage for sql
+# TYPE sql_mem_sql_txn_current gauge
+sql_mem_sql_txn_current 0.0
+# HELP sql_insert_count Number of SQL INSERT statements successfully executed
+# TYPE sql_insert_count counter
+sql_insert_count 0.0
+# HELP sql_txn_abort_count Number of SQL transaction abort errors
+# TYPE sql_txn_abort_count counter
+sql_txn_abort_count 0.0
+# HELP intentage Cumulative age of intents
+# TYPE intentage gauge
+intentage{store="1"} -16.0
+# HELP range_merges Number of range merges
+# TYPE range_merges counter
+range_merges{store="1"} 0.0
+# HELP queue_gc_info_abortspangcnum Number of AbortSpan entries fit for removal
+# TYPE queue_gc_info_abortspangcnum counter
+queue_gc_info_abortspangcnum{store="1"} 1.0
+# HELP gossip_infos_sent Number of sent gossip Info objects
+# TYPE gossip_infos_sent counter
+gossip_infos_sent 30.0
+# HELP sql_update_count Number of SQL UPDATE statements successfully executed
+# TYPE sql_update_count counter
+sql_update_count 0.0
+# HELP sql_txn_rollback_count_internal Number of SQL transaction ROLLBACK statements successfully executed (internal queries)
+# TYPE sql_txn_rollback_count_internal counter
+sql_txn_rollback_count_internal 0.0
+# HELP raft_process_workingnanos Nanoseconds spent in store.processRaft() working
+# TYPE raft_process_workingnanos counter
+raft_process_workingnanos{store="1"} 2.4058409967e+10
+# HELP queue_merge_pending Number of pending replicas in the merge queue
+# TYPE queue_merge_pending gauge
+queue_merge_pending{store="1"} 0.0
+# HELP txnwaitqueue_pusher_wait_time Histogram of durations spent in queue by pushers
+# TYPE txnwaitqueue_pusher_wait_time histogram
+txnwaitqueue_pusher_wait_time_bucket{store="1",le="1.769471e+06"} 1.0
+txnwaitqueue_pusher_wait_time_bucket{store="1",le="4.980735e+06"} 2.0
+txnwaitqueue_pusher_wait_time_bucket{store="1",le="+Inf"} 2.0
+txnwaitqueue_pusher_wait_time_sum{store="1"} 6.750206e+06
+txnwaitqueue_pusher_wait_time_count{store="1"} 2.0
+# HELP sys_cpu_combined_percent_normalized Current user+system cpu percentage, normalized 0-1 by number of cores
+# TYPE sys_cpu_combined_percent_normalized gauge
+sys_cpu_combined_percent_normalized 0.008518329571204727
+# HELP sql_mem_internal_max Memory usage per sql statement for internal
+# TYPE sql_mem_internal_max histogram
+sql_mem_internal_max_bucket{le="4011.0"} 2581.0
+sql_mem_internal_max_bucket{le="4311.0"} 2616.0
+sql_mem_internal_max_bucket{le="4487.0"} 2617.0
+sql_mem_internal_max_bucket{le="4855.0"} 2636.0
+sql_mem_internal_max_bucket{le="4967.0"} 2637.0
+sql_mem_internal_max_bucket{le="+Inf"} 2637.0
+sql_mem_internal_max_sum 1.0604975e+07
+sql_mem_internal_max_count 2637.0
+# HELP sql_mem_admin_max Memory usage per sql statement for admin
+# TYPE sql_mem_admin_max histogram
+sql_mem_admin_max_bucket{le="+Inf"} 0.0
+sql_mem_admin_max_sum 0.0
+sql_mem_admin_max_count 0.0
+# HELP sql_txn_rollback_started_count Number of SQL transaction ROLLBACK statements started
+# TYPE sql_txn_rollback_started_count counter
+sql_txn_rollback_started_count 0.0
+# HELP sql_insert_count_internal Number of SQL INSERT statements successfully executed (internal queries)
+# TYPE sql_insert_count_internal counter
+sql_insert_count_internal 516.0
+# HELP sql_distsql_service_latency_internal Latency of DistSQL request execution (internal queries)
+# TYPE sql_distsql_service_latency_internal histogram
+sql_distsql_service_latency_internal_bucket{le="2.883583e+06"} 2.0
+sql_distsql_service_latency_internal_bucket{le="3.407871e+06"} 4.0
+sql_distsql_service_latency_internal_bucket{le="3.538943e+06"} 5.0
+sql_distsql_service_latency_internal_bucket{le="3.670015e+06"} 9.0
+sql_distsql_service_latency_internal_bucket{le="3.801087e+06"} 13.0
+sql_distsql_service_latency_internal_bucket{le="3.932159e+06"} 20.0
+sql_distsql_service_latency_internal_bucket{le="4.063231e+06"} 30.0
+sql_distsql_service_latency_internal_bucket{le="4.194303e+06"} 43.0
+sql_distsql_service_latency_internal_bucket{le="4.456447e+06"} 73.0
+sql_distsql_service_latency_internal_bucket{le="4.718591e+06"} 108.0
+sql_distsql_service_latency_internal_bucket{le="4.980735e+06"} 176.0
+sql_distsql_service_latency_internal_bucket{le="5.242879e+06"} 242.0
+sql_distsql_service_latency_internal_bucket{le="5.505023e+06"} 289.0
+sql_distsql_service_latency_internal_bucket{le="5.767167e+06"} 334.0
+sql_distsql_service_latency_internal_bucket{le="6.029311e+06"} 398.0
+sql_distsql_service_latency_internal_bucket{le="6.291455e+06"} 459.0
+sql_distsql_service_latency_internal_bucket{le="6.553599e+06"} 519.0
+sql_distsql_service_latency_internal_bucket{le="6.815743e+06"} 581.0
+sql_distsql_service_latency_internal_bucket{le="7.077887e+06"} 639.0
+sql_distsql_service_latency_internal_bucket{le="7.340031e+06"} 695.0
+sql_distsql_service_latency_internal_bucket{le="7.602175e+06"} 747.0
+sql_distsql_service_latency_internal_bucket{le="7.864319e+06"} 785.0
+sql_distsql_service_latency_internal_bucket{le="8.126463e+06"} 828.0
+sql_distsql_service_latency_internal_bucket{le="8.388607e+06"} 885.0
+sql_distsql_service_latency_internal_bucket{le="8.912895e+06"} 971.0
+sql_distsql_service_latency_internal_bucket{le="9.437183e+06"} 1037.0
+sql_distsql_service_latency_internal_bucket{le="9.961471e+06"} 1109.0
+sql_distsql_service_latency_internal_bucket{le="1.0485759e+07"} 1192.0
+sql_distsql_service_latency_internal_bucket{le="1.1010047e+07"} 1245.0
+sql_distsql_service_latency_internal_bucket{le="1.1534335e+07"} 1293.0
+sql_distsql_service_latency_internal_bucket{le="1.2058623e+07"} 1335.0
+sql_distsql_service_latency_internal_bucket{le="1.2582911e+07"} 1368.0
+sql_distsql_service_latency_internal_bucket{le="1.3107199e+07"} 1397.0
+sql_distsql_service_latency_internal_bucket{le="1.3631487e+07"} 1425.0
+sql_distsql_service_latency_internal_bucket{le="1.4155775e+07"} 1454.0
+sql_distsql_service_latency_internal_bucket{le="1.4680063e+07"} 1468.0
+sql_distsql_service_latency_internal_bucket{le="1.5204351e+07"} 1482.0
+sql_distsql_service_latency_internal_bucket{le="1.5728639e+07"} 1490.0
+sql_distsql_service_latency_internal_bucket{le="1.6252927e+07"} 1503.0
+sql_distsql_service_latency_internal_bucket{le="1.6777215e+07"} 1509.0
+sql_distsql_service_latency_internal_bucket{le="1.7825791e+07"} 1523.0
+sql_distsql_service_latency_internal_bucket{le="1.8874367e+07"} 1531.0
+sql_distsql_service_latency_internal_bucket{le="1.9922943e+07"} 1542.0
+sql_distsql_service_latency_internal_bucket{le="2.0971519e+07"} 1553.0
+sql_distsql_service_latency_internal_bucket{le="2.2020095e+07"} 1561.0
+sql_distsql_service_latency_internal_bucket{le="2.3068671e+07"} 1563.0
+sql_distsql_service_latency_internal_bucket{le="2.4117247e+07"} 1565.0
+sql_distsql_service_latency_internal_bucket{le="2.5165823e+07"} 1568.0
+sql_distsql_service_latency_internal_bucket{le="2.6214399e+07"} 1569.0
+sql_distsql_service_latency_internal_bucket{le="2.7262975e+07"} 1572.0
+sql_distsql_service_latency_internal_bucket{le="2.8311551e+07"} 1575.0
+sql_distsql_service_latency_internal_bucket{le="2.9360127e+07"} 1576.0
+sql_distsql_service_latency_internal_bucket{le="3.5651583e+07"} 1577.0
+sql_distsql_service_latency_internal_bucket{le="4.6137343e+07"} 1579.0
+sql_distsql_service_latency_internal_bucket{le="7.1303167e+07"} 1580.0
+sql_distsql_service_latency_internal_bucket{le="1.42606335e+08"} 1581.0
+sql_distsql_service_latency_internal_bucket{le="1.040187391e+09"} 1582.0
+sql_distsql_service_latency_internal_bucket{le="1.0200547327e+10"} 1583.0
+sql_distsql_service_latency_internal_bucket{le="+Inf"} 1583.0
+sql_distsql_service_latency_internal_sum 2.5664813521e+10
+sql_distsql_service_latency_internal_count 1583.0
+# HELP liveness_epochincrements Number of times this node has incremented its liveness epoch
+# TYPE liveness_epochincrements counter
+liveness_epochincrements 0.0
+# HELP distsender_rangelookups Number of range lookups.
+# TYPE distsender_rangelookups counter
+distsender_rangelookups 11.0
+# HELP sys_fd_softlimit Process open FD soft limit
+# TYPE sys_fd_softlimit gauge
+sys_fd_softlimit 1.048576e+06
+# HELP sys_host_disk_weightedio_time Weighted time spent reading from or writing to all disks since this process started
+# TYPE sys_host_disk_weightedio_time gauge
+sys_host_disk_weightedio_time 5.89e+08
+# HELP sql_delete_count Number of SQL DELETE statements successfully executed
+# TYPE sql_delete_count counter
+sql_delete_count 0.0
+# HELP sql_distsql_service_latency Latency of DistSQL request execution
+# TYPE sql_distsql_service_latency histogram
+sql_distsql_service_latency_bucket{le="+Inf"} 0.0
+sql_distsql_service_latency_sum 0.0
+sql_distsql_service_latency_count 0.0
+# HELP sql_delete_started_count_internal Number of SQL DELETE statements started (internal queries)
+# TYPE sql_delete_started_count_internal counter
+sql_delete_started_count_internal 505.0
+# HELP sql_restart_savepoint_count_internal Number of `SAVEPOINT cockroach_restart` statements successfully executed (internal queries)
+# TYPE sql_restart_savepoint_count_internal counter
+sql_restart_savepoint_count_internal 0.0
+# HELP rpc_heartbeats_failed Gauge of current connections in the failed state
+# TYPE rpc_heartbeats_failed gauge
+rpc_heartbeats_failed 0.0
+# HELP rocksdb_encryption_algorithm algorithm in use for encryption-at-rest, see ccl/storageccl/engineccl/enginepbccl/key_registry.proto
+# TYPE rocksdb_encryption_algorithm gauge
+rocksdb_encryption_algorithm{store="1"} 0.0
+# HELP queue_gc_info_pushtxn Number of attempted pushes
+# TYPE queue_gc_info_pushtxn counter
+queue_gc_info_pushtxn{store="1"} 0.0
+# HELP livecount Count of live keys
+# TYPE livecount gauge
+livecount{store="1"} 116757.0
+# HELP raft_entrycache_bytes Aggregate size of all Raft entries in the Raft entry cache
+# TYPE raft_entrycache_bytes gauge
+raft_entrycache_bytes{store="1"} 115690.0
+# HELP queue_replicagc_removereplica Number of replica removals attempted by the replica gc queue
+# TYPE queue_replicagc_removereplica counter
+queue_replicagc_removereplica{store="1"} 0.0
+# HELP sys_cgo_totalbytes Total bytes of memory allocated by cgo, but not released
+# TYPE sys_cgo_totalbytes gauge
+sys_cgo_totalbytes 8.1698816e+07
+# HELP sql_conns Number of active sql connections
+# TYPE sql_conns gauge
+sql_conns 0.0
+# HELP sql_mem_conns_max Memory usage per sql statement for conns
+# TYPE sql_mem_conns_max histogram
+sql_mem_conns_max_bucket{le="+Inf"} 0.0
+sql_mem_conns_max_sum 0.0
+sql_mem_conns_max_count 0.0
+# HELP sql_delete_started_count Number of SQL DELETE statements started
+# TYPE sql_delete_started_count counter
+sql_delete_started_count 0.0
+# HELP sql_failure_count Number of statements resulting in a planning or runtime error
+# TYPE sql_failure_count counter
+sql_failure_count 0.0
+# HELP node_id node ID with labels for advertised RPC and HTTP addresses
+# TYPE node_id gauge
+node_id{advertise_addr="roach1:26257",http_addr="roach1:8080",sql_addr="roach1:26257"} 1.0
+# HELP valcount Count of all values
+# TYPE valcount gauge
+valcount{store="1"} 124081.0
+# HELP range_snapshots_generated Number of generated snapshots
+# TYPE range_snapshots_generated counter
+range_snapshots_generated{store="1"} 0.0
+# HELP sys_host_net_send_bytes Bytes sent on all network interfaces since this process started
+# TYPE sys_host_net_send_bytes gauge
+sys_host_net_send_bytes 4.61746036e+08
+# HELP sql_insert_started_count_internal Number of SQL INSERT statements started (internal queries)
+# TYPE sql_insert_started_count_internal counter
+sql_insert_started_count_internal 516.0
+# HELP sql_service_latency_internal Latency of SQL request execution (internal queries)
+# TYPE sql_service_latency_internal histogram
+sql_service_latency_internal_bucket{le="2.752511e+06"} 1.0
+sql_service_latency_internal_bucket{le="2.883583e+06"} 4.0
+sql_service_latency_internal_bucket{le="3.014655e+06"} 6.0
+sql_service_latency_internal_bucket{le="3.145727e+06"} 8.0
+sql_service_latency_internal_bucket{le="3.276799e+06"} 15.0
+sql_service_latency_internal_bucket{le="3.407871e+06"} 24.0
+sql_service_latency_internal_bucket{le="3.538943e+06"} 31.0
+sql_service_latency_internal_bucket{le="3.670015e+06"} 45.0
+sql_service_latency_internal_bucket{le="3.801087e+06"} 59.0
+sql_service_latency_internal_bucket{le="3.932159e+06"} 76.0
+sql_service_latency_internal_bucket{le="4.063231e+06"} 103.0
+sql_service_latency_internal_bucket{le="4.194303e+06"} 127.0
+sql_service_latency_internal_bucket{le="4.456447e+06"} 190.0
+sql_service_latency_internal_bucket{le="4.718591e+06"} 249.0
+sql_service_latency_internal_bucket{le="4.980735e+06"} 342.0
+sql_service_latency_internal_bucket{le="5.242879e+06"} 438.0
+sql_service_latency_internal_bucket{le="5.505023e+06"} 520.0
+sql_service_latency_internal_bucket{le="5.767167e+06"} 585.0
+sql_service_latency_internal_bucket{le="6.029311e+06"} 685.0
+sql_service_latency_internal_bucket{le="6.291455e+06"} 777.0
+sql_service_latency_internal_bucket{le="6.553599e+06"} 866.0
+sql_service_latency_internal_bucket{le="6.815743e+06"} 955.0
+sql_service_latency_internal_bucket{le="7.077887e+06"} 1036.0
+sql_service_latency_internal_bucket{le="7.340031e+06"} 1116.0
+sql_service_latency_internal_bucket{le="7.602175e+06"} 1188.0
+sql_service_latency_internal_bucket{le="7.864319e+06"} 1246.0
+sql_service_latency_internal_bucket{le="8.126463e+06"} 1310.0
+sql_service_latency_internal_bucket{le="8.388607e+06"} 1380.0
+sql_service_latency_internal_bucket{le="8.912895e+06"} 1497.0
+sql_service_latency_internal_bucket{le="9.437183e+06"} 1593.0
+sql_service_latency_internal_bucket{le="9.961471e+06"} 1686.0
+sql_service_latency_internal_bucket{le="1.0485759e+07"} 1792.0
+sql_service_latency_internal_bucket{le="1.1010047e+07"} 1865.0
+sql_service_latency_internal_bucket{le="1.1534335e+07"} 1931.0
+sql_service_latency_internal_bucket{le="1.2058623e+07"} 1998.0
+sql_service_latency_internal_bucket{le="1.2582911e+07"} 2057.0
+sql_service_latency_internal_bucket{le="1.3107199e+07"} 2116.0
+sql_service_latency_internal_bucket{le="1.3631487e+07"} 2172.0
+sql_service_latency_internal_bucket{le="1.4155775e+07"} 2228.0
+sql_service_latency_internal_bucket{le="1.4680063e+07"} 2279.0
+sql_service_latency_internal_bucket{le="1.5204351e+07"} 2315.0
+sql_service_latency_internal_bucket{le="1.5728639e+07"} 2353.0
+sql_service_latency_internal_bucket{le="1.6252927e+07"} 2386.0
+sql_service_latency_internal_bucket{le="1.6777215e+07"} 2415.0
+sql_service_latency_internal_bucket{le="1.7825791e+07"} 2465.0
+sql_service_latency_internal_bucket{le="1.8874367e+07"} 2501.0
+sql_service_latency_internal_bucket{le="1.9922943e+07"} 2525.0
+sql_service_latency_internal_bucket{le="2.0971519e+07"} 2546.0
+sql_service_latency_internal_bucket{le="2.2020095e+07"} 2563.0
+sql_service_latency_internal_bucket{le="2.3068671e+07"} 2581.0
+sql_service_latency_internal_bucket{le="2.4117247e+07"} 2592.0
+sql_service_latency_internal_bucket{le="2.5165823e+07"} 2603.0
+sql_service_latency_internal_bucket{le="2.6214399e+07"} 2614.0
+sql_service_latency_internal_bucket{le="2.7262975e+07"} 2619.0
+sql_service_latency_internal_bucket{le="2.8311551e+07"} 2625.0
+sql_service_latency_internal_bucket{le="2.9360127e+07"} 2629.0
+sql_service_latency_internal_bucket{le="3.0408703e+07"} 2632.0
+sql_service_latency_internal_bucket{le="3.5651583e+07"} 2633.0
+sql_service_latency_internal_bucket{le="3.7748735e+07"} 2634.0
+sql_service_latency_internal_bucket{le="3.9845887e+07"} 2636.0
+sql_service_latency_internal_bucket{le="4.1943039e+07"} 2639.0
+sql_service_latency_internal_bucket{le="4.4040191e+07"} 2640.0
+sql_service_latency_internal_bucket{le="4.6137343e+07"} 2644.0
+sql_service_latency_internal_bucket{le="4.8234495e+07"} 2646.0
+sql_service_latency_internal_bucket{le="5.0331647e+07"} 2647.0
+sql_service_latency_internal_bucket{le="5.2428799e+07"} 2648.0
+sql_service_latency_internal_bucket{le="7.1303167e+07"} 2649.0
+sql_service_latency_internal_bucket{le="1.25829119e+08"} 2650.0
+sql_service_latency_internal_bucket{le="1.42606335e+08"} 2651.0
+sql_service_latency_internal_bucket{le="2.18103807e+08"} 2652.0
+sql_service_latency_internal_bucket{le="2.26492415e+08"} 2653.0
+sql_service_latency_internal_bucket{le="5.20093695e+08"} 2654.0
+sql_service_latency_internal_bucket{le="1.040187391e+09"} 2655.0
+sql_service_latency_internal_bucket{le="1.0200547327e+10"} 2656.0
+sql_service_latency_internal_bucket{le="+Inf"} 2656.0
+sql_service_latency_internal_sum 3.8702937504e+10
+sql_service_latency_internal_count 2656.0
+# HELP rocksdb_block_cache_usage Bytes used by the block cache
+# TYPE rocksdb_block_cache_usage gauge
+rocksdb_block_cache_usage{store="1"} 3.9397184e+07
+# HELP compactor_compactions_failure Number of failed compaction requests sent to the storage engine
+# TYPE compactor_compactions_failure counter
+compactor_compactions_failure{store="1"} 0.0
+# HELP sql_txn_begin_count Number of SQL transaction BEGIN statements successfully executed
+# TYPE sql_txn_begin_count counter
+sql_txn_begin_count 0.0
+# HELP sql_txn_commit_started_count Number of SQL transaction COMMIT statements started
+# TYPE sql_txn_commit_started_count counter
+sql_txn_commit_started_count 0.0
+# HELP range_snapshots_learner_applied Number of applied learner snapshots
+# TYPE range_snapshots_learner_applied counter
+range_snapshots_learner_applied{store="1"} 0.0
+# HELP raft_rcvd_heartbeat Number of (coalesced, if enabled) MsgHeartbeat messages received by this store
+# TYPE raft_rcvd_heartbeat counter
+raft_rcvd_heartbeat{store="1"} 9077.0
+# HELP queue_replicate_process_failure Number of replicas which failed processing in the replicate queue
+# TYPE queue_replicate_process_failure counter
+queue_replicate_process_failure{store="1"} 0.0
+# HELP txn_restarts_writetoooldmulti Number of restarts due to multiple concurrent writers committing first
+# TYPE txn_restarts_writetoooldmulti counter
+txn_restarts_writetoooldmulti 0.0
+# HELP sql_savepoint_count Number of SQL SAVEPOINT statements successfully executed
+# TYPE sql_savepoint_count counter
+sql_savepoint_count 0.0
+# HELP sql_update_started_count_internal Number of SQL UPDATE statements started (internal queries)
+# TYPE sql_update_started_count_internal counter
+sql_update_started_count_internal 16.0
+# HELP replicas_leaseholders Number of lease holders
+# TYPE replicas_leaseholders gauge
+replicas_leaseholders{store="1"} 7.0
+# HELP rocksdb_bloom_filter_prefix_checked Number of times the bloom filter was checked
+# TYPE rocksdb_bloom_filter_prefix_checked gauge
+rocksdb_bloom_filter_prefix_checked{store="1"} 27363.0
+# HELP queue_split_purgatory Number of replicas in the split queue's purgatory, waiting to become splittable
+# TYPE queue_split_purgatory gauge
+queue_split_purgatory{store="1"} 0.0
+# HELP queue_gc_info_resolvesuccess Number of successful intent resolutions
+# TYPE queue_gc_info_resolvesuccess counter
+queue_gc_info_resolvesuccess{store="1"} 0.0
+# HELP txnrecovery_successes_aborted Number of transaction recovery attempts that aborted a transaction
+# TYPE txnrecovery_successes_aborted counter
+txnrecovery_successes_aborted{store="1"} 0.0
+# HELP changefeed_max_behind_nanos Largest commit-to-emit duration of any running feed
+# TYPE changefeed_max_behind_nanos gauge
+changefeed_max_behind_nanos 0.0
+# HELP sql_misc_started_count_internal Number of other SQL statements started (internal queries)
+# TYPE sql_misc_started_count_internal counter
+sql_misc_started_count_internal 2.0
+# HELP rocksdb_block_cache_pinned_usage Bytes pinned by the block cache
+# TYPE rocksdb_block_cache_pinned_usage gauge
+rocksdb_block_cache_pinned_usage{store="1"} 0.0
+# HELP sql_bytesout Number of sql bytes sent
+# TYPE sql_bytesout counter
+sql_bytesout 0.0
+# HELP timeseries_write_bytes Total size in bytes of metric samples written to disk
+# TYPE timeseries_write_bytes counter
+timeseries_write_bytes 8.2810041e+07
+# HELP sql_txn_latency Latency of SQL transactions
+# TYPE sql_txn_latency histogram
+sql_txn_latency_bucket{le="+Inf"} 0.0
+sql_txn_latency_sum 0.0
+sql_txn_latency_count 0.0
+# HELP sql_optimizer_fallback_count_internal Number of statements which the cost-based optimizer was unable to plan (internal queries)
+# TYPE sql_optimizer_fallback_count_internal counter
+sql_optimizer_fallback_count_internal 0.0
+# HELP raft_rcvd_dropped Number of dropped incoming Raft messages
+# TYPE raft_rcvd_dropped counter
+raft_rcvd_dropped{store="1"} 0.0
+# HELP queue_tsmaintenance_processingnanos Nanoseconds spent processing replicas in the time series maintenance queue
+# TYPE queue_tsmaintenance_processingnanos counter
+queue_tsmaintenance_processingnanos{store="1"} 0.0
+# HELP queue_gc_info_numkeysaffected Number of keys with GC'able data
+# TYPE queue_gc_info_numkeysaffected counter
+queue_gc_info_numkeysaffected{store="1"} 50.0
+# HELP distsender_batches_partial Number of partial batches processed after being divided on range boundaries
+# TYPE distsender_batches_partial counter
+distsender_batches_partial 3848.0
+# HELP queue_gc_info_abortspanconsidered Number of AbortSpan entries old enough to be considered for removal
+# TYPE queue_gc_info_abortspanconsidered counter
+queue_gc_info_abortspanconsidered{store="1"} 0.0
+# HELP tscache_skl_read_pages Number of pages in the read timestamp cache
+# TYPE tscache_skl_read_pages gauge
+tscache_skl_read_pages{store="1"} 1.0
+# HELP txnwaitqueue_query_wait_time Histogram of durations spent in queue by queries
+# TYPE txnwaitqueue_query_wait_time histogram
+txnwaitqueue_query_wait_time_bucket{store="1",le="+Inf"} 0.0
+txnwaitqueue_query_wait_time_sum{store="1"} 0.0
+txnwaitqueue_query_wait_time_count{store="1"} 0.0
+# HELP sql_select_count_internal Number of SQL SELECT statements successfully executed (internal queries)
+# TYPE sql_select_count_internal counter
+sql_select_count_internal 1607.0
+# HELP liveness_livenodes Number of live nodes in the cluster (will be 0 if this node is not itself live)
+# TYPE liveness_livenodes gauge
+liveness_livenodes 3.0
+# HELP sql_query_count Number of SQL queries executed
+# TYPE sql_query_count counter
+sql_query_count 0.0
+# HELP sql_optimizer_plan_cache_hits_internal Number of non-prepared statements for which a cached plan was used (internal queries)
+# TYPE sql_optimizer_plan_cache_hits_internal counter
+sql_optimizer_plan_cache_hits_internal 2120.0
+# HELP leases_success Number of successful lease requests
+# TYPE leases_success counter
+leases_success{store="1"} 2260.0
+# HELP capacity_used Used storage capacity
+# TYPE capacity_used gauge
+capacity_used{store="1"} 1.31897916e+08
+# HELP compactor_compactions_success Number of successful compaction requests sent to the storage engine
+# TYPE compactor_compactions_success counter
+compactor_compactions_success{store="1"} 0.0
+# HELP txn_restarts_serializable Number of restarts due to a forwarded commit timestamp and isolation=SERIALIZABLE
+# TYPE txn_restarts_serializable counter
+txn_restarts_serializable 0.0
+# HELP queue_replicate_purgatory Number of replicas in the replicate queue's purgatory, awaiting allocation options
+# TYPE queue_replicate_purgatory gauge
+queue_replicate_purgatory{store="1"} 0.0
+# HELP queue_split_process_success Number of replicas successfully processed by the split queue
+# TYPE queue_split_process_success counter
+queue_split_process_success{store="1"} 0.0
+# HELP queue_merge_process_success Number of replicas successfully processed by the merge queue
+# TYPE queue_merge_process_success counter
+queue_merge_process_success{store="1"} 0.0
+# HELP changefeed_flushes Total flushes across all feeds
+# TYPE changefeed_flushes counter
+changefeed_flushes 0.0
+# HELP changefeed_buffer_entries_out Total entries leaving the buffer between raft and changefeed sinks
+# TYPE changefeed_buffer_entries_out counter
+changefeed_buffer_entries_out 0.0
+# HELP sys_host_disk_read_bytes Bytes read from all disks since this process started
+# TYPE sys_host_disk_read_bytes gauge
+sys_host_disk_read_bytes 4.3319296e+07
+# HELP sql_mem_internal_current Current sql statement memory usage for internal
+# TYPE sql_mem_internal_current gauge
+sql_mem_internal_current 0.0
+# HELP clock_offset_stddevnanos Stddev clock offset with other nodes
+# TYPE clock_offset_stddevnanos gauge
+clock_offset_stddevnanos 210665.0
+# HELP sql_misc_count_internal Number of other SQL statements successfully executed (internal queries)
+# TYPE sql_misc_count_internal counter
+sql_misc_count_internal 2.0
+# HELP sql_optimizer_count_internal Number of statements which ran with the cost-based optimizer (internal queries)
+# TYPE sql_optimizer_count_internal counter
+sql_optimizer_count_internal 4798.0
+# HELP intentcount Count of intent keys
+# TYPE intentcount gauge
+intentcount{store="1"} 0.0
+# HELP txnwaitqueue_pusher_waiting Number of pushers on the txn wait queue
+# TYPE txnwaitqueue_pusher_waiting gauge
+txnwaitqueue_pusher_waiting{store="1"} 0.0
+# HELP txn_restarts_unknown Number of restarts due to unknown reasons
+# TYPE txn_restarts_unknown counter
+txn_restarts_unknown 0.0
+# HELP gossip_connections_incoming Number of active incoming gossip connections
+# TYPE gossip_connections_incoming gauge
+gossip_connections_incoming 0.0
+# HELP txn_restarts_txnaborted Number of restarts due to an abort by a concurrent transaction (usually due to deadlock)
+# TYPE txn_restarts_txnaborted counter
+txn_restarts_txnaborted 0.0
+# HELP clock_offset_meannanos Mean clock offset with other nodes
+# TYPE clock_offset_meannanos gauge
+clock_offset_meannanos -14326.0
+# HELP sys_host_disk_io_time Time spent reading from or writing to all disks since this process started
+# TYPE sys_host_disk_io_time gauge
+sys_host_disk_io_time 4.75e+08
+# HELP sql_ddl_started_count Number of SQL DDL statements started
+# TYPE sql_ddl_started_count counter
+sql_ddl_started_count 0.0
+# HELP sql_misc_started_count Number of other SQL statements started
+# TYPE sql_misc_started_count counter
+sql_misc_started_count 0.0
+# HELP sql_ddl_count_internal Number of SQL DDL statements successfully executed (internal queries)
+# TYPE sql_ddl_count_internal counter
+sql_ddl_count_internal 4.0
+# HELP rpc_heartbeats_initializing Gauge of current connections in the initializing state
+# TYPE rpc_heartbeats_initializing gauge
+rpc_heartbeats_initializing 0.0
+# HELP lastupdatenanos Timestamp at which bytes/keys/intents metrics were last updated
+# TYPE lastupdatenanos gauge
+lastupdatenanos{store="1"} 5.937496135985266e+18
+# HELP sql_mem_admin_session_current Current sql session memory usage for admin
+# TYPE sql_mem_admin_session_current gauge
+sql_mem_admin_session_current 0.0
+# HELP sql_distsql_queries_total Number of distributed SQL queries executed
+# TYPE sql_distsql_queries_total counter
+sql_distsql_queries_total 2660.0
+# HELP sql_optimizer_fallback_count Number of statements which the cost-based optimizer was unable to plan
+# TYPE sql_optimizer_fallback_count counter
+sql_optimizer_fallback_count 0.0
+# HELP replicas_quiescent Number of quiesced replicas
+# TYPE replicas_quiescent gauge
+replicas_quiescent{store="1"} 34.0
+# HELP rocksdb_compactions Number of table compactions
+# TYPE rocksdb_compactions gauge
+rocksdb_compactions{store="1"} 7.0
+# HELP raft_rcvd_app Number of MsgApp messages received by this store
+# TYPE raft_rcvd_app counter
+raft_rcvd_app{store="1"} 62111.0
+# HELP queue_gc_pending Number of pending replicas in the GC queue
+# TYPE queue_gc_pending gauge
+queue_gc_pending{store="1"} 0.0
+# HELP sql_mem_internal_session_max Memory usage per sql session for internal
+# TYPE sql_mem_internal_session_max histogram
+sql_mem_internal_session_max_bucket{le="4011.0"} 2123.0
+sql_mem_internal_session_max_bucket{le="4487.0"} 2142.0
+sql_mem_internal_session_max_bucket{le="+Inf"} 2142.0
+sql_mem_internal_session_max_sum 8.600606e+06
+sql_mem_internal_session_max_count 2142.0
+# HELP sql_mem_conns_session_current Current sql session memory usage for conns
+# TYPE sql_mem_conns_session_current gauge
+sql_mem_conns_session_current 0.0
+# HELP valbytes Number of bytes taken up by values
+# TYPE valbytes gauge
+valbytes{store="1"} 7.5527718e+07
+# HELP range_raftleadertransfers Number of raft leader transfers
+# TYPE range_raftleadertransfers counter
+range_raftleadertransfers{store="1"} 5.0
+# HELP gossip_infos_received Number of received gossip Info objects
+# TYPE gossip_infos_received counter
+gossip_infos_received 8.0
+# HELP sql_restart_savepoint_release_started_count_internal Number of `RELEASE SAVEPOINT cockroach_restart` statements started (internal queries)
+# TYPE sql_restart_savepoint_release_started_count_internal counter
+sql_restart_savepoint_release_started_count_internal 0.0
+# HELP sql_distsql_exec_latency_internal Latency of DistSQL statement execution (internal queries)
+# TYPE sql_distsql_exec_latency_internal histogram
+sql_distsql_exec_latency_internal_bucket{le="245759.0"} 3.0
+sql_distsql_exec_latency_internal_bucket{le="262143.0"} 8.0
+sql_distsql_exec_latency_internal_bucket{le="278527.0"} 23.0
+sql_distsql_exec_latency_internal_bucket{le="294911.0"} 53.0
+sql_distsql_exec_latency_internal_bucket{le="311295.0"} 68.0
+sql_distsql_exec_latency_internal_bucket{le="327679.0"} 89.0
+sql_distsql_exec_latency_internal_bucket{le="344063.0"} 120.0
+sql_distsql_exec_latency_internal_bucket{le="360447.0"} 149.0
+sql_distsql_exec_latency_internal_bucket{le="376831.0"} 181.0
+sql_distsql_exec_latency_internal_bucket{le="393215.0"} 223.0
+sql_distsql_exec_latency_internal_bucket{le="409599.0"} 250.0
+sql_distsql_exec_latency_internal_bucket{le="425983.0"} 266.0
+sql_distsql_exec_latency_internal_bucket{le="442367.0"} 287.0
+sql_distsql_exec_latency_internal_bucket{le="458751.0"} 304.0
+sql_distsql_exec_latency_internal_bucket{le="475135.0"} 318.0
+sql_distsql_exec_latency_internal_bucket{le="491519.0"} 329.0
+sql_distsql_exec_latency_internal_bucket{le="507903.0"} 340.0
+sql_distsql_exec_latency_internal_bucket{le="524287.0"} 347.0
+sql_distsql_exec_latency_internal_bucket{le="557055.0"} 358.0
+sql_distsql_exec_latency_internal_bucket{le="589823.0"} 369.0
+sql_distsql_exec_latency_internal_bucket{le="622591.0"} 378.0
+sql_distsql_exec_latency_internal_bucket{le="655359.0"} 383.0
+sql_distsql_exec_latency_internal_bucket{le="688127.0"} 389.0
+sql_distsql_exec_latency_internal_bucket{le="720895.0"} 394.0
+sql_distsql_exec_latency_internal_bucket{le="753663.0"} 397.0
+sql_distsql_exec_latency_internal_bucket{le="786431.0"} 402.0
+sql_distsql_exec_latency_internal_bucket{le="819199.0"} 405.0
+sql_distsql_exec_latency_internal_bucket{le="884735.0"} 408.0
+sql_distsql_exec_latency_internal_bucket{le="917503.0"} 409.0
+sql_distsql_exec_latency_internal_bucket{le="950271.0"} 411.0
+sql_distsql_exec_latency_internal_bucket{le="983039.0"} 412.0
+sql_distsql_exec_latency_internal_bucket{le="1.048575e+06"} 413.0
+sql_distsql_exec_latency_internal_bucket{le="1.114111e+06"} 416.0
+sql_distsql_exec_latency_internal_bucket{le="1.245183e+06"} 419.0
+sql_distsql_exec_latency_internal_bucket{le="1.310719e+06"} 420.0
+sql_distsql_exec_latency_internal_bucket{le="1.441791e+06"} 421.0
+sql_distsql_exec_latency_internal_bucket{le="1.507327e+06"} 422.0
+sql_distsql_exec_latency_internal_bucket{le="1.572863e+06"} 426.0
+sql_distsql_exec_latency_internal_bucket{le="1.638399e+06"} 427.0
+sql_distsql_exec_latency_internal_bucket{le="1.703935e+06"} 429.0
+sql_distsql_exec_latency_internal_bucket{le="1.769471e+06"} 439.0
+sql_distsql_exec_latency_internal_bucket{le="1.835007e+06"} 442.0
+sql_distsql_exec_latency_internal_bucket{le="1.900543e+06"} 460.0
+sql_distsql_exec_latency_internal_bucket{le="1.966079e+06"} 484.0
+sql_distsql_exec_latency_internal_bucket{le="2.031615e+06"} 510.0
+sql_distsql_exec_latency_internal_bucket{le="2.097151e+06"} 550.0
+sql_distsql_exec_latency_internal_bucket{le="2.228223e+06"} 612.0
+sql_distsql_exec_latency_internal_bucket{le="2.359295e+06"} 688.0
+sql_distsql_exec_latency_internal_bucket{le="2.490367e+06"} 766.0
+sql_distsql_exec_latency_internal_bucket{le="2.621439e+06"} 845.0
+sql_distsql_exec_latency_internal_bucket{le="2.752511e+06"} 913.0
+sql_distsql_exec_latency_internal_bucket{le="2.883583e+06"} 967.0
+sql_distsql_exec_latency_internal_bucket{le="3.014655e+06"} 1022.0
+sql_distsql_exec_latency_internal_bucket{le="3.145727e+06"} 1070.0
+sql_distsql_exec_latency_internal_bucket{le="3.276799e+06"} 1108.0
+sql_distsql_exec_latency_internal_bucket{le="3.407871e+06"} 1144.0
+sql_distsql_exec_latency_internal_bucket{le="3.538943e+06"} 1171.0
+sql_distsql_exec_latency_internal_bucket{le="3.670015e+06"} 1207.0
+sql_distsql_exec_latency_internal_bucket{le="3.801087e+06"} 1238.0
+sql_distsql_exec_latency_internal_bucket{le="3.932159e+06"} 1267.0
+sql_distsql_exec_latency_internal_bucket{le="4.063231e+06"} 1292.0
+sql_distsql_exec_latency_internal_bucket{le="4.194303e+06"} 1328.0
+sql_distsql_exec_latency_internal_bucket{le="4.456447e+06"} 1373.0
+sql_distsql_exec_latency_internal_bucket{le="4.718591e+06"} 1410.0
+sql_distsql_exec_latency_internal_bucket{le="4.980735e+06"} 1434.0
+sql_distsql_exec_latency_internal_bucket{le="5.242879e+06"} 1463.0
+sql_distsql_exec_latency_internal_bucket{le="5.505023e+06"} 1479.0
+sql_distsql_exec_latency_internal_bucket{le="5.767167e+06"} 1489.0
+sql_distsql_exec_latency_internal_bucket{le="6.029311e+06"} 1498.0
+sql_distsql_exec_latency_internal_bucket{le="6.291455e+06"} 1509.0
+sql_distsql_exec_latency_internal_bucket{le="6.553599e+06"} 1523.0
+sql_distsql_exec_latency_internal_bucket{le="6.815743e+06"} 1531.0
+sql_distsql_exec_latency_internal_bucket{le="7.077887e+06"} 1540.0
+sql_distsql_exec_latency_internal_bucket{le="7.340031e+06"} 1545.0
+sql_distsql_exec_latency_internal_bucket{le="7.602175e+06"} 1551.0
+sql_distsql_exec_latency_internal_bucket{le="7.864319e+06"} 1554.0
+sql_distsql_exec_latency_internal_bucket{le="8.126463e+06"} 1555.0
+sql_distsql_exec_latency_internal_bucket{le="8.388607e+06"} 1556.0
+sql_distsql_exec_latency_internal_bucket{le="8.912895e+06"} 1562.0
+sql_distsql_exec_latency_internal_bucket{le="9.437183e+06"} 1565.0
+sql_distsql_exec_latency_internal_bucket{le="9.961471e+06"} 1568.0
+sql_distsql_exec_latency_internal_bucket{le="1.0485759e+07"} 1571.0
+sql_distsql_exec_latency_internal_bucket{le="1.1534335e+07"} 1574.0
+sql_distsql_exec_latency_internal_bucket{le="1.2058623e+07"} 1575.0
+sql_distsql_exec_latency_internal_bucket{le="1.2582911e+07"} 1576.0
+sql_distsql_exec_latency_internal_bucket{le="1.8874367e+07"} 1578.0
+sql_distsql_exec_latency_internal_bucket{le="2.2020095e+07"} 1580.0
+sql_distsql_exec_latency_internal_bucket{le="3.5651583e+07"} 1581.0
+sql_distsql_exec_latency_internal_bucket{le="1.30023423e+08"} 1582.0
+sql_distsql_exec_latency_internal_bucket{le="1.0200547327e+10"} 1583.0
+sql_distsql_exec_latency_internal_bucket{le="+Inf"} 1583.0
+sql_distsql_exec_latency_internal_sum 1.4678473169e+10
+sql_distsql_exec_latency_internal_count 1583.0
+# HELP replicas_leaders_not_leaseholders Number of replicas that are Raft leaders whose range lease is held by another store
+# TYPE replicas_leaders_not_leaseholders gauge
+replicas_leaders_not_leaseholders{store="1"} 0.0
+# HELP capacity_reserved Capacity reserved for snapshots
+# TYPE capacity_reserved gauge
+capacity_reserved{store="1"} 0.0
+# HELP queue_merge_process_failure Number of replicas which failed processing in the merge queue
+# TYPE queue_merge_process_failure counter
+queue_merge_process_failure{store="1"} 0.0
+# HELP queue_raftlog_processingnanos Nanoseconds spent processing replicas in the Raft log queue
+# TYPE queue_raftlog_processingnanos counter
+queue_raftlog_processingnanos{store="1"} 9.05864517e+08
+# HELP queue_replicagc_process_failure Number of replicas which failed processing in the replica GC queue
+# TYPE queue_replicagc_process_failure counter
+queue_replicagc_process_failure{store="1"} 0.0
+# HELP sys_uptime Process uptime
+# TYPE sys_uptime gauge
+sys_uptime 12224.0
+# HELP tscache_skl_write_rotations Number of page rotations in the write timestamp cache
+# TYPE tscache_skl_write_rotations counter
+tscache_skl_write_rotations{store="1"} 0.0
+# HELP queue_replicate_removereplica Number of replica removals attempted by the replicate queue (typically in response to a rebalancer-initiated addition)
+# TYPE queue_replicate_removereplica counter
+queue_replicate_removereplica{store="1"} 0.0
+# HELP queue_split_process_failure Number of replicas which failed processing in the split queue
+# TYPE queue_split_process_failure counter
+queue_split_process_failure{store="1"} 0.0
+# HELP rocksdb_block_cache_hits Count of block cache hits
+# TYPE rocksdb_block_cache_hits gauge
+rocksdb_block_cache_hits{store="1"} 94825.0
+# HELP raft_rcvd_heartbeatresp Number of (coalesced, if enabled) MsgHeartbeatResp messages received by this store
+# TYPE raft_rcvd_heartbeatresp counter
+raft_rcvd_heartbeatresp{store="1"} 2091.0
+# HELP queue_replicagc_pending Number of pending replicas in the replica GC queue
+# TYPE queue_replicagc_pending gauge
+queue_replicagc_pending{store="1"} 0.0
+# HELP queue_tsmaintenance_process_failure Number of replicas which failed processing in the time series maintenance queue
+# TYPE queue_tsmaintenance_process_failure counter
+queue_tsmaintenance_process_failure{store="1"} 0.0
+# HELP intentresolver_async_throttled Number of intent resolution attempts not run asynchronously due to throttling
+# TYPE intentresolver_async_throttled counter
+intentresolver_async_throttled{store="1"} 0.0
+# HELP sql_txn_rollback_started_count_internal Number of SQL transaction ROLLBACK statements started (internal queries)
+# TYPE sql_txn_rollback_started_count_internal counter
+sql_txn_rollback_started_count_internal 0.0
+# HELP intentbytes Number of bytes in intent KV pairs
+# TYPE intentbytes gauge
+intentbytes{store="1"} 0.0
+# HELP rocksdb_memtable_total_size Current size of memtable in bytes
+# TYPE rocksdb_memtable_total_size gauge
+rocksdb_memtable_total_size{store="1"} 1.4375272e+07
+# HELP txnrecovery_successes_pending Number of transaction recovery attempts that left a transaction pending
+# TYPE txnrecovery_successes_pending counter
+txnrecovery_successes_pending{store="1"} 0.0
+# HELP txn_durations KV transaction durations
+# TYPE txn_durations histogram
+txn_durations_bucket{le="950271.0"} 1.0
+txn_durations_bucket{le="1.015807e+06"} 2.0
+txn_durations_bucket{le="1.114111e+06"} 5.0
+txn_durations_bucket{le="1.245183e+06"} 10.0
+txn_durations_bucket{le="1.310719e+06"} 19.0
+txn_durations_bucket{le="1.376255e+06"} 26.0
+txn_durations_bucket{le="1.441791e+06"} 37.0
+txn_durations_bucket{le="1.507327e+06"} 62.0
+txn_durations_bucket{le="1.572863e+06"} 99.0
+txn_durations_bucket{le="1.638399e+06"} 146.0
+txn_durations_bucket{le="1.703935e+06"} 200.0
+txn_durations_bucket{le="1.769471e+06"} 270.0
+txn_durations_bucket{le="1.835007e+06"} 356.0
+txn_durations_bucket{le="1.900543e+06"} 441.0
+txn_durations_bucket{le="1.966079e+06"} 549.0
+txn_durations_bucket{le="2.031615e+06"} 672.0
+txn_durations_bucket{le="2.097151e+06"} 785.0
+txn_durations_bucket{le="2.228223e+06"} 993.0
+txn_durations_bucket{le="2.359295e+06"} 1210.0
+txn_durations_bucket{le="2.490367e+06"} 1430.0
+txn_durations_bucket{le="2.621439e+06"} 1627.0
+txn_durations_bucket{le="2.752511e+06"} 1852.0
+txn_durations_bucket{le="2.883583e+06"} 2073.0
+txn_durations_bucket{le="3.014655e+06"} 2318.0
+txn_durations_bucket{le="3.145727e+06"} 2541.0
+txn_durations_bucket{le="3.276799e+06"} 2796.0
+txn_durations_bucket{le="3.407871e+06"} 3039.0
+txn_durations_bucket{le="3.538943e+06"} 3283.0
+txn_durations_bucket{le="3.670015e+06"} 3508.0
+txn_durations_bucket{le="3.801087e+06"} 3731.0
+txn_durations_bucket{le="3.932159e+06"} 3942.0
+txn_durations_bucket{le="4.063231e+06"} 4114.0
+txn_durations_bucket{le="4.194303e+06"} 4281.0
+txn_durations_bucket{le="4.456447e+06"} 4572.0
+txn_durations_bucket{le="4.718591e+06"} 4809.0
+txn_durations_bucket{le="4.980735e+06"} 5010.0
+txn_durations_bucket{le="5.242879e+06"} 5187.0
+txn_durations_bucket{le="5.505023e+06"} 5351.0
+txn_durations_bucket{le="5.767167e+06"} 5492.0
+txn_durations_bucket{le="6.029311e+06"} 5627.0
+txn_durations_bucket{le="6.291455e+06"} 5743.0
+txn_durations_bucket{le="6.553599e+06"} 5858.0
+txn_durations_bucket{le="6.815743e+06"} 5975.0
+txn_durations_bucket{le="7.077887e+06"} 6082.0
+txn_durations_bucket{le="7.340031e+06"} 6167.0
+txn_durations_bucket{le="7.602175e+06"} 6242.0
+txn_durations_bucket{le="7.864319e+06"} 6304.0
+txn_durations_bucket{le="8.126463e+06"} 6356.0
+txn_durations_bucket{le="8.388607e+06"} 6399.0
+txn_durations_bucket{le="8.912895e+06"} 6499.0
+txn_durations_bucket{le="9.437183e+06"} 6572.0
+txn_durations_bucket{le="9.961471e+06"} 6658.0
+txn_durations_bucket{le="1.0485759e+07"} 6714.0
+txn_durations_bucket{le="1.1010047e+07"} 6785.0
+txn_durations_bucket{le="1.1534335e+07"} 6847.0
+txn_durations_bucket{le="1.2058623e+07"} 6899.0
+txn_durations_bucket{le="1.2582911e+07"} 6945.0
+txn_durations_bucket{le="1.3107199e+07"} 7001.0
+txn_durations_bucket{le="1.3631487e+07"} 7053.0
+txn_durations_bucket{le="1.4155775e+07"} 7109.0
+txn_durations_bucket{le="1.4680063e+07"} 7159.0
+txn_durations_bucket{le="1.5204351e+07"} 7183.0
+txn_durations_bucket{le="1.5728639e+07"} 7210.0
+txn_durations_bucket{le="1.6252927e+07"} 7239.0
+txn_durations_bucket{le="1.6777215e+07"} 7263.0
+txn_durations_bucket{le="1.7825791e+07"} 7302.0
+txn_durations_bucket{le="1.8874367e+07"} 7332.0
+txn_durations_bucket{le="1.9922943e+07"} 7357.0
+txn_durations_bucket{le="2.0971519e+07"} 7370.0
+txn_durations_bucket{le="2.2020095e+07"} 7389.0
+txn_durations_bucket{le="2.3068671e+07"} 7398.0
+txn_durations_bucket{le="2.4117247e+07"} 7409.0
+txn_durations_bucket{le="2.5165823e+07"} 7416.0
+txn_durations_bucket{le="2.6214399e+07"} 7423.0
+txn_durations_bucket{le="2.7262975e+07"} 7424.0
+txn_durations_bucket{le="2.8311551e+07"} 7430.0
+txn_durations_bucket{le="2.9360127e+07"} 7432.0
+txn_durations_bucket{le="3.0408703e+07"} 7435.0
+txn_durations_bucket{le="3.2505855e+07"} 7439.0
+txn_durations_bucket{le="3.3554431e+07"} 7440.0
+txn_durations_bucket{le="3.7748735e+07"} 7443.0
+txn_durations_bucket{le="3.9845887e+07"} 7447.0
+txn_durations_bucket{le="4.1943039e+07"} 7450.0
+txn_durations_bucket{le="4.6137343e+07"} 7452.0
+txn_durations_bucket{le="1.00663295e+08"} 7453.0
+txn_durations_bucket{le="1.04857599e+08"} 7454.0
+txn_durations_bucket{le="1.09051903e+08"} 7455.0
+txn_durations_bucket{le="1.17440511e+08"} 7456.0
+txn_durations_bucket{le="1.25829119e+08"} 7457.0
+txn_durations_bucket{le="1.34217727e+08"} 7458.0
+txn_durations_bucket{le="2.18103807e+08"} 7459.0
+txn_durations_bucket{le="2.26492415e+08"} 7460.0
+txn_durations_bucket{le="5.20093695e+08"} 7461.0
+txn_durations_bucket{le="9.05969663e+08"} 7462.0
+txn_durations_bucket{le="1.006632959e+09"} 7463.0
+txn_durations_bucket{le="1.040187391e+09"} 7464.0
+txn_durations_bucket{le="4.563402751e+09"} 7465.0
+txn_durations_bucket{le="+Inf"} 7465.0
+txn_durations_sum 4.8816906967e+10
+txn_durations_count 7465.0
+# HELP sys_go_allocbytes Current bytes of memory allocated by go
+# TYPE sys_go_allocbytes gauge
+sys_go_allocbytes 1.06576224e+08
+# HELP sys_host_net_recv_bytes Bytes received on all network interfaces since this process started
+# TYPE sys_host_net_recv_bytes gauge
+sys_host_net_recv_bytes 2.34392325e+08
+# HELP raft_process_logcommit_latency Latency histogram for committing Raft log entries
+# TYPE raft_process_logcommit_latency histogram
+raft_process_logcommit_latency_bucket{store="1",le="229375.0"} 1.0
+raft_process_logcommit_latency_bucket{store="1",le="237567.0"} 3.0
+raft_process_logcommit_latency_bucket{store="1",le="245759.0"} 4.0
+raft_process_logcommit_latency_bucket{store="1",le="253951.0"} 6.0
+raft_process_logcommit_latency_bucket{store="1",le="262143.0"} 12.0
+raft_process_logcommit_latency_bucket{store="1",le="278527.0"} 19.0
+raft_process_logcommit_latency_bucket{store="1",le="294911.0"} 53.0
+raft_process_logcommit_latency_bucket{store="1",le="311295.0"} 106.0
+raft_process_logcommit_latency_bucket{store="1",le="327679.0"} 196.0
+raft_process_logcommit_latency_bucket{store="1",le="344063.0"} 323.0
+raft_process_logcommit_latency_bucket{store="1",le="360447.0"} 500.0
+raft_process_logcommit_latency_bucket{store="1",le="376831.0"} 713.0
+raft_process_logcommit_latency_bucket{store="1",le="393215.0"} 997.0
+raft_process_logcommit_latency_bucket{store="1",le="409599.0"} 1362.0
+raft_process_logcommit_latency_bucket{store="1",le="425983.0"} 1800.0
+raft_process_logcommit_latency_bucket{store="1",le="442367.0"} 2314.0
+raft_process_logcommit_latency_bucket{store="1",le="458751.0"} 2818.0
+raft_process_logcommit_latency_bucket{store="1",le="475135.0"} 3404.0
+raft_process_logcommit_latency_bucket{store="1",le="491519.0"} 4003.0
+raft_process_logcommit_latency_bucket{store="1",le="507903.0"} 4687.0
+raft_process_logcommit_latency_bucket{store="1",le="524287.0"} 5361.0
+raft_process_logcommit_latency_bucket{store="1",le="557055.0"} 6875.0
+raft_process_logcommit_latency_bucket{store="1",le="589823.0"} 8409.0
+raft_process_logcommit_latency_bucket{store="1",le="622591.0"} 10050.0
+raft_process_logcommit_latency_bucket{store="1",le="655359.0"} 11694.0
+raft_process_logcommit_latency_bucket{store="1",le="688127.0"} 13332.0
+raft_process_logcommit_latency_bucket{store="1",le="720895.0"} 15073.0
+raft_process_logcommit_latency_bucket{store="1",le="753663.0"} 16774.0
+raft_process_logcommit_latency_bucket{store="1",le="786431.0"} 18420.0
+raft_process_logcommit_latency_bucket{store="1",le="819199.0"} 19982.0
+raft_process_logcommit_latency_bucket{store="1",le="851967.0"} 21514.0
+raft_process_logcommit_latency_bucket{store="1",le="884735.0"} 22990.0
+raft_process_logcommit_latency_bucket{store="1",le="917503.0"} 24326.0
+raft_process_logcommit_latency_bucket{store="1",le="950271.0"} 25560.0
+raft_process_logcommit_latency_bucket{store="1",le="983039.0"} 26706.0
+raft_process_logcommit_latency_bucket{store="1",le="1.015807e+06"} 27822.0
+raft_process_logcommit_latency_bucket{store="1",le="1.048575e+06"} 28770.0
+raft_process_logcommit_latency_bucket{store="1",le="1.114111e+06"} 30476.0
+raft_process_logcommit_latency_bucket{store="1",le="1.179647e+06"} 31927.0
+raft_process_logcommit_latency_bucket{store="1",le="1.245183e+06"} 33126.0
+raft_process_logcommit_latency_bucket{store="1",le="1.310719e+06"} 34230.0
+raft_process_logcommit_latency_bucket{store="1",le="1.376255e+06"} 35235.0
+raft_process_logcommit_latency_bucket{store="1",le="1.441791e+06"} 36152.0
+raft_process_logcommit_latency_bucket{store="1",le="1.507327e+06"} 36975.0
+raft_process_logcommit_latency_bucket{store="1",le="1.572863e+06"} 37751.0
+raft_process_logcommit_latency_bucket{store="1",le="1.638399e+06"} 38508.0
+raft_process_logcommit_latency_bucket{store="1",le="1.703935e+06"} 39195.0
+raft_process_logcommit_latency_bucket{store="1",le="1.769471e+06"} 39851.0
+raft_process_logcommit_latency_bucket{store="1",le="1.835007e+06"} 40441.0
+raft_process_logcommit_latency_bucket{store="1",le="1.900543e+06"} 40948.0
+raft_process_logcommit_latency_bucket{store="1",le="1.966079e+06"} 41384.0
+raft_process_logcommit_latency_bucket{store="1",le="2.031615e+06"} 41782.0
+raft_process_logcommit_latency_bucket{store="1",le="2.097151e+06"} 42108.0
+raft_process_logcommit_latency_bucket{store="1",le="2.228223e+06"} 42671.0
+raft_process_logcommit_latency_bucket{store="1",le="2.359295e+06"} 43132.0
+raft_process_logcommit_latency_bucket{store="1",le="2.490367e+06"} 43510.0
+raft_process_logcommit_latency_bucket{store="1",le="2.621439e+06"} 43807.0
+raft_process_logcommit_latency_bucket{store="1",le="2.752511e+06"} 44049.0
+raft_process_logcommit_latency_bucket{store="1",le="2.883583e+06"} 44270.0
+raft_process_logcommit_latency_bucket{store="1",le="3.014655e+06"} 44426.0
+raft_process_logcommit_latency_bucket{store="1",le="3.145727e+06"} 44569.0
+raft_process_logcommit_latency_bucket{store="1",le="3.276799e+06"} 44689.0
+raft_process_logcommit_latency_bucket{store="1",le="3.407871e+06"} 44794.0
+raft_process_logcommit_latency_bucket{store="1",le="3.538943e+06"} 44902.0
+raft_process_logcommit_latency_bucket{store="1",le="3.670015e+06"} 44988.0
+raft_process_logcommit_latency_bucket{store="1",le="3.801087e+06"} 45072.0
+raft_process_logcommit_latency_bucket{store="1",le="3.932159e+06"} 45158.0
+raft_process_logcommit_latency_bucket{store="1",le="4.063231e+06"} 45226.0
+raft_process_logcommit_latency_bucket{store="1",le="4.194303e+06"} 45274.0
+raft_process_logcommit_latency_bucket{store="1",le="4.456447e+06"} 45392.0
+raft_process_logcommit_latency_bucket{store="1",le="4.718591e+06"} 45477.0
+raft_process_logcommit_latency_bucket{store="1",le="4.980735e+06"} 45555.0
+raft_process_logcommit_latency_bucket{store="1",le="5.242879e+06"} 45619.0
+raft_process_logcommit_latency_bucket{store="1",le="5.505023e+06"} 45684.0
+raft_process_logcommit_latency_bucket{store="1",le="5.767167e+06"} 45723.0
+raft_process_logcommit_latency_bucket{store="1",le="6.029311e+06"} 45779.0
+raft_process_logcommit_latency_bucket{store="1",le="6.291455e+06"} 45817.0
+raft_process_logcommit_latency_bucket{store="1",le="6.553599e+06"} 45840.0
+raft_process_logcommit_latency_bucket{store="1",le="6.815743e+06"} 45875.0
+raft_process_logcommit_latency_bucket{store="1",le="7.077887e+06"} 45904.0
+raft_process_logcommit_latency_bucket{store="1",le="7.340031e+06"} 45919.0
+raft_process_logcommit_latency_bucket{store="1",le="7.602175e+06"} 45944.0
+raft_process_logcommit_latency_bucket{store="1",le="7.864319e+06"} 45962.0
+raft_process_logcommit_latency_bucket{store="1",le="8.126463e+06"} 45972.0
+raft_process_logcommit_latency_bucket{store="1",le="8.388607e+06"} 45980.0
+raft_process_logcommit_latency_bucket{store="1",le="8.912895e+06"} 46004.0
+raft_process_logcommit_latency_bucket{store="1",le="9.437183e+06"} 46018.0
+raft_process_logcommit_latency_bucket{store="1",le="9.961471e+06"} 46029.0
+raft_process_logcommit_latency_bucket{store="1",le="1.0485759e+07"} 46038.0
+raft_process_logcommit_latency_bucket{store="1",le="1.1010047e+07"} 46044.0
+raft_process_logcommit_latency_bucket{store="1",le="1.1534335e+07"} 46049.0
+raft_process_logcommit_latency_bucket{store="1",le="1.2058623e+07"} 46058.0
+raft_process_logcommit_latency_bucket{store="1",le="1.2582911e+07"} 46060.0
+raft_process_logcommit_latency_bucket{store="1",le="1.3107199e+07"} 46066.0
+raft_process_logcommit_latency_bucket{store="1",le="1.3631487e+07"} 46068.0
+raft_process_logcommit_latency_bucket{store="1",le="1.4155775e+07"} 46070.0
+raft_process_logcommit_latency_bucket{store="1",le="1.4680063e+07"} 46071.0
+raft_process_logcommit_latency_bucket{store="1",le="1.5204351e+07"} 46072.0
+raft_process_logcommit_latency_bucket{store="1",le="1.5728639e+07"} 46073.0
+raft_process_logcommit_latency_bucket{store="1",le="1.6252927e+07"} 46076.0
+raft_process_logcommit_latency_bucket{store="1",le="1.7825791e+07"} 46079.0
+raft_process_logcommit_latency_bucket{store="1",le="1.8874367e+07"} 46083.0
+raft_process_logcommit_latency_bucket{store="1",le="1.9922943e+07"} 46084.0
+raft_process_logcommit_latency_bucket{store="1",le="2.0971519e+07"} 46086.0
+raft_process_logcommit_latency_bucket{store="1",le="2.2020095e+07"} 46087.0
+raft_process_logcommit_latency_bucket{store="1",le="2.4117247e+07"} 46088.0
+raft_process_logcommit_latency_bucket{store="1",le="2.5165823e+07"} 46089.0
+raft_process_logcommit_latency_bucket{store="1",le="3.0408703e+07"} 46090.0
+raft_process_logcommit_latency_bucket{store="1",le="3.1457279e+07"} 46091.0
+raft_process_logcommit_latency_bucket{store="1",le="3.7748735e+07"} 46093.0
+raft_process_logcommit_latency_bucket{store="1",le="4.1943039e+07"} 46094.0
+raft_process_logcommit_latency_bucket{store="1",le="4.6137343e+07"} 46095.0
+raft_process_logcommit_latency_bucket{store="1",le="4.8234495e+07"} 46096.0
+raft_process_logcommit_latency_bucket{store="1",le="5.0331647e+07"} 46097.0
+raft_process_logcommit_latency_bucket{store="1",le="7.5497471e+07"} 46098.0
+raft_process_logcommit_latency_bucket{store="1",le="2.09715199e+08"} 46099.0
+raft_process_logcommit_latency_bucket{store="1",le="2.18103807e+08"} 46101.0
+raft_process_logcommit_latency_bucket{store="1",le="4.19430399e+08"} 46102.0
+raft_process_logcommit_latency_bucket{store="1",le="6.37534207e+08"} 46103.0
+raft_process_logcommit_latency_bucket{store="1",le="9.05969663e+08"} 46104.0
+raft_process_logcommit_latency_bucket{store="1",le="9.73078527e+08"} 46105.0
+raft_process_logcommit_latency_bucket{store="1",le="1.006632959e+09"} 46106.0
+raft_process_logcommit_latency_bucket{store="1",le="1.040187391e+09"} 46108.0
+raft_process_logcommit_latency_bucket{store="1",le="1.0200547327e+10"} 46110.0
+raft_process_logcommit_latency_bucket{store="1",le="+Inf"} 46110.0
+raft_process_logcommit_latency_sum{store="1"} 8.2096278498e+10
+raft_process_logcommit_latency_count{store="1"} 46110.0
+# HELP queue_consistency_process_success Number of replicas successfully processed by the consistency checker queue
+# TYPE queue_consistency_process_success counter
+queue_consistency_process_success{store="1"} 7.0
+# HELP distsender_batches_async_throttled Number of partial batches not sent asynchronously due to throttling
+# TYPE distsender_batches_async_throttled counter
+distsender_batches_async_throttled 0.0
+# HELP liveness_heartbeatlatency Node liveness heartbeat latency
+# TYPE liveness_heartbeatlatency histogram
+liveness_heartbeatlatency_bucket{le="1.966079e+06"} 2.0
+liveness_heartbeatlatency_bucket{le="2.228223e+06"} 11.0
+liveness_heartbeatlatency_bucket{le="2.359295e+06"} 20.0
+liveness_heartbeatlatency_bucket{le="2.490367e+06"} 48.0
+liveness_heartbeatlatency_bucket{le="2.621439e+06"} 94.0
+liveness_heartbeatlatency_bucket{le="2.752511e+06"} 156.0
+liveness_heartbeatlatency_bucket{le="2.883583e+06"} 250.0
+liveness_heartbeatlatency_bucket{le="3.014655e+06"} 372.0
+liveness_heartbeatlatency_bucket{le="3.145727e+06"} 513.0
+liveness_heartbeatlatency_bucket{le="3.276799e+06"} 653.0
+liveness_heartbeatlatency_bucket{le="3.407871e+06"} 797.0
+liveness_heartbeatlatency_bucket{le="3.538943e+06"} 949.0
+liveness_heartbeatlatency_bucket{le="3.670015e+06"} 1110.0
+liveness_heartbeatlatency_bucket{le="3.801087e+06"} 1264.0
+liveness_heartbeatlatency_bucket{le="3.932159e+06"} 1399.0
+liveness_heartbeatlatency_bucket{le="4.063231e+06"} 1537.0
+liveness_heartbeatlatency_bucket{le="4.194303e+06"} 1648.0
+liveness_heartbeatlatency_bucket{le="4.456447e+06"} 1822.0
+liveness_heartbeatlatency_bucket{le="4.718591e+06"} 1987.0
+liveness_heartbeatlatency_bucket{le="4.980735e+06"} 2096.0
+liveness_heartbeatlatency_bucket{le="5.242879e+06"} 2191.0
+liveness_heartbeatlatency_bucket{le="5.505023e+06"} 2277.0
+liveness_heartbeatlatency_bucket{le="5.767167e+06"} 2330.0
+liveness_heartbeatlatency_bucket{le="6.029311e+06"} 2383.0
+liveness_heartbeatlatency_bucket{le="6.291455e+06"} 2436.0
+liveness_heartbeatlatency_bucket{le="6.553599e+06"} 2479.0
+liveness_heartbeatlatency_bucket{le="6.815743e+06"} 2519.0
+liveness_heartbeatlatency_bucket{le="7.077887e+06"} 2559.0
+liveness_heartbeatlatency_bucket{le="7.340031e+06"} 2596.0
+liveness_heartbeatlatency_bucket{le="7.602175e+06"} 2616.0
+liveness_heartbeatlatency_bucket{le="7.864319e+06"} 2635.0
+liveness_heartbeatlatency_bucket{le="8.126463e+06"} 2647.0
+liveness_heartbeatlatency_bucket{le="8.388607e+06"} 2657.0
+liveness_heartbeatlatency_bucket{le="8.912895e+06"} 2672.0
+liveness_heartbeatlatency_bucket{le="9.437183e+06"} 2687.0
+liveness_heartbeatlatency_bucket{le="9.961471e+06"} 2695.0
+liveness_heartbeatlatency_bucket{le="1.0485759e+07"} 2699.0
+liveness_heartbeatlatency_bucket{le="1.1010047e+07"} 2701.0
+liveness_heartbeatlatency_bucket{le="1.1534335e+07"} 2705.0
+liveness_heartbeatlatency_bucket{le="1.2058623e+07"} 2710.0
+liveness_heartbeatlatency_bucket{le="1.2582911e+07"} 2711.0
+liveness_heartbeatlatency_bucket{le="1.3631487e+07"} 2713.0
+liveness_heartbeatlatency_bucket{le="1.4155775e+07"} 2714.0
+liveness_heartbeatlatency_bucket{le="1.8874367e+07"} 2715.0
+liveness_heartbeatlatency_bucket{le="4.1943039e+07"} 2716.0
+liveness_heartbeatlatency_bucket{le="9.6468991e+07"} 2717.0
+liveness_heartbeatlatency_bucket{le="1.04857599e+08"} 2718.0
+liveness_heartbeatlatency_bucket{le="9.39524095e+08"} 2719.0
+liveness_heartbeatlatency_bucket{le="1.040187391e+09"} 2720.0
+liveness_heartbeatlatency_bucket{le="4.563402751e+09"} 2721.0
+liveness_heartbeatlatency_bucket{le="1.0200547327e+10"} 2722.0
+liveness_heartbeatlatency_bucket{le="+Inf"} 2722.0
+liveness_heartbeatlatency_sum 2.8913562974e+10
+liveness_heartbeatlatency_count 2722.0
+# HELP sql_mem_internal_txn_current Current sql transaction memory usage for internal
+# TYPE sql_mem_internal_txn_current gauge
+sql_mem_internal_txn_current 0.0
+# HELP sql_exec_latency Latency of SQL statement execution
+# TYPE sql_exec_latency histogram
+sql_exec_latency_bucket{le="+Inf"} 0.0
+sql_exec_latency_sum 0.0
+sql_exec_latency_count 0.0
+# HELP sql_query_started_count_internal Number of SQL queries started (internal queries)
+# TYPE sql_query_started_count_internal counter
+sql_query_started_count_internal 2656.0
+# HELP queue_replicate_pending Number of pending replicas in the replicate queue
+# TYPE queue_replicate_pending gauge
+queue_replicate_pending{store="1"} 0.0
+# HELP queue_replicate_transferlease Number of range lease transfers attempted by the replicate queue
+# TYPE queue_replicate_transferlease counter
+queue_replicate_transferlease{store="1"} 0.0
+# HELP txn_autoretries Number of automatic retries to avoid serializable restarts
+# TYPE txn_autoretries counter
+txn_autoretries 0.0
+# HELP txn_aborts Number of aborted KV transactions
+# TYPE txn_aborts counter
+txn_aborts 1.0
+# HELP txn_restarts_writetooold Number of restarts due to a concurrent writer committing first
+# TYPE txn_restarts_writetooold counter
+txn_restarts_writetooold 0.0
+# HELP sys_cpu_user_percent Current user cpu percentage
+# TYPE sys_cpu_user_percent gauge
+sys_cpu_user_percent 0.02004312840283465
+# HELP sys_rss Current process RSS
+# TYPE sys_rss gauge
+sys_rss 3.14691584e+08
+# HELP changefeed_error_retries Total retryable errors encountered by all changefeeds
+# TYPE changefeed_error_retries counter
+changefeed_error_retries 0.0
+# HELP queue_raftsnapshot_pending Number of pending replicas in the Raft repair queue
+# TYPE queue_raftsnapshot_pending gauge
+queue_raftsnapshot_pending{store="1"} 0.0
+# HELP requests_backpressure_split Number of backpressured writes waiting on a Range split
+# TYPE requests_backpressure_split gauge
+requests_backpressure_split{store="1"} 0.0
+# HELP distsender_rpc_sent_nextreplicaerror Number of RPCs sent due to per-replica errors
+# TYPE distsender_rpc_sent_nextreplicaerror counter
+distsender_rpc_sent_nextreplicaerror 15.0
+# HELP sql_select_count Number of SQL SELECT statements successfully executed
+# TYPE sql_select_count counter
+sql_select_count 0.0
+# HELP leases_expiration Number of replica leaseholders using expiration-based leases
+# TYPE leases_expiration gauge
+leases_expiration{store="1"} 1.0
+# HELP queue_gc_info_transactionspanscanned Number of entries in transaction spans scanned from the engine
+# TYPE queue_gc_info_transactionspanscanned counter
+queue_gc_info_transactionspanscanned{store="1"} 0.0
+# HELP txnrecovery_successes_committed Number of transaction recovery attempts that committed a transaction
+# TYPE txnrecovery_successes_committed counter
+txnrecovery_successes_committed{store="1"} 0.0
+# HELP distsender_batches_async_sent Number of partial batches sent asynchronously
+# TYPE distsender_batches_async_sent counter
+distsender_batches_async_sent 1590.0
+# HELP sql_txn_commit_started_count_internal Number of SQL transaction COMMIT statements started (internal queries)
+# TYPE sql_txn_commit_started_count_internal counter
+sql_txn_commit_started_count_internal 0.0
+# HELP sql_restart_savepoint_release_count_internal Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed (internal queries)
+# TYPE sql_restart_savepoint_release_count_internal counter
+sql_restart_savepoint_release_count_internal 0.0
+# HELP syscount Count of system KV pairs
+# TYPE syscount gauge
+syscount{store="1"} 147.0
+# HELP rocksdb_bloom_filter_prefix_useful Number of times the bloom filter helped avoid iterator creation
+# TYPE rocksdb_bloom_filter_prefix_useful gauge
+rocksdb_bloom_filter_prefix_useful{store="1"} 11962.0
+# HELP rocksdb_estimated_pending_compaction Estimated pending compaction bytes
+# TYPE rocksdb_estimated_pending_compaction gauge
+rocksdb_estimated_pending_compaction{store="1"} 0.0
+# HELP queue_gc_info_intentsconsidered Number of 'old' intents
+# TYPE queue_gc_info_intentsconsidered counter
+queue_gc_info_intentsconsidered{store="1"} 0.0
+# HELP queue_gc_info_transactionspangcpending Number of GC'able entries corresponding to pending txns
+# TYPE queue_gc_info_transactionspangcpending counter
+queue_gc_info_transactionspangcpending{store="1"} 0.0
+# HELP exec_success Number of batch KV requests executed successfully on this node
+# TYPE exec_success counter
+exec_success 10074.0
+# HELP sys_host_disk_read_count Disk read operations across all disks since this process started
+# TYPE sys_host_disk_read_count gauge
+sys_host_disk_read_count 1176.0
+# HELP compactor_suggestionbytes_queued Number of logical bytes in suggested compactions in the queue
+# TYPE compactor_suggestionbytes_queued gauge
+compactor_suggestionbytes_queued{store="1"} 0.0
+# HELP txn_restarts_asyncwritefailure Number of restarts due to async consensus writes that failed to leave intents
+# TYPE txn_restarts_asyncwritefailure counter
+txn_restarts_asyncwritefailure 0.0
+# HELP sys_fd_open Process open file descriptors
+# TYPE sys_fd_open gauge
+sys_fd_open 47.0
+# HELP changefeed_emit_nanos Total time spent emitting all feeds
+# TYPE changefeed_emit_nanos counter
+changefeed_emit_nanos 0.0
+# HELP sql_mem_sql_session_current Current sql session memory usage for sql
+# TYPE sql_mem_sql_session_current gauge
+sql_mem_sql_session_current 0.0
+sql_mem_sql_session_current 0.0
+# HELP sql_mem_conns_txn_current Current sql transaction memory usage for conns
+# TYPE sql_mem_conns_txn_current gauge
+sql_mem_conns_txn_current 0.0
+# HELP txnwaitqueue_deadlocks_total Number of deadlocks detected by the txn wait queue
+# TYPE txnwaitqueue_deadlocks_total counter
+txnwaitqueue_deadlocks_total{store="1"} 0.0
+# HELP sql_mem_internal_txn_max Memory usage per sql transaction for internal
+# TYPE sql_mem_internal_txn_max histogram
+sql_mem_internal_txn_max_bucket{le="4011.0"} 1058.0
+sql_mem_internal_txn_max_bucket{le="4311.0"} 1060.0
+sql_mem_internal_txn_max_bucket{le="4615.0"} 1098.0
+sql_mem_internal_txn_max_bucket{le="4967.0"} 1100.0
+sql_mem_internal_txn_max_bucket{le="+Inf"} 1100.0
+sql_mem_internal_txn_max_sum 4.437564e+06
+sql_mem_internal_txn_max_count 1100.0
+# HELP sql_txn_abort_count_internal Number of SQL transaction abort errors (internal queries)
+# TYPE sql_txn_abort_count_internal counter
+sql_txn_abort_count_internal 0.0
+# HELP leases_epoch Number of replica leaseholders using epoch-based leases
+# TYPE leases_epoch gauge
+leases_epoch{store="1"} 6.0
+# HELP follower_reads_success_count Number of reads successfully processed by any replica
+# TYPE follower_reads_success_count counter
+follower_reads_success_count{store="1"} 2.0
+# HELP raft_ticks Number of Raft ticks queued
+# TYPE raft_ticks counter
+raft_ticks{store="1"} 61183.0
+# HELP queue_gc_info_abortspanscanned Number of transactions present in the AbortSpan scanned from the engine
+# TYPE queue_gc_info_abortspanscanned counter
+queue_gc_info_abortspanscanned{store="1"} 1.0
+# HELP raft_entrycache_hits Number of successful cache lookups in the Raft entry cache
+# TYPE raft_entrycache_hits counter
+raft_entrycache_hits{store="1"} 49670.0
+# HELP sql_mem_sql_session_max Memory usage per sql session for sql
+# TYPE sql_mem_sql_session_max histogram
+sql_mem_sql_session_max_bucket{le="+Inf"} 0.0
+sql_mem_sql_session_max_sum 0.0
+sql_mem_sql_session_max_count 0.0
+sql_mem_sql_session_max_bucket{le="+Inf"} 0.0
+sql_mem_sql_session_max_sum 0.0
+sql_mem_sql_session_max_count 0.0
+# HELP sql_restart_savepoint_rollback_started_count_internal Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started (internal queries)
+# TYPE sql_restart_savepoint_rollback_started_count_internal counter
+sql_restart_savepoint_rollback_started_count_internal 0.0
+# HELP sql_distsql_select_count_internal Number of DistSQL SELECT statements (internal queries)
+# TYPE sql_distsql_select_count_internal counter
+sql_distsql_select_count_internal 1583.0
+# HELP replicas_reserved Number of replicas reserved for snapshots
+# TYPE replicas_reserved gauge
+replicas_reserved{store="1"} 0.0
+# HELP livebytes Number of bytes of live data (keys plus values)
+# TYPE livebytes gauge
+livebytes{store="1"} 8.1979227e+07
+# HELP keybytes Number of bytes taken up by keys
+# TYPE keybytes gauge
+keybytes{store="1"} 6.730852e+06
+# HELP range_adds Number of range additions
+# TYPE range_adds counter
+range_adds{store="1"} 0.0
+# HELP range_snapshots_preemptive_applied Number of applied pre-emptive snapshots
+# TYPE range_snapshots_preemptive_applied counter
+range_snapshots_preemptive_applied{store="1"} 0.0
+# HELP changefeed_emitted_messages Messages emitted by all feeds
+# TYPE changefeed_emitted_messages counter
+changefeed_emitted_messages 0.0
+# HELP queue_gc_process_failure Number of replicas which failed processing in the GC queue
+# TYPE queue_gc_process_failure counter
+queue_gc_process_failure{store="1"} 0.0
+# HELP queue_gc_processingnanos Nanoseconds spent processing replicas in the GC queue
+# TYPE queue_gc_processingnanos counter
+queue_gc_processingnanos{store="1"} 1.21329751e+08
+# HELP raft_entrycache_accesses Number of cache lookups in the Raft entry cache
+# TYPE raft_entrycache_accesses counter
+raft_entrycache_accesses{store="1"} 49766.0
+# HELP txnwaitqueue_query_waiting Number of transaction status queries waiting for an updated transaction record
+# TYPE txnwaitqueue_query_waiting gauge
+txnwaitqueue_query_waiting{store="1"} 0.0
+# HELP queue_gc_process_success Number of replicas successfully processed by the GC queue
+# TYPE queue_gc_process_success counter
+queue_gc_process_success{store="1"} 9.0
+# HELP sql_mem_bulk_current Current sql statement memory usage for bulk operations
+# TYPE sql_mem_bulk_current gauge
+sql_mem_bulk_current 0.0
+# HELP sql_distsql_queries_active Number of distributed SQL queries currently active
+# TYPE sql_distsql_queries_active gauge
+sql_distsql_queries_active 0.0
+# HELP sql_restart_savepoint_started_count Number of `SAVEPOINT cockroach_restart` statements started
+# TYPE sql_restart_savepoint_started_count counter
+sql_restart_savepoint_started_count 0.0
+# HELP sql_txn_commit_count Number of SQL transaction COMMIT statements successfully executed
+# TYPE sql_txn_commit_count counter
+sql_txn_commit_count 0.0
+# HELP txn_restarts Number of restarted KV transactions
+# TYPE txn_restarts histogram
+txn_restarts_bucket{le="+Inf"} 0.0
+txn_restarts_sum 0.0
+txn_restarts_count 0.0
+# HELP sql_bytesin Number of sql bytes received
+# TYPE sql_bytesin counter
+sql_bytesin 0.0
+# HELP sql_distsql_select_count Number of DistSQL SELECT statements
+# TYPE sql_distsql_select_count counter
+sql_distsql_select_count 0.0
+# HELP rocksdb_table_readers_mem_estimate Memory used by index and filter blocks
+# TYPE rocksdb_table_readers_mem_estimate gauge
+rocksdb_table_readers_mem_estimate{store="1"} 122624.0
+# HELP raft_rcvd_appresp Number of MsgAppResp messages received by this store
+# TYPE raft_rcvd_appresp counter
+raft_rcvd_appresp{store="1"} 67681.0
+# HELP sys_cpu_sys_ns Total system cpu time
+# TYPE sys_cpu_sys_ns gauge
+sys_cpu_sys_ns 1.5442e+11
+# HELP distsender_rpc_sent Number of RPCs sent
+# TYPE distsender_rpc_sent counter
+distsender_rpc_sent 58459.0
+# HELP sql_mem_admin_current Current sql statement memory usage for admin
+# TYPE sql_mem_admin_current gauge
+sql_mem_admin_current 0.0
+# HELP build_timestamp Build information
+# TYPE build_timestamp gauge
+build_timestamp{tag="v19.2.2",go_version="go1.12.12"} 1.576028023e+09
+# HELP sql_distsql_flows_queued Number of distributed SQL flows currently queued
+# TYPE sql_distsql_flows_queued gauge
+sql_distsql_flows_queued 0.0
+# HELP sql_mem_sql_current Current sql statement memory usage for sql
+# TYPE sql_mem_sql_current gauge
+sql_mem_sql_current 0.0
+sql_mem_sql_current 0.0
+# HELP sql_ddl_count Number of SQL DDL statements successfully executed
+# TYPE sql_ddl_count counter
+sql_ddl_count 0.0
+# HELP replicas Number of replicas
+# TYPE replicas gauge
+replicas{store="1"} 34.0
+# HELP rpc_heartbeats_loops_started Counter of the number of connection heartbeat loops which have been started
+# TYPE rpc_heartbeats_loops_started counter
+rpc_heartbeats_loops_started 7.0
+# HELP queue_gc_info_transactionspangccommitted Number of GC'able entries corresponding to committed txns
+# TYPE queue_gc_info_transactionspangccommitted counter
+queue_gc_info_transactionspangccommitted{store="1"} 0.0
+# HELP intents_abort_attempts Count of (point or range) non-poisoning intent abort evaluation attempts
+# TYPE intents_abort_attempts counter
+intents_abort_attempts{store="1"} 0.0
+# HELP sys_go_totalbytes Total bytes of memory allocated by go, but not released
+# TYPE sys_go_totalbytes gauge
+sys_go_totalbytes 1.97562616e+08
+# HELP engine_stalls Number of disk stalls detected on this node
+# TYPE engine_stalls counter
+engine_stalls 0.0
+# HELP sql_restart_savepoint_count Number of `SAVEPOINT cockroach_restart` statements successfully executed
+# TYPE sql_restart_savepoint_count counter
+sql_restart_savepoint_count 0.0
+# HELP sysbytes Number of bytes in system KV pairs
+# TYPE sysbytes gauge
+sysbytes{store="1"} 13327.0
+# HELP raft_rcvd_prevote Number of MsgPreVote messages received by this store
+# TYPE raft_rcvd_prevote counter
+raft_rcvd_prevote{store="1"} 32.0
+# HELP liveness_heartbeatfailures Number of failed node liveness heartbeats from this node
+# TYPE liveness_heartbeatfailures counter
+liveness_heartbeatfailures 2.0
+# HELP sql_ddl_started_count_internal Number of SQL DDL statements started (internal queries)
+# TYPE sql_ddl_started_count_internal counter
+sql_ddl_started_count_internal 10.0
+# HELP sql_txn_latency_internal Latency of SQL transactions (internal queries)
+# TYPE sql_txn_latency_internal histogram
+sql_txn_latency_internal_bucket{le="1.441791e+06"} 1.0
+sql_txn_latency_internal_bucket{le="1.572863e+06"} 5.0
+sql_txn_latency_internal_bucket{le="1.638399e+06"} 9.0
+sql_txn_latency_internal_bucket{le="1.703935e+06"} 16.0
+sql_txn_latency_internal_bucket{le="1.769471e+06"} 26.0
+sql_txn_latency_internal_bucket{le="1.835007e+06"} 42.0
+sql_txn_latency_internal_bucket{le="1.900543e+06"} 56.0
+sql_txn_latency_internal_bucket{le="1.966079e+06"} 73.0
+sql_txn_latency_internal_bucket{le="2.031615e+06"} 97.0
+sql_txn_latency_internal_bucket{le="2.097151e+06"} 134.0
+sql_txn_latency_internal_bucket{le="2.228223e+06"} 196.0
+sql_txn_latency_internal_bucket{le="2.359295e+06"} 255.0
+sql_txn_latency_internal_bucket{le="2.490367e+06"} 293.0
+sql_txn_latency_internal_bucket{le="2.621439e+06"} 315.0
+sql_txn_latency_internal_bucket{le="2.752511e+06"} 329.0
+sql_txn_latency_internal_bucket{le="2.883583e+06"} 351.0
+sql_txn_latency_internal_bucket{le="3.014655e+06"} 363.0
+sql_txn_latency_internal_bucket{le="3.145727e+06"} 378.0
+sql_txn_latency_internal_bucket{le="3.276799e+06"} 401.0
+sql_txn_latency_internal_bucket{le="3.407871e+06"} 431.0
+sql_txn_latency_internal_bucket{le="3.538943e+06"} 458.0
+sql_txn_latency_internal_bucket{le="3.670015e+06"} 508.0
+sql_txn_latency_internal_bucket{le="3.801087e+06"} 561.0
+sql_txn_latency_internal_bucket{le="3.932159e+06"} 600.0
+sql_txn_latency_internal_bucket{le="4.063231e+06"} 660.0
+sql_txn_latency_internal_bucket{le="4.194303e+06"} 710.0
+sql_txn_latency_internal_bucket{le="4.456447e+06"} 806.0
+sql_txn_latency_internal_bucket{le="4.718591e+06"} 881.0
+sql_txn_latency_internal_bucket{le="4.980735e+06"} 944.0
+sql_txn_latency_internal_bucket{le="5.242879e+06"} 1018.0
+sql_txn_latency_internal_bucket{le="5.505023e+06"} 1088.0
+sql_txn_latency_internal_bucket{le="5.767167e+06"} 1158.0
+sql_txn_latency_internal_bucket{le="6.029311e+06"} 1212.0
+sql_txn_latency_internal_bucket{le="6.291455e+06"} 1258.0
+sql_txn_latency_internal_bucket{le="6.553599e+06"} 1309.0
+sql_txn_latency_internal_bucket{le="6.815743e+06"} 1361.0
+sql_txn_latency_internal_bucket{le="7.077887e+06"} 1422.0
+sql_txn_latency_internal_bucket{le="7.340031e+06"} 1470.0
+sql_txn_latency_internal_bucket{le="7.602175e+06"} 1511.0
+sql_txn_latency_internal_bucket{le="7.864319e+06"} 1544.0
+sql_txn_latency_internal_bucket{le="8.126463e+06"} 1584.0
+sql_txn_latency_internal_bucket{le="8.388607e+06"} 1620.0
+sql_txn_latency_internal_bucket{le="8.912895e+06"} 1692.0
+sql_txn_latency_internal_bucket{le="9.437183e+06"} 1749.0
+sql_txn_latency_internal_bucket{le="9.961471e+06"} 1806.0
+sql_txn_latency_internal_bucket{le="1.0485759e+07"} 1847.0
+sql_txn_latency_internal_bucket{le="1.1010047e+07"} 1887.0
+sql_txn_latency_internal_bucket{le="1.1534335e+07"} 1923.0
+sql_txn_latency_internal_bucket{le="1.2058623e+07"} 1949.0
+sql_txn_latency_internal_bucket{le="1.2582911e+07"} 1968.0
+sql_txn_latency_internal_bucket{le="1.3107199e+07"} 1982.0
+sql_txn_latency_internal_bucket{le="1.3631487e+07"} 1993.0
+sql_txn_latency_internal_bucket{le="1.4155775e+07"} 2008.0
+sql_txn_latency_internal_bucket{le="1.4680063e+07"} 2016.0
+sql_txn_latency_internal_bucket{le="1.5204351e+07"} 2020.0
+sql_txn_latency_internal_bucket{le="1.5728639e+07"} 2028.0
+sql_txn_latency_internal_bucket{le="1.6252927e+07"} 2032.0
+sql_txn_latency_internal_bucket{le="1.6777215e+07"} 2036.0
+sql_txn_latency_internal_bucket{le="1.7825791e+07"} 2044.0
+sql_txn_latency_internal_bucket{le="1.8874367e+07"} 2049.0
+sql_txn_latency_internal_bucket{le="1.9922943e+07"} 2052.0
+sql_txn_latency_internal_bucket{le="2.0971519e+07"} 2056.0
+sql_txn_latency_internal_bucket{le="2.2020095e+07"} 2060.0
+sql_txn_latency_internal_bucket{le="2.3068671e+07"} 2064.0
+sql_txn_latency_internal_bucket{le="2.4117247e+07"} 2065.0
+sql_txn_latency_internal_bucket{le="2.5165823e+07"} 2066.0
+sql_txn_latency_internal_bucket{le="2.6214399e+07"} 2068.0
+sql_txn_latency_internal_bucket{le="2.8311551e+07"} 2069.0
+sql_txn_latency_internal_bucket{le="2.9360127e+07"} 2070.0
+sql_txn_latency_internal_bucket{le="3.0408703e+07"} 2072.0
+sql_txn_latency_internal_bucket{le="3.2505855e+07"} 2073.0
+sql_txn_latency_internal_bucket{le="3.5651583e+07"} 2074.0
+sql_txn_latency_internal_bucket{le="4.1943039e+07"} 2076.0
+sql_txn_latency_internal_bucket{le="4.8234495e+07"} 2077.0
+sql_txn_latency_internal_bucket{le="1.25829119e+08"} 2078.0
+sql_txn_latency_internal_bucket{le="1.34217727e+08"} 2079.0
+sql_txn_latency_internal_bucket{le="2.18103807e+08"} 2080.0
+sql_txn_latency_internal_bucket{le="2.26492415e+08"} 2081.0
+sql_txn_latency_internal_bucket{le="5.20093695e+08"} 2082.0
+sql_txn_latency_internal_bucket{le="1.0200547327e+10"} 2083.0
+sql_txn_latency_internal_bucket{le="+Inf"} 2083.0
+sql_txn_latency_internal_sum 2.4672466909e+10
+sql_txn_latency_internal_count 2083.0
+# HELP totalbytes Total number of bytes taken up by keys and values including non-live data
+# TYPE totalbytes gauge
+totalbytes{store="1"} 8.225857e+07
+# HELP gcbytesage Cumulative age of non-live data
+# TYPE gcbytesage gauge
+gcbytesage{store="1"} -6.30933145e+08
+# HELP raft_rcvd_prop Number of MsgProp messages received by this store
+# TYPE raft_rcvd_prop counter
+raft_rcvd_prop{store="1"} 13.0
+# HELP raft_rcvd_prevoteresp Number of MsgPreVoteResp messages received by this store
+# TYPE raft_rcvd_prevoteresp counter
+raft_rcvd_prevoteresp{store="1"} 20.0
+# HELP queue_raftlog_process_success Number of replicas successfully processed by the Raft log queue
+# TYPE queue_raftlog_process_success counter
+queue_raftlog_process_success{store="1"} 154.0
+# HELP sql_restart_savepoint_rollback_count Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements successfully executed
+# TYPE sql_restart_savepoint_rollback_count counter
+sql_restart_savepoint_rollback_count 0.0
+# HELP queue_consistency_pending Number of pending replicas in the consistency checker queue
+# TYPE queue_consistency_pending gauge
+queue_consistency_pending{store="1"} 0.0
+# HELP sql_restart_savepoint_rollback_started_count Number of `ROLLBACK TO SAVEPOINT cockroach_restart` statements started
+# TYPE sql_restart_savepoint_rollback_started_count counter
+sql_restart_savepoint_rollback_started_count 0.0
+# HELP sql_update_count_internal Number of SQL UPDATE statements successfully executed (internal queries)
+# TYPE sql_update_count_internal counter
+sql_update_count_internal 16.0
+# HELP addsstable_proposals Number of SSTable ingestions proposed (i.e. sent to Raft by lease holders)
+# TYPE addsstable_proposals counter
+addsstable_proposals{store="1"} 0.0
+# HELP queue_replicate_addreplica Number of replica additions attempted by the replicate queue
+# TYPE queue_replicate_addreplica counter
+queue_replicate_addreplica{store="1"} 0.0
+# HELP sql_mem_sql_txn_max Memory usage per sql transaction for sql
+# TYPE sql_mem_sql_txn_max histogram
+sql_mem_sql_txn_max_bucket{le="+Inf"} 0.0
+sql_mem_sql_txn_max_sum 0.0
+sql_mem_sql_txn_max_count 0.0
+sql_mem_sql_txn_max_bucket{le="+Inf"} 0.0
+sql_mem_sql_txn_max_sum 0.0
+sql_mem_sql_txn_max_count 0.0
+# HELP sql_mem_conns_txn_max Memory usage per sql transaction for conns
+# TYPE sql_mem_conns_txn_max histogram
+sql_mem_conns_txn_max_bucket{le="+Inf"} 0.0
+sql_mem_conns_txn_max_sum 0.0
+sql_mem_conns_txn_max_count 0.0
+# HELP raft_process_commandcommit_latency Latency histogram for committing Raft commands
+# TYPE raft_process_commandcommit_latency histogram
+raft_process_commandcommit_latency_bucket{store="1",le="8703.0"} 2.0
+raft_process_commandcommit_latency_bucket{store="1",le="9215.0"} 5.0
+raft_process_commandcommit_latency_bucket{store="1",le="9727.0"} 23.0
+raft_process_commandcommit_latency_bucket{store="1",le="10239.0"} 64.0
+raft_process_commandcommit_latency_bucket{store="1",le="10751.0"} 119.0
+raft_process_commandcommit_latency_bucket{store="1",le="11263.0"} 215.0
+raft_process_commandcommit_latency_bucket{store="1",le="11775.0"} 298.0
+raft_process_commandcommit_latency_bucket{store="1",le="12287.0"} 415.0
+raft_process_commandcommit_latency_bucket{store="1",le="12799.0"} 517.0
+raft_process_commandcommit_latency_bucket{store="1",le="13311.0"} 608.0
+raft_process_commandcommit_latency_bucket{store="1",le="13823.0"} 674.0
+raft_process_commandcommit_latency_bucket{store="1",le="14335.0"} 748.0
+raft_process_commandcommit_latency_bucket{store="1",le="14847.0"} 809.0
+raft_process_commandcommit_latency_bucket{store="1",le="15359.0"} 863.0
+raft_process_commandcommit_latency_bucket{store="1",le="15871.0"} 916.0
+raft_process_commandcommit_latency_bucket{store="1",le="16383.0"} 977.0
+raft_process_commandcommit_latency_bucket{store="1",le="17407.0"} 1125.0
+raft_process_commandcommit_latency_bucket{store="1",le="18431.0"} 1295.0
+raft_process_commandcommit_latency_bucket{store="1",le="19455.0"} 1531.0
+raft_process_commandcommit_latency_bucket{store="1",le="20479.0"} 1788.0
+raft_process_commandcommit_latency_bucket{store="1",le="21503.0"} 2110.0
+raft_process_commandcommit_latency_bucket{store="1",le="22527.0"} 2513.0
+raft_process_commandcommit_latency_bucket{store="1",le="23551.0"} 2943.0
+raft_process_commandcommit_latency_bucket{store="1",le="24575.0"} 3527.0
+raft_process_commandcommit_latency_bucket{store="1",le="25599.0"} 4139.0
+raft_process_commandcommit_latency_bucket{store="1",le="26623.0"} 4886.0
+raft_process_commandcommit_latency_bucket{store="1",le="27647.0"} 5635.0
+raft_process_commandcommit_latency_bucket{store="1",le="28671.0"} 6427.0
+raft_process_commandcommit_latency_bucket{store="1",le="29695.0"} 7234.0
+raft_process_commandcommit_latency_bucket{store="1",le="30719.0"} 8064.0
+raft_process_commandcommit_latency_bucket{store="1",le="31743.0"} 8964.0
+raft_process_commandcommit_latency_bucket{store="1",le="32767.0"} 9885.0
+raft_process_commandcommit_latency_bucket{store="1",le="34815.0"} 11527.0
+raft_process_commandcommit_latency_bucket{store="1",le="36863.0"} 12928.0
+raft_process_commandcommit_latency_bucket{store="1",le="38911.0"} 14225.0
+raft_process_commandcommit_latency_bucket{store="1",le="40959.0"} 15324.0
+raft_process_commandcommit_latency_bucket{store="1",le="43007.0"} 16255.0
+raft_process_commandcommit_latency_bucket{store="1",le="45055.0"} 17117.0
+raft_process_commandcommit_latency_bucket{store="1",le="47103.0"} 17895.0
+raft_process_commandcommit_latency_bucket{store="1",le="49151.0"} 18640.0
+raft_process_commandcommit_latency_bucket{store="1",le="51199.0"} 19281.0
+raft_process_commandcommit_latency_bucket{store="1",le="53247.0"} 19961.0
+raft_process_commandcommit_latency_bucket{store="1",le="55295.0"} 20546.0
+raft_process_commandcommit_latency_bucket{store="1",le="57343.0"} 21150.0
+raft_process_commandcommit_latency_bucket{store="1",le="59391.0"} 21736.0
+raft_process_commandcommit_latency_bucket{store="1",le="61439.0"} 22256.0
+raft_process_commandcommit_latency_bucket{store="1",le="63487.0"} 22783.0
+raft_process_commandcommit_latency_bucket{store="1",le="65535.0"} 23256.0
+raft_process_commandcommit_latency_bucket{store="1",le="69631.0"} 24251.0
+raft_process_commandcommit_latency_bucket{store="1",le="73727.0"} 25169.0
+raft_process_commandcommit_latency_bucket{store="1",le="77823.0"} 26004.0
+raft_process_commandcommit_latency_bucket{store="1",le="81919.0"} 26775.0
+raft_process_commandcommit_latency_bucket{store="1",le="86015.0"} 27489.0
+raft_process_commandcommit_latency_bucket{store="1",le="90111.0"} 28155.0
+raft_process_commandcommit_latency_bucket{store="1",le="94207.0"} 28752.0
+raft_process_commandcommit_latency_bucket{store="1",le="98303.0"} 29281.0
+raft_process_commandcommit_latency_bucket{store="1",le="102399.0"} 29838.0
+raft_process_commandcommit_latency_bucket{store="1",le="106495.0"} 30300.0
+raft_process_commandcommit_latency_bucket{store="1",le="110591.0"} 30725.0
+raft_process_commandcommit_latency_bucket{store="1",le="114687.0"} 31127.0
+raft_process_commandcommit_latency_bucket{store="1",le="118783.0"} 31498.0
+raft_process_commandcommit_latency_bucket{store="1",le="122879.0"} 31854.0
+raft_process_commandcommit_latency_bucket{store="1",le="126975.0"} 32163.0
+raft_process_commandcommit_latency_bucket{store="1",le="131071.0"} 32450.0
+raft_process_commandcommit_latency_bucket{store="1",le="139263.0"} 32990.0
+raft_process_commandcommit_latency_bucket{store="1",le="147455.0"} 33471.0
+raft_process_commandcommit_latency_bucket{store="1",le="155647.0"} 33830.0
+raft_process_commandcommit_latency_bucket{store="1",le="163839.0"} 34176.0
+raft_process_commandcommit_latency_bucket{store="1",le="172031.0"} 34434.0
+raft_process_commandcommit_latency_bucket{store="1",le="180223.0"} 34668.0
+raft_process_commandcommit_latency_bucket{store="1",le="188415.0"} 34893.0
+raft_process_commandcommit_latency_bucket{store="1",le="196607.0"} 35116.0
+raft_process_commandcommit_latency_bucket{store="1",le="204799.0"} 35301.0
+raft_process_commandcommit_latency_bucket{store="1",le="212991.0"} 35494.0
+raft_process_commandcommit_latency_bucket{store="1",le="221183.0"} 35659.0
+raft_process_commandcommit_latency_bucket{store="1",le="229375.0"} 35833.0
+raft_process_commandcommit_latency_bucket{store="1",le="237567.0"} 35992.0
+raft_process_commandcommit_latency_bucket{store="1",le="245759.0"} 36128.0
+raft_process_commandcommit_latency_bucket{store="1",le="253951.0"} 36269.0
+raft_process_commandcommit_latency_bucket{store="1",le="262143.0"} 36429.0
+raft_process_commandcommit_latency_bucket{store="1",le="278527.0"} 36660.0
+raft_process_commandcommit_latency_bucket{store="1",le="294911.0"} 36867.0
+raft_process_commandcommit_latency_bucket{store="1",le="311295.0"} 37077.0
+raft_process_commandcommit_latency_bucket{store="1",le="327679.0"} 37288.0
+raft_process_commandcommit_latency_bucket{store="1",le="344063.0"} 37454.0
+raft_process_commandcommit_latency_bucket{store="1",le="360447.0"} 37621.0
+raft_process_commandcommit_latency_bucket{store="1",le="376831.0"} 37762.0
+raft_process_commandcommit_latency_bucket{store="1",le="393215.0"} 37920.0
+raft_process_commandcommit_latency_bucket{store="1",le="409599.0"} 38042.0
+raft_process_commandcommit_latency_bucket{store="1",le="425983.0"} 38168.0
+raft_process_commandcommit_latency_bucket{store="1",le="442367.0"} 38289.0
+raft_process_commandcommit_latency_bucket{store="1",le="458751.0"} 38379.0
+raft_process_commandcommit_latency_bucket{store="1",le="475135.0"} 38481.0
+raft_process_commandcommit_latency_bucket{store="1",le="491519.0"} 38564.0
+raft_process_commandcommit_latency_bucket{store="1",le="507903.0"} 38632.0
+raft_process_commandcommit_latency_bucket{store="1",le="524287.0"} 38714.0
+raft_process_commandcommit_latency_bucket{store="1",le="557055.0"} 38861.0
+raft_process_commandcommit_latency_bucket{store="1",le="589823.0"} 39013.0
+raft_process_commandcommit_latency_bucket{store="1",le="622591.0"} 39137.0
+raft_process_commandcommit_latency_bucket{store="1",le="655359.0"} 39263.0
+raft_process_commandcommit_latency_bucket{store="1",le="688127.0"} 39368.0
+raft_process_commandcommit_latency_bucket{store="1",le="720895.0"} 39459.0
+raft_process_commandcommit_latency_bucket{store="1",le="753663.0"} 39557.0
+raft_process_commandcommit_latency_bucket{store="1",le="786431.0"} 39638.0
+raft_process_commandcommit_latency_bucket{store="1",le="819199.0"} 39693.0
+raft_process_commandcommit_latency_bucket{store="1",le="851967.0"} 39770.0
+raft_process_commandcommit_latency_bucket{store="1",le="884735.0"} 39828.0
+raft_process_commandcommit_latency_bucket{store="1",le="917503.0"} 39883.0
+raft_process_commandcommit_latency_bucket{store="1",le="950271.0"} 39941.0
+raft_process_commandcommit_latency_bucket{store="1",le="983039.0"} 39996.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.015807e+06"} 40053.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.048575e+06"} 40103.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.114111e+06"} 40218.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.179647e+06"} 40312.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.245183e+06"} 40401.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.310719e+06"} 40515.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.376255e+06"} 40592.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.441791e+06"} 40706.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.507327e+06"} 40834.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.572863e+06"} 40973.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.638399e+06"} 41123.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.703935e+06"} 41275.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.769471e+06"} 41419.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.835007e+06"} 41557.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.900543e+06"} 41690.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.966079e+06"} 41837.0
+raft_process_commandcommit_latency_bucket{store="1",le="2.031615e+06"} 41976.0
+raft_process_commandcommit_latency_bucket{store="1",le="2.097151e+06"} 42105.0
+raft_process_commandcommit_latency_bucket{store="1",le="2.228223e+06"} 42335.0
+raft_process_commandcommit_latency_bucket{store="1",le="2.359295e+06"} 42526.0
+raft_process_commandcommit_latency_bucket{store="1",le="2.490367e+06"} 42699.0
+raft_process_commandcommit_latency_bucket{store="1",le="2.621439e+06"} 42848.0
+raft_process_commandcommit_latency_bucket{store="1",le="2.752511e+06"} 42973.0
+raft_process_commandcommit_latency_bucket{store="1",le="2.883583e+06"} 43080.0
+raft_process_commandcommit_latency_bucket{store="1",le="3.014655e+06"} 43189.0
+raft_process_commandcommit_latency_bucket{store="1",le="3.145727e+06"} 43286.0
+raft_process_commandcommit_latency_bucket{store="1",le="3.276799e+06"} 43369.0
+raft_process_commandcommit_latency_bucket{store="1",le="3.407871e+06"} 43444.0
+raft_process_commandcommit_latency_bucket{store="1",le="3.538943e+06"} 43523.0
+raft_process_commandcommit_latency_bucket{store="1",le="3.670015e+06"} 43590.0
+raft_process_commandcommit_latency_bucket{store="1",le="3.801087e+06"} 43654.0
+raft_process_commandcommit_latency_bucket{store="1",le="3.932159e+06"} 43717.0
+raft_process_commandcommit_latency_bucket{store="1",le="4.063231e+06"} 43753.0
+raft_process_commandcommit_latency_bucket{store="1",le="4.194303e+06"} 43801.0
+raft_process_commandcommit_latency_bucket{store="1",le="4.456447e+06"} 43889.0
+raft_process_commandcommit_latency_bucket{store="1",le="4.718591e+06"} 43969.0
+raft_process_commandcommit_latency_bucket{store="1",le="4.980735e+06"} 44035.0
+raft_process_commandcommit_latency_bucket{store="1",le="5.242879e+06"} 44079.0
+raft_process_commandcommit_latency_bucket{store="1",le="5.505023e+06"} 44126.0
+raft_process_commandcommit_latency_bucket{store="1",le="5.767167e+06"} 44163.0
+raft_process_commandcommit_latency_bucket{store="1",le="6.029311e+06"} 44180.0
+raft_process_commandcommit_latency_bucket{store="1",le="6.291455e+06"} 44198.0
+raft_process_commandcommit_latency_bucket{store="1",le="6.553599e+06"} 44221.0
+raft_process_commandcommit_latency_bucket{store="1",le="6.815743e+06"} 44237.0
+raft_process_commandcommit_latency_bucket{store="1",le="7.077887e+06"} 44251.0
+raft_process_commandcommit_latency_bucket{store="1",le="7.340031e+06"} 44268.0
+raft_process_commandcommit_latency_bucket{store="1",le="7.602175e+06"} 44285.0
+raft_process_commandcommit_latency_bucket{store="1",le="7.864319e+06"} 44298.0
+raft_process_commandcommit_latency_bucket{store="1",le="8.126463e+06"} 44313.0
+raft_process_commandcommit_latency_bucket{store="1",le="8.388607e+06"} 44319.0
+raft_process_commandcommit_latency_bucket{store="1",le="8.912895e+06"} 44338.0
+raft_process_commandcommit_latency_bucket{store="1",le="9.437183e+06"} 44352.0
+raft_process_commandcommit_latency_bucket{store="1",le="9.961471e+06"} 44358.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.0485759e+07"} 44363.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.1010047e+07"} 44367.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.1534335e+07"} 44373.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.2058623e+07"} 44376.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.2582911e+07"} 44377.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.3107199e+07"} 44379.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.3631487e+07"} 44382.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.4680063e+07"} 44384.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.5204351e+07"} 44386.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.6252927e+07"} 44387.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.8874367e+07"} 44388.0
+raft_process_commandcommit_latency_bucket{store="1",le="1.9922943e+07"} 44390.0
+raft_process_commandcommit_latency_bucket{store="1",le="2.6214399e+07"} 44391.0
+raft_process_commandcommit_latency_bucket{store="1",le="+Inf"} 44391.0
+raft_process_commandcommit_latency_sum{store="1"} 1.6329882265e+10
+raft_process_commandcommit_latency_count{store="1"} 44391.0
+# HELP raft_rcvd_voteresp Number of MsgVoteResp messages received by this store
+# TYPE raft_rcvd_voteresp counter
+raft_rcvd_voteresp{store="1"} 24.0
+# HELP raft_entrycache_size Number of Raft entries in the Raft entry cache
+# TYPE raft_entrycache_size gauge
+raft_entrycache_size{store="1"} 417.0
+# HELP tscache_skl_read_rotations Number of page rotations in the read timestamp cache
+# TYPE tscache_skl_read_rotations counter
+tscache_skl_read_rotations{store="1"} 0.0
+# HELP round_trip_latency Distribution of round-trip latencies with other nodes
+# TYPE round_trip_latency histogram
+round_trip_latency_bucket{le="221183.0"} 1.0
+round_trip_latency_bucket{le="237567.0"} 2.0
+round_trip_latency_bucket{le="253951.0"} 4.0
+round_trip_latency_bucket{le="278527.0"} 10.0
+round_trip_latency_bucket{le="294911.0"} 14.0
+round_trip_latency_bucket{le="311295.0"} 25.0
+round_trip_latency_bucket{le="327679.0"} 51.0
+round_trip_latency_bucket{le="344063.0"} 69.0
+round_trip_latency_bucket{le="360447.0"} 100.0
+round_trip_latency_bucket{le="376831.0"} 128.0
+round_trip_latency_bucket{le="393215.0"} 171.0
+round_trip_latency_bucket{le="409599.0"} 225.0
+round_trip_latency_bucket{le="425983.0"} 287.0
+round_trip_latency_bucket{le="442367.0"} 378.0
+round_trip_latency_bucket{le="458751.0"} 475.0
+round_trip_latency_bucket{le="475135.0"} 584.0
+round_trip_latency_bucket{le="491519.0"} 710.0
+round_trip_latency_bucket{le="507903.0"} 863.0
+round_trip_latency_bucket{le="524287.0"} 1038.0
+round_trip_latency_bucket{le="557055.0"} 1475.0
+round_trip_latency_bucket{le="589823.0"} 1979.0
+round_trip_latency_bucket{le="622591.0"} 2622.0
+round_trip_latency_bucket{le="655359.0"} 3314.0
+round_trip_latency_bucket{le="688127.0"} 4064.0
+round_trip_latency_bucket{le="720895.0"} 4905.0
+round_trip_latency_bucket{le="753663.0"} 5812.0
+round_trip_latency_bucket{le="786431.0"} 6765.0
+round_trip_latency_bucket{le="819199.0"} 7791.0
+round_trip_latency_bucket{le="851967.0"} 8913.0
+round_trip_latency_bucket{le="884735.0"} 9981.0
+round_trip_latency_bucket{le="917503.0"} 11033.0
+round_trip_latency_bucket{le="950271.0"} 12068.0
+round_trip_latency_bucket{le="983039.0"} 13072.0
+round_trip_latency_bucket{le="1.015807e+06"} 14069.0
+round_trip_latency_bucket{le="1.048575e+06"} 15031.0
+round_trip_latency_bucket{le="1.114111e+06"} 16651.0
+round_trip_latency_bucket{le="1.179647e+06"} 18055.0
+round_trip_latency_bucket{le="1.245183e+06"} 19374.0
+round_trip_latency_bucket{le="1.310719e+06"} 20496.0
+round_trip_latency_bucket{le="1.376255e+06"} 21477.0
+round_trip_latency_bucket{le="1.441791e+06"} 22299.0
+round_trip_latency_bucket{le="1.507327e+06"} 23073.0
+round_trip_latency_bucket{le="1.572863e+06"} 23740.0
+round_trip_latency_bucket{le="1.638399e+06"} 24341.0
+round_trip_latency_bucket{le="1.703935e+06"} 24843.0
+round_trip_latency_bucket{le="1.769471e+06"} 25249.0
+round_trip_latency_bucket{le="1.835007e+06"} 25668.0
+round_trip_latency_bucket{le="1.900543e+06"} 26007.0
+round_trip_latency_bucket{le="1.966079e+06"} 26344.0
+round_trip_latency_bucket{le="2.031615e+06"} 26597.0
+round_trip_latency_bucket{le="2.097151e+06"} 26801.0
+round_trip_latency_bucket{le="2.228223e+06"} 27159.0
+round_trip_latency_bucket{le="2.359295e+06"} 27448.0
+round_trip_latency_bucket{le="2.490367e+06"} 27652.0
+round_trip_latency_bucket{le="2.621439e+06"} 27822.0
+round_trip_latency_bucket{le="2.752511e+06"} 27959.0
+round_trip_latency_bucket{le="2.883583e+06"} 28063.0
+round_trip_latency_bucket{le="3.014655e+06"} 28123.0
+round_trip_latency_bucket{le="3.145727e+06"} 28185.0
+round_trip_latency_bucket{le="3.276799e+06"} 28243.0
+round_trip_latency_bucket{le="3.407871e+06"} 28281.0
+round_trip_latency_bucket{le="3.538943e+06"} 28332.0
+round_trip_latency_bucket{le="3.670015e+06"} 28358.0
+round_trip_latency_bucket{le="3.801087e+06"} 28377.0
+round_trip_latency_bucket{le="3.932159e+06"} 28399.0
+round_trip_latency_bucket{le="4.063231e+06"} 28416.0
+round_trip_latency_bucket{le="4.194303e+06"} 28426.0
+round_trip_latency_bucket{le="4.456447e+06"} 28446.0
+round_trip_latency_bucket{le="4.718591e+06"} 28460.0
+round_trip_latency_bucket{le="4.980735e+06"} 28469.0
+round_trip_latency_bucket{le="5.242879e+06"} 28478.0
+round_trip_latency_bucket{le="5.505023e+06"} 28484.0
+round_trip_latency_bucket{le="5.767167e+06"} 28489.0
+round_trip_latency_bucket{le="6.029311e+06"} 28493.0
+round_trip_latency_bucket{le="6.553599e+06"} 28494.0
+round_trip_latency_bucket{le="6.815743e+06"} 28497.0
+round_trip_latency_bucket{le="7.077887e+06"} 28498.0
+round_trip_latency_bucket{le="7.340031e+06"} 28500.0
+round_trip_latency_bucket{le="7.602175e+06"} 28501.0
+round_trip_latency_bucket{le="7.864319e+06"} 28502.0
+round_trip_latency_bucket{le="8.126463e+06"} 28505.0
+round_trip_latency_bucket{le="8.388607e+06"} 28507.0
+round_trip_latency_bucket{le="8.912895e+06"} 28509.0
+round_trip_latency_bucket{le="9.437183e+06"} 28510.0
+round_trip_latency_bucket{le="9.961471e+06"} 28511.0
+round_trip_latency_bucket{le="1.0485759e+07"} 28512.0
+round_trip_latency_bucket{le="1.1010047e+07"} 28513.0
+round_trip_latency_bucket{le="1.2582911e+07"} 28514.0
+round_trip_latency_bucket{le="1.5204351e+07"} 28515.0
+round_trip_latency_bucket{le="1.6252927e+07"} 28516.0
+round_trip_latency_bucket{le="1.7825791e+07"} 28518.0
+round_trip_latency_bucket{le="1.9922943e+07"} 28519.0
+round_trip_latency_bucket{le="2.2020095e+07"} 28520.0
+round_trip_latency_bucket{le="2.9360127e+07"} 28523.0
+round_trip_latency_bucket{le="3.1457279e+07"} 28524.0
+round_trip_latency_bucket{le="3.2505855e+07"} 28525.0
+round_trip_latency_bucket{le="5.2428799e+07"} 28526.0
+round_trip_latency_bucket{le="1.50994943e+08"} 28527.0
+round_trip_latency_bucket{le="3.52321535e+08"} 28528.0
+round_trip_latency_bucket{le="4.19430399e+08"} 28529.0
+round_trip_latency_bucket{le="6.71088639e+08"} 28530.0
+round_trip_latency_bucket{le="+Inf"} 28530.0
+round_trip_latency_sum 3.5795193998e+10
+round_trip_latency_count 28530.0
+# HELP sql_failure_count_internal Number of statements resulting in a planning or runtime error (internal queries)
+# TYPE sql_failure_count_internal counter
+sql_failure_count_internal 6.0
+# HELP raft_heartbeats_pending Number of pending heartbeats and responses waiting to be coalesced
+# TYPE raft_heartbeats_pending gauge
+raft_heartbeats_pending{store="1"} 0.0
+# HELP queue_replicate_removedeadreplica Number of dead replica removals attempted by the replicate queue (typically in response to a node outage)
+# TYPE queue_replicate_removedeadreplica counter
+queue_replicate_removedeadreplica{store="1"} 0.0
+# HELP sql_txn_begin_started_count Number of SQL transaction BEGIN statements started
+# TYPE sql_txn_begin_started_count counter
+sql_txn_begin_started_count 0.0
+# HELP timeseries_write_samples Total number of metric samples written to disk
+# TYPE timeseries_write_samples counter
+timeseries_write_samples 845784.0
+# HELP sys_gc_pause_ns Total GC pause
+# TYPE sys_gc_pause_ns gauge
+sys_gc_pause_ns 6.070045e+07
+# HELP sql_restart_savepoint_release_count Number of `RELEASE SAVEPOINT cockroach_restart` statements successfully executed
+# TYPE sql_restart_savepoint_release_count counter
+sql_restart_savepoint_release_count 0.0
+# HELP range_splits Number of range splits
+# TYPE range_splits counter
+range_splits{store="1"} 0.0
+# HELP compactor_suggestionbytes_compacted Number of logical bytes compacted from suggested compactions
+# TYPE compactor_suggestionbytes_compacted counter
+compactor_suggestionbytes_compacted{store="1"} 0.0
+# HELP distsender_errors_inleasetransferbackoffs Number of times backed off due to NotLeaseHolderErrors during lease transfer.
+# TYPE distsender_errors_inleasetransferbackoffs counter
+distsender_errors_inleasetransferbackoffs 0.0
+# HELP sql_distsql_flows_total Number of distributed SQL flows executed
+# TYPE sql_distsql_flows_total counter
+sql_distsql_flows_total 1042.0
+# HELP sql_mem_conns_session_max Memory usage per sql session for conns
+# TYPE sql_mem_conns_session_max histogram
+sql_mem_conns_session_max_bucket{le="+Inf"} 0.0
+sql_mem_conns_session_max_sum 0.0
+sql_mem_conns_session_max_count 0.0
+# HELP sql_optimizer_plan_cache_hits Number of non-prepared statements for which a cached plan was used
+# TYPE sql_optimizer_plan_cache_hits counter
+sql_optimizer_plan_cache_hits 0.0
+# HELP leases_transfers_error Number of failed lease transfers
+# TYPE leases_transfers_error counter
+leases_transfers_error{store="1"} 0.0
+# HELP rebalancing_writespersecond Number of keys written (i.e. applied by raft) per second to the store, averaged over a large time period as used in rebalancing decisions
+# TYPE rebalancing_writespersecond gauge
+rebalancing_writespersecond{store="1"} 213.02361755221986
+# HELP rocksdb_flushes Number of table flushes
+# TYPE rocksdb_flushes gauge
+rocksdb_flushes{store="1"} 13.0
+# HELP changefeed_buffer_entries_in Total entries entering the buffer between raft and changefeed sinks
+# TYPE changefeed_buffer_entries_in counter
+changefeed_buffer_entries_in 0.0
+# HELP sys_host_disk_write_bytes Bytes written to all disks since this process started
+# TYPE sys_host_disk_write_bytes gauge
+sys_host_disk_write_bytes 942080.0
+# HELP changefeed_emitted_bytes Bytes emitted by all feeds
+# TYPE changefeed_emitted_bytes counter
+changefeed_emitted_bytes 0.0
+# HELP sql_insert_started_count Number of SQL INSERT statements started
+# TYPE sql_insert_started_count counter
+sql_insert_started_count 0.0
+# HELP sql_distsql_exec_latency Latency of DistSQL statement execution
+# TYPE sql_distsql_exec_latency histogram
+sql_distsql_exec_latency_bucket{le="+Inf"} 0.0
+sql_distsql_exec_latency_sum 0.0
+sql_distsql_exec_latency_count 0.0
+# HELP queue_replicagc_processingnanos Nanoseconds spent processing replicas in the replica GC queue
+# TYPE queue_replicagc_processingnanos counter
+queue_replicagc_processingnanos{store="1"} 3.60590602e+09
+# HELP queue_replicate_removelearnerreplica Number of learner replica removals attempted by the replicate queue (typically due to internal race conditions)
+# TYPE queue_replicate_removelearnerreplica counter
+queue_replicate_removelearnerreplica{store="1"} 0.0
+# HELP rebalancing_range_rebalances Number of range rebalance operations motivated by store-level load imbalances
+# TYPE rebalancing_range_rebalances counter
+rebalancing_range_rebalances{store="1"} 0.0
+# HELP sql_mem_admin_session_max Memory usage per sql session for admin
+# TYPE sql_mem_admin_session_max histogram
+sql_mem_admin_session_max_bucket{le="+Inf"} 0.0
+sql_mem_admin_session_max_sum 0.0
+sql_mem_admin_session_max_count 0.0
+# HELP sql_optimizer_plan_cache_misses_internal Number of non-prepared statements for which a cached plan was not used (internal queries)
+# TYPE sql_optimizer_plan_cache_misses_internal counter
+sql_optimizer_plan_cache_misses_internal 524.0
+# HELP range_removes Number of range removals
+# TYPE range_removes counter
+range_removes{store="1"} 0.0
+# HELP range_snapshots_normal_applied Number of applied snapshots
+# TYPE range_snapshots_normal_applied counter
+range_snapshots_normal_applied{store="1"} 0.0
+# HELP queue_consistency_processingnanos Nanoseconds spent processing replicas in the consistency checker queue
+# TYPE queue_consistency_processingnanos counter
+queue_consistency_processingnanos{store="1"} 1.11826751e+08
+# HELP queue_split_pending Number of pending replicas in the split queue
+# TYPE queue_split_pending gauge
+queue_split_pending{store="1"} 0.0
+# HELP queue_gc_info_intenttxns Number of associated distinct transactions
+# TYPE queue_gc_info_intenttxns counter
+queue_gc_info_intenttxns{store="1"} 0.0
+# HELP queue_gc_info_transactionspangcaborted Number of GC'able entries corresponding to aborted txns
+# TYPE queue_gc_info_transactionspangcaborted counter
+queue_gc_info_transactionspangcaborted{store="1"} 0.0
+# HELP sql_txn_begin_count_internal Number of SQL transaction BEGIN statements successfully executed (internal queries)
+# TYPE sql_txn_begin_count_internal counter
+sql_txn_begin_count_internal 0.0
+# HELP ranges Number of ranges
+# TYPE ranges gauge
+ranges{store="1"} 34.0
+# HELP raft_rcvd_transferleader Number of MsgTransferLeader messages received by this store
+# TYPE raft_rcvd_transferleader counter
+raft_rcvd_transferleader{store="1"} 0.0
+# HELP gossip_connections_refused Number of refused incoming gossip connections
+# TYPE gossip_connections_refused counter
+gossip_connections_refused 0.0
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/cockroachdb/testdata/non_cockroachdb.txt b/src/go/plugin/go.d/modules/cockroachdb/testdata/non_cockroachdb.txt
new file mode 100644
index 000000000..f5f0ae082
--- /dev/null
+++ b/src/go/plugin/go.d/modules/cockroachdb/testdata/non_cockroachdb.txt
@@ -0,0 +1,27 @@
+# HELP wmi_os_process_memory_limix_bytes OperatingSystem.MaxProcessMemorySize
+# TYPE wmi_os_process_memory_limix_bytes gauge
+wmi_os_process_memory_limix_bytes 1.40737488224256e+14
+# HELP wmi_os_processes OperatingSystem.NumberOfProcesses
+# TYPE wmi_os_processes gauge
+wmi_os_processes 124
+# HELP wmi_os_processes_limit OperatingSystem.MaxNumberOfProcesses
+# TYPE wmi_os_processes_limit gauge
+wmi_os_processes_limit 4.294967295e+09
+# HELP wmi_os_time OperatingSystem.LocalDateTime
+# TYPE wmi_os_time gauge
+wmi_os_time 1.57804974e+09
+# HELP wmi_os_timezone OperatingSystem.LocalDateTime
+# TYPE wmi_os_timezone gauge
+wmi_os_timezone{timezone="MSK"} 1
+# HELP wmi_os_users OperatingSystem.NumberOfUsers
+# TYPE wmi_os_users gauge
+wmi_os_users 2
+# HELP wmi_os_virtual_memory_bytes OperatingSystem.TotalVirtualMemorySize
+# TYPE wmi_os_virtual_memory_bytes gauge
+wmi_os_virtual_memory_bytes 5.770891264e+09
+# HELP wmi_os_virtual_memory_free_bytes OperatingSystem.FreeVirtualMemory
+# TYPE wmi_os_virtual_memory_free_bytes gauge
+wmi_os_virtual_memory_free_bytes 3.76489984e+09
+# HELP wmi_os_visible_memory_bytes OperatingSystem.TotalVisibleMemorySize
+# TYPE wmi_os_visible_memory_bytes gauge
+wmi_os_visible_memory_bytes 4.294496256e+09
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/consul/README.md b/src/go/plugin/go.d/modules/consul/README.md
new file mode 120000
index 000000000..5e57e46dc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/README.md
@@ -0,0 +1 @@
+integrations/consul.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/consul/charts.go b/src/go/plugin/go.d/modules/consul/charts.go
new file mode 100644
index 000000000..697a0c36a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/charts.go
@@ -0,0 +1,739 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package consul
+
+import (
+ "fmt"
+
+ "github.com/blang/semver/v4"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ _ = module.Priority + iota
+ prioKVSApplyTime
+ prioKVSApplyOperations
+ prioTXNApplyTime
+ prioTXNApplyOperations
+ prioRaftCommitTime
+ prioRaftCommitsRate
+
+ prioServerLeadershipStatus
+ prioRaftLeaderLastContactTime
+ prioRaftFollowerLastContactLeaderTime
+ prioRaftLeaderElections
+ prioRaftLeadershipTransitions
+
+ prioAutopilotClusterHealthStatus
+ prioAutopilotFailureTolerance
+ prioAutopilotServerHealthStatus
+ prioAutopilotServerStableTime
+ prioAutopilotServerSerfStatus
+ prioAutopilotServerVoterStatus
+
+ prioNetworkLanRTT
+
+ prioRPCRequests
+ prioRPCRequestsExceeded
+ prioRPCRequestsFailed
+
+ prioRaftThreadMainSaturation
+ prioRaftThreadFSMSaturation
+
+ prioRaftFSMLastRestoreDuration
+ prioRaftLeaderOldestLogAge
+ prioRaftRPCInstallSnapshotTime
+
+ prioBoltDBFreelistBytes
+ prioBoltDBLogsPerBatch
+ prioBoltDBStoreLogsTime
+
+ prioMemoryAllocated
+ prioMemorySys
+ prioGCPauseTime
+
+ prioServiceHealthCheckStatus
+ prioNodeHealthCheckStatus
+
+ prioLicenseExpirationTime
+)
+
+var (
+ clientCharts = module.Charts{
+ clientRPCRequestsRateChart.Copy(),
+ clientRPCRequestsExceededRateChart.Copy(),
+ clientRPCRequestsFailedRateChart.Copy(),
+
+ memoryAllocatedChart.Copy(),
+ memorySysChart.Copy(),
+ gcPauseTimeChart.Copy(),
+
+ licenseExpirationTimeChart.Copy(),
+ }
+
+ serverLeaderCharts = module.Charts{
+ raftCommitTimeChart.Copy(),
+ raftLeaderLastContactTimeChart.Copy(),
+ raftCommitsRateChart.Copy(),
+ raftLeaderOldestLogAgeChart.Copy(),
+ }
+ serverFollowerCharts = module.Charts{
+ raftFollowerLastContactLeaderTimeChart.Copy(),
+ raftRPCInstallSnapshotTimeChart.Copy(),
+ }
+ serverAutopilotHealthCharts = module.Charts{
+ autopilotServerHealthStatusChart.Copy(),
+ autopilotServerStableTimeChart.Copy(),
+ autopilotServerSerfStatusChart.Copy(),
+ autopilotServerVoterStatusChart.Copy(),
+ }
+ serverCommonCharts = module.Charts{
+ kvsApplyTimeChart.Copy(),
+ kvsApplyOperationsRateChart.Copy(),
+ txnApplyTimeChart.Copy(),
+ txnApplyOperationsRateChart.Copy(),
+
+ autopilotClusterHealthStatusChart.Copy(),
+ autopilotFailureTolerance.Copy(),
+
+ raftLeaderElectionsRateChart.Copy(),
+ raftLeadershipTransitionsRateChart.Copy(),
+ serverLeadershipStatusChart.Copy(),
+
+ networkLanRTTChart.Copy(),
+
+ clientRPCRequestsRateChart.Copy(),
+ clientRPCRequestsExceededRateChart.Copy(),
+ clientRPCRequestsFailedRateChart.Copy(),
+
+ raftThreadMainSaturationPercChart.Copy(),
+ raftThreadFSMSaturationPercChart.Copy(),
+
+ raftFSMLastRestoreDurationChart.Copy(),
+
+ raftBoltDBFreelistBytesChart.Copy(),
+ raftBoltDBLogsPerBatchChart.Copy(),
+ raftBoltDBStoreLogsTimeChart.Copy(),
+
+ memoryAllocatedChart.Copy(),
+ memorySysChart.Copy(),
+ gcPauseTimeChart.Copy(),
+
+ licenseExpirationTimeChart.Copy(),
+ }
+
+ kvsApplyTimeChart = module.Chart{
+ ID: "kvs_apply_time",
+ Title: "KVS apply time",
+ Units: "ms",
+ Fam: "transaction timing",
+ Ctx: "consul.kvs_apply_time",
+ Priority: prioKVSApplyTime,
+ Dims: module.Dims{
+ {ID: "kvs_apply_quantile=0.5", Name: "quantile_0.5", Div: precision * precision},
+ {ID: "kvs_apply_quantile=0.9", Name: "quantile_0.9", Div: precision * precision},
+ {ID: "kvs_apply_quantile=0.99", Name: "quantile_0.99", Div: precision * precision},
+ },
+ }
+ kvsApplyOperationsRateChart = module.Chart{
+ ID: "kvs_apply_operations_rate",
+ Title: "KVS apply operations",
+ Units: "ops/s",
+ Fam: "transaction timing",
+ Ctx: "consul.kvs_apply_operations_rate",
+ Priority: prioKVSApplyOperations,
+ Dims: module.Dims{
+ {ID: "kvs_apply_count", Name: "kvs_apply"},
+ },
+ }
+ txnApplyTimeChart = module.Chart{
+ ID: "txn_apply_time",
+ Title: "Transaction apply time",
+ Units: "ms",
+ Fam: "transaction timing",
+ Ctx: "consul.txn_apply_time",
+ Priority: prioTXNApplyTime,
+ Dims: module.Dims{
+ {ID: "txn_apply_quantile=0.5", Name: "quantile_0.5", Div: precision * precision},
+ {ID: "txn_apply_quantile=0.9", Name: "quantile_0.9", Div: precision * precision},
+ {ID: "txn_apply_quantile=0.99", Name: "quantile_0.99", Div: precision * precision},
+ },
+ }
+ txnApplyOperationsRateChart = module.Chart{
+ ID: "txn_apply_operations_rate",
+ Title: "Transaction apply operations",
+ Units: "ops/s",
+ Fam: "transaction timing",
+ Ctx: "consul.txn_apply_operations_rate",
+ Priority: prioTXNApplyOperations,
+ Dims: module.Dims{
+ {ID: "txn_apply_count", Name: "kvs_apply"},
+ },
+ }
+
+ raftCommitTimeChart = module.Chart{
+ ID: "raft_commit_time",
+ Title: "Raft commit time",
+ Units: "ms",
+ Fam: "transaction timing",
+ Ctx: "consul.raft_commit_time",
+ Priority: prioRaftCommitTime,
+ Dims: module.Dims{
+ {ID: "raft_commitTime_quantile=0.5", Name: "quantile_0.5", Div: precision * precision},
+ {ID: "raft_commitTime_quantile=0.9", Name: "quantile_0.9", Div: precision * precision},
+ {ID: "raft_commitTime_quantile=0.99", Name: "quantile_0.99", Div: precision * precision},
+ },
+ }
+ raftCommitsRateChart = module.Chart{
+ ID: "raft_commits_rate",
+ Title: "Raft commits rate",
+ Units: "commits/s",
+ Fam: "transaction timing",
+ Ctx: "consul.raft_commits_rate",
+ Priority: prioRaftCommitsRate,
+ Dims: module.Dims{
+ {ID: "raft_apply", Name: "commits", Div: precision, Algo: module.Incremental},
+ },
+ }
+
+ autopilotClusterHealthStatusChart = module.Chart{
+ ID: "autopilot_health_status",
+ Title: "Autopilot cluster health status",
+ Units: "status",
+ Fam: "autopilot",
+ Ctx: "consul.autopilot_health_status",
+ Priority: prioAutopilotClusterHealthStatus,
+ Dims: module.Dims{
+ {ID: "autopilot_healthy_yes", Name: "healthy"},
+ {ID: "autopilot_healthy_no", Name: "unhealthy"},
+ },
+ }
+ autopilotFailureTolerance = module.Chart{
+ ID: "autopilot_failure_tolerance",
+ Title: "Autopilot cluster failure tolerance",
+ Units: "servers",
+ Fam: "autopilot",
+ Ctx: "consul.autopilot_failure_tolerance",
+ Priority: prioAutopilotFailureTolerance,
+ Dims: module.Dims{
+ {ID: "autopilot_failure_tolerance", Name: "failure_tolerance"},
+ },
+ }
+ autopilotServerHealthStatusChart = module.Chart{
+ ID: "autopilot_server_health_status",
+ Title: "Autopilot server health status",
+ Units: "status",
+ Fam: "autopilot",
+ Ctx: "consul.autopilot_server_health_status",
+ Priority: prioAutopilotServerHealthStatus,
+ Dims: module.Dims{
+ {ID: "autopilot_server_healthy_yes", Name: "healthy"},
+ {ID: "autopilot_server_healthy_no", Name: "unhealthy"},
+ },
+ }
+ autopilotServerStableTimeChart = module.Chart{
+ ID: "autopilot_server_stable_time",
+ Title: "Autopilot server stable time",
+ Units: "seconds",
+ Fam: "autopilot",
+ Ctx: "consul.autopilot_server_stable_time",
+ Priority: prioAutopilotServerStableTime,
+ Dims: module.Dims{
+ {ID: "autopilot_server_stable_time", Name: "stable"},
+ },
+ }
+ autopilotServerSerfStatusChart = module.Chart{
+ ID: "autopilot_server_serf_status",
+ Title: "Autopilot server Serf status",
+ Units: "status",
+ Fam: "autopilot",
+ Ctx: "consul.autopilot_server_serf_status",
+ Priority: prioAutopilotServerSerfStatus,
+ Dims: module.Dims{
+ {ID: "autopilot_server_sefStatus_alive", Name: "alive"},
+ {ID: "autopilot_server_sefStatus_failed", Name: "failed"},
+ {ID: "autopilot_server_sefStatus_left", Name: "left"},
+ {ID: "autopilot_server_sefStatus_none", Name: "none"},
+ },
+ }
+ autopilotServerVoterStatusChart = module.Chart{
+ ID: "autopilot_server_voter_status",
+ Title: "Autopilot server Raft voting membership",
+ Units: "status",
+ Fam: "autopilot",
+ Ctx: "consul.autopilot_server_voter_status",
+ Priority: prioAutopilotServerVoterStatus,
+ Dims: module.Dims{
+ {ID: "autopilot_server_voter_yes", Name: "voter"},
+ {ID: "autopilot_server_voter_no", Name: "not_voter"},
+ },
+ }
+
+ raftLeaderLastContactTimeChart = module.Chart{
+ ID: "raft_leader_last_contact_time",
+ Title: "Raft leader last contact time",
+ Units: "ms",
+ Fam: "leadership changes",
+ Ctx: "consul.raft_leader_last_contact_time",
+ Priority: prioRaftLeaderLastContactTime,
+ Dims: module.Dims{
+ {ID: "raft_leader_lastContact_quantile=0.5", Name: "quantile_0.5", Div: precision * precision},
+ {ID: "raft_leader_lastContact_quantile=0.9", Name: "quantile_0.9", Div: precision * precision},
+ {ID: "raft_leader_lastContact_quantile=0.99", Name: "quantile_0.99", Div: precision * precision},
+ },
+ }
+ raftFollowerLastContactLeaderTimeChart = module.Chart{
+ ID: "raft_follower_last_contact_leader_time",
+ Title: "Raft follower last contact with the leader time",
+ Units: "ms",
+ Fam: "leadership changes",
+ Ctx: "consul.raft_follower_last_contact_leader_time",
+ Priority: prioRaftFollowerLastContactLeaderTime,
+ Dims: module.Dims{
+ {ID: "autopilot_server_lastContact_leader", Name: "leader_last_contact"},
+ },
+ }
+ raftLeaderElectionsRateChart = module.Chart{
+ ID: "raft_leader_elections_rate",
+ Title: "Raft leader elections rate",
+ Units: "elections/s",
+ Fam: "leadership changes",
+ Ctx: "consul.raft_leader_elections_rate",
+ Priority: prioRaftLeaderElections,
+ Dims: module.Dims{
+ {ID: "raft_state_candidate", Name: "leader", Algo: module.Incremental},
+ },
+ }
+ raftLeadershipTransitionsRateChart = module.Chart{
+ ID: "raft_leadership_transitions_rate",
+ Title: "Raft leadership transitions rate",
+ Units: "transitions/s",
+ Fam: "leadership changes",
+ Ctx: "consul.raft_leadership_transitions_rate",
+ Priority: prioRaftLeadershipTransitions,
+ Dims: module.Dims{
+ {ID: "raft_state_leader", Name: "leadership", Algo: module.Incremental},
+ },
+ }
+ serverLeadershipStatusChart = module.Chart{
+ ID: "server_leadership_status",
+ Title: "Server leadership status",
+ Units: "status",
+ Fam: "leadership changes",
+ Ctx: "consul.server_leadership_status",
+ Priority: prioServerLeadershipStatus,
+ Dims: module.Dims{
+ {ID: "server_isLeader_yes", Name: "leader"},
+ {ID: "server_isLeader_no", Name: "not_leader"},
+ },
+ }
+
+ networkLanRTTChart = module.Chart{
+ ID: "network_lan_rtt",
+ Title: "Network lan RTT",
+ Units: "ms",
+ Fam: "network rtt",
+ Ctx: "consul.network_lan_rtt",
+ Type: module.Area,
+ Priority: prioNetworkLanRTT,
+ Dims: module.Dims{
+ {ID: "network_lan_rtt_min", Name: "min", Div: 1e6},
+ {ID: "network_lan_rtt_max", Name: "max", Div: 1e6},
+ {ID: "network_lan_rtt_avg", Name: "avg", Div: 1e6},
+ },
+ }
+
+ clientRPCRequestsRateChart = module.Chart{
+ ID: "client_rpc_requests_rate",
+ Title: "Client RPC requests",
+ Units: "requests/s",
+ Fam: "rpc network activity",
+ Ctx: "consul.client_rpc_requests_rate",
+ Priority: prioRPCRequests,
+ Dims: module.Dims{
+ {ID: "client_rpc", Name: "rpc", Algo: module.Incremental},
+ },
+ }
+ clientRPCRequestsExceededRateChart = module.Chart{
+ ID: "client_rpc_requests_exceeded_rate",
+ Title: "Client rate-limited RPC requests",
+ Units: "requests/s",
+ Fam: "rpc network activity",
+ Ctx: "consul.client_rpc_requests_exceeded_rate",
+ Priority: prioRPCRequestsExceeded,
+ Dims: module.Dims{
+ {ID: "client_rpc_exceeded", Name: "exceeded", Algo: module.Incremental},
+ },
+ }
+ clientRPCRequestsFailedRateChart = module.Chart{
+ ID: "client_rpc_requests_failed_rate",
+ Title: "Client failed RPC requests",
+ Units: "requests/s",
+ Fam: "rpc network activity",
+ Ctx: "consul.client_rpc_requests_failed_rate",
+ Priority: prioRPCRequestsFailed,
+ Dims: module.Dims{
+ {ID: "client_rpc_failed", Name: "failed", Algo: module.Incremental},
+ },
+ }
+
+ raftThreadMainSaturationPercChart = module.Chart{
+ ID: "raft_thread_main_saturation_perc",
+ Title: "Raft main thread saturation",
+ Units: "percentage",
+ Fam: "raft saturation",
+ Ctx: "consul.raft_thread_main_saturation_perc",
+ Priority: prioRaftThreadMainSaturation,
+ Dims: module.Dims{
+ {ID: "raft_thread_main_saturation_quantile=0.5", Name: "quantile_0.5", Div: precision * 10},
+ {ID: "raft_thread_main_saturation_quantile=0.9", Name: "quantile_0.9", Div: precision * 10},
+ {ID: "raft_thread_main_saturation_quantile=0.99", Name: "quantile_0.99", Div: precision * 10},
+ },
+ }
+ raftThreadFSMSaturationPercChart = module.Chart{
+ ID: "raft_thread_fsm_saturation_perc",
+ Title: "Raft FSM thread saturation",
+ Units: "percentage",
+ Fam: "raft saturation",
+ Ctx: "consul.raft_thread_fsm_saturation_perc",
+ Priority: prioRaftThreadFSMSaturation,
+ Dims: module.Dims{
+ {ID: "raft_thread_fsm_saturation_quantile=0.5", Name: "quantile_0.5", Div: precision * 10},
+ {ID: "raft_thread_fsm_saturation_quantile=0.9", Name: "quantile_0.9", Div: precision * 10},
+ {ID: "raft_thread_fsm_saturation_quantile=0.99", Name: "quantile_0.99", Div: precision * 10},
+ },
+ }
+
+ raftFSMLastRestoreDurationChart = module.Chart{
+ ID: "raft_fsm_last_restore_duration",
+ Title: "Raft last restore duration",
+ Units: "ms",
+ Fam: "raft replication capacity",
+ Ctx: "consul.raft_fsm_last_restore_duration",
+ Priority: prioRaftFSMLastRestoreDuration,
+ Dims: module.Dims{
+ {ID: "raft_fsm_lastRestoreDuration", Name: "last_restore_duration"},
+ },
+ }
+ raftLeaderOldestLogAgeChart = module.Chart{
+ ID: "raft_leader_oldest_log_age",
+ Title: "Raft leader oldest log age",
+ Units: "seconds",
+ Fam: "raft replication capacity",
+ Ctx: "consul.raft_leader_oldest_log_age",
+ Priority: prioRaftLeaderOldestLogAge,
+ Dims: module.Dims{
+ {ID: "raft_leader_oldestLogAge", Name: "oldest_log_age", Div: 1000},
+ },
+ }
+ raftRPCInstallSnapshotTimeChart = module.Chart{
+ ID: "raft_rpc_install_snapshot_time",
+ Title: "Raft RPC install snapshot time",
+ Units: "ms",
+ Fam: "raft replication capacity",
+ Ctx: "consul.raft_rpc_install_snapshot_time",
+ Priority: prioRaftRPCInstallSnapshotTime,
+ Dims: module.Dims{
+ {ID: "raft_rpc_installSnapshot_quantile=0.5", Name: "quantile_0.5", Div: precision * precision},
+ {ID: "raft_rpc_installSnapshot_quantile=0.9", Name: "quantile_0.9", Div: precision * precision},
+ {ID: "raft_rpc_installSnapshot_quantile=0.99", Name: "quantile_0.99", Div: precision * precision},
+ },
+ }
+
+ raftBoltDBFreelistBytesChart = module.Chart{
+ ID: "raft_boltdb_freelist_bytes",
+ Title: "Raft BoltDB freelist",
+ Units: "bytes",
+ Fam: "boltdb performance",
+ Ctx: "consul.raft_boltdb_freelist_bytes",
+ Priority: prioBoltDBFreelistBytes,
+ Dims: module.Dims{
+ {ID: "raft_boltdb_freelistBytes", Name: "freelist"},
+ },
+ }
+ raftBoltDBLogsPerBatchChart = module.Chart{
+ ID: "raft_boltdb_logs_per_batch_rate",
+ Title: "Raft BoltDB logs written per batch",
+ Units: "logs/s",
+ Fam: "boltdb performance",
+ Ctx: "consul.raft_boltdb_logs_per_batch_rate",
+ Priority: prioBoltDBLogsPerBatch,
+ Dims: module.Dims{
+ {ID: "raft_boltdb_logsPerBatch_sum", Name: "written", Algo: module.Incremental},
+ },
+ }
+
+ raftBoltDBStoreLogsTimeChart = module.Chart{
+ ID: "raft_boltdb_store_logs_time",
+ Title: "Raft BoltDB store logs time",
+ Units: "ms",
+ Fam: "boltdb performance",
+ Ctx: "consul.raft_boltdb_store_logs_time",
+ Priority: prioBoltDBStoreLogsTime,
+ Dims: module.Dims{
+ {ID: "raft_boltdb_storeLogs_quantile=0.5", Name: "quantile_0.5", Div: precision * precision},
+ {ID: "raft_boltdb_storeLogs_quantile=0.9", Name: "quantile_0.9", Div: precision * precision},
+ {ID: "raft_boltdb_storeLogs_quantile=0.99", Name: "quantile_0.99", Div: precision * precision},
+ },
+ }
+
+ memoryAllocatedChart = module.Chart{
+ ID: "memory_allocated",
+ Title: "Memory allocated by the Consul process",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "consul.memory_allocated",
+ Priority: prioMemoryAllocated,
+ Dims: module.Dims{
+ {ID: "runtime_alloc_bytes", Name: "allocated"},
+ },
+ }
+ memorySysChart = module.Chart{
+ ID: "memory_sys",
+ Title: "Memory obtained from the OS",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "consul.memory_sys",
+ Priority: prioMemorySys,
+ Dims: module.Dims{
+ {ID: "runtime_sys_bytes", Name: "sys"},
+ },
+ }
+
+ gcPauseTimeChart = module.Chart{
+ ID: "gc_pause_time",
+ Title: "Garbage collection stop-the-world pause time",
+ Units: "seconds",
+ Fam: "garbage collection",
+ Ctx: "consul.gc_pause_time",
+ Priority: prioGCPauseTime,
+ Dims: module.Dims{
+ {ID: "runtime_total_gc_pause_ns", Name: "gc_pause", Algo: module.Incremental, Div: 1e9},
+ },
+ }
+
+ licenseExpirationTimeChart = module.Chart{
+ ID: "license_expiration_time",
+ Title: "License expiration time",
+ Units: "seconds",
+ Fam: "license",
+ Ctx: "consul.license_expiration_time",
+ Priority: prioLicenseExpirationTime,
+ Dims: module.Dims{
+ {ID: "system_licenseExpiration", Name: "license_expiration"},
+ },
+ }
+)
+
+var (
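+ // the '%s' placeholder in the chart and dimension IDs below is replaced with the check ID
+ // when a chart is instantiated from one of these templates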
+ serviceHealthCheckStatusChartTmpl = module.Chart{
+ ID: "health_check_%s_status",
+ Title: "Service health check status",
+ Units: "status",
+ Fam: "service health checks",
+ Ctx: "consul.service_health_check_status",
+ Priority: prioServiceHealthCheckStatus,
+ Dims: module.Dims{
+ {ID: "health_check_%s_passing_status", Name: "passing"},
+ {ID: "health_check_%s_critical_status", Name: "critical"},
+ {ID: "health_check_%s_maintenance_status", Name: "maintenance"},
+ {ID: "health_check_%s_warning_status", Name: "warning"},
+ },
+ }
+ nodeHealthCheckStatusChartTmpl = module.Chart{
+ ID: "health_check_%s_status",
+ Title: "Node health check status",
+ Units: "status",
+ Fam: "node health checks",
+ Ctx: "consul.node_health_check_status",
+ Priority: prioNodeHealthCheckStatus,
+ Dims: module.Dims{
+ {ID: "health_check_%s_passing_status", Name: "passing"},
+ {ID: "health_check_%s_critical_status", Name: "critical"},
+ {ID: "health_check_%s_maintenance_status", Name: "maintenance"},
+ {ID: "health_check_%s_warning_status", Name: "warning"},
+ },
+ }
+)
+
+func (c *Consul) addGlobalCharts() {
+ if !c.isTelemetryPrometheusEnabled() {
+ return
+ }
+
+ var charts *module.Charts
+
+ if !c.isServer() {
+ charts = clientCharts.Copy()
+ } else {
+ charts = serverCommonCharts.Copy()
+
+ // checking whether a response contains a metric is unreliable, because some metrics are retained after they stop being emitted
+ // https://github.com/hashicorp/go-metrics/blob/b6d5c860c07ef6eeec89f4a662c7b452dd4d0c93/prometheus/prometheus.go#L75-L76
+ if c.version != nil {
+ if c.version.LT(semver.Version{Major: 1, Minor: 13, Patch: 0}) {
+ _ = charts.Remove(raftThreadMainSaturationPercChart.ID)
+ _ = charts.Remove(raftThreadFSMSaturationPercChart.ID)
+ }
+ if c.version.LT(semver.Version{Major: 1, Minor: 11, Patch: 0}) {
+ _ = charts.Remove(kvsApplyTimeChart.ID)
+ _ = charts.Remove(kvsApplyOperationsRateChart.ID)
+ _ = charts.Remove(txnApplyTimeChart.ID)
+ _ = charts.Remove(txnApplyOperationsRateChart.ID)
+ _ = charts.Remove(raftBoltDBFreelistBytesChart.ID)
+ }
+ }
+ }
+
+ if !c.hasLicense() {
+ _ = charts.Remove(licenseExpirationTimeChart.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.Labels = []module.Label{
+ {Key: "datacenter", Value: c.cfg.Config.Datacenter},
+ {Key: "node_name", Value: c.cfg.Config.NodeName},
+ }
+ }
+
+ if err := c.Charts().Add(*charts.Copy()...); err != nil {
+ c.Warning(err)
+ }
+}
+
+func (c *Consul) addServerAutopilotHealthCharts() {
+ charts := serverAutopilotHealthCharts.Copy()
+
+ for _, chart := range *charts {
+ chart.Labels = []module.Label{
+ {Key: "datacenter", Value: c.cfg.Config.Datacenter},
+ {Key: "node_name", Value: c.cfg.Config.NodeName},
+ }
+ }
+
+ if err := c.Charts().Add(*charts...); err != nil {
+ c.Warning(err)
+ }
+}
+
+func newServiceHealthCheckChart(check *agentCheck) *module.Chart {
+ chart := serviceHealthCheckStatusChartTmpl.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, check.CheckID)
+ chart.Labels = []module.Label{
+ {Key: "node_name", Value: check.Node},
+ {Key: "check_name", Value: check.Name},
+ {Key: "service_name", Value: check.ServiceName},
+ }
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, check.CheckID)
+ }
+ return chart
+}
+
+func newNodeHealthCheckChart(check *agentCheck) *module.Chart {
+ chart := nodeHealthCheckStatusChartTmpl.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, check.CheckID)
+ chart.Labels = []module.Label{
+ {Key: "node_name", Value: check.Node},
+ {Key: "check_name", Value: check.Name},
+ }
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, check.CheckID)
+ }
+ return chart
+}
+
+func (c *Consul) addHealthCheckCharts(check *agentCheck) {
+ var chart *module.Chart
+
+ if check.ServiceName != "" {
+ chart = newServiceHealthCheckChart(check)
+ } else {
+ chart = newNodeHealthCheckChart(check)
+ }
+
+ chart.Labels = append(chart.Labels, module.Label{
+ Key: "datacenter",
+ Value: c.cfg.Config.Datacenter,
+ })
+
+ if err := c.Charts().Add(chart); err != nil {
+ c.Warning(err)
+ }
+}
+
+func (c *Consul) removeHealthCheckCharts(checkID string) {
+ id := fmt.Sprintf("health_check_%s_status", checkID)
+
+ chart := c.Charts().Get(id)
+ if chart == nil {
+ c.Warningf("failed to remove '%s' chart: the chart does not exist", id)
+ return
+ }
+
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+}
+
+func (c *Consul) addLeaderCharts() {
+ charts := serverLeaderCharts.Copy()
+
+ for _, chart := range *charts {
+ chart.Labels = []module.Label{
+ {Key: "datacenter", Value: c.cfg.Config.Datacenter},
+ {Key: "node_name", Value: c.cfg.Config.NodeName},
+ }
+ }
+
+ if err := c.Charts().Add(*charts...); err != nil {
+ c.Warning(err)
+ }
+}
+
+func (c *Consul) removeLeaderCharts() {
+ s := make(map[string]bool)
+ for _, v := range serverLeaderCharts {
+ s[v.ID] = true
+ }
+
+ for _, v := range *c.Charts() {
+ if s[v.ID] {
+ v.MarkRemove()
+ v.MarkNotCreated()
+ }
+ }
+}
+
+func (c *Consul) addFollowerCharts() {
+ charts := serverFollowerCharts.Copy()
+ if c.isCloudManaged() {
+ // 'autopilot_server_lastContact_leader' comes from 'operator/autopilot/health' which is disabled
+ _ = charts.Remove(raftFollowerLastContactLeaderTimeChart.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.Labels = []module.Label{
+ {Key: "datacenter", Value: c.cfg.Config.Datacenter},
+ {Key: "node_name", Value: c.cfg.Config.NodeName},
+ }
+ }
+
+ if err := c.Charts().Add(*charts...); err != nil {
+ c.Warning(err)
+ }
+}
+
+func (c *Consul) removeFollowerCharts() {
+ s := make(map[string]bool)
+ for _, v := range serverFollowerCharts {
+ s[v.ID] = true
+ }
+
+ for _, v := range *c.Charts() {
+ if s[v.ID] {
+ v.MarkRemove()
+ v.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/consul/collect.go b/src/go/plugin/go.d/modules/consul/collect.go
new file mode 100644
index 000000000..3033e046e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/collect.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package consul
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
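+ // precision is the factor float metric values are scaled by before being stored as int64;
+ // the corresponding charts divide it back out via their Div setting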
+ precision = 1000
+)
+
+func (c *Consul) collect() (map[string]int64, error) {
+ if c.cfg == nil {
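+ // the agent configuration is fetched once and cached after the first successful collection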
+ if err := c.collectConfiguration(); err != nil {
+ return nil, err
+ }
+
+ c.addGlobalChartsOnce.Do(c.addGlobalCharts)
+ }
+
+ mx := make(map[string]int64)
+
+ if err := c.collectChecks(mx); err != nil {
+ return nil, err
+ }
+
+ if c.isServer() {
+ if !c.isCloudManaged() {
+ c.addServerAutopilotChartsOnce.Do(c.addServerAutopilotHealthCharts)
+ // 'operator/autopilot/health' is disabled in Cloud managed (403: Operation is not allowed in managed Consul clusters)
+ if err := c.collectAutopilotHealth(mx); err != nil {
+ return nil, err
+ }
+ }
+ if err := c.collectNetworkRTT(mx); err != nil {
+ return nil, err
+ }
+ }
+
+ if c.isTelemetryPrometheusEnabled() {
+ if err := c.collectMetricsPrometheus(mx); err != nil {
+ return nil, err
+ }
+ }
+
+ return mx, nil
+}
+
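+// isTelemetryPrometheusEnabled reports whether the agent exposes /v1/agent/metrics in Prometheus format.
+// The agent does so only when 'telemetry.prometheus_retention_time' is greater than 0;
+// with the default of 0, the reported PrometheusOpts.Expiration is "0s".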
+func (c *Consul) isTelemetryPrometheusEnabled() bool {
+ return c.cfg.DebugConfig.Telemetry.PrometheusOpts.Expiration != "0s"
+}
+
+func (c *Consul) isCloudManaged() bool {
+ return c.cfg.DebugConfig.Cloud.ClientSecret != "" || c.cfg.DebugConfig.Cloud.ResourceID != ""
+}
+
+func (c *Consul) hasLicense() bool {
+ return c.cfg.Stats.License.ID != ""
+}
+
+func (c *Consul) isServer() bool {
+ return c.cfg.Config.Server
+}
+
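+// doOKDecode sends the configured request to the given agent API path (adding the X-Consul-Token
+// header when an ACL token is set) and JSON-decodes the response body into 'in'.
+// http.StatusOK is always accepted; any extra statusCodes passed in are accepted as well.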
+func (c *Consul) doOKDecode(urlPath string, in interface{}, statusCodes ...int) error {
+ req, err := web.NewHTTPRequestWithPath(c.Request, urlPath)
+ if err != nil {
+ return fmt.Errorf("error on creating request: %v", err)
+ }
+
+ if c.ACLToken != "" {
+ req.Header.Set("X-Consul-Token", c.ACLToken)
+ }
+
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on request to %s : %v", req.URL, err)
+ }
+
+ defer closeBody(resp)
+
+ codes := map[int]bool{http.StatusOK: true}
+ for _, v := range statusCodes {
+ codes[v] = true
+ }
+
+ if !codes[resp.StatusCode] {
+ return fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+ }
+
+ if err = json.NewDecoder(resp.Body).Decode(&in); err != nil {
+ return fmt.Errorf("error on decoding response from %s : %v", req.URL, err)
+ }
+
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
diff --git a/src/go/plugin/go.d/modules/consul/collect_autopilot.go b/src/go/plugin/go.d/modules/consul/collect_autopilot.go
new file mode 100644
index 000000000..e73ce9b25
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/collect_autopilot.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package consul
+
+import (
+ "net/http"
+ "time"
+)
+
+const (
+ // https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health
+ urlPathOperationAutopilotHealth = "/v1/operator/autopilot/health"
+)
+
+type autopilotHealth struct {
+ Servers []struct {
+ ID string
+ SerfStatus string
+ Leader bool
+ LastContact string
+ Healthy bool
+ Voter bool
+ StableSince time.Time
+ }
+}
+
+func (c *Consul) collectAutopilotHealth(mx map[string]int64) error {
+ var health autopilotHealth
+
+ // The HTTP status code will indicate the health of the cluster: 200 is healthy, 429 is unhealthy.
+ // https://github.com/hashicorp/consul/blob/c7ef04c5979dbc311ff3c67b7bf3028a93e8b0f1/agent/operator_endpoint.go#L325
+ if err := c.doOKDecode(urlPathOperationAutopilotHealth, &health, http.StatusTooManyRequests); err != nil {
+ return err
+ }
+
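+ // only the entry describing this agent's own server is collected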
+ for _, srv := range health.Servers {
+ if srv.ID == c.cfg.Config.NodeID {
+ // SerfStatus: alive, left, failed or none:
+ // https://github.com/hashicorp/consul/blob/c7ef04c5979dbc311ff3c67b7bf3028a93e8b0f1/agent/consul/operator_autopilot_endpoint.go#L124-L133
+ mx["autopilot_server_sefStatus_alive"] = boolToInt(srv.SerfStatus == "alive")
+ mx["autopilot_server_sefStatus_left"] = boolToInt(srv.SerfStatus == "left")
+ mx["autopilot_server_sefStatus_failed"] = boolToInt(srv.SerfStatus == "failed")
+ mx["autopilot_server_sefStatus_none"] = boolToInt(srv.SerfStatus == "none")
+ // https://github.com/hashicorp/raft-autopilot/blob/d936f51c374c3b7902d5e4fdafe9f7d8d199ea53/types.go#L110
+ mx["autopilot_server_healthy_yes"] = boolToInt(srv.Healthy)
+ mx["autopilot_server_healthy_no"] = boolToInt(!srv.Healthy)
+ mx["autopilot_server_voter_yes"] = boolToInt(srv.Voter)
+ mx["autopilot_server_voter_no"] = boolToInt(!srv.Voter)
+ mx["autopilot_server_stable_time"] = int64(time.Since(srv.StableSince).Seconds())
+ if !srv.Leader {
+ if v, err := time.ParseDuration(srv.LastContact); err == nil {
+ mx["autopilot_server_lastContact_leader"] = v.Milliseconds()
+ }
+ }
+
+ break
+ }
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/consul/collect_checks.go b/src/go/plugin/go.d/modules/consul/collect_checks.go
new file mode 100644
index 000000000..88ea4612b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/collect_checks.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package consul
+
+const (
+ // https://www.consul.io/api-docs/agent/check#list-checks
+ urlPathAgentChecks = "/v1/agent/checks"
+)
+
+type agentCheck struct {
+ Node string
+ CheckID string
+ Name string
+ Status string
+ ServiceID string
+ ServiceName string
+ ServiceTags []string
+}
+
+func (c *Consul) collectChecks(mx map[string]int64) error {
+ var checks map[string]*agentCheck
+
+ if err := c.doOKDecode(urlPathAgentChecks, &checks); err != nil {
+ return err
+ }
+
+ for id, check := range checks {
+ if !c.checks[id] {
+ c.checks[id] = true
+ c.addHealthCheckCharts(check)
+ }
+
+ mx["health_check_"+id+"_passing_status"] = boolToInt(check.Status == "passing")
+ mx["health_check_"+id+"_warning_status"] = boolToInt(check.Status == "warning")
+ mx["health_check_"+id+"_critical_status"] = boolToInt(check.Status == "critical")
+ mx["health_check_"+id+"_maintenance_status"] = boolToInt(check.Status == "maintenance")
+ }
+
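+ // remove charts for checks that no longer exist on the agent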
+ for id := range c.checks {
+ if _, ok := checks[id]; !ok {
+ delete(c.checks, id)
+ c.removeHealthCheckCharts(id)
+ }
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/consul/collect_config.go b/src/go/plugin/go.d/modules/consul/collect_config.go
new file mode 100644
index 000000000..14c77067f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/collect_config.go
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package consul
+
+import (
+ "github.com/blang/semver/v4"
+)
+
+const (
+ // https://developer.hashicorp.com/consul/api-docs/agent#read-configuration
+ urlPathAgentSelf = "/v1/agent/self"
+)
+
+type consulConfig struct {
+ Config struct {
+ Datacenter string
+ PrimaryDatacenter string
+ NodeName string
+ NodeID string
+ Server bool
+ Version string
+ }
+ DebugConfig struct {
+ Telemetry struct {
+ MetricsPrefix string
+ DisableHostname bool
+ PrometheusOpts struct {
+ Expiration string
+ Name string
+ }
+ }
+ Cloud struct {
+ AuthURL string
+ ClientID string
+ ClientSecret string
+ Hostname string
+ ResourceID string
+ ScadaAddress string
+ }
+ }
+ Stats struct {
+ License struct {
+ ID string `json:"id"`
+ } `json:"license"`
+ }
+}
+
+func (c *Consul) collectConfiguration() error {
+ var cfg consulConfig
+
+ if err := c.doOKDecode(urlPathAgentSelf, &cfg); err != nil {
+ return err
+ }
+
+ c.cfg = &cfg
+ c.Debugf("consul config: %+v", cfg)
+
+ if !c.isTelemetryPrometheusEnabled() {
+ c.Warning("export of Prometheus metrics is disabled")
+ }
+
+ ver, err := semver.New(c.cfg.Config.Version)
+ if err != nil {
+ c.Warningf("error on parsing Consul version '%s': %v", c.cfg.Config.Version, err)
+ return nil
+ }
+
+ c.version = ver
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/consul/collect_metrics.go b/src/go/plugin/go.d/modules/consul/collect_metrics.go
new file mode 100644
index 000000000..557ecf64c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/collect_metrics.go
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package consul
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+func (c *Consul) collectMetricsPrometheus(mx map[string]int64) error {
+ mfs, err := c.prom.Scrape()
+ if err != nil {
+ return err
+ }
+
+ // Key Metrics (https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics)
+
+ // Prometheus metric names are inconsistent:
+ //  - if 'disable_hostname' is false (the default):
+ //    - consul_autopilot_failure_tolerance is also exposed as consul_<hostname>_autopilot_failure_tolerance
+ //    - both variants are present, but only the hostname-prefixed one carries the correct value
+ //  - on 1.14.3 (probably related to the cloud-managed build):
+ //    - runtime_sys_bytes is also exposed as runtime_sys_bytes_sys_bytes; consul_autopilot_healthy as consul_autopilot_healthy_healthy
+ //    - both variants are present, but only the one with the duplicated suffix carries the correct value
+
+ if c.isServer() {
+ c.collectSummary(mx, mfs, "raft_thread_main_saturation")
+ c.collectSummary(mx, mfs, "raft_thread_fsm_saturation")
+ c.collectSummary(mx, mfs, "raft_boltdb_logsPerBatch")
+ c.collectSummary(mx, mfs, "kvs_apply")
+ c.collectSummary(mx, mfs, "txn_apply")
+ c.collectSummary(mx, mfs, "raft_boltdb_storeLogs")
+ c.collectSummary(mx, mfs, "raft_rpc_installSnapshot") // makes sense only for followers
+ c.collectSummary(mx, mfs, "raft_commitTime") // makes sense only for the leader
+ c.collectSummary(mx, mfs, "raft_leader_lastContact") // makes sense only for the leader
+
+ c.collectCounter(mx, mfs, "raft_apply", precision) // makes sense only for the leader
+ c.collectCounter(mx, mfs, "raft_state_candidate", 1)
+ c.collectCounter(mx, mfs, "raft_state_leader", 1)
+
+ c.collectGaugeBool(mx, mfs, "autopilot_healthy", "autopilot_healthy_healthy")
+ c.collectGaugeBool(mx, mfs, "server_isLeader", "server_isLeader_isLeader")
+ c.collectGauge(mx, mfs, "autopilot_failure_tolerance", 1, "autopilot_failure_tolerance_failure_tolerance")
+ c.collectGauge(mx, mfs, "raft_fsm_lastRestoreDuration", 1)
+ c.collectGauge(mx, mfs, "raft_leader_oldestLogAge", 1, "raft_leader_oldestLogAge_oldestLogAge")
+ c.collectGauge(mx, mfs, "raft_boltdb_freelistBytes", 1, "raft_boltdb_freelistBytes_freelistBytes")
+
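+ // leader and follower chart sets are mutually exclusive; swap them when the reported role changes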
+ if isLeader, ok := c.isLeader(mfs); ok {
+ if isLeader && !c.hasLeaderCharts {
+ c.addLeaderCharts()
+ c.hasLeaderCharts = true
+ }
+ if !isLeader && c.hasLeaderCharts {
+ c.removeLeaderCharts()
+ c.hasLeaderCharts = false
+ }
+ if !isLeader && !c.hasFollowerCharts {
+ c.addFollowerCharts()
+ c.hasFollowerCharts = true
+ }
+ if isLeader && c.hasFollowerCharts {
+ c.removeFollowerCharts()
+ c.hasFollowerCharts = false
+ }
+ }
+ }
+
+ c.collectGauge(mx, mfs, "system_licenseExpiration", 3600, "system_licenseExpiration_licenseExpiration")
+
+ c.collectCounter(mx, mfs, "client_rpc", 1)
+ c.collectCounter(mx, mfs, "client_rpc_exceeded", 1)
+ c.collectCounter(mx, mfs, "client_rpc_failed", 1)
+
+ c.collectGauge(mx, mfs, "runtime_alloc_bytes", 1, "runtime_alloc_bytes_alloc_bytes")
+ c.collectGauge(mx, mfs, "runtime_sys_bytes", 1, "runtime_sys_bytes_sys_bytes")
+ c.collectGauge(mx, mfs, "runtime_total_gc_pause_ns", 1, "runtime_total_gc_pause_ns_total_gc_pause_ns")
+
+ return nil
+}
+
+func (c *Consul) isLeader(mfs prometheus.MetricFamilies) (bool, bool) {
+ var mf *prometheus.MetricFamily
+ for _, v := range []string{"server_isLeader_isLeader", "server_isLeader"} {
+ if mf = mfs.GetGauge(c.promMetricNameWithHostname(v)); mf != nil {
+ break
+ }
+ if mf = mfs.GetGauge(c.promMetricName(v)); mf != nil {
+ break
+ }
+ }
+
+ if mf == nil {
+ return false, false
+ }
+
+ return mf.Metrics()[0].Gauge().Value() == 1, true
+}
+
+func (c *Consul) collectGauge(mx map[string]int64, mfs prometheus.MetricFamilies, name string, mul float64, aliases ...string) {
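+ // each candidate name (aliases first, then the canonical name) is looked up with the
+ // hostname-prefixed form before the plain form; see the naming notes in collectMetricsPrometheus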
+ var mf *prometheus.MetricFamily
+ for _, v := range append(aliases, name) {
+ if mf = mfs.GetGauge(c.promMetricNameWithHostname(v)); mf != nil {
+ break
+ }
+ if mf = mfs.GetGauge(c.promMetricName(v)); mf != nil {
+ break
+ }
+ }
+
+ if mf == nil {
+ return
+ }
+
+ v := mf.Metrics()[0].Gauge().Value()
+
+ if !math.IsNaN(v) {
+ mx[name] = int64(v * mul)
+ }
+}
+
+func (c *Consul) collectGaugeBool(mx map[string]int64, mfs prometheus.MetricFamilies, name string, aliases ...string) {
+ var mf *prometheus.MetricFamily
+ for _, v := range append(aliases, name) {
+ if mf = mfs.GetGauge(c.promMetricNameWithHostname(v)); mf != nil {
+ break
+ }
+ if mf = mfs.GetGauge(c.promMetricName(v)); mf != nil {
+ break
+ }
+ }
+
+ if mf == nil {
+ return
+ }
+
+ v := mf.Metrics()[0].Gauge().Value()
+
+ if !math.IsNaN(v) {
+ mx[name+"_yes"] = boolToInt(v == 1)
+ mx[name+"_no"] = boolToInt(v == 0)
+ }
+}
+
+func (c *Consul) collectCounter(mx map[string]int64, mfs prometheus.MetricFamilies, name string, mul float64) {
+ mf := mfs.GetCounter(c.promMetricName(name))
+ if mf == nil {
+ return
+ }
+
+ v := mf.Metrics()[0].Counter().Value()
+
+ if !math.IsNaN(v) {
+ mx[name] = int64(v * mul)
+ }
+}
+
+func (c *Consul) collectSummary(mx map[string]int64, mfs prometheus.MetricFamilies, name string) {
+ mf := mfs.GetSummary(c.promMetricName(name))
+ if mf == nil {
+ return
+ }
+
+ m := mf.Metrics()[0]
+
+ for _, q := range m.Summary().Quantiles() {
+ v := q.Value()
+ // MaxAge is 10 seconds (hardcoded)
+ // https://github.com/hashicorp/go-metrics/blob/b6d5c860c07ef6eeec89f4a662c7b452dd4d0c93/prometheus/prometheus.go#L227
+ if math.IsNaN(v) {
+ v = 0
+ }
+
+ id := fmt.Sprintf("%s_quantile=%s", name, formatFloat(q.Quantile()))
+ mx[id] = int64(v * precision * precision)
+ }
+
+ mx[name+"_sum"] = int64(m.Summary().Sum() * precision)
+ mx[name+"_count"] = int64(m.Summary().Count())
+}
+
+func (c *Consul) promMetricName(name string) string {
+ px := c.cfg.DebugConfig.Telemetry.MetricsPrefix
+ return px + "_" + name
+}
+
+var forbiddenCharsReplacer = strings.NewReplacer(" ", "_", ".", "_", "=", "_", "-", "_", "/", "_")
+
+// controlled by 'disable_hostname'
+// https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-disable_hostname
+func (c *Consul) promMetricNameWithHostname(name string) string {
+ px := c.cfg.DebugConfig.Telemetry.MetricsPrefix
+ node := forbiddenCharsReplacer.Replace(c.cfg.Config.NodeName)
+
+ return px + "_" + node + "_" + name
+}
+
+func formatFloat(v float64) string {
+ return strconv.FormatFloat(v, 'f', -1, 64)
+}
diff --git a/src/go/plugin/go.d/modules/consul/collect_net_rtt.go b/src/go/plugin/go.d/modules/consul/collect_net_rtt.go
new file mode 100644
index 000000000..80330d23c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/collect_net_rtt.go
@@ -0,0 +1,75 @@
+package consul
+
+import (
+ "math"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+)
+
+const (
+ // https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes
+ urlPathCoordinateNodes = "/v1/coordinate/nodes"
+)
+
+type nodeCoordinates struct {
+ Node string
+ Coord struct {
+ Vec []float64
+ Error float64
+ Adjustment float64
+ Height float64
+ }
+}
+
+func (c *Consul) collectNetworkRTT(mx map[string]int64) error {
+ var coords []nodeCoordinates
+
+ if err := c.doOKDecode(urlPathCoordinateNodes, &coords); err != nil {
+ return err
+ }
+
+ var thisNode nodeCoordinates
+ var ok bool
+
+ coords, thisNode, ok = removeNodeCoordinates(coords, c.cfg.Config.NodeName)
+ if !ok || len(coords) == 0 {
+ return nil
+ }
+
+ sum := metrics.NewSummary()
+ for _, v := range coords {
+ d := calcDistance(thisNode, v)
+ sum.Observe(d.Seconds())
+ }
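+ // distances are observed in seconds; WriteTo scales them by 1e9, so min/max/avg/sum are stored in nanoseconds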
+ sum.WriteTo(mx, "network_lan_rtt", 1e9, 1)
+
+ return nil
+}
+
+func calcDistance(a, b nodeCoordinates) time.Duration {
+ // https://developer.hashicorp.com/consul/docs/architecture/coordinates#working-with-coordinates
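+ // estimated RTT = Euclidean distance between the coordinate vectors plus both heights;
+ // the adjustment terms are applied only when the adjusted value stays positive (all inputs are in seconds)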
+ sum := 0.0
+ for i := 0; i < len(a.Coord.Vec); i++ {
+ diff := a.Coord.Vec[i] - b.Coord.Vec[i]
+ sum += diff * diff
+ }
+
+ rtt := math.Sqrt(sum) + a.Coord.Height + b.Coord.Height
+
+ adjusted := rtt + a.Coord.Adjustment + b.Coord.Adjustment
+ if adjusted > 0.0 {
+ rtt = adjusted
+ }
+
+ return time.Duration(rtt * 1e9) // nanoseconds
+}
+
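+// removeNodeCoordinates removes the given node's coordinates from the slice in place and returns
+// the shrunk slice, the removed entry, and whether the node was found.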
+func removeNodeCoordinates(coords []nodeCoordinates, node string) ([]nodeCoordinates, nodeCoordinates, bool) {
+ for i, v := range coords {
+ if v.Node == node {
+ return append(coords[:i], coords[i+1:]...), v, true
+ }
+ }
+ return coords, nodeCoordinates{}, false
+}
diff --git a/src/go/plugin/go.d/modules/consul/config_schema.json b/src/go/plugin/go.d/modules/consul/config_schema.json
new file mode 100644
index 000000000..a716e15e4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/config_schema.json
@@ -0,0 +1,193 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Consul collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the Consul [HTTP(S) API](https://developer.hashicorp.com/consul/api-docs).",
+ "type": "string",
+ "default": "http://127.0.0.1:8500",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "acl_token": {
+ "title": "X-Consul-Token",
+ "description": "The Consul token for [authentication](https://developer.hashicorp.com/consul/api-docs/api-structure#authentication).",
+ "type": "string",
+ "sensitive": true
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "acl_token",
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "acl_token": {
+ "ui:widget": "password"
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/consul/consul.go b/src/go/plugin/go.d/modules/consul/consul.go
new file mode 100644
index 000000000..6389d0650
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/consul.go
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package consul
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/blang/semver/v4"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("consul", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 1,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Consul {
+ return &Consul{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8500",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: &module.Charts{},
+ addGlobalChartsOnce: &sync.Once{},
+ addServerAutopilotChartsOnce: &sync.Once{},
+ checks: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ ACLToken string `yaml:"acl_token,omitempty" json:"acl_token"`
+}
+
+type Consul struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+ addGlobalChartsOnce *sync.Once
+ addServerAutopilotChartsOnce *sync.Once
+
+ httpClient *http.Client
+ prom prometheus.Prometheus
+
+ cfg *consulConfig
+ version *semver.Version
+ hasLeaderCharts bool
+ hasFollowerCharts bool
+ checks map[string]bool
+}
+
+func (c *Consul) Configuration() any {
+ return c.Config
+}
+
+func (c *Consul) Init() error {
+ if err := c.validateConfig(); err != nil {
+ c.Errorf("config validation: %v", err)
+ return err
+ }
+
+ httpClient, err := c.initHTTPClient()
+ if err != nil {
+ c.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ c.httpClient = httpClient
+
+ prom, err := c.initPrometheusClient(httpClient)
+ if err != nil {
+ c.Errorf("init Prometheus client: %v", err)
+ return err
+ }
+ c.prom = prom
+
+ return nil
+}
+
+func (c *Consul) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (c *Consul) Charts() *module.Charts {
+ return c.charts
+}
+
+func (c *Consul) Collect() map[string]int64 {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (c *Consul) Cleanup() {
+ if c.httpClient != nil {
+ c.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/consul/consul_test.go b/src/go/plugin/go.d/modules/consul/consul_test.go
new file mode 100644
index 000000000..ccc9f99be
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/consul_test.go
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package consul
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer1132Checks, _ = os.ReadFile("testdata/v1.13.2/v1-agent-checks.json")
+ dataVer1132ClientSelf, _ = os.ReadFile("testdata/v1.13.2/client_v1-agent-self.json")
+ dataVer1132ClientPromMetrics, _ = os.ReadFile("testdata/v1.13.2/client_v1-agent-metrics.txt")
+ dataVer1132ServerSelf, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self.json")
+ dataVer1132ServerSelfDisabledPrometheus, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self_disabled_prom.json")
+ dataVer1132ServerSelfWithHostname, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self_with_hostname.json")
+ dataVer1132ServerPromMetrics, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-metrics.txt")
+ dataVer1132ServerPromMetricsWithHostname, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt")
+ dataVer1132ServerOperatorAutopilotHealth, _ = os.ReadFile("testdata/v1.13.2/server_v1-operator-autopilot-health.json")
+ dataVer1132ServerCoordinateNodes, _ = os.ReadFile("testdata/v1.13.2/server_v1-coordinate-nodes.json")
+
+ dataVer1143CloudServerPromMetrics, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-agent-metrics.txt")
+ dataVer1143CloudServerSelf, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-agent-self.json")
+ dataVer1143CloudServerCoordinateNodes, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json")
+ dataVer1143CloudChecks, _ = os.ReadFile("testdata/v1.14.3-cloud/v1-agent-checks.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer1132Checks": dataVer1132Checks,
+ "dataVer1132ClientSelf": dataVer1132ClientSelf,
+ "dataVer1132ClientPromMetrics": dataVer1132ClientPromMetrics,
+ "dataVer1132ServerSelf": dataVer1132ServerSelf,
+ "dataVer1132ServerSelfWithHostname": dataVer1132ServerSelfWithHostname,
+ "dataVer1132ServerSelfDisabledPrometheus": dataVer1132ServerSelfDisabledPrometheus,
+ "dataVer1132ServerPromMetrics": dataVer1132ServerPromMetrics,
+ "dataVer1132ServerPromMetricsWithHostname": dataVer1132ServerPromMetricsWithHostname,
+ "dataVer1132ServerOperatorAutopilotHealth": dataVer1132ServerOperatorAutopilotHealth,
+ "dataVer1132ServerCoordinateNodes": dataVer1132ServerCoordinateNodes,
+ "dataVer1143CloudServerPromMetrics": dataVer1143CloudServerPromMetrics,
+ "dataVer1143CloudServerSelf": dataVer1143CloudServerSelf,
+ "dataVer1143CloudServerCoordinateNodes": dataVer1143CloudServerCoordinateNodes,
+ "dataVer1143CloudChecks": dataVer1143CloudChecks,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestConsul_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Consul{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestConsul_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ consul := New()
+ consul.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, consul.Init())
+ } else {
+ assert.NoError(t, consul.Init())
+ }
+ })
+ }
+}
+
+func TestConsul_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (consul *Consul, cleanup func())
+ }{
+ "success on response from Consul v1.13.2 server": {
+ wantFail: false,
+ prepare: caseConsulV1132ServerResponse,
+ },
+ "success on response from Consul v1.14.3 server cloud managed": {
+ wantFail: false,
+ prepare: caseConsulV1143CloudServerResponse,
+ },
+ "success on response from Consul v1.13.2 server with enabled hostname": {
+ wantFail: false,
+ prepare: caseConsulV1132ServerWithHostnameResponse,
+ },
+ "success on response from Consul v1.13.2 server with disabled prometheus": {
+ wantFail: false,
+ prepare: caseConsulV1132ServerWithDisabledPrometheus,
+ },
+ "success on response from Consul v1.13.2 client": {
+ wantFail: false,
+ prepare: caseConsulV1132ClientResponse,
+ },
+ "fail on invalid data response": {
+ wantFail: true,
+ prepare: caseInvalidDataResponse,
+ },
+ "fail on connection refused": {
+ wantFail: true,
+ prepare: caseConnectionRefused,
+ },
+ "fail on 404 response": {
+ wantFail: true,
+ prepare: case404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ consul, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, consul.Check())
+ } else {
+ assert.NoError(t, consul.Check())
+ }
+ })
+ }
+}
+
+func TestConsul_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (consul *Consul, cleanup func())
+ wantNumOfCharts int
+ wantMetrics map[string]int64
+ }{
+ "success on response from Consul v1.13.2 server": {
+ prepare: caseConsulV1132ServerResponse,
+ // 3 node, 1 service check, no license
+ // 3 node checks, 1 service check, no license
+ wantMetrics: map[string]int64{
+ "autopilot_failure_tolerance": 1,
+ "autopilot_healthy_no": 0,
+ "autopilot_healthy_yes": 1,
+ "autopilot_server_healthy_no": 0,
+ "autopilot_server_healthy_yes": 1,
+ "autopilot_server_lastContact_leader": 13,
+ "autopilot_server_sefStatus_alive": 1,
+ "autopilot_server_sefStatus_failed": 0,
+ "autopilot_server_sefStatus_left": 0,
+ "autopilot_server_sefStatus_none": 0,
+ "autopilot_server_stable_time": 265849,
+ "autopilot_server_voter_no": 0,
+ "autopilot_server_voter_yes": 1,
+ "client_rpc": 6838,
+ "client_rpc_exceeded": 0,
+ "client_rpc_failed": 0,
+ "health_check_chk1_critical_status": 0,
+ "health_check_chk1_maintenance_status": 0,
+ "health_check_chk1_passing_status": 1,
+ "health_check_chk1_warning_status": 0,
+ "health_check_chk2_critical_status": 1,
+ "health_check_chk2_maintenance_status": 0,
+ "health_check_chk2_passing_status": 0,
+ "health_check_chk2_warning_status": 0,
+ "health_check_chk3_critical_status": 1,
+ "health_check_chk3_maintenance_status": 0,
+ "health_check_chk3_passing_status": 0,
+ "health_check_chk3_warning_status": 0,
+ "health_check_mysql_critical_status": 1,
+ "health_check_mysql_maintenance_status": 0,
+ "health_check_mysql_passing_status": 0,
+ "health_check_mysql_warning_status": 0,
+ "kvs_apply_count": 0,
+ "kvs_apply_quantile=0.5": 0,
+ "kvs_apply_quantile=0.9": 0,
+ "kvs_apply_quantile=0.99": 0,
+ "kvs_apply_sum": 0,
+ "network_lan_rtt_avg": 737592,
+ "network_lan_rtt_count": 2,
+ "network_lan_rtt_max": 991168,
+ "network_lan_rtt_min": 484017,
+ "network_lan_rtt_sum": 1475185,
+ "raft_apply": 10681000,
+ "raft_boltdb_freelistBytes": 11264,
+ "raft_boltdb_logsPerBatch_count": 12360,
+ "raft_boltdb_logsPerBatch_quantile=0.5": 1000000,
+ "raft_boltdb_logsPerBatch_quantile=0.9": 1000000,
+ "raft_boltdb_logsPerBatch_quantile=0.99": 1000000,
+ "raft_boltdb_logsPerBatch_sum": 12362000,
+ "raft_boltdb_storeLogs_count": 12360,
+ "raft_boltdb_storeLogs_quantile=0.5": 13176624,
+ "raft_boltdb_storeLogs_quantile=0.9": 13176624,
+ "raft_boltdb_storeLogs_quantile=0.99": 13176624,
+ "raft_boltdb_storeLogs_sum": 651888027,
+ "raft_commitTime_count": 12345,
+ "raft_commitTime_quantile=0.5": 41146488,
+ "raft_commitTime_quantile=0.9": 41146488,
+ "raft_commitTime_quantile=0.99": 41146488,
+ "raft_commitTime_sum": 955781149,
+ "raft_fsm_lastRestoreDuration": 2,
+ "raft_leader_lastContact_count": 80917,
+ "raft_leader_lastContact_quantile=0.5": 33000000,
+ "raft_leader_lastContact_quantile=0.9": 68000000,
+ "raft_leader_lastContact_quantile=0.99": 68000000,
+ "raft_leader_lastContact_sum": 3066900000,
+ "raft_leader_oldestLogAge": 166046464,
+ "raft_rpc_installSnapshot_count": 0,
+ "raft_rpc_installSnapshot_quantile=0.5": 0,
+ "raft_rpc_installSnapshot_quantile=0.9": 0,
+ "raft_rpc_installSnapshot_quantile=0.99": 0,
+ "raft_rpc_installSnapshot_sum": 0,
+ "raft_state_candidate": 1,
+ "raft_state_leader": 1,
+ "raft_thread_fsm_saturation_count": 11923,
+ "raft_thread_fsm_saturation_quantile=0.5": 0,
+ "raft_thread_fsm_saturation_quantile=0.9": 0,
+ "raft_thread_fsm_saturation_quantile=0.99": 0,
+ "raft_thread_fsm_saturation_sum": 90,
+ "raft_thread_main_saturation_count": 43067,
+ "raft_thread_main_saturation_quantile=0.5": 0,
+ "raft_thread_main_saturation_quantile=0.9": 0,
+ "raft_thread_main_saturation_quantile=0.99": 0,
+ "raft_thread_main_saturation_sum": 205409,
+ "runtime_alloc_bytes": 53065368,
+ "runtime_sys_bytes": 84955160,
+ "runtime_total_gc_pause_ns": 1372001280,
+ "server_isLeader_no": 0,
+ "server_isLeader_yes": 1,
+ "txn_apply_count": 0,
+ "txn_apply_quantile=0.5": 0,
+ "txn_apply_quantile=0.9": 0,
+ "txn_apply_quantile=0.99": 0,
+ "txn_apply_sum": 0,
+ },
+ },
+ "success on response from Consul v1.14.3 server cloud managed": {
+ prepare: caseConsulV1143CloudServerResponse,
+ // 3 node checks, 1 service check, has license
+ wantNumOfCharts: len(serverCommonCharts) + len(serverLeaderCharts) + 3 + 1,
+ wantMetrics: map[string]int64{
+ "autopilot_failure_tolerance": 0,
+ "autopilot_healthy_no": 0,
+ "autopilot_healthy_yes": 1,
+ "client_rpc": 438718,
+ "client_rpc_exceeded": 0,
+ "client_rpc_failed": 0,
+ "health_check_chk1_critical_status": 0,
+ "health_check_chk1_maintenance_status": 0,
+ "health_check_chk1_passing_status": 1,
+ "health_check_chk1_warning_status": 0,
+ "health_check_chk2_critical_status": 1,
+ "health_check_chk2_maintenance_status": 0,
+ "health_check_chk2_passing_status": 0,
+ "health_check_chk2_warning_status": 0,
+ "health_check_chk3_critical_status": 1,
+ "health_check_chk3_maintenance_status": 0,
+ "health_check_chk3_passing_status": 0,
+ "health_check_chk3_warning_status": 0,
+ "health_check_mysql_critical_status": 1,
+ "health_check_mysql_maintenance_status": 0,
+ "health_check_mysql_passing_status": 0,
+ "health_check_mysql_warning_status": 0,
+ "kvs_apply_count": 2,
+ "kvs_apply_quantile=0.5": 0,
+ "kvs_apply_quantile=0.9": 0,
+ "kvs_apply_quantile=0.99": 0,
+ "kvs_apply_sum": 18550,
+ "network_lan_rtt_avg": 1321107,
+ "network_lan_rtt_count": 1,
+ "network_lan_rtt_max": 1321107,
+ "network_lan_rtt_min": 1321107,
+ "network_lan_rtt_sum": 1321107,
+ "raft_apply": 115252000,
+ "raft_boltdb_freelistBytes": 26008,
+ "raft_boltdb_logsPerBatch_count": 122794,
+ "raft_boltdb_logsPerBatch_quantile=0.5": 1000000,
+ "raft_boltdb_logsPerBatch_quantile=0.9": 1000000,
+ "raft_boltdb_logsPerBatch_quantile=0.99": 1000000,
+ "raft_boltdb_logsPerBatch_sum": 122856000,
+ "raft_boltdb_storeLogs_count": 122794,
+ "raft_boltdb_storeLogs_quantile=0.5": 1673303,
+ "raft_boltdb_storeLogs_quantile=0.9": 2210979,
+ "raft_boltdb_storeLogs_quantile=0.99": 2210979,
+ "raft_boltdb_storeLogs_sum": 278437403,
+ "raft_commitTime_count": 122785,
+ "raft_commitTime_quantile=0.5": 1718204,
+ "raft_commitTime_quantile=0.9": 2262192,
+ "raft_commitTime_quantile=0.99": 2262192,
+ "raft_commitTime_sum": 284260428,
+ "raft_fsm_lastRestoreDuration": 0,
+ "raft_leader_lastContact_count": 19,
+ "raft_leader_lastContact_quantile=0.5": 0,
+ "raft_leader_lastContact_quantile=0.9": 0,
+ "raft_leader_lastContact_quantile=0.99": 0,
+ "raft_leader_lastContact_sum": 598000,
+ "raft_leader_oldestLogAge": 68835264,
+ "raft_rpc_installSnapshot_count": 1,
+ "raft_rpc_installSnapshot_quantile=0.5": 0,
+ "raft_rpc_installSnapshot_quantile=0.9": 0,
+ "raft_rpc_installSnapshot_quantile=0.99": 0,
+ "raft_rpc_installSnapshot_sum": 473038,
+ "raft_state_candidate": 1,
+ "raft_state_leader": 1,
+ "raft_thread_fsm_saturation_count": 44326,
+ "raft_thread_fsm_saturation_quantile=0.5": 0,
+ "raft_thread_fsm_saturation_quantile=0.9": 0,
+ "raft_thread_fsm_saturation_quantile=0.99": 0,
+ "raft_thread_fsm_saturation_sum": 729,
+ "raft_thread_main_saturation_count": 451221,
+ "raft_thread_main_saturation_quantile=0.5": 0,
+ "raft_thread_main_saturation_quantile=0.9": 0,
+ "raft_thread_main_saturation_quantile=0.99": 9999,
+ "raft_thread_main_saturation_sum": 213059,
+ "runtime_alloc_bytes": 51729856,
+ "runtime_sys_bytes": 160156960,
+ "runtime_total_gc_pause_ns": 832754048,
+ "server_isLeader_no": 0,
+ "server_isLeader_yes": 1,
+ "system_licenseExpiration": 2949945,
+ "txn_apply_count": 0,
+ "txn_apply_quantile=0.5": 0,
+ "txn_apply_quantile=0.9": 0,
+ "txn_apply_quantile=0.99": 0,
+ "txn_apply_sum": 0,
+ },
+ },
+ "success on response from Consul v1.13.2 server with enabled hostname": {
+ prepare: caseConsulV1132ServerResponse,
+ // 3 node checks, 1 service check, no license
+ wantNumOfCharts: len(serverCommonCharts) + len(serverAutopilotHealthCharts) + len(serverLeaderCharts) + 3 + 1 - 1,
+ wantMetrics: map[string]int64{
+ "autopilot_failure_tolerance": 1,
+ "autopilot_healthy_no": 0,
+ "autopilot_healthy_yes": 1,
+ "autopilot_server_healthy_no": 0,
+ "autopilot_server_healthy_yes": 1,
+ "autopilot_server_lastContact_leader": 13,
+ "autopilot_server_sefStatus_alive": 1,
+ "autopilot_server_sefStatus_failed": 0,
+ "autopilot_server_sefStatus_left": 0,
+ "autopilot_server_sefStatus_none": 0,
+ "autopilot_server_stable_time": 265825,
+ "autopilot_server_voter_no": 0,
+ "autopilot_server_voter_yes": 1,
+ "client_rpc": 6838,
+ "client_rpc_exceeded": 0,
+ "client_rpc_failed": 0,
+ "health_check_chk1_critical_status": 0,
+ "health_check_chk1_maintenance_status": 0,
+ "health_check_chk1_passing_status": 1,
+ "health_check_chk1_warning_status": 0,
+ "health_check_chk2_critical_status": 1,
+ "health_check_chk2_maintenance_status": 0,
+ "health_check_chk2_passing_status": 0,
+ "health_check_chk2_warning_status": 0,
+ "health_check_chk3_critical_status": 1,
+ "health_check_chk3_maintenance_status": 0,
+ "health_check_chk3_passing_status": 0,
+ "health_check_chk3_warning_status": 0,
+ "health_check_mysql_critical_status": 1,
+ "health_check_mysql_maintenance_status": 0,
+ "health_check_mysql_passing_status": 0,
+ "health_check_mysql_warning_status": 0,
+ "kvs_apply_count": 0,
+ "kvs_apply_quantile=0.5": 0,
+ "kvs_apply_quantile=0.9": 0,
+ "kvs_apply_quantile=0.99": 0,
+ "kvs_apply_sum": 0,
+ "network_lan_rtt_avg": 737592,
+ "network_lan_rtt_count": 2,
+ "network_lan_rtt_max": 991168,
+ "network_lan_rtt_min": 484017,
+ "network_lan_rtt_sum": 1475185,
+ "raft_apply": 10681000,
+ "raft_boltdb_freelistBytes": 11264,
+ "raft_boltdb_logsPerBatch_count": 12360,
+ "raft_boltdb_logsPerBatch_quantile=0.5": 1000000,
+ "raft_boltdb_logsPerBatch_quantile=0.9": 1000000,
+ "raft_boltdb_logsPerBatch_quantile=0.99": 1000000,
+ "raft_boltdb_logsPerBatch_sum": 12362000,
+ "raft_boltdb_storeLogs_count": 12360,
+ "raft_boltdb_storeLogs_quantile=0.5": 13176624,
+ "raft_boltdb_storeLogs_quantile=0.9": 13176624,
+ "raft_boltdb_storeLogs_quantile=0.99": 13176624,
+ "raft_boltdb_storeLogs_sum": 651888027,
+ "raft_commitTime_count": 12345,
+ "raft_commitTime_quantile=0.5": 41146488,
+ "raft_commitTime_quantile=0.9": 41146488,
+ "raft_commitTime_quantile=0.99": 41146488,
+ "raft_commitTime_sum": 955781149,
+ "raft_fsm_lastRestoreDuration": 2,
+ "raft_leader_lastContact_count": 80917,
+ "raft_leader_lastContact_quantile=0.5": 33000000,
+ "raft_leader_lastContact_quantile=0.9": 68000000,
+ "raft_leader_lastContact_quantile=0.99": 68000000,
+ "raft_leader_lastContact_sum": 3066900000,
+ "raft_leader_oldestLogAge": 166046464,
+ "raft_rpc_installSnapshot_count": 0,
+ "raft_rpc_installSnapshot_quantile=0.5": 0,
+ "raft_rpc_installSnapshot_quantile=0.9": 0,
+ "raft_rpc_installSnapshot_quantile=0.99": 0,
+ "raft_rpc_installSnapshot_sum": 0,
+ "raft_state_candidate": 1,
+ "raft_state_leader": 1,
+ "raft_thread_fsm_saturation_count": 11923,
+ "raft_thread_fsm_saturation_quantile=0.5": 0,
+ "raft_thread_fsm_saturation_quantile=0.9": 0,
+ "raft_thread_fsm_saturation_quantile=0.99": 0,
+ "raft_thread_fsm_saturation_sum": 90,
+ "raft_thread_main_saturation_count": 43067,
+ "raft_thread_main_saturation_quantile=0.5": 0,
+ "raft_thread_main_saturation_quantile=0.9": 0,
+ "raft_thread_main_saturation_quantile=0.99": 0,
+ "raft_thread_main_saturation_sum": 205409,
+ "runtime_alloc_bytes": 53065368,
+ "runtime_sys_bytes": 84955160,
+ "runtime_total_gc_pause_ns": 1372001280,
+ "server_isLeader_no": 0,
+ "server_isLeader_yes": 1,
+ "txn_apply_count": 0,
+ "txn_apply_quantile=0.5": 0,
+ "txn_apply_quantile=0.9": 0,
+ "txn_apply_quantile=0.99": 0,
+ "txn_apply_sum": 0,
+ },
+ },
+ "success on response from Consul v1.13.2 server with disabled prometheus": {
+ prepare: caseConsulV1132ServerWithDisabledPrometheus,
+ // 3 node checks, 1 service check, no license
+ wantNumOfCharts: len(serverAutopilotHealthCharts) + 3 + 1,
+ wantMetrics: map[string]int64{
+ "autopilot_server_healthy_no": 0,
+ "autopilot_server_healthy_yes": 1,
+ "autopilot_server_lastContact_leader": 13,
+ "autopilot_server_sefStatus_alive": 1,
+ "autopilot_server_sefStatus_failed": 0,
+ "autopilot_server_sefStatus_left": 0,
+ "autopilot_server_sefStatus_none": 0,
+ "autopilot_server_stable_time": 265805,
+ "autopilot_server_voter_no": 0,
+ "autopilot_server_voter_yes": 1,
+ "health_check_chk1_critical_status": 0,
+ "health_check_chk1_maintenance_status": 0,
+ "health_check_chk1_passing_status": 1,
+ "health_check_chk1_warning_status": 0,
+ "health_check_chk2_critical_status": 1,
+ "health_check_chk2_maintenance_status": 0,
+ "health_check_chk2_passing_status": 0,
+ "health_check_chk2_warning_status": 0,
+ "health_check_chk3_critical_status": 1,
+ "health_check_chk3_maintenance_status": 0,
+ "health_check_chk3_passing_status": 0,
+ "health_check_chk3_warning_status": 0,
+ "health_check_mysql_critical_status": 1,
+ "health_check_mysql_maintenance_status": 0,
+ "health_check_mysql_passing_status": 0,
+ "health_check_mysql_warning_status": 0,
+ "network_lan_rtt_avg": 737592,
+ "network_lan_rtt_count": 2,
+ "network_lan_rtt_max": 991168,
+ "network_lan_rtt_min": 484017,
+ "network_lan_rtt_sum": 1475185,
+ },
+ },
+ "success on response from Consul v1.13.2 client": {
+ prepare: caseConsulV1132ClientResponse,
+ // 3 node checks, 1 service check, no license
+ wantNumOfCharts: len(clientCharts) + 3 + 1 - 1,
+ wantMetrics: map[string]int64{
+ "client_rpc": 34,
+ "client_rpc_exceeded": 0,
+ "client_rpc_failed": 0,
+ "health_check_chk1_critical_status": 0,
+ "health_check_chk1_maintenance_status": 0,
+ "health_check_chk1_passing_status": 1,
+ "health_check_chk1_warning_status": 0,
+ "health_check_chk2_critical_status": 1,
+ "health_check_chk2_maintenance_status": 0,
+ "health_check_chk2_passing_status": 0,
+ "health_check_chk2_warning_status": 0,
+ "health_check_chk3_critical_status": 1,
+ "health_check_chk3_maintenance_status": 0,
+ "health_check_chk3_passing_status": 0,
+ "health_check_chk3_warning_status": 0,
+ "health_check_mysql_critical_status": 1,
+ "health_check_mysql_maintenance_status": 0,
+ "health_check_mysql_passing_status": 0,
+ "health_check_mysql_warning_status": 0,
+ "runtime_alloc_bytes": 26333408,
+ "runtime_sys_bytes": 51201032,
+ "runtime_total_gc_pause_ns": 4182423,
+ },
+ },
+ "fail on invalid data response": {
+ prepare: caseInvalidDataResponse,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on connection refused": {
+ prepare: caseConnectionRefused,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on 404 response": {
+ prepare: case404,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ consul, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := consul.Collect()
+
+ delete(mx, "autopilot_server_stable_time")
+ delete(test.wantMetrics, "autopilot_server_stable_time")
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ assert.Equal(t, test.wantNumOfCharts, len(*consul.Charts()))
+ }
+ })
+ }
+}
+
+func caseConsulV1143CloudServerResponse(t *testing.T) (*Consul, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch {
+ case r.URL.Path == urlPathAgentSelf:
+ _, _ = w.Write(dataVer1143CloudServerSelf)
+ case r.URL.Path == urlPathAgentChecks:
+ _, _ = w.Write(dataVer1143CloudChecks)
+ case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus":
+ _, _ = w.Write(dataVer1143CloudServerPromMetrics)
+ case r.URL.Path == urlPathOperationAutopilotHealth:
+ w.WriteHeader(http.StatusForbidden)
+ case r.URL.Path == urlPathCoordinateNodes:
+ _, _ = w.Write(dataVer1143CloudServerCoordinateNodes)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ consul := New()
+ consul.URL = srv.URL
+
+ require.NoError(t, consul.Init())
+
+ return consul, srv.Close
+}
+
+func caseConsulV1132ServerResponse(t *testing.T) (*Consul, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch {
+ case r.URL.Path == urlPathAgentSelf:
+ _, _ = w.Write(dataVer1132ServerSelf)
+ case r.URL.Path == urlPathAgentChecks:
+ _, _ = w.Write(dataVer1132Checks)
+ case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus":
+ _, _ = w.Write(dataVer1132ServerPromMetrics)
+ case r.URL.Path == urlPathOperationAutopilotHealth:
+ _, _ = w.Write(dataVer1132ServerOperatorAutopilotHealth)
+ case r.URL.Path == urlPathCoordinateNodes:
+ _, _ = w.Write(dataVer1132ServerCoordinateNodes)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ consul := New()
+ consul.URL = srv.URL
+
+ require.NoError(t, consul.Init())
+
+ return consul, srv.Close
+}
+
+func caseConsulV1132ServerWithHostnameResponse(t *testing.T) (*Consul, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch {
+ case r.URL.Path == urlPathAgentSelf:
+ _, _ = w.Write(dataVer1132ServerSelfWithHostname)
+ case r.URL.Path == urlPathAgentChecks:
+ _, _ = w.Write(dataVer1132Checks)
+ case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus":
+ _, _ = w.Write(dataVer1132ServerPromMetricsWithHostname)
+ case r.URL.Path == urlPathOperationAutopilotHealth:
+ _, _ = w.Write(dataVer1132ServerOperatorAutopilotHealth)
+ case r.URL.Path == urlPathCoordinateNodes:
+ _, _ = w.Write(dataVer1132ServerCoordinateNodes)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ consul := New()
+ consul.URL = srv.URL
+
+ require.NoError(t, consul.Init())
+
+ return consul, srv.Close
+}
+
+func caseConsulV1132ServerWithDisabledPrometheus(t *testing.T) (*Consul, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathAgentSelf:
+ _, _ = w.Write(dataVer1132ServerSelfDisabledPrometheus)
+ case urlPathAgentChecks:
+ _, _ = w.Write(dataVer1132Checks)
+ case urlPathOperationAutopilotHealth:
+ _, _ = w.Write(dataVer1132ServerOperatorAutopilotHealth)
+ case urlPathCoordinateNodes:
+ _, _ = w.Write(dataVer1132ServerCoordinateNodes)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ consul := New()
+ consul.URL = srv.URL
+
+ require.NoError(t, consul.Init())
+
+ return consul, srv.Close
+}
+
+func caseConsulV1132ClientResponse(t *testing.T) (*Consul, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch {
+ case r.URL.Path == urlPathAgentSelf:
+ _, _ = w.Write(dataVer1132ClientSelf)
+ case r.URL.Path == urlPathAgentChecks:
+ _, _ = w.Write(dataVer1132Checks)
+ case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus":
+ _, _ = w.Write(dataVer1132ClientPromMetrics)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ consul := New()
+ consul.URL = srv.URL
+
+ require.NoError(t, consul.Init())
+
+ return consul, srv.Close
+}
+
+func caseInvalidDataResponse(t *testing.T) (*Consul, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ consul := New()
+ consul.URL = srv.URL
+
+ require.NoError(t, consul.Init())
+
+ return consul, srv.Close
+}
+
+func caseConnectionRefused(t *testing.T) (*Consul, func()) {
+ t.Helper()
+ consul := New()
+ consul.URL = "http://127.0.0.1:65535/"
+ require.NoError(t, consul.Init())
+
+ return consul, func() {}
+}
+
+func case404(t *testing.T) (*Consul, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+
+ consul := New()
+ consul.URL = srv.URL
+ require.NoError(t, consul.Init())
+
+ return consul, srv.Close
+}
diff --git a/src/go/plugin/go.d/modules/consul/init.go b/src/go/plugin/go.d/modules/consul/init.go
new file mode 100644
index 000000000..4ba5b86ea
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/init.go
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package consul
+
+import (
+ "errors"
+ "net/http"
+ "net/url"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (c *Consul) validateConfig() error {
+ if c.URL == "" {
+ return errors.New("'url' not set")
+ }
+ return nil
+}
+
+func (c *Consul) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(c.Client)
+}
+
+const urlPathAgentMetrics = "/v1/agent/metrics"
+
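+// initPrometheusClient builds a Prometheus client that scrapes the agent metrics
+// endpoint (format=prometheus), attaching the configured ACL token via the
+// X-Consul-Token header when one is set.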
+func (c *Consul) initPrometheusClient(httpClient *http.Client) (prometheus.Prometheus, error) {
+ r, err := web.NewHTTPRequest(c.Request.Copy())
+ if err != nil {
+ return nil, err
+ }
+ r.URL.Path = urlPathAgentMetrics
+ r.URL.RawQuery = url.Values{
+ "format": []string{"prometheus"},
+ }.Encode()
+
+ req := c.Request.Copy()
+ req.URL = r.URL.String()
+
+ if c.ACLToken != "" {
+ if req.Headers == nil {
+ req.Headers = make(map[string]string)
+ }
+ req.Headers["X-Consul-Token"] = c.ACLToken
+ }
+
+ return prometheus.New(httpClient, req), nil
+}
diff --git a/src/go/plugin/go.d/modules/consul/integrations/consul.md b/src/go/plugin/go.d/modules/consul/integrations/consul.md
new file mode 100644
index 000000000..3a364bfd4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/integrations/consul.md
@@ -0,0 +1,359 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/consul/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/consul/metadata.yaml"
+sidebar_label: "Consul"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Service Discovery / Registry"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Consul
+
+
+<img src="https://netdata.cloud/img/consul.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: consul
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.
+
+
+It periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs).
+
+Used endpoints:
+
+- [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)
+- [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)
+- [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)
+- [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)
+- [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)
+
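+For illustration only, here is a minimal Go sketch of the kind of request the collector issues against the agent metrics endpoint; the URL, token value, and error handling below are assumptions for the example, not the collector's actual implementation.
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+)
+
+func main() {
+	// Telemetry is scraped from the agent in Prometheus text format.
+	req, err := http.NewRequest(http.MethodGet,
+		"http://127.0.0.1:8500/v1/agent/metrics?format=prometheus", nil)
+	if err != nil {
+		panic(err)
+	}
+	// If ACLs are enabled, the token is passed via the X-Consul-Token header.
+	req.Header.Set("X-Consul-Token", "ec15675e-2999-d789-832e-8c4794daa8d7")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, _ := io.ReadAll(resp.Body)
+	fmt.Println(string(body))
+}
+```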
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This collector discovers instances running on the local host that provide metrics on port 8500.
+
+On startup, it tries to collect metrics from:
+
+- http://localhost:8500
+- http://127.0.0.1:8500
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).
+
+
+### Per Consul instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | Leader | Follower | Client |
+|:------|:----------|:----|:---:|:---:|:---:|
+| consul.client_rpc_requests_rate | rpc | requests/s | • | • | • |
+| consul.client_rpc_requests_exceeded_rate | exceeded | requests/s | • | • | • |
+| consul.client_rpc_requests_failed_rate | failed | requests/s | • | • | • |
+| consul.memory_allocated | allocated | bytes | • | • | • |
+| consul.memory_sys | sys | bytes | • | • | • |
+| consul.gc_pause_time | gc_pause | seconds | • | • | • |
+| consul.kvs_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | • | |
+| consul.kvs_apply_operations_rate | kvs_apply | ops/s | • | • | |
+| consul.txn_apply_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | • | |
+| consul.txn_apply_operations_rate | txn_apply | ops/s | • | • | |
+| consul.autopilot_health_status | healthy, unhealthy | status | • | • | |
+| consul.autopilot_failure_tolerance | failure_tolerance | servers | • | • | |
+| consul.autopilot_server_health_status | healthy, unhealthy | status | • | • | |
+| consul.autopilot_server_stable_time | stable | seconds | • | • | |
+| consul.autopilot_server_serf_status | active, failed, left, none | status | • | • | |
+| consul.autopilot_server_voter_status | voter, not_voter | status | • | • | |
+| consul.network_lan_rtt | min, max, avg | ms | • | • | |
+| consul.raft_commit_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | | |
+| consul.raft_commits_rate | commits | commits/s | • | | |
+| consul.raft_leader_last_contact_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | | |
+| consul.raft_leader_oldest_log_age | oldest_log_age | seconds | • | | |
+| consul.raft_follower_last_contact_leader_time | leader_last_contact | ms | | • | |
+| consul.raft_rpc_install_snapshot_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | | • | |
+| consul.raft_leader_elections_rate | leader | elections/s | • | • | |
+| consul.raft_leadership_transitions_rate | leadership | transitions/s | • | • | |
+| consul.server_leadership_status | leader, not_leader | status | • | • | |
+| consul.raft_thread_main_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | • | • | |
+| consul.raft_thread_fsm_saturation_perc | quantile_0.5, quantile_0.9, quantile_0.99 | percentage | • | • | |
+| consul.raft_fsm_last_restore_duration | last_restore_duration | ms | • | • | |
+| consul.raft_boltdb_freelist_bytes | freelist | bytes | • | • | |
+| consul.raft_boltdb_logs_per_batch_rate | written | logs/s | • | • | |
+| consul.raft_boltdb_store_logs_time | quantile_0.5, quantile_0.9, quantile_0.99 | ms | • | • | |
+| consul.license_expiration_time | license_expiration | seconds | • | • | • |
+
+### Per node check
+
+Metrics about checks at the Node level.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| datacenter | Datacenter Identifier |
+| node_name | The node's name |
+| check_name | The check's name |
+
+Metrics:
+
+| Metric | Dimensions | Unit | Leader | Follower | Client |
+|:------|:----------|:----|:---:|:---:|:---:|
+| consul.node_health_check_status | passing, maintenance, warning, critical | status | • | • | • |
+
+### Per service check
+
+Metrics about checks at the Service level.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| datacenter | Datacenter Identifier |
+| node_name | The node's name |
+| check_name | The check's name |
+| service_name | The service's name |
+
+Metrics:
+
+| Metric | Dimensions | Unit | Leader | Follower | Client |
+|:------|:----------|:----|:---:|:---:|:---:|
+| consul.service_health_check_status | passing, maintenance, warning, critical | status | • | • | • |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ consul_node_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.node_health_check_status | node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |
+| [ consul_service_health_check_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.service_health_check_status | service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter} |
+| [ consul_client_rpc_requests_exceeded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_exceeded_rate | number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |
+| [ consul_client_rpc_requests_failed ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.client_rpc_requests_failed_rate | number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter} |
+| [ consul_gc_pause_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.gc_pause_time | time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter} |
+| [ consul_autopilot_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_health_status | datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name} |
+| [ consul_autopilot_server_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.autopilot_server_health_status | server ${label:node_name} from datacenter ${label:datacenter} is unhealthy |
+| [ consul_raft_leader_last_contact_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leader_last_contact_time | median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes |
+| [ consul_raft_leadership_transitions ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_leadership_transitions_rate | there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader |
+| [ consul_raft_thread_main_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_main_saturation_perc | average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |
+| [ consul_raft_thread_fsm_saturation ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.raft_thread_fsm_saturation_perc | average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter} |
+| [ consul_license_expiration_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf) | consul.license_expiration_time | Consul Enterprise license expiration time on node ${label:node_name} datacenter ${label:datacenter} |
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable Prometheus telemetry
+
+[Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent by increasing the value of `prometheus_retention_time` from `0`.
+
+
+#### Add required ACLs to Token
+
+Required **only if authentication is enabled**.
+
+| ACL | Endpoint |
+|:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |
+| `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |
+| `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/consul.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/consul.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>All options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://localhost:8500 | yes |
+| acl_token | ACL token used in every request. | | no |
+| max_checks | Checks processing/charting limit. | | no |
+| max_filter | Checks processing/charting filter. Uses [simple patterns](/src/libnetdata/simple_pattern/README.md). | | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| timeout | HTTP request timeout. | 1 | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8500
+ acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"
+
+```
+##### Basic HTTP auth
+
+Local server with basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8500
+ acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"
+ username: foo
+ password: bar
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8500
+ acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"
+
+ - name: remote
+ url: http://203.0.113.10:8500
+ acl_token: "ada7f751-f654-8872-7f93-498e799158b6"
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `consul` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m consul
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `consul` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep consul
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep consul /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep consul
+```
+
+
diff --git a/src/go/plugin/go.d/modules/consul/metadata.yaml b/src/go/plugin/go.d/modules/consul/metadata.yaml
new file mode 100644
index 000000000..34445cd7e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/metadata.yaml
@@ -0,0 +1,599 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-consul
+ plugin_name: go.d.plugin
+ module_name: consul
+ monitored_instance:
+ name: Consul
+ link: https://www.consul.io/
+ categories:
+ - data-collection.service-discovery-registry
+ icon_filename: consul.svg
+ alternative_monitored_instances: []
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - service networking platform
+ - hashicorp
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors [key metrics](https://developer.hashicorp.com/consul/docs/agent/telemetry#key-metrics) of Consul Agents: transaction timings, leadership changes, memory usage and more.
+ method_description: |
+ It periodically sends HTTP requests to [Consul REST API](https://developer.hashicorp.com/consul/api-docs).
+
+ Used endpoints:
+
+ - [/operator/autopilot/health](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health)
+ - [/agent/checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks)
+ - [/agent/self](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration)
+ - [/agent/metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics)
+ - [/coordinate/nodes](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes)
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+            This collector discovers instances running on the local host that provide metrics on port 8500.
+
+ On startup, it tries to collect metrics from:
+
+ - http://localhost:8500
+ - http://127.0.0.1:8500
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable Prometheus telemetry
+ description: |
+              [Enable](https://developer.hashicorp.com/consul/docs/agent/config/config-files#telemetry-prometheus_retention_time) telemetry on your Consul agent by increasing the value of `prometheus_retention_time` from `0`.
+ - title: Add required ACLs to Token
+ description: |
+ Required **only if authentication is enabled**.
+
+ | ACL | Endpoint |
+ |:---------------:|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+ | `operator:read` | [autopilot health status](https://developer.hashicorp.com/consul/api-docs/operator/autopilot#read-health) |
+ | `node:read` | [checks](https://developer.hashicorp.com/consul/api-docs/agent/check#list-checks) |
+ | `agent:read` | [configuration](https://developer.hashicorp.com/consul/api-docs/agent#read-configuration), [metrics](https://developer.hashicorp.com/consul/api-docs/agent#view-metrics), and [lan coordinates](https://developer.hashicorp.com/consul/api-docs/coordinate#read-lan-coordinates-for-all-nodes) |
+ configuration:
+ file:
+ name: go.d/consul.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: All options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://localhost:8500
+ required: true
+ - name: acl_token
+ description: ACL token used in every request.
+ default_value: ""
+ required: false
+ - name: max_checks
+ description: Checks processing/charting limit.
+ default_value: ""
+ required: false
+ - name: max_filter
+ description: Checks processing/charting filter. Uses [simple patterns](/src/libnetdata/simple_pattern/README.md).
+ default_value: ""
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+              description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+              description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8500
+ acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"
+ - name: Basic HTTP auth
+ description: Local server with basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8500
+ acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"
+ username: foo
+ password: bar
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8500
+ acl_token: "ec15675e-2999-d789-832e-8c4794daa8d7"
+
+ - name: remote
+ url: http://203.0.113.10:8500
+ acl_token: "ada7f751-f654-8872-7f93-498e799158b6"
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: consul_node_health_check_status
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.node_health_check_status
+ info: node health check ${label:check_name} has failed on server ${label:node_name} datacenter ${label:datacenter}
+ - name: consul_service_health_check_status
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.service_health_check_status
+ info: service health check ${label:check_name} for service ${label:service_name} has failed on server ${label:node_name} datacenter ${label:datacenter}
+ - name: consul_client_rpc_requests_exceeded
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.client_rpc_requests_exceeded_rate
+ info: number of rate-limited RPC requests made by server ${label:node_name} datacenter ${label:datacenter}
+ - name: consul_client_rpc_requests_failed
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.client_rpc_requests_failed_rate
+ info: number of failed RPC requests made by server ${label:node_name} datacenter ${label:datacenter}
+ - name: consul_gc_pause_time
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.gc_pause_time
+ info: time spent in stop-the-world garbage collection pauses on server ${label:node_name} datacenter ${label:datacenter}
+ - name: consul_autopilot_health_status
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.autopilot_health_status
+ info: datacenter ${label:datacenter} cluster is unhealthy as reported by server ${label:node_name}
+ - name: consul_autopilot_server_health_status
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.autopilot_server_health_status
+ info: server ${label:node_name} from datacenter ${label:datacenter} is unhealthy
+ - name: consul_raft_leader_last_contact_time
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.raft_leader_last_contact_time
+ info: median time elapsed since leader server ${label:node_name} datacenter ${label:datacenter} was last able to contact the follower nodes
+ - name: consul_raft_leadership_transitions
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.raft_leadership_transitions_rate
+ info: there has been a leadership change and server ${label:node_name} datacenter ${label:datacenter} has become the leader
+ - name: consul_raft_thread_main_saturation
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.raft_thread_main_saturation_perc
+ info: average saturation of the main Raft goroutine on server ${label:node_name} datacenter ${label:datacenter}
+ - name: consul_raft_thread_fsm_saturation
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.raft_thread_fsm_saturation_perc
+ info: average saturation of the FSM Raft goroutine on server ${label:node_name} datacenter ${label:datacenter}
+ - name: consul_license_expiration_time
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/consul.conf
+ metric: consul.license_expiration_time
+        info: Consul Enterprise license expiration time on node ${label:node_name} datacenter ${label:datacenter}
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: |
+ The set of metrics depends on the [Consul Agent mode](https://developer.hashicorp.com/consul/docs/install/glossary#agent).
+ availability:
+ - Leader
+ - Follower
+ - Client
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: consul.client_rpc_requests_rate
+ description: Client RPC requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: rpc
+ - name: consul.client_rpc_requests_exceeded_rate
+ description: Client rate-limited RPC requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: exceeded
+ - name: consul.client_rpc_requests_failed_rate
+ description: Client failed RPC requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: failed
+ - name: consul.memory_allocated
+ description: Memory allocated by the Consul process
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: consul.memory_sys
+ description: Memory obtained from the OS
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: sys
+ - name: consul.gc_pause_time
+ description: Garbage collection stop-the-world pause time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: gc_pause
+ - name: consul.kvs_apply_time
+ description: KVS apply time
+ unit: ms
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: quantile_0.5
+ - name: quantile_0.9
+ - name: quantile_0.99
+ - name: consul.kvs_apply_operations_rate
+ description: KVS apply operations
+ unit: ops/s
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: kvs_apply
+ - name: consul.txn_apply_time
+ description: Transaction apply time
+ unit: ms
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: quantile_0.5
+ - name: quantile_0.9
+ - name: quantile_0.99
+ - name: consul.txn_apply_operations_rate
+ description: Transaction apply operations
+ unit: ops/s
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: txn_apply
+ - name: consul.autopilot_health_status
+ description: Autopilot cluster health status
+ unit: status
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: healthy
+ - name: unhealthy
+ - name: consul.autopilot_failure_tolerance
+ description: Autopilot cluster failure tolerance
+ unit: servers
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: failure_tolerance
+ - name: consul.autopilot_server_health_status
+ description: Autopilot server health status
+ unit: status
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: healthy
+ - name: unhealthy
+ - name: consul.autopilot_server_stable_time
+ description: Autopilot server stable time
+ unit: seconds
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: stable
+ - name: consul.autopilot_server_serf_status
+ description: Autopilot server Serf status
+ unit: status
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: active
+ - name: failed
+ - name: left
+ - name: none
+ - name: consul.autopilot_server_voter_status
+ description: Autopilot server Raft voting membership
+ unit: status
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: voter
+ - name: not_voter
+ - name: consul.network_lan_rtt
+ description: Network lan RTT
+ unit: ms
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: consul.raft_commit_time
+ description: Raft commit time
+ unit: ms
+ chart_type: line
+ availability:
+ - Leader
+ dimensions:
+ - name: quantile_0.5
+ - name: quantile_0.9
+ - name: quantile_0.99
+ - name: consul.raft_commits_rate
+ description: Raft commits rate
+ unit: commits/s
+ chart_type: line
+ availability:
+ - Leader
+ dimensions:
+ - name: commits
+ - name: consul.raft_leader_last_contact_time
+ description: Raft leader last contact time
+ unit: ms
+ chart_type: line
+ availability:
+ - Leader
+ dimensions:
+ - name: quantile_0.5
+ - name: quantile_0.9
+ - name: quantile_0.99
+ - name: consul.raft_leader_oldest_log_age
+ description: Raft leader oldest log age
+ unit: seconds
+ chart_type: line
+ availability:
+ - Leader
+ dimensions:
+ - name: oldest_log_age
+ - name: consul.raft_follower_last_contact_leader_time
+ description: Raft follower last contact with the leader time
+ unit: ms
+ chart_type: line
+ availability:
+ - Follower
+ dimensions:
+ - name: leader_last_contact
+ - name: consul.raft_rpc_install_snapshot_time
+ description: Raft RPC install snapshot time
+ unit: ms
+ chart_type: line
+ availability:
+ - Follower
+ dimensions:
+ - name: quantile_0.5
+ - name: quantile_0.9
+ - name: quantile_0.99
+ - name: consul.raft_leader_elections_rate
+ description: Raft leader elections rate
+ unit: elections/s
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: leader
+ - name: consul.raft_leadership_transitions_rate
+ description: Raft leadership transitions rate
+ unit: transitions/s
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: leadership
+ - name: consul.server_leadership_status
+ description: Server leadership status
+ unit: status
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: leader
+ - name: not_leader
+ - name: consul.raft_thread_main_saturation_perc
+ description: Raft main thread saturation
+ unit: percentage
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: quantile_0.5
+ - name: quantile_0.9
+ - name: quantile_0.99
+ - name: consul.raft_thread_fsm_saturation_perc
+ description: Raft FSM thread saturation
+ unit: percentage
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: quantile_0.5
+ - name: quantile_0.9
+ - name: quantile_0.99
+ - name: consul.raft_fsm_last_restore_duration
+ description: Raft last restore duration
+ unit: ms
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: last_restore_duration
+ - name: consul.raft_boltdb_freelist_bytes
+ description: Raft BoltDB freelist
+ unit: bytes
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: freelist
+ - name: consul.raft_boltdb_logs_per_batch_rate
+ description: Raft BoltDB logs written per batch
+ unit: logs/s
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: written
+ - name: consul.raft_boltdb_store_logs_time
+ description: Raft BoltDB store logs time
+ unit: ms
+ chart_type: line
+ availability:
+ - Leader
+ - Follower
+ dimensions:
+ - name: quantile_0.5
+ - name: quantile_0.9
+ - name: quantile_0.99
+ - name: consul.license_expiration_time
+ description: License expiration time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: license_expiration
+ - name: node check
+          description: Metrics about checks at the Node level.
+ labels:
+ - name: datacenter
+ description: Datacenter Identifier
+ - name: node_name
+ description: The node's name
+ - name: check_name
+ description: The check's name
+ metrics:
+ - name: consul.node_health_check_status
+ description: Node health check status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: passing
+ - name: maintenance
+ - name: warning
+ - name: critical
+ - name: service check
+          description: Metrics about checks at the Service level.
+ labels:
+ - name: datacenter
+ description: Datacenter Identifier
+ - name: node_name
+ description: The node's name
+ - name: check_name
+ description: The check's name
+ - name: service_name
+ description: The service's name
+ metrics:
+ - name: consul.service_health_check_status
+ description: Service health check status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: passing
+ - name: maintenance
+ - name: warning
+ - name: critical
diff --git a/src/go/plugin/go.d/modules/consul/testdata/config.json b/src/go/plugin/go.d/modules/consul/testdata/config.json
new file mode 100644
index 000000000..bcd07a41b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "acl_token": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/consul/testdata/config.yaml b/src/go/plugin/go.d/modules/consul/testdata/config.yaml
new file mode 100644
index 000000000..def554c7e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+acl_token: "ok"
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/client_v1-agent-metrics.txt b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/client_v1-agent-metrics.txt
new file mode 100644
index 000000000..e93e677d8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/client_v1-agent-metrics.txt
@@ -0,0 +1,989 @@
+# HELP consul_acl_ResolveToken This measures the time it takes to resolve an ACL token.
+# TYPE consul_acl_ResolveToken summary
+consul_acl_ResolveToken{quantile="0.5"} NaN
+consul_acl_ResolveToken{quantile="0.9"} NaN
+consul_acl_ResolveToken{quantile="0.99"} NaN
+consul_acl_ResolveToken_sum 0
+consul_acl_ResolveToken_count 0
+# HELP consul_acl_authmethod_delete
+# TYPE consul_acl_authmethod_delete summary
+consul_acl_authmethod_delete{quantile="0.5"} NaN
+consul_acl_authmethod_delete{quantile="0.9"} NaN
+consul_acl_authmethod_delete{quantile="0.99"} NaN
+consul_acl_authmethod_delete_sum 0
+consul_acl_authmethod_delete_count 0
+# HELP consul_acl_authmethod_upsert
+# TYPE consul_acl_authmethod_upsert summary
+consul_acl_authmethod_upsert{quantile="0.5"} NaN
+consul_acl_authmethod_upsert{quantile="0.9"} NaN
+consul_acl_authmethod_upsert{quantile="0.99"} NaN
+consul_acl_authmethod_upsert_sum 0
+consul_acl_authmethod_upsert_count 0
+# HELP consul_acl_bindingrule_delete
+# TYPE consul_acl_bindingrule_delete summary
+consul_acl_bindingrule_delete{quantile="0.5"} NaN
+consul_acl_bindingrule_delete{quantile="0.9"} NaN
+consul_acl_bindingrule_delete{quantile="0.99"} NaN
+consul_acl_bindingrule_delete_sum 0
+consul_acl_bindingrule_delete_count 0
+# HELP consul_acl_bindingrule_upsert
+# TYPE consul_acl_bindingrule_upsert summary
+consul_acl_bindingrule_upsert{quantile="0.5"} NaN
+consul_acl_bindingrule_upsert{quantile="0.9"} NaN
+consul_acl_bindingrule_upsert{quantile="0.99"} NaN
+consul_acl_bindingrule_upsert_sum 0
+consul_acl_bindingrule_upsert_count 0
+# HELP consul_acl_blocked_check_deregistration Increments whenever a deregistration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_deregistration counter
+consul_acl_blocked_check_deregistration 0
+# HELP consul_acl_blocked_check_registration Increments whenever a registration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_registration counter
+consul_acl_blocked_check_registration 0
+# HELP consul_acl_blocked_node_registration Increments whenever a registration fails for a node (blocked by an ACL)
+# TYPE consul_acl_blocked_node_registration counter
+consul_acl_blocked_node_registration 0
+# HELP consul_acl_blocked_service_deregistration Increments whenever a deregistration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_deregistration counter
+consul_acl_blocked_service_deregistration 0
+# HELP consul_acl_blocked_service_registration Increments whenever a registration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_registration counter
+consul_acl_blocked_service_registration 0
+# HELP consul_acl_login
+# TYPE consul_acl_login summary
+consul_acl_login{quantile="0.5"} NaN
+consul_acl_login{quantile="0.9"} NaN
+consul_acl_login{quantile="0.99"} NaN
+consul_acl_login_sum 0
+consul_acl_login_count 0
+# HELP consul_acl_logout
+# TYPE consul_acl_logout summary
+consul_acl_logout{quantile="0.5"} NaN
+consul_acl_logout{quantile="0.9"} NaN
+consul_acl_logout{quantile="0.99"} NaN
+consul_acl_logout_sum 0
+consul_acl_logout_count 0
+# HELP consul_acl_policy_delete
+# TYPE consul_acl_policy_delete summary
+consul_acl_policy_delete{quantile="0.5"} NaN
+consul_acl_policy_delete{quantile="0.9"} NaN
+consul_acl_policy_delete{quantile="0.99"} NaN
+consul_acl_policy_delete_sum 0
+consul_acl_policy_delete_count 0
+# HELP consul_acl_policy_upsert
+# TYPE consul_acl_policy_upsert summary
+consul_acl_policy_upsert{quantile="0.5"} NaN
+consul_acl_policy_upsert{quantile="0.9"} NaN
+consul_acl_policy_upsert{quantile="0.99"} NaN
+consul_acl_policy_upsert_sum 0
+consul_acl_policy_upsert_count 0
+# HELP consul_acl_role_delete
+# TYPE consul_acl_role_delete summary
+consul_acl_role_delete{quantile="0.5"} NaN
+consul_acl_role_delete{quantile="0.9"} NaN
+consul_acl_role_delete{quantile="0.99"} NaN
+consul_acl_role_delete_sum 0
+consul_acl_role_delete_count 0
+# HELP consul_acl_role_upsert
+# TYPE consul_acl_role_upsert summary
+consul_acl_role_upsert{quantile="0.5"} NaN
+consul_acl_role_upsert{quantile="0.9"} NaN
+consul_acl_role_upsert{quantile="0.99"} NaN
+consul_acl_role_upsert_sum 0
+consul_acl_role_upsert_count 0
+# HELP consul_acl_token_cache_hit Increments if Consul is able to resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_hit counter
+consul_acl_token_cache_hit 0
+# HELP consul_acl_token_cache_miss Increments if Consul cannot resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_miss counter
+consul_acl_token_cache_miss 0
+# HELP consul_acl_token_clone
+# TYPE consul_acl_token_clone summary
+consul_acl_token_clone{quantile="0.5"} NaN
+consul_acl_token_clone{quantile="0.9"} NaN
+consul_acl_token_clone{quantile="0.99"} NaN
+consul_acl_token_clone_sum 0
+consul_acl_token_clone_count 0
+# HELP consul_acl_token_delete
+# TYPE consul_acl_token_delete summary
+consul_acl_token_delete{quantile="0.5"} NaN
+consul_acl_token_delete{quantile="0.9"} NaN
+consul_acl_token_delete{quantile="0.99"} NaN
+consul_acl_token_delete_sum 0
+consul_acl_token_delete_count 0
+# HELP consul_acl_token_upsert
+# TYPE consul_acl_token_upsert summary
+consul_acl_token_upsert{quantile="0.5"} NaN
+consul_acl_token_upsert{quantile="0.9"} NaN
+consul_acl_token_upsert{quantile="0.99"} NaN
+consul_acl_token_upsert_sum 0
+consul_acl_token_upsert_count 0
+# HELP consul_agent_tls_cert_expiry Seconds until the agent tls certificate expires. Updated every hour
+# TYPE consul_agent_tls_cert_expiry gauge
+consul_agent_tls_cert_expiry 0
+# HELP consul_api_http Samples how long it takes to service the given HTTP request for the given verb and path.
+# TYPE consul_api_http summary
+consul_api_http{quantile="0.5"} NaN
+consul_api_http{quantile="0.9"} NaN
+consul_api_http{quantile="0.99"} NaN
+consul_api_http_sum 0
+consul_api_http_count 0
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.5"} 0.12827900052070618
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.9"} 0.16961899399757385
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.99"} 0.16961899399757385
+consul_api_http_sum{method="GET",path="v1_agent_checks"} 72.76162604242563
+consul_api_http_count{method="GET",path="v1_agent_checks"} 430
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.5"} 0.21463799476623535
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.9"} 0.35256800055503845
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.99"} 0.35256800055503845
+consul_api_http_sum{method="GET",path="v1_agent_metrics"} 148.1220167428255
+consul_api_http_count{method="GET",path="v1_agent_metrics"} 438
+# HELP consul_catalog_connect_not_found Increments for each connect-based catalog query where the given service could not be found.
+# TYPE consul_catalog_connect_not_found counter
+consul_catalog_connect_not_found 0
+# HELP consul_catalog_connect_query Increments for each connect-based catalog query for the given service.
+# TYPE consul_catalog_connect_query counter
+consul_catalog_connect_query 0
+# HELP consul_catalog_connect_query_tag Increments for each connect-based catalog query for the given service with the given tag.
+# TYPE consul_catalog_connect_query_tag counter
+consul_catalog_connect_query_tag 0
+# HELP consul_catalog_connect_query_tags Increments for each connect-based catalog query for the given service with the given tags.
+# TYPE consul_catalog_connect_query_tags counter
+consul_catalog_connect_query_tags 0
+# HELP consul_catalog_deregister Measures the time it takes to complete a catalog deregister operation.
+# TYPE consul_catalog_deregister summary
+consul_catalog_deregister{quantile="0.5"} NaN
+consul_catalog_deregister{quantile="0.9"} NaN
+consul_catalog_deregister{quantile="0.99"} NaN
+consul_catalog_deregister_sum 0
+consul_catalog_deregister_count 0
+# HELP consul_catalog_register Measures the time it takes to complete a catalog register operation.
+# TYPE consul_catalog_register summary
+consul_catalog_register{quantile="0.5"} NaN
+consul_catalog_register{quantile="0.9"} NaN
+consul_catalog_register{quantile="0.99"} NaN
+consul_catalog_register_sum 0
+consul_catalog_register_count 0
+# HELP consul_catalog_service_not_found Increments for each catalog query where the given service could not be found.
+# TYPE consul_catalog_service_not_found counter
+consul_catalog_service_not_found 0
+# HELP consul_catalog_service_query Increments for each catalog query for the given service.
+# TYPE consul_catalog_service_query counter
+consul_catalog_service_query 0
+# HELP consul_catalog_service_query_tag Increments for each catalog query for the given service with the given tag.
+# TYPE consul_catalog_service_query_tag counter
+consul_catalog_service_query_tag 0
+# HELP consul_catalog_service_query_tags Increments for each catalog query for the given service with the given tags.
+# TYPE consul_catalog_service_query_tags counter
+consul_catalog_service_query_tags 0
+# HELP consul_client_api_catalog_datacenters Increments whenever a Consul agent receives a request to list datacenters in the catalog.
+# TYPE consul_client_api_catalog_datacenters counter
+consul_client_api_catalog_datacenters 0
+# HELP consul_client_api_catalog_deregister Increments whenever a Consul agent receives a catalog deregister request.
+# TYPE consul_client_api_catalog_deregister counter
+consul_client_api_catalog_deregister 0
+# HELP consul_client_api_catalog_gateway_services Increments whenever a Consul agent receives a request to list services associated with a gateway.
+# TYPE consul_client_api_catalog_gateway_services counter
+consul_client_api_catalog_gateway_services 0
+# HELP consul_client_api_catalog_node_service_list Increments whenever a Consul agent receives a request to list a node's registered services.
+# TYPE consul_client_api_catalog_node_service_list counter
+consul_client_api_catalog_node_service_list 0
+# HELP consul_client_api_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_node_services counter
+consul_client_api_catalog_node_services 0
+# HELP consul_client_api_catalog_nodes Increments whenever a Consul agent receives a request to list nodes from the catalog.
+# TYPE consul_client_api_catalog_nodes counter
+consul_client_api_catalog_nodes 0
+# HELP consul_client_api_catalog_register Increments whenever a Consul agent receives a catalog register request.
+# TYPE consul_client_api_catalog_register counter
+consul_client_api_catalog_register 0
+# HELP consul_client_api_catalog_service_nodes Increments whenever a Consul agent receives a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_service_nodes counter
+consul_client_api_catalog_service_nodes 0
+# HELP consul_client_api_catalog_services Increments whenever a Consul agent receives a request to list services from the catalog.
+# TYPE consul_client_api_catalog_services counter
+consul_client_api_catalog_services 0
+# HELP consul_client_api_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service.
+# TYPE consul_client_api_error_catalog_service_nodes counter
+consul_client_api_error_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_datacenters Increments whenever a Consul agent successfully responds to a request to list datacenters.
+# TYPE consul_client_api_success_catalog_datacenters counter
+consul_client_api_success_catalog_datacenters 0
+# HELP consul_client_api_success_catalog_deregister Increments whenever a Consul agent successfully responds to a catalog deregister request.
+# TYPE consul_client_api_success_catalog_deregister counter
+consul_client_api_success_catalog_deregister 0
+# HELP consul_client_api_success_catalog_gateway_services Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway.
+# TYPE consul_client_api_success_catalog_gateway_services counter
+consul_client_api_success_catalog_gateway_services 0
+# HELP consul_client_api_success_catalog_node_service_list Increments whenever a Consul agent successfully responds to a request to list a node's registered services.
+# TYPE consul_client_api_success_catalog_node_service_list counter
+consul_client_api_success_catalog_node_service_list 0
+# HELP consul_client_api_success_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list services in a node.
+# TYPE consul_client_api_success_catalog_node_services counter
+consul_client_api_success_catalog_node_services 0
+# HELP consul_client_api_success_catalog_nodes Increments whenever a Consul agent successfully responds to a request to list nodes.
+# TYPE consul_client_api_success_catalog_nodes counter
+consul_client_api_success_catalog_nodes 0
+# HELP consul_client_api_success_catalog_register Increments whenever a Consul agent successfully responds to a catalog register request.
+# TYPE consul_client_api_success_catalog_register counter
+consul_client_api_success_catalog_register 0
+# HELP consul_client_api_success_catalog_service_nodes Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_success_catalog_service_nodes counter
+consul_client_api_success_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_services Increments whenever a Consul agent successfully responds to a request to list services.
+# TYPE consul_client_api_success_catalog_services counter
+consul_client_api_success_catalog_services 0
+# HELP consul_client_rpc Increments whenever a Consul agent in client mode makes an RPC request to a Consul server.
+# TYPE consul_client_rpc counter
+consul_client_rpc 34
+# HELP consul_client_rpc_error_catalog_datacenters Increments whenever a Consul agent receives an RPC error for a request to list datacenters.
+# TYPE consul_client_rpc_error_catalog_datacenters counter
+consul_client_rpc_error_catalog_datacenters 0
+# HELP consul_client_rpc_error_catalog_deregister Increments whenever a Consul agent receives an RPC error for a catalog deregister request.
+# TYPE consul_client_rpc_error_catalog_deregister counter
+consul_client_rpc_error_catalog_deregister 0
+# HELP consul_client_rpc_error_catalog_gateway_services Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway.
+# TYPE consul_client_rpc_error_catalog_gateway_services counter
+consul_client_rpc_error_catalog_gateway_services 0
+# HELP consul_client_rpc_error_catalog_node_service_list Increments whenever a Consul agent receives an RPC error for request to list a node's registered services.
+# TYPE consul_client_rpc_error_catalog_node_service_list counter
+consul_client_rpc_error_catalog_node_service_list 0
+# HELP consul_client_rpc_error_catalog_node_services Increments whenever a Consul agent receives an RPC error for a request to list services in a node.
+# TYPE consul_client_rpc_error_catalog_node_services counter
+consul_client_rpc_error_catalog_node_services 0
+# HELP consul_client_rpc_error_catalog_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes.
+# TYPE consul_client_rpc_error_catalog_nodes counter
+consul_client_rpc_error_catalog_nodes 0
+# HELP consul_client_rpc_error_catalog_register Increments whenever a Consul agent receives an RPC error for a catalog register request.
+# TYPE consul_client_rpc_error_catalog_register counter
+consul_client_rpc_error_catalog_register 0
+# HELP consul_client_rpc_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.
+# TYPE consul_client_rpc_error_catalog_service_nodes counter
+consul_client_rpc_error_catalog_service_nodes 0
+# HELP consul_client_rpc_error_catalog_services Increments whenever a Consul agent receives an RPC error for a request to list services.
+# TYPE consul_client_rpc_error_catalog_services counter
+consul_client_rpc_error_catalog_services 0
+# HELP consul_client_rpc_exceeded Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's limits configuration.
+# TYPE consul_client_rpc_exceeded counter
+consul_client_rpc_exceeded 0
+# HELP consul_client_rpc_failed Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails.
+# TYPE consul_client_rpc_failed counter
+consul_client_rpc_failed 0
+# HELP consul_consul_cache_bypass Counts how many times a request bypassed the cache because no cache-key was provided.
+# TYPE consul_consul_cache_bypass counter
+consul_consul_cache_bypass 0
+# HELP consul_consul_cache_entries_count Represents the number of entries in this cache.
+# TYPE consul_consul_cache_entries_count gauge
+consul_consul_cache_entries_count 0
+# HELP consul_consul_cache_evict_expired Counts the number of expired entries that are evicted.
+# TYPE consul_consul_cache_evict_expired counter
+consul_consul_cache_evict_expired 0
+# HELP consul_consul_cache_fetch_error Counts the number of failed fetches by the cache.
+# TYPE consul_consul_cache_fetch_error counter
+consul_consul_cache_fetch_error 0
+# HELP consul_consul_cache_fetch_success Counts the number of successful fetches by the cache.
+# TYPE consul_consul_cache_fetch_success counter
+consul_consul_cache_fetch_success 0
+# HELP consul_consul_fsm_ca Deprecated - use fsm_ca instead
+# TYPE consul_consul_fsm_ca summary
+consul_consul_fsm_ca{quantile="0.5"} NaN
+consul_consul_fsm_ca{quantile="0.9"} NaN
+consul_consul_fsm_ca{quantile="0.99"} NaN
+consul_consul_fsm_ca_sum 0
+consul_consul_fsm_ca_count 0
+# HELP consul_consul_fsm_intention Deprecated - use fsm_intention instead
+# TYPE consul_consul_fsm_intention summary
+consul_consul_fsm_intention{quantile="0.5"} NaN
+consul_consul_fsm_intention{quantile="0.9"} NaN
+consul_consul_fsm_intention{quantile="0.99"} NaN
+consul_consul_fsm_intention_sum 0
+consul_consul_fsm_intention_count 0
+# HELP consul_consul_intention_apply
+# TYPE consul_consul_intention_apply summary
+consul_consul_intention_apply{quantile="0.5"} NaN
+consul_consul_intention_apply{quantile="0.9"} NaN
+consul_consul_intention_apply{quantile="0.99"} NaN
+consul_consul_intention_apply_sum 0
+consul_consul_intention_apply_count 0
+# HELP consul_consul_members_clients Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_consul_members_clients gauge
+consul_consul_members_clients 0
+# HELP consul_consul_members_servers Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_consul_members_servers gauge
+consul_consul_members_servers 0
+# HELP consul_consul_state_config_entries Measures the current number of unique configuration entries registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_consul_state_config_entries gauge
+consul_consul_state_config_entries 0
+# HELP consul_consul_state_connect_instances Measures the current number of unique connect service instances registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_consul_state_connect_instances gauge
+consul_consul_state_connect_instances 0
+# HELP consul_consul_state_kv_entries Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.
+# TYPE consul_consul_state_kv_entries gauge
+consul_consul_state_kv_entries 0
+# HELP consul_consul_state_nodes Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_nodes gauge
+consul_consul_state_nodes 0
+# HELP consul_consul_state_peerings Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0.
+# TYPE consul_consul_state_peerings gauge
+consul_consul_state_peerings 0
+# HELP consul_consul_state_service_instances Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_service_instances gauge
+consul_consul_state_service_instances 0
+# HELP consul_consul_state_services Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_services gauge
+consul_consul_state_services 0
+# HELP consul_federation_state_apply
+# TYPE consul_federation_state_apply summary
+consul_federation_state_apply{quantile="0.5"} NaN
+consul_federation_state_apply{quantile="0.9"} NaN
+consul_federation_state_apply{quantile="0.99"} NaN
+consul_federation_state_apply_sum 0
+consul_federation_state_apply_count 0
+# HELP consul_federation_state_get
+# TYPE consul_federation_state_get summary
+consul_federation_state_get{quantile="0.5"} NaN
+consul_federation_state_get{quantile="0.9"} NaN
+consul_federation_state_get{quantile="0.99"} NaN
+consul_federation_state_get_sum 0
+consul_federation_state_get_count 0
+# HELP consul_federation_state_list
+# TYPE consul_federation_state_list summary
+consul_federation_state_list{quantile="0.5"} NaN
+consul_federation_state_list{quantile="0.9"} NaN
+consul_federation_state_list{quantile="0.99"} NaN
+consul_federation_state_list_sum 0
+consul_federation_state_list_count 0
+# HELP consul_federation_state_list_mesh_gateways
+# TYPE consul_federation_state_list_mesh_gateways summary
+consul_federation_state_list_mesh_gateways{quantile="0.5"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.9"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.99"} NaN
+consul_federation_state_list_mesh_gateways_sum 0
+consul_federation_state_list_mesh_gateways_count 0
+# HELP consul_fsm_acl Measures the time it takes to apply the given ACL operation to the FSM.
+# TYPE consul_fsm_acl summary
+consul_fsm_acl{quantile="0.5"} NaN
+consul_fsm_acl{quantile="0.9"} NaN
+consul_fsm_acl{quantile="0.99"} NaN
+consul_fsm_acl_sum 0
+consul_fsm_acl_count 0
+# HELP consul_fsm_acl_authmethod Measures the time it takes to apply an ACL authmethod operation to the FSM.
+# TYPE consul_fsm_acl_authmethod summary
+consul_fsm_acl_authmethod{quantile="0.5"} NaN
+consul_fsm_acl_authmethod{quantile="0.9"} NaN
+consul_fsm_acl_authmethod{quantile="0.99"} NaN
+consul_fsm_acl_authmethod_sum 0
+consul_fsm_acl_authmethod_count 0
+# HELP consul_fsm_acl_bindingrule Measures the time it takes to apply an ACL binding rule operation to the FSM.
+# TYPE consul_fsm_acl_bindingrule summary
+consul_fsm_acl_bindingrule{quantile="0.5"} NaN
+consul_fsm_acl_bindingrule{quantile="0.9"} NaN
+consul_fsm_acl_bindingrule{quantile="0.99"} NaN
+consul_fsm_acl_bindingrule_sum 0
+consul_fsm_acl_bindingrule_count 0
+# HELP consul_fsm_acl_policy Measures the time it takes to apply an ACL policy operation to the FSM.
+# TYPE consul_fsm_acl_policy summary
+consul_fsm_acl_policy{quantile="0.5"} NaN
+consul_fsm_acl_policy{quantile="0.9"} NaN
+consul_fsm_acl_policy{quantile="0.99"} NaN
+consul_fsm_acl_policy_sum 0
+consul_fsm_acl_policy_count 0
+# HELP consul_fsm_acl_token Measures the time it takes to apply an ACL token operation to the FSM.
+# TYPE consul_fsm_acl_token summary
+consul_fsm_acl_token{quantile="0.5"} NaN
+consul_fsm_acl_token{quantile="0.9"} NaN
+consul_fsm_acl_token{quantile="0.99"} NaN
+consul_fsm_acl_token_sum 0
+consul_fsm_acl_token_count 0
+# HELP consul_fsm_autopilot Measures the time it takes to apply the given autopilot update to the FSM.
+# TYPE consul_fsm_autopilot summary
+consul_fsm_autopilot{quantile="0.5"} NaN
+consul_fsm_autopilot{quantile="0.9"} NaN
+consul_fsm_autopilot{quantile="0.99"} NaN
+consul_fsm_autopilot_sum 0
+consul_fsm_autopilot_count 0
+# HELP consul_fsm_ca Measures the time it takes to apply CA configuration operations to the FSM.
+# TYPE consul_fsm_ca summary
+consul_fsm_ca{quantile="0.5"} NaN
+consul_fsm_ca{quantile="0.9"} NaN
+consul_fsm_ca{quantile="0.99"} NaN
+consul_fsm_ca_sum 0
+consul_fsm_ca_count 0
+# HELP consul_fsm_ca_leaf Measures the time it takes to apply an operation while signing a leaf certificate.
+# TYPE consul_fsm_ca_leaf summary
+consul_fsm_ca_leaf{quantile="0.5"} NaN
+consul_fsm_ca_leaf{quantile="0.9"} NaN
+consul_fsm_ca_leaf{quantile="0.99"} NaN
+consul_fsm_ca_leaf_sum 0
+consul_fsm_ca_leaf_count 0
+# HELP consul_fsm_coordinate_batch_update Measures the time it takes to apply the given batch coordinate update to the FSM.
+# TYPE consul_fsm_coordinate_batch_update summary
+consul_fsm_coordinate_batch_update{quantile="0.5"} NaN
+consul_fsm_coordinate_batch_update{quantile="0.9"} NaN
+consul_fsm_coordinate_batch_update{quantile="0.99"} NaN
+consul_fsm_coordinate_batch_update_sum 0
+consul_fsm_coordinate_batch_update_count 0
+# HELP consul_fsm_deregister Measures the time it takes to apply a catalog deregister operation to the FSM.
+# TYPE consul_fsm_deregister summary
+consul_fsm_deregister{quantile="0.5"} NaN
+consul_fsm_deregister{quantile="0.9"} NaN
+consul_fsm_deregister{quantile="0.99"} NaN
+consul_fsm_deregister_sum 0
+consul_fsm_deregister_count 0
+# HELP consul_fsm_intention Measures the time it takes to apply an intention operation to the FSM.
+# TYPE consul_fsm_intention summary
+consul_fsm_intention{quantile="0.5"} NaN
+consul_fsm_intention{quantile="0.9"} NaN
+consul_fsm_intention{quantile="0.99"} NaN
+consul_fsm_intention_sum 0
+consul_fsm_intention_count 0
+# HELP consul_fsm_kvs Measures the time it takes to apply the given KV operation to the FSM.
+# TYPE consul_fsm_kvs summary
+consul_fsm_kvs{quantile="0.5"} NaN
+consul_fsm_kvs{quantile="0.9"} NaN
+consul_fsm_kvs{quantile="0.99"} NaN
+consul_fsm_kvs_sum 0
+consul_fsm_kvs_count 0
+# HELP consul_fsm_peering Measures the time it takes to apply a peering operation to the FSM.
+# TYPE consul_fsm_peering summary
+consul_fsm_peering{quantile="0.5"} NaN
+consul_fsm_peering{quantile="0.9"} NaN
+consul_fsm_peering{quantile="0.99"} NaN
+consul_fsm_peering_sum 0
+consul_fsm_peering_count 0
+# HELP consul_fsm_persist Measures the time it takes to persist the FSM to a raft snapshot.
+# TYPE consul_fsm_persist summary
+consul_fsm_persist{quantile="0.5"} NaN
+consul_fsm_persist{quantile="0.9"} NaN
+consul_fsm_persist{quantile="0.99"} NaN
+consul_fsm_persist_sum 0
+consul_fsm_persist_count 0
+# HELP consul_fsm_prepared_query Measures the time it takes to apply the given prepared query update operation to the FSM.
+# TYPE consul_fsm_prepared_query summary
+consul_fsm_prepared_query{quantile="0.5"} NaN
+consul_fsm_prepared_query{quantile="0.9"} NaN
+consul_fsm_prepared_query{quantile="0.99"} NaN
+consul_fsm_prepared_query_sum 0
+consul_fsm_prepared_query_count 0
+# HELP consul_fsm_register Measures the time it takes to apply a catalog register operation to the FSM.
+# TYPE consul_fsm_register summary
+consul_fsm_register{quantile="0.5"} NaN
+consul_fsm_register{quantile="0.9"} NaN
+consul_fsm_register{quantile="0.99"} NaN
+consul_fsm_register_sum 0
+consul_fsm_register_count 0
+# HELP consul_fsm_session Measures the time it takes to apply the given session operation to the FSM.
+# TYPE consul_fsm_session summary
+consul_fsm_session{quantile="0.5"} NaN
+consul_fsm_session{quantile="0.9"} NaN
+consul_fsm_session{quantile="0.99"} NaN
+consul_fsm_session_sum 0
+consul_fsm_session_count 0
+# HELP consul_fsm_system_metadata Measures the time it takes to apply a system metadata operation to the FSM.
+# TYPE consul_fsm_system_metadata summary
+consul_fsm_system_metadata{quantile="0.5"} NaN
+consul_fsm_system_metadata{quantile="0.9"} NaN
+consul_fsm_system_metadata{quantile="0.99"} NaN
+consul_fsm_system_metadata_sum 0
+consul_fsm_system_metadata_count 0
+# HELP consul_fsm_tombstone Measures the time it takes to apply the given tombstone operation to the FSM.
+# TYPE consul_fsm_tombstone summary
+consul_fsm_tombstone{quantile="0.5"} NaN
+consul_fsm_tombstone{quantile="0.9"} NaN
+consul_fsm_tombstone{quantile="0.99"} NaN
+consul_fsm_tombstone_sum 0
+consul_fsm_tombstone_count 0
+# HELP consul_fsm_txn Measures the time it takes to apply the given transaction update to the FSM.
+# TYPE consul_fsm_txn summary
+consul_fsm_txn{quantile="0.5"} NaN
+consul_fsm_txn{quantile="0.9"} NaN
+consul_fsm_txn{quantile="0.99"} NaN
+consul_fsm_txn_sum 0
+consul_fsm_txn_count 0
+# HELP consul_grpc_client_connection_count Counts the number of new gRPC connections opened by the client agent to a Consul server.
+# TYPE consul_grpc_client_connection_count counter
+consul_grpc_client_connection_count 2
+# HELP consul_grpc_client_connections Measures the number of active gRPC connections open from the client agent to any Consul servers.
+# TYPE consul_grpc_client_connections gauge
+consul_grpc_client_connections 1
+# HELP consul_grpc_client_request_count Counts the number of gRPC requests made by the client agent to a Consul server.
+# TYPE consul_grpc_client_request_count counter
+consul_grpc_client_request_count 0
+# HELP consul_grpc_server_connection_count Counts the number of new gRPC connections received by the server.
+# TYPE consul_grpc_server_connection_count counter
+consul_grpc_server_connection_count 0
+# HELP consul_grpc_server_connections Measures the number of active gRPC connections open on the server.
+# TYPE consul_grpc_server_connections gauge
+consul_grpc_server_connections 0
+# HELP consul_grpc_server_request_count Counts the number of gRPC requests received by the server.
+# TYPE consul_grpc_server_request_count counter
+consul_grpc_server_request_count 0
+# HELP consul_grpc_server_stream_count Counts the number of new gRPC streams received by the server.
+# TYPE consul_grpc_server_stream_count counter
+consul_grpc_server_stream_count 0
+# HELP consul_grpc_server_streams Measures the number of active gRPC streams handled by the server.
+# TYPE consul_grpc_server_streams gauge
+consul_grpc_server_streams 0
+# HELP consul_intention_apply
+# TYPE consul_intention_apply summary
+consul_intention_apply{quantile="0.5"} NaN
+consul_intention_apply{quantile="0.9"} NaN
+consul_intention_apply{quantile="0.99"} NaN
+consul_intention_apply_sum 0
+consul_intention_apply_count 0
+# HELP consul_kvs_apply Measures the time it takes to complete an update to the KV store.
+# TYPE consul_kvs_apply summary
+consul_kvs_apply{quantile="0.5"} NaN
+consul_kvs_apply{quantile="0.9"} NaN
+consul_kvs_apply{quantile="0.99"} NaN
+consul_kvs_apply_sum 0
+consul_kvs_apply_count 0
+# HELP consul_leader_barrier Measures the time spent waiting for the raft barrier upon gaining leadership.
+# TYPE consul_leader_barrier summary
+consul_leader_barrier{quantile="0.5"} NaN
+consul_leader_barrier{quantile="0.9"} NaN
+consul_leader_barrier{quantile="0.99"} NaN
+consul_leader_barrier_sum 0
+consul_leader_barrier_count 0
+# HELP consul_leader_reapTombstones Measures the time spent clearing tombstones.
+# TYPE consul_leader_reapTombstones summary
+consul_leader_reapTombstones{quantile="0.5"} NaN
+consul_leader_reapTombstones{quantile="0.9"} NaN
+consul_leader_reapTombstones{quantile="0.99"} NaN
+consul_leader_reapTombstones_sum 0
+consul_leader_reapTombstones_count 0
+# HELP consul_leader_reconcile Measures the time spent updating the raft store from the serf member information.
+# TYPE consul_leader_reconcile summary
+consul_leader_reconcile{quantile="0.5"} NaN
+consul_leader_reconcile{quantile="0.9"} NaN
+consul_leader_reconcile{quantile="0.99"} NaN
+consul_leader_reconcile_sum 0
+consul_leader_reconcile_count 0
+# HELP consul_leader_reconcileMember Measures the time spent updating the raft store for a single serf member's information.
+# TYPE consul_leader_reconcileMember summary
+consul_leader_reconcileMember{quantile="0.5"} NaN
+consul_leader_reconcileMember{quantile="0.9"} NaN
+consul_leader_reconcileMember{quantile="0.99"} NaN
+consul_leader_reconcileMember_sum 0
+consul_leader_reconcileMember_count 0
+# HELP consul_leader_replication_acl_policies_index Tracks the index of ACL policies in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_policies_index gauge
+consul_leader_replication_acl_policies_index 0
+# HELP consul_leader_replication_acl_policies_status Tracks the current health of ACL policy replication on the leader
+# TYPE consul_leader_replication_acl_policies_status gauge
+consul_leader_replication_acl_policies_status 0
+# HELP consul_leader_replication_acl_roles_index Tracks the index of ACL roles in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_roles_index gauge
+consul_leader_replication_acl_roles_index 0
+# HELP consul_leader_replication_acl_roles_status Tracks the current health of ACL role replication on the leader
+# TYPE consul_leader_replication_acl_roles_status gauge
+consul_leader_replication_acl_roles_status 0
+# HELP consul_leader_replication_acl_tokens_index Tracks the index of ACL tokens in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_tokens_index gauge
+consul_leader_replication_acl_tokens_index 0
+# HELP consul_leader_replication_acl_tokens_status Tracks the current health of ACL token replication on the leader
+# TYPE consul_leader_replication_acl_tokens_status gauge
+consul_leader_replication_acl_tokens_status 0
+# HELP consul_leader_replication_config_entries_index Tracks the index of config entries in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_config_entries_index gauge
+consul_leader_replication_config_entries_index 0
+# HELP consul_leader_replication_config_entries_status Tracks the current health of config entry replication on the leader
+# TYPE consul_leader_replication_config_entries_status gauge
+consul_leader_replication_config_entries_status 0
+# HELP consul_leader_replication_federation_state_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_federation_state_index gauge
+consul_leader_replication_federation_state_index 0
+# HELP consul_leader_replication_federation_state_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_federation_state_status gauge
+consul_leader_replication_federation_state_status 0
+# HELP consul_leader_replication_namespaces_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_namespaces_index gauge
+consul_leader_replication_namespaces_index 0
+# HELP consul_leader_replication_namespaces_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_namespaces_status gauge
+consul_leader_replication_namespaces_status 0
+# HELP consul_memberlist_gossip consul_memberlist_gossip
+# TYPE consul_memberlist_gossip summary
+consul_memberlist_gossip{network="lan",quantile="0.5"} 0.02992900088429451
+consul_memberlist_gossip{network="lan",quantile="0.9"} 0.05322999879717827
+consul_memberlist_gossip{network="lan",quantile="0.99"} 0.09028899669647217
+consul_memberlist_gossip_sum{network="lan"} 72.09632398188114
+consul_memberlist_gossip_count{network="lan"} 2159
+# HELP consul_memberlist_msg_alive consul_memberlist_msg_alive
+# TYPE consul_memberlist_msg_alive counter
+consul_memberlist_msg_alive{network="lan"} 3
+# HELP consul_memberlist_probeNode consul_memberlist_probeNode
+# TYPE consul_memberlist_probeNode summary
+consul_memberlist_probeNode{network="lan",quantile="0.5"} 1.2391510009765625
+consul_memberlist_probeNode{network="lan",quantile="0.9"} 1.470810055732727
+consul_memberlist_probeNode{network="lan",quantile="0.99"} 1.470810055732727
+consul_memberlist_probeNode_sum{network="lan"} 550.6824030280113
+consul_memberlist_probeNode_count{network="lan"} 410
+# HELP consul_memberlist_pushPullNode consul_memberlist_pushPullNode
+# TYPE consul_memberlist_pushPullNode summary
+consul_memberlist_pushPullNode{network="lan",quantile="0.5"} 1.6478170156478882
+consul_memberlist_pushPullNode{network="lan",quantile="0.9"} 1.6478170156478882
+consul_memberlist_pushPullNode{network="lan",quantile="0.99"} 1.6478170156478882
+consul_memberlist_pushPullNode_sum{network="lan"} 28.438491106033325
+consul_memberlist_pushPullNode_count{network="lan"} 17
+# HELP consul_memberlist_tcp_accept consul_memberlist_tcp_accept
+# TYPE consul_memberlist_tcp_accept counter
+consul_memberlist_tcp_accept{network="lan"} 15
+# HELP consul_memberlist_tcp_connect consul_memberlist_tcp_connect
+# TYPE consul_memberlist_tcp_connect counter
+consul_memberlist_tcp_connect{network="lan"} 18
+# HELP consul_memberlist_tcp_sent consul_memberlist_tcp_sent
+# TYPE consul_memberlist_tcp_sent counter
+consul_memberlist_tcp_sent{network="lan"} 24679
+# HELP consul_memberlist_udp_received consul_memberlist_udp_received
+# TYPE consul_memberlist_udp_received counter
+consul_memberlist_udp_received{network="lan"} 117437
+# HELP consul_memberlist_udp_sent consul_memberlist_udp_sent
+# TYPE consul_memberlist_udp_sent counter
+consul_memberlist_udp_sent{network="lan"} 118601
+# HELP consul_prepared_query_apply Measures the time it takes to apply a prepared query update.
+# TYPE consul_prepared_query_apply summary
+consul_prepared_query_apply{quantile="0.5"} NaN
+consul_prepared_query_apply{quantile="0.9"} NaN
+consul_prepared_query_apply{quantile="0.99"} NaN
+consul_prepared_query_apply_sum 0
+consul_prepared_query_apply_count 0
+# HELP consul_prepared_query_execute Measures the time it takes to process a prepared query execute request.
+# TYPE consul_prepared_query_execute summary
+consul_prepared_query_execute{quantile="0.5"} NaN
+consul_prepared_query_execute{quantile="0.9"} NaN
+consul_prepared_query_execute{quantile="0.99"} NaN
+consul_prepared_query_execute_sum 0
+consul_prepared_query_execute_count 0
+# HELP consul_prepared_query_execute_remote Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter.
+# TYPE consul_prepared_query_execute_remote summary
+consul_prepared_query_execute_remote{quantile="0.5"} NaN
+consul_prepared_query_execute_remote{quantile="0.9"} NaN
+consul_prepared_query_execute_remote{quantile="0.99"} NaN
+consul_prepared_query_execute_remote_sum 0
+consul_prepared_query_execute_remote_count 0
+# HELP consul_prepared_query_explain Measures the time it takes to process a prepared query explain request.
+# TYPE consul_prepared_query_explain summary
+consul_prepared_query_explain{quantile="0.5"} NaN
+consul_prepared_query_explain{quantile="0.9"} NaN
+consul_prepared_query_explain{quantile="0.99"} NaN
+consul_prepared_query_explain_sum 0
+consul_prepared_query_explain_count 0
+# HELP consul_raft_applied_index Represents the raft applied index.
+# TYPE consul_raft_applied_index gauge
+consul_raft_applied_index 0
+# HELP consul_raft_apply This counts the number of Raft transactions occurring over the interval.
+# TYPE consul_raft_apply counter
+consul_raft_apply 0
+# HELP consul_raft_commitTime This measures the time it takes to commit a new entry to the Raft log on the leader.
+# TYPE consul_raft_commitTime summary
+consul_raft_commitTime{quantile="0.5"} NaN
+consul_raft_commitTime{quantile="0.9"} NaN
+consul_raft_commitTime{quantile="0.99"} NaN
+consul_raft_commitTime_sum 0
+consul_raft_commitTime_count 0
+# HELP consul_raft_fsm_lastRestoreDuration This measures how long the last FSM restore (from disk or leader) took.
+# TYPE consul_raft_fsm_lastRestoreDuration gauge
+consul_raft_fsm_lastRestoreDuration 0
+# HELP consul_raft_last_index Represents the raft last index.
+# TYPE consul_raft_last_index gauge
+consul_raft_last_index 0
+# HELP consul_raft_leader_lastContact Measures the time since the leader was last able to contact the follower nodes when checking its leader lease.
+# TYPE consul_raft_leader_lastContact summary
+consul_raft_leader_lastContact{quantile="0.5"} NaN
+consul_raft_leader_lastContact{quantile="0.9"} NaN
+consul_raft_leader_lastContact{quantile="0.99"} NaN
+consul_raft_leader_lastContact_sum 0
+consul_raft_leader_lastContact_count 0
+# HELP consul_raft_leader_oldestLogAge This measures how old the oldest log in the leader's log store is.
+# TYPE consul_raft_leader_oldestLogAge gauge
+consul_raft_leader_oldestLogAge 0
+# HELP consul_raft_rpc_installSnapshot Measures the time it takes the raft leader to install a snapshot on a follower that is catching up after being down or has just joined the cluster.
+# TYPE consul_raft_rpc_installSnapshot summary
+consul_raft_rpc_installSnapshot{quantile="0.5"} NaN
+consul_raft_rpc_installSnapshot{quantile="0.9"} NaN
+consul_raft_rpc_installSnapshot{quantile="0.99"} NaN
+consul_raft_rpc_installSnapshot_sum 0
+consul_raft_rpc_installSnapshot_count 0
+# HELP consul_raft_snapshot_persist Measures the time it takes raft to write a new snapshot to disk.
+# TYPE consul_raft_snapshot_persist summary
+consul_raft_snapshot_persist{quantile="0.5"} NaN
+consul_raft_snapshot_persist{quantile="0.9"} NaN
+consul_raft_snapshot_persist{quantile="0.99"} NaN
+consul_raft_snapshot_persist_sum 0
+consul_raft_snapshot_persist_count 0
+# HELP consul_raft_state_candidate This increments whenever a Consul server starts an election.
+# TYPE consul_raft_state_candidate counter
+consul_raft_state_candidate 0
+# HELP consul_raft_state_leader This increments whenever a Consul server becomes a leader.
+# TYPE consul_raft_state_leader counter
+consul_raft_state_leader 0
+# HELP consul_rpc_accept_conn Increments when a server accepts an RPC connection.
+# TYPE consul_rpc_accept_conn counter
+consul_rpc_accept_conn 0
+# HELP consul_rpc_consistentRead Measures the time spent confirming that a consistent read can be performed.
+# TYPE consul_rpc_consistentRead summary
+consul_rpc_consistentRead{quantile="0.5"} NaN
+consul_rpc_consistentRead{quantile="0.9"} NaN
+consul_rpc_consistentRead{quantile="0.99"} NaN
+consul_rpc_consistentRead_sum 0
+consul_rpc_consistentRead_count 0
+# HELP consul_rpc_cross_dc Increments when a server sends a (potentially blocking) cross datacenter RPC query.
+# TYPE consul_rpc_cross_dc counter
+consul_rpc_cross_dc 0
+# HELP consul_rpc_queries_blocking Shows the current number of in-flight blocking queries the server is handling.
+# TYPE consul_rpc_queries_blocking gauge
+consul_rpc_queries_blocking 0
+# HELP consul_rpc_query Increments when a server receives a read request, indicating the rate of new read queries.
+# TYPE consul_rpc_query counter
+consul_rpc_query 0
+# HELP consul_rpc_raft_handoff Increments when a server accepts a Raft-related RPC connection.
+# TYPE consul_rpc_raft_handoff counter
+consul_rpc_raft_handoff 0
+# HELP consul_rpc_request Increments when a server receives a Consul-related RPC request.
+# TYPE consul_rpc_request counter
+consul_rpc_request 0
+# HELP consul_rpc_request_error Increments when a server returns an error from an RPC request.
+# TYPE consul_rpc_request_error counter
+consul_rpc_request_error 0
+# HELP consul_runtime_alloc_bytes consul_runtime_alloc_bytes
+# TYPE consul_runtime_alloc_bytes gauge
+consul_runtime_alloc_bytes 2.6333408e+07
+# HELP consul_runtime_free_count consul_runtime_free_count
+# TYPE consul_runtime_free_count gauge
+consul_runtime_free_count 674987
+# HELP consul_runtime_gc_pause_ns consul_runtime_gc_pause_ns
+# TYPE consul_runtime_gc_pause_ns summary
+consul_runtime_gc_pause_ns{quantile="0.5"} NaN
+consul_runtime_gc_pause_ns{quantile="0.9"} NaN
+consul_runtime_gc_pause_ns{quantile="0.99"} NaN
+consul_runtime_gc_pause_ns_sum 4.182423e+06
+consul_runtime_gc_pause_ns_count 17
+# HELP consul_runtime_heap_objects consul_runtime_heap_objects
+# TYPE consul_runtime_heap_objects gauge
+consul_runtime_heap_objects 63474
+# HELP consul_runtime_malloc_count consul_runtime_malloc_count
+# TYPE consul_runtime_malloc_count gauge
+consul_runtime_malloc_count 738461
+# HELP consul_runtime_num_goroutines consul_runtime_num_goroutines
+# TYPE consul_runtime_num_goroutines gauge
+consul_runtime_num_goroutines 53
+# HELP consul_runtime_sys_bytes consul_runtime_sys_bytes
+# TYPE consul_runtime_sys_bytes gauge
+consul_runtime_sys_bytes 5.1201032e+07
+# HELP consul_runtime_total_gc_pause_ns consul_runtime_total_gc_pause_ns
+# TYPE consul_runtime_total_gc_pause_ns gauge
+consul_runtime_total_gc_pause_ns 4.182423e+06
+# HELP consul_runtime_total_gc_runs consul_runtime_total_gc_runs
+# TYPE consul_runtime_total_gc_runs gauge
+consul_runtime_total_gc_runs 17
+# HELP consul_serf_coordinate_adjustment_ms consul_serf_coordinate_adjustment_ms
+# TYPE consul_serf_coordinate_adjustment_ms summary
+consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.5"} 1.9778540134429932
+consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.9"} 2.0611228942871094
+consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.99"} 2.0611228942871094
+consul_serf_coordinate_adjustment_ms_sum{network="lan"} 375.26442916691303
+consul_serf_coordinate_adjustment_ms_count{network="lan"} 410
+# HELP consul_serf_member_join consul_serf_member_join
+# TYPE consul_serf_member_join counter
+consul_serf_member_join{network="lan"} 3
+# HELP consul_serf_msgs_received consul_serf_msgs_received
+# TYPE consul_serf_msgs_received summary
+consul_serf_msgs_received{network="lan",quantile="0.5"} NaN
+consul_serf_msgs_received{network="lan",quantile="0.9"} NaN
+consul_serf_msgs_received{network="lan",quantile="0.99"} NaN
+consul_serf_msgs_received_sum{network="lan"} 100
+consul_serf_msgs_received_count{network="lan"} 4
+# HELP consul_serf_msgs_sent consul_serf_msgs_sent
+# TYPE consul_serf_msgs_sent summary
+consul_serf_msgs_sent{network="lan",quantile="0.5"} NaN
+consul_serf_msgs_sent{network="lan",quantile="0.9"} NaN
+consul_serf_msgs_sent{network="lan",quantile="0.99"} NaN
+consul_serf_msgs_sent_sum{network="lan"} 200
+consul_serf_msgs_sent_count{network="lan"} 8
+# HELP consul_serf_queue_Event consul_serf_queue_Event
+# TYPE consul_serf_queue_Event summary
+consul_serf_queue_Event{network="lan",quantile="0.5"} NaN
+consul_serf_queue_Event{network="lan",quantile="0.9"} NaN
+consul_serf_queue_Event{network="lan",quantile="0.99"} NaN
+consul_serf_queue_Event_sum{network="lan"} 0
+consul_serf_queue_Event_count{network="lan"} 14
+# HELP consul_serf_queue_Intent consul_serf_queue_Intent
+# TYPE consul_serf_queue_Intent summary
+consul_serf_queue_Intent{network="lan",quantile="0.5"} NaN
+consul_serf_queue_Intent{network="lan",quantile="0.9"} NaN
+consul_serf_queue_Intent{network="lan",quantile="0.99"} NaN
+consul_serf_queue_Intent_sum{network="lan"} 0
+consul_serf_queue_Intent_count{network="lan"} 14
+# HELP consul_serf_queue_Query consul_serf_queue_Query
+# TYPE consul_serf_queue_Query summary
+consul_serf_queue_Query{network="lan",quantile="0.5"} NaN
+consul_serf_queue_Query{network="lan",quantile="0.9"} NaN
+consul_serf_queue_Query{network="lan",quantile="0.99"} NaN
+consul_serf_queue_Query_sum{network="lan"} 0
+consul_serf_queue_Query_count{network="lan"} 14
+# HELP consul_serf_snapshot_appendLine consul_serf_snapshot_appendLine
+# TYPE consul_serf_snapshot_appendLine summary
+consul_serf_snapshot_appendLine{network="lan",quantile="0.5"} NaN
+consul_serf_snapshot_appendLine{network="lan",quantile="0.9"} NaN
+consul_serf_snapshot_appendLine{network="lan",quantile="0.99"} NaN
+consul_serf_snapshot_appendLine_sum{network="lan"} 0.08486000122502446
+consul_serf_snapshot_appendLine_count{network="lan"} 4
+# HELP consul_server_isLeader Tracks if the server is a leader.
+# TYPE consul_server_isLeader gauge
+consul_server_isLeader 0
+# HELP consul_session_apply Measures the time spent applying a session update.
+# TYPE consul_session_apply summary
+consul_session_apply{quantile="0.5"} NaN
+consul_session_apply{quantile="0.9"} NaN
+consul_session_apply{quantile="0.99"} NaN
+consul_session_apply_sum 0
+consul_session_apply_count 0
+# HELP consul_session_renew Measures the time spent renewing a session.
+# TYPE consul_session_renew summary
+consul_session_renew{quantile="0.5"} NaN
+consul_session_renew{quantile="0.9"} NaN
+consul_session_renew{quantile="0.99"} NaN
+consul_session_renew_sum 0
+consul_session_renew_count 0
+# HELP consul_session_ttl_active Tracks the active number of sessions being tracked.
+# TYPE consul_session_ttl_active gauge
+consul_session_ttl_active 0
+# HELP consul_session_ttl_invalidate Measures the time spent invalidating an expired session.
+# TYPE consul_session_ttl_invalidate summary
+consul_session_ttl_invalidate{quantile="0.5"} NaN
+consul_session_ttl_invalidate{quantile="0.9"} NaN
+consul_session_ttl_invalidate{quantile="0.99"} NaN
+consul_session_ttl_invalidate_sum 0
+consul_session_ttl_invalidate_count 0
+# HELP consul_txn_apply Measures the time spent applying a transaction operation.
+# TYPE consul_txn_apply summary
+consul_txn_apply{quantile="0.5"} NaN
+consul_txn_apply{quantile="0.9"} NaN
+consul_txn_apply{quantile="0.99"} NaN
+consul_txn_apply_sum 0
+consul_txn_apply_count 0
+# HELP consul_txn_read Measures the time spent returning a read transaction.
+# TYPE consul_txn_read summary
+consul_txn_read{quantile="0.5"} NaN
+consul_txn_read{quantile="0.9"} NaN
+consul_txn_read{quantile="0.99"} NaN
+consul_txn_read_sum 0
+consul_txn_read_count 0
+# HELP consul_version Represents the Consul version.
+# TYPE consul_version gauge
+consul_version 0
+consul_version{pre_release="",version="1.13.2"} 1
+# HELP consul_xds_server_streams Measures the number of active xDS streams handled by the server split by protocol version.
+# TYPE consul_xds_server_streams gauge
+consul_xds_server_streams 0
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 1.9158e-05
+go_gc_duration_seconds{quantile="0.25"} 0.000109081
+go_gc_duration_seconds{quantile="0.5"} 0.000251188
+go_gc_duration_seconds{quantile="0.75"} 0.000417427
+go_gc_duration_seconds{quantile="1"} 0.000564015
+go_gc_duration_seconds_sum 0.004182423
+go_gc_duration_seconds_count 17
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 58
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.18.1"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 2.6578488e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 2.1175476e+08
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.493307e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 675169
+# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
+# TYPE go_memstats_gc_cpu_fraction gauge
+go_memstats_gc_cpu_fraction 3.182534545511277e-05
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 6.043992e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 2.6578488e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 1.2009472e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 2.8884992e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 64658
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 1.056768e+06
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 4.0894464e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.671442476091947e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 739827
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 9600
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 15600
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 265880
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 310080
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 3.547528e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 1.395013e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 1.048576e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 1.048576e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 5.1201032e+07
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 13
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 3.12
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1024
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 18
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 9.9598336e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.67144207026e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 8.133632e+08
+# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
+# TYPE process_virtual_memory_max_bytes gauge
+process_virtual_memory_max_bytes -1
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/client_v1-agent-self.json b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/client_v1-agent-self.json
new file mode 100644
index 000000000..e5f75dc24
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/client_v1-agent-self.json
@@ -0,0 +1,50 @@
+{
+ "Config": {
+ "Datacenter": "us-central",
+ "PrimaryDatacenter": "us-central",
+ "NodeName": "satya-vm",
+ "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d",
+ "Revision": "0e046bbb",
+ "Server": false,
+ "Version": "1.13.2",
+ "BuildDate": "2022-09-20T20:30:07Z"
+ },
+ "DebugConfig": {
+ "Telemetry": {
+ "AllowedPrefixes": [],
+ "BlockedPrefixes": [
+ "consul.rpc.server.call"
+ ],
+ "CirconusAPIApp": "",
+ "CirconusAPIToken": "hidden",
+ "CirconusAPIURL": "",
+ "CirconusBrokerID": "",
+ "CirconusBrokerSelectTag": "",
+ "CirconusCheckDisplayName": "",
+ "CirconusCheckForceMetricActivation": "",
+ "CirconusCheckID": "",
+ "CirconusCheckInstanceID": "",
+ "CirconusCheckSearchTag": "",
+ "CirconusCheckTags": "",
+ "CirconusSubmissionInterval": "",
+ "CirconusSubmissionURL": "",
+ "Disable": false,
+ "DisableHostname": true,
+ "DogstatsdAddr": "",
+ "DogstatsdTags": [],
+ "FilterDefault": true,
+ "MetricsPrefix": "consul",
+ "PrometheusOpts": {
+ "CounterDefinitions": [],
+ "Expiration": "10m0s",
+ "GaugeDefinitions": [],
+ "Name": "consul",
+ "Registerer": null,
+ "SummaryDefinitions": []
+ },
+ "RetryFailedConfiguration": true,
+ "StatsdAddr": "",
+ "StatsiteAddr": ""
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-metrics.txt b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-metrics.txt
new file mode 100644
index 000000000..63dbaddfc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-metrics.txt
@@ -0,0 +1,1255 @@
+# HELP consul_acl_ResolveToken This measures the time it takes to resolve an ACL token.
+# TYPE consul_acl_ResolveToken summary
+consul_acl_ResolveToken{quantile="0.5"} NaN
+consul_acl_ResolveToken{quantile="0.9"} NaN
+consul_acl_ResolveToken{quantile="0.99"} NaN
+consul_acl_ResolveToken_sum 0
+consul_acl_ResolveToken_count 0
+# HELP consul_acl_authmethod_delete
+# TYPE consul_acl_authmethod_delete summary
+consul_acl_authmethod_delete{quantile="0.5"} NaN
+consul_acl_authmethod_delete{quantile="0.9"} NaN
+consul_acl_authmethod_delete{quantile="0.99"} NaN
+consul_acl_authmethod_delete_sum 0
+consul_acl_authmethod_delete_count 0
+# HELP consul_acl_authmethod_upsert
+# TYPE consul_acl_authmethod_upsert summary
+consul_acl_authmethod_upsert{quantile="0.5"} NaN
+consul_acl_authmethod_upsert{quantile="0.9"} NaN
+consul_acl_authmethod_upsert{quantile="0.99"} NaN
+consul_acl_authmethod_upsert_sum 0
+consul_acl_authmethod_upsert_count 0
+# HELP consul_acl_bindingrule_delete
+# TYPE consul_acl_bindingrule_delete summary
+consul_acl_bindingrule_delete{quantile="0.5"} NaN
+consul_acl_bindingrule_delete{quantile="0.9"} NaN
+consul_acl_bindingrule_delete{quantile="0.99"} NaN
+consul_acl_bindingrule_delete_sum 0
+consul_acl_bindingrule_delete_count 0
+# HELP consul_acl_bindingrule_upsert
+# TYPE consul_acl_bindingrule_upsert summary
+consul_acl_bindingrule_upsert{quantile="0.5"} NaN
+consul_acl_bindingrule_upsert{quantile="0.9"} NaN
+consul_acl_bindingrule_upsert{quantile="0.99"} NaN
+consul_acl_bindingrule_upsert_sum 0
+consul_acl_bindingrule_upsert_count 0
+# HELP consul_acl_blocked_check_deregistration Increments whenever a deregistration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_deregistration counter
+consul_acl_blocked_check_deregistration 0
+# HELP consul_acl_blocked_check_registration Increments whenever a registration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_registration counter
+consul_acl_blocked_check_registration 0
+# HELP consul_acl_blocked_node_registration Increments whenever a registration fails for a node (blocked by an ACL)
+# TYPE consul_acl_blocked_node_registration counter
+consul_acl_blocked_node_registration 0
+# HELP consul_acl_blocked_service_deregistration Increments whenever a deregistration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_deregistration counter
+consul_acl_blocked_service_deregistration 0
+# HELP consul_acl_blocked_service_registration Increments whenever a registration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_registration counter
+consul_acl_blocked_service_registration 0
+# HELP consul_acl_login
+# TYPE consul_acl_login summary
+consul_acl_login{quantile="0.5"} NaN
+consul_acl_login{quantile="0.9"} NaN
+consul_acl_login{quantile="0.99"} NaN
+consul_acl_login_sum 0
+consul_acl_login_count 0
+# HELP consul_acl_logout
+# TYPE consul_acl_logout summary
+consul_acl_logout{quantile="0.5"} NaN
+consul_acl_logout{quantile="0.9"} NaN
+consul_acl_logout{quantile="0.99"} NaN
+consul_acl_logout_sum 0
+consul_acl_logout_count 0
+# HELP consul_acl_policy_delete
+# TYPE consul_acl_policy_delete summary
+consul_acl_policy_delete{quantile="0.5"} NaN
+consul_acl_policy_delete{quantile="0.9"} NaN
+consul_acl_policy_delete{quantile="0.99"} NaN
+consul_acl_policy_delete_sum 0
+consul_acl_policy_delete_count 0
+# HELP consul_acl_policy_upsert
+# TYPE consul_acl_policy_upsert summary
+consul_acl_policy_upsert{quantile="0.5"} NaN
+consul_acl_policy_upsert{quantile="0.9"} NaN
+consul_acl_policy_upsert{quantile="0.99"} NaN
+consul_acl_policy_upsert_sum 0
+consul_acl_policy_upsert_count 0
+# HELP consul_acl_role_delete
+# TYPE consul_acl_role_delete summary
+consul_acl_role_delete{quantile="0.5"} NaN
+consul_acl_role_delete{quantile="0.9"} NaN
+consul_acl_role_delete{quantile="0.99"} NaN
+consul_acl_role_delete_sum 0
+consul_acl_role_delete_count 0
+# HELP consul_acl_role_upsert
+# TYPE consul_acl_role_upsert summary
+consul_acl_role_upsert{quantile="0.5"} NaN
+consul_acl_role_upsert{quantile="0.9"} NaN
+consul_acl_role_upsert{quantile="0.99"} NaN
+consul_acl_role_upsert_sum 0
+consul_acl_role_upsert_count 0
+# HELP consul_acl_token_cache_hit Increments if Consul is able to resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_hit counter
+consul_acl_token_cache_hit 0
+# HELP consul_acl_token_cache_miss Increments if Consul cannot resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_miss counter
+consul_acl_token_cache_miss 0
+# HELP consul_acl_token_clone
+# TYPE consul_acl_token_clone summary
+consul_acl_token_clone{quantile="0.5"} NaN
+consul_acl_token_clone{quantile="0.9"} NaN
+consul_acl_token_clone{quantile="0.99"} NaN
+consul_acl_token_clone_sum 0
+consul_acl_token_clone_count 0
+# HELP consul_acl_token_delete
+# TYPE consul_acl_token_delete summary
+consul_acl_token_delete{quantile="0.5"} NaN
+consul_acl_token_delete{quantile="0.9"} NaN
+consul_acl_token_delete{quantile="0.99"} NaN
+consul_acl_token_delete_sum 0
+consul_acl_token_delete_count 0
+# HELP consul_acl_token_upsert
+# TYPE consul_acl_token_upsert summary
+consul_acl_token_upsert{quantile="0.5"} NaN
+consul_acl_token_upsert{quantile="0.9"} NaN
+consul_acl_token_upsert{quantile="0.99"} NaN
+consul_acl_token_upsert_sum 0
+consul_acl_token_upsert_count 0
+# HELP consul_agent_tls_cert_expiry Seconds until the agent tls certificate expires. Updated every hour
+# TYPE consul_agent_tls_cert_expiry gauge
+consul_agent_tls_cert_expiry 0
+# HELP consul_api_http Samples how long it takes to service the given HTTP request for the given verb and path.
+# TYPE consul_api_http summary
+consul_api_http{quantile="0.5"} NaN
+consul_api_http{quantile="0.9"} NaN
+consul_api_http{quantile="0.99"} NaN
+consul_api_http_sum 0
+consul_api_http_count 0
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.5"} 0.11646900326013565
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.9"} 0.3685469925403595
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.99"} 1.142822027206421
+consul_api_http_sum{method="GET",path="v1_agent_checks"} 24054.416150089353
+consul_api_http_count{method="GET",path="v1_agent_checks"} 99423
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.5"} 0.8454239964485168
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.9"} 4.116001129150391
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.99"} 6.345314025878906
+consul_api_http_sum{method="GET",path="v1_agent_metrics"} 169447.17186257243
+consul_api_http_count{method="GET",path="v1_agent_metrics"} 118670
+# HELP consul_autopilot_failure_tolerance Tracks the number of voting servers that the cluster can lose while continuing to function.
+# TYPE consul_autopilot_failure_tolerance gauge
+consul_autopilot_failure_tolerance 1
+# HELP consul_autopilot_healthy Tracks the overall health of the local server cluster. 1 if all servers are healthy, 0 if one or more are unhealthy.
+# TYPE consul_autopilot_healthy gauge
+consul_autopilot_healthy 1
+# HELP consul_catalog_connect_not_found Increments for each connect-based catalog query where the given service could not be found.
+# TYPE consul_catalog_connect_not_found counter
+consul_catalog_connect_not_found 0
+# HELP consul_catalog_connect_query Increments for each connect-based catalog query for the given service.
+# TYPE consul_catalog_connect_query counter
+consul_catalog_connect_query 0
+# HELP consul_catalog_connect_query_tag Increments for each connect-based catalog query for the given service with the given tag.
+# TYPE consul_catalog_connect_query_tag counter
+consul_catalog_connect_query_tag 0
+# HELP consul_catalog_connect_query_tags Increments for each connect-based catalog query for the given service with the given tags.
+# TYPE consul_catalog_connect_query_tags counter
+consul_catalog_connect_query_tags 0
+# HELP consul_catalog_deregister Measures the time it takes to complete a catalog deregister operation.
+# TYPE consul_catalog_deregister summary
+consul_catalog_deregister{quantile="0.5"} NaN
+consul_catalog_deregister{quantile="0.9"} NaN
+consul_catalog_deregister{quantile="0.99"} NaN
+consul_catalog_deregister_sum 0
+consul_catalog_deregister_count 0
+# HELP consul_catalog_register Measures the time it takes to complete a catalog register operation.
+# TYPE consul_catalog_register summary
+consul_catalog_register{quantile="0.5"} NaN
+consul_catalog_register{quantile="0.9"} NaN
+consul_catalog_register{quantile="0.99"} NaN
+consul_catalog_register_sum 15302.798070907593
+consul_catalog_register_count 193
+# HELP consul_catalog_service_not_found Increments for each catalog query where the given service could not be found.
+# TYPE consul_catalog_service_not_found counter
+consul_catalog_service_not_found 0
+# HELP consul_catalog_service_query Increments for each catalog query for the given service.
+# TYPE consul_catalog_service_query counter
+consul_catalog_service_query 0
+# HELP consul_catalog_service_query_tag Increments for each catalog query for the given service with the given tag.
+# TYPE consul_catalog_service_query_tag counter
+consul_catalog_service_query_tag 0
+# HELP consul_catalog_service_query_tags Increments for each catalog query for the given service with the given tags.
+# TYPE consul_catalog_service_query_tags counter
+consul_catalog_service_query_tags 0
+# HELP consul_client_api_catalog_datacenters Increments whenever a Consul agent receives a request to list datacenters in the catalog.
+# TYPE consul_client_api_catalog_datacenters counter
+consul_client_api_catalog_datacenters 0
+# HELP consul_client_api_catalog_deregister Increments whenever a Consul agent receives a catalog deregister request.
+# TYPE consul_client_api_catalog_deregister counter
+consul_client_api_catalog_deregister 0
+# HELP consul_client_api_catalog_gateway_services Increments whenever a Consul agent receives a request to list services associated with a gateway.
+# TYPE consul_client_api_catalog_gateway_services counter
+consul_client_api_catalog_gateway_services 0
+# HELP consul_client_api_catalog_node_service_list Increments whenever a Consul agent receives a request to list a node's registered services.
+# TYPE consul_client_api_catalog_node_service_list counter
+consul_client_api_catalog_node_service_list 0
+# HELP consul_client_api_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_node_services counter
+consul_client_api_catalog_node_services 0
+# HELP consul_client_api_catalog_nodes Increments whenever a Consul agent receives a request to list nodes from the catalog.
+# TYPE consul_client_api_catalog_nodes counter
+consul_client_api_catalog_nodes 0
+# HELP consul_client_api_catalog_register Increments whenever a Consul agent receives a catalog register request.
+# TYPE consul_client_api_catalog_register counter
+consul_client_api_catalog_register 0
+# HELP consul_client_api_catalog_service_nodes Increments whenever a Consul agent receives a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_service_nodes counter
+consul_client_api_catalog_service_nodes 0
+# HELP consul_client_api_catalog_services Increments whenever a Consul agent receives a request to list services from the catalog.
+# TYPE consul_client_api_catalog_services counter
+consul_client_api_catalog_services 0
+# HELP consul_client_api_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service.
+# TYPE consul_client_api_error_catalog_service_nodes counter
+consul_client_api_error_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_datacenters Increments whenever a Consul agent successfully responds to a request to list datacenters.
+# TYPE consul_client_api_success_catalog_datacenters counter
+consul_client_api_success_catalog_datacenters 0
+# HELP consul_client_api_success_catalog_deregister Increments whenever a Consul agent successfully responds to a catalog deregister request.
+# TYPE consul_client_api_success_catalog_deregister counter
+consul_client_api_success_catalog_deregister 0
+# HELP consul_client_api_success_catalog_gateway_services Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway.
+# TYPE consul_client_api_success_catalog_gateway_services counter
+consul_client_api_success_catalog_gateway_services 0
+# HELP consul_client_api_success_catalog_node_service_list Increments whenever a Consul agent successfully responds to a request to list a node's registered services.
+# TYPE consul_client_api_success_catalog_node_service_list counter
+consul_client_api_success_catalog_node_service_list 0
+# HELP consul_client_api_success_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list services in a node.
+# TYPE consul_client_api_success_catalog_node_services counter
+consul_client_api_success_catalog_node_services 0
+# HELP consul_client_api_success_catalog_nodes Increments whenever a Consul agent successfully responds to a request to list nodes.
+# TYPE consul_client_api_success_catalog_nodes counter
+consul_client_api_success_catalog_nodes 0
+# HELP consul_client_api_success_catalog_register Increments whenever a Consul agent successfully responds to a catalog register request.
+# TYPE consul_client_api_success_catalog_register counter
+consul_client_api_success_catalog_register 0
+# HELP consul_client_api_success_catalog_service_nodes Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_success_catalog_service_nodes counter
+consul_client_api_success_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_services Increments whenever a Consul agent successfully responds to a request to list services.
+# TYPE consul_client_api_success_catalog_services counter
+consul_client_api_success_catalog_services 0
+# HELP consul_client_rpc Increments whenever a Consul agent in client mode makes an RPC request to a Consul server.
+# TYPE consul_client_rpc counter
+consul_client_rpc 6838
+# HELP consul_client_rpc_error_catalog_datacenters Increments whenever a Consul agent receives an RPC error for a request to list datacenters.
+# TYPE consul_client_rpc_error_catalog_datacenters counter
+consul_client_rpc_error_catalog_datacenters 0
+# HELP consul_client_rpc_error_catalog_deregister Increments whenever a Consul agent receives an RPC error for a catalog deregister request.
+# TYPE consul_client_rpc_error_catalog_deregister counter
+consul_client_rpc_error_catalog_deregister 0
+# HELP consul_client_rpc_error_catalog_gateway_services Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway.
+# TYPE consul_client_rpc_error_catalog_gateway_services counter
+consul_client_rpc_error_catalog_gateway_services 0
+# HELP consul_client_rpc_error_catalog_node_service_list Increments whenever a Consul agent receives an RPC error for request to list a node's registered services.
+# TYPE consul_client_rpc_error_catalog_node_service_list counter
+consul_client_rpc_error_catalog_node_service_list 0
+# HELP consul_client_rpc_error_catalog_node_services Increments whenever a Consul agent receives an RPC error for a request to list services in a node.
+# TYPE consul_client_rpc_error_catalog_node_services counter
+consul_client_rpc_error_catalog_node_services 0
+# HELP consul_client_rpc_error_catalog_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes.
+# TYPE consul_client_rpc_error_catalog_nodes counter
+consul_client_rpc_error_catalog_nodes 0
+# HELP consul_client_rpc_error_catalog_register Increments whenever a Consul agent receives an RPC error for a catalog register request.
+# TYPE consul_client_rpc_error_catalog_register counter
+consul_client_rpc_error_catalog_register 0
+# HELP consul_client_rpc_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.
+# TYPE consul_client_rpc_error_catalog_service_nodes counter
+consul_client_rpc_error_catalog_service_nodes 0
+# HELP consul_client_rpc_error_catalog_services Increments whenever a Consul agent receives an RPC error for a request to list services.
+# TYPE consul_client_rpc_error_catalog_services counter
+consul_client_rpc_error_catalog_services 0
+# HELP consul_client_rpc_exceeded Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's limits configuration.
+# TYPE consul_client_rpc_exceeded counter
+consul_client_rpc_exceeded 0
+# HELP consul_client_rpc_failed Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails.
+# TYPE consul_client_rpc_failed counter
+consul_client_rpc_failed 0
+# HELP consul_consul_cache_bypass Counts how many times a request bypassed the cache because no cache-key was provided.
+# TYPE consul_consul_cache_bypass counter
+consul_consul_cache_bypass 0
+# HELP consul_consul_cache_entries_count Represents the number of entries in this cache.
+# TYPE consul_consul_cache_entries_count gauge
+consul_consul_cache_entries_count 0
+# HELP consul_consul_cache_evict_expired Counts the number of expired entries that are evicted.
+# TYPE consul_consul_cache_evict_expired counter
+consul_consul_cache_evict_expired 0
+# HELP consul_consul_cache_fetch_error Counts the number of failed fetches by the cache.
+# TYPE consul_consul_cache_fetch_error counter
+consul_consul_cache_fetch_error 0
+# HELP consul_consul_cache_fetch_success Counts the number of successful fetches by the cache.
+# TYPE consul_consul_cache_fetch_success counter
+consul_consul_cache_fetch_success 0
+# HELP consul_consul_fsm_ca Deprecated - use fsm_ca instead
+# TYPE consul_consul_fsm_ca summary
+consul_consul_fsm_ca{quantile="0.5"} NaN
+consul_consul_fsm_ca{quantile="0.9"} NaN
+consul_consul_fsm_ca{quantile="0.99"} NaN
+consul_consul_fsm_ca_sum 0
+consul_consul_fsm_ca_count 0
+# HELP consul_consul_fsm_intention Deprecated - use fsm_intention instead
+# TYPE consul_consul_fsm_intention summary
+consul_consul_fsm_intention{quantile="0.5"} NaN
+consul_consul_fsm_intention{quantile="0.9"} NaN
+consul_consul_fsm_intention{quantile="0.99"} NaN
+consul_consul_fsm_intention_sum 0
+consul_consul_fsm_intention_count 0
+# HELP consul_consul_intention_apply
+# TYPE consul_consul_intention_apply summary
+consul_consul_intention_apply{quantile="0.5"} NaN
+consul_consul_intention_apply{quantile="0.9"} NaN
+consul_consul_intention_apply{quantile="0.99"} NaN
+consul_consul_intention_apply_sum 0
+consul_consul_intention_apply_count 0
+# HELP consul_consul_members_clients Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_consul_members_clients gauge
+consul_consul_members_clients 0
+consul_consul_members_clients{datacenter="us-central"} 0
+# HELP consul_consul_members_servers Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_consul_members_servers gauge
+consul_consul_members_servers 0
+consul_consul_members_servers{datacenter="us-central"} 3
+# HELP consul_consul_peering_exported_services A gauge that tracks how many services are exported for the peering. The labels are "peering" and, for enterprise, "partition". We emit this metric every 9 seconds
+# TYPE consul_consul_peering_exported_services gauge
+consul_consul_peering_exported_services 0
+# HELP consul_consul_state_config_entries Measures the current number of unique configuration entries registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_consul_state_config_entries gauge
+consul_consul_state_config_entries 0
+consul_consul_state_config_entries{datacenter="us-central",kind="exported-services"} 0
+consul_consul_state_config_entries{datacenter="us-central",kind="ingress-gateway"} 0
+consul_consul_state_config_entries{datacenter="us-central",kind="mesh"} 0
+consul_consul_state_config_entries{datacenter="us-central",kind="proxy-defaults"} 0
+consul_consul_state_config_entries{datacenter="us-central",kind="service-defaults"} 0
+consul_consul_state_config_entries{datacenter="us-central",kind="service-intentions"} 0
+consul_consul_state_config_entries{datacenter="us-central",kind="service-resolver"} 0
+consul_consul_state_config_entries{datacenter="us-central",kind="service-router"} 0
+consul_consul_state_config_entries{datacenter="us-central",kind="service-splitter"} 0
+consul_consul_state_config_entries{datacenter="us-central",kind="terminating-gateway"} 0
+# HELP consul_consul_state_connect_instances Measures the current number of unique connect service instances registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_consul_state_connect_instances gauge
+consul_consul_state_connect_instances 0
+consul_consul_state_connect_instances{datacenter="us-central",kind="connect-native"} 0
+consul_consul_state_connect_instances{datacenter="us-central",kind="connect-proxy"} 0
+consul_consul_state_connect_instances{datacenter="us-central",kind="ingress-gateway"} 0
+consul_consul_state_connect_instances{datacenter="us-central",kind="mesh-gateway"} 0
+consul_consul_state_connect_instances{datacenter="us-central",kind="terminating-gateway"} 0
+# HELP consul_consul_state_kv_entries Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.
+# TYPE consul_consul_state_kv_entries gauge
+consul_consul_state_kv_entries 0
+consul_consul_state_kv_entries{datacenter="us-central"} 1
+# HELP consul_consul_state_nodes Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_nodes gauge
+consul_consul_state_nodes 0
+consul_consul_state_nodes{datacenter="us-central"} 3
+# HELP consul_consul_state_peerings Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0.
+# TYPE consul_consul_state_peerings gauge
+consul_consul_state_peerings 0
+consul_consul_state_peerings{datacenter="us-central"} 0
+# HELP consul_consul_state_service_instances Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_service_instances gauge
+consul_consul_state_service_instances 0
+consul_consul_state_service_instances{datacenter="us-central"} 4
+# HELP consul_consul_state_services Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_services gauge
+consul_consul_state_services 0
+consul_consul_state_services{datacenter="us-central"} 2
+# HELP consul_federation_state_apply
+# TYPE consul_federation_state_apply summary
+consul_federation_state_apply{quantile="0.5"} NaN
+consul_federation_state_apply{quantile="0.9"} NaN
+consul_federation_state_apply{quantile="0.99"} NaN
+consul_federation_state_apply_sum 0
+consul_federation_state_apply_count 0
+# HELP consul_federation_state_get
+# TYPE consul_federation_state_get summary
+consul_federation_state_get{quantile="0.5"} NaN
+consul_federation_state_get{quantile="0.9"} NaN
+consul_federation_state_get{quantile="0.99"} NaN
+consul_federation_state_get_sum 0
+consul_federation_state_get_count 0
+# HELP consul_federation_state_list
+# TYPE consul_federation_state_list summary
+consul_federation_state_list{quantile="0.5"} NaN
+consul_federation_state_list{quantile="0.9"} NaN
+consul_federation_state_list{quantile="0.99"} NaN
+consul_federation_state_list_sum 0
+consul_federation_state_list_count 0
+# HELP consul_federation_state_list_mesh_gateways
+# TYPE consul_federation_state_list_mesh_gateways summary
+consul_federation_state_list_mesh_gateways{quantile="0.5"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.9"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.99"} NaN
+consul_federation_state_list_mesh_gateways_sum 0
+consul_federation_state_list_mesh_gateways_count 0
+# HELP consul_fsm_acl Measures the time it takes to apply the given ACL operation to the FSM.
+# TYPE consul_fsm_acl summary
+consul_fsm_acl{quantile="0.5"} NaN
+consul_fsm_acl{quantile="0.9"} NaN
+consul_fsm_acl{quantile="0.99"} NaN
+consul_fsm_acl_sum 0
+consul_fsm_acl_count 0
+# HELP consul_fsm_acl_authmethod Measures the time it takes to apply an ACL authmethod operation to the FSM.
+# TYPE consul_fsm_acl_authmethod summary
+consul_fsm_acl_authmethod{quantile="0.5"} NaN
+consul_fsm_acl_authmethod{quantile="0.9"} NaN
+consul_fsm_acl_authmethod{quantile="0.99"} NaN
+consul_fsm_acl_authmethod_sum 0
+consul_fsm_acl_authmethod_count 0
+# HELP consul_fsm_acl_bindingrule Measures the time it takes to apply an ACL binding rule operation to the FSM.
+# TYPE consul_fsm_acl_bindingrule summary
+consul_fsm_acl_bindingrule{quantile="0.5"} NaN
+consul_fsm_acl_bindingrule{quantile="0.9"} NaN
+consul_fsm_acl_bindingrule{quantile="0.99"} NaN
+consul_fsm_acl_bindingrule_sum 0
+consul_fsm_acl_bindingrule_count 0
+# HELP consul_fsm_acl_policy Measures the time it takes to apply an ACL policy operation to the FSM.
+# TYPE consul_fsm_acl_policy summary
+consul_fsm_acl_policy{quantile="0.5"} NaN
+consul_fsm_acl_policy{quantile="0.9"} NaN
+consul_fsm_acl_policy{quantile="0.99"} NaN
+consul_fsm_acl_policy_sum 0
+consul_fsm_acl_policy_count 0
+# HELP consul_fsm_acl_token Measures the time it takes to apply an ACL token operation to the FSM.
+# TYPE consul_fsm_acl_token summary
+consul_fsm_acl_token{quantile="0.5"} NaN
+consul_fsm_acl_token{quantile="0.9"} NaN
+consul_fsm_acl_token{quantile="0.99"} NaN
+consul_fsm_acl_token_sum 0
+consul_fsm_acl_token_count 0
+# HELP consul_fsm_autopilot Measures the time it takes to apply the given autopilot update to the FSM.
+# TYPE consul_fsm_autopilot summary
+consul_fsm_autopilot{quantile="0.5"} NaN
+consul_fsm_autopilot{quantile="0.9"} NaN
+consul_fsm_autopilot{quantile="0.99"} NaN
+consul_fsm_autopilot_sum 0
+consul_fsm_autopilot_count 0
+# HELP consul_fsm_ca Measures the time it takes to apply CA configuration operations to the FSM.
+# TYPE consul_fsm_ca summary
+consul_fsm_ca{quantile="0.5"} NaN
+consul_fsm_ca{quantile="0.9"} NaN
+consul_fsm_ca{quantile="0.99"} NaN
+consul_fsm_ca_sum 0
+consul_fsm_ca_count 0
+# HELP consul_fsm_ca_leaf Measures the time it takes to apply an operation while signing a leaf certificate.
+# TYPE consul_fsm_ca_leaf summary
+consul_fsm_ca_leaf{quantile="0.5"} NaN
+consul_fsm_ca_leaf{quantile="0.9"} NaN
+consul_fsm_ca_leaf{quantile="0.99"} NaN
+consul_fsm_ca_leaf_sum 0
+consul_fsm_ca_leaf_count 0
+# HELP consul_fsm_coordinate_batch_update Measures the time it takes to apply the given batch coordinate update to the FSM.
+# TYPE consul_fsm_coordinate_batch_update summary
+consul_fsm_coordinate_batch_update{quantile="0.5"} 0.846472978591919
+consul_fsm_coordinate_batch_update{quantile="0.9"} 0.846472978591919
+consul_fsm_coordinate_batch_update{quantile="0.99"} 0.846472978591919
+consul_fsm_coordinate_batch_update_sum 1319.3496078031603
+consul_fsm_coordinate_batch_update_count 22753
+# HELP consul_fsm_deregister Measures the time it takes to apply a catalog deregister operation to the FSM.
+# TYPE consul_fsm_deregister summary
+consul_fsm_deregister{quantile="0.5"} NaN
+consul_fsm_deregister{quantile="0.9"} NaN
+consul_fsm_deregister{quantile="0.99"} NaN
+consul_fsm_deregister_sum 7.263695985078812
+consul_fsm_deregister_count 25
+# HELP consul_fsm_intention Measures the time it takes to apply an intention operation to the FSM.
+# TYPE consul_fsm_intention summary
+consul_fsm_intention{quantile="0.5"} NaN
+consul_fsm_intention{quantile="0.9"} NaN
+consul_fsm_intention{quantile="0.99"} NaN
+consul_fsm_intention_sum 0
+consul_fsm_intention_count 0
+# HELP consul_fsm_kvs Measures the time it takes to apply the given KV operation to the FSM.
+# TYPE consul_fsm_kvs summary
+consul_fsm_kvs{quantile="0.5"} NaN
+consul_fsm_kvs{quantile="0.9"} NaN
+consul_fsm_kvs{quantile="0.99"} NaN
+consul_fsm_kvs_sum 0
+consul_fsm_kvs_count 0
+# HELP consul_fsm_peering Measures the time it takes to apply a peering operation to the FSM.
+# TYPE consul_fsm_peering summary
+consul_fsm_peering{quantile="0.5"} NaN
+consul_fsm_peering{quantile="0.9"} NaN
+consul_fsm_peering{quantile="0.99"} NaN
+consul_fsm_peering_sum 0
+consul_fsm_peering_count 0
+# HELP consul_fsm_persist Measures the time it takes to persist the FSM to a raft snapshot.
+# TYPE consul_fsm_persist summary
+consul_fsm_persist{quantile="0.5"} NaN
+consul_fsm_persist{quantile="0.9"} NaN
+consul_fsm_persist{quantile="0.99"} NaN
+consul_fsm_persist_sum 0.7345139980316162
+consul_fsm_persist_count 1
+# HELP consul_fsm_prepared_query Measures the time it takes to apply the given prepared query update operation to the FSM.
+# TYPE consul_fsm_prepared_query summary
+consul_fsm_prepared_query{quantile="0.5"} NaN
+consul_fsm_prepared_query{quantile="0.9"} NaN
+consul_fsm_prepared_query{quantile="0.99"} NaN
+consul_fsm_prepared_query_sum 0
+consul_fsm_prepared_query_count 0
+# HELP consul_fsm_register Measures the time it takes to apply a catalog register operation to the FSM.
+# TYPE consul_fsm_register summary
+consul_fsm_register{quantile="0.5"} NaN
+consul_fsm_register{quantile="0.9"} NaN
+consul_fsm_register{quantile="0.99"} NaN
+consul_fsm_register_sum 77.52807594463229
+consul_fsm_register_count 475
+# HELP consul_fsm_session Measures the time it takes to apply the given session operation to the FSM.
+# TYPE consul_fsm_session summary
+consul_fsm_session{quantile="0.5"} NaN
+consul_fsm_session{quantile="0.9"} NaN
+consul_fsm_session{quantile="0.99"} NaN
+consul_fsm_session_sum 0
+consul_fsm_session_count 0
+# HELP consul_fsm_system_metadata Measures the time it takes to apply a system metadata operation to the FSM.
+# TYPE consul_fsm_system_metadata summary
+consul_fsm_system_metadata{quantile="0.5"} NaN
+consul_fsm_system_metadata{quantile="0.9"} NaN
+consul_fsm_system_metadata{quantile="0.99"} NaN
+consul_fsm_system_metadata_sum 0
+consul_fsm_system_metadata_count 0
+# HELP consul_fsm_tombstone Measures the time it takes to apply the given tombstone operation to the FSM.
+# TYPE consul_fsm_tombstone summary
+consul_fsm_tombstone{quantile="0.5"} NaN
+consul_fsm_tombstone{quantile="0.9"} NaN
+consul_fsm_tombstone{quantile="0.99"} NaN
+consul_fsm_tombstone_sum 0
+consul_fsm_tombstone_count 0
+# HELP consul_fsm_txn Measures the time it takes to apply the given transaction update to the FSM.
+# TYPE consul_fsm_txn summary
+consul_fsm_txn{quantile="0.5"} NaN
+consul_fsm_txn{quantile="0.9"} NaN
+consul_fsm_txn{quantile="0.99"} NaN
+consul_fsm_txn_sum 0
+consul_fsm_txn_count 0
+# HELP consul_grpc_client_connection_count Counts the number of new gRPC connections opened by the client agent to a Consul server.
+# TYPE consul_grpc_client_connection_count counter
+consul_grpc_client_connection_count 875
+# HELP consul_grpc_client_connections Measures the number of active gRPC connections open from the client agent to any Consul servers.
+# TYPE consul_grpc_client_connections gauge
+consul_grpc_client_connections 1
+# HELP consul_grpc_client_request_count Counts the number of gRPC requests made by the client agent to a Consul server.
+# TYPE consul_grpc_client_request_count counter
+consul_grpc_client_request_count 0
+# HELP consul_grpc_server_connection_count Counts the number of new gRPC connections received by the server.
+# TYPE consul_grpc_server_connection_count counter
+consul_grpc_server_connection_count 853
+# HELP consul_grpc_server_connections Measures the number of active gRPC connections open on the server.
+# TYPE consul_grpc_server_connections gauge
+consul_grpc_server_connections 1
+# HELP consul_grpc_server_request_count Counts the number of gRPC requests received by the server.
+# TYPE consul_grpc_server_request_count counter
+consul_grpc_server_request_count 0
+# HELP consul_grpc_server_stream_count Counts the number of new gRPC streams received by the server.
+# TYPE consul_grpc_server_stream_count counter
+consul_grpc_server_stream_count 0
+# HELP consul_grpc_server_streams Measures the number of active gRPC streams handled by the server.
+# TYPE consul_grpc_server_streams gauge
+consul_grpc_server_streams 0
+# HELP consul_intention_apply
+# TYPE consul_intention_apply summary
+consul_intention_apply{quantile="0.5"} NaN
+consul_intention_apply{quantile="0.9"} NaN
+consul_intention_apply{quantile="0.99"} NaN
+consul_intention_apply_sum 0
+consul_intention_apply_count 0
+# HELP consul_kvs_apply Measures the time it takes to complete an update to the KV store.
+# TYPE consul_kvs_apply summary
+consul_kvs_apply{quantile="0.5"} NaN
+consul_kvs_apply{quantile="0.9"} NaN
+consul_kvs_apply{quantile="0.99"} NaN
+consul_kvs_apply_sum 0
+consul_kvs_apply_count 0
+# HELP consul_leader_barrier Measures the time spent waiting for the raft barrier upon gaining leadership.
+# TYPE consul_leader_barrier summary
+consul_leader_barrier{quantile="0.5"} NaN
+consul_leader_barrier{quantile="0.9"} NaN
+consul_leader_barrier{quantile="0.99"} NaN
+consul_leader_barrier_sum 115364.21848773956
+consul_leader_barrier_count 1657
+# HELP consul_leader_reapTombstones Measures the time spent clearing tombstones.
+# TYPE consul_leader_reapTombstones summary
+consul_leader_reapTombstones{quantile="0.5"} NaN
+consul_leader_reapTombstones{quantile="0.9"} NaN
+consul_leader_reapTombstones{quantile="0.99"} NaN
+consul_leader_reapTombstones_sum 26.21475601196289
+consul_leader_reapTombstones_count 1
+# HELP consul_leader_reconcile Measures the time spent updating the raft store from the serf member information.
+# TYPE consul_leader_reconcile summary
+consul_leader_reconcile{quantile="0.5"} NaN
+consul_leader_reconcile{quantile="0.9"} NaN
+consul_leader_reconcile{quantile="0.99"} NaN
+consul_leader_reconcile_sum 543.0488127619028
+consul_leader_reconcile_count 1657
+# HELP consul_leader_reconcileMember Measures the time spent updating the raft store for a single serf member's information.
+# TYPE consul_leader_reconcileMember summary
+consul_leader_reconcileMember{quantile="0.5"} NaN
+consul_leader_reconcileMember{quantile="0.9"} NaN
+consul_leader_reconcileMember{quantile="0.99"} NaN
+consul_leader_reconcileMember_sum 511.33584634773433
+consul_leader_reconcileMember_count 4975
+# HELP consul_leader_replication_acl_policies_index Tracks the index of ACL policies in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_policies_index gauge
+consul_leader_replication_acl_policies_index 0
+# HELP consul_leader_replication_acl_policies_status Tracks the current health of ACL policy replication on the leader
+# TYPE consul_leader_replication_acl_policies_status gauge
+consul_leader_replication_acl_policies_status 0
+# HELP consul_leader_replication_acl_roles_index Tracks the index of ACL roles in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_roles_index gauge
+consul_leader_replication_acl_roles_index 0
+# HELP consul_leader_replication_acl_roles_status Tracks the current health of ACL role replication on the leader
+# TYPE consul_leader_replication_acl_roles_status gauge
+consul_leader_replication_acl_roles_status 0
+# HELP consul_leader_replication_acl_tokens_index Tracks the index of ACL tokens in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_tokens_index gauge
+consul_leader_replication_acl_tokens_index 0
+# HELP consul_leader_replication_acl_tokens_status Tracks the current health of ACL token replication on the leader
+# TYPE consul_leader_replication_acl_tokens_status gauge
+consul_leader_replication_acl_tokens_status 0
+# HELP consul_leader_replication_config_entries_index Tracks the index of config entries in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_config_entries_index gauge
+consul_leader_replication_config_entries_index 0
+# HELP consul_leader_replication_config_entries_status Tracks the current health of config entry replication on the leader
+# TYPE consul_leader_replication_config_entries_status gauge
+consul_leader_replication_config_entries_status 0
+# HELP consul_leader_replication_federation_state_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_federation_state_index gauge
+consul_leader_replication_federation_state_index 0
+# HELP consul_leader_replication_federation_state_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_federation_state_status gauge
+consul_leader_replication_federation_state_status 0
+# HELP consul_leader_replication_namespaces_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_namespaces_index gauge
+consul_leader_replication_namespaces_index 0
+# HELP consul_leader_replication_namespaces_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_namespaces_status gauge
+consul_leader_replication_namespaces_status 0
+# HELP consul_memberlist_gossip consul_memberlist_gossip
+# TYPE consul_memberlist_gossip summary
+consul_memberlist_gossip{network="lan",quantile="0.5"} 0.02304000034928322
+consul_memberlist_gossip{network="lan",quantile="0.9"} 0.03136000037193298
+consul_memberlist_gossip{network="lan",quantile="0.99"} 0.0756089985370636
+consul_memberlist_gossip_sum{network="lan"} 10881.414362509036
+consul_memberlist_gossip_count{network="lan"} 497026
+consul_memberlist_gossip{network="wan",quantile="0.5"} 0.018719999119639397
+consul_memberlist_gossip{network="wan",quantile="0.9"} 0.029740000143647194
+consul_memberlist_gossip{network="wan",quantile="0.99"} 0.048298001289367676
+consul_memberlist_gossip_sum{network="wan"} 4231.353692025063
+consul_memberlist_gossip_count{network="wan"} 198810
+# HELP consul_memberlist_probeNode consul_memberlist_probeNode
+# TYPE consul_memberlist_probeNode summary
+consul_memberlist_probeNode{network="lan",quantile="0.5"} 0.8122829794883728
+consul_memberlist_probeNode{network="lan",quantile="0.9"} 1.0762710571289062
+consul_memberlist_probeNode{network="lan",quantile="0.99"} 1.0762710571289062
+consul_memberlist_probeNode_sum{network="lan"} 79954.1767796278
+consul_memberlist_probeNode_count{network="lan"} 94283
+consul_memberlist_probeNode{network="wan",quantile="0.5"} 0.8124139904975891
+consul_memberlist_probeNode{network="wan",quantile="0.9"} 0.9564329981803894
+consul_memberlist_probeNode{network="wan",quantile="0.99"} 0.9564329981803894
+consul_memberlist_probeNode_sum{network="wan"} 17170.356712043285
+consul_memberlist_probeNode_count{network="wan"} 18817
+# HELP consul_memberlist_pushPullNode consul_memberlist_pushPullNode
+# TYPE consul_memberlist_pushPullNode summary
+consul_memberlist_pushPullNode{network="lan",quantile="0.5"} NaN
+consul_memberlist_pushPullNode{network="lan",quantile="0.9"} NaN
+consul_memberlist_pushPullNode{network="lan",quantile="0.99"} NaN
+consul_memberlist_pushPullNode_sum{network="lan"} 6319.592049598694
+consul_memberlist_pushPullNode_count{network="lan"} 3316
+consul_memberlist_pushPullNode{network="wan",quantile="0.5"} NaN
+consul_memberlist_pushPullNode{network="wan",quantile="0.9"} NaN
+consul_memberlist_pushPullNode{network="wan",quantile="0.99"} NaN
+consul_memberlist_pushPullNode_sum{network="wan"} 3150.5957354307175
+consul_memberlist_pushPullNode_count{network="wan"} 1657
+# HELP consul_memberlist_tcp_accept consul_memberlist_tcp_accept
+# TYPE consul_memberlist_tcp_accept counter
+consul_memberlist_tcp_accept{network="lan"} 3327
+consul_memberlist_tcp_accept{network="wan"} 1661
+# HELP consul_memberlist_tcp_connect consul_memberlist_tcp_connect
+# TYPE consul_memberlist_tcp_connect counter
+consul_memberlist_tcp_connect{network="lan"} 3316
+consul_memberlist_tcp_connect{network="wan"} 1657
+# HELP consul_memberlist_tcp_sent consul_memberlist_tcp_sent
+# TYPE consul_memberlist_tcp_sent counter
+consul_memberlist_tcp_sent{network="lan"} 5.728236e+06
+consul_memberlist_tcp_sent{network="wan"} 2.671365e+06
+# HELP consul_memberlist_udp_received consul_memberlist_udp_received
+# TYPE consul_memberlist_udp_received counter
+consul_memberlist_udp_received{network="lan"} 2.7072233e+07
+consul_memberlist_udp_received{network="wan"} 5.805281e+06
+# HELP consul_memberlist_udp_sent consul_memberlist_udp_sent
+# TYPE consul_memberlist_udp_sent counter
+consul_memberlist_udp_sent{network="lan"} 2.7064743e+07
+consul_memberlist_udp_sent{network="wan"} 5.806099e+06
+# HELP consul_mesh_active_root_ca_expiry Seconds until the service mesh root certificate expires. Updated every hour
+# TYPE consul_mesh_active_root_ca_expiry gauge
+consul_mesh_active_root_ca_expiry NaN
+# HELP consul_mesh_active_signing_ca_expiry Seconds until the service mesh signing certificate expires. Updated every hour
+# TYPE consul_mesh_active_signing_ca_expiry gauge
+consul_mesh_active_signing_ca_expiry NaN
+# HELP consul_prepared_query_apply Measures the time it takes to apply a prepared query update.
+# TYPE consul_prepared_query_apply summary
+consul_prepared_query_apply{quantile="0.5"} NaN
+consul_prepared_query_apply{quantile="0.9"} NaN
+consul_prepared_query_apply{quantile="0.99"} NaN
+consul_prepared_query_apply_sum 0
+consul_prepared_query_apply_count 0
+# HELP consul_prepared_query_execute Measures the time it takes to process a prepared query execute request.
+# TYPE consul_prepared_query_execute summary
+consul_prepared_query_execute{quantile="0.5"} NaN
+consul_prepared_query_execute{quantile="0.9"} NaN
+consul_prepared_query_execute{quantile="0.99"} NaN
+consul_prepared_query_execute_sum 0
+consul_prepared_query_execute_count 0
+# HELP consul_prepared_query_execute_remote Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter.
+# TYPE consul_prepared_query_execute_remote summary
+consul_prepared_query_execute_remote{quantile="0.5"} NaN
+consul_prepared_query_execute_remote{quantile="0.9"} NaN
+consul_prepared_query_execute_remote{quantile="0.99"} NaN
+consul_prepared_query_execute_remote_sum 0
+consul_prepared_query_execute_remote_count 0
+# HELP consul_prepared_query_explain Measures the time it takes to process a prepared query explain request.
+# TYPE consul_prepared_query_explain summary
+consul_prepared_query_explain{quantile="0.5"} NaN
+consul_prepared_query_explain{quantile="0.9"} NaN
+consul_prepared_query_explain{quantile="0.99"} NaN
+consul_prepared_query_explain_sum 0
+consul_prepared_query_explain_count 0
+# HELP consul_raft_applied_index Represents the raft applied index.
+# TYPE consul_raft_applied_index gauge
+consul_raft_applied_index 452955
+# HELP consul_raft_apply This counts the number of Raft transactions occurring over the interval.
+# TYPE consul_raft_apply counter
+consul_raft_apply 10681
+# HELP consul_raft_barrier consul_raft_barrier
+# TYPE consul_raft_barrier counter
+consul_raft_barrier 1657
+# HELP consul_raft_boltdb_freePageBytes consul_raft_boltdb_freePageBytes
+# TYPE consul_raft_boltdb_freePageBytes gauge
+consul_raft_boltdb_freePageBytes 5.758976e+06
+# HELP consul_raft_boltdb_freelistBytes consul_raft_boltdb_freelistBytes
+# TYPE consul_raft_boltdb_freelistBytes gauge
+consul_raft_boltdb_freelistBytes 11264
+# HELP consul_raft_boltdb_getLog consul_raft_boltdb_getLog
+# TYPE consul_raft_boltdb_getLog summary
+consul_raft_boltdb_getLog{quantile="0.5"} 0.030570000410079956
+consul_raft_boltdb_getLog{quantile="0.9"} 0.030570000410079956
+consul_raft_boltdb_getLog{quantile="0.99"} 0.030570000410079956
+consul_raft_boltdb_getLog_sum 630.6968591569457
+consul_raft_boltdb_getLog_count 39046
+# HELP consul_raft_boltdb_logBatchSize consul_raft_boltdb_logBatchSize
+# TYPE consul_raft_boltdb_logBatchSize summary
+consul_raft_boltdb_logBatchSize{quantile="0.5"} 243
+consul_raft_boltdb_logBatchSize{quantile="0.9"} 243
+consul_raft_boltdb_logBatchSize{quantile="0.99"} 243
+consul_raft_boltdb_logBatchSize_sum 3.567357e+06
+consul_raft_boltdb_logBatchSize_count 12360
+# HELP consul_raft_boltdb_logSize consul_raft_boltdb_logSize
+# TYPE consul_raft_boltdb_logSize summary
+consul_raft_boltdb_logSize{quantile="0.5"} 243
+consul_raft_boltdb_logSize{quantile="0.9"} 243
+consul_raft_boltdb_logSize{quantile="0.99"} 243
+consul_raft_boltdb_logSize_sum 3.567357e+06
+consul_raft_boltdb_logSize_count 12362
+# HELP consul_raft_boltdb_logsPerBatch consul_raft_boltdb_logsPerBatch
+# TYPE consul_raft_boltdb_logsPerBatch summary
+consul_raft_boltdb_logsPerBatch{quantile="0.5"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.9"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.99"} 1
+consul_raft_boltdb_logsPerBatch_sum 12362
+consul_raft_boltdb_logsPerBatch_count 12360
+# HELP consul_raft_boltdb_numFreePages consul_raft_boltdb_numFreePages
+# TYPE consul_raft_boltdb_numFreePages gauge
+consul_raft_boltdb_numFreePages 1399
+# HELP consul_raft_boltdb_numPendingPages consul_raft_boltdb_numPendingPages
+# TYPE consul_raft_boltdb_numPendingPages gauge
+consul_raft_boltdb_numPendingPages 7
+# HELP consul_raft_boltdb_openReadTxn consul_raft_boltdb_openReadTxn
+# TYPE consul_raft_boltdb_openReadTxn gauge
+consul_raft_boltdb_openReadTxn 0
+# HELP consul_raft_boltdb_storeLogs consul_raft_boltdb_storeLogs
+# TYPE consul_raft_boltdb_storeLogs summary
+consul_raft_boltdb_storeLogs{quantile="0.5"} 13.176624298095703
+consul_raft_boltdb_storeLogs{quantile="0.9"} 13.176624298095703
+consul_raft_boltdb_storeLogs{quantile="0.99"} 13.176624298095703
+consul_raft_boltdb_storeLogs_sum 651888.0279793739
+consul_raft_boltdb_storeLogs_count 12360
+# HELP consul_raft_boltdb_totalReadTxn consul_raft_boltdb_totalReadTxn
+# TYPE consul_raft_boltdb_totalReadTxn counter
+consul_raft_boltdb_totalReadTxn 51200
+# HELP consul_raft_boltdb_txstats_cursorCount consul_raft_boltdb_txstats_cursorCount
+# TYPE consul_raft_boltdb_txstats_cursorCount counter
+consul_raft_boltdb_txstats_cursorCount 139498
+# HELP consul_raft_boltdb_txstats_nodeCount consul_raft_boltdb_txstats_nodeCount
+# TYPE consul_raft_boltdb_txstats_nodeCount counter
+consul_raft_boltdb_txstats_nodeCount 52400
+# HELP consul_raft_boltdb_txstats_nodeDeref consul_raft_boltdb_txstats_nodeDeref
+# TYPE consul_raft_boltdb_txstats_nodeDeref counter
+consul_raft_boltdb_txstats_nodeDeref 0
+# HELP consul_raft_boltdb_txstats_pageAlloc consul_raft_boltdb_txstats_pageAlloc
+# TYPE consul_raft_boltdb_txstats_pageAlloc gauge
+consul_raft_boltdb_txstats_pageAlloc 4.38874112e+08
+# HELP consul_raft_boltdb_txstats_pageCount consul_raft_boltdb_txstats_pageCount
+# TYPE consul_raft_boltdb_txstats_pageCount gauge
+consul_raft_boltdb_txstats_pageCount 107147
+# HELP consul_raft_boltdb_txstats_rebalance consul_raft_boltdb_txstats_rebalance
+# TYPE consul_raft_boltdb_txstats_rebalance counter
+consul_raft_boltdb_txstats_rebalance 5869
+# HELP consul_raft_boltdb_txstats_rebalanceTime consul_raft_boltdb_txstats_rebalanceTime
+# TYPE consul_raft_boltdb_txstats_rebalanceTime summary
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.9"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.99"} 0
+consul_raft_boltdb_txstats_rebalanceTime_sum 3.391055107116699
+consul_raft_boltdb_txstats_rebalanceTime_count 19882
+# HELP consul_raft_boltdb_txstats_spill consul_raft_boltdb_txstats_spill
+# TYPE consul_raft_boltdb_txstats_spill counter
+consul_raft_boltdb_txstats_spill 51598
+# HELP consul_raft_boltdb_txstats_spillTime consul_raft_boltdb_txstats_spillTime
+# TYPE consul_raft_boltdb_txstats_spillTime summary
+consul_raft_boltdb_txstats_spillTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_spillTime{quantile="0.9"} 0.019670000299811363
+consul_raft_boltdb_txstats_spillTime{quantile="0.99"} 0.019670000299811363
+consul_raft_boltdb_txstats_spillTime_sum 372.6177089449484
+consul_raft_boltdb_txstats_spillTime_count 19882
+# HELP consul_raft_boltdb_txstats_split consul_raft_boltdb_txstats_split
+# TYPE consul_raft_boltdb_txstats_split counter
+consul_raft_boltdb_txstats_split 2154
+# HELP consul_raft_boltdb_txstats_write consul_raft_boltdb_txstats_write
+# TYPE consul_raft_boltdb_txstats_write counter
+consul_raft_boltdb_txstats_write 76328
+# HELP consul_raft_boltdb_txstats_writeTime consul_raft_boltdb_txstats_writeTime
+# TYPE consul_raft_boltdb_txstats_writeTime summary
+consul_raft_boltdb_txstats_writeTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_writeTime{quantile="0.9"} 13.529101371765137
+consul_raft_boltdb_txstats_writeTime{quantile="0.99"} 13.529101371765137
+consul_raft_boltdb_txstats_writeTime_sum 649086.0377488136
+consul_raft_boltdb_txstats_writeTime_count 19882
+# HELP consul_raft_boltdb_writeCapacity consul_raft_boltdb_writeCapacity
+# TYPE consul_raft_boltdb_writeCapacity summary
+consul_raft_boltdb_writeCapacity{quantile="0.5"} 76.11837005615234
+consul_raft_boltdb_writeCapacity{quantile="0.9"} 76.11837005615234
+consul_raft_boltdb_writeCapacity{quantile="0.99"} 76.11837005615234
+consul_raft_boltdb_writeCapacity_sum 1.1691283255012557e+06
+consul_raft_boltdb_writeCapacity_count 12360
+# HELP consul_raft_commitNumLogs consul_raft_commitNumLogs
+# TYPE consul_raft_commitNumLogs gauge
+consul_raft_commitNumLogs 1
+# HELP consul_raft_commitTime This measures the time it takes to commit a new entry to the Raft log on the leader.
+# TYPE consul_raft_commitTime summary
+consul_raft_commitTime{quantile="0.5"} 41.146488189697266
+consul_raft_commitTime{quantile="0.9"} 41.146488189697266
+consul_raft_commitTime{quantile="0.99"} 41.146488189697266
+consul_raft_commitTime_sum 955781.14939785
+consul_raft_commitTime_count 12345
+# HELP consul_raft_fsm_apply consul_raft_fsm_apply
+# TYPE consul_raft_fsm_apply summary
+consul_raft_fsm_apply{quantile="0.5"} 0.9867730140686035
+consul_raft_fsm_apply{quantile="0.9"} 0.9867730140686035
+consul_raft_fsm_apply{quantile="0.99"} 0.9867730140686035
+consul_raft_fsm_apply_sum 2157.036477720365
+consul_raft_fsm_apply_count 23257
+# HELP consul_raft_fsm_enqueue consul_raft_fsm_enqueue
+# TYPE consul_raft_fsm_enqueue summary
+consul_raft_fsm_enqueue{quantile="0.5"} 0.01827000081539154
+consul_raft_fsm_enqueue{quantile="0.9"} 0.01827000081539154
+consul_raft_fsm_enqueue{quantile="0.99"} 0.01827000081539154
+consul_raft_fsm_enqueue_sum 312.4720518933609
+consul_raft_fsm_enqueue_count 12345
+# HELP consul_raft_fsm_lastRestoreDuration This measures how long the last FSM restore (from disk or leader) took.
+# TYPE consul_raft_fsm_lastRestoreDuration gauge
+consul_raft_fsm_lastRestoreDuration 2
+# HELP consul_raft_last_index Represents the raft last index.
+# TYPE consul_raft_last_index gauge
+consul_raft_last_index 452955
+# HELP consul_raft_leader_dispatchLog consul_raft_leader_dispatchLog
+# TYPE consul_raft_leader_dispatchLog summary
+consul_raft_leader_dispatchLog{quantile="0.5"} 13.253751754760742
+consul_raft_leader_dispatchLog{quantile="0.9"} 13.253751754760742
+consul_raft_leader_dispatchLog{quantile="0.99"} 13.253751754760742
+consul_raft_leader_dispatchLog_sum 652275.1332504749
+consul_raft_leader_dispatchLog_count 12345
+# HELP consul_raft_leader_dispatchNumLogs consul_raft_leader_dispatchNumLogs
+# TYPE consul_raft_leader_dispatchNumLogs gauge
+consul_raft_leader_dispatchNumLogs 1
+# HELP consul_raft_leader_lastContact Measures the time since the leader was last able to contact the follower nodes when checking its leader lease.
+# TYPE consul_raft_leader_lastContact summary
+consul_raft_leader_lastContact{quantile="0.5"} 33
+consul_raft_leader_lastContact{quantile="0.9"} 68
+consul_raft_leader_lastContact{quantile="0.99"} 68
+consul_raft_leader_lastContact_sum 3.0669e+06
+consul_raft_leader_lastContact_count 80917
+# HELP consul_raft_leader_oldestLogAge This measures how old the oldest log in the leader's log store is.
+# TYPE consul_raft_leader_oldestLogAge gauge
+consul_raft_leader_oldestLogAge 1.66046464e+08
+# HELP consul_raft_replication_appendEntries_logs consul_raft_replication_appendEntries_logs
+# TYPE consul_raft_replication_appendEntries_logs counter
+consul_raft_replication_appendEntries_logs{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 12346
+consul_raft_replication_appendEntries_logs{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 12346
+# HELP consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae
+# TYPE consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae counter
+consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae 12346
+# HELP consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7
+# TYPE consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 counter
+consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 12346
+# HELP consul_raft_replication_appendEntries_rpc consul_raft_replication_appendEntries_rpc
+# TYPE consul_raft_replication_appendEntries_rpc summary
+consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.5"} 0.7382550239562988
+consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.9"} 1.030032992362976
+consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.99"} 3.7775509357452393
+consul_raft_replication_appendEntries_rpc_sum{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 3.243855408252254e+06
+consul_raft_replication_appendEntries_rpc_count{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 1.317473e+06
+consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.5"} 0.6895250082015991
+consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.9"} 0.9500619769096375
+consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.99"} 1.682297945022583
+consul_raft_replication_appendEntries_rpc_sum{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 3.2418369520926476e+06
+consul_raft_replication_appendEntries_rpc_count{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 1.317366e+06
+# HELP consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae
+# TYPE consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae summary
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.5"} 0.7751650214195251
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.9"} 1.1017019748687744
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.99"} 3.81791090965271
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae_sum 3.299558741098821e+06
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae_count 1.317473e+06
+# HELP consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7
+# TYPE consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 summary
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.5"} 0.7417550086975098
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.9"} 1.0077530145645142
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.99"} 1.726017951965332
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7_sum 3.2977981372908056e+06
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7_count 1.317366e+06
+# HELP consul_raft_replication_heartbeat consul_raft_replication_heartbeat
+# TYPE consul_raft_replication_heartbeat summary
+consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.5"} 0.5587760210037231
+consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.9"} 1.304479956626892
+consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.99"} 1.4234989881515503
+consul_raft_replication_heartbeat_sum{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 103812.21699500084
+consul_raft_replication_heartbeat_count{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 132454
+consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.5"} 0.5443660020828247
+consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.9"} 0.9280639886856079
+consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.99"} 1.0106929540634155
+consul_raft_replication_heartbeat_sum{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 96512.05100156367
+consul_raft_replication_heartbeat_count{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 132488
+# HELP consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae
+# TYPE consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae summary
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.5"} 0.5922750234603882
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.9"} 1.3319799900054932
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.99"} 1.454179048538208
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae_sum 108115.97687250376
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae_count 132454
+# HELP consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7
+# TYPE consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 summary
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.5"} 0.5915359854698181
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.9"} 0.9649440050125122
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.99"} 1.0576729774475098
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7_sum 100780.49696727097
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7_count 132488
+# HELP consul_raft_rpc_installSnapshot Measures the time it takes the raft leader to install a snapshot on a follower that is catching up after being down or has just joined the cluster.
+# TYPE consul_raft_rpc_installSnapshot summary
+consul_raft_rpc_installSnapshot{quantile="0.5"} NaN
+consul_raft_rpc_installSnapshot{quantile="0.9"} NaN
+consul_raft_rpc_installSnapshot{quantile="0.99"} NaN
+consul_raft_rpc_installSnapshot_sum 0
+consul_raft_rpc_installSnapshot_count 0
+# HELP consul_raft_snapshot_persist Measures the time it takes raft to write a new snapshot to disk.
+# TYPE consul_raft_snapshot_persist summary
+consul_raft_snapshot_persist{quantile="0.5"} NaN
+consul_raft_snapshot_persist{quantile="0.9"} NaN
+consul_raft_snapshot_persist{quantile="0.99"} NaN
+consul_raft_snapshot_persist_sum 0.7742639780044556
+consul_raft_snapshot_persist_count 1
+# HELP consul_raft_state_candidate This increments whenever a Consul server starts an election.
+# TYPE consul_raft_state_candidate counter
+consul_raft_state_candidate 1
+# HELP consul_raft_state_leader This increments whenever a Consul server becomes a leader.
+# TYPE consul_raft_state_leader counter
+consul_raft_state_leader 1
+# HELP consul_raft_thread_fsm_saturation consul_raft_thread_fsm_saturation
+# TYPE consul_raft_thread_fsm_saturation summary
+consul_raft_thread_fsm_saturation{quantile="0.5"} 0
+consul_raft_thread_fsm_saturation{quantile="0.9"} 0
+consul_raft_thread_fsm_saturation{quantile="0.99"} 0
+consul_raft_thread_fsm_saturation_sum 0.09000000357627869
+consul_raft_thread_fsm_saturation_count 11923
+# HELP consul_raft_thread_main_saturation consul_raft_thread_main_saturation
+# TYPE consul_raft_thread_main_saturation summary
+consul_raft_thread_main_saturation{quantile="0.5"} 0
+consul_raft_thread_main_saturation{quantile="0.9"} 0
+consul_raft_thread_main_saturation{quantile="0.99"} 0
+consul_raft_thread_main_saturation_sum 205.40999860689044
+consul_raft_thread_main_saturation_count 43067
+# HELP consul_raft_verify_leader consul_raft_verify_leader
+# TYPE consul_raft_verify_leader counter
+consul_raft_verify_leader 364
+# HELP consul_rpc_accept_conn Increments when a server accepts an RPC connection.
+# TYPE consul_rpc_accept_conn counter
+consul_rpc_accept_conn 864
+# HELP consul_rpc_consistentRead Measures the time spent confirming that a consistent read can be performed.
+# TYPE consul_rpc_consistentRead summary
+consul_rpc_consistentRead{quantile="0.5"} NaN
+consul_rpc_consistentRead{quantile="0.9"} NaN
+consul_rpc_consistentRead{quantile="0.99"} NaN
+consul_rpc_consistentRead_sum 293.6328800059855
+consul_rpc_consistentRead_count 364
+# HELP consul_rpc_cross_dc Increments when a server sends a (potentially blocking) cross datacenter RPC query.
+# TYPE consul_rpc_cross_dc counter
+consul_rpc_cross_dc 0
+# HELP consul_rpc_queries_blocking Shows the current number of in-flight blocking queries the server is handling.
+# TYPE consul_rpc_queries_blocking gauge
+consul_rpc_queries_blocking 1
+# HELP consul_rpc_query Increments when a server receives a read request, indicating the rate of new read queries.
+# TYPE consul_rpc_query counter
+consul_rpc_query 2559
+# HELP consul_rpc_raft_handoff Increments when a server accepts a Raft-related RPC connection.
+# TYPE consul_rpc_raft_handoff counter
+consul_rpc_raft_handoff 2
+# HELP consul_rpc_request Increments when a server receives a Consul-related RPC request.
+# TYPE consul_rpc_request counter
+consul_rpc_request 159034
+# HELP consul_rpc_request_error Increments when a server returns an error from an RPC request.
+# TYPE consul_rpc_request_error counter
+consul_rpc_request_error 0
+# HELP consul_runtime_alloc_bytes consul_runtime_alloc_bytes
+# TYPE consul_runtime_alloc_bytes gauge
+consul_runtime_alloc_bytes 5.3065368e+07
+# HELP consul_runtime_free_count consul_runtime_free_count
+# TYPE consul_runtime_free_count gauge
+consul_runtime_free_count 7.33623104e+08
+# HELP consul_runtime_gc_pause_ns consul_runtime_gc_pause_ns
+# TYPE consul_runtime_gc_pause_ns summary
+consul_runtime_gc_pause_ns{quantile="0.5"} NaN
+consul_runtime_gc_pause_ns{quantile="0.9"} NaN
+consul_runtime_gc_pause_ns{quantile="0.99"} NaN
+consul_runtime_gc_pause_ns_sum 1.372001275e+09
+consul_runtime_gc_pause_ns_count 3761
+# HELP consul_runtime_heap_objects consul_runtime_heap_objects
+# TYPE consul_runtime_heap_objects gauge
+consul_runtime_heap_objects 227577
+# HELP consul_runtime_malloc_count consul_runtime_malloc_count
+# TYPE consul_runtime_malloc_count gauge
+consul_runtime_malloc_count 7.33850688e+08
+# HELP consul_runtime_num_goroutines consul_runtime_num_goroutines
+# TYPE consul_runtime_num_goroutines gauge
+consul_runtime_num_goroutines 132
+# HELP consul_runtime_sys_bytes consul_runtime_sys_bytes
+# TYPE consul_runtime_sys_bytes gauge
+consul_runtime_sys_bytes 8.495516e+07
+# HELP consul_runtime_total_gc_pause_ns consul_runtime_total_gc_pause_ns
+# TYPE consul_runtime_total_gc_pause_ns gauge
+consul_runtime_total_gc_pause_ns 1.37200128e+09
+# HELP consul_runtime_total_gc_runs consul_runtime_total_gc_runs
+# TYPE consul_runtime_total_gc_runs gauge
+consul_runtime_total_gc_runs 3761
+# HELP consul_serf_coordinate_adjustment_ms consul_serf_coordinate_adjustment_ms
+# TYPE consul_serf_coordinate_adjustment_ms summary
+consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.5"} 0.1953909993171692
+consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.9"} 0.2344750016927719
+consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.99"} 0.2344750016927719
+consul_serf_coordinate_adjustment_ms_sum{network="lan"} 20281.621190846952
+consul_serf_coordinate_adjustment_ms_count{network="lan"} 94283
+consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.5"} 0.19766099750995636
+consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.9"} 0.20183999836444855
+consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.99"} 0.20183999836444855
+consul_serf_coordinate_adjustment_ms_sum{network="wan"} 4030.8057950612783
+consul_serf_coordinate_adjustment_ms_count{network="wan"} 18817
+# HELP consul_serf_queue_Event consul_serf_queue_Event
+# TYPE consul_serf_queue_Event summary
+consul_serf_queue_Event{network="lan",quantile="0.5"} NaN
+consul_serf_queue_Event{network="lan",quantile="0.9"} NaN
+consul_serf_queue_Event{network="lan",quantile="0.99"} NaN
+consul_serf_queue_Event_sum{network="lan"} 0
+consul_serf_queue_Event_count{network="lan"} 3313
+consul_serf_queue_Event{network="wan",quantile="0.5"} NaN
+consul_serf_queue_Event{network="wan",quantile="0.9"} NaN
+consul_serf_queue_Event{network="wan",quantile="0.99"} NaN
+consul_serf_queue_Event_sum{network="wan"} 0
+consul_serf_queue_Event_count{network="wan"} 3313
+# HELP consul_serf_queue_Intent consul_serf_queue_Intent
+# TYPE consul_serf_queue_Intent summary
+consul_serf_queue_Intent{network="lan",quantile="0.5"} NaN
+consul_serf_queue_Intent{network="lan",quantile="0.9"} NaN
+consul_serf_queue_Intent{network="lan",quantile="0.99"} NaN
+consul_serf_queue_Intent_sum{network="lan"} 0
+consul_serf_queue_Intent_count{network="lan"} 3313
+consul_serf_queue_Intent{network="wan",quantile="0.5"} NaN
+consul_serf_queue_Intent{network="wan",quantile="0.9"} NaN
+consul_serf_queue_Intent{network="wan",quantile="0.99"} NaN
+consul_serf_queue_Intent_sum{network="wan"} 0
+consul_serf_queue_Intent_count{network="wan"} 3313
+# HELP consul_serf_queue_Query consul_serf_queue_Query
+# TYPE consul_serf_queue_Query summary
+consul_serf_queue_Query{network="lan",quantile="0.5"} NaN
+consul_serf_queue_Query{network="lan",quantile="0.9"} NaN
+consul_serf_queue_Query{network="lan",quantile="0.99"} NaN
+consul_serf_queue_Query_sum{network="lan"} 0
+consul_serf_queue_Query_count{network="lan"} 3313
+consul_serf_queue_Query{network="wan",quantile="0.5"} NaN
+consul_serf_queue_Query{network="wan",quantile="0.9"} NaN
+consul_serf_queue_Query{network="wan",quantile="0.99"} NaN
+consul_serf_queue_Query_sum{network="wan"} 0
+consul_serf_queue_Query_count{network="wan"} 3313
+# HELP consul_server_isLeader Tracks if the server is a leader.
+# TYPE consul_server_isLeader gauge
+consul_server_isLeader 1
+# HELP consul_session_apply Measures the time spent applying a session update.
+# TYPE consul_session_apply summary
+consul_session_apply{quantile="0.5"} NaN
+consul_session_apply{quantile="0.9"} NaN
+consul_session_apply{quantile="0.99"} NaN
+consul_session_apply_sum 0
+consul_session_apply_count 0
+# HELP consul_session_renew Measures the time spent renewing a session.
+# TYPE consul_session_renew summary
+consul_session_renew{quantile="0.5"} NaN
+consul_session_renew{quantile="0.9"} NaN
+consul_session_renew{quantile="0.99"} NaN
+consul_session_renew_sum 0
+consul_session_renew_count 0
+# HELP consul_session_ttl_active Tracks the active number of sessions being tracked.
+# TYPE consul_session_ttl_active gauge
+consul_session_ttl_active 0
+# HELP consul_session_ttl_invalidate Measures the time spent invalidating an expired session.
+# TYPE consul_session_ttl_invalidate summary
+consul_session_ttl_invalidate{quantile="0.5"} NaN
+consul_session_ttl_invalidate{quantile="0.9"} NaN
+consul_session_ttl_invalidate{quantile="0.99"} NaN
+consul_session_ttl_invalidate_sum 0
+consul_session_ttl_invalidate_count 0
+# HELP consul_txn_apply Measures the time spent applying a transaction operation.
+# TYPE consul_txn_apply summary
+consul_txn_apply{quantile="0.5"} NaN
+consul_txn_apply{quantile="0.9"} NaN
+consul_txn_apply{quantile="0.99"} NaN
+consul_txn_apply_sum 0
+consul_txn_apply_count 0
+# HELP consul_txn_read Measures the time spent returning a read transaction.
+# TYPE consul_txn_read summary
+consul_txn_read{quantile="0.5"} NaN
+consul_txn_read{quantile="0.9"} NaN
+consul_txn_read{quantile="0.99"} NaN
+consul_txn_read_sum 0
+consul_txn_read_count 0
+# HELP consul_version Represents the Consul version.
+# TYPE consul_version gauge
+consul_version 0
+# HELP consul_xds_server_streams Measures the number of active xDS streams handled by the server split by protocol version.
+# TYPE consul_xds_server_streams gauge
+consul_xds_server_streams 0
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 4.9921e-05
+go_gc_duration_seconds{quantile="0.25"} 0.00010804
+go_gc_duration_seconds{quantile="0.5"} 0.00016214
+go_gc_duration_seconds{quantile="0.75"} 0.000549655
+go_gc_duration_seconds{quantile="1"} 0.003364656
+go_gc_duration_seconds_sum 1.3724735909999999
+go_gc_duration_seconds_count 3762
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 137
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.18.1"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 2.6647944e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 9.1199946672e+10
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 2.497531e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 7.33814669e+08
+# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
+# TYPE go_memstats_gc_cpu_fraction gauge
+go_memstats_gc_cpu_fraction 4.2228338057402265e-05
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 8.927624e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 2.6647944e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 3.3161216e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 3.620864e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 49363
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 9.936896e+06
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 6.9369856e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.6713685789559276e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 7.33864032e+08
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 9600
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 15600
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 413168
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 620160
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 5.3447888e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 1.591077e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 1.933312e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 1.933312e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 8.495516e+07
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 15
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 3612.93
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1024
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 35
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 1.53645056e+08
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.67126917263e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 9.18421504e+08
+# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
+# TYPE process_virtual_memory_max_bytes gauge
+process_virtual_memory_max_bytes -1
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt
new file mode 100644
index 000000000..a5df1d586
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt
@@ -0,0 +1,1509 @@
+# HELP consul_acl_ResolveToken This measures the time it takes to resolve an ACL token.
+# TYPE consul_acl_ResolveToken summary
+consul_acl_ResolveToken{quantile="0.5"} NaN
+consul_acl_ResolveToken{quantile="0.9"} NaN
+consul_acl_ResolveToken{quantile="0.99"} NaN
+consul_acl_ResolveToken_sum 0
+consul_acl_ResolveToken_count 0
+# HELP consul_acl_authmethod_delete
+# TYPE consul_acl_authmethod_delete summary
+consul_acl_authmethod_delete{quantile="0.5"} NaN
+consul_acl_authmethod_delete{quantile="0.9"} NaN
+consul_acl_authmethod_delete{quantile="0.99"} NaN
+consul_acl_authmethod_delete_sum 0
+consul_acl_authmethod_delete_count 0
+# HELP consul_acl_authmethod_upsert
+# TYPE consul_acl_authmethod_upsert summary
+consul_acl_authmethod_upsert{quantile="0.5"} NaN
+consul_acl_authmethod_upsert{quantile="0.9"} NaN
+consul_acl_authmethod_upsert{quantile="0.99"} NaN
+consul_acl_authmethod_upsert_sum 0
+consul_acl_authmethod_upsert_count 0
+# HELP consul_acl_bindingrule_delete
+# TYPE consul_acl_bindingrule_delete summary
+consul_acl_bindingrule_delete{quantile="0.5"} NaN
+consul_acl_bindingrule_delete{quantile="0.9"} NaN
+consul_acl_bindingrule_delete{quantile="0.99"} NaN
+consul_acl_bindingrule_delete_sum 0
+consul_acl_bindingrule_delete_count 0
+# HELP consul_acl_bindingrule_upsert
+# TYPE consul_acl_bindingrule_upsert summary
+consul_acl_bindingrule_upsert{quantile="0.5"} NaN
+consul_acl_bindingrule_upsert{quantile="0.9"} NaN
+consul_acl_bindingrule_upsert{quantile="0.99"} NaN
+consul_acl_bindingrule_upsert_sum 0
+consul_acl_bindingrule_upsert_count 0
+# HELP consul_acl_blocked_check_deregistration Increments whenever a deregistration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_deregistration counter
+consul_acl_blocked_check_deregistration 0
+# HELP consul_acl_blocked_check_registration Increments whenever a registration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_registration counter
+consul_acl_blocked_check_registration 0
+# HELP consul_acl_blocked_node_registration Increments whenever a registration fails for a node (blocked by an ACL)
+# TYPE consul_acl_blocked_node_registration counter
+consul_acl_blocked_node_registration 0
+# HELP consul_acl_blocked_service_deregistration Increments whenever a deregistration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_deregistration counter
+consul_acl_blocked_service_deregistration 0
+# HELP consul_acl_blocked_service_registration Increments whenever a registration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_registration counter
+consul_acl_blocked_service_registration 0
+# HELP consul_acl_login
+# TYPE consul_acl_login summary
+consul_acl_login{quantile="0.5"} NaN
+consul_acl_login{quantile="0.9"} NaN
+consul_acl_login{quantile="0.99"} NaN
+consul_acl_login_sum 0
+consul_acl_login_count 0
+# HELP consul_acl_logout
+# TYPE consul_acl_logout summary
+consul_acl_logout{quantile="0.5"} NaN
+consul_acl_logout{quantile="0.9"} NaN
+consul_acl_logout{quantile="0.99"} NaN
+consul_acl_logout_sum 0
+consul_acl_logout_count 0
+# HELP consul_acl_policy_delete
+# TYPE consul_acl_policy_delete summary
+consul_acl_policy_delete{quantile="0.5"} NaN
+consul_acl_policy_delete{quantile="0.9"} NaN
+consul_acl_policy_delete{quantile="0.99"} NaN
+consul_acl_policy_delete_sum 0
+consul_acl_policy_delete_count 0
+# HELP consul_acl_policy_upsert
+# TYPE consul_acl_policy_upsert summary
+consul_acl_policy_upsert{quantile="0.5"} NaN
+consul_acl_policy_upsert{quantile="0.9"} NaN
+consul_acl_policy_upsert{quantile="0.99"} NaN
+consul_acl_policy_upsert_sum 0
+consul_acl_policy_upsert_count 0
+# HELP consul_acl_role_delete
+# TYPE consul_acl_role_delete summary
+consul_acl_role_delete{quantile="0.5"} NaN
+consul_acl_role_delete{quantile="0.9"} NaN
+consul_acl_role_delete{quantile="0.99"} NaN
+consul_acl_role_delete_sum 0
+consul_acl_role_delete_count 0
+# HELP consul_acl_role_upsert
+# TYPE consul_acl_role_upsert summary
+consul_acl_role_upsert{quantile="0.5"} NaN
+consul_acl_role_upsert{quantile="0.9"} NaN
+consul_acl_role_upsert{quantile="0.99"} NaN
+consul_acl_role_upsert_sum 0
+consul_acl_role_upsert_count 0
+# HELP consul_acl_token_cache_hit Increments if Consul is able to resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_hit counter
+consul_acl_token_cache_hit 0
+# HELP consul_acl_token_cache_miss Increments if Consul cannot resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_miss counter
+consul_acl_token_cache_miss 0
+# HELP consul_acl_token_clone
+# TYPE consul_acl_token_clone summary
+consul_acl_token_clone{quantile="0.5"} NaN
+consul_acl_token_clone{quantile="0.9"} NaN
+consul_acl_token_clone{quantile="0.99"} NaN
+consul_acl_token_clone_sum 0
+consul_acl_token_clone_count 0
+# HELP consul_acl_token_delete
+# TYPE consul_acl_token_delete summary
+consul_acl_token_delete{quantile="0.5"} NaN
+consul_acl_token_delete{quantile="0.9"} NaN
+consul_acl_token_delete{quantile="0.99"} NaN
+consul_acl_token_delete_sum 0
+consul_acl_token_delete_count 0
+# HELP consul_acl_token_upsert
+# TYPE consul_acl_token_upsert summary
+consul_acl_token_upsert{quantile="0.5"} NaN
+consul_acl_token_upsert{quantile="0.9"} NaN
+consul_acl_token_upsert{quantile="0.99"} NaN
+consul_acl_token_upsert_sum 0
+consul_acl_token_upsert_count 0
+# HELP consul_agent_tls_cert_expiry Seconds until the agent tls certificate expires. Updated every hour
+# TYPE consul_agent_tls_cert_expiry gauge
+consul_agent_tls_cert_expiry 0
+# HELP consul_api_http Samples how long it takes to service the given HTTP request for the given verb and path.
+# TYPE consul_api_http summary
+consul_api_http{quantile="0.5"} NaN
+consul_api_http{quantile="0.9"} NaN
+consul_api_http{quantile="0.99"} NaN
+consul_api_http_sum 0
+consul_api_http_count 0
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.5"} 0.10910899937152863
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.9"} 0.2985079884529114
+consul_api_http{method="GET",path="v1_agent_checks",quantile="0.99"} 0.2985079884529114
+consul_api_http_sum{method="GET",path="v1_agent_checks"} 85.87442895025015
+consul_api_http_count{method="GET",path="v1_agent_checks"} 588
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.5"} 0.5271260142326355
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.9"} 1.2289390563964844
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.99"} 1.2289390563964844
+consul_api_http_sum{method="GET",path="v1_agent_metrics"} 409.6580027639866
+consul_api_http_count{method="GET",path="v1_agent_metrics"} 592
+# HELP consul_autopilot_failure_tolerance Tracks the number of voting servers that the cluster can lose while continuing to function.
+# TYPE consul_autopilot_failure_tolerance gauge
+consul_autopilot_failure_tolerance 0
+# HELP consul_autopilot_healthy Tracks the overall health of the local server cluster. 1 if all servers are healthy, 0 if one or more are unhealthy.
+# TYPE consul_autopilot_healthy gauge
+consul_autopilot_healthy 0
+# HELP consul_catalog_connect_not_found Increments for each connect-based catalog query where the given service could not be found.
+# TYPE consul_catalog_connect_not_found counter
+consul_catalog_connect_not_found 0
+# HELP consul_catalog_connect_query Increments for each connect-based catalog query for the given service.
+# TYPE consul_catalog_connect_query counter
+consul_catalog_connect_query 0
+# HELP consul_catalog_connect_query_tag Increments for each connect-based catalog query for the given service with the given tag.
+# TYPE consul_catalog_connect_query_tag counter
+consul_catalog_connect_query_tag 0
+# HELP consul_catalog_connect_query_tags Increments for each connect-based catalog query for the given service with the given tags.
+# TYPE consul_catalog_connect_query_tags counter
+consul_catalog_connect_query_tags 0
+# HELP consul_catalog_deregister Measures the time it takes to complete a catalog deregister operation.
+# TYPE consul_catalog_deregister summary
+consul_catalog_deregister{quantile="0.5"} NaN
+consul_catalog_deregister{quantile="0.9"} NaN
+consul_catalog_deregister{quantile="0.99"} NaN
+consul_catalog_deregister_sum 0
+consul_catalog_deregister_count 0
+# HELP consul_catalog_register Measures the time it takes to complete a catalog register operation.
+# TYPE consul_catalog_register summary
+consul_catalog_register{quantile="0.5"} NaN
+consul_catalog_register{quantile="0.9"} NaN
+consul_catalog_register{quantile="0.99"} NaN
+consul_catalog_register_sum 45.98546886444092
+consul_catalog_register_count 2
+# HELP consul_catalog_service_not_found Increments for each catalog query where the given service could not be found.
+# TYPE consul_catalog_service_not_found counter
+consul_catalog_service_not_found 0
+# HELP consul_catalog_service_query Increments for each catalog query for the given service.
+# TYPE consul_catalog_service_query counter
+consul_catalog_service_query 0
+# HELP consul_catalog_service_query_tag Increments for each catalog query for the given service with the given tag.
+# TYPE consul_catalog_service_query_tag counter
+consul_catalog_service_query_tag 0
+# HELP consul_catalog_service_query_tags Increments for each catalog query for the given service with the given tags.
+# TYPE consul_catalog_service_query_tags counter
+consul_catalog_service_query_tags 0
+# HELP consul_client_api_catalog_datacenters Increments whenever a Consul agent receives a request to list datacenters in the catalog.
+# TYPE consul_client_api_catalog_datacenters counter
+consul_client_api_catalog_datacenters 0
+# HELP consul_client_api_catalog_deregister Increments whenever a Consul agent receives a catalog deregister request.
+# TYPE consul_client_api_catalog_deregister counter
+consul_client_api_catalog_deregister 0
+# HELP consul_client_api_catalog_gateway_services Increments whenever a Consul agent receives a request to list services associated with a gateway.
+# TYPE consul_client_api_catalog_gateway_services counter
+consul_client_api_catalog_gateway_services 0
+# HELP consul_client_api_catalog_node_service_list Increments whenever a Consul agent receives a request to list a node's registered services.
+# TYPE consul_client_api_catalog_node_service_list counter
+consul_client_api_catalog_node_service_list 0
+# HELP consul_client_api_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_node_services counter
+consul_client_api_catalog_node_services 0
+# HELP consul_client_api_catalog_nodes Increments whenever a Consul agent receives a request to list nodes from the catalog.
+# TYPE consul_client_api_catalog_nodes counter
+consul_client_api_catalog_nodes 0
+# HELP consul_client_api_catalog_register Increments whenever a Consul agent receives a catalog register request.
+# TYPE consul_client_api_catalog_register counter
+consul_client_api_catalog_register 0
+# HELP consul_client_api_catalog_service_nodes Increments whenever a Consul agent receives a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_service_nodes counter
+consul_client_api_catalog_service_nodes 0
+# HELP consul_client_api_catalog_services Increments whenever a Consul agent receives a request to list services from the catalog.
+# TYPE consul_client_api_catalog_services counter
+consul_client_api_catalog_services 0
+# HELP consul_client_api_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service.
+# TYPE consul_client_api_error_catalog_service_nodes counter
+consul_client_api_error_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_datacenters Increments whenever a Consul agent successfully responds to a request to list datacenters.
+# TYPE consul_client_api_success_catalog_datacenters counter
+consul_client_api_success_catalog_datacenters 0
+# HELP consul_client_api_success_catalog_deregister Increments whenever a Consul agent successfully responds to a catalog deregister request.
+# TYPE consul_client_api_success_catalog_deregister counter
+consul_client_api_success_catalog_deregister 0
+# HELP consul_client_api_success_catalog_gateway_services Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway.
+# TYPE consul_client_api_success_catalog_gateway_services counter
+consul_client_api_success_catalog_gateway_services 0
+# HELP consul_client_api_success_catalog_node_service_list Increments whenever a Consul agent successfully responds to a request to list a node's registered services.
+# TYPE consul_client_api_success_catalog_node_service_list counter
+consul_client_api_success_catalog_node_service_list 0
+# HELP consul_client_api_success_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list services in a node.
+# TYPE consul_client_api_success_catalog_node_services counter
+consul_client_api_success_catalog_node_services 0
+# HELP consul_client_api_success_catalog_nodes Increments whenever a Consul agent successfully responds to a request to list nodes.
+# TYPE consul_client_api_success_catalog_nodes counter
+consul_client_api_success_catalog_nodes 0
+# HELP consul_client_api_success_catalog_register Increments whenever a Consul agent successfully responds to a catalog register request.
+# TYPE consul_client_api_success_catalog_register counter
+consul_client_api_success_catalog_register 0
+# HELP consul_client_api_success_catalog_service_nodes Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_success_catalog_service_nodes counter
+consul_client_api_success_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_services Increments whenever a Consul agent successfully responds to a request to list services.
+# TYPE consul_client_api_success_catalog_services counter
+consul_client_api_success_catalog_services 0
+# HELP consul_client_rpc Increments whenever a Consul agent in client mode makes an RPC request to a Consul server.
+# TYPE consul_client_rpc counter
+consul_client_rpc 46
+# HELP consul_client_rpc_error_catalog_datacenters Increments whenever a Consul agent receives an RPC error for a request to list datacenters.
+# TYPE consul_client_rpc_error_catalog_datacenters counter
+consul_client_rpc_error_catalog_datacenters 0
+# HELP consul_client_rpc_error_catalog_deregister Increments whenever a Consul agent receives an RPC error for a catalog deregister request.
+# TYPE consul_client_rpc_error_catalog_deregister counter
+consul_client_rpc_error_catalog_deregister 0
+# HELP consul_client_rpc_error_catalog_gateway_services Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway.
+# TYPE consul_client_rpc_error_catalog_gateway_services counter
+consul_client_rpc_error_catalog_gateway_services 0
+# HELP consul_client_rpc_error_catalog_node_service_list Increments whenever a Consul agent receives an RPC error for request to list a node's registered services.
+# TYPE consul_client_rpc_error_catalog_node_service_list counter
+consul_client_rpc_error_catalog_node_service_list 0
+# HELP consul_client_rpc_error_catalog_node_services Increments whenever a Consul agent receives an RPC error for a request to list services in a node.
+# TYPE consul_client_rpc_error_catalog_node_services counter
+consul_client_rpc_error_catalog_node_services 0
+# HELP consul_client_rpc_error_catalog_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes.
+# TYPE consul_client_rpc_error_catalog_nodes counter
+consul_client_rpc_error_catalog_nodes 0
+# HELP consul_client_rpc_error_catalog_register Increments whenever a Consul agent receives an RPC error for a catalog register request.
+# TYPE consul_client_rpc_error_catalog_register counter
+consul_client_rpc_error_catalog_register 0
+# HELP consul_client_rpc_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.
+# TYPE consul_client_rpc_error_catalog_service_nodes counter
+consul_client_rpc_error_catalog_service_nodes 0
+# HELP consul_client_rpc_error_catalog_services Increments whenever a Consul agent receives an RPC error for a request to list services.
+# TYPE consul_client_rpc_error_catalog_services counter
+consul_client_rpc_error_catalog_services 0
+# HELP consul_client_rpc_exceeded Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's limits configuration.
+# TYPE consul_client_rpc_exceeded counter
+consul_client_rpc_exceeded 0
+# HELP consul_client_rpc_failed Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails.
+# TYPE consul_client_rpc_failed counter
+consul_client_rpc_failed 0
+# HELP consul_consul_cache_bypass Counts how many times a request bypassed the cache because no cache-key was provided.
+# TYPE consul_consul_cache_bypass counter
+consul_consul_cache_bypass 0
+# HELP consul_consul_cache_entries_count Represents the number of entries in this cache.
+# TYPE consul_consul_cache_entries_count gauge
+consul_consul_cache_entries_count 0
+# HELP consul_consul_cache_evict_expired Counts the number of expired entries that are evicted.
+# TYPE consul_consul_cache_evict_expired counter
+consul_consul_cache_evict_expired 0
+# HELP consul_consul_cache_fetch_error Counts the number of failed fetches by the cache.
+# TYPE consul_consul_cache_fetch_error counter
+consul_consul_cache_fetch_error 0
+# HELP consul_consul_cache_fetch_success Counts the number of successful fetches by the cache.
+# TYPE consul_consul_cache_fetch_success counter
+consul_consul_cache_fetch_success 0
+# HELP consul_consul_fsm_ca Deprecated - use fsm_ca instead
+# TYPE consul_consul_fsm_ca summary
+consul_consul_fsm_ca{quantile="0.5"} NaN
+consul_consul_fsm_ca{quantile="0.9"} NaN
+consul_consul_fsm_ca{quantile="0.99"} NaN
+consul_consul_fsm_ca_sum 0
+consul_consul_fsm_ca_count 0
+# HELP consul_consul_fsm_intention Deprecated - use fsm_intention instead
+# TYPE consul_consul_fsm_intention summary
+consul_consul_fsm_intention{quantile="0.5"} NaN
+consul_consul_fsm_intention{quantile="0.9"} NaN
+consul_consul_fsm_intention{quantile="0.99"} NaN
+consul_consul_fsm_intention_sum 0
+consul_consul_fsm_intention_count 0
+# HELP consul_consul_intention_apply
+# TYPE consul_consul_intention_apply summary
+consul_consul_intention_apply{quantile="0.5"} NaN
+consul_consul_intention_apply{quantile="0.9"} NaN
+consul_consul_intention_apply{quantile="0.99"} NaN
+consul_consul_intention_apply_sum 0
+consul_consul_intention_apply_count 0
+# HELP consul_consul_members_clients Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_consul_members_clients gauge
+consul_consul_members_clients 0
+# HELP consul_consul_members_servers Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_consul_members_servers gauge
+consul_consul_members_servers 0
+# HELP consul_consul_peering_exported_services A gauge that tracks how many services are exported for the peering. The labels are "peering" and, for enterprise, "partition". We emit this metric every 9 seconds
+# TYPE consul_consul_peering_exported_services gauge
+consul_consul_peering_exported_services 0
+# HELP consul_consul_state_config_entries Measures the current number of unique configuration entries registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_consul_state_config_entries gauge
+consul_consul_state_config_entries 0
+# HELP consul_consul_state_connect_instances Measures the current number of unique connect service instances registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_consul_state_connect_instances gauge
+consul_consul_state_connect_instances 0
+# HELP consul_consul_state_kv_entries Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.
+# TYPE consul_consul_state_kv_entries gauge
+consul_consul_state_kv_entries 0
+# HELP consul_consul_state_nodes Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_nodes gauge
+consul_consul_state_nodes 0
+# HELP consul_consul_state_peerings Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0.
+# TYPE consul_consul_state_peerings gauge
+consul_consul_state_peerings 0
+# HELP consul_consul_state_service_instances Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_service_instances gauge
+consul_consul_state_service_instances 0
+# HELP consul_consul_state_services Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_consul_state_services gauge
+consul_consul_state_services 0
+# HELP consul_federation_state_apply
+# TYPE consul_federation_state_apply summary
+consul_federation_state_apply{quantile="0.5"} NaN
+consul_federation_state_apply{quantile="0.9"} NaN
+consul_federation_state_apply{quantile="0.99"} NaN
+consul_federation_state_apply_sum 0
+consul_federation_state_apply_count 0
+# HELP consul_federation_state_get
+# TYPE consul_federation_state_get summary
+consul_federation_state_get{quantile="0.5"} NaN
+consul_federation_state_get{quantile="0.9"} NaN
+consul_federation_state_get{quantile="0.99"} NaN
+consul_federation_state_get_sum 0
+consul_federation_state_get_count 0
+# HELP consul_federation_state_list
+# TYPE consul_federation_state_list summary
+consul_federation_state_list{quantile="0.5"} NaN
+consul_federation_state_list{quantile="0.9"} NaN
+consul_federation_state_list{quantile="0.99"} NaN
+consul_federation_state_list_sum 0
+consul_federation_state_list_count 0
+# HELP consul_federation_state_list_mesh_gateways
+# TYPE consul_federation_state_list_mesh_gateways summary
+consul_federation_state_list_mesh_gateways{quantile="0.5"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.9"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.99"} NaN
+consul_federation_state_list_mesh_gateways_sum 0
+consul_federation_state_list_mesh_gateways_count 0
+# HELP consul_fsm_acl Measures the time it takes to apply the given ACL operation to the FSM.
+# TYPE consul_fsm_acl summary
+consul_fsm_acl{quantile="0.5"} NaN
+consul_fsm_acl{quantile="0.9"} NaN
+consul_fsm_acl{quantile="0.99"} NaN
+consul_fsm_acl_sum 0
+consul_fsm_acl_count 0
+# HELP consul_fsm_acl_authmethod Measures the time it takes to apply an ACL authmethod operation to the FSM.
+# TYPE consul_fsm_acl_authmethod summary
+consul_fsm_acl_authmethod{quantile="0.5"} NaN
+consul_fsm_acl_authmethod{quantile="0.9"} NaN
+consul_fsm_acl_authmethod{quantile="0.99"} NaN
+consul_fsm_acl_authmethod_sum 0
+consul_fsm_acl_authmethod_count 0
+# HELP consul_fsm_acl_bindingrule Measures the time it takes to apply an ACL binding rule operation to the FSM.
+# TYPE consul_fsm_acl_bindingrule summary
+consul_fsm_acl_bindingrule{quantile="0.5"} NaN
+consul_fsm_acl_bindingrule{quantile="0.9"} NaN
+consul_fsm_acl_bindingrule{quantile="0.99"} NaN
+consul_fsm_acl_bindingrule_sum 0
+consul_fsm_acl_bindingrule_count 0
+# HELP consul_fsm_acl_policy Measures the time it takes to apply an ACL policy operation to the FSM.
+# TYPE consul_fsm_acl_policy summary
+consul_fsm_acl_policy{quantile="0.5"} NaN
+consul_fsm_acl_policy{quantile="0.9"} NaN
+consul_fsm_acl_policy{quantile="0.99"} NaN
+consul_fsm_acl_policy_sum 0
+consul_fsm_acl_policy_count 0
+# HELP consul_fsm_acl_token Measures the time it takes to apply an ACL token operation to the FSM.
+# TYPE consul_fsm_acl_token summary
+consul_fsm_acl_token{quantile="0.5"} NaN
+consul_fsm_acl_token{quantile="0.9"} NaN
+consul_fsm_acl_token{quantile="0.99"} NaN
+consul_fsm_acl_token_sum 0
+consul_fsm_acl_token_count 0
+# HELP consul_fsm_autopilot Measures the time it takes to apply the given autopilot update to the FSM.
+# TYPE consul_fsm_autopilot summary
+consul_fsm_autopilot{quantile="0.5"} NaN
+consul_fsm_autopilot{quantile="0.9"} NaN
+consul_fsm_autopilot{quantile="0.99"} NaN
+consul_fsm_autopilot_sum 0
+consul_fsm_autopilot_count 0
+# HELP consul_fsm_ca Measures the time it takes to apply CA configuration operations to the FSM.
+# TYPE consul_fsm_ca summary
+consul_fsm_ca{quantile="0.5"} NaN
+consul_fsm_ca{quantile="0.9"} NaN
+consul_fsm_ca{quantile="0.99"} NaN
+consul_fsm_ca_sum 0
+consul_fsm_ca_count 0
+# HELP consul_fsm_ca_leaf Measures the time it takes to apply an operation while signing a leaf certificate.
+# TYPE consul_fsm_ca_leaf summary
+consul_fsm_ca_leaf{quantile="0.5"} NaN
+consul_fsm_ca_leaf{quantile="0.9"} NaN
+consul_fsm_ca_leaf{quantile="0.99"} NaN
+consul_fsm_ca_leaf_sum 0
+consul_fsm_ca_leaf_count 0
+# HELP consul_fsm_coordinate_batch_update Measures the time it takes to apply the given batch coordinate update to the FSM.
+# TYPE consul_fsm_coordinate_batch_update summary
+consul_fsm_coordinate_batch_update{quantile="0.5"} 0.10997900366783142
+consul_fsm_coordinate_batch_update{quantile="0.9"} 0.10997900366783142
+consul_fsm_coordinate_batch_update{quantile="0.99"} 0.10997900366783142
+consul_fsm_coordinate_batch_update_sum 240.22869294136763
+consul_fsm_coordinate_batch_update_count 11035
+# HELP consul_fsm_deregister Measures the time it takes to apply a catalog deregister operation to the FSM.
+# TYPE consul_fsm_deregister summary
+consul_fsm_deregister{quantile="0.5"} NaN
+consul_fsm_deregister{quantile="0.9"} NaN
+consul_fsm_deregister{quantile="0.99"} NaN
+consul_fsm_deregister_sum 1.4027419984340668
+consul_fsm_deregister_count 3
+# HELP consul_fsm_intention Measures the time it takes to apply an intention operation to the FSM.
+# TYPE consul_fsm_intention summary
+consul_fsm_intention{quantile="0.5"} NaN
+consul_fsm_intention{quantile="0.9"} NaN
+consul_fsm_intention{quantile="0.99"} NaN
+consul_fsm_intention_sum 0
+consul_fsm_intention_count 0
+# HELP consul_fsm_kvs Measures the time it takes to apply the given KV operation to the FSM.
+# TYPE consul_fsm_kvs summary
+consul_fsm_kvs{quantile="0.5"} NaN
+consul_fsm_kvs{quantile="0.9"} NaN
+consul_fsm_kvs{quantile="0.99"} NaN
+consul_fsm_kvs_sum 0
+consul_fsm_kvs_count 0
+# HELP consul_fsm_peering Measures the time it takes to apply a peering operation to the FSM.
+# TYPE consul_fsm_peering summary
+consul_fsm_peering{quantile="0.5"} NaN
+consul_fsm_peering{quantile="0.9"} NaN
+consul_fsm_peering{quantile="0.99"} NaN
+consul_fsm_peering_sum 0
+consul_fsm_peering_count 0
+# HELP consul_fsm_persist Measures the time it takes to persist the FSM to a raft snapshot.
+# TYPE consul_fsm_persist summary
+consul_fsm_persist{quantile="0.5"} NaN
+consul_fsm_persist{quantile="0.9"} NaN
+consul_fsm_persist{quantile="0.99"} NaN
+consul_fsm_persist_sum 0
+consul_fsm_persist_count 0
+# HELP consul_fsm_prepared_query Measures the time it takes to apply the given prepared query update operation to the FSM.
+# TYPE consul_fsm_prepared_query summary
+consul_fsm_prepared_query{quantile="0.5"} NaN
+consul_fsm_prepared_query{quantile="0.9"} NaN
+consul_fsm_prepared_query{quantile="0.99"} NaN
+consul_fsm_prepared_query_sum 0
+consul_fsm_prepared_query_count 0
+# HELP consul_fsm_register Measures the time it takes to apply a catalog register operation to the FSM.
+# TYPE consul_fsm_register summary
+consul_fsm_register{quantile="0.5"} NaN
+consul_fsm_register{quantile="0.9"} NaN
+consul_fsm_register{quantile="0.99"} NaN
+consul_fsm_register_sum 20.184059869498014
+consul_fsm_register_count 210
+# HELP consul_fsm_session Measures the time it takes to apply the given session operation to the FSM.
+# TYPE consul_fsm_session summary
+consul_fsm_session{quantile="0.5"} NaN
+consul_fsm_session{quantile="0.9"} NaN
+consul_fsm_session{quantile="0.99"} NaN
+consul_fsm_session_sum 0
+consul_fsm_session_count 0
+# HELP consul_fsm_system_metadata Measures the time it takes to apply a system metadata operation to the FSM.
+# TYPE consul_fsm_system_metadata summary
+consul_fsm_system_metadata{quantile="0.5"} NaN
+consul_fsm_system_metadata{quantile="0.9"} NaN
+consul_fsm_system_metadata{quantile="0.99"} NaN
+consul_fsm_system_metadata_sum 0
+consul_fsm_system_metadata_count 0
+# HELP consul_fsm_tombstone Measures the time it takes to apply the given tombstone operation to the FSM.
+# TYPE consul_fsm_tombstone summary
+consul_fsm_tombstone{quantile="0.5"} NaN
+consul_fsm_tombstone{quantile="0.9"} NaN
+consul_fsm_tombstone{quantile="0.99"} NaN
+consul_fsm_tombstone_sum 0
+consul_fsm_tombstone_count 0
+# HELP consul_fsm_txn Measures the time it takes to apply the given transaction update to the FSM.
+# TYPE consul_fsm_txn summary
+consul_fsm_txn{quantile="0.5"} NaN
+consul_fsm_txn{quantile="0.9"} NaN
+consul_fsm_txn{quantile="0.99"} NaN
+consul_fsm_txn_sum 0
+consul_fsm_txn_count 0
+# HELP consul_grpc_client_connection_count Counts the number of new gRPC connections opened by the client agent to a Consul server.
+# TYPE consul_grpc_client_connection_count counter
+consul_grpc_client_connection_count 9
+# HELP consul_grpc_client_connections Measures the number of active gRPC connections open from the client agent to any Consul servers.
+# TYPE consul_grpc_client_connections gauge
+consul_grpc_client_connections 0
+# HELP consul_grpc_client_request_count Counts the number of gRPC requests made by the client agent to a Consul server.
+# TYPE consul_grpc_client_request_count counter
+consul_grpc_client_request_count 0
+# HELP consul_grpc_server_connection_count Counts the number of new gRPC connections received by the server.
+# TYPE consul_grpc_server_connection_count counter
+consul_grpc_server_connection_count 6
+# HELP consul_grpc_server_connections Measures the number of active gRPC connections open on the server.
+# TYPE consul_grpc_server_connections gauge
+consul_grpc_server_connections 0
+# HELP consul_grpc_server_request_count Counts the number of gRPC requests received by the server.
+# TYPE consul_grpc_server_request_count counter
+consul_grpc_server_request_count 0
+# HELP consul_grpc_server_stream_count Counts the number of new gRPC streams received by the server.
+# TYPE consul_grpc_server_stream_count counter
+consul_grpc_server_stream_count 0
+# HELP consul_grpc_server_streams Measures the number of active gRPC streams handled by the server.
+# TYPE consul_grpc_server_streams gauge
+consul_grpc_server_streams 0
+# HELP consul_intention_apply
+# TYPE consul_intention_apply summary
+consul_intention_apply{quantile="0.5"} NaN
+consul_intention_apply{quantile="0.9"} NaN
+consul_intention_apply{quantile="0.99"} NaN
+consul_intention_apply_sum 0
+consul_intention_apply_count 0
+# HELP consul_kvs_apply Measures the time it takes to complete an update to the KV store.
+# TYPE consul_kvs_apply summary
+consul_kvs_apply{quantile="0.5"} NaN
+consul_kvs_apply{quantile="0.9"} NaN
+consul_kvs_apply{quantile="0.99"} NaN
+consul_kvs_apply_sum 0
+consul_kvs_apply_count 0
+# HELP consul_leader_barrier Measures the time spent waiting for the raft barrier upon gaining leadership.
+# TYPE consul_leader_barrier summary
+consul_leader_barrier{quantile="0.5"} NaN
+consul_leader_barrier{quantile="0.9"} NaN
+consul_leader_barrier{quantile="0.99"} NaN
+consul_leader_barrier_sum 168.71699333190918
+consul_leader_barrier_count 8
+# HELP consul_leader_reapTombstones Measures the time spent clearing tombstones.
+# TYPE consul_leader_reapTombstones summary
+consul_leader_reapTombstones{quantile="0.5"} NaN
+consul_leader_reapTombstones{quantile="0.9"} NaN
+consul_leader_reapTombstones{quantile="0.99"} NaN
+consul_leader_reapTombstones_sum 0
+consul_leader_reapTombstones_count 0
+# HELP consul_leader_reconcile Measures the time spent updating the raft store from the serf member information.
+# TYPE consul_leader_reconcile summary
+consul_leader_reconcile{quantile="0.5"} NaN
+consul_leader_reconcile{quantile="0.9"} NaN
+consul_leader_reconcile{quantile="0.99"} NaN
+consul_leader_reconcile_sum 2.5833420008420944
+consul_leader_reconcile_count 8
+# HELP consul_leader_reconcileMember Measures the time spent updating the raft store for a single serf member's information.
+# TYPE consul_leader_reconcileMember summary
+consul_leader_reconcileMember{quantile="0.5"} NaN
+consul_leader_reconcileMember{quantile="0.9"} NaN
+consul_leader_reconcileMember{quantile="0.99"} NaN
+consul_leader_reconcileMember_sum 60.76006331667304
+consul_leader_reconcileMember_count 26
+# HELP consul_leader_replication_acl_policies_index Tracks the index of ACL policies in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_policies_index gauge
+consul_leader_replication_acl_policies_index 0
+# HELP consul_leader_replication_acl_policies_status Tracks the current health of ACL policy replication on the leader
+# TYPE consul_leader_replication_acl_policies_status gauge
+consul_leader_replication_acl_policies_status 0
+# HELP consul_leader_replication_acl_roles_index Tracks the index of ACL roles in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_roles_index gauge
+consul_leader_replication_acl_roles_index 0
+# HELP consul_leader_replication_acl_roles_status Tracks the current health of ACL role replication on the leader
+# TYPE consul_leader_replication_acl_roles_status gauge
+consul_leader_replication_acl_roles_status 0
+# HELP consul_leader_replication_acl_tokens_index Tracks the index of ACL tokens in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_tokens_index gauge
+consul_leader_replication_acl_tokens_index 0
+# HELP consul_leader_replication_acl_tokens_status Tracks the current health of ACL token replication on the leader
+# TYPE consul_leader_replication_acl_tokens_status gauge
+consul_leader_replication_acl_tokens_status 0
+# HELP consul_leader_replication_config_entries_index Tracks the index of config entries in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_config_entries_index gauge
+consul_leader_replication_config_entries_index 0
+# HELP consul_leader_replication_config_entries_status Tracks the current health of config entry replication on the leader
+# TYPE consul_leader_replication_config_entries_status gauge
+consul_leader_replication_config_entries_status 0
+# HELP consul_leader_replication_federation_state_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_federation_state_index gauge
+consul_leader_replication_federation_state_index 0
+# HELP consul_leader_replication_federation_state_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_federation_state_status gauge
+consul_leader_replication_federation_state_status 0
+# HELP consul_leader_replication_namespaces_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_namespaces_index gauge
+consul_leader_replication_namespaces_index 0
+# HELP consul_leader_replication_namespaces_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_namespaces_status gauge
+consul_leader_replication_namespaces_status 0
+# HELP consul_memberlist_degraded_probe consul_memberlist_degraded_probe
+# TYPE consul_memberlist_degraded_probe counter
+consul_memberlist_degraded_probe{network="lan"} 1
+consul_memberlist_degraded_probe{network="wan"} 1
+# HELP consul_memberlist_gossip consul_memberlist_gossip
+# TYPE consul_memberlist_gossip summary
+consul_memberlist_gossip{network="lan",quantile="0.5"} 0.019190000370144844
+consul_memberlist_gossip{network="lan",quantile="0.9"} 0.04447900131344795
+consul_memberlist_gossip{network="lan",quantile="0.99"} 0.06036800146102905
+consul_memberlist_gossip_sum{network="lan"} 72.94090104475617
+consul_memberlist_gossip_count{network="lan"} 2984
+consul_memberlist_gossip{network="wan",quantile="0.5"} 0.020829999819397926
+consul_memberlist_gossip{network="wan",quantile="0.9"} 0.04980999976396561
+consul_memberlist_gossip{network="wan",quantile="0.99"} 0.06190900132060051
+consul_memberlist_gossip_sum{network="wan"} 31.62462099501863
+consul_memberlist_gossip_count{network="wan"} 1193
+# HELP consul_memberlist_msg_alive consul_memberlist_msg_alive
+# TYPE consul_memberlist_msg_alive counter
+consul_memberlist_msg_alive{network="lan"} 5
+consul_memberlist_msg_alive{network="wan"} 5
+# HELP consul_memberlist_msg_dead consul_memberlist_msg_dead
+# TYPE consul_memberlist_msg_dead counter
+consul_memberlist_msg_dead{network="lan"} 2
+consul_memberlist_msg_dead{network="wan"} 2
+# HELP consul_memberlist_probeNode consul_memberlist_probeNode
+# TYPE consul_memberlist_probeNode summary
+consul_memberlist_probeNode{network="lan",quantile="0.5"} 0.9080119729042053
+consul_memberlist_probeNode{network="lan",quantile="0.9"} 1.071262001991272
+consul_memberlist_probeNode{network="lan",quantile="0.99"} 1.071262001991272
+consul_memberlist_probeNode_sum{network="lan"} 560.697409927845
+consul_memberlist_probeNode_count{network="lan"} 559
+consul_memberlist_probeNode{network="wan",quantile="0.5"} 0.7037429809570312
+consul_memberlist_probeNode{network="wan",quantile="0.9"} 1.0175219774246216
+consul_memberlist_probeNode{network="wan",quantile="0.99"} 1.0175219774246216
+consul_memberlist_probeNode_sum{network="wan"} 133.5382086634636
+consul_memberlist_probeNode_count{network="wan"} 112
+# HELP consul_memberlist_pushPullNode consul_memberlist_pushPullNode
+# TYPE consul_memberlist_pushPullNode summary
+consul_memberlist_pushPullNode{network="lan",quantile="0.5"} NaN
+consul_memberlist_pushPullNode{network="lan",quantile="0.9"} NaN
+consul_memberlist_pushPullNode{network="lan",quantile="0.99"} NaN
+consul_memberlist_pushPullNode_sum{network="lan"} 32.9423828125
+consul_memberlist_pushPullNode_count{network="lan"} 23
+consul_memberlist_pushPullNode{network="wan",quantile="0.5"} NaN
+consul_memberlist_pushPullNode{network="wan",quantile="0.9"} NaN
+consul_memberlist_pushPullNode{network="wan",quantile="0.99"} NaN
+consul_memberlist_pushPullNode_sum{network="wan"} 14.840403079986572
+consul_memberlist_pushPullNode_count{network="wan"} 10
+# HELP consul_memberlist_tcp_accept consul_memberlist_tcp_accept
+# TYPE consul_memberlist_tcp_accept counter
+consul_memberlist_tcp_accept{network="lan"} 23
+consul_memberlist_tcp_accept{network="wan"} 10
+# HELP consul_memberlist_tcp_connect consul_memberlist_tcp_connect
+# TYPE consul_memberlist_tcp_connect counter
+consul_memberlist_tcp_connect{network="lan"} 23
+consul_memberlist_tcp_connect{network="wan"} 10
+# HELP consul_memberlist_tcp_sent consul_memberlist_tcp_sent
+# TYPE consul_memberlist_tcp_sent counter
+consul_memberlist_tcp_sent{network="lan"} 39810
+consul_memberlist_tcp_sent{network="wan"} 15776
+# HELP consul_memberlist_udp_received consul_memberlist_udp_received
+# TYPE consul_memberlist_udp_received counter
+consul_memberlist_udp_received{network="lan"} 168805
+consul_memberlist_udp_received{network="wan"} 42596
+# HELP consul_memberlist_udp_sent consul_memberlist_udp_sent
+# TYPE consul_memberlist_udp_sent counter
+consul_memberlist_udp_sent{network="lan"} 168852
+consul_memberlist_udp_sent{network="wan"} 41510
+# HELP consul_mesh_active_root_ca_expiry Seconds until the service mesh root certificate expires. Updated every hour
+# TYPE consul_mesh_active_root_ca_expiry gauge
+consul_mesh_active_root_ca_expiry 0
+# HELP consul_mesh_active_signing_ca_expiry Seconds until the service mesh signing certificate expires. Updated every hour
+# TYPE consul_mesh_active_signing_ca_expiry gauge
+consul_mesh_active_signing_ca_expiry 0
+# HELP consul_prepared_query_apply Measures the time it takes to apply a prepared query update.
+# TYPE consul_prepared_query_apply summary
+consul_prepared_query_apply{quantile="0.5"} NaN
+consul_prepared_query_apply{quantile="0.9"} NaN
+consul_prepared_query_apply{quantile="0.99"} NaN
+consul_prepared_query_apply_sum 0
+consul_prepared_query_apply_count 0
+# HELP consul_prepared_query_execute Measures the time it takes to process a prepared query execute request.
+# TYPE consul_prepared_query_execute summary
+consul_prepared_query_execute{quantile="0.5"} NaN
+consul_prepared_query_execute{quantile="0.9"} NaN
+consul_prepared_query_execute{quantile="0.99"} NaN
+consul_prepared_query_execute_sum 0
+consul_prepared_query_execute_count 0
+# HELP consul_prepared_query_execute_remote Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter.
+# TYPE consul_prepared_query_execute_remote summary
+consul_prepared_query_execute_remote{quantile="0.5"} NaN
+consul_prepared_query_execute_remote{quantile="0.9"} NaN
+consul_prepared_query_execute_remote{quantile="0.99"} NaN
+consul_prepared_query_execute_remote_sum 0
+consul_prepared_query_execute_remote_count 0
+# HELP consul_prepared_query_explain Measures the time it takes to process a prepared query explain request.
+# TYPE consul_prepared_query_explain summary
+consul_prepared_query_explain{quantile="0.5"} NaN
+consul_prepared_query_explain{quantile="0.9"} NaN
+consul_prepared_query_explain{quantile="0.99"} NaN
+consul_prepared_query_explain_sum 0
+consul_prepared_query_explain_count 0
+# HELP consul_raft_applied_index Represents the raft applied index.
+# TYPE consul_raft_applied_index gauge
+consul_raft_applied_index 0
+# HELP consul_raft_apply This counts the number of Raft transactions occurring over the interval.
+# TYPE consul_raft_apply counter
+consul_raft_apply 52
+# HELP consul_raft_barrier consul_raft_barrier
+# TYPE consul_raft_barrier counter
+consul_raft_barrier 8
+# HELP consul_raft_boltdb_getLog consul_raft_boltdb_getLog
+# TYPE consul_raft_boltdb_getLog summary
+consul_raft_boltdb_getLog{quantile="0.5"} 0.04751899838447571
+consul_raft_boltdb_getLog{quantile="0.9"} 0.04751899838447571
+consul_raft_boltdb_getLog{quantile="0.99"} 0.04751899838447571
+consul_raft_boltdb_getLog_sum 177.71392010012642
+consul_raft_boltdb_getLog_count 25884
+# HELP consul_raft_boltdb_logBatchSize consul_raft_boltdb_logBatchSize
+# TYPE consul_raft_boltdb_logBatchSize summary
+consul_raft_boltdb_logBatchSize{quantile="0.5"} 414
+consul_raft_boltdb_logBatchSize{quantile="0.9"} 414
+consul_raft_boltdb_logBatchSize{quantile="0.99"} 414
+consul_raft_boltdb_logBatchSize_sum 32278
+consul_raft_boltdb_logBatchSize_count 93
+# HELP consul_raft_boltdb_logSize consul_raft_boltdb_logSize
+# TYPE consul_raft_boltdb_logSize summary
+consul_raft_boltdb_logSize{quantile="0.5"} 414
+consul_raft_boltdb_logSize{quantile="0.9"} 414
+consul_raft_boltdb_logSize{quantile="0.99"} 414
+consul_raft_boltdb_logSize_sum 32278
+consul_raft_boltdb_logSize_count 95
+# HELP consul_raft_boltdb_logsPerBatch consul_raft_boltdb_logsPerBatch
+# TYPE consul_raft_boltdb_logsPerBatch summary
+consul_raft_boltdb_logsPerBatch{quantile="0.5"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.9"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.99"} 1
+consul_raft_boltdb_logsPerBatch_sum 95
+consul_raft_boltdb_logsPerBatch_count 93
+# HELP consul_raft_boltdb_storeLogs consul_raft_boltdb_storeLogs
+# TYPE consul_raft_boltdb_storeLogs summary
+consul_raft_boltdb_storeLogs{quantile="0.5"} 17.80512237548828
+consul_raft_boltdb_storeLogs{quantile="0.9"} 17.80512237548828
+consul_raft_boltdb_storeLogs{quantile="0.99"} 17.80512237548828
+consul_raft_boltdb_storeLogs_sum 1006.1075472831726
+consul_raft_boltdb_storeLogs_count 93
+# HELP consul_raft_boltdb_totalReadTxn consul_raft_boltdb_totalReadTxn
+# TYPE consul_raft_boltdb_totalReadTxn counter
+consul_raft_boltdb_totalReadTxn 25946
+# HELP consul_raft_boltdb_txstats_cursorCount consul_raft_boltdb_txstats_cursorCount
+# TYPE consul_raft_boltdb_txstats_cursorCount counter
+consul_raft_boltdb_txstats_cursorCount 52198
+# HELP consul_raft_boltdb_txstats_nodeCount consul_raft_boltdb_txstats_nodeCount
+# TYPE consul_raft_boltdb_txstats_nodeCount counter
+consul_raft_boltdb_txstats_nodeCount 386
+# HELP consul_raft_boltdb_txstats_nodeDeref consul_raft_boltdb_txstats_nodeDeref
+# TYPE consul_raft_boltdb_txstats_nodeDeref counter
+consul_raft_boltdb_txstats_nodeDeref 0
+# HELP consul_raft_boltdb_txstats_rebalance consul_raft_boltdb_txstats_rebalance
+# TYPE consul_raft_boltdb_txstats_rebalance counter
+consul_raft_boltdb_txstats_rebalance 0
+# HELP consul_raft_boltdb_txstats_rebalanceTime consul_raft_boltdb_txstats_rebalanceTime
+# TYPE consul_raft_boltdb_txstats_rebalanceTime summary
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.9"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.99"} 0
+consul_raft_boltdb_txstats_rebalanceTime_sum 0
+consul_raft_boltdb_txstats_rebalanceTime_count 120
+# HELP consul_raft_boltdb_txstats_spill consul_raft_boltdb_txstats_spill
+# TYPE consul_raft_boltdb_txstats_spill counter
+consul_raft_boltdb_txstats_spill 398
+# HELP consul_raft_boltdb_txstats_spillTime consul_raft_boltdb_txstats_spillTime
+# TYPE consul_raft_boltdb_txstats_spillTime summary
+consul_raft_boltdb_txstats_spillTime{quantile="0.5"} 0.018939999863505363
+consul_raft_boltdb_txstats_spillTime{quantile="0.9"} 0.04575999826192856
+consul_raft_boltdb_txstats_spillTime{quantile="0.99"} 0.04575999826192856
+consul_raft_boltdb_txstats_spillTime_sum 2.559216999274213
+consul_raft_boltdb_txstats_spillTime_count 120
+# HELP consul_raft_boltdb_txstats_split consul_raft_boltdb_txstats_split
+# TYPE consul_raft_boltdb_txstats_split counter
+consul_raft_boltdb_txstats_split 19
+# HELP consul_raft_boltdb_txstats_write consul_raft_boltdb_txstats_write
+# TYPE consul_raft_boltdb_txstats_write counter
+consul_raft_boltdb_txstats_write 600
+# HELP consul_raft_boltdb_txstats_writeTime consul_raft_boltdb_txstats_writeTime
+# TYPE consul_raft_boltdb_txstats_writeTime summary
+consul_raft_boltdb_txstats_writeTime{quantile="0.5"} 17.56859588623047
+consul_raft_boltdb_txstats_writeTime{quantile="0.9"} 17.67194366455078
+consul_raft_boltdb_txstats_writeTime{quantile="0.99"} 17.67194366455078
+consul_raft_boltdb_txstats_writeTime_sum 1048.4321446418762
+consul_raft_boltdb_txstats_writeTime_count 120
+# HELP consul_raft_boltdb_writeCapacity consul_raft_boltdb_writeCapacity
+# TYPE consul_raft_boltdb_writeCapacity summary
+consul_raft_boltdb_writeCapacity{quantile="0.5"} 56.34065628051758
+consul_raft_boltdb_writeCapacity{quantile="0.9"} 56.34065628051758
+consul_raft_boltdb_writeCapacity{quantile="0.99"} 56.34065628051758
+consul_raft_boltdb_writeCapacity_sum 11092.64028930664
+consul_raft_boltdb_writeCapacity_count 93
+# HELP consul_raft_candidate_electSelf consul_raft_candidate_electSelf
+# TYPE consul_raft_candidate_electSelf summary
+consul_raft_candidate_electSelf{quantile="0.5"} NaN
+consul_raft_candidate_electSelf{quantile="0.9"} NaN
+consul_raft_candidate_electSelf{quantile="0.99"} NaN
+consul_raft_candidate_electSelf_sum 64.78176307678223
+consul_raft_candidate_electSelf_count 2
+# HELP consul_raft_commitTime This measures the time it takes to commit a new entry to the Raft log on the leader.
+# TYPE consul_raft_commitTime summary
+consul_raft_commitTime{quantile="0.5"} 58.47069549560547
+consul_raft_commitTime{quantile="0.9"} 58.47069549560547
+consul_raft_commitTime{quantile="0.99"} 58.47069549560547
+consul_raft_commitTime_sum 1418.8827295303345
+consul_raft_commitTime_count 64
+# HELP consul_raft_fsm_apply consul_raft_fsm_apply
+# TYPE consul_raft_fsm_apply summary
+consul_raft_fsm_apply{quantile="0.5"} 0.1474989950656891
+consul_raft_fsm_apply{quantile="0.9"} 0.1474989950656891
+consul_raft_fsm_apply{quantile="0.99"} 0.1474989950656891
+consul_raft_fsm_apply_sum 368.55326924845576
+consul_raft_fsm_apply_count 11248
+# HELP consul_raft_fsm_enqueue consul_raft_fsm_enqueue
+# TYPE consul_raft_fsm_enqueue summary
+consul_raft_fsm_enqueue{quantile="0.5"} 0.01882000081241131
+consul_raft_fsm_enqueue{quantile="0.9"} 0.01882000081241131
+consul_raft_fsm_enqueue{quantile="0.99"} 0.01882000081241131
+consul_raft_fsm_enqueue_sum 1.6373119996860623
+consul_raft_fsm_enqueue_count 64
+# HELP consul_raft_fsm_lastRestoreDuration This measures how long the last FSM restore (from disk or leader) took.
+# TYPE consul_raft_fsm_lastRestoreDuration gauge
+consul_raft_fsm_lastRestoreDuration 0
+# HELP consul_raft_fsm_restore consul_raft_fsm_restore
+# TYPE consul_raft_fsm_restore summary
+consul_raft_fsm_restore{quantile="0.5"} NaN
+consul_raft_fsm_restore{quantile="0.9"} NaN
+consul_raft_fsm_restore{quantile="0.99"} NaN
+consul_raft_fsm_restore_sum 2.6886210441589355
+consul_raft_fsm_restore_count 1
+# HELP consul_raft_last_index Represents the raft last index.
+# TYPE consul_raft_last_index gauge
+consul_raft_last_index 0
+# HELP consul_raft_leader_dispatchLog consul_raft_leader_dispatchLog
+# TYPE consul_raft_leader_dispatchLog summary
+consul_raft_leader_dispatchLog{quantile="0.5"} 17.841020584106445
+consul_raft_leader_dispatchLog{quantile="0.9"} 17.841020584106445
+consul_raft_leader_dispatchLog{quantile="0.99"} 17.841020584106445
+consul_raft_leader_dispatchLog_sum 614.3611516952515
+consul_raft_leader_dispatchLog_count 64
+# HELP consul_raft_leader_lastContact Measures the time since the leader was last able to contact the follower nodes when checking its leader lease.
+# TYPE consul_raft_leader_lastContact summary
+consul_raft_leader_lastContact{quantile="0.5"} 30
+consul_raft_leader_lastContact{quantile="0.9"} 67
+consul_raft_leader_lastContact{quantile="0.99"} 67
+consul_raft_leader_lastContact_sum 13872
+consul_raft_leader_lastContact_count 364
+# HELP consul_raft_leader_oldestLogAge This measures how old the oldest log in the leader's log store is.
+# TYPE consul_raft_leader_oldestLogAge gauge
+consul_raft_leader_oldestLogAge 0
+# HELP consul_raft_net_getRPCType consul_raft_net_getRPCType
+# TYPE consul_raft_net_getRPCType summary
+consul_raft_net_getRPCType{quantile="0.5"} NaN
+consul_raft_net_getRPCType{quantile="0.9"} NaN
+consul_raft_net_getRPCType{quantile="0.99"} NaN
+consul_raft_net_getRPCType_sum 269090.0442453362
+consul_raft_net_getRPCType_count 2002
+# HELP consul_raft_net_rpcDecode consul_raft_net_rpcDecode
+# TYPE consul_raft_net_rpcDecode summary
+consul_raft_net_rpcDecode{rpcType="AppendEntries",quantile="0.5"} NaN
+consul_raft_net_rpcDecode{rpcType="AppendEntries",quantile="0.9"} NaN
+consul_raft_net_rpcDecode{rpcType="AppendEntries",quantile="0.99"} NaN
+consul_raft_net_rpcDecode_sum{rpcType="AppendEntries"} 50.56464605871588
+consul_raft_net_rpcDecode_count{rpcType="AppendEntries"} 1811
+consul_raft_net_rpcDecode{rpcType="Heartbeat",quantile="0.5"} NaN
+consul_raft_net_rpcDecode{rpcType="Heartbeat",quantile="0.9"} NaN
+consul_raft_net_rpcDecode{rpcType="Heartbeat",quantile="0.99"} NaN
+consul_raft_net_rpcDecode_sum{rpcType="Heartbeat"} 4.609708994626999
+consul_raft_net_rpcDecode_count{rpcType="Heartbeat"} 189
+consul_raft_net_rpcDecode{rpcType="RequestVote",quantile="0.5"} NaN
+consul_raft_net_rpcDecode{rpcType="RequestVote",quantile="0.9"} NaN
+consul_raft_net_rpcDecode{rpcType="RequestVote",quantile="0.99"} NaN
+consul_raft_net_rpcDecode_sum{rpcType="RequestVote"} 0.052609000355005264
+consul_raft_net_rpcDecode_count{rpcType="RequestVote"} 1
+consul_raft_net_rpcDecode{rpcType="TimeoutNow",quantile="0.5"} NaN
+consul_raft_net_rpcDecode{rpcType="TimeoutNow",quantile="0.9"} NaN
+consul_raft_net_rpcDecode{rpcType="TimeoutNow",quantile="0.99"} NaN
+consul_raft_net_rpcDecode_sum{rpcType="TimeoutNow"} 0.07034999877214432
+consul_raft_net_rpcDecode_count{rpcType="TimeoutNow"} 1
+# HELP consul_raft_net_rpcEnqueue consul_raft_net_rpcEnqueue
+# TYPE consul_raft_net_rpcEnqueue summary
+consul_raft_net_rpcEnqueue{rpcType="AppendEntries",quantile="0.5"} NaN
+consul_raft_net_rpcEnqueue{rpcType="AppendEntries",quantile="0.9"} NaN
+consul_raft_net_rpcEnqueue{rpcType="AppendEntries",quantile="0.99"} NaN
+consul_raft_net_rpcEnqueue_sum{rpcType="AppendEntries"} 61.944881823379546
+consul_raft_net_rpcEnqueue_count{rpcType="AppendEntries"} 1811
+consul_raft_net_rpcEnqueue{rpcType="Heartbeat",quantile="0.5"} NaN
+consul_raft_net_rpcEnqueue{rpcType="Heartbeat",quantile="0.9"} NaN
+consul_raft_net_rpcEnqueue{rpcType="Heartbeat",quantile="0.99"} NaN
+consul_raft_net_rpcEnqueue_sum{rpcType="Heartbeat"} 4.966151000931859
+consul_raft_net_rpcEnqueue_count{rpcType="Heartbeat"} 189
+consul_raft_net_rpcEnqueue{rpcType="RequestVote",quantile="0.5"} NaN
+consul_raft_net_rpcEnqueue{rpcType="RequestVote",quantile="0.9"} NaN
+consul_raft_net_rpcEnqueue{rpcType="RequestVote",quantile="0.99"} NaN
+consul_raft_net_rpcEnqueue_sum{rpcType="RequestVote"} 0.012551000341773033
+consul_raft_net_rpcEnqueue_count{rpcType="RequestVote"} 1
+consul_raft_net_rpcEnqueue{rpcType="TimeoutNow",quantile="0.5"} NaN
+consul_raft_net_rpcEnqueue{rpcType="TimeoutNow",quantile="0.9"} NaN
+consul_raft_net_rpcEnqueue{rpcType="TimeoutNow",quantile="0.99"} NaN
+consul_raft_net_rpcEnqueue_sum{rpcType="TimeoutNow"} 0.021700000390410423
+consul_raft_net_rpcEnqueue_count{rpcType="TimeoutNow"} 1
+# HELP consul_raft_net_rpcRespond consul_raft_net_rpcRespond
+# TYPE consul_raft_net_rpcRespond summary
+consul_raft_net_rpcRespond{rpcType="AppendEntries",quantile="0.5"} NaN
+consul_raft_net_rpcRespond{rpcType="AppendEntries",quantile="0.9"} NaN
+consul_raft_net_rpcRespond{rpcType="AppendEntries",quantile="0.99"} NaN
+consul_raft_net_rpcRespond_sum{rpcType="AppendEntries"} 632.5211075674742
+consul_raft_net_rpcRespond_count{rpcType="AppendEntries"} 1811
+consul_raft_net_rpcRespond{rpcType="Heartbeat",quantile="0.5"} NaN
+consul_raft_net_rpcRespond{rpcType="Heartbeat",quantile="0.9"} NaN
+consul_raft_net_rpcRespond{rpcType="Heartbeat",quantile="0.99"} NaN
+consul_raft_net_rpcRespond_sum{rpcType="Heartbeat"} 2.6388960042968392
+consul_raft_net_rpcRespond_count{rpcType="Heartbeat"} 189
+consul_raft_net_rpcRespond{rpcType="RequestVote",quantile="0.5"} NaN
+consul_raft_net_rpcRespond{rpcType="RequestVote",quantile="0.9"} NaN
+consul_raft_net_rpcRespond{rpcType="RequestVote",quantile="0.99"} NaN
+consul_raft_net_rpcRespond_sum{rpcType="RequestVote"} 27.120553970336914
+consul_raft_net_rpcRespond_count{rpcType="RequestVote"} 1
+consul_raft_net_rpcRespond{rpcType="TimeoutNow",quantile="0.5"} NaN
+consul_raft_net_rpcRespond{rpcType="TimeoutNow",quantile="0.9"} NaN
+consul_raft_net_rpcRespond{rpcType="TimeoutNow",quantile="0.99"} NaN
+consul_raft_net_rpcRespond_sum{rpcType="TimeoutNow"} 0.18450799584388733
+consul_raft_net_rpcRespond_count{rpcType="TimeoutNow"} 1
+# HELP consul_raft_replication_appendEntries_logs consul_raft_replication_appendEntries_logs
+# TYPE consul_raft_replication_appendEntries_logs counter
+consul_raft_replication_appendEntries_logs{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 64
+consul_raft_replication_appendEntries_logs{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 64
+# HELP consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae
+# TYPE consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae counter
+consul_raft_replication_appendEntries_logs_3e75e0af_859b_83e8_779f_f3a6d12f02ae 64
+# HELP consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7
+# TYPE consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 counter
+consul_raft_replication_appendEntries_logs_72849161_41cb_14df_fc9b_563ddff3bae7 64
+# HELP consul_raft_replication_appendEntries_rpc consul_raft_replication_appendEntries_rpc
+# TYPE consul_raft_replication_appendEntries_rpc summary
+consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.5"} 0.7193149924278259
+consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.9"} 1.123671054840088
+consul_raft_replication_appendEntries_rpc{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.99"} 2.9677159786224365
+consul_raft_replication_appendEntries_rpc_sum{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 5982.053934007883
+consul_raft_replication_appendEntries_rpc_count{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 6008
+consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.5"} 0.6742749810218811
+consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.9"} 1.1206400394439697
+consul_raft_replication_appendEntries_rpc{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.99"} 4.632521152496338
+consul_raft_replication_appendEntries_rpc_sum{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 5640.875204831362
+consul_raft_replication_appendEntries_rpc_count{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 6125
+# HELP consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae
+# TYPE consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae summary
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.5"} 0.7773330211639404
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.9"} 1.177711009979248
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.99"} 3.0745749473571777
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae_sum 6255.161469876766
+consul_raft_replication_appendEntries_rpc_3e75e0af_859b_83e8_779f_f3a6d12f02ae_count 6008
+# HELP consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7
+# TYPE consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7 summary
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.5"} 0.7206940054893494
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.9"} 1.1687090396881104
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.99"} 4.6782097816467285
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7_sum 5913.810284465551
+consul_raft_replication_appendEntries_rpc_72849161_41cb_14df_fc9b_563ddff3bae7_count 6125
+# HELP consul_raft_replication_heartbeat consul_raft_replication_heartbeat
+# TYPE consul_raft_replication_heartbeat summary
+consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.5"} 0.6244940161705017
+consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.9"} 1.0416409969329834
+consul_raft_replication_heartbeat{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae",quantile="0.99"} 1.4274380207061768
+consul_raft_replication_heartbeat_sum{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 488.172178208828
+consul_raft_replication_heartbeat_count{peer_id="3e75e0af-859b-83e8-779f-f3a6d12f02ae"} 601
+consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.5"} 0.6106240153312683
+consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.9"} 0.9524030089378357
+consul_raft_replication_heartbeat{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7",quantile="0.99"} 0.9726319909095764
+consul_raft_replication_heartbeat_sum{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 459.77358454465866
+consul_raft_replication_heartbeat_count{peer_id="72849161-41cb-14df-fc9b-563ddff3bae7"} 625
+# HELP consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae
+# TYPE consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae summary
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.5"} 0.65802401304245
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.9"} 1.0810810327529907
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae{quantile="0.99"} 1.4524680376052856
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae_sum 509.27614790201187
+consul_raft_replication_heartbeat_3e75e0af_859b_83e8_779f_f3a6d12f02ae_count 601
+# HELP consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7
+# TYPE consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7 summary
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.5"} 0.6355040073394775
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.9"} 1.000391960144043
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7{quantile="0.99"} 1.0161620378494263
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7_sum 480.9242581129074
+consul_raft_replication_heartbeat_72849161_41cb_14df_fc9b_563ddff3bae7_count 625
+# HELP consul_raft_rpc_appendEntries consul_raft_rpc_appendEntries
+# TYPE consul_raft_rpc_appendEntries summary
+consul_raft_rpc_appendEntries{quantile="0.5"} NaN
+consul_raft_rpc_appendEntries{quantile="0.9"} NaN
+consul_raft_rpc_appendEntries{quantile="0.99"} NaN
+consul_raft_rpc_appendEntries_sum 573.4200130868703
+consul_raft_rpc_appendEntries_count 2000
+# HELP consul_raft_rpc_appendEntries_processLogs consul_raft_rpc_appendEntries_processLogs
+# TYPE consul_raft_rpc_appendEntries_processLogs summary
+consul_raft_rpc_appendEntries_processLogs{quantile="0.5"} NaN
+consul_raft_rpc_appendEntries_processLogs{quantile="0.9"} NaN
+consul_raft_rpc_appendEntries_processLogs{quantile="0.99"} NaN
+consul_raft_rpc_appendEntries_processLogs_sum 148.3990723239258
+consul_raft_rpc_appendEntries_processLogs_count 28
+# HELP consul_raft_rpc_appendEntries_storeLogs consul_raft_rpc_appendEntries_storeLogs
+# TYPE consul_raft_rpc_appendEntries_storeLogs summary
+consul_raft_rpc_appendEntries_storeLogs{quantile="0.5"} NaN
+consul_raft_rpc_appendEntries_storeLogs{quantile="0.9"} NaN
+consul_raft_rpc_appendEntries_storeLogs{quantile="0.99"} NaN
+consul_raft_rpc_appendEntries_storeLogs_sum 395.2212791442871
+consul_raft_rpc_appendEntries_storeLogs_count 29
+# HELP consul_raft_rpc_installSnapshot Measures the time it takes the raft leader to install a snapshot on a follower that is catching up after being down or has just joined the cluster.
+# TYPE consul_raft_rpc_installSnapshot summary
+consul_raft_rpc_installSnapshot{quantile="0.5"} NaN
+consul_raft_rpc_installSnapshot{quantile="0.9"} NaN
+consul_raft_rpc_installSnapshot{quantile="0.99"} NaN
+consul_raft_rpc_installSnapshot_sum 0
+consul_raft_rpc_installSnapshot_count 0
+# HELP consul_raft_rpc_processHeartbeat consul_raft_rpc_processHeartbeat
+# TYPE consul_raft_rpc_processHeartbeat summary
+consul_raft_rpc_processHeartbeat{quantile="0.5"} NaN
+consul_raft_rpc_processHeartbeat{quantile="0.9"} NaN
+consul_raft_rpc_processHeartbeat{quantile="0.99"} NaN
+consul_raft_rpc_processHeartbeat_sum 3.374873999040574
+consul_raft_rpc_processHeartbeat_count 189
+# HELP consul_raft_rpc_requestVote consul_raft_rpc_requestVote
+# TYPE consul_raft_rpc_requestVote summary
+consul_raft_rpc_requestVote{quantile="0.5"} NaN
+consul_raft_rpc_requestVote{quantile="0.9"} NaN
+consul_raft_rpc_requestVote{quantile="0.99"} NaN
+consul_raft_rpc_requestVote_sum 27.062883377075195
+consul_raft_rpc_requestVote_count 1
+# HELP consul_raft_snapshot_persist Measures the time it takes raft to write a new snapshot to disk.
+# TYPE consul_raft_snapshot_persist summary
+consul_raft_snapshot_persist{quantile="0.5"} NaN
+consul_raft_snapshot_persist{quantile="0.9"} NaN
+consul_raft_snapshot_persist{quantile="0.99"} NaN
+consul_raft_snapshot_persist_sum 0
+consul_raft_snapshot_persist_count 0
+# HELP consul_raft_state_candidate This increments whenever a Consul server starts an election.
+# TYPE consul_raft_state_candidate counter
+consul_raft_state_candidate 1
+# HELP consul_raft_state_follower consul_raft_state_follower
+# TYPE consul_raft_state_follower counter
+consul_raft_state_follower 1
+# HELP consul_raft_state_leader This increments whenever a Consul server becomes a leader.
+# TYPE consul_raft_state_leader counter
+consul_raft_state_leader 1
+# HELP consul_raft_thread_fsm_saturation consul_raft_thread_fsm_saturation
+# TYPE consul_raft_thread_fsm_saturation summary
+consul_raft_thread_fsm_saturation{quantile="0.5"} 0
+consul_raft_thread_fsm_saturation{quantile="0.9"} 0
+consul_raft_thread_fsm_saturation{quantile="0.99"} 0
+consul_raft_thread_fsm_saturation_sum 0.14000000059604645
+consul_raft_thread_fsm_saturation_count 75
+# HELP consul_raft_thread_main_saturation consul_raft_thread_main_saturation
+# TYPE consul_raft_thread_main_saturation summary
+consul_raft_thread_main_saturation{quantile="0.5"} 0
+consul_raft_thread_main_saturation{quantile="0.9"} 0.009999999776482582
+consul_raft_thread_main_saturation{quantile="0.99"} 0.009999999776482582
+consul_raft_thread_main_saturation_sum 0.9699999857693911
+consul_raft_thread_main_saturation_count 328
+# HELP consul_raft_verify_leader consul_raft_verify_leader
+# TYPE consul_raft_verify_leader counter
+consul_raft_verify_leader 6
+# HELP consul_rpc_accept_conn Increments when a server accepts an RPC connection.
+# TYPE consul_rpc_accept_conn counter
+consul_rpc_accept_conn 15
+# HELP consul_rpc_consistentRead Measures the time spent confirming that a consistent read can be performed.
+# TYPE consul_rpc_consistentRead summary
+consul_rpc_consistentRead{quantile="0.5"} NaN
+consul_rpc_consistentRead{quantile="0.9"} NaN
+consul_rpc_consistentRead{quantile="0.99"} NaN
+consul_rpc_consistentRead_sum 3.1557260155677795
+consul_rpc_consistentRead_count 6
+# HELP consul_rpc_cross_dc Increments when a server sends a (potentially blocking) cross datacenter RPC query.
+# TYPE consul_rpc_cross_dc counter
+consul_rpc_cross_dc 0
+# HELP consul_rpc_queries_blocking Shows the current number of in-flight blocking queries the server is handling.
+# TYPE consul_rpc_queries_blocking gauge
+consul_rpc_queries_blocking 0
+# HELP consul_rpc_query Increments when a server receives a read request, indicating the rate of new read queries.
+# TYPE consul_rpc_query counter
+consul_rpc_query 19
+# HELP consul_rpc_raft_handoff Increments when a server accepts a Raft-related RPC connection.
+# TYPE consul_rpc_raft_handoff counter
+consul_rpc_raft_handoff 4
+# HELP consul_rpc_request Increments when a server receives a Consul-related RPC request.
+# TYPE consul_rpc_request counter
+consul_rpc_request 936
+# HELP consul_rpc_request_error Increments when a server returns an error from an RPC request.
+# TYPE consul_rpc_request_error counter
+consul_rpc_request_error 0
+# HELP consul_runtime_gc_pause_ns consul_runtime_gc_pause_ns
+# TYPE consul_runtime_gc_pause_ns summary
+consul_runtime_gc_pause_ns{quantile="0.5"} NaN
+consul_runtime_gc_pause_ns{quantile="0.9"} NaN
+consul_runtime_gc_pause_ns{quantile="0.99"} NaN
+consul_runtime_gc_pause_ns_sum 1.565053e+07
+consul_runtime_gc_pause_ns_count 42
+# HELP consul_satya_vm_autopilot_failure_tolerance consul_satya_vm_autopilot_failure_tolerance
+# TYPE consul_satya_vm_autopilot_failure_tolerance gauge
+consul_satya_vm_autopilot_failure_tolerance 1
+# HELP consul_satya_vm_autopilot_healthy consul_satya_vm_autopilot_healthy
+# TYPE consul_satya_vm_autopilot_healthy gauge
+consul_satya_vm_autopilot_healthy 1
+# HELP consul_satya_vm_consul_members_clients consul_satya_vm_consul_members_clients
+# TYPE consul_satya_vm_consul_members_clients gauge
+consul_satya_vm_consul_members_clients{datacenter="us-central"} 0
+# HELP consul_satya_vm_consul_members_servers consul_satya_vm_consul_members_servers
+# TYPE consul_satya_vm_consul_members_servers gauge
+consul_satya_vm_consul_members_servers{datacenter="us-central"} 3
+# HELP consul_satya_vm_consul_state_config_entries consul_satya_vm_consul_state_config_entries
+# TYPE consul_satya_vm_consul_state_config_entries gauge
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="exported-services"} 0
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="ingress-gateway"} 0
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="mesh"} 0
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="proxy-defaults"} 0
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-defaults"} 0
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-intentions"} 0
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-resolver"} 0
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-router"} 0
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="service-splitter"} 0
+consul_satya_vm_consul_state_config_entries{datacenter="us-central",kind="terminating-gateway"} 0
+# HELP consul_satya_vm_consul_state_connect_instances consul_satya_vm_consul_state_connect_instances
+# TYPE consul_satya_vm_consul_state_connect_instances gauge
+consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="connect-native"} 0
+consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="connect-proxy"} 0
+consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="ingress-gateway"} 0
+consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="mesh-gateway"} 0
+consul_satya_vm_consul_state_connect_instances{datacenter="us-central",kind="terminating-gateway"} 0
+# HELP consul_satya_vm_consul_state_kv_entries consul_satya_vm_consul_state_kv_entries
+# TYPE consul_satya_vm_consul_state_kv_entries gauge
+consul_satya_vm_consul_state_kv_entries{datacenter="us-central"} 1
+# HELP consul_satya_vm_consul_state_nodes consul_satya_vm_consul_state_nodes
+# TYPE consul_satya_vm_consul_state_nodes gauge
+consul_satya_vm_consul_state_nodes{datacenter="us-central"} 3
+# HELP consul_satya_vm_consul_state_peerings consul_satya_vm_consul_state_peerings
+# TYPE consul_satya_vm_consul_state_peerings gauge
+consul_satya_vm_consul_state_peerings{datacenter="us-central"} 0
+# HELP consul_satya_vm_consul_state_service_instances consul_satya_vm_consul_state_service_instances
+# TYPE consul_satya_vm_consul_state_service_instances gauge
+consul_satya_vm_consul_state_service_instances{datacenter="us-central"} 4
+# HELP consul_satya_vm_consul_state_services consul_satya_vm_consul_state_services
+# TYPE consul_satya_vm_consul_state_services gauge
+consul_satya_vm_consul_state_services{datacenter="us-central"} 2
+# HELP consul_satya_vm_grpc_client_connections consul_satya_vm_grpc_client_connections
+# TYPE consul_satya_vm_grpc_client_connections gauge
+consul_satya_vm_grpc_client_connections 1
+# HELP consul_satya_vm_grpc_server_connections consul_satya_vm_grpc_server_connections
+# TYPE consul_satya_vm_grpc_server_connections gauge
+consul_satya_vm_grpc_server_connections 0
+# HELP consul_satya_vm_memberlist_health_score consul_satya_vm_memberlist_health_score
+# TYPE consul_satya_vm_memberlist_health_score gauge
+consul_satya_vm_memberlist_health_score{network="lan"} 0
+consul_satya_vm_memberlist_health_score{network="wan"} 0
+# HELP consul_satya_vm_mesh_active_root_ca_expiry consul_satya_vm_mesh_active_root_ca_expiry
+# TYPE consul_satya_vm_mesh_active_root_ca_expiry gauge
+consul_satya_vm_mesh_active_root_ca_expiry NaN
+# HELP consul_satya_vm_mesh_active_signing_ca_expiry consul_satya_vm_mesh_active_signing_ca_expiry
+# TYPE consul_satya_vm_mesh_active_signing_ca_expiry gauge
+consul_satya_vm_mesh_active_signing_ca_expiry NaN
+# HELP consul_satya_vm_raft_applied_index consul_satya_vm_raft_applied_index
+# TYPE consul_satya_vm_raft_applied_index gauge
+consul_satya_vm_raft_applied_index 455437
+# HELP consul_satya_vm_raft_boltdb_freePageBytes consul_satya_vm_raft_boltdb_freePageBytes
+# TYPE consul_satya_vm_raft_boltdb_freePageBytes gauge
+consul_satya_vm_raft_boltdb_freePageBytes 3.960832e+06
+# HELP consul_satya_vm_raft_boltdb_freelistBytes consul_satya_vm_raft_boltdb_freelistBytes
+# TYPE consul_satya_vm_raft_boltdb_freelistBytes gauge
+consul_satya_vm_raft_boltdb_freelistBytes 7752
+# HELP consul_satya_vm_raft_boltdb_numFreePages consul_satya_vm_raft_boltdb_numFreePages
+# TYPE consul_satya_vm_raft_boltdb_numFreePages gauge
+consul_satya_vm_raft_boltdb_numFreePages 961
+# HELP consul_satya_vm_raft_boltdb_numPendingPages consul_satya_vm_raft_boltdb_numPendingPages
+# TYPE consul_satya_vm_raft_boltdb_numPendingPages gauge
+consul_satya_vm_raft_boltdb_numPendingPages 6
+# HELP consul_satya_vm_raft_boltdb_openReadTxn consul_satya_vm_raft_boltdb_openReadTxn
+# TYPE consul_satya_vm_raft_boltdb_openReadTxn gauge
+consul_satya_vm_raft_boltdb_openReadTxn 0
+# HELP consul_satya_vm_raft_boltdb_txstats_pageAlloc consul_satya_vm_raft_boltdb_txstats_pageAlloc
+# TYPE consul_satya_vm_raft_boltdb_txstats_pageAlloc gauge
+consul_satya_vm_raft_boltdb_txstats_pageAlloc 2.465792e+06
+# HELP consul_satya_vm_raft_boltdb_txstats_pageCount consul_satya_vm_raft_boltdb_txstats_pageCount
+# TYPE consul_satya_vm_raft_boltdb_txstats_pageCount gauge
+consul_satya_vm_raft_boltdb_txstats_pageCount 602
+# HELP consul_satya_vm_raft_commitNumLogs consul_satya_vm_raft_commitNumLogs
+# TYPE consul_satya_vm_raft_commitNumLogs gauge
+consul_satya_vm_raft_commitNumLogs 1
+# HELP consul_satya_vm_raft_fsm_lastRestoreDuration consul_satya_vm_raft_fsm_lastRestoreDuration
+# TYPE consul_satya_vm_raft_fsm_lastRestoreDuration gauge
+consul_satya_vm_raft_fsm_lastRestoreDuration 2
+# HELP consul_satya_vm_raft_last_index consul_satya_vm_raft_last_index
+# TYPE consul_satya_vm_raft_last_index gauge
+consul_satya_vm_raft_last_index 455437
+# HELP consul_satya_vm_raft_leader_dispatchNumLogs consul_satya_vm_raft_leader_dispatchNumLogs
+# TYPE consul_satya_vm_raft_leader_dispatchNumLogs gauge
+consul_satya_vm_raft_leader_dispatchNumLogs 1
+# HELP consul_satya_vm_raft_leader_oldestLogAge consul_satya_vm_raft_leader_oldestLogAge
+# TYPE consul_satya_vm_raft_leader_oldestLogAge gauge
+consul_satya_vm_raft_leader_oldestLogAge 1.86193632e+08
+# HELP consul_satya_vm_raft_peers consul_satya_vm_raft_peers
+# TYPE consul_satya_vm_raft_peers gauge
+consul_satya_vm_raft_peers 3
+# HELP consul_satya_vm_rpc_queries_blocking consul_satya_vm_rpc_queries_blocking
+# TYPE consul_satya_vm_rpc_queries_blocking gauge
+consul_satya_vm_rpc_queries_blocking 1
+# HELP consul_satya_vm_runtime_alloc_bytes consul_satya_vm_runtime_alloc_bytes
+# TYPE consul_satya_vm_runtime_alloc_bytes gauge
+consul_satya_vm_runtime_alloc_bytes 3.2406104e+07
+# HELP consul_satya_vm_runtime_free_count consul_satya_vm_runtime_free_count
+# TYPE consul_satya_vm_runtime_free_count gauge
+consul_satya_vm_runtime_free_count 8.260123e+06
+# HELP consul_satya_vm_runtime_heap_objects consul_satya_vm_runtime_heap_objects
+# TYPE consul_satya_vm_runtime_heap_objects gauge
+consul_satya_vm_runtime_heap_objects 118531
+# HELP consul_satya_vm_runtime_malloc_count consul_satya_vm_runtime_malloc_count
+# TYPE consul_satya_vm_runtime_malloc_count gauge
+consul_satya_vm_runtime_malloc_count 8.378654e+06
+# HELP consul_satya_vm_runtime_num_goroutines consul_satya_vm_runtime_num_goroutines
+# TYPE consul_satya_vm_runtime_num_goroutines gauge
+consul_satya_vm_runtime_num_goroutines 123
+# HELP consul_satya_vm_runtime_sys_bytes consul_satya_vm_runtime_sys_bytes
+# TYPE consul_satya_vm_runtime_sys_bytes gauge
+consul_satya_vm_runtime_sys_bytes 7.3614344e+07
+# HELP consul_satya_vm_runtime_total_gc_pause_ns consul_satya_vm_runtime_total_gc_pause_ns
+# TYPE consul_satya_vm_runtime_total_gc_pause_ns gauge
+consul_satya_vm_runtime_total_gc_pause_ns 1.565053e+07
+# HELP consul_satya_vm_runtime_total_gc_runs consul_satya_vm_runtime_total_gc_runs
+# TYPE consul_satya_vm_runtime_total_gc_runs gauge
+consul_satya_vm_runtime_total_gc_runs 42
+# HELP consul_satya_vm_server_isLeader consul_satya_vm_server_isLeader
+# TYPE consul_satya_vm_server_isLeader gauge
+consul_satya_vm_server_isLeader 1
+# HELP consul_satya_vm_session_ttl_active consul_satya_vm_session_ttl_active
+# TYPE consul_satya_vm_session_ttl_active gauge
+consul_satya_vm_session_ttl_active 0
+# HELP consul_satya_vm_version consul_satya_vm_version
+# TYPE consul_satya_vm_version gauge
+consul_satya_vm_version{pre_release="",version="1.13.2"} 1
+# HELP consul_serf_coordinate_adjustment_ms consul_serf_coordinate_adjustment_ms
+# TYPE consul_serf_coordinate_adjustment_ms summary
+consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.5"} 0.18447500467300415
+consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.9"} 0.23558799922466278
+consul_serf_coordinate_adjustment_ms{network="lan",quantile="0.99"} 0.3543170094490051
+consul_serf_coordinate_adjustment_ms_sum{network="lan"} 127.64726796071045
+consul_serf_coordinate_adjustment_ms_count{network="lan"} 559
+consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.5"} 0.11145199835300446
+consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.9"} 0.12108899652957916
+consul_serf_coordinate_adjustment_ms{network="wan",quantile="0.99"} 0.12108899652957916
+consul_serf_coordinate_adjustment_ms_sum{network="wan"} 29.19709792546928
+consul_serf_coordinate_adjustment_ms_count{network="wan"} 112
+# HELP consul_serf_events consul_serf_events
+# TYPE consul_serf_events counter
+consul_serf_events{network="lan"} 2
+# HELP consul_serf_events_consul:new_leader consul_serf_events_consul:new_leader
+# TYPE consul_serf_events_consul:new_leader counter
+consul_serf_events_consul:new_leader{network="lan"} 2
+# HELP consul_serf_member_join consul_serf_member_join
+# TYPE consul_serf_member_join counter
+consul_serf_member_join{network="lan"} 5
+consul_serf_member_join{network="wan"} 5
+# HELP consul_serf_member_left consul_serf_member_left
+# TYPE consul_serf_member_left counter
+consul_serf_member_left{network="lan"} 2
+consul_serf_member_left{network="wan"} 2
+# HELP consul_serf_msgs_received consul_serf_msgs_received
+# TYPE consul_serf_msgs_received summary
+consul_serf_msgs_received{network="lan",quantile="0.5"} NaN
+consul_serf_msgs_received{network="lan",quantile="0.9"} NaN
+consul_serf_msgs_received{network="lan",quantile="0.99"} NaN
+consul_serf_msgs_received_sum{network="lan"} 1066
+consul_serf_msgs_received_count{network="lan"} 33
+consul_serf_msgs_received{network="wan",quantile="0.5"} NaN
+consul_serf_msgs_received{network="wan",quantile="0.9"} NaN
+consul_serf_msgs_received{network="wan",quantile="0.99"} NaN
+consul_serf_msgs_received_sum{network="wan"} 909
+consul_serf_msgs_received_count{network="wan"} 23
+# HELP consul_serf_msgs_sent consul_serf_msgs_sent
+# TYPE consul_serf_msgs_sent summary
+consul_serf_msgs_sent{network="lan",quantile="0.5"} NaN
+consul_serf_msgs_sent{network="lan",quantile="0.9"} NaN
+consul_serf_msgs_sent{network="lan",quantile="0.99"} NaN
+consul_serf_msgs_sent_sum{network="lan"} 1204
+consul_serf_msgs_sent_count{network="lan"} 36
+consul_serf_msgs_sent{network="wan",quantile="0.5"} NaN
+consul_serf_msgs_sent{network="wan",quantile="0.9"} NaN
+consul_serf_msgs_sent{network="wan",quantile="0.99"} NaN
+consul_serf_msgs_sent_sum{network="wan"} 792
+consul_serf_msgs_sent_count{network="wan"} 20
+# HELP consul_serf_queue_Event consul_serf_queue_Event
+# TYPE consul_serf_queue_Event summary
+consul_serf_queue_Event{network="lan",quantile="0.5"} NaN
+consul_serf_queue_Event{network="lan",quantile="0.9"} NaN
+consul_serf_queue_Event{network="lan",quantile="0.99"} NaN
+consul_serf_queue_Event_sum{network="lan"} 0
+consul_serf_queue_Event_count{network="lan"} 19
+consul_serf_queue_Event{network="wan",quantile="0.5"} NaN
+consul_serf_queue_Event{network="wan",quantile="0.9"} NaN
+consul_serf_queue_Event{network="wan",quantile="0.99"} NaN
+consul_serf_queue_Event_sum{network="wan"} 0
+consul_serf_queue_Event_count{network="wan"} 19
+# HELP consul_serf_queue_Intent consul_serf_queue_Intent
+# TYPE consul_serf_queue_Intent summary
+consul_serf_queue_Intent{network="lan",quantile="0.5"} NaN
+consul_serf_queue_Intent{network="lan",quantile="0.9"} NaN
+consul_serf_queue_Intent{network="lan",quantile="0.99"} NaN
+consul_serf_queue_Intent_sum{network="lan"} 0
+consul_serf_queue_Intent_count{network="lan"} 19
+consul_serf_queue_Intent{network="wan",quantile="0.5"} NaN
+consul_serf_queue_Intent{network="wan",quantile="0.9"} NaN
+consul_serf_queue_Intent{network="wan",quantile="0.99"} NaN
+consul_serf_queue_Intent_sum{network="wan"} 1
+consul_serf_queue_Intent_count{network="wan"} 19
+# HELP consul_serf_queue_Query consul_serf_queue_Query
+# TYPE consul_serf_queue_Query summary
+consul_serf_queue_Query{network="lan",quantile="0.5"} NaN
+consul_serf_queue_Query{network="lan",quantile="0.9"} NaN
+consul_serf_queue_Query{network="lan",quantile="0.99"} NaN
+consul_serf_queue_Query_sum{network="lan"} 0
+consul_serf_queue_Query_count{network="lan"} 19
+consul_serf_queue_Query{network="wan",quantile="0.5"} NaN
+consul_serf_queue_Query{network="wan",quantile="0.9"} NaN
+consul_serf_queue_Query{network="wan",quantile="0.99"} NaN
+consul_serf_queue_Query_sum{network="wan"} 0
+consul_serf_queue_Query_count{network="wan"} 19
+# HELP consul_serf_snapshot_appendLine consul_serf_snapshot_appendLine
+# TYPE consul_serf_snapshot_appendLine summary
+consul_serf_snapshot_appendLine{network="lan",quantile="0.5"} NaN
+consul_serf_snapshot_appendLine{network="lan",quantile="0.9"} NaN
+consul_serf_snapshot_appendLine{network="lan",quantile="0.99"} NaN
+consul_serf_snapshot_appendLine_sum{network="lan"} 0.3810300036566332
+consul_serf_snapshot_appendLine_count{network="lan"} 15
+consul_serf_snapshot_appendLine{network="wan",quantile="0.5"} NaN
+consul_serf_snapshot_appendLine{network="wan",quantile="0.9"} NaN
+consul_serf_snapshot_appendLine{network="wan",quantile="0.99"} NaN
+consul_serf_snapshot_appendLine_sum{network="wan"} 0.3907299981219694
+consul_serf_snapshot_appendLine_count{network="wan"} 13
+# HELP consul_server_isLeader Tracks if the server is a leader.
+# TYPE consul_server_isLeader gauge
+consul_server_isLeader 0
+# HELP consul_session_apply Measures the time spent applying a session update.
+# TYPE consul_session_apply summary
+consul_session_apply{quantile="0.5"} NaN
+consul_session_apply{quantile="0.9"} NaN
+consul_session_apply{quantile="0.99"} NaN
+consul_session_apply_sum 0
+consul_session_apply_count 0
+# HELP consul_session_renew Measures the time spent renewing a session.
+# TYPE consul_session_renew summary
+consul_session_renew{quantile="0.5"} NaN
+consul_session_renew{quantile="0.9"} NaN
+consul_session_renew{quantile="0.99"} NaN
+consul_session_renew_sum 0
+consul_session_renew_count 0
+# HELP consul_session_ttl_active Tracks the active number of sessions being tracked.
+# TYPE consul_session_ttl_active gauge
+consul_session_ttl_active 0
+# HELP consul_session_ttl_invalidate Measures the time spent invalidating an expired session.
+# TYPE consul_session_ttl_invalidate summary
+consul_session_ttl_invalidate{quantile="0.5"} NaN
+consul_session_ttl_invalidate{quantile="0.9"} NaN
+consul_session_ttl_invalidate{quantile="0.99"} NaN
+consul_session_ttl_invalidate_sum 0
+consul_session_ttl_invalidate_count 0
+# HELP consul_txn_apply Measures the time spent applying a transaction operation.
+# TYPE consul_txn_apply summary
+consul_txn_apply{quantile="0.5"} NaN
+consul_txn_apply{quantile="0.9"} NaN
+consul_txn_apply{quantile="0.99"} NaN
+consul_txn_apply_sum 0
+consul_txn_apply_count 0
+# HELP consul_txn_read Measures the time spent returning a read transaction.
+# TYPE consul_txn_read summary
+consul_txn_read{quantile="0.5"} NaN
+consul_txn_read{quantile="0.9"} NaN
+consul_txn_read{quantile="0.99"} NaN
+consul_txn_read_sum 0
+consul_txn_read_count 0
+# HELP consul_version Represents the Consul version.
+# TYPE consul_version gauge
+consul_version 0
+# HELP consul_xds_server_streams Measures the number of active xDS streams handled by the server split by protocol version.
+# TYPE consul_xds_server_streams gauge
+consul_xds_server_streams 0
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 5.3269e-05
+go_gc_duration_seconds{quantile="0.25"} 0.000130599
+go_gc_duration_seconds{quantile="0.5"} 0.000271028
+go_gc_duration_seconds{quantile="0.75"} 0.000362027
+go_gc_duration_seconds{quantile="1"} 0.002227924
+go_gc_duration_seconds_sum 0.01565053
+go_gc_duration_seconds_count 42
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 130
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.18.1"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 3.2922384e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 7.39548784e+08
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.625099e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 8.260339e+06
+# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
+# TYPE go_memstats_gc_cpu_fraction gauge
+go_memstats_gc_cpu_fraction 7.265691723511656e-05
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 6.583e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 3.2922384e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 2.3904256e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 3.72736e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 122074
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 1.6113664e+07
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 6.1177856e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.6713887082058973e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 8.382413e+06
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 9600
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 15600
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 375768
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 603840
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 4.5858448e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 1.872245e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 1.736704e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 1.736704e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 7.3614344e+07
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 14
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 20.7
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1024
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 33
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 1.22032128e+08
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.67138812259e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 8.48359424e+08
+# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
+# TYPE process_virtual_memory_max_bytes gauge
+process_virtual_memory_max_bytes -1 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self.json b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self.json
new file mode 100644
index 000000000..0b11cda53
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self.json
@@ -0,0 +1,50 @@
+{
+ "Config": {
+ "Datacenter": "us-central",
+ "PrimaryDatacenter": "us-central",
+ "NodeName": "satya-vm",
+ "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d",
+ "Revision": "0e046bbb",
+ "Server": true,
+ "Version": "1.13.2",
+ "BuildDate": "2022-09-20T20:30:07Z"
+ },
+ "DebugConfig": {
+ "Telemetry": {
+ "AllowedPrefixes": [],
+ "BlockedPrefixes": [
+ "consul.rpc.server.call"
+ ],
+ "CirconusAPIApp": "",
+ "CirconusAPIToken": "hidden",
+ "CirconusAPIURL": "",
+ "CirconusBrokerID": "",
+ "CirconusBrokerSelectTag": "",
+ "CirconusCheckDisplayName": "",
+ "CirconusCheckForceMetricActivation": "",
+ "CirconusCheckID": "",
+ "CirconusCheckInstanceID": "",
+ "CirconusCheckSearchTag": "",
+ "CirconusCheckTags": "",
+ "CirconusSubmissionInterval": "",
+ "CirconusSubmissionURL": "",
+ "Disable": false,
+ "DisableHostname": false,
+ "DogstatsdAddr": "",
+ "DogstatsdTags": [],
+ "FilterDefault": true,
+ "MetricsPrefix": "consul",
+ "PrometheusOpts": {
+ "CounterDefinitions": [],
+ "Expiration": "2m0s",
+ "GaugeDefinitions": [],
+ "Name": "consul",
+ "Registerer": null,
+ "SummaryDefinitions": []
+ },
+ "RetryFailedConfiguration": true,
+ "StatsdAddr": "",
+ "StatsiteAddr": ""
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_cloud-managed.json b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_cloud-managed.json
new file mode 100644
index 000000000..0b11cda53
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_cloud-managed.json
@@ -0,0 +1,50 @@
+{
+ "Config": {
+ "Datacenter": "us-central",
+ "PrimaryDatacenter": "us-central",
+ "NodeName": "satya-vm",
+ "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d",
+ "Revision": "0e046bbb",
+ "Server": true,
+ "Version": "1.13.2",
+ "BuildDate": "2022-09-20T20:30:07Z"
+ },
+ "DebugConfig": {
+ "Telemetry": {
+ "AllowedPrefixes": [],
+ "BlockedPrefixes": [
+ "consul.rpc.server.call"
+ ],
+ "CirconusAPIApp": "",
+ "CirconusAPIToken": "hidden",
+ "CirconusAPIURL": "",
+ "CirconusBrokerID": "",
+ "CirconusBrokerSelectTag": "",
+ "CirconusCheckDisplayName": "",
+ "CirconusCheckForceMetricActivation": "",
+ "CirconusCheckID": "",
+ "CirconusCheckInstanceID": "",
+ "CirconusCheckSearchTag": "",
+ "CirconusCheckTags": "",
+ "CirconusSubmissionInterval": "",
+ "CirconusSubmissionURL": "",
+ "Disable": false,
+ "DisableHostname": false,
+ "DogstatsdAddr": "",
+ "DogstatsdTags": [],
+ "FilterDefault": true,
+ "MetricsPrefix": "consul",
+ "PrometheusOpts": {
+ "CounterDefinitions": [],
+ "Expiration": "2m0s",
+ "GaugeDefinitions": [],
+ "Name": "consul",
+ "Registerer": null,
+ "SummaryDefinitions": []
+ },
+ "RetryFailedConfiguration": true,
+ "StatsdAddr": "",
+ "StatsiteAddr": ""
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_disabled_prom.json b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_disabled_prom.json
new file mode 100644
index 000000000..c964d10fe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_disabled_prom.json
@@ -0,0 +1,50 @@
+{
+ "Config": {
+ "Datacenter": "us-central",
+ "PrimaryDatacenter": "us-central",
+ "NodeName": "satya-vm",
+ "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d",
+ "Revision": "0e046bbb",
+ "Server": true,
+ "Version": "1.13.2",
+ "BuildDate": "2022-09-20T20:30:07Z"
+ },
+ "DebugConfig": {
+ "Telemetry": {
+ "AllowedPrefixes": [],
+ "BlockedPrefixes": [
+ "consul.rpc.server.call"
+ ],
+ "CirconusAPIApp": "",
+ "CirconusAPIToken": "hidden",
+ "CirconusAPIURL": "",
+ "CirconusBrokerID": "",
+ "CirconusBrokerSelectTag": "",
+ "CirconusCheckDisplayName": "",
+ "CirconusCheckForceMetricActivation": "",
+ "CirconusCheckID": "",
+ "CirconusCheckInstanceID": "",
+ "CirconusCheckSearchTag": "",
+ "CirconusCheckTags": "",
+ "CirconusSubmissionInterval": "",
+ "CirconusSubmissionURL": "",
+ "Disable": false,
+ "DisableHostname": false,
+ "DogstatsdAddr": "",
+ "DogstatsdTags": [],
+ "FilterDefault": true,
+ "MetricsPrefix": "consul",
+ "PrometheusOpts": {
+ "CounterDefinitions": [],
+ "Expiration": "0s",
+ "GaugeDefinitions": [],
+ "Name": "consul",
+ "Registerer": null,
+ "SummaryDefinitions": []
+ },
+ "RetryFailedConfiguration": true,
+ "StatsdAddr": "",
+ "StatsiteAddr": ""
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_with_hostname.json b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_with_hostname.json
new file mode 100644
index 000000000..dfe37bcc0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-agent-self_with_hostname.json
@@ -0,0 +1,50 @@
+{
+ "Config": {
+ "Datacenter": "us-central",
+ "PrimaryDatacenter": "us-central",
+ "NodeName": "satya-vm",
+ "NodeID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d",
+ "Revision": "0e046bbb",
+ "Server": false,
+ "Version": "1.13.2",
+ "BuildDate": "2022-09-20T20:30:07Z"
+ },
+ "DebugConfig": {
+ "Telemetry": {
+ "AllowedPrefixes": [],
+ "BlockedPrefixes": [
+ "consul.rpc.server.call"
+ ],
+ "CirconusAPIApp": "",
+ "CirconusAPIToken": "hidden",
+ "CirconusAPIURL": "",
+ "CirconusBrokerID": "",
+ "CirconusBrokerSelectTag": "",
+ "CirconusCheckDisplayName": "",
+ "CirconusCheckForceMetricActivation": "",
+ "CirconusCheckID": "",
+ "CirconusCheckInstanceID": "",
+ "CirconusCheckSearchTag": "",
+ "CirconusCheckTags": "",
+ "CirconusSubmissionInterval": "",
+ "CirconusSubmissionURL": "",
+ "Disable": false,
+ "DisableHostname": false,
+ "DogstatsdAddr": "",
+ "DogstatsdTags": [],
+ "FilterDefault": true,
+ "MetricsPrefix": "consul",
+ "PrometheusOpts": {
+ "CounterDefinitions": [],
+ "Expiration": "10m0s",
+ "GaugeDefinitions": [],
+ "Name": "consul",
+ "Registerer": null,
+ "SummaryDefinitions": []
+ },
+ "RetryFailedConfiguration": true,
+ "StatsdAddr": "",
+ "StatsiteAddr": ""
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-coordinate-nodes.json b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-coordinate-nodes.json
new file mode 100644
index 000000000..8f3f63839
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-coordinate-nodes.json
@@ -0,0 +1,59 @@
+[
+ {
+ "Node": "satya-vm",
+ "Segment": "",
+ "Coord": {
+ "Vec": [
+ 0.014829503547751722,
+ 0.0072173849395880596,
+ 0.004329474334739038,
+ -0.0032798752739064438,
+ -0.010134170963372591,
+ -0.008257638503292454,
+ 0.00752142875530981,
+ 0.0017901665053347217
+ ],
+ "Error": 0.493977389081921,
+ "Adjustment": 0.00017401717315766792,
+ "Height": 2.8272088782225915e-05
+ }
+ },
+ {
+ "Node": "satya-vm2",
+ "Segment": "",
+ "Coord": {
+ "Vec": [
+ 0.01485399579339927,
+ 0.007233318963330601,
+ 0.004314864811042585,
+ -0.0032764668107421653,
+ -0.010133938771787391,
+ -0.008238915750721635,
+ 0.0075168683512753035,
+ 0.001776534386752108
+ ],
+ "Error": 0.3003366063730667,
+ "Adjustment": 0.00019935098724887628,
+ "Height": 4.192904954404545e-05
+ }
+ },
+ {
+ "Node": "satya-vm3",
+ "Segment": "",
+ "Coord": {
+ "Vec": [
+ 0.014782092899311995,
+ 0.007186516660508205,
+ 0.004357885422476095,
+ -0.003286526239099157,
+ -0.010134722455521066,
+ -0.008294075475167818,
+ 0.007530358624901773,
+ 0.0018166544975743123
+ ],
+ "Error": 0.12048664650994341,
+ "Adjustment": 0.00014477073973997567,
+ "Height": 0.0005656138448826895
+ }
+ }
+]
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-operator-autopilot-health.json b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-operator-autopilot-health.json
new file mode 100644
index 000000000..4acee01ec
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/server_v1-operator-autopilot-health.json
@@ -0,0 +1,48 @@
+{
+ "Healthy": true,
+ "FailureTolerance": 1,
+ "Servers": [
+ {
+ "ID": "72849161-41cb-14df-fc9b-563ddff3bae7",
+ "Name": "satya-vm3",
+ "Address": "10.10.30.119:8300",
+ "SerfStatus": "alive",
+ "Version": "1.13.2",
+ "Leader": false,
+ "LastContact": "54.653679ms",
+ "LastTerm": 29,
+ "LastIndex": 486777,
+ "Healthy": true,
+ "Voter": true,
+ "StableSince": "2022-12-21T13:53:42Z"
+ },
+ {
+ "ID": "3e75e0af-859b-83e8-779f-f3a6d12f02ae",
+ "Name": "satya-vm2",
+ "Address": "10.10.30.176:8300",
+ "SerfStatus": "alive",
+ "Version": "1.13.2",
+ "Leader": true,
+ "LastContact": "0ms",
+ "LastTerm": 29,
+ "LastIndex": 486777,
+ "Healthy": true,
+ "Voter": true,
+ "StableSince": "2022-12-21T13:53:46Z"
+ },
+ {
+ "ID": "d86b8af4-5dc5-d790-7c32-420d4ac1dd8d",
+ "Name": "satya-vm",
+ "Address": "10.10.30.177:8300",
+ "SerfStatus": "alive",
+ "Version": "1.13.2",
+ "Leader": false,
+ "LastContact": "13.211617ms",
+ "LastTerm": 29,
+ "LastIndex": 486777,
+ "Healthy": true,
+ "Voter": true,
+ "StableSince": "2022-12-20T09:55:28Z"
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/v1-agent-checks.json b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/v1-agent-checks.json
new file mode 100644
index 000000000..b8967cb74
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.13.2/v1-agent-checks.json
@@ -0,0 +1,68 @@
+{
+ "chk1": {
+ "Node": "mysql1",
+ "CheckID": "chk1",
+ "Name": "ssh",
+ "Status": "passing",
+ "Notes": "",
+ "Output": "TCP connect 127.0.0.1:22: Success",
+ "ServiceID": "",
+ "ServiceName": "",
+ "ServiceTags": [
+ ],
+ "Definition": {
+ },
+ "CreateIndex": 0,
+ "ModifyIndex": 0
+ },
+ "chk2": {
+ "Node": "mysql1",
+ "CheckID": "chk2",
+ "Name": "telnet",
+ "Status": "critical",
+ "Notes": "",
+ "Output": "dial tcp 127.0.0.1:23: connect: connection refused",
+ "ServiceID": "",
+ "ServiceName": "",
+ "ServiceTags": [
+ ],
+ "Definition": {
+ },
+ "CreateIndex": 0,
+ "ModifyIndex": 0
+ },
+ "chk3": {
+ "Node": "mysql1",
+ "CheckID": "chk3",
+ "Name": "telnet",
+ "Status": "critical",
+ "Notes": "",
+ "Output": "dial tcp 127.0.0.1:23: connect: connection refused",
+ "ServiceID": "",
+ "ServiceName": "",
+ "ServiceTags": [
+ ],
+ "Definition": {
+ },
+ "CreateIndex": 0,
+ "ModifyIndex": 0
+ },
+ "mysql": {
+ "Node": "mysql1",
+ "CheckID": "mysql",
+ "Name": "MYSQL TCP on port 3336",
+ "Status": "critical",
+ "Notes": "",
+ "Output": "dial tcp 127.0.0.1:3336: connect: connection refused",
+ "ServiceID": "mysql0",
+ "ServiceName": "mysql",
+ "ServiceTags": [
+ "primary",
+ "secondary"
+ ],
+ "Definition": {
+ },
+ "CreateIndex": 0,
+ "ModifyIndex": 0
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-metrics.txt b/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-metrics.txt
new file mode 100644
index 000000000..094f03508
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-metrics.txt
@@ -0,0 +1,1502 @@
+# HELP consul_acl_ResolveToken This measures the time it takes to resolve an ACL token.
+# TYPE consul_acl_ResolveToken summary
+consul_acl_ResolveToken{quantile="0.5"} 0.05904199928045273
+consul_acl_ResolveToken{quantile="0.9"} 0.1010729968547821
+consul_acl_ResolveToken{quantile="0.99"} 0.18903599679470062
+consul_acl_ResolveToken_sum 59019.61223328998
+consul_acl_ResolveToken_count 863476
+# HELP consul_acl_authmethod_delete
+# TYPE consul_acl_authmethod_delete summary
+consul_acl_authmethod_delete{quantile="0.5"} NaN
+consul_acl_authmethod_delete{quantile="0.9"} NaN
+consul_acl_authmethod_delete{quantile="0.99"} NaN
+consul_acl_authmethod_delete_sum 0
+consul_acl_authmethod_delete_count 0
+# HELP consul_acl_authmethod_upsert
+# TYPE consul_acl_authmethod_upsert summary
+consul_acl_authmethod_upsert{quantile="0.5"} NaN
+consul_acl_authmethod_upsert{quantile="0.9"} NaN
+consul_acl_authmethod_upsert{quantile="0.99"} NaN
+consul_acl_authmethod_upsert_sum 0
+consul_acl_authmethod_upsert_count 0
+# HELP consul_acl_bindingrule_delete
+# TYPE consul_acl_bindingrule_delete summary
+consul_acl_bindingrule_delete{quantile="0.5"} NaN
+consul_acl_bindingrule_delete{quantile="0.9"} NaN
+consul_acl_bindingrule_delete{quantile="0.99"} NaN
+consul_acl_bindingrule_delete_sum 0
+consul_acl_bindingrule_delete_count 0
+# HELP consul_acl_bindingrule_upsert
+# TYPE consul_acl_bindingrule_upsert summary
+consul_acl_bindingrule_upsert{quantile="0.5"} NaN
+consul_acl_bindingrule_upsert{quantile="0.9"} NaN
+consul_acl_bindingrule_upsert{quantile="0.99"} NaN
+consul_acl_bindingrule_upsert_sum 0
+consul_acl_bindingrule_upsert_count 0
+# HELP consul_acl_blocked_check_deregistration Increments whenever a deregistration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_deregistration counter
+consul_acl_blocked_check_deregistration 0
+# HELP consul_acl_blocked_check_registration Increments whenever a registration fails for a check (blocked by an ACL)
+# TYPE consul_acl_blocked_check_registration counter
+consul_acl_blocked_check_registration 0
+# HELP consul_acl_blocked_node_registration Increments whenever a registration fails for a node (blocked by an ACL)
+# TYPE consul_acl_blocked_node_registration counter
+consul_acl_blocked_node_registration 0
+# HELP consul_acl_blocked_service_deregistration Increments whenever a deregistration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_deregistration counter
+consul_acl_blocked_service_deregistration 0
+# HELP consul_acl_blocked_service_registration Increments whenever a registration fails for a service (blocked by an ACL)
+# TYPE consul_acl_blocked_service_registration counter
+consul_acl_blocked_service_registration 0
+# HELP consul_acl_login
+# TYPE consul_acl_login summary
+consul_acl_login{quantile="0.5"} NaN
+consul_acl_login{quantile="0.9"} NaN
+consul_acl_login{quantile="0.99"} NaN
+consul_acl_login_sum 0
+consul_acl_login_count 0
+# HELP consul_acl_logout
+# TYPE consul_acl_logout summary
+consul_acl_logout{quantile="0.5"} NaN
+consul_acl_logout{quantile="0.9"} NaN
+consul_acl_logout{quantile="0.99"} NaN
+consul_acl_logout_sum 0
+consul_acl_logout_count 0
+# HELP consul_acl_policy_delete
+# TYPE consul_acl_policy_delete summary
+consul_acl_policy_delete{quantile="0.5"} NaN
+consul_acl_policy_delete{quantile="0.9"} NaN
+consul_acl_policy_delete{quantile="0.99"} NaN
+consul_acl_policy_delete_sum 2.2944839000701904
+consul_acl_policy_delete_count 1
+# HELP consul_acl_policy_upsert
+# TYPE consul_acl_policy_upsert summary
+consul_acl_policy_upsert{quantile="0.5"} NaN
+consul_acl_policy_upsert{quantile="0.9"} NaN
+consul_acl_policy_upsert{quantile="0.99"} NaN
+consul_acl_policy_upsert_sum 173.05634947121143
+consul_acl_policy_upsert_count 11
+# HELP consul_acl_role_delete
+# TYPE consul_acl_role_delete summary
+consul_acl_role_delete{quantile="0.5"} NaN
+consul_acl_role_delete{quantile="0.9"} NaN
+consul_acl_role_delete{quantile="0.99"} NaN
+consul_acl_role_delete_sum 0
+consul_acl_role_delete_count 0
+# HELP consul_acl_role_upsert
+# TYPE consul_acl_role_upsert summary
+consul_acl_role_upsert{quantile="0.5"} NaN
+consul_acl_role_upsert{quantile="0.9"} NaN
+consul_acl_role_upsert{quantile="0.99"} NaN
+consul_acl_role_upsert_sum 0
+consul_acl_role_upsert_count 0
+# HELP consul_acl_token_cache_hit Increments if Consul is able to resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_hit counter
+consul_acl_token_cache_hit 0
+# HELP consul_acl_token_cache_miss Increments if Consul cannot resolve a token's identity, or a legacy token, from the cache.
+# TYPE consul_acl_token_cache_miss counter
+consul_acl_token_cache_miss 0
+# HELP consul_acl_token_clone
+# TYPE consul_acl_token_clone summary
+consul_acl_token_clone{quantile="0.5"} NaN
+consul_acl_token_clone{quantile="0.9"} NaN
+consul_acl_token_clone{quantile="0.99"} NaN
+consul_acl_token_clone_sum 0
+consul_acl_token_clone_count 0
+# HELP consul_acl_token_delete
+# TYPE consul_acl_token_delete summary
+consul_acl_token_delete{quantile="0.5"} NaN
+consul_acl_token_delete{quantile="0.9"} NaN
+consul_acl_token_delete{quantile="0.99"} NaN
+consul_acl_token_delete_sum 35.43468403816223
+consul_acl_token_delete_count 12
+# HELP consul_acl_token_upsert
+# TYPE consul_acl_token_upsert summary
+consul_acl_token_upsert{quantile="0.5"} NaN
+consul_acl_token_upsert{quantile="0.9"} NaN
+consul_acl_token_upsert{quantile="0.99"} NaN
+consul_acl_token_upsert_sum 33.15468955039978
+consul_acl_token_upsert_count 9
+# HELP consul_agent_event consul_agent_event
+# TYPE consul_agent_event counter
+consul_agent_event 793609
+# HELP consul_agent_tls_cert_expiry Seconds until the agent tls certificate expires. Updated every hour
+# TYPE consul_agent_tls_cert_expiry gauge
+consul_agent_tls_cert_expiry 0
+# HELP consul_agent_write_event consul_agent_write_event
+# TYPE consul_agent_write_event summary
+consul_agent_write_event{quantile="0.5"} 0.012071000412106514
+consul_agent_write_event{quantile="0.9"} 0.03231099992990494
+consul_agent_write_event{quantile="0.99"} 0.038460999727249146
+consul_agent_write_event_sum 17825.32184328325
+consul_agent_write_event_count 793609
+# HELP consul_api_http Samples how long it takes to service the given HTTP request for the given verb and path.
+# TYPE consul_api_http summary
+consul_api_http{quantile="0.5"} NaN
+consul_api_http{quantile="0.9"} NaN
+consul_api_http{quantile="0.99"} NaN
+consul_api_http_sum 0
+consul_api_http_count 0
+consul_api_http{method="GET",path="v1_acl_policy_",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_acl_policy_",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_acl_policy_",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_acl_policy_"} 0.3439910039305687
+consul_api_http_count{method="GET",path="v1_acl_policy_"} 2
+consul_api_http{method="GET",path="v1_acl_policy_name_",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_acl_policy_name_",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_acl_policy_name_",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_acl_policy_name_"} 0.2537579983472824
+consul_api_http_count{method="GET",path="v1_acl_policy_name_"} 2
+consul_api_http{method="GET",path="v1_acl_token_",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_acl_token_",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_acl_token_",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_acl_token_"} 292.9099607616663
+consul_api_http_count{method="GET",path="v1_acl_token_"} 1447
+consul_api_http{method="GET",path="v1_agent_members",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_agent_members",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_agent_members",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_agent_members"} 1504.3780329823494
+consul_api_http_count{method="GET",path="v1_agent_members"} 15059
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_agent_metrics",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_agent_metrics"} 47773.76364764571
+consul_api_http_count{method="GET",path="v1_agent_metrics"} 10129
+consul_api_http{method="GET",path="v1_agent_self",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_agent_self",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_agent_self",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_agent_self"} 9246.783903598785
+consul_api_http_count{method="GET",path="v1_agent_self"} 7567
+consul_api_http{method="GET",path="v1_catalog_node-services_",quantile="0.5"} 0.8214660286903381
+consul_api_http{method="GET",path="v1_catalog_node-services_",quantile="0.9"} 1.1057649850845337
+consul_api_http{method="GET",path="v1_catalog_node-services_",quantile="0.99"} 1.1057649850845337
+consul_api_http_sum{method="GET",path="v1_catalog_node-services_"} 824.5040957331657
+consul_api_http_count{method="GET",path="v1_catalog_node-services_"} 1069
+consul_api_http{method="GET",path="v1_catalog_nodes",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_catalog_nodes",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_catalog_nodes",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_catalog_nodes"} 0.37226200103759766
+consul_api_http_count{method="GET",path="v1_catalog_nodes"} 1
+consul_api_http{method="GET",path="v1_catalog_service_",quantile="0.5"} 0.538116991519928
+consul_api_http{method="GET",path="v1_catalog_service_",quantile="0.9"} 0.6367400288581848
+consul_api_http{method="GET",path="v1_catalog_service_",quantile="0.99"} 0.6367400288581848
+consul_api_http_sum{method="GET",path="v1_catalog_service_"} 43381.559261500835
+consul_api_http_count{method="GET",path="v1_catalog_service_"} 75066
+consul_api_http{method="GET",path="v1_internal_ui_catalog-overview",quantile="0.5"} 0.2639490067958832
+consul_api_http{method="GET",path="v1_internal_ui_catalog-overview",quantile="0.9"} 0.2639490067958832
+consul_api_http{method="GET",path="v1_internal_ui_catalog-overview",quantile="0.99"} 0.2639490067958832
+consul_api_http_sum{method="GET",path="v1_internal_ui_catalog-overview"} 3496.612477712333
+consul_api_http_count{method="GET",path="v1_internal_ui_catalog-overview"} 14553
+consul_api_http{method="GET",path="v1_namespace_",quantile="0.5"} 0.14019399881362915
+consul_api_http{method="GET",path="v1_namespace_",quantile="0.9"} 0.29843899607658386
+consul_api_http{method="GET",path="v1_namespace_",quantile="0.99"} 0.29843899607658386
+consul_api_http_sum{method="GET",path="v1_namespace_"} 6329.847745008767
+consul_api_http_count{method="GET",path="v1_namespace_"} 30022
+consul_api_http{method="GET",path="v1_operator_autopilot_health",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_operator_autopilot_health",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_operator_autopilot_health",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_operator_autopilot_health"} 1326.0989246219397
+consul_api_http_count{method="GET",path="v1_operator_autopilot_health"} 7747
+consul_api_http{method="GET",path="v1_partitions",quantile="0.5"} NaN
+consul_api_http{method="GET",path="v1_partitions",quantile="0.9"} NaN
+consul_api_http{method="GET",path="v1_partitions",quantile="0.99"} NaN
+consul_api_http_sum{method="GET",path="v1_partitions"} 3190.110695719719
+consul_api_http_count{method="GET",path="v1_partitions"} 4136
+consul_api_http{method="GET",path="v1_status_leader",quantile="0.5"} 0.07637300342321396
+consul_api_http{method="GET",path="v1_status_leader",quantile="0.9"} 0.07637300342321396
+consul_api_http{method="GET",path="v1_status_leader",quantile="0.99"} 0.07637300342321396
+consul_api_http_sum{method="GET",path="v1_status_leader"} 4829.641642797738
+consul_api_http_count{method="GET",path="v1_status_leader"} 45620
+consul_api_http{method="PUT",path="v1_catalog_register",quantile="0.5"} 2.291783094406128
+consul_api_http{method="PUT",path="v1_catalog_register",quantile="0.9"} 2.9903249740600586
+consul_api_http{method="PUT",path="v1_catalog_register",quantile="0.99"} 2.9903249740600586
+consul_api_http_sum{method="PUT",path="v1_catalog_register"} 284584.19143879414
+consul_api_http_count{method="PUT",path="v1_catalog_register"} 90170
+# HELP consul_autopilot_failure_tolerance Tracks the number of voting servers that the cluster can lose while continuing to function.
+# TYPE consul_autopilot_failure_tolerance gauge
+consul_autopilot_failure_tolerance 0
+# HELP consul_autopilot_failure_tolerance_failure_tolerance consul_autopilot_failure_tolerance_failure_tolerance
+# TYPE consul_autopilot_failure_tolerance_failure_tolerance gauge
+consul_autopilot_failure_tolerance_failure_tolerance 0
+# HELP consul_autopilot_healthy Tracks the overall health of the local server cluster. 1 if all servers are healthy, 0 if one or more are unhealthy.
+# TYPE consul_autopilot_healthy gauge
+consul_autopilot_healthy 0
+# HELP consul_autopilot_healthy_healthy consul_autopilot_healthy_healthy
+# TYPE consul_autopilot_healthy_healthy gauge
+consul_autopilot_healthy_healthy 1
+# HELP consul_cache_bypass Counts how many times a request bypassed the cache because no cache-key was provided.
+# TYPE consul_cache_bypass counter
+consul_cache_bypass 0
+# HELP consul_cache_connect_ca_leaf_fetch_success consul_cache_connect_ca_leaf_fetch_success
+# TYPE consul_cache_connect_ca_leaf_fetch_success counter
+consul_cache_connect_ca_leaf_fetch_success{result_not_modified="false"} 2
+# HELP consul_cache_connect_ca_root_fetch_success consul_cache_connect_ca_root_fetch_success
+# TYPE consul_cache_connect_ca_root_fetch_success counter
+consul_cache_connect_ca_root_fetch_success{result_not_modified="false"} 271
+# HELP consul_cache_connect_ca_root_hit consul_cache_connect_ca_root_hit
+# TYPE consul_cache_connect_ca_root_hit counter
+consul_cache_connect_ca_root_hit 2
+# HELP consul_cache_entries_count Represents the number of entries in this cache.
+# TYPE consul_cache_entries_count gauge
+consul_cache_entries_count 0
+# HELP consul_cache_entries_count_entries_count consul_cache_entries_count_entries_count
+# TYPE consul_cache_entries_count_entries_count gauge
+consul_cache_entries_count_entries_count 30
+# HELP consul_cache_evict_expired Counts the number of expired entries that are evicted.
+# TYPE consul_cache_evict_expired counter
+consul_cache_evict_expired 1
+# HELP consul_cache_fetch_error Counts the number of failed fetches by the cache.
+# TYPE consul_cache_fetch_error counter
+consul_cache_fetch_error 0
+# HELP consul_cache_fetch_success Counts the number of successful fetches by the cache.
+# TYPE consul_cache_fetch_success counter
+consul_cache_fetch_success 0
+consul_cache_fetch_success{result_not_modified="false"} 1381
+# HELP consul_catalog_connect_not_found Increments for each connect-based catalog query where the given service could not be found.
+# TYPE consul_catalog_connect_not_found counter
+consul_catalog_connect_not_found 0
+# HELP consul_catalog_connect_query Increments for each connect-based catalog query for the given service.
+# TYPE consul_catalog_connect_query counter
+consul_catalog_connect_query 0
+# HELP consul_catalog_connect_query_tag Increments for each connect-based catalog query for the given service with the given tag.
+# TYPE consul_catalog_connect_query_tag counter
+consul_catalog_connect_query_tag 0
+# HELP consul_catalog_connect_query_tags Increments for each connect-based catalog query for the given service with the given tags.
+# TYPE consul_catalog_connect_query_tags counter
+consul_catalog_connect_query_tags 0
+# HELP consul_catalog_deregister Measures the time it takes to complete a catalog deregister operation.
+# TYPE consul_catalog_deregister summary
+consul_catalog_deregister{quantile="0.5"} NaN
+consul_catalog_deregister{quantile="0.9"} NaN
+consul_catalog_deregister{quantile="0.99"} NaN
+consul_catalog_deregister_sum 221.93704390525818
+consul_catalog_deregister_count 55
+# HELP consul_catalog_register Measures the time it takes to complete a catalog register operation.
+# TYPE consul_catalog_register summary
+consul_catalog_register{quantile="0.5"} 2.13044810295105
+consul_catalog_register{quantile="0.9"} 2.721796989440918
+consul_catalog_register{quantile="0.99"} 2.721796989440918
+consul_catalog_register_sum 265432.1276627779
+consul_catalog_register_count 90231
+# HELP consul_catalog_service_not_found Increments for each catalog query where the given service could not be found.
+# TYPE consul_catalog_service_not_found counter
+consul_catalog_service_not_found 0
+# HELP consul_catalog_service_query Increments for each catalog query for the given service.
+# TYPE consul_catalog_service_query counter
+consul_catalog_service_query 0
+consul_catalog_service_query{service="consul-connect-injector-consul"} 15004
+consul_catalog_service_query{service="consul-ingress-gateway-consul"} 15009
+consul_catalog_service_query{service="kubelet-default"} 15009
+consul_catalog_service_query{service="kubernetes-default"} 15016
+consul_catalog_service_query{service="netdata-default"} 15009
+# HELP consul_catalog_service_query_tag Increments for each catalog query for the given service with the given tag.
+# TYPE consul_catalog_service_query_tag counter
+consul_catalog_service_query_tag 0
+# HELP consul_catalog_service_query_tags Increments for each catalog query for the given service with the given tags.
+# TYPE consul_catalog_service_query_tags counter
+consul_catalog_service_query_tags 0
+consul_catalog_service_query_tags{service="consul-connect-injector-consul",tag="k8s"} 15003
+consul_catalog_service_query_tags{service="consul-ingress-gateway-consul",tag="k8s"} 15009
+consul_catalog_service_query_tags{service="kubelet-default",tag="k8s"} 15009
+consul_catalog_service_query_tags{service="kubernetes-default",tag="k8s"} 15014
+consul_catalog_service_query_tags{service="netdata-default",tag="k8s"} 15004
+# HELP consul_client_api_catalog_datacenters Increments whenever a Consul agent receives a request to list datacenters in the catalog.
+# TYPE consul_client_api_catalog_datacenters counter
+consul_client_api_catalog_datacenters 0
+# HELP consul_client_api_catalog_deregister Increments whenever a Consul agent receives a catalog deregister request.
+# TYPE consul_client_api_catalog_deregister counter
+consul_client_api_catalog_deregister 0
+# HELP consul_client_api_catalog_gateway_services Increments whenever a Consul agent receives a request to list services associated with a gateway.
+# TYPE consul_client_api_catalog_gateway_services counter
+consul_client_api_catalog_gateway_services 0
+# HELP consul_client_api_catalog_node_service_list Increments whenever a Consul agent receives a request to list a node's registered services.
+# TYPE consul_client_api_catalog_node_service_list counter
+consul_client_api_catalog_node_service_list 0
+consul_client_api_catalog_node_service_list{node="ip-172-25-37-57",partition="default"} 1069
+# HELP consul_client_api_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_node_services counter
+consul_client_api_catalog_node_services 0
+# HELP consul_client_api_catalog_nodes Increments whenever a Consul agent receives a request to list nodes from the catalog.
+# TYPE consul_client_api_catalog_nodes counter
+consul_client_api_catalog_nodes 0
+consul_client_api_catalog_nodes{node="ip-172-25-37-57",partition="default"} 1
+# HELP consul_client_api_catalog_register Increments whenever a Consul agent receives a catalog register request.
+# TYPE consul_client_api_catalog_register counter
+consul_client_api_catalog_register 0
+consul_client_api_catalog_register{node="ip-172-25-37-57",partition="default"} 90170
+# HELP consul_client_api_catalog_service_nodes Increments whenever a Consul agent receives a request to list nodes offering a service.
+# TYPE consul_client_api_catalog_service_nodes counter
+consul_client_api_catalog_service_nodes 0
+consul_client_api_catalog_service_nodes{node="ip-172-25-37-57",partition="default"} 75066
+# HELP consul_client_api_catalog_services Increments whenever a Consul agent receives a request to list services from the catalog.
+# TYPE consul_client_api_catalog_services counter
+consul_client_api_catalog_services 0
+# HELP consul_client_api_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for request to list nodes offering a service.
+# TYPE consul_client_api_error_catalog_service_nodes counter
+consul_client_api_error_catalog_service_nodes 0
+# HELP consul_client_api_success_catalog_datacenters Increments whenever a Consul agent successfully responds to a request to list datacenters.
+# TYPE consul_client_api_success_catalog_datacenters counter
+consul_client_api_success_catalog_datacenters 0
+# HELP consul_client_api_success_catalog_deregister Increments whenever a Consul agent successfully responds to a catalog deregister request.
+# TYPE consul_client_api_success_catalog_deregister counter
+consul_client_api_success_catalog_deregister 0
+# HELP consul_client_api_success_catalog_gateway_services Increments whenever a Consul agent successfully responds to a request to list services associated with a gateway.
+# TYPE consul_client_api_success_catalog_gateway_services counter
+consul_client_api_success_catalog_gateway_services 0
+# HELP consul_client_api_success_catalog_node_service_list Increments whenever a Consul agent successfully responds to a request to list a node's registered services.
+# TYPE consul_client_api_success_catalog_node_service_list counter
+consul_client_api_success_catalog_node_service_list 0
+consul_client_api_success_catalog_node_service_list{node="ip-172-25-37-57",partition="default"} 1069
+# HELP consul_client_api_success_catalog_node_services Increments whenever a Consul agent successfully responds to a request to list services in a node.
+# TYPE consul_client_api_success_catalog_node_services counter
+consul_client_api_success_catalog_node_services 0
+# HELP consul_client_api_success_catalog_nodes Increments whenever a Consul agent successfully responds to a request to list nodes.
+# TYPE consul_client_api_success_catalog_nodes counter
+consul_client_api_success_catalog_nodes 0
+consul_client_api_success_catalog_nodes{node="ip-172-25-37-57",partition="default"} 1
+# HELP consul_client_api_success_catalog_register Increments whenever a Consul agent successfully responds to a catalog register request.
+# TYPE consul_client_api_success_catalog_register counter
+consul_client_api_success_catalog_register 0
+consul_client_api_success_catalog_register{node="ip-172-25-37-57",partition="default"} 90170
+# HELP consul_client_api_success_catalog_service_nodes Increments whenever a Consul agent successfully responds to a request to list nodes offering a service.
+# TYPE consul_client_api_success_catalog_service_nodes counter
+consul_client_api_success_catalog_service_nodes 0
+consul_client_api_success_catalog_service_nodes{node="ip-172-25-37-57",partition="default"} 75072
+# HELP consul_client_api_success_catalog_services Increments whenever a Consul agent successfully responds to a request to list services.
+# TYPE consul_client_api_success_catalog_services counter
+consul_client_api_success_catalog_services 0
+# HELP consul_client_rpc Increments whenever a Consul agent in client mode makes an RPC request to a Consul server.
+# TYPE consul_client_rpc counter
+consul_client_rpc 438718
+# HELP consul_client_rpc_error_catalog_datacenters Increments whenever a Consul agent receives an RPC error for a request to list datacenters.
+# TYPE consul_client_rpc_error_catalog_datacenters counter
+consul_client_rpc_error_catalog_datacenters 0
+# HELP consul_client_rpc_error_catalog_deregister Increments whenever a Consul agent receives an RPC error for a catalog deregister request.
+# TYPE consul_client_rpc_error_catalog_deregister counter
+consul_client_rpc_error_catalog_deregister 0
+# HELP consul_client_rpc_error_catalog_gateway_services Increments whenever a Consul agent receives an RPC error for a request to list services associated with a gateway.
+# TYPE consul_client_rpc_error_catalog_gateway_services counter
+consul_client_rpc_error_catalog_gateway_services 0
+# HELP consul_client_rpc_error_catalog_node_service_list Increments whenever a Consul agent receives an RPC error for request to list a node's registered services.
+# TYPE consul_client_rpc_error_catalog_node_service_list counter
+consul_client_rpc_error_catalog_node_service_list 0
+# HELP consul_client_rpc_error_catalog_node_services Increments whenever a Consul agent receives an RPC error for a request to list services in a node.
+# TYPE consul_client_rpc_error_catalog_node_services counter
+consul_client_rpc_error_catalog_node_services 0
+# HELP consul_client_rpc_error_catalog_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes.
+# TYPE consul_client_rpc_error_catalog_nodes counter
+consul_client_rpc_error_catalog_nodes 0
+# HELP consul_client_rpc_error_catalog_register Increments whenever a Consul agent receives an RPC error for a catalog register request.
+# TYPE consul_client_rpc_error_catalog_register counter
+consul_client_rpc_error_catalog_register 0
+# HELP consul_client_rpc_error_catalog_service_nodes Increments whenever a Consul agent receives an RPC error for a request to list nodes offering a service.
+# TYPE consul_client_rpc_error_catalog_service_nodes counter
+consul_client_rpc_error_catalog_service_nodes 0
+# HELP consul_client_rpc_error_catalog_services Increments whenever a Consul agent receives an RPC error for a request to list services.
+# TYPE consul_client_rpc_error_catalog_services counter
+consul_client_rpc_error_catalog_services 0
+# HELP consul_client_rpc_exceeded Increments whenever a Consul agent in client mode makes an RPC request to a Consul server gets rate limited by that agent's limits configuration.
+# TYPE consul_client_rpc_exceeded counter
+consul_client_rpc_exceeded 0
+# HELP consul_client_rpc_failed Increments whenever a Consul agent in client mode makes an RPC request to a Consul server and fails.
+# TYPE consul_client_rpc_failed counter
+consul_client_rpc_failed 0
+# HELP consul_consul_cache_bypass Deprecated - please use cache_bypass instead.
+# TYPE consul_consul_cache_bypass counter
+consul_consul_cache_bypass 0
+# HELP consul_consul_cache_connect_ca_leaf_fetch_success consul_consul_cache_connect_ca_leaf_fetch_success
+# TYPE consul_consul_cache_connect_ca_leaf_fetch_success counter
+consul_consul_cache_connect_ca_leaf_fetch_success{result_not_modified="false"} 2
+# HELP consul_consul_cache_connect_ca_root_fetch_success consul_consul_cache_connect_ca_root_fetch_success
+# TYPE consul_consul_cache_connect_ca_root_fetch_success counter
+consul_consul_cache_connect_ca_root_fetch_success{result_not_modified="false"} 271
+# HELP consul_consul_cache_connect_ca_root_hit consul_consul_cache_connect_ca_root_hit
+# TYPE consul_consul_cache_connect_ca_root_hit counter
+consul_consul_cache_connect_ca_root_hit 2
+# HELP consul_consul_cache_entries_count Deprecated - please use cache_entries_count instead.
+# TYPE consul_consul_cache_entries_count gauge
+consul_consul_cache_entries_count 0
+# HELP consul_consul_cache_entries_count_entries_count consul_consul_cache_entries_count_entries_count
+# TYPE consul_consul_cache_entries_count_entries_count gauge
+consul_consul_cache_entries_count_entries_count 30
+# HELP consul_consul_cache_evict_expired Deprecated - please use cache_evict_expired instead.
+# TYPE consul_consul_cache_evict_expired counter
+consul_consul_cache_evict_expired 1
+# HELP consul_consul_cache_fetch_error Deprecated - please use cache_fetch_error instead.
+# TYPE consul_consul_cache_fetch_error counter
+consul_consul_cache_fetch_error 0
+# HELP consul_consul_cache_fetch_success Deprecated - please use cache_fetch_success instead.
+# TYPE consul_consul_cache_fetch_success counter
+consul_consul_cache_fetch_success 0
+consul_consul_cache_fetch_success{result_not_modified="false"} 1381
+# HELP consul_consul_fsm_ca Deprecated - use fsm_ca instead
+# TYPE consul_consul_fsm_ca summary
+consul_consul_fsm_ca{quantile="0.5"} NaN
+consul_consul_fsm_ca{quantile="0.9"} NaN
+consul_consul_fsm_ca{quantile="0.99"} NaN
+consul_consul_fsm_ca_sum 0
+consul_consul_fsm_ca_count 0
+# HELP consul_consul_fsm_intention Deprecated - use fsm_intention instead
+# TYPE consul_consul_fsm_intention summary
+consul_consul_fsm_intention{quantile="0.5"} NaN
+consul_consul_fsm_intention{quantile="0.9"} NaN
+consul_consul_fsm_intention{quantile="0.99"} NaN
+consul_consul_fsm_intention_sum 0
+consul_consul_fsm_intention_count 0
+# HELP consul_consul_intention_apply Deprecated - please use intention_apply
+# TYPE consul_consul_intention_apply summary
+consul_consul_intention_apply{quantile="0.5"} NaN
+consul_consul_intention_apply{quantile="0.9"} NaN
+consul_consul_intention_apply{quantile="0.99"} NaN
+consul_consul_intention_apply_sum 0
+consul_consul_intention_apply_count 0
+# HELP consul_consul_leader_reconcile consul_consul_leader_reconcile
+# TYPE consul_consul_leader_reconcile summary
+consul_consul_leader_reconcile{quantile="0.5"} NaN
+consul_consul_leader_reconcile{quantile="0.9"} NaN
+consul_consul_leader_reconcile{quantile="0.99"} NaN
+consul_consul_leader_reconcile_sum 1747.430968016386
+consul_consul_leader_reconcile_count 7530
+# HELP consul_consul_members_clients Deprecated - please use members_clients instead.
+# TYPE consul_consul_members_clients gauge
+consul_consul_members_clients 0
+# HELP consul_consul_members_clients_clients consul_consul_members_clients_clients
+# TYPE consul_consul_members_clients_clients gauge
+consul_consul_members_clients_clients{datacenter="consul-sandbox-cluster-0159c9d3"} 1
+consul_consul_members_clients_clients{datacenter="consul-sandbox-cluster-0159c9d3",partition="default",segment=""} 1
+# HELP consul_consul_members_servers Deprecated - please use members_servers instead.
+# TYPE consul_consul_members_servers gauge
+consul_consul_members_servers 0
+# HELP consul_consul_members_servers_servers consul_consul_members_servers_servers
+# TYPE consul_consul_members_servers_servers gauge
+consul_consul_members_servers_servers{datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# HELP consul_consul_peering_exported_services Deprecated - please use peering_exported_services
+# TYPE consul_consul_peering_exported_services gauge
+consul_consul_peering_exported_services 0
+# HELP consul_consul_peering_healthy Deprecated - please use peering_exported_services
+# TYPE consul_consul_peering_healthy gauge
+consul_consul_peering_healthy 0
+# HELP consul_consul_state_config_entries Deprecated - please use state_config_entries instead.
+# TYPE consul_consul_state_config_entries gauge
+consul_consul_state_config_entries 0
+# HELP consul_consul_state_config_entries_config_entries consul_consul_state_config_entries_config_entries
+# TYPE consul_consul_state_config_entries_config_entries gauge
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="exported-services",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="exported-services",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="exported-services",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="proxy-defaults",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="proxy-defaults",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="proxy-defaults",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-defaults",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-defaults",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-defaults",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-intentions",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-intentions",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-intentions",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-resolver",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-resolver",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-resolver",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-router",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-router",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-router",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-splitter",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-splitter",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="service-splitter",namespace="infra",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="default",partition="default"} 0
+consul_consul_state_config_entries_config_entries{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="infra",partition="default"} 0
+# HELP consul_consul_state_connect_instances Deprecated - please use state_connect_instances instead.
+# TYPE consul_consul_state_connect_instances gauge
+consul_consul_state_connect_instances 0
+# HELP consul_consul_state_connect_instances_connect_instances consul_consul_state_connect_instances_connect_instances
+# TYPE consul_consul_state_connect_instances_connect_instances gauge
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-native",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-native",namespace="default",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-native",namespace="infra",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-proxy",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-proxy",namespace="default",partition="default"} 1
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="connect-proxy",namespace="infra",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="default",partition="default"} 2
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="ingress-gateway",namespace="infra",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh-gateway",namespace="default",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="mesh-gateway",namespace="infra",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="consul",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="default",partition="default"} 0
+consul_consul_state_connect_instances_connect_instances{datacenter="consul-sandbox-cluster-0159c9d3",kind="terminating-gateway",namespace="infra",partition="default"} 0
+# HELP consul_consul_state_kv_entries Deprecated - please use kv_entries instead.
+# TYPE consul_consul_state_kv_entries gauge
+consul_consul_state_kv_entries 0
+# HELP consul_consul_state_kv_entries_kv_entries consul_consul_state_kv_entries_kv_entries
+# TYPE consul_consul_state_kv_entries_kv_entries gauge
+consul_consul_state_kv_entries_kv_entries{datacenter="consul-sandbox-cluster-0159c9d3",namespace="consul"} 0
+consul_consul_state_kv_entries_kv_entries{datacenter="consul-sandbox-cluster-0159c9d3",namespace="default"} 0
+consul_consul_state_kv_entries_kv_entries{datacenter="consul-sandbox-cluster-0159c9d3",namespace="infra"} 0
+# HELP consul_consul_state_nodes Deprecated - please use state_nodes instead.
+# TYPE consul_consul_state_nodes gauge
+consul_consul_state_nodes 0
+# HELP consul_consul_state_nodes_nodes consul_consul_state_nodes_nodes
+# TYPE consul_consul_state_nodes_nodes gauge
+consul_consul_state_nodes_nodes{datacenter="consul-sandbox-cluster-0159c9d3",partition="default"} 8
+# HELP consul_consul_state_peerings Deprecated - please use state_peerings instead.
+# TYPE consul_consul_state_peerings gauge
+consul_consul_state_peerings 0
+# HELP consul_consul_state_peerings_peerings consul_consul_state_peerings_peerings
+# TYPE consul_consul_state_peerings_peerings gauge
+consul_consul_state_peerings_peerings{datacenter="consul-sandbox-cluster-0159c9d3",partition="default"} 0
+# HELP consul_consul_state_service_instances Deprecated - please use state_service_instances instead.
+# TYPE consul_consul_state_service_instances gauge
+consul_consul_state_service_instances 0
+# HELP consul_consul_state_service_instances_service_instances consul_consul_state_service_instances_service_instances
+# TYPE consul_consul_state_service_instances_service_instances gauge
+consul_consul_state_service_instances_service_instances{datacenter="consul-sandbox-cluster-0159c9d3",namespace="consul",partition="default"} 2
+consul_consul_state_service_instances_service_instances{datacenter="consul-sandbox-cluster-0159c9d3",namespace="default",partition="default"} 9
+consul_consul_state_service_instances_service_instances{datacenter="consul-sandbox-cluster-0159c9d3",namespace="infra",partition="default"} 0
+# HELP consul_consul_state_services Deprecated - please use state_services instead.
+# TYPE consul_consul_state_services gauge
+consul_consul_state_services 0
+# HELP consul_consul_state_services_services consul_consul_state_services_services
+# TYPE consul_consul_state_services_services gauge
+consul_consul_state_services_services{datacenter="consul-sandbox-cluster-0159c9d3",namespace="consul",partition="default"} 2
+consul_consul_state_services_services{datacenter="consul-sandbox-cluster-0159c9d3",namespace="default",partition="default"} 7
+consul_consul_state_services_services{datacenter="consul-sandbox-cluster-0159c9d3",namespace="infra",partition="default"} 0
+# HELP consul_federation_state_apply
+# TYPE consul_federation_state_apply summary
+consul_federation_state_apply{quantile="0.5"} NaN
+consul_federation_state_apply{quantile="0.9"} NaN
+consul_federation_state_apply{quantile="0.99"} NaN
+consul_federation_state_apply_sum 0
+consul_federation_state_apply_count 0
+# HELP consul_federation_state_get
+# TYPE consul_federation_state_get summary
+consul_federation_state_get{quantile="0.5"} NaN
+consul_federation_state_get{quantile="0.9"} NaN
+consul_federation_state_get{quantile="0.99"} NaN
+consul_federation_state_get_sum 0
+consul_federation_state_get_count 0
+# HELP consul_federation_state_list
+# TYPE consul_federation_state_list summary
+consul_federation_state_list{quantile="0.5"} NaN
+consul_federation_state_list{quantile="0.9"} NaN
+consul_federation_state_list{quantile="0.99"} NaN
+consul_federation_state_list_sum 0
+consul_federation_state_list_count 0
+# HELP consul_federation_state_list_mesh_gateways
+# TYPE consul_federation_state_list_mesh_gateways summary
+consul_federation_state_list_mesh_gateways{quantile="0.5"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.9"} NaN
+consul_federation_state_list_mesh_gateways{quantile="0.99"} NaN
+consul_federation_state_list_mesh_gateways_sum 0
+consul_federation_state_list_mesh_gateways_count 0
+# HELP consul_fsm_acl Measures the time it takes to apply the given ACL operation to the FSM.
+# TYPE consul_fsm_acl summary
+consul_fsm_acl{quantile="0.5"} NaN
+consul_fsm_acl{quantile="0.9"} NaN
+consul_fsm_acl{quantile="0.99"} NaN
+consul_fsm_acl_sum 0
+consul_fsm_acl_count 0
+# HELP consul_fsm_acl_authmethod Measures the time it takes to apply an ACL authmethod operation to the FSM.
+# TYPE consul_fsm_acl_authmethod summary
+consul_fsm_acl_authmethod{quantile="0.5"} NaN
+consul_fsm_acl_authmethod{quantile="0.9"} NaN
+consul_fsm_acl_authmethod{quantile="0.99"} NaN
+consul_fsm_acl_authmethod_sum 0
+consul_fsm_acl_authmethod_count 0
+# HELP consul_fsm_acl_bindingrule Measures the time it takes to apply an ACL binding rule operation to the FSM.
+# TYPE consul_fsm_acl_bindingrule summary
+consul_fsm_acl_bindingrule{quantile="0.5"} NaN
+consul_fsm_acl_bindingrule{quantile="0.9"} NaN
+consul_fsm_acl_bindingrule{quantile="0.99"} NaN
+consul_fsm_acl_bindingrule_sum 0
+consul_fsm_acl_bindingrule_count 0
+# HELP consul_fsm_acl_policy Measures the time it takes to apply an ACL policy operation to the FSM.
+# TYPE consul_fsm_acl_policy summary
+consul_fsm_acl_policy{quantile="0.5"} NaN
+consul_fsm_acl_policy{quantile="0.9"} NaN
+consul_fsm_acl_policy{quantile="0.99"} NaN
+consul_fsm_acl_policy_sum 0
+consul_fsm_acl_policy_count 0
+# HELP consul_fsm_acl_token Measures the time it takes to apply an ACL token operation to the FSM.
+# TYPE consul_fsm_acl_token summary
+consul_fsm_acl_token{quantile="0.5"} NaN
+consul_fsm_acl_token{quantile="0.9"} NaN
+consul_fsm_acl_token{quantile="0.99"} NaN
+consul_fsm_acl_token_sum 0
+consul_fsm_acl_token_count 0
+consul_fsm_acl_token{op="upsert",quantile="0.5"} NaN
+consul_fsm_acl_token{op="upsert",quantile="0.9"} NaN
+consul_fsm_acl_token{op="upsert",quantile="0.99"} NaN
+consul_fsm_acl_token_sum{op="upsert"} 0.18545499444007874
+consul_fsm_acl_token_count{op="upsert"} 1
+# HELP consul_fsm_autopilot Measures the time it takes to apply the given autopilot update to the FSM.
+# TYPE consul_fsm_autopilot summary
+consul_fsm_autopilot{quantile="0.5"} NaN
+consul_fsm_autopilot{quantile="0.9"} NaN
+consul_fsm_autopilot{quantile="0.99"} NaN
+consul_fsm_autopilot_sum 37.74536604247987
+consul_fsm_autopilot_count 753
+# HELP consul_fsm_ca Measures the time it takes to apply CA configuration operations to the FSM.
+# TYPE consul_fsm_ca summary
+consul_fsm_ca{quantile="0.5"} NaN
+consul_fsm_ca{quantile="0.9"} NaN
+consul_fsm_ca{quantile="0.99"} NaN
+consul_fsm_ca_sum 0
+consul_fsm_ca_count 0
+# HELP consul_fsm_ca_leaf Measures the time it takes to apply an operation while signing a leaf certificate.
+# TYPE consul_fsm_ca_leaf summary
+consul_fsm_ca_leaf{quantile="0.5"} NaN
+consul_fsm_ca_leaf{quantile="0.9"} NaN
+consul_fsm_ca_leaf{quantile="0.99"} NaN
+consul_fsm_ca_leaf_sum 0
+consul_fsm_ca_leaf_count 0
+# HELP consul_fsm_coordinate_batch_update Measures the time it takes to apply the given batch coordinate update to the FSM.
+# TYPE consul_fsm_coordinate_batch_update summary
+consul_fsm_coordinate_batch_update{quantile="0.5"} 0.1002039983868599
+consul_fsm_coordinate_batch_update{quantile="0.9"} 0.1002039983868599
+consul_fsm_coordinate_batch_update{quantile="0.99"} 0.1002039983868599
+consul_fsm_coordinate_batch_update_sum 2816.718877375126
+consul_fsm_coordinate_batch_update_count 21979
+# HELP consul_fsm_deregister Measures the time it takes to apply a catalog deregister operation to the FSM.
+# TYPE consul_fsm_deregister summary
+consul_fsm_deregister{quantile="0.5"} NaN
+consul_fsm_deregister{quantile="0.9"} NaN
+consul_fsm_deregister{quantile="0.99"} NaN
+consul_fsm_deregister_sum 81.9582624938339
+consul_fsm_deregister_count 56
+# HELP consul_fsm_intention Measures the time it takes to apply an intention operation to the FSM.
+# TYPE consul_fsm_intention summary
+consul_fsm_intention{quantile="0.5"} NaN
+consul_fsm_intention{quantile="0.9"} NaN
+consul_fsm_intention{quantile="0.99"} NaN
+consul_fsm_intention_sum 0
+consul_fsm_intention_count 0
+# HELP consul_fsm_kvs Measures the time it takes to apply the given KV operation to the FSM.
+# TYPE consul_fsm_kvs summary
+consul_fsm_kvs{quantile="0.5"} NaN
+consul_fsm_kvs{quantile="0.9"} NaN
+consul_fsm_kvs{quantile="0.99"} NaN
+consul_fsm_kvs_sum 0
+consul_fsm_kvs_count 0
+# HELP consul_fsm_peering Measures the time it takes to apply a peering operation to the FSM.
+# TYPE consul_fsm_peering summary
+consul_fsm_peering{quantile="0.5"} NaN
+consul_fsm_peering{quantile="0.9"} NaN
+consul_fsm_peering{quantile="0.99"} NaN
+consul_fsm_peering_sum 0
+consul_fsm_peering_count 0
+# HELP consul_fsm_persist Measures the time it takes to persist the FSM to a raft snapshot.
+# TYPE consul_fsm_persist summary
+consul_fsm_persist{quantile="0.5"} NaN
+consul_fsm_persist{quantile="0.9"} NaN
+consul_fsm_persist{quantile="0.99"} NaN
+consul_fsm_persist_sum 361.0432777404785
+consul_fsm_persist_count 10
+# HELP consul_fsm_prepared_query Measures the time it takes to apply the given prepared query update operation to the FSM.
+# TYPE consul_fsm_prepared_query summary
+consul_fsm_prepared_query{quantile="0.5"} NaN
+consul_fsm_prepared_query{quantile="0.9"} NaN
+consul_fsm_prepared_query{quantile="0.99"} NaN
+consul_fsm_prepared_query_sum 0
+consul_fsm_prepared_query_count 0
+# HELP consul_fsm_register Measures the time it takes to apply a catalog register operation to the FSM.
+# TYPE consul_fsm_register summary
+consul_fsm_register{quantile="0.5"} 0.15392500162124634
+consul_fsm_register{quantile="0.9"} 0.22902700304985046
+consul_fsm_register{quantile="0.99"} 0.22902700304985046
+consul_fsm_register_sum 17763.026295486838
+consul_fsm_register_count 90283
+# HELP consul_fsm_session Measures the time it takes to apply the given session operation to the FSM.
+# TYPE consul_fsm_session summary
+consul_fsm_session{quantile="0.5"} NaN
+consul_fsm_session{quantile="0.9"} NaN
+consul_fsm_session{quantile="0.99"} NaN
+consul_fsm_session_sum 0
+consul_fsm_session_count 0
+# HELP consul_fsm_system_metadata Measures the time it takes to apply a system metadata operation to the FSM.
+# TYPE consul_fsm_system_metadata summary
+consul_fsm_system_metadata{quantile="0.5"} NaN
+consul_fsm_system_metadata{quantile="0.9"} NaN
+consul_fsm_system_metadata{quantile="0.99"} NaN
+consul_fsm_system_metadata_sum 0
+consul_fsm_system_metadata_count 0
+# HELP consul_fsm_tombstone Measures the time it takes to apply the given tombstone operation to the FSM.
+# TYPE consul_fsm_tombstone summary
+consul_fsm_tombstone{quantile="0.5"} NaN
+consul_fsm_tombstone{quantile="0.9"} NaN
+consul_fsm_tombstone{quantile="0.99"} NaN
+consul_fsm_tombstone_sum 0
+consul_fsm_tombstone_count 0
+# HELP consul_fsm_txn Measures the time it takes to apply the given transaction update to the FSM.
+# TYPE consul_fsm_txn summary
+consul_fsm_txn{quantile="0.5"} NaN
+consul_fsm_txn{quantile="0.9"} NaN
+consul_fsm_txn{quantile="0.99"} NaN
+consul_fsm_txn_sum 0
+consul_fsm_txn_count 0
+# HELP consul_grpc_client_connection_count Counts the number of new gRPC connections opened by the client agent to a Consul server.
+# TYPE consul_grpc_client_connection_count counter
+consul_grpc_client_connection_count 0
+# HELP consul_grpc_client_connections Measures the number of active gRPC connections open from the client agent to any Consul servers.
+# TYPE consul_grpc_client_connections gauge
+consul_grpc_client_connections 0
+# HELP consul_grpc_client_request_count Counts the number of gRPC requests made by the client agent to a Consul server.
+# TYPE consul_grpc_client_request_count counter
+consul_grpc_client_request_count 0
+consul_grpc_client_request_count{server_type="internal"} 4136
+# HELP consul_grpc_server_connection_count Counts the number of new gRPC connections received by the server.
+# TYPE consul_grpc_server_connection_count counter
+consul_grpc_server_connection_count 0
+consul_grpc_server_connection_count{server_type="external"} 1
+# HELP consul_grpc_server_connections Measures the number of active gRPC connections open on the server.
+# TYPE consul_grpc_server_connections gauge
+consul_grpc_server_connections 0
+# HELP consul_grpc_server_connections_connections consul_grpc_server_connections_connections
+# TYPE consul_grpc_server_connections_connections gauge
+consul_grpc_server_connections_connections{server_type="external"} 5
+# HELP consul_grpc_server_request_count Counts the number of gRPC requests received by the server.
+# TYPE consul_grpc_server_request_count counter
+consul_grpc_server_request_count 0
+consul_grpc_server_request_count{server_type="external"} 49
+consul_grpc_server_request_count{server_type="internal"} 4139
+# HELP consul_grpc_server_stream_count Counts the number of new gRPC streams received by the server.
+# TYPE consul_grpc_server_stream_count counter
+consul_grpc_server_stream_count 0
+# HELP consul_grpc_server_streams Measures the number of active gRPC streams handled by the server.
+# TYPE consul_grpc_server_streams gauge
+consul_grpc_server_streams 0
+# HELP consul_intention_apply
+# TYPE consul_intention_apply summary
+consul_intention_apply{quantile="0.5"} NaN
+consul_intention_apply{quantile="0.9"} NaN
+consul_intention_apply{quantile="0.99"} NaN
+consul_intention_apply_sum 0
+consul_intention_apply_count 0
+# HELP consul_kvs_apply Measures the time it takes to complete an update to the KV store.
+# TYPE consul_kvs_apply summary
+consul_kvs_apply{quantile="0.5"} NaN
+consul_kvs_apply{quantile="0.9"} NaN
+consul_kvs_apply{quantile="0.99"} NaN
+consul_kvs_apply_sum 18.550758838653564
+consul_kvs_apply_count 2
+# HELP consul_leader_barrier Measures the time spent waiting for the raft barrier upon gaining leadership.
+# TYPE consul_leader_barrier summary
+consul_leader_barrier{quantile="0.5"} NaN
+consul_leader_barrier{quantile="0.9"} NaN
+consul_leader_barrier{quantile="0.99"} NaN
+consul_leader_barrier_sum 16746.72570502758
+consul_leader_barrier_count 7530
+# HELP consul_leader_reapTombstones Measures the time spent clearing tombstones.
+# TYPE consul_leader_reapTombstones summary
+consul_leader_reapTombstones{quantile="0.5"} NaN
+consul_leader_reapTombstones{quantile="0.9"} NaN
+consul_leader_reapTombstones{quantile="0.99"} NaN
+consul_leader_reapTombstones_sum 8.299793243408203
+consul_leader_reapTombstones_count 2
+# HELP consul_leader_reconcile consul_leader_reconcile
+# TYPE consul_leader_reconcile summary
+consul_leader_reconcile{quantile="0.5"} NaN
+consul_leader_reconcile{quantile="0.9"} NaN
+consul_leader_reconcile{quantile="0.99"} NaN
+consul_leader_reconcile_sum 1640.2054885923862
+consul_leader_reconcile_count 7530
+# HELP consul_leader_reconcileMember Measures the time spent updating the raft store for a single serf member's information.
+# TYPE consul_leader_reconcileMember summary
+consul_leader_reconcileMember{quantile="0.5"} NaN
+consul_leader_reconcileMember{quantile="0.9"} NaN
+consul_leader_reconcileMember{quantile="0.99"} NaN
+consul_leader_reconcileMember_sum 923.1838235380128
+consul_leader_reconcileMember_count 9879
+# HELP consul_leader_replication_acl_policies_index Tracks the index of ACL policies in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_policies_index gauge
+consul_leader_replication_acl_policies_index 0
+# HELP consul_leader_replication_acl_policies_status Tracks the current health of ACL policy replication on the leader
+# TYPE consul_leader_replication_acl_policies_status gauge
+consul_leader_replication_acl_policies_status 0
+# HELP consul_leader_replication_acl_roles_index Tracks the index of ACL roles in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_roles_index gauge
+consul_leader_replication_acl_roles_index 0
+# HELP consul_leader_replication_acl_roles_status Tracks the current health of ACL role replication on the leader
+# TYPE consul_leader_replication_acl_roles_status gauge
+consul_leader_replication_acl_roles_status 0
+# HELP consul_leader_replication_acl_tokens_index Tracks the index of ACL tokens in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_acl_tokens_index gauge
+consul_leader_replication_acl_tokens_index 0
+# HELP consul_leader_replication_acl_tokens_status Tracks the current health of ACL token replication on the leader
+# TYPE consul_leader_replication_acl_tokens_status gauge
+consul_leader_replication_acl_tokens_status 0
+# HELP consul_leader_replication_config_entries_index Tracks the index of config entries in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_config_entries_index gauge
+consul_leader_replication_config_entries_index 0
+# HELP consul_leader_replication_config_entries_status Tracks the current health of config entry replication on the leader
+# TYPE consul_leader_replication_config_entries_status gauge
+consul_leader_replication_config_entries_status 0
+# HELP consul_leader_replication_federation_state_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_federation_state_index gauge
+consul_leader_replication_federation_state_index 0
+# HELP consul_leader_replication_federation_state_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_federation_state_status gauge
+consul_leader_replication_federation_state_status 0
+# HELP consul_leader_replication_namespaces_index Tracks the index of federation states in the primary that the secondary has successfully replicated
+# TYPE consul_leader_replication_namespaces_index gauge
+consul_leader_replication_namespaces_index 0
+# HELP consul_leader_replication_namespaces_status Tracks the current health of federation state replication on the leader
+# TYPE consul_leader_replication_namespaces_status gauge
+consul_leader_replication_namespaces_status 0
+# HELP consul_memberlist_gossip consul_memberlist_gossip
+# TYPE consul_memberlist_gossip summary
+consul_memberlist_gossip{network="wan",quantile="0.5"} 0.013411000370979309
+consul_memberlist_gossip{network="wan",quantile="0.9"} 0.01651100069284439
+consul_memberlist_gossip{network="wan",quantile="0.99"} 0.017091000452637672
+consul_memberlist_gossip_sum{network="wan"} 12186.142546130694
+consul_memberlist_gossip_count{network="wan"} 903629
+consul_memberlist_gossip{network="lan",partition="default",segment="",quantile="0.5"} 0.01858999952673912
+consul_memberlist_gossip{network="lan",partition="default",segment="",quantile="0.9"} 0.02322000078856945
+consul_memberlist_gossip{network="lan",partition="default",segment="",quantile="0.99"} 0.03482099995017052
+consul_memberlist_gossip_sum{network="lan",partition="default",segment=""} 38046.85491481074
+consul_memberlist_gossip_count{network="lan",partition="default",segment=""} 2.259067e+06
+# HELP consul_memberlist_node_instances_instances consul_memberlist_node_instances_instances
+# TYPE consul_memberlist_node_instances_instances gauge
+consul_memberlist_node_instances_instances{network="lan",node_state="alive",partition="default",segment=""} 2
+consul_memberlist_node_instances_instances{network="lan",node_state="dead",partition="default",segment=""} 0
+consul_memberlist_node_instances_instances{network="lan",node_state="left",partition="default",segment=""} 0
+consul_memberlist_node_instances_instances{network="lan",node_state="suspect",partition="default",segment=""} 0
+# HELP consul_memberlist_probeNode consul_memberlist_probeNode
+# TYPE consul_memberlist_probeNode summary
+consul_memberlist_probeNode{network="lan",partition="default",segment="",quantile="0.5"} 1.3738830089569092
+consul_memberlist_probeNode{network="lan",partition="default",segment="",quantile="0.9"} 1.4592169523239136
+consul_memberlist_probeNode{network="lan",partition="default",segment="",quantile="0.99"} 1.4592169523239136
+consul_memberlist_probeNode_sum{network="lan",partition="default",segment=""} 44756.27836251259
+consul_memberlist_probeNode_count{network="lan",partition="default",segment=""} 30847
+# HELP consul_memberlist_pushPullNode consul_memberlist_pushPullNode
+# TYPE consul_memberlist_pushPullNode summary
+consul_memberlist_pushPullNode{network="lan",partition="default",segment="",quantile="0.5"} 2.5498108863830566
+consul_memberlist_pushPullNode{network="lan",partition="default",segment="",quantile="0.9"} 2.5498108863830566
+consul_memberlist_pushPullNode{network="lan",partition="default",segment="",quantile="0.99"} 2.5498108863830566
+consul_memberlist_pushPullNode_sum{network="lan",partition="default",segment=""} 5021.0542075634
+consul_memberlist_pushPullNode_count{network="lan",partition="default",segment=""} 1773
+# HELP consul_memberlist_queue_broadcasts consul_memberlist_queue_broadcasts
+# TYPE consul_memberlist_queue_broadcasts summary
+consul_memberlist_queue_broadcasts{network="wan",quantile="0.5"} 0
+consul_memberlist_queue_broadcasts{network="wan",quantile="0.9"} 0
+consul_memberlist_queue_broadcasts{network="wan",quantile="0.99"} 0
+consul_memberlist_queue_broadcasts_sum{network="wan"} 0
+consul_memberlist_queue_broadcasts_count{network="wan"} 15060
+consul_memberlist_queue_broadcasts{network="lan",partition="default",segment="",quantile="0.5"} 0
+consul_memberlist_queue_broadcasts{network="lan",partition="default",segment="",quantile="0.9"} 0
+consul_memberlist_queue_broadcasts{network="lan",partition="default",segment="",quantile="0.99"} 0
+consul_memberlist_queue_broadcasts_sum{network="lan",partition="default",segment=""} 0
+consul_memberlist_queue_broadcasts_count{network="lan",partition="default",segment=""} 15060
+# HELP consul_memberlist_size_local_local consul_memberlist_size_local_local
+# TYPE consul_memberlist_size_local_local gauge
+consul_memberlist_size_local_local{network="lan",partition="default",segment=""} 2.208582144e+09
+# HELP consul_memberlist_size_remote consul_memberlist_size_remote
+# TYPE consul_memberlist_size_remote summary
+consul_memberlist_size_remote{network="lan",partition="default",segment="",quantile="0.5"} 717
+consul_memberlist_size_remote{network="lan",partition="default",segment="",quantile="0.9"} 717
+consul_memberlist_size_remote{network="lan",partition="default",segment="",quantile="0.99"} 717
+consul_memberlist_size_remote_sum{network="lan",partition="default",segment=""} 2.538313e+06
+consul_memberlist_size_remote_count{network="lan",partition="default",segment=""} 3549
+# HELP consul_memberlist_tcp_accept consul_memberlist_tcp_accept
+# TYPE consul_memberlist_tcp_accept counter
+consul_memberlist_tcp_accept{network="lan",partition="default",segment=""} 1776
+# HELP consul_memberlist_tcp_connect consul_memberlist_tcp_connect
+# TYPE consul_memberlist_tcp_connect counter
+consul_memberlist_tcp_connect{network="lan",partition="default",segment=""} 1773
+# HELP consul_memberlist_tcp_sent consul_memberlist_tcp_sent
+# TYPE consul_memberlist_tcp_sent counter
+consul_memberlist_tcp_sent{network="lan",partition="default",segment=""} 3.206921e+06
+# HELP consul_memberlist_udp_received consul_memberlist_udp_received
+# TYPE consul_memberlist_udp_received counter
+consul_memberlist_udp_received{network="lan"} 9.221042e+06
+# HELP consul_memberlist_udp_sent consul_memberlist_udp_sent
+# TYPE consul_memberlist_udp_sent counter
+consul_memberlist_udp_sent{network="lan",partition="default",segment=""} 9.218109e+06
+# HELP consul_members_clients Measures the current number of client agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_members_clients gauge
+consul_members_clients 0
+# HELP consul_members_servers Measures the current number of server agents registered with Consul. It is only emitted by Consul servers. Added in v1.9.6.
+# TYPE consul_members_servers gauge
+consul_members_servers 0
+# HELP consul_mesh_active_root_ca_expiry Seconds until the service mesh root certificate expires. Updated every hour
+# TYPE consul_mesh_active_root_ca_expiry gauge
+consul_mesh_active_root_ca_expiry 0
+# HELP consul_mesh_active_signing_ca_expiry Seconds until the service mesh signing certificate expires. Updated every hour
+# TYPE consul_mesh_active_signing_ca_expiry gauge
+consul_mesh_active_signing_ca_expiry 0
+# HELP consul_namespace_read consul_namespace_read
+# TYPE consul_namespace_read summary
+consul_namespace_read{quantile="0.5"} 0.06529200077056885
+consul_namespace_read{quantile="0.9"} 0.12670400738716125
+consul_namespace_read{quantile="0.99"} 0.12670400738716125
+consul_namespace_read_sum 2885.675253532827
+consul_namespace_read_count 30042
+# HELP consul_partition_list consul_partition_list
+# TYPE consul_partition_list summary
+consul_partition_list{quantile="0.5"} NaN
+consul_partition_list{quantile="0.9"} NaN
+consul_partition_list{quantile="0.99"} NaN
+consul_partition_list_sum 325.827104203403
+consul_partition_list_count 4138
+# HELP consul_peering_exported_services A gauge that tracks how many services are exported for the peering. The labels are "peer_name", "peer_id" and, for enterprise, "partition". We emit this metric every 9 seconds
+# TYPE consul_peering_exported_services gauge
+consul_peering_exported_services 0
+# HELP consul_peering_healthy A gauge that tracks how if a peering is healthy (1) or not (0). The labels are "peer_name", "peer_id" and, for enterprise, "partition". We emit this metric every 9 seconds
+# TYPE consul_peering_healthy gauge
+consul_peering_healthy 0
+# HELP consul_prepared_query_apply Measures the time it takes to apply a prepared query update.
+# TYPE consul_prepared_query_apply summary
+consul_prepared_query_apply{quantile="0.5"} NaN
+consul_prepared_query_apply{quantile="0.9"} NaN
+consul_prepared_query_apply{quantile="0.99"} NaN
+consul_prepared_query_apply_sum 0
+consul_prepared_query_apply_count 0
+# HELP consul_prepared_query_execute Measures the time it takes to process a prepared query execute request.
+# TYPE consul_prepared_query_execute summary
+consul_prepared_query_execute{quantile="0.5"} NaN
+consul_prepared_query_execute{quantile="0.9"} NaN
+consul_prepared_query_execute{quantile="0.99"} NaN
+consul_prepared_query_execute_sum 0
+consul_prepared_query_execute_count 0
+# HELP consul_prepared_query_execute_remote Measures the time it takes to process a prepared query execute request that was forwarded to another datacenter.
+# TYPE consul_prepared_query_execute_remote summary
+consul_prepared_query_execute_remote{quantile="0.5"} NaN
+consul_prepared_query_execute_remote{quantile="0.9"} NaN
+consul_prepared_query_execute_remote{quantile="0.99"} NaN
+consul_prepared_query_execute_remote_sum 0
+consul_prepared_query_execute_remote_count 0
+# HELP consul_prepared_query_explain Measures the time it takes to process a prepared query explain request.
+# TYPE consul_prepared_query_explain summary
+consul_prepared_query_explain{quantile="0.5"} NaN
+consul_prepared_query_explain{quantile="0.9"} NaN
+consul_prepared_query_explain{quantile="0.99"} NaN
+consul_prepared_query_explain_sum 0
+consul_prepared_query_explain_count 0
+# HELP consul_raft_applied_index Represents the raft applied index.
+# TYPE consul_raft_applied_index gauge
+consul_raft_applied_index 0
+# HELP consul_raft_applied_index_applied_index consul_raft_applied_index_applied_index
+# TYPE consul_raft_applied_index_applied_index gauge
+consul_raft_applied_index_applied_index 145203
+# HELP consul_raft_apply This counts the number of Raft transactions occurring over the interval.
+# TYPE consul_raft_apply counter
+consul_raft_apply 115252
+# HELP consul_raft_barrier consul_raft_barrier
+# TYPE consul_raft_barrier counter
+consul_raft_barrier 7530
+# HELP consul_raft_boltdb_freePageBytes_freePageBytes consul_raft_boltdb_freePageBytes_freePageBytes
+# TYPE consul_raft_boltdb_freePageBytes_freePageBytes gauge
+consul_raft_boltdb_freePageBytes_freePageBytes 1.3307904e+07
+# HELP consul_raft_boltdb_freelistBytes_freelistBytes consul_raft_boltdb_freelistBytes_freelistBytes
+# TYPE consul_raft_boltdb_freelistBytes_freelistBytes gauge
+consul_raft_boltdb_freelistBytes_freelistBytes 26008
+# HELP consul_raft_boltdb_getLog consul_raft_boltdb_getLog
+# TYPE consul_raft_boltdb_getLog summary
+consul_raft_boltdb_getLog{quantile="0.5"} 0.06123099848628044
+consul_raft_boltdb_getLog{quantile="0.9"} 0.06123099848628044
+consul_raft_boltdb_getLog{quantile="0.99"} 0.06123099848628044
+consul_raft_boltdb_getLog_sum 1990.6473612803966
+consul_raft_boltdb_getLog_count 45019
+# HELP consul_raft_boltdb_logBatchSize consul_raft_boltdb_logBatchSize
+# TYPE consul_raft_boltdb_logBatchSize summary
+consul_raft_boltdb_logBatchSize{quantile="0.5"} 1109
+consul_raft_boltdb_logBatchSize{quantile="0.9"} 1167
+consul_raft_boltdb_logBatchSize{quantile="0.99"} 1167
+consul_raft_boltdb_logBatchSize_sum 1.05877264e+08
+consul_raft_boltdb_logBatchSize_count 122794
+# HELP consul_raft_boltdb_logSize consul_raft_boltdb_logSize
+# TYPE consul_raft_boltdb_logSize summary
+consul_raft_boltdb_logSize{quantile="0.5"} 1109
+consul_raft_boltdb_logSize{quantile="0.9"} 1167
+consul_raft_boltdb_logSize{quantile="0.99"} 1167
+consul_raft_boltdb_logSize_sum 1.05877264e+08
+consul_raft_boltdb_logSize_count 122856
+# HELP consul_raft_boltdb_logsPerBatch consul_raft_boltdb_logsPerBatch
+# TYPE consul_raft_boltdb_logsPerBatch summary
+consul_raft_boltdb_logsPerBatch{quantile="0.5"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.9"} 1
+consul_raft_boltdb_logsPerBatch{quantile="0.99"} 1
+consul_raft_boltdb_logsPerBatch_sum 122856
+consul_raft_boltdb_logsPerBatch_count 122794
+# HELP consul_raft_boltdb_numFreePages_numFreePages consul_raft_boltdb_numFreePages_numFreePages
+# TYPE consul_raft_boltdb_numFreePages_numFreePages gauge
+consul_raft_boltdb_numFreePages_numFreePages 3238
+# HELP consul_raft_boltdb_numPendingPages_numPendingPages consul_raft_boltdb_numPendingPages_numPendingPages
+# TYPE consul_raft_boltdb_numPendingPages_numPendingPages gauge
+consul_raft_boltdb_numPendingPages_numPendingPages 11
+# HELP consul_raft_boltdb_openReadTxn_openReadTxn consul_raft_boltdb_openReadTxn_openReadTxn
+# TYPE consul_raft_boltdb_openReadTxn_openReadTxn gauge
+consul_raft_boltdb_openReadTxn_openReadTxn 0
+# HELP consul_raft_boltdb_storeLogs consul_raft_boltdb_storeLogs
+# TYPE consul_raft_boltdb_storeLogs summary
+consul_raft_boltdb_storeLogs{quantile="0.5"} 1.6733039617538452
+consul_raft_boltdb_storeLogs{quantile="0.9"} 2.21097993850708
+consul_raft_boltdb_storeLogs{quantile="0.99"} 2.21097993850708
+consul_raft_boltdb_storeLogs_sum 278437.40395510197
+consul_raft_boltdb_storeLogs_count 122794
+# HELP consul_raft_boltdb_totalReadTxn consul_raft_boltdb_totalReadTxn
+# TYPE consul_raft_boltdb_totalReadTxn counter
+consul_raft_boltdb_totalReadTxn 100198
+# HELP consul_raft_boltdb_txstats_cursorCount consul_raft_boltdb_txstats_cursorCount
+# TYPE consul_raft_boltdb_txstats_cursorCount counter
+consul_raft_boltdb_txstats_cursorCount 568889
+# HELP consul_raft_boltdb_txstats_nodeCount consul_raft_boltdb_txstats_nodeCount
+# TYPE consul_raft_boltdb_txstats_nodeCount counter
+consul_raft_boltdb_txstats_nodeCount 537103
+# HELP consul_raft_boltdb_txstats_nodeDeref consul_raft_boltdb_txstats_nodeDeref
+# TYPE consul_raft_boltdb_txstats_nodeDeref counter
+consul_raft_boltdb_txstats_nodeDeref 136
+# HELP consul_raft_boltdb_txstats_pageAlloc_pageAlloc consul_raft_boltdb_txstats_pageAlloc_pageAlloc
+# TYPE consul_raft_boltdb_txstats_pageAlloc_pageAlloc gauge
+consul_raft_boltdb_txstats_pageAlloc_pageAlloc 5.955145728e+09
+# HELP consul_raft_boltdb_txstats_pageCount_pageCount consul_raft_boltdb_txstats_pageCount_pageCount
+# TYPE consul_raft_boltdb_txstats_pageCount_pageCount gauge
+consul_raft_boltdb_txstats_pageCount_pageCount 1.453893e+06
+# HELP consul_raft_boltdb_txstats_rebalance consul_raft_boltdb_txstats_rebalance
+# TYPE consul_raft_boltdb_txstats_rebalance counter
+consul_raft_boltdb_txstats_rebalance 91912
+# HELP consul_raft_boltdb_txstats_rebalanceTime consul_raft_boltdb_txstats_rebalanceTime
+# TYPE consul_raft_boltdb_txstats_rebalanceTime summary
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.9"} 0
+consul_raft_boltdb_txstats_rebalanceTime{quantile="0.99"} 0
+consul_raft_boltdb_txstats_rebalanceTime_sum 61.22855579853058
+consul_raft_boltdb_txstats_rebalanceTime_count 90364
+# HELP consul_raft_boltdb_txstats_spill consul_raft_boltdb_txstats_spill
+# TYPE consul_raft_boltdb_txstats_spill counter
+consul_raft_boltdb_txstats_spill 545942
+# HELP consul_raft_boltdb_txstats_spillTime consul_raft_boltdb_txstats_spillTime
+# TYPE consul_raft_boltdb_txstats_spillTime summary
+consul_raft_boltdb_txstats_spillTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_spillTime{quantile="0.9"} 0.19511699676513672
+consul_raft_boltdb_txstats_spillTime{quantile="0.99"} 0.19511699676513672
+consul_raft_boltdb_txstats_spillTime_sum 3640.070483505726
+consul_raft_boltdb_txstats_spillTime_count 90364
+# HELP consul_raft_boltdb_txstats_split consul_raft_boltdb_txstats_split
+# TYPE consul_raft_boltdb_txstats_split counter
+consul_raft_boltdb_txstats_split 55070
+# HELP consul_raft_boltdb_txstats_write consul_raft_boltdb_txstats_write
+# TYPE consul_raft_boltdb_txstats_write counter
+consul_raft_boltdb_txstats_write 791562
+# HELP consul_raft_boltdb_txstats_writeTime consul_raft_boltdb_txstats_writeTime
+# TYPE consul_raft_boltdb_txstats_writeTime summary
+consul_raft_boltdb_txstats_writeTime{quantile="0.5"} 0
+consul_raft_boltdb_txstats_writeTime{quantile="0.9"} 11.23631763458252
+consul_raft_boltdb_txstats_writeTime{quantile="0.99"} 11.23631763458252
+consul_raft_boltdb_txstats_writeTime_sum 254982.9575778246
+consul_raft_boltdb_txstats_writeTime_count 90364
+# HELP consul_raft_boltdb_writeCapacity consul_raft_boltdb_writeCapacity
+# TYPE consul_raft_boltdb_writeCapacity summary
+consul_raft_boltdb_writeCapacity{quantile="0.5"} 601.9552612304688
+consul_raft_boltdb_writeCapacity{quantile="0.9"} 635.841064453125
+consul_raft_boltdb_writeCapacity{quantile="0.99"} 635.841064453125
+consul_raft_boltdb_writeCapacity_sum 6.307136215111172e+07
+consul_raft_boltdb_writeCapacity_count 122794
+# HELP consul_raft_commitNumLogs_commitNumLogs consul_raft_commitNumLogs_commitNumLogs
+# TYPE consul_raft_commitNumLogs_commitNumLogs gauge
+consul_raft_commitNumLogs_commitNumLogs 1
+# HELP consul_raft_commitTime This measures the time it takes to commit a new entry to the Raft log on the leader.
+# TYPE consul_raft_commitTime summary
+consul_raft_commitTime{quantile="0.5"} 1.7182049751281738
+consul_raft_commitTime{quantile="0.9"} 2.2621920108795166
+consul_raft_commitTime{quantile="0.99"} 2.2621920108795166
+consul_raft_commitTime_sum 284260.4287290573
+consul_raft_commitTime_count 122785
+# HELP consul_raft_fsm_apply consul_raft_fsm_apply
+# TYPE consul_raft_fsm_apply summary
+consul_raft_fsm_apply{quantile="0.5"} 0.16612499952316284
+consul_raft_fsm_apply{quantile="0.9"} 0.2391670048236847
+consul_raft_fsm_apply{quantile="0.99"} 0.2391670048236847
+consul_raft_fsm_apply_sum 24152.752846952528
+consul_raft_fsm_apply_count 115317
+# HELP consul_raft_fsm_enqueue consul_raft_fsm_enqueue
+# TYPE consul_raft_fsm_enqueue summary
+consul_raft_fsm_enqueue{quantile="0.5"} 0.015490000136196613
+consul_raft_fsm_enqueue{quantile="0.9"} 0.04627100005745888
+consul_raft_fsm_enqueue{quantile="0.99"} 0.04627100005745888
+consul_raft_fsm_enqueue_sum 3328.7210418977775
+consul_raft_fsm_enqueue_count 122763
+# HELP consul_raft_fsm_lastRestoreDuration This measures how long the last FSM restore (from disk or leader) took.
+# TYPE consul_raft_fsm_lastRestoreDuration gauge
+consul_raft_fsm_lastRestoreDuration 0
+# HELP consul_raft_last_index Represents the raft last index.
+# TYPE consul_raft_last_index gauge
+consul_raft_last_index 0
+# HELP consul_raft_last_index_last_index consul_raft_last_index_last_index
+# TYPE consul_raft_last_index_last_index gauge
+consul_raft_last_index_last_index 145203
+# HELP consul_raft_leader_dispatchLog consul_raft_leader_dispatchLog
+# TYPE consul_raft_leader_dispatchLog summary
+consul_raft_leader_dispatchLog{quantile="0.5"} 1.7106239795684814
+consul_raft_leader_dispatchLog{quantile="0.9"} 2.249191999435425
+consul_raft_leader_dispatchLog{quantile="0.99"} 2.249191999435425
+consul_raft_leader_dispatchLog_sum 282281.0580151081
+consul_raft_leader_dispatchLog_count 122780
+# HELP consul_raft_leader_dispatchNumLogs_dispatchNumLogs consul_raft_leader_dispatchNumLogs_dispatchNumLogs
+# TYPE consul_raft_leader_dispatchNumLogs_dispatchNumLogs gauge
+consul_raft_leader_dispatchNumLogs_dispatchNumLogs 1
+# HELP consul_raft_leader_lastContact Measures the time since the leader was last able to contact the follower nodes when checking its leader lease.
+# TYPE consul_raft_leader_lastContact summary
+consul_raft_leader_lastContact{quantile="0.5"} NaN
+consul_raft_leader_lastContact{quantile="0.9"} NaN
+consul_raft_leader_lastContact{quantile="0.99"} NaN
+consul_raft_leader_lastContact_sum 598
+consul_raft_leader_lastContact_count 19
+# HELP consul_raft_leader_oldestLogAge This measures how old the oldest log in the leader's log store is.
+# TYPE consul_raft_leader_oldestLogAge gauge
+consul_raft_leader_oldestLogAge 0
+# HELP consul_raft_leader_oldestLogAge_oldestLogAge consul_raft_leader_oldestLogAge_oldestLogAge
+# TYPE consul_raft_leader_oldestLogAge_oldestLogAge gauge
+consul_raft_leader_oldestLogAge_oldestLogAge 6.8835264e+07
+# HELP consul_raft_rpc_installSnapshot Measures the time it takes the raft leader to install a snapshot on a follower that is catching up after being down or has just joined the cluster.
+# TYPE consul_raft_rpc_installSnapshot summary
+consul_raft_rpc_installSnapshot{quantile="0.5"} NaN
+consul_raft_rpc_installSnapshot{quantile="0.9"} NaN
+consul_raft_rpc_installSnapshot{quantile="0.99"} NaN
+consul_raft_rpc_installSnapshot_sum 473.0382385253906
+consul_raft_rpc_installSnapshot_count 1
+# HELP consul_raft_snapshot_persist Measures the time it takes raft to write a new snapshot to disk.
+# TYPE consul_raft_snapshot_persist summary
+consul_raft_snapshot_persist{quantile="0.5"} NaN
+consul_raft_snapshot_persist{quantile="0.9"} NaN
+consul_raft_snapshot_persist{quantile="0.99"} NaN
+consul_raft_snapshot_persist_sum 457.33628499507904
+consul_raft_snapshot_persist_count 10
+# HELP consul_raft_state_candidate This increments whenever a Consul server starts an election.
+# TYPE consul_raft_state_candidate counter
+consul_raft_state_candidate 1
+# HELP consul_raft_state_leader This increments whenever a Consul server becomes a leader.
+# TYPE consul_raft_state_leader counter
+consul_raft_state_leader 1
+# HELP consul_raft_thread_fsm_saturation consul_raft_thread_fsm_saturation
+# TYPE consul_raft_thread_fsm_saturation summary
+consul_raft_thread_fsm_saturation{quantile="0.5"} 0
+consul_raft_thread_fsm_saturation{quantile="0.9"} 0
+consul_raft_thread_fsm_saturation{quantile="0.99"} 0
+consul_raft_thread_fsm_saturation_sum 0.7299999818205833
+consul_raft_thread_fsm_saturation_count 44326
+# HELP consul_raft_thread_main_saturation consul_raft_thread_main_saturation
+# TYPE consul_raft_thread_main_saturation summary
+consul_raft_thread_main_saturation{quantile="0.5"} 0
+consul_raft_thread_main_saturation{quantile="0.9"} 0
+consul_raft_thread_main_saturation{quantile="0.99"} 0.009999999776482582
+consul_raft_thread_main_saturation_sum 213.059995315969
+consul_raft_thread_main_saturation_count 451221
+# HELP consul_raft_verify_leader consul_raft_verify_leader
+# TYPE consul_raft_verify_leader counter
+consul_raft_verify_leader 2
+# HELP consul_rpc_accept_conn Increments when a server accepts an RPC connection.
+# TYPE consul_rpc_accept_conn counter
+consul_rpc_accept_conn 39
+# HELP consul_rpc_consistentRead Measures the time spent confirming that a consistent read can be performed.
+# TYPE consul_rpc_consistentRead summary
+consul_rpc_consistentRead{quantile="0.5"} NaN
+consul_rpc_consistentRead{quantile="0.9"} NaN
+consul_rpc_consistentRead{quantile="0.99"} NaN
+consul_rpc_consistentRead_sum 85.52406929805875
+consul_rpc_consistentRead_count 1600
+# HELP consul_rpc_cross_dc Increments when a server sends a (potentially blocking) cross datacenter RPC query.
+# TYPE consul_rpc_cross_dc counter
+consul_rpc_cross_dc 0
+# HELP consul_rpc_queries_blocking Shows the current number of in-flight blocking queries the server is handling.
+# TYPE consul_rpc_queries_blocking gauge
+consul_rpc_queries_blocking 0
+# HELP consul_rpc_queries_blocking_queries_blocking consul_rpc_queries_blocking_queries_blocking
+# TYPE consul_rpc_queries_blocking_queries_blocking gauge
+consul_rpc_queries_blocking_queries_blocking 20
+# HELP consul_rpc_query Increments when a server receives a read request, indicating the rate of new read queries.
+# TYPE consul_rpc_query counter
+consul_rpc_query 261853
+# HELP consul_rpc_raft_handoff Increments when a server accepts a Raft-related RPC connection.
+# TYPE consul_rpc_raft_handoff counter
+consul_rpc_raft_handoff 3
+# HELP consul_rpc_request Increments when a server receives a Consul-related RPC request.
+# TYPE consul_rpc_request counter
+consul_rpc_request 233395
+# HELP consul_rpc_request_error Increments when a server returns an error from an RPC request.
+# TYPE consul_rpc_request_error counter
+consul_rpc_request_error 0
+# HELP consul_runtime_alloc_bytes_alloc_bytes consul_runtime_alloc_bytes_alloc_bytes
+# TYPE consul_runtime_alloc_bytes_alloc_bytes gauge
+consul_runtime_alloc_bytes_alloc_bytes 5.1729856e+07
+# HELP consul_runtime_free_count_free_count consul_runtime_free_count_free_count
+# TYPE consul_runtime_free_count_free_count gauge
+consul_runtime_free_count_free_count 1.513573888e+09
+# HELP consul_runtime_gc_pause_ns consul_runtime_gc_pause_ns
+# TYPE consul_runtime_gc_pause_ns summary
+consul_runtime_gc_pause_ns{quantile="0.5"} NaN
+consul_runtime_gc_pause_ns{quantile="0.9"} NaN
+consul_runtime_gc_pause_ns{quantile="0.99"} NaN
+consul_runtime_gc_pause_ns_sum 8.32754022e+08
+consul_runtime_gc_pause_ns_count 4172
+# HELP consul_runtime_heap_objects_heap_objects consul_runtime_heap_objects_heap_objects
+# TYPE consul_runtime_heap_objects_heap_objects gauge
+consul_runtime_heap_objects_heap_objects 309596
+# HELP consul_runtime_malloc_count_malloc_count consul_runtime_malloc_count_malloc_count
+# TYPE consul_runtime_malloc_count_malloc_count gauge
+consul_runtime_malloc_count_malloc_count 1.51388352e+09
+# HELP consul_runtime_num_goroutines_num_goroutines consul_runtime_num_goroutines_num_goroutines
+# TYPE consul_runtime_num_goroutines_num_goroutines gauge
+consul_runtime_num_goroutines_num_goroutines 305
+# HELP consul_runtime_sys_bytes_sys_bytes consul_runtime_sys_bytes_sys_bytes
+# TYPE consul_runtime_sys_bytes_sys_bytes gauge
+consul_runtime_sys_bytes_sys_bytes 1.6015696e+08
+# HELP consul_runtime_total_gc_pause_ns_total_gc_pause_ns consul_runtime_total_gc_pause_ns_total_gc_pause_ns
+# TYPE consul_runtime_total_gc_pause_ns_total_gc_pause_ns gauge
+consul_runtime_total_gc_pause_ns_total_gc_pause_ns 8.32754048e+08
+# HELP consul_runtime_total_gc_runs_total_gc_runs consul_runtime_total_gc_runs_total_gc_runs
+# TYPE consul_runtime_total_gc_runs_total_gc_runs gauge
+consul_runtime_total_gc_runs_total_gc_runs 4172
+# HELP consul_serf_coordinate_adjustment_ms consul_serf_coordinate_adjustment_ms
+# TYPE consul_serf_coordinate_adjustment_ms summary
+consul_serf_coordinate_adjustment_ms{network="lan",partition="default",segment="",quantile="0.5"} 0.31390100717544556
+consul_serf_coordinate_adjustment_ms{network="lan",partition="default",segment="",quantile="0.9"} 0.31821900606155396
+consul_serf_coordinate_adjustment_ms{network="lan",partition="default",segment="",quantile="0.99"} 0.31821900606155396
+consul_serf_coordinate_adjustment_ms_sum{network="lan",partition="default",segment=""} 23996.035400994588
+consul_serf_coordinate_adjustment_ms_count{network="lan",partition="default",segment=""} 30847
+# HELP consul_serf_queue_Event consul_serf_queue_Event
+# TYPE consul_serf_queue_Event summary
+consul_serf_queue_Event{network="wan",quantile="0.5"} 0
+consul_serf_queue_Event{network="wan",quantile="0.9"} 0
+consul_serf_queue_Event{network="wan",quantile="0.99"} 0
+consul_serf_queue_Event_sum{network="wan"} 0
+consul_serf_queue_Event_count{network="wan"} 15060
+consul_serf_queue_Event{network="lan",partition="default",segment="",quantile="0.5"} 0
+consul_serf_queue_Event{network="lan",partition="default",segment="",quantile="0.9"} 0
+consul_serf_queue_Event{network="lan",partition="default",segment="",quantile="0.99"} 0
+consul_serf_queue_Event_sum{network="lan",partition="default",segment=""} 6429
+consul_serf_queue_Event_count{network="lan",partition="default",segment=""} 15060
+# HELP consul_serf_queue_Intent consul_serf_queue_Intent
+# TYPE consul_serf_queue_Intent summary
+consul_serf_queue_Intent{network="wan",quantile="0.5"} 0
+consul_serf_queue_Intent{network="wan",quantile="0.9"} 0
+consul_serf_queue_Intent{network="wan",quantile="0.99"} 0
+consul_serf_queue_Intent_sum{network="wan"} 0
+consul_serf_queue_Intent_count{network="wan"} 15060
+consul_serf_queue_Intent{network="lan",partition="default",segment="",quantile="0.5"} 0
+consul_serf_queue_Intent{network="lan",partition="default",segment="",quantile="0.9"} 0
+consul_serf_queue_Intent{network="lan",partition="default",segment="",quantile="0.99"} 0
+consul_serf_queue_Intent_sum{network="lan",partition="default",segment=""} 0
+consul_serf_queue_Intent_count{network="lan",partition="default",segment=""} 15060
+# HELP consul_serf_queue_Query consul_serf_queue_Query
+# TYPE consul_serf_queue_Query summary
+consul_serf_queue_Query{network="wan",quantile="0.5"} 0
+consul_serf_queue_Query{network="wan",quantile="0.9"} 0
+consul_serf_queue_Query{network="wan",quantile="0.99"} 0
+consul_serf_queue_Query_sum{network="wan"} 0
+consul_serf_queue_Query_count{network="wan"} 15060
+consul_serf_queue_Query{network="lan",partition="default",segment="",quantile="0.5"} 0
+consul_serf_queue_Query{network="lan",partition="default",segment="",quantile="0.9"} 0
+consul_serf_queue_Query{network="lan",partition="default",segment="",quantile="0.99"} 0
+consul_serf_queue_Query_sum{network="lan",partition="default",segment=""} 0
+consul_serf_queue_Query_count{network="lan",partition="default",segment=""} 15060
+# HELP consul_server_isLeader Tracks if the server is a leader.
+# TYPE consul_server_isLeader gauge
+consul_server_isLeader 0
+# HELP consul_server_isLeader_isLeader consul_server_isLeader_isLeader
+# TYPE consul_server_isLeader_isLeader gauge
+consul_server_isLeader_isLeader 1
+# HELP consul_session_apply Measures the time spent applying a session update.
+# TYPE consul_session_apply summary
+consul_session_apply{quantile="0.5"} NaN
+consul_session_apply{quantile="0.9"} NaN
+consul_session_apply{quantile="0.99"} NaN
+consul_session_apply_sum 0
+consul_session_apply_count 0
+# HELP consul_session_renew Measures the time spent renewing a session.
+# TYPE consul_session_renew summary
+consul_session_renew{quantile="0.5"} NaN
+consul_session_renew{quantile="0.9"} NaN
+consul_session_renew{quantile="0.99"} NaN
+consul_session_renew_sum 0
+consul_session_renew_count 0
+# HELP consul_session_ttl_active Tracks the active number of sessions being tracked.
+# TYPE consul_session_ttl_active gauge
+consul_session_ttl_active 0
+# HELP consul_session_ttl_active_active consul_session_ttl_active_active
+# TYPE consul_session_ttl_active_active gauge
+consul_session_ttl_active_active 0
+# HELP consul_session_ttl_invalidate Measures the time spent invalidating an expired session.
+# TYPE consul_session_ttl_invalidate summary
+consul_session_ttl_invalidate{quantile="0.5"} NaN
+consul_session_ttl_invalidate{quantile="0.9"} NaN
+consul_session_ttl_invalidate{quantile="0.99"} NaN
+consul_session_ttl_invalidate_sum 0
+consul_session_ttl_invalidate_count 0
+# HELP consul_state_config_entries Measures the current number of unique configuration entries registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_state_config_entries gauge
+consul_state_config_entries 0
+# HELP consul_state_connect_instances Measures the current number of unique connect service instances registered with Consul, labeled by Kind. It is only emitted by Consul servers. Added in v1.10.4.
+# TYPE consul_state_connect_instances gauge
+consul_state_connect_instances 0
+# HELP consul_state_kv_entries Measures the current number of entries in the Consul KV store. It is only emitted by Consul servers. Added in v1.10.3.
+# TYPE consul_state_kv_entries gauge
+consul_state_kv_entries 0
+# HELP consul_state_nodes Measures the current number of nodes registered with Consul. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_state_nodes gauge
+consul_state_nodes 0
+# HELP consul_state_peerings Measures the current number of peerings registered with Consul. It is only emitted by Consul servers. Added in v1.13.0.
+# TYPE consul_state_peerings gauge
+consul_state_peerings 0
+# HELP consul_state_service_instances Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_state_service_instances gauge
+consul_state_service_instances 0
+# HELP consul_state_services Measures the current number of unique services registered with Consul, based on service name. It is only emitted by Consul servers. Added in v1.9.0.
+# TYPE consul_state_services gauge
+consul_state_services 0
+# HELP consul_system_licenseExpiration Represents the number of hours until the current license is going to expire
+# TYPE consul_system_licenseExpiration gauge
+consul_system_licenseExpiration 0
+# HELP consul_system_licenseExpiration_licenseExpiration consul_system_licenseExpiration_licenseExpiration
+# TYPE consul_system_licenseExpiration_licenseExpiration gauge
+consul_system_licenseExpiration_licenseExpiration 819.429443359375
+# HELP consul_txn_apply Measures the time spent applying a transaction operation.
+# TYPE consul_txn_apply summary
+consul_txn_apply{quantile="0.5"} NaN
+consul_txn_apply{quantile="0.9"} NaN
+consul_txn_apply{quantile="0.99"} NaN
+consul_txn_apply_sum 0
+consul_txn_apply_count 0
+# HELP consul_txn_read Measures the time spent returning a read transaction.
+# TYPE consul_txn_read summary
+consul_txn_read{quantile="0.5"} NaN
+consul_txn_read{quantile="0.9"} NaN
+consul_txn_read{quantile="0.99"} NaN
+consul_txn_read_sum 0
+consul_txn_read_count 0
+# HELP consul_version Represents the Consul version.
+# TYPE consul_version gauge
+consul_version 0
+# HELP consul_xds_server_idealStreamsMax The maximum number of xDS streams per server, chosen to achieve a roughly even spread of load across servers.
+# TYPE consul_xds_server_idealStreamsMax gauge
+consul_xds_server_idealStreamsMax 0
+# HELP consul_xds_server_streamDrained Counts the number of xDS streams that are drained when rebalancing the load between servers.
+# TYPE consul_xds_server_streamDrained counter
+consul_xds_server_streamDrained 0
+# HELP consul_xds_server_streamStart Measures the time in milliseconds after an xDS stream is opened until xDS resources are first generated for the stream.
+# TYPE consul_xds_server_streamStart summary
+consul_xds_server_streamStart{quantile="0.5"} NaN
+consul_xds_server_streamStart{quantile="0.9"} NaN
+consul_xds_server_streamStart{quantile="0.99"} NaN
+consul_xds_server_streamStart_sum 3501.488723754883
+consul_xds_server_streamStart_count 11
+# HELP consul_xds_server_streams Measures the number of active xDS streams handled by the server split by protocol version.
+# TYPE consul_xds_server_streams gauge
+consul_xds_server_streams 0
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 3.7191e-05
+go_gc_duration_seconds{quantile="0.25"} 6.1463e-05
+go_gc_duration_seconds{quantile="0.5"} 7.7062e-05
+go_gc_duration_seconds{quantile="0.75"} 0.000115923
+go_gc_duration_seconds{quantile="1"} 0.001147196
+go_gc_duration_seconds_sum 0.832754027
+go_gc_duration_seconds_count 4172
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 313
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.19.4"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 5.195244e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 1.0251245704e+11
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 4.77878e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 1.51357406e+09
+# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
+# TYPE go_memstats_gc_cpu_fraction gauge
+go_memstats_gc_cpu_fraction 2.663750489550345e-05
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 1.5347888e+07
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 5.195244e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 7.4121216e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 6.1472768e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 311688
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 5.914624e+07
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 1.35593984e+08
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.6741251000160766e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 1.513885748e+09
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 2400
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 15600
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 712656
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 943776
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 7.2274088e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 658892
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 2.818048e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 2.818048e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 1.60156968e+08
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 10
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 4001.82
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 65536
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 45
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 1.30408448e+08
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.67367331028e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 1.046990848e+09
+# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
+# TYPE process_virtual_memory_max_bytes gauge
+process_virtual_memory_max_bytes -1 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-self.json b/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-self.json
new file mode 100644
index 000000000..8a11b7d0e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-agent-self.json
@@ -0,0 +1,71 @@
+{
+ "Config": {
+ "Datacenter": "consul-sandbox-cluster-0159c9d3",
+ "PrimaryDatacenter": "consul-sandbox-cluster-0159c9d3",
+ "NodeName": "ip-172-25-37-57",
+ "NodeID": "b1906d81-c585-7c2c-1236-a5404b7fa7ca",
+ "Revision": "55a184d3",
+ "Server": true,
+ "Version": "1.14.3+ent",
+ "BuildDate": "2022-12-13T17:12:10Z"
+ },
+ "DebugConfig": {
+ "Cloud": {
+ "AuthURL": "",
+ "ClientID": "492e9e67-6386-4727-964f-8a41305f30a5",
+ "ClientSecret": "hidden",
+ "Hostname": "",
+ "ResourceID": "organization/1/project/2/hashicorp.consul.cluster/3",
+ "ScadaAddress": ""
+ },
+ "Telemetry": {
+ "AllowedPrefixes": [],
+ "BlockedPrefixes": [
+ "consul.rpc.server.call"
+ ],
+ "CirconusAPIApp": "",
+ "CirconusAPIToken": "hidden",
+ "CirconusAPIURL": "",
+ "CirconusBrokerID": "",
+ "CirconusBrokerSelectTag": "",
+ "CirconusCheckDisplayName": "",
+ "CirconusCheckForceMetricActivation": "",
+ "CirconusCheckID": "",
+ "CirconusCheckInstanceID": "",
+ "CirconusCheckSearchTag": "",
+ "CirconusCheckTags": "",
+ "CirconusSubmissionInterval": "",
+ "CirconusSubmissionURL": "",
+ "Disable": false,
+ "DisableHostname": false,
+ "DogstatsdAddr": "127.0.0.1:8125",
+ "DogstatsdTags": [],
+ "FilterDefault": true,
+ "MetricsPrefix": "consul",
+ "PrometheusOpts": {
+ "CounterDefinitions": [],
+ "Expiration": "5m0s",
+ "GaugeDefinitions": [],
+ "Name": "consul",
+ "Registerer": null,
+ "SummaryDefinitions": []
+ },
+ "RetryFailedConfiguration": true,
+ "StatsdAddr": "",
+ "StatsiteAddr": ""
+ }
+ },
+ "Stats": {
+ "license": {
+ "customer": "a1c27ed4-43a4-4192-9f39-14e1166d2d2e",
+ "expiration_time": "2023-02-22 14:11:12.877172615 +0000 UTC",
+ "features": "Automated Backups, Automated Upgrades, Namespaces, SSO, Audit Logging, Admin Partitions",
+ "id": "492e9e67-6386-4727-964f-8a41305f30a5",
+ "install_id": "*",
+ "issue_time": "2023-01-15 14:11:12.877172615 +0000 UTC",
+ "modules": "Governance and Policy",
+ "product": "consul",
+ "start_time": "2023-01-15 14:11:12.877172615 +0000 UTC"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json b/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json
new file mode 100644
index 000000000..bfe44c7fc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json
@@ -0,0 +1,42 @@
+[
+ {
+ "Node": "ip-10-50-133-93",
+ "Segment": "",
+ "Partition": "default",
+ "Coord": {
+ "Vec": [
+ -0.0005406415790908119,
+ -0.005125240204547753,
+ -0.0010556502711423538,
+ -0.00223296135134459,
+ 0.002051567080576126,
+ -0.004494795954099239,
+ -0.0010621855776488467,
+ 0.0013985871196457514
+ ],
+ "Error": 0.056466891936309965,
+ "Adjustment": -0.0004925342111843478,
+ "Height": 0.00043853135504766936
+ }
+ },
+ {
+ "Node": "ip-172-25-37-57",
+ "Segment": "",
+ "Partition": "default",
+ "Coord": {
+ "Vec": [
+ -0.00041456488713690183,
+ -0.0039300429073992685,
+ -0.0008094743964577936,
+ -0.001712238560569221,
+ 0.0015731451331568297,
+ -0.00344661716784539,
+ -0.0008144857045591224,
+ 0.0010724389795601075
+ ],
+ "Error": 0.0223287150164881,
+ "Adjustment": -0.0004893904130922427,
+ "Height": 5.5788597108650077e-05
+ }
+ }
+]
diff --git a/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/v1-agent-checks.json b/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/v1-agent-checks.json
new file mode 100644
index 000000000..0daa492c0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/consul/testdata/v1.14.3-cloud/v1-agent-checks.json
@@ -0,0 +1,68 @@
+{
+ "chk1": {
+ "Node": "mysql1",
+ "CheckID": "chk1",
+ "Name": "ssh",
+ "Status": "passing",
+ "Notes": "",
+ "Output": "TCP connect 127.0.0.1:22: Success",
+ "ServiceID": "",
+ "ServiceName": "",
+ "ServiceTags": [
+ ],
+ "Definition": {
+ },
+ "CreateIndex": 0,
+ "ModifyIndex": 0
+ },
+ "chk2": {
+ "Node": "mysql1",
+ "CheckID": "chk2",
+ "Name": "telnet",
+ "Status": "critical",
+ "Notes": "",
+ "Output": "dial tcp 127.0.0.1:23: connect: connection refused",
+ "ServiceID": "",
+ "ServiceName": "",
+ "ServiceTags": [
+ ],
+ "Definition": {
+ },
+ "CreateIndex": 0,
+ "ModifyIndex": 0
+ },
+ "chk3": {
+ "Node": "mysql1",
+ "CheckID": "chk3",
+ "Name": "telnet",
+ "Status": "critical",
+ "Notes": "",
+ "Output": "dial tcp 127.0.0.1:23: connect: connection refused",
+ "ServiceID": "",
+ "ServiceName": "",
+ "ServiceTags": [
+ ],
+ "Definition": {
+ },
+ "CreateIndex": 0,
+ "ModifyIndex": 0
+ },
+ "mysql": {
+ "Node": "mysql1",
+ "CheckID": "mysql",
+ "Name": "MYSQL TCP on port 3336",
+ "Status": "critical",
+ "Notes": "",
+ "Output": "dial tcp 127.0.0.1:3336: connect: connection refused",
+ "ServiceID": "mysql0",
+ "ServiceName": "mysql",
+ "ServiceTags": [
+ "primary",
+ "secondary"
+ ],
+ "Definition": {
+ },
+ "CreateIndex": 0,
+ "ModifyIndex": 0
+ }
+}
diff --git a/src/go/plugin/go.d/modules/coredns/README.md b/src/go/plugin/go.d/modules/coredns/README.md
new file mode 120000
index 000000000..fcd7e5544
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/README.md
@@ -0,0 +1 @@
+integrations/coredns.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/coredns/charts.go b/src/go/plugin/go.d/modules/coredns/charts.go
new file mode 100644
index 000000000..fd93efad3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/charts.go
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package coredns
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Chart is an alias for module.Chart
+ Chart = module.Chart
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+ // Dim is an alias for module.Dim
+ Dim = module.Dim
+)
+
+var summaryCharts = Charts{
+ {
+ ID: "dns_request_count_total",
+ Title: "Number Of DNS Requests",
+ Units: "requests/s",
+ Fam: "summary",
+ Ctx: "coredns.dns_request_count_total",
+ Dims: Dims{
+ {ID: "request_total", Name: "requests", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "dns_responses_count_total",
+ Title: "Number Of DNS Responses",
+ Units: "responses/s",
+ Fam: "summary",
+ Ctx: "coredns.dns_responses_count_total",
+ Dims: Dims{
+ {ID: "response_total", Name: "responses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "dns_request_count_total_per_status",
+ Title: "Number Of Processed And Dropped DNS Requests",
+ Units: "requests/s",
+ Fam: "summary",
+ Ctx: "coredns.dns_request_count_total_per_status",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "request_per_status_processed", Name: "processed", Algo: module.Incremental},
+ {ID: "request_per_status_dropped", Name: "dropped", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "dns_no_matching_zone_dropped_total",
+ Title: "Number Of Dropped DNS Requests Because Of No Matching Zone",
+ Units: "requests/s",
+ Fam: "summary",
+ Ctx: "coredns.dns_no_matching_zone_dropped_total",
+ Dims: Dims{
+ {ID: "no_matching_zone_dropped_total", Name: "dropped", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "dns_panic_count_total",
+ Title: "Number Of Panics",
+ Units: "panics/s",
+ Fam: "summary",
+ Ctx: "coredns.dns_panic_count_total",
+ Dims: Dims{
+ {ID: "panic_total", Name: "panics", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "dns_requests_count_total_per_proto",
+ Title: "Number Of DNS Requests Per Transport Protocol",
+ Units: "requests/s",
+ Fam: "summary",
+ Ctx: "coredns.dns_requests_count_total_per_proto",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "request_per_proto_udp", Name: "udp", Algo: module.Incremental},
+ {ID: "request_per_proto_tcp", Name: "tcp", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "dns_requests_count_total_per_ip_family",
+ Title: "Number Of DNS Requests Per IP Family",
+ Units: "requests/s",
+ Fam: "summary",
+ Ctx: "coredns.dns_requests_count_total_per_ip_family",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "request_per_ip_family_v4", Name: "v4", Algo: module.Incremental},
+ {ID: "request_per_ip_family_v6", Name: "v6", Algo: module.Incremental},
+ },
+ },
+ //{
+ // ID: "dns_requests_duration_seconds",
+ // Title: "Number Of DNS Requests Per Bucket",
+ // Units: "requests/s",
+ // Fam: "summary",
+ // Ctx: "coredns.dns_requests_duration_seconds",
+ // Type: module.Stacked,
+ // Dims: Dims{
+ // {ID: "request_duration_seconds_bucket_0.00025", Name: "0.00025s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.0005", Name: "0.0005s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.001", Name: "0.001s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.002", Name: "0.002s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.004", Name: "0.004s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.008", Name: "0.008s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.016", Name: "0.016s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.032", Name: "0.032s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.064", Name: "0.064s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.128", Name: "0.128s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.256", Name: "0.256s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_0.512", Name: "0.512s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_1.024", Name: "1.024s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_2.048", Name: "2.048s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_4.096", Name: "4.096s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_8.192", Name: "8.192s", Algo: module.Incremental},
+ // {ID: "request_duration_seconds_bucket_+Inf", Name: "+Inf", Algo: module.Incremental},
+ // },
+ //},
+ {
+ ID: "dns_requests_count_total_per_type",
+ Title: "Number Of DNS Requests Per Type",
+ Units: "requests/s",
+ Fam: "summary",
+ Ctx: "coredns.dns_requests_count_total_per_per_type",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "request_per_type_A", Name: "A", Algo: module.Incremental},
+ {ID: "request_per_type_AAAA", Name: "AAAA", Algo: module.Incremental},
+ {ID: "request_per_type_MX", Name: "MX", Algo: module.Incremental},
+ {ID: "request_per_type_SOA", Name: "SOA", Algo: module.Incremental},
+ {ID: "request_per_type_CNAME", Name: "CNAME", Algo: module.Incremental},
+ {ID: "request_per_type_PTR", Name: "PTR", Algo: module.Incremental},
+ {ID: "request_per_type_TXT", Name: "TXT", Algo: module.Incremental},
+ {ID: "request_per_type_NS", Name: "NS", Algo: module.Incremental},
+ {ID: "request_per_type_DS", Name: "DS", Algo: module.Incremental},
+ {ID: "request_per_type_DNSKEY", Name: "DNSKEY", Algo: module.Incremental},
+ {ID: "request_per_type_RRSIG", Name: "RRSIG", Algo: module.Incremental},
+ {ID: "request_per_type_NSEC", Name: "NSEC", Algo: module.Incremental},
+ {ID: "request_per_type_NSEC3", Name: "NSEC3", Algo: module.Incremental},
+ {ID: "request_per_type_IXFR", Name: "IXFR", Algo: module.Incremental},
+ {ID: "request_per_type_ANY", Name: "ANY", Algo: module.Incremental},
+ {ID: "request_per_type_other", Name: "other", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "dns_responses_count_total_per_rcode",
+ Title: "Number Of DNS Responses Per Rcode",
+ Units: "responses/s",
+ Fam: "summary",
+ Ctx: "coredns.dns_responses_count_total_per_rcode",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "response_per_rcode_NOERROR", Name: "NOERROR", Algo: module.Incremental},
+ {ID: "response_per_rcode_FORMERR", Name: "FORMERR", Algo: module.Incremental},
+ {ID: "response_per_rcode_SERVFAIL", Name: "SERVFAIL", Algo: module.Incremental},
+ {ID: "response_per_rcode_NXDOMAIN", Name: "NXDOMAIN", Algo: module.Incremental},
+ {ID: "response_per_rcode_NOTIMP", Name: "NOTIMP", Algo: module.Incremental},
+ {ID: "response_per_rcode_REFUSED", Name: "REFUSED", Algo: module.Incremental},
+ {ID: "response_per_rcode_YXDOMAIN", Name: "YXDOMAIN", Algo: module.Incremental},
+ {ID: "response_per_rcode_YXRRSET", Name: "YXRRSET", Algo: module.Incremental},
+ {ID: "response_per_rcode_NXRRSET", Name: "NXRRSET", Algo: module.Incremental},
+ {ID: "response_per_rcode_NOTAUTH", Name: "NOTAUTH", Algo: module.Incremental},
+ {ID: "response_per_rcode_NOTZONE", Name: "NOTZONE", Algo: module.Incremental},
+ {ID: "response_per_rcode_BADSIG", Name: "BADSIG", Algo: module.Incremental},
+ {ID: "response_per_rcode_BADKEY", Name: "BADKEY", Algo: module.Incremental},
+ {ID: "response_per_rcode_BADTIME", Name: "BADTIME", Algo: module.Incremental},
+ {ID: "response_per_rcode_BADMODE", Name: "BADMODE", Algo: module.Incremental},
+ {ID: "response_per_rcode_BADNAME", Name: "BADNAME", Algo: module.Incremental},
+ {ID: "response_per_rcode_BADALG", Name: "BADALG", Algo: module.Incremental},
+ {ID: "response_per_rcode_BADTRUNC", Name: "BADTRUNC", Algo: module.Incremental},
+ {ID: "response_per_rcode_BADCOOKIE", Name: "BADCOOKIE", Algo: module.Incremental},
+ {ID: "response_per_rcode_other", Name: "other", Algo: module.Incremental},
+ },
+ },
+}
+
+var serverCharts = Charts{
+ {
+ ID: "per_%s_%s_dns_request_count_total",
+ Title: "Number Of DNS Requests, %s %s",
+ Units: "requests/s",
+ Fam: "%s %s",
+ Ctx: "coredns.server_dns_request_count_total",
+ Dims: Dims{
+ {ID: "%s_request_total", Name: "requests", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "per_%s_%s_dns_responses_count_total",
+ Title: "Number Of DNS Responses, %s %s",
+ Units: "responses/s",
+ Fam: "%s %s",
+ Ctx: "coredns.server_dns_responses_count_total",
+ Dims: Dims{
+ {ID: "%s_response_total", Name: "responses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "per_%s_%s_dns_request_count_total_per_status",
+ Title: "Number Of Processed And Dropped DNS Requests, %s %s",
+ Units: "requests/s",
+ Fam: "%s %s",
+ Ctx: "coredns.server_dns_request_count_total_per_status",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "%s_request_per_status_processed", Name: "processed", Algo: module.Incremental},
+ {ID: "%s_request_per_status_dropped", Name: "dropped", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "per_%s_%s_dns_requests_count_total_per_proto",
+ Title: "Number Of DNS Requests Per Transport Protocol, %s %s",
+ Units: "requests/s",
+ Fam: "%s %s",
+ Ctx: "coredns.server_dns_requests_count_total_per_proto",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "%s_request_per_proto_udp", Name: "udp", Algo: module.Incremental},
+ {ID: "%s_request_per_proto_tcp", Name: "tcp", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "per_%s_%s_dns_requests_count_total_per_ip_family",
+ Title: "Number Of DNS Requests Per IP Family, %s %s",
+ Units: "requests/s",
+ Fam: "%s %s",
+ Ctx: "coredns.server_dns_requests_count_total_per_ip_family",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "%s_request_per_ip_family_v4", Name: "v4", Algo: module.Incremental},
+ {ID: "%s_request_per_ip_family_v6", Name: "v6", Algo: module.Incremental},
+ },
+ },
+ //{
+ // ID: "per_%s_%s_dns_requests_duration_seconds",
+ // Title: "Number Of DNS Requests Per Bucket, %s %s",
+ // Units: "requests/s",
+ // Fam: "%s %s",
+ // Ctx: "coredns.server_dns_requests_duration_seconds",
+ // Type: module.Stacked,
+ // Dims: Dims{
+ // {ID: "%s_request_duration_seconds_bucket_0.00025", Name: "0.00025s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.0005", Name: "0.0005s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.001", Name: "0.001s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.002", Name: "0.002s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.004", Name: "0.004s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.008", Name: "0.008s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.016", Name: "0.016s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.032", Name: "0.032s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.064", Name: "0.064s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.128", Name: "0.128s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.256", Name: "0.256s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_0.512", Name: "0.512s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_1.024", Name: "1.024s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_2.048", Name: "2.048s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_4.096", Name: "4.096s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_8.192", Name: "8.192s", Algo: module.Incremental},
+ // {ID: "%s_request_duration_seconds_bucket_+Inf", Name: "+Inf", Algo: module.Incremental},
+ // },
+ //},
+ {
+ ID: "per_%s_%s_dns_requests_count_total_per_type",
+ Title: "Number Of DNS Requests Per Type, %s %s",
+ Units: "requests/s",
+ Fam: "%s %s",
+ Ctx: "coredns.server_dns_requests_count_total_per_per_type",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "%s_request_per_type_A", Name: "A", Algo: module.Incremental},
+ {ID: "%s_request_per_type_AAAA", Name: "AAAA", Algo: module.Incremental},
+ {ID: "%s_request_per_type_MX", Name: "MX", Algo: module.Incremental},
+ {ID: "%s_request_per_type_SOA", Name: "SOA", Algo: module.Incremental},
+ {ID: "%s_request_per_type_CNAME", Name: "CNAME", Algo: module.Incremental},
+ {ID: "%s_request_per_type_PTR", Name: "PTR", Algo: module.Incremental},
+ {ID: "%s_request_per_type_TXT", Name: "TXT", Algo: module.Incremental},
+ {ID: "%s_request_per_type_NS", Name: "NS", Algo: module.Incremental},
+ {ID: "%s_request_per_type_DS", Name: "DS", Algo: module.Incremental},
+ {ID: "%s_request_per_type_DNSKEY", Name: "DNSKEY", Algo: module.Incremental},
+ {ID: "%s_request_per_type_RRSIG", Name: "RRSIG", Algo: module.Incremental},
+ {ID: "%s_request_per_type_NSEC", Name: "NSEC", Algo: module.Incremental},
+ {ID: "%s_request_per_type_NSEC3", Name: "NSEC3", Algo: module.Incremental},
+ {ID: "%s_request_per_type_IXFR", Name: "IXFR", Algo: module.Incremental},
+ {ID: "%s_request_per_type_ANY", Name: "ANY", Algo: module.Incremental},
+ {ID: "%s_request_per_type_other", Name: "other", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "per_%s_%s_dns_responses_count_total_per_rcode",
+ Title: "Number Of DNS Responses Per Rcode, %s %s",
+ Units: "responses/s",
+ Fam: "%s %s",
+ Ctx: "coredns.server_dns_responses_count_total_per_rcode",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "%s_response_per_rcode_NOERROR", Name: "NOERROR", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_FORMERR", Name: "FORMERR", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_SERVFAIL", Name: "SERVFAIL", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_NXDOMAIN", Name: "NXDOMAIN", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_NOTIMP", Name: "NOTIMP", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_REFUSED", Name: "REFUSED", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_YXDOMAIN", Name: "YXDOMAIN", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_YXRRSET", Name: "YXRRSET", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_NXRRSET", Name: "NXRRSET", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_NOTAUTH", Name: "NOTAUTH", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_NOTZONE", Name: "NOTZONE", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_BADSIG", Name: "BADSIG", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_BADKEY", Name: "BADKEY", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_BADTIME", Name: "BADTIME", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_BADMODE", Name: "BADMODE", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_BADNAME", Name: "BADNAME", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_BADALG", Name: "BADALG", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_BADTRUNC", Name: "BADTRUNC", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_BADCOOKIE", Name: "BADCOOKIE", Algo: module.Incremental},
+ {ID: "%s_response_per_rcode_other", Name: "other", Algo: module.Incremental},
+ },
+ },
+}
+
+var zoneCharts = func() Charts {
+ c := serverCharts.Copy()
+ _ = c.Remove("per_%s_%s_dns_request_count_total_per_status")
+ return *c
+}()
diff --git a/src/go/plugin/go.d/modules/coredns/collect.go b/src/go/plugin/go.d/modules/coredns/collect.go
new file mode 100644
index 000000000..d6137b181
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/collect.go
@@ -0,0 +1,713 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package coredns
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/blang/semver/v4"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+const (
+ metricPanicCountTotal169orOlder = "coredns_panic_count_total"
+ metricRequestCountTotal169orOlder = "coredns_dns_request_count_total"
+ metricRequestTypeCountTotal169orOlder = "coredns_dns_request_type_count_total"
+ metricResponseRcodeCountTotal169orOlder = "coredns_dns_response_rcode_count_total"
+
+ metricPanicCountTotal170orNewer = "coredns_panics_total"
+ metricRequestCountTotal170orNewer = "coredns_dns_requests_total"
+ metricRequestTypeCountTotal170orNewer = "coredns_dns_requests_total"
+ metricResponseRcodeCountTotal170orNewer = "coredns_dns_responses_total"
+)
+
+var (
+ empty = ""
+ dropped = "dropped"
+ emptyServerReplaceName = "empty"
+ rootZoneReplaceName = "root"
+ version169 = semver.MustParse("1.6.9")
+)
+
+type requestMetricsNames struct {
+ panicCountTotal string
+ // true for all metrics below:
+	// - if no server block matches, the 'server' tag is "" and the empty server has only one zone - dropped.
+	// example:
+	// coredns_dns_requests_total{family="1",proto="udp",server="",zone="dropped"} 1
+	// - dropped requests are added to both the dropped zone and the corresponding zone
+ // example:
+ // coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",zone="dropped"} 2
+ // coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",zone="ya.ru."} 2
+ requestCountTotal string
+ requestTypeCountTotal string
+ responseRcodeCountTotal string
+}
+
+func (cd *CoreDNS) collect() (map[string]int64, error) {
+ raw, err := cd.prom.ScrapeSeries()
+
+ if err != nil {
+ return nil, err
+ }
+
+ mx := newMetrics()
+
+ // some metric names are different depending on the version
+ // update them once
+ if !cd.skipVersionCheck {
+ cd.updateVersionDependentMetrics(raw)
+ cd.skipVersionCheck = true
+ }
+
+	// we can only get these metrics if we know the server version
+ if cd.version == nil {
+ return nil, errors.New("unable to determine server version")
+ }
+
+ cd.collectPanic(mx, raw)
+ cd.collectSummaryRequests(mx, raw)
+ cd.collectSummaryRequestsPerType(mx, raw)
+ cd.collectSummaryResponsesPerRcode(mx, raw)
+
+ if cd.perServerMatcher != nil {
+ cd.collectPerServerRequests(mx, raw)
+ //cd.collectPerServerRequestsDuration(mx, raw)
+ cd.collectPerServerRequestPerType(mx, raw)
+ cd.collectPerServerResponsePerRcode(mx, raw)
+ }
+
+ if cd.perZoneMatcher != nil {
+ cd.collectPerZoneRequests(mx, raw)
+ //cd.collectPerZoneRequestsDuration(mx, raw)
+ cd.collectPerZoneRequestsPerType(mx, raw)
+ cd.collectPerZoneResponsesPerRcode(mx, raw)
+ }
+
+ return stm.ToMap(mx), nil
+}
+
+func (cd *CoreDNS) updateVersionDependentMetrics(raw prometheus.Series) {
+ version := cd.parseVersion(raw)
+ if version == nil {
+ return
+ }
+ cd.version = version
+ if cd.version.LTE(version169) {
+ cd.metricNames.panicCountTotal = metricPanicCountTotal169orOlder
+ cd.metricNames.requestCountTotal = metricRequestCountTotal169orOlder
+ cd.metricNames.requestTypeCountTotal = metricRequestTypeCountTotal169orOlder
+ cd.metricNames.responseRcodeCountTotal = metricResponseRcodeCountTotal169orOlder
+ } else {
+ cd.metricNames.panicCountTotal = metricPanicCountTotal170orNewer
+ cd.metricNames.requestCountTotal = metricRequestCountTotal170orNewer
+ cd.metricNames.requestTypeCountTotal = metricRequestTypeCountTotal170orNewer
+ cd.metricNames.responseRcodeCountTotal = metricResponseRcodeCountTotal170orNewer
+ }
+}
+
+func (cd *CoreDNS) parseVersion(raw prometheus.Series) *semver.Version {
+ var versionStr string
+ for _, metric := range raw.FindByName("coredns_build_info") {
+ versionStr = metric.Labels.Get("version")
+ }
+ if versionStr == "" {
+ cd.Error("cannot find version string in metrics")
+ return nil
+ }
+
+ version, err := semver.Make(versionStr)
+ if err != nil {
+ cd.Errorf("failed to find server version: %v", err)
+ return nil
+ }
+ return &version
+}
+
+func (cd *CoreDNS) collectPanic(mx *metrics, raw prometheus.Series) {
+ mx.Panic.Set(raw.FindByName(cd.metricNames.panicCountTotal).Max())
+}
+
+func (cd *CoreDNS) collectSummaryRequests(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName(cd.metricNames.requestCountTotal) {
+ var (
+ family = metric.Labels.Get("family")
+ proto = metric.Labels.Get("proto")
+ server = metric.Labels.Get("server")
+ zone = metric.Labels.Get("zone")
+ value = metric.Value
+ )
+
+ if family == empty || proto == empty || zone == empty {
+ continue
+ }
+
+ if server == empty {
+ mx.NoZoneDropped.Add(value)
+ }
+
+ setRequestPerStatus(&mx.Summary.Request, value, server, zone)
+
+ if zone == dropped && server != empty {
+ continue
+ }
+
+ mx.Summary.Request.Total.Add(value)
+ setRequestPerIPFamily(&mx.Summary.Request, value, family)
+ setRequestPerProto(&mx.Summary.Request, value, proto)
+ }
+}
+
+//func (cd *CoreDNS) collectSummaryRequestsDuration(mx *metrics, raw prometheus.Series) {
+// for _, metric := range raw.FindByName(metricRequestDurationSecondsBucket) {
+// var (
+// server = metric.Labels.Get("server")
+// zone = metric.Labels.Get("zone")
+// le = metric.Labels.Get("le")
+// value = metric.Value
+// )
+//
+// if zone == empty || zone == dropped && server != empty || le == empty {
+// continue
+// }
+//
+// setRequestDuration(&mx.Summary.Request, value, le)
+// }
+// processRequestDuration(&mx.Summary.Request)
+//}
+
+func (cd *CoreDNS) collectSummaryRequestsPerType(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName(cd.metricNames.requestTypeCountTotal) {
+ var (
+ server = metric.Labels.Get("server")
+ typ = metric.Labels.Get("type")
+ zone = metric.Labels.Get("zone")
+ value = metric.Value
+ )
+
+ if typ == empty || zone == empty || zone == dropped && server != empty {
+ continue
+ }
+
+ setRequestPerType(&mx.Summary.Request, value, typ)
+ }
+}
+
+func (cd *CoreDNS) collectSummaryResponsesPerRcode(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName(cd.metricNames.responseRcodeCountTotal) {
+ var (
+ rcode = metric.Labels.Get("rcode")
+ server = metric.Labels.Get("server")
+ zone = metric.Labels.Get("zone")
+ value = metric.Value
+ )
+
+ if rcode == empty || zone == empty || zone == dropped && server != empty {
+ continue
+ }
+
+ setResponsePerRcode(&mx.Summary.Response, value, rcode)
+ }
+}
+
+// Per Server
+
+func (cd *CoreDNS) collectPerServerRequests(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName(cd.metricNames.requestCountTotal) {
+ var (
+ family = metric.Labels.Get("family")
+ proto = metric.Labels.Get("proto")
+ server = metric.Labels.Get("server")
+ zone = metric.Labels.Get("zone")
+ value = metric.Value
+ )
+
+ if family == empty || proto == empty || zone == empty {
+ continue
+ }
+
+ if !cd.perServerMatcher.MatchString(server) {
+ continue
+ }
+
+ if server == empty {
+ server = emptyServerReplaceName
+ }
+
+ if !cd.collectedServers[server] {
+ cd.addNewServerCharts(server)
+ cd.collectedServers[server] = true
+ }
+
+ if _, ok := mx.PerServer[server]; !ok {
+ mx.PerServer[server] = &requestResponse{}
+ }
+
+ srv := mx.PerServer[server]
+
+ setRequestPerStatus(&srv.Request, value, server, zone)
+
+ if zone == dropped && server != emptyServerReplaceName {
+ continue
+ }
+
+ srv.Request.Total.Add(value)
+ setRequestPerIPFamily(&srv.Request, value, family)
+ setRequestPerProto(&srv.Request, value, proto)
+ }
+}
+
+//func (cd *CoreDNS) collectPerServerRequestsDuration(mx *metrics, raw prometheus.Series) {
+// for _, metric := range raw.FindByName(metricRequestDurationSecondsBucket) {
+// var (
+// server = metric.Labels.Get("server")
+// zone = metric.Labels.Get("zone")
+// le = metric.Labels.Get("le")
+// value = metric.Value
+// )
+//
+// if zone == empty || zone == dropped && server != empty || le == empty {
+// continue
+// }
+//
+// if !cd.perServerMatcher.MatchString(server) {
+// continue
+// }
+//
+// if server == empty {
+// server = emptyServerReplaceName
+// }
+//
+// if !cd.collectedServers[server] {
+// cd.addNewServerCharts(server)
+// cd.collectedServers[server] = true
+// }
+//
+// if _, ok := mx.PerServer[server]; !ok {
+// mx.PerServer[server] = &requestResponse{}
+// }
+//
+// setRequestDuration(&mx.PerServer[server].Request, value, le)
+// }
+// for _, s := range mx.PerServer {
+// processRequestDuration(&s.Request)
+// }
+//}
+
+func (cd *CoreDNS) collectPerServerRequestPerType(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName(cd.metricNames.requestTypeCountTotal) {
+ var (
+ server = metric.Labels.Get("server")
+ typ = metric.Labels.Get("type")
+ zone = metric.Labels.Get("zone")
+ value = metric.Value
+ )
+
+ if typ == empty || zone == empty || zone == dropped && server != empty {
+ continue
+ }
+
+ if !cd.perServerMatcher.MatchString(server) {
+ continue
+ }
+
+ if server == empty {
+ server = emptyServerReplaceName
+ }
+
+ if !cd.collectedServers[server] {
+ cd.addNewServerCharts(server)
+ cd.collectedServers[server] = true
+ }
+
+ if _, ok := mx.PerServer[server]; !ok {
+ mx.PerServer[server] = &requestResponse{}
+ }
+
+ setRequestPerType(&mx.PerServer[server].Request, value, typ)
+ }
+}
+
+func (cd *CoreDNS) collectPerServerResponsePerRcode(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName(cd.metricNames.responseRcodeCountTotal) {
+ var (
+ rcode = metric.Labels.Get("rcode")
+ server = metric.Labels.Get("server")
+ zone = metric.Labels.Get("zone")
+ value = metric.Value
+ )
+
+ if rcode == empty || zone == empty || zone == dropped && server != empty {
+ continue
+ }
+
+ if !cd.perServerMatcher.MatchString(server) {
+ continue
+ }
+
+ if server == empty {
+ server = emptyServerReplaceName
+ }
+
+ if !cd.collectedServers[server] {
+ cd.addNewServerCharts(server)
+ cd.collectedServers[server] = true
+ }
+
+ if _, ok := mx.PerServer[server]; !ok {
+ mx.PerServer[server] = &requestResponse{}
+ }
+
+ setResponsePerRcode(&mx.PerServer[server].Response, value, rcode)
+ }
+}
+
+// Per Zone
+
+func (cd *CoreDNS) collectPerZoneRequests(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName(cd.metricNames.requestCountTotal) {
+ var (
+ family = metric.Labels.Get("family")
+ proto = metric.Labels.Get("proto")
+ zone = metric.Labels.Get("zone")
+ value = metric.Value
+ )
+
+ if family == empty || proto == empty || zone == empty {
+ continue
+ }
+
+ if !cd.perZoneMatcher.MatchString(zone) {
+ continue
+ }
+
+ if zone == "." {
+ zone = rootZoneReplaceName
+ }
+
+ if !cd.collectedZones[zone] {
+ cd.addNewZoneCharts(zone)
+ cd.collectedZones[zone] = true
+ }
+
+ if _, ok := mx.PerZone[zone]; !ok {
+ mx.PerZone[zone] = &requestResponse{}
+ }
+
+ zoneMX := mx.PerZone[zone]
+ zoneMX.Request.Total.Add(value)
+ setRequestPerIPFamily(&zoneMX.Request, value, family)
+ setRequestPerProto(&zoneMX.Request, value, proto)
+ }
+}
+
+//func (cd *CoreDNS) collectPerZoneRequestsDuration(mx *metrics, raw prometheus.Series) {
+// for _, metric := range raw.FindByName(metricRequestDurationSecondsBucket) {
+// var (
+// zone = metric.Labels.Get("zone")
+// le = metric.Labels.Get("le")
+// value = metric.Value
+// )
+//
+// if zone == empty || le == empty {
+// continue
+// }
+//
+// if !cd.perZoneMatcher.MatchString(zone) {
+// continue
+// }
+//
+// if zone == "." {
+// zone = rootZoneReplaceName
+// }
+//
+// if !cd.collectedZones[zone] {
+// cd.addNewZoneCharts(zone)
+// cd.collectedZones[zone] = true
+// }
+//
+// if _, ok := mx.PerZone[zone]; !ok {
+// mx.PerZone[zone] = &requestResponse{}
+// }
+//
+// setRequestDuration(&mx.PerZone[zone].Request, value, le)
+// }
+// for _, s := range mx.PerZone {
+// processRequestDuration(&s.Request)
+// }
+//}
+
+func (cd *CoreDNS) collectPerZoneRequestsPerType(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName(cd.metricNames.requestTypeCountTotal) {
+ var (
+ typ = metric.Labels.Get("type")
+ zone = metric.Labels.Get("zone")
+ value = metric.Value
+ )
+
+ if typ == empty || zone == empty {
+ continue
+ }
+
+ if !cd.perZoneMatcher.MatchString(zone) {
+ continue
+ }
+
+ if zone == "." {
+ zone = rootZoneReplaceName
+ }
+
+ if !cd.collectedZones[zone] {
+ cd.addNewZoneCharts(zone)
+ cd.collectedZones[zone] = true
+ }
+
+ if _, ok := mx.PerZone[zone]; !ok {
+ mx.PerZone[zone] = &requestResponse{}
+ }
+
+ setRequestPerType(&mx.PerZone[zone].Request, value, typ)
+ }
+}
+
+func (cd *CoreDNS) collectPerZoneResponsesPerRcode(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName(cd.metricNames.responseRcodeCountTotal) {
+ var (
+ rcode = metric.Labels.Get("rcode")
+ zone = metric.Labels.Get("zone")
+ value = metric.Value
+ )
+
+ if rcode == empty || zone == empty {
+ continue
+ }
+
+ if !cd.perZoneMatcher.MatchString(zone) {
+ continue
+ }
+
+ if zone == "." {
+ zone = rootZoneReplaceName
+ }
+
+ if !cd.collectedZones[zone] {
+ cd.addNewZoneCharts(zone)
+ cd.collectedZones[zone] = true
+ }
+
+ if _, ok := mx.PerZone[zone]; !ok {
+ mx.PerZone[zone] = &requestResponse{}
+ }
+
+ setResponsePerRcode(&mx.PerZone[zone].Response, value, rcode)
+ }
+}
+
+// ---
+
+func setRequestPerIPFamily(mx *request, value float64, family string) {
+ switch family {
+ case "1":
+ mx.PerIPFamily.IPv4.Add(value)
+ case "2":
+ mx.PerIPFamily.IPv6.Add(value)
+ }
+}
+
+func setRequestPerProto(mx *request, value float64, proto string) {
+ switch proto {
+ case "udp":
+ mx.PerProto.UDP.Add(value)
+ case "tcp":
+ mx.PerProto.TCP.Add(value)
+ }
+}
+
+func setRequestPerStatus(mx *request, value float64, server, zone string) {
+ switch zone {
+ default:
+ mx.PerStatus.Processed.Add(value)
+ case "dropped":
+ mx.PerStatus.Dropped.Add(value)
+ if server == empty || server == emptyServerReplaceName {
+ return
+ }
+ mx.PerStatus.Processed.Sub(value)
+ }
+}
+
+func setRequestPerType(mx *request, value float64, typ string) {
+ switch typ {
+ default:
+ mx.PerType.Other.Add(value)
+ case "A":
+ mx.PerType.A.Add(value)
+ case "AAAA":
+ mx.PerType.AAAA.Add(value)
+ case "MX":
+ mx.PerType.MX.Add(value)
+ case "SOA":
+ mx.PerType.SOA.Add(value)
+ case "CNAME":
+ mx.PerType.CNAME.Add(value)
+ case "PTR":
+ mx.PerType.PTR.Add(value)
+ case "TXT":
+ mx.PerType.TXT.Add(value)
+ case "NS":
+ mx.PerType.NS.Add(value)
+ case "DS":
+ mx.PerType.DS.Add(value)
+ case "DNSKEY":
+ mx.PerType.DNSKEY.Add(value)
+ case "RRSIG":
+ mx.PerType.RRSIG.Add(value)
+ case "NSEC":
+ mx.PerType.NSEC.Add(value)
+ case "NSEC3":
+ mx.PerType.NSEC3.Add(value)
+ case "IXFR":
+ mx.PerType.IXFR.Add(value)
+ case "ANY":
+ mx.PerType.ANY.Add(value)
+ }
+}
+
+func setResponsePerRcode(mx *response, value float64, rcode string) {
+ mx.Total.Add(value)
+
+ switch rcode {
+ default:
+ mx.PerRcode.Other.Add(value)
+ case "NOERROR":
+ mx.PerRcode.NOERROR.Add(value)
+ case "FORMERR":
+ mx.PerRcode.FORMERR.Add(value)
+ case "SERVFAIL":
+ mx.PerRcode.SERVFAIL.Add(value)
+ case "NXDOMAIN":
+ mx.PerRcode.NXDOMAIN.Add(value)
+ case "NOTIMP":
+ mx.PerRcode.NOTIMP.Add(value)
+ case "REFUSED":
+ mx.PerRcode.REFUSED.Add(value)
+ case "YXDOMAIN":
+ mx.PerRcode.YXDOMAIN.Add(value)
+ case "YXRRSET":
+ mx.PerRcode.YXRRSET.Add(value)
+ case "NXRRSET":
+ mx.PerRcode.NXRRSET.Add(value)
+ case "NOTAUTH":
+ mx.PerRcode.NOTAUTH.Add(value)
+ case "NOTZONE":
+ mx.PerRcode.NOTZONE.Add(value)
+ case "BADSIG":
+ mx.PerRcode.BADSIG.Add(value)
+ case "BADKEY":
+ mx.PerRcode.BADKEY.Add(value)
+ case "BADTIME":
+ mx.PerRcode.BADTIME.Add(value)
+ case "BADMODE":
+ mx.PerRcode.BADMODE.Add(value)
+ case "BADNAME":
+ mx.PerRcode.BADNAME.Add(value)
+ case "BADALG":
+ mx.PerRcode.BADALG.Add(value)
+ case "BADTRUNC":
+ mx.PerRcode.BADTRUNC.Add(value)
+ case "BADCOOKIE":
+ mx.PerRcode.BADCOOKIE.Add(value)
+ }
+}
+
+//func setRequestDuration(mx *request, value float64, le string) {
+// switch le {
+// case "0.00025":
+// mx.Duration.LE000025.Add(value)
+// case "0.0005":
+// mx.Duration.LE00005.Add(value)
+// case "0.001":
+// mx.Duration.LE0001.Add(value)
+// case "0.002":
+// mx.Duration.LE0002.Add(value)
+// case "0.004":
+// mx.Duration.LE0004.Add(value)
+// case "0.008":
+// mx.Duration.LE0008.Add(value)
+// case "0.016":
+// mx.Duration.LE0016.Add(value)
+// case "0.032":
+// mx.Duration.LE0032.Add(value)
+// case "0.064":
+// mx.Duration.LE0064.Add(value)
+// case "0.128":
+// mx.Duration.LE0128.Add(value)
+// case "0.256":
+// mx.Duration.LE0256.Add(value)
+// case "0.512":
+// mx.Duration.LE0512.Add(value)
+// case "1.024":
+// mx.Duration.LE1024.Add(value)
+// case "2.048":
+// mx.Duration.LE2048.Add(value)
+// case "4.096":
+// mx.Duration.LE4096.Add(value)
+// case "8.192":
+// mx.Duration.LE8192.Add(value)
+// case "+Inf":
+// mx.Duration.LEInf.Add(value)
+// }
+//}
+
+//func processRequestDuration(mx *request) {
+// mx.Duration.LEInf.Sub(mx.Duration.LE8192.Value())
+// mx.Duration.LE8192.Sub(mx.Duration.LE4096.Value())
+// mx.Duration.LE4096.Sub(mx.Duration.LE2048.Value())
+// mx.Duration.LE2048.Sub(mx.Duration.LE1024.Value())
+// mx.Duration.LE1024.Sub(mx.Duration.LE0512.Value())
+// mx.Duration.LE0512.Sub(mx.Duration.LE0256.Value())
+// mx.Duration.LE0256.Sub(mx.Duration.LE0128.Value())
+// mx.Duration.LE0128.Sub(mx.Duration.LE0064.Value())
+// mx.Duration.LE0064.Sub(mx.Duration.LE0032.Value())
+// mx.Duration.LE0032.Sub(mx.Duration.LE0016.Value())
+// mx.Duration.LE0016.Sub(mx.Duration.LE0008.Value())
+// mx.Duration.LE0008.Sub(mx.Duration.LE0004.Value())
+// mx.Duration.LE0004.Sub(mx.Duration.LE0002.Value())
+// mx.Duration.LE0002.Sub(mx.Duration.LE0001.Value())
+// mx.Duration.LE0001.Sub(mx.Duration.LE00005.Value())
+// mx.Duration.LE00005.Sub(mx.Duration.LE000025.Value())
+//}
+
+// ---
+
+func (cd *CoreDNS) addNewServerCharts(name string) {
+ charts := serverCharts.Copy()
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, "server", name)
+ chart.Title = fmt.Sprintf(chart.Title, "Server", name)
+ chart.Fam = fmt.Sprintf(chart.Fam, "server", name)
+
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+ _ = cd.charts.Add(*charts...)
+}
+
+func (cd *CoreDNS) addNewZoneCharts(name string) {
+ charts := zoneCharts.Copy()
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, "zone", name)
+ chart.Title = fmt.Sprintf(chart.Title, "Zone", name)
+ chart.Fam = fmt.Sprintf(chart.Fam, "zone", name)
+ chart.Ctx = strings.Replace(chart.Ctx, "coredns.server_", "coredns.zone_", 1)
+
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+ _ = cd.charts.Add(*charts...)
+}
diff --git a/src/go/plugin/go.d/modules/coredns/config_schema.json b/src/go/plugin/go.d/modules/coredns/config_schema.json
new file mode 100644
index 000000000..d5f87912b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/config_schema.json
@@ -0,0 +1,270 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "CoreDNS collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the CoreDNS [metrics page](https://coredns.io/plugins/metrics/).",
+ "type": "string",
+ "default": "http://127.0.0.1:9153/metrics",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "per_server_stats": {
+ "title": "Server selector",
+ "description": "Configures collection of per-server statistics. If left empty, no stats will be collected. Matching is performed against the `server` label value.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "includes": {
+ "title": "Include",
+ "description": "Include servers whose names match any of the specified inclusion [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Server pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "excludes": {
+ "title": "Exclude",
+ "description": "Exclude servers whose names match any of the specified exclusion [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Server pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ },
+ "per_zone_stats": {
+ "title": "Zone selector",
+ "description": "Configures collection of per-zone statistics. If left empty, no stats will be collected. Matching is performed against the `zone` label value.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "includes": {
+ "title": "Include",
+ "description": "Include zones whose names match any of the specified inclusion [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Zone pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "excludes": {
+ "title": "Exclude",
+ "description": "Exclude zones whose names match any of the specified exclusion [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Zone pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Server & Zone stats",
+ "fields": [
+ "per_server_stats",
+ "per_zone_stats"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "per_server_stats": {
+ "ui:help": "The logic for inclusion and exclusion is as follows: `(include1 OR include2) AND !(exclude1 OR exclude2)`.",
+ "ui:collapsible": true
+ },
+ "per_zone_stats": {
+ "ui:help": "The logic for inclusion and exclusion is as follows: `(include1 OR include2) AND !(exclude1 OR exclude2)`.",
+ "ui:collapsible": true
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/coredns/coredns.go b/src/go/plugin/go.d/modules/coredns/coredns.go
new file mode 100644
index 000000000..c91af7d15
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/coredns.go
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package coredns
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/blang/semver/v4"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("coredns", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *CoreDNS {
+ return &CoreDNS{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:9153/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: summaryCharts.Copy(),
+ collectedServers: make(map[string]bool),
+ collectedZones: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ PerServerStats matcher.SimpleExpr `yaml:"per_server_stats,omitempty" json:"per_server_stats"`
+ PerZoneStats matcher.SimpleExpr `yaml:"per_zone_stats,omitempty" json:"per_zone_stats"`
+}
+
+type CoreDNS struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ prom prometheus.Prometheus
+
+ charts *Charts
+
+ perServerMatcher matcher.Matcher
+ perZoneMatcher matcher.Matcher
+ collectedServers map[string]bool
+ collectedZones map[string]bool
+ skipVersionCheck bool
+ version *semver.Version
+ metricNames requestMetricsNames
+}
+
+func (cd *CoreDNS) Configuration() any {
+ return cd.Config
+}
+
+func (cd *CoreDNS) Init() error {
+ if err := cd.validateConfig(); err != nil {
+ cd.Errorf("config validation: %v", err)
+ return err
+ }
+
+ sm, err := cd.initPerServerMatcher()
+ if err != nil {
+ cd.Error(err)
+ return err
+ }
+ if sm != nil {
+ cd.perServerMatcher = sm
+ }
+
+ zm, err := cd.initPerZoneMatcher()
+ if err != nil {
+ cd.Error(err)
+ return err
+ }
+ if zm != nil {
+ cd.perZoneMatcher = zm
+ }
+
+ prom, err := cd.initPrometheusClient()
+ if err != nil {
+ cd.Error(err)
+ return err
+ }
+ cd.prom = prom
+
+ return nil
+}
+
+func (cd *CoreDNS) Check() error {
+ mx, err := cd.collect()
+ if err != nil {
+ cd.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (cd *CoreDNS) Charts() *Charts {
+ return cd.charts
+}
+
+func (cd *CoreDNS) Collect() map[string]int64 {
+ mx, err := cd.collect()
+
+ if err != nil {
+ cd.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (cd *CoreDNS) Cleanup() {
+ if cd.prom != nil && cd.prom.HTTPClient() != nil {
+ cd.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/coredns/coredns_test.go b/src/go/plugin/go.d/modules/coredns/coredns_test.go
new file mode 100644
index 000000000..5d67b417f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/coredns_test.go
@@ -0,0 +1,573 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package coredns
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer169NoLoad, _ = os.ReadFile("testdata/version169/no_load.txt")
+ dataVer169SomeLoad, _ = os.ReadFile("testdata/version169/some_load.txt")
+
+ dataVer170NoLoad, _ = os.ReadFile("testdata/version170/no_load.txt")
+ dataVer170SomeLoad, _ = os.ReadFile("testdata/version170/some_load.txt")
+
+ dataNoLoadNoVersion, _ = os.ReadFile("testdata/no_version/no_load.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer169NoLoad": dataVer169NoLoad,
+ "dataVer169SomeLoad": dataVer169SomeLoad,
+ "dataVer170NoLoad": dataVer170NoLoad,
+ "dataVer170SomeLoad": dataVer170SomeLoad,
+ "dataNoLoadNoVersion": dataNoLoadNoVersion,
+ } {
+ require.NotNilf(t, data, name)
+ }
+}
+
+func TestCoreDNS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &CoreDNS{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestCoreDNS_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestCoreDNS_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestCoreDNS_Init(t *testing.T) {
+ assert.NoError(t, New().Init())
+}
+
+func TestCoreDNS_InitNG(t *testing.T) {
+ job := New()
+ job.URL = ""
+ assert.Error(t, job.Init())
+}
+
+func TestCoreDNS_Check(t *testing.T) {
+ tests := []struct {
+ name string
+ data []byte
+ }{
+ {"version 1.6.9", dataVer169NoLoad},
+ {"version 1.7.0", dataVer170NoLoad},
+ }
+ for _, testNoLoad := range tests {
+ t.Run(testNoLoad.name, func(t *testing.T) {
+
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(testNoLoad.data)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+ })
+ }
+}
+
+func TestCoreDNS_CheckNG(t *testing.T) {
+ job := New()
+ job.URL = "http://127.0.0.1:38001/metrics"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestCoreDNS_Collect(t *testing.T) {
+ tests := []struct {
+ name string
+ data []byte
+ }{
+ {"version 1.6.9", dataVer169SomeLoad},
+ {"version 1.7.0", dataVer170SomeLoad},
+ }
+ for _, testSomeLoad := range tests {
+ t.Run(testSomeLoad.name, func(t *testing.T) {
+
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(testSomeLoad.data)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ job.PerServerStats.Includes = []string{"glob:*"}
+ job.PerZoneStats.Includes = []string{"glob:*"}
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "coredns.io._request_per_ip_family_v4": 19,
+ "coredns.io._request_per_ip_family_v6": 0,
+ "coredns.io._request_per_proto_tcp": 0,
+ "coredns.io._request_per_proto_udp": 19,
+ "coredns.io._request_per_status_dropped": 0,
+ "coredns.io._request_per_status_processed": 0,
+ "coredns.io._request_per_type_A": 6,
+ "coredns.io._request_per_type_AAAA": 6,
+ "coredns.io._request_per_type_ANY": 0,
+ "coredns.io._request_per_type_CNAME": 0,
+ "coredns.io._request_per_type_DNSKEY": 0,
+ "coredns.io._request_per_type_DS": 0,
+ "coredns.io._request_per_type_IXFR": 0,
+ "coredns.io._request_per_type_MX": 7,
+ "coredns.io._request_per_type_NS": 0,
+ "coredns.io._request_per_type_NSEC": 0,
+ "coredns.io._request_per_type_NSEC3": 0,
+ "coredns.io._request_per_type_PTR": 0,
+ "coredns.io._request_per_type_RRSIG": 0,
+ "coredns.io._request_per_type_SOA": 0,
+ "coredns.io._request_per_type_SRV": 0,
+ "coredns.io._request_per_type_TXT": 0,
+ "coredns.io._request_per_type_other": 0,
+ "coredns.io._request_total": 19,
+ "coredns.io._response_per_rcode_BADALG": 0,
+ "coredns.io._response_per_rcode_BADCOOKIE": 0,
+ "coredns.io._response_per_rcode_BADKEY": 0,
+ "coredns.io._response_per_rcode_BADMODE": 0,
+ "coredns.io._response_per_rcode_BADNAME": 0,
+ "coredns.io._response_per_rcode_BADSIG": 0,
+ "coredns.io._response_per_rcode_BADTIME": 0,
+ "coredns.io._response_per_rcode_BADTRUNC": 0,
+ "coredns.io._response_per_rcode_FORMERR": 0,
+ "coredns.io._response_per_rcode_NOERROR": 19,
+ "coredns.io._response_per_rcode_NOTAUTH": 0,
+ "coredns.io._response_per_rcode_NOTIMP": 0,
+ "coredns.io._response_per_rcode_NOTZONE": 0,
+ "coredns.io._response_per_rcode_NXDOMAIN": 0,
+ "coredns.io._response_per_rcode_NXRRSET": 0,
+ "coredns.io._response_per_rcode_REFUSED": 0,
+ "coredns.io._response_per_rcode_SERVFAIL": 0,
+ "coredns.io._response_per_rcode_YXDOMAIN": 0,
+ "coredns.io._response_per_rcode_YXRRSET": 0,
+ "coredns.io._response_per_rcode_other": 0,
+ "coredns.io._response_total": 19,
+ "dns://:53_request_per_ip_family_v4": 15,
+ "dns://:53_request_per_ip_family_v6": 0,
+ "dns://:53_request_per_proto_tcp": 0,
+ "dns://:53_request_per_proto_udp": 15,
+ "dns://:53_request_per_status_dropped": 9,
+ "dns://:53_request_per_status_processed": 6,
+ "dns://:53_request_per_type_A": 5,
+ "dns://:53_request_per_type_AAAA": 5,
+ "dns://:53_request_per_type_ANY": 0,
+ "dns://:53_request_per_type_CNAME": 0,
+ "dns://:53_request_per_type_DNSKEY": 0,
+ "dns://:53_request_per_type_DS": 0,
+ "dns://:53_request_per_type_IXFR": 0,
+ "dns://:53_request_per_type_MX": 5,
+ "dns://:53_request_per_type_NS": 0,
+ "dns://:53_request_per_type_NSEC": 0,
+ "dns://:53_request_per_type_NSEC3": 0,
+ "dns://:53_request_per_type_PTR": 0,
+ "dns://:53_request_per_type_RRSIG": 0,
+ "dns://:53_request_per_type_SOA": 0,
+ "dns://:53_request_per_type_SRV": 0,
+ "dns://:53_request_per_type_TXT": 0,
+ "dns://:53_request_per_type_other": 0,
+ "dns://:53_request_total": 15,
+ "dns://:53_response_per_rcode_BADALG": 0,
+ "dns://:53_response_per_rcode_BADCOOKIE": 0,
+ "dns://:53_response_per_rcode_BADKEY": 0,
+ "dns://:53_response_per_rcode_BADMODE": 0,
+ "dns://:53_response_per_rcode_BADNAME": 0,
+ "dns://:53_response_per_rcode_BADSIG": 0,
+ "dns://:53_response_per_rcode_BADTIME": 0,
+ "dns://:53_response_per_rcode_BADTRUNC": 0,
+ "dns://:53_response_per_rcode_FORMERR": 0,
+ "dns://:53_response_per_rcode_NOERROR": 6,
+ "dns://:53_response_per_rcode_NOTAUTH": 0,
+ "dns://:53_response_per_rcode_NOTIMP": 0,
+ "dns://:53_response_per_rcode_NOTZONE": 0,
+ "dns://:53_response_per_rcode_NXDOMAIN": 0,
+ "dns://:53_response_per_rcode_NXRRSET": 0,
+ "dns://:53_response_per_rcode_REFUSED": 0,
+ "dns://:53_response_per_rcode_SERVFAIL": 9,
+ "dns://:53_response_per_rcode_YXDOMAIN": 0,
+ "dns://:53_response_per_rcode_YXRRSET": 0,
+ "dns://:53_response_per_rcode_other": 0,
+ "dns://:53_response_total": 15,
+ "dns://:54_request_per_ip_family_v4": 25,
+ "dns://:54_request_per_ip_family_v6": 0,
+ "dns://:54_request_per_proto_tcp": 0,
+ "dns://:54_request_per_proto_udp": 25,
+ "dns://:54_request_per_status_dropped": 12,
+ "dns://:54_request_per_status_processed": 13,
+ "dns://:54_request_per_type_A": 8,
+ "dns://:54_request_per_type_AAAA": 8,
+ "dns://:54_request_per_type_ANY": 0,
+ "dns://:54_request_per_type_CNAME": 0,
+ "dns://:54_request_per_type_DNSKEY": 0,
+ "dns://:54_request_per_type_DS": 0,
+ "dns://:54_request_per_type_IXFR": 0,
+ "dns://:54_request_per_type_MX": 9,
+ "dns://:54_request_per_type_NS": 0,
+ "dns://:54_request_per_type_NSEC": 0,
+ "dns://:54_request_per_type_NSEC3": 0,
+ "dns://:54_request_per_type_PTR": 0,
+ "dns://:54_request_per_type_RRSIG": 0,
+ "dns://:54_request_per_type_SOA": 0,
+ "dns://:54_request_per_type_SRV": 0,
+ "dns://:54_request_per_type_TXT": 0,
+ "dns://:54_request_per_type_other": 0,
+ "dns://:54_request_total": 25,
+ "dns://:54_response_per_rcode_BADALG": 0,
+ "dns://:54_response_per_rcode_BADCOOKIE": 0,
+ "dns://:54_response_per_rcode_BADKEY": 0,
+ "dns://:54_response_per_rcode_BADMODE": 0,
+ "dns://:54_response_per_rcode_BADNAME": 0,
+ "dns://:54_response_per_rcode_BADSIG": 0,
+ "dns://:54_response_per_rcode_BADTIME": 0,
+ "dns://:54_response_per_rcode_BADTRUNC": 0,
+ "dns://:54_response_per_rcode_FORMERR": 0,
+ "dns://:54_response_per_rcode_NOERROR": 13,
+ "dns://:54_response_per_rcode_NOTAUTH": 0,
+ "dns://:54_response_per_rcode_NOTIMP": 0,
+ "dns://:54_response_per_rcode_NOTZONE": 0,
+ "dns://:54_response_per_rcode_NXDOMAIN": 0,
+ "dns://:54_response_per_rcode_NXRRSET": 0,
+ "dns://:54_response_per_rcode_REFUSED": 0,
+ "dns://:54_response_per_rcode_SERVFAIL": 12,
+ "dns://:54_response_per_rcode_YXDOMAIN": 0,
+ "dns://:54_response_per_rcode_YXRRSET": 0,
+ "dns://:54_response_per_rcode_other": 0,
+ "dns://:54_response_total": 25,
+ "dropped_request_per_ip_family_v4": 42,
+ "dropped_request_per_ip_family_v6": 0,
+ "dropped_request_per_proto_tcp": 0,
+ "dropped_request_per_proto_udp": 42,
+ "dropped_request_per_status_dropped": 0,
+ "dropped_request_per_status_processed": 0,
+ "dropped_request_per_type_A": 14,
+ "dropped_request_per_type_AAAA": 14,
+ "dropped_request_per_type_ANY": 0,
+ "dropped_request_per_type_CNAME": 0,
+ "dropped_request_per_type_DNSKEY": 0,
+ "dropped_request_per_type_DS": 0,
+ "dropped_request_per_type_IXFR": 0,
+ "dropped_request_per_type_MX": 14,
+ "dropped_request_per_type_NS": 0,
+ "dropped_request_per_type_NSEC": 0,
+ "dropped_request_per_type_NSEC3": 0,
+ "dropped_request_per_type_PTR": 0,
+ "dropped_request_per_type_RRSIG": 0,
+ "dropped_request_per_type_SOA": 0,
+ "dropped_request_per_type_SRV": 0,
+ "dropped_request_per_type_TXT": 0,
+ "dropped_request_per_type_other": 0,
+ "dropped_request_total": 42,
+ "dropped_response_per_rcode_BADALG": 0,
+ "dropped_response_per_rcode_BADCOOKIE": 0,
+ "dropped_response_per_rcode_BADKEY": 0,
+ "dropped_response_per_rcode_BADMODE": 0,
+ "dropped_response_per_rcode_BADNAME": 0,
+ "dropped_response_per_rcode_BADSIG": 0,
+ "dropped_response_per_rcode_BADTIME": 0,
+ "dropped_response_per_rcode_BADTRUNC": 0,
+ "dropped_response_per_rcode_FORMERR": 0,
+ "dropped_response_per_rcode_NOERROR": 0,
+ "dropped_response_per_rcode_NOTAUTH": 0,
+ "dropped_response_per_rcode_NOTIMP": 0,
+ "dropped_response_per_rcode_NOTZONE": 0,
+ "dropped_response_per_rcode_NXDOMAIN": 0,
+ "dropped_response_per_rcode_NXRRSET": 0,
+ "dropped_response_per_rcode_REFUSED": 21,
+ "dropped_response_per_rcode_SERVFAIL": 21,
+ "dropped_response_per_rcode_YXDOMAIN": 0,
+ "dropped_response_per_rcode_YXRRSET": 0,
+ "dropped_response_per_rcode_other": 0,
+ "dropped_response_total": 42,
+ "empty_request_per_ip_family_v4": 21,
+ "empty_request_per_ip_family_v6": 0,
+ "empty_request_per_proto_tcp": 0,
+ "empty_request_per_proto_udp": 21,
+ "empty_request_per_status_dropped": 21,
+ "empty_request_per_status_processed": 0,
+ "empty_request_per_type_A": 7,
+ "empty_request_per_type_AAAA": 7,
+ "empty_request_per_type_ANY": 0,
+ "empty_request_per_type_CNAME": 0,
+ "empty_request_per_type_DNSKEY": 0,
+ "empty_request_per_type_DS": 0,
+ "empty_request_per_type_IXFR": 0,
+ "empty_request_per_type_MX": 7,
+ "empty_request_per_type_NS": 0,
+ "empty_request_per_type_NSEC": 0,
+ "empty_request_per_type_NSEC3": 0,
+ "empty_request_per_type_PTR": 0,
+ "empty_request_per_type_RRSIG": 0,
+ "empty_request_per_type_SOA": 0,
+ "empty_request_per_type_SRV": 0,
+ "empty_request_per_type_TXT": 0,
+ "empty_request_per_type_other": 0,
+ "empty_request_total": 21,
+ "empty_response_per_rcode_BADALG": 0,
+ "empty_response_per_rcode_BADCOOKIE": 0,
+ "empty_response_per_rcode_BADKEY": 0,
+ "empty_response_per_rcode_BADMODE": 0,
+ "empty_response_per_rcode_BADNAME": 0,
+ "empty_response_per_rcode_BADSIG": 0,
+ "empty_response_per_rcode_BADTIME": 0,
+ "empty_response_per_rcode_BADTRUNC": 0,
+ "empty_response_per_rcode_FORMERR": 0,
+ "empty_response_per_rcode_NOERROR": 0,
+ "empty_response_per_rcode_NOTAUTH": 0,
+ "empty_response_per_rcode_NOTIMP": 0,
+ "empty_response_per_rcode_NOTZONE": 0,
+ "empty_response_per_rcode_NXDOMAIN": 0,
+ "empty_response_per_rcode_NXRRSET": 0,
+ "empty_response_per_rcode_REFUSED": 21,
+ "empty_response_per_rcode_SERVFAIL": 0,
+ "empty_response_per_rcode_YXDOMAIN": 0,
+ "empty_response_per_rcode_YXRRSET": 0,
+ "empty_response_per_rcode_other": 0,
+ "empty_response_total": 21,
+ "no_matching_zone_dropped_total": 21,
+ "panic_total": 0,
+ "request_per_ip_family_v4": 61,
+ "request_per_ip_family_v6": 0,
+ "request_per_proto_tcp": 0,
+ "request_per_proto_udp": 61,
+ "request_per_status_dropped": 42,
+ "request_per_status_processed": 19,
+ "request_per_type_A": 20,
+ "request_per_type_AAAA": 20,
+ "request_per_type_ANY": 0,
+ "request_per_type_CNAME": 0,
+ "request_per_type_DNSKEY": 0,
+ "request_per_type_DS": 0,
+ "request_per_type_IXFR": 0,
+ "request_per_type_MX": 21,
+ "request_per_type_NS": 0,
+ "request_per_type_NSEC": 0,
+ "request_per_type_NSEC3": 0,
+ "request_per_type_PTR": 0,
+ "request_per_type_RRSIG": 0,
+ "request_per_type_SOA": 0,
+ "request_per_type_SRV": 0,
+ "request_per_type_TXT": 0,
+ "request_per_type_other": 0,
+ "request_total": 61,
+ "response_per_rcode_BADALG": 0,
+ "response_per_rcode_BADCOOKIE": 0,
+ "response_per_rcode_BADKEY": 0,
+ "response_per_rcode_BADMODE": 0,
+ "response_per_rcode_BADNAME": 0,
+ "response_per_rcode_BADSIG": 0,
+ "response_per_rcode_BADTIME": 0,
+ "response_per_rcode_BADTRUNC": 0,
+ "response_per_rcode_FORMERR": 0,
+ "response_per_rcode_NOERROR": 19,
+ "response_per_rcode_NOTAUTH": 0,
+ "response_per_rcode_NOTIMP": 0,
+ "response_per_rcode_NOTZONE": 0,
+ "response_per_rcode_NXDOMAIN": 0,
+ "response_per_rcode_NXRRSET": 0,
+ "response_per_rcode_REFUSED": 21,
+ "response_per_rcode_SERVFAIL": 21,
+ "response_per_rcode_YXDOMAIN": 0,
+ "response_per_rcode_YXRRSET": 0,
+ "response_per_rcode_other": 0,
+ "response_total": 61,
+ "ya.ru._request_per_ip_family_v4": 21,
+ "ya.ru._request_per_ip_family_v6": 0,
+ "ya.ru._request_per_proto_tcp": 0,
+ "ya.ru._request_per_proto_udp": 21,
+ "ya.ru._request_per_status_dropped": 0,
+ "ya.ru._request_per_status_processed": 0,
+ "ya.ru._request_per_type_A": 7,
+ "ya.ru._request_per_type_AAAA": 7,
+ "ya.ru._request_per_type_ANY": 0,
+ "ya.ru._request_per_type_CNAME": 0,
+ "ya.ru._request_per_type_DNSKEY": 0,
+ "ya.ru._request_per_type_DS": 0,
+ "ya.ru._request_per_type_IXFR": 0,
+ "ya.ru._request_per_type_MX": 7,
+ "ya.ru._request_per_type_NS": 0,
+ "ya.ru._request_per_type_NSEC": 0,
+ "ya.ru._request_per_type_NSEC3": 0,
+ "ya.ru._request_per_type_PTR": 0,
+ "ya.ru._request_per_type_RRSIG": 0,
+ "ya.ru._request_per_type_SOA": 0,
+ "ya.ru._request_per_type_SRV": 0,
+ "ya.ru._request_per_type_TXT": 0,
+ "ya.ru._request_per_type_other": 0,
+ "ya.ru._request_total": 21,
+ "ya.ru._response_per_rcode_BADALG": 0,
+ "ya.ru._response_per_rcode_BADCOOKIE": 0,
+ "ya.ru._response_per_rcode_BADKEY": 0,
+ "ya.ru._response_per_rcode_BADMODE": 0,
+ "ya.ru._response_per_rcode_BADNAME": 0,
+ "ya.ru._response_per_rcode_BADSIG": 0,
+ "ya.ru._response_per_rcode_BADTIME": 0,
+ "ya.ru._response_per_rcode_BADTRUNC": 0,
+ "ya.ru._response_per_rcode_FORMERR": 0,
+ "ya.ru._response_per_rcode_NOERROR": 0,
+ "ya.ru._response_per_rcode_NOTAUTH": 0,
+ "ya.ru._response_per_rcode_NOTIMP": 0,
+ "ya.ru._response_per_rcode_NOTZONE": 0,
+ "ya.ru._response_per_rcode_NXDOMAIN": 0,
+ "ya.ru._response_per_rcode_NXRRSET": 0,
+ "ya.ru._response_per_rcode_REFUSED": 0,
+ "ya.ru._response_per_rcode_SERVFAIL": 21,
+ "ya.ru._response_per_rcode_YXDOMAIN": 0,
+ "ya.ru._response_per_rcode_YXRRSET": 0,
+ "ya.ru._response_per_rcode_other": 0,
+ "ya.ru._response_total": 21,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+ })
+ }
+}
+
+func TestCoreDNS_CollectNoLoad(t *testing.T) {
+ tests := []struct {
+ name string
+ data []byte
+ }{
+ {"version 1.6.9", dataVer169NoLoad},
+ {"version 1.7.0", dataVer170NoLoad},
+ }
+ for _, testNoLoad := range tests {
+ t.Run(testNoLoad.name, func(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(testNoLoad.data)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ job.PerServerStats.Includes = []string{"glob:*"}
+ job.PerZoneStats.Includes = []string{"glob:*"}
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "no_matching_zone_dropped_total": 0,
+ "panic_total": 99,
+ "request_per_ip_family_v4": 0,
+ "request_per_ip_family_v6": 0,
+ "request_per_proto_tcp": 0,
+ "request_per_proto_udp": 0,
+ "request_per_status_dropped": 0,
+ "request_per_status_processed": 0,
+ "request_per_type_A": 0,
+ "request_per_type_AAAA": 0,
+ "request_per_type_ANY": 0,
+ "request_per_type_CNAME": 0,
+ "request_per_type_DNSKEY": 0,
+ "request_per_type_DS": 0,
+ "request_per_type_IXFR": 0,
+ "request_per_type_MX": 0,
+ "request_per_type_NS": 0,
+ "request_per_type_NSEC": 0,
+ "request_per_type_NSEC3": 0,
+ "request_per_type_PTR": 0,
+ "request_per_type_RRSIG": 0,
+ "request_per_type_SOA": 0,
+ "request_per_type_SRV": 0,
+ "request_per_type_TXT": 0,
+ "request_per_type_other": 0,
+ "request_total": 0,
+ "response_per_rcode_BADALG": 0,
+ "response_per_rcode_BADCOOKIE": 0,
+ "response_per_rcode_BADKEY": 0,
+ "response_per_rcode_BADMODE": 0,
+ "response_per_rcode_BADNAME": 0,
+ "response_per_rcode_BADSIG": 0,
+ "response_per_rcode_BADTIME": 0,
+ "response_per_rcode_BADTRUNC": 0,
+ "response_per_rcode_FORMERR": 0,
+ "response_per_rcode_NOERROR": 0,
+ "response_per_rcode_NOTAUTH": 0,
+ "response_per_rcode_NOTIMP": 0,
+ "response_per_rcode_NOTZONE": 0,
+ "response_per_rcode_NXDOMAIN": 0,
+ "response_per_rcode_NXRRSET": 0,
+ "response_per_rcode_REFUSED": 0,
+ "response_per_rcode_SERVFAIL": 0,
+ "response_per_rcode_YXDOMAIN": 0,
+ "response_per_rcode_YXRRSET": 0,
+ "response_per_rcode_other": 0,
+ "response_total": 0,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+ })
+ }
+}
+
+func TestCoreDNS_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestCoreDNS_404(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestCoreDNS_CollectNoVersion(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataNoLoadNoVersion)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ job.PerServerStats.Includes = []string{"glob:*"}
+ job.PerZoneStats.Includes = []string{"glob:*"}
+ require.NoError(t, job.Init())
+ require.Error(t, job.Check())
+
+ assert.Nil(t, job.Collect())
+}
diff --git a/src/go/plugin/go.d/modules/coredns/init.go b/src/go/plugin/go.d/modules/coredns/init.go
new file mode 100644
index 000000000..e2b888bb6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/init.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package coredns
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (cd *CoreDNS) validateConfig() error {
+ if cd.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (cd *CoreDNS) initPerServerMatcher() (matcher.Matcher, error) {
+ if cd.PerServerStats.Empty() {
+ return nil, nil
+ }
+ return cd.PerServerStats.Parse()
+}
+
+func (cd *CoreDNS) initPerZoneMatcher() (matcher.Matcher, error) {
+ if cd.PerZoneStats.Empty() {
+ return nil, nil
+ }
+ return cd.PerZoneStats.Parse()
+}
+
+func (cd *CoreDNS) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(cd.Client)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.New(client, cd.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/coredns/integrations/coredns.md b/src/go/plugin/go.d/modules/coredns/integrations/coredns.md
new file mode 100644
index 000000000..549e2d8d9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/integrations/coredns.md
@@ -0,0 +1,329 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/coredns/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/coredns/metadata.yaml"
+sidebar_label: "CoreDNS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# CoreDNS
+
+
+<img src="https://netdata.cloud/img/coredns.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: coredns
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors CoreDNS instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per CoreDNS instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| coredns.dns_request_count_total | requests | requests/s |
+| coredns.dns_responses_count_total | responses | responses/s |
+| coredns.dns_request_count_total_per_status | processed, dropped | requests/s |
+| coredns.dns_no_matching_zone_dropped_total | dropped | requests/s |
+| coredns.dns_panic_count_total | panics | panics/s |
+| coredns.dns_requests_count_total_per_proto | udp, tcp | requests/s |
+| coredns.dns_requests_count_total_per_ip_family | v4, v6 | requests/s |
+| coredns.dns_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |
+| coredns.dns_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |
+
+### Per server
+
+These metrics refer to the DNS server.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| server_name | Server name. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| coredns.server_dns_request_count_total | requests | requests/s |
+| coredns.server_dns_responses_count_total | responses | responses/s |
+| coredns.server_request_count_total_per_status | processed, dropped | requests/s |
+| coredns.server_requests_count_total_per_proto | udp, tcp | requests/s |
+| coredns.server_requests_count_total_per_ip_family | v4, v6 | requests/s |
+| coredns.server_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |
+| coredns.server_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |
+
+### Per zone
+
+These metrics refer to the DNS zone.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| zone_name | Zone name. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| coredns.zone_dns_request_count_total | requests | requests/s |
+| coredns.zone_dns_responses_count_total | responses | responses/s |
+| coredns.zone_requests_count_total_per_proto | udp, tcp | requests/s |
+| coredns.zone_requests_count_total_per_ip_family | v4, v6 | requests/s |
+| coredns.zone_requests_count_total_per_per_type | a, aaaa, mx, soa, cname, ptr, txt, ns, ds, dnskey, rrsig, nsec, nsec3, ixfr, any, other | requests/s |
+| coredns.zone_responses_count_total_per_rcode | noerror, formerr, servfail, nxdomain, notimp, refused, yxdomain, yxrrset, nxrrset, notauth, notzone, badsig, badkey, badtime, badmode, badname, badalg, badtrunc, badcookie, other | responses/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/coredns.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/coredns.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
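+For example, a `go.d/coredns.conf` that applies these two options globally to all jobs (the values below are purely illustrative) might begin like this:
+
+```yaml
+update_every: 5
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9153/metrics
+```
+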
+<details open><summary>All options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9153/metrics | yes |
+| per_server_stats | Server filter. | | no |
+| per_zone_stats | Zone filter. | | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| timeout | HTTP request timeout. | 1 | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### per_server_stats
+
+Metrics of servers matching the selector will be collected.
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).
+- Syntax:
+
+```yaml
+per_server_stats:
+ includes:
+ - pattern1
+ - pattern2
+ excludes:
+ - pattern3
+ - pattern4
+```
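+
+For instance, a hypothetical selector that keeps the named `dns://` server blocks but skips the one listening on port 54 (server label values look like `dns://:53`) could use glob patterns:
+
+```yaml
+per_server_stats:
+  includes:
+    - 'glob:dns://*'
+  excludes:
+    - 'glob:*:54'
+```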
+
+
+##### per_zone_stats
+
+Metrics of zones matching the selector will be collected.
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).
+- Syntax:
+
+```yaml
+per_zone_stats:
+ includes:
+ - pattern1
+ - pattern2
+ excludes:
+ - pattern3
+ - pattern4
+```
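+
+As an illustration, a hypothetical selector that keeps every zone (zone label values are fully qualified, e.g. `ya.ru.`) while excluding the special `dropped` pseudo-zone could be:
+
+```yaml
+per_zone_stats:
+  includes:
+    - 'glob:*'
+  excludes:
+    - 'glob:dropped'
+```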
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9153/metrics
+
+```
+</details>
+
+##### Basic HTTP auth
+
+Local server with basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9153/metrics
+ username: foo
+ password: bar
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9153/metrics
+
+ - name: remote
+ url: http://203.0.113.10:9153/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `coredns` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m coredns
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `coredns` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep coredns
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep coredns /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep coredns
+```
+
+
diff --git a/src/go/plugin/go.d/modules/coredns/metadata.yaml b/src/go/plugin/go.d/modules/coredns/metadata.yaml
new file mode 100644
index 000000000..e128ab546
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/metadata.yaml
@@ -0,0 +1,459 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-coredns
+ plugin_name: go.d.plugin
+ module_name: coredns
+ monitored_instance:
+ name: CoreDNS
+ link: https://coredns.io/
+ icon_filename: coredns.svg
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - coredns
+ - dns
+ - kubernetes
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors CoreDNS instances.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/coredns.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: All options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:9153/metrics
+ required: true
+ - name: per_server_stats
+ description: Server filter.
+ default_value: ""
+ required: false
+ detailed_description: |
+ Metrics of servers matching the selector will be collected.
+ - Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+ - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).
+ - Syntax:
+
+ ```yaml
+ per_server_stats:
+ includes:
+ - pattern1
+ - pattern2
+ excludes:
+ - pattern3
+ - pattern4
+ ```
+ - name: per_zone_stats
+ description: Zone filter.
+ default_value: ""
+ required: false
+ detailed_description: |
+ Metrics of zones matching the selector will be collected.
+ - Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+ - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).
+ - Syntax:
+
+ ```yaml
+ per_zone_stats:
+ includes:
+ - pattern1
+ - pattern2
+ excludes:
+ - pattern3
+ - pattern4
+ ```
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 2
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client tls certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client tls key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9153/metrics
+ - name: Basic HTTP auth
+ description: Local server with basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9153/metrics
+ username: foo
+ password: bar
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9153/metrics
+
+ - name: remote
+ url: http://203.0.113.10:9153/metrics
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: coredns.dns_request_count_total
+ description: Number Of DNS Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: coredns.dns_responses_count_total
+ description: Number Of DNS Responses
+ unit: responses/s
+ chart_type: line
+ dimensions:
+ - name: responses
+ - name: coredns.dns_request_count_total_per_status
+ description: Number Of Processed And Dropped DNS Requests
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: processed
+ - name: dropped
+ - name: coredns.dns_no_matching_zone_dropped_total
+ description: Number Of Dropped DNS Requests Because Of No Matching Zone
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: dropped
+ - name: coredns.dns_panic_count_total
+ description: Number Of Panics
+ unit: panics/s
+ chart_type: line
+ dimensions:
+ - name: panics
+ - name: coredns.dns_requests_count_total_per_proto
+ description: Number Of DNS Requests Per Transport Protocol
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: udp
+ - name: tcp
+ - name: coredns.dns_requests_count_total_per_ip_family
+ description: Number Of DNS Requests Per IP Family
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: v4
+ - name: v6
+ - name: coredns.dns_requests_count_total_per_per_type
+ description: Number Of DNS Requests Per Type
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a
+ - name: aaaa
+ - name: mx
+ - name: soa
+ - name: cname
+ - name: ptr
+ - name: txt
+ - name: ns
+ - name: ds
+ - name: dnskey
+ - name: rrsig
+ - name: nsec
+ - name: nsec3
+ - name: ixfr
+ - name: any
+ - name: other
+ - name: coredns.dns_responses_count_total_per_rcode
+ description: Number Of DNS Responses Per Rcode
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: noerror
+ - name: formerr
+ - name: servfail
+ - name: nxdomain
+ - name: notimp
+ - name: refused
+ - name: yxdomain
+ - name: yxrrset
+ - name: nxrrset
+ - name: notauth
+ - name: notzone
+ - name: badsig
+ - name: badkey
+ - name: badtime
+ - name: badmode
+ - name: badname
+ - name: badalg
+ - name: badtrunc
+ - name: badcookie
+ - name: other
+ - name: server
+ description: These metrics refer to the DNS server.
+ labels:
+ - name: server_name
+ description: Server name.
+ metrics:
+ - name: coredns.server_dns_request_count_total
+ description: Number Of DNS Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: coredns.server_dns_responses_count_total
+ description: Number Of DNS Responses
+ unit: responses/s
+ chart_type: line
+ dimensions:
+ - name: responses
+ - name: coredns.server_request_count_total_per_status
+ description: Number Of Processed And Dropped DNS Requests
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: processed
+ - name: dropped
+ - name: coredns.server_requests_count_total_per_proto
+ description: Number Of DNS Requests Per Transport Protocol
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: udp
+ - name: tcp
+ - name: coredns.server_requests_count_total_per_ip_family
+ description: Number Of DNS Requests Per IP Family
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: v4
+ - name: v6
+ - name: coredns.server_requests_count_total_per_per_type
+ description: Number Of DNS Requests Per Type
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a
+ - name: aaaa
+ - name: mx
+ - name: soa
+ - name: cname
+ - name: ptr
+ - name: txt
+ - name: ns
+ - name: ds
+ - name: dnskey
+ - name: rrsig
+ - name: nsec
+ - name: nsec3
+ - name: ixfr
+ - name: any
+ - name: other
+ - name: coredns.server_responses_count_total_per_rcode
+ description: Number Of DNS Responses Per Rcode
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: noerror
+ - name: formerr
+ - name: servfail
+ - name: nxdomain
+ - name: notimp
+ - name: refused
+ - name: yxdomain
+ - name: yxrrset
+ - name: nxrrset
+ - name: notauth
+ - name: notzone
+ - name: badsig
+ - name: badkey
+ - name: badtime
+ - name: badmode
+ - name: badname
+ - name: badalg
+ - name: badtrunc
+ - name: badcookie
+ - name: other
+ - name: zone
+ description: These metrics refer to the DNS zone.
+ labels:
+ - name: zone_name
+ description: Zone name.
+ metrics:
+ - name: coredns.zone_dns_request_count_total
+ description: Number Of DNS Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: coredns.zone_dns_responses_count_total
+ description: Number Of DNS Responses
+ unit: responses/s
+ chart_type: line
+ dimensions:
+ - name: responses
+ - name: coredns.zone_requests_count_total_per_proto
+ description: Number Of DNS Requests Per Transport Protocol
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: udp
+ - name: tcp
+ - name: coredns.zone_requests_count_total_per_ip_family
+ description: Number Of DNS Requests Per IP Family
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: v4
+ - name: v6
+ - name: coredns.zone_requests_count_total_per_per_type
+ description: Number Of DNS Requests Per Type
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a
+ - name: aaaa
+ - name: mx
+ - name: soa
+ - name: cname
+ - name: ptr
+ - name: txt
+ - name: ns
+ - name: ds
+ - name: dnskey
+ - name: rrsig
+ - name: nsec
+ - name: nsec3
+ - name: ixfr
+ - name: any
+ - name: other
+ - name: coredns.zone_responses_count_total_per_rcode
+ description: Number Of DNS Responses Per Rcode
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: noerror
+ - name: formerr
+ - name: servfail
+ - name: nxdomain
+ - name: notimp
+ - name: refused
+ - name: yxdomain
+ - name: yxrrset
+ - name: nxrrset
+ - name: notauth
+ - name: notzone
+ - name: badsig
+ - name: badkey
+ - name: badtime
+ - name: badmode
+ - name: badname
+ - name: badalg
+ - name: badtrunc
+ - name: badcookie
+ - name: other
diff --git a/src/go/plugin/go.d/modules/coredns/metrics.go b/src/go/plugin/go.d/modules/coredns/metrics.go
new file mode 100644
index 000000000..5929fdbf6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/metrics.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package coredns
+
+import (
+ mtx "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+)
+
+func newMetrics() *metrics {
+ mx := &metrics{}
+ mx.PerServer = make(map[string]*requestResponse)
+ mx.PerZone = make(map[string]*requestResponse)
+
+ return mx
+}
+
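+// The stm struct tags below define the keys under which each field appears in the flat collected-metrics map (e.g. "panic_total").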
+type metrics struct {
+ Panic mtx.Gauge `stm:"panic_total"`
+ NoZoneDropped mtx.Gauge `stm:"no_matching_zone_dropped_total"`
+ Summary requestResponse `stm:""`
+ PerServer map[string]*requestResponse `stm:""`
+ PerZone map[string]*requestResponse `stm:""`
+}
+
+type requestResponse struct {
+ Request request `stm:"request"`
+ Response response `stm:"response"`
+}
+
+type request struct {
+ Total mtx.Gauge `stm:"total"`
+ PerStatus struct {
+ Processed mtx.Gauge `stm:"processed"`
+ Dropped mtx.Gauge `stm:"dropped"`
+ } `stm:"per_status"`
+ PerProto struct {
+ UDP mtx.Gauge `stm:"udp"`
+ TCP mtx.Gauge `stm:"tcp"`
+ } `stm:"per_proto"`
+ PerIPFamily struct {
+ IPv4 mtx.Gauge `stm:"v4"`
+ IPv6 mtx.Gauge `stm:"v6"`
+ } `stm:"per_ip_family"`
+ // https://github.com/coredns/coredns/blob/master/plugin/metrics/vars/report.go
+ PerType struct {
+ A mtx.Gauge `stm:"A"`
+ AAAA mtx.Gauge `stm:"AAAA"`
+ MX mtx.Gauge `stm:"MX"`
+ SOA mtx.Gauge `stm:"SOA"`
+ CNAME mtx.Gauge `stm:"CNAME"`
+ PTR mtx.Gauge `stm:"PTR"`
+ TXT mtx.Gauge `stm:"TXT"`
+ NS mtx.Gauge `stm:"NS"`
+ SRV mtx.Gauge `stm:"SRV"`
+ DS mtx.Gauge `stm:"DS"`
+ DNSKEY mtx.Gauge `stm:"DNSKEY"`
+ RRSIG mtx.Gauge `stm:"RRSIG"`
+ NSEC mtx.Gauge `stm:"NSEC"`
+ NSEC3 mtx.Gauge `stm:"NSEC3"`
+ IXFR mtx.Gauge `stm:"IXFR"`
+ ANY mtx.Gauge `stm:"ANY"`
+ Other mtx.Gauge `stm:"other"`
+ } `stm:"per_type"`
+ //Duration struct {
+ // LE000025 mtx.Gauge `stm:"0.00025"`
+ // LE00005 mtx.Gauge `stm:"0.0005"`
+ // LE0001 mtx.Gauge `stm:"0.001"`
+ // LE0002 mtx.Gauge `stm:"0.002"`
+ // LE0004 mtx.Gauge `stm:"0.004"`
+ // LE0008 mtx.Gauge `stm:"0.008"`
+ // LE0016 mtx.Gauge `stm:"0.016"`
+ // LE0032 mtx.Gauge `stm:"0.032"`
+ // LE0064 mtx.Gauge `stm:"0.064"`
+ // LE0128 mtx.Gauge `stm:"0.128"`
+ // LE0256 mtx.Gauge `stm:"0.256"`
+ // LE0512 mtx.Gauge `stm:"0.512"`
+ // LE1024 mtx.Gauge `stm:"1.024"`
+ // LE2048 mtx.Gauge `stm:"2.048"`
+ // LE4096 mtx.Gauge `stm:"4.096"`
+ // LE8192 mtx.Gauge `stm:"8.192"`
+ // LEInf mtx.Gauge `stm:"+Inf"`
+ //} `stm:"duration_seconds_bucket"`
+}
+
+// https://github.com/miekg/dns/blob/master/types.go
+// https://github.com/miekg/dns/blob/master/msg.go#L169
+type response struct {
+ Total mtx.Gauge `stm:"total"`
+ PerRcode struct {
+ NOERROR mtx.Gauge `stm:"NOERROR"`
+ FORMERR mtx.Gauge `stm:"FORMERR"`
+ SERVFAIL mtx.Gauge `stm:"SERVFAIL"`
+ NXDOMAIN mtx.Gauge `stm:"NXDOMAIN"`
+ NOTIMP mtx.Gauge `stm:"NOTIMP"`
+ REFUSED mtx.Gauge `stm:"REFUSED"`
+ YXDOMAIN mtx.Gauge `stm:"YXDOMAIN"`
+ YXRRSET mtx.Gauge `stm:"YXRRSET"`
+ NXRRSET mtx.Gauge `stm:"NXRRSET"`
+ NOTAUTH mtx.Gauge `stm:"NOTAUTH"`
+ NOTZONE mtx.Gauge `stm:"NOTZONE"`
+ BADSIG mtx.Gauge `stm:"BADSIG"`
+ BADKEY mtx.Gauge `stm:"BADKEY"`
+ BADTIME mtx.Gauge `stm:"BADTIME"`
+ BADMODE mtx.Gauge `stm:"BADMODE"`
+ BADNAME mtx.Gauge `stm:"BADNAME"`
+ BADALG mtx.Gauge `stm:"BADALG"`
+ BADTRUNC mtx.Gauge `stm:"BADTRUNC"`
+ BADCOOKIE mtx.Gauge `stm:"BADCOOKIE"`
+ Other mtx.Gauge `stm:"other"`
+ } `stm:"per_rcode"`
+}
diff --git a/src/go/plugin/go.d/modules/coredns/testdata/config.json b/src/go/plugin/go.d/modules/coredns/testdata/config.json
new file mode 100644
index 000000000..2dc54a1a2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/testdata/config.json
@@ -0,0 +1,36 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "per_server_stats": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ },
+ "per_zone_stats": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/src/go/plugin/go.d/modules/coredns/testdata/config.yaml b/src/go/plugin/go.d/modules/coredns/testdata/config.yaml
new file mode 100644
index 000000000..be474167f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/testdata/config.yaml
@@ -0,0 +1,27 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+per_server_stats:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
+per_zone_stats:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/src/go/plugin/go.d/modules/coredns/testdata/no_version/no_load.txt b/src/go/plugin/go.d/modules/coredns/testdata/no_version/no_load.txt
new file mode 100644
index 000000000..f0de841f0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/testdata/no_version/no_load.txt
@@ -0,0 +1,6 @@
+# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built.
+# TYPE coredns_build_info gauge
+coredns_build_info{goversion="go1.14.4",revision="f59c03d"} 1
+# HELP coredns_panics_total A metrics that counts the number of panics.
+# TYPE coredns_panics_total counter
+coredns_panics_total 99 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/coredns/testdata/version169/no_load.txt b/src/go/plugin/go.d/modules/coredns/testdata/version169/no_load.txt
new file mode 100644
index 000000000..8fee1a73c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/testdata/version169/no_load.txt
@@ -0,0 +1,6 @@
+# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built.
+# TYPE coredns_build_info gauge
+coredns_build_info{goversion="go1.14.1",revision="1766568",version="1.6.9"} 1
+# HELP coredns_panic_count_total A metrics that counts the number of panics.
+# TYPE coredns_panic_count_total counter
+coredns_panic_count_total 99 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/coredns/testdata/version169/some_load.txt b/src/go/plugin/go.d/modules/coredns/testdata/version169/some_load.txt
new file mode 100644
index 000000000..15c4a57ec
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/testdata/version169/some_load.txt
@@ -0,0 +1,180 @@
+# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built.
+# TYPE coredns_build_info gauge
+coredns_build_info{goversion="go1.14.1",revision="1766568",version="1.6.9"} 1
+# HELP coredns_panic_count_total A metrics that counts the number of panics.
+# TYPE coredns_panic_count_total counter
+coredns_panic_count_total 0
+# HELP coredns_dns_request_count_total Counter of DNS requests made per zone, protocol and family.
+# TYPE coredns_dns_request_count_total counter
+coredns_dns_request_count_total{family="1",proto="udp",server="",zone="dropped"} 21
+coredns_dns_request_count_total{family="1",proto="udp",server="dns://:53",zone="coredns.io."} 6
+coredns_dns_request_count_total{family="1",proto="udp",server="dns://:53",zone="dropped"} 9
+coredns_dns_request_count_total{family="1",proto="udp",server="dns://:53",zone="ya.ru."} 9
+coredns_dns_request_count_total{family="1",proto="udp",server="dns://:54",zone="coredns.io."} 13
+coredns_dns_request_count_total{family="1",proto="udp",server="dns://:54",zone="dropped"} 12
+coredns_dns_request_count_total{family="1",proto="udp",server="dns://:54",zone="ya.ru."} 12
+# HELP coredns_dns_request_type_count_total Counter of DNS requests per type, per zone.
+# TYPE coredns_dns_request_type_count_total counter
+coredns_dns_request_type_count_total{server="",type="A",zone="dropped"} 7
+coredns_dns_request_type_count_total{server="",type="AAAA",zone="dropped"} 7
+coredns_dns_request_type_count_total{server="",type="MX",zone="dropped"} 7
+coredns_dns_request_type_count_total{server="dns://:53",type="A",zone="coredns.io."} 2
+coredns_dns_request_type_count_total{server="dns://:53",type="A",zone="dropped"} 3
+coredns_dns_request_type_count_total{server="dns://:53",type="A",zone="ya.ru."} 3
+coredns_dns_request_type_count_total{server="dns://:53",type="AAAA",zone="coredns.io."} 2
+coredns_dns_request_type_count_total{server="dns://:53",type="AAAA",zone="dropped"} 3
+coredns_dns_request_type_count_total{server="dns://:53",type="AAAA",zone="ya.ru."} 3
+coredns_dns_request_type_count_total{server="dns://:53",type="MX",zone="coredns.io."} 2
+coredns_dns_request_type_count_total{server="dns://:53",type="MX",zone="dropped"} 3
+coredns_dns_request_type_count_total{server="dns://:53",type="MX",zone="ya.ru."} 3
+coredns_dns_request_type_count_total{server="dns://:54",type="A",zone="coredns.io."} 4
+coredns_dns_request_type_count_total{server="dns://:54",type="A",zone="dropped"} 4
+coredns_dns_request_type_count_total{server="dns://:54",type="A",zone="ya.ru."} 4
+coredns_dns_request_type_count_total{server="dns://:54",type="AAAA",zone="coredns.io."} 4
+coredns_dns_request_type_count_total{server="dns://:54",type="AAAA",zone="dropped"} 4
+coredns_dns_request_type_count_total{server="dns://:54",type="AAAA",zone="ya.ru."} 4
+coredns_dns_request_type_count_total{server="dns://:54",type="MX",zone="coredns.io."} 5
+coredns_dns_request_type_count_total{server="dns://:54",type="MX",zone="dropped"} 4
+coredns_dns_request_type_count_total{server="dns://:54",type="MX",zone="ya.ru."} 4
+# HELP coredns_dns_response_rcode_count_total Counter of response status codes.
+# TYPE coredns_dns_response_rcode_count_total counter
+coredns_dns_response_rcode_count_total{rcode="NOERROR",server="dns://:53",zone="coredns.io."} 6
+coredns_dns_response_rcode_count_total{rcode="NOERROR",server="dns://:54",zone="coredns.io."} 13
+coredns_dns_response_rcode_count_total{rcode="REFUSED",server="",zone="dropped"} 21
+coredns_dns_response_rcode_count_total{rcode="SERVFAIL",server="dns://:53",zone="dropped"} 9
+coredns_dns_response_rcode_count_total{rcode="SERVFAIL",server="dns://:53",zone="ya.ru."} 9
+coredns_dns_response_rcode_count_total{rcode="SERVFAIL",server="dns://:54",zone="dropped"} 12
+coredns_dns_response_rcode_count_total{rcode="SERVFAIL",server="dns://:54",zone="ya.ru."} 12
+# HELP coredns_dns_request_duration_seconds Histogram of the time (in seconds) each request took.
+# TYPE coredns_dns_request_duration_seconds histogram
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.00025"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.0005"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.001"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.002"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.004"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.008"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.016"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.032"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.064"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.128"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.256"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="0.512"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="1.024"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="2.048"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="4.096"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="8.192"} 21
+coredns_dns_request_duration_seconds_bucket{server="",zone="dropped",le="+Inf"} 21
+coredns_dns_request_duration_seconds_sum{server="",zone="dropped"} 0.00015171000000000005
+coredns_dns_request_duration_seconds_count{server="",zone="dropped"} 21
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.00025"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.0005"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.001"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.002"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.004"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.008"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.016"} 1
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.032"} 1
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.064"} 5
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.128"} 6
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.256"} 6
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="0.512"} 6
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="1.024"} 6
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="2.048"} 6
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="4.096"} 6
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="8.192"} 6
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="coredns.io.",le="+Inf"} 6
+coredns_dns_request_duration_seconds_sum{server="dns://:53",zone="coredns.io."} 0.278949832
+coredns_dns_request_duration_seconds_count{server="dns://:53",zone="coredns.io."} 6
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.00025"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.0005"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.001"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.002"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.004"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.008"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.016"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.032"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.064"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.128"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.256"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="0.512"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="1.024"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="2.048"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="4.096"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="8.192"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="dropped",le="+Inf"} 9
+coredns_dns_request_duration_seconds_sum{server="dns://:53",zone="dropped"} 7.657700000000001e-05
+coredns_dns_request_duration_seconds_count{server="dns://:53",zone="dropped"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.00025"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.0005"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.001"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.002"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.004"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.008"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.016"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.032"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.064"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.128"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.256"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="0.512"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="1.024"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="2.048"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="4.096"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="8.192"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:53",zone="ya.ru.",le="+Inf"} 9
+coredns_dns_request_duration_seconds_sum{server="dns://:53",zone="ya.ru."} 0.001103838
+coredns_dns_request_duration_seconds_count{server="dns://:53",zone="ya.ru."} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.00025"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.0005"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.001"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.002"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.004"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.008"} 0
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.016"} 9
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.032"} 10
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.064"} 13
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.128"} 13
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.256"} 13
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="0.512"} 13
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="1.024"} 13
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="2.048"} 13
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="4.096"} 13
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="8.192"} 13
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="coredns.io.",le="+Inf"} 13
+coredns_dns_request_duration_seconds_sum{server="dns://:54",zone="coredns.io."} 0.25558616300000003
+coredns_dns_request_duration_seconds_count{server="dns://:54",zone="coredns.io."} 13
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.00025"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.0005"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.001"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.002"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.004"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.008"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.016"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.032"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.064"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.128"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.256"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="0.512"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="1.024"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="2.048"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="4.096"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="8.192"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="dropped",le="+Inf"} 12
+coredns_dns_request_duration_seconds_sum{server="dns://:54",zone="dropped"} 9.260400000000001e-05
+coredns_dns_request_duration_seconds_count{server="dns://:54",zone="dropped"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.00025"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.0005"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.001"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.002"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.004"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.008"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.016"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.032"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.064"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.128"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.256"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="0.512"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="1.024"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="2.048"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="4.096"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="8.192"} 12
+coredns_dns_request_duration_seconds_bucket{server="dns://:54",zone="ya.ru.",le="+Inf"} 12 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/coredns/testdata/version170/no_load.txt b/src/go/plugin/go.d/modules/coredns/testdata/version170/no_load.txt
new file mode 100644
index 000000000..ba343ab57
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/testdata/version170/no_load.txt
@@ -0,0 +1,6 @@
+# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built.
+# TYPE coredns_build_info gauge
+coredns_build_info{goversion="go1.14.4",revision="f59c03d",version="1.7.0"} 1
+# HELP coredns_panics_total A metrics that counts the number of panics.
+# TYPE coredns_panics_total counter
+coredns_panics_total 99 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/coredns/testdata/version170/some_load.txt b/src/go/plugin/go.d/modules/coredns/testdata/version170/some_load.txt
new file mode 100644
index 000000000..34f0a9a22
--- /dev/null
+++ b/src/go/plugin/go.d/modules/coredns/testdata/version170/some_load.txt
@@ -0,0 +1,38 @@
+# HELP coredns_build_info A metric with a constant '1' value labeled by version, revision, and goversion from which CoreDNS was built.
+# TYPE coredns_build_info gauge
+coredns_build_info{goversion="go1.14.4",revision="f59c03d",version="1.7.0"} 1
+# HELP coredns_panics_total A metrics that counts the number of panics.
+# TYPE coredns_panics_total counter
+coredns_panics_total 0
+# HELP coredns_dns_requests_total Counter of DNS requests made per zone, protocol and family.
+# TYPE coredns_dns_requests_total counter
+coredns_dns_requests_total{family="1",proto="udp",server="",type="A",zone="dropped"} 7
+coredns_dns_requests_total{family="1",proto="udp",server="",type="AAAA",zone="dropped"} 7
+coredns_dns_requests_total{family="1",proto="udp",server="",type="MX",zone="dropped"} 7
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="A",zone="coredns.io."} 2
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="A",zone="dropped"} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="A",zone="ya.ru."} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="AAAA",zone="coredns.io."} 2
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="AAAA",zone="dropped"} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="AAAA",zone="ya.ru."} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="MX",zone="coredns.io."} 2
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="MX",zone="dropped"} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:53",type="MX",zone="ya.ru."} 3
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="A",zone="coredns.io."} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="A",zone="dropped"} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="A",zone="ya.ru."} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="AAAA",zone="coredns.io."} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="AAAA",zone="dropped"} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="AAAA",zone="ya.ru."} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="MX",zone="coredns.io."} 5
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="MX",zone="dropped"} 4
+coredns_dns_requests_total{family="1",proto="udp",server="dns://:54",type="MX",zone="ya.ru."} 4
+# HELP coredns_dns_response_rcode_count_total Counter of response status codes.
+# TYPE coredns_dns_response_rcode_count_total counter
+coredns_dns_responses_total{rcode="NOERROR",server="dns://:53",zone="coredns.io."} 6
+coredns_dns_responses_total{rcode="NOERROR",server="dns://:54",zone="coredns.io."} 13
+coredns_dns_responses_total{rcode="REFUSED",server="",zone="dropped"} 21
+coredns_dns_responses_total{rcode="SERVFAIL",server="dns://:53",zone="dropped"} 9
+coredns_dns_responses_total{rcode="SERVFAIL",server="dns://:53",zone="ya.ru."} 9
+coredns_dns_responses_total{rcode="SERVFAIL",server="dns://:54",zone="dropped"} 12
+coredns_dns_responses_total{rcode="SERVFAIL",server="dns://:54",zone="ya.ru."} 12 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/couchbase/README.md b/src/go/plugin/go.d/modules/couchbase/README.md
new file mode 120000
index 000000000..fa8d05e1c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/README.md
@@ -0,0 +1 @@
+integrations/couchbase.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/couchbase/charts.go b/src/go/plugin/go.d/modules/couchbase/charts.go
new file mode 100644
index 000000000..277b814ad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/charts.go
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchbase
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+type (
+ Charts = module.Charts
+ Chart = module.Chart
+ Dim = module.Dim
+)
+
+var bucketQuotaPercentUsedChart = Chart{
+ ID: "bucket_quota_percent_used",
+ Title: "Quota Percent Used Per Bucket",
+ Units: "%",
+ Fam: "buckets basic stats",
+ Ctx: "couchbase.bucket_quota_percent_used",
+}
+
+var bucketOpsPerSecChart = Chart{
+ ID: "bucket_ops_per_sec",
+ Title: "Operations Per Second Per Bucket",
+ Units: "ops/s",
+ Fam: "buckets basic stats",
+ Ctx: "couchbase.bucket_ops_per_sec",
+ Type: module.Stacked,
+}
+
+var bucketDiskFetchesChart = Chart{
+ ID: "bucket_disk_fetches",
+ Title: "Disk Fetches Per Bucket",
+ Units: "fetches",
+ Fam: "buckets basic stats",
+ Ctx: "couchbase.bucket_disk_fetches",
+ Type: module.Stacked,
+}
+
+var bucketItemCountChart = Chart{
+ ID: "bucket_item_count",
+ Title: "Item Count Per Bucket",
+ Units: "items",
+ Fam: "buckets basic stats",
+ Ctx: "couchbase.bucket_item_count",
+ Type: module.Stacked,
+}
+
+var bucketDiskUsedChart = Chart{
+ ID: "bucket_disk_used_stats",
+ Title: "Disk Used Per Bucket",
+ Units: "bytes",
+ Fam: "buckets basic stats",
+ Ctx: "couchbase.bucket_disk_used_stats",
+ Type: module.Stacked,
+}
+
+var bucketDataUsedChart = Chart{
+ ID: "bucket_data_used",
+ Title: "Data Used Per Bucket",
+ Units: "bytes",
+ Fam: "buckets basic stats",
+ Ctx: "couchbase.bucket_data_used",
+ Type: module.Stacked,
+}
+
+var bucketMemUsedChart = Chart{
+ ID: "bucket_mem_used",
+ Title: "Memory Used Per Bucket",
+ Units: "bytes",
+ Fam: "buckets basic stats",
+ Ctx: "couchbase.bucket_mem_used",
+ Type: module.Stacked,
+}
+
+var bucketVBActiveNumNonResidentChart = Chart{
+ ID: "bucket_vb_active_num_non_resident_stats",
+ Title: "Number Of Non-Resident Items Per Bucket",
+ Units: "items",
+ Fam: "buckets basic stats",
+ Ctx: "couchbase.bucket_vb_active_num_non_resident",
+ Type: module.Stacked,
+}
diff --git a/src/go/plugin/go.d/modules/couchbase/collect.go b/src/go/plugin/go.d/modules/couchbase/collect.go
new file mode 100644
index 000000000..6027ac918
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/collect.go
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchbase
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ urlPathBucketsStats = "/pools/default/buckets"
+
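+	// precision scales fractional values (e.g. quota_percent_used, ops_per_sec) before conversion to int64; the matching chart dimensions divide by it again.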
+ precision = 1000
+)
+
+func (cb *Couchbase) collect() (map[string]int64, error) {
+ ms, err := cb.scrapeCouchbase()
+ if err != nil {
+ return nil, fmt.Errorf("error on scraping couchbase: %v", err)
+ }
+ if ms.empty() {
+ return nil, nil
+ }
+
+ collected := make(map[string]int64)
+ cb.collectBasicStats(collected, ms)
+
+ return collected, nil
+}
+
+func (cb *Couchbase) collectBasicStats(collected map[string]int64, ms *cbMetrics) {
+ for _, b := range ms.BucketsBasicStats {
+
+ if !cb.collectedBuckets[b.Name] {
+ cb.collectedBuckets[b.Name] = true
+ cb.addBucketToCharts(b.Name)
+ }
+
+ bs := b.BasicStats
+ collected[indexDimID(b.Name, "quota_percent_used")] = int64(bs.QuotaPercentUsed * precision)
+ collected[indexDimID(b.Name, "ops_per_sec")] = int64(bs.OpsPerSec * precision)
+ collected[indexDimID(b.Name, "disk_fetches")] = int64(bs.DiskFetches)
+ collected[indexDimID(b.Name, "item_count")] = int64(bs.ItemCount)
+ collected[indexDimID(b.Name, "disk_used")] = int64(bs.DiskUsed)
+ collected[indexDimID(b.Name, "data_used")] = int64(bs.DataUsed)
+ collected[indexDimID(b.Name, "mem_used")] = int64(bs.MemUsed)
+ collected[indexDimID(b.Name, "vb_active_num_non_resident")] = int64(bs.VbActiveNumNonResident)
+ }
+}
+
+func (cb *Couchbase) addBucketToCharts(bucket string) {
+ cb.addDimToChart(bucketQuotaPercentUsedChart.ID, &module.Dim{
+ ID: indexDimID(bucket, "quota_percent_used"),
+ Name: bucket,
+ Div: precision,
+ })
+
+ cb.addDimToChart(bucketOpsPerSecChart.ID, &module.Dim{
+ ID: indexDimID(bucket, "ops_per_sec"),
+ Name: bucket,
+ Div: precision,
+ })
+
+ cb.addDimToChart(bucketDiskFetchesChart.ID, &module.Dim{
+ ID: indexDimID(bucket, "disk_fetches"),
+ Name: bucket,
+ })
+
+ cb.addDimToChart(bucketItemCountChart.ID, &module.Dim{
+ ID: indexDimID(bucket, "item_count"),
+ Name: bucket,
+ })
+
+ cb.addDimToChart(bucketDiskUsedChart.ID, &module.Dim{
+ ID: indexDimID(bucket, "disk_used"),
+ Name: bucket,
+ })
+
+ cb.addDimToChart(bucketDataUsedChart.ID, &module.Dim{
+ ID: indexDimID(bucket, "data_used"),
+ Name: bucket,
+ })
+
+ cb.addDimToChart(bucketMemUsedChart.ID, &module.Dim{
+ ID: indexDimID(bucket, "mem_used"),
+ Name: bucket,
+ })
+
+ cb.addDimToChart(bucketVBActiveNumNonResidentChart.ID, &module.Dim{
+ ID: indexDimID(bucket, "vb_active_num_non_resident"),
+ Name: bucket,
+ })
+}
+
+func (cb *Couchbase) addDimToChart(chartID string, dim *module.Dim) {
+ chart := cb.Charts().Get(chartID)
+ if chart == nil {
+		cb.Warningf("error on adding '%s' dimension: cannot find '%s' chart", dim.ID, chartID)
+ return
+ }
+ if err := chart.AddDim(dim); err != nil {
+ cb.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (cb *Couchbase) scrapeCouchbase() (*cbMetrics, error) {
+ req, err := web.NewHTTPRequestWithPath(cb.Request, urlPathBucketsStats)
+ if err != nil {
+ return nil, err
+ }
+ req.URL.RawQuery = url.Values{"skipMap": []string{"true"}}.Encode()
+
+ ms := &cbMetrics{}
+ if err := cb.doOKDecode(req, &ms.BucketsBasicStats); err != nil {
+ return nil, err
+ }
+ return ms, nil
+}
+
+func (cb *Couchbase) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := cb.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
+func indexDimID(name, metric string) string {
+ return fmt.Sprintf("bucket_%s_%s", name, metric)
+}
diff --git a/src/go/plugin/go.d/modules/couchbase/config_schema.json b/src/go/plugin/go.d/modules/couchbase/config_schema.json
new file mode 100644
index 000000000..6ef455a97
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Couchbase collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Couchbase REST API.",
+ "type": "string",
+ "default": "http://127.0.0.1:8091",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/couchbase/couchbase.go b/src/go/plugin/go.d/modules/couchbase/couchbase.go
new file mode 100644
index 000000000..8ef880c2c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/couchbase.go
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchbase
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("couchbase", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Couchbase {
+ return &Couchbase{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8091",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ collectedBuckets: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Couchbase struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ httpClient *http.Client
+ charts *module.Charts
+
+ collectedBuckets map[string]bool
+}
+
+func (cb *Couchbase) Configuration() any {
+ return cb.Config
+}
+
+func (cb *Couchbase) Init() error {
+ err := cb.validateConfig()
+ if err != nil {
+ cb.Errorf("check configuration: %v", err)
+ return err
+ }
+
+ httpClient, err := cb.initHTTPClient()
+ if err != nil {
+ cb.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ cb.httpClient = httpClient
+
+ charts, err := cb.initCharts()
+ if err != nil {
+ cb.Errorf("init charts: %v", err)
+ return err
+ }
+ cb.charts = charts
+
+ return nil
+}
+
+func (cb *Couchbase) Check() error {
+ mx, err := cb.collect()
+ if err != nil {
+ cb.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+	}
+ return nil
+}
+
+func (cb *Couchbase) Charts() *Charts {
+ return cb.charts
+}
+
+func (cb *Couchbase) Collect() map[string]int64 {
+ mx, err := cb.collect()
+ if err != nil {
+ cb.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (cb *Couchbase) Cleanup() {
+ if cb.httpClient == nil {
+ return
+ }
+ cb.httpClient.CloseIdleConnections()
+}
diff --git a/src/go/plugin/go.d/modules/couchbase/couchbase_test.go b/src/go/plugin/go.d/modules/couchbase/couchbase_test.go
new file mode 100644
index 000000000..b28c8e8fe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/couchbase_test.go
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchbase
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer660BucketsBasicStats, _ = os.ReadFile("testdata/6.6.0/buckets_basic_stats.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer660BucketsBasicStats": dataVer660BucketsBasicStats,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestCouchbase_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Couchbase{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestCouchbase_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset 'URL'": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "",
+ },
+ },
+ },
+ },
+ "fails on invalid URL": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "127.0.0.1:9090",
+ },
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ cb := New()
+ cb.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, cb.Init())
+ } else {
+ assert.NoError(t, cb.Init())
+ }
+ })
+ }
+}
+
+func TestCouchbase_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (cb *Couchbase, cleanup func())
+ wantFail bool
+ }{
+ "success on valid response v6.6.0": {
+ prepare: prepareCouchbaseV660,
+ },
+ "fails on response with invalid data": {
+ wantFail: true,
+ prepare: prepareCouchbaseInvalidData,
+ },
+ "fails on 404 response": {
+ wantFail: true,
+ prepare: prepareCouchbase404,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCouchbaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ cb, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, cb.Check())
+ } else {
+ assert.NoError(t, cb.Check())
+ }
+ })
+ }
+}
+
+func TestCouchbase_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (cb *Couchbase, cleanup func())
+ wantCollected map[string]int64
+ }{
+ "success on valid response v6.6.0": {
+ prepare: prepareCouchbaseV660,
+ wantCollected: map[string]int64{
+ "bucket_beer-sample_data_used": 13990431,
+ "bucket_beer-sample_disk_fetches": 1,
+ "bucket_beer-sample_disk_used": 27690472,
+ "bucket_beer-sample_item_count": 7303,
+ "bucket_beer-sample_mem_used": 34294872,
+ "bucket_beer-sample_ops_per_sec": 1100,
+ "bucket_beer-sample_quota_percent_used": 32706,
+ "bucket_beer-sample_vb_active_num_non_resident": 1,
+ "bucket_gamesim-sample_data_used": 5371804,
+ "bucket_gamesim-sample_disk_fetches": 1,
+ "bucket_gamesim-sample_disk_used": 13821793,
+ "bucket_gamesim-sample_item_count": 586,
+ "bucket_gamesim-sample_mem_used": 29586696,
+ "bucket_gamesim-sample_ops_per_sec": 1100,
+ "bucket_gamesim-sample_quota_percent_used": 28216,
+ "bucket_gamesim-sample_vb_active_num_non_resident": 1,
+ "bucket_travel-sample_data_used": 53865472,
+ "bucket_travel-sample_disk_fetches": 1,
+ "bucket_travel-sample_disk_used": 62244260,
+ "bucket_travel-sample_item_count": 31591,
+ "bucket_travel-sample_mem_used": 54318184,
+ "bucket_travel-sample_ops_per_sec": 1100,
+ "bucket_travel-sample_quota_percent_used": 51801,
+ "bucket_travel-sample_vb_active_num_non_resident": 1,
+ },
+ },
+ "fails on response with invalid data": {
+ prepare: prepareCouchbaseInvalidData,
+ },
+ "fails on 404 response": {
+ prepare: prepareCouchbase404,
+ },
+ "fails on connection refused": {
+ prepare: prepareCouchbaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ cb, cleanup := test.prepare(t)
+ defer cleanup()
+
+ collected := cb.Collect()
+
+ assert.Equal(t, test.wantCollected, collected)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, cb, collected)
+ })
+ }
+}
+
+func prepareCouchbaseV660(t *testing.T) (cb *Couchbase, cleanup func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer660BucketsBasicStats)
+ }))
+
+ cb = New()
+ cb.URL = srv.URL
+ require.NoError(t, cb.Init())
+
+ return cb, srv.Close
+}
+
+func prepareCouchbaseInvalidData(t *testing.T) (*Couchbase, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ cb := New()
+ cb.URL = srv.URL
+ require.NoError(t, cb.Init())
+
+ return cb, srv.Close
+}
+
+func prepareCouchbase404(t *testing.T) (*Couchbase, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ cb := New()
+ cb.URL = srv.URL
+ require.NoError(t, cb.Init())
+
+ return cb, srv.Close
+}
+
+func prepareCouchbaseConnectionRefused(t *testing.T) (*Couchbase, func()) {
+ t.Helper()
+ cb := New()
+ cb.URL = "http://127.0.0.1:38001"
+ require.NoError(t, cb.Init())
+
+ return cb, func() {}
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, cb *Couchbase, collected map[string]int64) {
+ for _, chart := range *cb.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+			assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+			assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/couchbase/init.go b/src/go/plugin/go.d/modules/couchbase/init.go
new file mode 100644
index 000000000..196e6998c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/init.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchbase
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (cb *Couchbase) initCharts() (*Charts, error) {
+ var bucketCharts = module.Charts{
+ bucketQuotaPercentUsedChart.Copy(),
+ bucketOpsPerSecChart.Copy(),
+ bucketDiskFetchesChart.Copy(),
+ bucketItemCountChart.Copy(),
+ bucketDiskUsedChart.Copy(),
+ bucketDataUsedChart.Copy(),
+ bucketMemUsedChart.Copy(),
+ bucketVBActiveNumNonResidentChart.Copy(),
+ }
+ return bucketCharts.Copy(), nil
+}
+
+func (cb *Couchbase) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(cb.Client)
+}
+
+func (cb *Couchbase) validateConfig() error {
+ if cb.URL == "" {
+ return errors.New("URL not set")
+ }
+ if _, err := web.NewHTTPRequest(cb.Request); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/couchbase/integrations/couchbase.md b/src/go/plugin/go.d/modules/couchbase/integrations/couchbase.md
new file mode 100644
index 000000000..b53dc940c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/integrations/couchbase.md
@@ -0,0 +1,247 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/couchbase/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/couchbase/metadata.yaml"
+sidebar_label: "Couchbase"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Couchbase
+
+
+<img src="https://netdata.cloud/img/couchbase.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: couchbase
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Couchbase servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Couchbase instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| couchbase.bucket_quota_percent_used | a dimension per bucket | percentage |
+| couchbase.bucket_ops_per_sec | a dimension per bucket | ops/s |
+| couchbase.bucket_disk_fetches | a dimension per bucket | fetches |
+| couchbase.bucket_item_count | a dimension per bucket | items |
+| couchbase.bucket_disk_used_stats | a dimension per bucket | bytes |
+| couchbase.bucket_data_used | a dimension per bucket | bytes |
+| couchbase.bucket_mem_used | a dimension per bucket | bytes |
+| couchbase.bucket_vb_active_num_non_resident | a dimension per bucket | items |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
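+Before configuring the collector, you may want to confirm that the Couchbase REST API is reachable from the Netdata host. The check below is only an illustrative sketch: the `/pools/default/buckets` endpoint and the credentials are assumptions based on this module's test data, so adjust them to your deployment.
+
+```bash
+# Assumption: per-bucket basic stats are exposed at /pools/default/buckets (verify for your Couchbase version).
+curl -s -u <username>:<password> http://127.0.0.1:8091/pools/default/buckets | head -c 200
+```
+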
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/couchbase.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/couchbase.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>All options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8091 | yes |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| timeout | HTTP request timeout. | 2 | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8091
+
+```
+</details>
+
+##### Basic HTTP auth
+
+Local server with basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8091
+ username: foo
+ password: bar
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8091
+
+ - name: remote
+ url: http://203.0.113.0:8091
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `couchbase` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m couchbase
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `couchbase` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep couchbase
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep couchbase /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep couchbase
+```
+
+
diff --git a/src/go/plugin/go.d/modules/couchbase/metadata.yaml b/src/go/plugin/go.d/modules/couchbase/metadata.yaml
new file mode 100644
index 000000000..de21e924d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/metadata.yaml
@@ -0,0 +1,214 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-couchbase
+ plugin_name: go.d.plugin
+ module_name: couchbase
+ monitored_instance:
+ name: Couchbase
+ link: https://www.couchbase.com/
+ icon_filename: couchbase.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - couchbase
+ - databases
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Couchbase servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/couchbase.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: All options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8091
+ required: true
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 2
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8091
+ - name: Basic HTTP auth
+ description: Local server with basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8091
+ username: foo
+ password: bar
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8091
+
+ - name: remote
+ url: http://203.0.113.0:8091
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: couchbase.bucket_quota_percent_used
+ description: Quota Percent Used Per Bucket
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: a dimension per bucket
+ - name: couchbase.bucket_ops_per_sec
+ description: Operations Per Second Per Bucket
+ unit: ops/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: couchbase.bucket_disk_fetches
+ description: Disk Fetches Per Bucket
+ unit: fetches
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: couchbase.bucket_item_count
+ description: Item Count Per Bucket
+ unit: items
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: couchbase.bucket_disk_used_stats
+ description: Disk Used Per Bucket
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: couchbase.bucket_data_used
+ description: Data Used Per Bucket
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: couchbase.bucket_mem_used
+ description: Memory Used Per Bucket
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: couchbase.bucket_vb_active_num_non_resident
+ description: Number Of Non-Resident Items Per Bucket
+ unit: items
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
diff --git a/src/go/plugin/go.d/modules/couchbase/metrics.go b/src/go/plugin/go.d/modules/couchbase/metrics.go
new file mode 100644
index 000000000..c4f23304b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/metrics.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchbase
+
+type cbMetrics struct {
+ // https://developer.couchbase.com/resources/best-practice-guides/monitoring-guide.pdf
+ BucketsBasicStats []bucketsBasicStats
+}
+
+func (m cbMetrics) empty() bool {
+ switch {
+ case m.hasBucketsStats():
+ return false
+ }
+ return true
+}
+
+func (m cbMetrics) hasBucketsStats() bool { return len(m.BucketsBasicStats) > 0 }
+
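+// bucketsBasicStats holds the name and "basicStats" object of a single bucket
+// (see testdata/6.6.0/buckets_basic_stats.json for the full payload shape).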
+type bucketsBasicStats struct {
+ Name string `json:"name"`
+
+ BasicStats struct {
+ DataUsed float64 `json:"dataUsed"`
+ DiskFetches float64 `json:"diskFetches"`
+ ItemCount float64 `json:"itemCount"`
+ DiskUsed float64 `json:"diskUsed"`
+ MemUsed float64 `json:"memUsed"`
+ OpsPerSec float64 `json:"opsPerSec"`
+ QuotaPercentUsed float64 `json:"quotaPercentUsed"`
+ VbActiveNumNonResident float64 `json:"vbActiveNumNonResident"`
+ } `json:"basicStats"`
+}
diff --git a/src/go/plugin/go.d/modules/couchbase/testdata/6.6.0/buckets_basic_stats.json b/src/go/plugin/go.d/modules/couchbase/testdata/6.6.0/buckets_basic_stats.json
new file mode 100644
index 000000000..3749add79
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/testdata/6.6.0/buckets_basic_stats.json
@@ -0,0 +1,422 @@
+[
+ {
+ "name": "beer-sample",
+ "uuid": "bf10ab11911f1c065db5fd58c5fbc0b6",
+ "bucketType": "membase",
+ "authType": "sasl",
+ "uri": "/pools/default/buckets/beer-sample?bucket_uuid=bf10ab11911f1c065db5fd58c5fbc0b6",
+ "streamingUri": "/pools/default/bucketsStreaming/beer-sample?bucket_uuid=bf10ab11911f1c065db5fd58c5fbc0b6",
+ "localRandomKeyUri": "/pools/default/buckets/beer-sample/localRandomKey",
+ "controllers": {
+ "compactAll": "/pools/default/buckets/beer-sample/controller/compactBucket",
+ "compactDB": "/pools/default/buckets/beer-sample/controller/compactDatabases",
+ "purgeDeletes": "/pools/default/buckets/beer-sample/controller/unsafePurgeBucket",
+ "startRecovery": "/pools/default/buckets/beer-sample/controller/startRecovery"
+ },
+ "nodes": [
+ {
+ "couchApiBaseHTTPS": "https://172.17.0.2:18092/beer-sample%2Bbf10ab11911f1c065db5fd58c5fbc0b6",
+ "couchApiBase": "http://172.17.0.2:8092/beer-sample%2Bbf10ab11911f1c065db5fd58c5fbc0b6",
+ "systemStats": {
+ "cpu_utilization_rate": 15.21035598705502,
+ "cpu_stolen_rate": 0,
+ "swap_total": 0,
+ "swap_used": 0,
+ "mem_total": 33587437568,
+ "mem_free": 30532227072,
+ "mem_limit": 33587437568,
+ "cpu_cores_available": 6,
+ "allocstall": 0
+ },
+ "interestingStats": {
+ "cmd_get": 0,
+ "couch_docs_actual_disk_size": 102960477,
+ "couch_docs_data_size": 72439963,
+ "couch_spatial_data_size": 0,
+ "couch_spatial_disk_size": 0,
+ "couch_views_actual_disk_size": 796048,
+ "couch_views_data_size": 787744,
+ "curr_items": 39480,
+ "curr_items_tot": 39480,
+ "ep_bg_fetched": 0,
+ "get_hits": 0,
+ "mem_used": 118199752,
+ "ops": 0,
+ "vb_active_num_non_resident": 0,
+ "vb_replica_curr_items": 0
+ },
+ "uptime": "638",
+ "memoryTotal": 33587437568,
+ "memoryFree": 30532227072,
+ "mcdMemoryReserved": 25625,
+ "mcdMemoryAllocated": 25625,
+ "replication": 0,
+ "clusterMembership": "active",
+ "recoveryType": "none",
+ "status": "healthy",
+ "otpNode": "ns_1@cb.local",
+ "thisNode": true,
+ "hostname": "172.17.0.2:8091",
+ "nodeUUID": "da79fcb65d6ae1f8b4fdfa3ccb2e4500",
+ "clusterCompatibility": 393222,
+ "version": "6.6.0-7909-enterprise",
+ "os": "x86_64-unknown-linux-gnu",
+ "cpuCount": 6,
+ "ports": {
+ "direct": 11210,
+ "httpsCAPI": 18092,
+ "httpsMgmt": 18091,
+ "distTCP": 21100,
+ "distTLS": 21150
+ },
+ "services": [
+ "cbas",
+ "eventing",
+ "fts",
+ "index",
+ "kv",
+ "n1ql"
+ ],
+ "nodeEncryption": false,
+ "configuredHostname": "127.0.0.1:8091",
+ "addressFamily": "inet",
+ "externalListeners": [
+ {
+ "afamily": "inet",
+ "nodeEncryption": false
+ },
+ {
+ "afamily": "inet6",
+ "nodeEncryption": false
+ }
+ ]
+ }
+ ],
+ "stats": {
+ "uri": "/pools/default/buckets/beer-sample/stats",
+ "directoryURI": "/pools/default/buckets/beer-sample/statsDirectory",
+ "nodeStatsListURI": "/pools/default/buckets/beer-sample/nodes"
+ },
+ "nodeLocator": "vbucket",
+ "saslPassword": "47809efed0156c874b91bbdfeba89912",
+ "ddocs": {
+ "uri": "/pools/default/buckets/beer-sample/ddocs"
+ },
+ "replicaIndex": true,
+ "autoCompactionSettings": false,
+ "maxTTL": 0,
+ "compressionMode": "passive",
+ "replicaNumber": 1,
+ "threadsNumber": 3,
+ "quota": {
+ "ram": 104857600,
+ "rawRAM": 104857600
+ },
+ "basicStats": {
+ "quotaPercentUsed": 32.70613861083984,
+ "opsPerSec": 1.1,
+ "diskFetches": 1,
+ "itemCount": 7303,
+ "diskUsed": 27690472,
+ "dataUsed": 13990431,
+ "memUsed": 34294872,
+ "vbActiveNumNonResident": 1
+ },
+ "evictionPolicy": "valueOnly",
+ "durabilityMinLevel": "none",
+ "conflictResolutionType": "seqno",
+ "bucketCapabilitiesVer": "",
+ "bucketCapabilities": [
+ "durableWrite",
+ "tombstonedUserXAttrs",
+ "couchapi",
+ "dcp",
+ "cbhello",
+ "touch",
+ "cccp",
+ "xdcrCheckpointing",
+ "nodesExt",
+ "xattr"
+ ]
+ },
+ {
+ "name": "gamesim-sample",
+ "uuid": "23ff61363bc4df9af4eb9c2198fc74d3",
+ "bucketType": "membase",
+ "authType": "sasl",
+ "uri": "/pools/default/buckets/gamesim-sample?bucket_uuid=23ff61363bc4df9af4eb9c2198fc74d3",
+ "streamingUri": "/pools/default/bucketsStreaming/gamesim-sample?bucket_uuid=23ff61363bc4df9af4eb9c2198fc74d3",
+ "localRandomKeyUri": "/pools/default/buckets/gamesim-sample/localRandomKey",
+ "controllers": {
+ "compactAll": "/pools/default/buckets/gamesim-sample/controller/compactBucket",
+ "compactDB": "/pools/default/buckets/gamesim-sample/controller/compactDatabases",
+ "purgeDeletes": "/pools/default/buckets/gamesim-sample/controller/unsafePurgeBucket",
+ "startRecovery": "/pools/default/buckets/gamesim-sample/controller/startRecovery"
+ },
+ "nodes": [
+ {
+ "couchApiBaseHTTPS": "https://172.17.0.2:18092/gamesim-sample%2B23ff61363bc4df9af4eb9c2198fc74d3",
+ "couchApiBase": "http://172.17.0.2:8092/gamesim-sample%2B23ff61363bc4df9af4eb9c2198fc74d3",
+ "systemStats": {
+ "cpu_utilization_rate": 15.21035598705502,
+ "cpu_stolen_rate": 0,
+ "swap_total": 0,
+ "swap_used": 0,
+ "mem_total": 33587437568,
+ "mem_free": 30532227072,
+ "mem_limit": 33587437568,
+ "cpu_cores_available": 6,
+ "allocstall": 0
+ },
+ "interestingStats": {
+ "cmd_get": 0,
+ "couch_docs_actual_disk_size": 102960477,
+ "couch_docs_data_size": 72439963,
+ "couch_spatial_data_size": 0,
+ "couch_spatial_disk_size": 0,
+ "couch_views_actual_disk_size": 796048,
+ "couch_views_data_size": 787744,
+ "curr_items": 39480,
+ "curr_items_tot": 39480,
+ "ep_bg_fetched": 0,
+ "get_hits": 0,
+ "mem_used": 118199752,
+ "ops": 0,
+ "vb_active_num_non_resident": 0,
+ "vb_replica_curr_items": 0
+ },
+ "uptime": "638",
+ "memoryTotal": 33587437568,
+ "memoryFree": 30532227072,
+ "mcdMemoryReserved": 25625,
+ "mcdMemoryAllocated": 25625,
+ "replication": 0,
+ "clusterMembership": "active",
+ "recoveryType": "none",
+ "status": "healthy",
+ "otpNode": "ns_1@cb.local",
+ "thisNode": true,
+ "hostname": "172.17.0.2:8091",
+ "nodeUUID": "da79fcb65d6ae1f8b4fdfa3ccb2e4500",
+ "clusterCompatibility": 393222,
+ "version": "6.6.0-7909-enterprise",
+ "os": "x86_64-unknown-linux-gnu",
+ "cpuCount": 6,
+ "ports": {
+ "direct": 11210,
+ "httpsCAPI": 18092,
+ "httpsMgmt": 18091,
+ "distTCP": 21100,
+ "distTLS": 21150
+ },
+ "services": [
+ "cbas",
+ "eventing",
+ "fts",
+ "index",
+ "kv",
+ "n1ql"
+ ],
+ "nodeEncryption": false,
+ "configuredHostname": "127.0.0.1:8091",
+ "addressFamily": "inet",
+ "externalListeners": [
+ {
+ "afamily": "inet",
+ "nodeEncryption": false
+ },
+ {
+ "afamily": "inet6",
+ "nodeEncryption": false
+ }
+ ]
+ }
+ ],
+ "stats": {
+ "uri": "/pools/default/buckets/gamesim-sample/stats",
+ "directoryURI": "/pools/default/buckets/gamesim-sample/statsDirectory",
+ "nodeStatsListURI": "/pools/default/buckets/gamesim-sample/nodes"
+ },
+ "nodeLocator": "vbucket",
+ "saslPassword": "39cf71a1da3f298bed52d19973dce967",
+ "ddocs": {
+ "uri": "/pools/default/buckets/gamesim-sample/ddocs"
+ },
+ "replicaIndex": true,
+ "autoCompactionSettings": false,
+ "maxTTL": 0,
+ "compressionMode": "passive",
+ "replicaNumber": 1,
+ "threadsNumber": 3,
+ "quota": {
+ "ram": 104857600,
+ "rawRAM": 104857600
+ },
+ "basicStats": {
+ "quotaPercentUsed": 28.21607208251953,
+ "opsPerSec": 1.1,
+ "diskFetches": 1,
+ "itemCount": 586,
+ "diskUsed": 13821793,
+ "dataUsed": 5371804,
+ "memUsed": 29586696,
+ "vbActiveNumNonResident": 1
+ },
+ "evictionPolicy": "valueOnly",
+ "durabilityMinLevel": "none",
+ "conflictResolutionType": "seqno",
+ "bucketCapabilitiesVer": "",
+ "bucketCapabilities": [
+ "durableWrite",
+ "tombstonedUserXAttrs",
+ "couchapi",
+ "dcp",
+ "cbhello",
+ "touch",
+ "cccp",
+ "xdcrCheckpointing",
+ "nodesExt",
+ "xattr"
+ ]
+ },
+ {
+ "name": "travel-sample",
+ "uuid": "68a336f9ec0e0d2150d56298c896d0a9",
+ "bucketType": "membase",
+ "authType": "sasl",
+ "uri": "/pools/default/buckets/travel-sample?bucket_uuid=68a336f9ec0e0d2150d56298c896d0a9",
+ "streamingUri": "/pools/default/bucketsStreaming/travel-sample?bucket_uuid=68a336f9ec0e0d2150d56298c896d0a9",
+ "localRandomKeyUri": "/pools/default/buckets/travel-sample/localRandomKey",
+ "controllers": {
+ "compactAll": "/pools/default/buckets/travel-sample/controller/compactBucket",
+ "compactDB": "/pools/default/buckets/travel-sample/controller/compactDatabases",
+ "purgeDeletes": "/pools/default/buckets/travel-sample/controller/unsafePurgeBucket",
+ "startRecovery": "/pools/default/buckets/travel-sample/controller/startRecovery"
+ },
+ "nodes": [
+ {
+ "couchApiBaseHTTPS": "https://172.17.0.2:18092/travel-sample%2B68a336f9ec0e0d2150d56298c896d0a9",
+ "couchApiBase": "http://172.17.0.2:8092/travel-sample%2B68a336f9ec0e0d2150d56298c896d0a9",
+ "systemStats": {
+ "cpu_utilization_rate": 15.21035598705502,
+ "cpu_stolen_rate": 0,
+ "swap_total": 0,
+ "swap_used": 0,
+ "mem_total": 33587437568,
+ "mem_free": 30532227072,
+ "mem_limit": 33587437568,
+ "cpu_cores_available": 6,
+ "allocstall": 0
+ },
+ "interestingStats": {
+ "cmd_get": 0,
+ "couch_docs_actual_disk_size": 102960477,
+ "couch_docs_data_size": 72439963,
+ "couch_spatial_data_size": 0,
+ "couch_spatial_disk_size": 0,
+ "couch_views_actual_disk_size": 796048,
+ "couch_views_data_size": 787744,
+ "curr_items": 39480,
+ "curr_items_tot": 39480,
+ "ep_bg_fetched": 0,
+ "get_hits": 0,
+ "mem_used": 118199752,
+ "ops": 0,
+ "vb_active_num_non_resident": 0,
+ "vb_replica_curr_items": 0
+ },
+ "uptime": "638",
+ "memoryTotal": 33587437568,
+ "memoryFree": 30532227072,
+ "mcdMemoryReserved": 25625,
+ "mcdMemoryAllocated": 25625,
+ "replication": 0,
+ "clusterMembership": "active",
+ "recoveryType": "none",
+ "status": "healthy",
+ "otpNode": "ns_1@cb.local",
+ "thisNode": true,
+ "hostname": "172.17.0.2:8091",
+ "nodeUUID": "da79fcb65d6ae1f8b4fdfa3ccb2e4500",
+ "clusterCompatibility": 393222,
+ "version": "6.6.0-7909-enterprise",
+ "os": "x86_64-unknown-linux-gnu",
+ "cpuCount": 6,
+ "ports": {
+ "direct": 11210,
+ "httpsCAPI": 18092,
+ "httpsMgmt": 18091,
+ "distTCP": 21100,
+ "distTLS": 21150
+ },
+ "services": [
+ "cbas",
+ "eventing",
+ "fts",
+ "index",
+ "kv",
+ "n1ql"
+ ],
+ "nodeEncryption": false,
+ "configuredHostname": "127.0.0.1:8091",
+ "addressFamily": "inet",
+ "externalListeners": [
+ {
+ "afamily": "inet",
+ "nodeEncryption": false
+ },
+ {
+ "afamily": "inet6",
+ "nodeEncryption": false
+ }
+ ]
+ }
+ ],
+ "stats": {
+ "uri": "/pools/default/buckets/travel-sample/stats",
+ "directoryURI": "/pools/default/buckets/travel-sample/statsDirectory",
+ "nodeStatsListURI": "/pools/default/buckets/travel-sample/nodes"
+ },
+ "nodeLocator": "vbucket",
+ "saslPassword": "c6be6d9be723b8b1f8eac4edb84a06ed",
+ "ddocs": {
+ "uri": "/pools/default/buckets/travel-sample/ddocs"
+ },
+ "replicaIndex": true,
+ "autoCompactionSettings": false,
+ "maxTTL": 0,
+ "compressionMode": "passive",
+ "replicaNumber": 1,
+ "threadsNumber": 3,
+ "quota": {
+ "ram": 104857600,
+ "rawRAM": 104857600
+ },
+ "basicStats": {
+ "quotaPercentUsed": 51.80185699462891,
+ "opsPerSec": 1.1,
+ "diskFetches": 1,
+ "itemCount": 31591,
+ "diskUsed": 62244260,
+ "dataUsed": 53865472,
+ "memUsed": 54318184,
+ "vbActiveNumNonResident": 1
+ },
+ "evictionPolicy": "valueOnly",
+ "durabilityMinLevel": "none",
+ "conflictResolutionType": "seqno",
+ "bucketCapabilitiesVer": "",
+ "bucketCapabilities": [
+ "durableWrite",
+ "tombstonedUserXAttrs",
+ "couchapi",
+ "dcp",
+ "cbhello",
+ "touch",
+ "cccp",
+ "xdcrCheckpointing",
+ "nodesExt",
+ "xattr"
+ ]
+ }
+]
diff --git a/src/go/plugin/go.d/modules/couchbase/testdata/config.json b/src/go/plugin/go.d/modules/couchbase/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/couchbase/testdata/config.yaml b/src/go/plugin/go.d/modules/couchbase/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchbase/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/couchdb/README.md b/src/go/plugin/go.d/modules/couchdb/README.md
new file mode 120000
index 000000000..14cff4d36
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/README.md
@@ -0,0 +1 @@
+integrations/couchdb.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/couchdb/charts.go b/src/go/plugin/go.d/modules/couchdb/charts.go
new file mode 100644
index 000000000..3d84471d2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/charts.go
@@ -0,0 +1,228 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchdb
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+type (
+ Charts = module.Charts
+ Dims = module.Dims
+ Vars = module.Vars
+)
+
+var dbActivityCharts = Charts{
+ {
+ ID: "activity",
+ Title: "Overall Activity",
+ Units: "requests/s",
+ Fam: "dbactivity",
+ Ctx: "couchdb.activity",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "couchdb_database_reads", Name: "DB reads", Algo: module.Incremental},
+ {ID: "couchdb_database_writes", Name: "DB writes", Algo: module.Incremental},
+ {ID: "couchdb_httpd_view_reads", Name: "View reads", Algo: module.Incremental},
+ },
+ },
+}
+
+var httpTrafficBreakdownCharts = Charts{
+ {
+ ID: "request_methods",
+ Title: "HTTP request methods",
+ Units: "requests/s",
+ Fam: "httptraffic",
+ Ctx: "couchdb.request_methods",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "couchdb_httpd_request_methods_COPY", Name: "COPY", Algo: module.Incremental},
+ {ID: "couchdb_httpd_request_methods_DELETE", Name: "DELETE", Algo: module.Incremental},
+ {ID: "couchdb_httpd_request_methods_GET", Name: "GET", Algo: module.Incremental},
+ {ID: "couchdb_httpd_request_methods_HEAD", Name: "HEAD", Algo: module.Incremental},
+ {ID: "couchdb_httpd_request_methods_OPTIONS", Name: "OPTIONS", Algo: module.Incremental},
+ {ID: "couchdb_httpd_request_methods_POST", Name: "POST", Algo: module.Incremental},
+ {ID: "couchdb_httpd_request_methods_PUT", Name: "PUT", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "response_codes",
+ Title: "HTTP response status codes",
+ Units: "responses/s",
+ Fam: "httptraffic",
+ Ctx: "couchdb.response_codes",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "couchdb_httpd_status_codes_200", Name: "200 OK", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_201", Name: "201 Created", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_202", Name: "202 Accepted", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_204", Name: "204 No Content", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_206", Name: "206 Partial Content", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_301", Name: "301 Moved Permanently", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_302", Name: "302 Found", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_304", Name: "304 Not Modified", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_400", Name: "400 Bad Request", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_401", Name: "401 Unauthorized", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_403", Name: "403 Forbidden", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_404", Name: "404 Not Found", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_406", Name: "406 Not Acceptable", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_409", Name: "409 Conflict", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_412", Name: "412 Precondition Failed", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_413", Name: "413 Request Entity Too Long", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_414", Name: "414 Request URI Too Long", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_415", Name: "415 Unsupported Media Type", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_416", Name: "416 Requested Range Not Satisfiable", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_417", Name: "417 Expectation Failed", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_500", Name: "500 Internal Server Error", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_501", Name: "501 Not Implemented", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_503", Name: "503 Service Unavailable", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "response_code_classes",
+ Title: "HTTP response status code classes",
+ Units: "responses/s",
+ Fam: "httptraffic",
+ Ctx: "couchdb.response_code_classes",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "couchdb_httpd_status_codes_2xx", Name: "2xx Success", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_3xx", Name: "3xx Redirection", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_4xx", Name: "4xx Client error", Algo: module.Incremental},
+ {ID: "couchdb_httpd_status_codes_5xx", Name: "5xx Server error", Algo: module.Incremental},
+ },
+ },
+}
+
+var serverOperationsCharts = Charts{
+ {
+ ID: "active_tasks",
+ Title: "Active task breakdown",
+ Units: "tasks",
+ Fam: "ops",
+ Ctx: "couchdb.active_tasks",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "active_tasks_indexer", Name: "Indexer"},
+ {ID: "active_tasks_database_compaction", Name: "DB Compaction"},
+ {ID: "active_tasks_replication", Name: "Replication"},
+ {ID: "active_tasks_view_compaction", Name: "View Compaction"},
+ },
+ },
+ {
+ ID: "replicator_jobs",
+ Title: "Replicator job breakdown",
+ Units: "jobs",
+ Fam: "ops",
+ Ctx: "couchdb.replicator_jobs",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "couch_replicator_jobs_running", Name: "Running"},
+ {ID: "couch_replicator_jobs_pending", Name: "Pending"},
+ {ID: "couch_replicator_jobs_crashed", Name: "Crashed"},
+ {ID: "internal_replication_jobs", Name: "Internal replication jobs"},
+ },
+ },
+ {
+ ID: "open_files",
+ Title: "Open files",
+ Units: "files",
+ Fam: "ops",
+ Ctx: "couchdb.open_files",
+ Dims: Dims{
+ {ID: "couchdb_open_os_files", Name: "# files"},
+ },
+ },
+}
+
+var erlangStatisticsCharts = Charts{
+ {
+ ID: "erlang_memory",
+ Title: "Erlang VM memory usage",
+ Units: "B",
+ Fam: "erlang",
+ Ctx: "couchdb.erlang_vm_memory",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "memory_atom", Name: "atom"},
+ {ID: "memory_binary", Name: "binaries"},
+ {ID: "memory_code", Name: "code"},
+ {ID: "memory_ets", Name: "ets"},
+ {ID: "memory_processes", Name: "procs"},
+ {ID: "memory_other", Name: "other"},
+ },
+ },
+ {
+ ID: "erlang_proc_counts",
+ Title: "Process counts",
+ Units: "processes",
+ Fam: "erlang",
+ Ctx: "couchdb.proccounts",
+ Dims: Dims{
+ {ID: "os_proc_count", Name: "OS procs"},
+ {ID: "process_count", Name: "erl procs"},
+ },
+ },
+ {
+ ID: "erlang_peak_msg_queue",
+ Title: "Peak message queue size",
+ Units: "messages",
+ Fam: "erlang",
+ Ctx: "couchdb.peakmsgqueue",
+ Dims: Dims{
+ {ID: "peak_msg_queue", Name: "peak size"},
+ },
+ },
+ {
+ ID: "erlang_reductions",
+ Title: "Erlang reductions",
+ Units: "reductions",
+ Fam: "erlang",
+ Ctx: "couchdb.reductions",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "reductions", Name: "reductions", Algo: module.Incremental},
+ },
+ },
+}
+
+var (
+ dbSpecificCharts = Charts{
+ {
+ ID: "db_sizes_file",
+ Title: "Database sizes (file)",
+ Units: "KiB",
+ Fam: "perdbstats",
+ Ctx: "couchdb.db_sizes_file",
+ },
+ {
+ ID: "db_sizes_external",
+ Title: "Database sizes (external)",
+ Units: "KiB",
+ Fam: "perdbstats",
+ Ctx: "couchdb.db_sizes_external",
+ },
+ {
+ ID: "db_sizes_active",
+ Title: "Database sizes (active)",
+ Units: "KiB",
+ Fam: "perdbstats",
+ Ctx: "couchdb.db_sizes_active",
+ },
+ {
+ ID: "db_doc_counts",
+ Title: "Database # of docs",
+ Units: "docs",
+ Fam: "perdbstats",
+ Ctx: "couchdb.db_doc_count",
+ },
+ {
+ ID: "db_doc_del_counts",
+ Title: "Database # of deleted docs",
+ Units: "docs",
+ Fam: "perdbstats",
+ Ctx: "couchdb.db_doc_del_count",
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/couchdb/collect.go b/src/go/plugin/go.d/modules/couchdb/collect.go
new file mode 100644
index 000000000..21b38fb3a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/collect.go
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchdb
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net/http"
+ "strings"
+ "sync"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ urlPathActiveTasks = "/_active_tasks"
+ urlPathOverviewStats = "/_node/%s/_stats"
+ urlPathSystemStats = "/_node/%s/_system"
+ urlPathDatabases = "/_dbs_info"
+
+ httpStatusCodePrefix = "couchdb_httpd_status_codes_"
+ httpStatusCodePrefixLen = len(httpStatusCodePrefix)
+)
+
+func (cdb *CouchDB) collect() (map[string]int64, error) {
+ ms := cdb.scrapeCouchDB()
+ if ms.empty() {
+ return nil, nil
+ }
+
+ collected := make(map[string]int64)
+ cdb.collectNodeStats(collected, ms)
+ cdb.collectSystemStats(collected, ms)
+ cdb.collectActiveTasks(collected, ms)
+ cdb.collectDBStats(collected, ms)
+
+ return collected, nil
+}
+
+func (cdb *CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) {
+ if !ms.hasNodeStats() {
+ return
+ }
+
+ for metric, value := range stm.ToMap(ms.NodeStats) {
+ collected[metric] = value
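+ // Roll individual HTTP status code counters up into 2xx/3xx/4xx/5xx class totals (used by the response_code_classes chart).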
+ if strings.HasPrefix(metric, httpStatusCodePrefix) {
+ code := metric[httpStatusCodePrefixLen:]
+ collected["couchdb_httpd_status_codes_"+string(code[0])+"xx"] += value
+ }
+ }
+}
+
+func (cdb *CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) {
+ if !ms.hasNodeSystem() {
+ return
+ }
+
+ for metric, value := range stm.ToMap(ms.NodeSystem) {
+ collected[metric] = value
+ }
+
+ collected["peak_msg_queue"] = findMaxMQSize(ms.NodeSystem.MessageQueues)
+}
+
+func (cdb *CouchDB) collectActiveTasks(collected map[string]int64, ms *cdbMetrics) {
+ collected["active_tasks_indexer"] = 0
+ collected["active_tasks_database_compaction"] = 0
+ collected["active_tasks_replication"] = 0
+ collected["active_tasks_view_compaction"] = 0
+
+ if !ms.hasActiveTasks() {
+ return
+ }
+
+ for _, task := range ms.ActiveTasks {
+ collected["active_tasks_"+task.Type]++
+ }
+}
+
+func (cdb *CouchDB) collectDBStats(collected map[string]int64, ms *cdbMetrics) {
+ if !ms.hasDBStats() {
+ return
+ }
+
+ for _, dbStats := range ms.DBStats {
+ if dbStats.Error != "" {
+ cdb.Warning("database '", dbStats.Key, "' doesn't exist")
+ continue
+ }
+ merge(collected, stm.ToMap(dbStats.Info), "db_"+dbStats.Key)
+ }
+}
+
+func (cdb *CouchDB) scrapeCouchDB() *cdbMetrics {
+ ms := &cdbMetrics{}
+ wg := &sync.WaitGroup{}
+
+ wg.Add(1)
+ go func() { defer wg.Done(); cdb.scrapeNodeStats(ms) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); cdb.scrapeSystemStats(ms) }()
+
+ wg.Add(1)
+ go func() { defer wg.Done(); cdb.scrapeActiveTasks(ms) }()
+
+ if len(cdb.databases) > 0 {
+ wg.Add(1)
+ go func() { defer wg.Done(); cdb.scrapeDBStats(ms) }()
+ }
+
+ wg.Wait()
+ return ms
+}
+
+func (cdb *CouchDB) scrapeNodeStats(ms *cdbMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(cdb.Request, fmt.Sprintf(urlPathOverviewStats, cdb.Config.Node))
+
+ var stats cdbNodeStats
+ if err := cdb.doOKDecode(req, &stats); err != nil {
+ cdb.Warning(err)
+ return
+ }
+ ms.NodeStats = &stats
+}
+
+func (cdb *CouchDB) scrapeSystemStats(ms *cdbMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(cdb.Request, fmt.Sprintf(urlPathSystemStats, cdb.Config.Node))
+
+ var stats cdbNodeSystem
+ if err := cdb.doOKDecode(req, &stats); err != nil {
+ cdb.Warning(err)
+ return
+ }
+ ms.NodeSystem = &stats
+}
+
+func (cdb *CouchDB) scrapeActiveTasks(ms *cdbMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(cdb.Request, urlPathActiveTasks)
+
+ var stats []cdbActiveTask
+ if err := cdb.doOKDecode(req, &stats); err != nil {
+ cdb.Warning(err)
+ return
+ }
+ ms.ActiveTasks = stats
+}
+
+func (cdb *CouchDB) scrapeDBStats(ms *cdbMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(cdb.Request, urlPathDatabases)
+ req.Method = http.MethodPost
+ req.Header.Add("Accept", "application/json")
+ req.Header.Add("Content-Type", "application/json")
+
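+ // _dbs_info expects a POST body of the form {"keys": ["db1", "db2", ...]} listing the databases of interest.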
+ var q struct {
+ Keys []string `json:"keys"`
+ }
+ q.Keys = cdb.databases
+ body, err := json.Marshal(q)
+ if err != nil {
+ cdb.Error(err)
+ return
+ }
+ req.Body = io.NopCloser(bytes.NewReader(body))
+
+ var stats []cdbDBStats
+ if err := cdb.doOKDecode(req, &stats); err != nil {
+ cdb.Warning(err)
+ return
+ }
+ ms.DBStats = stats
+}
+
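+// findMaxMQSize returns the largest Erlang message queue size reported in message_queues;
+// a queue entry may be a plain number or an object carrying a "count" field.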
+func findMaxMQSize(MessageQueues map[string]interface{}) int64 {
+ var maxSize float64
+ for _, mq := range MessageQueues {
+ switch mqSize := mq.(type) {
+ case float64:
+ maxSize = math.Max(maxSize, mqSize)
+ case map[string]interface{}:
+ if v, ok := mqSize["count"].(float64); ok {
+ maxSize = math.Max(maxSize, v)
+ }
+ }
+ }
+ return int64(maxSize)
+}
+
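+// pingCouchDB verifies that the configured URL is a CouchDB endpoint by checking
+// that GET / returns the standard {"couchdb":"Welcome"} greeting.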
+func (cdb *CouchDB) pingCouchDB() error {
+ req, _ := web.NewHTTPRequest(cdb.Request)
+
+ var info struct{ Couchdb string }
+ if err := cdb.doOKDecode(req, &info); err != nil {
+ return err
+ }
+
+ if info.Couchdb != "Welcome" {
+ return errors.New("not a CouchDB endpoint")
+ }
+
+ return nil
+}
+
+func (cdb *CouchDB) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := cdb.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ // TODO: read resp body, it contains reason
+ // ex.: {"error":"bad_request","reason":"`keys` member must exist."} (400)
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
+func merge(dst, src map[string]int64, prefix string) {
+ for k, v := range src {
+ dst[prefix+"_"+k] = v
+ }
+}
diff --git a/src/go/plugin/go.d/modules/couchdb/config_schema.json b/src/go/plugin/go.d/modules/couchdb/config_schema.json
new file mode 100644
index 000000000..0df439b07
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/config_schema.json
@@ -0,0 +1,197 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "CouchDB collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the CouchDB web server.",
+ "type": "string",
+ "default": "http://127.0.0.1:5984",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "node": {
+ "title": "Node name",
+ "description": "CouchDB node name. Same as -name vm.args argument.",
+ "type": "string",
+ "default": "_local"
+ },
+ "databases": {
+ "title": "Databases",
+ "description": "A space-separated list of database names for which you want to collect data. Leave blank to exclude database statistics.",
+ "type": "string"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url",
+ "node"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "node",
+ "databases"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/couchdb/couchdb.go b/src/go/plugin/go.d/modules/couchdb/couchdb.go
new file mode 100644
index 000000000..56563ec7b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/couchdb.go
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchdb
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("couchdb", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *CouchDB {
+ return &CouchDB{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:5984",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ },
+ Node: "_local",
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ Node string `yaml:"node,omitempty" json:"node"`
+ Databases string `yaml:"databases,omitempty" json:"databases"`
+}
+
+type CouchDB struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ databases []string
+}
+
+func (cdb *CouchDB) Configuration() any {
+ return cdb.Config
+}
+
+func (cdb *CouchDB) Init() error {
+ err := cdb.validateConfig()
+ if err != nil {
+ cdb.Errorf("check configuration: %v", err)
+ return err
+ }
+
+ cdb.databases = strings.Fields(cdb.Config.Databases)
+
+ httpClient, err := cdb.initHTTPClient()
+ if err != nil {
+ cdb.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ cdb.httpClient = httpClient
+
+ charts, err := cdb.initCharts()
+ if err != nil {
+ cdb.Errorf("init charts: %v", err)
+ return err
+ }
+ cdb.charts = charts
+
+ return nil
+}
+
+func (cdb *CouchDB) Check() error {
+ if err := cdb.pingCouchDB(); err != nil {
+ cdb.Error(err)
+ return err
+ }
+
+ mx, err := cdb.collect()
+ if err != nil {
+ cdb.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (cdb *CouchDB) Charts() *Charts {
+ return cdb.charts
+}
+
+func (cdb *CouchDB) Collect() map[string]int64 {
+ mx, err := cdb.collect()
+ if err != nil {
+ cdb.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (cdb *CouchDB) Cleanup() {
+ if cdb.httpClient == nil {
+ return
+ }
+ cdb.httpClient.CloseIdleConnections()
+}
diff --git a/src/go/plugin/go.d/modules/couchdb/couchdb_test.go b/src/go/plugin/go.d/modules/couchdb/couchdb_test.go
new file mode 100644
index 000000000..99b7825fd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/couchdb_test.go
@@ -0,0 +1,464 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchdb
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer311Root, _ = os.ReadFile("testdata/v3.1.1/root.json")
+ dataVer311ActiveTasks, _ = os.ReadFile("testdata/v3.1.1/active_tasks.json")
+ dataVer311NodeStats, _ = os.ReadFile("testdata/v3.1.1/node_stats.json")
+ dataVer311NodeSystem, _ = os.ReadFile("testdata/v3.1.1/node_system.json")
+ dataVer311DbsInfo, _ = os.ReadFile("testdata/v3.1.1/dbs_info.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer311Root": dataVer311Root,
+ "dataVer311ActiveTasks": dataVer311ActiveTasks,
+ "dataVer311NodeStats": dataVer311NodeStats,
+ "dataVer311NodeSystem": dataVer311NodeSystem,
+ "dataVer311DbsInfo": dataVer311DbsInfo,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestCouchDB_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &CouchDB{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestCouchDB_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantNumOfCharts int
+ wantFail bool
+ }{
+ "default": {
+ wantNumOfCharts: numOfCharts(
+ dbActivityCharts,
+ httpTrafficBreakdownCharts,
+ serverOperationsCharts,
+ erlangStatisticsCharts,
+ ),
+ config: New().Config,
+ },
+ "URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ }},
+ },
+ "invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ }},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ es := New()
+ es.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, es.Init())
+ } else {
+ assert.NoError(t, es.Init())
+ assert.Equal(t, test.wantNumOfCharts, len(*es.Charts()))
+ }
+ })
+ }
+}
+
+func TestCouchDB_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (cdb *CouchDB, cleanup func())
+ wantFail bool
+ }{
+ "valid data": {prepare: prepareCouchDBValidData},
+ "invalid data": {prepare: prepareCouchDBInvalidData, wantFail: true},
+ "404": {prepare: prepareCouchDB404, wantFail: true},
+ "connection refused": {prepare: prepareCouchDBConnectionRefused, wantFail: true},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ cdb, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, cdb.Check())
+ } else {
+ assert.NoError(t, cdb.Check())
+ }
+ })
+ }
+}
+
+func TestCouchDB_Charts(t *testing.T) {
+ assert.Nil(t, New().Charts())
+}
+
+func TestCouchDB_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestCouchDB_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *CouchDB
+ wantCollected map[string]int64
+ checkCharts bool
+ }{
+ "all stats": {
+ prepare: func() *CouchDB {
+ cdb := New()
+ cdb.Config.Databases = "db1 db2"
+ return cdb
+ },
+ wantCollected: map[string]int64{
+
+ // node stats
+ "couch_replicator_jobs_crashed": 1,
+ "couch_replicator_jobs_pending": 1,
+ "couch_replicator_jobs_running": 1,
+ "couchdb_database_reads": 1,
+ "couchdb_database_writes": 14,
+ "couchdb_httpd_request_methods_COPY": 1,
+ "couchdb_httpd_request_methods_DELETE": 1,
+ "couchdb_httpd_request_methods_GET": 75544,
+ "couchdb_httpd_request_methods_HEAD": 1,
+ "couchdb_httpd_request_methods_OPTIONS": 1,
+ "couchdb_httpd_request_methods_POST": 15,
+ "couchdb_httpd_request_methods_PUT": 3,
+ "couchdb_httpd_status_codes_200": 75294,
+ "couchdb_httpd_status_codes_201": 15,
+ "couchdb_httpd_status_codes_202": 1,
+ "couchdb_httpd_status_codes_204": 1,
+ "couchdb_httpd_status_codes_206": 1,
+ "couchdb_httpd_status_codes_301": 1,
+ "couchdb_httpd_status_codes_302": 1,
+ "couchdb_httpd_status_codes_304": 1,
+ "couchdb_httpd_status_codes_400": 1,
+ "couchdb_httpd_status_codes_401": 20,
+ "couchdb_httpd_status_codes_403": 1,
+ "couchdb_httpd_status_codes_404": 225,
+ "couchdb_httpd_status_codes_405": 1,
+ "couchdb_httpd_status_codes_406": 1,
+ "couchdb_httpd_status_codes_409": 1,
+ "couchdb_httpd_status_codes_412": 3,
+ "couchdb_httpd_status_codes_413": 1,
+ "couchdb_httpd_status_codes_414": 1,
+ "couchdb_httpd_status_codes_415": 1,
+ "couchdb_httpd_status_codes_416": 1,
+ "couchdb_httpd_status_codes_417": 1,
+ "couchdb_httpd_status_codes_500": 1,
+ "couchdb_httpd_status_codes_501": 1,
+ "couchdb_httpd_status_codes_503": 1,
+ "couchdb_httpd_status_codes_2xx": 75312,
+ "couchdb_httpd_status_codes_3xx": 3,
+ "couchdb_httpd_status_codes_4xx": 258,
+ "couchdb_httpd_status_codes_5xx": 3,
+ "couchdb_httpd_view_reads": 1,
+ "couchdb_open_os_files": 1,
+
+ // node system
+ "context_switches": 22614499,
+ "ets_table_count": 116,
+ "internal_replication_jobs": 1,
+ "io_input": 49674812,
+ "io_output": 686400800,
+ "memory_atom_used": 488328,
+ "memory_atom": 504433,
+ "memory_binary": 297696,
+ "memory_code": 11252688,
+ "memory_ets": 1579120,
+ "memory_other": 20427855,
+ "memory_processes": 9161448,
+ "os_proc_count": 1,
+ "peak_msg_queue": 2,
+ "process_count": 296,
+ "reductions": 43211228312,
+ "run_queue": 1,
+
+ // active tasks
+ "active_tasks_database_compaction": 1,
+ "active_tasks_indexer": 2,
+ "active_tasks_replication": 1,
+ "active_tasks_view_compaction": 1,
+
+ // databases
+ "db_db1_db_doc_counts": 14,
+ "db_db1_db_doc_del_counts": 1,
+ "db_db1_db_sizes_active": 2818,
+ "db_db1_db_sizes_external": 588,
+ "db_db1_db_sizes_file": 74115,
+
+ "db_db2_db_doc_counts": 15,
+ "db_db2_db_doc_del_counts": 1,
+ "db_db2_db_sizes_active": 1818,
+ "db_db2_db_sizes_external": 288,
+ "db_db2_db_sizes_file": 7415,
+ },
+ checkCharts: true,
+ },
+ "wrong node": {
+ prepare: func() *CouchDB {
+ cdb := New()
+ cdb.Config.Node = "bad_node@bad_host"
+ cdb.Config.Databases = "db1 db2"
+ return cdb
+ },
+ wantCollected: map[string]int64{
+
+ // node stats
+
+ // node system
+
+ // active tasks
+ "active_tasks_database_compaction": 1,
+ "active_tasks_indexer": 2,
+ "active_tasks_replication": 1,
+ "active_tasks_view_compaction": 1,
+
+ // databases
+ "db_db1_db_doc_counts": 14,
+ "db_db1_db_doc_del_counts": 1,
+ "db_db1_db_sizes_active": 2818,
+ "db_db1_db_sizes_external": 588,
+ "db_db1_db_sizes_file": 74115,
+
+ "db_db2_db_doc_counts": 15,
+ "db_db2_db_doc_del_counts": 1,
+ "db_db2_db_sizes_active": 1818,
+ "db_db2_db_sizes_external": 288,
+ "db_db2_db_sizes_file": 7415,
+ },
+ checkCharts: false,
+ },
+ "wrong database": {
+ prepare: func() *CouchDB {
+ cdb := New()
+ cdb.Config.Databases = "bad_db db1 db2"
+ return cdb
+ },
+ wantCollected: map[string]int64{
+
+ // node stats
+ "couch_replicator_jobs_crashed": 1,
+ "couch_replicator_jobs_pending": 1,
+ "couch_replicator_jobs_running": 1,
+ "couchdb_database_reads": 1,
+ "couchdb_database_writes": 14,
+ "couchdb_httpd_request_methods_COPY": 1,
+ "couchdb_httpd_request_methods_DELETE": 1,
+ "couchdb_httpd_request_methods_GET": 75544,
+ "couchdb_httpd_request_methods_HEAD": 1,
+ "couchdb_httpd_request_methods_OPTIONS": 1,
+ "couchdb_httpd_request_methods_POST": 15,
+ "couchdb_httpd_request_methods_PUT": 3,
+ "couchdb_httpd_status_codes_200": 75294,
+ "couchdb_httpd_status_codes_201": 15,
+ "couchdb_httpd_status_codes_202": 1,
+ "couchdb_httpd_status_codes_204": 1,
+ "couchdb_httpd_status_codes_206": 1,
+ "couchdb_httpd_status_codes_301": 1,
+ "couchdb_httpd_status_codes_302": 1,
+ "couchdb_httpd_status_codes_304": 1,
+ "couchdb_httpd_status_codes_400": 1,
+ "couchdb_httpd_status_codes_401": 20,
+ "couchdb_httpd_status_codes_403": 1,
+ "couchdb_httpd_status_codes_404": 225,
+ "couchdb_httpd_status_codes_405": 1,
+ "couchdb_httpd_status_codes_406": 1,
+ "couchdb_httpd_status_codes_409": 1,
+ "couchdb_httpd_status_codes_412": 3,
+ "couchdb_httpd_status_codes_413": 1,
+ "couchdb_httpd_status_codes_414": 1,
+ "couchdb_httpd_status_codes_415": 1,
+ "couchdb_httpd_status_codes_416": 1,
+ "couchdb_httpd_status_codes_417": 1,
+ "couchdb_httpd_status_codes_500": 1,
+ "couchdb_httpd_status_codes_501": 1,
+ "couchdb_httpd_status_codes_503": 1,
+ "couchdb_httpd_status_codes_2xx": 75312,
+ "couchdb_httpd_status_codes_3xx": 3,
+ "couchdb_httpd_status_codes_4xx": 258,
+ "couchdb_httpd_status_codes_5xx": 3,
+ "couchdb_httpd_view_reads": 1,
+ "couchdb_open_os_files": 1,
+
+ // node system
+ "context_switches": 22614499,
+ "ets_table_count": 116,
+ "internal_replication_jobs": 1,
+ "io_input": 49674812,
+ "io_output": 686400800,
+ "memory_atom_used": 488328,
+ "memory_atom": 504433,
+ "memory_binary": 297696,
+ "memory_code": 11252688,
+ "memory_ets": 1579120,
+ "memory_other": 20427855,
+ "memory_processes": 9161448,
+ "os_proc_count": 1,
+ "peak_msg_queue": 2,
+ "process_count": 296,
+ "reductions": 43211228312,
+ "run_queue": 1,
+
+ // active tasks
+ "active_tasks_database_compaction": 1,
+ "active_tasks_indexer": 2,
+ "active_tasks_replication": 1,
+ "active_tasks_view_compaction": 1,
+
+ // databases
+ "db_db1_db_doc_counts": 14,
+ "db_db1_db_doc_del_counts": 1,
+ "db_db1_db_sizes_active": 2818,
+ "db_db1_db_sizes_external": 588,
+ "db_db1_db_sizes_file": 74115,
+
+ "db_db2_db_doc_counts": 15,
+ "db_db2_db_doc_del_counts": 1,
+ "db_db2_db_sizes_active": 1818,
+ "db_db2_db_sizes_external": 288,
+ "db_db2_db_sizes_file": 7415,
+ },
+ checkCharts: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ cdb, cleanup := prepareCouchDB(t, test.prepare)
+ defer cleanup()
+
+ var collected map[string]int64
+ for i := 0; i < 10; i++ {
+ collected = cdb.Collect()
+ }
+
+ assert.Equal(t, test.wantCollected, collected)
+ if test.checkCharts {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, cdb, collected)
+ }
+ })
+ }
+}
+
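+// ensureCollectedHasAllChartsDimsVarsIDs asserts that every dimension and variable
+// of every non-obsolete chart has a corresponding key in the collected metrics map.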
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, cdb *CouchDB, collected map[string]int64) {
+ for _, chart := range *cdb.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
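+// prepareCouchDB builds a collector with the given constructor, points it at a stub
+// CouchDB endpoint, runs Init, and returns the collector together with a cleanup
+// function that shuts the test server down.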
+func prepareCouchDB(t *testing.T, createCDB func() *CouchDB) (cdb *CouchDB, cleanup func()) {
+ t.Helper()
+ cdb = createCDB()
+ srv := prepareCouchDBEndpoint()
+ cdb.URL = srv.URL
+
+ require.NoError(t, cdb.Init())
+
+ return cdb, srv.Close
+}
+
+func prepareCouchDBValidData(t *testing.T) (cdb *CouchDB, cleanup func()) {
+ return prepareCouchDB(t, New)
+}
+
+func prepareCouchDBInvalidData(t *testing.T) (*CouchDB, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ cdb := New()
+ cdb.URL = srv.URL
+ require.NoError(t, cdb.Init())
+
+ return cdb, srv.Close
+}
+
+func prepareCouchDB404(t *testing.T) (*CouchDB, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ cdb := New()
+ cdb.URL = srv.URL
+ require.NoError(t, cdb.Init())
+
+ return cdb, srv.Close
+}
+
+func prepareCouchDBConnectionRefused(t *testing.T) (*CouchDB, func()) {
+ t.Helper()
+ cdb := New()
+ cdb.URL = "http://127.0.0.1:38001"
+ require.NoError(t, cdb.Init())
+
+ return cdb, func() {}
+}
+
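+// prepareCouchDBEndpoint serves the v3.1.1 testdata fixtures for the endpoints the
+// collector queries; any other path gets a 404 response.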
+func prepareCouchDBEndpoint() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/_node/_local/_stats":
+ _, _ = w.Write(dataVer311NodeStats)
+ case "/_node/_local/_system":
+ _, _ = w.Write(dataVer311NodeSystem)
+ case urlPathActiveTasks:
+ _, _ = w.Write(dataVer311ActiveTasks)
+ case "/_dbs_info":
+ _, _ = w.Write(dataVer311DbsInfo)
+ case "/":
+ _, _ = w.Write(dataVer311Root)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
+
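+// numOfCharts returns the total number of charts across the given chart groups.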
+func numOfCharts(charts ...Charts) (num int) {
+ for _, v := range charts {
+ num += len(v)
+ }
+ return num
+}
diff --git a/src/go/plugin/go.d/modules/couchdb/init.go b/src/go/plugin/go.d/modules/couchdb/init.go
new file mode 100644
index 000000000..65e555749
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/init.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchdb
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
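+// validateConfig checks that 'url' and 'node' are set and that the HTTP request
+// configuration can be turned into a valid request.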
+func (cdb *CouchDB) validateConfig() error {
+ if cdb.URL == "" {
+ return errors.New("URL not set")
+ }
+ if cdb.Node == "" {
+ return errors.New("'node' not set")
+ }
+ if _, err := web.NewHTTPRequest(cdb.Request); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (cdb *CouchDB) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(cdb.Client)
+}
+
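+// initCharts assembles the chart set: activity, HTTP traffic, server operations and
+// Erlang VM charts, plus per-database charts with one dimension per configured
+// database (dimension IDs of the form "db_<database>_<chart id>").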
+func (cdb *CouchDB) initCharts() (*Charts, error) {
+ charts := module.Charts{}
+
+ if err := charts.Add(*dbActivityCharts.Copy()...); err != nil {
+ return nil, err
+ }
+ if err := charts.Add(*httpTrafficBreakdownCharts.Copy()...); err != nil {
+ return nil, err
+ }
+ if err := charts.Add(*serverOperationsCharts.Copy()...); err != nil {
+ return nil, err
+ }
+ if len(cdb.databases) != 0 {
+ dbCharts := dbSpecificCharts.Copy()
+
+ if err := charts.Add(*dbCharts...); err != nil {
+ return nil, err
+ }
+
+ for _, chart := range *dbCharts {
+ for _, db := range cdb.databases {
+ if err := chart.AddDim(&module.Dim{ID: "db_" + db + "_" + chart.ID, Name: db}); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ }
+ if err := charts.Add(*erlangStatisticsCharts.Copy()...); err != nil {
+ return nil, err
+ }
+
+ if len(charts) == 0 {
+ return nil, errors.New("zero charts")
+ }
+ return &charts, nil
+}
diff --git a/src/go/plugin/go.d/modules/couchdb/integrations/couchdb.md b/src/go/plugin/go.d/modules/couchdb/integrations/couchdb.md
new file mode 100644
index 000000000..5e7f578cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/integrations/couchdb.md
@@ -0,0 +1,260 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/couchdb/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/couchdb/metadata.yaml"
+sidebar_label: "CouchDB"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# CouchDB
+
+
+<img src="https://netdata.cloud/img/couchdb.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: couchdb
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors CouchDB servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per CouchDB instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| couchdb.activity | db_reads, db_writes, view_reads | requests/s |
+| couchdb.request_methods | copy, delete, get, head, options, post, put | requests/s |
+| couchdb.response_codes | 200, 201, 202, 204, 206, 301, 302, 304, 400, 401, 403, 404, 406, 409, 412, 413, 414, 415, 416, 417, 500, 501, 503 | responses/s |
+| couchdb.response_code_classes | 2xx, 3xx, 4xx, 5xx | responses/s |
+| couchdb.active_tasks | indexer, db_compaction, replication, view_compaction | tasks |
+| couchdb.replicator_jobs | running, pending, crashed, internal_replication_jobs | jobs |
+| couchdb.open_files | files | files |
+| couchdb.erlang_vm_memory | atom, binaries, code, ets, procs, other | B |
+| couchdb.proccounts | os_procs, erl_procs | processes |
+| couchdb.peakmsgqueue | peak_size | messages |
+| couchdb.reductions | reductions | reductions |
+| couchdb.db_sizes_file | a dimension per database | KiB |
+| couchdb.db_sizes_external | a dimension per database | KiB |
+| couchdb.db_sizes_active | a dimension per database | KiB |
+| couchdb.db_doc_count | a dimension per database | docs |
+| couchdb.db_doc_del_count | a dimension per database | docs |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/couchdb.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/couchdb.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:5984 | yes |
+| node | CouchDB node name. Same as the `-name` argument in `vm.args`. | _local | no |
+| databases | List of database names for which db-specific stats should be displayed, space separated. | | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| timeout | HTTP request timeout. | 2 | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:5984
+
+```
+</details>
+
+##### Basic HTTP auth
+
+Local server with basic HTTP authentication, node name and multiple databases defined. Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file. Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:5984
+ node: couchdb@127.0.0.1
+ databases: my-db other-db
+ username: foo
+ password: bar
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:5984
+
+ - name: remote
+ url: http://203.0.113.0:5984
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `couchdb` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m couchdb
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `couchdb` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep couchdb
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep couchdb /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep couchdb
+```
+
+
diff --git a/src/go/plugin/go.d/modules/couchdb/metadata.yaml b/src/go/plugin/go.d/modules/couchdb/metadata.yaml
new file mode 100644
index 000000000..2f0036db2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/metadata.yaml
@@ -0,0 +1,323 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-couchdb
+ plugin_name: go.d.plugin
+ module_name: couchdb
+ monitored_instance:
+ name: CouchDB
+ link: https://couchdb.apache.org/
+ icon_filename: couchdb.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - couchdb
+ - databases
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors CouchDB servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/couchdb.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:5984
+ required: true
+ - name: node
+        description: CouchDB node name. Same as the `-name` argument in `vm.args`.
+ default_value: "_local"
+ required: false
+ - name: databases
+ description: List of database names for which db-specific stats should be displayed, space separated.
+ default_value: ""
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 2
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+        description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+        description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ folding:
+ title: Example
+ enabled: true
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:5984
+ - name: Basic HTTP auth
+ description: >
+ Local server with basic HTTP authentication, node name and multiple databases defined.
+ Make sure to match the node name with the `NODENAME` value in your CouchDB's `etc/vm.args` file.
+ Typically, this is of the form `couchdb@fully.qualified.domain.name` in a cluster, or `couchdb@127.0.0.1` for a single-node server.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:5984
+ node: couchdb@127.0.0.1
+ databases: my-db other-db
+ username: foo
+ password: bar
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:5984
+
+ - name: remote
+ url: http://203.0.113.0:5984
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: couchdb.activity
+ description: Overall Activity
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: db_reads
+ - name: db_writes
+ - name: view_reads
+ - name: couchdb.request_methods
+ description: HTTP request methods
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: copy
+ - name: delete
+ - name: get
+ - name: head
+ - name: options
+ - name: post
+ - name: put
+ - name: couchdb.response_codes
+ description: HTTP response status codes
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: "200"
+ - name: "201"
+ - name: "202"
+ - name: "204"
+ - name: "206"
+ - name: "301"
+ - name: "302"
+ - name: "304"
+ - name: "400"
+ - name: "401"
+ - name: "403"
+ - name: "404"
+ - name: "406"
+ - name: "409"
+ - name: "412"
+ - name: "413"
+ - name: "414"
+ - name: "415"
+ - name: "416"
+ - name: "417"
+ - name: "500"
+ - name: "501"
+ - name: "503"
+ - name: couchdb.response_code_classes
+ description: HTTP response status code classes
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: couchdb.active_tasks
+ description: Active task breakdown
+ unit: tasks
+ chart_type: stacked
+ dimensions:
+ - name: indexer
+ - name: db_compaction
+ - name: replication
+ - name: view_compaction
+ - name: couchdb.replicator_jobs
+ description: Replicator job breakdown
+ unit: jobs
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: pending
+ - name: crashed
+ - name: internal_replication_jobs
+ - name: couchdb.open_files
+ description: Open files
+ unit: files
+ chart_type: line
+ dimensions:
+ - name: files
+ - name: couchdb.erlang_vm_memory
+ description: Erlang VM memory usage
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: atom
+ - name: binaries
+ - name: code
+ - name: ets
+ - name: procs
+ - name: other
+ - name: couchdb.proccounts
+ description: Process counts
+ unit: processes
+ chart_type: line
+ dimensions:
+ - name: os_procs
+ - name: erl_procs
+ - name: couchdb.peakmsgqueue
+ description: Peak message queue size
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: peak_size
+ - name: couchdb.reductions
+ description: Erlang reductions
+ unit: reductions
+ chart_type: line
+ dimensions:
+ - name: reductions
+ - name: couchdb.db_sizes_file
+ description: Database sizes (file)
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: a dimension per database
+ - name: couchdb.db_sizes_external
+ description: Database sizes (external)
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: a dimension per database
+ - name: couchdb.db_sizes_active
+ description: Database sizes (active)
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: a dimension per database
+ - name: couchdb.db_doc_count
+ description: 'Database # of docs'
+ unit: docs
+ chart_type: line
+ dimensions:
+ - name: a dimension per database
+ - name: couchdb.db_doc_del_count
+ description: 'Database # of deleted docs'
+ unit: docs
+ chart_type: line
+ dimensions:
+ - name: a dimension per database
diff --git a/src/go/plugin/go.d/modules/couchdb/metrics.go b/src/go/plugin/go.d/modules/couchdb/metrics.go
new file mode 100644
index 000000000..4d2f02679
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/metrics.go
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package couchdb
+
+// https://docs.couchdb.org/en/stable/api/index.html
+
+type cdbMetrics struct {
+ // https://docs.couchdb.org/en/stable/api/server/common.html#active-tasks
+ ActiveTasks []cdbActiveTask
+ // https://docs.couchdb.org/en/stable/api/server/common.html#node-node-name-stats
+ NodeStats *cdbNodeStats
+ // https://docs.couchdb.org/en/stable/api/server/common.html#node-node-name-system
+ NodeSystem *cdbNodeSystem
+ // https://docs.couchdb.org/en/stable/api/database/common.html
+ DBStats []cdbDBStats
+}
+
+func (m cdbMetrics) empty() bool {
+ switch {
+ case m.hasActiveTasks(), m.hasNodeStats(), m.hasNodeSystem(), m.hasDBStats():
+ return false
+ }
+ return true
+}
+
+func (m cdbMetrics) hasActiveTasks() bool { return m.ActiveTasks != nil }
+func (m cdbMetrics) hasNodeStats() bool { return m.NodeStats != nil }
+func (m cdbMetrics) hasNodeSystem() bool { return m.NodeSystem != nil }
+func (m cdbMetrics) hasDBStats() bool { return m.DBStats != nil }
+
+type cdbActiveTask struct {
+ Type string `json:"type"`
+}
+
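+// The nested `stm` struct tags below are flattened into underscore-separated metric
+// keys (for example "couchdb_httpd_request_methods_GET"), which are the keys the
+// collector reports and the tests assert on.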
+type cdbNodeStats struct {
+ CouchDB struct {
+ DatabaseReads struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"database_reads" json:"database_reads"`
+ DatabaseWrites struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"database_writes" json:"database_writes"`
+ HTTPd struct {
+ ViewReads struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"view_reads" json:"view_reads"`
+ } `stm:"httpd" json:"httpd"`
+ HTTPdRequestMethods struct {
+ Copy struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"COPY" json:"COPY"`
+ Delete struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"DELETE" json:"DELETE"`
+ Get struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"GET" json:"GET"`
+ Head struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"HEAD" json:"HEAD"`
+ Options struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"OPTIONS" json:"OPTIONS"`
+ Post struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"POST" json:"POST"`
+ Put struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"PUT" json:"PUT"`
+ } `stm:"httpd_request_methods" json:"httpd_request_methods"`
+ HTTPdStatusCodes struct {
+ Code200 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"200" json:"200"`
+ Code201 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"201" json:"201"`
+ Code202 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"202" json:"202"`
+ Code204 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"204" json:"204"`
+ Code206 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"206" json:"206"`
+ Code301 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"301" json:"301"`
+ Code302 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"302" json:"302"`
+ Code304 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"304" json:"304"`
+ Code400 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"400" json:"400"`
+ Code401 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"401" json:"401"`
+ Code403 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"403" json:"403"`
+ Code404 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"404" json:"404"`
+ Code405 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"405" json:"405"`
+ Code406 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"406" json:"406"`
+ Code409 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"409" json:"409"`
+ Code412 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"412" json:"412"`
+ Code413 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"413" json:"413"`
+ Code414 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"414" json:"414"`
+ Code415 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"415" json:"415"`
+ Code416 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"416" json:"416"`
+ Code417 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"417" json:"417"`
+ Code500 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"500" json:"500"`
+ Code501 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"501" json:"501"`
+ Code503 struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"503" json:"503"`
+ } `stm:"httpd_status_codes" json:"httpd_status_codes"`
+ OpenOSFiles struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"open_os_files" json:"open_os_files"`
+ } `stm:"couchdb" json:"couchdb"`
+ CouchReplicator struct {
+ Jobs struct {
+ Running struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"running" json:"running"`
+ Pending struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"pending" json:"pending"`
+ Crashed struct {
+ Value float64 `stm:"" json:"value"`
+ } `stm:"crashed" json:"crashed"`
+ } `stm:"jobs" json:"jobs"`
+ } `stm:"couch_replicator" json:"couch_replicator"`
+}
+
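+// cdbNodeSystem maps the _node/{node}/_system response. MessageQueues is kept as a
+// raw map because, as the v3.1.1 fixture shows, its entries are either plain numbers
+// or objects with count/min/max fields.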
+type cdbNodeSystem struct {
+ Memory struct {
+ Other float64 `stm:"other" json:"other"`
+ Atom float64 `stm:"atom" json:"atom"`
+ AtomUsed float64 `stm:"atom_used" json:"atom_used"`
+ Processes float64 `stm:"processes" json:"processes"`
+ Binary float64 `stm:"binary" json:"binary"`
+ Code float64 `stm:"code" json:"code"`
+ Ets float64 `stm:"ets" json:"ets"`
+ } `stm:"memory" json:"memory"`
+
+ RunQueue float64 `stm:"run_queue" json:"run_queue"`
+ EtsTableCount float64 `stm:"ets_table_count" json:"ets_table_count"`
+ ContextSwitches float64 `stm:"context_switches" json:"context_switches"`
+ Reductions float64 `stm:"reductions" json:"reductions"`
+ IOInput float64 `stm:"io_input" json:"io_input"`
+ IOOutput float64 `stm:"io_output" json:"io_output"`
+ OSProcCount float64 `stm:"os_proc_count" json:"os_proc_count"`
+ ProcessCount float64 `stm:"process_count" json:"process_count"`
+ InternalReplicationJobs float64 `stm:"internal_replication_jobs" json:"internal_replication_jobs"`
+
+ MessageQueues map[string]interface{} `json:"message_queues"`
+}
+
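+// cdbDBStats maps one entry of the _dbs_info response: Key is the database name,
+// Error is set when the database does not exist, and Info carries the sizes and doc
+// counts that end up in collected metrics prefixed with "db_<database>_".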
+type cdbDBStats struct {
+ Key string
+ Error string
+ Info struct {
+ Sizes struct {
+ File float64 `stm:"file" json:"file"`
+ External float64 `stm:"external" json:"external"`
+ Active float64 `stm:"active" json:"active"`
+ } `stm:"db_sizes" json:"sizes"`
+ DocDelCount float64 `stm:"db_doc_del_counts" json:"doc_del_count"`
+ DocCount float64 `stm:"db_doc_counts" json:"doc_count"`
+ }
+}
diff --git a/src/go/plugin/go.d/modules/couchdb/testdata/config.json b/src/go/plugin/go.d/modules/couchdb/testdata/config.json
new file mode 100644
index 000000000..0fa716e5d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/testdata/config.json
@@ -0,0 +1,22 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "node": "ok",
+ "databases": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/couchdb/testdata/config.yaml b/src/go/plugin/go.d/modules/couchdb/testdata/config.yaml
new file mode 100644
index 000000000..4968ed263
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/testdata/config.yaml
@@ -0,0 +1,19 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+node: "ok"
+databases: "ok"
diff --git a/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/active_tasks.json b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/active_tasks.json
new file mode 100644
index 000000000..788fe5642
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/active_tasks.json
@@ -0,0 +1,63 @@
+[
+ {
+ "changes_done": 64438,
+ "database": "mailbox",
+ "pid": "<0.12986.1>",
+ "progress": 84,
+ "started_on": 1376116576,
+ "total_changes": 76215,
+ "type": "database_compaction",
+ "updated_on": 1376116619
+ },
+ {
+ "changes_done": 26534,
+ "database": "mailbox",
+ "pid": "<0.12943.2>",
+ "progress": 23,
+ "started_on": 1376116592,
+ "total_changes": 76215,
+ "type": "view_compaction",
+ "updated_on": 1376116637
+ },
+ {
+ "changes_done": 14443,
+ "database": "mailbox",
+ "design_document": "c9753817b3ba7c674d92361f24f59b9f",
+ "pid": "<0.10461.3>",
+ "progress": 18,
+ "started_on": 1376116621,
+ "total_changes": 76215,
+ "type": "indexer",
+ "updated_on": 1376116650
+ },
+ {
+ "changes_done": 5454,
+ "database": "mailbox",
+ "design_document": "_design/meta",
+ "pid": "<0.6838.4>",
+ "progress": 7,
+ "started_on": 1376116632,
+ "total_changes": 76215,
+ "type": "indexer",
+ "updated_on": 1376116651
+ },
+ {
+ "checkpointed_source_seq": 68585,
+ "continuous": false,
+ "doc_id": null,
+ "doc_write_failures": 1,
+ "docs_read": 4524,
+ "docs_written": 4524,
+ "missing_revisions_found": 4524,
+ "pid": "<0.1538.5>",
+ "progress": 44,
+ "replication_id": "9bc1727d74d49d9e157e260bb8bbd1d5",
+ "revisions_checked": 4524,
+ "source": "mailbox",
+ "source_seq": 154419,
+ "started_on": 1376116644,
+ "target": "http://mailsrv:5984/mailbox",
+ "type": "replication",
+ "updated_on": 1376116651
+ }
+]
diff --git a/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/dbs_info.json b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/dbs_info.json
new file mode 100644
index 000000000..9ca43a53c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/dbs_info.json
@@ -0,0 +1,52 @@
+[
+ {
+ "key": "db1",
+ "info": {
+ "db_name": "db1",
+ "purge_seq": "0-g1AAAABPeJzLYWBgYMpgTmHgzcvPy09JdcjLz8gvLskBCeexAEmGBiD1HwiyEhlwqEtkSKqHKMgCAIT2GV4",
+ "update_seq": "14-g1AAAABPeJzLYWBgYMpgTmHgzcvPy09JdcjLz8gvLskBCeexAEmGBiD1HwiyEjlxqEtkSKoHK2DNAgCGOxls",
+ "sizes": {
+ "file": 74115,
+ "external": 588,
+ "active": 2818
+ },
+ "props": {},
+ "doc_del_count": 1,
+ "doc_count": 14,
+ "disk_format_version": 8,
+ "compact_running": false,
+ "cluster": {
+ "q": 2,
+ "n": 1,
+ "w": 1,
+ "r": 1
+ },
+ "instance_start_time": "0"
+ }
+ },
+ {
+ "key": "db2",
+ "info": {
+ "db_name": "db2",
+ "purge_seq": "0-g1AAAABPeJzLYWBgYMpgTmHgzcvPy09JdcjLz8gvLskBCeexAEmGBiD1HwiyEhlwqEtkSKqHKMgCAIT2GV5",
+ "update_seq": "14-g1AAAABPeJzLYWBgYMpgTmHgzcvPy09JdcjLz8gvLskBCeexAEmGBiD1HwiyEjlxqEtkSKoHK2DNAgCGOxlt",
+ "sizes": {
+ "file": 7415,
+ "external": 288,
+ "active": 1818
+ },
+ "props": {},
+ "doc_del_count": 1,
+ "doc_count": 15,
+ "disk_format_version": 8,
+ "compact_running": false,
+ "cluster": {
+ "q": 2,
+ "n": 1,
+ "w": 1,
+ "r": 1
+ },
+ "instance_start_time": "0"
+ }
+ }
+]
diff --git a/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/node_stats.json b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/node_stats.json
new file mode 100644
index 000000000..ae31366af
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/node_stats.json
@@ -0,0 +1,1651 @@
+{
+ "global_changes": {
+ "db_writes": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of db writes performed by global changes"
+ },
+ "event_doc_conflict": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of conflicted event docs encountered by global changes"
+ },
+ "listener_pending_updates": {
+ "value": 1,
+ "type": "gauge",
+ "desc": "number of global changes updates pending writes in global_changes_listener"
+ },
+ "rpcs": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of rpc operations performed by global_changes"
+ },
+ "server_pending_updates": {
+ "value": 1,
+ "type": "gauge",
+ "desc": "number of global changes updates pending writes in global_changes_server"
+ }
+ },
+ "couchdb": {
+ "httpd": {
+ "aborted_requests": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of aborted requests"
+ },
+ "bulk_docs": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "distribution of the number of docs in _bulk_docs requests"
+ },
+ "bulk_requests": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of bulk requests"
+ },
+ "requests": {
+ "value": 75562,
+ "type": "counter",
+ "desc": "number of HTTP requests"
+ },
+ "view_timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP view timeouts"
+ },
+ "find_timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP find timeouts"
+ },
+ "explain_timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP _explain timeouts"
+ },
+ "all_docs_timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP all_docs timeouts"
+ },
+ "partition_view_requests": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of partition HTTP view requests"
+ },
+ "partition_find_requests": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of partition HTTP _find requests"
+ },
+ "partition_explain_requests": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of partition HTTP _explain requests"
+ },
+ "partition_all_docs_requests": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of partition HTTP _all_docs requests"
+ },
+ "partition_view_timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of partition HTTP view timeouts"
+ },
+ "partition_find_timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of partition HTTP find timeouts"
+ },
+ "partition_explain_timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of partition HTTP _explain timeouts"
+ },
+ "partition_all_docs_timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of partition HTTP all_docs timeouts"
+ },
+ "temporary_view_reads": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of temporary view reads"
+ },
+ "view_reads": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of view reads"
+ },
+ "clients_requesting_changes": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of clients for continuous _changes"
+ },
+ "purge_requests": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of purge requests"
+ }
+ },
+ "dbinfo": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "distribution of latencies for calls to retrieve DB info"
+ },
+ "io_queue": {
+ "search": {
+ "value": 1,
+ "type": "counter",
+ "desc": "Search IO directly triggered by client requests"
+ }
+ },
+ "io_queue2": {
+ "search": {
+ "count": {
+ "value": 1,
+ "type": "counter",
+ "desc": "Search IO directly triggered by client requests"
+ }
+ }
+ },
+ "auth_cache_hits": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of authentication cache hits"
+ },
+ "auth_cache_misses": {
+ "value": 2,
+ "type": "counter",
+ "desc": "number of authentication cache misses"
+ },
+ "collect_results_time": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "microsecond latency for calls to couch_db:collect_results/3"
+ },
+ "database_writes": {
+ "value": 14,
+ "type": "counter",
+ "desc": "number of times a database was changed"
+ },
+ "database_reads": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of times a document was read from a database"
+ },
+ "database_purges": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of times a database was purged"
+ },
+ "db_open_time": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "milliseconds required to open a database"
+ },
+ "document_inserts": {
+ "value": 17,
+ "type": "counter",
+ "desc": "number of documents inserted"
+ },
+ "document_writes": {
+ "value": 17,
+ "type": "counter",
+ "desc": "number of document write operations"
+ },
+ "document_purges": {
+ "total": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of total document purge operations"
+ },
+ "success": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of successful document purge operations"
+ },
+ "failure": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed document purge operations"
+ }
+ },
+ "local_document_writes": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of _local document write operations"
+ },
+ "httpd_request_methods": {
+ "COPY": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP COPY requests"
+ },
+ "DELETE": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP DELETE requests"
+ },
+ "GET": {
+ "value": 75544,
+ "type": "counter",
+ "desc": "number of HTTP GET requests"
+ },
+ "HEAD": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP HEAD requests"
+ },
+ "OPTIONS": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP OPTIONS requests"
+ },
+ "POST": {
+ "value": 15,
+ "type": "counter",
+ "desc": "number of HTTP POST requests"
+ },
+ "PUT": {
+ "value": 3,
+ "type": "counter",
+ "desc": "number of HTTP PUT requests"
+ }
+ },
+ "httpd_status_codes": {
+ "200": {
+ "value": 75294,
+ "type": "counter",
+ "desc": "number of HTTP 200 OK responses"
+ },
+ "201": {
+ "value": 15,
+ "type": "counter",
+ "desc": "number of HTTP 201 Created responses"
+ },
+ "202": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 202 Accepted responses"
+ },
+ "204": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 204 No Content responses"
+ },
+ "206": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 206 Partial Content"
+ },
+ "301": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 301 Moved Permanently responses"
+ },
+ "302": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 302 Found responses"
+ },
+ "304": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 304 Not Modified responses"
+ },
+ "400": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 400 Bad Request responses"
+ },
+ "401": {
+ "value": 20,
+ "type": "counter",
+ "desc": "number of HTTP 401 Unauthorized responses"
+ },
+ "403": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 403 Forbidden responses"
+ },
+ "404": {
+ "value": 225,
+ "type": "counter",
+ "desc": "number of HTTP 404 Not Found responses"
+ },
+ "405": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 405 Method Not Allowed responses"
+ },
+ "406": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 406 Not Acceptable responses"
+ },
+ "409": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 409 Conflict responses"
+ },
+ "412": {
+ "value": 3,
+ "type": "counter",
+ "desc": "number of HTTP 412 Precondition Failed responses"
+ },
+ "413": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 413 Request Entity Too Long responses"
+ },
+ "414": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 414 Request URI Too Long responses"
+ },
+ "415": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 415 Unsupported Media Type responses"
+ },
+ "416": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 416 Requested Range Not Satisfiable responses"
+ },
+ "417": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 417 Expectation Failed responses"
+ },
+ "500": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 500 Internal Server Error responses"
+ },
+ "501": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 501 Not Implemented responses"
+ },
+ "503": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP 503 Service unavailable responses"
+ }
+ },
+ "open_databases": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of open databases"
+ },
+ "open_os_files": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of file descriptors CouchDB has open"
+ },
+ "request_time": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of a request inside CouchDB without MochiWeb"
+ },
+ "couch_server": {
+ "lru_skip": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of couch_server LRU operations skipped"
+ }
+ },
+ "query_server": {
+ "vdu_rejects": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of rejections by validate_doc_update function"
+ },
+ "vdu_process_time": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "duration of validate_doc_update function calls"
+ }
+ },
+ "mrview": {
+ "map_doc": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of documents mapped in the view server"
+ },
+ "emits": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of invocations of `emit' in map functions in the view server"
+ }
+ }
+ },
+ "mem3": {
+ "shard_cache": {
+ "eviction": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of shard cache evictions"
+ },
+ "hit": {
+ "value": 185,
+ "type": "counter",
+ "desc": "number of shard cache hits"
+ },
+ "miss": {
+ "value": 252470,
+ "type": "counter",
+ "desc": "number of shard cache misses"
+ }
+ }
+ },
+ "ddoc_cache": {
+ "hit": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of design doc cache hits"
+ },
+ "miss": {
+ "value": 3,
+ "type": "counter",
+ "desc": "number of design doc cache misses"
+ },
+ "recovery": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of design doc cache recoveries"
+ }
+ },
+ "couch_log": {
+ "level": {
+ "alert": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of logged alert messages"
+ },
+ "critical": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of logged critical messages"
+ },
+ "debug": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of logged debug messages"
+ },
+ "emergency": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of logged emergency messages"
+ },
+ "error": {
+ "value": 2,
+ "type": "counter",
+ "desc": "number of logged error messages"
+ },
+ "info": {
+ "value": 8,
+ "type": "counter",
+ "desc": "number of logged info messages"
+ },
+ "notice": {
+ "value": 126250,
+ "type": "counter",
+ "desc": "number of logged notice messages"
+ },
+ "warning": {
+ "value": 8,
+ "type": "counter",
+ "desc": "number of logged warning messages"
+ }
+ }
+ },
+ "dreyfus": {
+ "httpd": {
+ "search": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "Distribution of overall search request latency as experienced by the end user"
+ }
+ },
+ "rpc": {
+ "search": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of a search RPC worker"
+ },
+ "group1": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of a group1 RPC worker"
+ },
+ "group2": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of a group2 RPC worker"
+ },
+ "info": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of an info RPC worker"
+ }
+ },
+ "index": {
+ "await": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of an dreyfus_index await request"
+ },
+ "search": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of an dreyfus_index search request"
+ },
+ "group1": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of an dreyfus_index group1 request"
+ },
+ "group2": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of an dreyfus_index group2 request"
+ },
+ "info": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of an dreyfus_index info request"
+ }
+ }
+ },
+ "fabric": {
+ "worker": {
+ "timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of worker timeouts"
+ }
+ },
+ "open_shard": {
+ "timeouts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of open shard timeouts"
+ }
+ },
+ "read_repairs": {
+ "success": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of successful read repair operations"
+ },
+ "failure": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed read repair operations"
+ }
+ },
+ "doc_update": {
+ "errors": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of document update errors"
+ },
+ "mismatched_errors": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of document update errors with multiple error types"
+ },
+ "write_quorum_errors": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of write quorum errors"
+ }
+ }
+ },
+ "rexi": {
+ "buffered": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of rexi messages buffered"
+ },
+ "down": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of rexi_DOWN messages handled"
+ },
+ "dropped": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of rexi messages dropped from buffers"
+ },
+ "streams": {
+ "timeout": {
+ "init_stream": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of rexi stream initialization timeouts"
+ },
+ "stream": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of rexi stream timeouts"
+ },
+ "wait_for_ack": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of rexi stream timeouts while waiting for acks"
+ }
+ }
+ }
+ },
+ "couch_replicator": {
+ "changes_read_failures": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed replicator changes read failures"
+ },
+ "changes_reader_deaths": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed replicator changes readers"
+ },
+ "changes_manager_deaths": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed replicator changes managers"
+ },
+ "changes_queue_deaths": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed replicator changes work queues"
+ },
+ "checkpoints": {
+ "success": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of checkpoints successfully saves"
+ },
+ "failure": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed checkpoint saves"
+ }
+ },
+ "failed_starts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of replications that have failed to start"
+ },
+ "requests": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of HTTP requests made by the replicator"
+ },
+ "responses": {
+ "failure": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed HTTP responses received by the replicator"
+ },
+ "success": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of successful HTTP responses received by the replicator"
+ }
+ },
+ "stream_responses": {
+ "failure": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed streaming HTTP responses received by the replicator"
+ },
+ "success": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of successful streaming HTTP responses received by the replicator"
+ }
+ },
+ "worker_deaths": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of failed replicator workers"
+ },
+ "workers_started": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of replicator workers started"
+ },
+ "cluster_is_stable": {
+ "value": 1,
+ "type": "gauge",
+ "desc": "1 if cluster is stable, 0 if unstable"
+ },
+ "db_scans": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of times replicator db scans have been started"
+ },
+ "docs": {
+ "dbs_created": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of db shard creations seen by replicator doc processor"
+ },
+ "dbs_deleted": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of db shard deletions seen by replicator doc processor"
+ },
+ "dbs_found": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of db shard found by replicator doc processor"
+ },
+ "db_changes": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of db changes processed by replicator doc processor"
+ },
+ "failed_state_updates": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of 'failed' state document updates"
+ },
+ "completed_state_updates": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of 'completed' state document updates"
+ }
+ },
+ "jobs": {
+ "adds": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of jobs added to replicator scheduler"
+ },
+ "duplicate_adds": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of duplicate jobs added to replicator scheduler"
+ },
+ "removes": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of jobs removed from replicator scheduler"
+ },
+ "starts": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of jobs started by replicator scheduler"
+ },
+ "stops": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of jobs stopped by replicator scheduler"
+ },
+ "crashes": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of job crashed noticed by replicator scheduler"
+ },
+ "running": {
+ "value": 1,
+ "type": "gauge",
+ "desc": "replicator scheduler running jobs"
+ },
+ "pending": {
+ "value": 1,
+ "type": "gauge",
+ "desc": "replicator scheduler pending jobs"
+ },
+ "crashed": {
+ "value": 1,
+ "type": "gauge",
+ "desc": "replicator scheduler crashed jobs"
+ },
+ "total": {
+ "value": 1,
+ "type": "gauge",
+ "desc": "total number of replicator scheduler jobs"
+ }
+ },
+ "connection": {
+ "acquires": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of times connections are shared"
+ },
+ "creates": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of connections created"
+ },
+ "releases": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of times ownership of a connection is released"
+ },
+ "owner_crashes": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of times a connection owner crashes while owning at least one connection"
+ },
+ "worker_crashes": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of times a worker unexpectedly terminates"
+ },
+ "closes": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of times a worker is gracefully shut down"
+ }
+ }
+ },
+ "pread": {
+ "exceed_eof": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of the attempts to read beyond end of db file"
+ },
+ "exceed_limit": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of the attempts to read beyond set limit"
+ }
+ },
+ "mango": {
+ "unindexed_queries": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of mango queries that could not use an index"
+ },
+ "query_invalid_index": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of mango queries that generated an invalid index warning"
+ },
+ "too_many_docs_scanned": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of mango queries that generated an index scan warning"
+ },
+ "docs_examined": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of documents examined by mango queries coordinated by this node"
+ },
+ "quorum_docs_examined": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of documents examined by mango queries, using cluster quorum"
+ },
+ "results_returned": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of rows returned by mango queries"
+ },
+ "query_time": {
+ "value": {
+ "min": 0.0,
+ "max": 0.0,
+ "arithmetic_mean": 0.0,
+ "geometric_mean": 0.0,
+ "harmonic_mean": 0.0,
+ "median": 0.0,
+ "variance": 0.0,
+ "standard_deviation": 0.0,
+ "skewness": 0.0,
+ "kurtosis": 0.0,
+ "percentile": [
+ [
+ 50,
+ 0.0
+ ],
+ [
+ 75,
+ 0.0
+ ],
+ [
+ 90,
+ 0.0
+ ],
+ [
+ 95,
+ 0.0
+ ],
+ [
+ 99,
+ 0.0
+ ],
+ [
+ 999,
+ 0.0
+ ]
+ ],
+ "histogram": [
+ [
+ 0,
+ 0
+ ]
+ ],
+ "n": 0
+ },
+ "type": "histogram",
+ "desc": "length of time processing a mango query"
+ },
+ "evaluate_selector": {
+ "value": 1,
+ "type": "counter",
+ "desc": "number of mango selector evaluations"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/node_system.json b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/node_system.json
new file mode 100644
index 000000000..7084645a4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/node_system.json
@@ -0,0 +1,176 @@
+{
+ "uptime": 253571,
+ "memory": {
+ "other": 20427855,
+ "atom": 504433,
+ "atom_used": 488328,
+ "processes": 9161448,
+ "processes_used": 9160864,
+ "binary": 297696,
+ "code": 11252688,
+ "ets": 1579120
+ },
+ "run_queue": 1,
+ "ets_table_count": 116,
+ "context_switches": 22614499,
+ "reductions": 43211228312,
+ "garbage_collection_count": 11416345,
+ "words_reclaimed": 20241272866,
+ "io_input": 49674812,
+ "io_output": 686400800,
+ "os_proc_count": 1,
+ "stale_proc_count": 1,
+ "process_count": 296,
+ "process_limit": 262144,
+ "message_queues": {
+ "couch_file": {
+ "count": 2,
+ "min": 1,
+ "max": 1,
+ "50": 1,
+ "90": 1,
+ "99": 1
+ },
+ "couch_db_updater": {
+ "count": 2,
+ "min": 1,
+ "max": 1,
+ "50": 1,
+ "90": 1,
+ "99": 1
+ },
+ "httpc_manager": 1,
+ "httpc_handler_sup": 1,
+ "ken_sup": 1,
+ "ken_server": 1,
+ "couch_replication": 1,
+ "standard_error_sup": 1,
+ "chttpd_auth_cache_lru": 1,
+ "couch_index_sup": 1,
+ "ioq_sup": 1,
+ "couch_index_server": 1,
+ "mem3_events": 1,
+ "jwtf_sup": 1,
+ "jwtf_keystore": 1,
+ "ioq": 1,
+ "couch_uuids": 1,
+ "ftp_sup": 1,
+ "ibrowse_sup": 1,
+ "couch_secondary_services": 1,
+ "couch_primary_services": 1,
+ "couch_task_status": 1,
+ "couch_sup": 1,
+ "global_changes_sup": 1,
+ "global_changes_server": 1,
+ "couch_server": 1,
+ "couch_epi_functions_gen_couch_index": 1,
+ "couch_plugin": 1,
+ "ibrowse": 1,
+ "config_event": 1,
+ "couch_epi_functions_gen_chttpd_auth": 1,
+ "chttpd_sup": 1,
+ "couch_epi_functions_gen_couch_db": 1,
+ "couch_epi_data_gen_flags_config": 1,
+ "couch_epi_functions_gen_global_changes": 1,
+ "couch_proc_manager": 1,
+ "release_handler": 1,
+ "sasl_sup": 1,
+ "couch_epi_functions_gen_chttpd_handlers": 1,
+ "couch_epi_functions_gen_feature_flags": 1,
+ "couch_epi_functions_gen_chttpd": 1,
+ "dreyfus_sup": 1,
+ "sasl_safe_sup": 1,
+ "couch_event_sup2": 1,
+ "alarm_handler": 1,
+ "couch_event_server": 1,
+ "dreyfus_index_manager": 1,
+ "timer_server": 1,
+ "runtime_tools_sup": 1,
+ "couch_httpd_vhost": 1,
+ "chttpd_auth_cache": 1,
+ "couch_stats_sup": 1,
+ "couch_stats_process_tracker": 1,
+ "chttpd": 1,
+ "kernel_safe_sup": 1,
+ "tftp_sup": 1,
+ "couch_stats_aggregator": 1,
+ "rex": 1,
+ "folsom_sup": 1,
+ "inet_gethost_native_sup": 1,
+ "kernel_sup": 1,
+ "ddoc_cache_sup": 1,
+ "global_name_server": 1,
+ "ddoc_cache_opener": 1,
+ "folsom_sample_slide_sup": 1,
+ "ddoc_cache_lru": 1,
+ "file_server_2": 1,
+ "standard_error": 1,
+ "rexi_buffer_nonode@nohost": 1,
+ "rexi_server_nonode@nohost": 1,
+ "couch_drv": 1,
+ "couch_peruser_sup": 1,
+ "tls_connection_sup": 1,
+ "couch_peruser": 1,
+ "folsom_metrics_histogram_ets": 1,
+ "couch_replicator_sup": 1,
+ "ssl_sup": 1,
+ "couch_replicator_scheduler_sup": 1,
+ "smoosh_sup": 1,
+ "folsom_meter_timer_server": 1,
+ "smoosh_server": 1,
+ "couch_replicator_scheduler": 1,
+ "couch_epi_data_gen_dreyfus_black_list": 1,
+ "mem3_sync_nodes": 1,
+ "couch_replicator_rate_limiter": 1,
+ "inet_gethost_native": 1,
+ "inets_sup": 1,
+ "setup_sup": 1,
+ "inet_db": 1,
+ "ssl_pem_cache": 1,
+ "mem3_sync": 1,
+ "ssl_manager": 1,
+ "mem3_sup": 1,
+ "ssl_listen_tracker_sup": 1,
+ "mem3_shards": 1,
+ "mem3_seeds": 1,
+ "httpd_sup": 1,
+ "couch_log_sup": 1,
+ "mem3_reshard_sup": 1,
+ "mango_sup": 1,
+ "couch_log_server": 1,
+ "mem3_reshard_job_sup": 1,
+ "erts_code_purger": 1,
+ "global_group": 1,
+ "error_logger": 1,
+ "couch_replicator_doc_processor": 1,
+ "ssl_connection_sup": 1,
+ "init": 1,
+ "mem3_reshard_dbdoc": 1,
+ "couch_replicator_connection": 1,
+ "erl_signal_server": 1,
+ "couch_replicator_clustering": 1,
+ "config": 1,
+ "mem3_reshard": 1,
+ "user": 1,
+ "couch_epi_sup": 1,
+ "mem3_nodes": 1,
+ "ssl_admin_sup": 1,
+ "mochiweb_clock": 1,
+ "rexi_buffer_mon": 1,
+ "dtls_udp_sup": 1,
+ "rexi_buffer_sup": 1,
+ "erl_prim_loader": 1,
+ "code_server": 1,
+ "httpc_sup": 1,
+ "rexi_sup": 1,
+ "dtls_connection_sup": 1,
+ "rexi_server_sup": 1,
+ "rexi_server_mon": 1,
+ "application_controller": 1,
+ "httpc_profile_sup": 1,
+ "config_sup": 1,
+ "rexi_server": 1
+ },
+ "internal_replication_jobs": 1,
+ "distribution": {}
+}
diff --git a/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/root.json b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/root.json
new file mode 100644
index 000000000..e7feb41c7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/couchdb/testdata/v3.1.1/root.json
@@ -0,0 +1,16 @@
+{
+ "couchdb": "Welcome",
+ "version": "3.1.1",
+ "git_sha": "ce596c65d",
+ "uuid": "d7bc2230b8e4de7f20680091bd7a21c7",
+ "features": [
+ "access-ready",
+ "partitioned",
+ "pluggable-storage-engines",
+ "reshard",
+ "scheduler"
+ ],
+ "vendor": {
+ "name": "The Apache Software Foundation"
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dmcache/README.md b/src/go/plugin/go.d/modules/dmcache/README.md
new file mode 120000
index 000000000..9609ec869
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/README.md
@@ -0,0 +1 @@
+integrations/dmcache_devices.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dmcache/charts.go b/src/go/plugin/go.d/modules/dmcache/charts.go
new file mode 100644
index 000000000..c77f3d878
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/charts.go
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dmcache
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioDeviceCacheSpaceUsage = module.Priority + iota
+ prioDeviceMetaSpaceUsage
+ prioDeviceReadEfficiency
+ prioDeviceWriteEfficiency
+ prioDeviceActivity
+ prioDeviceDirty
+)
+
+var deviceChartsTmpl = module.Charts{
+ chartDeviceCacheSpaceUsageTmpl.Copy(),
+ chartDeviceMetadataSpaceUsageTmpl.Copy(),
+
+ chartDeviceReadEfficiencyTmpl.Copy(),
+ chartDeviceWriteEfficiencyTmpl.Copy(),
+
+ chartDeviceActivityTmpl.Copy(),
+
+ chartDeviceDirtySizeTmpl.Copy(),
+}
+
+var (
+ chartDeviceCacheSpaceUsageTmpl = module.Chart{
+ ID: "dmcache_device_%s_cache_space_usage",
+ Title: "DMCache space usage",
+ Units: "bytes",
+ Fam: "space usage",
+ Ctx: "dmcache.device_cache_space_usage",
+ Type: module.Stacked,
+ Priority: prioDeviceCacheSpaceUsage,
+ Dims: module.Dims{
+ {ID: "dmcache_device_%s_cache_free_bytes", Name: "free"},
+ {ID: "dmcache_device_%s_cache_used_bytes", Name: "used"},
+ },
+ }
+ chartDeviceMetadataSpaceUsageTmpl = module.Chart{
+ ID: "dmcache_device_%s_metadata_space_usage",
+ Title: "DMCache metadata space usage",
+ Units: "bytes",
+ Fam: "space usage",
+ Ctx: "dmcache.device_metadata_space_usage",
+ Type: module.Stacked,
+ Priority: prioDeviceMetaSpaceUsage,
+ Dims: module.Dims{
+ {ID: "dmcache_device_%s_metadata_free_bytes", Name: "free"},
+ {ID: "dmcache_device_%s_metadata_used_bytes", Name: "used"},
+ },
+ }
+)
+
+var (
+ chartDeviceReadEfficiencyTmpl = module.Chart{
+ ID: "dmcache_device_%s_read_efficiency",
+ Title: "DMCache read efficiency",
+ Units: "requests/s",
+ Fam: "efficiency",
+ Ctx: "dmcache.device_cache_read_efficiency",
+ Type: module.Stacked,
+ Priority: prioDeviceReadEfficiency,
+ Dims: module.Dims{
+ {ID: "dmcache_device_%s_read_hits", Name: "hits", Algo: module.Incremental},
+ {ID: "dmcache_device_%s_read_misses", Name: "misses", Algo: module.Incremental},
+ },
+ }
+ chartDeviceWriteEfficiencyTmpl = module.Chart{
+ ID: "dmcache_device_%s_write_efficiency",
+ Title: "DMCache write efficiency",
+ Units: "requests/s",
+ Fam: "efficiency",
+ Ctx: "dmcache.device_cache_write_efficiency",
+ Type: module.Stacked,
+ Priority: prioDeviceWriteEfficiency,
+ Dims: module.Dims{
+ {ID: "dmcache_device_%s_write_hits", Name: "hits", Algo: module.Incremental},
+ {ID: "dmcache_device_%s_write_misses", Name: "misses", Algo: module.Incremental},
+ },
+ }
+)
+
+var chartDeviceActivityTmpl = module.Chart{
+ ID: "dmcache_device_%s_activity",
+ Title: "DMCache activity",
+ Units: "bytes/s",
+ Fam: "activity",
+ Ctx: "dmcache.device_cache_activity",
+ Type: module.Area,
+ Priority: prioDeviceActivity,
+ Dims: module.Dims{
+ {ID: "dmcache_device_%s_promotions_bytes", Name: "promotions", Algo: module.Incremental},
+ {ID: "dmcache_device_%s_demotions_bytes", Name: "demotions", Mul: -1, Algo: module.Incremental},
+ },
+}
+
+var chartDeviceDirtySizeTmpl = module.Chart{
+ ID: "dmcache_device_%s_dirty_size",
+ Title: "DMCache dirty data size",
+ Units: "bytes",
+ Fam: "dirty size",
+ Ctx: "dmcache.device_cache_dirty_size",
+ Type: module.Area,
+ Priority: prioDeviceDirty,
+ Dims: module.Dims{
+ {ID: "dmcache_device_%s_dirty_bytes", Name: "dirty"},
+ },
+}
+
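+// addDeviceCharts adds the per-device chart templates for a newly discovered dm-cache
+// device: chart IDs get the sanitized device name, dimension IDs keep the raw name
+// (matching the metric keys produced in collect), and a "device" label is attached.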
+func (c *DmCache) addDeviceCharts(device string) {
+ charts := deviceChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanDeviceName(device))
+ chart.Labels = []module.Label{
+ {Key: "device", Value: device},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, device)
+ }
+ }
+
+ if err := c.Charts().Add(*charts...); err != nil {
+ c.Warning(err)
+ }
+}
+
+func (c *DmCache) removeDeviceCharts(device string) {
+ px := fmt.Sprintf("dmcache_device_%s_", cleanDeviceName(device))
+
+ for _, chart := range *c.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func cleanDeviceName(device string) string {
+ return strings.ReplaceAll(device, ".", "_")
+}
diff --git a/src/go/plugin/go.d/modules/dmcache/collect.go b/src/go/plugin/go.d/modules/dmcache/collect.go
new file mode 100644
index 000000000..eae961b73
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/collect.go
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dmcache
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type dmCacheDevice struct {
+ name string
+ metaBlockSizeSectors int64
+ metaUsedBlocks int64
+ metaTotalBlocks int64
+ cacheBlockSizeSectors int64
+ cacheUsedBlocks int64
+ cacheTotalBlocks int64
+ readHits int64
+ readMisses int64
+ writeHits int64
+ writeMisses int64
+ demotionsBlocks int64
+ promotionsBlocks int64
+ dirtyBlocks int64
+}
+
+func (c *DmCache) collect() (map[string]int64, error) {
+ bs, err := c.exec.cacheStatus()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ if err := c.collectCacheStatus(mx, bs); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (c *DmCache) collectCacheStatus(mx map[string]int64, data []byte) error {
+ var devices []*dmCacheDevice
+
+ sc := bufio.NewScanner(bytes.NewReader(data))
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+ if line == "" {
+ continue
+ }
+
+ dev, err := parseDmsetupStatusLine(line)
+ if err != nil {
+ return fmt.Errorf("malformed dmsetup status line: %v ('%s')", err, line)
+ }
+
+ devices = append(devices, dev)
+ }
+
+ seen := make(map[string]bool)
+
+ for _, dev := range devices {
+ seen[dev.name] = true
+
+ if !c.devices[dev.name] {
+ c.devices[dev.name] = true
+ c.addDeviceCharts(dev.name)
+ }
+
+ px := fmt.Sprintf("dmcache_device_%s_", dev.name)
+
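+ // dmsetup reports counts in blocks, and block sizes in 512-byte sectors,
+ // so bytes = blocks * blockSizeSectors * 512
+ // (e.g. 2368 metadata blocks of 8 sectors each -> 2368*8*512 = 9699328 bytes).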
+ const sectorSize = 512
+ metaMul := dev.metaBlockSizeSectors * sectorSize
+ cacheMul := dev.cacheBlockSizeSectors * sectorSize
+
+ mx[px+"metadata_free_bytes"] = (dev.metaTotalBlocks - dev.metaUsedBlocks) * metaMul
+ mx[px+"metadata_used_bytes"] = dev.metaUsedBlocks * metaMul
+ mx[px+"cache_free_bytes"] = (dev.cacheTotalBlocks - dev.cacheUsedBlocks) * cacheMul
+ mx[px+"cache_used_bytes"] = dev.cacheUsedBlocks * cacheMul
+ mx[px+"read_hits"] = dev.readHits
+ mx[px+"read_misses"] = dev.readMisses
+ mx[px+"write_hits"] = dev.writeHits
+ mx[px+"write_misses"] = dev.writeMisses
+ mx[px+"demotions_bytes"] = dev.demotionsBlocks * cacheMul
+ mx[px+"promotions_bytes"] = dev.promotionsBlocks * cacheMul
+ mx[px+"dirty_bytes"] = dev.dirtyBlocks * cacheMul
+ }
+
+ for dev := range c.devices {
+ if !seen[dev] {
+ delete(c.devices, dev)
+ c.removeDeviceCharts(dev)
+ }
+ }
+
+ if len(devices) == 0 {
+ return errors.New("no dm-cache devices found")
+ }
+
+ return nil
+}
+
+func parseDmsetupStatusLine(line string) (*dmCacheDevice, error) {
+ // https://www.kernel.org/doc/html/next/admin-guide/device-mapper/cache.html#status
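+ //
+ // Example line (whitespace-separated fields, indexed from 0; the device name keeps a trailing ':'):
+ //   vg-media: 0 2404139008 cache 8 2368/10240 4096 189024/786216 82870357 5499462 26280342 8017854 0 22905 0 ...
+ //   0=name 4=meta block size (sectors) 5=meta used/total 6=cache block size (sectors) 7=cache used/total
+ //   8=read hits 9=read misses 10=write hits 11=write misses 12=demotions 13=promotions 14=dirty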
+
+ parts := strings.Fields(line)
+ if len(parts) < 15 {
+ return nil, fmt.Errorf("want at least 15 fields, got %d", len(parts))
+ }
+
+ var dev dmCacheDevice
+ var err error
+
+ for i, s := range parts {
+ switch i {
+ case 0:
+ dev.name = strings.TrimSuffix(parts[0], ":")
+ case 4:
+ dev.metaBlockSizeSectors, err = parseInt(s)
+ case 5:
+ dev.metaUsedBlocks, dev.metaTotalBlocks, err = parseUsedTotalBlocks(s)
+ case 6:
+ dev.cacheBlockSizeSectors, err = parseInt(s)
+ case 7:
+ dev.cacheUsedBlocks, dev.cacheTotalBlocks, err = parseUsedTotalBlocks(s)
+ case 8:
+ dev.readHits, err = parseInt(s)
+ case 9:
+ dev.readMisses, err = parseInt(s)
+ case 10:
+ dev.writeHits, err = parseInt(s)
+ case 11:
+ dev.writeMisses, err = parseInt(s)
+ case 12:
+ dev.demotionsBlocks, err = parseInt(s)
+ case 13:
+ dev.promotionsBlocks, err = parseInt(s)
+ case 14:
+ dev.dirtyBlocks, err = parseInt(s)
+ }
+
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse field %d '%s': %v", i, s, err)
+ }
+ }
+
+ return &dev, nil
+}
+
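+// parseUsedTotalBlocks parses a "used/total" blocks pair, e.g. "2368/10240" -> (2368, 10240).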
+func parseUsedTotalBlocks(info string) (int64, int64, error) {
+ parts := strings.Split(info, "/")
+ if len(parts) != 2 {
+ return 0, 0, errors.New("expected used/total")
+ }
+ used, err := parseInt(parts[0])
+ if err != nil {
+ return 0, 0, err
+ }
+ total, err := parseInt(parts[1])
+ if err != nil {
+ return 0, 0, err
+ }
+ return used, total, nil
+}
+
+func parseInt(s string) (int64, error) {
+ return strconv.ParseInt(s, 10, 64)
+}
diff --git a/src/go/plugin/go.d/modules/dmcache/config_schema.json b/src/go/plugin/go.d/modules/dmcache/config_schema.json
new file mode 100644
index 000000000..4428b4d1b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "DMCache collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dmcache/dmcache.go b/src/go/plugin/go.d/modules/dmcache/dmcache.go
new file mode 100644
index 000000000..9f3844b15
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/dmcache.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dmcache
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("dmcache", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *DmCache {
+ return &DmCache{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ devices: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ DmCache struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec dmsetupCLI
+
+ devices map[string]bool
+ }
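+
+ // dmsetupCLI abstracts the ndsudo-wrapped dmsetup call so unit tests can plug in a mock.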
+ dmsetupCLI interface {
+ cacheStatus() ([]byte, error)
+ }
+)
+
+func (c *DmCache) Configuration() any {
+ return c.Config
+}
+
+func (c *DmCache) Init() error {
+ dmsetup, err := c.initDmsetupCLI()
+ if err != nil {
+ c.Errorf("dmsetup exec initialization: %v", err)
+ return err
+ }
+ c.exec = dmsetup
+
+ return nil
+}
+
+func (c *DmCache) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (c *DmCache) Charts() *module.Charts {
+ return c.charts
+}
+
+func (c *DmCache) Collect() map[string]int64 {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (c *DmCache) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/dmcache/dmcache_test.go b/src/go/plugin/go.d/modules/dmcache/dmcache_test.go
new file mode 100644
index 000000000..218ae044c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/dmcache_test.go
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dmcache
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDmCache_Configuration(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DmCache{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDmCache_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if failed to locate ndsudo": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dmcache := New()
+ dmcache.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, dmcache.Init())
+ } else {
+ assert.NoError(t, dmcache.Init())
+ }
+ })
+ }
+}
+
+func TestDmCache_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *DmCache
+ }{
+ "not initialized exec": {
+ prepare: func() *DmCache {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *DmCache {
+ dmcache := New()
+ dmcache.exec = prepareMockOK()
+ _ = dmcache.Check()
+ return dmcache
+ },
+ },
+ "after collect": {
+ prepare: func() *DmCache {
+ dmcache := New()
+ dmcache.exec = prepareMockOK()
+ _ = dmcache.Collect()
+ return dmcache
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dmcache := test.prepare()
+
+ assert.NotPanics(t, dmcache.Cleanup)
+ })
+ }
+}
+
+func TestDmCache_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestDmCache_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockDmsetupExec
+ wantFail bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantFail: false,
+ },
+ "error on cache status": {
+ prepareMock: prepareMockErr,
+ wantFail: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantFail: true,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dmcache := New()
+ mock := test.prepareMock()
+ dmcache.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, dmcache.Check())
+ } else {
+ assert.NoError(t, dmcache.Check())
+ }
+ })
+ }
+}
+
+func TestDmCache_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockDmsetupExec
+ wantCharts int
+ wantMetrics map[string]int64
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantCharts: len(deviceChartsTmpl) * 2,
+ wantMetrics: map[string]int64{
+ "dmcache_device_vg_raid1_md21-media_cache_free_bytes": 1252402397184,
+ "dmcache_device_vg_raid1_md21-media_cache_used_bytes": 396412059648,
+ "dmcache_device_vg_raid1_md21-media_demotions_bytes": 0,
+ "dmcache_device_vg_raid1_md21-media_dirty_bytes": 0,
+ "dmcache_device_vg_raid1_md21-media_metadata_free_bytes": 32243712,
+ "dmcache_device_vg_raid1_md21-media_metadata_used_bytes": 9699328,
+ "dmcache_device_vg_raid1_md21-media_promotions_bytes": 48035266560,
+ "dmcache_device_vg_raid1_md21-media_read_hits": 82870357,
+ "dmcache_device_vg_raid1_md21-media_read_misses": 5499462,
+ "dmcache_device_vg_raid1_md21-media_write_hits": 26280342,
+ "dmcache_device_vg_raid1_md21-media_write_misses": 8017854,
+ "dmcache_device_vg_raid2_md22-media_cache_free_bytes": 1252402397184,
+ "dmcache_device_vg_raid2_md22-media_cache_used_bytes": 396412059648,
+ "dmcache_device_vg_raid2_md22-media_demotions_bytes": 0,
+ "dmcache_device_vg_raid2_md22-media_dirty_bytes": 0,
+ "dmcache_device_vg_raid2_md22-media_metadata_free_bytes": 32243712,
+ "dmcache_device_vg_raid2_md22-media_metadata_used_bytes": 9699328,
+ "dmcache_device_vg_raid2_md22-media_promotions_bytes": 48035266560,
+ "dmcache_device_vg_raid2_md22-media_read_hits": 82870357,
+ "dmcache_device_vg_raid2_md22-media_read_misses": 5499462,
+ "dmcache_device_vg_raid2_md22-media_write_hits": 26280342,
+ "dmcache_device_vg_raid2_md22-media_write_misses": 8017854,
+ },
+ },
+ "error on cache status": {
+ prepareMock: prepareMockErr,
+ wantMetrics: nil,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dmcache := New()
+ mock := test.prepareMock()
+ dmcache.exec = mock
+
+ mx := dmcache.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ assert.Len(t, *dmcache.Charts(), test.wantCharts)
+ testMetricsHasAllChartsDims(t, dmcache, mx)
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, dmcache *DmCache, mx map[string]int64) {
+ for _, chart := range *dmcache.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics have no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ }
+}
+
+func prepareMockOK() *mockDmsetupExec {
+ return &mockDmsetupExec{
+ cacheStatusData: []byte(`
+vg_raid1_md21-media: 0 2404139008 cache 8 2368/10240 4096 189024/786216 82870357 5499462 26280342 8017854 0 22905 0 3 metadata2 writethrough no_discard_passdown 2 migration_threshold 32768 mq 10 random_threshold 0 sequential_threshold 0 discard_promote_adjustment 0 read_promote_adjustment 0 write_promote_adjustment 0 rw -
+vg_raid2_md22-media: 0 2404139008 cache 8 2368/10240 4096 189024/786216 82870357 5499462 26280342 8017854 0 22905 0 3 metadata2 writethrough no_discard_passdown 2 migration_threshold 32768 mq 10 random_threshold 0 sequential_threshold 0 discard_promote_adjustment 0 read_promote_adjustment 0 write_promote_adjustment 0 rw -
+`),
+ }
+}
+
+func prepareMockErr() *mockDmsetupExec {
+ return &mockDmsetupExec{
+ errOnCacheStatus: true,
+ }
+}
+
+func prepareMockEmptyResponse() *mockDmsetupExec {
+ return &mockDmsetupExec{}
+}
+
+func prepareMockUnexpectedResponse() *mockDmsetupExec {
+ return &mockDmsetupExec{
+ cacheStatusData: []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`),
+ }
+}
+
+type mockDmsetupExec struct {
+ errOnCacheStatus bool
+ cacheStatusData []byte
+}
+
+func (m *mockDmsetupExec) cacheStatus() ([]byte, error) {
+ if m.errOnCacheStatus {
+ return nil, errors.New("mock.cacheStatus() error")
+ }
+
+ return m.cacheStatusData, nil
+}
diff --git a/src/go/plugin/go.d/modules/dmcache/exec.go b/src/go/plugin/go.d/modules/dmcache/exec.go
new file mode 100644
index 000000000..1cd11be31
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/exec.go
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dmcache
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newDmsetupExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *dmsetupExec {
+ return &dmsetupExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ }
+}
+
+type dmsetupExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+}
+
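+// cacheStatus runs ndsudo's "dmsetup-status-cache" command and returns its raw output
+// (one status line per dm-cache device), honoring the configured timeout.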
+func (e *dmsetupExec) cacheStatus() ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.ndsudoPath, "dmsetup-status-cache")
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/dmcache/init.go b/src/go/plugin/go.d/modules/dmcache/init.go
new file mode 100644
index 000000000..229972da7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/init.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dmcache
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (c *DmCache) initDmsetupCLI() (dmsetupCLI, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ dmsetup := newDmsetupExec(ndsudoPath, c.Timeout.Duration(), c.Logger)
+
+ return dmsetup, nil
+}
diff --git a/src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md b/src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md
new file mode 100644
index 000000000..ac61311b9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/integrations/dmcache_devices.md
@@ -0,0 +1,198 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dmcache/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dmcache/metadata.yaml"
+sidebar_label: "DMCache devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# DMCache devices
+
+
+<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: dmcache
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors DMCache, providing insights into capacity usage, efficiency, and activity. It relies on the [`dmsetup`](https://man7.org/linux/man-pages/man8/dmsetup.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
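+
+Under the hood it parses the cache-target status lines that `dmsetup` reports. If you want to eyeball the same data manually, a command along the lines of the one below should show it (illustrative only; the collector itself always goes through `ndsudo` and never calls `dmsetup` directly):
+
+```bash
+sudo dmsetup status --target cache
+```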
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per dmcache device
+
+These metrics refer to the DMCache device.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | Device name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dmcache.device_cache_space_usage | free, used | bytes |
+| dmcache.device_metadata_space_usage | free, used | bytes |
+| dmcache.device_cache_read_efficiency | hits, misses | requests/s |
+| dmcache.device_cache_write_efficiency | hits, misses | requests/s |
+| dmcache.device_cache_activity | promotions, demotions | bytes/s |
+| dmcache.device_cache_dirty_size | dirty | bytes |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/dmcache.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/dmcache.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | dmsetup binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: dmcache
+ update_every: 5 # Collect DMCache statistics every 5 seconds
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `dmcache` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m dmcache
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `dmcache` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep dmcache
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep dmcache /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep dmcache
+```
+
+
diff --git a/src/go/plugin/go.d/modules/dmcache/metadata.yaml b/src/go/plugin/go.d/modules/dmcache/metadata.yaml
new file mode 100644
index 000000000..58d9e4621
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/metadata.yaml
@@ -0,0 +1,131 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-dmcache
+ plugin_name: go.d.plugin
+ module_name: dmcache
+ monitored_instance:
+ name: DMCache devices
+ link: ""
+ icon_filename: filesystem.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - dmcache
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ This collector monitors DMCache, providing insights into capacity usage, efficiency, and activity.
+ It relies on the [`dmsetup`](https://man7.org/linux/man-pages/man8/dmsetup.8.html) CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/dmcache.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: dmsetup binary execution timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: dmcache
+ update_every: 5 # Collect DMCache statistics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: dmcache device
+ description: These metrics refer to the DMCache device.
+ labels:
+ - name: device
+ description: Device name
+ metrics:
+ - name: dmcache.device_cache_space_usage
+ description: DMCache space usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: dmcache.device_metadata_space_usage
+ description: DMCache metadata space usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: dmcache.device_cache_read_efficiency
+ description: DMCache read efficiency
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: dmcache.device_cache_write_efficiency
+ description: DMCache write efficiency
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: dmcache.device_cache_activity
+ description: DMCache activity
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: promotions
+ - name: demotions
+ - name: dmcache.device_cache_dirty_size
+ description: DMCache dirty data size
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: dirty
diff --git a/src/go/plugin/go.d/modules/dmcache/testdata/config.json b/src/go/plugin/go.d/modules/dmcache/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/dmcache/testdata/config.yaml b/src/go/plugin/go.d/modules/dmcache/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dmcache/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/dnsdist/README.md b/src/go/plugin/go.d/modules/dnsdist/README.md
new file mode 120000
index 000000000..c5fd71aa5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/README.md
@@ -0,0 +1 @@
+integrations/dnsdist.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsdist/charts.go b/src/go/plugin/go.d/modules/dnsdist/charts.go
new file mode 100644
index 000000000..24e1a8c89
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/charts.go
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsdist
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+var charts = module.Charts{
+ {
+ ID: "queries",
+ Title: "Client queries received",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "dnsdist.queries",
+ Dims: module.Dims{
+ {ID: "queries", Name: "all", Algo: module.Incremental},
+ {ID: "rdqueries", Name: "recursive", Algo: module.Incremental},
+ {ID: "empty-queries", Name: "empty", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "queries_dropped",
+ Title: "Client queries dropped",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "dnsdist.queries_dropped",
+ Dims: module.Dims{
+ {ID: "rule-drop", Name: "rule drop", Algo: module.Incremental},
+ {ID: "dyn-blocked", Name: "dynamic blocked", Algo: module.Incremental},
+ {ID: "no-policy", Name: "no policy", Algo: module.Incremental},
+ {ID: "noncompliant-queries", Name: "non queries", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "packets_dropped",
+ Title: "Packets dropped",
+ Units: "packets/s",
+ Fam: "packets",
+ Ctx: "dnsdist.packets_dropped",
+ Dims: module.Dims{
+ {ID: "acl-drops", Name: "acl", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "answers",
+ Title: "Answers statistics",
+ Units: "answers/s",
+ Fam: "answers",
+ Ctx: "dnsdist.answers",
+ Dims: module.Dims{
+ {ID: "self-answered", Name: "self answered", Algo: module.Incremental},
+ {ID: "rule-nxdomain", Name: "nxdomain", Algo: module.Incremental, Mul: -1},
+ {ID: "rule-refused", Name: "refused", Algo: module.Incremental, Mul: -1},
+ {ID: "trunc-failures", Name: "trunc failures", Algo: module.Incremental, Mul: -1},
+ },
+ },
+ {
+ ID: "backend_responses",
+ Title: "Backend responses",
+ Units: "responses/s",
+ Fam: "backends",
+ Ctx: "dnsdist.backend_responses",
+ Dims: module.Dims{
+ {ID: "responses", Name: "responses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "backend_commerrors",
+ Title: "Backend communication errors",
+ Units: "errors/s",
+ Fam: "backends",
+ Ctx: "dnsdist.backend_commerrors",
+ Dims: module.Dims{
+ {ID: "downstream-send-errors", Name: "send errors", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "backend_errors",
+ Title: "Backend error responses",
+ Units: "responses/s",
+ Fam: "backends",
+ Ctx: "dnsdist.backend_errors",
+ Dims: module.Dims{
+ {ID: "downstream-timeouts", Name: "timeouts", Algo: module.Incremental},
+ {ID: "servfail-responses", Name: "servfail", Algo: module.Incremental},
+ {ID: "noncompliant-responses", Name: "non compliant", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "cache",
+ Title: "Cache performance",
+ Units: "answers/s",
+ Fam: "cache",
+ Ctx: "dnsdist.cache",
+ Dims: module.Dims{
+ {ID: "cache-hits", Name: "hits", Algo: module.Incremental},
+ {ID: "cache-misses", Name: "misses", Algo: module.Incremental, Mul: -1},
+ },
+ },
+ {
+ ID: "servercpu",
+ Title: "DNSdist server CPU utilization",
+ Units: "ms/s",
+ Fam: "server",
+ Ctx: "dnsdist.servercpu",
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "cpu-sys-msec", Name: "system state", Algo: module.Incremental},
+ {ID: "cpu-user-msec", Name: "user state", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "servermem",
+ Title: "DNSdist server memory utilization",
+ Units: "MiB",
+ Fam: "server",
+ Ctx: "dnsdist.servermem",
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "real-memory-usage", Name: "memory usage", Div: 1 << 20},
+ },
+ },
+ {
+ ID: "query_latency",
+ Title: "Query latency",
+ Units: "queries/s",
+ Fam: "latency",
+ Ctx: "dnsdist.query_latency",
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "latency0-1", Name: "1ms", Algo: module.Incremental},
+ {ID: "latency1-10", Name: "10ms", Algo: module.Incremental},
+ {ID: "latency10-50", Name: "50ms", Algo: module.Incremental},
+ {ID: "latency50-100", Name: "100ms", Algo: module.Incremental},
+ {ID: "latency100-1000", Name: "1sec", Algo: module.Incremental},
+ {ID: "latency-slow", Name: "slow", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "query_latency_avg",
+ Title: "Average latency for the last N queries",
+ Units: "microseconds",
+ Fam: "latency",
+ Ctx: "dnsdist.query_latency_avg",
+ Dims: module.Dims{
+ {ID: "latency-avg100", Name: "100"},
+ {ID: "latency-avg1000", Name: "1k"},
+ {ID: "latency-avg10000", Name: "10k"},
+ {ID: "latency-avg1000000", Name: "1000k"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/dnsdist/collect.go b/src/go/plugin/go.d/modules/dnsdist/collect.go
new file mode 100644
index 000000000..9b860abf4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/collect.go
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsdist
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ urlPathJSONStat = "/jsonstat"
+)
+
+func (d *DNSdist) collect() (map[string]int64, error) {
+ statistics, err := d.scrapeStatistics()
+ if err != nil {
+ return nil, err
+ }
+
+ collected := make(map[string]int64)
+ d.collectStatistic(collected, statistics)
+
+ return collected, nil
+}
+
+func (d *DNSdist) collectStatistic(collected map[string]int64, statistics *statisticMetrics) {
+ for metric, value := range stm.ToMap(statistics) {
+ collected[metric] = value
+ }
+}
+
+func (d *DNSdist) scrapeStatistics() (*statisticMetrics, error) {
+ req, err := web.NewHTTPRequestWithPath(d.Request, urlPathJSONStat)
+ if err != nil {
+ return nil, err
+ }
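+ // the final request targets "<url>/jsonstat?command=stats"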
+ req.URL.RawQuery = url.Values{"command": []string{"stats"}}.Encode()
+
+ var statistics statisticMetrics
+ if err := d.doOKDecode(req, &statistics); err != nil {
+ return nil, err
+ }
+
+ return &statistics, nil
+}
+
+func (d *DNSdist) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := d.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+
+ return nil
+}
+
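+// closeBody drains and closes the response body so the HTTP client can reuse the
+// underlying connection (keep-alive).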
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dnsdist/config_schema.json b/src/go/plugin/go.d/modules/dnsdist/config_schema.json
new file mode 100644
index 000000000..a71faaa04
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "DNSDist collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the DNSDist [built-in webserver](https://dnsdist.org/guides/webserver.html).",
+ "type": "string",
+ "default": "http://127.0.0.1:8083",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dnsdist/dnsdist.go b/src/go/plugin/go.d/modules/dnsdist/dnsdist.go
new file mode 100644
index 000000000..fd0d8a381
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/dnsdist.go
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsdist
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("dnsdist", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 1,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *DNSdist {
+ return &DNSdist{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8083",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type DNSdist struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (d *DNSdist) Configuration() any {
+ return d.Config
+}
+
+func (d *DNSdist) Init() error {
+ err := d.validateConfig()
+ if err != nil {
+ d.Errorf("config validation: %v", err)
+ return err
+ }
+
+ client, err := d.initHTTPClient()
+ if err != nil {
+ d.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ d.httpClient = client
+
+ cs, err := d.initCharts()
+ if err != nil {
+ d.Errorf("init charts: %v", err)
+ return err
+ }
+ d.charts = cs
+
+ return nil
+}
+
+func (d *DNSdist) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (d *DNSdist) Charts() *module.Charts {
+ return d.charts
+}
+
+func (d *DNSdist) Collect() map[string]int64 {
+ ms, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+
+ return ms
+}
+
+func (d *DNSdist) Cleanup() {
+ if d.httpClient == nil {
+ return
+ }
+ d.httpClient.CloseIdleConnections()
+}
diff --git a/src/go/plugin/go.d/modules/dnsdist/dnsdist_test.go b/src/go/plugin/go.d/modules/dnsdist/dnsdist_test.go
new file mode 100644
index 000000000..18212c79d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/dnsdist_test.go
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsdist
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer151JSONStat, _ = os.ReadFile("testdata/v1.5.1/jsonstat.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer151JSONStat": dataVer151JSONStat,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDNSdist_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DNSdist{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDNSdist_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset URL": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ "fails on invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:38001",
+ },
+ Client: web.Client{
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ns := New()
+ ns.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, ns.Init())
+ } else {
+ assert.NoError(t, ns.Init())
+ }
+ })
+ }
+}
+
+func TestDNSdist_Charts(t *testing.T) {
+ dist := New()
+ require.NoError(t, dist.Init())
+ assert.NotNil(t, dist.Charts())
+}
+
+func TestDNSdist_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestDNSdist_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (dist *DNSdist, cleanup func())
+ wantFail bool
+ }{
+ "success on valid response v1.5.1": {
+ prepare: preparePowerDNSdistV151,
+ wantFail: false,
+ },
+ "fails on 404 response": {
+ prepare: preparePowerDNSdist404,
+ wantFail: true,
+ },
+ "fails on connection refused": {
+ prepare: preparePowerDNSdistConnectionRefused,
+ wantFail: true,
+ },
+ "fails with invalid data": {
+ prepare: preparePowerDNSdistInvalidData,
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dist, cleanup := test.prepare()
+ defer cleanup()
+ require.NoError(t, dist.Init())
+
+ if test.wantFail {
+ assert.Error(t, dist.Check())
+ } else {
+ assert.NoError(t, dist.Check())
+ }
+ })
+ }
+}
+
+func TestDNSdist_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (dist *DNSdist, cleanup func())
+ wantCollected map[string]int64
+ }{
+ "success on valid response v1.5.1": {
+ prepare: preparePowerDNSdistV151,
+ wantCollected: map[string]int64{
+ "acl-drops": 1,
+ "cache-hits": 1,
+ "cache-misses": 1,
+ "cpu-sys-msec": 411,
+ "cpu-user-msec": 939,
+ "downstream-send-errors": 1,
+ "downstream-timeouts": 1,
+ "dyn-blocked": 1,
+ "empty-queries": 1,
+ "latency-avg100": 14237,
+ "latency-avg1000": 9728,
+ "latency-avg10000": 1514,
+ "latency-avg1000000": 15,
+ "latency-slow": 1,
+ "latency0-1": 1,
+ "latency1-10": 3,
+ "latency10-50": 996,
+ "latency100-1000": 4,
+ "latency50-100": 1,
+ "no-policy": 1,
+ "noncompliant-queries": 1,
+ "noncompliant-responses": 1,
+ "queries": 1003,
+ "rdqueries": 1003,
+ "real-memory-usage": 202125312,
+ "responses": 1003,
+ "rule-drop": 1,
+ "rule-nxdomain": 1,
+ "rule-refused": 1,
+ "self-answered": 1,
+ "servfail-responses": 1,
+ "trunc-failures": 1,
+ },
+ },
+ "fails on 404 response": {
+ prepare: preparePowerDNSdist404,
+ },
+ "fails on connection refused": {
+ prepare: preparePowerDNSdistConnectionRefused,
+ },
+ "fails with invalid data": {
+ prepare: preparePowerDNSdistInvalidData,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dist, cleanup := test.prepare()
+ defer cleanup()
+ require.NoError(t, dist.Init())
+
+ collected := dist.Collect()
+
+ assert.Equal(t, test.wantCollected, collected)
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, dist, collected)
+ }
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dist *DNSdist, collected map[string]int64) {
+ for _, chart := range *dist.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+ }
+ }
+}
+
+func preparePowerDNSdistV151() (*DNSdist, func()) {
+ srv := preparePowerDNSDistEndpoint()
+ ns := New()
+ ns.URL = srv.URL
+
+ return ns, srv.Close
+}
+
+func preparePowerDNSdist404() (*DNSdist, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ ns := New()
+ ns.URL = srv.URL
+
+ return ns, srv.Close
+}
+
+func preparePowerDNSdistConnectionRefused() (*DNSdist, func()) {
+ ns := New()
+ ns.URL = "http://127.0.0.1:38001"
+
+ return ns, func() {}
+}
+
+func preparePowerDNSdistInvalidData() (*DNSdist, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ ns := New()
+ ns.URL = srv.URL
+
+ return ns, srv.Close
+}
+
+func preparePowerDNSDistEndpoint() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.String() {
+ case "/jsonstat?command=stats":
+ _, _ = w.Write(dataVer151JSONStat)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
diff --git a/src/go/plugin/go.d/modules/dnsdist/init.go b/src/go/plugin/go.d/modules/dnsdist/init.go
new file mode 100644
index 000000000..d331da928
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/init.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsdist
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (d *DNSdist) validateConfig() error {
+ if d.URL == "" {
+ return errors.New("URL not set")
+ }
+
+ if _, err := web.NewHTTPRequest(d.Request); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (d *DNSdist) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(d.Client)
+}
+
+func (d *DNSdist) initCharts() (*module.Charts, error) {
+ return charts.Copy(), nil
+}
diff --git a/src/go/plugin/go.d/modules/dnsdist/integrations/dnsdist.md b/src/go/plugin/go.d/modules/dnsdist/integrations/dnsdist.md
new file mode 100644
index 000000000..934245a57
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/integrations/dnsdist.md
@@ -0,0 +1,245 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dnsdist/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dnsdist/metadata.yaml"
+sidebar_label: "DNSdist"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# DNSdist
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: dnsdist
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors DNSdist servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per DNSdist instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dnsdist.queries | all, recursive, empty | queries/s |
+| dnsdist.queries_dropped | rule_drop, dynamic_blocked, no_policy, non_queries | queries/s |
+| dnsdist.packets_dropped | acl | packets/s |
+| dnsdist.answers | self_answered, nxdomain, refused, trunc_failures | answers/s |
+| dnsdist.backend_responses | responses | responses/s |
+| dnsdist.backend_commerrors | send_errors | errors/s |
+| dnsdist.backend_errors | timeouts, servfail, non_compliant | responses/s |
+| dnsdist.cache | hits, misses | answers/s |
+| dnsdist.servercpu | system_state, user_state | ms/s |
+| dnsdist.servermem | memory_usage | MiB |
+| dnsdist.query_latency | 1ms, 10ms, 50ms, 100ms, 1sec, slow | queries/s |
+| dnsdist.query_latency_avg | 100, 1k, 10k, 1000k | microseconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable DNSdist built-in Webserver
+
+For collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/dnsdist.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/dnsdist.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8083 | yes |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| timeout | HTTP request timeout. | 1 | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client tls certificate. | | no |
+| tls_key | Client tls key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8083
+ headers:
+ X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8083
+ headers:
+ X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).
+
+ - name: remote
+ url: http://203.0.113.0:8083
+ headers:
+ X-API-Key: 'your-api-key'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `dnsdist` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m dnsdist
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `dnsdist` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep dnsdist
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep dnsdist /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep dnsdist
+```
+
+
diff --git a/src/go/plugin/go.d/modules/dnsdist/metadata.yaml b/src/go/plugin/go.d/modules/dnsdist/metadata.yaml
new file mode 100644
index 000000000..4e7a45d39
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/metadata.yaml
@@ -0,0 +1,259 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-dnsdist
+ plugin_name: go.d.plugin
+ module_name: dnsdist
+ monitored_instance:
+ name: DNSdist
+ link: https://dnsdist.org/
+ icon_filename: network-wired.svg
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - dnsdist
+ - dns
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+          This collector monitors DNSdist servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable DNSdist built-in Webserver
+ description: |
+ For collecting metrics via HTTP, you need to [enable the built-in webserver](https://dnsdist.org/guides/webserver.html).
+ configuration:
+ file:
+ name: go.d/dnsdist.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8083
+ required: true
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client tls certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client tls key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8083
+ headers:
+ X-API-Key: your-api-key # static pre-shared authentication key for access to the REST API (api-key).
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8083
+ headers:
+ X-API-Key: 'your-api-key' # static pre-shared authentication key for access to the REST API (api-key).
+
+ - name: remote
+ url: http://203.0.113.0:8083
+ headers:
+ X-API-Key: 'your-api-key'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: dnsdist.queries
+ description: Client queries received
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: all
+ - name: recursive
+ - name: empty
+ - name: dnsdist.queries_dropped
+ description: Client queries dropped
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: rule_drop
+ - name: dynamic_blocked
+ - name: no_policy
+ - name: non_queries
+ - name: dnsdist.packets_dropped
+ description: Packets dropped
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: acl
+ - name: dnsdist.answers
+ description: Answers statistics
+ unit: answers/s
+ chart_type: line
+ dimensions:
+ - name: self_answered
+ - name: nxdomain
+ - name: refused
+ - name: trunc_failures
+ - name: dnsdist.backend_responses
+ description: Backend responses
+ unit: responses/s
+ chart_type: line
+ dimensions:
+ - name: responses
+ - name: dnsdist.backend_commerrors
+ description: Backend communication errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: send_errors
+ - name: dnsdist.backend_errors
+ description: Backend error responses
+ unit: responses/s
+ chart_type: line
+ dimensions:
+ - name: timeouts
+ - name: servfail
+ - name: non_compliant
+ - name: dnsdist.cache
+ description: Cache performance
+ unit: answers/s
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: dnsdist.servercpu
+ description: DNSdist server CPU utilization
+ unit: ms/s
+ chart_type: stacked
+ dimensions:
+ - name: system_state
+ - name: user_state
+ - name: dnsdist.servermem
+ description: DNSdist server memory utilization
+ unit: MiB
+ chart_type: area
+ dimensions:
+ - name: memory_usage
+ - name: dnsdist.query_latency
+ description: Query latency
+ unit: queries/s
+ chart_type: stacked
+ dimensions:
+ - name: 1ms
+ - name: 10ms
+ - name: 50ms
+ - name: 100ms
+ - name: 1sec
+ - name: slow
+ - name: dnsdist.query_latency_avg
+ description: Average latency for the last N queries
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: "100"
+ - name: 1k
+ - name: 10k
+ - name: 1000k
diff --git a/src/go/plugin/go.d/modules/dnsdist/metrics.go b/src/go/plugin/go.d/modules/dnsdist/metrics.go
new file mode 100644
index 000000000..1de04319d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/metrics.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsdist
+
+// https://dnsdist.org/guides/webserver.html#get--jsonstat
+// https://dnsdist.org/statistics.html
+
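+// statisticMetrics maps the statistics JSON returned by the /jsonstat?command=stats endpoint.
+// The `stm` struct tags define the keys under which each value appears in the collected
+// metrics map (compare the expected keys in dnsdist_test.go); values end up as int64 there.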
+type statisticMetrics struct {
+ AclDrops float64 `stm:"acl-drops" json:"acl-drops"`
+ CacheHits float64 `stm:"cache-hits" json:"cache-hits"`
+ CacheMisses float64 `stm:"cache-misses" json:"cache-misses"`
+ CPUSysMsec float64 `stm:"cpu-sys-msec" json:"cpu-sys-msec"`
+ CPUUserMsec float64 `stm:"cpu-user-msec" json:"cpu-user-msec"`
+ DownStreamSendErrors float64 `stm:"downstream-send-errors" json:"downstream-send-errors"`
+ DownStreamTimeout float64 `stm:"downstream-timeouts" json:"downstream-timeouts"`
+ DynBlocked float64 `stm:"dyn-blocked" json:"dyn-blocked"`
+ EmptyQueries float64 `stm:"empty-queries" json:"empty-queries"`
+ LatencyAvg100 float64 `stm:"latency-avg100" json:"latency-avg100"`
+ LatencyAvg1000 float64 `stm:"latency-avg1000" json:"latency-avg1000"`
+ LatencyAvg10000 float64 `stm:"latency-avg10000" json:"latency-avg10000"`
+ LatencyAvg1000000 float64 `stm:"latency-avg1000000" json:"latency-avg1000000"`
+ LatencySlow float64 `stm:"latency-slow" json:"latency-slow"`
+ Latency0 float64 `stm:"latency0-1" json:"latency0-1"`
+ Latency1 float64 `stm:"latency1-10" json:"latency1-10"`
+ Latency10 float64 `stm:"latency10-50" json:"latency10-50"`
+ Latency100 float64 `stm:"latency100-1000" json:"latency100-1000"`
+ Latency50 float64 `stm:"latency50-100" json:"latency50-100"`
+ NoPolicy float64 `stm:"no-policy" json:"no-policy"`
+ NonCompliantQueries float64 `stm:"noncompliant-queries" json:"noncompliant-queries"`
+ NonCompliantResponses float64 `stm:"noncompliant-responses" json:"noncompliant-responses"`
+ Queries float64 `stm:"queries" json:"queries"`
+ RdQueries float64 `stm:"rdqueries" json:"rdqueries"`
+ RealMemoryUsage float64 `stm:"real-memory-usage" json:"real-memory-usage"`
+ Responses float64 `stm:"responses" json:"responses"`
+ RuleDrop float64 `stm:"rule-drop" json:"rule-drop"`
+ RuleNxDomain float64 `stm:"rule-nxdomain" json:"rule-nxdomain"`
+ RuleRefused float64 `stm:"rule-refused" json:"rule-refused"`
+ SelfAnswered float64 `stm:"self-answered" json:"self-answered"`
+ ServFailResponses float64 `stm:"servfail-responses" json:"servfail-responses"`
+ TruncFailures float64 `stm:"trunc-failures" json:"trunc-failures"`
+}
diff --git a/src/go/plugin/go.d/modules/dnsdist/testdata/config.json b/src/go/plugin/go.d/modules/dnsdist/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/dnsdist/testdata/config.yaml b/src/go/plugin/go.d/modules/dnsdist/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/dnsdist/testdata/v1.5.1/jsonstat.json b/src/go/plugin/go.d/modules/dnsdist/testdata/v1.5.1/jsonstat.json
new file mode 100644
index 000000000..37b791e47
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsdist/testdata/v1.5.1/jsonstat.json
@@ -0,0 +1,56 @@
+{
+ "acl-drops": 1,
+ "cache-hits": 1,
+ "cache-misses": 1,
+ "cpu-iowait": 39284,
+ "cpu-steal": 1,
+ "cpu-sys-msec": 411,
+ "cpu-user-msec": 939,
+ "doh-query-pipe-full": 1,
+ "doh-response-pipe-full": 1,
+ "downstream-send-errors": 1,
+ "downstream-timeouts": 1,
+ "dyn-block-nmg-size": 1,
+ "dyn-blocked": 1,
+ "empty-queries": 1,
+ "fd-usage": 22,
+ "frontend-noerror": 1003,
+ "frontend-nxdomain": 1,
+ "frontend-servfail": 1,
+ "latency-avg100": 14237.416845242331,
+ "latency-avg1000": 9728.0972656536997,
+ "latency-avg10000": 1514.0804874856037,
+ "latency-avg1000000": 15.0804874856037,
+ "latency-count": 1003,
+ "latency-slow": 1,
+ "latency-sum": 15474,
+ "latency0-1": 1,
+ "latency1-10": 3,
+ "latency10-50": 996,
+ "latency100-1000": 4,
+ "latency50-100": 1,
+ "no-policy": 1,
+ "noncompliant-queries": 1,
+ "noncompliant-responses": 1,
+ "over-capacity-drops": 1,
+ "packetcache-hits": 1,
+ "packetcache-misses": 1,
+ "queries": 1003,
+ "rdqueries": 1003,
+ "real-memory-usage": 202125312,
+ "responses": 1003,
+ "rule-drop": 1,
+ "rule-nxdomain": 1,
+ "rule-refused": 1,
+ "rule-servfail": 1,
+ "security-status": 1,
+ "self-answered": 1,
+ "servfail-responses": 1,
+ "too-old-drops": 1,
+ "trunc-failures": 1,
+ "udp-in-errors": 38,
+ "udp-noport-errors": 1102,
+ "udp-recvbuf-errors": 1,
+ "udp-sndbuf-errors": 179,
+ "uptime": 394
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq/README.md b/src/go/plugin/go.d/modules/dnsmasq/README.md
new file mode 120000
index 000000000..a424dd9c6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/README.md
@@ -0,0 +1 @@
+integrations/dnsmasq.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq/charts.go b/src/go/plugin/go.d/modules/dnsmasq/charts.go
new file mode 100644
index 000000000..403e7862c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/charts.go
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+var cacheCharts = module.Charts{
+ {
+ ID: "servers_queries",
+ Title: "Queries forwarded to the upstream servers",
+ Units: "queries/s",
+ Fam: "servers",
+ Ctx: "dnsmasq.servers_queries",
+ Dims: module.Dims{
+ {ID: "queries", Name: "success", Algo: module.Incremental},
+ {ID: "failed_queries", Name: "failed", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "cache_performance",
+ Title: "Cache performance",
+ Units: "events/s",
+ Fam: "cache",
+ Ctx: "dnsmasq.cache_performance",
+ Dims: module.Dims{
+ {ID: "hits", Algo: module.Incremental},
+ {ID: "misses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "cache_operations",
+ Title: "Cache operations",
+ Units: "operations/s",
+ Fam: "cache",
+ Ctx: "dnsmasq.cache_operations",
+ Dims: module.Dims{
+ {ID: "insertions", Algo: module.Incremental},
+ {ID: "evictions", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "cache_size",
+ Title: "Cache size",
+ Units: "entries",
+ Fam: "cache",
+ Ctx: "dnsmasq.cache_size",
+ Dims: module.Dims{
+ {ID: "cachesize", Name: "size"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq/collect.go b/src/go/plugin/go.d/modules/dnsmasq/collect.go
new file mode 100644
index 000000000..9f3f963f0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/collect.go
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/miekg/dns"
+)
+
+func (d *Dnsmasq) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := d.collectCacheStatistics(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (d *Dnsmasq) collectCacheStatistics(mx map[string]int64) error {
+ /*
+ ;; flags: qr aa rd ra; QUERY: 7, ANSWER: 7, AUTHORITY: 0, ADDITIONAL: 0
+
+ ;; QUESTION SECTION:
+ ;cachesize.bind. CH TXT
+ ;insertions.bind. CH TXT
+ ;evictions.bind. CH TXT
+ ;hits.bind. CH TXT
+ ;misses.bind. CH TXT
+ ;auth.bind. CH TXT
+ ;servers.bind. CH TXT
+
+ ;; ANSWER SECTION:
+ cachesize.bind. 0 CH TXT "150"
+ insertions.bind. 0 CH TXT "1"
+ evictions.bind. 0 CH TXT "0"
+ hits.bind. 0 CH TXT "176"
+ misses.bind. 0 CH TXT "4"
+ auth.bind. 0 CH TXT "0"
+ servers.bind. 0 CH TXT "10.0.0.1#53 0 0" "1.1.1.1#53 4 3" "1.0.0.1#53 3 0"
+ */
+
+ questions := []string{
+ "servers.bind.",
+ "cachesize.bind.",
+ "insertions.bind.",
+ "evictions.bind.",
+ "hits.bind.",
+ "misses.bind.",
+ // auth.bind query is only supported if dnsmasq has been built to support running as an authoritative name server
+ // See https://github.com/netdata/netdata/issues/13766
+ //"auth.bind.",
+ }
+
+ for _, q := range questions {
+ resp, err := d.query(q)
+ if err != nil {
+ return err
+ }
+
+ for _, a := range resp.Answer {
+ txt, ok := a.(*dns.TXT)
+ if !ok {
+ continue
+ }
+
+ idx := strings.IndexByte(txt.Hdr.Name, '.')
+ if idx == -1 {
+ continue
+ }
+
+ name := txt.Hdr.Name[:idx]
+
+ switch name {
+ case "servers":
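+				// Each TXT string describes one upstream server as "ip#port <queries> <failed queries>";
+				// the values are summed across all servers.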
+ for _, entry := range txt.Txt {
+ parts := strings.Fields(entry)
+ if len(parts) != 3 {
+						return fmt.Errorf("parse '%s' (%s): unexpected format", txt.Hdr.Name, entry)
+ }
+ queries, err := strconv.ParseFloat(parts[1], 64)
+ if err != nil {
+ return fmt.Errorf("parse '%s' (%s): %v", txt.Hdr.Name, entry, err)
+ }
+ failedQueries, err := strconv.ParseFloat(parts[2], 64)
+ if err != nil {
+ return fmt.Errorf("parse '%s' (%s): %v", txt.Hdr.Name, entry, err)
+ }
+
+ mx["queries"] += int64(queries)
+ mx["failed_queries"] += int64(failedQueries)
+ }
+ case "cachesize", "insertions", "evictions", "hits", "misses", "auth":
+ if len(txt.Txt) != 1 {
+ return fmt.Errorf("parse '%s' (%v): unexpected format", txt.Hdr.Name, txt.Txt)
+ }
+ v, err := strconv.ParseFloat(txt.Txt[0], 64)
+ if err != nil {
+ return fmt.Errorf("parse '%s' (%s): %v", txt.Hdr.Name, txt.Txt[0], err)
+ }
+
+ mx[name] = int64(v)
+ }
+ }
+ }
+
+ return nil
+}
+
+func (d *Dnsmasq) query(question string) (*dns.Msg, error) {
+ msg := &dns.Msg{
+ MsgHdr: dns.MsgHdr{
+ Id: dns.Id(),
+ RecursionDesired: true,
+ },
+ Question: []dns.Question{
+ {Name: question, Qtype: dns.TypeTXT, Qclass: dns.ClassCHAOS},
+ },
+ }
+
+ r, _, err := d.dnsClient.Exchange(msg, d.Address)
+ if err != nil {
+ return nil, err
+ }
+
+ if r == nil {
+		return nil, fmt.Errorf("'%s' question '%s' returned an empty response", d.Address, question)
+ }
+
+ if r.Rcode != dns.RcodeSuccess {
+ s := dns.RcodeToString[r.Rcode]
+ return nil, fmt.Errorf("'%s' question '%s' returned '%s' (%d) response code", d.Address, question, s, r.Rcode)
+ }
+
+ return r, nil
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq/config_schema.json b/src/go/plugin/go.d/modules/dnsmasq/config_schema.json
new file mode 100644
index 000000000..79396b364
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/config_schema.json
@@ -0,0 +1,61 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Dnsmasq collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the Dnsmasq daemon listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:53"
+ },
+ "protocol": {
+ "title": "Protocol",
+ "description": "DNS query transport protocol.",
+ "type": "string",
+ "enum": [
+ "udp",
+ "tcp",
+ "tcp-tls"
+ ],
+ "default": "udp"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "default": 1
+ }
+ },
+ "required": [
+ "address",
+ "protocol"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "protocol": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq/dnsmasq.go b/src/go/plugin/go.d/modules/dnsmasq/dnsmasq.go
new file mode 100644
index 000000000..2d2112c05
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/dnsmasq.go
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/miekg/dns"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("dnsmasq", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Dnsmasq {
+ return &Dnsmasq{
+ Config: Config{
+ Protocol: "udp",
+ Address: "127.0.0.1:53",
+ Timeout: web.Duration(time.Second),
+ },
+
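+		// newDNSClient is a factory rather than a concrete client so tests can inject a mock dnsClient.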
+ newDNSClient: func(network string, timeout time.Duration) dnsClient {
+ return &dns.Client{
+ Net: network,
+ Timeout: timeout,
+ }
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Protocol string `yaml:"protocol,omitempty" json:"protocol"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ Dnsmasq struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ dnsClient dnsClient
+ newDNSClient func(network string, timeout time.Duration) dnsClient
+ }
+ dnsClient interface {
+ Exchange(msg *dns.Msg, address string) (resp *dns.Msg, rtt time.Duration, err error)
+ }
+)
+
+func (d *Dnsmasq) Configuration() any {
+ return d.Config
+}
+
+func (d *Dnsmasq) Init() error {
+ err := d.validateConfig()
+ if err != nil {
+ d.Errorf("config validation: %v", err)
+ return err
+ }
+
+ client, err := d.initDNSClient()
+ if err != nil {
+ d.Errorf("init DNS client: %v", err)
+ return err
+ }
+ d.dnsClient = client
+
+ charts, err := d.initCharts()
+ if err != nil {
+ d.Errorf("init charts: %v", err)
+ return err
+ }
+ d.charts = charts
+
+ return nil
+}
+
+func (d *Dnsmasq) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
+}
+
+func (d *Dnsmasq) Charts() *module.Charts {
+ return d.charts
+}
+
+func (d *Dnsmasq) Collect() map[string]int64 {
+ ms, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+
+func (d *Dnsmasq) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/dnsmasq/dnsmasq_test.go b/src/go/plugin/go.d/modules/dnsmasq/dnsmasq_test.go
new file mode 100644
index 000000000..b3d54ac9c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/dnsmasq_test.go
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/miekg/dns"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDnsmasq_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Dnsmasq{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDnsmasq_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset 'address'": {
+ wantFail: true,
+ config: Config{
+ Protocol: "udp",
+ Address: "",
+ },
+ },
+ "fails on unset 'protocol'": {
+ wantFail: true,
+ config: Config{
+ Protocol: "",
+ Address: "127.0.0.1:53",
+ },
+ },
+ "fails on invalid 'protocol'": {
+ wantFail: true,
+ config: Config{
+ Protocol: "http",
+ Address: "127.0.0.1:53",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ns := New()
+ ns.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, ns.Init())
+ } else {
+ assert.NoError(t, ns.Init())
+ }
+ })
+ }
+}
+
+func TestDnsmasq_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Dnsmasq
+ wantFail bool
+ }{
+ "success on valid response": {
+ prepare: prepareOKDnsmasq,
+ },
+ "fails on error on cache stats query": {
+ wantFail: true,
+ prepare: prepareErrorOnExchangeDnsmasq,
+ },
+ "fails on response rcode is not success": {
+ wantFail: true,
+ prepare: prepareRcodeServerFailureOnExchangeDnsmasq,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dnsmasq := test.prepare()
+ require.NoError(t, dnsmasq.Init())
+
+ if test.wantFail {
+ assert.Error(t, dnsmasq.Check())
+ } else {
+ assert.NoError(t, dnsmasq.Check())
+ }
+ })
+ }
+}
+
+func TestDnsmasq_Charts(t *testing.T) {
+ dnsmasq := New()
+ require.NoError(t, dnsmasq.Init())
+ assert.NotNil(t, dnsmasq.Charts())
+}
+
+func TestDnsmasq_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestDnsmasq_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Dnsmasq
+ wantCollected map[string]int64
+ }{
+ "success on valid response": {
+ prepare: prepareOKDnsmasq,
+ wantCollected: map[string]int64{
+ //"auth": 5,
+ "cachesize": 999,
+ "evictions": 5,
+ "failed_queries": 9,
+ "hits": 100,
+ "insertions": 10,
+ "misses": 50,
+ "queries": 17,
+ },
+ },
+ "fails on error on cache stats query": {
+ prepare: prepareErrorOnExchangeDnsmasq,
+ },
+ "fails on response rcode is not success": {
+ prepare: prepareRcodeServerFailureOnExchangeDnsmasq,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dnsmasq := test.prepare()
+ require.NoError(t, dnsmasq.Init())
+
+ collected := dnsmasq.Collect()
+
+ assert.Equal(t, test.wantCollected, collected)
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, dnsmasq, collected)
+ }
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dnsmasq *Dnsmasq, collected map[string]int64) {
+ for _, chart := range *dnsmasq.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+			assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+			assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+ }
+ }
+}
+
+func prepareOKDnsmasq() *Dnsmasq {
+ dnsmasq := New()
+ dnsmasq.newDNSClient = func(network string, timeout time.Duration) dnsClient {
+ return &mockDNSClient{}
+ }
+ return dnsmasq
+}
+
+func prepareErrorOnExchangeDnsmasq() *Dnsmasq {
+ dnsmasq := New()
+ dnsmasq.newDNSClient = func(network string, timeout time.Duration) dnsClient {
+ return &mockDNSClient{
+ errOnExchange: true,
+ }
+ }
+ return dnsmasq
+}
+
+func prepareRcodeServerFailureOnExchangeDnsmasq() *Dnsmasq {
+ dnsmasq := New()
+ dnsmasq.newDNSClient = func(network string, timeout time.Duration) dnsClient {
+ return &mockDNSClient{
+ rcodeServerFailureOnExchange: true,
+ }
+ }
+ return dnsmasq
+}
+
+type mockDNSClient struct {
+ errOnExchange bool
+ rcodeServerFailureOnExchange bool
+}
+
+func (m mockDNSClient) Exchange(msg *dns.Msg, _ string) (*dns.Msg, time.Duration, error) {
+ if m.errOnExchange {
+ return nil, 0, errors.New("'Exchange' error")
+ }
+ if m.rcodeServerFailureOnExchange {
+ resp := &dns.Msg{MsgHdr: dns.MsgHdr{Rcode: dns.RcodeServerFailure}}
+ return resp, 0, nil
+ }
+
+ var answers []dns.RR
+ for _, q := range msg.Question {
+ a, err := prepareDNSAnswer(q)
+ if err != nil {
+ return nil, 0, err
+ }
+ answers = append(answers, a)
+ }
+
+ resp := &dns.Msg{
+ MsgHdr: dns.MsgHdr{
+ Rcode: dns.RcodeSuccess,
+ },
+ Answer: answers,
+ }
+ return resp, 0, nil
+}
+
+func prepareDNSAnswer(q dns.Question) (dns.RR, error) {
+ if want, got := dns.TypeToString[dns.TypeTXT], dns.TypeToString[q.Qtype]; want != got {
+ return nil, fmt.Errorf("unexpected Qtype, want=%s, got=%s", want, got)
+ }
+ if want, got := dns.ClassToString[dns.ClassCHAOS], dns.ClassToString[q.Qclass]; want != got {
+ return nil, fmt.Errorf("unexpected Qclass, want=%s, got=%s", want, got)
+ }
+
+ var txt []string
+ switch q.Name {
+ case "cachesize.bind.":
+ txt = []string{"999"}
+ case "insertions.bind.":
+ txt = []string{"10"}
+ case "evictions.bind.":
+ txt = []string{"5"}
+ case "hits.bind.":
+ txt = []string{"100"}
+ case "misses.bind.":
+ txt = []string{"50"}
+ case "auth.bind.":
+ txt = []string{"5"}
+ case "servers.bind.":
+ txt = []string{"10.0.0.1#53 10 5", "1.1.1.1#53 4 3", "1.0.0.1#53 3 1"}
+ default:
+ return nil, fmt.Errorf("unexpected question Name: %s", q.Name)
+ }
+
+ rr := &dns.TXT{
+ Hdr: dns.RR_Header{
+ Name: q.Name,
+ Rrtype: dns.TypeTXT,
+ Class: dns.ClassCHAOS,
+ },
+ Txt: txt,
+ }
+ return rr, nil
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq/init.go b/src/go/plugin/go.d/modules/dnsmasq/init.go
new file mode 100644
index 000000000..a660ac774
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/init.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (d *Dnsmasq) validateConfig() error {
+ if d.Address == "" {
+ return errors.New("'address' parameter not set")
+ }
+ if !isProtocolValid(d.Protocol) {
+ return fmt.Errorf("'protocol' (%s) is not valid, expected one of %v", d.Protocol, validProtocols)
+ }
+ return nil
+}
+
+func (d *Dnsmasq) initDNSClient() (dnsClient, error) {
+ return d.newDNSClient(d.Protocol, d.Timeout.Duration()), nil
+}
+
+func (d *Dnsmasq) initCharts() (*module.Charts, error) {
+ return cacheCharts.Copy(), nil
+}
+
+func isProtocolValid(protocol string) bool {
+ for _, v := range validProtocols {
+ if protocol == v {
+ return true
+ }
+ }
+ return false
+}
+
+var validProtocols = []string{
+ "udp",
+ "tcp",
+ "tcp-tls",
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq/integrations/dnsmasq.md b/src/go/plugin/go.d/modules/dnsmasq/integrations/dnsmasq.md
new file mode 100644
index 000000000..d5c358a29
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/integrations/dnsmasq.md
@@ -0,0 +1,230 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dnsmasq/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dnsmasq/metadata.yaml"
+sidebar_label: "Dnsmasq"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dnsmasq
+
+
+<img src="https://netdata.cloud/img/dnsmasq.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: dnsmasq
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Dnsmasq servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Dnsmasq instance
+
+The metrics apply to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dnsmasq.servers_queries | success, failed | queries/s |
+| dnsmasq.cache_performance | hits, misses | events/s |
+| dnsmasq.cache_operations | insertions, evictions | operations/s |
+| dnsmasq.cache_size | size | entries |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/dnsmasq.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/dnsmasq.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Server address in `ip:port` format. | 127.0.0.1:53 | yes |
+| protocol | DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls. | udp | no |
+| timeout | DNS query timeout (dial, write and read) in seconds. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:53
+
+```
+</details>
+
+##### Using TCP protocol
+
+Local server with specific DNS query transport protocol.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:53
+ protocol: tcp
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:53
+
+ - name: remote
+ address: 203.0.113.0:53
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `dnsmasq` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m dnsmasq
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `dnsmasq` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep dnsmasq
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep dnsmasq /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep dnsmasq
+```
+
+
diff --git a/src/go/plugin/go.d/modules/dnsmasq/metadata.yaml b/src/go/plugin/go.d/modules/dnsmasq/metadata.yaml
new file mode 100644
index 000000000..6911a323a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/metadata.yaml
@@ -0,0 +1,144 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-dnsmasq
+ plugin_name: go.d.plugin
+ module_name: dnsmasq
+ monitored_instance:
+ name: Dnsmasq
+ link: https://thekelleys.org.uk/dnsmasq/doc.html
+ icon_filename: dnsmasq.svg
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - dnsmasq
+ - dns
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Dnsmasq servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/dnsmasq.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: Server address in `ip:port` format.
+ default_value: 127.0.0.1:53
+ required: true
+ - name: protocol
+ description: 'DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls.'
+ default_value: udp
+ required: false
+ - name: timeout
+ description: DNS query timeout (dial, write and read) in seconds.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:53
+ - name: Using TCP protocol
+ description: Local server with specific DNS query transport protocol.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:53
+ protocol: tcp
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:53
+
+ - name: remote
+ address: 203.0.113.0:53
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: The metrics apply to the entire monitored application.
+ labels: []
+ metrics:
+ - name: dnsmasq.servers_queries
+ description: Queries forwarded to the upstream servers
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failed
+ - name: dnsmasq.cache_performance
+ description: Cache performance
+ unit: events/s
+ chart_type: line
+ dimensions:
+                - name: hits
+ - name: misses
+ - name: dnsmasq.cache_operations
+ description: Cache operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: insertions
+ - name: evictions
+ - name: dnsmasq.cache_size
+ description: Cache size
+ unit: entries
+ chart_type: line
+ dimensions:
+ - name: size
diff --git a/src/go/plugin/go.d/modules/dnsmasq/testdata/config.json b/src/go/plugin/go.d/modules/dnsmasq/testdata/config.json
new file mode 100644
index 000000000..4fff563b8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "protocol": "ok",
+ "address": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq/testdata/config.yaml b/src/go/plugin/go.d/modules/dnsmasq/testdata/config.yaml
new file mode 100644
index 000000000..1a79b8773
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+protocol: "ok"
+address: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/README.md b/src/go/plugin/go.d/modules/dnsmasq_dhcp/README.md
new file mode 120000
index 000000000..ad22eb4ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/README.md
@@ -0,0 +1 @@
+integrations/dnsmasq_dhcp.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/charts.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/charts.go
new file mode 100644
index 000000000..bcef8aa3f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/charts.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq_dhcp
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioDHCPRangeUtilization = module.Priority + iota
+ prioDHCPRangeAllocatesLeases
+ prioDHCPRanges
+ prioDHCPHosts
+)
+
+var charts = module.Charts{
+ {
+ ID: "dhcp_ranges",
+ Title: "Number of DHCP Ranges",
+ Units: "ranges",
+ Fam: "dhcp ranges",
+ Ctx: "dnsmasq_dhcp.dhcp_ranges",
+ Type: module.Stacked,
+ Priority: prioDHCPRanges,
+ Dims: module.Dims{
+ {ID: "ipv4_dhcp_ranges", Name: "ipv4"},
+ {ID: "ipv6_dhcp_ranges", Name: "ipv6"},
+ },
+ },
+ {
+ ID: "dhcp_hosts",
+ Title: "Number of DHCP Hosts",
+ Units: "hosts",
+ Fam: "dhcp hosts",
+ Ctx: "dnsmasq_dhcp.dhcp_host",
+ Type: module.Stacked,
+ Priority: prioDHCPHosts,
+ Dims: module.Dims{
+ {ID: "ipv4_dhcp_hosts", Name: "ipv4"},
+ {ID: "ipv6_dhcp_hosts", Name: "ipv6"},
+ },
+ },
+}
+
+var (
+ chartsTmpl = module.Charts{
+ chartTmplDHCPRangeUtilization.Copy(),
+ chartTmplDHCPRangeAllocatedLeases.Copy(),
+ }
+)
+
+var (
+ chartTmplDHCPRangeUtilization = module.Chart{
+ ID: "dhcp_range_%s_utilization",
+ Title: "DHCP Range utilization",
+ Units: "percentage",
+ Fam: "dhcp range utilization",
+ Ctx: "dnsmasq_dhcp.dhcp_range_utilization",
+ Type: module.Area,
+ Priority: prioDHCPRangeUtilization,
+ Dims: module.Dims{
+ {ID: "dhcp_range_%s_utilization", Name: "used"},
+ },
+ }
+ chartTmplDHCPRangeAllocatedLeases = module.Chart{
+ ID: "dhcp_range_%s_allocated_leases",
+ Title: "DHCP Range Allocated Leases",
+ Units: "leases",
+ Fam: "dhcp range leases",
+ Ctx: "dnsmasq_dhcp.dhcp_range_allocated_leases",
+ Priority: prioDHCPRangeAllocatesLeases,
+ Dims: module.Dims{
+ {ID: "dhcp_range_%s_allocated_leases", Name: "leases"},
+ },
+ }
+)
+
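+// newDHCPRangeCharts instantiates the per-range chart templates, substituting the range
+// string into the chart and dimension IDs and attaching it as a "dhcp_range" label.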
+func newDHCPRangeCharts(dhcpRange string) *module.Charts {
+ charts := chartsTmpl.Copy()
+
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, dhcpRange)
+ c.Labels = []module.Label{
+ {Key: "dhcp_range", Value: dhcpRange},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, dhcpRange)
+ }
+ }
+ return charts
+}
+
+func (d *DnsmasqDHCP) addDHCPRangeCharts(dhcpRange string) {
+ charts := newDHCPRangeCharts(dhcpRange)
+ if err := d.Charts().Add(*charts...); err != nil {
+ d.Warning(err)
+ }
+}
+
+func (d *DnsmasqDHCP) removeDHCPRangeCharts(dhcpRange string) {
+ p := "dhcp_range_" + dhcpRange
+ for _, c := range *d.Charts() {
+ if strings.HasSuffix(c.ID, p) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/collect.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/collect.go
new file mode 100644
index 000000000..6de2fa215
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/collect.go
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq_dhcp
+
+import (
+ "bufio"
+ "io"
+ "math"
+ "math/big"
+ "net"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/iprange"
+)
+
+func (d *DnsmasqDHCP) collect() (map[string]int64, error) {
+ now := time.Now()
+ var updated bool
+
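+	// Re-parse the dnsmasq configuration at most once per parseConfigEvery to pick up
+	// added or removed DHCP ranges and hosts.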
+ if now.Sub(d.parseConfigTime) > d.parseConfigEvery {
+ d.parseConfigTime = now
+
+ dhcpRanges, dhcpHosts := d.parseDnsmasqDHCPConfiguration()
+ d.dhcpRanges, d.dhcpHosts = dhcpRanges, dhcpHosts
+ updated = d.updateCharts()
+
+ d.collectV4V6Stats()
+ }
+
+ f, err := os.Open(d.LeasesPath)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = f.Close() }()
+
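+	// Re-read the leases file when the range set changed or when its modification
+	// time differs from the previous run; otherwise reuse the cached metrics.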
+ if !updated {
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ if d.leasesModTime.Equal(fi.ModTime()) {
+ d.Debug("lease database file modification time has not changed, old data is returned")
+ return d.mx, nil
+ }
+
+ d.Debug("leases db file modification time has changed, reading it")
+ d.leasesModTime = fi.ModTime()
+ }
+
+ leases := findLeases(f)
+ d.collectRangesStats(leases)
+
+ return d.mx, nil
+}
+
+func (d *DnsmasqDHCP) collectV4V6Stats() {
+ d.mx["ipv4_dhcp_ranges"], d.mx["ipv6_dhcp_ranges"] = 0, 0
+ for _, r := range d.dhcpRanges {
+ if r.Family() == iprange.V6Family {
+ d.mx["ipv6_dhcp_ranges"]++
+ } else {
+ d.mx["ipv4_dhcp_ranges"]++
+ }
+ }
+
+ d.mx["ipv4_dhcp_hosts"], d.mx["ipv6_dhcp_hosts"] = 0, 0
+ for _, ip := range d.dhcpHosts {
+ if ip.To4() == nil {
+ d.mx["ipv6_dhcp_hosts"]++
+ } else {
+ d.mx["ipv4_dhcp_hosts"]++
+ }
+ }
+}
+
+func (d *DnsmasqDHCP) collectRangesStats(leases []net.IP) {
+ for _, r := range d.dhcpRanges {
+ d.mx["dhcp_range_"+r.String()+"_allocated_leases"] = 0
+ d.mx["dhcp_range_"+r.String()+"_utilization"] = 0
+ }
+
+ for _, ip := range leases {
+ for _, r := range d.dhcpRanges {
+ if r.Contains(ip) {
+ d.mx["dhcp_range_"+r.String()+"_allocated_leases"]++
+ break
+ }
+ }
+ }
+
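+	// DHCP hosts defined in the configuration also occupy addresses inside the ranges,
+	// so count them as allocated as well.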
+ for _, ip := range d.dhcpHosts {
+ for _, r := range d.dhcpRanges {
+ if r.Contains(ip) {
+ d.mx["dhcp_range_"+r.String()+"_allocated_leases"]++
+ break
+ }
+ }
+ }
+
+ for _, r := range d.dhcpRanges {
+ name := "dhcp_range_" + r.String() + "_allocated_leases"
+ numOfIps, ok := d.mx[name]
+ if !ok {
+ d.mx[name] = 0
+ }
+ d.mx["dhcp_range_"+r.String()+"_utilization"] = int64(math.Round(calcPercent(numOfIps, r.Size())))
+ }
+}
+
+func (d *DnsmasqDHCP) updateCharts() bool {
+ var updated bool
+ seen := make(map[string]bool)
+ for _, r := range d.dhcpRanges {
+ seen[r.String()] = true
+ if !d.cacheDHCPRanges[r.String()] {
+ d.cacheDHCPRanges[r.String()] = true
+ d.addDHCPRangeCharts(r.String())
+ updated = true
+ }
+ }
+
+ for v := range d.cacheDHCPRanges {
+ if !seen[v] {
+ delete(d.cacheDHCPRanges, v)
+ d.removeDHCPRangeCharts(v)
+ updated = true
+ }
+ }
+ return updated
+}
+
+func findLeases(r io.Reader) []net.IP {
+ /*
+ 1560300536 08:00:27:61:3c:ee 2.2.2.3 debian8 *
+ duid 00:01:00:01:24:90:cf:5b:08:00:27:61:2e:2c
+ 1560300414 660684014 1234::20b * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee
+ */
+ var ips []net.IP
+ s := bufio.NewScanner(r)
+
+ for s.Scan() {
+ parts := strings.Fields(s.Text())
+ if len(parts) != 5 {
+ continue
+ }
+
+ ip := net.ParseIP(parts[2])
+ if ip == nil {
+ continue
+ }
+ ips = append(ips, ip)
+ }
+
+ return ips
+}
+
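+// calcPercent returns the percentage of the range occupied by the allocated leases.
+// Ranges whose size does not fit into int64 (very large IPv6 ranges) report 0 to avoid overflow.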
+func calcPercent(ips int64, hosts *big.Int) float64 {
+ h := hosts.Int64()
+ if ips == 0 || h == 0 || !hosts.IsInt64() {
+ return 0
+ }
+ return float64(ips) * 100 / float64(h)
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/config_schema.json b/src/go/plugin/go.d/modules/dnsmasq_dhcp/config_schema.json
new file mode 100644
index 000000000..f51a3b2a2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/config_schema.json
@@ -0,0 +1,50 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Dnsmasq DHCP collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "leases_path": {
+ "title": "Leases file",
+ "description": "File path to the Dnsmasq DHCP server's lease database.",
+ "type": "string",
+ "default": "/var/lib/misc/dnsmasq.leases",
+ "pattern": "^$|^/"
+ },
+ "conf_path": {
+ "title": "Config file",
+ "description": "File path for the Dnsmasq configuration. Used to find all configured DHCP ranges.",
+ "type": "string",
+ "default": "/etc/dnsmasq.conf",
+ "pattern": "^$|^/"
+ },
+ "conf_dir": {
+ "title": "Config directory",
+ "description": "Directory path for Dnsmasq configurations. The syntax follows the same format as the [--conf-dir](https://thekelleys.org.uk/dnsmasq/docs/dnsmasq-man.html) option.",
+ "type": "string",
+ "default": "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new",
+ "pattern": "^$|^/"
+ }
+ },
+ "required": [
+ "leases_path",
+ "conf_path"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp.go
new file mode 100644
index 000000000..de56723f7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq_dhcp
+
+import (
+ _ "embed"
+ "errors"
+ "net"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/iprange"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("dnsmasq_dhcp", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *DnsmasqDHCP {
+ return &DnsmasqDHCP{
+ Config: Config{
+ // debian defaults
+ LeasesPath: "/var/lib/misc/dnsmasq.leases",
+ ConfPath: "/etc/dnsmasq.conf",
+ ConfDir: "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new",
+ },
+ charts: charts.Copy(),
+ parseConfigEvery: time.Minute,
+ cacheDHCPRanges: make(map[string]bool),
+ mx: make(map[string]int64),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ LeasesPath string `yaml:"leases_path" json:"leases_path"`
+ ConfPath string `yaml:"conf_path,omitempty" json:"conf_path"`
+ ConfDir string `yaml:"conf_dir,omitempty" json:"conf_dir"`
+}
+
+type DnsmasqDHCP struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ leasesModTime time.Time
+ parseConfigTime time.Time
+ parseConfigEvery time.Duration
+ dhcpRanges []iprange.Range
+ dhcpHosts []net.IP
+ cacheDHCPRanges map[string]bool
+
+ mx map[string]int64
+}
+
+func (d *DnsmasqDHCP) Configuration() any {
+ return d.Config
+}
+
+func (d *DnsmasqDHCP) Init() error {
+ if err := d.validateConfig(); err != nil {
+ d.Errorf("config validation: %v", err)
+ return err
+ }
+ if err := d.checkLeasesPath(); err != nil {
+ d.Errorf("leases path check: %v", err)
+ return err
+ }
+
+ return nil
+}
+
+func (d *DnsmasqDHCP) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (d *DnsmasqDHCP) Charts() *module.Charts {
+ return d.charts
+}
+
+func (d *DnsmasqDHCP) Collect() map[string]int64 {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (d *DnsmasqDHCP) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp_test.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp_test.go
new file mode 100644
index 000000000..16e0f17d0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/dhcp_test.go
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq_dhcp
+
+import (
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+const (
+ testLeasesPath = "testdata/dnsmasq.leases"
+ testConfPath = "testdata/dnsmasq.conf"
+ testConfDir = "testdata/dnsmasq.d"
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDnsmasqDHCP_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DnsmasqDHCP{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDnsmasqDHCP_Init(t *testing.T) {
+ job := New()
+ job.LeasesPath = testLeasesPath
+ job.ConfPath = testConfPath
+ job.ConfDir = testConfDir
+
+ assert.NoError(t, job.Init())
+}
+
+func TestDnsmasqDHCP_InitEmptyLeasesPath(t *testing.T) {
+ job := New()
+ job.LeasesPath = ""
+
+ assert.Error(t, job.Init())
+}
+
+func TestDnsmasqDHCP_InitInvalidLeasesPath(t *testing.T) {
+ job := New()
+ job.LeasesPath = testLeasesPath
+ job.LeasesPath += "!"
+
+ assert.Error(t, job.Init())
+}
+
+func TestDnsmasqDHCP_InitZeroDHCPRanges(t *testing.T) {
+ job := New()
+ job.LeasesPath = testLeasesPath
+ job.ConfPath = "testdata/dnsmasq3.conf"
+ job.ConfDir = ""
+
+ assert.NoError(t, job.Init())
+}
+
+func TestDnsmasqDHCP_Check(t *testing.T) {
+ job := New()
+ job.LeasesPath = testLeasesPath
+ job.ConfPath = testConfPath
+ job.ConfDir = testConfDir
+
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+}
+
+func TestDnsmasqDHCP_Charts(t *testing.T) {
+ job := New()
+ job.LeasesPath = testLeasesPath
+ job.ConfPath = testConfPath
+ job.ConfDir = testConfDir
+
+ require.NoError(t, job.Init())
+
+ assert.NotNil(t, job.Charts())
+}
+
+func TestDnsmasqDHCP_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestDnsmasqDHCP_Collect(t *testing.T) {
+ job := New()
+ job.LeasesPath = testLeasesPath
+ job.ConfPath = testConfPath
+ job.ConfDir = testConfDir
+
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "dhcp_range_1230::1-1230::64_allocated_leases": 7,
+ "dhcp_range_1230::1-1230::64_utilization": 7,
+ "dhcp_range_1231::1-1231::64_allocated_leases": 1,
+ "dhcp_range_1231::1-1231::64_utilization": 1,
+ "dhcp_range_1232::1-1232::64_allocated_leases": 1,
+ "dhcp_range_1232::1-1232::64_utilization": 1,
+ "dhcp_range_1233::1-1233::64_allocated_leases": 1,
+ "dhcp_range_1233::1-1233::64_utilization": 1,
+ "dhcp_range_1234::1-1234::64_allocated_leases": 1,
+ "dhcp_range_1234::1-1234::64_utilization": 1,
+ "dhcp_range_192.168.0.1-192.168.0.100_allocated_leases": 6,
+ "dhcp_range_192.168.0.1-192.168.0.100_utilization": 6,
+ "dhcp_range_192.168.1.1-192.168.1.100_allocated_leases": 5,
+ "dhcp_range_192.168.1.1-192.168.1.100_utilization": 5,
+ "dhcp_range_192.168.2.1-192.168.2.100_allocated_leases": 4,
+ "dhcp_range_192.168.2.1-192.168.2.100_utilization": 4,
+ "dhcp_range_192.168.200.1-192.168.200.100_allocated_leases": 1,
+ "dhcp_range_192.168.200.1-192.168.200.100_utilization": 1,
+ "dhcp_range_192.168.3.1-192.168.3.100_allocated_leases": 1,
+ "dhcp_range_192.168.3.1-192.168.3.100_utilization": 1,
+ "dhcp_range_192.168.4.1-192.168.4.100_allocated_leases": 1,
+ "dhcp_range_192.168.4.1-192.168.4.100_utilization": 1,
+ "ipv4_dhcp_hosts": 6,
+ "ipv4_dhcp_ranges": 6,
+ "ipv6_dhcp_hosts": 5,
+ "ipv6_dhcp_ranges": 5,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestDnsmasqDHCP_CollectFailedToOpenLeasesPath(t *testing.T) {
+ job := New()
+ job.LeasesPath = testLeasesPath
+ job.ConfPath = testConfPath
+ job.ConfDir = testConfDir
+
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ job.LeasesPath = ""
+ assert.Nil(t, job.Collect())
+}
+
+func TestDnsmasqDHCP_parseDHCPRangeValue(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantFail bool
+ }{
+ "ipv4": {
+ input: "192.168.0.50,192.168.0.150,12h",
+ },
+ "ipv4 with netmask": {
+ input: "192.168.0.50,192.168.0.150,255.255.255.0,12h",
+ },
+ "ipv4 with netmask and tag": {
+ input: "set:red,1.1.1.50,1.1.2.150, 255.255.252.0",
+ },
+ "ipv4 with iface": {
+ input: "enp3s0, 172.16.1.2, 172.16.1.254, 1h",
+ },
+ "ipv4 with iface 2": {
+ input: "enp2s0.100, 192.168.100.2, 192.168.100.254, 1h",
+ },
+ "ipv4 static": {
+ wantFail: true,
+ input: "192.168.0.0,static",
+ },
+ "ipv6": {
+ input: "1234::2,1234::500",
+ },
+ "ipv6 slacc": {
+ input: "1234::2,1234::500, slaac",
+ },
+ "ipv6 with with prefix length and lease time": {
+ input: "1234::2,1234::500, 64, 12h",
+ },
+ "ipv6 ra-only": {
+ wantFail: true,
+ input: "1234::,ra-only",
+ },
+ "ipv6 ra-names": {
+ wantFail: true,
+ input: "1234::,ra-names",
+ },
+ "ipv6 ra-stateless": {
+ wantFail: true,
+ input: "1234::,ra-stateless",
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ v := parseDHCPRangeValue(test.input)
+
+ if test.wantFail {
+ assert.Emptyf(t, v, "parsing '%s' must fail", test.input)
+ } else {
+ assert.NotEmptyf(t, v, "parsing '%s' must not fail", test.input)
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/init.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/init.go
new file mode 100644
index 000000000..6c74674a3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/init.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq_dhcp
+
+import "errors"
+
+func (d *DnsmasqDHCP) validateConfig() error {
+ if d.LeasesPath == "" {
+ return errors.New("empty 'leases_path'")
+ }
+ return nil
+}
+
+func (d *DnsmasqDHCP) checkLeasesPath() error {
+ f, err := openFile(d.LeasesPath)
+ if err != nil {
+ return err
+ }
+ _ = f.Close()
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md b/src/go/plugin/go.d/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md
new file mode 100644
index 000000000..751ebf089
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/integrations/dnsmasq_dhcp.md
@@ -0,0 +1,240 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dnsmasq_dhcp/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml"
+sidebar_label: "Dnsmasq DHCP"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dnsmasq DHCP
+
+
+<img src="https://netdata.cloud/img/dnsmasq.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: dnsmasq_dhcp
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Dnsmasq DHCP leases databases, depending on your configuration.
+
+By default, it uses:
+
+- `/var/lib/misc/dnsmasq.leases` to read leases.
+- `/etc/dnsmasq.conf` to detect dhcp-ranges.
+- `/etc/dnsmasq.d` to find additional configurations.
+
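+For reference, the DHCP ranges come from `dhcp-range` directives in those files. Entries such as the following (the values are only illustrative) each produce one monitored range:
+
+```text
+dhcp-range=192.168.0.1,192.168.0.100,12h
+dhcp-range=1230::1,1230::64
+```
+
+Each detected range gets its own utilization and allocated leases charts (see the metrics section below).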
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+All configured dhcp-ranges are detected automatically.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Dnsmasq DHCP instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dnsmasq_dhcp.dhcp_ranges | ipv4, ipv6 | ranges |
+| dnsmasq_dhcp.dhcp_hosts | ipv4, ipv6 | hosts |
+
+### Per dhcp range
+
+These metrics refer to the DHCP range.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| dhcp_range | DHCP range in `START_IP:END_IP` format |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dnsmasq_dhcp.dhcp_range_utilization | used | percentage |
+| dnsmasq_dhcp.dhcp_range_allocated_leases | allocated | leases |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ dnsmasq_dhcp_dhcp_range_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf) | dnsmasq_dhcp.dhcp_range_utilization | DHCP range utilization |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/dnsmasq_dhcp.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/dnsmasq_dhcp.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| leases_path | Path to dnsmasq DHCP leases file. | /var/lib/misc/dnsmasq.leases | no |
+| conf_path | Path to dnsmasq configuration file. | /etc/dnsmasq.conf | no |
+| conf_dir | Path to dnsmasq configuration directory. | /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: dnsmasq_dhcp
+ leases_path: /var/lib/misc/dnsmasq.leases
+ conf_path: /etc/dnsmasq.conf
+ conf_dir: /etc/dnsmasq.d
+
+```
+</details>
+
+##### Pi-hole
+
+Dnsmasq DHCP on Pi-hole.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: dnsmasq_dhcp
+ leases_path: /etc/pihole/dhcp.leases
+ conf_path: /etc/dnsmasq.conf
+ conf_dir: /etc/dnsmasq.d
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `dnsmasq_dhcp` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m dnsmasq_dhcp
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `dnsmasq_dhcp` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep dnsmasq_dhcp
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep dnsmasq_dhcp /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep dnsmasq_dhcp
+```
+
+
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml b/src/go/plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml
new file mode 100644
index 000000000..13b73336c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/metadata.yaml
@@ -0,0 +1,151 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-dnsmasq_dhcp
+ plugin_name: go.d.plugin
+ module_name: dnsmasq_dhcp
+ monitored_instance:
+ name: Dnsmasq DHCP
+ link: https://www.thekelleys.org.uk/dnsmasq/doc.html
+ icon_filename: dnsmasq.svg
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - dnsmasq
+ - dhcp
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Dnsmasq DHCP leases databases, depending on your configuration.
+
+ By default, it uses:
+
+ - `/var/lib/misc/dnsmasq.leases` to read leases.
+ - `/etc/dnsmasq.conf` to detect dhcp-ranges.
+ - `/etc/dnsmasq.d` to find additional configurations.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ All configured dhcp-ranges are detected automatically.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/dnsmasq_dhcp.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: leases_path
+ description: Path to dnsmasq DHCP leases file.
+ default_value: /var/lib/misc/dnsmasq.leases
+ required: false
+ - name: conf_path
+ description: Path to dnsmasq configuration file.
+ default_value: /etc/dnsmasq.conf
+ required: false
+ - name: conf_dir
+ description: Path to dnsmasq configuration directory.
+ default_value: /etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: dnsmasq_dhcp
+ leases_path: /var/lib/misc/dnsmasq.leases
+ conf_path: /etc/dnsmasq.conf
+ conf_dir: /etc/dnsmasq.d
+ - name: Pi-hole
+ description: Dnsmasq DHCP on Pi-hole.
+ config: |
+ jobs:
+ - name: dnsmasq_dhcp
+ leases_path: /etc/pihole/dhcp.leases
+ conf_path: /etc/dnsmasq.conf
+ conf_dir: /etc/dnsmasq.d
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: dnsmasq_dhcp_dhcp_range_utilization
+ metric: dnsmasq_dhcp.dhcp_range_utilization
+ info: DHCP range utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/dnsmasq_dhcp.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: dnsmasq_dhcp.dhcp_ranges
+ description: Number of DHCP Ranges
+ unit: ranges
+ chart_type: stacked
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: dnsmasq_dhcp.dhcp_hosts
+ description: Number of DHCP Hosts
+ unit: hosts
+ chart_type: stacked
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: dhcp range
+ description: These metrics refer to the DHCP range.
+ labels:
+ - name: dhcp_range
+ description: DHCP range in `START_IP:END_IP` format
+ metrics:
+ - name: dnsmasq_dhcp.dhcp_range_utilization
+ description: DHCP Range utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: dnsmasq_dhcp.dhcp_range_allocated_leases
+ description: DHCP Range Allocated Leases
+ unit: leases
+ chart_type: line
+ dimensions:
+ - name: allocated
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/parse_configuration.go b/src/go/plugin/go.d/modules/dnsmasq_dhcp/parse_configuration.go
new file mode 100644
index 000000000..558ce7c65
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/parse_configuration.go
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsmasq_dhcp
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/iprange"
+)
+
+func (d *DnsmasqDHCP) parseDnsmasqDHCPConfiguration() ([]iprange.Range, []net.IP) {
+ configs := findConfigurationFiles(d.ConfPath, d.ConfDir)
+
+ dhcpRanges := d.getDHCPRanges(configs)
+ dhcpHosts := d.getDHCPHosts(configs)
+
+ return dhcpRanges, dhcpHosts
+}
+
+func (d *DnsmasqDHCP) getDHCPRanges(configs []*configFile) []iprange.Range {
+ var dhcpRanges []iprange.Range
+ var parsed string
+ seen := make(map[string]bool)
+
+ for _, conf := range configs {
+ d.Debugf("looking in '%s'", conf.path)
+
+ for _, value := range conf.get("dhcp-range") {
+ d.Debugf("found dhcp-range '%s'", value)
+ if parsed = parseDHCPRangeValue(value); parsed == "" || seen[parsed] {
+ continue
+ }
+ seen[parsed] = true
+
+ r, err := iprange.ParseRange(parsed)
+ if r == nil || err != nil {
+ d.Warningf("error on parsing dhcp-range '%s', skipping it", parsed)
+ continue
+ }
+
+ d.Debugf("adding dhcp-range '%s'", parsed)
+ dhcpRanges = append(dhcpRanges, r)
+ }
+ }
+
+ // order: ipv4, ipv6
+ sort.Slice(dhcpRanges, func(i, j int) bool { return dhcpRanges[i].Family() < dhcpRanges[j].Family() })
+
+ return dhcpRanges
+}
+
+func (d *DnsmasqDHCP) getDHCPHosts(configs []*configFile) []net.IP {
+ var dhcpHosts []net.IP
+ seen := make(map[string]bool)
+ var parsed string
+
+ for _, conf := range configs {
+ d.Debugf("looking in '%s'", conf.path)
+
+ for _, value := range conf.get("dhcp-host") {
+ d.Debugf("found dhcp-host '%s'", value)
+ if parsed = parseDHCPHostValue(value); parsed == "" || seen[parsed] {
+ continue
+ }
+ seen[parsed] = true
+
+ v := net.ParseIP(parsed)
+ if v == nil {
+ d.Warningf("error on parsing dhcp-host '%s', skipping it", parsed)
+ continue
+ }
+
+ d.Debugf("adding dhcp-host '%s'", parsed)
+ dhcpHosts = append(dhcpHosts, v)
+ }
+ }
+ return dhcpHosts
+}
+
+/*
+Examples:
+ - 192.168.0.50,192.168.0.150,12h
+ - 192.168.0.50,192.168.0.150,255.255.255.0,12h
+ - set:red,1.1.1.50,1.1.2.150, 255.255.252.0
+ - 192.168.0.0,static
+ - 1234::2,1234::500, 64, 12h
+ - 1234::2,1234::500
+ - 1234::2,1234::500, slaac
+ - 1234::,ra-only
+ - 1234::,ra-names
+ - 1234::,ra-stateless
+*/
+
+func parseDHCPRangeValue(s string) (r string) {
+ if strings.Contains(s, "ra-stateless") {
+ return ""
+ }
+
+ s = strings.ReplaceAll(s, " ", "")
+
+ var start, end net.IP
+ parts := strings.Split(s, ",")
+
+ for i, v := range parts {
+ if start = net.ParseIP(strings.TrimSpace(v)); start == nil {
+ continue
+ }
+ if i+1 >= len(parts) {
+ return ""
+ }
+ if end = net.ParseIP(parts[i+1]); end == nil || iprange.New(start, end) == nil {
+ return ""
+ }
+ return fmt.Sprintf("%s-%s", start, end)
+ }
+
+ return ""
+}
+
+/*
+Examples:
+ - 11:22:33:44:55:66,192.168.0.60
+ - 11:22:33:44:55:66,fred,192.168.0.60,45m
+ - 11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60
+ - bert,192.168.0.70,infinite
+ - id:01:02:02:04,192.168.0.60
+ - id:ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:28:05:81,192.168.0.61
+ - id:marjorie,192.168.0.60
+ - id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5]
+*/
+var (
+ reDHCPHostV4 = regexp.MustCompile(`(?:[0-9]{1,3}\.){3}[0-9]{1,3}`)
+ reDHCPHostV6 = regexp.MustCompile(`\[([0-9a-f.:]+)]`)
+)
+
+func parseDHCPHostValue(s string) (r string) {
+ s = strings.ReplaceAll(s, " ", "")
+
+ if strings.Contains(s, "[") {
+ return strings.Trim(reDHCPHostV6.FindString(s), "[]")
+ }
+ return reDHCPHostV4.FindString(s)
+}
+
+type (
+ extension string
+
+ extensions []extension
+
+ configDir struct {
+ path string
+ include extensions
+ exclude extensions
+ }
+)
+
+func (e extension) match(filename string) bool {
+ return strings.HasSuffix(filename, string(e))
+}
+
+func (es extensions) match(filename string) bool {
+ for _, e := range es {
+ if e.match(filename) {
+ return true
+ }
+ }
+ return false
+}
+
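+// parseConfDir splits a conf-dir value ("PATH[,SUFFIX|*SUFFIX,...]") into the
+// directory path and extension filters: "*SUFFIX" entries are includes,
+// plain "SUFFIX" entries are excludes.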
+func parseConfDir(confDirStr string) configDir {
+ // # Include all the files in a directory except those ending in .bak
+ // #conf-dir=/etc/dnsmasq.d,.bak
+ // # Include all files in a directory which end in .conf
+ // #conf-dir=/etc/dnsmasq.d/,*.conf
+
+ parts := strings.Split(confDirStr, ",")
+ cd := configDir{path: parts[0]}
+
+ for _, arg := range parts[1:] {
+ arg = strings.TrimSpace(arg)
+ if strings.HasPrefix(arg, "*") {
+ cd.include = append(cd.include, extension(arg[1:]))
+ } else {
+ cd.exclude = append(cd.exclude, extension(arg))
+ }
+ }
+ return cd
+}
+
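+// isValidFilename filters out hidden files and editor leftovers: names
+// starting with "." or "~", or wrapped in "#".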
+func (cd configDir) isValidFilename(filename string) bool {
+ switch {
+ default:
+ return true
+ case strings.HasPrefix(filename, "."):
+ case strings.HasPrefix(filename, "~"):
+ case strings.HasPrefix(filename, "#") && strings.HasSuffix(filename, "#"):
+ }
+ return false
+}
+
+func (cd configDir) match(filename string) bool {
+ switch {
+ default:
+ return true
+ case !cd.isValidFilename(filename):
+ case len(cd.include) > 0 && !cd.include.match(filename):
+ case cd.exclude.match(filename):
+ }
+ return false
+}
+
+func (cd configDir) findConfigs() ([]string, error) {
+ fis, err := os.ReadDir(cd.path)
+ if err != nil {
+ return nil, err
+ }
+
+ var files []string
+ for _, fi := range fis {
+ info, err := fi.Info()
+ if err != nil {
+ return nil, err
+ }
+ if !info.Mode().IsRegular() || !cd.match(fi.Name()) {
+ continue
+ }
+ files = append(files, filepath.Join(cd.path, fi.Name()))
+ }
+ return files, nil
+}
+
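+// openFile opens the file at the given path and verifies that it is a
+// regular file; the file is closed again if any check fails.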
+func openFile(filepath string) (f *os.File, err error) {
+ defer func() {
+ if err != nil && f != nil {
+ _ = f.Close()
+ }
+ }()
+
+ f, err = os.Open(filepath)
+ if err != nil {
+ return nil, err
+ }
+
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+
+ if !fi.Mode().IsRegular() {
+ return nil, fmt.Errorf("'%s' is not a regular file", filepath)
+ }
+ return f, nil
+}
+
+type (
+ configOption struct {
+ key, value string
+ }
+
+ configFile struct {
+ path string
+ options []configOption
+ }
+)
+
+func (cf *configFile) get(name string) []string {
+ var options []string
+ for _, o := range cf.options {
+ if o.key != name {
+ continue
+ }
+ options = append(options, o.value)
+ }
+ return options
+}
+
+func parseConfFile(filename string) (*configFile, error) {
+ f, err := openFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = f.Close() }()
+
+ cf := configFile{path: filename}
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ line := strings.TrimSpace(s.Text())
+ if strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ if !strings.Contains(line, "=") {
+ continue
+ }
+
+ line = strings.ReplaceAll(line, " ", "")
+ parts := strings.Split(line, "=")
+ if len(parts) != 2 {
+ continue
+ }
+
+ cf.options = append(cf.options, configOption{key: parts[0], value: parts[1]})
+ }
+ return &cf, nil
+}
+
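+// ConfigFinder discovers every configuration file reachable from the entry
+// config and directory, following conf-file and conf-dir directives
+// recursively while tracking visited files and directories to avoid loops.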
+type ConfigFinder struct {
+ entryConfig string
+ entryDir string
+ visitedConfigs map[string]bool
+ visitedDirs map[string]bool
+}
+
+func (f *ConfigFinder) find() []*configFile {
+ f.visitedConfigs = make(map[string]bool)
+ f.visitedDirs = make(map[string]bool)
+
+ configs := f.recursiveFind(f.entryConfig)
+
+ for _, file := range f.entryDirConfigs() {
+ configs = append(configs, f.recursiveFind(file)...)
+ }
+ return configs
+}
+
+func (f *ConfigFinder) entryDirConfigs() []string {
+ if f.entryDir == "" {
+ return nil
+ }
+ files, err := parseConfDir(f.entryDir).findConfigs()
+ if err != nil {
+ return nil
+ }
+ return files
+}
+
+func (f *ConfigFinder) recursiveFind(filename string) (configs []*configFile) {
+ if f.visitedConfigs[filename] {
+ return nil
+ }
+
+ config, err := parseConfFile(filename)
+ if err != nil {
+ return nil
+ }
+
+ files, dirs := config.get("conf-file"), config.get("conf-dir")
+
+ f.visitedConfigs[filename] = true
+ configs = append(configs, config)
+
+ for _, file := range files {
+ configs = append(configs, f.recursiveFind(file)...)
+ }
+
+ for _, dir := range dirs {
+ if dir == "" {
+ continue
+ }
+
+ d := parseConfDir(dir)
+
+ if f.visitedDirs[d.path] {
+ continue
+ }
+ f.visitedDirs[d.path] = true
+
+ files, err = d.findConfigs()
+ if err != nil {
+ continue
+ }
+
+ for _, file := range files {
+ configs = append(configs, f.recursiveFind(file)...)
+ }
+ }
+ return configs
+}
+
+func findConfigurationFiles(entryConfig string, entryDir string) []*configFile {
+ cf := ConfigFinder{
+ entryConfig: entryConfig,
+ entryDir: entryDir,
+ }
+ return cf.find()
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/config.json b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/config.json
new file mode 100644
index 000000000..6df6faec6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "leases_path": "ok",
+ "conf_path": "ok",
+ "conf_dir": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/config.yaml b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/config.yaml
new file mode 100644
index 000000000..4a03e6db8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+leases_path: "ok"
+conf_path: "ok"
+conf_dir: "ok"
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.conf b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.conf
new file mode 100644
index 000000000..4cf77478e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.conf
@@ -0,0 +1,77 @@
+# Uncomment this to enable the integrated DHCP server, you need
+# to supply the range of addresses available for lease and optionally
+# a lease time. If you have more than one network, you will need to
+# repeat this for each network on which you want to supply DHCP
+# service.
+#dhcp-range=192.168.0.50,192.168.0.150,12h
+
+# This is an example of a DHCP range where the netmask is given. This
+# is needed for networks we reach the dnsmasq DHCP server via a relay
+# agent. If you don't know what a DHCP relay agent is, you probably
+# don't need to worry about this.
+#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h
+
+# This is an example of a DHCP range which sets a tag, so that
+# some DHCP options may be set only for this network.
+#dhcp-range=set:red,192.168.0.50,192.168.0.150
+
+# Use this DHCP range only when the tag "green" is set.
+#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h
+
+# Specify a subnet which can't be used for dynamic address allocation,
+# is available for hosts with matching --dhcp-host lines. Note that
+# dhcp-host declarations will be ignored unless there is a dhcp-range
+# of some type for the subnet in question.
+# In this case the netmask is implied (it comes from the network
+# configuration on the machine running dnsmasq) it is possible to give
+# an explicit netmask instead.
+#dhcp-range=192.168.0.0,static
+
+# Enable DHCPv6. Note that the prefix-length does not need to be specified
+# and defaults to 64 if missing.
+#dhcp-range=1234::2, 1234::500, 64, 12h
+
+# Do Router Advertisements, BUT NOT DHCP for this subnet.
+#dhcp-range=1234::, ra-only
+
+# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and
+# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack
+# hosts. Use the DHCPv4 lease to derive the name, network segment and
+# MAC address and assume that the host will also have an
+# IPv6 address calculated using the SLAAC algorithm.
+#dhcp-range=1234::, ra-names
+
+# Do Router Advertisements, BUT NOT DHCP for this subnet.
+# Set the lifetime to 46 hours. (Note: minimum lifetime is 2 hours.)
+#dhcp-range=1234::, ra-only, 48h
+
+# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA
+# so that clients can use SLAAC addresses as well as DHCP ones.
+#dhcp-range=1234::2, 1234::500, slaac
+
+# Do Router Advertisements and stateless DHCP for this subnet. Clients will
+# not get addresses from DHCP, but they will get other configuration information.
+# They will use SLAAC for addresses.
+#dhcp-range=1234::, ra-stateless
+
+# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses
+# from DHCPv4 leases.
+#dhcp-range=1234::, ra-stateless, ra-names
+
+dhcp-range=192.168.0.1,192.168.0.100,12h
+dhcp-range = 1230::1, 1230::64
+
+dhcp-range = 1235::2, 1235::500, ra-stateless
+dhcp-range=1234::, ra-stateless, ra-names
+dhcp-range=1234::, ra-stateless
+dhcp-range=1234::, ra-only, 48h
+
+dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.99
+dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1230::63]
+
+conf-file=testdata/dnsmasq.conf
+conf-file=testdata/dnsmasq2.conf
+
+conf-dir=testdata/dnsmasq.d2
+conf-dir=testdata/dnsmasq.d3,.bak
+conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/.dnsmasq.conf b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/.dnsmasq.conf
new file mode 100644
index 000000000..b9ca78218
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/.dnsmasq.conf
@@ -0,0 +1 @@
+dhcp-range=tag:green,192.168.11.1,192.168.11.100,12h \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv4.any b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv4.any
new file mode 100644
index 000000000..300faa28e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv4.any
@@ -0,0 +1,10 @@
+dhcp-range=tag:green,192.168.1.1,192.168.1.100,12h
+
+dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.1.99
+
+conf-file=testdata/dnsmasq.conf
+conf-file=testdata/dnsmasq2.conf
+
+conf-dir=testdata/dnsmasq.d2
+conf-dir=testdata/dnsmasq.d3,.bak
+conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv6.any b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv6.any
new file mode 100644
index 000000000..414d6819f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d/dnsmasqv6.any
@@ -0,0 +1,10 @@
+dhcp-range = 1231::1, 1231::64
+
+dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1231::63]
+
+conf-file=testdata/dnsmasq.conf
+conf-file=testdata/dnsmasq2.conf
+
+conf-dir=testdata/dnsmasq.d2
+conf-dir=testdata/dnsmasq.d3,.bak
+conf-dir=testdata/dnsmasq.d4,*.conf
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv4.any b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv4.any
new file mode 100644
index 000000000..24a742797
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv4.any
@@ -0,0 +1,10 @@
+dhcp-range=tag:green,192.168.2.1,192.168.2.100,12h
+
+dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.2.99
+
+conf-file=testdata/dnsmasq.conf
+conf-file=testdata/dnsmasq2.conf
+
+conf-dir=testdata/dnsmasq.d2
+conf-dir=testdata/dnsmasq.d3,.bak
+conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv6.any b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv6.any
new file mode 100644
index 000000000..4ae70f0b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/dnsmasqv6.any
@@ -0,0 +1,10 @@
+dhcp-range = 1232::1, 1232::64
+
+dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1232::63]
+
+conf-file=testdata/dnsmasq.conf
+conf-file=testdata/dnsmasq2.conf
+
+conf-dir=testdata/dnsmasq.d2
+conf-dir=testdata/dnsmasq.d3,.bak
+conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/~dnsmasq.conf b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/~dnsmasq.conf
new file mode 100644
index 000000000..dc58bf9d8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d2/~dnsmasq.conf
@@ -0,0 +1 @@
+dhcp-range=192.168.22.0,192.168.22.255,12h \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasq.bak b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasq.bak
new file mode 100644
index 000000000..c3897671a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasq.bak
@@ -0,0 +1 @@
+dhcp-range=tag:green,192.168.33.1,192.168.33.100,12h \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv4.any b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv4.any
new file mode 100644
index 000000000..a55ac969a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv4.any
@@ -0,0 +1,10 @@
+dhcp-range=tag:green,192.168.3.1,192.168.3.100,12h
+
+dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.3.99
+
+conf-file=testdata/dnsmasq.conf
+conf-file=testdata/dnsmasq2.conf
+
+conf-dir=testdata/dnsmasq.d2
+conf-dir=testdata/dnsmasq.d3,.bak
+conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv6.any b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv6.any
new file mode 100644
index 000000000..4bc6cf10f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d3/dnsmasqv6.any
@@ -0,0 +1,3 @@
+dhcp-range = 1233::1, 1233::64
+
+dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1233::63] \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasq.other b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasq.other
new file mode 100644
index 000000000..18fe1ac53
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasq.other
@@ -0,0 +1 @@
+dhcp-range=tag:green,192.168.44.1,192.168.44.100,12h \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv4.conf b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv4.conf
new file mode 100644
index 000000000..1493b8009
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv4.conf
@@ -0,0 +1,10 @@
+dhcp-range=tag:green,192.168.4.1,192.168.4.100,12h
+
+dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.4.99
+
+conf-file=testdata/dnsmasq.conf
+conf-file=testdata/dnsmasq2.conf
+
+conf-dir=testdata/dnsmasq.d2
+conf-dir=testdata/dnsmasq.d3,.bak
+conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv6.conf b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv6.conf
new file mode 100644
index 000000000..389c2c95b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.d4/dnsmasqv6.conf
@@ -0,0 +1,10 @@
+dhcp-range = 1234::1, 1234::64
+
+dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::63]
+
+conf-file=testdata/dnsmasq.conf
+conf-file=testdata/dnsmasq2.conf
+
+conf-dir=testdata/dnsmasq.d2
+conf-dir=testdata/dnsmasq.d3,.bak
+conf-dir=testdata/dnsmasq.d4,*.conf \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.leases b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.leases
new file mode 100644
index 000000000..606e74fba
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq.leases
@@ -0,0 +1,19 @@
+1560300536 08:00:27:61:3c:ee 192.168.0.1 * *
+1560300536 08:00:27:61:3c:ee 192.168.0.2 * *
+1560300536 08:00:27:61:3c:ee 192.168.0.3 * *
+1560300536 08:00:27:61:3c:ee 192.168.0.4 * *
+1560300536 08:00:27:61:3c:ee 192.168.0.5 * *
+1560300536 08:00:27:61:3c:ee 192.168.1.1 * *
+1560300536 08:00:27:61:3c:ee 192.168.1.2 * *
+1560300536 08:00:27:61:3c:ee 192.168.1.3 * *
+1560300536 08:00:27:61:3c:ee 192.168.1.4 * *
+1560300536 08:00:27:61:3c:ee 192.168.2.1 * *
+1560300536 08:00:27:61:3c:ee 192.168.2.2 * *
+1560300536 08:00:27:61:3c:ee 192.168.2.3 * *
+duid 00:01:00:01:24:90:cf:5b:08:00:27:61:2e:2c
+1560300414 660684014 1230::1 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee
+1560300414 660684014 1230::2 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee
+1560300414 660684014 1230::3 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee
+1560300414 660684014 1230::4 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee
+1560300414 660684014 1230::5 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee
+1560300414 660684014 1230::6 * 00:01:00:01:24:90:cf:a3:08:00:27:61:3c:ee
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq2.conf b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq2.conf
new file mode 100644
index 000000000..bd1766adb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq2.conf
@@ -0,0 +1,6 @@
+dhcp-range=192.168.200.1,192.168.200.100,12h
+
+dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.200.99
+
+conf-file=testdata/dnsmasq.conf
+conf-file=testdata/dnsmasq2.conf \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq3.conf b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq3.conf
new file mode 100644
index 000000000..3475544b5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsmasq_dhcp/testdata/dnsmasq3.conf
@@ -0,0 +1,4 @@
+#dhcp-range=192.168.0.50,192.168.0.150,12h
+#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h
+#dhcp-range=set:red,192.168.0.50,192.168.0.150
+#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsquery/README.md b/src/go/plugin/go.d/modules/dnsquery/README.md
new file mode 120000
index 000000000..c5baa8254
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/README.md
@@ -0,0 +1 @@
+integrations/dns_query.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dnsquery/charts.go b/src/go/plugin/go.d/modules/dnsquery/charts.go
new file mode 100644
index 000000000..66c2ea6c9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/charts.go
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsquery
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioDNSQueryStatus = module.Priority + iota
+ prioDNSQueryTime
+)
+
+var (
+ dnsChartsTmpl = module.Charts{
+ dnsQueryStatusChartTmpl.Copy(),
+ dnsQueryTimeChartTmpl.Copy(),
+ }
+ dnsQueryStatusChartTmpl = module.Chart{
+ ID: "server_%s_record_%s_query_status",
+ Title: "DNS Query Status",
+ Units: "status",
+ Fam: "query status",
+ Ctx: "dns_query.query_status",
+ Priority: prioDNSQueryStatus,
+ Dims: module.Dims{
+ {ID: "server_%s_record_%s_query_status_success", Name: "success"},
+ {ID: "server_%s_record_%s_query_status_network_error", Name: "network_error"},
+ {ID: "server_%s_record_%s_query_status_dns_error", Name: "dns_error"},
+ },
+ }
+ dnsQueryTimeChartTmpl = module.Chart{
+ ID: "server_%s_record_%s_query_time",
+ Title: "DNS Query Time",
+ Units: "seconds",
+ Fam: "query time",
+ Ctx: "dns_query.query_time",
+ Priority: prioDNSQueryTime,
+ Dims: module.Dims{
+ {ID: "server_%s_record_%s_query_time", Name: "query_time", Div: 1e9},
+ },
+ }
+)
+
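+// newDNSServerCharts builds the query status and query time charts for a
+// single server/record type pair from the templates and attaches the
+// server, network and record_type labels.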
+func newDNSServerCharts(server, network, rtype string) *module.Charts {
+ charts := dnsChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ReplaceAll(server, ".", "_"), rtype)
+ chart.Labels = []module.Label{
+ {Key: "server", Value: server},
+ {Key: "network", Value: network},
+ {Key: "record_type", Value: rtype},
+ }
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, server, rtype)
+ }
+ }
+
+ return charts
+}
diff --git a/src/go/plugin/go.d/modules/dnsquery/collect.go b/src/go/plugin/go.d/modules/dnsquery/collect.go
new file mode 100644
index 000000000..a98e37cad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/collect.go
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsquery
+
+import (
+ "math/rand"
+ "net"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/miekg/dns"
+)
+
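+// collect picks a random domain and queries it on every configured server
+// for every configured record type concurrently, recording per server and
+// record type status flags and the query time.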
+func (d *DNSQuery) collect() (map[string]int64, error) {
+ if d.dnsClient == nil {
+ d.dnsClient = d.newDNSClient(d.Network, d.Timeout.Duration())
+ }
+
+ mx := make(map[string]int64)
+ domain := randomDomain(d.Domains)
+ d.Debugf("current domain : %s", domain)
+
+ var wg sync.WaitGroup
+ var mux sync.RWMutex
+ for _, srv := range d.Servers {
+ for rtypeName, rtype := range d.recordTypes {
+ wg.Add(1)
+ go func(srv, rtypeName string, rtype uint16, wg *sync.WaitGroup) {
+ defer wg.Done()
+
+ msg := new(dns.Msg)
+ msg.SetQuestion(dns.Fqdn(domain), rtype)
+ address := net.JoinHostPort(srv, strconv.Itoa(d.Port))
+
+ resp, rtt, err := d.dnsClient.Exchange(msg, address)
+
+ mux.Lock()
+ defer mux.Unlock()
+
+ px := "server_" + srv + "_record_" + rtypeName + "_"
+
+ mx[px+"query_status_success"] = 0
+ mx[px+"query_status_network_error"] = 0
+ mx[px+"query_status_dns_error"] = 0
+
+ if err != nil {
+ d.Debugf("error on querying %s after %s query for %s : %s", srv, rtypeName, domain, err)
+ mx[px+"query_status_network_error"] = 1
+ return
+ }
+
+ if resp != nil && resp.Rcode != dns.RcodeSuccess {
+ d.Debugf("invalid answer from %s after %s query for %s (rcode %d)", srv, rtypeName, domain, resp.Rcode)
+ mx[px+"query_status_dns_error"] = 1
+ } else {
+ mx[px+"query_status_success"] = 1
+ }
+ mx["server_"+srv+"_record_"+rtypeName+"_query_time"] = rtt.Nanoseconds()
+
+ }(srv, rtypeName, rtype, &wg)
+ }
+ }
+ wg.Wait()
+
+ return mx, nil
+}
+
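+// randomDomain returns a pseudo-randomly chosen domain from the configured
+// list so that successive collections spread queries across domains.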
+func randomDomain(domains []string) string {
+ src := rand.NewSource(time.Now().UnixNano())
+ r := rand.New(src)
+ return domains[r.Intn(len(domains))]
+}
diff --git a/src/go/plugin/go.d/modules/dnsquery/config_schema.json b/src/go/plugin/go.d/modules/dnsquery/config_schema.json
new file mode 100644
index 000000000..cfa6f3a14
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/config_schema.json
@@ -0,0 +1,133 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "DNS query collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for DNS queries, in seconds.",
+ "type": "number",
+ "default": 2
+ },
+ "network": {
+ "title": "Protocol",
+ "description": "Network protocol for DNS queries.",
+ "type": "string",
+ "enum": [
+ "udp",
+ "tcp",
+ "tcp-tls"
+ ],
+ "default": "udp"
+ },
+ "port": {
+ "title": "Port",
+ "description": "Port number for DNS servers.",
+ "type": "integer",
+ "default": 53
+ },
+ "record_types": {
+ "title": "Record types",
+ "description": "Types of DNS records to query for each server.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": "string",
+ "enum": [
+ "A",
+ "AAAA",
+ "ANY",
+ "CNAME",
+ "MX",
+ "NS",
+ "PTR",
+ "SOA",
+ "SPF",
+ "SRV",
+ "TXT"
+ ],
+ "default": "A"
+ },
+ "default": [
+ "A"
+ ],
+ "uniqueItems": true
+ },
+ "servers": {
+ "title": "Servers",
+ "description": "List of DNS servers to query.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "DNS server",
+ "description": "IP address or hostname of the DNS server.",
+ "type": "string"
+ },
+ "default": [
+ "8.8.8.8"
+ ],
+ "uniqueItems": true,
+ "minItems": 1
+ },
+ "domains": {
+ "title": "Domains",
+ "description": "List of domains or subdomains to query. A random domain will be selected from this list at each iteration.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Domain",
+ "type": "string"
+ },
+ "default": [
+ "google.com",
+ "github.com"
+ ],
+ "uniqueItems": true,
+ "minItems": 1
+ }
+ },
+ "required": [
+ "domains",
+ "servers",
+ "network"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "network": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "servers": {
+ "ui:listFlavour": "list"
+ },
+ "domains": {
+ "ui:listFlavour": "list"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dnsquery/dnsquery.go b/src/go/plugin/go.d/modules/dnsquery/dnsquery.go
new file mode 100644
index 000000000..408b08ee8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/dnsquery.go
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsquery
+
+import (
+ _ "embed"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/miekg/dns"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("dns_query", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *DNSQuery {
+ return &DNSQuery{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ Network: "udp",
+ RecordTypes: []string{"A"},
+ Port: 53,
+ },
+ newDNSClient: func(network string, timeout time.Duration) dnsClient {
+ return &dns.Client{
+ Net: network,
+ ReadTimeout: timeout,
+ }
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ Domains []string `yaml:"domains" json:"domains"`
+ Servers []string `yaml:"servers" json:"servers"`
+ Network string `yaml:"network,omitempty" json:"network"`
+ RecordType string `yaml:"record_type,omitempty" json:"record_type"`
+ RecordTypes []string `yaml:"record_types,omitempty" json:"record_types"`
+ Port int `yaml:"port,omitempty" json:"port"`
+}
+
+type (
+ DNSQuery struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ dnsClient dnsClient
+ newDNSClient func(network string, duration time.Duration) dnsClient
+
+ recordTypes map[string]uint16
+ }
+ dnsClient interface {
+ Exchange(msg *dns.Msg, address string) (response *dns.Msg, rtt time.Duration, err error)
+ }
+)
+
+func (d *DNSQuery) Configuration() any {
+ return d.Config
+}
+
+func (d *DNSQuery) Init() error {
+ if err := d.verifyConfig(); err != nil {
+ d.Errorf("config validation: %v", err)
+ return err
+ }
+
+ rt, err := d.initRecordTypes()
+ if err != nil {
+ d.Errorf("init record type: %v", err)
+ return err
+ }
+ d.recordTypes = rt
+
+ charts, err := d.initCharts()
+ if err != nil {
+ d.Errorf("init charts: %v", err)
+ return err
+ }
+ d.charts = charts
+
+ return nil
+}
+
+func (d *DNSQuery) Check() error {
+ return nil
+}
+
+func (d *DNSQuery) Charts() *module.Charts {
+ return d.charts
+}
+
+func (d *DNSQuery) Collect() map[string]int64 {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (d *DNSQuery) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/dnsquery/dnsquery_test.go b/src/go/plugin/go.d/modules/dnsquery/dnsquery_test.go
new file mode 100644
index 000000000..a9f55d6e4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/dnsquery_test.go
@@ -0,0 +1,242 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsquery
+
+import (
+ "errors"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/miekg/dns"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDNSQuery_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DNSQuery{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDNSQuery_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success when all set": {
+ wantFail: false,
+ config: Config{
+ Domains: []string{"example.com"},
+ Servers: []string{"192.0.2.0"},
+ Network: "udp",
+ RecordTypes: []string{"A"},
+ Port: 53,
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ "success when using deprecated record_type": {
+ wantFail: false,
+ config: Config{
+ Domains: []string{"example.com"},
+ Servers: []string{"192.0.2.0"},
+ Network: "udp",
+ RecordType: "A",
+ Port: 53,
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ "fail with default": {
+ wantFail: true,
+ config: New().Config,
+ },
+ "fail when domains not set": {
+ wantFail: true,
+ config: Config{
+ Domains: nil,
+ Servers: []string{"192.0.2.0"},
+ Network: "udp",
+ RecordTypes: []string{"A"},
+ Port: 53,
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ "fail when servers not set": {
+ wantFail: true,
+ config: Config{
+ Domains: []string{"example.com"},
+ Servers: nil,
+ Network: "udp",
+ RecordTypes: []string{"A"},
+ Port: 53,
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ "fail when network is invalid": {
+ wantFail: true,
+ config: Config{
+ Domains: []string{"example.com"},
+ Servers: []string{"192.0.2.0"},
+ Network: "gcp",
+ RecordTypes: []string{"A"},
+ Port: 53,
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ "fail when record_type is invalid": {
+ wantFail: true,
+ config: Config{
+ Domains: []string{"example.com"},
+ Servers: []string{"192.0.2.0"},
+ Network: "udp",
+ RecordTypes: []string{"B"},
+ Port: 53,
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dq := New()
+ dq.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, dq.Init())
+ } else {
+ assert.NoError(t, dq.Init())
+ }
+ })
+ }
+}
+
+func TestDNSQuery_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func() *DNSQuery
+ }{
+ "success when DNS query successful": {
+ wantFail: false,
+ prepare: caseDNSClientOK,
+ },
+ "success when DNS query returns an error": {
+ wantFail: false,
+ prepare: caseDNSClientErr,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dq := test.prepare()
+
+ require.NoError(t, dq.Init())
+
+ if test.wantFail {
+ assert.Error(t, dq.Check())
+ } else {
+ assert.NoError(t, dq.Check())
+ }
+ })
+ }
+}
+
+func TestDNSQuery_Charts(t *testing.T) {
+ dq := New()
+
+ dq.Domains = []string{"google.com"}
+ dq.Servers = []string{"192.0.2.0", "192.0.2.1"}
+ require.NoError(t, dq.Init())
+
+ assert.NotNil(t, dq.Charts())
+ assert.Len(t, *dq.Charts(), len(dnsChartsTmpl)*len(dq.Servers))
+}
+
+func TestDNSQuery_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *DNSQuery
+ wantMetrics map[string]int64
+ }{
+ "success when DNS query successful": {
+ prepare: caseDNSClientOK,
+ wantMetrics: map[string]int64{
+ "server_192.0.2.0_record_A_query_status_dns_error": 0,
+ "server_192.0.2.0_record_A_query_status_network_error": 0,
+ "server_192.0.2.0_record_A_query_status_success": 1,
+ "server_192.0.2.0_record_A_query_time": 1000000000,
+ "server_192.0.2.1_record_A_query_status_dns_error": 0,
+ "server_192.0.2.1_record_A_query_status_network_error": 0,
+ "server_192.0.2.1_record_A_query_status_success": 1,
+ "server_192.0.2.1_record_A_query_time": 1000000000,
+ },
+ },
+ "fail when DNS query returns an error": {
+ prepare: caseDNSClientErr,
+ wantMetrics: map[string]int64{
+ "server_192.0.2.0_record_A_query_status_dns_error": 0,
+ "server_192.0.2.0_record_A_query_status_network_error": 1,
+ "server_192.0.2.0_record_A_query_status_success": 0,
+ "server_192.0.2.1_record_A_query_status_dns_error": 0,
+ "server_192.0.2.1_record_A_query_status_network_error": 1,
+ "server_192.0.2.1_record_A_query_status_success": 0,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dq := test.prepare()
+
+ require.NoError(t, dq.Init())
+
+ mx := dq.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ })
+ }
+}
+
+func caseDNSClientOK() *DNSQuery {
+ dq := New()
+ dq.Domains = []string{"example.com"}
+ dq.Servers = []string{"192.0.2.0", "192.0.2.1"}
+ dq.newDNSClient = func(_ string, _ time.Duration) dnsClient {
+ return mockDNSClient{errOnExchange: false}
+ }
+ return dq
+}
+
+func caseDNSClientErr() *DNSQuery {
+ dq := New()
+ dq.Domains = []string{"example.com"}
+ dq.Servers = []string{"192.0.2.0", "192.0.2.1"}
+ dq.newDNSClient = func(_ string, _ time.Duration) dnsClient {
+ return mockDNSClient{errOnExchange: true}
+ }
+ return dq
+}
+
+type mockDNSClient struct {
+ errOnExchange bool
+}
+
+func (m mockDNSClient) Exchange(_ *dns.Msg, _ string) (response *dns.Msg, rtt time.Duration, err error) {
+ if m.errOnExchange {
+ return nil, time.Second, errors.New("mock.Exchange() error")
+ }
+ return nil, time.Second, nil
+}
diff --git a/src/go/plugin/go.d/modules/dnsquery/init.go b/src/go/plugin/go.d/modules/dnsquery/init.go
new file mode 100644
index 000000000..5899a27b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/init.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dnsquery
+
+import (
+ "errors"
+ "fmt"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/miekg/dns"
+)
+
+func (d *DNSQuery) verifyConfig() error {
+ if len(d.Domains) == 0 {
+ return errors.New("no domains specified")
+ }
+
+ if len(d.Servers) == 0 {
+ return errors.New("no servers specified")
+ }
+
+ if !(d.Network == "" || d.Network == "udp" || d.Network == "tcp" || d.Network == "tcp-tls") {
+ return fmt.Errorf("wrong network transport : %s", d.Network)
+ }
+
+ if d.RecordType != "" {
+ d.Warning("'record_type' config option is deprecated, use 'record_types' instead")
+ d.RecordTypes = append(d.RecordTypes, d.RecordType)
+ }
+
+ if len(d.RecordTypes) == 0 {
+ return errors.New("no record types specified")
+ }
+
+ return nil
+}
+
+func (d *DNSQuery) initRecordTypes() (map[string]uint16, error) {
+ types := make(map[string]uint16)
+ for _, v := range d.RecordTypes {
+ rtype, err := parseRecordType(v)
+ if err != nil {
+ return nil, err
+ }
+ types[v] = rtype
+	}
+
+ return types, nil
+}
+
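+// initCharts builds one chart set per (server, record type) pair from the
+// per-server chart templates, so every server/record type combination gets its
+// own charts.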
+func (d *DNSQuery) initCharts() (*module.Charts, error) {
+ charts := module.Charts{}
+
+ for _, srv := range d.Servers {
+ for _, rtype := range d.RecordTypes {
+ cs := newDNSServerCharts(srv, d.Network, rtype)
+ if err := charts.Add(*cs...); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ return &charts, nil
+}
+
+func parseRecordType(recordType string) (uint16, error) {
+ var rtype uint16
+
+ switch recordType {
+ case "A":
+ rtype = dns.TypeA
+ case "AAAA":
+ rtype = dns.TypeAAAA
+ case "ANY":
+ rtype = dns.TypeANY
+ case "CNAME":
+ rtype = dns.TypeCNAME
+ case "MX":
+ rtype = dns.TypeMX
+ case "NS":
+ rtype = dns.TypeNS
+ case "PTR":
+ rtype = dns.TypePTR
+ case "SOA":
+ rtype = dns.TypeSOA
+ case "SPF":
+ rtype = dns.TypeSPF
+ case "SRV":
+ rtype = dns.TypeSRV
+ case "TXT":
+ rtype = dns.TypeTXT
+ default:
+		return 0, fmt.Errorf("unknown record type: %s", recordType)
+ }
+
+ return rtype, nil
+}
diff --git a/src/go/plugin/go.d/modules/dnsquery/integrations/dns_query.md b/src/go/plugin/go.d/modules/dnsquery/integrations/dns_query.md
new file mode 100644
index 000000000..b081a7bbc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/integrations/dns_query.md
@@ -0,0 +1,216 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dnsquery/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dnsquery/metadata.yaml"
+sidebar_label: "DNS query"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# DNS query
+
+
+<img src="https://netdata.cloud/img/network-wired.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: dns_query
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This module monitors DNS query round-trip time (RTT).
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per server
+
+These metrics refer to the DNS server.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| server | DNS server address. |
+| network | Network protocol name (tcp, udp, tcp-tls). |
+| record_type | DNS record type (e.g. A, AAAA, CNAME). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dns_query.query_status | success, network_error, dns_error | status |
+| dns_query.query_time | query_time | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ dns_query_query_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf) | dns_query.query_status | DNS request type ${label:record_type} to server ${label:server} is unsuccessful |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/dns_query.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/dns_query.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>All options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| domains | Domains or subdomains to query. The collector will choose a random domain from the list on every iteration. | | yes |
+| servers | Servers to query. | | yes |
+| port | DNS server port. | 53 | no |
+| network | Network protocol name. Available options: udp, tcp, tcp-tls. | udp | no |
+| record_types | Query record types. Available options: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT. | A | no |
+| timeout | Query read timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: job1
+ record_types:
+ - A
+ - AAAA
+ domains:
+ - google.com
+ - github.com
+ - reddit.com
+ servers:
+ - 8.8.8.8
+ - 8.8.4.4
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `dns_query` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m dns_query
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `dns_query` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep dns_query
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep dns_query /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep dns_query
+```
+
+
diff --git a/src/go/plugin/go.d/modules/dnsquery/metadata.yaml b/src/go/plugin/go.d/modules/dnsquery/metadata.yaml
new file mode 100644
index 000000000..8c199550f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/metadata.yaml
@@ -0,0 +1,142 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-dns_query
+ plugin_name: go.d.plugin
+ module_name: dns_query
+ monitored_instance:
+ name: DNS query
+ link: ""
+ icon_filename: network-wired.svg
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - dns
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This module monitors DNS query round-trip time (RTT).
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/dns_query.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: All options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: domains
+              description: Domains or subdomains to query. The collector will choose a random domain from the list on every iteration.
+ default_value: ""
+ required: true
+ - name: servers
+ description: Servers to query.
+ default_value: ""
+ required: true
+ - name: port
+ description: DNS server port.
+ default_value: 53
+ required: false
+ - name: network
+ description: "Network protocol name. Available options: udp, tcp, tcp-tls."
+ default_value: udp
+ required: false
+ - name: record_types
+              description: "Query record types. Available options: A, AAAA, ANY, CNAME, MX, NS, PTR, SOA, SPF, SRV, TXT."
+ default_value: A
+ required: false
+ - name: timeout
+ description: Query read timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: job1
+ record_types:
+ - A
+ - AAAA
+ domains:
+ - google.com
+ - github.com
+ - reddit.com
+ servers:
+ - 8.8.8.8
+ - 8.8.4.4
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: dns_query_query_status
+ metric: dns_query.query_status
+ info: "DNS request type ${label:record_type} to server ${label:server} is unsuccessful"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/dns_query.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: server
+ description: These metrics refer to the DNS server.
+ labels:
+ - name: server
+ description: DNS server address.
+ - name: network
+ description: Network protocol name (tcp, udp, tcp-tls).
+ - name: record_type
+ description: DNS record type (e.g. A, AAAA, CNAME).
+ metrics:
+ - name: dns_query.query_status
+ description: DNS Query Status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: network_error
+ - name: dns_error
+ - name: dns_query.query_time
+ description: DNS Query Time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: query_time
diff --git a/src/go/plugin/go.d/modules/dnsquery/testdata/config.json b/src/go/plugin/go.d/modules/dnsquery/testdata/config.json
new file mode 100644
index 000000000..b16ed18c6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/testdata/config.json
@@ -0,0 +1,16 @@
+{
+ "update_every": 123,
+ "domains": [
+ "ok"
+ ],
+ "servers": [
+ "ok"
+ ],
+ "network": "ok",
+ "record_type": "ok",
+ "record_types": [
+ "ok"
+ ],
+ "port": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/dnsquery/testdata/config.yaml b/src/go/plugin/go.d/modules/dnsquery/testdata/config.yaml
new file mode 100644
index 000000000..6c6b014b6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dnsquery/testdata/config.yaml
@@ -0,0 +1,11 @@
+update_every: 123
+domains:
+ - "ok"
+servers:
+ - "ok"
+network: "ok"
+record_type: "ok"
+record_types:
+ - "ok"
+port: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/docker/README.md b/src/go/plugin/go.d/modules/docker/README.md
new file mode 120000
index 000000000..b4804ee06
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/README.md
@@ -0,0 +1 @@
+integrations/docker.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/docker/charts.go b/src/go/plugin/go.d/modules/docker/charts.go
new file mode 100644
index 000000000..6660dc1e4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/charts.go
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioContainersState = module.Priority + iota
+ prioContainersHealthy
+
+ prioContainerState
+ prioContainerHealthStatus
+ prioContainerWritableLayerSize
+
+ prioImagesCount
+ prioImagesSize
+)
+
+var summaryCharts = module.Charts{
+ containersStateChart.Copy(),
+ containersHealthyChart.Copy(),
+
+ imagesCountChart.Copy(),
+ imagesSizeChart.Copy(),
+}
+
+var (
+ containersStateChart = module.Chart{
+ ID: "containers_state",
+ Title: "Total number of Docker containers in various states",
+ Units: "containers",
+ Fam: "containers",
+ Ctx: "docker.containers_state",
+ Priority: prioContainersState,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "containers_state_running", Name: "running"},
+ {ID: "containers_state_paused", Name: "paused"},
+ {ID: "containers_state_exited", Name: "exited"},
+ },
+ }
+ containersHealthyChart = module.Chart{
+ ID: "healthy_containers",
+ Title: "Total number of Docker containers in various health states",
+ Units: "containers",
+ Fam: "containers",
+ Ctx: "docker.containers_health_status",
+ Priority: prioContainersHealthy,
+ Dims: module.Dims{
+ {ID: "containers_health_status_healthy", Name: "healthy"},
+ {ID: "containers_health_status_unhealthy", Name: "unhealthy"},
+ {ID: "containers_health_status_not_running_unhealthy", Name: "not_running_unhealthy"},
+ {ID: "containers_health_status_starting", Name: "starting"},
+ {ID: "containers_health_status_none", Name: "no_healthcheck"},
+ },
+ }
+)
+
+var (
+ imagesCountChart = module.Chart{
+ ID: "images_count",
+ Title: "Total number of Docker images in various states",
+ Units: "images",
+ Fam: "images",
+ Ctx: "docker.images",
+ Priority: prioImagesCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "images_active", Name: "active"},
+ {ID: "images_dangling", Name: "dangling"},
+ },
+ }
+ imagesSizeChart = module.Chart{
+ ID: "images_size",
+ Title: "Total size of all Docker images",
+ Units: "bytes",
+ Fam: "images",
+ Ctx: "docker.images_size",
+ Priority: prioImagesSize,
+ Dims: module.Dims{
+ {ID: "images_size", Name: "size"},
+ },
+ }
+)
+
+var (
+ containerChartsTmpl = module.Charts{
+ containerStateChartTmpl.Copy(),
+ containerHealthStatusChartTmpl.Copy(),
+ containerWritableLayerSizeChartTmpl.Copy(),
+ }
+
+ containerStateChartTmpl = module.Chart{
+ ID: "container_%s_state",
+ Title: "Docker container state",
+ Units: "state",
+ Fam: "containers",
+ Ctx: "docker.container_state",
+ Priority: prioContainerState,
+ Dims: module.Dims{
+ {ID: "container_%s_state_running", Name: "running"},
+ {ID: "container_%s_state_paused", Name: "paused"},
+ {ID: "container_%s_state_exited", Name: "exited"},
+ {ID: "container_%s_state_created", Name: "created"},
+ {ID: "container_%s_state_restarting", Name: "restarting"},
+ {ID: "container_%s_state_removing", Name: "removing"},
+ {ID: "container_%s_state_dead", Name: "dead"},
+ },
+ }
+ containerHealthStatusChartTmpl = module.Chart{
+ ID: "container_%s_health_status",
+ Title: "Docker container health status",
+ Units: "status",
+ Fam: "containers",
+ Ctx: "docker.container_health_status",
+ Priority: prioContainerHealthStatus,
+ Dims: module.Dims{
+ {ID: "container_%s_health_status_healthy", Name: "healthy"},
+ {ID: "container_%s_health_status_unhealthy", Name: "unhealthy"},
+ {ID: "container_%s_health_status_not_running_unhealthy", Name: "not_running_unhealthy"},
+ {ID: "container_%s_health_status_starting", Name: "starting"},
+ {ID: "container_%s_health_status_none", Name: "no_healthcheck"},
+ },
+ }
+ containerWritableLayerSizeChartTmpl = module.Chart{
+ ID: "container_%s_writable_layer_size",
+ Title: "Docker container writable layer size",
+ Units: "bytes",
+ Fam: "containers",
+ Ctx: "docker.container_writeable_layer_size",
+ Priority: prioContainerWritableLayerSize,
+ Dims: module.Dims{
+ {ID: "container_%s_size_rw", Name: "writable_layer"},
+ },
+ }
+)
+
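+// addContainerCharts instantiates the per-container chart templates for a single
+// container: the "%s" placeholders in chart and dimension IDs are replaced with
+// the container name, and container_name/image labels are attached.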
+func (d *Docker) addContainerCharts(name, image string) {
+ charts := containerChartsTmpl.Copy()
+ if !d.CollectContainerSize {
+ _ = charts.Remove(containerWritableLayerSizeChartTmpl.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Labels = []module.Label{
+ {Key: "container_name", Value: name},
+ {Key: "image", Value: image},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := d.Charts().Add(*charts...); err != nil {
+ d.Warning(err)
+ }
+}
+
+func (d *Docker) removeContainerCharts(name string) {
+ px := fmt.Sprintf("container_%s", name)
+
+ for _, chart := range *d.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/docker/collect.go b/src/go/plugin/go.d/modules/docker/collect.go
new file mode 100644
index 000000000..f23c58f22
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/collect.go
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/docker/docker/api/types"
+ typesContainer "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/filters"
+ typesImage "github.com/docker/docker/api/types/image"
+)
+
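+// collect lazily creates the Docker client on first use, negotiates the API
+// version once, and closes the client again at the end of every collection cycle.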
+func (d *Docker) collect() (map[string]int64, error) {
+ if d.client == nil {
+ client, err := d.newClient(d.Config)
+ if err != nil {
+ return nil, err
+ }
+ d.client = client
+ }
+
+ if !d.verNegotiated {
+ d.verNegotiated = true
+ d.negotiateAPIVersion()
+ }
+
+ defer func() { _ = d.client.Close() }()
+
+ mx := make(map[string]int64)
+
+ if err := d.collectInfo(mx); err != nil {
+ return nil, err
+ }
+ if err := d.collectImages(mx); err != nil {
+ return nil, err
+ }
+ if err := d.collectContainers(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (d *Docker) collectInfo(mx map[string]int64) error {
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration())
+ defer cancel()
+
+ info, err := d.client.Info(ctx)
+ if err != nil {
+ return err
+ }
+
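+	// The Docker API reports stopped containers; the chart exposes them as "exited".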
+ mx["containers_state_running"] = int64(info.ContainersRunning)
+ mx["containers_state_paused"] = int64(info.ContainersPaused)
+ mx["containers_state_exited"] = int64(info.ContainersStopped)
+
+ return nil
+}
+
+func (d *Docker) collectImages(mx map[string]int64) error {
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration())
+ defer cancel()
+
+ images, err := d.client.ImageList(ctx, typesImage.ListOptions{})
+ if err != nil {
+ return err
+ }
+
+ mx["images_size"] = 0
+ mx["images_dangling"] = 0
+ mx["images_active"] = 0
+
+ for _, v := range images {
+ mx["images_size"] += v.Size
+ if v.Containers == 0 {
+ mx["images_dangling"]++
+ } else {
+ mx["images_active"]++
+ }
+ }
+
+ return nil
+}
+
+var (
+ containerHealthStatuses = []string{
+ types.Healthy,
+ types.Unhealthy,
+ types.Starting,
+ types.NoHealthcheck,
+ }
+ containerStates = []string{
+ "created",
+ "running",
+ "paused",
+ "restarting",
+ "removing",
+ "exited",
+ "dead",
+ }
+)
+
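+// collectContainers lists containers once per Docker health status filter and
+// derives per-container state and health metrics. Unhealthy containers that are
+// not in the "running" state are counted under the synthetic
+// "not_running_unhealthy" status instead of "unhealthy".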
+func (d *Docker) collectContainers(mx map[string]int64) error {
+ containerSet := make(map[string][]types.Container)
+
+ for _, status := range containerHealthStatuses {
+ if err := func() error {
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration())
+ defer cancel()
+
+ v, err := d.client.ContainerList(ctx, typesContainer.ListOptions{
+ All: true,
+ Filters: filters.NewArgs(filters.KeyValuePair{Key: "health", Value: status}),
+ Size: d.CollectContainerSize,
+ })
+ if err != nil {
+ return err
+ }
+ containerSet[status] = v
+ return nil
+
+ }(); err != nil {
+ return err
+ }
+ }
+
+ seen := make(map[string]bool)
+
+ for _, s := range containerHealthStatuses {
+ mx["containers_health_status_"+s] = 0
+ }
+ mx["containers_health_status_not_running_unhealthy"] = 0
+
+ for status, containers := range containerSet {
+ if status != types.Unhealthy {
+ mx["containers_health_status_"+status] = int64(len(containers))
+ }
+
+ for _, cntr := range containers {
+ if status == types.Unhealthy {
+ if cntr.State == "running" {
+ mx["containers_health_status_"+status] += 1
+ } else {
+ mx["containers_health_status_not_running_unhealthy"] += 1
+ }
+ }
+
+ if len(cntr.Names) == 0 {
+ continue
+ }
+
+ name := strings.TrimPrefix(cntr.Names[0], "/")
+
+ seen[name] = true
+
+ if !d.containers[name] {
+ d.containers[name] = true
+ d.addContainerCharts(name, cntr.Image)
+ }
+
+ px := fmt.Sprintf("container_%s_", name)
+
+ for _, s := range containerHealthStatuses {
+ mx[px+"health_status_"+s] = 0
+ }
+ mx[px+"health_status_not_running_unhealthy"] = 0
+ for _, s := range containerStates {
+ mx[px+"state_"+s] = 0
+ }
+
+ if status == types.Unhealthy && cntr.State != "running" {
+ mx[px+"health_status_not_running_unhealthy"] += 1
+ } else {
+ mx[px+"health_status_"+status] = 1
+ }
+ mx[px+"state_"+cntr.State] = 1
+ mx[px+"size_rw"] = cntr.SizeRw
+ mx[px+"size_root_fs"] = cntr.SizeRootFs
+ }
+ }
+
+ for name := range d.containers {
+ if !seen[name] {
+ delete(d.containers, name)
+ d.removeContainerCharts(name)
+ }
+ }
+
+ return nil
+}
+
+func (d *Docker) negotiateAPIVersion() {
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration())
+ defer cancel()
+
+ d.client.NegotiateAPIVersion(ctx)
+}
diff --git a/src/go/plugin/go.d/modules/docker/config_schema.json b/src/go/plugin/go.d/modules/docker/config_schema.json
new file mode 100644
index 000000000..bd48c9126
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/config_schema.json
@@ -0,0 +1,52 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Docker collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+        "description": "Docker daemon's listening address (Unix or TCP socket).",
+ "type": "string",
+ "default": "unix:///var/run/docker.sock"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "default": 2
+ },
+ "collect_container_size": {
+ "title": "Collect container size",
+ "description": "Collect container writable layer size.",
+ "type": "boolean",
+ "default": false
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "address": {
+ "ui:help": "Use `unix://{path_to_socket}` for Unix socket or `tcp://{ip}:{port}` for TCP socket."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/docker/docker.go b/src/go/plugin/go.d/modules/docker/docker.go
new file mode 100644
index 000000000..88890b9fe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/docker.go
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker
+
+import (
+ "context"
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/dockerhost"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/docker/docker/api/types"
+ typesContainer "github.com/docker/docker/api/types/container"
+ typesImage "github.com/docker/docker/api/types/image"
+ typesSystem "github.com/docker/docker/api/types/system"
+ docker "github.com/docker/docker/client"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("docker", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Docker {
+ return &Docker{
+ Config: Config{
+ Address: docker.DefaultDockerHost,
+ Timeout: web.Duration(time.Second * 2),
+ CollectContainerSize: false,
+ },
+
+ charts: summaryCharts.Copy(),
+ newClient: func(cfg Config) (dockerClient, error) {
+ return docker.NewClientWithOpts(docker.WithHost(cfg.Address))
+ },
+ containers: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ CollectContainerSize bool `yaml:"collect_container_size" json:"collect_container_size"`
+}
+
+type (
+ Docker struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ client dockerClient
+ newClient func(Config) (dockerClient, error)
+
+ verNegotiated bool
+ containers map[string]bool
+ }
+ dockerClient interface {
+ NegotiateAPIVersion(context.Context)
+ Info(context.Context) (typesSystem.Info, error)
+ ImageList(context.Context, typesImage.ListOptions) ([]typesImage.Summary, error)
+ ContainerList(context.Context, typesContainer.ListOptions) ([]types.Container, error)
+ Close() error
+ }
+)
+
+func (d *Docker) Configuration() any {
+ return d.Config
+}
+
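+// Init prefers the Docker host advertised by the environment (dockerhost.FromEnv)
+// over the built-in default address, but never overrides an explicitly
+// configured one.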
+func (d *Docker) Init() error {
+ if addr := dockerhost.FromEnv(); addr != "" && d.Address == docker.DefaultDockerHost {
+		d.Infof("using docker host from environment: %s", addr)
+ d.Address = addr
+ }
+ return nil
+}
+
+func (d *Docker) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+	}
+ return nil
+}
+
+func (d *Docker) Charts() *module.Charts {
+ return d.charts
+}
+
+func (d *Docker) Collect() map[string]int64 {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (d *Docker) Cleanup() {
+ if d.client == nil {
+ return
+ }
+ if err := d.client.Close(); err != nil {
+ d.Warningf("error on closing docker client: %v", err)
+ }
+ d.client = nil
+}
diff --git a/src/go/plugin/go.d/modules/docker/docker_test.go b/src/go/plugin/go.d/modules/docker/docker_test.go
new file mode 100644
index 000000000..0ab894420
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/docker_test.go
@@ -0,0 +1,852 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker
+
+import (
+ "context"
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/docker/docker/api/types"
+ typesContainer "github.com/docker/docker/api/types/container"
+ typesImage "github.com/docker/docker/api/types/image"
+ typesSystem "github.com/docker/docker/api/types/system"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDocker_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Docker{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDocker_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "unset 'address'": {
+ wantFail: false,
+ config: Config{
+ Address: "",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ d := New()
+ d.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, d.Init())
+ } else {
+ assert.NoError(t, d.Init())
+ }
+ })
+ }
+}
+
+func TestDocker_Charts(t *testing.T) {
+ assert.Equal(t, len(summaryCharts), len(*New().Charts()))
+}
+
+func TestDocker_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(d *Docker)
+ wantClose bool
+ }{
+ "after New": {
+ wantClose: false,
+ prepare: func(d *Docker) {},
+ },
+ "after Init": {
+ wantClose: false,
+ prepare: func(d *Docker) { _ = d.Init() },
+ },
+ "after Check": {
+ wantClose: true,
+ prepare: func(d *Docker) { _ = d.Init(); _ = d.Check() },
+ },
+ "after Collect": {
+ wantClose: true,
+ prepare: func(d *Docker) { _ = d.Init(); d.Collect() },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ m := &mockClient{}
+ d := New()
+ d.newClient = prepareNewClientFunc(m)
+
+ test.prepare(d)
+
+ require.NotPanics(t, d.Cleanup)
+
+ if test.wantClose {
+ assert.True(t, m.closeCalled)
+ } else {
+ assert.False(t, m.closeCalled)
+ }
+ })
+ }
+}
+
+func TestDocker_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Docker
+ wantFail bool
+ }{
+ "case success": {
+ wantFail: false,
+ prepare: func() *Docker {
+ return prepareCaseSuccess()
+ },
+ },
+ "case success without container size": {
+ wantFail: false,
+ prepare: func() *Docker {
+ return prepareCaseSuccessWithoutContainerSize()
+ },
+ },
+ "fail on case err on Info()": {
+ wantFail: true,
+ prepare: func() *Docker {
+ return prepareCaseErrOnInfo()
+ },
+ },
+ "fail on case err on ImageList()": {
+ wantFail: true,
+ prepare: func() *Docker {
+ return prepareCaseErrOnImageList()
+ },
+ },
+ "fail on case err on ContainerList()": {
+ wantFail: true,
+ prepare: func() *Docker {
+ return prepareCaseErrOnContainerList()
+ },
+ },
+ "fail on case err on creating Docker client": {
+ wantFail: true,
+ prepare: func() *Docker {
+ return prepareCaseErrCreatingClient()
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ d := test.prepare()
+
+ require.NoError(t, d.Init())
+
+ if test.wantFail {
+ assert.Error(t, d.Check())
+ } else {
+ assert.NoError(t, d.Check())
+ }
+ })
+ }
+}
+
+func TestDocker_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Docker
+ expected map[string]int64
+ }{
+ "case success": {
+ prepare: func() *Docker {
+ return prepareCaseSuccess()
+ },
+ expected: map[string]int64{
+ "container_container10_health_status_healthy": 0,
+ "container_container10_health_status_none": 0,
+ "container_container10_health_status_not_running_unhealthy": 1,
+ "container_container10_health_status_starting": 0,
+ "container_container10_health_status_unhealthy": 0,
+ "container_container10_size_root_fs": 0,
+ "container_container10_size_rw": 0,
+ "container_container10_state_created": 0,
+ "container_container10_state_dead": 1,
+ "container_container10_state_exited": 0,
+ "container_container10_state_paused": 0,
+ "container_container10_state_removing": 0,
+ "container_container10_state_restarting": 0,
+ "container_container10_state_running": 0,
+ "container_container11_health_status_healthy": 0,
+ "container_container11_health_status_none": 0,
+ "container_container11_health_status_not_running_unhealthy": 0,
+ "container_container11_health_status_starting": 1,
+ "container_container11_health_status_unhealthy": 0,
+ "container_container11_size_root_fs": 0,
+ "container_container11_size_rw": 0,
+ "container_container11_state_created": 0,
+ "container_container11_state_dead": 0,
+ "container_container11_state_exited": 0,
+ "container_container11_state_paused": 0,
+ "container_container11_state_removing": 1,
+ "container_container11_state_restarting": 0,
+ "container_container11_state_running": 0,
+ "container_container12_health_status_healthy": 0,
+ "container_container12_health_status_none": 0,
+ "container_container12_health_status_not_running_unhealthy": 0,
+ "container_container12_health_status_starting": 1,
+ "container_container12_health_status_unhealthy": 0,
+ "container_container12_size_root_fs": 0,
+ "container_container12_size_rw": 0,
+ "container_container12_state_created": 0,
+ "container_container12_state_dead": 0,
+ "container_container12_state_exited": 1,
+ "container_container12_state_paused": 0,
+ "container_container12_state_removing": 0,
+ "container_container12_state_restarting": 0,
+ "container_container12_state_running": 0,
+ "container_container13_health_status_healthy": 0,
+ "container_container13_health_status_none": 0,
+ "container_container13_health_status_not_running_unhealthy": 0,
+ "container_container13_health_status_starting": 1,
+ "container_container13_health_status_unhealthy": 0,
+ "container_container13_size_root_fs": 0,
+ "container_container13_size_rw": 0,
+ "container_container13_state_created": 0,
+ "container_container13_state_dead": 0,
+ "container_container13_state_exited": 1,
+ "container_container13_state_paused": 0,
+ "container_container13_state_removing": 0,
+ "container_container13_state_restarting": 0,
+ "container_container13_state_running": 0,
+ "container_container14_health_status_healthy": 0,
+ "container_container14_health_status_none": 1,
+ "container_container14_health_status_not_running_unhealthy": 0,
+ "container_container14_health_status_starting": 0,
+ "container_container14_health_status_unhealthy": 0,
+ "container_container14_size_root_fs": 0,
+ "container_container14_size_rw": 0,
+ "container_container14_state_created": 0,
+ "container_container14_state_dead": 1,
+ "container_container14_state_exited": 0,
+ "container_container14_state_paused": 0,
+ "container_container14_state_removing": 0,
+ "container_container14_state_restarting": 0,
+ "container_container14_state_running": 0,
+ "container_container15_health_status_healthy": 0,
+ "container_container15_health_status_none": 1,
+ "container_container15_health_status_not_running_unhealthy": 0,
+ "container_container15_health_status_starting": 0,
+ "container_container15_health_status_unhealthy": 0,
+ "container_container15_size_root_fs": 0,
+ "container_container15_size_rw": 0,
+ "container_container15_state_created": 0,
+ "container_container15_state_dead": 1,
+ "container_container15_state_exited": 0,
+ "container_container15_state_paused": 0,
+ "container_container15_state_removing": 0,
+ "container_container15_state_restarting": 0,
+ "container_container15_state_running": 0,
+ "container_container16_health_status_healthy": 0,
+ "container_container16_health_status_none": 1,
+ "container_container16_health_status_not_running_unhealthy": 0,
+ "container_container16_health_status_starting": 0,
+ "container_container16_health_status_unhealthy": 0,
+ "container_container16_size_root_fs": 0,
+ "container_container16_size_rw": 0,
+ "container_container16_state_created": 0,
+ "container_container16_state_dead": 1,
+ "container_container16_state_exited": 0,
+ "container_container16_state_paused": 0,
+ "container_container16_state_removing": 0,
+ "container_container16_state_restarting": 0,
+ "container_container16_state_running": 0,
+ "container_container1_health_status_healthy": 1,
+ "container_container1_health_status_none": 0,
+ "container_container1_health_status_not_running_unhealthy": 0,
+ "container_container1_health_status_starting": 0,
+ "container_container1_health_status_unhealthy": 0,
+ "container_container1_size_root_fs": 0,
+ "container_container1_size_rw": 0,
+ "container_container1_state_created": 1,
+ "container_container1_state_dead": 0,
+ "container_container1_state_exited": 0,
+ "container_container1_state_paused": 0,
+ "container_container1_state_removing": 0,
+ "container_container1_state_restarting": 0,
+ "container_container1_state_running": 0,
+ "container_container2_health_status_healthy": 1,
+ "container_container2_health_status_none": 0,
+ "container_container2_health_status_not_running_unhealthy": 0,
+ "container_container2_health_status_starting": 0,
+ "container_container2_health_status_unhealthy": 0,
+ "container_container2_size_root_fs": 0,
+ "container_container2_size_rw": 0,
+ "container_container2_state_created": 0,
+ "container_container2_state_dead": 0,
+ "container_container2_state_exited": 0,
+ "container_container2_state_paused": 0,
+ "container_container2_state_removing": 0,
+ "container_container2_state_restarting": 0,
+ "container_container2_state_running": 1,
+ "container_container3_health_status_healthy": 1,
+ "container_container3_health_status_none": 0,
+ "container_container3_health_status_not_running_unhealthy": 0,
+ "container_container3_health_status_starting": 0,
+ "container_container3_health_status_unhealthy": 0,
+ "container_container3_size_root_fs": 0,
+ "container_container3_size_rw": 0,
+ "container_container3_state_created": 0,
+ "container_container3_state_dead": 0,
+ "container_container3_state_exited": 0,
+ "container_container3_state_paused": 0,
+ "container_container3_state_removing": 0,
+ "container_container3_state_restarting": 0,
+ "container_container3_state_running": 1,
+ "container_container4_health_status_healthy": 0,
+ "container_container4_health_status_none": 0,
+ "container_container4_health_status_not_running_unhealthy": 1,
+ "container_container4_health_status_starting": 0,
+ "container_container4_health_status_unhealthy": 0,
+ "container_container4_size_root_fs": 0,
+ "container_container4_size_rw": 0,
+ "container_container4_state_created": 1,
+ "container_container4_state_dead": 0,
+ "container_container4_state_exited": 0,
+ "container_container4_state_paused": 0,
+ "container_container4_state_removing": 0,
+ "container_container4_state_restarting": 0,
+ "container_container4_state_running": 0,
+ "container_container5_health_status_healthy": 0,
+ "container_container5_health_status_none": 0,
+ "container_container5_health_status_not_running_unhealthy": 0,
+ "container_container5_health_status_starting": 0,
+ "container_container5_health_status_unhealthy": 1,
+ "container_container5_size_root_fs": 0,
+ "container_container5_size_rw": 0,
+ "container_container5_state_created": 0,
+ "container_container5_state_dead": 0,
+ "container_container5_state_exited": 0,
+ "container_container5_state_paused": 0,
+ "container_container5_state_removing": 0,
+ "container_container5_state_restarting": 0,
+ "container_container5_state_running": 1,
+ "container_container6_health_status_healthy": 0,
+ "container_container6_health_status_none": 0,
+ "container_container6_health_status_not_running_unhealthy": 1,
+ "container_container6_health_status_starting": 0,
+ "container_container6_health_status_unhealthy": 0,
+ "container_container6_size_root_fs": 0,
+ "container_container6_size_rw": 0,
+ "container_container6_state_created": 0,
+ "container_container6_state_dead": 0,
+ "container_container6_state_exited": 0,
+ "container_container6_state_paused": 1,
+ "container_container6_state_removing": 0,
+ "container_container6_state_restarting": 0,
+ "container_container6_state_running": 0,
+ "container_container7_health_status_healthy": 0,
+ "container_container7_health_status_none": 0,
+ "container_container7_health_status_not_running_unhealthy": 1,
+ "container_container7_health_status_starting": 0,
+ "container_container7_health_status_unhealthy": 0,
+ "container_container7_size_root_fs": 0,
+ "container_container7_size_rw": 0,
+ "container_container7_state_created": 0,
+ "container_container7_state_dead": 0,
+ "container_container7_state_exited": 0,
+ "container_container7_state_paused": 0,
+ "container_container7_state_removing": 0,
+ "container_container7_state_restarting": 1,
+ "container_container7_state_running": 0,
+ "container_container8_health_status_healthy": 0,
+ "container_container8_health_status_none": 0,
+ "container_container8_health_status_not_running_unhealthy": 1,
+ "container_container8_health_status_starting": 0,
+ "container_container8_health_status_unhealthy": 0,
+ "container_container8_size_root_fs": 0,
+ "container_container8_size_rw": 0,
+ "container_container8_state_created": 0,
+ "container_container8_state_dead": 0,
+ "container_container8_state_exited": 0,
+ "container_container8_state_paused": 0,
+ "container_container8_state_removing": 1,
+ "container_container8_state_restarting": 0,
+ "container_container8_state_running": 0,
+ "container_container9_health_status_healthy": 0,
+ "container_container9_health_status_none": 0,
+ "container_container9_health_status_not_running_unhealthy": 1,
+ "container_container9_health_status_starting": 0,
+ "container_container9_health_status_unhealthy": 0,
+ "container_container9_size_root_fs": 0,
+ "container_container9_size_rw": 0,
+ "container_container9_state_created": 0,
+ "container_container9_state_dead": 0,
+ "container_container9_state_exited": 1,
+ "container_container9_state_paused": 0,
+ "container_container9_state_removing": 0,
+ "container_container9_state_restarting": 0,
+ "container_container9_state_running": 0,
+ "containers_health_status_healthy": 3,
+ "containers_health_status_none": 3,
+ "containers_health_status_not_running_unhealthy": 6,
+ "containers_health_status_starting": 3,
+ "containers_health_status_unhealthy": 1,
+ "containers_state_exited": 6,
+ "containers_state_paused": 5,
+ "containers_state_running": 4,
+ "images_active": 1,
+ "images_dangling": 1,
+ "images_size": 300,
+ },
+ },
+ "case success without container size": {
+ prepare: func() *Docker {
+ return prepareCaseSuccessWithoutContainerSize()
+ },
+ expected: map[string]int64{
+ "container_container10_health_status_healthy": 0,
+ "container_container10_health_status_none": 0,
+ "container_container10_health_status_not_running_unhealthy": 1,
+ "container_container10_health_status_starting": 0,
+ "container_container10_health_status_unhealthy": 0,
+ "container_container10_size_root_fs": 0,
+ "container_container10_size_rw": 0,
+ "container_container10_state_created": 0,
+ "container_container10_state_dead": 1,
+ "container_container10_state_exited": 0,
+ "container_container10_state_paused": 0,
+ "container_container10_state_removing": 0,
+ "container_container10_state_restarting": 0,
+ "container_container10_state_running": 0,
+ "container_container11_health_status_healthy": 0,
+ "container_container11_health_status_none": 0,
+ "container_container11_health_status_not_running_unhealthy": 0,
+ "container_container11_health_status_starting": 1,
+ "container_container11_health_status_unhealthy": 0,
+ "container_container11_size_root_fs": 0,
+ "container_container11_size_rw": 0,
+ "container_container11_state_created": 0,
+ "container_container11_state_dead": 0,
+ "container_container11_state_exited": 0,
+ "container_container11_state_paused": 0,
+ "container_container11_state_removing": 1,
+ "container_container11_state_restarting": 0,
+ "container_container11_state_running": 0,
+ "container_container12_health_status_healthy": 0,
+ "container_container12_health_status_none": 0,
+ "container_container12_health_status_not_running_unhealthy": 0,
+ "container_container12_health_status_starting": 1,
+ "container_container12_health_status_unhealthy": 0,
+ "container_container12_size_root_fs": 0,
+ "container_container12_size_rw": 0,
+ "container_container12_state_created": 0,
+ "container_container12_state_dead": 0,
+ "container_container12_state_exited": 1,
+ "container_container12_state_paused": 0,
+ "container_container12_state_removing": 0,
+ "container_container12_state_restarting": 0,
+ "container_container12_state_running": 0,
+ "container_container13_health_status_healthy": 0,
+ "container_container13_health_status_none": 0,
+ "container_container13_health_status_not_running_unhealthy": 0,
+ "container_container13_health_status_starting": 1,
+ "container_container13_health_status_unhealthy": 0,
+ "container_container13_size_root_fs": 0,
+ "container_container13_size_rw": 0,
+ "container_container13_state_created": 0,
+ "container_container13_state_dead": 0,
+ "container_container13_state_exited": 1,
+ "container_container13_state_paused": 0,
+ "container_container13_state_removing": 0,
+ "container_container13_state_restarting": 0,
+ "container_container13_state_running": 0,
+ "container_container14_health_status_healthy": 0,
+ "container_container14_health_status_none": 1,
+ "container_container14_health_status_not_running_unhealthy": 0,
+ "container_container14_health_status_starting": 0,
+ "container_container14_health_status_unhealthy": 0,
+ "container_container14_size_root_fs": 0,
+ "container_container14_size_rw": 0,
+ "container_container14_state_created": 0,
+ "container_container14_state_dead": 1,
+ "container_container14_state_exited": 0,
+ "container_container14_state_paused": 0,
+ "container_container14_state_removing": 0,
+ "container_container14_state_restarting": 0,
+ "container_container14_state_running": 0,
+ "container_container15_health_status_healthy": 0,
+ "container_container15_health_status_none": 1,
+ "container_container15_health_status_not_running_unhealthy": 0,
+ "container_container15_health_status_starting": 0,
+ "container_container15_health_status_unhealthy": 0,
+ "container_container15_size_root_fs": 0,
+ "container_container15_size_rw": 0,
+ "container_container15_state_created": 0,
+ "container_container15_state_dead": 1,
+ "container_container15_state_exited": 0,
+ "container_container15_state_paused": 0,
+ "container_container15_state_removing": 0,
+ "container_container15_state_restarting": 0,
+ "container_container15_state_running": 0,
+ "container_container16_health_status_healthy": 0,
+ "container_container16_health_status_none": 1,
+ "container_container16_health_status_not_running_unhealthy": 0,
+ "container_container16_health_status_starting": 0,
+ "container_container16_health_status_unhealthy": 0,
+ "container_container16_size_root_fs": 0,
+ "container_container16_size_rw": 0,
+ "container_container16_state_created": 0,
+ "container_container16_state_dead": 1,
+ "container_container16_state_exited": 0,
+ "container_container16_state_paused": 0,
+ "container_container16_state_removing": 0,
+ "container_container16_state_restarting": 0,
+ "container_container16_state_running": 0,
+ "container_container1_health_status_healthy": 1,
+ "container_container1_health_status_none": 0,
+ "container_container1_health_status_not_running_unhealthy": 0,
+ "container_container1_health_status_starting": 0,
+ "container_container1_health_status_unhealthy": 0,
+ "container_container1_size_root_fs": 0,
+ "container_container1_size_rw": 0,
+ "container_container1_state_created": 1,
+ "container_container1_state_dead": 0,
+ "container_container1_state_exited": 0,
+ "container_container1_state_paused": 0,
+ "container_container1_state_removing": 0,
+ "container_container1_state_restarting": 0,
+ "container_container1_state_running": 0,
+ "container_container2_health_status_healthy": 1,
+ "container_container2_health_status_none": 0,
+ "container_container2_health_status_not_running_unhealthy": 0,
+ "container_container2_health_status_starting": 0,
+ "container_container2_health_status_unhealthy": 0,
+ "container_container2_size_root_fs": 0,
+ "container_container2_size_rw": 0,
+ "container_container2_state_created": 0,
+ "container_container2_state_dead": 0,
+ "container_container2_state_exited": 0,
+ "container_container2_state_paused": 0,
+ "container_container2_state_removing": 0,
+ "container_container2_state_restarting": 0,
+ "container_container2_state_running": 1,
+ "container_container3_health_status_healthy": 1,
+ "container_container3_health_status_none": 0,
+ "container_container3_health_status_not_running_unhealthy": 0,
+ "container_container3_health_status_starting": 0,
+ "container_container3_health_status_unhealthy": 0,
+ "container_container3_size_root_fs": 0,
+ "container_container3_size_rw": 0,
+ "container_container3_state_created": 0,
+ "container_container3_state_dead": 0,
+ "container_container3_state_exited": 0,
+ "container_container3_state_paused": 0,
+ "container_container3_state_removing": 0,
+ "container_container3_state_restarting": 0,
+ "container_container3_state_running": 1,
+ "container_container4_health_status_healthy": 0,
+ "container_container4_health_status_none": 0,
+ "container_container4_health_status_not_running_unhealthy": 1,
+ "container_container4_health_status_starting": 0,
+ "container_container4_health_status_unhealthy": 0,
+ "container_container4_size_root_fs": 0,
+ "container_container4_size_rw": 0,
+ "container_container4_state_created": 1,
+ "container_container4_state_dead": 0,
+ "container_container4_state_exited": 0,
+ "container_container4_state_paused": 0,
+ "container_container4_state_removing": 0,
+ "container_container4_state_restarting": 0,
+ "container_container4_state_running": 0,
+ "container_container5_health_status_healthy": 0,
+ "container_container5_health_status_none": 0,
+ "container_container5_health_status_not_running_unhealthy": 0,
+ "container_container5_health_status_starting": 0,
+ "container_container5_health_status_unhealthy": 1,
+ "container_container5_size_root_fs": 0,
+ "container_container5_size_rw": 0,
+ "container_container5_state_created": 0,
+ "container_container5_state_dead": 0,
+ "container_container5_state_exited": 0,
+ "container_container5_state_paused": 0,
+ "container_container5_state_removing": 0,
+ "container_container5_state_restarting": 0,
+ "container_container5_state_running": 1,
+ "container_container6_health_status_healthy": 0,
+ "container_container6_health_status_none": 0,
+ "container_container6_health_status_not_running_unhealthy": 1,
+ "container_container6_health_status_starting": 0,
+ "container_container6_health_status_unhealthy": 0,
+ "container_container6_size_root_fs": 0,
+ "container_container6_size_rw": 0,
+ "container_container6_state_created": 0,
+ "container_container6_state_dead": 0,
+ "container_container6_state_exited": 0,
+ "container_container6_state_paused": 1,
+ "container_container6_state_removing": 0,
+ "container_container6_state_restarting": 0,
+ "container_container6_state_running": 0,
+ "container_container7_health_status_healthy": 0,
+ "container_container7_health_status_none": 0,
+ "container_container7_health_status_not_running_unhealthy": 1,
+ "container_container7_health_status_starting": 0,
+ "container_container7_health_status_unhealthy": 0,
+ "container_container7_size_root_fs": 0,
+ "container_container7_size_rw": 0,
+ "container_container7_state_created": 0,
+ "container_container7_state_dead": 0,
+ "container_container7_state_exited": 0,
+ "container_container7_state_paused": 0,
+ "container_container7_state_removing": 0,
+ "container_container7_state_restarting": 1,
+ "container_container7_state_running": 0,
+ "container_container8_health_status_healthy": 0,
+ "container_container8_health_status_none": 0,
+ "container_container8_health_status_not_running_unhealthy": 1,
+ "container_container8_health_status_starting": 0,
+ "container_container8_health_status_unhealthy": 0,
+ "container_container8_size_root_fs": 0,
+ "container_container8_size_rw": 0,
+ "container_container8_state_created": 0,
+ "container_container8_state_dead": 0,
+ "container_container8_state_exited": 0,
+ "container_container8_state_paused": 0,
+ "container_container8_state_removing": 1,
+ "container_container8_state_restarting": 0,
+ "container_container8_state_running": 0,
+ "container_container9_health_status_healthy": 0,
+ "container_container9_health_status_none": 0,
+ "container_container9_health_status_not_running_unhealthy": 1,
+ "container_container9_health_status_starting": 0,
+ "container_container9_health_status_unhealthy": 0,
+ "container_container9_size_root_fs": 0,
+ "container_container9_size_rw": 0,
+ "container_container9_state_created": 0,
+ "container_container9_state_dead": 0,
+ "container_container9_state_exited": 1,
+ "container_container9_state_paused": 0,
+ "container_container9_state_removing": 0,
+ "container_container9_state_restarting": 0,
+ "container_container9_state_running": 0,
+ "containers_health_status_healthy": 3,
+ "containers_health_status_none": 3,
+ "containers_health_status_not_running_unhealthy": 6,
+ "containers_health_status_starting": 3,
+ "containers_health_status_unhealthy": 1,
+ "containers_state_exited": 6,
+ "containers_state_paused": 5,
+ "containers_state_running": 4,
+ "images_active": 1,
+ "images_dangling": 1,
+ "images_size": 300,
+ },
+ },
+ "fail on case err on Info()": {
+ prepare: func() *Docker {
+ return prepareCaseErrOnInfo()
+ },
+ expected: nil,
+ },
+ "fail on case err on ImageList()": {
+ prepare: func() *Docker {
+ return prepareCaseErrOnImageList()
+ },
+ expected: nil,
+ },
+ "fail on case err on ContainerList()": {
+ prepare: func() *Docker {
+ return prepareCaseErrOnContainerList()
+ },
+ expected: nil,
+ },
+ "fail on case err on creating Docker client": {
+ prepare: func() *Docker {
+ return prepareCaseErrCreatingClient()
+ },
+ expected: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ d := test.prepare()
+
+ require.NoError(t, d.Init())
+
+ mx := d.Collect()
+
+ require.Equal(t, test.expected, mx)
+
+ if d.client != nil {
+ m, ok := d.client.(*mockClient)
+ require.True(t, ok)
+ require.True(t, m.negotiateAPIVersionCalled)
+ }
+
+ })
+ }
+}
+
+func prepareCaseSuccess() *Docker {
+ d := New()
+ d.CollectContainerSize = true
+ d.newClient = prepareNewClientFunc(&mockClient{})
+ return d
+}
+
+func prepareCaseSuccessWithoutContainerSize() *Docker {
+ d := New()
+ d.CollectContainerSize = false
+ d.newClient = prepareNewClientFunc(&mockClient{})
+ return d
+}
+
+func prepareCaseErrOnInfo() *Docker {
+ d := New()
+ d.newClient = prepareNewClientFunc(&mockClient{errOnInfo: true})
+ return d
+}
+
+func prepareCaseErrOnImageList() *Docker {
+ d := New()
+ d.newClient = prepareNewClientFunc(&mockClient{errOnImageList: true})
+ return d
+}
+
+func prepareCaseErrOnContainerList() *Docker {
+ d := New()
+ d.newClient = prepareNewClientFunc(&mockClient{errOnContainerList: true})
+ return d
+}
+
+func prepareCaseErrCreatingClient() *Docker {
+ d := New()
+ d.newClient = prepareNewClientFunc(nil)
+ return d
+}
+
+func prepareNewClientFunc(m *mockClient) func(_ Config) (dockerClient, error) {
+ if m == nil {
+ return func(_ Config) (dockerClient, error) { return nil, errors.New("mock.newClient() error") }
+ }
+ return func(_ Config) (dockerClient, error) { return m, nil }
+}
+
+type mockClient struct {
+ errOnInfo bool
+ errOnImageList bool
+ errOnContainerList bool
+ negotiateAPIVersionCalled bool
+ closeCalled bool
+}
+
+func (m *mockClient) Info(_ context.Context) (typesSystem.Info, error) {
+ if m.errOnInfo {
+ return typesSystem.Info{}, errors.New("mockClient.Info() error")
+ }
+
+ return typesSystem.Info{
+ ContainersRunning: 4,
+ ContainersPaused: 5,
+ ContainersStopped: 6,
+ }, nil
+}
+
+func (m *mockClient) ContainerList(_ context.Context, opts typesContainer.ListOptions) ([]types.Container, error) {
+ if m.errOnContainerList {
+ return nil, errors.New("mockClient.ContainerList() error")
+ }
+
+ v := opts.Filters.Get("health")
+
+ if len(v) == 0 {
+ return nil, errors.New("mockClient.ContainerList() error (expect 'health' filter)")
+ }
+
+ var containers []types.Container
+
+ switch v[0] {
+ case types.Healthy:
+ containers = []types.Container{
+ {Names: []string{"container1"}, State: "created", Image: "example/example:v1"},
+ {Names: []string{"container2"}, State: "running", Image: "example/example:v1"},
+ {Names: []string{"container3"}, State: "running", Image: "example/example:v1"},
+ }
+ case types.Unhealthy:
+ containers = []types.Container{
+ {Names: []string{"container4"}, State: "created", Image: "example/example:v2"},
+ {Names: []string{"container5"}, State: "running", Image: "example/example:v2"},
+ {Names: []string{"container6"}, State: "paused", Image: "example/example:v2"},
+ {Names: []string{"container7"}, State: "restarting", Image: "example/example:v2"},
+ {Names: []string{"container8"}, State: "removing", Image: "example/example:v2"},
+ {Names: []string{"container9"}, State: "exited", Image: "example/example:v2"},
+ {Names: []string{"container10"}, State: "dead", Image: "example/example:v2"},
+ }
+ case types.Starting:
+ containers = []types.Container{
+ {Names: []string{"container11"}, State: "removing", Image: "example/example:v3"},
+ {Names: []string{"container12"}, State: "exited", Image: "example/example:v3"},
+ {Names: []string{"container13"}, State: "exited", Image: "example/example:v3"},
+ }
+ case types.NoHealthcheck:
+ containers = []types.Container{
+ {Names: []string{"container14"}, State: "dead", Image: "example/example:v4"},
+ {Names: []string{"container15"}, State: "dead", Image: "example/example:v4"},
+ {Names: []string{"container16"}, State: "dead", Image: "example/example:v4"},
+ }
+ }
+
+ if opts.Size {
+		for i := range containers {
+			containers[i].SizeRw = 123
+			containers[i].SizeRootFs = 321
+ }
+ }
+
+ return containers, nil
+}
+
+func (m *mockClient) ImageList(_ context.Context, _ typesImage.ListOptions) ([]typesImage.Summary, error) {
+ if m.errOnImageList {
+ return nil, errors.New("mockClient.ImageList() error")
+ }
+
+ return []typesImage.Summary{
+ {
+ Containers: 0,
+ Size: 100,
+ },
+ {
+ Containers: 1,
+ Size: 200,
+ },
+ }, nil
+}
+
+func (m *mockClient) NegotiateAPIVersion(_ context.Context) {
+ m.negotiateAPIVersionCalled = true
+}
+
+func (m *mockClient) Close() error {
+ m.closeCalled = true
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/docker/integrations/docker.md b/src/go/plugin/go.d/modules/docker/integrations/docker.md
new file mode 100644
index 000000000..cb5452530
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/integrations/docker.md
@@ -0,0 +1,243 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/docker/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/docker/metadata.yaml"
+sidebar_label: "Docker"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Containers and VMs"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Docker
+
+
+<img src="https://netdata.cloud/img/docker.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: docker
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Docker containers' state, health status, and more.
+
+
+It connects to the Docker instance via a TCP or UNIX socket and executes the following commands:
+
+- [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).
+- [List images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).
+- [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+Requires the netdata user to be in the docker group.
+
+### Default Behavior
+
+#### Auto-Detection
+
+It discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Enabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.
+
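+For reference, a minimal job definition that also opts into container size collection might look like
+the sketch below (the `collect_container_size` option is described in the Options table further down):
+
+```yaml
+jobs:
+  - name: local
+    address: 'unix:///var/run/docker.sock'
+    collect_container_size: yes
+```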
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Docker instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| docker.containers_state | running, paused, stopped | containers |
+| docker.containers_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | containers |
+| docker.images | active, dangling | images |
+| docker.images_size | size | bytes |
+
+### Per container
+
+Metrics related to containers. Each container provides its own set of the following metrics.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| container_name | The container's name |
+| image | The image name the container uses |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| docker.container_state | running, paused, exited, created, restarting, removing, dead | state |
+| docker.container_health_status | healthy, unhealthy, not_running_unhealthy, starting, no_healthcheck | status |
+| docker.container_writeable_layer_size | writeable_layer | size |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ docker_container_unhealthy ](https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf) | docker.container_health_status | ${label:container_name} docker container health status is unhealthy |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/docker.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/docker.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Docker daemon's listening address. When using a TCP socket, the format is: tcp://[ip]:[port] | unix:///var/run/docker.sock | yes |
+| timeout | Request timeout in seconds. | 2 | no |
+| collect_container_size | Whether to collect container writable layer size. | no | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+```yaml
+jobs:
+ - name: local
+ address: 'unix:///var/run/docker.sock'
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 'unix:///var/run/docker.sock'
+
+ - name: remote
+ address: 'tcp://203.0.113.10:2375'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `docker` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m docker
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `docker` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep docker
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep docker /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep docker
+```
+
+
diff --git a/src/go/plugin/go.d/modules/docker/metadata.yaml b/src/go/plugin/go.d/modules/docker/metadata.yaml
new file mode 100644
index 000000000..8fc6853a9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/metadata.yaml
@@ -0,0 +1,190 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-docker
+ plugin_name: go.d.plugin
+ module_name: docker
+ alternative_monitored_instances: []
+ monitored_instance:
+ name: Docker
+ link: https://www.docker.com/
+ categories:
+ - data-collection.containers-and-vms
+ icon_filename: docker.svg
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - container
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+          This collector monitors Docker containers' state, health status, and more.
+ method_description: |
+ It connects to the Docker instance via a TCP or UNIX socket and executes the following commands:
+
+ - [System info](https://docs.docker.com/engine/api/v1.43/#tag/System/operation/SystemInfo).
+ - [List images](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageList).
+ - [List containers](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerList).
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+        description: Requires the netdata user to be in the docker group.
+ default_behavior:
+ auto_detection:
+ description: |
+ It discovers instances running on localhost by attempting to connect to a known Docker UNIX socket: `/var/run/docker.sock`.
+ limits:
+ description: ""
+ performance_impact:
+ description: |
+ Enabling `collect_container_size` may result in high CPU usage depending on the version of Docker Engine.
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/docker.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: 'Docker daemon''s listening address. When using a TCP socket, the format is: tcp://[ip]:[port]'
+ default_value: unix:///var/run/docker.sock
+ required: true
+ - name: timeout
+ description: Request timeout in seconds.
+ default_value: 2
+ required: false
+ - name: collect_container_size
+ description: Whether to collect container writable layer size.
+ default_value: "no"
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: Config
+ list:
+ - name: Basic
+ description: An example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ address: 'unix:///var/run/docker.sock'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 'unix:///var/run/docker.sock'
+
+ - name: remote
+ address: 'tcp://203.0.113.10:2375'
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: docker_container_unhealthy
+ metric: docker.container_health_status
+ info: ${label:container_name} docker container health status is unhealthy
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/docker.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: docker.containers_state
+ description: Total number of Docker containers in various states
+ unit: containers
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: paused
+ - name: stopped
+ - name: docker.containers_health_status
+ description: Total number of Docker containers in various health states
+ unit: containers
+ chart_type: line
+ dimensions:
+ - name: healthy
+ - name: unhealthy
+ - name: not_running_unhealthy
+ - name: starting
+ - name: no_healthcheck
+ - name: docker.images
+ description: Total number of Docker images in various states
+ unit: images
+ chart_type: stacked
+ dimensions:
+ - name: active
+ - name: dangling
+ - name: docker.images_size
+ description: Total size of all Docker images
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: container
+ description: Metrics related to containers. Each container provides its own set of the following metrics.
+ labels:
+ - name: container_name
+ description: The container's name
+ - name: image
+ description: The image name the container uses
+ metrics:
+ - name: docker.container_state
+ description: Docker container state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: running
+ - name: paused
+ - name: exited
+ - name: created
+ - name: restarting
+ - name: removing
+ - name: dead
+ - name: docker.container_health_status
+ description: Docker container health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: healthy
+ - name: unhealthy
+ - name: not_running_unhealthy
+ - name: starting
+ - name: no_healthcheck
+ - name: docker.container_writeable_layer_size
+ description: Docker container writable layer size
+ unit: size
+ chart_type: line
+ dimensions:
+ - name: writeable_layer
diff --git a/src/go/plugin/go.d/modules/docker/testdata/config.json b/src/go/plugin/go.d/modules/docker/testdata/config.json
new file mode 100644
index 000000000..5e687448c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "collect_container_size": true
+}
diff --git a/src/go/plugin/go.d/modules/docker/testdata/config.yaml b/src/go/plugin/go.d/modules/docker/testdata/config.yaml
new file mode 100644
index 000000000..2b0f32225
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+collect_container_size: yes
diff --git a/src/go/plugin/go.d/modules/docker_engine/README.md b/src/go/plugin/go.d/modules/docker_engine/README.md
new file mode 120000
index 000000000..f00a4cd97
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/README.md
@@ -0,0 +1 @@
+integrations/docker_engine.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/docker_engine/charts.go b/src/go/plugin/go.d/modules/docker_engine/charts.go
new file mode 100644
index 000000000..8a37545ce
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/charts.go
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker_engine
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ Charts = module.Charts
+ Dims = module.Dims
+)
+
+var charts = Charts{
+ {
+ ID: "engine_daemon_container_actions",
+ Title: "Container Actions",
+ Units: "actions/s",
+ Fam: "containers",
+ Ctx: "docker_engine.engine_daemon_container_actions",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "container_actions_changes", Name: "changes", Algo: module.Incremental},
+ {ID: "container_actions_commit", Name: "commit", Algo: module.Incremental},
+ {ID: "container_actions_create", Name: "create", Algo: module.Incremental},
+ {ID: "container_actions_delete", Name: "delete", Algo: module.Incremental},
+ {ID: "container_actions_start", Name: "start", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "engine_daemon_container_states_containers",
+ Title: "Containers In Various States",
+ Units: "containers",
+ Fam: "containers",
+ Ctx: "docker_engine.engine_daemon_container_states_containers",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "container_states_running", Name: "running"},
+ {ID: "container_states_paused", Name: "paused"},
+ {ID: "container_states_stopped", Name: "stopped"},
+ },
+ },
+ {
+ ID: "builder_builds_failed_total",
+ Title: "Builder Builds Fails By Reason",
+ Units: "fails/s",
+ Fam: "builder",
+ Ctx: "docker_engine.builder_builds_failed_total",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "builder_fails_build_canceled", Name: "build_canceled", Algo: module.Incremental},
+ {ID: "builder_fails_build_target_not_reachable_error", Name: "build_target_not_reachable_error", Algo: module.Incremental},
+ {ID: "builder_fails_command_not_supported_error", Name: "command_not_supported_error", Algo: module.Incremental},
+ {ID: "builder_fails_dockerfile_empty_error", Name: "dockerfile_empty_error", Algo: module.Incremental},
+ {ID: "builder_fails_dockerfile_syntax_error", Name: "dockerfile_syntax_error", Algo: module.Incremental},
+ {ID: "builder_fails_error_processing_commands_error", Name: "error_processing_commands_error", Algo: module.Incremental},
+ {ID: "builder_fails_missing_onbuild_arguments_error", Name: "missing_onbuild_arguments_error", Algo: module.Incremental},
+ {ID: "builder_fails_unknown_instruction_error", Name: "unknown_instruction_error", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "engine_daemon_health_checks_failed_total",
+ Title: "Health Checks",
+ Units: "events/s",
+ Fam: "health checks",
+ Ctx: "docker_engine.engine_daemon_health_checks_failed_total",
+ Dims: Dims{
+ {ID: "health_checks_failed", Name: "fails", Algo: module.Incremental},
+ },
+ },
+}
+
+var swarmManagerCharts = Charts{
+ {
+ ID: "swarm_manager_leader",
+ Title: "Swarm Manager Leader",
+ Units: "bool",
+ Fam: "swarm",
+ Ctx: "docker_engine.swarm_manager_leader",
+ Dims: Dims{
+ {ID: "swarm_manager_leader", Name: "is_leader"},
+ },
+ },
+ {
+ ID: "swarm_manager_object_store",
+ Title: "Swarm Manager Object Store",
+ Units: "objects",
+ Fam: "swarm",
+ Type: module.Stacked,
+ Ctx: "docker_engine.swarm_manager_object_store",
+ Dims: Dims{
+ {ID: "swarm_manager_nodes_total", Name: "nodes"},
+ {ID: "swarm_manager_services_total", Name: "services"},
+ {ID: "swarm_manager_tasks_total", Name: "tasks"},
+ {ID: "swarm_manager_networks_total", Name: "networks"},
+ {ID: "swarm_manager_secrets_total", Name: "secrets"},
+ {ID: "swarm_manager_configs_total", Name: "configs"},
+ },
+ },
+ {
+ ID: "swarm_manager_nodes_per_state",
+ Title: "Swarm Manager Nodes Per State",
+ Units: "nodes",
+ Fam: "swarm",
+ Ctx: "docker_engine.swarm_manager_nodes_per_state",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "swarm_manager_nodes_state_ready", Name: "ready"},
+ {ID: "swarm_manager_nodes_state_down", Name: "down"},
+ {ID: "swarm_manager_nodes_state_unknown", Name: "unknown"},
+ {ID: "swarm_manager_nodes_state_disconnected", Name: "disconnected"},
+ },
+ },
+ {
+ ID: "swarm_manager_tasks_per_state",
+ Title: "Swarm Manager Tasks Per State",
+ Units: "tasks",
+ Fam: "swarm",
+ Ctx: "docker_engine.swarm_manager_tasks_per_state",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "swarm_manager_tasks_state_running", Name: "running"},
+ {ID: "swarm_manager_tasks_state_failed", Name: "failed"},
+ {ID: "swarm_manager_tasks_state_ready", Name: "ready"},
+ {ID: "swarm_manager_tasks_state_rejected", Name: "rejected"},
+ {ID: "swarm_manager_tasks_state_starting", Name: "starting"},
+ {ID: "swarm_manager_tasks_state_shutdown", Name: "shutdown"},
+ {ID: "swarm_manager_tasks_state_new", Name: "new"},
+ {ID: "swarm_manager_tasks_state_orphaned", Name: "orphaned"},
+ {ID: "swarm_manager_tasks_state_preparing", Name: "preparing"},
+ {ID: "swarm_manager_tasks_state_pending", Name: "pending"},
+ {ID: "swarm_manager_tasks_state_complete", Name: "complete"},
+ {ID: "swarm_manager_tasks_state_remove", Name: "remove"},
+ {ID: "swarm_manager_tasks_state_accepted", Name: "accepted"},
+ {ID: "swarm_manager_tasks_state_assigned", Name: "assigned"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/docker_engine/collect.go b/src/go/plugin/go.d/modules/docker_engine/collect.go
new file mode 100644
index 000000000..90cd49985
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/collect.go
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker_engine
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
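+// isDockerEngineMetrics reports whether the scraped series look like Docker Engine metrics,
+// using the presence of the engine_daemon_engine_info series as the marker.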
+func isDockerEngineMetrics(pms prometheus.Series) bool {
+ return pms.FindByName("engine_daemon_engine_info").Len() > 0
+}
+
+func (de *DockerEngine) collect() (map[string]int64, error) {
+ pms, err := de.prom.ScrapeSeries()
+ if err != nil {
+ return nil, err
+ }
+
+ if !isDockerEngineMetrics(pms) {
+ return nil, fmt.Errorf("'%s' returned non docker engine metrics", de.URL)
+ }
+
+ mx := de.collectMetrics(pms)
+ return stm.ToMap(mx), nil
+}
+
+func (de *DockerEngine) collectMetrics(pms prometheus.Series) metrics {
+ var mx metrics
+ collectHealthChecks(&mx, pms)
+ collectContainerActions(&mx, pms)
+ collectBuilderBuildsFails(&mx, pms)
+ if hasContainerStates(pms) {
+ de.hasContainerStates = true
+ mx.Container.States = &containerStates{}
+ collectContainerStates(&mx, pms)
+ }
+ if isSwarmManager(pms) {
+ de.isSwarmManager = true
+ mx.SwarmManager = &swarmManager{}
+ collectSwarmManager(&mx, pms)
+ }
+ return mx
+}
+
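+// isSwarmManager reports whether the endpoint belongs to a Swarm manager node
+// (the swarm_node_manager gauge equals 1).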
+func isSwarmManager(pms prometheus.Series) bool {
+ return pms.FindByName("swarm_node_manager").Max() == 1
+}
+
+func hasContainerStates(pms prometheus.Series) bool {
+ return pms.FindByName("engine_daemon_container_states_containers").Len() > 0
+}
+
+func collectHealthChecks(mx *metrics, raw prometheus.Series) {
+ v := raw.FindByName("engine_daemon_health_checks_failed_total").Max()
+ mx.HealthChecks.Failed = v
+}
+
+func collectContainerActions(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName("engine_daemon_container_actions_seconds_count") {
+ action := metric.Labels.Get("action")
+ if action == "" {
+ continue
+ }
+
+ v := metric.Value
+ switch action {
+ default:
+ case "changes":
+ mx.Container.Actions.Changes = v
+ case "commit":
+ mx.Container.Actions.Commit = v
+ case "create":
+ mx.Container.Actions.Create = v
+ case "delete":
+ mx.Container.Actions.Delete = v
+ case "start":
+ mx.Container.Actions.Start = v
+ }
+ }
+}
+
+func collectContainerStates(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName("engine_daemon_container_states_containers") {
+ state := metric.Labels.Get("state")
+ if state == "" {
+ continue
+ }
+
+ v := metric.Value
+ switch state {
+ default:
+ case "paused":
+ mx.Container.States.Paused = v
+ case "running":
+ mx.Container.States.Running = v
+ case "stopped":
+ mx.Container.States.Stopped = v
+ }
+ }
+}
+
+func collectBuilderBuildsFails(mx *metrics, raw prometheus.Series) {
+ for _, metric := range raw.FindByName("builder_builds_failed_total") {
+ reason := metric.Labels.Get("reason")
+ if reason == "" {
+ continue
+ }
+
+ v := metric.Value
+ switch reason {
+ default:
+ case "build_canceled":
+ mx.Builder.FailsByReason.BuildCanceled = v
+ case "build_target_not_reachable_error":
+ mx.Builder.FailsByReason.BuildTargetNotReachableError = v
+ case "command_not_supported_error":
+ mx.Builder.FailsByReason.CommandNotSupportedError = v
+ case "dockerfile_empty_error":
+ mx.Builder.FailsByReason.DockerfileEmptyError = v
+ case "dockerfile_syntax_error":
+ mx.Builder.FailsByReason.DockerfileSyntaxError = v
+ case "error_processing_commands_error":
+ mx.Builder.FailsByReason.ErrorProcessingCommandsError = v
+ case "missing_onbuild_arguments_error":
+ mx.Builder.FailsByReason.MissingOnbuildArgumentsError = v
+ case "unknown_instruction_error":
+ mx.Builder.FailsByReason.UnknownInstructionError = v
+ }
+ }
+}
+
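+// collectSwarmManager fills in the Swarm manager gauges; node and task totals are
+// accumulated as the sum of the corresponding per-state series.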
+func collectSwarmManager(mx *metrics, raw prometheus.Series) {
+ v := raw.FindByName("swarm_manager_configs_total").Max()
+ mx.SwarmManager.Configs = v
+
+ v = raw.FindByName("swarm_manager_networks_total").Max()
+ mx.SwarmManager.Networks = v
+
+ v = raw.FindByName("swarm_manager_secrets_total").Max()
+ mx.SwarmManager.Secrets = v
+
+ v = raw.FindByName("swarm_manager_services_total").Max()
+ mx.SwarmManager.Services = v
+
+ v = raw.FindByName("swarm_manager_leader").Max()
+ mx.SwarmManager.IsLeader = v
+
+ for _, metric := range raw.FindByName("swarm_manager_nodes") {
+ state := metric.Labels.Get("state")
+ if state == "" {
+ continue
+ }
+
+ v := metric.Value
+ switch state {
+ default:
+ case "disconnected":
+ mx.SwarmManager.Nodes.PerState.Disconnected = v
+ case "down":
+ mx.SwarmManager.Nodes.PerState.Down = v
+ case "ready":
+ mx.SwarmManager.Nodes.PerState.Ready = v
+ case "unknown":
+ mx.SwarmManager.Nodes.PerState.Unknown = v
+ }
+ mx.SwarmManager.Nodes.Total += v
+ }
+
+ for _, metric := range raw.FindByName("swarm_manager_tasks_total") {
+ state := metric.Labels.Get("state")
+ if state == "" {
+ continue
+ }
+
+ v := metric.Value
+ switch state {
+ default:
+ case "accepted":
+ mx.SwarmManager.Tasks.PerState.Accepted = v
+ case "assigned":
+ mx.SwarmManager.Tasks.PerState.Assigned = v
+ case "complete":
+ mx.SwarmManager.Tasks.PerState.Complete = v
+ case "failed":
+ mx.SwarmManager.Tasks.PerState.Failed = v
+ case "new":
+ mx.SwarmManager.Tasks.PerState.New = v
+ case "orphaned":
+ mx.SwarmManager.Tasks.PerState.Orphaned = v
+ case "pending":
+ mx.SwarmManager.Tasks.PerState.Pending = v
+ case "preparing":
+ mx.SwarmManager.Tasks.PerState.Preparing = v
+ case "ready":
+ mx.SwarmManager.Tasks.PerState.Ready = v
+ case "rejected":
+ mx.SwarmManager.Tasks.PerState.Rejected = v
+ case "remove":
+ mx.SwarmManager.Tasks.PerState.Remove = v
+ case "running":
+ mx.SwarmManager.Tasks.PerState.Running = v
+ case "shutdown":
+ mx.SwarmManager.Tasks.PerState.Shutdown = v
+ case "starting":
+ mx.SwarmManager.Tasks.PerState.Starting = v
+ }
+ mx.SwarmManager.Tasks.Total += v
+ }
+}
diff --git a/src/go/plugin/go.d/modules/docker_engine/config_schema.json b/src/go/plugin/go.d/modules/docker_engine/config_schema.json
new file mode 100644
index 000000000..1e40bb585
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Docker Engine collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Docker Engine [metrics endpoint](https://docs.docker.com/config/daemon/prometheus/#configure-the-daemon).",
+ "type": "string",
+ "default": "http://127.0.0.1:9323/metrics",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/docker_engine/docker_engine.go b/src/go/plugin/go.d/modules/docker_engine/docker_engine.go
new file mode 100644
index 000000000..4f50ecb43
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/docker_engine.go
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker_engine
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("docker_engine", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *DockerEngine {
+ return &DockerEngine{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:9323/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type DockerEngine struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ prom prometheus.Prometheus
+
+ isSwarmManager bool
+ hasContainerStates bool
+}
+
+func (de *DockerEngine) Configuration() any {
+ return de.Config
+}
+
+func (de *DockerEngine) Init() error {
+ if err := de.validateConfig(); err != nil {
+ de.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prom, err := de.initPrometheusClient()
+ if err != nil {
+ de.Error(err)
+ return err
+ }
+ de.prom = prom
+
+ return nil
+}
+
+func (de *DockerEngine) Check() error {
+ mx, err := de.collect()
+ if err != nil {
+ de.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+
+ }
+ return nil
+}
+
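+// Charts returns a copy of the base charts adjusted to what collection has detected so far:
+// the container states chart is removed when the endpoint does not expose it, and the Swarm
+// manager charts are appended only when the node is a Swarm manager.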
+func (de *DockerEngine) Charts() *Charts {
+ cs := charts.Copy()
+ if !de.hasContainerStates {
+ if err := cs.Remove("engine_daemon_container_states_containers"); err != nil {
+ de.Warning(err)
+ }
+ }
+
+ if !de.isSwarmManager {
+ return cs
+ }
+
+ if err := cs.Add(*swarmManagerCharts.Copy()...); err != nil {
+ de.Warning(err)
+ }
+
+ return cs
+}
+
+func (de *DockerEngine) Collect() map[string]int64 {
+ mx, err := de.collect()
+ if err != nil {
+ de.Error(err)
+ return nil
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (de *DockerEngine) Cleanup() {
+ if de.prom != nil && de.prom.HTTPClient() != nil {
+ de.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/docker_engine/docker_engine_test.go b/src/go/plugin/go.d/modules/docker_engine/docker_engine_test.go
new file mode 100644
index 000000000..1734f1829
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/docker_engine_test.go
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker_engine
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNonDockerEngineMetrics, _ = os.ReadFile("testdata/non-docker-engine.txt")
+ dataVer17050Metrics, _ = os.ReadFile("testdata/v17.05.0-ce.txt")
+ dataVer18093Metrics, _ = os.ReadFile("testdata/v18.09.3-ce.txt")
+ dataVer18093SwarmMetrics, _ = os.ReadFile("testdata/v18.09.3-ce-swarm.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNonDockerEngineMetrics": dataNonDockerEngineMetrics,
+ "dataVer17050Metrics": dataVer17050Metrics,
+ "dataVer18093Metrics": dataVer18093Metrics,
+ "dataVer18093SwarmMetrics": dataVer18093SwarmMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDockerEngine_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DockerEngine{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDockerEngine_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestDockerEngine_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default": {
+ config: New().Config,
+ },
+ "empty URL": {
+ config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}},
+ wantFail: true,
+ },
+ "nonexistent TLS CA": {
+ config: Config{HTTP: web.HTTP{
+ Request: web.Request{URL: "http://127.0.0.1:9323/metrics"},
+ Client: web.Client{TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}}}},
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dockerEngine := New()
+ dockerEngine.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, dockerEngine.Init())
+ } else {
+ assert.NoError(t, dockerEngine.Init())
+ }
+ })
+ }
+}
+
+func TestDockerEngine_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (*DockerEngine, *httptest.Server)
+ wantFail bool
+ }{
+ "v17.05.0-ce": {prepare: prepareClientServerV17050CE},
+ "v18.09.3-ce": {prepare: prepareClientServerV18093CE},
+ "v18.09.3-ce-swarm": {prepare: prepareClientServerV18093CESwarm},
+ "non docker engine": {prepare: prepareClientServerNonDockerEngine, wantFail: true},
+ "invalid data": {prepare: prepareClientServerInvalidData, wantFail: true},
+ "404": {prepare: prepareClientServer404, wantFail: true},
+ "connection refused": {prepare: prepareClientServerConnectionRefused, wantFail: true},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dockerEngine, srv := test.prepare(t)
+ defer srv.Close()
+
+ if test.wantFail {
+ assert.Error(t, dockerEngine.Check())
+ } else {
+ assert.NoError(t, dockerEngine.Check())
+ }
+ })
+ }
+}
+
+func TestDockerEngine_Charts(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (*DockerEngine, *httptest.Server)
+ wantNumCharts int
+ }{
+ "v17.05.0-ce": {prepare: prepareClientServerV17050CE, wantNumCharts: len(charts) - 1}, // no container states chart
+ "v18.09.3-ce": {prepare: prepareClientServerV18093CE, wantNumCharts: len(charts)},
+ "v18.09.3-ce-swarm": {prepare: prepareClientServerV18093CESwarm, wantNumCharts: len(charts) + len(swarmManagerCharts)},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dockerEngine, srv := test.prepare(t)
+ defer srv.Close()
+
+ require.NoError(t, dockerEngine.Check())
+ assert.Len(t, *dockerEngine.Charts(), test.wantNumCharts)
+ })
+ }
+}
+
+func TestDockerEngine_Collect_ReturnsNilOnErrors(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (*DockerEngine, *httptest.Server)
+ }{
+ "non docker engine": {prepare: prepareClientServerNonDockerEngine},
+ "invalid data": {prepare: prepareClientServerInvalidData},
+ "404": {prepare: prepareClientServer404},
+ "connection refused": {prepare: prepareClientServerConnectionRefused},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dockerEngine, srv := test.prepare(t)
+ defer srv.Close()
+
+ assert.Nil(t, dockerEngine.Collect())
+ })
+ }
+}
+
+func TestDockerEngine_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (*DockerEngine, *httptest.Server)
+ expected map[string]int64
+ }{
+ "v17.05.0-ce": {
+ prepare: prepareClientServerV17050CE,
+ expected: map[string]int64{
+ "builder_fails_build_canceled": 1,
+ "builder_fails_build_target_not_reachable_error": 2,
+ "builder_fails_command_not_supported_error": 3,
+ "builder_fails_dockerfile_empty_error": 4,
+ "builder_fails_dockerfile_syntax_error": 5,
+ "builder_fails_error_processing_commands_error": 6,
+ "builder_fails_missing_onbuild_arguments_error": 7,
+ "builder_fails_unknown_instruction_error": 8,
+ "container_actions_changes": 1,
+ "container_actions_commit": 1,
+ "container_actions_create": 1,
+ "container_actions_delete": 1,
+ "container_actions_start": 1,
+ "health_checks_failed": 33,
+ },
+ },
+ "v18.09.3-ce": {
+ prepare: prepareClientServerV18093CE,
+ expected: map[string]int64{
+ "builder_fails_build_canceled": 1,
+ "builder_fails_build_target_not_reachable_error": 2,
+ "builder_fails_command_not_supported_error": 3,
+ "builder_fails_dockerfile_empty_error": 4,
+ "builder_fails_dockerfile_syntax_error": 5,
+ "builder_fails_error_processing_commands_error": 6,
+ "builder_fails_missing_onbuild_arguments_error": 7,
+ "builder_fails_unknown_instruction_error": 8,
+ "container_actions_changes": 1,
+ "container_actions_commit": 1,
+ "container_actions_create": 1,
+ "container_actions_delete": 1,
+ "container_actions_start": 1,
+ "container_states_paused": 11,
+ "container_states_running": 12,
+ "container_states_stopped": 13,
+ "health_checks_failed": 33,
+ },
+ },
+ "v18.09.3-ce-swarm": {
+ prepare: prepareClientServerV18093CESwarm,
+ expected: map[string]int64{
+ "builder_fails_build_canceled": 1,
+ "builder_fails_build_target_not_reachable_error": 2,
+ "builder_fails_command_not_supported_error": 3,
+ "builder_fails_dockerfile_empty_error": 4,
+ "builder_fails_dockerfile_syntax_error": 5,
+ "builder_fails_error_processing_commands_error": 6,
+ "builder_fails_missing_onbuild_arguments_error": 7,
+ "builder_fails_unknown_instruction_error": 8,
+ "container_actions_changes": 1,
+ "container_actions_commit": 1,
+ "container_actions_create": 1,
+ "container_actions_delete": 1,
+ "container_actions_start": 1,
+ "container_states_paused": 11,
+ "container_states_running": 12,
+ "container_states_stopped": 13,
+ "health_checks_failed": 33,
+ "swarm_manager_configs_total": 1,
+ "swarm_manager_leader": 1,
+ "swarm_manager_networks_total": 3,
+ "swarm_manager_nodes_state_disconnected": 1,
+ "swarm_manager_nodes_state_down": 2,
+ "swarm_manager_nodes_state_ready": 3,
+ "swarm_manager_nodes_state_unknown": 4,
+ "swarm_manager_nodes_total": 10,
+ "swarm_manager_secrets_total": 1,
+ "swarm_manager_services_total": 1,
+ "swarm_manager_tasks_state_accepted": 1,
+ "swarm_manager_tasks_state_assigned": 2,
+ "swarm_manager_tasks_state_complete": 3,
+ "swarm_manager_tasks_state_failed": 4,
+ "swarm_manager_tasks_state_new": 5,
+ "swarm_manager_tasks_state_orphaned": 6,
+ "swarm_manager_tasks_state_pending": 7,
+ "swarm_manager_tasks_state_preparing": 8,
+ "swarm_manager_tasks_state_ready": 9,
+ "swarm_manager_tasks_state_rejected": 10,
+ "swarm_manager_tasks_state_remove": 11,
+ "swarm_manager_tasks_state_running": 12,
+ "swarm_manager_tasks_state_shutdown": 13,
+ "swarm_manager_tasks_state_starting": 14,
+ "swarm_manager_tasks_total": 105,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+			dockerEngine, srv := test.prepare(t)
+			defer srv.Close()
+
+			for i := 0; i < 10; i++ {
+				_ = dockerEngine.Collect()
+			}
+			collected := dockerEngine.Collect()
+
+			require.NotNil(t, collected)
+			require.Equal(t, test.expected, collected)
+			ensureCollectedHasAllChartsDimsVarsIDs(t, dockerEngine, collected)
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dockerEngine *DockerEngine, collected map[string]int64) {
+ t.Helper()
+ for _, chart := range *dockerEngine.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareClientServerV17050CE(t *testing.T) (*DockerEngine, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer17050Metrics)
+ }))
+
+ dockerEngine := New()
+ dockerEngine.URL = srv.URL
+ require.NoError(t, dockerEngine.Init())
+
+ return dockerEngine, srv
+}
+
+func prepareClientServerV18093CE(t *testing.T) (*DockerEngine, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer18093Metrics)
+ }))
+
+ dockerEngine := New()
+ dockerEngine.URL = srv.URL
+ require.NoError(t, dockerEngine.Init())
+
+ return dockerEngine, srv
+}
+
+func prepareClientServerV18093CESwarm(t *testing.T) (*DockerEngine, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer18093SwarmMetrics)
+ }))
+
+ dockerEngine := New()
+ dockerEngine.URL = srv.URL
+ require.NoError(t, dockerEngine.Init())
+
+ return dockerEngine, srv
+}
+
+func prepareClientServerNonDockerEngine(t *testing.T) (*DockerEngine, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataNonDockerEngineMetrics)
+ }))
+
+ dockerEngine := New()
+ dockerEngine.URL = srv.URL
+ require.NoError(t, dockerEngine.Init())
+
+ return dockerEngine, srv
+}
+
+func prepareClientServerInvalidData(t *testing.T) (*DockerEngine, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ dockerEngine := New()
+ dockerEngine.URL = srv.URL
+ require.NoError(t, dockerEngine.Init())
+
+ return dockerEngine, srv
+}
+
+func prepareClientServer404(t *testing.T) (*DockerEngine, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+
+ dockerEngine := New()
+ dockerEngine.URL = srv.URL
+ require.NoError(t, dockerEngine.Init())
+
+ return dockerEngine, srv
+}
+
+func prepareClientServerConnectionRefused(t *testing.T) (*DockerEngine, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(nil)
+
+ dockerEngine := New()
+ dockerEngine.URL = "http://127.0.0.1:38001/metrics"
+ require.NoError(t, dockerEngine.Init())
+
+ return dockerEngine, srv
+}
diff --git a/src/go/plugin/go.d/modules/docker_engine/init.go b/src/go/plugin/go.d/modules/docker_engine/init.go
new file mode 100644
index 000000000..5610af9a9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/init.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker_engine
+
+import (
+	"errors"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (de *DockerEngine) validateConfig() error {
+ if de.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (de *DockerEngine) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(de.Client)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.New(client, de.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/docker_engine/integrations/docker_engine.md b/src/go/plugin/go.d/modules/docker_engine/integrations/docker_engine.md
new file mode 100644
index 000000000..eaba917e7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/integrations/docker_engine.md
@@ -0,0 +1,264 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/docker_engine/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/docker_engine/metadata.yaml"
+sidebar_label: "Docker Engine"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Containers and VMs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Docker Engine
+
+
+<img src="https://netdata.cloud/img/docker.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: docker_engine
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the activity and health of Docker Engine and Docker Swarm.
+
+
+The [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It discovers instances running on localhost by attempting to connect to a known Docker Prometheus metrics endpoint: `http://127.0.0.1:9323/metrics`.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Docker Engine instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| docker_engine.engine_daemon_container_actions | changes, commit, create, delete, start | actions/s |
+| docker_engine.engine_daemon_container_states_containers | running, paused, stopped | containers |
+| docker_engine.builder_builds_failed_total | build_canceled, build_target_not_reachable_error, command_not_supported_error, dockerfile_empty_error, dockerfile_syntax_error, error_processing_commands_error, missing_onbuild_arguments_error, unknown_instruction_error | fails/s |
+| docker_engine.engine_daemon_health_checks_failed_total | fails | events/s |
+| docker_engine.swarm_manager_leader | is_leader | bool |
+| docker_engine.swarm_manager_object_store | nodes, services, tasks, networks, secrets, configs | objects |
+| docker_engine.swarm_manager_nodes_per_state | ready, down, unknown, disconnected | nodes |
+| docker_engine.swarm_manager_tasks_per_state | running, failed, ready, rejected, starting, shutdown, new, orphaned, preparing, pending, complete, remove, accepted, assigned | tasks |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable built-in Prometheus exporter
+
+To enable the built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).
+
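+As a rough sketch, this usually means adding a `metrics-addr` entry to the daemon configuration
+(commonly `/etc/docker/daemon.json`) and restarting the daemon; check the linked documentation for
+the exact steps required by your Docker Engine version:
+
+```json
+{
+  "metrics-addr": "127.0.0.1:9323"
+}
+```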
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/docker_engine.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/docker_engine.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9323/metrics | yes |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| timeout | HTTP request timeout. | 1 | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9323/metrics
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9323/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Configuration with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9323/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9323/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9323/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `docker_engine` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m docker_engine
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `docker_engine` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep docker_engine
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep docker_engine /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep docker_engine
+```
+
+
diff --git a/src/go/plugin/go.d/modules/docker_engine/metadata.yaml b/src/go/plugin/go.d/modules/docker_engine/metadata.yaml
new file mode 100644
index 000000000..8f81d4e35
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/metadata.yaml
@@ -0,0 +1,263 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-docker_engine
+ plugin_name: go.d.plugin
+ module_name: docker_engine
+ alternative_monitored_instances: []
+ monitored_instance:
+ name: Docker Engine
+ link: https://docs.docker.com/engine/
+ categories:
+ - data-collection.containers-and-vms
+ icon_filename: docker.svg
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - docker
+ - container
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the activity and health of Docker Engine and Docker Swarm.
+ method_description: |
+ The [built-in](https://docs.docker.com/config/daemon/prometheus/) Prometheus exporter is used to get the metrics.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+            It discovers instances running on localhost by attempting to connect to a known Docker Prometheus metrics endpoint: `http://127.0.0.1:9323/metrics`.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable built-in Prometheus exporter
+ description: |
+              To enable the built-in Prometheus exporter, follow the [official documentation](https://docs.docker.com/config/daemon/prometheus/#configure-docker).
+ configuration:
+ file:
+ name: go.d/docker_engine.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:9323/metrics
+ required: true
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: "no"
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: "no"
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9323/metrics
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9323/metrics
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: Configuration with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9323/metrics
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9323/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9323/metrics
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: docker_engine.engine_daemon_container_actions
+ description: Container Actions
+ unit: actions/s
+ chart_type: stacked
+ dimensions:
+ - name: changes
+ - name: commit
+ - name: create
+ - name: delete
+ - name: start
+ - name: docker_engine.engine_daemon_container_states_containers
+ description: Containers In Various States
+ unit: containers
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: paused
+ - name: stopped
+ - name: docker_engine.builder_builds_failed_total
+ description: Builder Builds Fails By Reason
+ unit: fails/s
+ chart_type: stacked
+ dimensions:
+ - name: build_canceled
+ - name: build_target_not_reachable_error
+ - name: command_not_supported_error
+ - name: dockerfile_empty_error
+ - name: dockerfile_syntax_error
+ - name: error_processing_commands_error
+ - name: missing_onbuild_arguments_error
+ - name: unknown_instruction_error
+ - name: docker_engine.engine_daemon_health_checks_failed_total
+ description: Health Checks
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: fails
+ - name: docker_engine.swarm_manager_leader
+ description: Swarm Manager Leader
+ unit: bool
+ chart_type: line
+ dimensions:
+ - name: is_leader
+ - name: docker_engine.swarm_manager_object_store
+ description: Swarm Manager Object Store
+ unit: objects
+ chart_type: stacked
+ dimensions:
+ - name: nodes
+ - name: services
+ - name: tasks
+ - name: networks
+ - name: secrets
+ - name: configs
+ - name: docker_engine.swarm_manager_nodes_per_state
+ description: Swarm Manager Nodes Per State
+ unit: nodes
+ chart_type: stacked
+ dimensions:
+ - name: ready
+ - name: down
+ - name: unknown
+ - name: disconnected
+ - name: docker_engine.swarm_manager_tasks_per_state
+ description: Swarm Manager Tasks Per State
+ unit: tasks
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: failed
+ - name: ready
+ - name: rejected
+ - name: starting
+ - name: shutdown
+ - name: new
+ - name: orphaned
+ - name: preparing
+ - name: pending
+ - name: complete
+ - name: remove
+ - name: accepted
+ - name: assigned
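For reference, the prerequisite listed above usually amounts to adding a `metrics-addr` entry to the Docker daemon configuration (typically `/etc/docker/daemon.json`) and restarting the daemon. The address below is only an example matching the collector's default; some older Docker releases may additionally require `"experimental": true`, so follow the linked documentation for your version.

```json
{
  "metrics-addr": "127.0.0.1:9323"
}
```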
diff --git a/src/go/plugin/go.d/modules/docker_engine/metrics.go b/src/go/plugin/go.d/modules/docker_engine/metrics.go
new file mode 100644
index 000000000..4c84e8398
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/metrics.go
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker_engine
+
+type metrics struct {
+ Container struct {
+ Actions struct {
+ Changes float64 `stm:"changes"`
+ Commit float64 `stm:"commit"`
+ Create float64 `stm:"create"`
+ Delete float64 `stm:"delete"`
+ Start float64 `stm:"start"`
+ } `stm:"actions"`
+ States *containerStates `stm:"states"`
+ } `stm:"container"`
+ Builder struct {
+ FailsByReason struct {
+ BuildCanceled float64 `stm:"build_canceled"`
+ BuildTargetNotReachableError float64 `stm:"build_target_not_reachable_error"`
+ CommandNotSupportedError float64 `stm:"command_not_supported_error"`
+ DockerfileEmptyError float64 `stm:"dockerfile_empty_error"`
+ DockerfileSyntaxError float64 `stm:"dockerfile_syntax_error"`
+ ErrorProcessingCommandsError float64 `stm:"error_processing_commands_error"`
+ MissingOnbuildArgumentsError float64 `stm:"missing_onbuild_arguments_error"`
+ UnknownInstructionError float64 `stm:"unknown_instruction_error"`
+ } `stm:"fails"`
+ } `stm:"builder"`
+ HealthChecks struct {
+ Failed float64 `stm:"failed"`
+ } `stm:"health_checks"`
+ SwarmManager *swarmManager `stm:"swarm_manager"`
+}
+
+type containerStates struct {
+ Paused float64 `stm:"paused"`
+ Running float64 `stm:"running"`
+ Stopped float64 `stm:"stopped"`
+}
+
+type swarmManager struct {
+ IsLeader float64 `stm:"leader"`
+ Configs float64 `stm:"configs_total"`
+ Networks float64 `stm:"networks_total"`
+ Secrets float64 `stm:"secrets_total"`
+ Services float64 `stm:"services_total"`
+ Nodes struct {
+ Total float64 `stm:"total"`
+ PerState struct {
+ Disconnected float64 `stm:"disconnected"`
+ Down float64 `stm:"down"`
+ Ready float64 `stm:"ready"`
+ Unknown float64 `stm:"unknown"`
+ } `stm:"state"`
+ } `stm:"nodes"`
+ Tasks struct {
+ Total float64 `stm:"total"`
+ PerState struct {
+ Accepted float64 `stm:"accepted"`
+ Assigned float64 `stm:"assigned"`
+ Complete float64 `stm:"complete"`
+ Failed float64 `stm:"failed"`
+ New float64 `stm:"new"`
+ Orphaned float64 `stm:"orphaned"`
+ Pending float64 `stm:"pending"`
+ Preparing float64 `stm:"preparing"`
+ Ready float64 `stm:"ready"`
+ Rejected float64 `stm:"rejected"`
+ Remove float64 `stm:"remove"`
+ Running float64 `stm:"running"`
+ Shutdown float64 `stm:"shutdown"`
+ Starting float64 `stm:"starting"`
+ } `stm:"state"`
+ } `stm:"tasks"`
+}
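The `stm` struct tags above encode how the nested fields are flattened into flat metric keys for charting. The following is a minimal, hypothetical sketch of that flattening idea using reflection; it is not the go.d `stm` package itself, and the separator and type handling here are assumptions made only for illustration.

```go
package main

import (
	"fmt"
	"reflect"
)

// flatten walks a struct (or pointer to struct) and joins non-empty `stm`
// tag values with "_" to build flat metric keys for float64 leaves.
func flatten(prefix string, v reflect.Value, out map[string]float64) {
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return
		}
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return
	}
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		key := prefix
		if tag := t.Field(i).Tag.Get("stm"); tag != "" {
			if key != "" {
				key += "_"
			}
			key += tag
		}
		f := v.Field(i)
		if f.Kind() == reflect.Float64 {
			out[key] = f.Float()
			continue
		}
		flatten(key, f, out)
	}
}

func main() {
	// A tiny stand-in for the docker_engine metrics struct above.
	type actions struct {
		Changes float64 `stm:"changes"`
		Start   float64 `stm:"start"`
	}
	type container struct {
		Actions actions `stm:"actions"`
	}
	type metrics struct {
		Container container `stm:"container"`
	}

	m := metrics{}
	m.Container.Actions.Changes = 5
	m.Container.Actions.Start = 2

	out := map[string]float64{}
	flatten("", reflect.ValueOf(m), out)
	fmt.Println(out) // map[container_actions_changes:5 container_actions_start:2]
}
```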
diff --git a/src/go/plugin/go.d/modules/docker_engine/testdata/config.json b/src/go/plugin/go.d/modules/docker_engine/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/docker_engine/testdata/config.yaml b/src/go/plugin/go.d/modules/docker_engine/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/docker_engine/testdata/non-docker-engine.txt b/src/go/plugin/go.d/modules/docker_engine/testdata/non-docker-engine.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/testdata/non-docker-engine.txt
diff --git a/src/go/plugin/go.d/modules/docker_engine/testdata/v17.05.0-ce.txt b/src/go/plugin/go.d/modules/docker_engine/testdata/v17.05.0-ce.txt
new file mode 100644
index 000000000..8d175a8e9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/testdata/v17.05.0-ce.txt
@@ -0,0 +1,460 @@
+# HELP builder_builds_failed_total Number of failed image builds
+# TYPE builder_builds_failed_total counter
+builder_builds_failed_total{reason="build_canceled"} 1
+builder_builds_failed_total{reason="build_target_not_reachable_error"} 2
+builder_builds_failed_total{reason="command_not_supported_error"} 3
+builder_builds_failed_total{reason="dockerfile_empty_error"} 4
+builder_builds_failed_total{reason="dockerfile_syntax_error"} 5
+builder_builds_failed_total{reason="error_processing_commands_error"} 6
+builder_builds_failed_total{reason="missing_onbuild_arguments_error"} 7
+builder_builds_failed_total{reason="unknown_instruction_error"} 8
+# HELP builder_builds_triggered_total Number of triggered image builds
+# TYPE builder_builds_triggered_total counter
+builder_builds_triggered_total 0
+# HELP engine_daemon_container_actions_seconds The number of seconds it takes to process each container action
+# TYPE engine_daemon_container_actions_seconds histogram
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="changes"} 0
+engine_daemon_container_actions_seconds_count{action="changes"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="commit"} 0
+engine_daemon_container_actions_seconds_count{action="commit"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="create"} 0
+engine_daemon_container_actions_seconds_count{action="create"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="delete"} 0
+engine_daemon_container_actions_seconds_count{action="delete"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="start"} 0
+engine_daemon_container_actions_seconds_count{action="start"} 1
+# HELP engine_daemon_engine_cpus_cpus The number of cpus that the host system of the engine has
+# TYPE engine_daemon_engine_cpus_cpus gauge
+engine_daemon_engine_cpus_cpus 4
+# HELP engine_daemon_engine_info The information related to the engine and the OS it is running on
+# TYPE engine_daemon_engine_info gauge
+engine_daemon_engine_info{architecture="x86_64",commit="774a1f4eee",daemon_id="NFZK:ZHHR:73WY:RV7D:MMU2:SE24:WWRJ:A3WN:WMMA:SPCL:PVO3:VGY7",graphdriver="overlay2",kernel="4.14.105-1-MANJARO",os="Manjaro Linux",os_type="linux",version="18.09.3-ce"} 1
+# HELP engine_daemon_engine_memory_bytes The number of bytes of memory that the host system of the engine has
+# TYPE engine_daemon_engine_memory_bytes gauge
+engine_daemon_engine_memory_bytes 2.5215361024e+10
+# HELP engine_daemon_events_subscribers_total The number of current subscribers to events
+# TYPE engine_daemon_events_subscribers_total gauge
+engine_daemon_events_subscribers_total 0
+# HELP engine_daemon_events_total The number of events logged
+# TYPE engine_daemon_events_total counter
+engine_daemon_events_total 0
+# HELP engine_daemon_health_checks_failed_total The total number of failed health checks
+# TYPE engine_daemon_health_checks_failed_total counter
+engine_daemon_health_checks_failed_total 33
+# HELP engine_daemon_health_checks_total The total number of health checks
+# TYPE engine_daemon_health_checks_total counter
+engine_daemon_health_checks_total 0
+# HELP etcd_debugging_snap_save_marshalling_duration_seconds The marshalling cost distributions of save called by snapshot.
+# TYPE etcd_debugging_snap_save_marshalling_duration_seconds histogram
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.001"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.002"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.004"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.008"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.016"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.032"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.064"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.128"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.256"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.512"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="1.024"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="2.048"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="4.096"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="8.192"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="+Inf"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_sum 0
+etcd_debugging_snap_save_marshalling_duration_seconds_count 0
+# HELP etcd_debugging_snap_save_total_duration_seconds The total latency distributions of save called by snapshot.
+# TYPE etcd_debugging_snap_save_total_duration_seconds histogram
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.001"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.002"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.004"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.008"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.016"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.032"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.064"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.128"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.256"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.512"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="1.024"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="2.048"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="4.096"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="8.192"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="+Inf"} 0
+etcd_debugging_snap_save_total_duration_seconds_sum 0
+etcd_debugging_snap_save_total_duration_seconds_count 0
+# HELP etcd_disk_wal_fsync_duration_seconds The latency distributions of fsync called by wal.
+# TYPE etcd_disk_wal_fsync_duration_seconds histogram
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.001"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.002"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.004"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.008"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.016"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.032"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.064"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.128"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.256"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.512"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="1.024"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="2.048"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="4.096"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="8.192"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="+Inf"} 0
+etcd_disk_wal_fsync_duration_seconds_sum 0
+etcd_disk_wal_fsync_duration_seconds_count 0
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 1.0085e-05
+go_gc_duration_seconds{quantile="0.25"} 3.1991e-05
+go_gc_duration_seconds{quantile="0.5"} 4.8062e-05
+go_gc_duration_seconds{quantile="0.75"} 9.067e-05
+go_gc_duration_seconds{quantile="1"} 0.000175239
+go_gc_duration_seconds_sum 0.000724173
+go_gc_duration_seconds_count 12
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 50
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 8.13368e+06
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 2.7343352e+07
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.454057e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 319815
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 2.398208e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 8.13368e+06
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 5.5648256e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 1.0477568e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 114878
+# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes_total counter
+go_memstats_heap_released_bytes_total 5.4738944e+07
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 6.6125824e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.5528438390886765e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 434693
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 6944
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16384
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 159696
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 196608
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 1.5134512e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 1.112335e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 983040
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 983040
+# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 7.2286456e+07
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="prometheus"} 0
+http_request_duration_microseconds_count{handler="prometheus"} 0
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="prometheus",quantile="0.5"} NaN
+http_request_size_bytes{handler="prometheus",quantile="0.9"} NaN
+http_request_size_bytes{handler="prometheus",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="prometheus"} 0
+http_request_size_bytes_count{handler="prometheus"} 0
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="prometheus",quantile="0.5"} NaN
+http_response_size_bytes{handler="prometheus",quantile="0.9"} NaN
+http_response_size_bytes{handler="prometheus",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="prometheus"} 0
+http_response_size_bytes_count{handler="prometheus"} 0
+# HELP logger_log_entries_size_greater_than_buffer_total Number of log entries which are larger than the log buffer
+# TYPE logger_log_entries_size_greater_than_buffer_total counter
+logger_log_entries_size_greater_than_buffer_total 0
+# HELP logger_log_read_operations_failed_total Number of log reads from container stdio that failed
+# TYPE logger_log_read_operations_failed_total counter
+logger_log_read_operations_failed_total 0
+# HELP logger_log_write_operations_failed_total Number of log write operations that failed
+# TYPE logger_log_write_operations_failed_total counter
+logger_log_write_operations_failed_total 0
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 2.12
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1.048576e+06
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 24
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 8.5929984e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.55284287673e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 1.257283584e+09
+# HELP swarm_dispatcher_scheduling_delay_seconds Scheduling delay is the time a task takes to go from NEW to RUNNING state.
+# TYPE swarm_dispatcher_scheduling_delay_seconds histogram
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.005"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.01"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.025"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.05"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.1"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.25"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.5"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="1"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="2.5"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="5"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="10"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="+Inf"} 0
+swarm_dispatcher_scheduling_delay_seconds_sum 0
+swarm_dispatcher_scheduling_delay_seconds_count 0
+# HELP swarm_manager_configs_total The number of configs in the cluster object store
+# TYPE swarm_manager_configs_total gauge
+swarm_manager_configs_total 0
+# HELP swarm_manager_leader Indicates if this manager node is a leader
+# TYPE swarm_manager_leader gauge
+swarm_manager_leader 0
+# HELP swarm_manager_networks_total The number of networks in the cluster object store
+# TYPE swarm_manager_networks_total gauge
+swarm_manager_networks_total 0
+# HELP swarm_manager_nodes The number of nodes
+# TYPE swarm_manager_nodes gauge
+swarm_manager_nodes{state="disconnected"} 0
+swarm_manager_nodes{state="down"} 0
+swarm_manager_nodes{state="ready"} 0
+swarm_manager_nodes{state="unknown"} 0
+# HELP swarm_manager_secrets_total The number of secrets in the cluster object store
+# TYPE swarm_manager_secrets_total gauge
+swarm_manager_secrets_total 0
+# HELP swarm_manager_services_total The number of services in the cluster object store
+# TYPE swarm_manager_services_total gauge
+swarm_manager_services_total 0
+# HELP swarm_manager_tasks_total The number of tasks in the cluster object store
+# TYPE swarm_manager_tasks_total gauge
+swarm_manager_tasks_total{state="accepted"} 0
+swarm_manager_tasks_total{state="assigned"} 0
+swarm_manager_tasks_total{state="complete"} 0
+swarm_manager_tasks_total{state="failed"} 0
+swarm_manager_tasks_total{state="new"} 0
+swarm_manager_tasks_total{state="orphaned"} 0
+swarm_manager_tasks_total{state="pending"} 0
+swarm_manager_tasks_total{state="preparing"} 0
+swarm_manager_tasks_total{state="ready"} 0
+swarm_manager_tasks_total{state="rejected"} 0
+swarm_manager_tasks_total{state="remove"} 0
+swarm_manager_tasks_total{state="running"} 0
+swarm_manager_tasks_total{state="shutdown"} 0
+swarm_manager_tasks_total{state="starting"} 0
+# HELP swarm_node_manager Whether this node is a manager or not
+# TYPE swarm_node_manager gauge
+swarm_node_manager 0
+# HELP swarm_raft_snapshot_latency_seconds Raft snapshot create latency.
+# TYPE swarm_raft_snapshot_latency_seconds histogram
+swarm_raft_snapshot_latency_seconds_bucket{le="0.005"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.01"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.025"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.05"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.1"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.25"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.5"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="1"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="2.5"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="5"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="10"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="+Inf"} 0
+swarm_raft_snapshot_latency_seconds_sum 0
+swarm_raft_snapshot_latency_seconds_count 0
+# HELP swarm_raft_transaction_latency_seconds Raft transaction latency.
+# TYPE swarm_raft_transaction_latency_seconds histogram
+swarm_raft_transaction_latency_seconds_bucket{le="0.005"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.01"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.025"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.05"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.1"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.25"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.5"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="1"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="2.5"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="5"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="10"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="+Inf"} 0
+swarm_raft_transaction_latency_seconds_sum 0
+swarm_raft_transaction_latency_seconds_count 0
+# HELP swarm_store_batch_latency_seconds Raft store batch latency.
+# TYPE swarm_store_batch_latency_seconds histogram
+swarm_store_batch_latency_seconds_bucket{le="0.005"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.01"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.025"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.05"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.1"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.25"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.5"} 0
+swarm_store_batch_latency_seconds_bucket{le="1"} 0
+swarm_store_batch_latency_seconds_bucket{le="2.5"} 0
+swarm_store_batch_latency_seconds_bucket{le="5"} 0
+swarm_store_batch_latency_seconds_bucket{le="10"} 0
+swarm_store_batch_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_batch_latency_seconds_sum 0
+swarm_store_batch_latency_seconds_count 0
+# HELP swarm_store_lookup_latency_seconds Raft store read latency.
+# TYPE swarm_store_lookup_latency_seconds histogram
+swarm_store_lookup_latency_seconds_bucket{le="0.005"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.01"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.025"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.05"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.1"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.25"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.5"} 0
+swarm_store_lookup_latency_seconds_bucket{le="1"} 0
+swarm_store_lookup_latency_seconds_bucket{le="2.5"} 0
+swarm_store_lookup_latency_seconds_bucket{le="5"} 0
+swarm_store_lookup_latency_seconds_bucket{le="10"} 0
+swarm_store_lookup_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_lookup_latency_seconds_sum 0
+swarm_store_lookup_latency_seconds_count 0
+# HELP swarm_store_memory_store_lock_duration_seconds Duration for which the raft memory store lock was held.
+# TYPE swarm_store_memory_store_lock_duration_seconds histogram
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.005"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.01"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.025"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.05"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.1"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.25"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.5"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="1"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="2.5"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="5"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="10"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="+Inf"} 0
+swarm_store_memory_store_lock_duration_seconds_sum 0
+swarm_store_memory_store_lock_duration_seconds_count 0
+# HELP swarm_store_read_tx_latency_seconds Raft store read tx latency.
+# TYPE swarm_store_read_tx_latency_seconds histogram
+swarm_store_read_tx_latency_seconds_bucket{le="0.005"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.01"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.025"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.05"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.1"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.25"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.5"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="1"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="2.5"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="5"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="10"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_read_tx_latency_seconds_sum 0
+swarm_store_read_tx_latency_seconds_count 0
+# HELP swarm_store_write_tx_latency_seconds Raft store write tx latency.
+# TYPE swarm_store_write_tx_latency_seconds histogram
+swarm_store_write_tx_latency_seconds_bucket{le="0.005"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.01"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.025"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.05"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.1"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.25"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="1"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="2.5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="10"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_write_tx_latency_seconds_sum 0
+swarm_store_write_tx_latency_seconds_count 0
\ No newline at end of file
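The fixture above is plain Prometheus text exposition format, which is what the collector scrapes from `http://127.0.0.1:9323/metrics`. As a hedged illustration, using the upstream `github.com/prometheus/common/expfmt` parser rather than the collector's own parsing code, this is roughly how such a file can be read and one counter family inspected:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/common/expfmt"
)

func main() {
	// Parse the test fixture shipped with the module.
	f, err := os.Open("testdata/v17.05.0-ce.txt")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	var parser expfmt.TextParser
	families, err := parser.TextToMetricFamilies(f)
	if err != nil {
		log.Fatal(err)
	}

	// Each family carries the HELP/TYPE metadata plus every sample, e.g. the
	// per-reason builder_builds_failed_total counters behind the
	// "Builder Builds Fails By Reason" chart.
	if mf, ok := families["builder_builds_failed_total"]; ok {
		for _, m := range mf.GetMetric() {
			for _, lp := range m.GetLabel() {
				fmt.Printf("%s=%s ", lp.GetName(), lp.GetValue())
			}
			fmt.Println(m.GetCounter().GetValue())
		}
	}
}
```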
diff --git a/src/go/plugin/go.d/modules/docker_engine/testdata/v18.09.3-ce-swarm.txt b/src/go/plugin/go.d/modules/docker_engine/testdata/v18.09.3-ce-swarm.txt
new file mode 100644
index 000000000..edd69abee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/testdata/v18.09.3-ce-swarm.txt
@@ -0,0 +1,468 @@
+# HELP builder_builds_failed_total Number of failed image builds
+# TYPE builder_builds_failed_total counter
+builder_builds_failed_total{reason="build_canceled"} 1
+builder_builds_failed_total{reason="build_target_not_reachable_error"} 2
+builder_builds_failed_total{reason="command_not_supported_error"} 3
+builder_builds_failed_total{reason="dockerfile_empty_error"} 4
+builder_builds_failed_total{reason="dockerfile_syntax_error"} 5
+builder_builds_failed_total{reason="error_processing_commands_error"} 6
+builder_builds_failed_total{reason="missing_onbuild_arguments_error"} 7
+builder_builds_failed_total{reason="unknown_instruction_error"} 8
+# HELP builder_builds_triggered_total Number of triggered image builds
+# TYPE builder_builds_triggered_total counter
+builder_builds_triggered_total 0
+# HELP engine_daemon_container_actions_seconds The number of seconds it takes to process each container action
+# TYPE engine_daemon_container_actions_seconds histogram
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="changes"} 0
+engine_daemon_container_actions_seconds_count{action="changes"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="commit"} 0
+engine_daemon_container_actions_seconds_count{action="commit"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="create"} 0
+engine_daemon_container_actions_seconds_count{action="create"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="delete"} 0
+engine_daemon_container_actions_seconds_count{action="delete"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="start"} 0
+engine_daemon_container_actions_seconds_count{action="start"} 1
+# HELP engine_daemon_container_states_containers The count of containers in various states
+# TYPE engine_daemon_container_states_containers gauge
+engine_daemon_container_states_containers{state="paused"} 11
+engine_daemon_container_states_containers{state="running"} 12
+engine_daemon_container_states_containers{state="stopped"} 13
+# HELP engine_daemon_engine_cpus_cpus The number of cpus that the host system of the engine has
+# TYPE engine_daemon_engine_cpus_cpus gauge
+engine_daemon_engine_cpus_cpus 4
+# HELP engine_daemon_engine_info The information related to the engine and the OS it is running on
+# TYPE engine_daemon_engine_info gauge
+engine_daemon_engine_info{architecture="x86_64",commit="774a1f4eee",daemon_id="NFZK:ZHHR:73WY:RV7D:MMU2:SE24:WWRJ:A3WN:WMMA:SPCL:PVO3:VGY7",graphdriver="overlay2",kernel="4.14.105-1-MANJARO",os="Manjaro Linux",os_type="linux",version="18.09.3-ce"} 1
+# HELP engine_daemon_engine_memory_bytes The number of bytes of memory that the host system of the engine has
+# TYPE engine_daemon_engine_memory_bytes gauge
+engine_daemon_engine_memory_bytes 2.5215361024e+10
+# HELP engine_daemon_events_subscribers_total The number of current subscribers to events
+# TYPE engine_daemon_events_subscribers_total gauge
+engine_daemon_events_subscribers_total 0
+# HELP engine_daemon_events_total The number of events logged
+# TYPE engine_daemon_events_total counter
+engine_daemon_events_total 0
+# HELP engine_daemon_health_checks_failed_total The total number of failed health checks
+# TYPE engine_daemon_health_checks_failed_total counter
+engine_daemon_health_checks_failed_total 33
+# HELP engine_daemon_health_checks_total The total number of health checks
+# TYPE engine_daemon_health_checks_total counter
+engine_daemon_health_checks_total 0
+# HELP etcd_debugging_snap_save_marshalling_duration_seconds The marshalling cost distributions of save called by snapshot.
+# TYPE etcd_debugging_snap_save_marshalling_duration_seconds histogram
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.001"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.002"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.004"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.008"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.016"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.032"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.064"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.128"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.256"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.512"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="1.024"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="2.048"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="4.096"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="8.192"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="+Inf"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_sum 0
+etcd_debugging_snap_save_marshalling_duration_seconds_count 0
+# HELP etcd_debugging_snap_save_total_duration_seconds The total latency distributions of save called by snapshot.
+# TYPE etcd_debugging_snap_save_total_duration_seconds histogram
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.001"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.002"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.004"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.008"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.016"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.032"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.064"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.128"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.256"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.512"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="1.024"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="2.048"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="4.096"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="8.192"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="+Inf"} 0
+etcd_debugging_snap_save_total_duration_seconds_sum 0
+etcd_debugging_snap_save_total_duration_seconds_count 0
+# HELP etcd_disk_wal_fsync_duration_seconds The latency distributions of fsync called by wal.
+# TYPE etcd_disk_wal_fsync_duration_seconds histogram
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.001"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.002"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.004"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.008"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.016"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.032"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.064"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.128"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.256"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.512"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="1.024"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="2.048"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="4.096"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="8.192"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="+Inf"} 0
+etcd_disk_wal_fsync_duration_seconds_sum 0
+etcd_disk_wal_fsync_duration_seconds_count 0
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 1.0085e-05
+go_gc_duration_seconds{quantile="0.25"} 3.1991e-05
+go_gc_duration_seconds{quantile="0.5"} 4.8062e-05
+go_gc_duration_seconds{quantile="0.75"} 9.067e-05
+go_gc_duration_seconds{quantile="1"} 0.000175239
+go_gc_duration_seconds_sum 0.000724173
+go_gc_duration_seconds_count 12
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 50
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 8.13368e+06
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 2.7343352e+07
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.454057e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 319815
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 2.398208e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 8.13368e+06
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 5.5648256e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 1.0477568e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 114878
+# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes_total counter
+go_memstats_heap_released_bytes_total 5.4738944e+07
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 6.6125824e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.5528438390886765e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 434693
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 6944
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16384
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 159696
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 196608
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 1.5134512e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 1.112335e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 983040
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 983040
+# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 7.2286456e+07
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="prometheus"} 0
+http_request_duration_microseconds_count{handler="prometheus"} 0
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="prometheus",quantile="0.5"} NaN
+http_request_size_bytes{handler="prometheus",quantile="0.9"} NaN
+http_request_size_bytes{handler="prometheus",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="prometheus"} 0
+http_request_size_bytes_count{handler="prometheus"} 0
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="prometheus",quantile="0.5"} NaN
+http_response_size_bytes{handler="prometheus",quantile="0.9"} NaN
+http_response_size_bytes{handler="prometheus",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="prometheus"} 0
+http_response_size_bytes_count{handler="prometheus"} 0
+# HELP logger_log_entries_size_greater_than_buffer_total Number of log entries which are larger than the log buffer
+# TYPE logger_log_entries_size_greater_than_buffer_total counter
+logger_log_entries_size_greater_than_buffer_total 0
+# HELP logger_log_read_operations_failed_total Number of log reads from container stdio that failed
+# TYPE logger_log_read_operations_failed_total counter
+logger_log_read_operations_failed_total 0
+# HELP logger_log_write_operations_failed_total Number of log write operations that failed
+# TYPE logger_log_write_operations_failed_total counter
+logger_log_write_operations_failed_total 0
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 2.12
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1.048576e+06
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 24
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 8.5929984e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.55284287673e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 1.257283584e+09
+# HELP swarm_dispatcher_scheduling_delay_seconds Scheduling delay is the time a task takes to go from NEW to RUNNING state.
+# TYPE swarm_dispatcher_scheduling_delay_seconds histogram
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.005"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.01"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.025"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.05"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.1"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.25"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.5"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="1"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="2.5"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="5"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="10"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="+Inf"} 0
+swarm_dispatcher_scheduling_delay_seconds_sum 0
+swarm_dispatcher_scheduling_delay_seconds_count 0
+# HELP swarm_manager_configs_total The number of configs in the cluster object store
+# TYPE swarm_manager_configs_total gauge
+swarm_manager_configs_total 1
+# HELP swarm_manager_leader Indicates if this manager node is a leader
+# TYPE swarm_manager_leader gauge
+swarm_manager_leader 1
+# HELP swarm_manager_networks_total The number of networks in the cluster object store
+# TYPE swarm_manager_networks_total gauge
+swarm_manager_networks_total 3
+# HELP swarm_manager_nodes The number of nodes
+# TYPE swarm_manager_nodes gauge
+swarm_manager_nodes{state="disconnected"} 1
+swarm_manager_nodes{state="down"} 2
+swarm_manager_nodes{state="ready"} 3
+swarm_manager_nodes{state="unknown"} 4
+# HELP swarm_manager_secrets_total The number of secrets in the cluster object store
+# TYPE swarm_manager_secrets_total gauge
+swarm_manager_secrets_total 1
+# HELP swarm_manager_services_total The number of services in the cluster object store
+# TYPE swarm_manager_services_total gauge
+swarm_manager_services_total 1
+# HELP swarm_manager_tasks_total The number of tasks in the cluster object store
+# TYPE swarm_manager_tasks_total gauge
+swarm_manager_tasks_total{state="accepted"} 1
+swarm_manager_tasks_total{state="assigned"} 2
+swarm_manager_tasks_total{state="complete"} 3
+swarm_manager_tasks_total{state="failed"} 4
+swarm_manager_tasks_total{state="new"} 5
+swarm_manager_tasks_total{state="orphaned"} 6
+swarm_manager_tasks_total{state="pending"} 7
+swarm_manager_tasks_total{state="preparing"} 8
+swarm_manager_tasks_total{state="ready"} 9
+swarm_manager_tasks_total{state="rejected"} 10
+swarm_manager_tasks_total{state="remove"} 11
+swarm_manager_tasks_total{state="running"} 12
+swarm_manager_tasks_total{state="shutdown"} 13
+swarm_manager_tasks_total{state="starting"} 14
+# HELP swarm_node_info Information related to the swarm
+# TYPE swarm_node_info gauge
+swarm_node_info{node_id="193816ofdqsg9kkm0hkfladvo",swarm_id="k1a6iu49n97a1vej3u5pjgsbr"} 1
+# HELP swarm_node_manager Whether this node is a manager or not
+# TYPE swarm_node_manager gauge
+swarm_node_manager 1
+# HELP swarm_raft_snapshot_latency_seconds Raft snapshot create latency.
+# TYPE swarm_raft_snapshot_latency_seconds histogram
+swarm_raft_snapshot_latency_seconds_bucket{le="0.005"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.01"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.025"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.05"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.1"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.25"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.5"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="1"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="2.5"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="5"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="10"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="+Inf"} 0
+swarm_raft_snapshot_latency_seconds_sum 0
+swarm_raft_snapshot_latency_seconds_count 0
+# HELP swarm_raft_transaction_latency_seconds Raft transaction latency.
+# TYPE swarm_raft_transaction_latency_seconds histogram
+swarm_raft_transaction_latency_seconds_bucket{le="0.005"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.01"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.025"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.05"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.1"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.25"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.5"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="1"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="2.5"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="5"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="10"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="+Inf"} 0
+swarm_raft_transaction_latency_seconds_sum 0
+swarm_raft_transaction_latency_seconds_count 0
+# HELP swarm_store_batch_latency_seconds Raft store batch latency.
+# TYPE swarm_store_batch_latency_seconds histogram
+swarm_store_batch_latency_seconds_bucket{le="0.005"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.01"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.025"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.05"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.1"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.25"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.5"} 0
+swarm_store_batch_latency_seconds_bucket{le="1"} 0
+swarm_store_batch_latency_seconds_bucket{le="2.5"} 0
+swarm_store_batch_latency_seconds_bucket{le="5"} 0
+swarm_store_batch_latency_seconds_bucket{le="10"} 0
+swarm_store_batch_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_batch_latency_seconds_sum 0
+swarm_store_batch_latency_seconds_count 0
+# HELP swarm_store_lookup_latency_seconds Raft store read latency.
+# TYPE swarm_store_lookup_latency_seconds histogram
+swarm_store_lookup_latency_seconds_bucket{le="0.005"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.01"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.025"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.05"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.1"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.25"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.5"} 0
+swarm_store_lookup_latency_seconds_bucket{le="1"} 0
+swarm_store_lookup_latency_seconds_bucket{le="2.5"} 0
+swarm_store_lookup_latency_seconds_bucket{le="5"} 0
+swarm_store_lookup_latency_seconds_bucket{le="10"} 0
+swarm_store_lookup_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_lookup_latency_seconds_sum 0
+swarm_store_lookup_latency_seconds_count 0
+# HELP swarm_store_memory_store_lock_duration_seconds Duration for which the raft memory store lock was held.
+# TYPE swarm_store_memory_store_lock_duration_seconds histogram
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.005"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.01"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.025"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.05"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.1"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.25"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.5"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="1"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="2.5"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="5"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="10"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="+Inf"} 0
+swarm_store_memory_store_lock_duration_seconds_sum 0
+swarm_store_memory_store_lock_duration_seconds_count 0
+# HELP swarm_store_read_tx_latency_seconds Raft store read tx latency.
+# TYPE swarm_store_read_tx_latency_seconds histogram
+swarm_store_read_tx_latency_seconds_bucket{le="0.005"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.01"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.025"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.05"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.1"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.25"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.5"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="1"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="2.5"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="5"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="10"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_read_tx_latency_seconds_sum 0
+swarm_store_read_tx_latency_seconds_count 0
+# HELP swarm_store_write_tx_latency_seconds Raft store write tx latency.
+# TYPE swarm_store_write_tx_latency_seconds histogram
+swarm_store_write_tx_latency_seconds_bucket{le="0.005"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.01"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.025"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.05"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.1"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.25"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="1"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="2.5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="10"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_write_tx_latency_seconds_sum 0
+swarm_store_write_tx_latency_seconds_count 0
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/docker_engine/testdata/v18.09.3-ce.txt b/src/go/plugin/go.d/modules/docker_engine/testdata/v18.09.3-ce.txt
new file mode 100644
index 000000000..b54589210
--- /dev/null
+++ b/src/go/plugin/go.d/modules/docker_engine/testdata/v18.09.3-ce.txt
@@ -0,0 +1,465 @@
+# HELP builder_builds_failed_total Number of failed image builds
+# TYPE builder_builds_failed_total counter
+builder_builds_failed_total{reason="build_canceled"} 1
+builder_builds_failed_total{reason="build_target_not_reachable_error"} 2
+builder_builds_failed_total{reason="command_not_supported_error"} 3
+builder_builds_failed_total{reason="dockerfile_empty_error"} 4
+builder_builds_failed_total{reason="dockerfile_syntax_error"} 5
+builder_builds_failed_total{reason="error_processing_commands_error"} 6
+builder_builds_failed_total{reason="missing_onbuild_arguments_error"} 7
+builder_builds_failed_total{reason="unknown_instruction_error"} 8
+# HELP builder_builds_triggered_total Number of triggered image builds
+# TYPE builder_builds_triggered_total counter
+builder_builds_triggered_total 0
+# HELP engine_daemon_container_actions_seconds The number of seconds it takes to process each container action
+# TYPE engine_daemon_container_actions_seconds histogram
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="changes",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="changes"} 0
+engine_daemon_container_actions_seconds_count{action="changes"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="commit",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="commit"} 0
+engine_daemon_container_actions_seconds_count{action="commit"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="create",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="create"} 0
+engine_daemon_container_actions_seconds_count{action="create"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="delete",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="delete"} 0
+engine_daemon_container_actions_seconds_count{action="delete"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.005"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.01"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.025"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.05"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.1"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.25"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="0.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="1"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="2.5"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="5"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="10"} 1
+engine_daemon_container_actions_seconds_bucket{action="start",le="+Inf"} 1
+engine_daemon_container_actions_seconds_sum{action="start"} 0
+engine_daemon_container_actions_seconds_count{action="start"} 1
+# HELP engine_daemon_container_states_containers The count of containers in various states
+# TYPE engine_daemon_container_states_containers gauge
+engine_daemon_container_states_containers{state="paused"} 11
+engine_daemon_container_states_containers{state="running"} 12
+engine_daemon_container_states_containers{state="stopped"} 13
+# HELP engine_daemon_engine_cpus_cpus The number of cpus that the host system of the engine has
+# TYPE engine_daemon_engine_cpus_cpus gauge
+engine_daemon_engine_cpus_cpus 4
+# HELP engine_daemon_engine_info The information related to the engine and the OS it is running on
+# TYPE engine_daemon_engine_info gauge
+engine_daemon_engine_info{architecture="x86_64",commit="774a1f4eee",daemon_id="NFZK:ZHHR:73WY:RV7D:MMU2:SE24:WWRJ:A3WN:WMMA:SPCL:PVO3:VGY7",graphdriver="overlay2",kernel="4.14.105-1-MANJARO",os="Manjaro Linux",os_type="linux",version="18.09.3-ce"} 1
+# HELP engine_daemon_engine_memory_bytes The number of bytes of memory that the host system of the engine has
+# TYPE engine_daemon_engine_memory_bytes gauge
+engine_daemon_engine_memory_bytes 2.5215361024e+10
+# HELP engine_daemon_events_subscribers_total The number of current subscribers to events
+# TYPE engine_daemon_events_subscribers_total gauge
+engine_daemon_events_subscribers_total 0
+# HELP engine_daemon_events_total The number of events logged
+# TYPE engine_daemon_events_total counter
+engine_daemon_events_total 0
+# HELP engine_daemon_health_checks_failed_total The total number of failed health checks
+# TYPE engine_daemon_health_checks_failed_total counter
+engine_daemon_health_checks_failed_total 33
+# HELP engine_daemon_health_checks_total The total number of health checks
+# TYPE engine_daemon_health_checks_total counter
+engine_daemon_health_checks_total 0
+# HELP etcd_debugging_snap_save_marshalling_duration_seconds The marshalling cost distributions of save called by snapshot.
+# TYPE etcd_debugging_snap_save_marshalling_duration_seconds histogram
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.001"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.002"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.004"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.008"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.016"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.032"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.064"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.128"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.256"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="0.512"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="1.024"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="2.048"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="4.096"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="8.192"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_bucket{le="+Inf"} 0
+etcd_debugging_snap_save_marshalling_duration_seconds_sum 0
+etcd_debugging_snap_save_marshalling_duration_seconds_count 0
+# HELP etcd_debugging_snap_save_total_duration_seconds The total latency distributions of save called by snapshot.
+# TYPE etcd_debugging_snap_save_total_duration_seconds histogram
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.001"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.002"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.004"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.008"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.016"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.032"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.064"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.128"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.256"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="0.512"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="1.024"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="2.048"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="4.096"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="8.192"} 0
+etcd_debugging_snap_save_total_duration_seconds_bucket{le="+Inf"} 0
+etcd_debugging_snap_save_total_duration_seconds_sum 0
+etcd_debugging_snap_save_total_duration_seconds_count 0
+# HELP etcd_disk_wal_fsync_duration_seconds The latency distributions of fsync called by wal.
+# TYPE etcd_disk_wal_fsync_duration_seconds histogram
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.001"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.002"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.004"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.008"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.016"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.032"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.064"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.128"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.256"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="0.512"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="1.024"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="2.048"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="4.096"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="8.192"} 0
+etcd_disk_wal_fsync_duration_seconds_bucket{le="+Inf"} 0
+etcd_disk_wal_fsync_duration_seconds_sum 0
+etcd_disk_wal_fsync_duration_seconds_count 0
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 1.0085e-05
+go_gc_duration_seconds{quantile="0.25"} 3.1991e-05
+go_gc_duration_seconds{quantile="0.5"} 4.8062e-05
+go_gc_duration_seconds{quantile="0.75"} 9.067e-05
+go_gc_duration_seconds{quantile="1"} 0.000175239
+go_gc_duration_seconds_sum 0.000724173
+go_gc_duration_seconds_count 12
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 50
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 8.13368e+06
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 2.7343352e+07
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.454057e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 319815
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 2.398208e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 8.13368e+06
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 5.5648256e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 1.0477568e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 114878
+# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes_total counter
+go_memstats_heap_released_bytes_total 5.4738944e+07
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 6.6125824e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.5528438390886765e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 434693
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 6944
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16384
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 159696
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 196608
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 1.5134512e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 1.112335e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 983040
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 983040
+# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 7.2286456e+07
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="prometheus"} 0
+http_request_duration_microseconds_count{handler="prometheus"} 0
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="prometheus",quantile="0.5"} NaN
+http_request_size_bytes{handler="prometheus",quantile="0.9"} NaN
+http_request_size_bytes{handler="prometheus",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="prometheus"} 0
+http_request_size_bytes_count{handler="prometheus"} 0
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="prometheus",quantile="0.5"} NaN
+http_response_size_bytes{handler="prometheus",quantile="0.9"} NaN
+http_response_size_bytes{handler="prometheus",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="prometheus"} 0
+http_response_size_bytes_count{handler="prometheus"} 0
+# HELP logger_log_entries_size_greater_than_buffer_total Number of log entries which are larger than the log buffer
+# TYPE logger_log_entries_size_greater_than_buffer_total counter
+logger_log_entries_size_greater_than_buffer_total 0
+# HELP logger_log_read_operations_failed_total Number of log reads from container stdio that failed
+# TYPE logger_log_read_operations_failed_total counter
+logger_log_read_operations_failed_total 0
+# HELP logger_log_write_operations_failed_total Number of log write operations that failed
+# TYPE logger_log_write_operations_failed_total counter
+logger_log_write_operations_failed_total 0
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 2.12
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1.048576e+06
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 24
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 8.5929984e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.55284287673e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 1.257283584e+09
+# HELP swarm_dispatcher_scheduling_delay_seconds Scheduling delay is the time a task takes to go from NEW to RUNNING state.
+# TYPE swarm_dispatcher_scheduling_delay_seconds histogram
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.005"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.01"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.025"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.05"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.1"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.25"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="0.5"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="1"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="2.5"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="5"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="10"} 0
+swarm_dispatcher_scheduling_delay_seconds_bucket{le="+Inf"} 0
+swarm_dispatcher_scheduling_delay_seconds_sum 0
+swarm_dispatcher_scheduling_delay_seconds_count 0
+# HELP swarm_manager_configs_total The number of configs in the cluster object store
+# TYPE swarm_manager_configs_total gauge
+swarm_manager_configs_total 0
+# HELP swarm_manager_leader Indicates if this manager node is a leader
+# TYPE swarm_manager_leader gauge
+swarm_manager_leader 0
+# HELP swarm_manager_networks_total The number of networks in the cluster object store
+# TYPE swarm_manager_networks_total gauge
+swarm_manager_networks_total 0
+# HELP swarm_manager_nodes The number of nodes
+# TYPE swarm_manager_nodes gauge
+swarm_manager_nodes{state="disconnected"} 0
+swarm_manager_nodes{state="down"} 0
+swarm_manager_nodes{state="ready"} 0
+swarm_manager_nodes{state="unknown"} 0
+# HELP swarm_manager_secrets_total The number of secrets in the cluster object store
+# TYPE swarm_manager_secrets_total gauge
+swarm_manager_secrets_total 0
+# HELP swarm_manager_services_total The number of services in the cluster object store
+# TYPE swarm_manager_services_total gauge
+swarm_manager_services_total 0
+# HELP swarm_manager_tasks_total The number of tasks in the cluster object store
+# TYPE swarm_manager_tasks_total gauge
+swarm_manager_tasks_total{state="accepted"} 0
+swarm_manager_tasks_total{state="assigned"} 0
+swarm_manager_tasks_total{state="complete"} 0
+swarm_manager_tasks_total{state="failed"} 0
+swarm_manager_tasks_total{state="new"} 0
+swarm_manager_tasks_total{state="orphaned"} 0
+swarm_manager_tasks_total{state="pending"} 0
+swarm_manager_tasks_total{state="preparing"} 0
+swarm_manager_tasks_total{state="ready"} 0
+swarm_manager_tasks_total{state="rejected"} 0
+swarm_manager_tasks_total{state="remove"} 0
+swarm_manager_tasks_total{state="running"} 0
+swarm_manager_tasks_total{state="shutdown"} 0
+swarm_manager_tasks_total{state="starting"} 0
+# HELP swarm_node_manager Whether this node is a manager or not
+# TYPE swarm_node_manager gauge
+swarm_node_manager 0
+# HELP swarm_raft_snapshot_latency_seconds Raft snapshot create latency.
+# TYPE swarm_raft_snapshot_latency_seconds histogram
+swarm_raft_snapshot_latency_seconds_bucket{le="0.005"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.01"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.025"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.05"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.1"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.25"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="0.5"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="1"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="2.5"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="5"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="10"} 0
+swarm_raft_snapshot_latency_seconds_bucket{le="+Inf"} 0
+swarm_raft_snapshot_latency_seconds_sum 0
+swarm_raft_snapshot_latency_seconds_count 0
+# HELP swarm_raft_transaction_latency_seconds Raft transaction latency.
+# TYPE swarm_raft_transaction_latency_seconds histogram
+swarm_raft_transaction_latency_seconds_bucket{le="0.005"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.01"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.025"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.05"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.1"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.25"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="0.5"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="1"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="2.5"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="5"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="10"} 0
+swarm_raft_transaction_latency_seconds_bucket{le="+Inf"} 0
+swarm_raft_transaction_latency_seconds_sum 0
+swarm_raft_transaction_latency_seconds_count 0
+# HELP swarm_store_batch_latency_seconds Raft store batch latency.
+# TYPE swarm_store_batch_latency_seconds histogram
+swarm_store_batch_latency_seconds_bucket{le="0.005"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.01"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.025"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.05"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.1"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.25"} 0
+swarm_store_batch_latency_seconds_bucket{le="0.5"} 0
+swarm_store_batch_latency_seconds_bucket{le="1"} 0
+swarm_store_batch_latency_seconds_bucket{le="2.5"} 0
+swarm_store_batch_latency_seconds_bucket{le="5"} 0
+swarm_store_batch_latency_seconds_bucket{le="10"} 0
+swarm_store_batch_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_batch_latency_seconds_sum 0
+swarm_store_batch_latency_seconds_count 0
+# HELP swarm_store_lookup_latency_seconds Raft store read latency.
+# TYPE swarm_store_lookup_latency_seconds histogram
+swarm_store_lookup_latency_seconds_bucket{le="0.005"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.01"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.025"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.05"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.1"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.25"} 0
+swarm_store_lookup_latency_seconds_bucket{le="0.5"} 0
+swarm_store_lookup_latency_seconds_bucket{le="1"} 0
+swarm_store_lookup_latency_seconds_bucket{le="2.5"} 0
+swarm_store_lookup_latency_seconds_bucket{le="5"} 0
+swarm_store_lookup_latency_seconds_bucket{le="10"} 0
+swarm_store_lookup_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_lookup_latency_seconds_sum 0
+swarm_store_lookup_latency_seconds_count 0
+# HELP swarm_store_memory_store_lock_duration_seconds Duration for which the raft memory store lock was held.
+# TYPE swarm_store_memory_store_lock_duration_seconds histogram
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.005"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.01"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.025"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.05"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.1"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.25"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="0.5"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="1"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="2.5"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="5"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="10"} 0
+swarm_store_memory_store_lock_duration_seconds_bucket{le="+Inf"} 0
+swarm_store_memory_store_lock_duration_seconds_sum 0
+swarm_store_memory_store_lock_duration_seconds_count 0
+# HELP swarm_store_read_tx_latency_seconds Raft store read tx latency.
+# TYPE swarm_store_read_tx_latency_seconds histogram
+swarm_store_read_tx_latency_seconds_bucket{le="0.005"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.01"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.025"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.05"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.1"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.25"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="0.5"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="1"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="2.5"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="5"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="10"} 0
+swarm_store_read_tx_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_read_tx_latency_seconds_sum 0
+swarm_store_read_tx_latency_seconds_count 0
+# HELP swarm_store_write_tx_latency_seconds Raft store write tx latency.
+# TYPE swarm_store_write_tx_latency_seconds histogram
+swarm_store_write_tx_latency_seconds_bucket{le="0.005"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.01"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.025"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.05"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.1"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.25"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="0.5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="1"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="2.5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="5"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="10"} 0
+swarm_store_write_tx_latency_seconds_bucket{le="+Inf"} 0
+swarm_store_write_tx_latency_seconds_sum 0
+swarm_store_write_tx_latency_seconds_count 0
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dockerhub/README.md b/src/go/plugin/go.d/modules/dockerhub/README.md
new file mode 120000
index 000000000..703add4ed
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/README.md
@@ -0,0 +1 @@
+integrations/docker_hub_repository.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dockerhub/apiclient.go b/src/go/plugin/go.d/modules/dockerhub/apiclient.go
new file mode 100644
index 000000000..f0da897f8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/apiclient.go
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhub
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+type repository struct {
+ User string
+ Name string
+ Status int
+ StarCount int `json:"star_count"`
+ PullCount int `json:"pull_count"`
+ LastUpdated string `json:"last_updated"`
+}
+
+func newAPIClient(client *http.Client, request web.Request) *apiClient {
+ return &apiClient{httpClient: client, request: request}
+}
+
+type apiClient struct {
+ httpClient *http.Client
+ request web.Request
+}
+
+func (a apiClient) getRepository(repoName string) (*repository, error) {
+ req, err := a.createRequest(repoName)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating http request : %v", err)
+ }
+
+ resp, err := a.doRequestOK(req)
+ defer closeBody(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var repo repository
+ if err := json.NewDecoder(resp.Body).Decode(&repo); err != nil {
+ return nil, fmt.Errorf("error on parsing response from %s : %v", req.URL, err)
+ }
+
+ return &repo, nil
+}
+
+func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
+ resp, err := a.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on request: %v", err)
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+ }
+ return resp, nil
+}
+
+func (a apiClient) createRequest(urlPath string) (*http.Request, error) {
+ req := a.request.Copy()
+ u, err := url.Parse(req.URL)
+ if err != nil {
+ return nil, err
+ }
+
+ u.Path = path.Join(u.Path, urlPath)
+ req.URL = u.String()
+ return web.NewHTTPRequest(req)
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dockerhub/charts.go b/src/go/plugin/go.d/modules/dockerhub/charts.go
new file mode 100644
index 000000000..78b51eac4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/charts.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhub
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+ // Dim is an alias for module.Dim
+ Dim = module.Dim
+)
+
+var charts = Charts{
+ {
+ ID: "pulls_sum",
+ Title: "Pulls Summary",
+ Units: "pulls",
+ Fam: "pulls",
+ Dims: Dims{
+ {ID: "pull_sum", Name: "sum"},
+ },
+ },
+ {
+ ID: "pulls",
+ Title: "Pulls",
+ Units: "pulls",
+ Fam: "pulls",
+ Type: module.Stacked,
+ },
+ {
+ ID: "pulls_rate",
+ Title: "Pulls Rate",
+ Units: "pulls/s",
+ Fam: "pulls",
+ Type: module.Stacked,
+ },
+ {
+ ID: "stars",
+ Title: "Stars",
+ Units: "stars",
+ Fam: "stars",
+ Type: module.Stacked,
+ },
+ {
+ ID: "status",
+ Title: "Current Status",
+ Units: "status",
+ Fam: "status",
+ },
+ {
+ ID: "last_updated",
+ Title: "Time Since Last Updated",
+ Units: "seconds",
+ Fam: "last updated",
+ },
+}
+
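+// addReposToCharts adds one dimension per configured repository to the pulls,
+// pulls_rate, stars, status, and last_updated charts, using the repository
+// name with "/" replaced by "_" as the dimension name.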
+func addReposToCharts(repositories []string, cs *Charts) {
+ for _, name := range repositories {
+ dimName := strings.Replace(name, "/", "_", -1)
+ _ = cs.Get("pulls").AddDim(&Dim{
+ ID: "pull_count_" + name,
+ Name: dimName,
+ })
+ _ = cs.Get("pulls_rate").AddDim(&Dim{
+ ID: "pull_count_" + name,
+ Name: dimName,
+ Algo: module.Incremental,
+ })
+ _ = cs.Get("stars").AddDim(&Dim{
+ ID: "star_count_" + name,
+ Name: dimName,
+ })
+ _ = cs.Get("status").AddDim(&Dim{
+ ID: "status_" + name,
+ Name: dimName,
+ })
+ _ = cs.Get("last_updated").AddDim(&Dim{
+ ID: "last_updated_" + name,
+ Name: dimName,
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dockerhub/collect.go b/src/go/plugin/go.d/modules/dockerhub/collect.go
new file mode 100644
index 000000000..211c1ea7c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/collect.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhub
+
+import (
+ "fmt"
+ "time"
+)
+
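+// collect queries every configured repository concurrently and flattens the
+// results into a single metrics map; pull_sum is reported only when all
+// repositories were fetched and parsed successfully.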
+func (dh *DockerHub) collect() (map[string]int64, error) {
+ var (
+ reposNum = len(dh.Repositories)
+ ch = make(chan *repository, reposNum)
+ mx = make(map[string]int64)
+ )
+
+ for _, name := range dh.Repositories {
+ go dh.collectRepo(name, ch)
+ }
+
+ var (
+ parsed int
+ pullSum int
+ )
+
+ for i := 0; i < reposNum; i++ {
+ repo := <-ch
+ if repo == nil {
+ continue
+ }
+ if err := parseRepoTo(repo, mx); err != nil {
+ dh.Errorf("error on parsing %s/%s : %v", repo.User, repo.Name, err)
+ continue
+ }
+ pullSum += repo.PullCount
+ parsed++
+ }
+ close(ch)
+
+ if parsed == reposNum {
+ mx["pull_sum"] = int64(pullSum)
+ }
+
+ return mx, nil
+}
+
+func (dh *DockerHub) collectRepo(repoName string, ch chan *repository) {
+ repo, err := dh.client.getRepository(repoName)
+ if err != nil {
+ dh.Error(err)
+ }
+ ch <- repo
+}
+
+func parseRepoTo(repo *repository, mx map[string]int64) error {
+ t, err := time.Parse(time.RFC3339Nano, repo.LastUpdated)
+ if err != nil {
+ return err
+ }
+ mx[fmt.Sprintf("last_updated_%s/%s", repo.User, repo.Name)] = int64(time.Since(t).Seconds())
+ mx[fmt.Sprintf("star_count_%s/%s", repo.User, repo.Name)] = int64(repo.StarCount)
+ mx[fmt.Sprintf("pull_count_%s/%s", repo.User, repo.Name)] = int64(repo.PullCount)
+ mx[fmt.Sprintf("status_%s/%s", repo.User, repo.Name)] = int64(repo.Status)
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/dockerhub/config_schema.json b/src/go/plugin/go.d/modules/dockerhub/config_schema.json
new file mode 100644
index 000000000..7998516f4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/config_schema.json
@@ -0,0 +1,203 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "DockerHub collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the DockerHub repositories endpoint.",
+ "type": "string",
+ "default": "https://hub.docker.com/v2/repositories",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "repositories": {
+ "title": "Repositories",
+ "description": "List of repositories to monitor.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Name",
+ "description": "The name of the repository.",
+ "type": "string"
+ },
+ "uniqueItems": true,
+ "minItems": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url",
+ "repositories"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "repositories",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "repositories": {
+ "ui:listFlavour": "list"
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dockerhub/dockerhub.go b/src/go/plugin/go.d/modules/dockerhub/dockerhub.go
new file mode 100644
index 000000000..37cf64960
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/dockerhub.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhub
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("dockerhub", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *DockerHub {
+ return &DockerHub{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "https://hub.docker.com/v2/repositories",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ },
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ Repositories []string `yaml:"repositories" json:"repositories"`
+}
+
+type DockerHub struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ client *apiClient
+}
+
+func (dh *DockerHub) Configuration() any {
+ return dh.Config
+}
+
+func (dh *DockerHub) Init() error {
+ if err := dh.validateConfig(); err != nil {
+ dh.Errorf("config validation: %v", err)
+ return err
+ }
+
+ client, err := dh.initApiClient()
+ if err != nil {
+ dh.Error(err)
+ return err
+ }
+ dh.client = client
+
+ return nil
+}
+
+func (dh *DockerHub) Check() error {
+ mx, err := dh.collect()
+ if err != nil {
+ dh.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+	}
+ return nil
+}
+
+func (dh *DockerHub) Charts() *Charts {
+ cs := charts.Copy()
+ addReposToCharts(dh.Repositories, cs)
+ return cs
+}
+
+func (dh *DockerHub) Collect() map[string]int64 {
+ mx, err := dh.collect()
+
+ if err != nil {
+ dh.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (dh *DockerHub) Cleanup() {
+ if dh.client != nil && dh.client.httpClient != nil {
+ dh.client.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dockerhub/dockerhub_test.go b/src/go/plugin/go.d/modules/dockerhub/dockerhub_test.go
new file mode 100644
index 000000000..5d8df4cf3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/dockerhub_test.go
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhub
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataRepo1, _ = os.ReadFile("testdata/repo1.txt")
+ dataRepo2, _ = os.ReadFile("testdata/repo2.txt")
+ dataRepo3, _ = os.ReadFile("testdata/repo3.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataRepo1": dataRepo1,
+ "dataRepo2": dataRepo2,
+ "dataRepo3": dataRepo3,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDockerHub_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DockerHub{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDockerHub_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
+
+func TestDockerHub_Cleanup(t *testing.T) { New().Cleanup() }
+
+func TestDockerHub_Init(t *testing.T) {
+ job := New()
+ job.Repositories = []string{"name/repo"}
+ assert.NoError(t, job.Init())
+ assert.NotNil(t, job.client)
+}
+
+func TestDockerHub_InitNG(t *testing.T) {
+ assert.Error(t, New().Init())
+}
+
+func TestDockerHub_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch {
+ case strings.HasSuffix(r.URL.Path, "name1/repo1"):
+ _, _ = w.Write(dataRepo1)
+ case strings.HasSuffix(r.URL.Path, "name2/repo2"):
+ _, _ = w.Write(dataRepo2)
+ case strings.HasSuffix(r.URL.Path, "name3/repo3"):
+ _, _ = w.Write(dataRepo3)
+ }
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+}
+
+func TestDockerHub_CheckNG(t *testing.T) {
+ job := New()
+ job.URL = "http://127.0.0.1:38001/metrics"
+ job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestDockerHub_Collect(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch {
+ case strings.HasSuffix(r.URL.Path, "name1/repo1"):
+ _, _ = w.Write(dataRepo1)
+ case strings.HasSuffix(r.URL.Path, "name2/repo2"):
+ _, _ = w.Write(dataRepo2)
+ case strings.HasSuffix(r.URL.Path, "name3/repo3"):
+ _, _ = w.Write(dataRepo3)
+ }
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "star_count_user1/name1": 45,
+ "pull_count_user1/name1": 18540191,
+ "status_user1/name1": 1,
+ "star_count_user2/name2": 45,
+ "pull_count_user2/name2": 18540192,
+ "status_user2/name2": 1,
+ "star_count_user3/name3": 45,
+ "pull_count_user3/name3": 18540193,
+ "status_user3/name3": 1,
+ "pull_sum": 55620576,
+ }
+
+ collected := job.Collect()
+
+ for k := range collected {
+ if strings.HasPrefix(k, "last") {
+ delete(collected, k)
+ }
+ }
+ assert.Equal(t, expected, collected)
+}
+
+func TestDockerHub_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestDockerHub_404(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
diff --git a/src/go/plugin/go.d/modules/dockerhub/init.go b/src/go/plugin/go.d/modules/dockerhub/init.go
new file mode 100644
index 000000000..7e502a5a7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/init.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhub
+
+import (
+	"errors"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (dh *DockerHub) validateConfig() error {
+ if dh.URL == "" {
+ return errors.New("url not set")
+ }
+ if len(dh.Repositories) == 0 {
+ return errors.New("repositories not set")
+ }
+ return nil
+}
+
+func (dh *DockerHub) initApiClient() (*apiClient, error) {
+ client, err := web.NewHTTPClient(dh.Client)
+ if err != nil {
+ return nil, err
+ }
+ return newAPIClient(client, dh.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/dockerhub/integrations/docker_hub_repository.md b/src/go/plugin/go.d/modules/dockerhub/integrations/docker_hub_repository.md
new file mode 100644
index 000000000..72c171d6a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/integrations/docker_hub_repository.md
@@ -0,0 +1,209 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dockerhub/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dockerhub/metadata.yaml"
+sidebar_label: "Docker Hub repository"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Containers and VMs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Docker Hub repository
+
+
+<img src="https://netdata.cloud/img/docker.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: dockerhub
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector keeps track of DockerHub repository statistics such as the number of stars, pulls, current status, and more.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Docker Hub repository instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dockerhub.pulls_sum | sum | pulls |
+| dockerhub.pulls | a dimension per repository | pulls |
+| dockerhub.pulls_rate | a dimension per repository | pulls/s |
+| dockerhub.stars | a dimension per repository | stars |
+| dockerhub.status | a dimension per repository | status |
+| dockerhub.last_updated | a dimension per repository | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/dockerhub.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/dockerhub.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | DockerHub URL. | https://hub.docker.com/v2/repositories | yes |
+| repositories | List of repositories to monitor. | | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: dockerhub
+ repositories:
+ - 'user1/name1'
+ - 'user2/name2'
+ - 'user3/name3'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `dockerhub` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m dockerhub
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `dockerhub` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep dockerhub
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep dockerhub /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep dockerhub
+```
+
+
diff --git a/src/go/plugin/go.d/modules/dockerhub/metadata.yaml b/src/go/plugin/go.d/modules/dockerhub/metadata.yaml
new file mode 100644
index 000000000..605d6c1cb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/metadata.yaml
@@ -0,0 +1,190 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-dockerhub
+ plugin_name: go.d.plugin
+ module_name: dockerhub
+ monitored_instance:
+ name: Docker Hub repository
+ link: https://hub.docker.com/
+ icon_filename: docker.svg
+ categories:
+ - data-collection.containers-and-vms # FIXME
+ keywords:
+ - dockerhub
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+          This collector keeps track of DockerHub repository statistics such as the number of stars, pulls, current status, and more.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/dockerhub.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: DockerHub URL.
+ default_value: https://hub.docker.com/v2/repositories
+ required: true
+ - name: repositories
+ description: List of repositories to monitor.
+ default_value: ""
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: dockerhub
+ repositories:
+ - 'user1/name1'
+ - 'user2/name2'
+ - 'user3/name3'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: dockerhub.pulls_sum
+ description: Pulls Summary
+ unit: pulls
+ chart_type: line
+ dimensions:
+ - name: sum
+ - name: dockerhub.pulls
+ description: Pulls
+ unit: pulls
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per repository
+ - name: dockerhub.pulls_rate
+ description: Pulls Rate
+ unit: pulls/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per repository
+ - name: dockerhub.stars
+ description: Stars
+ unit: stars
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per repository
+ - name: dockerhub.status
+ description: Current Status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: a dimension per repository
+ - name: dockerhub.last_updated
+ description: Time Since Last Updated
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: a dimension per repository
diff --git a/src/go/plugin/go.d/modules/dockerhub/testdata/config.json b/src/go/plugin/go.d/modules/dockerhub/testdata/config.json
new file mode 100644
index 000000000..3496e747c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/testdata/config.json
@@ -0,0 +1,23 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "repositories": [
+ "ok"
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/dockerhub/testdata/config.yaml b/src/go/plugin/go.d/modules/dockerhub/testdata/config.yaml
new file mode 100644
index 000000000..20c4ba61b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/testdata/config.yaml
@@ -0,0 +1,19 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+repositories:
+ - "ok"
diff --git a/src/go/plugin/go.d/modules/dockerhub/testdata/repo1.txt b/src/go/plugin/go.d/modules/dockerhub/testdata/repo1.txt
new file mode 100644
index 000000000..b67e2f382
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/testdata/repo1.txt
@@ -0,0 +1,22 @@
+{
+ "user": "user1",
+ "name": "name1",
+ "namespace": "namespace",
+ "repository_type": "image",
+ "status": 1,
+ "description": "Description.",
+ "is_private": false,
+ "is_automated": false,
+ "can_edit": false,
+ "star_count": 45,
+ "pull_count": 18540191,
+ "last_updated": "2019-03-28T21:26:05.527650Z",
+ "is_migrated": false,
+ "has_starred": false,
+ "affiliation": null,
+ "permissions": {
+ "read": true,
+ "write": false,
+ "admin": false
+ }
+}
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dockerhub/testdata/repo2.txt b/src/go/plugin/go.d/modules/dockerhub/testdata/repo2.txt
new file mode 100644
index 000000000..e84ba989b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/testdata/repo2.txt
@@ -0,0 +1,22 @@
+{
+ "user": "user2",
+ "name": "name2",
+ "namespace": "namespace",
+ "repository_type": "image",
+ "status": 1,
+ "description": "Description.",
+ "is_private": false,
+ "is_automated": false,
+ "can_edit": false,
+ "star_count": 45,
+ "pull_count": 18540192,
+ "last_updated": "2019-03-28T21:26:05.527650Z",
+ "is_migrated": false,
+ "has_starred": false,
+ "affiliation": null,
+ "permissions": {
+ "read": true,
+ "write": false,
+ "admin": false
+ }
+}
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dockerhub/testdata/repo3.txt b/src/go/plugin/go.d/modules/dockerhub/testdata/repo3.txt
new file mode 100644
index 000000000..1fc64a9c3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dockerhub/testdata/repo3.txt
@@ -0,0 +1,22 @@
+{
+ "user": "user3",
+ "name": "name3",
+ "namespace": "namespace",
+ "repository_type": "image",
+ "status": 1,
+ "description": "Description.",
+ "is_private": false,
+ "is_automated": false,
+ "can_edit": false,
+ "star_count": 45,
+ "pull_count": 18540193,
+ "last_updated": "2019-03-28T21:26:05.527650Z",
+ "is_migrated": false,
+ "has_starred": false,
+ "affiliation": null,
+ "permissions": {
+ "read": true,
+ "write": false,
+ "admin": false
+ }
+}
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dovecot/README.md b/src/go/plugin/go.d/modules/dovecot/README.md
new file mode 120000
index 000000000..c4749cedc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/README.md
@@ -0,0 +1 @@
+integrations/dovecot.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/dovecot/charts.go b/src/go/plugin/go.d/modules/dovecot/charts.go
new file mode 100644
index 000000000..3a8bb1a8c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/charts.go
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dovecot
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioSessions = module.Priority + iota
+ prioLogins
+ prioAuthenticationAttempts
+ prioCommands
+ prioPageFaults
+ prioContextSwitches
+ prioDiskIO
+ prioNetTraffic
+ prioSysCalls
+ prioLookups
+ prioCachePerformance
+ prioAuthCachePerformance
+)
+
+var charts = module.Charts{
+ sessionsChart.Copy(),
+ loginsChart.Copy(),
+ authAttemptsChart.Copy(),
+ commandsChart.Copy(),
+ pageFaultsChart.Copy(),
+ contextSwitchesChart.Copy(),
+ diskIOChart.Copy(),
+ netTrafficChart.Copy(),
+ sysCallsChart.Copy(),
+ lookupsChart.Copy(),
+ cacheChart.Copy(),
+ authCacheChart.Copy(),
+}
+
+var (
+ sessionsChart = module.Chart{
+ ID: "sessions",
+ Title: "Dovecot Active Sessions",
+ Units: "sessions",
+ Fam: "sessions",
+ Ctx: "dovecot.sessions",
+ Priority: prioSessions,
+ Dims: module.Dims{
+ {ID: "num_connected_sessions", Name: "active"},
+ },
+ }
+ loginsChart = module.Chart{
+ ID: "logins",
+ Title: "Dovecot Logins",
+ Units: "logins",
+ Fam: "logins",
+ Ctx: "dovecot.logins",
+ Priority: prioLogins,
+ Dims: module.Dims{
+ {ID: "num_logins", Name: "logins"},
+ },
+ }
+ authAttemptsChart = module.Chart{
+ ID: "auth",
+ Title: "Dovecot Authentications",
+ Units: "attempts/s",
+ Fam: "logins",
+ Ctx: "dovecot.auth",
+ Priority: prioAuthenticationAttempts,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "auth_successes", Name: "ok", Algo: module.Incremental},
+ {ID: "auth_failures", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ commandsChart = module.Chart{
+ ID: "commands",
+ Title: "Dovecot Commands",
+ Units: "commands",
+ Fam: "commands",
+ Ctx: "dovecot.commands",
+ Priority: prioCommands,
+ Dims: module.Dims{
+ {ID: "num_cmds", Name: "commands"},
+ },
+ }
+ pageFaultsChart = module.Chart{
+ ID: "faults",
+ Title: "Dovecot Page Faults",
+ Units: "faults/s",
+ Fam: "page faults",
+ Ctx: "dovecot.faults",
+ Priority: prioPageFaults,
+ Dims: module.Dims{
+ {ID: "min_faults", Name: "minor", Algo: module.Incremental},
+ {ID: "maj_faults", Name: "major", Algo: module.Incremental},
+ },
+ }
+ contextSwitchesChart = module.Chart{
+ ID: "context_switches",
+ Title: "Dovecot Context Switches",
+ Units: "switches/s",
+ Fam: "context switches",
+ Ctx: "dovecot.context_switches",
+ Priority: prioContextSwitches,
+ Dims: module.Dims{
+ {ID: "vol_cs", Name: "voluntary", Algo: module.Incremental},
+ {ID: "invol_cs", Name: "involuntary", Algo: module.Incremental},
+ },
+ }
+ diskIOChart = module.Chart{
+ ID: "io",
+ Title: "Dovecot Disk I/O",
+ Units: "KiB/s",
+ Fam: "disk",
+ Ctx: "dovecot.io",
+ Priority: prioDiskIO,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "disk_input", Name: "read", Div: 1024, Algo: module.Incremental},
+ {ID: "disk_output", Name: "write", Mul: -1, Div: 1024, Algo: module.Incremental},
+ },
+ }
+ netTrafficChart = module.Chart{
+ ID: "net",
+ Title: "Dovecot Network Bandwidth",
+ Units: "kilobits/s",
+ Fam: "network",
+ Ctx: "dovecot.net",
+ Priority: prioNetTraffic,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "read_bytes", Name: "read", Mul: 8, Div: 1000, Algo: module.Incremental},
+ {ID: "write_bytes", Name: "write", Mul: -8, Div: 1000, Algo: module.Incremental},
+ },
+ }
+ sysCallsChart = module.Chart{
+ ID: "syscalls",
+ Title: "Dovecot Number of SysCalls",
+ Units: "syscalls/s",
+ Fam: "system",
+ Ctx: "dovecot.syscalls",
+ Priority: prioSysCalls,
+ Dims: module.Dims{
+ {ID: "read_count", Name: "read", Algo: module.Incremental},
+ {ID: "write_count", Name: "write", Algo: module.Incremental},
+ },
+ }
+ lookupsChart = module.Chart{
+ ID: "lookup",
+ Title: "Dovecot Lookups",
+ Units: "lookups/s",
+ Fam: "lookups",
+ Ctx: "dovecot.lookup",
+ Priority: prioLookups,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "mail_lookup_path", Name: "path", Algo: module.Incremental},
+ {ID: "mail_lookup_attr", Name: "attr", Algo: module.Incremental},
+ },
+ }
+ cacheChart = module.Chart{
+ ID: "cache",
+ Title: "Dovecot Cache Hits",
+ Units: "hits/s",
+ Fam: "cache",
+ Ctx: "dovecot.cache",
+ Priority: prioCachePerformance,
+ Dims: module.Dims{
+ {ID: "mail_cache_hits", Name: "hits", Algo: module.Incremental},
+ },
+ }
+ authCacheChart = module.Chart{
+ ID: "auth_cache",
+ Title: "Dovecot Authentication Cache",
+ Units: "requests/s",
+ Fam: "cache",
+ Ctx: "dovecot.auth_cache",
+ Priority: prioAuthCachePerformance,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "auth_cache_hits", Name: "hits", Algo: module.Incremental},
+ {ID: "auth_cache_misses", Name: "misses", Algo: module.Incremental},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/dovecot/client.go b/src/go/plugin/go.d/modules/dovecot/client.go
new file mode 100644
index 000000000..245d1743f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/client.go
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dovecot
+
+import (
+ "bytes"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
+type dovecotConn interface {
+ connect() error
+ disconnect()
+ queryExportGlobal() ([]byte, error)
+}
+
+func newDovecotConn(conf Config) dovecotConn {
+ return &dovecotClient{conn: socket.New(socket.Config{
+ Address: conf.Address,
+ ConnectTimeout: conf.Timeout.Duration(),
+ ReadTimeout: conf.Timeout.Duration(),
+ WriteTimeout: conf.Timeout.Duration(),
+ })}
+}
+
+type dovecotClient struct {
+ conn socket.Client
+}
+
+func (c *dovecotClient) connect() error {
+ return c.conn.Connect()
+}
+
+func (c *dovecotClient) disconnect() {
+ _ = c.conn.Disconnect()
+}
+
+func (c *dovecotClient) queryExportGlobal() ([]byte, error) {
+ var b bytes.Buffer
+ var n int
+
+ err := c.conn.Command("EXPORT\tglobal\n", func(bs []byte) bool {
+ b.Write(bs)
+ b.WriteByte('\n')
+
+ n++
+ return n < 2
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/src/go/plugin/go.d/modules/dovecot/collect.go b/src/go/plugin/go.d/modules/dovecot/collect.go
new file mode 100644
index 000000000..a93bfc811
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/collect.go
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dovecot
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// FIXME: drop using "old_stats" in favour of "stats" (https://doc.dovecot.org/configuration_manual/stats/openmetrics/).
+
+func (d *Dovecot) collect() (map[string]int64, error) {
+ if d.conn == nil {
+ conn, err := d.establishConn()
+ if err != nil {
+ return nil, err
+ }
+ d.conn = conn
+ }
+
+ stats, err := d.conn.queryExportGlobal()
+ if err != nil {
+ d.conn.disconnect()
+ d.conn = nil
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ // https://doc.dovecot.org/configuration_manual/stats/old_statistics/#statistics-gathered
+ if err := d.collectExportGlobal(mx, stats); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (d *Dovecot) collectExportGlobal(mx map[string]int64, resp []byte) error {
+ sc := bufio.NewScanner(bytes.NewReader(resp))
+
+ if !sc.Scan() {
+ return errors.New("failed to read fields line from export global response")
+ }
+ fieldsLine := strings.TrimSpace(sc.Text())
+
+ if !sc.Scan() {
+ return errors.New("failed to read values line from export global response")
+ }
+ valuesLine := strings.TrimSpace(sc.Text())
+
+ if fieldsLine == "" || valuesLine == "" {
+ return errors.New("empty fields line or values line from export global response")
+ }
+
+ fields := strings.Fields(fieldsLine)
+ values := strings.Fields(valuesLine)
+
+ if len(fields) != len(values) {
+ return fmt.Errorf("mismatched fields and values count: fields=%d, values=%d", len(fields), len(values))
+ }
+
+ for i, name := range fields {
+ val := values[i]
+
+ v, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ d.Debugf("failed to parse export value %s %s: %v", name, val, err)
+ continue
+ }
+
+ mx[name] = v
+ }
+
+ return nil
+}
+
+func (d *Dovecot) establishConn() (dovecotConn, error) {
+ conn := d.newConn(d.Config)
+
+ if err := conn.connect(); err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
diff --git a/src/go/plugin/go.d/modules/dovecot/config_schema.json b/src/go/plugin/go.d/modules/dovecot/config_schema.json
new file mode 100644
index 000000000..cf99b6939
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/config_schema.json
@@ -0,0 +1,47 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Dovecot collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The Unix or TCP socket address where the Dovecot [old_stats](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics) plugin listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:24242"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "address": {
+ "ui:help": "Use `unix://{path_to_socket}` for Unix socket or `{ip}:{port}` for TCP socket."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dovecot/dovecot.go b/src/go/plugin/go.d/modules/dovecot/dovecot.go
new file mode 100644
index 000000000..ee3d62399
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/dovecot.go
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dovecot
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("dovecot", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Dovecot {
+ return &Dovecot{
+ Config: Config{
+ Address: "127.0.0.1:24242",
+ Timeout: web.Duration(time.Second * 1),
+ },
+ newConn: newDovecotConn,
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+}
+
+type Dovecot struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ newConn func(Config) dovecotConn
+ conn dovecotConn
+}
+
+func (d *Dovecot) Configuration() any {
+ return d.Config
+}
+
+func (d *Dovecot) Init() error {
+ if d.Address == "" {
+ d.Error("config: 'address' not set")
+ return errors.New("address not set")
+ }
+
+ return nil
+}
+
+func (d *Dovecot) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (d *Dovecot) Charts() *module.Charts {
+ return d.charts
+}
+
+func (d *Dovecot) Collect() map[string]int64 {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (d *Dovecot) Cleanup() {
+ if d.conn != nil {
+ d.conn.disconnect()
+ d.conn = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/dovecot/dovecot_test.go b/src/go/plugin/go.d/modules/dovecot/dovecot_test.go
new file mode 100644
index 000000000..ba60adeb6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/dovecot_test.go
@@ -0,0 +1,281 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dovecot
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataExportGlobal, _ = os.ReadFile("testdata/export_global.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataExportGlobal": dataExportGlobal,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDovecot_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Dovecot{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDovecot_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success with default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if address not set": {
+ wantFail: true,
+ config: func() Config {
+ conf := New().Config
+ conf.Address = ""
+ return conf
+ }(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dovecot := New()
+ dovecot.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, dovecot.Init())
+ } else {
+ assert.NoError(t, dovecot.Init())
+ }
+ })
+ }
+}
+
+func TestDovecot_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Dovecot
+ }{
+ "not initialized": {
+ prepare: func() *Dovecot {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Dovecot {
+ dovecot := New()
+ dovecot.newConn = func(config Config) dovecotConn { return prepareMockOk() }
+ _ = dovecot.Check()
+ return dovecot
+ },
+ },
+ "after collect": {
+ prepare: func() *Dovecot {
+ dovecot := New()
+ dovecot.newConn = func(config Config) dovecotConn { return prepareMockOk() }
+ _ = dovecot.Collect()
+ return dovecot
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dovecot := test.prepare()
+
+ assert.NotPanics(t, dovecot.Cleanup)
+ })
+ }
+}
+
+func TestDovecot_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestDovecot_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockDovecotConn
+ wantFail bool
+ }{
+ "success case": {
+ wantFail: false,
+ prepareMock: prepareMockOk,
+ },
+ "err on connect": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnConnect,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dovecot := New()
+ mock := test.prepareMock()
+ dovecot.newConn = func(config Config) dovecotConn { return mock }
+
+ if test.wantFail {
+ assert.Error(t, dovecot.Check())
+ } else {
+ assert.NoError(t, dovecot.Check())
+ }
+ })
+ }
+}
+
+func TestDovecot_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockDovecotConn
+ wantMetrics map[string]int64
+ disconnectBeforeCleanup bool
+ disconnectAfterCleanup bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOk,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ wantMetrics: map[string]int64{
+ "auth_cache_hits": 1,
+ "auth_cache_misses": 1,
+ "auth_db_tempfails": 1,
+ "auth_failures": 1,
+ "auth_master_successes": 1,
+ "auth_successes": 1,
+ "disk_input": 1,
+ "disk_output": 1,
+ "invol_cs": 1,
+ "mail_cache_hits": 1,
+ "mail_lookup_attr": 1,
+ "mail_lookup_path": 1,
+ "mail_read_bytes": 1,
+ "mail_read_count": 1,
+ "maj_faults": 1,
+ "min_faults": 1,
+ "num_cmds": 1,
+ "num_connected_sessions": 1,
+ "num_logins": 1,
+ "read_bytes": 1,
+ "read_count": 1,
+ "reset_timestamp": 1723481629,
+ "vol_cs": 1,
+ "write_bytes": 1,
+ "write_count": 1,
+ },
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ },
+ "err on connect": {
+ prepareMock: prepareMockErrOnConnect,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: false,
+ },
+ "err on query stats": {
+ prepareMock: prepareMockErrOnQueryExportGlobal,
+ disconnectBeforeCleanup: true,
+ disconnectAfterCleanup: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dovecot := New()
+ mock := test.prepareMock()
+ dovecot.newConn = func(config Config) dovecotConn { return mock }
+
+ mx := dovecot.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ module.TestMetricsHasAllChartsDims(t, dovecot.Charts(), mx)
+ }
+
+ assert.Equal(t, test.disconnectBeforeCleanup, mock.disconnectCalled, "disconnect before cleanup")
+ dovecot.Cleanup()
+ assert.Equal(t, test.disconnectAfterCleanup, mock.disconnectCalled, "disconnect after cleanup")
+ })
+ }
+}
+
+func prepareMockOk() *mockDovecotConn {
+ return &mockDovecotConn{
+ exportGlobalResponse: dataExportGlobal,
+ }
+}
+
+func prepareMockErrOnConnect() *mockDovecotConn {
+ return &mockDovecotConn{
+ errOnConnect: true,
+ }
+}
+
+func prepareMockErrOnQueryExportGlobal() *mockDovecotConn {
+ return &mockDovecotConn{
+ errOnQueryExportGlobal: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockDovecotConn {
+ return &mockDovecotConn{
+ exportGlobalResponse: []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit."),
+ }
+}
+
+func prepareMockEmptyResponse() *mockDovecotConn {
+ return &mockDovecotConn{}
+}
+
+type mockDovecotConn struct {
+ errOnConnect bool
+ errOnQueryExportGlobal bool
+ exportGlobalResponse []byte
+ disconnectCalled bool
+}
+
+func (m *mockDovecotConn) connect() error {
+ if m.errOnConnect {
+ return errors.New("mock.connect() error")
+ }
+ return nil
+}
+
+func (m *mockDovecotConn) disconnect() {
+ m.disconnectCalled = true
+}
+
+func (m *mockDovecotConn) queryExportGlobal() ([]byte, error) {
+ if m.errOnQueryExportGlobal {
+ return nil, errors.New("mock.queryExportGlobal() error")
+ }
+ return m.exportGlobalResponse, nil
+}
diff --git a/src/go/plugin/go.d/modules/dovecot/integrations/dovecot.md b/src/go/plugin/go.d/modules/dovecot/integrations/dovecot.md
new file mode 100644
index 000000000..8b45e2de0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/integrations/dovecot.md
@@ -0,0 +1,244 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dovecot/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/dovecot/metadata.yaml"
+sidebar_label: "Dovecot"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Mail Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dovecot
+
+
+<img src="https://netdata.cloud/img/dovecot.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: dovecot
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.
+
+
+It reads the server's response to the `EXPORT\tglobal\n` command.
+
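+If you want to see exactly what the collector parses, you can send the same command to the old_stats endpoint by hand. This is a hedged, illustrative check that assumes the plugin listens on the default TCP address; the reply is a tab-separated header line followed by a matching line of values.
+
+```bash
+# Illustrative manual query of the old_stats endpoint (default address assumed)
+printf 'EXPORT\tglobal\n' | nc 127.0.0.1 24242
+```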
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+Automatically discovers and collects Dovecot statistics from the following default locations:
+
+- localhost:24242
+- unix:///var/run/dovecot/old-stats
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Dovecot instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| dovecot.sessions | active | sessions |
+| dovecot.logins | logins | logins |
+| dovecot.auth | ok, failed | attempts/s |
+| dovecot.commands | commands | commands |
+| dovecot.context_switches | voluntary, involuntary | switches/s |
+| dovecot.io | read, write | KiB/s |
+| dovecot.net | read, write | kilobits/s |
+| dovecot.syscalls | read, write | syscalls/s |
+| dovecot.lookup | path, attr | lookups/s |
+| dovecot.cache | hits | hits/s |
+| dovecot.auth_cache | hits, misses | requests/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable old_stats plugin
+
+To enable `old_stats` plugin, see [Old Statistics](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics).
+
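+Once enabled, a quick sanity check (illustrative commands; adjust to your setup) is to confirm that Dovecot loaded the plugin and that the stats address is reachable:
+
+```bash
+# Show non-default Dovecot settings and look for the old_stats plugin
+doveconf -n | grep -i old_stats
+# If you use the default TCP listener, verify something is listening on port 24242
+ss -ltn | grep 24242
+```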
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/dovecot.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/dovecot.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | The Unix or TCP socket address where the Dovecot [old_stats](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics) plugin listens for connections. | 127.0.0.1:24242 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### Basic (TCP)
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:24242
+
+```
+</details>
+
+##### Basic (UNIX)
+
+A basic example configuration using a UNIX socket.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: unix:///var/run/dovecot/old-stats
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:24242
+
+ - name: remote
+ address: 203.0.113.0:24242
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `dovecot` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m dovecot
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `dovecot` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep dovecot
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep dovecot /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep dovecot
+```
+
+
diff --git a/src/go/plugin/go.d/modules/dovecot/metadata.yaml b/src/go/plugin/go.d/modules/dovecot/metadata.yaml
new file mode 100644
index 000000000..948990bca
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/metadata.yaml
@@ -0,0 +1,194 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-dovecot
+ plugin_name: go.d.plugin
+ module_name: dovecot
+ monitored_instance:
+ name: Dovecot
+ link: 'https://www.dovecot.org/'
+ categories:
+ - data-collection.mail-servers
+ icon_filename: "dovecot.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - dovecot
+ - imap
+ - mail
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.
+ method_description: |
+ It reads the server's response to the `EXPORT\tglobal\n` command.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ Automatically discovers and collects Dovecot statistics from the following default locations:
+
+ - localhost:24242
+ - unix:///var/run/dovecot/old-stats
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable old_stats plugin
+ description: |
+ To enable `old_stats` plugin, see [Old Statistics](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics).
+ configuration:
+ file:
+ name: go.d/dovecot.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: "The Unix or TCP socket address where the Dovecot [old_stats](https://doc.dovecot.org/configuration_manual/stats/old_statistics/#old-statistics) plugin listens for connections."
+ default_value: 127.0.0.1:24242
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic (TCP)
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:24242
+ - name: Basic (UNIX)
+ description: A basic example configuration using a UNIX socket.
+ config: |
+ jobs:
+ - name: local
+ address: unix:///var/run/dovecot/old-stats
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:24242
+
+ - name: remote
+ address: 203.0.113.0:24242
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+            - name: dovecot.sessions
+ description: Dovecot Active Sessions
+ unit: "sessions"
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: dovecot.logins
+ description: Dovecot Logins
+ unit: "logins"
+ chart_type: line
+ dimensions:
+ - name: logins
+ - name: dovecot.auth
+ description: Dovecot Authentications
+ unit: "attempts/s"
+ chart_type: stacked
+ dimensions:
+ - name: ok
+ - name: failed
+ - name: dovecot.commands
+ description: Dovecot Commands
+ unit: "commands"
+ chart_type: line
+ dimensions:
+ - name: commands
+ - name: dovecot.context_switches
+ description: Dovecot Context Switches
+ unit: "switches/s"
+ chart_type: line
+ dimensions:
+ - name: voluntary
+                - name: involuntary
+ - name: dovecot.io
+ description: Dovecot Disk I/O
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: dovecot.net
+ description: Dovecot Network Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: dovecot.syscalls
+ description: Dovecot Number of SysCalls
+ unit: "syscalls/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: dovecot.lookup
+ description: Dovecot Lookups
+ unit: "lookups/s"
+ chart_type: stacked
+ dimensions:
+ - name: path
+ - name: attr
+ - name: dovecot.cache
+ description: Dovecot Cache Hits
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: dovecot.auth_cache
+ description: Dovecot Authentication Cache
+ unit: "requests/s"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
diff --git a/src/go/plugin/go.d/modules/dovecot/testdata/config.json b/src/go/plugin/go.d/modules/dovecot/testdata/config.json
new file mode 100644
index 000000000..e86834720
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/dovecot/testdata/config.yaml b/src/go/plugin/go.d/modules/dovecot/testdata/config.yaml
new file mode 100644
index 000000000..1b81d09eb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/dovecot/testdata/export_global.txt b/src/go/plugin/go.d/modules/dovecot/testdata/export_global.txt
new file mode 100644
index 000000000..00d28914a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/dovecot/testdata/export_global.txt
@@ -0,0 +1,2 @@
+reset_timestamp last_update num_logins num_cmds num_connected_sessions user_cpu sys_cpu clock_time min_faults maj_faults vol_cs invol_cs disk_input disk_output read_count read_bytes write_count write_bytes mail_lookup_path mail_lookup_attr mail_read_count mail_read_bytes mail_cache_hits auth_successes auth_master_successes auth_failures auth_db_tempfails auth_cache_hits auth_cache_misses
+1723481629 1.111111 1 1 1 1.1 1.1 1.1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
diff --git a/src/go/plugin/go.d/modules/elasticsearch/README.md b/src/go/plugin/go.d/modules/elasticsearch/README.md
new file mode 120000
index 000000000..8951ff7b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/README.md
@@ -0,0 +1 @@
+integrations/elasticsearch.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/elasticsearch/charts.go b/src/go/plugin/go.d/modules/elasticsearch/charts.go
new file mode 100644
index 000000000..049061235
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/charts.go
@@ -0,0 +1,845 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package elasticsearch
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioNodeIndicesIndexingOps = module.Priority + iota
+ prioNodeIndicesIndexingOpsCurrent
+ prioNodeIndicesIndexingOpsTime
+ prioNodeIndicesSearchOps
+ prioNodeIndicesSearchOpsCurrent
+ prioNodeIndicesSearchOpsTime
+ prioNodeIndicesRefreshOps
+ prioNodeIndicesRefreshOpsTime
+ prioNodeIndicesFlushOps
+ prioNodeIndicesFlushOpsTime
+ prioNodeIndicesFieldDataMemoryUsage
+ prioNodeIndicesFieldDataEvictions
+ prioNodeIndicesSegmentsCount
+ prioNodeIndicesSegmentsMemoryUsageTotal
+ prioNodeIndicesSegmentsMemoryUsage
+ prioNodeIndicesTransLogOps
+ prioNodeIndexTransLogSize
+ prioNodeFileDescriptors
+ prioNodeJVMMemHeap
+ prioNodeJVMMemHeapBytes
+ prioNodeJVMBufferPoolsCount
+ prioNodeJVMBufferPoolDirectMemory
+ prioNodeJVMBufferPoolMappedMemory
+ prioNodeJVMGCCount
+ prioNodeJVMGCTime
+ prioNodeThreadPoolQueued
+ prioNodeThreadPoolRejected
+ prioNodeClusterCommunicationPackets
+ prioNodeClusterCommunication
+ prioNodeHTTPConnections
+ prioNodeBreakersTrips
+
+ prioClusterStatus
+ prioClusterNodesCount
+ prioClusterShardsCount
+ prioClusterPendingTasks
+ prioClusterInFlightFetchesCount
+
+ prioClusterIndicesCount
+ prioClusterIndicesShardsCount
+ prioClusterIndicesDocsCount
+ prioClusterIndicesStoreSize
+ prioClusterIndicesQueryCache
+ prioClusterNodesByRoleCount
+
+ prioNodeIndexHealth
+ prioNodeIndexShardsCount
+ prioNodeIndexDocsCount
+ prioNodeIndexStoreSize
+)
+
+var nodeChartsTmpl = module.Charts{
+ nodeIndicesIndexingOpsChartTmpl.Copy(),
+ nodeIndicesIndexingOpsCurrentChartTmpl.Copy(),
+ nodeIndicesIndexingOpsTimeChartTmpl.Copy(),
+
+ nodeIndicesSearchOpsChartTmpl.Copy(),
+ nodeIndicesSearchOpsCurrentChartTmpl.Copy(),
+ nodeIndicesSearchOpsTimeChartTmpl.Copy(),
+
+ nodeIndicesRefreshOpsChartTmpl.Copy(),
+ nodeIndicesRefreshOpsTimeChartTmpl.Copy(),
+
+ nodeIndicesFlushOpsChartTmpl.Copy(),
+ nodeIndicesFlushOpsTimeChartTmpl.Copy(),
+
+ nodeIndicesFieldDataMemoryUsageChartTmpl.Copy(),
+ nodeIndicesFieldDataEvictionsChartTmpl.Copy(),
+
+ nodeIndicesSegmentsCountChartTmpl.Copy(),
+ nodeIndicesSegmentsMemoryUsageTotalChartTmpl.Copy(),
+ nodeIndicesSegmentsMemoryUsageChartTmpl.Copy(),
+
+ nodeIndicesTransLogOpsChartTmpl.Copy(),
+ nodeIndexTransLogSizeChartTmpl.Copy(),
+
+ nodeFileDescriptorsChartTmpl.Copy(),
+
+ nodeJVMMemHeapChartTmpl.Copy(),
+ nodeJVMMemHeapBytesChartTmpl.Copy(),
+ nodeJVMBufferPoolsCountChartTmpl.Copy(),
+ nodeJVMBufferPoolDirectMemoryChartTmpl.Copy(),
+ nodeJVMBufferPoolMappedMemoryChartTmpl.Copy(),
+ nodeJVMGCCountChartTmpl.Copy(),
+ nodeJVMGCTimeChartTmpl.Copy(),
+
+ nodeThreadPoolQueuedChartTmpl.Copy(),
+ nodeThreadPoolRejectedChartTmpl.Copy(),
+
+ nodeClusterCommunicationPacketsChartTmpl.Copy(),
+ nodeClusterCommunicationChartTmpl.Copy(),
+
+ nodeHTTPConnectionsChartTmpl.Copy(),
+
+ nodeBreakersTripsChartTmpl.Copy(),
+}
+
+var (
+ nodeIndicesIndexingOpsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_indexing_operations",
+ Title: "Indexing Operations",
+ Units: "operations/s",
+ Fam: "indices indexing",
+ Ctx: "elasticsearch.node_indices_indexing",
+ Priority: prioNodeIndicesIndexingOps,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_indexing_index_total", Name: "index", Algo: module.Incremental},
+ },
+ }
+ nodeIndicesIndexingOpsCurrentChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_indexing_operations_current",
+ Title: "Indexing Operations Current",
+ Units: "operations",
+ Fam: "indices indexing",
+ Ctx: "elasticsearch.node_indices_indexing_current",
+ Priority: prioNodeIndicesIndexingOpsCurrent,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_indexing_index_current", Name: "index"},
+ },
+ }
+ nodeIndicesIndexingOpsTimeChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_indexing_operations_time",
+ Title: "Time Spent On Indexing Operations",
+ Units: "milliseconds",
+ Fam: "indices indexing",
+ Ctx: "elasticsearch.node_indices_indexing_time",
+ Priority: prioNodeIndicesIndexingOpsTime,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_indexing_index_time_in_millis", Name: "index", Algo: module.Incremental},
+ },
+ }
+
+ nodeIndicesSearchOpsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_search_operations",
+ Title: "Search Operations",
+ Units: "operations/s",
+ Fam: "indices search",
+ Ctx: "elasticsearch.node_indices_search",
+ Type: module.Stacked,
+ Priority: prioNodeIndicesSearchOps,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_search_query_total", Name: "queries", Algo: module.Incremental},
+ {ID: "node_%s_indices_search_fetch_total", Name: "fetches", Algo: module.Incremental},
+ },
+ }
+ nodeIndicesSearchOpsCurrentChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_search_operations_current",
+ Title: "Search Operations Current",
+ Units: "operations",
+ Fam: "indices search",
+ Ctx: "elasticsearch.node_indices_search_current",
+ Type: module.Stacked,
+ Priority: prioNodeIndicesSearchOpsCurrent,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_search_query_current", Name: "queries"},
+ {ID: "node_%s_indices_search_fetch_current", Name: "fetches"},
+ },
+ }
+ nodeIndicesSearchOpsTimeChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_search_operations_time",
+ Title: "Time Spent On Search Operations",
+ Units: "milliseconds",
+ Fam: "indices search",
+ Ctx: "elasticsearch.node_indices_search_time",
+ Type: module.Stacked,
+ Priority: prioNodeIndicesSearchOpsTime,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_search_query_time_in_millis", Name: "query", Algo: module.Incremental},
+ {ID: "node_%s_indices_search_fetch_time_in_millis", Name: "fetch", Algo: module.Incremental},
+ },
+ }
+
+ nodeIndicesRefreshOpsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_refresh_operations",
+ Title: "Refresh Operations",
+ Units: "operations/s",
+ Fam: "indices refresh",
+ Ctx: "elasticsearch.node_indices_refresh",
+ Priority: prioNodeIndicesRefreshOps,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_refresh_total", Name: "refresh", Algo: module.Incremental},
+ },
+ }
+ nodeIndicesRefreshOpsTimeChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_refresh_operations_time",
+ Title: "Time Spent On Refresh Operations",
+ Units: "milliseconds",
+ Fam: "indices refresh",
+ Ctx: "elasticsearch.node_indices_refresh_time",
+ Priority: prioNodeIndicesRefreshOpsTime,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_refresh_total_time_in_millis", Name: "refresh", Algo: module.Incremental},
+ },
+ }
+
+ nodeIndicesFlushOpsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_flush_operations",
+ Title: "Flush Operations",
+ Units: "operations/s",
+ Fam: "indices flush",
+ Ctx: "elasticsearch.node_indices_flush",
+ Priority: prioNodeIndicesFlushOps,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_flush_total", Name: "flush", Algo: module.Incremental},
+ },
+ }
+ nodeIndicesFlushOpsTimeChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_flush_operations_time",
+ Title: "Time Spent On Flush Operations",
+ Units: "milliseconds",
+ Fam: "indices flush",
+ Ctx: "elasticsearch.node_indices_flush_time",
+ Priority: prioNodeIndicesFlushOpsTime,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_flush_total_time_in_millis", Name: "flush", Algo: module.Incremental},
+ },
+ }
+
+ nodeIndicesFieldDataMemoryUsageChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_fielddata_memory_usage",
+ Title: "Fielddata Cache Memory Usage",
+ Units: "bytes",
+ Fam: "indices fielddata",
+ Ctx: "elasticsearch.node_indices_fielddata_memory_usage",
+ Type: module.Area,
+ Priority: prioNodeIndicesFieldDataMemoryUsage,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_fielddata_memory_size_in_bytes", Name: "used"},
+ },
+ }
+ nodeIndicesFieldDataEvictionsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_fielddata_evictions",
+ Title: "Fielddata Evictions",
+ Units: "operations/s",
+ Fam: "indices fielddata",
+ Ctx: "elasticsearch.node_indices_fielddata_evictions",
+ Priority: prioNodeIndicesFieldDataEvictions,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_fielddata_evictions", Name: "evictions", Algo: module.Incremental},
+ },
+ }
+
+ nodeIndicesSegmentsCountChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_segments_count",
+ Title: "Segments Count",
+ Units: "segments",
+ Fam: "indices segments",
+ Ctx: "elasticsearch.node_indices_segments_count",
+ Priority: prioNodeIndicesSegmentsCount,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_segments_count", Name: "segments"},
+ },
+ }
+ nodeIndicesSegmentsMemoryUsageTotalChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_segments_memory_usage_total",
+ Title: "Segments Memory Usage Total",
+ Units: "bytes",
+ Fam: "indices segments",
+ Ctx: "elasticsearch.node_indices_segments_memory_usage_total",
+ Priority: prioNodeIndicesSegmentsMemoryUsageTotal,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_segments_memory_in_bytes", Name: "used"},
+ },
+ }
+ nodeIndicesSegmentsMemoryUsageChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_segments_memory_usage",
+ Title: "Segments Memory Usage",
+ Units: "bytes",
+ Fam: "indices segments",
+ Ctx: "elasticsearch.node_indices_segments_memory_usage",
+ Type: module.Stacked,
+ Priority: prioNodeIndicesSegmentsMemoryUsage,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_segments_terms_memory_in_bytes", Name: "terms"},
+ {ID: "node_%s_indices_segments_stored_fields_memory_in_bytes", Name: "stored_fields"},
+ {ID: "node_%s_indices_segments_term_vectors_memory_in_bytes", Name: "term_vectors"},
+ {ID: "node_%s_indices_segments_norms_memory_in_bytes", Name: "norms"},
+ {ID: "node_%s_indices_segments_points_memory_in_bytes", Name: "points"},
+ {ID: "node_%s_indices_segments_doc_values_memory_in_bytes", Name: "doc_values"},
+ {ID: "node_%s_indices_segments_index_writer_memory_in_bytes", Name: "index_writer"},
+ {ID: "node_%s_indices_segments_version_map_memory_in_bytes", Name: "version_map"},
+ {ID: "node_%s_indices_segments_fixed_bit_set_memory_in_bytes", Name: "fixed_bit_set"},
+ },
+ }
+
+ nodeIndicesTransLogOpsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_indices_translog_operations",
+ Title: "Translog Operations",
+ Units: "operations",
+ Fam: "indices translog",
+ Ctx: "elasticsearch.node_indices_translog_operations",
+ Type: module.Area,
+ Priority: prioNodeIndicesTransLogOps,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_translog_operations", Name: "total"},
+ {ID: "node_%s_indices_translog_uncommitted_operations", Name: "uncommitted"},
+ },
+ }
+ nodeIndexTransLogSizeChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_index_translog_size",
+ Title: "Translog Size",
+ Units: "bytes",
+ Fam: "indices translog",
+ Ctx: "elasticsearch.node_indices_translog_size",
+ Type: module.Area,
+ Priority: prioNodeIndexTransLogSize,
+ Dims: module.Dims{
+ {ID: "node_%s_indices_translog_size_in_bytes", Name: "total"},
+ {ID: "node_%s_indices_translog_uncommitted_size_in_bytes", Name: "uncommitted"},
+ },
+ }
+
+ nodeFileDescriptorsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_file_descriptors",
+ Title: "Process File Descriptors",
+ Units: "fd",
+ Fam: "process",
+ Ctx: "elasticsearch.node_file_descriptors",
+ Priority: prioNodeFileDescriptors,
+ Dims: module.Dims{
+ {ID: "node_%s_process_open_file_descriptors", Name: "open"},
+ },
+ }
+
+ nodeJVMMemHeapChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_jvm_mem_heap",
+ Title: "JVM Heap Percentage Currently in Use",
+ Units: "percentage",
+ Fam: "jvm",
+ Ctx: "elasticsearch.node_jvm_heap",
+ Type: module.Area,
+ Priority: prioNodeJVMMemHeap,
+ Dims: module.Dims{
+ {ID: "node_%s_jvm_mem_heap_used_percent", Name: "inuse"},
+ },
+ }
+ nodeJVMMemHeapBytesChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_jvm_mem_heap_bytes",
+ Title: "JVM Heap Commit And Usage",
+ Units: "bytes",
+ Fam: "jvm",
+ Ctx: "elasticsearch.node_jvm_heap_bytes",
+ Type: module.Area,
+ Priority: prioNodeJVMMemHeapBytes,
+ Dims: module.Dims{
+ {ID: "node_%s_jvm_mem_heap_committed_in_bytes", Name: "committed"},
+ {ID: "node_%s_jvm_mem_heap_used_in_bytes", Name: "used"},
+ },
+ }
+ nodeJVMBufferPoolsCountChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_jvm_buffer_pools_count",
+ Title: "JVM Buffer Pools Count",
+ Units: "pools",
+ Fam: "jvm",
+ Ctx: "elasticsearch.node_jvm_buffer_pools_count",
+ Priority: prioNodeJVMBufferPoolsCount,
+ Dims: module.Dims{
+ {ID: "node_%s_jvm_buffer_pools_direct_count", Name: "direct"},
+ {ID: "node_%s_jvm_buffer_pools_mapped_count", Name: "mapped"},
+ },
+ }
+ nodeJVMBufferPoolDirectMemoryChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_jvm_buffer_pool_direct_memory",
+ Title: "JVM Buffer Pool Direct Memory",
+ Units: "bytes",
+ Fam: "jvm",
+ Ctx: "elasticsearch.node_jvm_buffer_pool_direct_memory",
+ Type: module.Area,
+ Priority: prioNodeJVMBufferPoolDirectMemory,
+ Dims: module.Dims{
+ {ID: "node_%s_jvm_buffer_pools_direct_total_capacity_in_bytes", Name: "total"},
+ {ID: "node_%s_jvm_buffer_pools_direct_used_in_bytes", Name: "used"},
+ },
+ }
+ nodeJVMBufferPoolMappedMemoryChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_jvm_buffer_pool_mapped_memory",
+ Title: "JVM Buffer Pool Mapped Memory",
+ Units: "bytes",
+ Fam: "jvm",
+ Ctx: "elasticsearch.node_jvm_buffer_pool_mapped_memory",
+ Type: module.Area,
+ Priority: prioNodeJVMBufferPoolMappedMemory,
+ Dims: module.Dims{
+ {ID: "node_%s_jvm_buffer_pools_mapped_total_capacity_in_bytes", Name: "total"},
+ {ID: "node_%s_jvm_buffer_pools_mapped_used_in_bytes", Name: "used"},
+ },
+ }
+ nodeJVMGCCountChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_jvm_gc_count",
+ Title: "JVM Garbage Collections",
+ Units: "gc/s",
+ Fam: "jvm",
+ Ctx: "elasticsearch.node_jvm_gc_count",
+ Type: module.Stacked,
+ Priority: prioNodeJVMGCCount,
+ Dims: module.Dims{
+ {ID: "node_%s_jvm_gc_collectors_young_collection_count", Name: "young", Algo: module.Incremental},
+ {ID: "node_%s_jvm_gc_collectors_old_collection_count", Name: "old", Algo: module.Incremental},
+ },
+ }
+ nodeJVMGCTimeChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_jvm_gc_time",
+ Title: "JVM Time Spent On Garbage Collections",
+ Units: "milliseconds",
+ Fam: "jvm",
+ Ctx: "elasticsearch.node_jvm_gc_time",
+ Type: module.Stacked,
+ Priority: prioNodeJVMGCTime,
+ Dims: module.Dims{
+ {ID: "node_%s_jvm_gc_collectors_young_collection_time_in_millis", Name: "young", Algo: module.Incremental},
+ {ID: "node_%s_jvm_gc_collectors_old_collection_time_in_millis", Name: "old", Algo: module.Incremental},
+ },
+ }
+
+ nodeThreadPoolQueuedChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_thread_pool_queued",
+ Title: "Thread Pool Queued Threads Count",
+ Units: "threads",
+ Fam: "thread pool",
+ Ctx: "elasticsearch.node_thread_pool_queued",
+ Type: module.Stacked,
+ Priority: prioNodeThreadPoolQueued,
+ Dims: module.Dims{
+ {ID: "node_%s_thread_pool_generic_queue", Name: "generic"},
+ {ID: "node_%s_thread_pool_search_queue", Name: "search"},
+ {ID: "node_%s_thread_pool_search_throttled_queue", Name: "search_throttled"},
+ {ID: "node_%s_thread_pool_get_queue", Name: "get"},
+ {ID: "node_%s_thread_pool_analyze_queue", Name: "analyze"},
+ {ID: "node_%s_thread_pool_write_queue", Name: "write"},
+ {ID: "node_%s_thread_pool_snapshot_queue", Name: "snapshot"},
+ {ID: "node_%s_thread_pool_warmer_queue", Name: "warmer"},
+ {ID: "node_%s_thread_pool_refresh_queue", Name: "refresh"},
+ {ID: "node_%s_thread_pool_listener_queue", Name: "listener"},
+ {ID: "node_%s_thread_pool_fetch_shard_started_queue", Name: "fetch_shard_started"},
+ {ID: "node_%s_thread_pool_fetch_shard_store_queue", Name: "fetch_shard_store"},
+ {ID: "node_%s_thread_pool_flush_queue", Name: "flush"},
+ {ID: "node_%s_thread_pool_force_merge_queue", Name: "force_merge"},
+ {ID: "node_%s_thread_pool_management_queue", Name: "management"},
+ },
+ }
+ nodeThreadPoolRejectedChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_thread_pool_rejected",
+ Title: "Thread Pool Rejected Threads Count",
+ Units: "threads",
+ Fam: "thread pool",
+ Ctx: "elasticsearch.node_thread_pool_rejected",
+ Type: module.Stacked,
+ Priority: prioNodeThreadPoolRejected,
+ Dims: module.Dims{
+ {ID: "node_%s_thread_pool_generic_rejected", Name: "generic"},
+ {ID: "node_%s_thread_pool_search_rejected", Name: "search"},
+ {ID: "node_%s_thread_pool_search_throttled_rejected", Name: "search_throttled"},
+ {ID: "node_%s_thread_pool_get_rejected", Name: "get"},
+ {ID: "node_%s_thread_pool_analyze_rejected", Name: "analyze"},
+ {ID: "node_%s_thread_pool_write_rejected", Name: "write"},
+ {ID: "node_%s_thread_pool_snapshot_rejected", Name: "snapshot"},
+ {ID: "node_%s_thread_pool_warmer_rejected", Name: "warmer"},
+ {ID: "node_%s_thread_pool_refresh_rejected", Name: "refresh"},
+ {ID: "node_%s_thread_pool_listener_rejected", Name: "listener"},
+ {ID: "node_%s_thread_pool_fetch_shard_started_rejected", Name: "fetch_shard_started"},
+ {ID: "node_%s_thread_pool_fetch_shard_store_rejected", Name: "fetch_shard_store"},
+ {ID: "node_%s_thread_pool_flush_rejected", Name: "flush"},
+ {ID: "node_%s_thread_pool_force_merge_rejected", Name: "force_merge"},
+ {ID: "node_%s_thread_pool_management_rejected", Name: "management"},
+ },
+ }
+
+ nodeClusterCommunicationPacketsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_cluster_communication_packets",
+ Title: "Node Cluster Communication",
+ Units: "pps",
+ Fam: "transport",
+ Ctx: "elasticsearch.node_cluster_communication_packets",
+ Priority: prioNodeClusterCommunicationPackets,
+ Dims: module.Dims{
+ {ID: "node_%s_transport_rx_count", Name: "received", Algo: module.Incremental},
+ {ID: "node_%s_transport_tx_count", Name: "sent", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ nodeClusterCommunicationChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_cluster_communication_traffic",
+ Title: "Cluster Communication Bandwidth",
+ Units: "bytes/s",
+ Fam: "transport",
+ Ctx: "elasticsearch.node_cluster_communication_traffic",
+ Priority: prioNodeClusterCommunication,
+ Dims: module.Dims{
+ {ID: "node_%s_transport_rx_size_in_bytes", Name: "received", Algo: module.Incremental},
+ {ID: "node_%s_transport_tx_size_in_bytes", Name: "sent", Mul: -1, Algo: module.Incremental},
+ },
+ }
+
+ nodeHTTPConnectionsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_http_connections",
+ Title: "HTTP Connections",
+ Units: "connections",
+ Fam: "http",
+ Ctx: "elasticsearch.node_http_connections",
+ Priority: prioNodeHTTPConnections,
+ Dims: module.Dims{
+ {ID: "node_%s_http_current_open", Name: "open"},
+ },
+ }
+
+ nodeBreakersTripsChartTmpl = module.Chart{
+ ID: "node_%s_cluster_%s_breakers_trips",
+ Title: "Circuit Breaker Trips Count",
+ Units: "trips/s",
+ Fam: "circuit breakers",
+ Ctx: "elasticsearch.node_breakers_trips",
+ Type: module.Stacked,
+ Priority: prioNodeBreakersTrips,
+ Dims: module.Dims{
+ {ID: "node_%s_breakers_request_tripped", Name: "requests", Algo: module.Incremental},
+ {ID: "node_%s_breakers_fielddata_tripped", Name: "fielddata", Algo: module.Incremental},
+ {ID: "node_%s_breakers_in_flight_requests_tripped", Name: "in_flight_requests", Algo: module.Incremental},
+ {ID: "node_%s_breakers_model_inference_tripped", Name: "model_inference", Algo: module.Incremental},
+ {ID: "node_%s_breakers_accounting_tripped", Name: "accounting", Algo: module.Incremental},
+ {ID: "node_%s_breakers_parent_tripped", Name: "parent", Algo: module.Incremental},
+ },
+ }
+)
+
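+// clusterHealthChartsTmpl holds the per-cluster health chart templates; the %s in each
+// chart ID is replaced with the cluster name when the charts are added (see addClusterHealthCharts).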
+var clusterHealthChartsTmpl = module.Charts{
+ clusterStatusChartTmpl.Copy(),
+ clusterNodesCountChartTmpl.Copy(),
+ clusterShardsCountChartTmpl.Copy(),
+ clusterPendingTasksChartTmpl.Copy(),
+ clusterInFlightFetchesCountChartTmpl.Copy(),
+}
+
+var (
+ clusterStatusChartTmpl = module.Chart{
+ ID: "cluster_%s_status",
+ Title: "Cluster Status",
+ Units: "status",
+ Fam: "cluster health",
+ Ctx: "elasticsearch.cluster_health_status",
+ Priority: prioClusterStatus,
+ Dims: module.Dims{
+ {ID: "cluster_status_green", Name: "green"},
+ {ID: "cluster_status_red", Name: "red"},
+ {ID: "cluster_status_yellow", Name: "yellow"},
+ },
+ }
+ clusterNodesCountChartTmpl = module.Chart{
+ ID: "cluster_%s_number_of_nodes",
+ Title: "Cluster Nodes Count",
+ Units: "nodes",
+ Fam: "cluster health",
+ Ctx: "elasticsearch.cluster_number_of_nodes",
+ Priority: prioClusterNodesCount,
+ Dims: module.Dims{
+ {ID: "cluster_number_of_nodes", Name: "nodes"},
+ {ID: "cluster_number_of_data_nodes", Name: "data_nodes"},
+ },
+ }
+ clusterShardsCountChartTmpl = module.Chart{
+ ID: "cluster_%s_shards_count",
+ Title: "Cluster Shards Count",
+ Units: "shards",
+ Fam: "cluster health",
+ Ctx: "elasticsearch.cluster_shards_count",
+ Priority: prioClusterShardsCount,
+ Dims: module.Dims{
+ {ID: "cluster_active_primary_shards", Name: "active_primary"},
+ {ID: "cluster_active_shards", Name: "active"},
+ {ID: "cluster_relocating_shards", Name: "relocating"},
+ {ID: "cluster_initializing_shards", Name: "initializing"},
+ {ID: "cluster_unassigned_shards", Name: "unassigned"},
+ {ID: "cluster_delayed_unassigned_shards", Name: "delayed_unassigned"},
+ },
+ }
+ clusterPendingTasksChartTmpl = module.Chart{
+ ID: "cluster_%s_pending_tasks",
+ Title: "Cluster Pending Tasks",
+ Units: "tasks",
+ Fam: "cluster health",
+ Ctx: "elasticsearch.cluster_pending_tasks",
+ Priority: prioClusterPendingTasks,
+ Dims: module.Dims{
+ {ID: "cluster_number_of_pending_tasks", Name: "pending"},
+ },
+ }
+ clusterInFlightFetchesCountChartTmpl = module.Chart{
+ ID: "cluster_%s_number_of_in_flight_fetch",
+ Title: "Cluster Unfinished Fetches",
+ Units: "fetches",
+ Fam: "cluster health",
+ Ctx: "elasticsearch.cluster_number_of_in_flight_fetch",
+ Priority: prioClusterInFlightFetchesCount,
+ Dims: module.Dims{
+ {ID: "cluster_number_of_in_flight_fetch", Name: "in_flight_fetch"},
+ },
+ }
+)
+
+var clusterStatsChartsTmpl = module.Charts{
+ clusterIndicesCountChartTmpl.Copy(),
+ clusterIndicesShardsCountChartTmpl.Copy(),
+ clusterIndicesDocsCountChartTmpl.Copy(),
+ clusterIndicesStoreSizeChartTmpl.Copy(),
+ clusterIndicesQueryCacheChartTmpl.Copy(),
+ clusterNodesByRoleCountChartTmpl.Copy(),
+}
+
+var (
+ clusterIndicesCountChartTmpl = module.Chart{
+ ID: "cluster_%s_indices_count",
+ Title: "Cluster Indices Count",
+ Units: "indices",
+ Fam: "cluster stats",
+ Ctx: "elasticsearch.cluster_indices_count",
+ Priority: prioClusterIndicesCount,
+ Dims: module.Dims{
+ {ID: "cluster_indices_count", Name: "indices"},
+ },
+ }
+ clusterIndicesShardsCountChartTmpl = module.Chart{
+ ID: "cluster_%s_indices_shards_count",
+ Title: "Cluster Indices Shards Count",
+ Units: "shards",
+ Fam: "cluster stats",
+ Ctx: "elasticsearch.cluster_indices_shards_count",
+ Priority: prioClusterIndicesShardsCount,
+ Dims: module.Dims{
+ {ID: "cluster_indices_shards_total", Name: "total"},
+ {ID: "cluster_indices_shards_primaries", Name: "primaries"},
+ {ID: "cluster_indices_shards_replication", Name: "replication"},
+ },
+ }
+ clusterIndicesDocsCountChartTmpl = module.Chart{
+ ID: "cluster_%s_indices_docs_count",
+ Title: "Cluster Indices Docs Count",
+ Units: "docs",
+ Fam: "cluster stats",
+ Ctx: "elasticsearch.cluster_indices_docs_count",
+ Priority: prioClusterIndicesDocsCount,
+ Dims: module.Dims{
+ {ID: "cluster_indices_docs_count", Name: "docs"},
+ },
+ }
+ clusterIndicesStoreSizeChartTmpl = module.Chart{
+ ID: "cluster_%s_indices_store_size",
+ Title: "Cluster Indices Store Size",
+ Units: "bytes",
+ Fam: "cluster stats",
+ Ctx: "elasticsearch.cluster_indices_store_size",
+ Priority: prioClusterIndicesStoreSize,
+ Dims: module.Dims{
+ {ID: "cluster_indices_store_size_in_bytes", Name: "size"},
+ },
+ }
+ clusterIndicesQueryCacheChartTmpl = module.Chart{
+ ID: "cluster_%s_indices_query_cache",
+ Title: "Cluster Indices Query Cache",
+ Units: "events/s",
+ Fam: "cluster stats",
+ Ctx: "elasticsearch.cluster_indices_query_cache",
+ Type: module.Stacked,
+ Priority: prioClusterIndicesQueryCache,
+ Dims: module.Dims{
+ {ID: "cluster_indices_query_cache_hit_count", Name: "hit", Algo: module.Incremental},
+ {ID: "cluster_indices_query_cache_miss_count", Name: "miss", Algo: module.Incremental},
+ },
+ }
+ clusterNodesByRoleCountChartTmpl = module.Chart{
+ ID: "cluster_%s_nodes_by_role_count",
+ Title: "Cluster Nodes By Role Count",
+ Units: "nodes",
+ Fam: "cluster stats",
+ Ctx: "elasticsearch.cluster_nodes_by_role_count",
+ Priority: prioClusterNodesByRoleCount,
+ Dims: module.Dims{
+ {ID: "cluster_nodes_count_coordinating_only", Name: "coordinating_only"},
+ {ID: "cluster_nodes_count_data", Name: "data"},
+ {ID: "cluster_nodes_count_data_cold", Name: "data_cold"},
+ {ID: "cluster_nodes_count_data_content", Name: "data_content"},
+ {ID: "cluster_nodes_count_data_frozen", Name: "data_frozen"},
+ {ID: "cluster_nodes_count_data_hot", Name: "data_hot"},
+ {ID: "cluster_nodes_count_data_warm", Name: "data_warm"},
+ {ID: "cluster_nodes_count_ingest", Name: "ingest"},
+ {ID: "cluster_nodes_count_master", Name: "master"},
+ {ID: "cluster_nodes_count_ml", Name: "ml"},
+ {ID: "cluster_nodes_count_remote_cluster_client", Name: "remote_cluster_client"},
+ {ID: "cluster_nodes_count_voting_only", Name: "voting_only"},
+ },
+ }
+)
+
+var nodeIndexChartsTmpl = module.Charts{
+ nodeIndexHealthChartTmpl.Copy(),
+ nodeIndexShardsCountChartTmpl.Copy(),
+ nodeIndexDocsCountChartTmpl.Copy(),
+ nodeIndexStoreSizeChartTmpl.Copy(),
+}
+
+var (
+ nodeIndexHealthChartTmpl = module.Chart{
+ ID: "node_index_%s_cluster_%s_health",
+ Title: "Index Health",
+ Units: "status",
+ Fam: "index stats",
+ Ctx: "elasticsearch.node_index_health",
+ Priority: prioNodeIndexHealth,
+ Dims: module.Dims{
+ {ID: "node_index_%s_stats_health_green", Name: "green"},
+ {ID: "node_index_%s_stats_health_red", Name: "red"},
+ {ID: "node_index_%s_stats_health_yellow", Name: "yellow"},
+ },
+ }
+ nodeIndexShardsCountChartTmpl = module.Chart{
+ ID: "node_index_%s_cluster_%s_shards_count",
+ Title: "Index Shards Count",
+ Units: "shards",
+ Fam: "index stats",
+ Ctx: "elasticsearch.node_index_shards_count",
+ Priority: prioNodeIndexShardsCount,
+ Dims: module.Dims{
+ {ID: "node_index_%s_stats_shards_count", Name: "shards"},
+ },
+ }
+ nodeIndexDocsCountChartTmpl = module.Chart{
+ ID: "node_index_%s_cluster_%s_docs_count",
+ Title: "Index Docs Count",
+ Units: "docs",
+ Fam: "index stats",
+ Ctx: "elasticsearch.node_index_docs_count",
+ Priority: prioNodeIndexDocsCount,
+ Dims: module.Dims{
+ {ID: "node_index_%s_stats_docs_count", Name: "docs"},
+ },
+ }
+ nodeIndexStoreSizeChartTmpl = module.Chart{
+ ID: "node_index_%s_cluster_%s_store_size",
+ Title: "Index Store Size",
+ Units: "bytes",
+ Fam: "index stats",
+ Ctx: "elasticsearch.node_index_store_size",
+ Priority: prioNodeIndexStoreSize,
+ Dims: module.Dims{
+ {ID: "node_index_%s_stats_store_size_in_bytes", Name: "store_size"},
+ },
+ }
+)
+
+func (es *Elasticsearch) addClusterStatsCharts() {
+ charts := clusterStatsChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, es.clusterName)
+ chart.Labels = []module.Label{
+ {Key: "cluster_name", Value: es.clusterName},
+ }
+ }
+
+ if err := es.charts.Add(*charts...); err != nil {
+ es.Warning(err)
+ }
+}
+
+func (es *Elasticsearch) addClusterHealthCharts() {
+ charts := clusterHealthChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, es.clusterName)
+ chart.Labels = []module.Label{
+ {Key: "cluster_name", Value: es.clusterName},
+ }
+ }
+
+ if err := es.charts.Add(*charts...); err != nil {
+ es.Warning(err)
+ }
+}
+
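+// addNodeCharts instantiates the node chart templates for a discovered node: the %s
+// placeholders in chart IDs are filled with the node ID and cluster name, labels are
+// attached, and each dimension ID is expanded with the node ID.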
+func (es *Elasticsearch) addNodeCharts(nodeID string, node *esNodeStats) {
+ charts := nodeChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, nodeID, es.clusterName)
+ chart.Labels = []module.Label{
+ {Key: "cluster_name", Value: es.clusterName},
+ {Key: "node_name", Value: node.Name},
+ {Key: "host", Value: node.Host},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, nodeID)
+ }
+ }
+
+ if err := es.Charts().Add(*charts...); err != nil {
+ es.Warning(err)
+ }
+}
+
+func (es *Elasticsearch) removeNodeCharts(nodeID string) {
+ px := fmt.Sprintf("node_%s_cluster_%s_", nodeID, es.clusterName)
+ es.removeCharts(px)
+}
+
+func (es *Elasticsearch) addIndexCharts(index string) {
+ charts := nodeIndexChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, index, es.clusterName)
+ chart.Labels = []module.Label{
+ {Key: "cluster_name", Value: es.clusterName},
+ {Key: "index", Value: index},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, index)
+ }
+ }
+
+ if err := es.Charts().Add(*charts...); err != nil {
+ es.Warning(err)
+ }
+}
+
+func (es *Elasticsearch) removeIndexCharts(index string) {
+ px := fmt.Sprintf("node_index_%s_cluster_%s_", index, es.clusterName)
+ es.removeCharts(px)
+}
+
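+// removeCharts marks every chart whose ID starts with the given prefix for removal,
+// so charts of nodes and indices that disappeared are dropped on the next update.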
+func (es *Elasticsearch) removeCharts(prefix string) {
+ for _, chart := range *es.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/collect.go b/src/go/plugin/go.d/modules/elasticsearch/collect.go
new file mode 100644
index 000000000..4f46f1088
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/collect.go
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package elasticsearch
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "math"
+ "net/http"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ urlPathLocalNodeStats = "/_nodes/_local/stats"
+ urlPathNodesStats = "/_nodes/stats"
+ urlPathIndicesStats = "/_cat/indices"
+ urlPathClusterHealth = "/_cluster/health"
+ urlPathClusterStats = "/_cluster/stats"
+)
+
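+// collect resolves the cluster name on the first run, scrapes the enabled API endpoints,
+// and flattens the results into a single metrics map.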
+func (es *Elasticsearch) collect() (map[string]int64, error) {
+ if es.clusterName == "" {
+ name, err := es.getClusterName()
+ if err != nil {
+ return nil, err
+ }
+ es.clusterName = name
+ }
+
+ ms := es.scrapeElasticsearch()
+ if ms.empty() {
+ return nil, nil
+ }
+
+ mx := make(map[string]int64)
+
+ es.collectNodesStats(mx, ms)
+ es.collectClusterHealth(mx, ms)
+ es.collectClusterStats(mx, ms)
+ es.collectLocalIndicesStats(mx, ms)
+
+ return mx, nil
+}
+
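+// collectNodesStats adds charts for newly seen nodes, removes charts for nodes that are
+// no longer reported, and merges each node's stats into mx under a "node_<id>" prefix.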
+func (es *Elasticsearch) collectNodesStats(mx map[string]int64, ms *esMetrics) {
+ if !ms.hasNodesStats() {
+ return
+ }
+
+ seen := make(map[string]bool)
+
+ for nodeID, node := range ms.NodesStats.Nodes {
+ seen[nodeID] = true
+
+ if !es.nodes[nodeID] {
+ es.nodes[nodeID] = true
+ es.addNodeCharts(nodeID, node)
+ }
+
+ merge(mx, stm.ToMap(node), "node_"+nodeID)
+ }
+
+ for nodeID := range es.nodes {
+ if !seen[nodeID] {
+ delete(es.nodes, nodeID)
+ es.removeNodeCharts(nodeID)
+ }
+ }
+}
+
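+// collectClusterHealth converts the textual cluster status into three boolean metrics
+// (green/yellow/red) so the status can be charted as dimensions of a single chart.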
+func (es *Elasticsearch) collectClusterHealth(mx map[string]int64, ms *esMetrics) {
+ if !ms.hasClusterHealth() {
+ return
+ }
+
+ es.addClusterHealthChartsOnce.Do(es.addClusterHealthCharts)
+
+ merge(mx, stm.ToMap(ms.ClusterHealth), "cluster")
+
+ mx["cluster_status_green"] = boolToInt(ms.ClusterHealth.Status == "green")
+ mx["cluster_status_yellow"] = boolToInt(ms.ClusterHealth.Status == "yellow")
+ mx["cluster_status_red"] = boolToInt(ms.ClusterHealth.Status == "red")
+}
+
+func (es *Elasticsearch) collectClusterStats(mx map[string]int64, ms *esMetrics) {
+ if !ms.hasClusterStats() {
+ return
+ }
+
+ es.addClusterStatsChartsOnce.Do(es.addClusterStatsCharts)
+
+ merge(mx, stm.ToMap(ms.ClusterStats), "cluster")
+}
+
+func (es *Elasticsearch) collectLocalIndicesStats(mx map[string]int64, ms *esMetrics) {
+ if !ms.hasLocalIndicesStats() {
+ return
+ }
+
+ seen := make(map[string]bool)
+
+ for _, v := range ms.LocalIndicesStats {
+ seen[v.Index] = true
+
+ if !es.indices[v.Index] {
+ es.indices[v.Index] = true
+ es.addIndexCharts(v.Index)
+ }
+
+ px := fmt.Sprintf("node_index_%s_stats_", v.Index)
+
+ mx[px+"health_green"] = boolToInt(v.Health == "green")
+ mx[px+"health_yellow"] = boolToInt(v.Health == "yellow")
+ mx[px+"health_red"] = boolToInt(v.Health == "red")
+ mx[px+"shards_count"] = strToInt(v.Rep)
+ mx[px+"docs_count"] = strToInt(v.DocsCount)
+ mx[px+"store_size_in_bytes"] = convertIndexStoreSizeToBytes(v.StoreSize)
+ }
+
+ for index := range es.indices {
+ if !seen[index] {
+ delete(es.indices, index)
+ es.removeIndexCharts(index)
+ }
+ }
+}
+
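+// scrapeElasticsearch queries the enabled endpoints concurrently; local indices stats are
+// collected only when cluster mode is disabled.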
+func (es *Elasticsearch) scrapeElasticsearch() *esMetrics {
+ ms := &esMetrics{}
+ wg := &sync.WaitGroup{}
+
+ if es.DoNodeStats {
+ wg.Add(1)
+ go func() { defer wg.Done(); es.scrapeNodesStats(ms) }()
+ }
+ if es.DoClusterHealth {
+ wg.Add(1)
+ go func() { defer wg.Done(); es.scrapeClusterHealth(ms) }()
+ }
+ if es.DoClusterStats {
+ wg.Add(1)
+ go func() { defer wg.Done(); es.scrapeClusterStats(ms) }()
+ }
+ if !es.ClusterMode && es.DoIndicesStats {
+ wg.Add(1)
+ go func() { defer wg.Done(); es.scrapeLocalIndicesStats(ms) }()
+ }
+ wg.Wait()
+
+ return ms
+}
+
+func (es *Elasticsearch) scrapeNodesStats(ms *esMetrics) {
+ var p string
+ if es.ClusterMode {
+ p = urlPathNodesStats
+ } else {
+ p = urlPathLocalNodeStats
+ }
+
+ req, _ := web.NewHTTPRequestWithPath(es.Request, p)
+
+ var stats esNodesStats
+ if err := es.doOKDecode(req, &stats); err != nil {
+ es.Warning(err)
+ return
+ }
+
+ ms.NodesStats = &stats
+}
+
+func (es *Elasticsearch) scrapeClusterHealth(ms *esMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(es.Request, urlPathClusterHealth)
+
+ var health esClusterHealth
+ if err := es.doOKDecode(req, &health); err != nil {
+ es.Warning(err)
+ return
+ }
+
+ ms.ClusterHealth = &health
+}
+
+func (es *Elasticsearch) scrapeClusterStats(ms *esMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(es.Request, urlPathClusterStats)
+
+ var stats esClusterStats
+ if err := es.doOKDecode(req, &stats); err != nil {
+ es.Warning(err)
+ return
+ }
+
+ ms.ClusterStats = &stats
+}
+
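+// scrapeLocalIndicesStats queries the _cat/indices API (local=true, JSON format) and
+// filters out system indices before storing the result.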
+func (es *Elasticsearch) scrapeLocalIndicesStats(ms *esMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(es.Request, urlPathIndicesStats)
+ req.URL.RawQuery = "local=true&format=json"
+
+ var stats []esIndexStats
+ if err := es.doOKDecode(req, &stats); err != nil {
+ es.Warning(err)
+ return
+ }
+
+ ms.LocalIndicesStats = removeSystemIndices(stats)
+}
+
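+// getClusterName queries the configured base URL and extracts cluster_name from the response.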
+func (es *Elasticsearch) getClusterName() (string, error) {
+ req, _ := web.NewHTTPRequest(es.Request)
+
+ var info struct {
+ ClusterName string `json:"cluster_name"`
+ }
+
+ if err := es.doOKDecode(req, &info); err != nil {
+ return "", err
+ }
+
+ if info.ClusterName == "" {
+ return "", errors.New("empty cluster name")
+ }
+
+ return info.ClusterName, nil
+}
+
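+// doOKDecode performs the request, requires a 200 response, and JSON-decodes the body into in.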
+func (es *Elasticsearch) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := es.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
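+// closeBody drains and closes the response body so the underlying connection can be reused.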
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
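+// convertIndexStoreSizeToBytes converts human-readable store sizes reported by _cat/indices
+// (e.g. "4.5gb") into bytes using 1024-based multipliers; unknown suffixes yield 0.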
+func convertIndexStoreSizeToBytes(size string) int64 {
+ var num float64
+ switch {
+ case strings.HasSuffix(size, "kb"):
+ num, _ = strconv.ParseFloat(size[:len(size)-2], 64)
+ num *= math.Pow(1024, 1)
+ case strings.HasSuffix(size, "mb"):
+ num, _ = strconv.ParseFloat(size[:len(size)-2], 64)
+ num *= math.Pow(1024, 2)
+ case strings.HasSuffix(size, "gb"):
+ num, _ = strconv.ParseFloat(size[:len(size)-2], 64)
+ num *= math.Pow(1024, 3)
+ case strings.HasSuffix(size, "tb"):
+ num, _ = strconv.ParseFloat(size[:len(size)-2], 64)
+ num *= math.Pow(1024, 4)
+ case strings.HasSuffix(size, "b"):
+ num, _ = strconv.ParseFloat(size[:len(size)-1], 64)
+ }
+ return int64(num)
+}
+
+func strToInt(s string) int64 {
+ v, _ := strconv.Atoi(s)
+ return int64(v)
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
+
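+// removeSystemIndices filters out, in place, indices whose names start with a dot
+// (system/hidden indices) and returns the remaining slice.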
+func removeSystemIndices(indices []esIndexStats) []esIndexStats {
+ var i int
+ for _, index := range indices {
+ if strings.HasPrefix(index.Index, ".") {
+ continue
+ }
+ indices[i] = index
+ i++
+ }
+ return indices[:i]
+}
+
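+// merge copies src into dst, prefixing every key with "<prefix>_".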
+func merge(dst, src map[string]int64, prefix string) {
+ for k, v := range src {
+ dst[prefix+"_"+k] = v
+ }
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/config_schema.json b/src/go/plugin/go.d/modules/elasticsearch/config_schema.json
new file mode 100644
index 000000000..230993b05
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/config_schema.json
@@ -0,0 +1,218 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Elasticsearch collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the Elasticsearch cluster.",
+ "type": "string",
+ "default": "http://127.0.0.1:9200",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "cluster_mode": {
+ "title": "Cluster mode",
+ "description": "If set, metrics will be collected for all nodes in the Elasticsearch cluster; otherwise, only for the local node where the collector is running.",
+ "type": "boolean",
+ "default": false
+ },
+ "collect_node_stats": {
+ "title": "Collect node stats",
+ "description": "Collect metrics about individual [nodes in the cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html).",
+ "type": "boolean",
+ "default": true
+ },
+ "collect_cluster_health": {
+ "title": "Collect cluster health",
+ "description": "Collect metrics about the overall [health of the cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html).",
+ "type": "boolean",
+ "default": true
+ },
+ "collect_cluster_stats": {
+ "title": "Collect cluster stats",
+ "description": "Collect high-level [cluster statistics](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html).",
+ "type": "boolean",
+ "default": true
+ },
+ "collect_indices_stats": {
+ "title": "Collect indices stats",
+ "description": "Collect metrics about individual [indices in the cluster](https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html).",
+ "type": "boolean",
+ "default": false
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "cluster_mode",
+ "collect_node_stats",
+ "collect_cluster_health",
+ "collect_cluster_stats",
+ "collect_indices_stats"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/elasticsearch.go b/src/go/plugin/go.d/modules/elasticsearch/elasticsearch.go
new file mode 100644
index 000000000..22280f2dd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/elasticsearch.go
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package elasticsearch
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("elasticsearch", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Elasticsearch {
+ return &Elasticsearch{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:9200",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ },
+ ClusterMode: false,
+
+ DoNodeStats: true,
+ DoClusterStats: true,
+ DoClusterHealth: true,
+ DoIndicesStats: false,
+ },
+
+ charts: &module.Charts{},
+ addClusterHealthChartsOnce: &sync.Once{},
+ addClusterStatsChartsOnce: &sync.Once{},
+ nodes: make(map[string]bool),
+ indices: make(map[string]bool),
+ }
+}
+
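+// A minimal job configuration sketch (assuming the standard go.d jobs file layout,
+// e.g. go.d/elasticsearch.conf; keys follow the yaml tags below):
+//
+//	jobs:
+//	  - name: local
+//	    url: http://127.0.0.1:9200
+//	    cluster_mode: no
+//	    collect_indices_stats: yes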
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ ClusterMode bool `yaml:"cluster_mode" json:"cluster_mode"`
+ DoNodeStats bool `yaml:"collect_node_stats" json:"collect_node_stats"`
+ DoClusterHealth bool `yaml:"collect_cluster_health" json:"collect_cluster_health"`
+ DoClusterStats bool `yaml:"collect_cluster_stats" json:"collect_cluster_stats"`
+ DoIndicesStats bool `yaml:"collect_indices_stats" json:"collect_indices_stats"`
+}
+
+type Elasticsearch struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+ addClusterHealthChartsOnce *sync.Once
+ addClusterStatsChartsOnce *sync.Once
+
+ httpClient *http.Client
+
+ clusterName string
+ nodes map[string]bool
+ indices map[string]bool
+}
+
+func (es *Elasticsearch) Configuration() any {
+ return es.Config
+}
+
+func (es *Elasticsearch) Init() error {
+ err := es.validateConfig()
+ if err != nil {
+ es.Errorf("check configuration: %v", err)
+ return err
+ }
+
+ httpClient, err := es.initHTTPClient()
+ if err != nil {
+ es.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ es.httpClient = httpClient
+
+ return nil
+}
+
+func (es *Elasticsearch) Check() error {
+ mx, err := es.collect()
+ if err != nil {
+ es.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (es *Elasticsearch) Charts() *module.Charts {
+ return es.charts
+}
+
+func (es *Elasticsearch) Collect() map[string]int64 {
+ mx, err := es.collect()
+ if err != nil {
+ es.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (es *Elasticsearch) Cleanup() {
+ if es.httpClient != nil {
+ es.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/elasticsearch_test.go b/src/go/plugin/go.d/modules/elasticsearch/elasticsearch_test.go
new file mode 100644
index 000000000..ca3aa526a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/elasticsearch_test.go
@@ -0,0 +1,743 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package elasticsearch
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"testing"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer842NodesLocalStats, _ = os.ReadFile("testdata/v8.4.2/nodes_local_stats.json")
+ dataVer842NodesStats, _ = os.ReadFile("testdata/v8.4.2/nodes_stats.json")
+ dataVer842ClusterHealth, _ = os.ReadFile("testdata/v8.4.2/cluster_health.json")
+ dataVer842ClusterStats, _ = os.ReadFile("testdata/v8.4.2/cluster_stats.json")
+ dataVer842CatIndicesStats, _ = os.ReadFile("testdata/v8.4.2/cat_indices_stats.json")
+ dataVer842Info, _ = os.ReadFile("testdata/v8.4.2/info.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer842NodesLocalStats": dataVer842NodesLocalStats,
+ "dataVer842NodesStats": dataVer842NodesStats,
+ "dataVer842ClusterHealth": dataVer842ClusterHealth,
+ "dataVer842ClusterStats": dataVer842ClusterStats,
+ "dataVer842CatIndicesStats": dataVer842CatIndicesStats,
+ "dataVer842Info": dataVer842Info,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestElasticsearch_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Elasticsearch{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestElasticsearch_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default": {
+ config: New().Config,
+ },
+ "all stats": {
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: "http://127.0.0.1:38001"},
+ },
+ DoNodeStats: true,
+ DoClusterHealth: true,
+ DoClusterStats: true,
+ DoIndicesStats: true,
+ },
+ },
+ "only node_stats": {
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: "http://127.0.0.1:38001"},
+ },
+ DoNodeStats: true,
+ DoClusterHealth: false,
+ DoClusterStats: false,
+ DoIndicesStats: false,
+ },
+ },
+ "URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ }},
+ },
+ "invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ }},
+ },
+ "all API calls are disabled": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: "http://127.0.0.1:38001"},
+ },
+ DoNodeStats: false,
+ DoClusterHealth: false,
+ DoClusterStats: false,
+ DoIndicesStats: false,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ es := New()
+ es.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, es.Init())
+ } else {
+ assert.NoError(t, es.Init())
+ }
+ })
+ }
+}
+
+func TestElasticsearch_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (es *Elasticsearch, cleanup func())
+ wantFail bool
+ }{
+ "valid data": {prepare: prepareElasticsearchValidData},
+ "invalid data": {prepare: prepareElasticsearchInvalidData, wantFail: true},
+ "404": {prepare: prepareElasticsearch404, wantFail: true},
+ "connection refused": {prepare: prepareElasticsearchConnectionRefused, wantFail: true},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ es, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, es.Check())
+ } else {
+ assert.NoError(t, es.Check())
+ }
+ })
+ }
+}
+
+func TestElasticsearch_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestElasticsearch_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestElasticsearch_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Elasticsearch
+ wantCollected map[string]int64
+ wantCharts int
+ }{
+ "v842: all nodes stats": {
+ prepare: func() *Elasticsearch {
+ es := New()
+ es.ClusterMode = true
+ es.DoNodeStats = true
+ es.DoClusterHealth = false
+ es.DoClusterStats = false
+ es.DoIndicesStats = false
+ return es
+ },
+ wantCharts: len(nodeChartsTmpl) * 3,
+ wantCollected: map[string]int64{
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_accounting_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_fielddata_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_in_flight_requests_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_model_inference_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_parent_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_request_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_http_current_open": 75,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_fielddata_evictions": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_fielddata_memory_size_in_bytes": 600,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_flush_total": 35130,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_flush_total_time_in_millis": 22204637,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_current": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_time_in_millis": 1100012973,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_total": 3667364815,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_refresh_total": 7720800,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_refresh_total_time_in_millis": 94297737,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_current": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_time_in_millis": 21316723,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_total": 42642621,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_current": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_time_in_millis": 51262303,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_total": 166820275,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_count": 320,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_doc_values_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_fixed_bit_set_memory_in_bytes": 1904,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_index_writer_memory_in_bytes": 262022568,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_norms_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_points_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_stored_fields_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_term_vectors_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_terms_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_version_map_memory_in_bytes": 49200018,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_translog_operations": 352376,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_translog_size_in_bytes": 447695989,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_translog_uncommitted_operations": 352376,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_translog_uncommitted_size_in_bytes": 447695989,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_count": 94,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_total_capacity_in_bytes": 4654848,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_used_in_bytes": 4654850,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_count": 858,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_total_capacity_in_bytes": 103114998135,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_used_in_bytes": 103114998135,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_old_collection_count": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_old_collection_time_in_millis": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_young_collection_count": 78652,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_young_collection_time_in_millis": 6014274,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_committed_in_bytes": 7864320000,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_used_in_bytes": 5059735552,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_used_percent": 64,
+ "node_Klg1CjgMTouentQcJlRGuA_process_max_file_descriptors": 1048576,
+ "node_Klg1CjgMTouentQcJlRGuA_process_open_file_descriptors": 1156,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_analyze_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_analyze_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_started_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_started_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_store_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_store_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_flush_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_flush_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_force_merge_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_force_merge_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_generic_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_generic_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_get_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_get_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_listener_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_listener_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_management_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_management_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_refresh_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_refresh_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_throttled_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_throttled_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_snapshot_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_snapshot_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_warmer_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_warmer_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_write_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_write_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_transport_rx_count": 1300324276,
+ "node_Klg1CjgMTouentQcJlRGuA_transport_rx_size_in_bytes": 1789333458217,
+ "node_Klg1CjgMTouentQcJlRGuA_transport_tx_count": 1300324275,
+ "node_Klg1CjgMTouentQcJlRGuA_transport_tx_size_in_bytes": 2927487680282,
+ "node_k_AifYMWQTykjUq3pgE_-w_breakers_accounting_tripped": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_breakers_fielddata_tripped": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_breakers_in_flight_requests_tripped": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_breakers_model_inference_tripped": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_breakers_parent_tripped": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_breakers_request_tripped": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_http_current_open": 14,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_fielddata_evictions": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_fielddata_memory_size_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_flush_total": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_flush_total_time_in_millis": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_indexing_index_current": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_indexing_index_time_in_millis": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_indexing_index_total": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_refresh_total": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_refresh_total_time_in_millis": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_search_fetch_current": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_search_fetch_time_in_millis": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_search_fetch_total": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_search_query_current": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_search_query_time_in_millis": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_search_query_total": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_count": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_doc_values_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_fixed_bit_set_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_index_writer_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_norms_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_points_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_stored_fields_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_term_vectors_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_terms_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_segments_version_map_memory_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_translog_operations": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_translog_size_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_translog_uncommitted_operations": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_indices_translog_uncommitted_size_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_direct_count": 19,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_direct_total_capacity_in_bytes": 2142214,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_direct_used_in_bytes": 2142216,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_mapped_count": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_mapped_total_capacity_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_buffer_pools_mapped_used_in_bytes": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_gc_collectors_old_collection_count": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_gc_collectors_old_collection_time_in_millis": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_gc_collectors_young_collection_count": 342994,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_gc_collectors_young_collection_time_in_millis": 768917,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_mem_heap_committed_in_bytes": 281018368,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_mem_heap_used_in_bytes": 178362704,
+ "node_k_AifYMWQTykjUq3pgE_-w_jvm_mem_heap_used_percent": 63,
+ "node_k_AifYMWQTykjUq3pgE_-w_process_max_file_descriptors": 1048576,
+ "node_k_AifYMWQTykjUq3pgE_-w_process_open_file_descriptors": 557,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_analyze_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_analyze_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_fetch_shard_started_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_fetch_shard_started_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_fetch_shard_store_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_fetch_shard_store_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_flush_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_flush_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_force_merge_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_force_merge_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_generic_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_generic_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_get_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_get_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_listener_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_listener_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_management_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_management_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_refresh_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_refresh_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_search_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_search_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_search_throttled_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_search_throttled_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_snapshot_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_snapshot_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_warmer_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_warmer_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_write_queue": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_thread_pool_write_rejected": 0,
+ "node_k_AifYMWQTykjUq3pgE_-w_transport_rx_count": 107632996,
+ "node_k_AifYMWQTykjUq3pgE_-w_transport_rx_size_in_bytes": 180620082152,
+ "node_k_AifYMWQTykjUq3pgE_-w_transport_tx_count": 107633007,
+ "node_k_AifYMWQTykjUq3pgE_-w_transport_tx_size_in_bytes": 420999501235,
+ "node_tk_U7GMCRkCG4FoOvusrng_breakers_accounting_tripped": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_breakers_fielddata_tripped": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_breakers_in_flight_requests_tripped": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_breakers_model_inference_tripped": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_breakers_parent_tripped": 93,
+ "node_tk_U7GMCRkCG4FoOvusrng_breakers_request_tripped": 1,
+ "node_tk_U7GMCRkCG4FoOvusrng_http_current_open": 84,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_fielddata_evictions": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_fielddata_memory_size_in_bytes": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_flush_total": 67895,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_flush_total_time_in_millis": 81917283,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_indexing_index_current": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_indexing_index_time_in_millis": 1244633519,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_indexing_index_total": 6550378755,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_refresh_total": 12359783,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_refresh_total_time_in_millis": 300152615,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_search_fetch_current": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_search_fetch_time_in_millis": 24517851,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_search_fetch_total": 25105951,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_search_query_current": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_search_query_time_in_millis": 158980385,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_search_query_total": 157912598,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_count": 291,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_doc_values_memory_in_bytes": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_fixed_bit_set_memory_in_bytes": 55672,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_index_writer_memory_in_bytes": 57432664,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_memory_in_bytes": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_norms_memory_in_bytes": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_points_memory_in_bytes": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_stored_fields_memory_in_bytes": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_term_vectors_memory_in_bytes": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_terms_memory_in_bytes": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_segments_version_map_memory_in_bytes": 568,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_translog_operations": 1449698,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_translog_size_in_bytes": 1214204014,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_translog_uncommitted_operations": 1449698,
+ "node_tk_U7GMCRkCG4FoOvusrng_indices_translog_uncommitted_size_in_bytes": 1214204014,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_direct_count": 90,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_direct_total_capacity_in_bytes": 4571711,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_direct_used_in_bytes": 4571713,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_mapped_count": 831,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_mapped_total_capacity_in_bytes": 99844219805,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_buffer_pools_mapped_used_in_bytes": 99844219805,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_gc_collectors_old_collection_count": 1,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_gc_collectors_old_collection_time_in_millis": 796,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_gc_collectors_young_collection_count": 139959,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_gc_collectors_young_collection_time_in_millis": 3581668,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_mem_heap_committed_in_bytes": 7864320000,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_mem_heap_used_in_bytes": 1884124192,
+ "node_tk_U7GMCRkCG4FoOvusrng_jvm_mem_heap_used_percent": 23,
+ "node_tk_U7GMCRkCG4FoOvusrng_process_max_file_descriptors": 1048576,
+ "node_tk_U7GMCRkCG4FoOvusrng_process_open_file_descriptors": 1180,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_analyze_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_analyze_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_fetch_shard_started_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_fetch_shard_started_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_fetch_shard_store_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_fetch_shard_store_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_flush_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_flush_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_force_merge_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_force_merge_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_generic_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_generic_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_get_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_get_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_listener_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_listener_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_management_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_management_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_refresh_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_refresh_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_search_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_search_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_search_throttled_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_search_throttled_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_snapshot_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_snapshot_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_warmer_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_warmer_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_write_queue": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_thread_pool_write_rejected": 0,
+ "node_tk_U7GMCRkCG4FoOvusrng_transport_rx_count": 2167879292,
+ "node_tk_U7GMCRkCG4FoOvusrng_transport_rx_size_in_bytes": 4905919297323,
+ "node_tk_U7GMCRkCG4FoOvusrng_transport_tx_count": 2167879293,
+ "node_tk_U7GMCRkCG4FoOvusrng_transport_tx_size_in_bytes": 2964638852652,
+ },
+ },
+ "v842: local node stats": {
+ prepare: func() *Elasticsearch {
+ es := New()
+ es.DoNodeStats = true
+ es.DoClusterHealth = false
+ es.DoClusterStats = false
+ es.DoIndicesStats = false
+ return es
+ },
+ wantCharts: len(nodeChartsTmpl),
+ wantCollected: map[string]int64{
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_accounting_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_fielddata_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_in_flight_requests_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_model_inference_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_parent_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_breakers_request_tripped": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_http_current_open": 73,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_fielddata_evictions": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_fielddata_memory_size_in_bytes": 600,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_flush_total": 35134,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_flush_total_time_in_millis": 22213090,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_current": 1,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_time_in_millis": 1100149051,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_indexing_index_total": 3667793202,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_refresh_total": 7721472,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_refresh_total_time_in_millis": 94304142,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_current": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_time_in_millis": 21316820,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_fetch_total": 42645288,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_current": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_time_in_millis": 51265805,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_search_query_total": 166823028,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_count": 307,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_doc_values_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_fixed_bit_set_memory_in_bytes": 2008,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_index_writer_memory_in_bytes": 240481008,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_norms_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_points_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_stored_fields_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_term_vectors_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_terms_memory_in_bytes": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_segments_version_map_memory_in_bytes": 44339216,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_translog_operations": 362831,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_translog_size_in_bytes": 453491882,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_translog_uncommitted_operations": 362831,
+ "node_Klg1CjgMTouentQcJlRGuA_indices_translog_uncommitted_size_in_bytes": 453491882,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_count": 94,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_total_capacity_in_bytes": 4654848,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_direct_used_in_bytes": 4654850,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_count": 844,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_total_capacity_in_bytes": 103411995802,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_buffer_pools_mapped_used_in_bytes": 103411995802,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_old_collection_count": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_old_collection_time_in_millis": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_young_collection_count": 78661,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_gc_collectors_young_collection_time_in_millis": 6014901,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_committed_in_bytes": 7864320000,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_used_in_bytes": 4337402488,
+ "node_Klg1CjgMTouentQcJlRGuA_jvm_mem_heap_used_percent": 55,
+ "node_Klg1CjgMTouentQcJlRGuA_process_max_file_descriptors": 1048576,
+ "node_Klg1CjgMTouentQcJlRGuA_process_open_file_descriptors": 1149,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_analyze_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_analyze_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_started_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_started_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_store_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_fetch_shard_store_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_flush_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_flush_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_force_merge_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_force_merge_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_generic_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_generic_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_get_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_get_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_listener_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_listener_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_management_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_management_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_refresh_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_refresh_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_throttled_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_search_throttled_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_snapshot_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_snapshot_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_warmer_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_warmer_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_write_queue": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_thread_pool_write_rejected": 0,
+ "node_Klg1CjgMTouentQcJlRGuA_transport_rx_count": 1300468666,
+ "node_Klg1CjgMTouentQcJlRGuA_transport_rx_size_in_bytes": 1789647854011,
+ "node_Klg1CjgMTouentQcJlRGuA_transport_tx_count": 1300468665,
+ "node_Klg1CjgMTouentQcJlRGuA_transport_tx_size_in_bytes": 2927853534431,
+ },
+ },
+ "v842: only cluster_health": {
+ prepare: func() *Elasticsearch {
+ es := New()
+ es.DoNodeStats = false
+ es.DoClusterHealth = true
+ es.DoClusterStats = false
+ es.DoIndicesStats = false
+ return es
+ },
+ wantCharts: len(clusterHealthChartsTmpl),
+ wantCollected: map[string]int64{
+ "cluster_active_primary_shards": 97,
+ "cluster_active_shards": 194,
+ "cluster_active_shards_percent_as_number": 100,
+ "cluster_delayed_unassigned_shards": 0,
+ "cluster_initializing_shards": 0,
+ "cluster_number_of_data_nodes": 2,
+ "cluster_number_of_in_flight_fetch": 0,
+ "cluster_number_of_nodes": 3,
+ "cluster_number_of_pending_tasks": 0,
+ "cluster_relocating_shards": 0,
+ "cluster_status_green": 1,
+ "cluster_status_red": 0,
+ "cluster_status_yellow": 0,
+ "cluster_unassigned_shards": 0,
+ },
+ },
+ "v842: only cluster_stats": {
+ prepare: func() *Elasticsearch {
+ es := New()
+ es.DoNodeStats = false
+ es.DoClusterHealth = false
+ es.DoClusterStats = true
+ es.DoIndicesStats = false
+ return es
+ },
+ wantCharts: len(clusterStatsChartsTmpl),
+ wantCollected: map[string]int64{
+ "cluster_indices_count": 97,
+ "cluster_indices_docs_count": 402750703,
+ "cluster_indices_query_cache_hit_count": 96838726,
+ "cluster_indices_query_cache_miss_count": 587768226,
+ "cluster_indices_shards_primaries": 97,
+ "cluster_indices_shards_replication": 1,
+ "cluster_indices_shards_total": 194,
+ "cluster_indices_store_size_in_bytes": 380826136962,
+ "cluster_nodes_count_coordinating_only": 0,
+ "cluster_nodes_count_data": 0,
+ "cluster_nodes_count_data_cold": 0,
+ "cluster_nodes_count_data_content": 2,
+ "cluster_nodes_count_data_frozen": 0,
+ "cluster_nodes_count_data_hot": 2,
+ "cluster_nodes_count_data_warm": 0,
+ "cluster_nodes_count_ingest": 2,
+ "cluster_nodes_count_master": 3,
+ "cluster_nodes_count_ml": 0,
+ "cluster_nodes_count_remote_cluster_client": 2,
+ "cluster_nodes_count_total": 3,
+ "cluster_nodes_count_transform": 2,
+ "cluster_nodes_count_voting_only": 1,
+ },
+ },
+ "v842: only indices_stats": {
+ prepare: func() *Elasticsearch {
+ es := New()
+ es.DoNodeStats = false
+ es.DoClusterHealth = false
+ es.DoClusterStats = false
+ es.DoIndicesStats = true
+ return es
+ },
+ wantCharts: len(nodeIndexChartsTmpl) * 3,
+ wantCollected: map[string]int64{
+ "node_index_my-index-000001_stats_docs_count": 1,
+ "node_index_my-index-000001_stats_health_green": 0,
+ "node_index_my-index-000001_stats_health_red": 0,
+ "node_index_my-index-000001_stats_health_yellow": 1,
+ "node_index_my-index-000001_stats_shards_count": 1,
+ "node_index_my-index-000001_stats_store_size_in_bytes": 208,
+ "node_index_my-index-000002_stats_docs_count": 1,
+ "node_index_my-index-000002_stats_health_green": 0,
+ "node_index_my-index-000002_stats_health_red": 0,
+ "node_index_my-index-000002_stats_health_yellow": 1,
+ "node_index_my-index-000002_stats_shards_count": 1,
+ "node_index_my-index-000002_stats_store_size_in_bytes": 208,
+ "node_index_my-index-000003_stats_docs_count": 1,
+ "node_index_my-index-000003_stats_health_green": 0,
+ "node_index_my-index-000003_stats_health_red": 0,
+ "node_index_my-index-000003_stats_health_yellow": 1,
+ "node_index_my-index-000003_stats_shards_count": 1,
+ "node_index_my-index-000003_stats_store_size_in_bytes": 208,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ es, cleanup := prepareElasticsearch(t, test.prepare)
+ defer cleanup()
+
+ var mx map[string]int64
+ for i := 0; i < 10; i++ {
+ mx = es.Collect()
+ }
+
+ //m := mx
+ //l := make([]string, 0)
+ //for k := range m {
+ // l = append(l, k)
+ //}
+ //sort.Strings(l)
+ //for _, value := range l {
+ // fmt.Println(fmt.Sprintf("\"%s\": %d,", value, m[value]))
+ //}
+ //return
+
+ assert.Equal(t, test.wantCollected, mx)
+ assert.Len(t, *es.Charts(), test.wantCharts)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, es, mx)
+ })
+ }
+}
+
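+// ensureCollectedHasAllChartsDimsVarsIDs asserts that every dimension and variable of each non-obsolete chart has a matching key in the collected metrics map.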
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, es *Elasticsearch, collected map[string]int64) {
+ for _, chart := range *es.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
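+// prepareElasticsearch creates a collector with createES, points it at a stub Elasticsearch endpoint, and initializes it; the returned cleanup closes the test server.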
+func prepareElasticsearch(t *testing.T, createES func() *Elasticsearch) (es *Elasticsearch, cleanup func()) {
+ t.Helper()
+ srv := prepareElasticsearchEndpoint()
+
+ es = createES()
+ es.URL = srv.URL
+ require.NoError(t, es.Init())
+
+ return es, srv.Close
+}
+
+func prepareElasticsearchValidData(t *testing.T) (es *Elasticsearch, cleanup func()) {
+ return prepareElasticsearch(t, New)
+}
+
+func prepareElasticsearchInvalidData(t *testing.T) (*Elasticsearch, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ es := New()
+ es.URL = srv.URL
+ require.NoError(t, es.Init())
+
+ return es, srv.Close
+}
+
+func prepareElasticsearch404(t *testing.T) (*Elasticsearch, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ es := New()
+ es.URL = srv.URL
+ require.NoError(t, es.Init())
+
+ return es, srv.Close
+}
+
+func prepareElasticsearchConnectionRefused(t *testing.T) (*Elasticsearch, func()) {
+ t.Helper()
+ es := New()
+ es.URL = "http://127.0.0.1:38001"
+ require.NoError(t, es.Init())
+
+ return es, func() {}
+}
+
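+// prepareElasticsearchEndpoint starts a test HTTP server that serves the recorded v8.4.2 API responses for the endpoints the collector queries.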
+func prepareElasticsearchEndpoint() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathNodesStats:
+ _, _ = w.Write(dataVer842NodesStats)
+ case urlPathLocalNodeStats:
+ _, _ = w.Write(dataVer842NodesLocalStats)
+ case urlPathClusterHealth:
+ _, _ = w.Write(dataVer842ClusterHealth)
+ case urlPathClusterStats:
+ _, _ = w.Write(dataVer842ClusterStats)
+ case urlPathIndicesStats:
+ _, _ = w.Write(dataVer842CatIndicesStats)
+ case "/":
+ _, _ = w.Write(dataVer842Info)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/init.go b/src/go/plugin/go.d/modules/elasticsearch/init.go
new file mode 100644
index 000000000..f87b594f8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/init.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package elasticsearch
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
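+// validateConfig checks that a URL is set, that at least one API data group is enabled, and that the HTTP request settings produce a valid request.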
+func (es *Elasticsearch) validateConfig() error {
+ if es.URL == "" {
+ return errors.New("URL not set")
+ }
+ if !(es.DoNodeStats || es.DoClusterHealth || es.DoClusterStats || es.DoIndicesStats) {
+ return errors.New("all API calls are disabled")
+ }
+ if _, err := web.NewHTTPRequest(es.Request); err != nil {
+ return err
+ }
+ return nil
+}
+
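+// initHTTPClient builds the HTTP client from the module's client settings.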
+func (es *Elasticsearch) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(es.Client)
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/integrations/elasticsearch.md b/src/go/plugin/go.d/modules/elasticsearch/integrations/elasticsearch.md
new file mode 100644
index 000000000..ab6f7d00d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/integrations/elasticsearch.md
@@ -0,0 +1,378 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/elasticsearch/integrations/elasticsearch.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/elasticsearch/metadata.yaml"
+sidebar_label: "Elasticsearch"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Search Engines"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Elasticsearch
+
+
+<img src="https://netdata.cloud/img/elasticsearch.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: elasticsearch
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance and health of the Elasticsearch cluster.
+
+
+It uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.
+
+Used endpoints:
+
+| Endpoint | Description | API |
+|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|
+| `/` | Node info | |
+| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |
+| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |
+| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |
+| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |
+
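+To verify these endpoints are reachable from the Netdata host, you can query one of them directly with `curl` (a quick check assuming the default `http://127.0.0.1:9200`):
+
+```bash
+curl "http://127.0.0.1:9200/_cluster/health?pretty"
+```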
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on localhost by attempting to connect to port 9200:
+
+- http://127.0.0.1:9200
+- https://127.0.0.1:9200
+
+
+#### Limits
+
+By default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.
+
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per node
+
+These metrics refer to the cluster node.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |
+| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |
+| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| elasticsearch.node_indices_indexing | index | operations/s |
+| elasticsearch.node_indices_indexing_current | index | operations |
+| elasticsearch.node_indices_indexing_time | index | milliseconds |
+| elasticsearch.node_indices_search | queries, fetches | operations/s |
+| elasticsearch.node_indices_search_current | queries, fetches | operations |
+| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |
+| elasticsearch.node_indices_refresh | refresh | operations/s |
+| elasticsearch.node_indices_refresh_time | refresh | milliseconds |
+| elasticsearch.node_indices_flush | flush | operations/s |
+| elasticsearch.node_indices_flush_time | flush | milliseconds |
+| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |
+| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |
+| elasticsearch.node_indices_segments_count | segments | segments |
+| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |
+| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |
+| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |
+| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |
+| elasticsearch.node_file_descriptors | open | fd |
+| elasticsearch.node_jvm_heap | inuse | percentage |
+| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |
+| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |
+| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |
+| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |
+| elasticsearch.node_jvm_gc_count | young, old | gc/s |
+| elasticsearch.node_jvm_gc_time | young, old | milliseconds |
+| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |
+| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |
+| elasticsearch.node_cluster_communication_packets | received, sent | pps |
+| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |
+| elasticsearch.node_http_connections | open | connections |
+| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |
+
+### Per cluster
+
+These metrics refer to the cluster.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| elasticsearch.cluster_health_status | green, yellow, red | status |
+| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |
+| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unaasigned | shards |
+| elasticsearch.cluster_pending_tasks | pending | tasks |
+| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |
+| elasticsearch.cluster_indices_count | indices | indices |
+| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |
+| elasticsearch.cluster_indices_docs_count | docs | docs |
+| elasticsearch.cluster_indices_store_size | size | bytes |
+| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |
+| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |
+
+### Per index
+
+These metrics refer to the index.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |
+| index | Name of the index. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| elasticsearch.node_index_health | green, yellow, red | status |
+| elasticsearch.node_index_shards_count | shards | shards |
+| elasticsearch.node_index_docs_count | docs | docs |
+| elasticsearch.node_index_store_size | store_size | bytes |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |
+| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |
+| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |
+| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |
+| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/elasticsearch.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/elasticsearch.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9200 | yes |
+| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |
+| collect_node_stats | Controls whether to collect nodes metrics. | true | no |
+| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |
+| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |
+| collect_indices_stats | Controls whether to collect indices metrics. | false | no |
+| timeout | HTTP request timeout. | 2 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic single node mode
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+
+```
+##### Cluster mode
+
+Cluster mode example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+ cluster_mode: yes
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Elasticsearch with HTTPS enabled and a self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9200
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+
+ - name: remote
+ url: http://192.0.2.1:9200
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m elasticsearch
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `elasticsearch` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep elasticsearch
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep elasticsearch /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep elasticsearch
+```
+
+
diff --git a/src/go/plugin/go.d/modules/elasticsearch/integrations/opensearch.md b/src/go/plugin/go.d/modules/elasticsearch/integrations/opensearch.md
new file mode 100644
index 000000000..9426ada75
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/integrations/opensearch.md
@@ -0,0 +1,378 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/elasticsearch/integrations/opensearch.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/elasticsearch/metadata.yaml"
+sidebar_label: "OpenSearch"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Search Engines"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenSearch
+
+
+<img src="https://netdata.cloud/img/opensearch.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: elasticsearch
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance and health of the Elasticsearch cluster.
+
+
+It uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.
+
+Used endpoints:
+
+| Endpoint | Description | API |
+|------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|
+| `/` | Node info | |
+| `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |
+| `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |
+| `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |
+| `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |
+
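+To verify these endpoints are reachable from the Netdata host, you can query one of them directly with `curl` (a quick check assuming the default `http://127.0.0.1:9200`):
+
+```bash
+curl "http://127.0.0.1:9200/_cluster/health?pretty"
+```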
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on localhost by attempting to connect to port 9200:
+
+- http://127.0.0.1:9200
+- https://127.0.0.1:9200
+
+
+#### Limits
+
+By default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.
+
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per node
+
+These metrics refer to the cluster node.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |
+| node_name | Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name). |
+| host | Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| elasticsearch.node_indices_indexing | index | operations/s |
+| elasticsearch.node_indices_indexing_current | index | operations |
+| elasticsearch.node_indices_indexing_time | index | milliseconds |
+| elasticsearch.node_indices_search | queries, fetches | operations/s |
+| elasticsearch.node_indices_search_current | queries, fetches | operations |
+| elasticsearch.node_indices_search_time | queries, fetches | milliseconds |
+| elasticsearch.node_indices_refresh | refresh | operations/s |
+| elasticsearch.node_indices_refresh_time | refresh | milliseconds |
+| elasticsearch.node_indices_flush | flush | operations/s |
+| elasticsearch.node_indices_flush_time | flush | milliseconds |
+| elasticsearch.node_indices_fielddata_memory_usage | used | bytes |
+| elasticsearch.node_indices_fielddata_evictions | evictions | operations/s |
+| elasticsearch.node_indices_segments_count | segments | segments |
+| elasticsearch.node_indices_segments_memory_usage_total | used | bytes |
+| elasticsearch.node_indices_segments_memory_usage | terms, stored_fields, term_vectors, norms, points, doc_values, index_writer, version_map, fixed_bit_set | bytes |
+| elasticsearch.node_indices_translog_operations | total, uncommitted | operations |
+| elasticsearch.node_indices_translog_size | total, uncommitted | bytes |
+| elasticsearch.node_file_descriptors | open | fd |
+| elasticsearch.node_jvm_heap | inuse | percentage |
+| elasticsearch.node_jvm_heap_bytes | committed, used | bytes |
+| elasticsearch.node_jvm_buffer_pools_count | direct, mapped | pools |
+| elasticsearch.node_jvm_buffer_pool_direct_memory | total, used | bytes |
+| elasticsearch.node_jvm_buffer_pool_mapped_memory | total, used | bytes |
+| elasticsearch.node_jvm_gc_count | young, old | gc/s |
+| elasticsearch.node_jvm_gc_time | young, old | milliseconds |
+| elasticsearch.node_thread_pool_queued | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |
+| elasticsearch.node_thread_pool_rejected | generic, search, search_throttled, get, analyze, write, snapshot, warmer, refresh, listener, fetch_shard_started, fetch_shard_store, flush, force_merge, management | threads |
+| elasticsearch.node_cluster_communication_packets | received, sent | pps |
+| elasticsearch.node_cluster_communication_traffic | received, sent | bytes/s |
+| elasticsearch.node_http_connections | open | connections |
+| elasticsearch.node_breakers_trips | requests, fielddata, in_flight_requests, model_inference, accounting, parent | trips/s |
+
+### Per cluster
+
+These metrics refer to the cluster.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| elasticsearch.cluster_health_status | green, yellow, red | status |
+| elasticsearch.cluster_number_of_nodes | nodes, data_nodes | nodes |
+| elasticsearch.cluster_shards_count | active_primary, active, relocating, initializing, unassigned, delayed_unaasigned | shards |
+| elasticsearch.cluster_pending_tasks | pending | tasks |
+| elasticsearch.cluster_number_of_in_flight_fetch | in_flight_fetch | fetches |
+| elasticsearch.cluster_indices_count | indices | indices |
+| elasticsearch.cluster_indices_shards_count | total, primaries, replication | shards |
+| elasticsearch.cluster_indices_docs_count | docs | docs |
+| elasticsearch.cluster_indices_store_size | size | bytes |
+| elasticsearch.cluster_indices_query_cache | hit, miss | events/s |
+| elasticsearch.cluster_nodes_by_role_count | coordinating_only, data, data_cold, data_content, data_frozen, data_hot, data_warm, ingest, master, ml, remote_cluster_client, voting_only | nodes |
+
+### Per index
+
+These metrics refer to the index.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cluster_name | Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name). |
+| index | Name of the index. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| elasticsearch.node_index_health | green, yellow, red | status |
+| elasticsearch.node_index_shards_count | shards | shards |
+| elasticsearch.node_index_docs_count | docs | docs |
+| elasticsearch.node_index_store_size | store_size | bytes |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ elasticsearch_node_indices_search_time_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, queries run slowly. |
+| [ elasticsearch_node_indices_search_time_fetch ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_indices_search_time | search performance is degraded, fetches run slowly. |
+| [ elasticsearch_cluster_health_status_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is red. |
+| [ elasticsearch_cluster_health_status_yellow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.cluster_health_status | cluster health status is yellow. |
+| [ elasticsearch_node_index_health_red ](https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf) | elasticsearch.node_index_health | node index $label:index health status is red. |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/elasticsearch.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/elasticsearch.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9200 | yes |
+| cluster_mode | Controls whether to collect metrics for all nodes in the cluster or only for the local node. | false | no |
+| collect_node_stats | Controls whether to collect nodes metrics. | true | no |
+| collect_cluster_health | Controls whether to collect cluster health metrics. | true | no |
+| collect_cluster_stats | Controls whether to collect cluster stats metrics. | true | no |
+| collect_indices_stats | Controls whether to collect indices metrics. | false | no |
+| timeout | HTTP request timeout. | 2 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic single node mode
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+
+```
+##### Cluster mode
+
+Cluster mode example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+ cluster_mode: yes
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Elasticsearch with HTTPS enabled and a self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9200
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+
+ - name: remote
+ url: http://192.0.2.1:9200
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `elasticsearch` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m elasticsearch
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `elasticsearch` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep elasticsearch
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep elasticsearch /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep elasticsearch
+```
+
+
diff --git a/src/go/plugin/go.d/modules/elasticsearch/metadata.yaml b/src/go/plugin/go.d/modules/elasticsearch/metadata.yaml
new file mode 100644
index 000000000..9ee892948
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/metadata.yaml
@@ -0,0 +1,634 @@
+plugin_name: go.d.plugin
+modules:
+ - &module
+ meta: &meta
+ id: collector-go.d.plugin-elasticsearch
+ module_name: elasticsearch
+ plugin_name: go.d.plugin
+ monitored_instance:
+ name: Elasticsearch
+ link: https://www.elastic.co/elasticsearch/
+ icon_filename: elasticsearch.svg
+ categories:
+ - data-collection.search-engines
+ keywords:
+ - elastic
+ - elasticsearch
+ - opensearch
+ - search engine
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the performance and health of the Elasticsearch cluster.
+ method_description: |
+ It uses [Cluster APIs](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster.html) to collect metrics.
+
+ Used endpoints:
+
+ | Endpoint | Description | API |
+ |------------------------|----------------------|-------------------------------------------------------------------------------------------------------------|
+ | `/` | Node info | |
+ | `/_nodes/stats` | Nodes metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |
+ | `/_nodes/_local/stats` | Local node metrics | [Nodes stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html) |
+ | `/_cluster/health` | Cluster health stats | [Cluster health API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html) |
+ | `/_cluster/stats` | Cluster metrics | [Cluster stats API](https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html) |
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects instances running on localhost by attempting to connect to port 9200:
+
+ - http://127.0.0.1:9200
+ - https://127.0.0.1:9200
+ limits:
+ description: |
+ By default, this collector monitors only the node it is connected to. To monitor all cluster nodes, set the `cluster_mode` configuration option to `yes`.
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "go.d/elasticsearch.conf"
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:9200
+ required: true
+ - name: cluster_mode
+ description: Controls whether to collect metrics for all nodes in the cluster or only for the local node.
+ default_value: "false"
+ required: false
+ - name: collect_node_stats
+ description: Controls whether to collect nodes metrics.
+ default_value: "true"
+ required: false
+ - name: collect_cluster_health
+ description: Controls whether to collect cluster health metrics.
+ default_value: "true"
+ required: false
+ - name: collect_cluster_stats
+ description: Controls whether to collect cluster stats metrics.
+ default_value: "true"
+ required: false
+ - name: collect_indices_stats
+ description: Controls whether to collect indices metrics.
+ default_value: "false"
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 2
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic single node mode
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+ - name: Cluster mode
+ description: Cluster mode example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+ cluster_mode: yes
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+            description: Elasticsearch with HTTPS enabled and a self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:9200
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9200
+
+ - name: remote
+ url: http://192.0.2.1:9200
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: elasticsearch_node_indices_search_time_query
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf
+ metric: elasticsearch.node_indices_search_time
+ info: search performance is degraded, queries run slowly.
+ - name: elasticsearch_node_indices_search_time_fetch
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf
+ metric: elasticsearch.node_indices_search_time
+ info: search performance is degraded, fetches run slowly.
+ - name: elasticsearch_cluster_health_status_red
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf
+ metric: elasticsearch.cluster_health_status
+ info: cluster health status is red.
+ - name: elasticsearch_cluster_health_status_yellow
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf
+ metric: elasticsearch.cluster_health_status
+ info: cluster health status is yellow.
+ - name: elasticsearch_node_index_health_red
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/elasticsearch.conf
+ metric: elasticsearch.node_index_health
+ info: node index $label:index health status is red.
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: node
+ description: These metrics refer to the cluster node.
+ labels:
+ - name: cluster_name
+ description: |
+ Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name).
+ - name: node_name
+ description: |
+ Human-readable identifier for the node. Based on the [Node name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#node-name).
+ - name: host
+ description: |
+ Network host for the node, based on the [Network host setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#network.host).
+ metrics:
+ - name: elasticsearch.node_indices_indexing
+ description: Indexing Operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: index
+ - name: elasticsearch.node_indices_indexing_current
+ description: Indexing Operations Current
+ unit: operations
+ chart_type: line
+ dimensions:
+ - name: index
+ - name: elasticsearch.node_indices_indexing_time
+ description: Time Spent On Indexing Operations
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: index
+ - name: elasticsearch.node_indices_search
+ description: Search Operations
+ unit: operations/s
+ chart_type: stacked
+ dimensions:
+ - name: queries
+ - name: fetches
+ - name: elasticsearch.node_indices_search_current
+ description: Search Operations Current
+ unit: operations
+ chart_type: stacked
+ dimensions:
+ - name: queries
+ - name: fetches
+ - name: elasticsearch.node_indices_search_time
+              description: Time Spent On Search Operations
+ unit: milliseconds
+ chart_type: stacked
+ dimensions:
+ - name: queries
+ - name: fetches
+ - name: elasticsearch.node_indices_refresh
+ description: Refresh Operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: refresh
+ - name: elasticsearch.node_indices_refresh_time
+ description: Time Spent On Refresh Operations
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: refresh
+ - name: elasticsearch.node_indices_flush
+ description: Flush Operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: flush
+ - name: elasticsearch.node_indices_flush_time
+ description: Time Spent On Flush Operations
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: flush
+ - name: elasticsearch.node_indices_fielddata_memory_usage
+ description: Fielddata Cache Memory Usage
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: elasticsearch.node_indices_fielddata_evictions
+ description: Fielddata Evictions
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: evictions
+ - name: elasticsearch.node_indices_segments_count
+ description: Segments Count
+ unit: segments
+ chart_type: line
+ dimensions:
+ - name: segments
+ - name: elasticsearch.node_indices_segments_memory_usage_total
+ description: Segments Memory Usage Total
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: elasticsearch.node_indices_segments_memory_usage
+ description: Segments Memory Usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: terms
+ - name: stored_fields
+ - name: term_vectors
+ - name: norms
+ - name: points
+ - name: doc_values
+ - name: index_writer
+ - name: version_map
+ - name: fixed_bit_set
+ - name: elasticsearch.node_indices_translog_operations
+ description: Translog Operations
+ unit: operations
+ chart_type: area
+ dimensions:
+ - name: total
+ - name: uncommitted
+ - name: elasticsearch.node_indices_translog_size
+ description: Translog Size
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: total
+ - name: uncommitted
+ - name: elasticsearch.node_file_descriptors
+ description: Process File Descriptors
+ unit: fd
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: elasticsearch.node_jvm_heap
+ description: JVM Heap Percentage Currently in Use
+ unit: percentage
+ chart_type: area
+ dimensions:
+ - name: inuse
+ - name: elasticsearch.node_jvm_heap_bytes
+ description: JVM Heap Commit And Usage
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: elasticsearch.node_jvm_buffer_pools_count
+ description: JVM Buffer Pools Count
+ unit: pools
+ chart_type: line
+ dimensions:
+ - name: direct
+ - name: mapped
+ - name: elasticsearch.node_jvm_buffer_pool_direct_memory
+ description: JVM Buffer Pool Direct Memory
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: total
+ - name: used
+ - name: elasticsearch.node_jvm_buffer_pool_mapped_memory
+ description: JVM Buffer Pool Mapped Memory
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: total
+ - name: used
+ - name: elasticsearch.node_jvm_gc_count
+ description: JVM Garbage Collections
+ unit: gc/s
+ chart_type: stacked
+ dimensions:
+ - name: young
+ - name: old
+ - name: elasticsearch.node_jvm_gc_time
+ description: JVM Time Spent On Garbage Collections
+ unit: milliseconds
+ chart_type: stacked
+ dimensions:
+ - name: young
+ - name: old
+ - name: elasticsearch.node_thread_pool_queued
+ description: Thread Pool Queued Threads Count
+ unit: threads
+ chart_type: stacked
+ dimensions:
+ - name: generic
+ - name: search
+ - name: search_throttled
+ - name: get
+ - name: analyze
+ - name: write
+ - name: snapshot
+ - name: warmer
+ - name: refresh
+ - name: listener
+ - name: fetch_shard_started
+ - name: fetch_shard_store
+ - name: flush
+ - name: force_merge
+ - name: management
+ - name: elasticsearch.node_thread_pool_rejected
+ description: Thread Pool Rejected Threads Count
+ unit: threads
+ chart_type: stacked
+ dimensions:
+ - name: generic
+ - name: search
+ - name: search_throttled
+ - name: get
+ - name: analyze
+ - name: write
+ - name: snapshot
+ - name: warmer
+ - name: refresh
+ - name: listener
+ - name: fetch_shard_started
+ - name: fetch_shard_store
+ - name: flush
+ - name: force_merge
+ - name: management
+ - name: elasticsearch.node_cluster_communication_packets
+ description: Cluster Communication
+ unit: pps
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: elasticsearch.node_cluster_communication_traffic
+ description: Cluster Communication Bandwidth
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: elasticsearch.node_http_connections
+ description: HTTP Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: elasticsearch.node_breakers_trips
+ description: Circuit Breaker Trips Count
+ unit: trips/s
+ chart_type: stacked
+ dimensions:
+ - name: requests
+ - name: fielddata
+ - name: in_flight_requests
+ - name: model_inference
+ - name: accounting
+ - name: parent
+ - name: cluster
+ description: These metrics refer to the cluster.
+ labels:
+ - name: cluster_name
+ description: |
+ Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name).
+ metrics:
+ - name: elasticsearch.cluster_health_status
+ description: Cluster Status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: yellow
+ - name: red
+ - name: elasticsearch.cluster_number_of_nodes
+ description: Cluster Nodes Count
+ unit: nodes
+ chart_type: line
+ dimensions:
+ - name: nodes
+ - name: data_nodes
+ - name: elasticsearch.cluster_shards_count
+ description: Cluster Shards Count
+ unit: shards
+ chart_type: line
+ dimensions:
+ - name: active_primary
+ - name: active
+ - name: relocating
+ - name: initializing
+ - name: unassigned
+ - name: delayed_unassigned
+ - name: elasticsearch.cluster_pending_tasks
+ description: Cluster Pending Tasks
+ unit: tasks
+ chart_type: line
+ dimensions:
+ - name: pending
+ - name: elasticsearch.cluster_number_of_in_flight_fetch
+ description: Cluster Unfinished Fetches
+ unit: fetches
+ chart_type: line
+ dimensions:
+ - name: in_flight_fetch
+ - name: elasticsearch.cluster_indices_count
+ description: Cluster Indices Count
+ unit: indices
+ chart_type: line
+ dimensions:
+ - name: indices
+ - name: elasticsearch.cluster_indices_shards_count
+ description: Cluster Indices Shards Count
+ unit: shards
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: primaries
+ - name: replication
+ - name: elasticsearch.cluster_indices_docs_count
+ description: Cluster Indices Docs Count
+ unit: docs
+ chart_type: line
+ dimensions:
+ - name: docs
+ - name: elasticsearch.cluster_indices_store_size
+ description: Cluster Indices Store Size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: elasticsearch.cluster_indices_query_cache
+ description: Cluster Indices Query Cache
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: hit
+ - name: miss
+ - name: elasticsearch.cluster_nodes_by_role_count
+ description: Cluster Nodes By Role Count
+ unit: nodes
+ chart_type: line
+ dimensions:
+ - name: coordinating_only
+ - name: data
+ - name: data_cold
+ - name: data_content
+ - name: data_frozen
+ - name: data_hot
+ - name: data_warm
+ - name: ingest
+ - name: master
+ - name: ml
+ - name: remote_cluster_client
+ - name: voting_only
+ - name: index
+ description: These metrics refer to the index.
+ labels:
+ - name: cluster_name
+ description: |
+ Name of the cluster. Based on the [Cluster name setting](https://www.elastic.co/guide/en/elasticsearch/reference/current/important-settings.html#cluster-name).
+ - name: index
+ description: Name of the index.
+ metrics:
+ - name: elasticsearch.node_index_health
+ description: Index Health
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: yellow
+ - name: red
+ - name: elasticsearch.node_index_shards_count
+ description: Index Shards Count
+ unit: shards
+ chart_type: line
+ dimensions:
+ - name: shards
+ - name: elasticsearch.node_index_docs_count
+ description: Index Docs Count
+ unit: docs
+ chart_type: line
+ dimensions:
+ - name: docs
+ - name: elasticsearch.node_index_store_size
+ description: Index Store Size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: store_size
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-opensearch
+ monitored_instance:
+ name: OpenSearch
+ link: https://opensearch.org/
+ icon_filename: opensearch.svg
+ categories:
+ - data-collection.search-engines
diff --git a/src/go/plugin/go.d/modules/elasticsearch/metrics.go b/src/go/plugin/go.d/modules/elasticsearch/metrics.go
new file mode 100644
index 000000000..e838dc643
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/metrics.go
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package elasticsearch
+
+// https://www.elastic.co/guide/en/elasticsearch/reference/current/docker.html
+
+type esMetrics struct {
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-nodes-stats.html
+ NodesStats *esNodesStats
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-health.html
+ ClusterHealth *esClusterHealth
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-stats.html
+ ClusterStats *esClusterStats
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/cat-indices.html
+ LocalIndicesStats []esIndexStats
+}
+
+func (m esMetrics) empty() bool {
+ switch {
+ case m.hasNodesStats(), m.hasClusterHealth(), m.hasClusterStats(), m.hasLocalIndicesStats():
+ return false
+ }
+ return true
+}
+
+func (m esMetrics) hasNodesStats() bool { return m.NodesStats != nil && len(m.NodesStats.Nodes) > 0 }
+func (m esMetrics) hasClusterHealth() bool { return m.ClusterHealth != nil }
+func (m esMetrics) hasClusterStats() bool { return m.ClusterStats != nil }
+func (m esMetrics) hasLocalIndicesStats() bool { return len(m.LocalIndicesStats) > 0 }
+
+type (
+ esNodesStats struct {
+ ClusterName string `json:"cluster_name"`
+ Nodes map[string]*esNodeStats `json:"nodes"`
+ }
+ esNodeStats struct {
+ Name string
+ Host string
+ Indices struct {
+ Indexing struct {
+ IndexTotal float64 `stm:"index_total" json:"index_total"`
+ IndexCurrent float64 `stm:"index_current" json:"index_current"`
+ IndexTimeInMillis float64 `stm:"index_time_in_millis" json:"index_time_in_millis"`
+ } `stm:"indexing"`
+ Search struct {
+ FetchTotal float64 `stm:"fetch_total" json:"fetch_total"`
+ FetchCurrent float64 `stm:"fetch_current" json:"fetch_current"`
+ FetchTimeInMillis float64 `stm:"fetch_time_in_millis" json:"fetch_time_in_millis"`
+ QueryTotal float64 `stm:"query_total" json:"query_total"`
+ QueryCurrent float64 `stm:"query_current" json:"query_current"`
+ QueryTimeInMillis float64 `stm:"query_time_in_millis" json:"query_time_in_millis"`
+ } `stm:"search"`
+ Refresh struct {
+ Total float64 `stm:"total"`
+ TimeInMillis float64 `stm:"total_time_in_millis" json:"total_time_in_millis"`
+ } `stm:"refresh"`
+ Flush struct {
+ Total float64 `stm:"total"`
+ TimeInMillis float64 `stm:"total_time_in_millis" json:"total_time_in_millis"`
+ } `stm:"flush"`
+ FieldData struct {
+ MemorySizeInBytes float64 `stm:"memory_size_in_bytes" json:"memory_size_in_bytes"`
+ Evictions float64 `stm:"evictions"`
+ } `stm:"fielddata"`
+ Segments struct {
+ Count float64 `stm:"count" json:"count"`
+ MemoryInBytes float64 `stm:"memory_in_bytes" json:"memory_in_bytes"`
+ TermsMemoryInBytes float64 `stm:"terms_memory_in_bytes" json:"terms_memory_in_bytes"`
+ StoredFieldsMemoryInBytes float64 `stm:"stored_fields_memory_in_bytes" json:"stored_fields_memory_in_bytes"`
+ TermVectorsMemoryInBytes float64 `stm:"term_vectors_memory_in_bytes" json:"term_vectors_memory_in_bytes"`
+ NormsMemoryInBytes float64 `stm:"norms_memory_in_bytes" json:"norms_memory_in_bytes"`
+ PointsMemoryInBytes float64 `stm:"points_memory_in_bytes" json:"points_memory_in_bytes"`
+ DocValuesMemoryInBytes float64 `stm:"doc_values_memory_in_bytes" json:"doc_values_memory_in_bytes"`
+ IndexWriterMemoryInBytes float64 `stm:"index_writer_memory_in_bytes" json:"index_writer_memory_in_bytes"`
+ VersionMapMemoryInBytes float64 `stm:"version_map_memory_in_bytes" json:"version_map_memory_in_bytes"`
+ FixedBitSetMemoryInBytes float64 `stm:"fixed_bit_set_memory_in_bytes" json:"fixed_bit_set_memory_in_bytes"`
+ } `stm:"segments"`
+ Translog struct {
+ Operations float64 `stm:"operations"`
+ SizeInBytes float64 `stm:"size_in_bytes" json:"size_in_bytes"`
+ UncommittedOperations float64 `stm:"uncommitted_operations" json:"uncommitted_operations"`
+ UncommittedSizeInBytes float64 `stm:"uncommitted_size_in_bytes" json:"uncommitted_size_in_bytes"`
+ } `stm:"translog"`
+ } `stm:"indices"`
+ Process struct {
+ OpenFileDescriptors float64 `stm:"open_file_descriptors" json:"open_file_descriptors"`
+ MaxFileDescriptors float64 `stm:"max_file_descriptors" json:"max_file_descriptors"`
+ } `stm:"process"`
+ JVM struct {
+ Mem struct {
+ HeapUsedPercent float64 `stm:"heap_used_percent" json:"heap_used_percent"`
+ HeapUsedInBytes float64 `stm:"heap_used_in_bytes" json:"heap_used_in_bytes"`
+ HeapCommittedInBytes float64 `stm:"heap_committed_in_bytes" json:"heap_committed_in_bytes"`
+ } `stm:"mem"`
+ GC struct {
+ Collectors struct {
+ Young struct {
+ CollectionCount float64 `stm:"collection_count" json:"collection_count"`
+ CollectionTimeInMillis float64 `stm:"collection_time_in_millis" json:"collection_time_in_millis"`
+ } `stm:"young"`
+ Old struct {
+ CollectionCount float64 `stm:"collection_count" json:"collection_count"`
+ CollectionTimeInMillis float64 `stm:"collection_time_in_millis" json:"collection_time_in_millis"`
+ } `stm:"old"`
+ } `stm:"collectors"`
+ } `stm:"gc"`
+ BufferPools struct {
+ Mapped struct {
+ Count float64 `stm:"count"`
+ UsedInBytes float64 `stm:"used_in_bytes" json:"used_in_bytes"`
+ TotalCapacityInBytes float64 `stm:"total_capacity_in_bytes" json:"total_capacity_in_bytes"`
+ } `stm:"mapped"`
+ Direct struct {
+ Count float64 `stm:"count"`
+ UsedInBytes float64 `stm:"used_in_bytes" json:"used_in_bytes"`
+ TotalCapacityInBytes float64 `stm:"total_capacity_in_bytes" json:"total_capacity_in_bytes"`
+ } `stm:"direct"`
+ } `stm:"buffer_pools" json:"buffer_pools"`
+ } `stm:"jvm"`
+ // https://www.elastic.co/guide/en/elasticsearch/reference/current/modules-threadpool.html
+ ThreadPool struct {
+ Generic struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"generic"`
+ Search struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"search"`
+ SearchThrottled struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"search_throttled" json:"search_throttled"`
+ Get struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"get"`
+ Analyze struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"analyze"`
+ Write struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"write"`
+ Snapshot struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"snapshot"`
+ Warmer struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"warmer"`
+ Refresh struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"refresh"`
+ Listener struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"listener"`
+ FetchShardStarted struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"fetch_shard_started" json:"fetch_shard_started"`
+ FetchShardStore struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"fetch_shard_store" json:"fetch_shard_store"`
+ Flush struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"flush"`
+ ForceMerge struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"force_merge" json:"force_merge"`
+ Management struct {
+ Queue float64 `stm:"queue"`
+ Rejected float64 `stm:"rejected"`
+ } `stm:"management"`
+ } `stm:"thread_pool" json:"thread_pool"`
+ Transport struct {
+ RxCount float64 `stm:"rx_count" json:"rx_count"`
+ RxSizeInBytes float64 `stm:"rx_size_in_bytes" json:"rx_size_in_bytes"`
+ TxCount float64 `stm:"tx_count" json:"tx_count"`
+ TxSizeInBytes float64 `stm:"tx_size_in_bytes" json:"tx_size_in_bytes"`
+ } `stm:"transport"`
+ HTTP struct {
+ CurrentOpen float64 `stm:"current_open" json:"current_open"`
+ } `stm:"http"`
+ Breakers struct {
+ Request struct {
+ Tripped float64 `stm:"tripped"`
+ } `stm:"request"`
+ FieldData struct {
+ Tripped float64 `stm:"tripped"`
+ } `stm:"fielddata"`
+ InFlightRequests struct {
+ Tripped float64 `stm:"tripped"`
+ } `stm:"in_flight_requests" json:"in_flight_requests"`
+ ModelInference struct {
+ Tripped float64 `stm:"tripped"`
+ } `stm:"model_inference" json:"model_inference"`
+ Accounting struct {
+ Tripped float64 `stm:"tripped"`
+ } `stm:"accounting"`
+ Parent struct {
+ Tripped float64 `stm:"tripped"`
+ } `stm:"parent"`
+ } `stm:"breakers"`
+ }
+)
+
+type esClusterHealth struct {
+ ClusterName string `json:"cluster_name"`
+ Status string
+ NumOfNodes float64 `stm:"number_of_nodes" json:"number_of_nodes"`
+ NumOfDataNodes float64 `stm:"number_of_data_nodes" json:"number_of_data_nodes"`
+ ActivePrimaryShards float64 `stm:"active_primary_shards" json:"active_primary_shards"`
+ ActiveShards float64 `stm:"active_shards" json:"active_shards"`
+ RelocatingShards float64 `stm:"relocating_shards" json:"relocating_shards"`
+ InitializingShards float64 `stm:"initializing_shards" json:"initializing_shards"`
+ UnassignedShards float64 `stm:"unassigned_shards" json:"unassigned_shards"`
+ DelayedUnassignedShards float64 `stm:"delayed_unassigned_shards" json:"delayed_unassigned_shards"`
+ NumOfPendingTasks float64 `stm:"number_of_pending_tasks" json:"number_of_pending_tasks"`
+ NumOfInFlightFetch float64 `stm:"number_of_in_flight_fetch" json:"number_of_in_flight_fetch"`
+ ActiveShardsPercentAsNumber float64 `stm:"active_shards_percent_as_number" json:"active_shards_percent_as_number"`
+}
+
+type esClusterStats struct {
+ ClusterName string `json:"cluster_name"`
+ Nodes struct {
+ Count struct {
+ Total float64 `stm:"total"`
+ CoordinatingOnly float64 `stm:"coordinating_only" json:"coordinating_only"`
+ Data float64 `stm:"data"`
+ DataCold float64 `stm:"data_cold" json:"data_cold"`
+ DataContent float64 `stm:"data_content" json:"data_content"`
+ DataFrozen float64 `stm:"data_frozen" json:"data_frozen"`
+ DataHot float64 `stm:"data_hot" json:"data_hot"`
+ DataWarm float64 `stm:"data_warm" json:"data_warm"`
+ Ingest float64 `stm:"ingest"`
+ Master float64 `stm:"master"`
+ ML float64 `stm:"ml"`
+ RemoteClusterClient float64 `stm:"remote_cluster_client" json:"remote_cluster_client"`
+ Transform float64 `stm:"transform"`
+ VotingOnly float64 `stm:"voting_only" json:"voting_only"`
+ } `stm:"count"`
+ } `stm:"nodes"`
+ Indices struct {
+ Count float64 `stm:"count"`
+ Shards struct {
+ Total float64 `stm:"total"`
+ Primaries float64 `stm:"primaries"`
+ Replication float64 `stm:"replication"`
+ } `stm:"shards"`
+ Docs struct {
+ Count float64 `stm:"count"`
+ } `stm:"docs"`
+ Store struct {
+ SizeInBytes float64 `stm:"size_in_bytes" json:"size_in_bytes"`
+ } `stm:"store"`
+ QueryCache struct {
+ HitCount float64 `stm:"hit_count" json:"hit_count"`
+ MissCount float64 `stm:"miss_count" json:"miss_count"`
+ } `stm:"query_cache" json:"query_cache"`
+ } `stm:"indices"`
+}
+
+type esIndexStats struct {
+ Index string
+ Health string
+ Rep string
+ DocsCount string `json:"docs.count"`
+ StoreSize string `json:"store.size"`
+}
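
The structs above map the JSON returned by the Elasticsearch HTTP API onto Go fields: the `json` tags drive decoding, while the `stm` tags are what the plugin later uses to flatten the decoded values into its metric map. A minimal, self-contained sketch of the decoding side, assuming a cluster reachable at 127.0.0.1:9200 and using only the standard library (the collector itself goes through its configured web client), might look like this:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// clusterHealth mirrors a subset of the fields decoded into esClusterHealth above.
type clusterHealth struct {
	ClusterName      string  `json:"cluster_name"`
	Status           string  `json:"status"`
	NumOfNodes       float64 `json:"number_of_nodes"`
	UnassignedShards float64 `json:"unassigned_shards"`
}

func main() {
	// _cluster/health is the documented Elasticsearch endpoint behind these fields;
	// the base URL here is an assumption for this sketch.
	resp, err := http.Get("http://127.0.0.1:9200/_cluster/health")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var health clusterHealth
	if err := json.NewDecoder(resp.Body).Decode(&health); err != nil {
		panic(err)
	}
	fmt.Printf("cluster %q is %s: %v nodes, %v unassigned shards\n",
		health.ClusterName, health.Status, health.NumOfNodes, health.UnassignedShards)
}

The same response shape appears in testdata/v8.4.2/cluster_health.json below, which the module's tests feed through this decoding path.
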
diff --git a/src/go/plugin/go.d/modules/elasticsearch/testdata/config.json b/src/go/plugin/go.d/modules/elasticsearch/testdata/config.json
new file mode 100644
index 000000000..a456d1d56
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/testdata/config.json
@@ -0,0 +1,25 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "cluster_mode": true,
+ "collect_node_stats": true,
+ "collect_cluster_health": true,
+ "collect_cluster_stats": true,
+ "collect_indices_stats": true
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/testdata/config.yaml b/src/go/plugin/go.d/modules/elasticsearch/testdata/config.yaml
new file mode 100644
index 000000000..af1b4a136
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/testdata/config.yaml
@@ -0,0 +1,22 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+cluster_mode: yes
+collect_node_stats: yes
+collect_cluster_health: yes
+collect_cluster_stats: yes
+collect_indices_stats: yes
diff --git a/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cat_indices_stats.json b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cat_indices_stats.json
new file mode 100644
index 000000000..f46794cc4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cat_indices_stats.json
@@ -0,0 +1,50 @@
+[
+ {
+ "health": "yellow",
+ "status": "open",
+ "index": "my-index-000003",
+ "uuid": "Clrvbw-AQ5CB3xWI3MUXFg",
+ "pri": "1",
+ "rep": "1",
+ "docs.count": "1",
+ "docs.deleted": "1",
+ "store.size": "208b",
+ "pri.store.size": "208b"
+ },
+ {
+ "health": "yellow",
+ "status": "open",
+ "index": "my-index-000002",
+ "uuid": "z7cy4d2PQYSSJDhi8dIjWg",
+ "pri": "1",
+ "rep": "1",
+ "docs.count": "1",
+ "docs.deleted": "1",
+ "store.size": "208b",
+ "pri.store.size": "208b"
+ },
+ {
+ "health": "yellow",
+ "status": "open",
+ "index": "my-index-000001",
+ "uuid": "08YTiZfmQUiO67VOGZOfVg",
+ "pri": "1",
+ "rep": "1",
+ "docs.count": "1",
+ "docs.deleted": "1",
+ "store.size": "208b",
+ "pri.store.size": "208b"
+ },
+ {
+ "health": "yellow",
+ "status": "open",
+ "index": ".my-system-index-000001",
+ "uuid": "08YTiZfmQUiO67VOGZOfVg",
+ "pri": "1",
+ "rep": "1",
+ "docs.count": "1",
+ "docs.deleted": "1",
+ "store.size": "208b",
+ "pri.store.size": "208b"
+ }
+]
diff --git a/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cluster_health.json b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cluster_health.json
new file mode 100644
index 000000000..0fdc0de49
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cluster_health.json
@@ -0,0 +1,17 @@
+{
+ "cluster_name": "36928dce44074ceba64d7b3d698443a7",
+ "status": "green",
+ "timed_out": false,
+ "number_of_nodes": 3,
+ "number_of_data_nodes": 2,
+ "active_primary_shards": 97,
+ "active_shards": 194,
+ "relocating_shards": 0,
+ "initializing_shards": 0,
+ "unassigned_shards": 0,
+ "delayed_unassigned_shards": 0,
+ "number_of_pending_tasks": 0,
+ "number_of_in_flight_fetch": 0,
+ "task_max_waiting_in_queue_millis": 0,
+ "active_shards_percent_as_number": 100
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cluster_stats.json b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cluster_stats.json
new file mode 100644
index 000000000..53bea1b34
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/cluster_stats.json
@@ -0,0 +1,377 @@
+{
+ "_nodes": {
+ "total": 3,
+ "successful": 3,
+ "failed": 0
+ },
+ "cluster_name": "36928dce44074ceba64d7b3d698443a7",
+ "cluster_uuid": "5jO2X31FQ32kJAWoCsp3Vw",
+ "timestamp": 1687866240414,
+ "status": "green",
+ "indices": {
+ "count": 97,
+ "shards": {
+ "total": 194,
+ "primaries": 97,
+ "replication": 1,
+ "index": {
+ "shards": {
+ "min": 2,
+ "max": 2,
+ "avg": 2
+ },
+ "primaries": {
+ "min": 1,
+ "max": 1,
+ "avg": 1
+ },
+ "replication": {
+ "min": 1,
+ "max": 1,
+ "avg": 1
+ }
+ }
+ },
+ "docs": {
+ "count": 402750703,
+ "deleted": 1603
+ },
+ "store": {
+ "size_in_bytes": 380826136962,
+ "total_data_set_size_in_bytes": 380826136962,
+ "reserved_in_bytes": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 600,
+ "evictions": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 37465951,
+ "total_count": 684606952,
+ "hit_count": 96838726,
+ "miss_count": 587768226,
+ "cache_size": 22571,
+ "cache_count": 91319,
+ "evictions": 68748
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 614,
+ "memory_in_bytes": 0,
+ "terms_memory_in_bytes": 0,
+ "stored_fields_memory_in_bytes": 0,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 0,
+ "points_memory_in_bytes": 0,
+ "doc_values_memory_in_bytes": 0,
+ "index_writer_memory_in_bytes": 368167356,
+ "version_map_memory_in_bytes": 54470768,
+ "fixed_bit_set_memory_in_bytes": 57736,
+ "max_unsafe_auto_id_timestamp": 1679747033889,
+ "file_sizes": {}
+ },
+ "mappings": {
+ "total_field_count": 10467,
+ "total_deduplicated_field_count": 2070,
+ "total_deduplicated_mapping_size_in_bytes": 26441,
+ "field_types": [
+ {
+ "name": "alias",
+ "count": 1,
+ "index_count": 1,
+ "script_count": 0
+ },
+ {
+ "name": "boolean",
+ "count": 77,
+ "index_count": 37,
+ "script_count": 0
+ },
+ {
+ "name": "constant_keyword",
+ "count": 10,
+ "index_count": 4,
+ "script_count": 0
+ },
+ {
+ "name": "date",
+ "count": 763,
+ "index_count": 86,
+ "script_count": 0
+ },
+ {
+ "name": "flattened",
+ "count": 27,
+ "index_count": 27,
+ "script_count": 0
+ },
+ {
+ "name": "float",
+ "count": 8,
+ "index_count": 4,
+ "script_count": 0
+ },
+ {
+ "name": "integer",
+ "count": 279,
+ "index_count": 70,
+ "script_count": 0
+ },
+ {
+ "name": "ip",
+ "count": 4,
+ "index_count": 4,
+ "script_count": 0
+ },
+ {
+ "name": "keyword",
+ "count": 4345,
+ "index_count": 86,
+ "script_count": 0
+ },
+ {
+ "name": "long",
+ "count": 1143,
+ "index_count": 79,
+ "script_count": 0
+ },
+ {
+ "name": "match_only_text",
+ "count": 1170,
+ "index_count": 69,
+ "script_count": 0
+ },
+ {
+ "name": "nested",
+ "count": 4,
+ "index_count": 4,
+ "script_count": 0
+ },
+ {
+ "name": "object",
+ "count": 2583,
+ "index_count": 85,
+ "script_count": 0
+ },
+ {
+ "name": "text",
+ "count": 49,
+ "index_count": 17,
+ "script_count": 0
+ },
+ {
+ "name": "version",
+ "count": 4,
+ "index_count": 4,
+ "script_count": 0
+ }
+ ],
+ "runtime_field_types": []
+ },
+ "analysis": {
+ "char_filter_types": [],
+ "tokenizer_types": [],
+ "filter_types": [],
+ "analyzer_types": [],
+ "built_in_char_filters": [],
+ "built_in_tokenizers": [],
+ "built_in_filters": [],
+ "built_in_analyzers": []
+ },
+ "versions": [
+ {
+ "version": "8.4.2",
+ "index_count": 97,
+ "primary_shard_count": 97,
+ "total_primary_bytes": 189671468048
+ }
+ ]
+ },
+ "nodes": {
+ "count": {
+ "total": 3,
+ "coordinating_only": 0,
+ "data": 0,
+ "data_cold": 0,
+ "data_content": 2,
+ "data_frozen": 0,
+ "data_hot": 2,
+ "data_warm": 0,
+ "ingest": 2,
+ "master": 3,
+ "ml": 0,
+ "remote_cluster_client": 2,
+ "transform": 2,
+ "voting_only": 1
+ },
+ "versions": [
+ "8.4.2"
+ ],
+ "os": {
+ "available_processors": 8,
+ "allocated_processors": 8,
+ "names": [
+ {
+ "name": "Linux",
+ "count": 3
+ }
+ ],
+ "pretty_names": [
+ {
+ "pretty_name": "Ubuntu 20.04.5 LTS",
+ "count": 3
+ }
+ ],
+ "architectures": [
+ {
+ "arch": "amd64",
+ "count": 3
+ }
+ ],
+ "mem": {
+ "total_in_bytes": 33285996544,
+ "adjusted_total_in_bytes": 32153534464,
+ "free_in_bytes": 1732333568,
+ "used_in_bytes": 31553662976,
+ "free_percent": 5,
+ "used_percent": 95
+ }
+ },
+ "process": {
+ "cpu": {
+ "percent": 26
+ },
+ "open_file_descriptors": {
+ "min": 557,
+ "max": 1185,
+ "avg": 968
+ }
+ },
+ "jvm": {
+ "max_uptime_in_millis": 23671188288,
+ "versions": [
+ {
+ "version": "18.0.2.1",
+ "vm_name": "OpenJDK 64-Bit Server VM",
+ "vm_version": "18.0.2.1+1-1",
+ "vm_vendor": "Oracle Corporation",
+ "bundled_jdk": true,
+ "using_bundled_jdk": true,
+ "count": 3
+ }
+ ],
+ "mem": {
+ "heap_used_in_bytes": 8044798544,
+ "heap_max_in_bytes": 16009658368
+ },
+ "threads": 272
+ },
+ "fs": {
+ "total_in_bytes": 979252543488,
+ "free_in_bytes": 595738775552,
+ "available_in_bytes": 595738775552
+ },
+ "plugins": [],
+ "network_types": {
+ "transport_types": {
+ "security4": 3
+ },
+ "http_types": {
+ "security4": 3
+ }
+ },
+ "discovery_types": {
+ "multi-node": 3
+ },
+ "packaging_types": [
+ {
+ "flavor": "default",
+ "type": "docker",
+ "count": 3
+ }
+ ],
+ "ingest": {
+ "number_of_pipelines": 20,
+ "processor_stats": {
+ "conditional": {
+ "count": 0,
+ "failed": 0,
+ "current": 0,
+ "time_in_millis": 0
+ },
+ "date": {
+ "count": 0,
+ "failed": 0,
+ "current": 0,
+ "time_in_millis": 0
+ },
+ "geoip": {
+ "count": 0,
+ "failed": 0,
+ "current": 0,
+ "time_in_millis": 0
+ },
+ "pipeline": {
+ "count": 0,
+ "failed": 0,
+ "current": 0,
+ "time_in_millis": 0
+ },
+ "remove": {
+ "count": 0,
+ "failed": 0,
+ "current": 0,
+ "time_in_millis": 0
+ },
+ "rename": {
+ "count": 0,
+ "failed": 0,
+ "current": 0,
+ "time_in_millis": 0
+ },
+ "script": {
+ "count": 0,
+ "failed": 0,
+ "current": 0,
+ "time_in_millis": 0
+ },
+ "set": {
+ "count": 0,
+ "failed": 0,
+ "current": 0,
+ "time_in_millis": 0
+ },
+ "set_security_user": {
+ "count": 0,
+ "failed": 0,
+ "current": 0,
+ "time_in_millis": 0
+ }
+ }
+ },
+ "indexing_pressure": {
+ "memory": {
+ "current": {
+ "combined_coordinating_and_primary_in_bytes": 0,
+ "coordinating_in_bytes": 0,
+ "primary_in_bytes": 0,
+ "replica_in_bytes": 0,
+ "all_in_bytes": 0
+ },
+ "total": {
+ "combined_coordinating_and_primary_in_bytes": 0,
+ "coordinating_in_bytes": 0,
+ "primary_in_bytes": 0,
+ "replica_in_bytes": 0,
+ "all_in_bytes": 0,
+ "coordinating_rejections": 0,
+ "primary_rejections": 0,
+ "replica_rejections": 0
+ },
+ "limit_in_bytes": 0
+ }
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/info.json b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/info.json
new file mode 100644
index 000000000..23e3f1596
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/info.json
@@ -0,0 +1,17 @@
+{
+ "name": "instance-0000000006",
+ "cluster_name": "36928dce44074ceba64d7b3d698443a7",
+ "cluster_uuid": "5jO2X31FQ32kJAWoCsp3Vw",
+ "version": {
+ "number": "8.4.2",
+ "build_flavor": "default",
+ "build_type": "docker",
+ "build_hash": "89f8c6d8429db93b816403ee75e5c270b43a940a",
+ "build_date": "2022-09-14T16:26:04.382547801Z",
+ "build_snapshot": false,
+ "lucene_version": "9.3.0",
+ "minimum_wire_compatibility_version": "7.17.0",
+ "minimum_index_compatibility_version": "7.0.0"
+ },
+ "tagline": "You Know, for Search"
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/nodes_local_stats.json b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/nodes_local_stats.json
new file mode 100644
index 000000000..77e0ad0ba
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/nodes_local_stats.json
@@ -0,0 +1,867 @@
+{
+ "_nodes": {
+ "total": 1,
+ "successful": 1,
+ "failed": 0
+ },
+ "cluster_name": "36928dce44074ceba64d7b3d698443a7",
+ "nodes": {
+ "Klg1CjgMTouentQcJlRGuA": {
+ "timestamp": 1687867033043,
+ "name": "instance-0000000006",
+ "transport_address": "172.25.238.204:19349",
+ "host": "172.25.238.204",
+ "ip": "172.25.238.204:19349",
+ "roles": [
+ "data_content",
+ "data_hot",
+ "ingest",
+ "master",
+ "remote_cluster_client",
+ "transform"
+ ],
+ "attributes": {
+ "xpack.installed": "true",
+ "logical_availability_zone": "zone-0",
+ "availability_zone": "us-east-1a",
+ "region": "us-east-1",
+ "instance_configuration": "aws.es.datahot.i3",
+ "server_name": "instance-0000000006.36928dce44074ceba64d7b3d698443a7",
+ "data": "hot"
+ },
+ "indices": {
+ "docs": {
+ "count": 403212527,
+ "deleted": 2287
+ },
+ "shard_stats": {
+ "total_count": 97
+ },
+ "store": {
+ "size_in_bytes": 189816312947,
+ "total_data_set_size_in_bytes": 189816312947,
+ "reserved_in_bytes": 0
+ },
+ "indexing": {
+ "index_total": 3667793202,
+ "index_time_in_millis": 1100149051,
+ "index_current": 1,
+ "index_failed": 149288,
+ "delete_total": 13333,
+ "delete_time_in_millis": 1883,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 7502889,
+ "time_in_millis": 747395,
+ "exists_total": 7411696,
+ "exists_time_in_millis": 741794,
+ "missing_total": 91193,
+ "missing_time_in_millis": 5601,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 166823028,
+ "query_time_in_millis": 51265805,
+ "query_current": 0,
+ "fetch_total": 42645288,
+ "fetch_time_in_millis": 21316820,
+ "fetch_current": 0,
+ "scroll_total": 13037388,
+ "scroll_time_in_millis": 138762688,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 912669,
+ "total_time_in_millis": 1022950085,
+ "total_docs": 12230404828,
+ "total_size_in_bytes": 5503526044088,
+ "total_stopped_time_in_millis": 3959107,
+ "total_throttled_time_in_millis": 747116999,
+ "total_auto_throttle_in_bytes": 3674596384
+ },
+ "refresh": {
+ "total": 7721472,
+ "total_time_in_millis": 94304142,
+ "external_total": 7659770,
+ "external_total_time_in_millis": 100804787,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 35134,
+ "periodic": 34985,
+ "total_time_in_millis": 22213090
+ },
+ "warmer": {
+ "current": 0,
+ "total": 6096195,
+ "total_time_in_millis": 1439617
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 18034237,
+ "total_count": 274407233,
+ "hit_count": 45114414,
+ "miss_count": 229292819,
+ "cache_size": 11302,
+ "cache_count": 46210,
+ "evictions": 34908
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 600,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 307,
+ "memory_in_bytes": 0,
+ "terms_memory_in_bytes": 0,
+ "stored_fields_memory_in_bytes": 0,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 0,
+ "points_memory_in_bytes": 0,
+ "doc_values_memory_in_bytes": 0,
+ "index_writer_memory_in_bytes": 240481008,
+ "version_map_memory_in_bytes": 44339216,
+ "fixed_bit_set_memory_in_bytes": 2008,
+ "max_unsafe_auto_id_timestamp": 1679747033889,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 362831,
+ "size_in_bytes": 453491882,
+ "uncommitted_operations": 362831,
+ "uncommitted_size_in_bytes": 453491882,
+ "earliest_last_modified_age": 8
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 6779720,
+ "evictions": 0,
+ "hit_count": 10885151,
+ "miss_count": 8798
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 5718894
+ },
+ "bulk": {
+ "total_operations": 465694640,
+ "total_time_in_millis": 1118684280,
+ "total_size_in_bytes": 3998536502390,
+ "avg_time_in_millis": 0,
+ "avg_size_in_bytes": 8526
+ }
+ },
+ "os": {
+ "timestamp": 1687867033054,
+ "cpu": {
+ "percent": 11,
+ "load_average": {
+ "1m": 1.24,
+ "5m": 2.15,
+ "15m": 2.39
+ }
+ },
+ "mem": {
+ "total_in_bytes": 16106127360,
+ "adjusted_total_in_bytes": 15728640000,
+ "free_in_bytes": 517578752,
+ "used_in_bytes": 15588548608,
+ "free_percent": 3,
+ "used_percent": 97
+ },
+ "swap": {
+ "total_in_bytes": 0,
+ "free_in_bytes": 0,
+ "used_in_bytes": 0
+ },
+ "cgroup": {
+ "cpuacct": {
+ "control_group": "/",
+ "usage_nanos": 2633246338856561
+ },
+ "cpu": {
+ "control_group": "/",
+ "cfs_period_micros": 100000,
+ "cfs_quota_micros": 206897,
+ "stat": {
+ "number_of_elapsed_periods": 110099433,
+ "number_of_times_throttled": 389045,
+ "time_throttled_nanos": 34502349002867
+ }
+ },
+ "memory": {
+ "control_group": "/",
+ "limit_in_bytes": "16106127360",
+ "usage_in_bytes": "15588548608"
+ }
+ }
+ },
+ "process": {
+ "timestamp": 1687867033054,
+ "open_file_descriptors": 1149,
+ "max_file_descriptors": 1048576,
+ "cpu": {
+ "percent": 11,
+ "total_in_millis": 2576219400
+ },
+ "mem": {
+ "total_virtual_in_bytes": 117744459776
+ }
+ },
+ "jvm": {
+ "timestamp": 1687867033055,
+ "uptime_in_millis": 11286453256,
+ "mem": {
+ "heap_used_in_bytes": 4337402488,
+ "heap_used_percent": 55,
+ "heap_committed_in_bytes": 7864320000,
+ "heap_max_in_bytes": 7864320000,
+ "non_heap_used_in_bytes": 343633376,
+ "non_heap_committed_in_bytes": 350355456,
+ "pools": {
+ "young": {
+ "used_in_bytes": 2654994432,
+ "max_in_bytes": 0,
+ "peak_used_in_bytes": 4718592000,
+ "peak_max_in_bytes": 0
+ },
+ "old": {
+ "used_in_bytes": 1413394432,
+ "max_in_bytes": 7864320000,
+ "peak_used_in_bytes": 2444862976,
+ "peak_max_in_bytes": 7864320000
+ },
+ "survivor": {
+ "used_in_bytes": 269013624,
+ "max_in_bytes": 0,
+ "peak_used_in_bytes": 591396864,
+ "peak_max_in_bytes": 0
+ }
+ }
+ },
+ "threads": {
+ "count": 112,
+ "peak_count": 117
+ },
+ "gc": {
+ "collectors": {
+ "young": {
+ "collection_count": 78661,
+ "collection_time_in_millis": 6014901
+ },
+ "old": {
+ "collection_count": 0,
+ "collection_time_in_millis": 0
+ }
+ }
+ },
+ "buffer_pools": {
+ "mapped": {
+ "count": 844,
+ "used_in_bytes": 103411995802,
+ "total_capacity_in_bytes": 103411995802
+ },
+ "direct": {
+ "count": 94,
+ "used_in_bytes": 4654850,
+ "total_capacity_in_bytes": 4654848
+ },
+ "mapped - 'non-volatile memory'": {
+ "count": 0,
+ "used_in_bytes": 0,
+ "total_capacity_in_bytes": 0
+ }
+ },
+ "classes": {
+ "current_loaded_count": 36006,
+ "total_loaded_count": 37829,
+ "total_unloaded_count": 1823
+ }
+ },
+ "thread_pool": {
+ "analyze": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "auto_complete": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "azure_event_loop": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ccr": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "cluster_coordination": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 1,
+ "completed": 1130226
+ },
+ "fetch_shard_started": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "fetch_shard_store": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 6,
+ "completed": 38
+ },
+ "flush": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 89892
+ },
+ "force_merge": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 1,
+ "completed": 143
+ },
+ "generic": {
+ "threads": 46,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 46,
+ "completed": 89722038
+ },
+ "get": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "management": {
+ "threads": 3,
+ "queue": 0,
+ "active": 1,
+ "rejected": 0,
+ "largest": 3,
+ "completed": 416796779
+ },
+ "ml_datafeed": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_job_comms": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_native_inference_comms": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_utility": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 22545252
+ },
+ "refresh": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 885152069
+ },
+ "repository_azure": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "rollup_indexing": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "search": {
+ "threads": 5,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 5,
+ "completed": 167558865
+ },
+ "search_coordination": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 14101096
+ },
+ "search_throttled": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "searchable_snapshots_cache_fetch_async": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "searchable_snapshots_cache_prewarming": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "security-crypto": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "security-token-key": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "snapshot": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 806551
+ },
+ "snapshot_meta": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "system_critical_read": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 2350943
+ },
+ "system_critical_write": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 7637
+ },
+ "system_read": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 31143771
+ },
+ "system_write": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 7401359
+ },
+ "vector_tile_generation": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "warmer": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 36139188
+ },
+ "watcher": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "write": {
+ "threads": 3,
+ "queue": 0,
+ "active": 2,
+ "rejected": 0,
+ "largest": 3,
+ "completed": 575385289
+ }
+ },
+ "fs": {
+ "timestamp": 1687867033056,
+ "total": {
+ "total_in_bytes": 483183820800,
+ "free_in_bytes": 292670836736,
+ "available_in_bytes": 292670836736
+ },
+ "data": [
+ {
+ "path": "/app/data",
+ "mount": "/app (/dev/mapper/lxc-data)",
+ "type": "xfs",
+ "total_in_bytes": 483183820800,
+ "free_in_bytes": 292670836736,
+ "available_in_bytes": 292670836736
+ }
+ ],
+ "io_stats": {
+ "devices": [
+ {
+ "device_name": "dm-1",
+ "operations": 6160920260,
+ "read_operations": 376565165,
+ "write_operations": 5784355095,
+ "read_kilobytes": 31265075012,
+ "write_kilobytes": 100985041837,
+ "io_time_in_millis": 184335640
+ }
+ ],
+ "total": {
+ "operations": 6160920260,
+ "read_operations": 376565165,
+ "write_operations": 5784355095,
+ "read_kilobytes": 31265075012,
+ "write_kilobytes": 100985041837,
+ "io_time_in_millis": 184335640
+ }
+ }
+ },
+ "transport": {
+ "server_open": 24,
+ "total_outbound_connections": 11,
+ "rx_count": 1300468666,
+ "rx_size_in_bytes": 1789647854011,
+ "tx_count": 1300468665,
+ "tx_size_in_bytes": 2927853534431,
+ "inbound_handling_time_histogram": [
+ {
+ "lt_millis": 1,
+ "count": 1256244956
+ },
+ {
+ "ge_millis": 1,
+ "lt_millis": 2,
+ "count": 202091898
+ },
+ {
+ "ge_millis": 2,
+ "lt_millis": 4,
+ "count": 3242593
+ },
+ {
+ "ge_millis": 4,
+ "lt_millis": 8,
+ "count": 454964
+ },
+ {
+ "ge_millis": 8,
+ "lt_millis": 16,
+ "count": 173349
+ },
+ {
+ "ge_millis": 16,
+ "lt_millis": 32,
+ "count": 39048
+ },
+ {
+ "ge_millis": 32,
+ "lt_millis": 64,
+ "count": 14155
+ },
+ {
+ "ge_millis": 64,
+ "lt_millis": 128,
+ "count": 75267
+ },
+ {
+ "ge_millis": 128,
+ "lt_millis": 256,
+ "count": 1534
+ },
+ {
+ "ge_millis": 256,
+ "lt_millis": 512,
+ "count": 76
+ },
+ {
+ "ge_millis": 512,
+ "lt_millis": 1024,
+ "count": 3
+ },
+ {
+ "ge_millis": 1024,
+ "lt_millis": 2048,
+ "count": 0
+ },
+ {
+ "ge_millis": 2048,
+ "lt_millis": 4096,
+ "count": 0
+ },
+ {
+ "ge_millis": 4096,
+ "lt_millis": 8192,
+ "count": 0
+ },
+ {
+ "ge_millis": 8192,
+ "lt_millis": 16384,
+ "count": 0
+ },
+ {
+ "ge_millis": 16384,
+ "lt_millis": 32768,
+ "count": 0
+ },
+ {
+ "ge_millis": 32768,
+ "lt_millis": 65536,
+ "count": 0
+ },
+ {
+ "ge_millis": 65536,
+ "count": 0
+ }
+ ],
+ "outbound_handling_time_histogram": [
+ {
+ "lt_millis": 1,
+ "count": 1128511214
+ },
+ {
+ "ge_millis": 1,
+ "lt_millis": 2,
+ "count": 161858180
+ },
+ {
+ "ge_millis": 2,
+ "lt_millis": 4,
+ "count": 6819172
+ },
+ {
+ "ge_millis": 4,
+ "lt_millis": 8,
+ "count": 2563797
+ },
+ {
+ "ge_millis": 8,
+ "lt_millis": 16,
+ "count": 445824
+ },
+ {
+ "ge_millis": 16,
+ "lt_millis": 32,
+ "count": 122462
+ },
+ {
+ "ge_millis": 32,
+ "lt_millis": 64,
+ "count": 95822
+ },
+ {
+ "ge_millis": 64,
+ "lt_millis": 128,
+ "count": 49986
+ },
+ {
+ "ge_millis": 128,
+ "lt_millis": 256,
+ "count": 1931
+ },
+ {
+ "ge_millis": 256,
+ "lt_millis": 512,
+ "count": 250
+ },
+ {
+ "ge_millis": 512,
+ "lt_millis": 1024,
+ "count": 27
+ },
+ {
+ "ge_millis": 1024,
+ "lt_millis": 2048,
+ "count": 0
+ },
+ {
+ "ge_millis": 2048,
+ "lt_millis": 4096,
+ "count": 0
+ },
+ {
+ "ge_millis": 4096,
+ "lt_millis": 8192,
+ "count": 0
+ },
+ {
+ "ge_millis": 8192,
+ "lt_millis": 16384,
+ "count": 0
+ },
+ {
+ "ge_millis": 16384,
+ "lt_millis": 32768,
+ "count": 0
+ },
+ {
+ "ge_millis": 32768,
+ "lt_millis": 65536,
+ "count": 0
+ },
+ {
+ "ge_millis": 65536,
+ "count": 0
+ }
+ ]
+ },
+ "http": {
+ "current_open": 73,
+ "total_opened": 779388
+ },
+ "breakers": {
+ "fielddata": {
+ "limit_size_in_bytes": 3145728000,
+ "limit_size": "2.9gb",
+ "estimated_size_in_bytes": 600,
+ "estimated_size": "600b",
+ "overhead": 1.03,
+ "tripped": 0
+ },
+ "request": {
+ "limit_size_in_bytes": 4718592000,
+ "limit_size": "4.3gb",
+ "estimated_size_in_bytes": 16440,
+ "estimated_size": "16kb",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "inflight_requests": {
+ "limit_size_in_bytes": 7864320000,
+ "limit_size": "7.3gb",
+ "estimated_size_in_bytes": 56628,
+ "estimated_size": "55.3kb",
+ "overhead": 2,
+ "tripped": 0
+ },
+ "model_inference": {
+ "limit_size_in_bytes": 3932160000,
+ "limit_size": "3.6gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "eql_sequence": {
+ "limit_size_in_bytes": 3932160000,
+ "limit_size": "3.6gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "parent": {
+ "limit_size_in_bytes": 7471104000,
+ "limit_size": "6.9gb",
+ "estimated_size_in_bytes": 4341596792,
+ "estimated_size": "4gb",
+ "overhead": 1,
+ "tripped": 0
+ }
+ }
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/nodes_stats.json b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/nodes_stats.json
new file mode 100644
index 000000000..6e6b21b91
--- /dev/null
+++ b/src/go/plugin/go.d/modules/elasticsearch/testdata/v8.4.2/nodes_stats.json
@@ -0,0 +1,2577 @@
+{
+ "_nodes": {
+ "total": 3,
+ "successful": 3,
+ "failed": 0
+ },
+ "cluster_name": "36928dce44074ceba64d7b3d698443a7",
+ "nodes": {
+ "tk_U7GMCRkCG4FoOvusrng": {
+ "timestamp": 1687866153482,
+ "name": "instance-0000000005",
+ "transport_address": "172.22.146.77:19280",
+ "host": "172.22.146.77",
+ "ip": "172.22.146.77:19280",
+ "roles": [
+ "data_content",
+ "data_hot",
+ "ingest",
+ "master",
+ "remote_cluster_client",
+ "transform"
+ ],
+ "attributes": {
+ "instance_configuration": "aws.es.datahot.i3",
+ "server_name": "instance-0000000005.36928dce44074ceba64d7b3d698443a7",
+ "data": "hot",
+ "xpack.installed": "true",
+ "logical_availability_zone": "zone-1",
+ "availability_zone": "us-east-1e",
+ "region": "us-east-1"
+ },
+ "indices": {
+ "docs": {
+ "count": 403028528,
+ "deleted": 430916
+ },
+ "shard_stats": {
+ "total_count": 97
+ },
+ "store": {
+ "size_in_bytes": 190773977702,
+ "total_data_set_size_in_bytes": 190773977702,
+ "reserved_in_bytes": 0
+ },
+ "indexing": {
+ "index_total": 6550378755,
+ "index_time_in_millis": 1244633519,
+ "index_current": 0,
+ "index_failed": 3425,
+ "delete_total": 422502,
+ "delete_time_in_millis": 12139,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 1673415,
+ "time_in_millis": 176085,
+ "exists_total": 1505245,
+ "exists_time_in_millis": 164637,
+ "missing_total": 168170,
+ "missing_time_in_millis": 11448,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 157912598,
+ "query_time_in_millis": 158980385,
+ "query_current": 0,
+ "fetch_total": 25105951,
+ "fetch_time_in_millis": 24517851,
+ "fetch_current": 0,
+ "scroll_total": 4428540,
+ "scroll_time_in_millis": 153962443,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 1,
+ "current_docs": 1768114,
+ "current_size_in_bytes": 954513675,
+ "total": 1494757,
+ "total_time_in_millis": 1621446531,
+ "total_docs": 21027016560,
+ "total_size_in_bytes": 8884898196658,
+ "total_stopped_time_in_millis": 4962617,
+ "total_throttled_time_in_millis": 1169888193,
+ "total_auto_throttle_in_bytes": 4651560300
+ },
+ "refresh": {
+ "total": 12359783,
+ "total_time_in_millis": 300152615,
+ "external_total": 12278845,
+ "external_total_time_in_millis": 311222562,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 67895,
+ "periodic": 67579,
+ "total_time_in_millis": 81917283
+ },
+ "warmer": {
+ "current": 0,
+ "total": 6153265,
+ "total_time_in_millis": 1348469
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 19433507,
+ "total_count": 410202459,
+ "hit_count": 51724734,
+ "miss_count": 358477725,
+ "cache_size": 11311,
+ "cache_count": 45151,
+ "evictions": 33840
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 291,
+ "memory_in_bytes": 0,
+ "terms_memory_in_bytes": 0,
+ "stored_fields_memory_in_bytes": 0,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 0,
+ "points_memory_in_bytes": 0,
+ "doc_values_memory_in_bytes": 0,
+ "index_writer_memory_in_bytes": 57432664,
+ "version_map_memory_in_bytes": 568,
+ "fixed_bit_set_memory_in_bytes": 55672,
+ "max_unsafe_auto_id_timestamp": 1676581446329,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 1449698,
+ "size_in_bytes": 1214204014,
+ "uncommitted_operations": 1449698,
+ "uncommitted_size_in_bytes": 1214204014,
+ "earliest_last_modified_age": 14453
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 6178272,
+ "evictions": 0,
+ "hit_count": 7403041,
+ "miss_count": 10622
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 48470343
+ },
+ "bulk": {
+ "total_operations": 783008788,
+ "total_time_in_millis": 1265052645,
+ "total_size_in_bytes": 6949964886117,
+ "avg_time_in_millis": 0,
+ "avg_size_in_bytes": 8635
+ }
+ },
+ "os": {
+ "timestamp": 1687866153489,
+ "cpu": {
+ "percent": 9,
+ "load_average": {
+ "1m": 0.83,
+ "5m": 1.1,
+ "15m": 1.3
+ }
+ },
+ "mem": {
+ "total_in_bytes": 16106127360,
+ "adjusted_total_in_bytes": 15728640000,
+ "free_in_bytes": 1425637376,
+ "used_in_bytes": 14680489984,
+ "free_percent": 9,
+ "used_percent": 91
+ },
+ "swap": {
+ "total_in_bytes": 0,
+ "free_in_bytes": 0,
+ "used_in_bytes": 0
+ },
+ "cgroup": {
+ "cpuacct": {
+ "control_group": "/",
+ "usage_nanos": 4328157929052960
+ },
+ "cpu": {
+ "control_group": "/",
+ "cfs_period_micros": 100000,
+ "cfs_quota_micros": 206897,
+ "stat": {
+ "number_of_elapsed_periods": 198258313,
+ "number_of_times_throttled": 619367,
+ "time_throttled_nanos": 45229163024496
+ }
+ },
+ "memory": {
+ "control_group": "/",
+ "limit_in_bytes": "16106127360",
+ "usage_in_bytes": "14680489984"
+ }
+ }
+ },
+ "process": {
+ "timestamp": 1687866153489,
+ "open_file_descriptors": 1180,
+ "max_file_descriptors": 1048576,
+ "cpu": {
+ "percent": 9,
+ "total_in_millis": 3994216500
+ },
+ "mem": {
+ "total_virtual_in_bytes": 114185707520
+ }
+ },
+ "jvm": {
+ "timestamp": 1687866153490,
+ "uptime_in_millis": 20231050756,
+ "mem": {
+ "heap_used_in_bytes": 1884124192,
+ "heap_used_percent": 23,
+ "heap_committed_in_bytes": 7864320000,
+ "heap_max_in_bytes": 7864320000,
+ "non_heap_used_in_bytes": 376433344,
+ "non_heap_committed_in_bytes": 385548288,
+ "pools": {
+ "young": {
+ "used_in_bytes": 385875968,
+ "max_in_bytes": 0,
+ "peak_used_in_bytes": 4714397696,
+ "peak_max_in_bytes": 0
+ },
+ "old": {
+ "used_in_bytes": 1399682080,
+ "max_in_bytes": 7864320000,
+ "peak_used_in_bytes": 7851651072,
+ "peak_max_in_bytes": 7864320000
+ },
+ "survivor": {
+ "used_in_bytes": 98566144,
+ "max_in_bytes": 0,
+ "peak_used_in_bytes": 591396864,
+ "peak_max_in_bytes": 0
+ }
+ }
+ },
+ "threads": {
+ "count": 115,
+ "peak_count": 126
+ },
+ "gc": {
+ "collectors": {
+ "young": {
+ "collection_count": 139959,
+ "collection_time_in_millis": 3581668
+ },
+ "old": {
+ "collection_count": 1,
+ "collection_time_in_millis": 796
+ }
+ }
+ },
+ "buffer_pools": {
+ "mapped": {
+ "count": 831,
+ "used_in_bytes": 99844219805,
+ "total_capacity_in_bytes": 99844219805
+ },
+ "direct": {
+ "count": 90,
+ "used_in_bytes": 4571713,
+ "total_capacity_in_bytes": 4571711
+ },
+ "mapped - 'non-volatile memory'": {
+ "count": 0,
+ "used_in_bytes": 0,
+ "total_capacity_in_bytes": 0
+ }
+ },
+ "classes": {
+ "current_loaded_count": 38122,
+ "total_loaded_count": 40402,
+ "total_unloaded_count": 2280
+ }
+ },
+ "thread_pool": {
+ "analyze": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "auto_complete": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 1,
+ "completed": 1
+ },
+ "azure_event_loop": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ccr": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "cluster_coordination": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 1,
+ "completed": 4427981
+ },
+ "fetch_shard_started": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "fetch_shard_store": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 6,
+ "completed": 72
+ },
+ "flush": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 166429
+ },
+ "force_merge": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 1,
+ "completed": 205
+ },
+ "generic": {
+ "threads": 40,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 40,
+ "completed": 171078109
+ },
+ "get": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "management": {
+ "threads": 3,
+ "queue": 0,
+ "active": 1,
+ "rejected": 0,
+ "largest": 3,
+ "completed": 761997145
+ },
+ "ml_datafeed": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_job_comms": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_native_inference_comms": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_utility": {
+ "threads": 3,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 3,
+ "completed": 40979576
+ },
+ "refresh": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 1224783637
+ },
+ "repository_azure": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "rollup_indexing": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "search": {
+ "threads": 5,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 5,
+ "completed": 191798560
+ },
+ "search_coordination": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 18868632
+ },
+ "search_throttled": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "searchable_snapshots_cache_fetch_async": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "searchable_snapshots_cache_prewarming": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "security-crypto": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "security-token-key": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "snapshot": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 1757953
+ },
+ "snapshot_meta": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 9,
+ "completed": 700327
+ },
+ "system_critical_read": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 11110320
+ },
+ "system_critical_write": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 14932
+ },
+ "system_read": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 39897928
+ },
+ "system_write": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 13382379
+ },
+ "vector_tile_generation": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "warmer": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 85786496
+ },
+ "watcher": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "write": {
+ "threads": 3,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 3,
+ "completed": 980512922
+ }
+ },
+ "fs": {
+ "timestamp": 1687866153490,
+ "total": {
+ "total_in_bytes": 483183820800,
+ "free_in_bytes": 290682736640,
+ "available_in_bytes": 290682736640
+ },
+ "data": [
+ {
+ "path": "/app/data",
+ "mount": "/app (/dev/mapper/lxc-data)",
+ "type": "xfs",
+ "total_in_bytes": 483183820800,
+ "free_in_bytes": 290682736640,
+ "available_in_bytes": 290682736640
+ }
+ ],
+ "io_stats": {
+ "devices": [
+ {
+ "device_name": "dm-1",
+ "operations": 5478832410,
+ "read_operations": 89263106,
+ "write_operations": 5389569304,
+ "read_kilobytes": 9500415196,
+ "write_kilobytes": 67144441274,
+ "io_time_in_millis": 271723584
+ }
+ ],
+ "total": {
+ "operations": 5478832410,
+ "read_operations": 89263106,
+ "write_operations": 5389569304,
+ "read_kilobytes": 9500415196,
+ "write_kilobytes": 67144441274,
+ "io_time_in_millis": 271723584
+ }
+ }
+ },
+ "transport": {
+ "server_open": 24,
+ "total_outbound_connections": 9,
+ "rx_count": 2167879292,
+ "rx_size_in_bytes": 4905919297323,
+ "tx_count": 2167879293,
+ "tx_size_in_bytes": 2964638852652,
+ "inbound_handling_time_histogram": [
+ {
+ "lt_millis": 1,
+ "count": 2149806152
+ },
+ {
+ "ge_millis": 1,
+ "lt_millis": 2,
+ "count": 350125308
+ },
+ {
+ "ge_millis": 2,
+ "lt_millis": 4,
+ "count": 6237311
+ },
+ {
+ "ge_millis": 4,
+ "lt_millis": 8,
+ "count": 3462010
+ },
+ {
+ "ge_millis": 8,
+ "lt_millis": 16,
+ "count": 1695688
+ },
+ {
+ "ge_millis": 16,
+ "lt_millis": 32,
+ "count": 446932
+ },
+ {
+ "ge_millis": 32,
+ "lt_millis": 64,
+ "count": 34053
+ },
+ {
+ "ge_millis": 64,
+ "lt_millis": 128,
+ "count": 124821
+ },
+ {
+ "ge_millis": 128,
+ "lt_millis": 256,
+ "count": 1034
+ },
+ {
+ "ge_millis": 256,
+ "lt_millis": 512,
+ "count": 47
+ },
+ {
+ "ge_millis": 512,
+ "lt_millis": 1024,
+ "count": 7
+ },
+ {
+ "ge_millis": 1024,
+ "lt_millis": 2048,
+ "count": 1
+ },
+ {
+ "ge_millis": 2048,
+ "lt_millis": 4096,
+ "count": 2
+ },
+ {
+ "ge_millis": 4096,
+ "lt_millis": 8192,
+ "count": 0
+ },
+ {
+ "ge_millis": 8192,
+ "lt_millis": 16384,
+ "count": 0
+ },
+ {
+ "ge_millis": 16384,
+ "lt_millis": 32768,
+ "count": 0
+ },
+ {
+ "ge_millis": 32768,
+ "lt_millis": 65536,
+ "count": 0
+ },
+ {
+ "ge_millis": 65536,
+ "count": 0
+ }
+ ],
+ "outbound_handling_time_histogram": [
+ {
+ "lt_millis": 1,
+ "count": 1911876454
+ },
+ {
+ "ge_millis": 1,
+ "lt_millis": 2,
+ "count": 246835312
+ },
+ {
+ "ge_millis": 2,
+ "lt_millis": 4,
+ "count": 5928518
+ },
+ {
+ "ge_millis": 4,
+ "lt_millis": 8,
+ "count": 2342608
+ },
+ {
+ "ge_millis": 8,
+ "lt_millis": 16,
+ "count": 566388
+ },
+ {
+ "ge_millis": 16,
+ "lt_millis": 32,
+ "count": 164795
+ },
+ {
+ "ge_millis": 32,
+ "lt_millis": 64,
+ "count": 91456
+ },
+ {
+ "ge_millis": 64,
+ "lt_millis": 128,
+ "count": 68952
+ },
+ {
+ "ge_millis": 128,
+ "lt_millis": 256,
+ "count": 3952
+ },
+ {
+ "ge_millis": 256,
+ "lt_millis": 512,
+ "count": 772
+ },
+ {
+ "ge_millis": 512,
+ "lt_millis": 1024,
+ "count": 51
+ },
+ {
+ "ge_millis": 1024,
+ "lt_millis": 2048,
+ "count": 25
+ },
+ {
+ "ge_millis": 2048,
+ "lt_millis": 4096,
+ "count": 10
+ },
+ {
+ "ge_millis": 4096,
+ "lt_millis": 8192,
+ "count": 0
+ },
+ {
+ "ge_millis": 8192,
+ "lt_millis": 16384,
+ "count": 0
+ },
+ {
+ "ge_millis": 16384,
+ "lt_millis": 32768,
+ "count": 0
+ },
+ {
+ "ge_millis": 32768,
+ "lt_millis": 65536,
+ "count": 0
+ },
+ {
+ "ge_millis": 65536,
+ "count": 0
+ }
+ ]
+ },
+ "http": {
+ "current_open": 84,
+ "total_opened": 1793320
+ },
+ "breakers": {
+ "model_inference": {
+ "limit_size_in_bytes": 3932160000,
+ "limit_size": "3.6gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "eql_sequence": {
+ "limit_size_in_bytes": 3932160000,
+ "limit_size": "3.6gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "fielddata": {
+ "limit_size_in_bytes": 3145728000,
+ "limit_size": "2.9gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1.03,
+ "tripped": 0
+ },
+ "request": {
+ "limit_size_in_bytes": 4718592000,
+ "limit_size": "4.3gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 1
+ },
+ "inflight_requests": {
+ "limit_size_in_bytes": 7864320000,
+ "limit_size": "7.3gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 2,
+ "tripped": 0
+ },
+ "parent": {
+ "limit_size_in_bytes": 7471104000,
+ "limit_size": "6.9gb",
+ "estimated_size_in_bytes": 1884124192,
+ "estimated_size": "1.7gb",
+ "overhead": 1,
+ "tripped": 93
+ }
+ }
+ },
+ "Klg1CjgMTouentQcJlRGuA": {
+ "timestamp": 1687866153482,
+ "name": "instance-0000000006",
+ "transport_address": "172.25.238.204:19349",
+ "host": "172.25.238.204",
+ "ip": "172.25.238.204:19349",
+ "roles": [
+ "data_content",
+ "data_hot",
+ "ingest",
+ "master",
+ "remote_cluster_client",
+ "transform"
+ ],
+ "attributes": {
+ "logical_availability_zone": "zone-0",
+ "availability_zone": "us-east-1a",
+ "server_name": "instance-0000000006.36928dce44074ceba64d7b3d698443a7",
+ "xpack.installed": "true",
+ "data": "hot",
+ "instance_configuration": "aws.es.datahot.i3",
+ "region": "us-east-1"
+ },
+ "indices": {
+ "docs": {
+ "count": 402750701,
+ "deleted": 1501
+ },
+ "shard_stats": {
+ "total_count": 97
+ },
+ "store": {
+ "size_in_bytes": 189584860329,
+ "total_data_set_size_in_bytes": 189584860329,
+ "reserved_in_bytes": 0
+ },
+ "indexing": {
+ "index_total": 3667364815,
+ "index_time_in_millis": 1100012973,
+ "index_current": 0,
+ "index_failed": 149288,
+ "delete_total": 13333,
+ "delete_time_in_millis": 1883,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 7502285,
+ "time_in_millis": 747339,
+ "exists_total": 7411100,
+ "exists_time_in_millis": 741739,
+ "missing_total": 91185,
+ "missing_time_in_millis": 5600,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 166820275,
+ "query_time_in_millis": 51262303,
+ "query_current": 0,
+ "fetch_total": 42642621,
+ "fetch_time_in_millis": 21316723,
+ "fetch_current": 0,
+ "scroll_total": 13036366,
+ "scroll_time_in_millis": 138752334,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 912589,
+ "total_time_in_millis": 1022946643,
+ "total_docs": 12230248422,
+ "total_size_in_bytes": 5503433306347,
+ "total_stopped_time_in_millis": 3959107,
+ "total_throttled_time_in_millis": 747116999,
+ "total_auto_throttle_in_bytes": 3674596384
+ },
+ "refresh": {
+ "total": 7720800,
+ "total_time_in_millis": 94297737,
+ "external_total": 7659102,
+ "external_total_time_in_millis": 100797967,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 35130,
+ "periodic": 34981,
+ "total_time_in_millis": 22204637
+ },
+ "warmer": {
+ "current": 0,
+ "total": 6095530,
+ "total_time_in_millis": 1439528
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 18032444,
+ "total_count": 274404002,
+ "hit_count": 45113976,
+ "miss_count": 229290026,
+ "cache_size": 11260,
+ "cache_count": 46168,
+ "evictions": 34908
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 600,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 320,
+ "memory_in_bytes": 0,
+ "terms_memory_in_bytes": 0,
+ "stored_fields_memory_in_bytes": 0,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 0,
+ "points_memory_in_bytes": 0,
+ "doc_values_memory_in_bytes": 0,
+ "index_writer_memory_in_bytes": 262022568,
+ "version_map_memory_in_bytes": 49200018,
+ "fixed_bit_set_memory_in_bytes": 1904,
+ "max_unsafe_auto_id_timestamp": 1679747033889,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 352376,
+ "size_in_bytes": 447695989,
+ "uncommitted_operations": 352376,
+ "uncommitted_size_in_bytes": 447695989,
+ "earliest_last_modified_age": 233
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 6779128,
+ "evictions": 0,
+ "hit_count": 10884306,
+ "miss_count": 8796
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 5718894
+ },
+ "bulk": {
+ "total_operations": 465641149,
+ "total_time_in_millis": 1118546460,
+ "total_size_in_bytes": 3998028967189,
+ "avg_time_in_millis": 0,
+ "avg_size_in_bytes": 8613
+ }
+ },
+ "os": {
+ "timestamp": 1687866153492,
+ "cpu": {
+ "percent": 10,
+ "load_average": {
+ "1m": 2.38,
+ "5m": 2.74,
+ "15m": 2.45
+ }
+ },
+ "mem": {
+ "total_in_bytes": 16106127360,
+ "adjusted_total_in_bytes": 15728640000,
+ "free_in_bytes": 765980672,
+ "used_in_bytes": 15340146688,
+ "free_percent": 5,
+ "used_percent": 95
+ },
+ "swap": {
+ "total_in_bytes": 0,
+ "free_in_bytes": 0,
+ "used_in_bytes": 0
+ },
+ "cgroup": {
+ "cpuacct": {
+ "control_group": "/",
+ "usage_nanos": 2632999205547019
+ },
+ "cpu": {
+ "control_group": "/",
+ "cfs_period_micros": 100000,
+ "cfs_quota_micros": 206897,
+ "stat": {
+ "number_of_elapsed_periods": 110090960,
+ "number_of_times_throttled": 389008,
+ "time_throttled_nanos": 34498461943176
+ }
+ },
+ "memory": {
+ "control_group": "/",
+ "limit_in_bytes": "16106127360",
+ "usage_in_bytes": "15340146688"
+ }
+ }
+ },
+ "process": {
+ "timestamp": 1687866153493,
+ "open_file_descriptors": 1156,
+ "max_file_descriptors": 1048576,
+ "cpu": {
+ "percent": 10,
+ "total_in_millis": 2575977020
+ },
+ "mem": {
+ "total_virtual_in_bytes": 117447507968
+ }
+ },
+ "jvm": {
+ "timestamp": 1687866153494,
+ "uptime_in_millis": 11285573694,
+ "mem": {
+ "heap_used_in_bytes": 5059735552,
+ "heap_used_percent": 64,
+ "heap_committed_in_bytes": 7864320000,
+ "heap_max_in_bytes": 7864320000,
+ "non_heap_used_in_bytes": 343633376,
+ "non_heap_committed_in_bytes": 350355456,
+ "pools": {
+ "young": {
+ "used_in_bytes": 3351248896,
+ "max_in_bytes": 0,
+ "peak_used_in_bytes": 4718592000,
+ "peak_max_in_bytes": 0
+ },
+ "old": {
+ "used_in_bytes": 1354067968,
+ "max_in_bytes": 7864320000,
+ "peak_used_in_bytes": 2444862976,
+ "peak_max_in_bytes": 7864320000
+ },
+ "survivor": {
+ "used_in_bytes": 354418688,
+ "max_in_bytes": 0,
+ "peak_used_in_bytes": 591396864,
+ "peak_max_in_bytes": 0
+ }
+ }
+ },
+ "threads": {
+ "count": 112,
+ "peak_count": 117
+ },
+ "gc": {
+ "collectors": {
+ "young": {
+ "collection_count": 78652,
+ "collection_time_in_millis": 6014274
+ },
+ "old": {
+ "collection_count": 0,
+ "collection_time_in_millis": 0
+ }
+ }
+ },
+ "buffer_pools": {
+ "mapped": {
+ "count": 858,
+ "used_in_bytes": 103114998135,
+ "total_capacity_in_bytes": 103114998135
+ },
+ "direct": {
+ "count": 94,
+ "used_in_bytes": 4654850,
+ "total_capacity_in_bytes": 4654848
+ },
+ "mapped - 'non-volatile memory'": {
+ "count": 0,
+ "used_in_bytes": 0,
+ "total_capacity_in_bytes": 0
+ }
+ },
+ "classes": {
+ "current_loaded_count": 36006,
+ "total_loaded_count": 37829,
+ "total_unloaded_count": 1823
+ }
+ },
+ "thread_pool": {
+ "analyze": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "auto_complete": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "azure_event_loop": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ccr": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "cluster_coordination": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 1,
+ "completed": 1130214
+ },
+ "fetch_shard_started": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "fetch_shard_store": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 6,
+ "completed": 38
+ },
+ "flush": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 89882
+ },
+ "force_merge": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 1,
+ "completed": 143
+ },
+ "generic": {
+ "threads": 46,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 46,
+ "completed": 89714323
+ },
+ "get": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "management": {
+ "threads": 3,
+ "queue": 0,
+ "active": 1,
+ "rejected": 0,
+ "largest": 3,
+ "completed": 416760833
+ },
+ "ml_datafeed": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_job_comms": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_native_inference_comms": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_utility": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 22543494
+ },
+ "refresh": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 885068032
+ },
+ "repository_azure": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "rollup_indexing": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "search": {
+ "threads": 5,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 5,
+ "completed": 167558078
+ },
+ "search_coordination": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 14101082
+ },
+ "search_throttled": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "searchable_snapshots_cache_fetch_async": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "searchable_snapshots_cache_prewarming": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "security-crypto": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "security-token-key": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "snapshot": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 806551
+ },
+ "snapshot_meta": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "system_critical_read": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 2350761
+ },
+ "system_critical_write": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 7635
+ },
+ "system_read": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 31141408
+ },
+ "system_write": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 7400801
+ },
+ "vector_tile_generation": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "warmer": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 36136481
+ },
+ "watcher": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "write": {
+ "threads": 3,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 3,
+ "completed": 575332197
+ }
+ },
+ "fs": {
+ "timestamp": 1687866153494,
+ "total": {
+ "total_in_bytes": 483183820800,
+ "free_in_bytes": 292886683648,
+ "available_in_bytes": 292886683648
+ },
+ "data": [
+ {
+ "path": "/app/data",
+ "mount": "/app (/dev/mapper/lxc-data)",
+ "type": "xfs",
+ "total_in_bytes": 483183820800,
+ "free_in_bytes": 292886683648,
+ "available_in_bytes": 292886683648
+ }
+ ],
+ "io_stats": {
+ "devices": [
+ {
+ "device_name": "dm-1",
+ "operations": 6160354146,
+ "read_operations": 376563348,
+ "write_operations": 5783790798,
+ "read_kilobytes": 31264865276,
+ "write_kilobytes": 100978561519,
+ "io_time_in_millis": 183984060
+ }
+ ],
+ "total": {
+ "operations": 6160354146,
+ "read_operations": 376563348,
+ "write_operations": 5783790798,
+ "read_kilobytes": 31264865276,
+ "write_kilobytes": 100978561519,
+ "io_time_in_millis": 183984060
+ }
+ }
+ },
+ "transport": {
+ "server_open": 24,
+ "total_outbound_connections": 11,
+ "rx_count": 1300324276,
+ "rx_size_in_bytes": 1789333458217,
+ "tx_count": 1300324275,
+ "tx_size_in_bytes": 2927487680282,
+ "inbound_handling_time_histogram": [
+ {
+ "lt_millis": 1,
+ "count": 1256115237
+ },
+ {
+ "ge_millis": 1,
+ "lt_millis": 2,
+ "count": 202073370
+ },
+ {
+ "ge_millis": 2,
+ "lt_millis": 4,
+ "count": 3242412
+ },
+ {
+ "ge_millis": 4,
+ "lt_millis": 8,
+ "count": 454921
+ },
+ {
+ "ge_millis": 8,
+ "lt_millis": 16,
+ "count": 173321
+ },
+ {
+ "ge_millis": 16,
+ "lt_millis": 32,
+ "count": 39045
+ },
+ {
+ "ge_millis": 32,
+ "lt_millis": 64,
+ "count": 14154
+ },
+ {
+ "ge_millis": 64,
+ "lt_millis": 128,
+ "count": 75261
+ },
+ {
+ "ge_millis": 128,
+ "lt_millis": 256,
+ "count": 1534
+ },
+ {
+ "ge_millis": 256,
+ "lt_millis": 512,
+ "count": 76
+ },
+ {
+ "ge_millis": 512,
+ "lt_millis": 1024,
+ "count": 3
+ },
+ {
+ "ge_millis": 1024,
+ "lt_millis": 2048,
+ "count": 0
+ },
+ {
+ "ge_millis": 2048,
+ "lt_millis": 4096,
+ "count": 0
+ },
+ {
+ "ge_millis": 4096,
+ "lt_millis": 8192,
+ "count": 0
+ },
+ {
+ "ge_millis": 8192,
+ "lt_millis": 16384,
+ "count": 0
+ },
+ {
+ "ge_millis": 16384,
+ "lt_millis": 32768,
+ "count": 0
+ },
+ {
+ "ge_millis": 32768,
+ "lt_millis": 65536,
+ "count": 0
+ },
+ {
+ "ge_millis": 65536,
+ "count": 0
+ }
+ ],
+ "outbound_handling_time_histogram": [
+ {
+ "lt_millis": 1,
+ "count": 1128384926
+ },
+ {
+ "ge_millis": 1,
+ "lt_millis": 2,
+ "count": 161841158
+ },
+ {
+ "ge_millis": 2,
+ "lt_millis": 4,
+ "count": 6818465
+ },
+ {
+ "ge_millis": 4,
+ "lt_millis": 8,
+ "count": 2563517
+ },
+ {
+ "ge_millis": 8,
+ "lt_millis": 16,
+ "count": 445765
+ },
+ {
+ "ge_millis": 16,
+ "lt_millis": 32,
+ "count": 122453
+ },
+ {
+ "ge_millis": 32,
+ "lt_millis": 64,
+ "count": 95805
+ },
+ {
+ "ge_millis": 64,
+ "lt_millis": 128,
+ "count": 49979
+ },
+ {
+ "ge_millis": 128,
+ "lt_millis": 256,
+ "count": 1930
+ },
+ {
+ "ge_millis": 256,
+ "lt_millis": 512,
+ "count": 250
+ },
+ {
+ "ge_millis": 512,
+ "lt_millis": 1024,
+ "count": 27
+ },
+ {
+ "ge_millis": 1024,
+ "lt_millis": 2048,
+ "count": 0
+ },
+ {
+ "ge_millis": 2048,
+ "lt_millis": 4096,
+ "count": 0
+ },
+ {
+ "ge_millis": 4096,
+ "lt_millis": 8192,
+ "count": 0
+ },
+ {
+ "ge_millis": 8192,
+ "lt_millis": 16384,
+ "count": 0
+ },
+ {
+ "ge_millis": 16384,
+ "lt_millis": 32768,
+ "count": 0
+ },
+ {
+ "ge_millis": 32768,
+ "lt_millis": 65536,
+ "count": 0
+ },
+ {
+ "ge_millis": 65536,
+ "count": 0
+ }
+ ]
+ },
+ "http": {
+ "current_open": 75,
+ "total_opened": 779352
+ },
+ "breakers": {
+ "fielddata": {
+ "limit_size_in_bytes": 3145728000,
+ "limit_size": "2.9gb",
+ "estimated_size_in_bytes": 600,
+ "estimated_size": "600b",
+ "overhead": 1.03,
+ "tripped": 0
+ },
+ "request": {
+ "limit_size_in_bytes": 4718592000,
+ "limit_size": "4.3gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "inflight_requests": {
+ "limit_size_in_bytes": 7864320000,
+ "limit_size": "7.3gb",
+ "estimated_size_in_bytes": 1464,
+ "estimated_size": "1.4kb",
+ "overhead": 2,
+ "tripped": 0
+ },
+ "model_inference": {
+ "limit_size_in_bytes": 3932160000,
+ "limit_size": "3.6gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "eql_sequence": {
+ "limit_size_in_bytes": 3932160000,
+ "limit_size": "3.6gb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "parent": {
+ "limit_size_in_bytes": 7471104000,
+ "limit_size": "6.9gb",
+ "estimated_size_in_bytes": 5059735552,
+ "estimated_size": "4.7gb",
+ "overhead": 1,
+ "tripped": 0
+ }
+ }
+ },
+ "k_AifYMWQTykjUq3pgE_-w": {
+ "timestamp": 1687866153482,
+ "name": "tiebreaker-0000000002",
+ "transport_address": "172.25.242.111:19393",
+ "host": "172.25.242.111",
+ "ip": "172.25.242.111:19393",
+ "roles": [
+ "master",
+ "voting_only"
+ ],
+ "attributes": {
+ "logical_availability_zone": "tiebreaker",
+ "availability_zone": "us-east-1b",
+ "server_name": "tiebreaker-0000000002.36928dce44074ceba64d7b3d698443a7",
+ "xpack.installed": "true",
+ "data": "hot",
+ "instance_configuration": "aws.es.master.c5d",
+ "region": "us-east-1"
+ },
+ "indices": {
+ "docs": {
+ "count": 0,
+ "deleted": 0
+ },
+ "shard_stats": {
+ "total_count": 0
+ },
+ "store": {
+ "size_in_bytes": 0,
+ "total_data_set_size_in_bytes": 0,
+ "reserved_in_bytes": 0
+ },
+ "indexing": {
+ "index_total": 0,
+ "index_time_in_millis": 0,
+ "index_current": 0,
+ "index_failed": 0,
+ "delete_total": 0,
+ "delete_time_in_millis": 0,
+ "delete_current": 0,
+ "noop_update_total": 0,
+ "is_throttled": false,
+ "throttle_time_in_millis": 0
+ },
+ "get": {
+ "total": 0,
+ "time_in_millis": 0,
+ "exists_total": 0,
+ "exists_time_in_millis": 0,
+ "missing_total": 0,
+ "missing_time_in_millis": 0,
+ "current": 0
+ },
+ "search": {
+ "open_contexts": 0,
+ "query_total": 0,
+ "query_time_in_millis": 0,
+ "query_current": 0,
+ "fetch_total": 0,
+ "fetch_time_in_millis": 0,
+ "fetch_current": 0,
+ "scroll_total": 0,
+ "scroll_time_in_millis": 0,
+ "scroll_current": 0,
+ "suggest_total": 0,
+ "suggest_time_in_millis": 0,
+ "suggest_current": 0
+ },
+ "merges": {
+ "current": 0,
+ "current_docs": 0,
+ "current_size_in_bytes": 0,
+ "total": 0,
+ "total_time_in_millis": 0,
+ "total_docs": 0,
+ "total_size_in_bytes": 0,
+ "total_stopped_time_in_millis": 0,
+ "total_throttled_time_in_millis": 0,
+ "total_auto_throttle_in_bytes": 0
+ },
+ "refresh": {
+ "total": 0,
+ "total_time_in_millis": 0,
+ "external_total": 0,
+ "external_total_time_in_millis": 0,
+ "listeners": 0
+ },
+ "flush": {
+ "total": 0,
+ "periodic": 0,
+ "total_time_in_millis": 0
+ },
+ "warmer": {
+ "current": 0,
+ "total": 0,
+ "total_time_in_millis": 0
+ },
+ "query_cache": {
+ "memory_size_in_bytes": 0,
+ "total_count": 0,
+ "hit_count": 0,
+ "miss_count": 0,
+ "cache_size": 0,
+ "cache_count": 0,
+ "evictions": 0
+ },
+ "fielddata": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0
+ },
+ "completion": {
+ "size_in_bytes": 0
+ },
+ "segments": {
+ "count": 0,
+ "memory_in_bytes": 0,
+ "terms_memory_in_bytes": 0,
+ "stored_fields_memory_in_bytes": 0,
+ "term_vectors_memory_in_bytes": 0,
+ "norms_memory_in_bytes": 0,
+ "points_memory_in_bytes": 0,
+ "doc_values_memory_in_bytes": 0,
+ "index_writer_memory_in_bytes": 0,
+ "version_map_memory_in_bytes": 0,
+ "fixed_bit_set_memory_in_bytes": 0,
+ "max_unsafe_auto_id_timestamp": -9223372036854776000,
+ "file_sizes": {}
+ },
+ "translog": {
+ "operations": 0,
+ "size_in_bytes": 0,
+ "uncommitted_operations": 0,
+ "uncommitted_size_in_bytes": 0,
+ "earliest_last_modified_age": 0
+ },
+ "request_cache": {
+ "memory_size_in_bytes": 0,
+ "evictions": 0,
+ "hit_count": 0,
+ "miss_count": 0
+ },
+ "recovery": {
+ "current_as_source": 0,
+ "current_as_target": 0,
+ "throttle_time_in_millis": 0
+ },
+ "bulk": {
+ "total_operations": 0,
+ "total_time_in_millis": 0,
+ "total_size_in_bytes": 0,
+ "avg_time_in_millis": 0,
+ "avg_size_in_bytes": 0
+ }
+ },
+ "os": {
+ "timestamp": 1687866153483,
+ "cpu": {
+ "percent": 0,
+ "load_average": {
+ "1m": 3.18,
+ "5m": 2.94,
+ "15m": 2.54
+ }
+ },
+ "mem": {
+ "total_in_bytes": 1073741824,
+ "adjusted_total_in_bytes": 696254464,
+ "free_in_bytes": 101437440,
+ "used_in_bytes": 972304384,
+ "free_percent": 9,
+ "used_percent": 91
+ },
+ "swap": {
+ "total_in_bytes": 536870912,
+ "free_in_bytes": 536870912,
+ "used_in_bytes": 0
+ },
+ "cgroup": {
+ "cpuacct": {
+ "control_group": "/",
+ "usage_nanos": 281986757031142
+ },
+ "cpu": {
+ "control_group": "/",
+ "cfs_period_micros": 100000,
+ "cfs_quota_micros": 847058,
+ "stat": {
+ "number_of_elapsed_periods": 133754533,
+ "number_of_times_throttled": 226,
+ "time_throttled_nanos": 6732992268
+ }
+ },
+ "memory": {
+ "control_group": "/",
+ "limit_in_bytes": "1073741824",
+ "usage_in_bytes": "972304384"
+ }
+ }
+ },
+ "process": {
+ "timestamp": 1687866153483,
+ "open_file_descriptors": 557,
+ "max_file_descriptors": 1048576,
+ "cpu": {
+ "percent": 0,
+ "total_in_millis": 182462990
+ },
+ "mem": {
+ "total_virtual_in_bytes": 6049042432
+ }
+ },
+ "jvm": {
+ "timestamp": 1687866153484,
+ "uptime_in_millis": 23671101768,
+ "mem": {
+ "heap_used_in_bytes": 178362704,
+ "heap_used_percent": 63,
+ "heap_committed_in_bytes": 281018368,
+ "heap_max_in_bytes": 281018368,
+ "non_heap_used_in_bytes": 221757752,
+ "non_heap_committed_in_bytes": 231145472,
+ "pools": {
+ "young": {
+ "used_in_bytes": 71303168,
+ "max_in_bytes": 0,
+ "peak_used_in_bytes": 163577856,
+ "peak_max_in_bytes": 0
+ },
+ "old": {
+ "used_in_bytes": 106872320,
+ "max_in_bytes": 281018368,
+ "peak_used_in_bytes": 246953424,
+ "peak_max_in_bytes": 281018368
+ },
+ "survivor": {
+ "used_in_bytes": 187216,
+ "max_in_bytes": 0,
+ "peak_used_in_bytes": 20971520,
+ "peak_max_in_bytes": 0
+ }
+ }
+ },
+ "threads": {
+ "count": 45,
+ "peak_count": 47
+ },
+ "gc": {
+ "collectors": {
+ "young": {
+ "collection_count": 342994,
+ "collection_time_in_millis": 768917
+ },
+ "old": {
+ "collection_count": 0,
+ "collection_time_in_millis": 0
+ }
+ }
+ },
+ "buffer_pools": {
+ "mapped": {
+ "count": 0,
+ "used_in_bytes": 0,
+ "total_capacity_in_bytes": 0
+ },
+ "direct": {
+ "count": 19,
+ "used_in_bytes": 2142216,
+ "total_capacity_in_bytes": 2142214
+ },
+ "mapped - 'non-volatile memory'": {
+ "count": 0,
+ "used_in_bytes": 0,
+ "total_capacity_in_bytes": 0
+ }
+ },
+ "classes": {
+ "current_loaded_count": 29581,
+ "total_loaded_count": 31244,
+ "total_unloaded_count": 1663
+ }
+ },
+ "thread_pool": {
+ "analyze": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "auto_complete": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "azure_event_loop": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ccr": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "cluster_coordination": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 1,
+ "completed": 1708790
+ },
+ "fetch_shard_started": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "fetch_shard_store": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "flush": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "force_merge": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "generic": {
+ "threads": 9,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 9,
+ "completed": 78631938
+ },
+ "get": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "management": {
+ "threads": 2,
+ "queue": 0,
+ "active": 1,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 86206936
+ },
+ "ml_datafeed": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_job_comms": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_native_inference_comms": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "ml_utility": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 47308828
+ },
+ "refresh": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "repository_azure": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "rollup_indexing": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "search": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "search_coordination": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "search_throttled": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "searchable_snapshots_cache_fetch_async": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "searchable_snapshots_cache_prewarming": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "security-crypto": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "security-token-key": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "snapshot": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "snapshot_meta": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "system_critical_read": {
+ "threads": 1,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 1,
+ "completed": 1
+ },
+ "system_critical_write": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "system_read": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "system_write": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "vector_tile_generation": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "warmer": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "watcher": {
+ "threads": 0,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 0,
+ "completed": 0
+ },
+ "write": {
+ "threads": 2,
+ "queue": 0,
+ "active": 0,
+ "rejected": 0,
+ "largest": 2,
+ "completed": 2
+ }
+ },
+ "fs": {
+ "timestamp": 1687866153484,
+ "total": {
+ "total_in_bytes": 12884901888,
+ "free_in_bytes": 12789022720,
+ "available_in_bytes": 12789022720
+ },
+ "data": [
+ {
+ "path": "/app/data",
+ "mount": "/app (/dev/mapper/lxc-data)",
+ "type": "xfs",
+ "total_in_bytes": 12884901888,
+ "free_in_bytes": 12789022720,
+ "available_in_bytes": 12789022720
+ }
+ ],
+ "io_stats": {
+ "devices": [
+ {
+ "device_name": "dm-1",
+ "operations": 1025442756,
+ "read_operations": 12887271,
+ "write_operations": 1012555485,
+ "read_kilobytes": 666215440,
+ "write_kilobytes": 20200424566,
+ "io_time_in_millis": 547217376
+ }
+ ],
+ "total": {
+ "operations": 1025442756,
+ "read_operations": 12887271,
+ "write_operations": 1012555485,
+ "read_kilobytes": 666215440,
+ "write_kilobytes": 20200424566,
+ "io_time_in_millis": 547217376
+ }
+ }
+ },
+ "transport": {
+ "server_open": 26,
+ "total_outbound_connections": 20,
+ "rx_count": 107632996,
+ "rx_size_in_bytes": 180620082152,
+ "tx_count": 107633007,
+ "tx_size_in_bytes": 420999501235,
+ "inbound_handling_time_histogram": [
+ {
+ "lt_millis": 1,
+ "count": 146874447
+ },
+ {
+ "ge_millis": 1,
+ "lt_millis": 2,
+ "count": 16292686
+ },
+ {
+ "ge_millis": 2,
+ "lt_millis": 4,
+ "count": 50826
+ },
+ {
+ "ge_millis": 4,
+ "lt_millis": 8,
+ "count": 1965
+ },
+ {
+ "ge_millis": 8,
+ "lt_millis": 16,
+ "count": 187
+ },
+ {
+ "ge_millis": 16,
+ "lt_millis": 32,
+ "count": 84
+ },
+ {
+ "ge_millis": 32,
+ "lt_millis": 64,
+ "count": 2
+ },
+ {
+ "ge_millis": 64,
+ "lt_millis": 128,
+ "count": 65800
+ },
+ {
+ "ge_millis": 128,
+ "lt_millis": 256,
+ "count": 14
+ },
+ {
+ "ge_millis": 256,
+ "lt_millis": 512,
+ "count": 0
+ },
+ {
+ "ge_millis": 512,
+ "lt_millis": 1024,
+ "count": 0
+ },
+ {
+ "ge_millis": 1024,
+ "lt_millis": 2048,
+ "count": 0
+ },
+ {
+ "ge_millis": 2048,
+ "lt_millis": 4096,
+ "count": 0
+ },
+ {
+ "ge_millis": 4096,
+ "lt_millis": 8192,
+ "count": 0
+ },
+ {
+ "ge_millis": 8192,
+ "lt_millis": 16384,
+ "count": 0
+ },
+ {
+ "ge_millis": 16384,
+ "lt_millis": 32768,
+ "count": 0
+ },
+ {
+ "ge_millis": 32768,
+ "lt_millis": 65536,
+ "count": 0
+ },
+ {
+ "ge_millis": 65536,
+ "count": 0
+ }
+ ],
+ "outbound_handling_time_histogram": [
+ {
+ "lt_millis": 1,
+ "count": 97208157
+ },
+ {
+ "ge_millis": 1,
+ "lt_millis": 2,
+ "count": 10385725
+ },
+ {
+ "ge_millis": 2,
+ "lt_millis": 4,
+ "count": 28647
+ },
+ {
+ "ge_millis": 4,
+ "lt_millis": 8,
+ "count": 6334
+ },
+ {
+ "ge_millis": 8,
+ "lt_millis": 16,
+ "count": 1042
+ },
+ {
+ "ge_millis": 16,
+ "lt_millis": 32,
+ "count": 818
+ },
+ {
+ "ge_millis": 32,
+ "lt_millis": 64,
+ "count": 1556
+ },
+ {
+ "ge_millis": 64,
+ "lt_millis": 128,
+ "count": 725
+ },
+ {
+ "ge_millis": 128,
+ "lt_millis": 256,
+ "count": 3
+ },
+ {
+ "ge_millis": 256,
+ "lt_millis": 512,
+ "count": 0
+ },
+ {
+ "ge_millis": 512,
+ "lt_millis": 1024,
+ "count": 0
+ },
+ {
+ "ge_millis": 1024,
+ "lt_millis": 2048,
+ "count": 0
+ },
+ {
+ "ge_millis": 2048,
+ "lt_millis": 4096,
+ "count": 0
+ },
+ {
+ "ge_millis": 4096,
+ "lt_millis": 8192,
+ "count": 0
+ },
+ {
+ "ge_millis": 8192,
+ "lt_millis": 16384,
+ "count": 0
+ },
+ {
+ "ge_millis": 16384,
+ "lt_millis": 32768,
+ "count": 0
+ },
+ {
+ "ge_millis": 32768,
+ "lt_millis": 65536,
+ "count": 0
+ },
+ {
+ "ge_millis": 65536,
+ "count": 0
+ }
+ ]
+ },
+ "http": {
+ "current_open": 14,
+ "total_opened": 13364
+ },
+ "breakers": {
+ "model_inference": {
+ "limit_size_in_bytes": 140509184,
+ "limit_size": "134mb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "eql_sequence": {
+ "limit_size_in_bytes": 140509184,
+ "limit_size": "134mb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "fielddata": {
+ "limit_size_in_bytes": 112407347,
+ "limit_size": "107.1mb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1.03,
+ "tripped": 0
+ },
+ "request": {
+ "limit_size_in_bytes": 168611020,
+ "limit_size": "160.7mb",
+ "estimated_size_in_bytes": 0,
+ "estimated_size": "0b",
+ "overhead": 1,
+ "tripped": 0
+ },
+ "inflight_requests": {
+ "limit_size_in_bytes": 281018368,
+ "limit_size": "268mb",
+ "estimated_size_in_bytes": 1464,
+ "estimated_size": "1.4kb",
+ "overhead": 2,
+ "tripped": 0
+ },
+ "parent": {
+ "limit_size_in_bytes": 266967449,
+ "limit_size": "254.5mb",
+ "estimated_size_in_bytes": 178362704,
+ "estimated_size": "170mb",
+ "overhead": 1,
+ "tripped": 0
+ }
+ }
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/envoy/README.md b/src/go/plugin/go.d/modules/envoy/README.md
new file mode 120000
index 000000000..a0d3a2a2c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/README.md
@@ -0,0 +1 @@
+integrations/envoy.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/envoy/charts.go b/src/go/plugin/go.d/modules/envoy/charts.go
new file mode 100644
index 000000000..3abe10e42
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/charts.go
@@ -0,0 +1,870 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package envoy
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+const (
+ prioServerState = module.Priority + iota
+ prioServerMemoryAllocatedSize
+ prioServerMemoryHeapSize
+ prioServerMemoryPhysicalSize
+ prioServerConnectionsCount
+ prioServerParentConnectionsCount
+
+ prioClusterManagerClustersCount
+ prioClusterManagerClusterChangesRate
+ prioClusterManagerClusterUpdatesRate
+	prioClusterManagerClusterUpdatesViaMergeRate
+ prioClusterManagerClusterUpdatesMergeCancelledRate
+	prioClusterManagerClusterUpdatesOutOfMergeWindowRate
+
+ prioClusterMembershipEndpointsCount
+ prioClusterMembershipChangesRate
+ prioClusterMembershipUpdatesRate
+
+ prioClusterUpstreamActiveConnectionsCount
+ prioClusterUpstreamConnectionsRate
+ prioClusterUpstreamHTTPConnectionsRate
+ prioClusterUpstreamDestroyedConnectionsRate
+ prioClusterUpstreamFailedConnectionsRate
+ prioClusterUpstreamTimedOutConnectionsRate
+ prioClusterUpstreamTrafficRate
+ prioClusterUpstreamBufferedSize
+
+ prioClusterUpstreamActiveRequestsCount
+ prioClusterUpstreamRequestsRate
+ prioClusterUpstreamFailedRequestsRate
+ prioClusterUpstreamActivePendingRequestsCount
+ prioClusterUpstreamPendingRequestsRate
+ prioClusterUpstreamPendingFailedRequestsRate
+ prioClusterUpstreamRequestRetriesRate
+ prioClusterUpstreamRequestSuccessRetriesRate
+ prioClusterUpstreamRequestBackoffRetriesRate
+
+ prioListenerManagerListenerCount
+ prioListenerManagerListenerChangesRate
+ prioListenerManagerListenerObjectEventsRate
+
+ prioListenerAdminDownstreamActiveConnectionsCount
+ prioListenerAdminDownstreamConnectionsRate
+ prioListenerAdminDownstreamDestroyedConnectionsRate
+ prioListenerAdminDownstreamTimedOutConnectionsRate
+ prioListenerAdminDownstreamRejectedConnectionsRate
+ prioListenerAdminDownstreamFilterClosedByRemoteConnectionsRate
+ prioListenerAdminDownstreamFilterReadErrorsRate
+ prioListenerAdminDownstreamActiveSocketsCount
+ prioListenerAdminDownstreamTimedOutSocketsRate
+
+ prioListenerDownstreamActiveConnectionsCount
+ prioListenerDownstreamConnectionsRate
+ prioListenerDownstreamDestroyedConnectionsRate
+ prioListenerDownstreamTimedOutConnectionsRate
+ prioListenerDownstreamRejectedConnectionsRate
+ prioListenerDownstreamFilterClosedByRemoteConnectionsRate
+ prioListenerDownstreamFilterReadErrorsRate
+ prioListenerDownstreamActiveSocketsCount
+ prioListenerDownstreamTimedOutSocketsRate
+
+ prioServerUptime
+)
+
+var (
+ serverChartsTmpl = module.Charts{
+ serverStateChartTmpl.Copy(),
+
+ serverMemoryAllocatedSizeChartTmpl.Copy(),
+ serverMemoryHeapSizeChartTmpl.Copy(),
+ serverMemoryPhysicalSizeChartTmpl.Copy(),
+
+ serverConnectionsCountChartTmpl.Copy(),
+ serverParentConnectionsCountChartTmpl.Copy(),
+
+ serverUptimeChartTmpl.Copy(),
+ }
+ serverStateChartTmpl = module.Chart{
+ ID: "server_state_%s",
+ Title: "Server current state",
+ Units: "state",
+ Fam: "server",
+ Ctx: "envoy.server_state",
+ Priority: prioServerState,
+ Dims: module.Dims{
+ {ID: "envoy_server_state_live_%s", Name: "live"},
+ {ID: "envoy_server_state_draining_%s", Name: "draining"},
+ {ID: "envoy_server_state_pre_initializing_%s", Name: "pre_initializing"},
+ {ID: "envoy_server_state_initializing_%s", Name: "initializing"},
+ },
+ }
+ serverConnectionsCountChartTmpl = module.Chart{
+ ID: "server_connections_%s",
+ Title: "Server current connections",
+ Units: "connections",
+ Fam: "server",
+ Ctx: "envoy.server_connections_count",
+ Priority: prioServerConnectionsCount,
+ Dims: module.Dims{
+ {ID: "envoy_server_total_connections_%s", Name: "connections"},
+ },
+ }
+ serverParentConnectionsCountChartTmpl = module.Chart{
+ ID: "server_parent_connections_%s",
+ Title: "Server current parent connections",
+ Units: "connections",
+ Fam: "server",
+ Ctx: "envoy.server_parent_connections_count",
+ Priority: prioServerParentConnectionsCount,
+ Dims: module.Dims{
+ {ID: "envoy_server_parent_connections_%s", Name: "connections"},
+ },
+ }
+ serverMemoryAllocatedSizeChartTmpl = module.Chart{
+ ID: "server_memory_allocated_size_%s",
+ Title: "Server memory allocated size",
+ Units: "bytes",
+ Fam: "server",
+ Ctx: "envoy.server_memory_allocated_size",
+ Priority: prioServerMemoryAllocatedSize,
+ Dims: module.Dims{
+ {ID: "envoy_server_memory_allocated_%s", Name: "allocated"},
+ },
+ }
+ serverMemoryHeapSizeChartTmpl = module.Chart{
+ ID: "server_memory_heap_size_%s",
+ Title: "Server memory heap size",
+ Units: "bytes",
+ Fam: "server",
+ Ctx: "envoy.server_memory_heap_size",
+ Priority: prioServerMemoryHeapSize,
+ Dims: module.Dims{
+ {ID: "envoy_server_memory_heap_size_%s", Name: "heap"},
+ },
+ }
+ serverMemoryPhysicalSizeChartTmpl = module.Chart{
+ ID: "server_memory_physical_size_%s",
+ Title: "Server memory physical size",
+ Units: "bytes",
+ Fam: "server",
+ Ctx: "envoy.server_memory_physical_size",
+ Priority: prioServerMemoryPhysicalSize,
+ Dims: module.Dims{
+ {ID: "envoy_server_memory_physical_size_%s", Name: "physical"},
+ },
+ }
+ serverUptimeChartTmpl = module.Chart{
+ ID: "server_uptime_%s",
+ Title: "Server uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "envoy.server_uptime",
+ Priority: prioServerUptime,
+ Dims: module.Dims{
+ {ID: "envoy_server_uptime_%s", Name: "uptime"},
+ },
+ }
+)
+
+var (
+ clusterManagerChartsTmpl = module.Charts{
+ clusterManagerClusterCountChartTmpl.Copy(),
+ clusterManagerClusterChangesRateChartTmpl.Copy(),
+ clusterManagerClusterUpdatesRateChartTmpl.Copy(),
+ clusterManagerClusterUpdatesViaMergeRateChartTmpl.Copy(),
+ clusterManagerClusterUpdatesMergeCancelledRateChartTmpl.Copy(),
+ clusterManagerClusterUpdatesOutOfMergeWindowRateChartTmpl.Copy(),
+ }
+ clusterManagerClusterCountChartTmpl = module.Chart{
+ ID: "cluster_manager_cluster_count_%s",
+ Title: "Cluster manager current clusters",
+ Units: "clusters",
+ Fam: "cluster mgr",
+ Ctx: "envoy.cluster_manager_cluster_count",
+ Priority: prioClusterManagerClustersCount,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_manager_active_clusters_%s", Name: "active"},
+ {ID: "envoy_cluster_manager_warming_clusters_%s", Name: "not_active"},
+ },
+ }
+ clusterManagerClusterChangesRateChartTmpl = module.Chart{
+ ID: "cluster_manager_cluster_changes_%s",
+ Title: "Cluster manager cluster changes",
+ Units: "clusters/s",
+ Fam: "cluster mgr",
+ Ctx: "envoy.cluster_manager_cluster_changes_rate",
+ Priority: prioClusterManagerClusterChangesRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_manager_cluster_added_%s", Name: "added", Algo: module.Incremental},
+ {ID: "envoy_cluster_manager_cluster_modified_%s", Name: "modified", Algo: module.Incremental},
+ {ID: "envoy_cluster_manager_cluster_removed_%s", Name: "removed", Algo: module.Incremental},
+ },
+ }
+ clusterManagerClusterUpdatesRateChartTmpl = module.Chart{
+ ID: "cluster_manager_cluster_updates_%s",
+ Title: "Cluster manager updates",
+ Units: "updates/s",
+ Fam: "cluster mgr",
+ Ctx: "envoy.cluster_manager_cluster_updates_rate",
+ Priority: prioClusterManagerClusterUpdatesRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_manager_cluster_updated_%s", Name: "cluster", Algo: module.Incremental},
+ },
+ }
+ clusterManagerClusterUpdatesViaMergeRateChartTmpl = module.Chart{
+ ID: "cluster_manager_cluster_updated_via_merge_%s",
+ Title: "Cluster manager updates applied as merged updates",
+ Units: "updates/s",
+ Fam: "cluster mgr",
+ Ctx: "envoy.cluster_manager_cluster_updated_via_merge_rate",
+		Priority: prioClusterManagerClusterUpdatesViaMergeRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_manager_cluster_updated_via_merge_%s", Name: "via_merge", Algo: module.Incremental},
+ },
+ }
+ clusterManagerClusterUpdatesMergeCancelledRateChartTmpl = module.Chart{
+ ID: "cluster_manager_update_merge_cancelled_%s",
+ Title: "Cluster manager cancelled merged updates",
+ Units: "updates/s",
+ Fam: "cluster mgr",
+ Ctx: "envoy.cluster_manager_update_merge_cancelled_rate",
+ Priority: prioClusterManagerClusterUpdatesMergeCancelledRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_manager_update_merge_cancelled_%s", Name: "merge_cancelled", Algo: module.Incremental},
+ },
+ }
+ clusterManagerClusterUpdatesOutOfMergeWindowRateChartTmpl = module.Chart{
+ ID: "cluster_manager_update_out_of_merge_window_%s",
+ Title: "Cluster manager out of a merge window updates",
+ Units: "updates/s",
+ Fam: "cluster mgr",
+ Ctx: "envoy.cluster_manager_update_out_of_merge_window_rate",
+		Priority: prioClusterManagerClusterUpdatesOutOfMergeWindowRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_manager_update_out_of_merge_window_%s", Name: "out_of_merge_window", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ clusterUpstreamChartsTmpl = module.Charts{
+ clusterUpstreamActiveConnectionsCountChartTmpl.Copy(),
+ clusterUpstreamConnectionsRateChartTmpl.Copy(),
+ clusterUpstreamHTTPConnectionsRateChartTmpl.Copy(),
+ clusterUpstreamDestroyedConnectionsRateChartTmpl.Copy(),
+ clusterUpstreamFailedConnectionsRateChartTmpl.Copy(),
+ clusterUpstreamTimedOutConnectionsRateChartTmpl.Copy(),
+ clusterUpstreamTrafficRateChartTmpl.Copy(),
+ clusterUpstreamBufferedSizeChartTmpl.Copy(),
+
+ clusterUpstreamActiveRequestsCountChartTmpl.Copy(),
+ clusterUpstreamRequestsRateChartTmpl.Copy(),
+ clusterUpstreamFailedRequestsRateChartTmpl.Copy(),
+ clusterUpstreamActivePendingRequestsCountChartTmpl.Copy(),
+ clusterUpstreamPendingRequestsRateChartTmpl.Copy(),
+ clusterUpstreamPendingFailedRequestsRateChartTmpl.Copy(),
+ clusterUpstreamRequestRetriesRateChartTmpl.Copy(),
+ clusterUpstreamRequestSuccessRetriesRateChartTmpl.Copy(),
+ clusterUpstreamRequestRetriesBackoffRateChartTmpl.Copy(),
+
+ clusterMembershipEndpointsCountChartTmpl.Copy(),
+ clusterMembershipChangesRateChartTmpl.Copy(),
+ clusterMembershipUpdatesRateChartTmpl.Copy(),
+ }
+
+ clusterUpstreamActiveConnectionsCountChartTmpl = module.Chart{
+ ID: "cluster_upstream_cx_active_%s",
+ Title: "Cluster upstream current active connections",
+ Units: "connections",
+ Fam: "upstream conns",
+ Ctx: "envoy.cluster_upstream_cx_active_count",
+ Priority: prioClusterUpstreamActiveConnectionsCount,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_cx_active_%s", Name: "active"},
+ },
+ }
+ clusterUpstreamConnectionsRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_cx_total_%s",
+ Title: "Cluster upstream connections",
+ Units: "connections/s",
+ Fam: "upstream conns",
+ Ctx: "envoy.cluster_upstream_cx_rate",
+ Priority: prioClusterUpstreamConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_cx_total_%s", Name: "created", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamHTTPConnectionsRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_cx_http_total_%s",
+ Title: "Cluster upstream connections by HTTP version",
+ Units: "connections/s",
+ Fam: "upstream conns",
+ Ctx: "envoy.cluster_upstream_cx_http_rate",
+ Priority: prioClusterUpstreamHTTPConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_cx_http1_total_%s", Name: "http1", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_cx_http2_total_%s", Name: "http2", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_cx_http3_total_%s", Name: "http3", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamDestroyedConnectionsRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_cx_destroy_%s",
+ Title: "Cluster upstream destroyed connections",
+ Units: "connections/s",
+ Fam: "upstream conns",
+ Ctx: "envoy.cluster_upstream_cx_destroy_rate",
+ Priority: prioClusterUpstreamDestroyedConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_cx_destroy_local_%s", Name: "local", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_cx_destroy_remote_%s", Name: "remote", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamFailedConnectionsRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_cx_connect_fail_%s",
+ Title: "Cluster upstream failed connections",
+ Units: "connections/s",
+ Fam: "upstream conns",
+ Ctx: "envoy.cluster_upstream_cx_connect_fail_rate",
+ Priority: prioClusterUpstreamFailedConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_cx_connect_fail_%s", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamTimedOutConnectionsRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_cx_connect_timeout_%s",
+ Title: "Cluster upstream timed out connections",
+ Units: "connections/s",
+ Fam: "upstream conns",
+ Ctx: "envoy.cluster_upstream_cx_connect_timeout_rate",
+ Priority: prioClusterUpstreamTimedOutConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_cx_connect_timeout_%s", Name: "timeout", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamTrafficRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_cx_bytes_total_%s",
+ Title: "Cluster upstream connection traffic",
+ Units: "bytes/s",
+ Fam: "upstream traffic",
+ Ctx: "envoy.cluster_upstream_cx_bytes_rate",
+ Priority: prioClusterUpstreamTrafficRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_cx_rx_bytes_total_%s", Name: "received", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_cx_tx_bytes_total_%s", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamBufferedSizeChartTmpl = module.Chart{
+ ID: "cluster_upstream_cx_bytes_buffered_%s",
+ Title: "Cluster upstream current connection buffered size",
+ Units: "bytes",
+ Fam: "upstream traffic",
+ Ctx: "envoy.cluster_upstream_cx_bytes_buffered_size",
+ Priority: prioClusterUpstreamBufferedSize,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_cx_rx_bytes_buffered_%s", Name: "received"},
+ {ID: "envoy_cluster_upstream_cx_tx_bytes_buffered_%s", Name: "send"},
+ },
+ }
+
+ clusterUpstreamActiveRequestsCountChartTmpl = module.Chart{
+ ID: "cluster_upstream_rq_active_%s",
+ Title: "Cluster upstream current active requests",
+ Units: "requests",
+ Fam: "upstream requests",
+ Ctx: "envoy.cluster_upstream_rq_active_count",
+ Priority: prioClusterUpstreamActiveRequestsCount,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_rq_active_%s", Name: "active"},
+ },
+ }
+ clusterUpstreamRequestsRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_rq_total_%s",
+ Title: "Cluster upstream requests",
+ Units: "requests/s",
+ Fam: "upstream requests",
+ Ctx: "envoy.cluster_upstream_rq_rate",
+ Priority: prioClusterUpstreamRequestsRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_rq_total_%s", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamFailedRequestsRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_rq_failed_total_%s",
+ Title: "Cluster upstream failed requests",
+ Units: "requests/s",
+ Fam: "upstream requests",
+ Ctx: "envoy.cluster_upstream_rq_failed_rate",
+ Priority: prioClusterUpstreamFailedRequestsRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_rq_cancelled_%s", Name: "cancelled", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_rq_maintenance_mode_%s", Name: "maintenance_mode", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_rq_timeout_%s", Name: "timeout", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_rq_max_duration_reached_%s", Name: "max_duration_reached", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_rq_per_try_timeout_%s", Name: "per_try_timeout", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_rq_rx_reset_%s", Name: "reset_local", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_rq_tx_reset_%s", Name: "reset_remote", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamActivePendingRequestsCountChartTmpl = module.Chart{
+ ID: "cluster_upstream_rq_pending_active_%s",
+ Title: "Cluster upstream current active pending requests",
+ Units: "requests",
+ Fam: "upstream requests",
+ Ctx: "envoy.cluster_upstream_rq_pending_active_count",
+ Priority: prioClusterUpstreamActivePendingRequestsCount,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_rq_pending_active_%s", Name: "active_pending"},
+ },
+ }
+ clusterUpstreamPendingRequestsRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_rq_pending_total_%s",
+ Title: "Cluster upstream pending requests",
+ Units: "requests/s",
+ Fam: "upstream requests",
+ Ctx: "envoy.cluster_upstream_rq_pending_rate",
+ Priority: prioClusterUpstreamPendingRequestsRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_rq_pending_total_%s", Name: "pending", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamPendingFailedRequestsRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_rq_pending_failed_total_%s",
+ Title: "Cluster upstream failed pending requests",
+ Units: "requests/s",
+ Fam: "upstream requests",
+ Ctx: "envoy.cluster_upstream_rq_pending_failed_rate",
+ Priority: prioClusterUpstreamPendingFailedRequestsRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_rq_pending_overflow_%s", Name: "overflow", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_rq_pending_failure_eject_%s", Name: "failure_eject", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamRequestRetriesRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_rq_retry_%s",
+ Title: "Cluster upstream request retries",
+ Units: "retries/s",
+ Fam: "upstream requests",
+ Ctx: "envoy.cluster_upstream_rq_retry_rate",
+ Priority: prioClusterUpstreamRequestRetriesRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_rq_retry_%s", Name: "request", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamRequestSuccessRetriesRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_rq_retry_success_%s",
+ Title: "Cluster upstream request successful retries",
+ Units: "retries/s",
+ Fam: "upstream requests",
+ Ctx: "envoy.cluster_upstream_rq_retry_success_rate",
+ Priority: prioClusterUpstreamRequestSuccessRetriesRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_rq_retry_success_%s", Name: "success", Algo: module.Incremental},
+ },
+ }
+ clusterUpstreamRequestRetriesBackoffRateChartTmpl = module.Chart{
+ ID: "cluster_upstream_rq_retry_backoff_%s",
+ Title: "Cluster upstream request backoff retries",
+ Units: "retries/s",
+ Fam: "upstream requests",
+ Ctx: "envoy.cluster_upstream_rq_retry_backoff_rate",
+ Priority: prioClusterUpstreamRequestBackoffRetriesRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_upstream_rq_retry_backoff_exponential_%s", Name: "exponential", Algo: module.Incremental},
+ {ID: "envoy_cluster_upstream_rq_retry_backoff_ratelimited_%s", Name: "ratelimited", Algo: module.Incremental},
+ },
+ }
+
+ clusterMembershipEndpointsCountChartTmpl = module.Chart{
+ ID: "cluster_membership_endpoints_count_%s",
+ Title: "Cluster membership current endpoints",
+ Units: "endpoints",
+ Fam: "cluster membership",
+ Ctx: "envoy.cluster_membership_endpoints_count",
+ Priority: prioClusterMembershipEndpointsCount,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_membership_healthy_%s", Name: "healthy"},
+ {ID: "envoy_cluster_membership_degraded_%s", Name: "degraded"},
+ {ID: "envoy_cluster_membership_excluded_%s", Name: "excluded"},
+ },
+ }
+ clusterMembershipChangesRateChartTmpl = module.Chart{
+ ID: "cluster_membership_change_%s",
+ Title: "Cluster membership changes",
+ Units: "changes/s",
+ Fam: "cluster membership",
+ Ctx: "envoy.cluster_membership_changes_rate",
+ Priority: prioClusterMembershipChangesRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_membership_change_%s", Name: "membership", Algo: module.Incremental},
+ },
+ }
+ clusterMembershipUpdatesRateChartTmpl = module.Chart{
+ ID: "cluster_membership_updates_%s",
+ Title: "Cluster membership updates",
+ Units: "updates/s",
+ Fam: "cluster membership",
+ Ctx: "envoy.cluster_membership_updates_rate",
+ Priority: prioClusterMembershipUpdatesRate,
+ Dims: module.Dims{
+ {ID: "envoy_cluster_update_success_%s", Name: "success", Algo: module.Incremental},
+ {ID: "envoy_cluster_update_failure_%s", Name: "failure", Algo: module.Incremental},
+ {ID: "envoy_cluster_update_empty_%s", Name: "empty", Algo: module.Incremental},
+ {ID: "envoy_cluster_update_no_rebuild_%s", Name: "no_rebuild", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ listenerManagerChartsTmpl = module.Charts{
+ listenerManagerListenersByStateCountChartTmpl.Copy(),
+ listenerManagerListenerChangesRateChartTmpl.Copy(),
+ listenerManagerListenerObjectEventsRateChartTmpl.Copy(),
+ }
+ listenerManagerListenersByStateCountChartTmpl = module.Chart{
+ ID: "listener_manager_listeners_count_%s",
+ Title: "Listener manager current listeners",
+ Units: "listeners",
+ Fam: "downstream mgr",
+ Ctx: "envoy.listener_manager_listeners_count",
+ Priority: prioListenerManagerListenerCount,
+ Dims: module.Dims{
+ {ID: "envoy_listener_manager_total_listeners_active_%s", Name: "active"},
+ {ID: "envoy_listener_manager_total_listeners_warming_%s", Name: "warming"},
+ {ID: "envoy_listener_manager_total_listeners_draining_%s", Name: "draining"},
+ },
+ }
+ listenerManagerListenerChangesRateChartTmpl = module.Chart{
+ ID: "listener_manager_listener_changes_%s",
+ Title: "Listener manager listener changes",
+ Units: "listeners/s",
+ Fam: "downstream mgr",
+ Ctx: "envoy.listener_manager_listener_changes_rate",
+ Priority: prioListenerManagerListenerChangesRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_manager_listener_added_%s", Name: "added", Algo: module.Incremental},
+ {ID: "envoy_listener_manager_listener_modified_%s", Name: "modified", Algo: module.Incremental},
+ {ID: "envoy_listener_manager_listener_removed_%s", Name: "removed", Algo: module.Incremental},
+ {ID: "envoy_listener_manager_listener_stopped_%s", Name: "stopped", Algo: module.Incremental},
+ },
+ }
+ listenerManagerListenerObjectEventsRateChartTmpl = module.Chart{
+ ID: "listener_manager_listener_object_events_%s",
+ Title: "Listener manager listener object events",
+ Units: "objects/s",
+ Fam: "downstream mgr",
+ Ctx: "envoy.listener_manager_listener_object_events_rate",
+ Priority: prioListenerManagerListenerObjectEventsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_manager_listener_create_success_%s", Name: "create_success", Algo: module.Incremental},
+ {ID: "envoy_listener_manager_listener_create_failure_%s", Name: "create_failure", Algo: module.Incremental},
+ {ID: "envoy_listener_manager_listener_in_place_updated_%s", Name: "in_place_updated", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ listenerAdminDownstreamChartsTmpl = module.Charts{
+ listenerAdminDownstreamActiveConnectionsCountChartTmpl.Copy(),
+ listenerAdminDownstreamConnectionsRateChartTmpl.Copy(),
+ listenerAdminDownstreamDestroyedConnectionsRateChartTmpl.Copy(),
+ listenerAdminDownstreamTimedOutConnectionsRateChartTmpl.Copy(),
+ listenerAdminDownstreamRejectedConnectionsRateChartTmpl.Copy(),
+ listenerAdminDownstreamFilterClosedByRemoteConnectionsRateChartTmpl.Copy(),
+ listenerAdminDownstreamFilterReadErrorsRateChartTmpl.Copy(),
+
+ listenerAdminDownstreamActiveSocketsCountChartTmpl.Copy(),
+ listenerAdminDownstreamTimedOutSocketsRateChartTmpl.Copy(),
+ }
+
+ listenerAdminDownstreamActiveConnectionsCountChartTmpl = module.Chart{
+ ID: "listener_admin_downstream_cx_active_%s",
+ Title: "Listener admin downstream current active connections",
+ Units: "connections",
+ Fam: "downstream adm conns",
+ Ctx: "envoy.listener_admin_downstream_cx_active_count",
+ Priority: prioListenerAdminDownstreamActiveConnectionsCount,
+ Dims: module.Dims{
+ {ID: "envoy_listener_admin_downstream_cx_active_%s", Name: "active"},
+ },
+ }
+ listenerAdminDownstreamConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_admin_downstream_cx_total_%s",
+ Title: "Listener admin downstream connections",
+ Units: "connections/s",
+ Fam: "downstream adm conns",
+ Ctx: "envoy.listener_admin_downstream_cx_rate",
+ Priority: prioListenerAdminDownstreamConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_admin_downstream_cx_total_%s", Name: "created", Algo: module.Incremental},
+ },
+ }
+ listenerAdminDownstreamDestroyedConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_admin_downstream_cx_destroy_%s",
+ Title: "Listener admin downstream destroyed connections",
+ Units: "connections/s",
+ Fam: "downstream adm conns",
+ Ctx: "envoy.listener_admin_downstream_cx_destroy_rate",
+ Priority: prioListenerAdminDownstreamDestroyedConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_admin_downstream_cx_destroy_%s", Name: "destroyed", Algo: module.Incremental},
+ },
+ }
+ listenerAdminDownstreamTimedOutConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_admin_downstream_cx_transport_socket_connect_timeout_%s",
+ Title: "Listener admin downstream timed out connections",
+ Units: "connections/s",
+ Fam: "downstream adm conns",
+ Ctx: "envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate",
+ Priority: prioListenerAdminDownstreamTimedOutConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_admin_downstream_cx_transport_socket_connect_timeout_%s", Name: "timeout", Algo: module.Incremental},
+ },
+ }
+ listenerAdminDownstreamRejectedConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_admin_downstream_cx_rejected_%s",
+ Title: "Listener admin downstream rejected connections",
+ Units: "connections/s",
+ Fam: "downstream adm conns",
+ Ctx: "envoy.listener_admin_downstream_cx_rejected_rate",
+ Priority: prioListenerAdminDownstreamRejectedConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_admin_downstream_cx_overflow_%s", Name: "overflow", Algo: module.Incremental},
+ {ID: "envoy_listener_admin_downstream_cx_overload_reject_%s", Name: "overload", Algo: module.Incremental},
+ {ID: "envoy_listener_admin_downstream_global_cx_overflow_%s", Name: "global_overflow", Algo: module.Incremental},
+ },
+ }
+ listenerAdminDownstreamFilterClosedByRemoteConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_admin_downstream_listener_filter_remote_close_%s",
+ Title: "Listener admin downstream connections closed by remote when peek data for listener filters",
+ Units: "connections/s",
+ Fam: "downstream adm conns",
+ Ctx: "envoy.listener_admin_downstream_listener_filter_remote_close_rate",
+ Priority: prioListenerAdminDownstreamFilterClosedByRemoteConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_admin_downstream_listener_filter_remote_close_%s", Name: "closed", Algo: module.Incremental},
+ },
+ }
+ listenerAdminDownstreamFilterReadErrorsRateChartTmpl = module.Chart{
+ ID: "listener_admin_downstream_listener_filter_error_%s",
+ Title: "Listener admin downstream read errors when peeking data for listener filters",
+ Units: "errors/s",
+ Fam: "downstream adm conns",
+ Ctx: "envoy.listener_admin_downstream_listener_filter_error_rate",
+ Priority: prioListenerAdminDownstreamFilterReadErrorsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_admin_downstream_listener_filter_error_%s", Name: "read", Algo: module.Incremental},
+ },
+ }
+
+ listenerAdminDownstreamActiveSocketsCountChartTmpl = module.Chart{
+ ID: "listener_admin_downstream_pre_cx_active_%s",
+ Title: "Listener admin downstream current active sockets",
+ Units: "sockets",
+ Fam: "downstream adm sockets",
+ Ctx: "envoy.listener_admin_downstream_pre_cx_active_count",
+ Priority: prioListenerAdminDownstreamActiveSocketsCount,
+ Dims: module.Dims{
+ {ID: "envoy_listener_admin_downstream_pre_cx_active_%s", Name: "active"},
+ },
+ }
+ listenerAdminDownstreamTimedOutSocketsRateChartTmpl = module.Chart{
+ ID: "listener_admin_downstream_pre_cx_timeout_%s",
+ Title: "Listener admin downstream timed out sockets",
+ Units: "sockets/s",
+ Fam: "downstream adm sockets",
+ Ctx: "envoy.listener_admin_downstream_pre_cx_timeout_rate",
+ Priority: prioListenerAdminDownstreamTimedOutSocketsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_admin_downstream_pre_cx_timeout_%s", Name: "timeout", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ listenerDownstreamChartsTmpl = module.Charts{
+ listenerDownstreamActiveConnectionsCountChartTmpl.Copy(),
+ listenerDownstreamConnectionsRateChartTmpl.Copy(),
+ listenerDownstreamDestroyedConnectionsRateChartTmpl.Copy(),
+ listenerDownstreamTimedOutConnectionsRateChartTmpl.Copy(),
+ listenerDownstreamRejectedConnectionsRateChartTmpl.Copy(),
+ listenerDownstreamFilterClosedByRemoteConnectionsRateChartTmpl.Copy(),
+ listenerDownstreamFilterReadErrorsRateChartTmpl.Copy(),
+
+ listenerDownstreamActiveSocketsCountChartTmpl.Copy(),
+ listenerDownstreamTimedOutSocketsRateChartTmpl.Copy(),
+ }
+
+ listenerDownstreamActiveConnectionsCountChartTmpl = module.Chart{
+ ID: "listener_downstream_cx_active_%s",
+ Title: "Listener downstream current active connections",
+ Units: "connections",
+ Fam: "downstream conns",
+ Ctx: "envoy.listener_downstream_cx_active_count",
+ Priority: prioListenerDownstreamActiveConnectionsCount,
+ Dims: module.Dims{
+ {ID: "envoy_listener_downstream_cx_active_%s", Name: "active"},
+ },
+ }
+ listenerDownstreamConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_downstream_cx_total_%s",
+ Title: "Listener downstream connections",
+ Units: "connections/s",
+ Fam: "downstream conns",
+ Ctx: "envoy.listener_downstream_cx_rate",
+ Priority: prioListenerDownstreamConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_downstream_cx_total_%s", Name: "created", Algo: module.Incremental},
+ },
+ }
+ listenerDownstreamDestroyedConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_downstream_cx_destroy_%s",
+ Title: "Listener downstream destroyed connections",
+ Units: "connections/s",
+ Fam: "downstream conns",
+ Ctx: "envoy.listener_downstream_cx_destroy_rate",
+ Priority: prioListenerDownstreamDestroyedConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_downstream_cx_destroy_%s", Name: "destroyed", Algo: module.Incremental},
+ },
+ }
+ listenerDownstreamTimedOutConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_downstream_cx_transport_socket_connect_timeout_%s",
+ Title: "Listener downstream timed out connections",
+ Units: "connections/s",
+ Fam: "downstream conns",
+ Ctx: "envoy.listener_downstream_cx_transport_socket_connect_timeout_rate",
+ Priority: prioListenerDownstreamTimedOutConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_downstream_cx_transport_socket_connect_timeout_%s", Name: "timeout", Algo: module.Incremental},
+ },
+ }
+ listenerDownstreamRejectedConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_downstream_cx_rejected_%s",
+ Title: "Listener downstream rejected connections",
+ Units: "connections/s",
+ Fam: "downstream conns",
+ Ctx: "envoy.listener_downstream_cx_rejected_rate",
+ Priority: prioListenerDownstreamRejectedConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_downstream_cx_overflow_%s", Name: "overflow", Algo: module.Incremental},
+ {ID: "envoy_listener_downstream_cx_overload_reject_%s", Name: "overload", Algo: module.Incremental},
+ {ID: "envoy_listener_downstream_global_cx_overflow_%s", Name: "global_overflow", Algo: module.Incremental},
+ },
+ }
+ listenerDownstreamFilterClosedByRemoteConnectionsRateChartTmpl = module.Chart{
+ ID: "listener_downstream_listener_filter_remote_close_%s",
+ Title: "Listener downstream connections closed by remote when peek data for listener filters",
+ Units: "connections/s",
+ Fam: "downstream conns",
+ Ctx: "envoy.listener_downstream_listener_filter_remote_close_rate",
+ Priority: prioListenerDownstreamFilterClosedByRemoteConnectionsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_downstream_listener_filter_remote_close_%s", Name: "closed", Algo: module.Incremental},
+ },
+ }
+ listenerDownstreamFilterReadErrorsRateChartTmpl = module.Chart{
+ ID: "listener_downstream_listener_filter_error_%s",
+ Title: "Listener downstream read errors when peeking data for listener filters",
+ Units: "errors/s",
+ Fam: "downstream conns",
+ Ctx: "envoy.listener_downstream_listener_filter_error_rate",
+ Priority: prioListenerDownstreamFilterReadErrorsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_downstream_listener_filter_error_%s", Name: "read", Algo: module.Incremental},
+ },
+ }
+
+ listenerDownstreamActiveSocketsCountChartTmpl = module.Chart{
+ ID: "listener_downstream_pre_cx_active_%s",
+ Title: "Listener downstream current active sockets",
+ Units: "sockets",
+ Fam: "downstream sockets",
+ Ctx: "envoy.listener_downstream_pre_cx_active_count",
+ Priority: prioListenerDownstreamActiveSocketsCount,
+ Dims: module.Dims{
+ {ID: "envoy_listener_downstream_pre_cx_active_%s", Name: "active"},
+ },
+ }
+ listenerDownstreamTimedOutSocketsRateChartTmpl = module.Chart{
+ ID: "listener_downstream_pre_cx_timeout_%s",
+ Title: "Listener downstream timed out sockets",
+ Units: "sockets/s",
+ Fam: "downstream sockets",
+ Ctx: "envoy.listener_downstream_pre_cx_timeout_rate",
+ Priority: prioListenerDownstreamTimedOutSocketsRate,
+ Dims: module.Dims{
+ {ID: "envoy_listener_downstream_pre_cx_timeout_%s", Name: "timeout", Algo: module.Incremental},
+ },
+ }
+)
+
+func (e *Envoy) addServerCharts(id string, labels labels.Labels) {
+ e.addCharts(serverChartsTmpl.Copy(), id, labels)
+}
+
+func (e *Envoy) addClusterManagerCharts(id string, labels labels.Labels) {
+ e.addCharts(clusterManagerChartsTmpl.Copy(), id, labels)
+}
+
+func (e *Envoy) addClusterUpstreamCharts(id string, labels labels.Labels) {
+ e.addCharts(clusterUpstreamChartsTmpl.Copy(), id, labels)
+}
+
+func (e *Envoy) addListenerManagerCharts(id string, labels labels.Labels) {
+ e.addCharts(listenerManagerChartsTmpl.Copy(), id, labels)
+}
+
+func (e *Envoy) addListenerAdminDownstreamCharts(id string, labels labels.Labels) {
+ e.addCharts(listenerAdminDownstreamChartsTmpl.Copy(), id, labels)
+}
+
+func (e *Envoy) addListenerDownstreamCharts(id string, labels labels.Labels) {
+ e.addCharts(listenerDownstreamChartsTmpl.Copy(), id, labels)
+}
+
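+// addCharts instantiates the given chart templates for an instance: the "%s" placeholder in chart and
+// dimension IDs is replaced with the instance id (dots are converted to underscores in chart IDs), or
+// stripped entirely when the id is empty; the provided labels are attached and the charts are registered.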
+func (e *Envoy) addCharts(charts *module.Charts, id string, labels labels.Labels) {
+ charts = charts.Copy()
+
+ for _, chart := range *charts {
+ if id == "" {
+ chart.ID = strings.Replace(chart.ID, "_%s", "", 1)
+ for _, dim := range chart.Dims {
+ dim.ID = strings.Replace(dim.ID, "_%s", "", 1)
+ }
+ } else {
+ chart.ID = fmt.Sprintf(chart.ID, dotReplacer.Replace(id))
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, id)
+ }
+ }
+
+ for _, lbl := range labels {
+ chart.Labels = append(chart.Labels, module.Label{Key: lbl.Name, Value: lbl.Value})
+ }
+ }
+
+ if err := e.Charts().Add(*charts...); err != nil {
+ e.Warning(err)
+ }
+}
+
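+// removeCharts marks for removal every chart whose ID ends with the given instance id
+// (dots converted to underscores, matching the IDs produced by addCharts).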
+func (e *Envoy) removeCharts(id string) {
+ if id == "" {
+ return
+ }
+
+ id = dotReplacer.Replace(id)
+ for _, chart := range *e.Charts() {
+ if strings.HasSuffix(chart.ID, id) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+var dotReplacer = strings.NewReplacer(".", "_")
diff --git a/src/go/plugin/go.d/modules/envoy/collect.go b/src/go/plugin/go.d/modules/envoy/collect.go
new file mode 100644
index 000000000..922e466d3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/collect.go
@@ -0,0 +1,423 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package envoy
+
+import (
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+// Server stats: https://www.envoyproxy.io/docs/envoy/latest/configuration/observability/statistics#
+// Server state: https://www.envoyproxy.io/docs/envoy/latest/api-v3/admin/v3/server_info.proto#enum-admin-v3-serverinfo-state
+// Listener stats: https://www.envoyproxy.io/docs/envoy/latest/configuration/listeners/stats
+
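+// collect scrapes the Envoy Prometheus endpoint and aggregates the samples into per-scope metrics:
+// server, cluster manager, cluster upstream, listener manager, admin listener and listener downstream.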
+func (e *Envoy) collect() (map[string]int64, error) {
+ mfs, err := e.prom.Scrape()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ e.collectServerStats(mx, mfs)
+ e.collectClusterManagerStats(mx, mfs)
+ e.collectClusterUpstreamStats(mx, mfs)
+ e.collectListenerManagerStats(mx, mfs)
+ e.collectListenerAdminDownstreamStats(mx, mfs)
+ e.collectListenerDownstreamStats(mx, mfs)
+
+ return mx, nil
+}
+
+func (e *Envoy) collectServerStats(mx map[string]int64, mfs prometheus.MetricFamilies) {
+ seen := make(map[string]bool)
+ for _, n := range []string{
+ "envoy_server_uptime",
+ "envoy_server_memory_allocated",
+ "envoy_server_memory_heap_size",
+ "envoy_server_memory_physical_size",
+ "envoy_server_parent_connections",
+ "envoy_server_total_connections",
+ } {
+ e.collectGauge(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.servers[id] {
+ e.servers[id] = true
+ e.addServerCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Gauge().Value())
+ })
+ }
+
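+ // envoy_server_state is a single gauge (0=live, 1=draining, 2=pre_initializing, 3=initializing);
+ // expand it into one boolean metric per state so each state becomes a separate chart dimension.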
+ e.collectGauge(mfs, "envoy_server_state", func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ for _, v := range []string{"live", "draining", "pre_initializing", "initializing"} {
+ mx[join(name, v, id)] = 0
+ }
+
+ switch m.Gauge().Value() {
+ case 0:
+ mx[join(name, "live", id)] = 1
+ case 1:
+ mx[join(name, "draining", id)] = 1
+ case 2:
+ mx[join(name, "pre_initializing", id)] = 1
+ case 3:
+ mx[join(name, "initializing", id)] = 1
+ }
+ })
+
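+ // Remove servers (and their charts) that no longer appear in the scrape.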
+ for id := range e.servers {
+ if id != "" && !seen[id] {
+ delete(e.servers, id)
+ e.removeCharts(id)
+ }
+ }
+}
+
+func (e *Envoy) collectClusterManagerStats(mx map[string]int64, mfs prometheus.MetricFamilies) {
+ seen := make(map[string]bool)
+ for _, n := range []string{
+ "envoy_cluster_manager_cluster_added",
+ "envoy_cluster_manager_cluster_modified",
+ "envoy_cluster_manager_cluster_removed",
+ "envoy_cluster_manager_cluster_updated",
+ "envoy_cluster_manager_cluster_updated_via_merge",
+ "envoy_cluster_manager_update_merge_cancelled",
+ "envoy_cluster_manager_update_out_of_merge_window",
+ } {
+ e.collectCounter(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.clusterMgrs[id] {
+ e.clusterMgrs[id] = true
+ e.addClusterManagerCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Counter().Value())
+ })
+ }
+
+ for _, n := range []string{
+ "envoy_cluster_manager_active_clusters",
+ "envoy_cluster_manager_warming_clusters",
+ } {
+ e.collectGauge(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ mx[join(name, id)] += int64(m.Gauge().Value())
+ })
+ }
+
+ for id := range e.clusterMgrs {
+ if id != "" && !seen[id] {
+ delete(e.clusterMgrs, id)
+ e.removeCharts(id)
+ }
+ }
+}
+
+func (e *Envoy) collectListenerAdminDownstreamStats(mx map[string]int64, mfs prometheus.MetricFamilies) {
+ seen := make(map[string]bool)
+ for _, n := range []string{
+ "envoy_listener_admin_downstream_cx_total",
+ "envoy_listener_admin_downstream_cx_destroy",
+ "envoy_listener_admin_downstream_cx_transport_socket_connect_timeout",
+ "envoy_listener_admin_downstream_cx_overflow",
+ "envoy_listener_admin_downstream_cx_overload_reject",
+ "envoy_listener_admin_downstream_global_cx_overflow",
+ "envoy_listener_admin_downstream_pre_cx_timeout",
+ "envoy_listener_admin_downstream_listener_filter_remote_close",
+ "envoy_listener_admin_downstream_listener_filter_error",
+ } {
+ e.collectCounter(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.listenerAdminDownstream[id] {
+ e.listenerAdminDownstream[id] = true
+ e.addListenerAdminDownstreamCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Counter().Value())
+ })
+ }
+ for _, n := range []string{
+ "envoy_listener_admin_downstream_cx_active",
+ "envoy_listener_admin_downstream_pre_cx_active",
+ } {
+ e.collectGauge(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.listenerAdminDownstream[id] {
+ e.listenerAdminDownstream[id] = true
+ e.addListenerAdminDownstreamCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Gauge().Value())
+ })
+ }
+
+ for id := range e.listenerAdminDownstream {
+ if id != "" && !seen[id] {
+ delete(e.listenerAdminDownstream, id)
+ e.removeCharts(id)
+ }
+ }
+}
+
+func (e *Envoy) collectListenerDownstreamStats(mx map[string]int64, mfs prometheus.MetricFamilies) {
+ seen := make(map[string]bool)
+ for _, n := range []string{
+ "envoy_listener_downstream_cx_total",
+ "envoy_listener_downstream_cx_destroy",
+ "envoy_listener_downstream_cx_transport_socket_connect_timeout",
+ "envoy_listener_downstream_cx_overflow",
+ "envoy_listener_downstream_cx_overload_reject",
+ "envoy_listener_downstream_global_cx_overflow",
+ "envoy_listener_downstream_pre_cx_timeout",
+ "envoy_listener_downstream_listener_filter_remote_close",
+ "envoy_listener_downstream_listener_filter_error",
+ } {
+ e.collectCounter(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.listenerDownstream[id] {
+ e.listenerDownstream[id] = true
+ e.addListenerDownstreamCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Counter().Value())
+ })
+ }
+ for _, n := range []string{
+ "envoy_listener_downstream_cx_active",
+ "envoy_listener_downstream_pre_cx_active",
+ } {
+ e.collectGauge(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.listenerDownstream[id] {
+ e.listenerDownstream[id] = true
+ e.addListenerDownstreamCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Gauge().Value())
+ })
+ }
+
+ for id := range e.listenerDownstream {
+ if id != "" && !seen[id] {
+ delete(e.listenerDownstream, id)
+ e.removeCharts(id)
+ }
+ }
+}
+
+func (e *Envoy) collectClusterUpstreamStats(mx map[string]int64, mfs prometheus.MetricFamilies) {
+ seen := make(map[string]bool)
+ for _, n := range []string{
+ "envoy_cluster_upstream_cx_total",
+ "envoy_cluster_upstream_cx_http1_total",
+ "envoy_cluster_upstream_cx_http2_total",
+ "envoy_cluster_upstream_cx_http3_total",
+ "envoy_cluster_upstream_cx_http3_total",
+ "envoy_cluster_upstream_cx_connect_fail",
+ "envoy_cluster_upstream_cx_connect_timeout",
+ "envoy_cluster_upstream_cx_idle_timeout",
+ "envoy_cluster_upstream_cx_max_duration_reached",
+ "envoy_cluster_upstream_cx_connect_attempts_exceeded",
+ "envoy_cluster_upstream_cx_overflow",
+ "envoy_cluster_upstream_cx_destroy",
+ "envoy_cluster_upstream_cx_destroy_local",
+ "envoy_cluster_upstream_cx_destroy_remote",
+ "envoy_cluster_upstream_cx_rx_bytes_total",
+ "envoy_cluster_upstream_cx_tx_bytes_total",
+ "envoy_cluster_upstream_rq_total",
+ "envoy_cluster_upstream_rq_pending_total",
+ "envoy_cluster_upstream_rq_pending_overflow",
+ "envoy_cluster_upstream_rq_pending_failure_eject",
+ "envoy_cluster_upstream_rq_cancelled",
+ "envoy_cluster_upstream_rq_maintenance_mode",
+ "envoy_cluster_upstream_rq_timeout",
+ "envoy_cluster_upstream_rq_max_duration_reached",
+ "envoy_cluster_upstream_rq_per_try_timeout",
+ "envoy_cluster_upstream_rq_rx_reset",
+ "envoy_cluster_upstream_rq_tx_reset",
+ "envoy_cluster_upstream_rq_retry",
+ "envoy_cluster_upstream_rq_retry_backoff_exponential",
+ "envoy_cluster_upstream_rq_retry_backoff_ratelimited",
+ "envoy_cluster_upstream_rq_retry_success",
+ "envoy_cluster_membership_change",
+ "envoy_cluster_update_success",
+ "envoy_cluster_update_failure",
+ "envoy_cluster_update_empty",
+ "envoy_cluster_update_no_rebuild",
+ } {
+ e.collectCounter(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.clusterUpstream[id] {
+ e.clusterUpstream[id] = true
+ e.addClusterUpstreamCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Counter().Value())
+ })
+ }
+
+ for _, n := range []string{
+ "envoy_cluster_upstream_cx_active",
+ "envoy_cluster_upstream_cx_rx_bytes_buffered",
+ "envoy_cluster_upstream_cx_tx_bytes_buffered",
+ "envoy_cluster_upstream_rq_active",
+ "envoy_cluster_upstream_rq_pending_active",
+ "envoy_cluster_membership_healthy",
+ "envoy_cluster_membership_degraded",
+ "envoy_cluster_membership_excluded",
+ } {
+ e.collectGauge(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.clusterUpstream[id] {
+ e.clusterUpstream[id] = true
+ e.addClusterUpstreamCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Gauge().Value())
+ })
+ }
+
+ for id := range e.clusterUpstream {
+ if id != "" && !seen[id] {
+ delete(e.clusterUpstream, id)
+ e.removeCharts(id)
+ }
+ }
+}
+
+func (e *Envoy) collectListenerManagerStats(mx map[string]int64, mfs prometheus.MetricFamilies) {
+ seen := make(map[string]bool)
+ for _, n := range []string{
+ "envoy_listener_manager_listener_added",
+ "envoy_listener_manager_listener_modified",
+ "envoy_listener_manager_listener_removed",
+ "envoy_listener_manager_listener_stopped",
+ "envoy_listener_manager_listener_create_success",
+ "envoy_listener_manager_listener_create_failure",
+ "envoy_listener_manager_listener_in_place_updated",
+ } {
+ e.collectCounter(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.listenerMgrs[id] {
+ e.listenerMgrs[id] = true
+ e.addListenerManagerCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Counter().Value())
+ })
+ }
+
+ for _, n := range []string{
+ "envoy_listener_manager_total_listeners_warming",
+ "envoy_listener_manager_total_listeners_active",
+ "envoy_listener_manager_total_listeners_draining",
+ } {
+ e.collectGauge(mfs, n, func(name string, m prometheus.Metric) {
+ id := e.joinLabels(m.Labels())
+ seen[id] = true
+
+ if !e.listenerMgrs[id] {
+ e.listenerMgrs[id] = true
+ e.addListenerManagerCharts(id, m.Labels())
+ }
+
+ mx[join(name, id)] += int64(m.Gauge().Value())
+ })
+ }
+
+ for id := range e.listenerMgrs {
+ if id != "" && !seen[id] {
+ delete(e.listenerMgrs, id)
+ e.removeCharts(id)
+ }
+ }
+}
+
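+// collectGauge looks up a gauge metric family by name and calls process for each of its samples.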
+func (e *Envoy) collectGauge(mfs prometheus.MetricFamilies, metric string, process func(name string, m prometheus.Metric)) {
+ if mf := mfs.GetGauge(metric); mf != nil {
+ for _, m := range mf.Metrics() {
+ process(mf.Name(), m)
+ }
+ }
+}
+
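+// collectCounter looks up a counter metric family by name and calls process for each of its samples.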
+func (e *Envoy) collectCounter(mfs prometheus.MetricFamilies, metric string, process func(name string, m prometheus.Metric)) {
+ if mf := mfs.GetCounter(metric); mf != nil {
+ for _, m := range mf.Metrics() {
+ process(mf.Name(), m)
+ }
+ }
+}
+
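+// joinLabels builds an instance id by joining non-empty label values with underscores,
+// replacing spaces and backslashes and unquoting escaped sequences along the way.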
+func (e *Envoy) joinLabels(labels labels.Labels) string {
+ var buf strings.Builder
+ first := true
+ for _, lbl := range labels {
+ v := lbl.Value
+ if v == "" {
+ continue
+ }
+ if strings.IndexByte(v, ' ') != -1 {
+ v = spaceReplacer.Replace(v)
+ }
+ if strings.IndexByte(v, '\\') != -1 {
+ if v = decodeLabelValue(v); strings.IndexByte(v, '\\') != -1 {
+ v = backslashReplacer.Replace(v)
+ }
+ }
+ if first {
+ buf.WriteString(v)
+ first = false
+ } else {
+ buf.WriteString("_" + v)
+ }
+ }
+ return buf.String()
+}
+
+var (
+ spaceReplacer = strings.NewReplacer(" ", "_")
+ backslashReplacer = strings.NewReplacer(`\`, "_")
+)
+
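+// decodeLabelValue unquotes backslash escape sequences in a label value;
+// if unquoting fails, the original value is returned unchanged.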
+func decodeLabelValue(value string) string {
+ v, err := strconv.Unquote("\"" + value + "\"")
+ if err != nil {
+ return value
+ }
+ return v
+}
+
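+// join builds a metrics map key by appending each non-empty element to the metric name with an underscore.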
+func join(name string, elems ...string) string {
+ for _, v := range elems {
+ if v != "" {
+ name += "_" + v
+ }
+ }
+ return name
+}
diff --git a/src/go/plugin/go.d/modules/envoy/config_schema.json b/src/go/plugin/go.d/modules/envoy/config_schema.json
new file mode 100644
index 000000000..7073337dd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Envoy collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Envoy [Prometheus endpoint](https://www.envoyproxy.io/docs/envoy/latest/start/quick-start/admin#admin).",
+ "type": "string",
+ "default": "http://127.0.0.1:9091/stats/prometheus",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/envoy/envoy.go b/src/go/plugin/go.d/modules/envoy/envoy.go
new file mode 100644
index 000000000..194acf17f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/envoy.go
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package envoy
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("envoy", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Envoy {
+ return &Envoy{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:9091/stats/prometheus",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+
+ charts: &module.Charts{},
+
+ servers: make(map[string]bool),
+ clusterMgrs: make(map[string]bool),
+ clusterUpstream: make(map[string]bool),
+ listenerMgrs: make(map[string]bool),
+ listenerAdminDownstream: make(map[string]bool),
+ listenerDownstream: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Envoy struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prom prometheus.Prometheus
+
+ servers map[string]bool
+ clusterMgrs map[string]bool
+ clusterUpstream map[string]bool
+ listenerMgrs map[string]bool
+ listenerAdminDownstream map[string]bool
+ listenerDownstream map[string]bool
+}
+
+func (e *Envoy) Configuration() any {
+ return e.Config
+}
+
+func (e *Envoy) Init() error {
+ if err := e.validateConfig(); err != nil {
+ e.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prom, err := e.initPrometheusClient()
+ if err != nil {
+ e.Errorf("init Prometheus client: %v", err)
+ return err
+ }
+ e.prom = prom
+
+ return nil
+}
+
+func (e *Envoy) Check() error {
+ mx, err := e.collect()
+ if err != nil {
+ e.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+
+ }
+ return nil
+}
+
+func (e *Envoy) Charts() *module.Charts {
+ return e.charts
+}
+
+func (e *Envoy) Collect() map[string]int64 {
+ mx, err := e.collect()
+ if err != nil {
+ e.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (e *Envoy) Cleanup() {
+ if e.prom == nil || e.prom.HTTPClient() == nil {
+ return
+ }
+
+ e.prom.HTTPClient().CloseIdleConnections()
+}
diff --git a/src/go/plugin/go.d/modules/envoy/envoy_test.go b/src/go/plugin/go.d/modules/envoy/envoy_test.go
new file mode 100644
index 000000000..cbda31f9a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/envoy_test.go
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package envoy
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataEnvoyConsulDataplane, _ = os.ReadFile("testdata/consul-dataplane.txt")
+ dataEnvoy, _ = os.ReadFile("testdata/envoy.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataEnvoyConsulDataplane": dataEnvoyConsulDataplane,
+ "dataEnvoy": dataEnvoy,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestEnvoy_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Envoy{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestEnvoy_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ envoy := New()
+ envoy.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, envoy.Init())
+ } else {
+ assert.NoError(t, envoy.Init())
+ }
+ })
+ }
+
+}
+
+func TestEnvoy_Cleanup(t *testing.T) {
+ envoy := New()
+ assert.NotPanics(t, envoy.Cleanup)
+
+ require.NoError(t, envoy.Init())
+ assert.NotPanics(t, envoy.Cleanup)
+}
+
+func TestEnvoy_Charts(t *testing.T) {
+ envoy, cleanup := prepareCaseEnvoyStats()
+ defer cleanup()
+
+ require.Empty(t, *envoy.Charts())
+
+ require.NoError(t, envoy.Init())
+ _ = envoy.Collect()
+ require.NotEmpty(t, *envoy.Charts())
+}
+
+func TestEnvoy_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (envoy *Envoy, cleanup func())
+ wantFail bool
+ }{
+ "case envoy consul dataplane": {
+ wantFail: false,
+ prepare: prepareCaseEnvoyConsulDataplaneStats,
+ },
+ "case envoy": {
+ wantFail: false,
+ prepare: prepareCaseEnvoyStats,
+ },
+ "case invalid data response": {
+ wantFail: true,
+ prepare: prepareCaseInvalidDataResponse,
+ },
+ "case 404": {
+ wantFail: true,
+ prepare: prepareCase404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ envoy, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, envoy.Init())
+
+ if test.wantFail {
+ assert.Error(t, envoy.Check())
+ } else {
+ assert.NoError(t, envoy.Check())
+ }
+ })
+ }
+}
+
+func TestEnvoy_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (envoy *Envoy, cleanup func())
+ wantMetrics map[string]int64
+ }{
+ "case envoy consul dataplane": {
+ prepare: prepareCaseEnvoyConsulDataplaneStats,
+ wantMetrics: map[string]int64{
+ "envoy_cluster_manager_active_clusters_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 4,
+ "envoy_cluster_manager_cluster_added_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 4,
+ "envoy_cluster_manager_cluster_modified_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_cluster_manager_cluster_removed_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_cluster_manager_cluster_updated_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 2,
+ "envoy_cluster_manager_cluster_updated_via_merge_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_cluster_manager_update_merge_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_cluster_manager_update_out_of_merge_window_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_cluster_manager_warming_clusters_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_cluster_membership_change_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1,
+ "envoy_cluster_membership_change_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 1,
+ "envoy_cluster_membership_change_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 2,
+ "envoy_cluster_membership_change_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 1,
+ "envoy_cluster_membership_degraded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_membership_degraded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_membership_degraded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_membership_degraded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_membership_excluded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_membership_excluded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_membership_excluded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_membership_excluded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_membership_healthy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1,
+ "envoy_cluster_membership_healthy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 1,
+ "envoy_cluster_membership_healthy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_membership_healthy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 1,
+ "envoy_cluster_update_empty_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_update_empty_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_update_empty_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_update_empty_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_update_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_update_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_update_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_update_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_update_no_rebuild_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_update_no_rebuild_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_update_no_rebuild_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_update_no_rebuild_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_update_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_update_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_update_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_update_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1,
+ "envoy_cluster_upstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 2,
+ "envoy_cluster_upstream_cx_connect_attempts_exceeded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_attempts_exceeded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_attempts_exceeded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_attempts_exceeded_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_fail_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_fail_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_fail_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_fail_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 6507,
+ "envoy_cluster_upstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1,
+ "envoy_cluster_upstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_destroy_local_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_destroy_local_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 6507,
+ "envoy_cluster_upstream_cx_destroy_local_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_destroy_local_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_destroy_remote_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_destroy_remote_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_destroy_remote_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1,
+ "envoy_cluster_upstream_cx_destroy_remote_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_http1_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_http1_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_http1_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_http1_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 2,
+ "envoy_cluster_upstream_cx_http2_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1,
+ "envoy_cluster_upstream_cx_http2_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_http2_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_http2_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_http3_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_http3_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_http3_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_http3_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_idle_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_idle_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_idle_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_idle_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_rx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 17,
+ "envoy_cluster_upstream_cx_rx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_rx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_rx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 102618,
+ "envoy_cluster_upstream_cx_rx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 3853,
+ "envoy_cluster_upstream_cx_rx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_rx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 8645645,
+ "envoy_cluster_upstream_cx_rx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 724779,
+ "envoy_cluster_upstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1,
+ "envoy_cluster_upstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 6507,
+ "envoy_cluster_upstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1,
+ "envoy_cluster_upstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 2,
+ "envoy_cluster_upstream_cx_tx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_cx_tx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_tx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_cx_tx_bytes_buffered_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_cx_tx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 114982,
+ "envoy_cluster_upstream_cx_tx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_cx_tx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1240,
+ "envoy_cluster_upstream_cx_tx_bytes_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 732,
+ "envoy_cluster_upstream_rq_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1,
+ "envoy_cluster_upstream_rq_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 1,
+ "envoy_cluster_upstream_rq_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 4749,
+ "envoy_cluster_upstream_rq_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_cancelled_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_maintenance_mode_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_maintenance_mode_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_maintenance_mode_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_maintenance_mode_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_max_duration_reached_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_failure_eject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_failure_eject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_failure_eject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_failure_eject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_pending_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1,
+ "envoy_cluster_upstream_rq_pending_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 6507,
+ "envoy_cluster_upstream_rq_pending_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1,
+ "envoy_cluster_upstream_rq_pending_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 2,
+ "envoy_cluster_upstream_rq_per_try_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_per_try_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_per_try_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_per_try_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_exponential_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_exponential_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_exponential_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_exponential_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_ratelimited_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_ratelimited_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_ratelimited_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_ratelimited_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_retry_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_rx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_rx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_rx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_rx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_cluster_upstream_rq_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 1,
+ "envoy_cluster_upstream_rq_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 1758,
+ "envoy_cluster_upstream_rq_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 1,
+ "envoy_cluster_upstream_rq_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 3,
+ "envoy_cluster_upstream_rq_tx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_consul-dataplane_mynginx": 0,
+ "envoy_cluster_upstream_rq_tx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_local_app_mynginx": 0,
+ "envoy_cluster_upstream_rq_tx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_original-destination_mynginx": 0,
+ "envoy_cluster_upstream_rq_tx_reset_consul-sandbox-cluster-0159c9d3_default_default_mynginx_prometheus_backend_mynginx": 0,
+ "envoy_listener_admin_downstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 1,
+ "envoy_listener_admin_downstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 2,
+ "envoy_listener_admin_downstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_admin_downstream_cx_overload_reject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_admin_downstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 3,
+ "envoy_listener_admin_downstream_cx_transport_socket_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_admin_downstream_global_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_admin_downstream_listener_filter_error_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_admin_downstream_listener_filter_remote_close_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_admin_downstream_pre_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_admin_downstream_pre_cx_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_downstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 1,
+ "envoy_listener_downstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0,
+ "envoy_listener_downstream_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0,
+ "envoy_listener_downstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 3,
+ "envoy_listener_downstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 6507,
+ "envoy_listener_downstream_cx_destroy_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 1,
+ "envoy_listener_downstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0,
+ "envoy_listener_downstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0,
+ "envoy_listener_downstream_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0,
+ "envoy_listener_downstream_cx_overload_reject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0,
+ "envoy_listener_downstream_cx_overload_reject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0,
+ "envoy_listener_downstream_cx_overload_reject_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0,
+ "envoy_listener_downstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 4,
+ "envoy_listener_downstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 6507,
+ "envoy_listener_downstream_cx_total_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 1,
+ "envoy_listener_downstream_cx_transport_socket_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0,
+ "envoy_listener_downstream_cx_transport_socket_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0,
+ "envoy_listener_downstream_cx_transport_socket_connect_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0,
+ "envoy_listener_downstream_global_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0,
+ "envoy_listener_downstream_global_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0,
+ "envoy_listener_downstream_global_cx_overflow_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0,
+ "envoy_listener_downstream_listener_filter_error_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0,
+ "envoy_listener_downstream_listener_filter_error_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0,
+ "envoy_listener_downstream_listener_filter_error_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0,
+ "envoy_listener_downstream_listener_filter_remote_close_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0,
+ "envoy_listener_downstream_listener_filter_remote_close_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0,
+ "envoy_listener_downstream_listener_filter_remote_close_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0,
+ "envoy_listener_downstream_pre_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0,
+ "envoy_listener_downstream_pre_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0,
+ "envoy_listener_downstream_pre_cx_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0,
+ "envoy_listener_downstream_pre_cx_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_0.0.0.0_20200_mynginx": 0,
+ "envoy_listener_downstream_pre_cx_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_10.50.132.6_20000_mynginx": 0,
+ "envoy_listener_downstream_pre_cx_timeout_consul-sandbox-cluster-0159c9d3_default_default_mynginx_127.0.0.1_15001_mynginx": 0,
+ "envoy_listener_manager_listener_added_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 3,
+ "envoy_listener_manager_listener_create_failure_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_manager_listener_create_success_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 6,
+ "envoy_listener_manager_listener_in_place_updated_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_manager_listener_modified_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_manager_listener_removed_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_manager_listener_stopped_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_manager_total_listeners_active_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 3,
+ "envoy_listener_manager_total_listeners_draining_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_listener_manager_total_listeners_warming_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_server_memory_allocated_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 7742368,
+ "envoy_server_memory_heap_size_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 14680064,
+ "envoy_server_memory_physical_size_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 19175778,
+ "envoy_server_parent_connections_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_server_state_draining_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_server_state_initializing_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_server_state_live_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 1,
+ "envoy_server_state_pre_initializing_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_server_total_connections_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 0,
+ "envoy_server_uptime_consul-sandbox-cluster-0159c9d3_default_default_mynginx_mynginx": 32527,
+ },
+ },
+ "case envoy": {
+ prepare: prepareCaseEnvoyStats,
+ wantMetrics: map[string]int64{
+ "envoy_cluster_manager_active_clusters": 1,
+ "envoy_cluster_manager_cluster_added": 1,
+ "envoy_cluster_manager_cluster_modified": 0,
+ "envoy_cluster_manager_cluster_removed": 0,
+ "envoy_cluster_manager_cluster_updated": 0,
+ "envoy_cluster_manager_cluster_updated_via_merge": 0,
+ "envoy_cluster_manager_update_merge_cancelled": 0,
+ "envoy_cluster_manager_update_out_of_merge_window": 0,
+ "envoy_cluster_manager_warming_clusters": 0,
+ "envoy_cluster_membership_change_service_envoyproxy_io": 1,
+ "envoy_cluster_membership_degraded_service_envoyproxy_io": 0,
+ "envoy_cluster_membership_excluded_service_envoyproxy_io": 0,
+ "envoy_cluster_membership_healthy_service_envoyproxy_io": 1,
+ "envoy_cluster_update_empty_service_envoyproxy_io": 0,
+ "envoy_cluster_update_failure_service_envoyproxy_io": 0,
+ "envoy_cluster_update_no_rebuild_service_envoyproxy_io": 0,
+ "envoy_cluster_update_success_service_envoyproxy_io": 1242,
+ "envoy_cluster_upstream_cx_active_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_connect_attempts_exceeded_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_connect_fail_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_connect_timeout_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_destroy_local_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_destroy_remote_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_destroy_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_http1_total_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_http2_total_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_http3_total_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_idle_timeout_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_max_duration_reached_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_overflow_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_rx_bytes_buffered_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_rx_bytes_total_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_total_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_tx_bytes_buffered_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_cx_tx_bytes_total_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_active_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_cancelled_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_maintenance_mode_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_max_duration_reached_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_pending_active_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_pending_failure_eject_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_pending_overflow_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_pending_total_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_per_try_timeout_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_exponential_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_retry_backoff_ratelimited_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_retry_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_retry_success_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_rx_reset_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_timeout_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_total_service_envoyproxy_io": 0,
+ "envoy_cluster_upstream_rq_tx_reset_service_envoyproxy_io": 0,
+ "envoy_listener_admin_downstream_cx_active": 2,
+ "envoy_listener_admin_downstream_cx_destroy": 4,
+ "envoy_listener_admin_downstream_cx_overflow": 0,
+ "envoy_listener_admin_downstream_cx_overload_reject": 0,
+ "envoy_listener_admin_downstream_cx_total": 6,
+ "envoy_listener_admin_downstream_cx_transport_socket_connect_timeout": 0,
+ "envoy_listener_admin_downstream_global_cx_overflow": 0,
+ "envoy_listener_admin_downstream_listener_filter_error": 0,
+ "envoy_listener_admin_downstream_listener_filter_remote_close": 0,
+ "envoy_listener_admin_downstream_pre_cx_active": 0,
+ "envoy_listener_admin_downstream_pre_cx_timeout": 0,
+ "envoy_listener_downstream_cx_active_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_cx_destroy_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_cx_overflow_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_cx_overload_reject_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_cx_total_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_cx_transport_socket_connect_timeout_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_global_cx_overflow_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_listener_filter_error_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_listener_filter_remote_close_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_pre_cx_active_0.0.0.0_10000": 0,
+ "envoy_listener_downstream_pre_cx_timeout_0.0.0.0_10000": 0,
+ "envoy_listener_manager_listener_added": 1,
+ "envoy_listener_manager_listener_create_failure": 0,
+ "envoy_listener_manager_listener_create_success": 16,
+ "envoy_listener_manager_listener_in_place_updated": 0,
+ "envoy_listener_manager_listener_modified": 0,
+ "envoy_listener_manager_listener_removed": 0,
+ "envoy_listener_manager_listener_stopped": 0,
+ "envoy_listener_manager_total_listeners_active": 1,
+ "envoy_listener_manager_total_listeners_draining": 0,
+ "envoy_listener_manager_total_listeners_warming": 0,
+ "envoy_server_memory_allocated": 7630184,
+ "envoy_server_memory_heap_size": 16777216,
+ "envoy_server_memory_physical_size": 28426958,
+ "envoy_server_parent_connections": 0,
+ "envoy_server_state_draining": 0,
+ "envoy_server_state_initializing": 0,
+ "envoy_server_state_live": 1,
+ "envoy_server_state_pre_initializing": 0,
+ "envoy_server_total_connections": 0,
+ "envoy_server_uptime": 6225,
+ },
+ },
+ "case invalid data response": {
+ prepare: prepareCaseInvalidDataResponse,
+ wantMetrics: nil,
+ },
+ "case 404": {
+ prepare: prepareCase404,
+ wantMetrics: nil,
+ },
+ }
+
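+	// For each case: build the collector, init it, collect once, then compare the
+	// collected metrics and verify that every chart dimension/variable has data.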
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ envoy, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, envoy.Init())
+
+ mx := envoy.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, envoy, mx)
+ })
+ }
+}
+
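+// ensureCollectedHasAllChartsDimsVarsIDs asserts that every chart dimension and
+// variable ID has a corresponding entry in the collected metrics map.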
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, envoy *Envoy, mx map[string]int64) {
+ for _, chart := range *envoy.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareCaseEnvoyConsulDataplaneStats() (*Envoy, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataEnvoyConsulDataplane)
+ }))
+ envoy := New()
+ envoy.URL = srv.URL
+
+ return envoy, srv.Close
+}
+
+func prepareCaseEnvoyStats() (*Envoy, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataEnvoy)
+ }))
+ envoy := New()
+ envoy.URL = srv.URL
+
+ return envoy, srv.Close
+}
+
+func prepareCaseInvalidDataResponse() (*Envoy, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ envoy := New()
+ envoy.URL = srv.URL
+
+ return envoy, srv.Close
+}
+
+func prepareCase404() (*Envoy, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ envoy := New()
+ envoy.URL = srv.URL
+
+ return envoy, srv.Close
+}
diff --git a/src/go/plugin/go.d/modules/envoy/init.go b/src/go/plugin/go.d/modules/envoy/init.go
new file mode 100644
index 000000000..8eba65d95
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/init.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package envoy
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
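+// validateConfig checks that the minimum required configuration (the target URL) is set.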
+func (e *Envoy) validateConfig() error {
+ if e.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
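+// initPrometheusClient builds an HTTP client from the module's client settings and
+// wraps it in a Prometheus text-format scraper for the configured request.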
+func (e *Envoy) initPrometheusClient() (prometheus.Prometheus, error) {
+ httpClient, err := web.NewHTTPClient(e.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(httpClient, e.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/envoy/integrations/envoy.md b/src/go/plugin/go.d/modules/envoy/integrations/envoy.md
new file mode 100644
index 000000000..3865ca529
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/integrations/envoy.md
@@ -0,0 +1,306 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/envoy/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/envoy/metadata.yaml"
+sidebar_label: "Envoy"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Envoy
+
+
+<img src="https://netdata.cloud/img/envoy.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: envoy
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Envoy proxies. It collects server, cluster, and listener metrics.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Envoy instances running on localhost.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Envoy instance
+
+Envoy exposes metrics in Prometheus format. All metric labels are added to charts.
+
+This scope has no labels.
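+
+For example, the collector scrapes Envoy's `/stats/prometheus` endpoint, which returns plain-text Prometheus metrics such as the shortened sample below (taken from the bundled test data, with most labels omitted for brevity). Label values such as `envoy_cluster_name` become part of the collected metric IDs used for chart dimensions.
+
+```
+# TYPE envoy_cluster_default_total_match_count counter
+envoy_cluster_default_total_match_count{local_cluster="mynginx",envoy_cluster_name="local_app"} 1
+```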
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| envoy.server_state | live, draining, pre_initializing, initializing | state |
+| envoy.server_connections_count | connections | connections |
+| envoy.server_parent_connections_count | connections | connections |
+| envoy.server_memory_allocated_size | allocated | bytes |
+| envoy.server_memory_heap_size | heap | bytes |
+| envoy.server_memory_physical_size | physical | bytes |
+| envoy.server_uptime | uptime | seconds |
+| envoy.cluster_manager_cluster_count | active, not_active | clusters |
+| envoy.cluster_manager_cluster_changes_rate | added, modified, removed | clusters/s |
+| envoy.cluster_manager_cluster_updates_rate | cluster | updates/s |
+| envoy.cluster_manager_cluster_updated_via_merge_rate | via_merge | updates/s |
+| envoy.cluster_manager_update_merge_cancelled_rate | merge_cancelled | updates/s |
+| envoy.cluster_manager_update_out_of_merge_window_rate | out_of_merge_window | updates/s |
+| envoy.cluster_membership_endpoints_count | healthy, degraded, excluded | endpoints |
+| envoy.cluster_membership_changes_rate | membership | changes/s |
+| envoy.cluster_membership_updates_rate | success, failure, empty, no_rebuild | updates/s |
+| envoy.cluster_upstream_cx_active_count | active | connections |
+| envoy.cluster_upstream_cx_rate | created | connections/s |
+| envoy.cluster_upstream_cx_http_rate | http1, http2, http3 | connections/s |
+| envoy.cluster_upstream_cx_destroy_rate | local, remote | connections/s |
+| envoy.cluster_upstream_cx_connect_fail_rate | failed | connections/s |
+| envoy.cluster_upstream_cx_connect_timeout_rate | timeout | connections/s |
+| envoy.cluster_upstream_cx_bytes_rate | received, sent | bytes/s |
+| envoy.cluster_upstream_cx_bytes_buffered_size | received, send | bytes |
+| envoy.cluster_upstream_rq_active_count | active | requests |
+| envoy.cluster_upstream_rq_rate | requests | requests/s |
+| envoy.cluster_upstream_rq_failed_rate | cancelled, maintenance_mode, timeout, max_duration_reached, per_try_timeout, reset_local, reset_remote | requests/s |
+| envoy.cluster_upstream_rq_pending_active_count | active_pending | requests |
+| envoy.cluster_upstream_rq_pending_rate | pending | requests/s |
+| envoy.cluster_upstream_rq_pending_failed_rate | overflow, failure_eject | requests/s |
+| envoy.cluster_upstream_rq_retry_rate | request | retries/s |
+| envoy.cluster_upstream_rq_retry_success_rate | success | retries/s |
+| envoy.cluster_upstream_rq_retry_backoff_rate | exponential, ratelimited | retries/s |
+| envoy.listener_manager_listeners_count | active, warming, draining | listeners |
+| envoy.listener_manager_listener_changes_rate | added, modified, removed, stopped | listeners/s |
+| envoy.listener_manager_listener_object_events_rate | create_success, create_failure, in_place_updated | objects/s |
+| envoy.listener_admin_downstream_cx_active_count | active | connections |
+| envoy.listener_admin_downstream_cx_rate | created | connections/s |
+| envoy.listener_admin_downstream_cx_destroy_rate | destroyed | connections/s |
+| envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |
+| envoy.listener_admin_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |
+| envoy.listener_admin_downstream_listener_filter_remote_close_rate | closed | connections/s |
+| envoy.listener_admin_downstream_listener_filter_error_rate | read | errors/s |
+| envoy.listener_admin_downstream_pre_cx_active_count | active | sockets |
+| envoy.listener_admin_downstream_pre_cx_timeout_rate | timeout | sockets/s |
+| envoy.listener_downstream_cx_active_count | active | connections |
+| envoy.listener_downstream_cx_rate | created | connections/s |
+| envoy.listener_downstream_cx_destroy_rate | destroyed | connections/s |
+| envoy.listener_downstream_cx_transport_socket_connect_timeout_rate | timeout | connections/s |
+| envoy.listener_downstream_cx_rejected_rate | overflow, overload, global_overflow | connections/s |
+| envoy.listener_downstream_listener_filter_remote_close_rate | closed | connections/s |
+| envoy.listener_downstream_listener_filter_error_rate | read | errors/s |
+| envoy.listener_downstream_pre_cx_active_count | active | sockets |
+| envoy.listener_downstream_pre_cx_timeout_rate | timeout | sockets/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/envoy.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/envoy.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9091/stats/prometheus | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9901/stats/prometheus
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9901/stats/prometheus
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9901/stats/prometheus
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9901/stats/prometheus
+
+ - name: remote
+ url: http://192.0.2.1:9901/stats/prometheus
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `envoy` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m envoy
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `envoy` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep envoy
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep envoy /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep envoy
+```
+
+
diff --git a/src/go/plugin/go.d/modules/envoy/metadata.yaml b/src/go/plugin/go.d/modules/envoy/metadata.yaml
new file mode 100644
index 000000000..def9e726a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/metadata.yaml
@@ -0,0 +1,538 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-envoy
+ plugin_name: go.d.plugin
+ module_name: envoy
+ monitored_instance:
+ name: Envoy
+ link: https://www.envoyproxy.io/
+ icon_filename: envoy.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - envoy
+ - proxy
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Envoy proxies. It collects server, cluster, and listener metrics.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Envoy instances running on localhost.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/envoy.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:9091/stats/prometheus
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9901/stats/prometheus
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9901/stats/prometheus
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: |
+ Do not validate server certificate chain and hostname.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:9901/stats/prometheus
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9901/stats/prometheus
+
+ - name: remote
+ url: http://192.0.2.1:9901/stats/prometheus
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: Envoy exposes metrics in Prometheus format. All metric labels are added to charts.
+ labels: []
+ metrics:
+ - name: envoy.server_state
+ description: Server current state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: live
+ - name: draining
+ - name: pre_initializing
+ - name: initializing
+ - name: envoy.server_connections_count
+ description: Server current connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: envoy.server_parent_connections_count
+ description: Server current parent connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: envoy.server_memory_allocated_size
+ description: Server memory allocated size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: envoy.server_memory_heap_size
+ description: Server memory heap size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: heap
+ - name: envoy.server_memory_physical_size
+ description: Server memory physical size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: physical
+ - name: envoy.server_uptime
+ description: Server uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: envoy.cluster_manager_cluster_count
+ description: Cluster manager current clusters
+ unit: clusters
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: not_active
+ - name: envoy.cluster_manager_cluster_changes_rate
+ description: Cluster manager cluster changes
+ unit: clusters/s
+ chart_type: line
+ dimensions:
+ - name: added
+ - name: modified
+ - name: removed
+ - name: envoy.cluster_manager_cluster_updates_rate
+ description: Cluster manager updates
+ unit: updates/s
+ chart_type: line
+ dimensions:
+ - name: cluster
+ - name: envoy.cluster_manager_cluster_updated_via_merge_rate
+ description: Cluster manager updates applied as merged updates
+ unit: updates/s
+ chart_type: line
+ dimensions:
+ - name: via_merge
+ - name: envoy.cluster_manager_update_merge_cancelled_rate
+ description: Cluster manager cancelled merged updates
+ unit: updates/s
+ chart_type: line
+ dimensions:
+ - name: merge_cancelled
+ - name: envoy.cluster_manager_update_out_of_merge_window_rate
+ description: Cluster manager out of a merge window updates
+ unit: updates/s
+ chart_type: line
+ dimensions:
+ - name: out_of_merge_window
+ - name: envoy.cluster_membership_endpoints_count
+ description: Cluster membership current endpoints
+ unit: endpoints
+ chart_type: line
+ dimensions:
+ - name: healthy
+ - name: degraded
+ - name: excluded
+ - name: envoy.cluster_membership_changes_rate
+ description: Cluster membership changes
+ unit: changes/s
+ chart_type: line
+ dimensions:
+ - name: membership
+ - name: envoy.cluster_membership_updates_rate
+ description: Cluster membership updates
+ unit: updates/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: empty
+ - name: no_rebuild
+ - name: envoy.cluster_upstream_cx_active_count
+ description: Cluster upstream current active connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: envoy.cluster_upstream_cx_rate
+ description: Cluster upstream connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: envoy.cluster_upstream_cx_http_rate
+ description: Cluster upstream connections by HTTP version
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: http1
+ - name: http2
+ - name: http3
+ - name: envoy.cluster_upstream_cx_destroy_rate
+ description: Cluster upstream destroyed connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: local
+ - name: remote
+ - name: envoy.cluster_upstream_cx_connect_fail_rate
+ description: Cluster upstream failed connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: failed
+ - name: envoy.cluster_upstream_cx_connect_timeout_rate
+ description: Cluster upstream timed out connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: timeout
+ - name: envoy.cluster_upstream_cx_bytes_rate
+ description: Cluster upstream connection traffic
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: envoy.cluster_upstream_cx_bytes_buffered_size
+ description: Cluster upstream current connection buffered size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: send
+ - name: envoy.cluster_upstream_rq_active_count
+ description: Cluster upstream current active requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: envoy.cluster_upstream_rq_rate
+ description: Cluster upstream requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: envoy.cluster_upstream_rq_failed_rate
+ description: Cluster upstream failed requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: cancelled
+ - name: maintenance_mode
+ - name: timeout
+ - name: max_duration_reached
+ - name: per_try_timeout
+ - name: reset_local
+ - name: reset_remote
+ - name: envoy.cluster_upstream_rq_pending_active_count
+ description: Cluster upstream current active pending requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: active_pending
+ - name: envoy.cluster_upstream_rq_pending_rate
+ description: Cluster upstream pending requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: pending
+ - name: envoy.cluster_upstream_rq_pending_failed_rate
+ description: Cluster upstream failed pending requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: overflow
+ - name: failure_eject
+ - name: envoy.cluster_upstream_rq_retry_rate
+ description: Cluster upstream request retries
+ unit: retries/s
+ chart_type: line
+ dimensions:
+ - name: request
+ - name: envoy.cluster_upstream_rq_retry_success_rate
+ description: Cluster upstream request successful retries
+ unit: retries/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: envoy.cluster_upstream_rq_retry_backoff_rate
+ description: Cluster upstream request backoff retries
+ unit: retries/s
+ chart_type: line
+ dimensions:
+ - name: exponential
+ - name: ratelimited
+ - name: envoy.listener_manager_listeners_count
+ description: Listener manager current listeners
+ unit: listeners
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: warming
+ - name: draining
+ - name: envoy.listener_manager_listener_changes_rate
+ description: Listener manager listener changes
+ unit: listeners/s
+ chart_type: line
+ dimensions:
+ - name: added
+ - name: modified
+ - name: removed
+ - name: stopped
+ - name: envoy.listener_manager_listener_object_events_rate
+ description: Listener manager listener object events
+ unit: objects/s
+ chart_type: line
+ dimensions:
+ - name: create_success
+ - name: create_failure
+ - name: in_place_updated
+ - name: envoy.listener_admin_downstream_cx_active_count
+ description: Listener admin downstream current active connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: envoy.listener_admin_downstream_cx_rate
+ description: Listener admin downstream connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: envoy.listener_admin_downstream_cx_destroy_rate
+ description: Listener admin downstream destroyed connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: destroyed
+ - name: envoy.listener_admin_downstream_cx_transport_socket_connect_timeout_rate
+ description: Listener admin downstream timed out connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: timeout
+ - name: envoy.listener_admin_downstream_cx_rejected_rate
+ description: Listener admin downstream rejected connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: overflow
+ - name: overload
+ - name: global_overflow
+ - name: envoy.listener_admin_downstream_listener_filter_remote_close_rate
+ description: Listener admin downstream connections closed by remote while peeking data for listener filters
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: closed
+ - name: envoy.listener_admin_downstream_listener_filter_error_rate
+ description: Listener admin downstream read errors when peeking data for listener filters
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: envoy.listener_admin_downstream_pre_cx_active_count
+ description: Listener admin downstream current active sockets
+ unit: sockets
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: envoy.listener_admin_downstream_pre_cx_timeout_rate
+ description: Listener admin downstream timed out sockets
+ unit: sockets/s
+ chart_type: line
+ dimensions:
+ - name: timeout
+ - name: envoy.listener_downstream_cx_active_count
+ description: Listener downstream current active connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: envoy.listener_downstream_cx_rate
+ description: Listener downstream connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: envoy.listener_downstream_cx_destroy_rate
+ description: Listener downstream destroyed connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: destroyed
+ - name: envoy.listener_downstream_cx_transport_socket_connect_timeout_rate
+ description: Listener downstream timed out connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: timeout
+ - name: envoy.listener_downstream_cx_rejected_rate
+ description: Listener downstream rejected connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: overflow
+ - name: overload
+ - name: global_overflow
+ - name: envoy.listener_downstream_listener_filter_remote_close_rate
+ description: Listener downstream connections closed by remote while peeking data for listener filters
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: closed
+ - name: envoy.listener_downstream_listener_filter_error_rate
+ description: Listener downstream read errors when peeking data for listener filters
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: envoy.listener_downstream_pre_cx_active_count
+ description: Listener downstream current active sockets
+ unit: sockets
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: envoy.listener_downstream_pre_cx_timeout_rate
+ description: Listener downstream timed out sockets
+ unit: sockets/s
+ chart_type: line
+ dimensions:
+ - name: timeout
diff --git a/src/go/plugin/go.d/modules/envoy/testdata/config.json b/src/go/plugin/go.d/modules/envoy/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/envoy/testdata/config.yaml b/src/go/plugin/go.d/modules/envoy/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/envoy/testdata/consul-dataplane.txt b/src/go/plugin/go.d/modules/envoy/testdata/consul-dataplane.txt
new file mode 100644
index 000000000..2dbb91856
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/testdata/consul-dataplane.txt
@@ -0,0 +1,1786 @@
+# HELP consul_dataplane_connect_duration This will be a sample of the time it takes to get connected to a server. This duration will cover everything from making the server features request all the way through to opening an xDS session with a server
+# TYPE consul_dataplane_connect_duration summary
+consul_dataplane_connect_duration{quantile="0.5"} NaN
+consul_dataplane_connect_duration{quantile="0.9"} NaN
+consul_dataplane_connect_duration{quantile="0.99"} NaN
+consul_dataplane_connect_duration_sum 321.85443115234375
+consul_dataplane_connect_duration_count 1
+# HELP consul_dataplane_connection_errors This will track the number of errors encountered during the stream connection
+# TYPE consul_dataplane_connection_errors gauge
+consul_dataplane_connection_errors 0
+# HELP consul_dataplane_consul_connected This will either be 0 or 1 depending on whether the dataplane is currently connected to a Consul server.
+# TYPE consul_dataplane_consul_connected gauge
+consul_dataplane_consul_connected 1
+# HELP consul_dataplane_discover_servers_duration This will be a sample of the time it takes to discover Consul server IPs.
+# TYPE consul_dataplane_discover_servers_duration summary
+consul_dataplane_discover_servers_duration{quantile="0.5"} NaN
+consul_dataplane_discover_servers_duration{quantile="0.9"} NaN
+consul_dataplane_discover_servers_duration{quantile="0.99"} NaN
+consul_dataplane_discover_servers_duration_sum 0.6415159702301025
+consul_dataplane_discover_servers_duration_count 1
+# HELP consul_dataplane_envoy_connected This will either be 0 or 1 depending on whether Envoy is currently running and connected to the local xDS listeners.
+# TYPE consul_dataplane_envoy_connected gauge
+consul_dataplane_envoy_connected 1
+# HELP consul_dataplane_go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE consul_dataplane_go_gc_duration_seconds summary
+consul_dataplane_go_gc_duration_seconds{quantile="0"} 2.194e-05
+consul_dataplane_go_gc_duration_seconds{quantile="0.25"} 3.592e-05
+consul_dataplane_go_gc_duration_seconds{quantile="0.5"} 5.2941e-05
+consul_dataplane_go_gc_duration_seconds{quantile="0.75"} 6.61e-05
+consul_dataplane_go_gc_duration_seconds{quantile="1"} 0.000139612
+consul_dataplane_go_gc_duration_seconds_sum 0.014481198
+consul_dataplane_go_gc_duration_seconds_count 273
+# HELP consul_dataplane_go_goroutines Number of goroutines that currently exist.
+# TYPE consul_dataplane_go_goroutines gauge
+consul_dataplane_go_goroutines 41
+# HELP consul_dataplane_go_info Information about the Go environment.
+# TYPE consul_dataplane_go_info gauge
+consul_dataplane_go_info{version="go1.19.1"} 1
+# HELP consul_dataplane_go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE consul_dataplane_go_memstats_alloc_bytes gauge
+consul_dataplane_go_memstats_alloc_bytes 2.543784e+06
+# HELP consul_dataplane_go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE consul_dataplane_go_memstats_alloc_bytes_total counter
+consul_dataplane_go_memstats_alloc_bytes_total 4.6530512e+07
+# HELP consul_dataplane_go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE consul_dataplane_go_memstats_buck_hash_sys_bytes gauge
+consul_dataplane_go_memstats_buck_hash_sys_bytes 4700
+# HELP consul_dataplane_go_memstats_frees_total Total number of frees.
+# TYPE consul_dataplane_go_memstats_frees_total counter
+consul_dataplane_go_memstats_frees_total 1.356599e+06
+# HELP consul_dataplane_go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE consul_dataplane_go_memstats_gc_sys_bytes gauge
+consul_dataplane_go_memstats_gc_sys_bytes 9.370488e+06
+# HELP consul_dataplane_go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE consul_dataplane_go_memstats_heap_alloc_bytes gauge
+consul_dataplane_go_memstats_heap_alloc_bytes 2.543784e+06
+# HELP consul_dataplane_go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE consul_dataplane_go_memstats_heap_idle_bytes gauge
+consul_dataplane_go_memstats_heap_idle_bytes 3.137536e+06
+# HELP consul_dataplane_go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE consul_dataplane_go_memstats_heap_inuse_bytes gauge
+consul_dataplane_go_memstats_heap_inuse_bytes 4.46464e+06
+# HELP consul_dataplane_go_memstats_heap_objects Number of allocated objects.
+# TYPE consul_dataplane_go_memstats_heap_objects gauge
+consul_dataplane_go_memstats_heap_objects 5982
+# HELP consul_dataplane_go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE consul_dataplane_go_memstats_heap_released_bytes gauge
+consul_dataplane_go_memstats_heap_released_bytes 2.940928e+06
+# HELP consul_dataplane_go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE consul_dataplane_go_memstats_heap_sys_bytes gauge
+consul_dataplane_go_memstats_heap_sys_bytes 7.602176e+06
+# HELP consul_dataplane_go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE consul_dataplane_go_memstats_last_gc_time_seconds gauge
+consul_dataplane_go_memstats_last_gc_time_seconds 1.678889049944119e+09
+# HELP consul_dataplane_go_memstats_lookups_total Total number of pointer lookups.
+# TYPE consul_dataplane_go_memstats_lookups_total counter
+consul_dataplane_go_memstats_lookups_total 0
+# HELP consul_dataplane_go_memstats_mallocs_total Total number of mallocs.
+# TYPE consul_dataplane_go_memstats_mallocs_total counter
+consul_dataplane_go_memstats_mallocs_total 1.362581e+06
+# HELP consul_dataplane_go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE consul_dataplane_go_memstats_mcache_inuse_bytes gauge
+consul_dataplane_go_memstats_mcache_inuse_bytes 4800
+# HELP consul_dataplane_go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE consul_dataplane_go_memstats_mcache_sys_bytes gauge
+consul_dataplane_go_memstats_mcache_sys_bytes 15600
+# HELP consul_dataplane_go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE consul_dataplane_go_memstats_mspan_inuse_bytes gauge
+consul_dataplane_go_memstats_mspan_inuse_bytes 80920
+# HELP consul_dataplane_go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE consul_dataplane_go_memstats_mspan_sys_bytes gauge
+consul_dataplane_go_memstats_mspan_sys_bytes 81600
+# HELP consul_dataplane_go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE consul_dataplane_go_memstats_next_gc_bytes gauge
+consul_dataplane_go_memstats_next_gc_bytes 5.238856e+06
+# HELP consul_dataplane_go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE consul_dataplane_go_memstats_other_sys_bytes gauge
+consul_dataplane_go_memstats_other_sys_bytes 1.258124e+06
+# HELP consul_dataplane_go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE consul_dataplane_go_memstats_stack_inuse_bytes gauge
+consul_dataplane_go_memstats_stack_inuse_bytes 786432
+# HELP consul_dataplane_go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE consul_dataplane_go_memstats_stack_sys_bytes gauge
+consul_dataplane_go_memstats_stack_sys_bytes 786432
+# HELP consul_dataplane_go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE consul_dataplane_go_memstats_sys_bytes gauge
+consul_dataplane_go_memstats_sys_bytes 1.911912e+07
+# HELP consul_dataplane_go_threads Number of OS threads created.
+# TYPE consul_dataplane_go_threads gauge
+consul_dataplane_go_threads 10
+# HELP consul_dataplane_login_duration This will be a sample of the time it takes to login to Consul.
+# TYPE consul_dataplane_login_duration summary
+consul_dataplane_login_duration{quantile="0.5"} NaN
+consul_dataplane_login_duration{quantile="0.9"} NaN
+consul_dataplane_login_duration{quantile="0.99"} NaN
+consul_dataplane_login_duration_sum 18.53141975402832
+consul_dataplane_login_duration_count 1
+# HELP consul_dataplane_runtime_alloc_bytes runtime_alloc_bytes
+# TYPE consul_dataplane_runtime_alloc_bytes gauge
+consul_dataplane_runtime_alloc_bytes 2.526696e+06
+# HELP consul_dataplane_runtime_free_count runtime_free_count
+# TYPE consul_dataplane_runtime_free_count gauge
+consul_dataplane_runtime_free_count 1.356599e+06
+# HELP consul_dataplane_runtime_gc_pause_ns runtime_gc_pause_ns
+# TYPE consul_dataplane_runtime_gc_pause_ns summary
+consul_dataplane_runtime_gc_pause_ns{quantile="0.5"} 55990
+consul_dataplane_runtime_gc_pause_ns{quantile="0.9"} 55990
+consul_dataplane_runtime_gc_pause_ns{quantile="0.99"} 55990
+consul_dataplane_runtime_gc_pause_ns_sum 55990
+consul_dataplane_runtime_gc_pause_ns_count 1
+# HELP consul_dataplane_runtime_heap_objects runtime_heap_objects
+# TYPE consul_dataplane_runtime_heap_objects gauge
+consul_dataplane_runtime_heap_objects 5978
+# HELP consul_dataplane_runtime_malloc_count runtime_malloc_count
+# TYPE consul_dataplane_runtime_malloc_count gauge
+consul_dataplane_runtime_malloc_count 1.362577e+06
+# HELP consul_dataplane_runtime_num_goroutines runtime_num_goroutines
+# TYPE consul_dataplane_runtime_num_goroutines gauge
+consul_dataplane_runtime_num_goroutines 35
+# HELP consul_dataplane_runtime_sys_bytes runtime_sys_bytes
+# TYPE consul_dataplane_runtime_sys_bytes gauge
+consul_dataplane_runtime_sys_bytes 1.911912e+07
+# HELP consul_dataplane_runtime_total_gc_pause_ns runtime_total_gc_pause_ns
+# TYPE consul_dataplane_runtime_total_gc_pause_ns gauge
+consul_dataplane_runtime_total_gc_pause_ns 1.4481198e+07
+# HELP consul_dataplane_runtime_total_gc_runs runtime_total_gc_runs
+# TYPE consul_dataplane_runtime_total_gc_runs gauge
+consul_dataplane_runtime_total_gc_runs 273
+# TYPE envoy_cluster_assignment_stale counter
+envoy_cluster_assignment_stale{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_assignment_stale{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_assignment_stale{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_assignment_stale{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_assignment_timeout_received counter
+envoy_cluster_assignment_timeout_received{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_assignment_timeout_received{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_assignment_timeout_received{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_assignment_timeout_received{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_bind_errors counter
+envoy_cluster_bind_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_bind_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_bind_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_bind_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_default_total_match_count counter
+envoy_cluster_default_total_match_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_default_total_match_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1
+envoy_cluster_default_total_match_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_default_total_match_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1
+# TYPE envoy_cluster_external_upstream_rq counter
+envoy_cluster_external_upstream_rq{envoy_response_code="200",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3
+# TYPE envoy_cluster_external_upstream_rq_completed counter
+envoy_cluster_external_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3
+# TYPE envoy_cluster_external_upstream_rq_xx counter
+envoy_cluster_external_upstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3
+# TYPE envoy_cluster_http1_dropped_headers_with_underscores counter
+envoy_cluster_http1_dropped_headers_with_underscores{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_http1_metadata_not_supported_error counter
+envoy_cluster_http1_metadata_not_supported_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_http1_requests_rejected_with_underscores_in_headers counter
+envoy_cluster_http1_requests_rejected_with_underscores_in_headers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_http1_response_flood counter
+envoy_cluster_http1_response_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_http2_dropped_headers_with_underscores counter
+envoy_cluster_http2_dropped_headers_with_underscores{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_header_overflow counter
+envoy_cluster_http2_header_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_headers_cb_no_stream counter
+envoy_cluster_http2_headers_cb_no_stream{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_inbound_empty_frames_flood counter
+envoy_cluster_http2_inbound_empty_frames_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_inbound_priority_frames_flood counter
+envoy_cluster_http2_inbound_priority_frames_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_inbound_window_update_frames_flood counter
+envoy_cluster_http2_inbound_window_update_frames_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_keepalive_timeout counter
+envoy_cluster_http2_keepalive_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_metadata_empty_frames counter
+envoy_cluster_http2_metadata_empty_frames{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_outbound_control_flood counter
+envoy_cluster_http2_outbound_control_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_outbound_flood counter
+envoy_cluster_http2_outbound_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_requests_rejected_with_underscores_in_headers counter
+envoy_cluster_http2_requests_rejected_with_underscores_in_headers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_rx_messaging_error counter
+envoy_cluster_http2_rx_messaging_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_rx_reset counter
+envoy_cluster_http2_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_stream_refused_errors counter
+envoy_cluster_http2_stream_refused_errors{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_trailers counter
+envoy_cluster_http2_trailers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_tx_flush_timeout counter
+envoy_cluster_http2_tx_flush_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_tx_reset counter
+envoy_cluster_http2_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_internal_upstream_rq counter
+envoy_cluster_internal_upstream_rq{envoy_response_code="200",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+# TYPE envoy_cluster_internal_upstream_rq_completed counter
+envoy_cluster_internal_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+# TYPE envoy_cluster_internal_upstream_rq_xx counter
+envoy_cluster_internal_upstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+# TYPE envoy_cluster_lb_healthy_panic counter
+envoy_cluster_lb_healthy_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_healthy_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_healthy_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_healthy_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_local_cluster_not_ok counter
+envoy_cluster_lb_local_cluster_not_ok{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_local_cluster_not_ok{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_local_cluster_not_ok{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_local_cluster_not_ok{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_recalculate_zone_structures counter
+envoy_cluster_lb_recalculate_zone_structures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_recalculate_zone_structures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_recalculate_zone_structures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_recalculate_zone_structures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_created counter
+envoy_cluster_lb_subsets_created{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_created{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_created{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_created{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_fallback counter
+envoy_cluster_lb_subsets_fallback{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_fallback{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_fallback{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_fallback{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_fallback_panic counter
+envoy_cluster_lb_subsets_fallback_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_fallback_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_fallback_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_fallback_panic{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_removed counter
+envoy_cluster_lb_subsets_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_subsets_selected counter
+envoy_cluster_lb_subsets_selected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_selected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_selected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_selected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_cluster_too_small counter
+envoy_cluster_lb_zone_cluster_too_small{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_cluster_too_small{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_cluster_too_small{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_cluster_too_small{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_no_capacity_left counter
+envoy_cluster_lb_zone_no_capacity_left{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_no_capacity_left{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_no_capacity_left{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_no_capacity_left{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_number_differs counter
+envoy_cluster_lb_zone_number_differs{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_number_differs{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_number_differs{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_number_differs{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_routing_all_directly counter
+envoy_cluster_lb_zone_routing_all_directly{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_routing_all_directly{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_routing_all_directly{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_routing_all_directly{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_routing_cross_zone counter
+envoy_cluster_lb_zone_routing_cross_zone{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_routing_cross_zone{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_routing_cross_zone{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_routing_cross_zone{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_lb_zone_routing_sampled counter
+envoy_cluster_lb_zone_routing_sampled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_zone_routing_sampled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_zone_routing_sampled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_zone_routing_sampled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_membership_change counter
+envoy_cluster_membership_change{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_membership_change{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1
+envoy_cluster_membership_change{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 2
+envoy_cluster_membership_change{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1
+# TYPE envoy_cluster_original_dst_host_invalid counter
+envoy_cluster_original_dst_host_invalid{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_original_dst_host_invalid{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_original_dst_host_invalid{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_original_dst_host_invalid{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_retry_or_shadow_abandoned counter
+envoy_cluster_retry_or_shadow_abandoned{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_retry_or_shadow_abandoned{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_retry_or_shadow_abandoned{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_retry_or_shadow_abandoned{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_attempt counter
+envoy_cluster_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_empty counter
+envoy_cluster_update_empty{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_empty{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_empty{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_empty{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_failure counter
+envoy_cluster_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_no_rebuild counter
+envoy_cluster_update_no_rebuild{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_no_rebuild{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_no_rebuild{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_no_rebuild{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_update_success counter
+envoy_cluster_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_close_notify counter
+envoy_cluster_upstream_cx_close_notify{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_close_notify{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_close_notify{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_close_notify{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_connect_attempts_exceeded counter
+envoy_cluster_upstream_cx_connect_attempts_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_connect_attempts_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_attempts_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_connect_attempts_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_connect_fail counter
+envoy_cluster_upstream_cx_connect_fail{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_connect_fail{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_fail{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_connect_fail{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_connect_timeout counter
+envoy_cluster_upstream_cx_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_connect_with_0_rtt counter
+envoy_cluster_upstream_cx_connect_with_0_rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_connect_with_0_rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_with_0_rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_connect_with_0_rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy counter
+envoy_cluster_upstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6507
+envoy_cluster_upstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_local counter
+envoy_cluster_upstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6507
+envoy_cluster_upstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_local_with_active_rq counter
+envoy_cluster_upstream_cx_destroy_local_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_local_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1758
+envoy_cluster_upstream_cx_destroy_local_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_destroy_local_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_remote counter
+envoy_cluster_upstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_remote_with_active_rq counter
+envoy_cluster_upstream_cx_destroy_remote_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_remote_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_destroy_remote_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_destroy_remote_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_with_active_rq counter
+envoy_cluster_upstream_cx_destroy_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_destroy_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1758
+envoy_cluster_upstream_cx_destroy_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_destroy_with_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_http1_total counter
+envoy_cluster_upstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_upstream_cx_http2_total counter
+envoy_cluster_upstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_http3_total counter
+envoy_cluster_upstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_idle_timeout counter
+envoy_cluster_upstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_max_duration_reached counter
+envoy_cluster_upstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_max_requests counter
+envoy_cluster_upstream_cx_max_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_max_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_max_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_max_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_none_healthy counter
+envoy_cluster_upstream_cx_none_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_none_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_none_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_none_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_overflow counter
+envoy_cluster_upstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_pool_overflow counter
+envoy_cluster_upstream_cx_pool_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_pool_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_pool_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_pool_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_protocol_error counter
+envoy_cluster_upstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_cx_rx_bytes_total counter
+envoy_cluster_upstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 3853
+envoy_cluster_upstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 8645645
+envoy_cluster_upstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 724779
+# TYPE envoy_cluster_upstream_cx_total counter
+envoy_cluster_upstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6507
+envoy_cluster_upstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_upstream_cx_tx_bytes_total counter
+envoy_cluster_upstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 114982
+envoy_cluster_upstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1240
+envoy_cluster_upstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 732
+# TYPE envoy_cluster_upstream_flow_control_backed_up_total counter
+envoy_cluster_upstream_flow_control_backed_up_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_flow_control_backed_up_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_flow_control_backed_up_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_flow_control_backed_up_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_flow_control_drained_total counter
+envoy_cluster_upstream_flow_control_drained_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_flow_control_drained_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_flow_control_drained_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_flow_control_drained_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_flow_control_paused_reading_total counter
+envoy_cluster_upstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_flow_control_resumed_reading_total counter
+envoy_cluster_upstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_http3_broken counter
+envoy_cluster_upstream_http3_broken{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_http3_broken{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_http3_broken{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_http3_broken{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_internal_redirect_failed_total counter
+envoy_cluster_upstream_internal_redirect_failed_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_internal_redirect_failed_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_internal_redirect_failed_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_internal_redirect_failed_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_internal_redirect_succeeded_total counter
+envoy_cluster_upstream_internal_redirect_succeeded_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_internal_redirect_succeeded_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_internal_redirect_succeeded_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_internal_redirect_succeeded_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq counter
+envoy_cluster_upstream_rq{envoy_response_code="200",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_rq{envoy_response_code="200",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3
+# TYPE envoy_cluster_upstream_rq_0rtt counter
+envoy_cluster_upstream_rq_0rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_0rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_0rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_0rtt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_cancelled counter
+envoy_cluster_upstream_rq_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 4749
+envoy_cluster_upstream_rq_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_completed counter
+envoy_cluster_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3
+# TYPE envoy_cluster_upstream_rq_maintenance_mode counter
+envoy_cluster_upstream_rq_maintenance_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_maintenance_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_maintenance_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_maintenance_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_max_duration_reached counter
+envoy_cluster_upstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_pending_failure_eject counter
+envoy_cluster_upstream_rq_pending_failure_eject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_pending_failure_eject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_pending_failure_eject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_pending_failure_eject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_pending_overflow counter
+envoy_cluster_upstream_rq_pending_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_pending_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_pending_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_pending_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_pending_total counter
+envoy_cluster_upstream_rq_pending_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_rq_pending_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6507
+envoy_cluster_upstream_rq_pending_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_rq_pending_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_upstream_rq_per_try_idle_timeout counter
+envoy_cluster_upstream_rq_per_try_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_per_try_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_per_try_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_per_try_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_per_try_timeout counter
+envoy_cluster_upstream_rq_per_try_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_per_try_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_per_try_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_per_try_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_retry counter
+envoy_cluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_retry_backoff_exponential counter
+envoy_cluster_upstream_rq_retry_backoff_exponential{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_retry_backoff_exponential{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_retry_backoff_exponential{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_retry_backoff_exponential{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_retry_backoff_ratelimited counter
+envoy_cluster_upstream_rq_retry_backoff_ratelimited{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_retry_backoff_ratelimited{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_retry_backoff_ratelimited{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_retry_backoff_ratelimited{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_retry_limit_exceeded counter
+envoy_cluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_retry_overflow counter
+envoy_cluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_retry_success counter
+envoy_cluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_rx_reset counter
+envoy_cluster_upstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_timeout counter
+envoy_cluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_total counter
+envoy_cluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1758
+envoy_cluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3
+# TYPE envoy_cluster_upstream_rq_tx_reset counter
+envoy_cluster_upstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_xx counter
+envoy_cluster_upstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 3
+# TYPE envoy_cluster_manager_cds_init_fetch_timeout counter
+envoy_cluster_manager_cds_init_fetch_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_cluster_manager_cds_update_attempt counter
+envoy_cluster_manager_cds_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2
+# TYPE envoy_cluster_manager_cds_update_failure counter
+envoy_cluster_manager_cds_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_cluster_manager_cds_update_rejected counter
+envoy_cluster_manager_cds_update_rejected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_cluster_manager_cds_update_success counter
+envoy_cluster_manager_cds_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_cluster_manager_cluster_added counter
+envoy_cluster_manager_cluster_added{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 4
+# TYPE envoy_cluster_manager_cluster_modified counter
+envoy_cluster_manager_cluster_modified{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_cluster_manager_cluster_removed counter
+envoy_cluster_manager_cluster_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_cluster_manager_cluster_updated counter
+envoy_cluster_manager_cluster_updated{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2
+# TYPE envoy_cluster_manager_cluster_updated_via_merge counter
+envoy_cluster_manager_cluster_updated_via_merge{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_cluster_manager_update_merge_cancelled counter
+envoy_cluster_manager_update_merge_cancelled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_cluster_manager_update_out_of_merge_window counter
+envoy_cluster_manager_update_out_of_merge_window{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_connect_authzrbac_allowed counter
+envoy_connect_authzrbac_allowed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_connect_authzrbac_denied counter
+envoy_connect_authzrbac_denied{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_connect_authzrbac_shadow_allowed counter
+envoy_connect_authzrbac_shadow_allowed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_connect_authzrbac_shadow_denied counter
+envoy_connect_authzrbac_shadow_denied{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_control_plane_rate_limit_enforced counter
+envoy_control_plane_rate_limit_enforced{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_dns_cares_get_addr_failure counter
+envoy_dns_cares_get_addr_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_dns_cares_not_found counter
+envoy_dns_cares_not_found{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_dns_cares_resolve_total counter
+envoy_dns_cares_resolve_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_dns_cares_timeouts counter
+envoy_dns_cares_timeouts{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_envoy_overload_actions_reset_high_memory_stream_count counter
+envoy_envoy_overload_actions_reset_high_memory_stream_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_filesystem_flushed_by_timer counter
+envoy_filesystem_flushed_by_timer{} 3253
+# TYPE envoy_filesystem_reopen_failed counter
+envoy_filesystem_reopen_failed{} 0
+# TYPE envoy_filesystem_write_buffered counter
+envoy_filesystem_write_buffered{} 3
+# TYPE envoy_filesystem_write_completed counter
+envoy_filesystem_write_completed{} 3
+# TYPE envoy_filesystem_write_failed counter
+envoy_filesystem_write_failed{} 0
+# TYPE envoy_http_downstream_cx_delayed_close_timeout counter
+envoy_http_downstream_cx_delayed_close_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_delayed_close_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_destroy counter
+envoy_http_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 2
+envoy_http_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3
+# TYPE envoy_http_downstream_cx_destroy_active_rq counter
+envoy_http_downstream_cx_destroy_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_destroy_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_destroy_local counter
+envoy_http_downstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_destroy_local{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_destroy_local_active_rq counter
+envoy_http_downstream_cx_destroy_local_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_destroy_local_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_destroy_remote counter
+envoy_http_downstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 2
+envoy_http_downstream_cx_destroy_remote{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3
+# TYPE envoy_http_downstream_cx_destroy_remote_active_rq counter
+envoy_http_downstream_cx_destroy_remote_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_destroy_remote_active_rq{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_drain_close counter
+envoy_http_downstream_cx_drain_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_drain_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_http1_total counter
+envoy_http_downstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3
+envoy_http_downstream_cx_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4
+# TYPE envoy_http_downstream_cx_http2_total counter
+envoy_http_downstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_http3_total counter
+envoy_http_downstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_idle_timeout counter
+envoy_http_downstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_max_duration_reached counter
+envoy_http_downstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_max_requests_reached counter
+envoy_http_downstream_cx_max_requests_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_max_requests_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_overload_disable_keepalive counter
+envoy_http_downstream_cx_overload_disable_keepalive{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_overload_disable_keepalive{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_protocol_error counter
+envoy_http_downstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_protocol_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_rx_bytes_total counter
+envoy_http_downstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 448
+envoy_http_downstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 353
+# TYPE envoy_http_downstream_cx_ssl_total counter
+envoy_http_downstream_cx_ssl_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_ssl_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_total counter
+envoy_http_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3
+envoy_http_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4
+# TYPE envoy_http_downstream_cx_tx_bytes_total counter
+envoy_http_downstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 1035762
+envoy_http_downstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 725008
+# TYPE envoy_http_downstream_cx_upgrades_total counter
+envoy_http_downstream_cx_upgrades_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_upgrades_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_flow_control_paused_reading_total counter
+envoy_http_downstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_flow_control_resumed_reading_total counter
+envoy_http_downstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_completed counter
+envoy_http_downstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3
+envoy_http_downstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4
+# TYPE envoy_http_downstream_rq_failed_path_normalization counter
+envoy_http_downstream_rq_failed_path_normalization{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_failed_path_normalization{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_header_timeout counter
+envoy_http_downstream_rq_header_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_header_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_http1_total counter
+envoy_http_downstream_rq_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 4
+envoy_http_downstream_rq_http1_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4
+# TYPE envoy_http_downstream_rq_http2_total counter
+envoy_http_downstream_rq_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_http2_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_http3_total counter
+envoy_http_downstream_rq_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_http3_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_idle_timeout counter
+envoy_http_downstream_rq_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_max_duration_reached counter
+envoy_http_downstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_max_duration_reached{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_non_relative_path counter
+envoy_http_downstream_rq_non_relative_path{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_non_relative_path{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_overload_close counter
+envoy_http_downstream_rq_overload_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_overload_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_redirected_with_normalized_path counter
+envoy_http_downstream_rq_redirected_with_normalized_path{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_redirected_with_normalized_path{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_rejected_via_ip_detection counter
+envoy_http_downstream_rq_rejected_via_ip_detection{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_rejected_via_ip_detection{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_response_before_rq_complete counter
+envoy_http_downstream_rq_response_before_rq_complete{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_response_before_rq_complete{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_rx_reset counter
+envoy_http_downstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_rx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_timeout counter
+envoy_http_downstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_too_large counter
+envoy_http_downstream_rq_too_large{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_too_large{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_total counter
+envoy_http_downstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 4
+envoy_http_downstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4
+# TYPE envoy_http_downstream_rq_tx_reset counter
+envoy_http_downstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_tx_reset{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_ws_on_non_ws_route counter
+envoy_http_downstream_rq_ws_on_non_ws_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_ws_on_non_ws_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_xx counter
+envoy_http_downstream_rq_xx{envoy_response_code_class="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3
+envoy_http_downstream_rq_xx{envoy_response_code_class="3",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="4",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="5",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3
+envoy_http_downstream_rq_xx{envoy_response_code_class="3",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="4",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1
+envoy_http_downstream_rq_xx{envoy_response_code_class="5",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_no_cluster counter
+envoy_http_no_cluster{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_no_cluster{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_no_route counter
+envoy_http_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_passthrough_internal_redirect_bad_location counter
+envoy_http_passthrough_internal_redirect_bad_location{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_bad_location{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_passthrough_internal_redirect_no_route counter
+envoy_http_passthrough_internal_redirect_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_passthrough_internal_redirect_predicate counter
+envoy_http_passthrough_internal_redirect_predicate{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_predicate{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_passthrough_internal_redirect_too_many_redirects counter
+envoy_http_passthrough_internal_redirect_too_many_redirects{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_too_many_redirects{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_passthrough_internal_redirect_unsafe_scheme counter
+envoy_http_passthrough_internal_redirect_unsafe_scheme{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_unsafe_scheme{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_rq_direct_response counter
+envoy_http_rq_direct_response{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_rq_direct_response{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1
+# TYPE envoy_http_rq_redirect counter
+envoy_http_rq_redirect{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_rq_redirect{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_rq_reset_after_downstream_response_started counter
+envoy_http_rq_reset_after_downstream_response_started{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_rq_reset_after_downstream_response_started{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_rq_total counter
+envoy_http_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="async-client"} 1
+envoy_http_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 4
+# TYPE envoy_http_rs_too_large counter
+envoy_http_rs_too_large{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_rs_too_large{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_tracing_client_enabled counter
+envoy_http_tracing_client_enabled{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_tracing_health_check counter
+envoy_http_tracing_health_check{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_tracing_not_traceable counter
+envoy_http_tracing_not_traceable{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_tracing_random_sampling counter
+envoy_http_tracing_random_sampling{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_tracing_service_forced counter
+envoy_http_tracing_service_forced{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http1_dropped_headers_with_underscores counter
+envoy_http1_dropped_headers_with_underscores{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_http1_metadata_not_supported_error counter
+envoy_http1_metadata_not_supported_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_http1_requests_rejected_with_underscores_in_headers counter
+envoy_http1_requests_rejected_with_underscores_in_headers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_http1_response_flood counter
+envoy_http1_response_flood{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_admin_downstream_cx_destroy counter
+envoy_listener_admin_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2
+# TYPE envoy_listener_admin_downstream_cx_overflow counter
+envoy_listener_admin_downstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_admin_downstream_cx_overload_reject counter
+envoy_listener_admin_downstream_cx_overload_reject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_admin_downstream_cx_total counter
+envoy_listener_admin_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 3
+# TYPE envoy_listener_admin_downstream_cx_transport_socket_connect_timeout counter
+envoy_listener_admin_downstream_cx_transport_socket_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_admin_downstream_global_cx_overflow counter
+envoy_listener_admin_downstream_global_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_admin_downstream_listener_filter_error counter
+envoy_listener_admin_downstream_listener_filter_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_admin_downstream_listener_filter_remote_close counter
+envoy_listener_admin_downstream_listener_filter_remote_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_admin_downstream_pre_cx_timeout counter
+envoy_listener_admin_downstream_pre_cx_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_admin_http_downstream_rq_completed counter
+envoy_listener_admin_http_downstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3
+# TYPE envoy_listener_admin_http_downstream_rq_xx counter
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="3",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="4",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="5",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+# TYPE envoy_listener_admin_main_thread_downstream_cx_total counter
+envoy_listener_admin_main_thread_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 3
+# TYPE envoy_listener_admin_no_filter_chain_match counter
+envoy_listener_admin_no_filter_chain_match{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_downstream_cx_destroy counter
+envoy_listener_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 3
+envoy_listener_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 6507
+envoy_listener_downstream_cx_destroy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 1
+# TYPE envoy_listener_downstream_cx_overflow counter
+envoy_listener_downstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_downstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_downstream_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_downstream_cx_overload_reject counter
+envoy_listener_downstream_cx_overload_reject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_downstream_cx_overload_reject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_downstream_cx_overload_reject{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_downstream_cx_total counter
+envoy_listener_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 4
+envoy_listener_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 6507
+envoy_listener_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 1
+# TYPE envoy_listener_downstream_cx_transport_socket_connect_timeout counter
+envoy_listener_downstream_cx_transport_socket_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_downstream_cx_transport_socket_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_downstream_cx_transport_socket_connect_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_downstream_global_cx_overflow counter
+envoy_listener_downstream_global_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_downstream_global_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_downstream_global_cx_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_downstream_listener_filter_error counter
+envoy_listener_downstream_listener_filter_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_downstream_listener_filter_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_downstream_listener_filter_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_downstream_listener_filter_remote_close counter
+envoy_listener_downstream_listener_filter_remote_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_downstream_listener_filter_remote_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_downstream_listener_filter_remote_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_downstream_pre_cx_timeout counter
+envoy_listener_downstream_pre_cx_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_downstream_pre_cx_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_downstream_pre_cx_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_extension_config_missing counter
+envoy_listener_extension_config_missing{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_extension_config_missing{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_extension_config_missing{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_http_downstream_rq_completed counter
+envoy_listener_http_downstream_rq_completed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 4
+# TYPE envoy_listener_http_downstream_rq_xx counter
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="2",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 3
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="3",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="4",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 1
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="5",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",envoy_listener_address="0.0.0.0_20200"} 0
+# TYPE envoy_listener_no_filter_chain_match counter
+envoy_listener_no_filter_chain_match{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_no_filter_chain_match{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_no_filter_chain_match{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_server_ssl_socket_factory_downstream_context_secrets_not_ready counter
+envoy_listener_server_ssl_socket_factory_downstream_context_secrets_not_ready{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_server_ssl_socket_factory_ssl_context_update_by_sds counter
+envoy_listener_server_ssl_socket_factory_ssl_context_update_by_sds{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_server_ssl_socket_factory_upstream_context_secrets_not_ready counter
+envoy_listener_server_ssl_socket_factory_upstream_context_secrets_not_ready{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_connection_error counter
+envoy_listener_ssl_connection_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_fail_verify_cert_hash counter
+envoy_listener_ssl_fail_verify_cert_hash{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_fail_verify_error counter
+envoy_listener_ssl_fail_verify_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_fail_verify_no_cert counter
+envoy_listener_ssl_fail_verify_no_cert{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_fail_verify_san counter
+envoy_listener_ssl_fail_verify_san{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_handshake counter
+envoy_listener_ssl_handshake{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_no_certificate counter
+envoy_listener_ssl_no_certificate{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_ocsp_staple_failed counter
+envoy_listener_ssl_ocsp_staple_failed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_ocsp_staple_omitted counter
+envoy_listener_ssl_ocsp_staple_omitted{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_ocsp_staple_requests counter
+envoy_listener_ssl_ocsp_staple_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_ocsp_staple_responses counter
+envoy_listener_ssl_ocsp_staple_responses{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_ssl_session_reused counter
+envoy_listener_ssl_session_reused{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+# TYPE envoy_listener_worker_downstream_cx_total counter
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 2
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 2
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 3169
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 3338
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 1
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_manager_lds_init_fetch_timeout counter
+envoy_listener_manager_lds_init_fetch_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_lds_update_attempt counter
+envoy_listener_manager_lds_update_attempt{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2
+# TYPE envoy_listener_manager_lds_update_failure counter
+envoy_listener_manager_lds_update_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_lds_update_rejected counter
+envoy_listener_manager_lds_update_rejected{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_lds_update_success counter
+envoy_listener_manager_lds_update_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_listener_manager_listener_added counter
+envoy_listener_manager_listener_added{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 3
+# TYPE envoy_listener_manager_listener_create_failure counter
+envoy_listener_manager_listener_create_failure{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_listener_create_success counter
+envoy_listener_manager_listener_create_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 6
+# TYPE envoy_listener_manager_listener_in_place_updated counter
+envoy_listener_manager_listener_in_place_updated{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_listener_modified counter
+envoy_listener_manager_listener_modified{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_listener_removed counter
+envoy_listener_manager_listener_removed{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_listener_stopped counter
+envoy_listener_manager_listener_stopped{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_main_thread_watchdog_mega_miss counter
+envoy_main_thread_watchdog_mega_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_main_thread_watchdog_miss counter
+envoy_main_thread_watchdog_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_runtime_deprecated_feature_use counter
+envoy_runtime_deprecated_feature_use{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_runtime_load_error counter
+envoy_runtime_load_error{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_runtime_load_success counter
+envoy_runtime_load_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_runtime_override_dir_exists counter
+envoy_runtime_override_dir_exists{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_runtime_override_dir_not_exists counter
+envoy_runtime_override_dir_not_exists{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_server_debug_assertion_failures counter
+envoy_server_debug_assertion_failures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_dropped_stat_flushes counter
+envoy_server_dropped_stat_flushes{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_dynamic_unknown_fields counter
+envoy_server_dynamic_unknown_fields{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_envoy_bug_failures counter
+envoy_server_envoy_bug_failures{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_main_thread_watchdog_mega_miss counter
+envoy_server_main_thread_watchdog_mega_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_main_thread_watchdog_miss counter
+envoy_server_main_thread_watchdog_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_static_unknown_fields counter
+envoy_server_static_unknown_fields{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_wip_protos counter
+envoy_server_wip_protos{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_worker_watchdog_mega_miss counter
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_worker_watchdog_miss counter
+envoy_server_worker_watchdog_miss{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_tcp_downstream_cx_no_route counter
+envoy_tcp_downstream_cx_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_tcp_downstream_cx_rx_bytes_total counter
+envoy_tcp_downstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_tcp_downstream_cx_total counter
+envoy_tcp_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 6507
+# TYPE envoy_tcp_downstream_cx_tx_bytes_total counter
+envoy_tcp_downstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_tcp_downstream_flow_control_paused_reading_total counter
+envoy_tcp_downstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_tcp_downstream_flow_control_resumed_reading_total counter
+envoy_tcp_downstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_tcp_idle_timeout counter
+envoy_tcp_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_tcp_max_downstream_connection_duration counter
+envoy_tcp_max_downstream_connection_duration{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_tcp_original_destination_downstream_cx_no_route counter
+envoy_tcp_original_destination_downstream_cx_no_route{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0
+# TYPE envoy_tcp_original_destination_downstream_cx_rx_bytes_total counter
+envoy_tcp_original_destination_downstream_cx_rx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 1240
+# TYPE envoy_tcp_original_destination_downstream_cx_total counter
+envoy_tcp_original_destination_downstream_cx_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 1
+# TYPE envoy_tcp_original_destination_downstream_cx_tx_bytes_total counter
+envoy_tcp_original_destination_downstream_cx_tx_bytes_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 8645645
+# TYPE envoy_tcp_original_destination_downstream_flow_control_paused_reading_total counter
+envoy_tcp_original_destination_downstream_flow_control_paused_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0
+# TYPE envoy_tcp_original_destination_downstream_flow_control_resumed_reading_total counter
+envoy_tcp_original_destination_downstream_flow_control_resumed_reading_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0
+# TYPE envoy_tcp_original_destination_idle_timeout counter
+envoy_tcp_original_destination_idle_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0
+# TYPE envoy_tcp_original_destination_max_downstream_connection_duration counter
+envoy_tcp_original_destination_max_downstream_connection_duration{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0
+# TYPE envoy_tcp_original_destination_upstream_flush_total counter
+envoy_tcp_original_destination_upstream_flush_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0
+# TYPE envoy_tcp_upstream_flush_total counter
+envoy_tcp_upstream_flush_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_vhost_vcluster_upstream_rq_retry counter
+envoy_vhost_vcluster_upstream_rq_retry{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0
+# TYPE envoy_vhost_vcluster_upstream_rq_retry_limit_exceeded counter
+envoy_vhost_vcluster_upstream_rq_retry_limit_exceeded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0
+# TYPE envoy_vhost_vcluster_upstream_rq_retry_overflow counter
+envoy_vhost_vcluster_upstream_rq_retry_overflow{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0
+# TYPE envoy_vhost_vcluster_upstream_rq_retry_success counter
+envoy_vhost_vcluster_upstream_rq_retry_success{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0
+# TYPE envoy_vhost_vcluster_upstream_rq_timeout counter
+envoy_vhost_vcluster_upstream_rq_timeout{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0
+# TYPE envoy_vhost_vcluster_upstream_rq_total counter
+envoy_vhost_vcluster_upstream_rq_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_virtual_cluster="other",envoy_virtual_host="self_admin"} 0
+# TYPE envoy_workers_watchdog_mega_miss counter
+envoy_workers_watchdog_mega_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_workers_watchdog_miss counter
+envoy_workers_watchdog_miss{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_cluster_circuit_breakers_default_cx_open gauge
+envoy_cluster_circuit_breakers_default_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_default_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_default_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_default_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_circuit_breakers_default_cx_pool_open gauge
+envoy_cluster_circuit_breakers_default_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_default_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_default_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_default_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_circuit_breakers_default_rq_open gauge
+envoy_cluster_circuit_breakers_default_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_default_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_default_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_default_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_circuit_breakers_default_rq_pending_open gauge
+envoy_cluster_circuit_breakers_default_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_default_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_default_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_default_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_circuit_breakers_default_rq_retry_open gauge
+envoy_cluster_circuit_breakers_default_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_default_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_default_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_default_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_circuit_breakers_high_cx_open gauge
+envoy_cluster_circuit_breakers_high_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_high_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_high_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_high_cx_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_circuit_breakers_high_cx_pool_open gauge
+envoy_cluster_circuit_breakers_high_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_high_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_high_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_high_cx_pool_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_circuit_breakers_high_rq_open gauge
+envoy_cluster_circuit_breakers_high_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_high_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_high_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_high_rq_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_circuit_breakers_high_rq_pending_open gauge
+envoy_cluster_circuit_breakers_high_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_high_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_high_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_high_rq_pending_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_circuit_breakers_high_rq_retry_open gauge
+envoy_cluster_circuit_breakers_high_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_circuit_breakers_high_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_circuit_breakers_high_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_circuit_breakers_high_rq_retry_open{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_http2_deferred_stream_close gauge
+envoy_cluster_http2_deferred_stream_close{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_pending_send_bytes gauge
+envoy_cluster_http2_pending_send_bytes{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+# TYPE envoy_cluster_http2_streams_active gauge
+envoy_cluster_http2_streams_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+# TYPE envoy_cluster_lb_subsets_active gauge
+envoy_cluster_lb_subsets_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_lb_subsets_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_lb_subsets_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_lb_subsets_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_max_host_weight gauge
+envoy_cluster_max_host_weight{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_max_host_weight{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_max_host_weight{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_max_host_weight{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_membership_degraded gauge
+envoy_cluster_membership_degraded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_membership_degraded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_membership_degraded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_membership_degraded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_membership_excluded gauge
+envoy_cluster_membership_excluded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_membership_excluded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_membership_excluded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_membership_excluded{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_membership_healthy gauge
+envoy_cluster_membership_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_membership_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1
+envoy_cluster_membership_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_membership_healthy{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1
+# TYPE envoy_cluster_membership_total gauge
+envoy_cluster_membership_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_membership_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1
+envoy_cluster_membership_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_membership_total{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1
+# TYPE envoy_cluster_upstream_cx_active gauge
+envoy_cluster_upstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_upstream_cx_rx_bytes_buffered gauge
+envoy_cluster_upstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 17
+envoy_cluster_upstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 102618
+# TYPE envoy_cluster_upstream_cx_tx_bytes_buffered gauge
+envoy_cluster_upstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_active gauge
+envoy_cluster_upstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1
+# TYPE envoy_cluster_upstream_rq_pending_active gauge
+envoy_cluster_upstream_rq_pending_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_rq_pending_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_rq_pending_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_upstream_rq_pending_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_version gauge
+envoy_cluster_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 0
+envoy_cluster_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_manager_active_clusters gauge
+envoy_cluster_manager_active_clusters{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 4
+# TYPE envoy_cluster_manager_cds_update_time gauge
+envoy_cluster_manager_cds_update_time{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1678856528260
+# TYPE envoy_cluster_manager_cds_version gauge
+envoy_cluster_manager_cds_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 17241709254077376921
+# TYPE envoy_cluster_manager_warming_clusters gauge
+envoy_cluster_manager_warming_clusters{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_control_plane_connected_state gauge
+envoy_control_plane_connected_state{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_control_plane_pending_requests gauge
+envoy_control_plane_pending_requests{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_dns_cares_pending_resolutions gauge
+envoy_dns_cares_pending_resolutions{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_filesystem_write_total_buffered gauge
+envoy_filesystem_write_total_buffered{} 0
+# TYPE envoy_http_downstream_cx_active gauge
+envoy_http_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 1
+envoy_http_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1
+# TYPE envoy_http_downstream_cx_http1_active gauge
+envoy_http_downstream_cx_http1_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 1
+envoy_http_downstream_cx_http1_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1
+# TYPE envoy_http_downstream_cx_http2_active gauge
+envoy_http_downstream_cx_http2_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_http2_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_http3_active gauge
+envoy_http_downstream_cx_http3_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_http3_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_rx_bytes_buffered gauge
+envoy_http_downstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 112
+envoy_http_downstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 86
+# TYPE envoy_http_downstream_cx_ssl_active gauge
+envoy_http_downstream_cx_ssl_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_ssl_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_tx_bytes_buffered gauge
+envoy_http_downstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_cx_upgrades_active gauge
+envoy_http_downstream_cx_upgrades_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_upgrades_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 0
+# TYPE envoy_http_downstream_rq_active gauge
+envoy_http_downstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 1
+envoy_http_downstream_rq_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 1
+# TYPE envoy_listener_admin_downstream_cx_active gauge
+envoy_listener_admin_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_listener_admin_downstream_pre_cx_active gauge
+envoy_listener_admin_downstream_pre_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_admin_main_thread_downstream_cx_active gauge
+envoy_listener_admin_main_thread_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_listener_downstream_cx_active gauge
+envoy_listener_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 1
+envoy_listener_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_downstream_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_downstream_pre_cx_active gauge
+envoy_listener_downstream_pre_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_downstream_pre_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_downstream_pre_cx_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_worker_downstream_cx_active gauge
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 1
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="0",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="1",local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 0
+# TYPE envoy_listener_manager_lds_update_time gauge
+envoy_listener_manager_lds_update_time{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1678856528268
+# TYPE envoy_listener_manager_lds_version gauge
+envoy_listener_manager_lds_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 17241709254077376921
+# TYPE envoy_listener_manager_total_filter_chains_draining gauge
+envoy_listener_manager_total_filter_chains_draining{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_total_listeners_active gauge
+envoy_listener_manager_total_listeners_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 3
+# TYPE envoy_listener_manager_total_listeners_draining gauge
+envoy_listener_manager_total_listeners_draining{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_total_listeners_warming gauge
+envoy_listener_manager_total_listeners_warming{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_listener_manager_workers_started gauge
+envoy_listener_manager_workers_started{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_runtime_admin_overrides_active gauge
+envoy_runtime_admin_overrides_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_runtime_deprecated_feature_seen_since_process_start gauge
+envoy_runtime_deprecated_feature_seen_since_process_start{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_runtime_num_keys gauge
+envoy_runtime_num_keys{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_runtime_num_layers gauge
+envoy_runtime_num_layers{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_server_compilation_settings_fips_mode gauge
+envoy_server_compilation_settings_fips_mode{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_concurrency gauge
+envoy_server_concurrency{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2
+# TYPE envoy_server_days_until_first_cert_expiring gauge
+envoy_server_days_until_first_cert_expiring{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2
+# TYPE envoy_server_hot_restart_epoch gauge
+envoy_server_hot_restart_epoch{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_live gauge
+envoy_server_live{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_server_memory_allocated gauge
+envoy_server_memory_allocated{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 7742368
+# TYPE envoy_server_memory_heap_size gauge
+envoy_server_memory_heap_size{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 14680064
+# TYPE envoy_server_memory_physical_size gauge
+envoy_server_memory_physical_size{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 19175778
+# TYPE envoy_server_parent_connections gauge
+envoy_server_parent_connections{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_seconds_until_first_ocsp_response_expiring gauge
+envoy_server_seconds_until_first_ocsp_response_expiring{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_state gauge
+envoy_server_state{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_stats_recent_lookups gauge
+envoy_server_stats_recent_lookups{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 11362
+# TYPE envoy_server_total_connections gauge
+envoy_server_total_connections{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 0
+# TYPE envoy_server_uptime gauge
+envoy_server_uptime{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 32527
+# TYPE envoy_server_version gauge
+envoy_server_version{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1424117
+# TYPE envoy_tcp_downstream_cx_rx_bytes_buffered gauge
+envoy_tcp_downstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_tcp_downstream_cx_tx_bytes_buffered gauge
+envoy_tcp_downstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_tcp_original_destination_downstream_cx_rx_bytes_buffered gauge
+envoy_tcp_original_destination_downstream_cx_rx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0
+# TYPE envoy_tcp_original_destination_downstream_cx_tx_bytes_buffered gauge
+envoy_tcp_original_destination_downstream_cx_tx_bytes_buffered{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0
+# TYPE envoy_tcp_original_destination_upstream_flush_active gauge
+envoy_tcp_original_destination_upstream_flush_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="upstream"} 0
+# TYPE envoy_tcp_upstream_flush_active gauge
+envoy_tcp_upstream_flush_active{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_tcp_prefix="public_listener"} 0
+# TYPE envoy_cluster_external_upstream_rq_time histogram
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="0.5"} 0
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1"} 0
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5"} 0
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="25"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="50"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="100"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="250"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="500"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1000"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="2500"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5000"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10000"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="30000"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="60000"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="300000"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="600000"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1800000"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="3600000"} 2
+envoy_cluster_external_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="+Inf"} 2
+envoy_cluster_external_upstream_rq_time_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 15.1000000000000014210854715202
+envoy_cluster_external_upstream_rq_time_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_upstream_cx_connect_ms histogram
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="0.5"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="5"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="10"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="25"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="50"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="100"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="250"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="500"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="2500"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="5000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="10000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="30000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="60000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="300000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="600000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1800000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="3600000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="+Inf"} 1
+envoy_cluster_upstream_cx_connect_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 8.0500000000000007105427357601002
+envoy_cluster_upstream_cx_connect_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="0.5"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="5"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="10"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="25"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="50"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="100"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="250"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="500"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="2500"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="5000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="10000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="30000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="60000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="300000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="600000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1800000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="3600000"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="+Inf"} 1757
+envoy_cluster_upstream_cx_connect_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 0
+envoy_cluster_upstream_cx_connect_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 1757
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="0.5"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="5"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="10"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="25"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="50"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="100"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="250"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="500"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="2500"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="5000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="10000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="30000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="60000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="300000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="600000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1800000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="3600000"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="+Inf"} 1
+envoy_cluster_upstream_cx_connect_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 2.049999999999999822364316059975
+envoy_cluster_upstream_cx_connect_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="0.5"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1"} 1
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="25"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="50"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="100"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="250"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="500"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="2500"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="30000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="60000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="300000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="600000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1800000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="3600000"} 2
+envoy_cluster_upstream_cx_connect_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="+Inf"} 2
+envoy_cluster_upstream_cx_connect_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 1.0500000000000000444089209850063
+envoy_cluster_upstream_cx_connect_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_upstream_cx_length_ms histogram
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="0.5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="10"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="25"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="50"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="100"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="250"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="2500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="5000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="10000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="30000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="60000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="300000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="1800000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="3600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane",le="+Inf"} 0
+envoy_cluster_upstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="consul-dataplane"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="0.5"} 6502
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1"} 6502
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="5"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="10"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="25"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="50"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="100"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="250"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="500"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="2500"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="5000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="10000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="30000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="60000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="300000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="600000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="1800000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="3600000"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app",le="+Inf"} 6505
+envoy_cluster_upstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 10.1499999999999985789145284798
+envoy_cluster_upstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="local_app"} 6505
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="0.5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="10"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="25"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="50"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="100"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="250"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="2500"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="5000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="10000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="30000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="60000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="300000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="600000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="1800000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="3600000"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination",le="+Inf"} 1
+envoy_cluster_upstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 855
+envoy_cluster_upstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="original-destination"} 1
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="0.5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="25"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="50"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="100"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="250"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="2500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="30000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="60000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="300000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1800000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="3600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="+Inf"} 0
+envoy_cluster_upstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+envoy_cluster_upstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 0
+# TYPE envoy_cluster_upstream_rq_time histogram
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="0.5"} 0
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1"} 0
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5"} 0
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="25"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="50"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="100"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="250"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="500"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="2500"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="5000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="10000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="30000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="60000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="300000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="600000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="1800000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="3600000"} 2
+envoy_cluster_upstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend",le="+Inf"} 2
+envoy_cluster_upstream_rq_time_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 15.1000000000000014210854715202
+envoy_cluster_upstream_rq_time_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_cluster_name="prometheus_backend"} 2
+# TYPE envoy_cluster_manager_cds_update_duration histogram
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="0.5"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="25"} 0
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="50"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="100"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="250"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="500"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="2500"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="30000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="60000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="300000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="600000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1800000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="3600000"} 1
+envoy_cluster_manager_cds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="+Inf"} 1
+envoy_cluster_manager_cds_update_duration_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 34.5
+envoy_cluster_manager_cds_update_duration_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_http_downstream_cx_length_ms histogram
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="0.5"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="5"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="10"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="25"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="50"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="100"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="250"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="500"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="2500"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="5000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="10000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="30000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="60000"} 0
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="300000"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="600000"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1800000"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="3600000"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="+Inf"} 2
+envoy_http_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 181000
+envoy_http_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 2
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="0.5"} 1
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1"} 1
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="5"} 1
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="10"} 1
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="25"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="50"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="100"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="250"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="500"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="2500"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="5000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="10000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="30000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="60000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="300000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="600000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1800000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="3600000"} 3
+envoy_http_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="+Inf"} 3
+envoy_http_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 23
+envoy_http_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3
+# TYPE envoy_http_downstream_rq_time histogram
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="0.5"} 0
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1"} 0
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="5"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="10"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="25"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="50"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="100"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="250"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="500"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="2500"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="5000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="10000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="30000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="60000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="300000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="600000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="1800000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="3600000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin",le="+Inf"} 3
+envoy_http_downstream_rq_time_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 12.1499999999999985789145284798
+envoy_http_downstream_rq_time_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="admin"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="0.5"} 1
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1"} 1
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="5"} 1
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="10"} 2
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="25"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="50"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="100"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="250"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="500"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="2500"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="5000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="10000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="30000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="60000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="300000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="600000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="1800000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="3600000"} 3
+envoy_http_downstream_rq_time_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics",le="+Inf"} 3
+envoy_http_downstream_rq_time_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 17.5500000000000007105427357601
+envoy_http_downstream_rq_time_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_http_conn_manager_prefix="envoy_prometheus_metrics"} 3
+# TYPE envoy_listener_admin_downstream_cx_length_ms histogram
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="0.5"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="25"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="50"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="100"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="250"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="500"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1000"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="2500"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5000"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10000"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="30000"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="60000"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="300000"} 2
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="600000"} 2
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1800000"} 2
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="3600000"} 2
+envoy_listener_admin_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="+Inf"} 2
+envoy_listener_admin_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 181000
+envoy_listener_admin_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 2
+# TYPE envoy_listener_downstream_cx_length_ms histogram
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="0.5"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="1"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="5"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="10"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="25"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="50"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="100"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="250"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="500"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="1000"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="2500"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="5000"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="10000"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="30000"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="60000"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="300000"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="600000"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="1800000"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="3600000"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200",le="+Inf"} 3
+envoy_listener_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 23
+envoy_listener_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="0.0.0.0_20200"} 3
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="0.5"} 6502
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="1"} 6502
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="5"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="10"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="25"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="50"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="100"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="250"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="500"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="1000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="2500"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="5000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="10000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="30000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="60000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="300000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="600000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="1800000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="3600000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000",le="+Inf"} 6505
+envoy_listener_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 10.1499999999999985789145284798
+envoy_listener_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="10.50.132.6_20000"} 6505
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="0.5"} 0
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="1"} 0
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="5"} 0
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="10"} 0
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="25"} 0
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="50"} 0
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="100"} 0
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="250"} 0
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="500"} 0
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="1000"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="2500"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="5000"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="10000"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="30000"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="60000"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="300000"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="600000"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="1800000"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="3600000"} 1
+envoy_listener_downstream_cx_length_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001",le="+Inf"} 1
+envoy_listener_downstream_cx_length_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 855
+envoy_listener_downstream_cx_length_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",envoy_listener_address="127.0.0.1_15001"} 1
+# TYPE envoy_listener_manager_lds_update_duration histogram
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="0.5"} 0
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1"} 0
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5"} 0
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="25"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="50"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="100"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="250"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="500"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1000"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="2500"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5000"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10000"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="30000"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="60000"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="300000"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="600000"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1800000"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="3600000"} 1
+envoy_listener_manager_lds_update_duration_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="+Inf"} 1
+envoy_listener_manager_lds_update_duration_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 5.049999999999999822364316059975
+envoy_listener_manager_lds_update_duration_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
+# TYPE envoy_server_initialization_time_ms histogram
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="0.5"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="25"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="50"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="100"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="250"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="500"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1000"} 0
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="2500"} 1
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="5000"} 1
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="10000"} 1
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="30000"} 1
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="60000"} 1
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="300000"} 1
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="600000"} 1
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="1800000"} 1
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="3600000"} 1
+envoy_server_initialization_time_ms_bucket{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3",le="+Inf"} 1
+envoy_server_initialization_time_ms_sum{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1150
+envoy_server_initialization_time_ms_count{local_cluster="mynginx",consul_source_service="mynginx",consul_source_namespace="default",consul_source_partition="default",consul_source_datacenter="consul-sandbox-cluster-0159c9d3"} 1
diff --git a/src/go/plugin/go.d/modules/envoy/testdata/envoy.txt b/src/go/plugin/go.d/modules/envoy/testdata/envoy.txt
new file mode 100644
index 000000000..1102c4c0d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/envoy/testdata/envoy.txt
@@ -0,0 +1,929 @@
+# TYPE envoy_cluster_assignment_stale counter
+envoy_cluster_assignment_stale{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_assignment_timeout_received counter
+envoy_cluster_assignment_timeout_received{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_bind_errors counter
+envoy_cluster_bind_errors{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_client_ssl_socket_factory_downstream_context_secrets_not_ready counter
+envoy_cluster_client_ssl_socket_factory_downstream_context_secrets_not_ready{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_client_ssl_socket_factory_ssl_context_update_by_sds counter
+envoy_cluster_client_ssl_socket_factory_ssl_context_update_by_sds{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_client_ssl_socket_factory_upstream_context_secrets_not_ready counter
+envoy_cluster_client_ssl_socket_factory_upstream_context_secrets_not_ready{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_default_total_match_count counter
+envoy_cluster_default_total_match_count{envoy_cluster_name="service_envoyproxy_io"} 1
+# TYPE envoy_cluster_lb_healthy_panic counter
+envoy_cluster_lb_healthy_panic{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_local_cluster_not_ok counter
+envoy_cluster_lb_local_cluster_not_ok{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_recalculate_zone_structures counter
+envoy_cluster_lb_recalculate_zone_structures{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_subsets_created counter
+envoy_cluster_lb_subsets_created{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_subsets_fallback counter
+envoy_cluster_lb_subsets_fallback{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_subsets_fallback_panic counter
+envoy_cluster_lb_subsets_fallback_panic{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_subsets_removed counter
+envoy_cluster_lb_subsets_removed{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_subsets_selected counter
+envoy_cluster_lb_subsets_selected{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_zone_cluster_too_small counter
+envoy_cluster_lb_zone_cluster_too_small{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_zone_no_capacity_left counter
+envoy_cluster_lb_zone_no_capacity_left{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_zone_number_differs counter
+envoy_cluster_lb_zone_number_differs{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_zone_routing_all_directly counter
+envoy_cluster_lb_zone_routing_all_directly{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_zone_routing_cross_zone counter
+envoy_cluster_lb_zone_routing_cross_zone{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_zone_routing_sampled counter
+envoy_cluster_lb_zone_routing_sampled{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_membership_change counter
+envoy_cluster_membership_change{envoy_cluster_name="service_envoyproxy_io"} 1
+# TYPE envoy_cluster_original_dst_host_invalid counter
+envoy_cluster_original_dst_host_invalid{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_retry_or_shadow_abandoned counter
+envoy_cluster_retry_or_shadow_abandoned{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_connection_error counter
+envoy_cluster_ssl_connection_error{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_fail_verify_cert_hash counter
+envoy_cluster_ssl_fail_verify_cert_hash{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_fail_verify_error counter
+envoy_cluster_ssl_fail_verify_error{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_fail_verify_no_cert counter
+envoy_cluster_ssl_fail_verify_no_cert{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_fail_verify_san counter
+envoy_cluster_ssl_fail_verify_san{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_handshake counter
+envoy_cluster_ssl_handshake{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_no_certificate counter
+envoy_cluster_ssl_no_certificate{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_ocsp_staple_failed counter
+envoy_cluster_ssl_ocsp_staple_failed{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_ocsp_staple_omitted counter
+envoy_cluster_ssl_ocsp_staple_omitted{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_ocsp_staple_requests counter
+envoy_cluster_ssl_ocsp_staple_requests{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_ocsp_staple_responses counter
+envoy_cluster_ssl_ocsp_staple_responses{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_ssl_session_reused counter
+envoy_cluster_ssl_session_reused{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_update_attempt counter
+envoy_cluster_update_attempt{envoy_cluster_name="service_envoyproxy_io"} 1242
+# TYPE envoy_cluster_update_empty counter
+envoy_cluster_update_empty{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_update_failure counter
+envoy_cluster_update_failure{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_update_no_rebuild counter
+envoy_cluster_update_no_rebuild{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_update_success counter
+envoy_cluster_update_success{envoy_cluster_name="service_envoyproxy_io"} 1242
+# TYPE envoy_cluster_upstream_cx_close_notify counter
+envoy_cluster_upstream_cx_close_notify{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_connect_attempts_exceeded counter
+envoy_cluster_upstream_cx_connect_attempts_exceeded{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_connect_fail counter
+envoy_cluster_upstream_cx_connect_fail{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_connect_timeout counter
+envoy_cluster_upstream_cx_connect_timeout{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_connect_with_0_rtt counter
+envoy_cluster_upstream_cx_connect_with_0_rtt{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_destroy counter
+envoy_cluster_upstream_cx_destroy{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_local counter
+envoy_cluster_upstream_cx_destroy_local{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_local_with_active_rq counter
+envoy_cluster_upstream_cx_destroy_local_with_active_rq{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_remote counter
+envoy_cluster_upstream_cx_destroy_remote{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_remote_with_active_rq counter
+envoy_cluster_upstream_cx_destroy_remote_with_active_rq{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_destroy_with_active_rq counter
+envoy_cluster_upstream_cx_destroy_with_active_rq{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_http1_total counter
+envoy_cluster_upstream_cx_http1_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_http2_total counter
+envoy_cluster_upstream_cx_http2_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_http3_total counter
+envoy_cluster_upstream_cx_http3_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_idle_timeout counter
+envoy_cluster_upstream_cx_idle_timeout{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_max_duration_reached counter
+envoy_cluster_upstream_cx_max_duration_reached{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_max_requests counter
+envoy_cluster_upstream_cx_max_requests{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_none_healthy counter
+envoy_cluster_upstream_cx_none_healthy{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_overflow counter
+envoy_cluster_upstream_cx_overflow{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_pool_overflow counter
+envoy_cluster_upstream_cx_pool_overflow{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_protocol_error counter
+envoy_cluster_upstream_cx_protocol_error{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_rx_bytes_total counter
+envoy_cluster_upstream_cx_rx_bytes_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_total counter
+envoy_cluster_upstream_cx_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_tx_bytes_total counter
+envoy_cluster_upstream_cx_tx_bytes_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_flow_control_backed_up_total counter
+envoy_cluster_upstream_flow_control_backed_up_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_flow_control_drained_total counter
+envoy_cluster_upstream_flow_control_drained_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_flow_control_paused_reading_total counter
+envoy_cluster_upstream_flow_control_paused_reading_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_flow_control_resumed_reading_total counter
+envoy_cluster_upstream_flow_control_resumed_reading_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_http3_broken counter
+envoy_cluster_upstream_http3_broken{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_internal_redirect_failed_total counter
+envoy_cluster_upstream_internal_redirect_failed_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_internal_redirect_succeeded_total counter
+envoy_cluster_upstream_internal_redirect_succeeded_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_0rtt counter
+envoy_cluster_upstream_rq_0rtt{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_cancelled counter
+envoy_cluster_upstream_rq_cancelled{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_completed counter
+envoy_cluster_upstream_rq_completed{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_maintenance_mode counter
+envoy_cluster_upstream_rq_maintenance_mode{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_max_duration_reached counter
+envoy_cluster_upstream_rq_max_duration_reached{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_pending_failure_eject counter
+envoy_cluster_upstream_rq_pending_failure_eject{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_pending_overflow counter
+envoy_cluster_upstream_rq_pending_overflow{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_pending_total counter
+envoy_cluster_upstream_rq_pending_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_per_try_idle_timeout counter
+envoy_cluster_upstream_rq_per_try_idle_timeout{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_per_try_timeout counter
+envoy_cluster_upstream_rq_per_try_timeout{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_retry counter
+envoy_cluster_upstream_rq_retry{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_retry_backoff_exponential counter
+envoy_cluster_upstream_rq_retry_backoff_exponential{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_retry_backoff_ratelimited counter
+envoy_cluster_upstream_rq_retry_backoff_ratelimited{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_retry_limit_exceeded counter
+envoy_cluster_upstream_rq_retry_limit_exceeded{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_retry_overflow counter
+envoy_cluster_upstream_rq_retry_overflow{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_retry_success counter
+envoy_cluster_upstream_rq_retry_success{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_rx_reset counter
+envoy_cluster_upstream_rq_rx_reset{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_timeout counter
+envoy_cluster_upstream_rq_timeout{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_total counter
+envoy_cluster_upstream_rq_total{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_tx_reset counter
+envoy_cluster_upstream_rq_tx_reset{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_manager_cluster_added counter
+envoy_cluster_manager_cluster_added{} 1
+# TYPE envoy_cluster_manager_cluster_modified counter
+envoy_cluster_manager_cluster_modified{} 0
+# TYPE envoy_cluster_manager_cluster_removed counter
+envoy_cluster_manager_cluster_removed{} 0
+# TYPE envoy_cluster_manager_cluster_updated counter
+envoy_cluster_manager_cluster_updated{} 0
+# TYPE envoy_cluster_manager_cluster_updated_via_merge counter
+envoy_cluster_manager_cluster_updated_via_merge{} 0
+# TYPE envoy_cluster_manager_update_merge_cancelled counter
+envoy_cluster_manager_update_merge_cancelled{} 0
+# TYPE envoy_cluster_manager_update_out_of_merge_window counter
+envoy_cluster_manager_update_out_of_merge_window{} 0
+# TYPE envoy_dns_cares_get_addr_failure counter
+envoy_dns_cares_get_addr_failure{} 0
+# TYPE envoy_dns_cares_not_found counter
+envoy_dns_cares_not_found{} 0
+# TYPE envoy_dns_cares_resolve_total counter
+envoy_dns_cares_resolve_total{} 1242
+# TYPE envoy_dns_cares_timeouts counter
+envoy_dns_cares_timeouts{} 0
+# TYPE envoy_envoy_overload_actions_reset_high_memory_stream_count counter
+envoy_envoy_overload_actions_reset_high_memory_stream_count{} 0
+# TYPE envoy_filesystem_flushed_by_timer counter
+envoy_filesystem_flushed_by_timer{} 0
+# TYPE envoy_filesystem_reopen_failed counter
+envoy_filesystem_reopen_failed{} 0
+# TYPE envoy_filesystem_write_buffered counter
+envoy_filesystem_write_buffered{} 0
+# TYPE envoy_filesystem_write_completed counter
+envoy_filesystem_write_completed{} 0
+# TYPE envoy_filesystem_write_failed counter
+envoy_filesystem_write_failed{} 0
+# TYPE envoy_http_downstream_cx_delayed_close_timeout counter
+envoy_http_downstream_cx_delayed_close_timeout{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_delayed_close_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_destroy counter
+envoy_http_downstream_cx_destroy{envoy_http_conn_manager_prefix="admin"} 4
+envoy_http_downstream_cx_destroy{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_destroy_active_rq counter
+envoy_http_downstream_cx_destroy_active_rq{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_destroy_active_rq{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_destroy_local counter
+envoy_http_downstream_cx_destroy_local{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_destroy_local{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_destroy_local_active_rq counter
+envoy_http_downstream_cx_destroy_local_active_rq{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_destroy_local_active_rq{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_destroy_remote counter
+envoy_http_downstream_cx_destroy_remote{envoy_http_conn_manager_prefix="admin"} 4
+envoy_http_downstream_cx_destroy_remote{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_destroy_remote_active_rq counter
+envoy_http_downstream_cx_destroy_remote_active_rq{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_destroy_remote_active_rq{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_drain_close counter
+envoy_http_downstream_cx_drain_close{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_drain_close{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_http1_total counter
+envoy_http_downstream_cx_http1_total{envoy_http_conn_manager_prefix="admin"} 6
+envoy_http_downstream_cx_http1_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_http2_total counter
+envoy_http_downstream_cx_http2_total{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_http2_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_http3_total counter
+envoy_http_downstream_cx_http3_total{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_http3_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_idle_timeout counter
+envoy_http_downstream_cx_idle_timeout{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_idle_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_max_duration_reached counter
+envoy_http_downstream_cx_max_duration_reached{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_max_duration_reached{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_max_requests_reached counter
+envoy_http_downstream_cx_max_requests_reached{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_max_requests_reached{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_overload_disable_keepalive counter
+envoy_http_downstream_cx_overload_disable_keepalive{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_overload_disable_keepalive{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_protocol_error counter
+envoy_http_downstream_cx_protocol_error{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_protocol_error{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_rx_bytes_total counter
+envoy_http_downstream_cx_rx_bytes_total{envoy_http_conn_manager_prefix="admin"} 678
+envoy_http_downstream_cx_rx_bytes_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_ssl_total counter
+envoy_http_downstream_cx_ssl_total{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_ssl_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_total counter
+envoy_http_downstream_cx_total{envoy_http_conn_manager_prefix="admin"} 6
+envoy_http_downstream_cx_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_tx_bytes_total counter
+envoy_http_downstream_cx_tx_bytes_total{envoy_http_conn_manager_prefix="admin"} 212404
+envoy_http_downstream_cx_tx_bytes_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_upgrades_total counter
+envoy_http_downstream_cx_upgrades_total{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_upgrades_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_flow_control_paused_reading_total counter
+envoy_http_downstream_flow_control_paused_reading_total{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_flow_control_paused_reading_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_flow_control_resumed_reading_total counter
+envoy_http_downstream_flow_control_resumed_reading_total{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_flow_control_resumed_reading_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_completed counter
+envoy_http_downstream_rq_completed{envoy_http_conn_manager_prefix="admin"} 5
+envoy_http_downstream_rq_completed{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_failed_path_normalization counter
+envoy_http_downstream_rq_failed_path_normalization{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_failed_path_normalization{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_header_timeout counter
+envoy_http_downstream_rq_header_timeout{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_header_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_http1_total counter
+envoy_http_downstream_rq_http1_total{envoy_http_conn_manager_prefix="admin"} 6
+envoy_http_downstream_rq_http1_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_http2_total counter
+envoy_http_downstream_rq_http2_total{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_http2_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_http3_total counter
+envoy_http_downstream_rq_http3_total{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_http3_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_idle_timeout counter
+envoy_http_downstream_rq_idle_timeout{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_idle_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_max_duration_reached counter
+envoy_http_downstream_rq_max_duration_reached{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_max_duration_reached{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_non_relative_path counter
+envoy_http_downstream_rq_non_relative_path{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_non_relative_path{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_overload_close counter
+envoy_http_downstream_rq_overload_close{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_overload_close{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_redirected_with_normalized_path counter
+envoy_http_downstream_rq_redirected_with_normalized_path{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_redirected_with_normalized_path{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_rejected_via_ip_detection counter
+envoy_http_downstream_rq_rejected_via_ip_detection{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_rejected_via_ip_detection{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_response_before_rq_complete counter
+envoy_http_downstream_rq_response_before_rq_complete{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_response_before_rq_complete{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_rx_reset counter
+envoy_http_downstream_rq_rx_reset{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_rx_reset{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_timeout counter
+envoy_http_downstream_rq_timeout{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_timeout{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_too_large counter
+envoy_http_downstream_rq_too_large{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_too_large{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_total counter
+envoy_http_downstream_rq_total{envoy_http_conn_manager_prefix="admin"} 6
+envoy_http_downstream_rq_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_tx_reset counter
+envoy_http_downstream_rq_tx_reset{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_tx_reset{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_ws_on_non_ws_route counter
+envoy_http_downstream_rq_ws_on_non_ws_route{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_ws_on_non_ws_route{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_xx counter
+envoy_http_downstream_rq_xx{envoy_response_code_class="1",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="2",envoy_http_conn_manager_prefix="admin"} 3
+envoy_http_downstream_rq_xx{envoy_response_code_class="3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="4",envoy_http_conn_manager_prefix="admin"} 2
+envoy_http_downstream_rq_xx{envoy_response_code_class="5",envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="1",envoy_http_conn_manager_prefix="ingress_http"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="2",envoy_http_conn_manager_prefix="ingress_http"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="3",envoy_http_conn_manager_prefix="ingress_http"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="4",envoy_http_conn_manager_prefix="ingress_http"} 0
+envoy_http_downstream_rq_xx{envoy_response_code_class="5",envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_no_cluster counter
+envoy_http_no_cluster{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_no_cluster{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_no_route counter
+envoy_http_no_route{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_no_route{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_passthrough_internal_redirect_bad_location counter
+envoy_http_passthrough_internal_redirect_bad_location{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_bad_location{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_passthrough_internal_redirect_no_route counter
+envoy_http_passthrough_internal_redirect_no_route{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_no_route{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_passthrough_internal_redirect_predicate counter
+envoy_http_passthrough_internal_redirect_predicate{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_predicate{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_passthrough_internal_redirect_too_many_redirects counter
+envoy_http_passthrough_internal_redirect_too_many_redirects{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_too_many_redirects{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_passthrough_internal_redirect_unsafe_scheme counter
+envoy_http_passthrough_internal_redirect_unsafe_scheme{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_passthrough_internal_redirect_unsafe_scheme{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_rq_direct_response counter
+envoy_http_rq_direct_response{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_rq_direct_response{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_rq_redirect counter
+envoy_http_rq_redirect{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_rq_redirect{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_rq_reset_after_downstream_response_started counter
+envoy_http_rq_reset_after_downstream_response_started{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_rq_reset_after_downstream_response_started{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_rq_total counter
+envoy_http_rq_total{envoy_http_conn_manager_prefix="async-client"} 0
+envoy_http_rq_total{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_rs_too_large counter
+envoy_http_rs_too_large{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_rs_too_large{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_tracing_client_enabled counter
+envoy_http_tracing_client_enabled{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_tracing_health_check counter
+envoy_http_tracing_health_check{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_tracing_not_traceable counter
+envoy_http_tracing_not_traceable{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_tracing_random_sampling counter
+envoy_http_tracing_random_sampling{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_tracing_service_forced counter
+envoy_http_tracing_service_forced{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http1_dropped_headers_with_underscores counter
+envoy_http1_dropped_headers_with_underscores{} 0
+# TYPE envoy_http1_metadata_not_supported_error counter
+envoy_http1_metadata_not_supported_error{} 0
+# TYPE envoy_http1_requests_rejected_with_underscores_in_headers counter
+envoy_http1_requests_rejected_with_underscores_in_headers{} 0
+# TYPE envoy_http1_response_flood counter
+envoy_http1_response_flood{} 0
+# TYPE envoy_listener_admin_downstream_cx_destroy counter
+envoy_listener_admin_downstream_cx_destroy{} 4
+# TYPE envoy_listener_admin_downstream_cx_overflow counter
+envoy_listener_admin_downstream_cx_overflow{} 0
+# TYPE envoy_listener_admin_downstream_cx_overload_reject counter
+envoy_listener_admin_downstream_cx_overload_reject{} 0
+# TYPE envoy_listener_admin_downstream_cx_total counter
+envoy_listener_admin_downstream_cx_total{} 6
+# TYPE envoy_listener_admin_downstream_cx_transport_socket_connect_timeout counter
+envoy_listener_admin_downstream_cx_transport_socket_connect_timeout{} 0
+# TYPE envoy_listener_admin_downstream_global_cx_overflow counter
+envoy_listener_admin_downstream_global_cx_overflow{} 0
+# TYPE envoy_listener_admin_downstream_listener_filter_error counter
+envoy_listener_admin_downstream_listener_filter_error{} 0
+# TYPE envoy_listener_admin_downstream_listener_filter_remote_close counter
+envoy_listener_admin_downstream_listener_filter_remote_close{} 0
+# TYPE envoy_listener_admin_downstream_pre_cx_timeout counter
+envoy_listener_admin_downstream_pre_cx_timeout{} 0
+# TYPE envoy_listener_admin_http_downstream_rq_completed counter
+envoy_listener_admin_http_downstream_rq_completed{envoy_http_conn_manager_prefix="admin"} 5
+# TYPE envoy_listener_admin_http_downstream_rq_xx counter
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="1",envoy_http_conn_manager_prefix="admin"} 0
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="2",envoy_http_conn_manager_prefix="admin"} 3
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="3",envoy_http_conn_manager_prefix="admin"} 0
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="4",envoy_http_conn_manager_prefix="admin"} 2
+envoy_listener_admin_http_downstream_rq_xx{envoy_response_code_class="5",envoy_http_conn_manager_prefix="admin"} 0
+# TYPE envoy_listener_admin_main_thread_downstream_cx_total counter
+envoy_listener_admin_main_thread_downstream_cx_total{} 6
+# TYPE envoy_listener_admin_no_filter_chain_match counter
+envoy_listener_admin_no_filter_chain_match{} 0
+# TYPE envoy_listener_downstream_cx_destroy counter
+envoy_listener_downstream_cx_destroy{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_downstream_cx_overflow counter
+envoy_listener_downstream_cx_overflow{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_downstream_cx_overload_reject counter
+envoy_listener_downstream_cx_overload_reject{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_downstream_cx_total counter
+envoy_listener_downstream_cx_total{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_downstream_cx_transport_socket_connect_timeout counter
+envoy_listener_downstream_cx_transport_socket_connect_timeout{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_downstream_global_cx_overflow counter
+envoy_listener_downstream_global_cx_overflow{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_downstream_listener_filter_error counter
+envoy_listener_downstream_listener_filter_error{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_downstream_listener_filter_remote_close counter
+envoy_listener_downstream_listener_filter_remote_close{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_downstream_pre_cx_timeout counter
+envoy_listener_downstream_pre_cx_timeout{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_extension_config_missing counter
+envoy_listener_extension_config_missing{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_http_downstream_rq_completed counter
+envoy_listener_http_downstream_rq_completed{envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_http_downstream_rq_xx counter
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="1",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="2",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="3",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="4",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_http_downstream_rq_xx{envoy_response_code_class="5",envoy_http_conn_manager_prefix="ingress_http",envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_no_filter_chain_match counter
+envoy_listener_no_filter_chain_match{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_worker_downstream_cx_total counter
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="0",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="1",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="10",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="11",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="12",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="13",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="14",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="15",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="2",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="3",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="4",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="5",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="6",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="7",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="8",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_total{envoy_worker_id="9",envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_manager_listener_added counter
+envoy_listener_manager_listener_added{} 1
+# TYPE envoy_listener_manager_listener_create_failure counter
+envoy_listener_manager_listener_create_failure{} 0
+# TYPE envoy_listener_manager_listener_create_success counter
+envoy_listener_manager_listener_create_success{} 16
+# TYPE envoy_listener_manager_listener_in_place_updated counter
+envoy_listener_manager_listener_in_place_updated{} 0
+# TYPE envoy_listener_manager_listener_modified counter
+envoy_listener_manager_listener_modified{} 0
+# TYPE envoy_listener_manager_listener_removed counter
+envoy_listener_manager_listener_removed{} 0
+# TYPE envoy_listener_manager_listener_stopped counter
+envoy_listener_manager_listener_stopped{} 0
+# TYPE envoy_main_thread_watchdog_mega_miss counter
+envoy_main_thread_watchdog_mega_miss{} 0
+# TYPE envoy_main_thread_watchdog_miss counter
+envoy_main_thread_watchdog_miss{} 0
+# TYPE envoy_runtime_deprecated_feature_use counter
+envoy_runtime_deprecated_feature_use{} 0
+# TYPE envoy_runtime_load_error counter
+envoy_runtime_load_error{} 0
+# TYPE envoy_runtime_load_success counter
+envoy_runtime_load_success{} 1
+# TYPE envoy_runtime_override_dir_exists counter
+envoy_runtime_override_dir_exists{} 0
+# TYPE envoy_runtime_override_dir_not_exists counter
+envoy_runtime_override_dir_not_exists{} 1
+# TYPE envoy_server_debug_assertion_failures counter
+envoy_server_debug_assertion_failures{} 0
+# TYPE envoy_server_dropped_stat_flushes counter
+envoy_server_dropped_stat_flushes{} 0
+# TYPE envoy_server_dynamic_unknown_fields counter
+envoy_server_dynamic_unknown_fields{} 0
+# TYPE envoy_server_envoy_bug_failures counter
+envoy_server_envoy_bug_failures{} 0
+# TYPE envoy_server_main_thread_watchdog_mega_miss counter
+envoy_server_main_thread_watchdog_mega_miss{} 0
+# TYPE envoy_server_main_thread_watchdog_miss counter
+envoy_server_main_thread_watchdog_miss{} 0
+# TYPE envoy_server_static_unknown_fields counter
+envoy_server_static_unknown_fields{} 0
+# TYPE envoy_server_wip_protos counter
+envoy_server_wip_protos{} 0
+# TYPE envoy_server_worker_watchdog_mega_miss counter
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="0"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="1"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="10"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="11"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="12"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="13"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="14"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="15"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="2"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="3"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="4"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="5"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="6"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="7"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="8"} 0
+envoy_server_worker_watchdog_mega_miss{envoy_worker_id="9"} 0
+# TYPE envoy_server_worker_watchdog_miss counter
+envoy_server_worker_watchdog_miss{envoy_worker_id="0"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="1"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="10"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="11"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="12"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="13"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="14"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="15"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="2"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="3"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="4"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="5"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="6"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="7"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="8"} 0
+envoy_server_worker_watchdog_miss{envoy_worker_id="9"} 0
+# TYPE envoy_workers_watchdog_mega_miss counter
+envoy_workers_watchdog_mega_miss{} 0
+# TYPE envoy_workers_watchdog_miss counter
+envoy_workers_watchdog_miss{} 0
+# TYPE envoy_cluster_circuit_breakers_default_cx_open gauge
+envoy_cluster_circuit_breakers_default_cx_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_circuit_breakers_default_cx_pool_open gauge
+envoy_cluster_circuit_breakers_default_cx_pool_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_circuit_breakers_default_rq_open gauge
+envoy_cluster_circuit_breakers_default_rq_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_circuit_breakers_default_rq_pending_open gauge
+envoy_cluster_circuit_breakers_default_rq_pending_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_circuit_breakers_default_rq_retry_open gauge
+envoy_cluster_circuit_breakers_default_rq_retry_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_circuit_breakers_high_cx_open gauge
+envoy_cluster_circuit_breakers_high_cx_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_circuit_breakers_high_cx_pool_open gauge
+envoy_cluster_circuit_breakers_high_cx_pool_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_circuit_breakers_high_rq_open gauge
+envoy_cluster_circuit_breakers_high_rq_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_circuit_breakers_high_rq_pending_open gauge
+envoy_cluster_circuit_breakers_high_rq_pending_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_circuit_breakers_high_rq_retry_open gauge
+envoy_cluster_circuit_breakers_high_rq_retry_open{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_lb_subsets_active gauge
+envoy_cluster_lb_subsets_active{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_max_host_weight gauge
+envoy_cluster_max_host_weight{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_membership_degraded gauge
+envoy_cluster_membership_degraded{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_membership_excluded gauge
+envoy_cluster_membership_excluded{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_membership_healthy gauge
+envoy_cluster_membership_healthy{envoy_cluster_name="service_envoyproxy_io"} 1
+# TYPE envoy_cluster_membership_total gauge
+envoy_cluster_membership_total{envoy_cluster_name="service_envoyproxy_io"} 1
+# TYPE envoy_cluster_upstream_cx_active gauge
+envoy_cluster_upstream_cx_active{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_rx_bytes_buffered gauge
+envoy_cluster_upstream_cx_rx_bytes_buffered{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_tx_bytes_buffered gauge
+envoy_cluster_upstream_cx_tx_bytes_buffered{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_active gauge
+envoy_cluster_upstream_rq_active{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_rq_pending_active gauge
+envoy_cluster_upstream_rq_pending_active{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_version gauge
+envoy_cluster_version{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_manager_active_clusters gauge
+envoy_cluster_manager_active_clusters{} 1
+# TYPE envoy_cluster_manager_warming_clusters gauge
+envoy_cluster_manager_warming_clusters{} 0
+# TYPE envoy_dns_cares_pending_resolutions gauge
+envoy_dns_cares_pending_resolutions{} 0
+# TYPE envoy_filesystem_write_total_buffered gauge
+envoy_filesystem_write_total_buffered{} 0
+# TYPE envoy_http_downstream_cx_active gauge
+envoy_http_downstream_cx_active{envoy_http_conn_manager_prefix="admin"} 2
+envoy_http_downstream_cx_active{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_http1_active gauge
+envoy_http_downstream_cx_http1_active{envoy_http_conn_manager_prefix="admin"} 2
+envoy_http_downstream_cx_http1_active{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_http2_active gauge
+envoy_http_downstream_cx_http2_active{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_http2_active{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_http3_active gauge
+envoy_http_downstream_cx_http3_active{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_http3_active{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_rx_bytes_buffered gauge
+envoy_http_downstream_cx_rx_bytes_buffered{envoy_http_conn_manager_prefix="admin"} 245
+envoy_http_downstream_cx_rx_bytes_buffered{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_ssl_active gauge
+envoy_http_downstream_cx_ssl_active{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_ssl_active{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_tx_bytes_buffered gauge
+envoy_http_downstream_cx_tx_bytes_buffered{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_tx_bytes_buffered{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_cx_upgrades_active gauge
+envoy_http_downstream_cx_upgrades_active{envoy_http_conn_manager_prefix="admin"} 0
+envoy_http_downstream_cx_upgrades_active{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_active gauge
+envoy_http_downstream_rq_active{envoy_http_conn_manager_prefix="admin"} 1
+envoy_http_downstream_rq_active{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_listener_admin_downstream_cx_active gauge
+envoy_listener_admin_downstream_cx_active{} 2
+# TYPE envoy_listener_admin_downstream_pre_cx_active gauge
+envoy_listener_admin_downstream_pre_cx_active{} 0
+# TYPE envoy_listener_admin_main_thread_downstream_cx_active gauge
+envoy_listener_admin_main_thread_downstream_cx_active{} 2
+# TYPE envoy_listener_downstream_cx_active gauge
+envoy_listener_downstream_cx_active{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_downstream_pre_cx_active gauge
+envoy_listener_downstream_pre_cx_active{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_worker_downstream_cx_active gauge
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="0",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="1",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="10",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="11",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="12",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="13",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="14",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="15",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="2",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="3",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="4",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="5",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="6",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="7",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="8",envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_worker_downstream_cx_active{envoy_worker_id="9",envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_listener_manager_total_filter_chains_draining gauge
+envoy_listener_manager_total_filter_chains_draining{} 0
+# TYPE envoy_listener_manager_total_listeners_active gauge
+envoy_listener_manager_total_listeners_active{} 1
+# TYPE envoy_listener_manager_total_listeners_draining gauge
+envoy_listener_manager_total_listeners_draining{} 0
+# TYPE envoy_listener_manager_total_listeners_warming gauge
+envoy_listener_manager_total_listeners_warming{} 0
+# TYPE envoy_listener_manager_workers_started gauge
+envoy_listener_manager_workers_started{} 1
+# TYPE envoy_runtime_admin_overrides_active gauge
+envoy_runtime_admin_overrides_active{} 0
+# TYPE envoy_runtime_deprecated_feature_seen_since_process_start gauge
+envoy_runtime_deprecated_feature_seen_since_process_start{} 0
+# TYPE envoy_runtime_num_keys gauge
+envoy_runtime_num_keys{} 0
+# TYPE envoy_runtime_num_layers gauge
+envoy_runtime_num_layers{} 0
+# TYPE envoy_server_compilation_settings_fips_mode gauge
+envoy_server_compilation_settings_fips_mode{} 0
+# TYPE envoy_server_concurrency gauge
+envoy_server_concurrency{} 16
+# TYPE envoy_server_days_until_first_cert_expiring gauge
+envoy_server_days_until_first_cert_expiring{} 4294967295
+# TYPE envoy_server_hot_restart_epoch gauge
+envoy_server_hot_restart_epoch{} 0
+# TYPE envoy_server_hot_restart_generation gauge
+envoy_server_hot_restart_generation{} 1
+# TYPE envoy_server_live gauge
+envoy_server_live{} 1
+# TYPE envoy_server_memory_allocated gauge
+envoy_server_memory_allocated{} 7630184
+# TYPE envoy_server_memory_heap_size gauge
+envoy_server_memory_heap_size{} 16777216
+# TYPE envoy_server_memory_physical_size gauge
+envoy_server_memory_physical_size{} 28426958
+# TYPE envoy_server_parent_connections gauge
+envoy_server_parent_connections{} 0
+# TYPE envoy_server_seconds_until_first_ocsp_response_expiring gauge
+envoy_server_seconds_until_first_ocsp_response_expiring{} 0
+# TYPE envoy_server_state gauge
+envoy_server_state{} 0
+# TYPE envoy_server_stats_recent_lookups gauge
+envoy_server_stats_recent_lookups{} 1763
+# TYPE envoy_server_total_connections gauge
+envoy_server_total_connections{} 0
+# TYPE envoy_server_uptime gauge
+envoy_server_uptime{} 6225
+# TYPE envoy_server_version gauge
+envoy_server_version{} 9993205
+# TYPE envoy_cluster_upstream_cx_connect_ms histogram
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="0.5"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="5"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="10"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="25"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="50"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="100"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="250"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="500"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1000"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="2500"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="5000"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="10000"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="30000"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="60000"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="300000"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="600000"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1800000"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="3600000"} 0
+envoy_cluster_upstream_cx_connect_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="+Inf"} 0
+envoy_cluster_upstream_cx_connect_ms_sum{envoy_cluster_name="service_envoyproxy_io"} 0
+envoy_cluster_upstream_cx_connect_ms_count{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_cluster_upstream_cx_length_ms histogram
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="0.5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="5"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="10"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="25"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="50"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="100"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="250"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="2500"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="5000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="10000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="30000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="60000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="300000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="1800000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="3600000"} 0
+envoy_cluster_upstream_cx_length_ms_bucket{envoy_cluster_name="service_envoyproxy_io",le="+Inf"} 0
+envoy_cluster_upstream_cx_length_ms_sum{envoy_cluster_name="service_envoyproxy_io"} 0
+envoy_cluster_upstream_cx_length_ms_count{envoy_cluster_name="service_envoyproxy_io"} 0
+# TYPE envoy_http_downstream_cx_length_ms histogram
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="0.5"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="1"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="5"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="10"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="25"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="50"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="100"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="250"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="500"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="1000"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="2500"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="5000"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="10000"} 3
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="30000"} 4
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="60000"} 4
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="300000"} 4
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="600000"} 4
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="1800000"} 4
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="3600000"} 4
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="admin",le="+Inf"} 4
+envoy_http_downstream_cx_length_ms_sum{envoy_http_conn_manager_prefix="admin"} 17506.150000000001455191522836685
+envoy_http_downstream_cx_length_ms_count{envoy_http_conn_manager_prefix="admin"} 4
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="0.5"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="5"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="10"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="25"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="50"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="100"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="250"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="500"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1000"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="2500"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="5000"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="10000"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="30000"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="60000"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="300000"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="600000"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1800000"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="3600000"} 0
+envoy_http_downstream_cx_length_ms_bucket{envoy_http_conn_manager_prefix="ingress_http",le="+Inf"} 0
+envoy_http_downstream_cx_length_ms_sum{envoy_http_conn_manager_prefix="ingress_http"} 0
+envoy_http_downstream_cx_length_ms_count{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_http_downstream_rq_time histogram
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="0.5"} 2
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="1"} 2
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="5"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="10"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="25"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="50"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="100"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="250"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="500"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="1000"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="2500"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="5000"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="10000"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="30000"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="60000"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="300000"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="600000"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="1800000"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="3600000"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="admin",le="+Inf"} 5
+envoy_http_downstream_rq_time_sum{envoy_http_conn_manager_prefix="admin"} 3.1500000000000003552713678800501
+envoy_http_downstream_rq_time_count{envoy_http_conn_manager_prefix="admin"} 5
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="0.5"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="5"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="10"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="25"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="50"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="100"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="250"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="500"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1000"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="2500"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="5000"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="10000"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="30000"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="60000"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="300000"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="600000"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="1800000"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="3600000"} 0
+envoy_http_downstream_rq_time_bucket{envoy_http_conn_manager_prefix="ingress_http",le="+Inf"} 0
+envoy_http_downstream_rq_time_sum{envoy_http_conn_manager_prefix="ingress_http"} 0
+envoy_http_downstream_rq_time_count{envoy_http_conn_manager_prefix="ingress_http"} 0
+# TYPE envoy_listener_admin_downstream_cx_length_ms histogram
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="0.5"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="1"} 0
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="5"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="10"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="25"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="50"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="100"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="250"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="500"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="1000"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="2500"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="5000"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="10000"} 3
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="30000"} 4
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="60000"} 4
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="300000"} 4
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="600000"} 4
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="1800000"} 4
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="3600000"} 4
+envoy_listener_admin_downstream_cx_length_ms_bucket{le="+Inf"} 4
+envoy_listener_admin_downstream_cx_length_ms_sum{} 17506.150000000001455191522836685
+envoy_listener_admin_downstream_cx_length_ms_count{} 4
+# TYPE envoy_listener_downstream_cx_length_ms histogram
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="0.5"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="1"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="5"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="10"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="25"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="50"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="100"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="250"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="500"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="1000"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="2500"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="5000"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="10000"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="30000"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="60000"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="300000"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="600000"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="1800000"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="3600000"} 0
+envoy_listener_downstream_cx_length_ms_bucket{envoy_listener_address="0.0.0.0_10000",le="+Inf"} 0
+envoy_listener_downstream_cx_length_ms_sum{envoy_listener_address="0.0.0.0_10000"} 0
+envoy_listener_downstream_cx_length_ms_count{envoy_listener_address="0.0.0.0_10000"} 0
+# TYPE envoy_server_initialization_time_ms histogram
+envoy_server_initialization_time_ms_bucket{le="0.5"} 0
+envoy_server_initialization_time_ms_bucket{le="1"} 0
+envoy_server_initialization_time_ms_bucket{le="5"} 0
+envoy_server_initialization_time_ms_bucket{le="10"} 0
+envoy_server_initialization_time_ms_bucket{le="25"} 0
+envoy_server_initialization_time_ms_bucket{le="50"} 0
+envoy_server_initialization_time_ms_bucket{le="100"} 1
+envoy_server_initialization_time_ms_bucket{le="250"} 1
+envoy_server_initialization_time_ms_bucket{le="500"} 1
+envoy_server_initialization_time_ms_bucket{le="1000"} 1
+envoy_server_initialization_time_ms_bucket{le="2500"} 1
+envoy_server_initialization_time_ms_bucket{le="5000"} 1
+envoy_server_initialization_time_ms_bucket{le="10000"} 1
+envoy_server_initialization_time_ms_bucket{le="30000"} 1
+envoy_server_initialization_time_ms_bucket{le="60000"} 1
+envoy_server_initialization_time_ms_bucket{le="300000"} 1
+envoy_server_initialization_time_ms_bucket{le="600000"} 1
+envoy_server_initialization_time_ms_bucket{le="1800000"} 1
+envoy_server_initialization_time_ms_bucket{le="3600000"} 1
+envoy_server_initialization_time_ms_bucket{le="+Inf"} 1
+envoy_server_initialization_time_ms_sum{} 76.5
+envoy_server_initialization_time_ms_count{} 1
diff --git a/src/go/plugin/go.d/modules/example/README.md b/src/go/plugin/go.d/modules/example/README.md
new file mode 100644
index 000000000..934dfd108
--- /dev/null
+++ b/src/go/plugin/go.d/modules/example/README.md
@@ -0,0 +1,80 @@
+<!--
+title: "Example module"
+description: "Use this example data collection module, which produces example charts with random values, to better understand how to build your own collector in Go."
+custom_edit_url: "https://github.com/netdata/go.d.plugin/edit/master/modules/example/README.md"
+sidebar_label: "Example module in Go"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "Integrations/Monitor/Mock Collectors"
+-->
+
+# Example module
+
+An example data collection module. Use it as a reference when writing a new module.
+
+## Charts
+
+This module produces example charts with random values. The number of charts, the number of dimensions per chart, and the chart type are all configurable (see the configuration examples below).
+
+## Configuration
+
+Edit the `go.d/example.conf` configuration file using `edit-config` from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory
+sudo ./edit-config go.d/example.conf
+```
+
+This collector is disabled by default. You must explicitly enable it
+in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf):
+
+```yaml
+# go.d.conf
+modules:
+ example: yes
+```
+
+Here is an example configuration with several jobs:
+
+```yaml
+jobs:
+ - name: example
+ charts:
+ num: 3
+ dimensions: 5
+
+ - name: hidden_example
+ hidden_charts:
+ num: 3
+ dimensions: 5
+```
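+
+For illustration, here is a hypothetical job that sets every chart option this module exposes (the values are arbitrary):
+
+```yaml
+jobs:
+  - name: full_example    # hypothetical job name
+    charts:
+      type: stacked       # line, area, or stacked
+      num: 2              # number of charts to create
+      contexts: 2         # number of unique contexts
+      dimensions: 4       # dimensions per chart
+      labels: 1           # labels per chart
+```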
+
+---
+
+For all available options, see the Example
+collector's [configuration file](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d/example.conf).
+
+## Troubleshooting
+
+To troubleshoot issues with the `example` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m example
+ ```
diff --git a/src/go/plugin/go.d/modules/example/charts.go b/src/go/plugin/go.d/modules/example/charts.go
new file mode 100644
index 000000000..71ecafdb4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/example/charts.go
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package example
+
+import (
+ "fmt"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+var chartTemplate = module.Chart{
+ ID: "random_%d",
+ Title: "A Random Number",
+ Units: "random",
+ Fam: "random",
+ Ctx: "example.random",
+}
+
+var hiddenChartTemplate = module.Chart{
+ ID: "hidden_random_%d",
+ Title: "A Random Number",
+ Units: "random",
+ Fam: "random",
+ Ctx: "example.random",
+ Opts: module.Opts{
+ Hidden: true,
+ },
+}
+
+func newChart(num, ctx, labels int, typ module.ChartType) *module.Chart {
+ chart := chartTemplate.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, num)
+ chart.Type = typ
+ if ctx > 0 {
+ chart.Ctx += fmt.Sprintf("_%d", ctx)
+ }
+ for i := 0; i < labels; i++ {
+ chart.Labels = append(chart.Labels, module.Label{
+ Key: fmt.Sprintf("example_name_%d", i),
+ Value: fmt.Sprintf("example_value_%d_%d", num, i),
+ })
+ }
+ return chart
+}
+
+func newHiddenChart(num, ctx, labels int, typ module.ChartType) *module.Chart {
+ chart := hiddenChartTemplate.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, num)
+ chart.Type = typ
+ if ctx > 0 {
+ chart.Ctx += fmt.Sprintf("_%d", ctx)
+ }
+ for i := 0; i < labels; i++ {
+ chart.Labels = append(chart.Labels, module.Label{
+ Key: fmt.Sprintf("example_name_%d", i),
+ Value: fmt.Sprintf("example_value_%d_%d", num, i),
+ })
+ }
+ return chart
+}
diff --git a/src/go/plugin/go.d/modules/example/collect.go b/src/go/plugin/go.d/modules/example/collect.go
new file mode 100644
index 000000000..b72d3c252
--- /dev/null
+++ b/src/go/plugin/go.d/modules/example/collect.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package example
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (e *Example) collect() (map[string]int64, error) {
+ collected := make(map[string]int64)
+
+ for _, chart := range *e.Charts() {
+ e.collectChart(collected, chart)
+ }
+ return collected, nil
+}
+
+func (e *Example) collectChart(collected map[string]int64, chart *module.Chart) {
+ var num int
+ if chart.Opts.Hidden {
+ num = e.Config.HiddenCharts.Dims
+ } else {
+ num = e.Config.Charts.Dims
+ }
+
+ for i := 0; i < num; i++ {
+ name := fmt.Sprintf("random%d", i)
+ id := fmt.Sprintf("%s_%s", chart.ID, name)
+
+ if !e.collectedDims[id] {
+ e.collectedDims[id] = true
+
+ dim := &module.Dim{ID: id, Name: name}
+ if err := chart.AddDim(dim); err != nil {
+ e.Warning(err)
+ }
+ chart.MarkNotCreated()
+ }
+ if i%2 == 0 {
+ collected[id] = e.randInt()
+ } else {
+ collected[id] = -e.randInt()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/example/config_schema.json b/src/go/plugin/go.d/modules/example/config_schema.json
new file mode 100644
index 000000000..328773f6d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/example/config_schema.json
@@ -0,0 +1,177 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Example collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "charts": {
+ "title": "Charts configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "type": {
+ "title": "Chart type",
+ "description": "The type of all charts.",
+ "type": "string",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ],
+ "default": "line"
+ },
+ "num": {
+ "title": "Number of charts",
+ "description": "The total number of charts to create.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 1
+ },
+ "contexts": {
+ "title": "Number of contexts",
+ "description": "The total number of unique contexts.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ },
+ "dimensions": {
+ "title": "Number of dimensions",
+ "description": "The number of dimensions each chart will have.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 4
+ },
+ "labels": {
+ "title": "Number of labels",
+ "description": "The number of labels each chart will have.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ }
+ },
+ "required": [
+ "type",
+ "num",
+ "contexts",
+ "dimensions",
+ "labels"
+ ]
+ },
+ "hidden_charts": {
+ "title": "Hidden charts configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "type": {
+ "title": "Chart type",
+ "description": "The type of all charts.",
+ "type": "string",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ],
+ "default": "line"
+ },
+ "num": {
+ "title": "Number of charts",
+ "description": "The total number of charts to create.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ },
+ "contexts": {
+ "title": "Number of contexts",
+ "description": "The total number of unique contexts.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ },
+ "dimensions": {
+ "title": "Number of dimensions",
+ "description": "The number of dimensions each chart will have.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 4
+ },
+ "labels": {
+ "title": "Number of labels",
+ "description": "The number of labels each chart will have.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ }
+ },
+ "required": [
+ "type",
+ "num",
+ "contexts",
+ "dimensions",
+ "labels"
+ ]
+ }
+ },
+ "required": [
+ "charts"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "charts": {
+ "type": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ }
+ },
+ "hidden_charts": {
+ "type": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ }
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every"
+ ]
+ },
+ {
+ "title": "Charts",
+ "fields": [
+ "charts"
+ ]
+ },
+ {
+ "title": "Hidden charts",
+ "fields": [
+ "hidden_charts"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/example/example.go b/src/go/plugin/go.d/modules/example/example.go
new file mode 100644
index 000000000..2ca0ad976
--- /dev/null
+++ b/src/go/plugin/go.d/modules/example/example.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package example
+
+import (
+ _ "embed"
+ "math/rand"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("example", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: module.UpdateEvery,
+ Priority: module.Priority,
+ Disabled: true,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Example {
+ return &Example{
+ Config: Config{
+ Charts: ConfigCharts{
+ Num: 1,
+ Dims: 4,
+ },
+ HiddenCharts: ConfigCharts{
+ Num: 0,
+ Dims: 4,
+ },
+ },
+
+ randInt: func() int64 { return rand.Int63n(100) },
+ collectedDims: make(map[string]bool),
+ }
+}
+
+type (
+ Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Charts ConfigCharts `yaml:"charts" json:"charts"`
+ HiddenCharts ConfigCharts `yaml:"hidden_charts" json:"hidden_charts"`
+ }
+ ConfigCharts struct {
+ Type string `yaml:"type,omitempty" json:"type"`
+ Num int `yaml:"num" json:"num"`
+ Contexts int `yaml:"contexts" json:"contexts"`
+ Dims int `yaml:"dimensions" json:"dimensions"`
+ Labels int `yaml:"labels" json:"labels"`
+ }
+)
+
+type Example struct {
+ module.Base // should be embedded by every module
+ Config `yaml:",inline"`
+
+ randInt func() int64
+ charts *module.Charts
+ collectedDims map[string]bool
+}
+
+func (e *Example) Configuration() any {
+ return e.Config
+}
+
+func (e *Example) Init() error {
+ err := e.validateConfig()
+ if err != nil {
+ e.Errorf("config validation: %v", err)
+ return err
+ }
+
+ charts, err := e.initCharts()
+ if err != nil {
+ e.Errorf("charts init: %v", err)
+ return err
+ }
+ e.charts = charts
+ return nil
+}
+
+func (e *Example) Check() error {
+ return nil
+}
+
+func (e *Example) Charts() *module.Charts {
+ return e.charts
+}
+
+func (e *Example) Collect() map[string]int64 {
+ mx, err := e.collect()
+ if err != nil {
+ e.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (e *Example) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/example/example_test.go b/src/go/plugin/go.d/modules/example/example_test.go
new file mode 100644
index 000000000..26b3ec9c8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/example/example_test.go
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package example
+
+import (
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestExample_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Example{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNew(t *testing.T) {
+	// We want to ensure that the module is a reference type, nothing more.
+
+ assert.IsType(t, (*Example)(nil), New())
+}
+
+func TestExample_Init(t *testing.T) {
+	// 'Init() error' initializes the module with an appropriate config, so to test it we need to:
+	// - provide the config.
+	// - set the module.Config field with the config.
+	// - call Init() and compare its return value with the expected value.
+
+	// The 'tests' map contains different test cases.
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "success when only 'charts' set": {
+ config: Config{
+ Charts: ConfigCharts{
+ Num: 1,
+ Dims: 2,
+ },
+ },
+ },
+ "success when only 'hidden_charts' set": {
+ config: Config{
+ HiddenCharts: ConfigCharts{
+ Num: 1,
+ Dims: 2,
+ },
+ },
+ },
+ "success when 'charts' and 'hidden_charts' set": {
+ config: Config{
+ Charts: ConfigCharts{
+ Num: 1,
+ Dims: 2,
+ },
+ HiddenCharts: ConfigCharts{
+ Num: 1,
+ Dims: 2,
+ },
+ },
+ },
+ "fails when 'charts' and 'hidden_charts' set, but 'num' == 0": {
+ wantFail: true,
+ config: Config{
+ Charts: ConfigCharts{
+ Num: 0,
+ Dims: 2,
+ },
+ HiddenCharts: ConfigCharts{
+ Num: 0,
+ Dims: 2,
+ },
+ },
+ },
+ "fails when only 'charts' set, 'num' > 0, but 'dimensions' == 0": {
+ wantFail: true,
+ config: Config{
+ Charts: ConfigCharts{
+ Num: 1,
+ Dims: 0,
+ },
+ },
+ },
+ "fails when only 'hidden_charts' set, 'num' > 0, but 'dimensions' == 0": {
+ wantFail: true,
+ config: Config{
+ HiddenCharts: ConfigCharts{
+ Num: 1,
+ Dims: 0,
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ example := New()
+ example.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, example.Init())
+ } else {
+ assert.NoError(t, example.Init())
+ }
+ })
+ }
+}
+
+func TestExample_Check(t *testing.T) {
+	// 'Check() error' reports whether the module is able to collect any data, so to test it we need to:
+	// - provide the module with a specific config.
+	// - initialize the module (call Init()).
+	// - call Check() and compare its return value with the expected value.
+
+	// The 'tests' map contains different test cases.
+ tests := map[string]struct {
+ prepare func() *Example
+ wantFail bool
+ }{
+ "success on default": {prepare: prepareExampleDefault},
+ "success when only 'charts' set": {prepare: prepareExampleOnlyCharts},
+ "success when only 'hidden_charts' set": {prepare: prepareExampleOnlyHiddenCharts},
+ "success when 'charts' and 'hidden_charts' set": {prepare: prepareExampleChartsAndHiddenCharts},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ example := test.prepare()
+ require.NoError(t, example.Init())
+
+ if test.wantFail {
+ assert.Error(t, example.Check())
+ } else {
+ assert.NoError(t, example.Check())
+ }
+ })
+ }
+}
+
+func TestExample_Charts(t *testing.T) {
+	// We want to ensure that an initialized module does not return 'nil'.
+	// If it is not 'nil', we are ok.
+
+	// The 'tests' map contains different test cases.
+ tests := map[string]struct {
+ prepare func(t *testing.T) *Example
+ wantNil bool
+ }{
+ "not initialized collector": {
+ wantNil: true,
+ prepare: func(t *testing.T) *Example {
+ return New()
+ },
+ },
+ "initialized collector": {
+ prepare: func(t *testing.T) *Example {
+ example := New()
+ require.NoError(t, example.Init())
+ return example
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ example := test.prepare(t)
+
+ if test.wantNil {
+ assert.Nil(t, example.Charts())
+ } else {
+ assert.NotNil(t, example.Charts())
+ }
+ })
+ }
+}
+
+func TestExample_Cleanup(t *testing.T) {
+	// Since this module has nothing to clean up,
+	// we just want to ensure that Cleanup() does not panic.
+
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestExample_Collect(t *testing.T) {
+ // 'Collect() map[string]int64' returns collected data, so to test it we need:
+ // - provide the module with a specific config.
+ // - initialize the module (call Init()).
+ // - call Collect() and compare its return value with the expected value.
+
+	// The 'tests' map contains different test cases.
+ tests := map[string]struct {
+ prepare func() *Example
+ wantCollected map[string]int64
+ }{
+ "default config": {
+ prepare: prepareExampleDefault,
+ wantCollected: map[string]int64{
+ "random_0_random0": 1,
+ "random_0_random1": -1,
+ "random_0_random2": 1,
+ "random_0_random3": -1,
+ },
+ },
+ "only 'charts' set": {
+ prepare: prepareExampleOnlyCharts,
+ wantCollected: map[string]int64{
+ "random_0_random0": 1,
+ "random_0_random1": -1,
+ "random_0_random2": 1,
+ "random_0_random3": -1,
+ "random_0_random4": 1,
+ "random_1_random0": 1,
+ "random_1_random1": -1,
+ "random_1_random2": 1,
+ "random_1_random3": -1,
+ "random_1_random4": 1,
+ },
+ },
+ "only 'hidden_charts' set": {
+ prepare: prepareExampleOnlyHiddenCharts,
+ wantCollected: map[string]int64{
+ "hidden_random_0_random0": 1,
+ "hidden_random_0_random1": -1,
+ "hidden_random_0_random2": 1,
+ "hidden_random_0_random3": -1,
+ "hidden_random_0_random4": 1,
+ "hidden_random_1_random0": 1,
+ "hidden_random_1_random1": -1,
+ "hidden_random_1_random2": 1,
+ "hidden_random_1_random3": -1,
+ "hidden_random_1_random4": 1,
+ },
+ },
+ "'charts' and 'hidden_charts' set": {
+ prepare: prepareExampleChartsAndHiddenCharts,
+ wantCollected: map[string]int64{
+ "hidden_random_0_random0": 1,
+ "hidden_random_0_random1": -1,
+ "hidden_random_0_random2": 1,
+ "hidden_random_0_random3": -1,
+ "hidden_random_0_random4": 1,
+ "hidden_random_1_random0": 1,
+ "hidden_random_1_random1": -1,
+ "hidden_random_1_random2": 1,
+ "hidden_random_1_random3": -1,
+ "hidden_random_1_random4": 1,
+ "random_0_random0": 1,
+ "random_0_random1": -1,
+ "random_0_random2": 1,
+ "random_0_random3": -1,
+ "random_0_random4": 1,
+ "random_1_random0": 1,
+ "random_1_random1": -1,
+ "random_1_random2": 1,
+ "random_1_random3": -1,
+ "random_1_random4": 1,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ example := test.prepare()
+ require.NoError(t, example.Init())
+
+ collected := example.Collect()
+
+ assert.Equal(t, test.wantCollected, collected)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, example, collected)
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, e *Example, collected map[string]int64) {
+ for _, chart := range *e.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok,
+ "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok,
+ "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareExampleDefault() *Example {
+ return prepareExample(New().Config)
+}
+
+func prepareExampleOnlyCharts() *Example {
+ return prepareExample(Config{
+ Charts: ConfigCharts{
+ Num: 2,
+ Dims: 5,
+ },
+ })
+}
+
+func prepareExampleOnlyHiddenCharts() *Example {
+ return prepareExample(Config{
+ HiddenCharts: ConfigCharts{
+ Num: 2,
+ Dims: 5,
+ },
+ })
+}
+
+func prepareExampleChartsAndHiddenCharts() *Example {
+ return prepareExample(Config{
+ Charts: ConfigCharts{
+ Num: 2,
+ Dims: 5,
+ },
+ HiddenCharts: ConfigCharts{
+ Num: 2,
+ Dims: 5,
+ },
+ })
+}
+
+func prepareExample(cfg Config) *Example {
+ example := New()
+ example.Config = cfg
+ example.randInt = func() int64 { return 1 }
+ return example
+}
diff --git a/src/go/plugin/go.d/modules/example/init.go b/src/go/plugin/go.d/modules/example/init.go
new file mode 100644
index 000000000..f159c4b53
--- /dev/null
+++ b/src/go/plugin/go.d/modules/example/init.go
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package example
+
+import (
+ "errors"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (e *Example) validateConfig() error {
+ if e.Config.Charts.Num <= 0 && e.Config.HiddenCharts.Num <= 0 {
+		return errors.New("'charts->num' or 'hidden_charts->num' must be > 0")
+ }
+ if e.Config.Charts.Num > 0 && e.Config.Charts.Dims <= 0 {
+ return errors.New("'charts->dimensions' must be > 0")
+ }
+ if e.Config.HiddenCharts.Num > 0 && e.Config.HiddenCharts.Dims <= 0 {
+ return errors.New("'hidden_charts->dimensions' must be > 0")
+ }
+ return nil
+}
+
+func (e *Example) initCharts() (*module.Charts, error) {
+ charts := &module.Charts{}
+
+ var ctx int
+ v := calcContextEvery(e.Config.Charts.Num, e.Config.Charts.Contexts)
+ for i := 0; i < e.Config.Charts.Num; i++ {
+ if i != 0 && v != 0 && ctx < (e.Config.Charts.Contexts-1) && i%v == 0 {
+ ctx++
+ }
+ chart := newChart(i, ctx, e.Config.Charts.Labels, module.ChartType(e.Config.Charts.Type))
+
+ if err := charts.Add(chart); err != nil {
+ return nil, err
+ }
+ }
+
+ ctx = 0
+ v = calcContextEvery(e.Config.HiddenCharts.Num, e.Config.HiddenCharts.Contexts)
+ for i := 0; i < e.Config.HiddenCharts.Num; i++ {
+ if i != 0 && v != 0 && ctx < (e.Config.HiddenCharts.Contexts-1) && i%v == 0 {
+ ctx++
+ }
+ chart := newHiddenChart(i, ctx, e.Config.HiddenCharts.Labels, module.ChartType(e.Config.HiddenCharts.Type))
+
+ if err := charts.Add(chart); err != nil {
+ return nil, err
+ }
+ }
+
+ return charts, nil
+}
+
+func calcContextEvery(charts, contexts int) int {
+ if contexts <= 1 {
+ return 0
+ }
+ if contexts > charts {
+ return 1
+ }
+ return charts / contexts
+}
diff --git a/src/go/plugin/go.d/modules/example/testdata/config.json b/src/go/plugin/go.d/modules/example/testdata/config.json
new file mode 100644
index 000000000..af06e85ac
--- /dev/null
+++ b/src/go/plugin/go.d/modules/example/testdata/config.json
@@ -0,0 +1,17 @@
+{
+ "update_every": 123,
+ "charts": {
+ "type": "ok",
+ "num": 123,
+ "contexts": 123,
+ "dimensions": 123,
+ "labels": 123
+ },
+ "hidden_charts": {
+ "type": "ok",
+ "num": 123,
+ "contexts": 123,
+ "dimensions": 123,
+ "labels": 123
+ }
+}
diff --git a/src/go/plugin/go.d/modules/example/testdata/config.yaml b/src/go/plugin/go.d/modules/example/testdata/config.yaml
new file mode 100644
index 000000000..a5f6556fd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/example/testdata/config.yaml
@@ -0,0 +1,13 @@
+update_every: 123
+charts:
+ type: "ok"
+ num: 123
+ contexts: 123
+ dimensions: 123
+ labels: 123
+hidden_charts:
+ type: "ok"
+ num: 123
+ contexts: 123
+ dimensions: 123
+ labels: 123
diff --git a/src/go/plugin/go.d/modules/exim/README.md b/src/go/plugin/go.d/modules/exim/README.md
new file mode 120000
index 000000000..f1f2ef9f9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/README.md
@@ -0,0 +1 @@
+integrations/exim.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/exim/charts.go b/src/go/plugin/go.d/modules/exim/charts.go
new file mode 100644
index 000000000..f09faf1d0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/charts.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package exim
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioQueueEmailsCount = module.Priority + iota
+)
+
+var charts = module.Charts{
+ queueEmailsCountChart.Copy(),
+}
+
+var queueEmailsCountChart = module.Chart{
+ ID: "qemails",
+ Title: "Exim Queue Emails",
+ Units: "emails",
+ Fam: "queue",
+ Ctx: "exim.qemails",
+ Priority: prioQueueEmailsCount,
+ Dims: module.Dims{
+ {ID: "emails"},
+ },
+}
diff --git a/src/go/plugin/go.d/modules/exim/collect.go b/src/go/plugin/go.d/modules/exim/collect.go
new file mode 100644
index 000000000..ce1a34729
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/collect.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package exim
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func (e *Exim) collect() (map[string]int64, error) {
+ resp, err := e.exec.countMessagesInQueue()
+ if err != nil {
+ return nil, err
+ }
+
+ emails, err := parseResponse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ mx := map[string]int64{
+ "emails": emails,
+ }
+
+ return mx, nil
+}
+
+func parseResponse(resp []byte) (int64, error) {
+ sc := bufio.NewScanner(bytes.NewReader(resp))
+ sc.Scan()
+
+ line := strings.TrimSpace(sc.Text())
+
+ emails, err := strconv.ParseInt(line, 10, 64)
+ if err != nil {
+ return 0, fmt.Errorf("invalid response '%s': %v", line, err)
+ }
+
+ return emails, nil
+}
diff --git a/src/go/plugin/go.d/modules/exim/config_schema.json b/src/go/plugin/go.d/modules/exim/config_schema.json
new file mode 100644
index 000000000..6561ea34f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Exim collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/exim/exec.go b/src/go/plugin/go.d/modules/exim/exec.go
new file mode 100644
index 000000000..241c72aca
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/exec.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package exim
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+type eximBinary interface {
+ countMessagesInQueue() ([]byte, error)
+}
+
+func newEximExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *eximExec {
+ return &eximExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ }
+}
+
+type eximExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+}
+
+func (e *eximExec) countMessagesInQueue() ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.ndsudoPath, "exim-bpc")
+
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/exim/exim.go b/src/go/plugin/go.d/modules/exim/exim.go
new file mode 100644
index 000000000..f3c3e6e78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/exim.go
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package exim
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("exim", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Exim {
+ return &Exim{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type Exim struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec eximBinary
+}
+
+func (e *Exim) Configuration() any {
+ return e.Config
+}
+
+func (e *Exim) Init() error {
+ exim, err := e.initEximExec()
+ if err != nil {
+ e.Errorf("exim exec initialization: %v", err)
+ return err
+ }
+ e.exec = exim
+
+ return nil
+}
+
+func (e *Exim) Check() error {
+ mx, err := e.collect()
+ if err != nil {
+ e.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (e *Exim) Charts() *module.Charts {
+ return e.charts
+}
+
+func (e *Exim) Collect() map[string]int64 {
+ mx, err := e.collect()
+ if err != nil {
+ e.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (e *Exim) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/exim/exim_test.go b/src/go/plugin/go.d/modules/exim/exim_test.go
new file mode 100644
index 000000000..16eb025e1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/exim_test.go
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package exim
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+		require.NotNil(t, data, name)
+	}
+}
+
+func TestExim_Configuration(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Exim{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestExim_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if failed to locate ndsudo": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ exim := New()
+ exim.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, exim.Init())
+ } else {
+ assert.NoError(t, exim.Init())
+ }
+ })
+ }
+}
+
+func TestExim_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Exim
+ }{
+ "not initialized exec": {
+ prepare: func() *Exim {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Exim {
+ exim := New()
+ exim.exec = prepareMockOK()
+ _ = exim.Check()
+ return exim
+ },
+ },
+ "after collect": {
+ prepare: func() *Exim {
+ exim := New()
+ exim.exec = prepareMockOK()
+ _ = exim.Collect()
+ return exim
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ exim := test.prepare()
+
+ assert.NotPanics(t, exim.Cleanup)
+ })
+ }
+}
+
+func TestEximCharts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestExim_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockEximExec
+ wantFail bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantFail: false,
+ },
+ "error on exec": {
+ prepareMock: prepareMockErr,
+ wantFail: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantFail: true,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ exim := New()
+ mock := test.prepareMock()
+ exim.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, exim.Check())
+ } else {
+ assert.NoError(t, exim.Check())
+ }
+ })
+ }
+}
+
+func TestExim_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockEximExec
+ wantMetrics map[string]int64
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantMetrics: map[string]int64{
+ "emails": 99,
+ },
+ },
+ "error on exec": {
+ prepareMock: prepareMockErr,
+ wantMetrics: nil,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ exim := New()
+ mock := test.prepareMock()
+ exim.exec = mock
+
+ mx := exim.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ assert.Len(t, *exim.Charts(), len(charts))
+ module.TestMetricsHasAllChartsDims(t, exim.Charts(), mx)
+ }
+ })
+ }
+}
+
+func prepareMockOK() *mockEximExec {
+ return &mockEximExec{
+ data: []byte("99"),
+ }
+}
+
+func prepareMockErr() *mockEximExec {
+ return &mockEximExec{
+ err: true,
+ }
+}
+
+func prepareMockEmptyResponse() *mockEximExec {
+ return &mockEximExec{}
+}
+
+func prepareMockUnexpectedResponse() *mockEximExec {
+ return &mockEximExec{
+ data: []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`),
+ }
+}
+
+type mockEximExec struct {
+ err bool
+ data []byte
+}
+
+func (m *mockEximExec) countMessagesInQueue() ([]byte, error) {
+ if m.err {
+ return nil, errors.New("mock.countMessagesInQueue() error")
+ }
+ return m.data, nil
+}
diff --git a/src/go/plugin/go.d/modules/exim/init.go b/src/go/plugin/go.d/modules/exim/init.go
new file mode 100644
index 000000000..d1d5c0793
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/init.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package exim
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (e *Exim) initEximExec() (eximBinary, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+ if _, err := os.Stat(ndsudoPath); err != nil {
+		return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+	}
+
+ exim := newEximExec(ndsudoPath, e.Timeout.Duration(), e.Logger)
+
+ return exim, nil
+}
diff --git a/src/go/plugin/go.d/modules/exim/integrations/exim.md b/src/go/plugin/go.d/modules/exim/integrations/exim.md
new file mode 100644
index 000000000..78f45683c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/integrations/exim.md
@@ -0,0 +1,191 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/exim/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/exim/metadata.yaml"
+sidebar_label: "Exim"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Mail Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Exim
+
+
+<img src="https://netdata.cloud/img/exim.jpg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: exim
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the Exim mail queue. It relies on the [`exim`](https://www.exim.org/exim-html-3.20/doc/html/spec_5.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+Executed commands:
+- `exim -bpc`
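+
+For reference, `exim -bpc` prints the number of messages currently in the queue as a single integer, so the expected output looks like this (illustrative):
+
+```bash
+# exim -bpc
+99
+```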
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Exim instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exim.qemails | emails | emails |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/exim.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/exim.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | exim binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: exim
+    update_every: 5 # Collect Exim queue statistics every 5 seconds
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `exim` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m exim
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `exim` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep exim
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep exim /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep exim
+```
+
+
diff --git a/src/go/plugin/go.d/modules/exim/metadata.yaml b/src/go/plugin/go.d/modules/exim/metadata.yaml
new file mode 100644
index 000000000..c7f4a7a98
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/metadata.yaml
@@ -0,0 +1,100 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-exim
+ plugin_name: go.d.plugin
+ module_name: exim
+ monitored_instance:
+ name: Exim
+ link: "https://www.exim.org/"
+ icon_filename: 'exim.jpg'
+ categories:
+ - data-collection.mail-servers
+ keywords:
+ - exim
+ - mail
+ - email
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+          This collector monitors the Exim mail queue.
+ It relies on the [`exim`](https://www.exim.org/exim-html-3.20/doc/html/spec_5.html) CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+ Executed commands:
+
+ - `exim -bpc`
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/exim.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: exim binary execution timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: exim
+                    update_every: 5 # Collect Exim queue statistics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+          description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: exim.qemails
+ description: Exim Queue Emails
+ unit: 'emails'
+ chart_type: line
+ dimensions:
+ - name: emails
diff --git a/src/go/plugin/go.d/modules/exim/testdata/config.json b/src/go/plugin/go.d/modules/exim/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/exim/testdata/config.yaml b/src/go/plugin/go.d/modules/exim/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/exim/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/fail2ban/README.md b/src/go/plugin/go.d/modules/fail2ban/README.md
new file mode 120000
index 000000000..642a8bcf5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/README.md
@@ -0,0 +1 @@
+integrations/fail2ban.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/fail2ban/charts.go b/src/go/plugin/go.d/modules/fail2ban/charts.go
new file mode 100644
index 000000000..3015c7388
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/charts.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fail2ban
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioJailBannedIPs = module.Priority + iota
+ prioJailActiveFailures
+)
+
+var jailChartsTmpl = module.Charts{
+ jailCurrentBannedIPs.Copy(),
+ jailActiveFailures.Copy(),
+}
+
+var (
+ jailCurrentBannedIPs = module.Chart{
+ ID: "jail_%s_banned_ips",
+ Title: "Fail2Ban Jail banned IPs",
+ Units: "addresses",
+ Fam: "bans",
+ Ctx: "fail2ban.jail_banned_ips",
+ Type: module.Line,
+ Priority: prioJailBannedIPs,
+ Dims: module.Dims{
+ {ID: "jail_%s_currently_banned", Name: "banned"},
+ },
+ }
+ jailActiveFailures = module.Chart{
+ ID: "jail_%s_active_failures",
+ Title: "Fail2Ban Jail active failures",
+ Units: "failures",
+ Fam: "failures",
+ Ctx: "fail2ban.jail_active_failures",
+ Type: module.Line,
+ Priority: prioJailActiveFailures,
+ Dims: module.Dims{
+ {ID: "jail_%s_currently_failed", Name: "active_failures"},
+ },
+ }
+)
+
+func (f *Fail2Ban) addJailCharts(jail string) {
+ charts := jailChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, jail)
+ chart.Labels = []module.Label{
+ {Key: "jail", Value: jail},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, jail)
+ }
+ }
+
+ if err := f.Charts().Add(*charts...); err != nil {
+ f.Warning(err)
+ }
+}
+
+func (f *Fail2Ban) removeJailCharts(jail string) {
+ px := fmt.Sprintf("jail_%s_", jail)
+ for _, chart := range *f.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/fail2ban/collect.go b/src/go/plugin/go.d/modules/fail2ban/collect.go
new file mode 100644
index 000000000..8ca413c3b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/collect.go
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fail2ban
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+func (f *Fail2Ban) collect() (map[string]int64, error) {
+ now := time.Now()
+
+ if now.Sub(f.lastDiscoverTime) > f.discoverEvery || f.forceDiscover {
+ jails, err := f.discoverJails()
+ if err != nil {
+ return nil, err
+ }
+ f.jails = jails
+ f.lastDiscoverTime = now
+ f.forceDiscover = false
+ }
+
+ mx := make(map[string]int64)
+
+ if err := f.collectJails(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (f *Fail2Ban) discoverJails() ([]string, error) {
+ bs, err := f.exec.status()
+ if err != nil {
+ return nil, err
+ }
+
+ jails, err := parseFail2banStatus(bs)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(jails) == 0 {
+ return nil, errors.New("no jails found")
+ }
+
+ f.Debugf("discovered %d jails: %v", len(jails), jails)
+
+ return jails, nil
+}
+
+func (f *Fail2Ban) collectJails(mx map[string]int64) error {
+ seen := make(map[string]bool)
+
+ for _, jail := range f.jails {
+ f.Debugf("querying status for jail '%s'", jail)
+ bs, err := f.exec.jailStatus(jail)
+ if err != nil {
+ if errors.Is(err, errJailNotExist) {
+ f.forceDiscover = true
+ continue
+ }
+ return err
+ }
+
+ failed, banned, err := parseFail2banJailStatus(bs)
+ if err != nil {
+ return err
+ }
+
+ if !f.seenJails[jail] {
+ f.seenJails[jail] = true
+ f.addJailCharts(jail)
+ }
+ seen[jail] = true
+
+ px := fmt.Sprintf("jail_%s_", jail)
+
+ mx[px+"currently_failed"] = failed
+ mx[px+"currently_banned"] = banned
+ }
+
+ for jail := range f.seenJails {
+ if !seen[jail] {
+ delete(f.seenJails, jail)
+ f.removeJailCharts(jail)
+ }
+ }
+
+ return nil
+}
+
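+// parseFail2banJailStatus extracts the "Currently failed" and "Currently banned" counters from 'fail2ban-client status <JAIL>' output.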
+func parseFail2banJailStatus(jailStatus []byte) (failed, banned int64, err error) {
+ const (
+ failedSub = "Currently failed:"
+ bannedSub = "Currently banned:"
+ )
+
+ var failedFound, bannedFound bool
+
+ sc := bufio.NewScanner(bytes.NewReader(jailStatus))
+
+ for sc.Scan() && !(failedFound && bannedFound) {
+ text := strings.TrimSpace(sc.Text())
+ if text == "" {
+ continue
+ }
+
+ if !failedFound {
+ if i := strings.Index(text, failedSub); i != -1 {
+ failedFound = true
+ s := strings.TrimSpace(text[i+len(failedSub):])
+ if failed, err = strconv.ParseInt(s, 10, 64); err != nil {
+ return 0, 0, fmt.Errorf("failed to parse currently failed value (%s): %v", s, err)
+ }
+ }
+ }
+ if !bannedFound {
+ if i := strings.Index(text, bannedSub); i != -1 {
+ bannedFound = true
+ s := strings.TrimSpace(text[i+len(bannedSub):])
+ if banned, err = strconv.ParseInt(s, 10, 64); err != nil {
+ return 0, 0, fmt.Errorf("failed to parse currently banned value (%s): %v", s, err)
+ }
+ }
+ }
+ }
+
+ if !failedFound || !bannedFound {
+ return 0, 0, errors.New("failed to find failed and banned values")
+ }
+
+ return failed, banned, nil
+}
+
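+// parseFail2banStatus extracts jail names from the "Jail list:" line of 'fail2ban-client status' output.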
+func parseFail2banStatus(status []byte) ([]string, error) {
+ const sub = "Jail list:"
+
+ var jails []string
+
+ sc := bufio.NewScanner(bytes.NewReader(status))
+
+ for sc.Scan() {
+ text := strings.TrimSpace(sc.Text())
+
+ if i := strings.Index(text, sub); i != -1 {
+ s := strings.ReplaceAll(text[i+len(sub):], ",", "")
+ jails = strings.Fields(s)
+ break
+ }
+ }
+
+ if len(jails) == 0 {
+ return nil, errors.New("failed to find jails")
+ }
+
+ return jails, nil
+}
diff --git a/src/go/plugin/go.d/modules/fail2ban/config_schema.json b/src/go/plugin/go.d/modules/fail2ban/config_schema.json
new file mode 100644
index 000000000..7fd0d91af
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Fail2Ban collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/fail2ban/exec.go b/src/go/plugin/go.d/modules/fail2ban/exec.go
new file mode 100644
index 000000000..b3037a6cf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/exec.go
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fail2ban
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "os/exec"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+var errJailNotExist = errors.New("jail does not exist")
+
+const socketPathInDocker = "/host/var/run/fail2ban/fail2ban.sock"
+
+func newFail2BanClientCliExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *fail2banClientCliExec {
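+	// The documented Docker setup mounts the host's /var/run at /host/var/run; if that path exists, use the socket-based ndsudo commands.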
+ _, err := os.Stat("/host/var/run")
+
+ return &fail2banClientCliExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ isInsideDocker: err == nil,
+ }
+}
+
+type fail2banClientCliExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+ isInsideDocker bool
+}
+
+func (e *fail2banClientCliExec) status() ([]byte, error) {
+ if e.isInsideDocker {
+ return e.execute("fail2ban-client-status-socket",
+ "--socket_path", socketPathInDocker,
+ )
+ }
+ return e.execute("fail2ban-client-status")
+}
+
+func (e *fail2banClientCliExec) jailStatus(jail string) ([]byte, error) {
+ if e.isInsideDocker {
+ return e.execute("fail2ban-client-status-jail-socket",
+ "--jail", jail,
+ "--socket_path", socketPathInDocker,
+ )
+ }
+ return e.execute("fail2ban-client-status-jail",
+ "--jail", jail,
+ )
+}
+
+func (e *fail2banClientCliExec) execute(args ...string) ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.ndsudoPath, args...)
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ if strings.HasPrefix(strings.TrimSpace(string(bs)), "Sorry but the jail") {
+ return nil, errJailNotExist
+ }
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/fail2ban/fail2ban.go b/src/go/plugin/go.d/modules/fail2ban/fail2ban.go
new file mode 100644
index 000000000..45dcb6e2e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/fail2ban.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fail2ban
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("fail2ban", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Fail2Ban {
+ return &Fail2Ban{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ discoverEvery: time.Minute * 5,
+ seenJails: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ Fail2Ban struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec fail2banClientCli
+
+ discoverEvery time.Duration
+ lastDiscoverTime time.Time
+ forceDiscover bool
+ jails []string
+
+ seenJails map[string]bool
+ }
+ fail2banClientCli interface {
+ status() ([]byte, error)
+ jailStatus(s string) ([]byte, error)
+ }
+)
+
+func (f *Fail2Ban) Configuration() any {
+ return f.Config
+}
+
+func (f *Fail2Ban) Init() error {
+ f2bClientExec, err := f.initFail2banClientCliExec()
+ if err != nil {
+ f.Errorf("fail2ban-client exec initialization: %v", err)
+ return err
+ }
+ f.exec = f2bClientExec
+
+ return nil
+}
+
+func (f *Fail2Ban) Check() error {
+ mx, err := f.collect()
+ if err != nil {
+ f.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (f *Fail2Ban) Charts() *module.Charts {
+ return f.charts
+}
+
+func (f *Fail2Ban) Collect() map[string]int64 {
+ mx, err := f.collect()
+ if err != nil {
+ f.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (f *Fail2Ban) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/fail2ban/fail2ban_test.go b/src/go/plugin/go.d/modules/fail2ban/fail2ban_test.go
new file mode 100644
index 000000000..ae84959bd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/fail2ban_test.go
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fail2ban
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatus, _ = os.ReadFile("testdata/fail2ban-status.txt")
+ dataJailStatus, _ = os.ReadFile("testdata/fail2ban-jail-status.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataStatus": dataStatus,
+ "dataJailStatus": dataJailStatus,
+ } {
+ require.NotNil(t, data, name)
+	}
+}
+
+func TestFail2Ban_Configuration(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Fail2Ban{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestFail2Ban_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if failed to locate ndsudo": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ f2b := New()
+ f2b.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, f2b.Init())
+ } else {
+ assert.NoError(t, f2b.Init())
+ }
+ })
+ }
+}
+
+func TestFail2Ban_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Fail2Ban
+ }{
+ "not initialized exec": {
+ prepare: func() *Fail2Ban {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Fail2Ban {
+ f2b := New()
+ f2b.exec = prepareMockOk()
+ _ = f2b.Check()
+ return f2b
+ },
+ },
+ "after collect": {
+ prepare: func() *Fail2Ban {
+ f2b := New()
+ f2b.exec = prepareMockOk()
+ _ = f2b.Collect()
+ return f2b
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ f2b := test.prepare()
+
+ assert.NotPanics(t, f2b.Cleanup)
+ })
+ }
+}
+
+func TestFail2Ban_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestFail2Ban_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockFail2BanClientCliExec
+ wantFail bool
+ }{
+ "success multiple jails": {
+ wantFail: false,
+ prepareMock: prepareMockOk,
+ },
+ "error on status": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnStatus,
+ },
+ "empty response (no jails)": {
+ prepareMock: prepareMockEmptyResponse,
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ f2b := New()
+ mock := test.prepareMock()
+ f2b.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, f2b.Check())
+ } else {
+ assert.NoError(t, f2b.Check())
+ }
+ })
+ }
+}
+
+func TestFail2Ban_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockFail2BanClientCliExec
+ wantMetrics map[string]int64
+ }{
+ "success multiple jails": {
+ prepareMock: prepareMockOk,
+ wantMetrics: map[string]int64{
+ "jail_dovecot_currently_banned": 30,
+ "jail_dovecot_currently_failed": 10,
+ "jail_sshd_currently_banned": 30,
+ "jail_sshd_currently_failed": 10,
+ },
+ },
+ "error on status": {
+ prepareMock: prepareMockErrOnStatus,
+ wantMetrics: nil,
+ },
+ "empty response (no jails)": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ f2b := New()
+ mock := test.prepareMock()
+ f2b.exec = mock
+
+ mx := f2b.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ assert.Len(t, *f2b.Charts(), len(jailChartsTmpl)*2)
+ testMetricsHasAllChartsDims(t, f2b, mx)
+ }
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, f2b *Fail2Ban, mx map[string]int64) {
+ for _, chart := range *f2b.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareMockOk() *mockFail2BanClientCliExec {
+ return &mockFail2BanClientCliExec{
+ statusData: dataStatus,
+ jailStatusData: dataJailStatus,
+ }
+}
+
+func prepareMockErrOnStatus() *mockFail2BanClientCliExec {
+ return &mockFail2BanClientCliExec{
+ errOnStatus: true,
+ statusData: dataStatus,
+ jailStatusData: dataJailStatus,
+ }
+}
+
+func prepareMockEmptyResponse() *mockFail2BanClientCliExec {
+ return &mockFail2BanClientCliExec{}
+}
+
+type mockFail2BanClientCliExec struct {
+ errOnStatus bool
+ statusData []byte
+
+ errOnJailStatus bool
+ jailStatusData []byte
+}
+
+func (m *mockFail2BanClientCliExec) status() ([]byte, error) {
+ if m.errOnStatus {
+ return nil, errors.New("mock.status() error")
+ }
+
+ return m.statusData, nil
+}
+
+func (m *mockFail2BanClientCliExec) jailStatus(_ string) ([]byte, error) {
+ if m.errOnJailStatus {
+ return nil, errors.New("mock.jailStatus() error")
+ }
+
+ return m.jailStatusData, nil
+}
diff --git a/src/go/plugin/go.d/modules/fail2ban/init.go b/src/go/plugin/go.d/modules/fail2ban/init.go
new file mode 100644
index 000000000..ab963616c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/init.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fail2ban
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (f *Fail2Ban) initFail2banClientCliExec() (fail2banClientCli, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+ if _, err := os.Stat(ndsudoPath); err != nil {
+		return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+	}
+
+ f2bClientExec := newFail2BanClientCliExec(ndsudoPath, f.Timeout.Duration(), f.Logger)
+
+ return f2bClientExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/fail2ban/integrations/fail2ban.md b/src/go/plugin/go.d/modules/fail2ban/integrations/fail2ban.md
new file mode 100644
index 000000000..0b9679256
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/integrations/fail2ban.md
@@ -0,0 +1,204 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/fail2ban/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/fail2ban/metadata.yaml"
+sidebar_label: "Fail2ban"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Fail2ban
+
+
+<img src="https://netdata.cloud/img/fail2ban.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: fail2ban
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector tracks two main metrics for each jail: currently banned IPs and active failure incidents. It relies on the [`fail2ban-client`](https://linux.die.net/man/1/fail2ban-client) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
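+
+In practice this means running `fail2ban-client status` (to discover jails) and `fail2ban-client status <JAIL>` (per jail) through `ndsudo`; the parsed output looks similar to the following illustrative sample:
+
+```bash
+# fail2ban-client status
+Status
+|- Number of jail:	2
+`- Jail list:	sshd, dovecot
+```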
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per jail
+
+These metrics refer to the Jail.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| jail | Jail's name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| fail2ban.jail_banned_ips | banned | addresses |
+| fail2ban.jail_active_failures | active_failures | failures |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### For Netdata running in a Docker container
+
+1. **Install Fail2ban client**.
+
+ Ensure `fail2ban-client` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=fail2ban` when starting the container.
+
+2. **Mount host's `/var/run` directory**.
+
+ Mount the host machine's `/var/run` directory to `/host/var/run` inside your Netdata container. This grants Netdata access to the Fail2ban socket file, typically located at `/var/run/fail2ban/fail2ban.sock`.
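+
+   For example, a minimal `docker run` sketch covering both steps (adjust the image tag and any other options to match your deployment):
+
+   ```bash
+   docker run -d --name=netdata \
+     -e NETDATA_EXTRA_DEB_PACKAGES=fail2ban \
+     -v /var/run:/host/var/run \
+     netdata/netdata:latest
+   ```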
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/fail2ban.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/fail2ban.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | fail2ban-client binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: fail2ban
+    update_every: 5 # Collect Fail2Ban jail statistics every 5 seconds
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `fail2ban` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m fail2ban
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `fail2ban` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep fail2ban
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep fail2ban /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep fail2ban
+```
+
+
diff --git a/src/go/plugin/go.d/modules/fail2ban/metadata.yaml b/src/go/plugin/go.d/modules/fail2ban/metadata.yaml
new file mode 100644
index 000000000..922b4e5ad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/metadata.yaml
@@ -0,0 +1,114 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-fail2ban
+ plugin_name: go.d.plugin
+ module_name: fail2ban
+ monitored_instance:
+ name: Fail2ban
+ link: "https://github.com/fail2ban/fail2ban#readme"
+ icon_filename: fail2ban.png
+ categories:
+ - data-collection.authentication-and-authorization
+ keywords:
+ - fail2ban
+ - security
+ - authentication
+ - authorization
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ This collector tracks two main metrics for each jail: currently banned IPs and active failure incidents.
+ It relies on the [`fail2ban-client`](https://linux.die.net/man/1/fail2ban-client) CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: For Netdata running in a Docker container
+ description: |
+ 1. **Install Fail2ban client**.
+
+ Ensure `fail2ban-client` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=fail2ban` when starting the container.
+
+ 2. **Mount host's `/var/run` directory**.
+
+ Mount the host machine's `/var/run` directory to `/host/var/run` inside your Netdata container. This grants Netdata access to the Fail2ban socket file, typically located at `/var/run/fail2ban/fail2ban.sock`.
+ configuration:
+ file:
+ name: go.d/fail2ban.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: fail2ban-client binary execution timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: fail2ban
+                    update_every: 5 # Collect Fail2Ban jail statistics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: jail
+ description: These metrics refer to the Jail.
+ labels:
+ - name: jail
+ description: Jail's name
+ metrics:
+ - name: fail2ban.jail_banned_ips
+ description: Fail2Ban Jail banned IPs
+ unit: addresses
+ chart_type: line
+ dimensions:
+ - name: banned
+ - name: fail2ban.jail_active_failures
+ description: Fail2Ban Jail active failures
+ unit: failures
+ chart_type: line
+ dimensions:
+ - name: active_failures
diff --git a/src/go/plugin/go.d/modules/fail2ban/testdata/config.json b/src/go/plugin/go.d/modules/fail2ban/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/fail2ban/testdata/config.yaml b/src/go/plugin/go.d/modules/fail2ban/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/fail2ban/testdata/fail2ban-jail-status.txt b/src/go/plugin/go.d/modules/fail2ban/testdata/fail2ban-jail-status.txt
new file mode 100644
index 000000000..17a3f53c1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/testdata/fail2ban-jail-status.txt
@@ -0,0 +1,9 @@
+Status for the jail: JAIL
+|- Filter
+| |- Currently failed: 10
+| |- Total failed: 20
+| `- File list: /var/log/auth.log
+`- Actions
+ |- Currently banned: 30
+ |- Total banned: 40
+ `- Banned IP list:
diff --git a/src/go/plugin/go.d/modules/fail2ban/testdata/fail2ban-status.txt b/src/go/plugin/go.d/modules/fail2ban/testdata/fail2ban-status.txt
new file mode 100644
index 000000000..1e65a78cf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fail2ban/testdata/fail2ban-status.txt
@@ -0,0 +1,3 @@
+Status
+|- Number of jail: 1
+`- Jail list: sshd, dovecot \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/filecheck/README.md b/src/go/plugin/go.d/modules/filecheck/README.md
new file mode 120000
index 000000000..24dc78d8d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/README.md
@@ -0,0 +1 @@
+integrations/files_and_directories.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/filecheck/cache.go b/src/go/plugin/go.d/modules/filecheck/cache.go
new file mode 100644
index 000000000..1acd6f821
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/cache.go
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filecheck
+
+func newSeenItems() *seenItems {
+ return &seenItems{
+ items: make(map[string]*seenItem),
+ }
+}
+
+type (
+ seenItems struct {
+ items map[string]*seenItem
+ }
+ seenItem struct {
+ hasExistenceCharts bool
+ hasOtherCharts bool
+ }
+)
+
+func (c *seenItems) getp(path string) *seenItem {
+ item, ok := c.items[path]
+ if !ok {
+ item = &seenItem{}
+ c.items[path] = item
+ }
+ return item
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/charts.go b/src/go/plugin/go.d/modules/filecheck/charts.go
new file mode 100644
index 000000000..6d00463a6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/charts.go
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filecheck
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioFileExistenceStatus = module.Priority + iota
+ prioFileModificationTimeAgo
+ prioFileSize
+
+ prioDirExistenceStatus
+ prioDirModificationTimeAgo
+ prioDirSize
+ prioDirFilesCount
+)
+
+var (
+ fileExistenceStatusChartTmpl = module.Chart{
+ ID: "file_%s_existence_status",
+ Title: "File existence",
+ Units: "status",
+ Fam: "file existence",
+ Ctx: "filecheck.file_existence_status",
+ Priority: prioFileExistenceStatus,
+ Dims: module.Dims{
+ {ID: "file_%s_existence_status_exist", Name: "exist"},
+ {ID: "file_%s_existence_status_not_exist", Name: "not_exist"},
+ },
+ }
+
+ fileModificationTimeAgoChartTmpl = module.Chart{
+ ID: "file_%s_modification_time_ago",
+ Title: "File time since the last modification",
+ Units: "seconds",
+ Fam: "file mtime",
+ Ctx: "filecheck.file_modification_time_ago",
+ Priority: prioFileModificationTimeAgo,
+ Dims: module.Dims{
+ {ID: "file_%s_mtime_ago", Name: "mtime_ago"},
+ },
+ }
+ fileSizeChartTmpl = module.Chart{
+ ID: "file_%s_size",
+ Title: "File size",
+ Units: "bytes",
+ Fam: "file size",
+ Ctx: "filecheck.file_size_bytes",
+ Priority: prioFileSize,
+ Dims: module.Dims{
+ {ID: "file_%s_size_bytes", Name: "size"},
+ },
+ }
+)
+
+var (
+ dirExistenceStatusChartTmpl = module.Chart{
+ ID: "dir_%s_existence_status",
+ Title: "Directory existence",
+ Units: "status",
+ Fam: "dir existence",
+ Ctx: "filecheck.dir_existence_status",
+ Priority: prioDirExistenceStatus,
+ Dims: module.Dims{
+ {ID: "dir_%s_existence_status_exist", Name: "exist"},
+ {ID: "dir_%s_existence_status_not_exist", Name: "not_exist"},
+ },
+ }
+
+ dirModificationTimeAgoChartTmpl = module.Chart{
+ ID: "dir_%s_modification_time_ago",
+ Title: "Directory time since the last modification",
+ Units: "seconds",
+ Fam: "dir mtime",
+ Ctx: "filecheck.dir_modification_time_ago",
+ Priority: prioDirModificationTimeAgo,
+ Dims: module.Dims{
+ {ID: "dir_%s_mtime_ago", Name: "mtime_ago"},
+ },
+ }
+ dirSizeChartTmpl = module.Chart{
+ ID: "dir_%s_size",
+ Title: "Directory size",
+ Units: "bytes",
+ Fam: "dir size",
+ Ctx: "filecheck.dir_size_bytes",
+ Priority: prioDirSize,
+ Dims: module.Dims{
+ {ID: "dir_%s_size_bytes", Name: "size"},
+ },
+ }
+ dirFilesCountChartTmpl = module.Chart{
+ ID: "dir_%s_files_count",
+ Title: "Directory files count",
+ Units: "files",
+ Fam: "dir files",
+ Ctx: "filecheck.dir_files_count",
+ Priority: prioDirFilesCount,
+ Dims: module.Dims{
+ {ID: "dir_%s_files_count", Name: "files"},
+ },
+ }
+)
+
+func (f *Filecheck) updateFileCharts(infos []*statInfo) {
+ seen := make(map[string]bool)
+
+ for _, info := range infos {
+ seen[info.path] = true
+
+ sf := f.seenFiles.getp(info.path)
+
+ if !sf.hasExistenceCharts {
+ sf.hasExistenceCharts = true
+ f.addFileCharts(info.path,
+ fileExistenceStatusChartTmpl.Copy(),
+ )
+ }
+
+ if !sf.hasOtherCharts && info.fi != nil {
+ sf.hasOtherCharts = true
+ f.addFileCharts(info.path,
+ fileModificationTimeAgoChartTmpl.Copy(),
+ fileSizeChartTmpl.Copy(),
+ )
+
+ } else if sf.hasOtherCharts && info.fi == nil {
+ sf.hasOtherCharts = false
+ f.removeFileOtherCharts(info.path)
+ }
+ }
+
+ for path := range f.seenFiles.items {
+ if !seen[path] {
+ delete(f.seenFiles.items, path)
+ f.removeFileAllCharts(path)
+ }
+ }
+}
+
+func (f *Filecheck) updateDirCharts(infos []*statInfo) {
+ seen := make(map[string]bool)
+
+ for _, info := range infos {
+ seen[info.path] = true
+
+ sd := f.seenDirs.getp(info.path)
+
+ if !sd.hasExistenceCharts {
+ sd.hasExistenceCharts = true
+ f.addDirCharts(info.path,
+ dirExistenceStatusChartTmpl.Copy(),
+ )
+ }
+
+ if !sd.hasOtherCharts && info.fi != nil {
+ sd.hasOtherCharts = true
+ f.addDirCharts(info.path,
+ dirModificationTimeAgoChartTmpl.Copy(),
+ dirFilesCountChartTmpl.Copy(),
+ )
+ if f.Dirs.CollectDirSize {
+ f.addDirCharts(info.path,
+ dirSizeChartTmpl.Copy(),
+ )
+ }
+
+ } else if sd.hasOtherCharts && info.fi == nil {
+ sd.hasOtherCharts = false
+ f.removeDirOtherCharts(info.path)
+ }
+ }
+
+ for path := range f.seenDirs.items {
+ if !seen[path] {
+ delete(f.seenDirs.items, path)
+ f.removeDirAllCharts(path)
+ }
+ }
+}
+
+func (f *Filecheck) addFileCharts(filePath string, chartsTmpl ...*module.Chart) {
+ cs := append(module.Charts{}, chartsTmpl...)
+ charts := cs.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanPath(filePath))
+ chart.Labels = []module.Label{
+ {Key: "file_path", Value: filePath},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, filePath)
+ }
+ }
+
+ if err := f.Charts().Add(*charts...); err != nil {
+ f.Warning(err)
+ }
+}
+
+func (f *Filecheck) addDirCharts(dirPath string, chartsTmpl ...*module.Chart) {
+ cs := append(module.Charts{}, chartsTmpl...)
+ charts := cs.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanPath(dirPath))
+ chart.Labels = []module.Label{
+ {Key: "dir_path", Value: dirPath},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, dirPath)
+ }
+ }
+
+ if err := f.Charts().Add(*charts...); err != nil {
+ f.Warning(err)
+ }
+}
+
+func (f *Filecheck) removeFileAllCharts(filePath string) {
+ px := fmt.Sprintf("file_%s_", cleanPath(filePath))
+ f.removeCharts(func(id string) bool {
+ return strings.HasPrefix(id, px)
+ })
+}
+
+func (f *Filecheck) removeFileOtherCharts(filePath string) {
+ px := fmt.Sprintf("file_%s_", cleanPath(filePath))
+ f.removeCharts(func(id string) bool {
+ return strings.HasPrefix(id, px) && !strings.HasSuffix(id, "existence_status")
+ })
+}
+
+func (f *Filecheck) removeDirAllCharts(dirPath string) {
+ px := fmt.Sprintf("dir_%s_", cleanPath(dirPath))
+ f.removeCharts(func(id string) bool {
+ return strings.HasPrefix(id, px)
+ })
+}
+
+func (f *Filecheck) removeDirOtherCharts(dirPath string) {
+ px := fmt.Sprintf("dir_%s_", cleanPath(dirPath))
+ f.removeCharts(func(id string) bool {
+ return strings.HasPrefix(id, px) && !strings.HasSuffix(id, "existence_status")
+ })
+}
+
+func (f *Filecheck) removeCharts(match func(id string) bool) {
+ for _, chart := range *f.Charts() {
+ if match(chart.ID) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func cleanPath(path string) string {
+ path = strings.ReplaceAll(path, " ", "_")
+ path = strings.ReplaceAll(path, ".", "_")
+ return path
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/collect.go b/src/go/plugin/go.d/modules/filecheck/collect.go
new file mode 100644
index 000000000..077ad86c6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/collect.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filecheck
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+)
+
+func (f *Filecheck) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ f.collectFiles(mx)
+ f.collectDirs(mx)
+
+ return mx, nil
+}
+
+type statInfo struct {
+ path string
+ exists bool
+ fi fs.FileInfo
+}
+
+func getStatInfo(path string) *statInfo {
+ fi, err := os.Stat(path)
+ if err != nil {
+ return &statInfo{
+ path: path,
+ exists: !errors.Is(err, fs.ErrNotExist),
+ }
+ }
+
+ return &statInfo{
+ path: path,
+ exists: true,
+ fi: fi,
+ }
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/collect_dirs.go b/src/go/plugin/go.d/modules/filecheck/collect_dirs.go
new file mode 100644
index 000000000..143915d4d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/collect_dirs.go
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filecheck
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "time"
+)
+
+func (f *Filecheck) collectDirs(mx map[string]int64) {
+ now := time.Now()
+
+ if f.isTimeToDiscoverDirs(now) {
+ f.lastDiscDirsTime = now
+ f.curDirs = f.discoveryDirs()
+ }
+
+ var infos []*statInfo
+
+ for _, dir := range f.curDirs {
+ si := getStatInfo(dir)
+ infos = append(infos, si)
+
+ f.collectDir(mx, si, now)
+ }
+
+ f.updateDirCharts(infos)
+}
+
+func (f *Filecheck) collectDir(mx map[string]int64, si *statInfo, now time.Time) {
+ px := fmt.Sprintf("dir_%s_", si.path)
+
+ mx[px+"existence_status_exist"] = 0
+ mx[px+"existence_status_not_exist"] = 0
+ if !si.exists {
+ mx[px+"existence_status_not_exist"] = 1
+ } else {
+ mx[px+"existence_status_exist"] = 1
+ }
+
+ if si.fi == nil || !si.fi.IsDir() {
+ return
+ }
+
+ mx[px+"mtime_ago"] = int64(now.Sub(si.fi.ModTime()).Seconds())
+
+ if v, err := calcFilesInDir(si.path); err == nil {
+ mx[px+"files_count"] = v
+ }
+ if f.Dirs.CollectDirSize {
+ if v, err := calcDirSize(si.path); err == nil {
+ mx[px+"size_bytes"] = v
+ }
+ }
+}
+
+func (f *Filecheck) discoveryDirs() (dirs []string) {
+ return discoverFilesOrDirs(f.Dirs.Include, func(v string, fi os.FileInfo) bool {
+ return fi.IsDir() && !f.dirsFilter.MatchString(v)
+ })
+}
+
+func (f *Filecheck) isTimeToDiscoverDirs(now time.Time) bool {
+ return now.After(f.lastDiscDirsTime.Add(f.DiscoveryEvery.Duration()))
+}
+
+func calcFilesInDir(dirPath string) (int64, error) {
+ f, err := os.Open(dirPath)
+ if err != nil {
+ return 0, err
+ }
+ defer func() { _ = f.Close() }()
+ names, err := f.Readdirnames(-1)
+ return int64(len(names)), err
+}
+
+func calcDirSize(dirPath string) (int64, error) {
+ var size int64
+ err := filepath.Walk(dirPath, func(_ string, fi os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if !fi.IsDir() {
+ size += fi.Size()
+ }
+ return nil
+ })
+ return size, err
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/collect_files.go b/src/go/plugin/go.d/modules/filecheck/collect_files.go
new file mode 100644
index 000000000..4c465c111
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/collect_files.go
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filecheck
+
+import (
+ "fmt"
+ "os"
+ "time"
+)
+
+func (f *Filecheck) collectFiles(mx map[string]int64) {
+ now := time.Now()
+
+ if f.isTimeToDiscoverFiles(now) {
+ f.lastDiscFilesTime = now
+ f.curFiles = f.discoverFiles()
+ }
+
+ var infos []*statInfo
+
+ for _, file := range f.curFiles {
+ si := getStatInfo(file)
+
+ infos = append(infos, si)
+
+ f.collectFile(mx, si, now)
+ }
+
+ f.updateFileCharts(infos)
+}
+
+func (f *Filecheck) collectFile(mx map[string]int64, si *statInfo, now time.Time) {
+ px := fmt.Sprintf("file_%s_", si.path)
+
+ mx[px+"existence_status_exist"] = 0
+ mx[px+"existence_status_not_exist"] = 0
+ if !si.exists {
+ mx[px+"existence_status_not_exist"] = 1
+ } else {
+ mx[px+"existence_status_exist"] = 1
+ }
+
+ if si.fi == nil || !si.fi.Mode().IsRegular() {
+ return
+ }
+
+ mx[px+"mtime_ago"] = int64(now.Sub(si.fi.ModTime()).Seconds())
+ mx[px+"size_bytes"] = si.fi.Size()
+}
+
+func (f *Filecheck) discoverFiles() (files []string) {
+ return discoverFilesOrDirs(f.Files.Include, func(absPath string, fi os.FileInfo) bool {
+ return fi.Mode().IsRegular() && !f.filesFilter.MatchString(absPath)
+ })
+}
+
+func (f *Filecheck) isTimeToDiscoverFiles(now time.Time) bool {
+ return now.After(f.lastDiscFilesTime.Add(f.DiscoveryEvery.Duration()))
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/config_schema.json b/src/go/plugin/go.d/modules/filecheck/config_schema.json
new file mode 100644
index 000000000..c64bb941f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/config_schema.json
@@ -0,0 +1,164 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Filecheck collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "discovery_every": {
+ "title": "Scan interval",
+ "description": "Scan frequency interval (seconds) for files and directories with patterns (globs) in their paths.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 60
+ },
+ "files": {
+ "title": "File selector",
+        "description": "Configuration for monitoring specific files. If left empty, no files will be monitored.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "include": {
+ "title": "Include",
+ "description": "Include files that match any of the specified include [patterns](https://golang.org/pkg/path/filepath/#Match).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Filepath",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "uniqueItems": true
+ },
+ "exclude": {
+ "title": "Exclude",
+ "description": "Exclude files that match any of the specified exclude [patterns](https://golang.org/pkg/path/filepath/#Match).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Filepath",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "uniqueItems": true
+ }
+ },
+ "required": [
+ "include"
+ ]
+ },
+ "dirs": {
+ "title": "Directory selector",
+        "description": "Configuration for monitoring specific directories. If left empty, no directories will be monitored.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "collect_dir_size": {
+ "title": "Collect directory size",
+ "description": "Enable the collection of directory sizes for each monitored directory. **Enabling this option may introduce additional overhead** on both Netdata and the host system, particularly if directories contain a large number of subdirectories and files.",
+ "type": "boolean",
+ "default": false
+ },
+ "include": {
+ "title": "Include",
+ "description": "Include directories that match any of the specified include [patterns](https://golang.org/pkg/path/filepath/#Match).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Directory",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "uniqueItems": true
+ },
+ "exclude": {
+ "title": "Exclude",
+ "description": "Exclude directories that match any of the specified exclude [patterns](https://golang.org/pkg/path/filepath/#Match).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Directory",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "uniqueItems": true
+ }
+ },
+ "required": [
+ "include"
+ ]
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "discovery_every"
+ ]
+ },
+ {
+ "title": "Files",
+ "fields": [
+ "files"
+ ]
+ },
+ {
+ "title": "Directories",
+ "fields": [
+ "dirs"
+ ]
+ }
+ ]
+ },
+ "files": {
+ "ui:help": "The logic for inclusion and exclusion is as follows: `(include1 OR include2) AND !(exclude1 OR exclude2)`.",
+ "ui:collapsible": true,
+ "include": {
+ "ui:listFlavour": "list"
+ },
+ "exclude": {
+ "ui:listFlavour": "list"
+ }
+ },
+ "dirs": {
+ "ui:help": "The logic for inclusion and exclusion is as follows: `(include1 OR include2) AND !(exclude1 OR exclude2)`.",
+ "ui:collapsible": true,
+ "include": {
+ "ui:listFlavour": "list"
+ },
+ "exclude": {
+ "ui:listFlavour": "list"
+ }
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/discover.go b/src/go/plugin/go.d/modules/filecheck/discover.go
new file mode 100644
index 000000000..29ae552c5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/discover.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filecheck
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+ "slices"
+ "strings"
+)
+
+func discoverFilesOrDirs(includePaths []string, fn func(absPath string, fi os.FileInfo) bool) []string {
+ var paths []string
+
+ for _, path := range includePaths {
+ if !hasMeta(path) {
+ paths = append(paths, path)
+ continue
+ }
+
+ ps, _ := filepath.Glob(path)
+ for _, path := range ps {
+ if fi, err := os.Lstat(path); err == nil && fn(path, fi) {
+ paths = append(paths, path)
+ }
+ }
+
+ }
+
+ slices.Sort(paths)
+ paths = slices.Compact(paths)
+
+ return paths
+}
+
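+// hasMeta reports whether path contains any of the magic characters recognized by filepath.Match.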
+func hasMeta(path string) bool {
+ magicChars := `*?[`
+ if runtime.GOOS != "windows" {
+ magicChars = `*?[\`
+ }
+ return strings.ContainsAny(path, magicChars)
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/filecheck.go b/src/go/plugin/go.d/modules/filecheck/filecheck.go
new file mode 100644
index 000000000..8d19c7c64
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/filecheck.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filecheck
+
+import (
+ _ "embed"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("filecheck", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Filecheck {
+ return &Filecheck{
+ Config: Config{
+ DiscoveryEvery: web.Duration(time.Minute * 1),
+ Files: filesConfig{},
+ Dirs: dirsConfig{CollectDirSize: false},
+ },
+ charts: &module.Charts{},
+ seenFiles: newSeenItems(),
+ seenDirs: newSeenItems(),
+ }
+}
+
+type (
+ Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ DiscoveryEvery web.Duration `yaml:"discovery_every,omitempty" json:"discovery_every"`
+ Files filesConfig `yaml:"files" json:"files"`
+ Dirs dirsConfig `yaml:"dirs" json:"dirs"`
+ }
+ filesConfig struct {
+ Include []string `yaml:"include" json:"include"`
+ Exclude []string `yaml:"exclude,omitempty" json:"exclude"`
+ }
+ dirsConfig struct {
+ Include []string `yaml:"include" json:"include"`
+ Exclude []string `yaml:"exclude,omitempty" json:"exclude"`
+ CollectDirSize bool `yaml:"collect_dir_size" json:"collect_dir_size"`
+ }
+)
+
+type Filecheck struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ filesFilter matcher.Matcher
+ lastDiscFilesTime time.Time
+ curFiles []string
+ seenFiles *seenItems
+
+ dirsFilter matcher.Matcher
+ lastDiscDirsTime time.Time
+ curDirs []string
+ seenDirs *seenItems
+}
+
+func (f *Filecheck) Configuration() any {
+ return f.Config
+}
+
+func (f *Filecheck) Init() error {
+ err := f.validateConfig()
+ if err != nil {
+ f.Errorf("config validation: %v", err)
+ return err
+ }
+
+ ff, err := f.initFilesFilter()
+ if err != nil {
+ f.Errorf("files filter initialization: %v", err)
+ return err
+ }
+ f.filesFilter = ff
+
+ df, err := f.initDirsFilter()
+ if err != nil {
+ f.Errorf("dirs filter initialization: %v", err)
+ return err
+ }
+ f.dirsFilter = df
+
+ f.Debugf("monitored files: %v", f.Files.Include)
+ f.Debugf("monitored dirs: %v", f.Dirs.Include)
+
+ return nil
+}
+
+func (f *Filecheck) Check() error {
+ return nil
+}
+
+func (f *Filecheck) Charts() *module.Charts {
+ return f.charts
+}
+
+func (f *Filecheck) Collect() map[string]int64 {
+ mx, err := f.collect()
+ if err != nil {
+ f.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (f *Filecheck) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/filecheck/filecheck_test.go b/src/go/plugin/go.d/modules/filecheck/filecheck_test.go
new file mode 100644
index 000000000..43024b0bc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/filecheck_test.go
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filecheck
+
+import (
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestFilecheck_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Filecheck{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestFilecheck_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestFilecheck_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default": {
+ wantFail: true,
+ config: New().Config,
+ },
+ "empty files->include and dirs->include": {
+ wantFail: true,
+ config: Config{
+ Files: filesConfig{},
+ Dirs: dirsConfig{},
+ },
+ },
+ "files->include and dirs->include": {
+ wantFail: false,
+ config: Config{
+ Files: filesConfig{
+ Include: []string{
+ "/path/to/file1",
+ "/path/to/file2",
+ },
+ },
+ Dirs: dirsConfig{
+ Include: []string{
+ "/path/to/dir1",
+ "/path/to/dir2",
+ },
+ CollectDirSize: true,
+ },
+ },
+ },
+ "only files->include": {
+ wantFail: false,
+ config: Config{
+ Files: filesConfig{
+ Include: []string{
+ "/path/to/file1",
+ "/path/to/file2",
+ },
+ },
+ },
+ },
+ "only dirs->include": {
+ wantFail: false,
+ config: Config{
+ Dirs: dirsConfig{
+ Include: []string{
+ "/path/to/dir1",
+ "/path/to/dir2",
+ },
+ CollectDirSize: true,
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ fc := New()
+ fc.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, fc.Init())
+ } else {
+ require.NoError(t, fc.Init())
+ }
+ })
+ }
+}
+
+func TestFilecheck_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Filecheck
+ }{
+ "collect files": {prepare: prepareFilecheckFiles},
+ "collect files filepath pattern": {prepare: prepareFilecheckGlobFiles},
+ "collect only non existent files": {prepare: prepareFilecheckNonExistentFiles},
+ "collect dirs": {prepare: prepareFilecheckDirs},
+ "collect dirs filepath pattern": {prepare: prepareFilecheckGlobDirs},
+ "collect only non existent dirs": {prepare: prepareFilecheckNonExistentDirs},
+ "collect files and dirs": {prepare: prepareFilecheckFilesDirs},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ fc := test.prepare()
+ require.NoError(t, fc.Init())
+
+ assert.NoError(t, fc.Check())
+ })
+ }
+}
+
+func TestFilecheck_Collect(t *testing.T) {
+ // TODO: should use TEMP dir and create files/dirs dynamically during a test case
+ tests := map[string]struct {
+ prepare func() *Filecheck
+ wantCollected map[string]int64
+ }{
+ "collect files": {
+ prepare: prepareFilecheckFiles,
+ wantCollected: map[string]int64{
+ "file_testdata/empty_file.log_existence_status_exist": 1,
+ "file_testdata/empty_file.log_existence_status_not_exist": 0,
+ "file_testdata/empty_file.log_mtime_ago": 517996,
+ "file_testdata/empty_file.log_size_bytes": 0,
+ "file_testdata/file.log_existence_status_exist": 1,
+ "file_testdata/file.log_existence_status_not_exist": 0,
+ "file_testdata/file.log_mtime_ago": 517996,
+ "file_testdata/file.log_size_bytes": 5707,
+ "file_testdata/non_existent_file.log_existence_status_exist": 0,
+ "file_testdata/non_existent_file.log_existence_status_not_exist": 1,
+ },
+ },
+ "collect files filepath pattern": {
+ prepare: prepareFilecheckGlobFiles,
+ wantCollected: map[string]int64{
+ "file_testdata/empty_file.log_existence_status_exist": 1,
+ "file_testdata/empty_file.log_existence_status_not_exist": 0,
+ "file_testdata/empty_file.log_mtime_ago": 517985,
+ "file_testdata/empty_file.log_size_bytes": 0,
+ "file_testdata/file.log_existence_status_exist": 1,
+ "file_testdata/file.log_existence_status_not_exist": 0,
+ "file_testdata/file.log_mtime_ago": 517985,
+ "file_testdata/file.log_size_bytes": 5707,
+ },
+ },
+ "collect only non existent files": {
+ prepare: prepareFilecheckNonExistentFiles,
+ wantCollected: map[string]int64{
+ "file_testdata/non_existent_file.log_existence_status_exist": 0,
+ "file_testdata/non_existent_file.log_existence_status_not_exist": 1,
+ },
+ },
+ "collect dirs": {
+ prepare: prepareFilecheckDirs,
+ wantCollected: map[string]int64{
+ "dir_testdata/dir_existence_status_exist": 1,
+ "dir_testdata/dir_existence_status_not_exist": 0,
+ "dir_testdata/dir_files_count": 3,
+ "dir_testdata/dir_mtime_ago": 517914,
+ "dir_testdata/non_existent_dir_existence_status_exist": 0,
+ "dir_testdata/non_existent_dir_existence_status_not_exist": 1,
+ },
+ },
+ "collect dirs filepath pattern": {
+ prepare: prepareFilecheckGlobDirs,
+ wantCollected: map[string]int64{
+ "dir_testdata/dir_existence_status_exist": 1,
+ "dir_testdata/dir_existence_status_not_exist": 0,
+ "dir_testdata/dir_files_count": 3,
+ "dir_testdata/dir_mtime_ago": 517902,
+ "dir_testdata/non_existent_dir_existence_status_exist": 0,
+ "dir_testdata/non_existent_dir_existence_status_not_exist": 1,
+ },
+ },
+ "collect dirs w/o size": {
+ prepare: prepareFilecheckDirsWithoutSize,
+ wantCollected: map[string]int64{
+ "dir_testdata/dir_existence_status_exist": 1,
+ "dir_testdata/dir_existence_status_not_exist": 0,
+ "dir_testdata/dir_files_count": 3,
+ "dir_testdata/dir_mtime_ago": 517892,
+ "dir_testdata/non_existent_dir_existence_status_exist": 0,
+ "dir_testdata/non_existent_dir_existence_status_not_exist": 1,
+ },
+ },
+ "collect only non existent dirs": {
+ prepare: prepareFilecheckNonExistentDirs,
+ wantCollected: map[string]int64{
+ "dir_testdata/non_existent_dir_existence_status_exist": 0,
+ "dir_testdata/non_existent_dir_existence_status_not_exist": 1,
+ },
+ },
+ "collect files and dirs": {
+ prepare: prepareFilecheckFilesDirs,
+ wantCollected: map[string]int64{
+ "dir_testdata/dir_existence_status_exist": 1,
+ "dir_testdata/dir_existence_status_not_exist": 0,
+ "dir_testdata/dir_files_count": 3,
+ "dir_testdata/dir_mtime_ago": 517858,
+ "dir_testdata/dir_size_bytes": 8160,
+ "dir_testdata/non_existent_dir_existence_status_exist": 0,
+ "dir_testdata/non_existent_dir_existence_status_not_exist": 1,
+ "file_testdata/empty_file.log_existence_status_exist": 1,
+ "file_testdata/empty_file.log_existence_status_not_exist": 0,
+ "file_testdata/empty_file.log_mtime_ago": 517858,
+ "file_testdata/empty_file.log_size_bytes": 0,
+ "file_testdata/file.log_existence_status_exist": 1,
+ "file_testdata/file.log_existence_status_not_exist": 0,
+ "file_testdata/file.log_mtime_ago": 517858,
+ "file_testdata/file.log_size_bytes": 5707,
+ "file_testdata/non_existent_file.log_existence_status_exist": 0,
+ "file_testdata/non_existent_file.log_existence_status_not_exist": 1,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ fc := test.prepare()
+ require.NoError(t, fc.Init())
+
+ mx := fc.Collect()
+
+ copyModTime(test.wantCollected, mx)
+ assert.Equal(t, test.wantCollected, mx)
+ testMetricsHasAllChartsDims(t, fc, mx)
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, fc *Filecheck, mx map[string]int64) {
+ for _, chart := range *fc.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "mx metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ }
+}
+
+func prepareFilecheckFiles() *Filecheck {
+ fc := New()
+ fc.Config.Files.Include = []string{
+ "testdata/empty_file.log",
+ "testdata/file.log",
+ "testdata/non_existent_file.log",
+ }
+ return fc
+}
+
+func prepareFilecheckGlobFiles() *Filecheck {
+ fc := New()
+ fc.Config.Files.Include = []string{
+ "testdata/*.log",
+ }
+ return fc
+}
+
+func prepareFilecheckNonExistentFiles() *Filecheck {
+ fc := New()
+ fc.Config.Files.Include = []string{
+ "testdata/non_existent_file.log",
+ }
+ return fc
+}
+
+func prepareFilecheckDirs() *Filecheck {
+ fc := New()
+ fc.Config.Dirs.Include = []string{
+ "testdata/dir",
+ "testdata/non_existent_dir",
+ }
+ return fc
+}
+
+func prepareFilecheckGlobDirs() *Filecheck {
+ fc := New()
+ fc.Config.Dirs.Include = []string{
+ "testdata/*ir",
+ "testdata/non_existent_dir",
+ }
+ return fc
+}
+
+func prepareFilecheckDirsWithoutSize() *Filecheck {
+ fc := New()
+ fc.Config.Dirs.Include = []string{
+ "testdata/dir",
+ "testdata/non_existent_dir",
+ }
+ return fc
+}
+
+func prepareFilecheckNonExistentDirs() *Filecheck {
+ fc := New()
+ fc.Config.Dirs.Include = []string{
+ "testdata/non_existent_dir",
+ }
+ return fc
+}
+
+func prepareFilecheckFilesDirs() *Filecheck {
+ fc := New()
+ fc.Config.Dirs.CollectDirSize = true
+ fc.Config.Files.Include = []string{
+ "testdata/empty_file.log",
+ "testdata/file.log",
+ "testdata/non_existent_file.log",
+ }
+ fc.Config.Dirs.Include = []string{
+ "testdata/dir",
+ "testdata/non_existent_dir",
+ }
+ return fc
+}
+
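+// copyModTime copies the mtime-related values from src into dst so that the
+// time-dependent "mtime_ago" expectations in wantCollected do not become stale.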
+func copyModTime(dst, src map[string]int64) {
+ if src == nil || dst == nil {
+ return
+ }
+ for key := range src {
+ if strings.Contains(key, "mtime") {
+ dst[key] = src[key]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/init.go b/src/go/plugin/go.d/modules/filecheck/init.go
new file mode 100644
index 000000000..20b30964f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/init.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package filecheck
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+func (f *Filecheck) validateConfig() error {
+ if len(f.Files.Include) == 0 && len(f.Dirs.Include) == 0 {
+ return errors.New("both 'files->include' and 'dirs->include' are empty")
+ }
+ return nil
+}
+
+func (f *Filecheck) initFilesFilter() (matcher.Matcher, error) {
+ return newFilter(f.Files.Exclude)
+}
+
+func (f *Filecheck) initDirsFilter() (matcher.Matcher, error) {
+ return newFilter(f.Dirs.Exclude)
+}
+
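+// newFilter ORs together a glob matcher per exclude pattern; with no patterns
+// it is matcher.FALSE(), i.e. nothing is excluded.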
+func newFilter(patterns []string) (matcher.Matcher, error) {
+ filter := matcher.FALSE()
+
+ for _, s := range patterns {
+ m, err := matcher.NewGlobMatcher(s)
+ if err != nil {
+ return nil, err
+ }
+ filter = matcher.Or(filter, m)
+ }
+
+ return filter, nil
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/integrations/files_and_directories.md b/src/go/plugin/go.d/modules/filecheck/integrations/files_and_directories.md
new file mode 100644
index 000000000..ed131a125
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/integrations/files_and_directories.md
@@ -0,0 +1,280 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/filecheck/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/filecheck/metadata.yaml"
+sidebar_label: "Files and directories"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Other"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Files and directories
+
+
+<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: filecheck
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the existence, last modification time, and size of arbitrary files and directories on the system.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+This collector requires the DAC_READ_SEARCH capability to monitor files that are not normally readable by the Netdata user. The capability is set automatically during installation, so no manual configuration is needed.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per file
+
+These metrics refer to the File.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| file_path | File absolute path |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| filecheck.file_existence_status | exist, not_exist | status |
+| filecheck.file_modification_time_ago | mtime_ago | seconds |
+| filecheck.file_size_bytes | size | bytes |
+
+### Per directory
+
+These metrics refer to the Directory.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| dir_path | Directory absolute path |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| filecheck.dir_existence_status | exist, not_exist | status |
+| filecheck.dir_modification_time_ago | mtime_ago | seconds |
+| filecheck.dir_size_bytes | size | bytes |
+| filecheck.dir_files_count | files | files |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/filecheck.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/filecheck.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| files | List of files to monitor. | | yes |
+| dirs | List of directories to monitor. | | yes |
+| discovery_every | Files and directories discovery interval. | 60 | no |
+
+##### files
+
+Files matching the selector will be monitored.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)
+- Syntax (an illustrative Go sketch of this matching logic follows the block below):
+
+```yaml
+files:
+ include:
+ - pattern1
+ - pattern2
+ exclude:
+ - pattern3
+ - pattern4
+```
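+
+To make the logic above concrete, the snippet below is a minimal, illustrative Go sketch of the documented (include OR ...) AND !(exclude OR ...) behaviour using `path/filepath` matching. The `selected` helper and the example paths are placeholders for illustration only, not the collector's actual implementation:
+
+```go
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+)
+
+// selected reports whether path matches at least one include pattern
+// and none of the exclude patterns: (inc1 OR inc2 ...) AND !(exc1 OR exc2 ...).
+func selected(path string, include, exclude []string) bool {
+	matchAny := func(patterns []string) bool {
+		for _, p := range patterns {
+			if ok, _ := filepath.Match(p, path); ok {
+				return true
+			}
+		}
+		return false
+	}
+	return matchAny(include) && !matchAny(exclude)
+}
+
+func main() {
+	include := []string{"/var/log/*.log"}
+	exclude := []string{"/var/log/*debug*"}
+	fmt.Println(selected("/var/log/app.log", include, exclude))       // true
+	fmt.Println(selected("/var/log/app_debug.log", include, exclude)) // false
+}
+```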
+
+
+##### dirs
+
+Directories matching the selector will be monitored.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)
+- Syntax:
+
+```yaml
+dirs:
+ include:
+ - pattern1
+ - pattern2
+ exclude:
+ - pattern3
+ - pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Files
+
+Files monitoring example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: files_example
+ files:
+ include:
+ - '/path/to/file1'
+ - '/path/to/file2'
+ - '/path/to/*.log'
+
+```
+</details>
+
+##### Directories
+
+Directories monitoring example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: files_example
+ dirs:
+ collect_dir_size: no
+ include:
+ - '/path/to/dir1'
+ - '/path/to/dir2'
+ - '/path/to/dir3*'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `filecheck` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m filecheck
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `filecheck` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep filecheck
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep filecheck /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep filecheck
+```
+
+
diff --git a/src/go/plugin/go.d/modules/filecheck/metadata.yaml b/src/go/plugin/go.d/modules/filecheck/metadata.yaml
new file mode 100644
index 000000000..446226f22
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/metadata.yaml
@@ -0,0 +1,198 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-filecheck
+ plugin_name: go.d.plugin
+ module_name: filecheck
+ monitored_instance:
+ name: Files and directories
+ link: ""
+ icon_filename: filesystem.svg
+ categories:
+ - data-collection.other
+ keywords:
+ - files
+ - directories
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the existence, last modification time, and size of arbitrary files and directories on the system.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: |
+ This collector requires the DAC_READ_SEARCH capability to monitor files that are not normally readable by the Netdata user. The capability is set automatically during installation, so no manual configuration is needed.
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/filecheck.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: files
+ description: List of files to monitor.
+ default_value: ""
+ required: true
+ detailed_description: |
+ Files matching the selector will be monitored.
+
+ - Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+ - Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)
+ - Syntax:
+
+ ```yaml
+ files:
+ include:
+ - pattern1
+ - pattern2
+ exclude:
+ - pattern3
+ - pattern4
+ ```
+ - name: dirs
+ description: List of directories to monitor.
+ default_value: ""
+ required: true
+ detailed_description: |
+ Directories matching the selector will be monitored.
+
+ - Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+ - Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)
+ - Syntax:
+
+ ```yaml
+ dirs:
+ include:
+ - pattern1
+ - pattern2
+ exclude:
+ - pattern3
+ - pattern4
+ ```
+ - name: discovery_every
+ description: Files and directories discovery interval.
+ default_value: 60
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Files
+ description: Files monitoring example configuration.
+ config: |
+ jobs:
+ - name: files_example
+ files:
+ include:
+ - '/path/to/file1'
+ - '/path/to/file2'
+ - '/path/to/*.log'
+ - name: Directories
+ description: Directories monitoring example configuration.
+ config: |
+ jobs:
+ - name: files_example
+ dirs:
+ collect_dir_size: no
+ include:
+ - '/path/to/dir1'
+ - '/path/to/dir2'
+ - '/path/to/dir3*'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: file
+ description: These metrics refer to the File.
+ labels:
+ - name: file_path
+ description: File absolute path
+ metrics:
+ - name: filecheck.file_existence_status
+ description: File existence
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: exist
+ - name: not_exist
+ - name: filecheck.file_modification_time_ago
+ description: File time since the last modification
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: mtime_ago
+ - name: filecheck.file_size_bytes
+ description: File size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: directory
+ description: These metrics refer to the Directory.
+ labels:
+ - name: dir_path
+ description: Directory absolute path
+ metrics:
+ - name: filecheck.dir_existence_status
+ description: Directory existence
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: exist
+ - name: not_exist
+ - name: filecheck.dir_modification_time_ago
+ description: Directory time since the last modification
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: mtime_ago
+ - name: filecheck.dir_size_bytes
+ description: Directory size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: filecheck.dir_files_count
+ description: Directory files count
+ unit: files
+ chart_type: line
+ dimensions:
+ - name: files
diff --git a/src/go/plugin/go.d/modules/filecheck/testdata/config.json b/src/go/plugin/go.d/modules/filecheck/testdata/config.json
new file mode 100644
index 000000000..93d286f84
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "discovery_every": 123.123,
+ "files": {
+ "include": [
+ "ok"
+ ],
+ "exclude": [
+ "ok"
+ ]
+ },
+ "dirs": {
+ "include": [
+ "ok"
+ ],
+ "exclude": [
+ "ok"
+ ],
+ "collect_dir_size": true
+ }
+}
diff --git a/src/go/plugin/go.d/modules/filecheck/testdata/config.yaml b/src/go/plugin/go.d/modules/filecheck/testdata/config.yaml
new file mode 100644
index 000000000..494a21855
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/testdata/config.yaml
@@ -0,0 +1,13 @@
+update_every: 123
+discovery_every: 123.123
+files:
+ include:
+ - "ok"
+ exclude:
+ - "ok"
+dirs:
+ include:
+ - "ok"
+ exclude:
+ - "ok"
+ collect_dir_size: yes
diff --git a/src/go/plugin/go.d/modules/filecheck/testdata/dir/empty_file.log b/src/go/plugin/go.d/modules/filecheck/testdata/dir/empty_file.log
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/testdata/dir/empty_file.log
diff --git a/src/go/plugin/go.d/modules/filecheck/testdata/dir/file.log b/src/go/plugin/go.d/modules/filecheck/testdata/dir/file.log
new file mode 100644
index 000000000..c1c152a81
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/testdata/dir/file.log
@@ -0,0 +1,61 @@
+198.51.100.1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 301 4715 4113 174 465 https TLSv1.2 ECDHE-RSA-AES256-SHA dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 1130 1202 409 450 https TLSv1 DHE-RSA-AES256-SHA light beer
+198.51.100.1:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 201 4020 1217 492 135 https TLSv1.2 PSK-RC4-SHA light wine
+test.example.org:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 401 3784 2349 266 63 http TLSv1 ECDHE-RSA-AES256-SHA dark wine
+localhost:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 201 2149 3834 178 197 https TLSv1.1 AES256-SHA dark wine
+198.51.100.1:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 200 1442 4125 23 197 https TLSv1.3 DHE-RSA-AES256-SHA light wine
+test.example.com:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 300 4134 3965 259 296 https TLSv1.3 PSK-RC4-SHA dark wine
+test.example.com:84 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 401 1224 3352 135 468 http SSLv2 PSK-RC4-SHA light wine
+localhost:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 200 2504 4754 58 371 http TLSv1.1 DHE-RSA-AES256-SHA dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 200 4898 2787 398 476 http SSLv2 DHE-RSA-AES256-SHA dark beer
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 100 4957 1848 324 158 https TLSv1.2 AES256-SHA dark wine
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 301 1752 1717 75 317 https SSLv3 PSK-RC4-SHA dark wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 301 3799 4120 71 17 http TLSv1.3 ECDHE-RSA-AES256-SHA dark beer
+198.51.100.1:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 101 1870 3945 392 323 http TLSv1.1 PSK-RC4-SHA light beer
+test.example.com:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 200 1261 3535 52 271 https TLSv1.1 DHE-RSA-AES256-SHA dark wine
+test.example.com:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 101 3228 3545 476 168 http TLSv1.1 AES256-SHA light beer
+test.example.com:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 300 4731 1574 362 184 https SSLv2 ECDHE-RSA-AES256-SHA light wine
+198.51.100.1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 300 4868 1803 23 388 https TLSv1.3 DHE-RSA-AES256-SHA dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 100 3744 3546 296 437 http SSLv2 DHE-RSA-AES256-SHA light beer
+test.example.org:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 401 4858 1493 151 240 http SSLv2 AES256-SHA light wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 300 1367 4284 45 443 https TLSv1.1 AES256-SHA light beer
+localhost:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 100 4392 4982 143 110 http SSLv3 AES256-SHA light beer
+2001:db8:1ce::1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 101 4606 3311 410 273 https TLSv1 PSK-RC4-SHA dark beer
+198.51.100.1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 100 1163 1526 10 186 https SSLv2 AES256-SHA light beer
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 301 3262 3789 144 124 https TLSv1.3 DHE-RSA-AES256-SHA light wine
+198.51.100.1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 400 1365 1447 325 186 http TLSv1.2 PSK-RC4-SHA dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 4546 4409 295 153 http SSLv3 ECDHE-RSA-AES256-SHA light beer
+localhost:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 300 2297 3318 139 227 https TLSv1 ECDHE-RSA-AES256-SHA dark wine
+localhost:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 4671 4285 371 7 https SSLv3 ECDHE-RSA-AES256-SHA dark beer
+test.example.org:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 400 3651 1135 172 159 https TLSv1.1 DHE-RSA-AES256-SHA light beer
+localhost:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 101 3958 3959 350 121 https SSLv2 DHE-RSA-AES256-SHA dark beer
+localhost:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 200 1652 3813 190 11 https SSLv3 AES256-SHA dark wine
+test.example.org:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 101 1228 2344 251 366 https TLSv1 ECDHE-RSA-AES256-SHA light beer
+test.example.org:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 200 1860 3118 187 419 https TLSv1 PSK-RC4-SHA light wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost:82 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 401 4518 3837 18 219 http TLSv1.3 DHE-RSA-AES256-SHA dark beer
+localhost:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 201 2108 2472 257 470 http TLSv1.1 PSK-RC4-SHA dark beer
+2001:db8:1ce::1:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 101 2020 1076 262 106 https TLSv1.3 PSK-RC4-SHA light wine
+localhost:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 100 4815 3052 49 322 https TLSv1.3 DHE-RSA-AES256-SHA light beer
+2001:db8:1ce::1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 300 1642 4001 421 194 https TLSv1 PSK-RC4-SHA light wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 201 3805 2597 25 187 http TLSv1.1 AES256-SHA dark wine
+2001:db8:1ce::1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 301 3435 1760 474 318 https TLSv1.2 ECDHE-RSA-AES256-SHA light wine
+localhost:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 101 1911 4082 356 301 https TLSv1 DHE-RSA-AES256-SHA light beer
+2001:db8:1ce::1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 100 2536 1664 115 474 http SSLv3 PSK-RC4-SHA dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 401 3757 3987 441 469 http SSLv2 ECDHE-RSA-AES256-SHA dark wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 400 1221 4244 232 421 https TLSv1.1 ECDHE-RSA-AES256-SHA dark wine
+localhost:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 101 2001 2405 6 140 http TLSv1 DHE-RSA-AES256-SHA light wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+198.51.100.1:81 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 400 4442 4396 64 49 https TLSv1.1 AES256-SHA light beer
+2001:db8:1ce::1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 401 1461 4623 46 47 https TLSv1.3 ECDHE-RSA-AES256-SHA light beer
+Unmatched! The rat the cat the dog chased killed ate the malt! \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/filecheck/testdata/dir/subdir/empty_file.log b/src/go/plugin/go.d/modules/filecheck/testdata/dir/subdir/empty_file.log
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/testdata/dir/subdir/empty_file.log
diff --git a/src/go/plugin/go.d/modules/filecheck/testdata/empty_file.log b/src/go/plugin/go.d/modules/filecheck/testdata/empty_file.log
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/testdata/empty_file.log
diff --git a/src/go/plugin/go.d/modules/filecheck/testdata/file.log b/src/go/plugin/go.d/modules/filecheck/testdata/file.log
new file mode 100644
index 000000000..e0db68517
--- /dev/null
+++ b/src/go/plugin/go.d/modules/filecheck/testdata/file.log
@@ -0,0 +1,42 @@
+198.51.100.1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 301 4715 4113 174 465 https TLSv1.2 ECDHE-RSA-AES256-SHA dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 1130 1202 409 450 https TLSv1 DHE-RSA-AES256-SHA light beer
+198.51.100.1:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 201 4020 1217 492 135 https TLSv1.2 PSK-RC4-SHA light wine
+test.example.org:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 401 3784 2349 266 63 http TLSv1 ECDHE-RSA-AES256-SHA dark wine
+localhost:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 201 2149 3834 178 197 https TLSv1.1 AES256-SHA dark wine
+198.51.100.1:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 200 1442 4125 23 197 https TLSv1.3 DHE-RSA-AES256-SHA light wine
+test.example.com:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 300 4134 3965 259 296 https TLSv1.3 PSK-RC4-SHA dark wine
+test.example.com:84 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 401 1224 3352 135 468 http SSLv2 PSK-RC4-SHA light wine
+localhost:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 200 2504 4754 58 371 http TLSv1.1 DHE-RSA-AES256-SHA dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 200 4898 2787 398 476 http SSLv2 DHE-RSA-AES256-SHA dark beer
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 100 4957 1848 324 158 https TLSv1.2 AES256-SHA dark wine
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 301 1752 1717 75 317 https SSLv3 PSK-RC4-SHA dark wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 301 3799 4120 71 17 http TLSv1.3 ECDHE-RSA-AES256-SHA dark beer
+198.51.100.1:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 101 1870 3945 392 323 http TLSv1.1 PSK-RC4-SHA light beer
+test.example.com:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 200 1261 3535 52 271 https TLSv1.1 DHE-RSA-AES256-SHA dark wine
+test.example.com:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 101 3228 3545 476 168 http TLSv1.1 AES256-SHA light beer
+test.example.com:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 300 4731 1574 362 184 https SSLv2 ECDHE-RSA-AES256-SHA light wine
+198.51.100.1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 300 4868 1803 23 388 https TLSv1.3 DHE-RSA-AES256-SHA dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 100 3744 3546 296 437 http SSLv2 DHE-RSA-AES256-SHA light beer
+test.example.org:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 401 4858 1493 151 240 http SSLv2 AES256-SHA light wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 300 1367 4284 45 443 https TLSv1.1 AES256-SHA light beer
+localhost:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 100 4392 4982 143 110 http SSLv3 AES256-SHA light beer
+2001:db8:1ce::1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 101 4606 3311 410 273 https TLSv1 PSK-RC4-SHA dark beer
+198.51.100.1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 100 1163 1526 10 186 https SSLv2 AES256-SHA light beer
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 301 3262 3789 144 124 https TLSv1.3 DHE-RSA-AES256-SHA light wine
+198.51.100.1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 400 1365 1447 325 186 http TLSv1.2 PSK-RC4-SHA dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 4546 4409 295 153 http SSLv3 ECDHE-RSA-AES256-SHA light beer
+localhost:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 300 2297 3318 139 227 https TLSv1 ECDHE-RSA-AES256-SHA dark wine
+localhost:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 4671 4285 371 7 https SSLv3 ECDHE-RSA-AES256-SHA dark beer
+test.example.org:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 400 3651 1135 172 159 https TLSv1.1 DHE-RSA-AES256-SHA light beer
+localhost:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 101 3958 3959 350 121 https SSLv2 DHE-RSA-AES256-SHA dark beer
+localhost:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 200 1652 3813 190 11 https SSLv3 AES256-SHA dark wine
+test.example.org:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 101 1228 2344 251 366 https TLSv1 ECDHE-RSA-AES256-SHA light beer
+test.example.org:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 200 1860 3118 187 419 https TLSv1 PSK-RC4-SHA light wine
+Unmatched! The rat the cat the dog chased killed ate the malt! \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/fluentd/README.md b/src/go/plugin/go.d/modules/fluentd/README.md
new file mode 120000
index 000000000..96241702f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/README.md
@@ -0,0 +1 @@
+integrations/fluentd.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/fluentd/apiclient.go b/src/go/plugin/go.d/modules/fluentd/apiclient.go
new file mode 100644
index 000000000..1c6bf85a9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/apiclient.go
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fluentd
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const pluginsPath = "/api/plugins.json"
+
+type pluginsInfo struct {
+ Payload []pluginData `json:"plugins"`
+}
+
+type pluginData struct {
+ ID string `json:"plugin_id"`
+ Type string `json:"type"`
+ Category string `json:"plugin_category"`
+ RetryCount *int64 `json:"retry_count"`
+ BufferTotalQueuedSize *int64 `json:"buffer_total_queued_size"`
+ BufferQueueLength *int64 `json:"buffer_queue_length"`
+}
+
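+// hasCategory reports whether retry_count is present in the plugin data;
+// despite its name, it does not inspect the plugin_category field.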
+func (p pluginData) hasCategory() bool {
+ return p.RetryCount != nil
+}
+
+func (p pluginData) hasBufferQueueLength() bool {
+ return p.BufferQueueLength != nil
+}
+
+func (p pluginData) hasBufferTotalQueuedSize() bool {
+ return p.BufferTotalQueuedSize != nil
+}
+
+func newAPIClient(client *http.Client, request web.Request) *apiClient {
+ return &apiClient{httpClient: client, request: request}
+}
+
+type apiClient struct {
+ httpClient *http.Client
+ request web.Request
+}
+
+func (a apiClient) getPluginsInfo() (*pluginsInfo, error) {
+ req, err := a.createRequest(pluginsPath)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating request : %v", err)
+ }
+
+ resp, err := a.doRequestOK(req)
+ defer closeBody(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ var info pluginsInfo
+ if err = json.NewDecoder(resp.Body).Decode(&info); err != nil {
+ return nil, fmt.Errorf("error on decoding response from %s : %v", req.URL, err)
+ }
+
+ return &info, nil
+}
+
+func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
+ resp, err := a.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on request: %v", err)
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+ }
+ return resp, nil
+}
+
+func (a apiClient) createRequest(urlPath string) (*http.Request, error) {
+ req := a.request.Copy()
+ u, err := url.Parse(req.URL)
+ if err != nil {
+ return nil, err
+ }
+
+ u.Path = path.Join(u.Path, urlPath)
+ req.URL = u.String()
+ return web.NewHTTPRequest(req)
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/fluentd/charts.go b/src/go/plugin/go.d/modules/fluentd/charts.go
new file mode 100644
index 000000000..b0034c026
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/charts.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fluentd
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dim is an alias for module.Dim
+ Dim = module.Dim
+)
+
+// TODO: units for buffer charts
+var charts = Charts{
+ {
+ ID: "retry_count",
+ Title: "Plugin Retry Count",
+ Units: "count",
+ Fam: "retry count",
+ Ctx: "fluentd.retry_count",
+ },
+ {
+ ID: "buffer_queue_length",
+ Title: "Plugin Buffer Queue Length",
+ Units: "queue length",
+ Fam: "buffer",
+ Ctx: "fluentd.buffer_queue_length",
+ },
+ {
+ ID: "buffer_total_queued_size",
+ Title: "Plugin Buffer Total Size",
+ Units: "buffer total size",
+ Fam: "buffer",
+ Ctx: "fluentd.buffer_total_queued_size",
+ },
+}
diff --git a/src/go/plugin/go.d/modules/fluentd/collect.go b/src/go/plugin/go.d/modules/fluentd/collect.go
new file mode 100644
index 000000000..14ee6df68
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/collect.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fluentd
+
+import "fmt"
+
+func (f *Fluentd) collect() (map[string]int64, error) {
+ info, err := f.apiClient.getPluginsInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ for _, p := range info.Payload {
+ // TODO: if p.Category == "input" ?
+ if !p.hasCategory() && !p.hasBufferQueueLength() && !p.hasBufferTotalQueuedSize() {
+ continue
+ }
+
+ if f.permitPlugin != nil && !f.permitPlugin.MatchString(p.ID) {
+ f.Debugf("plugin id: '%s', type: '%s', category: '%s' denied", p.ID, p.Type, p.Category)
+ continue
+ }
+
+ id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category)
+
+ if p.hasCategory() {
+ mx[id+"_retry_count"] = *p.RetryCount
+ }
+ if p.hasBufferQueueLength() {
+ mx[id+"_buffer_queue_length"] = *p.BufferQueueLength
+ }
+ if p.hasBufferTotalQueuedSize() {
+ mx[id+"_buffer_total_queued_size"] = *p.BufferTotalQueuedSize
+ }
+
+ if !f.activePlugins[id] {
+ f.activePlugins[id] = true
+ f.addPluginToCharts(p)
+ }
+
+ }
+
+ return mx, nil
+}
+
+func (f *Fluentd) addPluginToCharts(p pluginData) {
+ id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category)
+
+ if p.hasCategory() {
+ chart := f.charts.Get("retry_count")
+ _ = chart.AddDim(&Dim{ID: id + "_retry_count", Name: p.ID})
+ chart.MarkNotCreated()
+ }
+ if p.hasBufferQueueLength() {
+ chart := f.charts.Get("buffer_queue_length")
+ _ = chart.AddDim(&Dim{ID: id + "_buffer_queue_length", Name: p.ID})
+ chart.MarkNotCreated()
+ }
+ if p.hasBufferTotalQueuedSize() {
+ chart := f.charts.Get("buffer_total_queued_size")
+ _ = chart.AddDim(&Dim{ID: id + "_buffer_total_queued_size", Name: p.ID})
+ chart.MarkNotCreated()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/fluentd/config_schema.json b/src/go/plugin/go.d/modules/fluentd/config_schema.json
new file mode 100644
index 000000000..037420f74
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Fluentd collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Fluentd [monitoring agent](https://docs.fluentd.org/monitoring-fluentd/monitoring-rest-api).",
+ "type": "string",
+ "default": "http://127.0.0.1:24220",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/fluentd/fluentd.go b/src/go/plugin/go.d/modules/fluentd/fluentd.go
new file mode 100644
index 000000000..467edaac8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/fluentd.go
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fluentd
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("fluentd", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Fluentd {
+ return &Fluentd{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:24220",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ }},
+ activePlugins: make(map[string]bool),
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ PermitPlugin string `yaml:"permit_plugin_id,omitempty" json:"permit_plugin_id"`
+}
+
+type Fluentd struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ apiClient *apiClient
+
+ permitPlugin matcher.Matcher
+ activePlugins map[string]bool
+}
+
+func (f *Fluentd) Configuration() any {
+ return f.Config
+}
+
+func (f *Fluentd) Init() error {
+ if err := f.validateConfig(); err != nil {
+ f.Error(err)
+ return err
+ }
+
+ pm, err := f.initPermitPluginMatcher()
+ if err != nil {
+ f.Error(err)
+ return err
+ }
+ f.permitPlugin = pm
+
+ client, err := f.initApiClient()
+ if err != nil {
+ f.Error(err)
+ return err
+ }
+ f.apiClient = client
+
+ f.Debugf("using URL %s", f.URL)
+ f.Debugf("using timeout: %s", f.Timeout.Duration())
+
+ return nil
+}
+
+func (f *Fluentd) Check() error {
+ mx, err := f.collect()
+ if err != nil {
+ f.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+
+ }
+ return nil
+}
+
+func (f *Fluentd) Charts() *Charts {
+ return f.charts
+}
+
+func (f *Fluentd) Collect() map[string]int64 {
+ mx, err := f.collect()
+
+ if err != nil {
+ f.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (f *Fluentd) Cleanup() {
+ if f.apiClient != nil && f.apiClient.httpClient != nil {
+ f.apiClient.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/fluentd/fluentd_test.go b/src/go/plugin/go.d/modules/fluentd/fluentd_test.go
new file mode 100644
index 000000000..e21b58fc5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/fluentd_test.go
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fluentd
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataPluginsMetrics, _ = os.ReadFile("testdata/plugins.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataPluginsMetrics": dataPluginsMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestFluentd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Fluentd{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestFluentd_Init(t *testing.T) {
+ // OK
+ job := New()
+ assert.NoError(t, job.Init())
+ assert.NotNil(t, job.apiClient)
+
+ //NG
+ job = New()
+ job.URL = ""
+ assert.Error(t, job.Init())
+}
+
+func TestFluentd_Check(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataPluginsMetrics)
+ }))
+ defer ts.Close()
+
+ // OK
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ // NG
+ job = New()
+ job.URL = "http://127.0.0.1:38001/api/plugins.json"
+ require.NoError(t, job.Init())
+ require.Error(t, job.Check())
+}
+
+func TestFluentd_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestFluentd_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestFluentd_Collect(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataPluginsMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "output_stdout_stdout_output_retry_count": 0,
+ "output_td_tdlog_output_retry_count": 0,
+ "output_td_tdlog_output_buffer_queue_length": 0,
+ "output_td_tdlog_output_buffer_total_queued_size": 0,
+ }
+ assert.Equal(t, expected, job.Collect())
+ assert.Len(t, job.charts.Get("retry_count").Dims, 2)
+ assert.Len(t, job.charts.Get("buffer_queue_length").Dims, 1)
+ assert.Len(t, job.charts.Get("buffer_total_queued_size").Dims, 1)
+}
+
+func TestFluentd_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestFluentd_404(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(404)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
diff --git a/src/go/plugin/go.d/modules/fluentd/init.go b/src/go/plugin/go.d/modules/fluentd/init.go
new file mode 100644
index 000000000..6ee71c0a6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/init.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fluentd
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (f *Fluentd) validateConfig() error {
+ if f.URL == "" {
+ return errors.New("url not set")
+ }
+
+ return nil
+}
+
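+// initPermitPluginMatcher builds a simple-patterns matcher from permit_plugin_id;
+// when the option is empty, all plugins are permitted (matcher.TRUE()).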
+func (f *Fluentd) initPermitPluginMatcher() (matcher.Matcher, error) {
+ if f.PermitPlugin == "" {
+ return matcher.TRUE(), nil
+ }
+
+ return matcher.NewSimplePatternsMatcher(f.PermitPlugin)
+}
+
+func (f *Fluentd) initApiClient() (*apiClient, error) {
+ client, err := web.NewHTTPClient(f.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return newAPIClient(client, f.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/fluentd/integrations/fluentd.md b/src/go/plugin/go.d/modules/fluentd/integrations/fluentd.md
new file mode 100644
index 000000000..b4740a77a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/integrations/fluentd.md
@@ -0,0 +1,256 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/fluentd/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/fluentd/metadata.yaml"
+sidebar_label: "Fluentd"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Logs Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Fluentd
+
+
+<img src="https://netdata.cloud/img/fluentd.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: fluentd
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Fluentd servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Fluentd instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| fluentd.retry_count | a dimension per plugin | count |
+| fluentd.buffer_queue_length | a dimension per plugin | queue_length |
+| fluentd.buffer_total_queued_size | a dimension per plugin | queued_size |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable monitor agent
+
+To enable the monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).
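+
+A typical `monitor_agent` source section looks like the sketch below (the `bind` address and `port` shown are illustrative defaults, not values required by this collector; adjust them to your environment):
+
+```text
+<source>
+  @type monitor_agent
+  bind 0.0.0.0
+  port 24220
+</source>
+```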
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/fluentd.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/fluentd.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:24220 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:24220
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:24220
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Fluentd with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:24220
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:24220
+
+ - name: remote
+ url: http://192.0.2.1:24220
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `fluentd` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m fluentd
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `fluentd` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep fluentd
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep fluentd /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep fluentd
+```
+
+
diff --git a/src/go/plugin/go.d/modules/fluentd/metadata.yaml b/src/go/plugin/go.d/modules/fluentd/metadata.yaml
new file mode 100644
index 000000000..0a6a66058
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/metadata.yaml
@@ -0,0 +1,192 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-fluentd
+ plugin_name: go.d.plugin
+ module_name: fluentd
+ monitored_instance:
+ name: Fluentd
+ link: https://www.fluentd.org/
+ icon_filename: fluentd.svg
+ categories:
+ - data-collection.logs-servers
+ keywords:
+ - fluentd
+ - logging
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Fluentd servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable monitor agent
+ description: |
+ To enable monitor agent, follow the [official documentation](https://docs.fluentd.org/v1.0/articles/monitoring-rest-api).
+ configuration:
+ file:
+ name: go.d/fluentd.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:24220
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:24220
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:24220
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: Fluentd with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:24220
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:24220
+
+ - name: remote
+ url: http://192.0.2.1:24220
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: fluentd.retry_count
+ description: Plugin Retry Count
+ unit: count
+ chart_type: line
+ dimensions:
+ - name: a dimension per plugin
+ - name: fluentd.buffer_queue_length
+ description: Plugin Buffer Queue Length
+ unit: queue_length
+ chart_type: line
+ dimensions:
+ - name: a dimension per plugin
+ - name: fluentd.buffer_total_queued_size
+ description: Plugin Buffer Total Size
+ unit: queued_size
+ chart_type: line
+ dimensions:
+ - name: a dimension per plugin
diff --git a/src/go/plugin/go.d/modules/fluentd/testdata/config.json b/src/go/plugin/go.d/modules/fluentd/testdata/config.json
new file mode 100644
index 000000000..6477bd57d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "permit_plugin_id": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/fluentd/testdata/config.yaml b/src/go/plugin/go.d/modules/fluentd/testdata/config.yaml
new file mode 100644
index 000000000..0afd42e67
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+permit_plugin_id: "ok"
diff --git a/src/go/plugin/go.d/modules/fluentd/testdata/plugins.json b/src/go/plugin/go.d/modules/fluentd/testdata/plugins.json
new file mode 100644
index 000000000..1fd921f7c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/fluentd/testdata/plugins.json
@@ -0,0 +1,101 @@
+{
+ "plugins": [
+ {
+ "plugin_id": "input_forward",
+ "plugin_category": "input",
+ "type": "forward",
+ "config": {
+ "@type": "forward",
+ "@id": "input_forward"
+ },
+ "output_plugin": false,
+ "retry_count": null
+ },
+ {
+ "plugin_id": "input_http",
+ "plugin_category": "input",
+ "type": "http",
+ "config": {
+ "@type": "http",
+ "@id": "input_http",
+ "port": "8888"
+ },
+ "output_plugin": false,
+ "retry_count": null
+ },
+ {
+ "plugin_id": "input_debug_agent",
+ "plugin_category": "input",
+ "type": "debug_agent",
+ "config": {
+ "@type": "debug_agent",
+ "@id": "input_debug_agent",
+ "bind": "127.0.0.1",
+ "port": "24230"
+ },
+ "output_plugin": false,
+ "retry_count": null
+ },
+ {
+ "plugin_id": "object:3f7e4d08e3e0",
+ "plugin_category": "input",
+ "type": "monitor_agent",
+ "config": {
+ "@type": "monitor_agent",
+ "bind": "0.0.0.0",
+ "port": "24220"
+ },
+ "output_plugin": false,
+ "retry_count": null
+ },
+ {
+ "plugin_id": "output_td",
+ "plugin_category": "output",
+ "type": "tdlog",
+ "config": {
+ "@type": "tdlog",
+ "@id": "output_td",
+ "apikey": "xxxxxx",
+ "auto_create_table": ""
+ },
+ "output_plugin": true,
+ "buffer_queue_length": 0,
+ "buffer_total_queued_size": 0,
+ "retry_count": 0,
+ "retry": {}
+ },
+ {
+ "plugin_id": "output_stdout",
+ "plugin_category": "output",
+ "type": "stdout",
+ "config": {
+ "@type": "stdout",
+ "@id": "output_stdout"
+ },
+ "output_plugin": true,
+ "retry_count": 0,
+ "retry": {}
+ },
+ {
+ "plugin_id": "object:3f7e4b836770",
+ "plugin_category": "filter",
+ "type": "grep",
+ "config": {
+ "@type": "grep",
+ "regexp1": "message cool"
+ },
+ "output_plugin": false,
+ "retry_count": null
+ },
+ {
+ "plugin_id": "object:3f7e4bbe5a38",
+ "plugin_category": "filter",
+ "type": "record_transformer",
+ "config": {
+ "@type": "record_transformer"
+ },
+ "output_plugin": false,
+ "retry_count": null
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/freeradius/README.md b/src/go/plugin/go.d/modules/freeradius/README.md
new file mode 120000
index 000000000..66deefdb7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/README.md
@@ -0,0 +1 @@
+integrations/freeradius.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/freeradius/api/client.go b/src/go/plugin/go.d/modules/freeradius/api/client.go
new file mode 100644
index 000000000..01f784c17
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/api/client.go
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package api
+
+import (
+ "context"
+ "crypto/hmac"
+ "crypto/md5"
+ "fmt"
+ "net"
+ "strconv"
+ "time"
+
+ "layeh.com/radius"
+ "layeh.com/radius/rfc2869"
+)
+
+type Status struct {
+ AccessRequests int64 `stm:"access-requests"`
+ AccessAccepts int64 `stm:"access-accepts"`
+ AccessRejects int64 `stm:"access-rejects"`
+ AccessChallenges int64 `stm:"access-challenges"`
+ AuthResponses int64 `stm:"auth-responses"`
+ AuthDuplicateRequests int64 `stm:"auth-duplicate-requests"`
+ AuthMalformedRequests int64 `stm:"auth-malformed-requests"`
+ AuthInvalidRequests int64 `stm:"auth-invalid-requests"`
+ AuthDroppedRequests int64 `stm:"auth-dropped-requests"`
+ AuthUnknownTypes int64 `stm:"auth-unknown-types"`
+
+ AccountingRequests int64 `stm:"accounting-requests"`
+ AccountingResponses int64 `stm:"accounting-responses"`
+ AcctDuplicateRequests int64 `stm:"acct-duplicate-requests"`
+ AcctMalformedRequests int64 `stm:"acct-malformed-requests"`
+ AcctInvalidRequests int64 `stm:"acct-invalid-requests"`
+ AcctDroppedRequests int64 `stm:"acct-dropped-requests"`
+ AcctUnknownTypes int64 `stm:"acct-unknown-types"`
+
+ ProxyAccessRequests int64 `stm:"proxy-access-requests"`
+ ProxyAccessAccepts int64 `stm:"proxy-access-accepts"`
+ ProxyAccessRejects int64 `stm:"proxy-access-rejects"`
+ ProxyAccessChallenges int64 `stm:"proxy-access-challenges"`
+ ProxyAuthResponses int64 `stm:"proxy-auth-responses"`
+ ProxyAuthDuplicateRequests int64 `stm:"proxy-auth-duplicate-requests"`
+ ProxyAuthMalformedRequests int64 `stm:"proxy-auth-malformed-requests"`
+ ProxyAuthInvalidRequests int64 `stm:"proxy-auth-invalid-requests"`
+ ProxyAuthDroppedRequests int64 `stm:"proxy-auth-dropped-requests"`
+ ProxyAuthUnknownTypes int64 `stm:"proxy-auth-unknown-types"`
+
+ ProxyAccountingRequests int64 `stm:"proxy-accounting-requests"`
+ ProxyAccountingResponses int64 `stm:"proxy-accounting-responses"`
+ ProxyAcctDuplicateRequests int64 `stm:"proxy-acct-duplicate-requests"`
+ ProxyAcctMalformedRequests int64 `stm:"proxy-acct-malformed-requests"`
+ ProxyAcctInvalidRequests int64 `stm:"proxy-acct-invalid-requests"`
+ ProxyAcctDroppedRequests int64 `stm:"proxy-acct-dropped-requests"`
+ ProxyAcctUnknownTypes int64 `stm:"proxy-acct-unknown-types"`
+}
+
+type (
+ radiusClient interface {
+ Exchange(ctx context.Context, packet *radius.Packet, address string) (*radius.Packet, error)
+ }
+ Config struct {
+ Address string
+ Port int
+ Secret string
+ Timeout time.Duration
+ }
+ Client struct {
+ address string
+ secret string
+ timeout time.Duration
+ radiusClient
+ }
+)
+
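+// New returns a Client that queries FreeRADIUS server statistics via the
+// Status-Server RADIUS exchange.
+//
+// Illustrative usage sketch (the address, port and secret below are placeholders,
+// not values defined by this package):
+//
+//	c := New(Config{Address: "127.0.0.1", Port: 18121, Secret: "adminsecret", Timeout: time.Second})
+//	status, err := c.Status()
+//	if err != nil {
+//		// handle the request/decoding error
+//	}
+//	fmt.Println(status.AccessRequests)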
+func New(conf Config) *Client {
+ return &Client{
+ address: net.JoinHostPort(conf.Address, strconv.Itoa(conf.Port)),
+ secret: conf.Secret,
+ timeout: conf.Timeout,
+ radiusClient: &radius.Client{Retry: time.Second, MaxPacketErrors: 10},
+ }
+}
+
+func (c Client) Status() (*Status, error) {
+ packet, err := newStatusServerPacket(c.secret)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating StatusServer packet: %v", err)
+ }
+
+ resp, err := c.queryServer(packet)
+ if err != nil {
+ return nil, fmt.Errorf("error on request to '%s': %v", c.address, err)
+ }
+
+ return decodeResponse(resp), nil
+}
+
+func (c Client) queryServer(packet *radius.Packet) (*radius.Packet, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ resp, err := c.Exchange(ctx, packet, c.address)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.Code != radius.CodeAccessAccept {
+ return nil, fmt.Errorf("'%s' returned response code %d", c.address, resp.Code)
+ }
+ return resp, nil
+}
+
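+// newStatusServerPacket builds a Status-Server request asking for all statistics
+// (FreeRADIUS-Statistics-Type = All). Status-Server requests carry a
+// Message-Authenticator attribute (RFC 5997): it is first set to 16 zero bytes,
+// the packet is encoded, and the HMAC-MD5 of that encoding, keyed with the shared
+// secret, replaces the placeholder.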
+func newStatusServerPacket(secret string) (*radius.Packet, error) {
+ // https://wiki.freeradius.org/config/Status#status-of-freeradius-server
+ packet := radius.New(radius.CodeStatusServer, []byte(secret))
+ if err := FreeRADIUSStatisticsType_Set(packet, FreeRADIUSStatisticsType_Value_All); err != nil {
+ return nil, err
+ }
+ if err := rfc2869.MessageAuthenticator_Set(packet, make([]byte, 16)); err != nil {
+ return nil, err
+ }
+ hash := hmac.New(md5.New, packet.Secret)
+ encode, err := packet.Encode()
+ if err != nil {
+ return nil, err
+ }
+ if _, err := hash.Write(encode); err != nil {
+ return nil, err
+ }
+ if err := rfc2869.MessageAuthenticator_Set(packet, hash.Sum(nil)); err != nil {
+ return nil, err
+ }
+ return packet, nil
+}
+
+func decodeResponse(resp *radius.Packet) *Status {
+ return &Status{
+ AccessRequests: int64(FreeRADIUSTotalAccessRequests_Get(resp)),
+ AccessAccepts: int64(FreeRADIUSTotalAccessAccepts_Get(resp)),
+ AccessRejects: int64(FreeRADIUSTotalAccessRejects_Get(resp)),
+ AccessChallenges: int64(FreeRADIUSTotalAccessChallenges_Get(resp)),
+ AuthResponses: int64(FreeRADIUSTotalAuthResponses_Get(resp)),
+ AuthDuplicateRequests: int64(FreeRADIUSTotalAuthDuplicateRequests_Get(resp)),
+ AuthMalformedRequests: int64(FreeRADIUSTotalAuthMalformedRequests_Get(resp)),
+ AuthInvalidRequests: int64(FreeRADIUSTotalAuthInvalidRequests_Get(resp)),
+ AuthDroppedRequests: int64(FreeRADIUSTotalAuthDroppedRequests_Get(resp)),
+ AuthUnknownTypes: int64(FreeRADIUSTotalAuthUnknownTypes_Get(resp)),
+ AccountingRequests: int64(FreeRADIUSTotalAccountingRequests_Get(resp)),
+ AccountingResponses: int64(FreeRADIUSTotalAccountingResponses_Get(resp)),
+ AcctDuplicateRequests: int64(FreeRADIUSTotalAcctDuplicateRequests_Get(resp)),
+ AcctMalformedRequests: int64(FreeRADIUSTotalAcctMalformedRequests_Get(resp)),
+ AcctInvalidRequests: int64(FreeRADIUSTotalAcctInvalidRequests_Get(resp)),
+ AcctDroppedRequests: int64(FreeRADIUSTotalAcctDroppedRequests_Get(resp)),
+ AcctUnknownTypes: int64(FreeRADIUSTotalAcctUnknownTypes_Get(resp)),
+ ProxyAccessRequests: int64(FreeRADIUSTotalProxyAccessRequests_Get(resp)),
+ ProxyAccessAccepts: int64(FreeRADIUSTotalProxyAccessAccepts_Get(resp)),
+ ProxyAccessRejects: int64(FreeRADIUSTotalProxyAccessRejects_Get(resp)),
+ ProxyAccessChallenges: int64(FreeRADIUSTotalProxyAccessChallenges_Get(resp)),
+ ProxyAuthResponses: int64(FreeRADIUSTotalProxyAuthResponses_Get(resp)),
+ ProxyAuthDuplicateRequests: int64(FreeRADIUSTotalProxyAuthDuplicateRequests_Get(resp)),
+ ProxyAuthMalformedRequests: int64(FreeRADIUSTotalProxyAuthMalformedRequests_Get(resp)),
+ ProxyAuthInvalidRequests: int64(FreeRADIUSTotalProxyAuthInvalidRequests_Get(resp)),
+ ProxyAuthDroppedRequests: int64(FreeRADIUSTotalProxyAuthDroppedRequests_Get(resp)),
+ ProxyAuthUnknownTypes: int64(FreeRADIUSTotalProxyAuthUnknownTypes_Get(resp)),
+ ProxyAccountingRequests: int64(FreeRADIUSTotalProxyAccountingRequests_Get(resp)),
+ ProxyAccountingResponses: int64(FreeRADIUSTotalProxyAccountingResponses_Get(resp)),
+ ProxyAcctDuplicateRequests: int64(FreeRADIUSTotalProxyAcctDuplicateRequests_Get(resp)),
+ ProxyAcctMalformedRequests: int64(FreeRADIUSTotalProxyAcctMalformedRequests_Get(resp)),
+ ProxyAcctInvalidRequests: int64(FreeRADIUSTotalProxyAcctInvalidRequests_Get(resp)),
+ ProxyAcctDroppedRequests: int64(FreeRADIUSTotalProxyAcctDroppedRequests_Get(resp)),
+ ProxyAcctUnknownTypes: int64(FreeRADIUSTotalProxyAcctUnknownTypes_Get(resp)),
+ }
+}
diff --git a/src/go/plugin/go.d/modules/freeradius/api/client_test.go b/src/go/plugin/go.d/modules/freeradius/api/client_test.go
new file mode 100644
index 000000000..9323aa992
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/api/client_test.go
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package api
+
+import (
+ "context"
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "layeh.com/radius"
+)
+
+func TestNew(t *testing.T) {
+ assert.NotNil(t, New(Config{}))
+}
+
+func TestClient_Status(t *testing.T) {
+ var c Client
+ c.radiusClient = newOKMockFreeRADIUSClient()
+
+ expected := Status{
+ AccessRequests: 1,
+ AccessAccepts: 2,
+ AccessRejects: 3,
+ AccessChallenges: 4,
+ AuthResponses: 5,
+ AuthDuplicateRequests: 6,
+ AuthMalformedRequests: 7,
+ AuthInvalidRequests: 8,
+ AuthDroppedRequests: 9,
+ AuthUnknownTypes: 10,
+ AccountingRequests: 11,
+ AccountingResponses: 12,
+ AcctDuplicateRequests: 13,
+ AcctMalformedRequests: 14,
+ AcctInvalidRequests: 15,
+ AcctDroppedRequests: 16,
+ AcctUnknownTypes: 17,
+ ProxyAccessRequests: 18,
+ ProxyAccessAccepts: 19,
+ ProxyAccessRejects: 20,
+ ProxyAccessChallenges: 21,
+ ProxyAuthResponses: 22,
+ ProxyAuthDuplicateRequests: 23,
+ ProxyAuthMalformedRequests: 24,
+ ProxyAuthInvalidRequests: 25,
+ ProxyAuthDroppedRequests: 26,
+ ProxyAuthUnknownTypes: 27,
+ ProxyAccountingRequests: 28,
+ ProxyAccountingResponses: 29,
+ ProxyAcctDuplicateRequests: 30,
+ ProxyAcctMalformedRequests: 31,
+ ProxyAcctInvalidRequests: 32,
+ ProxyAcctDroppedRequests: 33,
+ ProxyAcctUnknownTypes: 34,
+ }
+
+ s, err := c.Status()
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, *s)
+}
+
+func TestClient_Status_ReturnsErrorIfClientExchangeReturnsError(t *testing.T) {
+ var c Client
+ c.radiusClient = newErrorMockFreeRADIUSClient()
+
+ s, err := c.Status()
+
+ assert.Nil(t, s)
+ assert.Error(t, err)
+}
+
+func TestClient_Status_ReturnsErrorIfServerResponseHasBadStatus(t *testing.T) {
+ var c Client
+ c.radiusClient = newBadRespCodeMockFreeRADIUSClient()
+
+ s, err := c.Status()
+
+ assert.Nil(t, s)
+ assert.Error(t, err)
+}
+
+type mockFreeRADIUSClient struct {
+ errOnExchange bool
+ badRespCode bool
+}
+
+func newOKMockFreeRADIUSClient() *mockFreeRADIUSClient {
+ return &mockFreeRADIUSClient{}
+}
+
+func newErrorMockFreeRADIUSClient() *mockFreeRADIUSClient {
+ return &mockFreeRADIUSClient{errOnExchange: true}
+}
+
+func newBadRespCodeMockFreeRADIUSClient() *mockFreeRADIUSClient {
+ return &mockFreeRADIUSClient{badRespCode: true}
+}
+
+func (m mockFreeRADIUSClient) Exchange(_ context.Context, _ *radius.Packet, _ string) (*radius.Packet, error) {
+ if m.errOnExchange {
+ return nil, errors.New("mock Exchange error")
+ }
+ resp := radius.New(radius.CodeAccessAccept, []byte("secret"))
+ if m.badRespCode {
+ resp.Code = radius.CodeAccessReject
+ } else {
+ resp.Code = radius.CodeAccessAccept
+ }
+ addValues(resp)
+ return resp, nil
+}
+
+func addValues(resp *radius.Packet) {
+ _ = FreeRADIUSTotalAccessRequests_Add(resp, 1)
+ _ = FreeRADIUSTotalAccessAccepts_Add(resp, 2)
+ _ = FreeRADIUSTotalAccessRejects_Add(resp, 3)
+ _ = FreeRADIUSTotalAccessChallenges_Add(resp, 4)
+ _ = FreeRADIUSTotalAuthResponses_Add(resp, 5)
+ _ = FreeRADIUSTotalAuthDuplicateRequests_Add(resp, 6)
+ _ = FreeRADIUSTotalAuthMalformedRequests_Add(resp, 7)
+ _ = FreeRADIUSTotalAuthInvalidRequests_Add(resp, 8)
+ _ = FreeRADIUSTotalAuthDroppedRequests_Add(resp, 9)
+ _ = FreeRADIUSTotalAuthUnknownTypes_Add(resp, 10)
+ _ = FreeRADIUSTotalAccountingRequests_Add(resp, 11)
+ _ = FreeRADIUSTotalAccountingResponses_Add(resp, 12)
+ _ = FreeRADIUSTotalAcctDuplicateRequests_Add(resp, 13)
+ _ = FreeRADIUSTotalAcctMalformedRequests_Add(resp, 14)
+ _ = FreeRADIUSTotalAcctInvalidRequests_Add(resp, 15)
+ _ = FreeRADIUSTotalAcctDroppedRequests_Add(resp, 16)
+ _ = FreeRADIUSTotalAcctUnknownTypes_Add(resp, 17)
+ _ = FreeRADIUSTotalProxyAccessRequests_Add(resp, 18)
+ _ = FreeRADIUSTotalProxyAccessAccepts_Add(resp, 19)
+ _ = FreeRADIUSTotalProxyAccessRejects_Add(resp, 20)
+ _ = FreeRADIUSTotalProxyAccessChallenges_Add(resp, 21)
+ _ = FreeRADIUSTotalProxyAuthResponses_Add(resp, 22)
+ _ = FreeRADIUSTotalProxyAuthDuplicateRequests_Add(resp, 23)
+ _ = FreeRADIUSTotalProxyAuthMalformedRequests_Add(resp, 24)
+ _ = FreeRADIUSTotalProxyAuthInvalidRequests_Add(resp, 25)
+ _ = FreeRADIUSTotalProxyAuthDroppedRequests_Add(resp, 26)
+ _ = FreeRADIUSTotalProxyAuthUnknownTypes_Add(resp, 27)
+ _ = FreeRADIUSTotalProxyAccountingRequests_Add(resp, 28)
+ _ = FreeRADIUSTotalProxyAccountingResponses_Add(resp, 29)
+ _ = FreeRADIUSTotalProxyAcctDuplicateRequests_Add(resp, 30)
+ _ = FreeRADIUSTotalProxyAcctMalformedRequests_Add(resp, 31)
+ _ = FreeRADIUSTotalProxyAcctInvalidRequests_Add(resp, 32)
+ _ = FreeRADIUSTotalProxyAcctDroppedRequests_Add(resp, 33)
+ _ = FreeRADIUSTotalProxyAcctUnknownTypes_Add(resp, 34)
+}
diff --git a/src/go/plugin/go.d/modules/freeradius/api/dictionary.go b/src/go/plugin/go.d/modules/freeradius/api/dictionary.go
new file mode 100644
index 000000000..0ed348ae3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/api/dictionary.go
@@ -0,0 +1,2683 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package api
+
+import (
+ "strconv"
+ "time"
+
+ "layeh.com/radius"
+ "layeh.com/radius/rfc2865"
+)
+
+/*
+The response from freeradius 3.0.12+dfsg-5+deb9u1:
+
+Sent Status-Server Id 23 from 0.0.0.0:37131 to 127.0.0.1:18121 length 50
+ Message-Authenticator = 0x00
+ FreeRADIUS-Statistics-Type = All
+Received Access-Accept Id 23 from 127.0.0.1:18121 to 0.0.0.0:0 length 536
+ FreeRADIUS-Total-Access-Requests = 3
+ FreeRADIUS-Total-Access-Accepts = 0
+ FreeRADIUS-Total-Access-Rejects = 0
+ FreeRADIUS-Total-Access-Challenges = 0
+ FreeRADIUS-Total-Auth-Responses = 0
+ FreeRADIUS-Total-Auth-Duplicate-Requests = 0
+ FreeRADIUS-Total-Auth-Malformed-Requests = 0
+ FreeRADIUS-Total-Auth-Invalid-Requests = 0
+ FreeRADIUS-Total-Auth-Dropped-Requests = 0
+ FreeRADIUS-Total-Auth-Unknown-Types = 0
+ FreeRADIUS-Total-Accounting-Requests = 0
+ FreeRADIUS-Total-Accounting-Responses = 0
+ FreeRADIUS-Total-Acct-Duplicate-Requests = 0
+ FreeRADIUS-Total-Acct-Malformed-Requests = 0
+ FreeRADIUS-Total-Acct-Invalid-Requests = 0
+ FreeRADIUS-Total-Acct-Dropped-Requests = 0
+ FreeRADIUS-Total-Acct-Unknown-Types = 0
+ FreeRADIUS-Total-Proxy-Access-Requests = 0
+ FreeRADIUS-Total-Proxy-Access-Accepts = 0
+ FreeRADIUS-Total-Proxy-Access-Rejects = 0
+ FreeRADIUS-Total-Proxy-Access-Challenges = 0
+ FreeRADIUS-Total-Proxy-Auth-Responses = 0
+ FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = 0
+ FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = 0
+ FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = 0
+ FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = 0
+ FreeRADIUS-Total-Proxy-Auth-Unknown-Types = 0
+ FreeRADIUS-Total-Proxy-Accounting-Requests = 0
+ FreeRADIUS-Total-Proxy-Accounting-Responses = 0
+ FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = 0
+ FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = 0
+ FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = 0
+ FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = 0
+ FreeRADIUS-Total-Proxy-Acct-Unknown-Types = 0
+ FreeRADIUS-Stats-Start-Time = "Dec 27 2018 05:08:02 +09"
+ FreeRADIUS-Stats-HUP-Time = "Dec 27 2018 05:08:02 +09"
+ FreeRADIUS-Queue-Len-Internal = 0
+ FreeRADIUS-Queue-Len-Proxy = 0
+ FreeRADIUS-Queue-Len-Auth = 0
+ FreeRADIUS-Queue-Len-Acct = 0
+ FreeRADIUS-Queue-Len-Detail = 0
+ FreeRADIUS-Queue-PPS-In = 0
+ FreeRADIUS-Queue-PPS-Out = 0
+*/
+
+// Code generation tool https://github.com/layeh/radius/tree/master/cmd/radius-dict-gen.
+// Used dictionary: dictionary.freeradius from /usr/share/freeradius/ directory (freeradius 3.0.12+dfsg-5+deb9u1)
+// All attributes that are not in the response were removed to reduce the amount of generated code.
+
+// Code generated by radius-dict-gen. DO NOT EDIT.
+
+const (
+ _FreeRADIUS_VendorID = 11344
+)
+
+func _FreeRADIUS_AddVendor(p *radius.Packet, typ byte, attr radius.Attribute) (err error) {
+ var vsa radius.Attribute
+ vendor := make(radius.Attribute, 2+len(attr))
+ vendor[0] = typ
+ vendor[1] = byte(len(vendor))
+ copy(vendor[2:], attr)
+ vsa, err = radius.NewVendorSpecific(_FreeRADIUS_VendorID, vendor)
+ if err != nil {
+ return
+ }
+ p.Add(rfc2865.VendorSpecific_Type, vsa)
+ return
+}
+
+func _FreeRADIUS_GetsVendor(p *radius.Packet, typ byte) (values []radius.Attribute) {
+ for _, attr := range p.Attributes[rfc2865.VendorSpecific_Type] {
+ vendorID, vsa, err := radius.VendorSpecific(attr)
+ if err != nil || vendorID != _FreeRADIUS_VendorID {
+ continue
+ }
+ for len(vsa) >= 3 {
+ vsaTyp, vsaLen := vsa[0], vsa[1]
+ if int(vsaLen) > len(vsa) || vsaLen < 3 {
+ break
+ }
+ if vsaTyp == typ {
+ values = append(values, vsa[2:int(vsaLen)])
+ }
+ vsa = vsa[int(vsaLen):]
+ }
+ }
+ return
+}
+
+func _FreeRADIUS_LookupVendor(p *radius.Packet, typ byte) (attr radius.Attribute, ok bool) {
+ for _, a := range p.Attributes[rfc2865.VendorSpecific_Type] {
+ vendorID, vsa, err := radius.VendorSpecific(a)
+ if err != nil || vendorID != _FreeRADIUS_VendorID {
+ continue
+ }
+ for len(vsa) >= 3 {
+ vsaTyp, vsaLen := vsa[0], vsa[1]
+ if int(vsaLen) > len(vsa) || vsaLen < 3 {
+ break
+ }
+ if vsaTyp == typ {
+ return vsa[2:int(vsaLen)], true
+ }
+ vsa = vsa[int(vsaLen):]
+ }
+ }
+ return
+}
+
+func _FreeRADIUS_SetVendor(p *radius.Packet, typ byte, attr radius.Attribute) (err error) {
+ for i := 0; i < len(p.Attributes[rfc2865.VendorSpecific_Type]); {
+ vendorID, vsa, err := radius.VendorSpecific(p.Attributes[rfc2865.VendorSpecific_Type][i])
+ if err != nil || vendorID != _FreeRADIUS_VendorID {
+ i++
+ continue
+ }
+ for j := 0; len(vsa[j:]) >= 3; {
+ vsaTyp, vsaLen := vsa[0], vsa[1]
+ if int(vsaLen) > len(vsa[j:]) || vsaLen < 3 {
+ i++
+ break
+ }
+ if vsaTyp == typ {
+ vsa = append(vsa[:j], vsa[j+int(vsaLen):]...)
+ }
+ j += int(vsaLen)
+ }
+ if len(vsa) > 0 {
+ copy(p.Attributes[rfc2865.VendorSpecific_Type][i][4:], vsa)
+ i++
+ } else {
+ p.Attributes[rfc2865.VendorSpecific_Type] = append(p.Attributes[rfc2865.VendorSpecific_Type][:i], p.Attributes[rfc2865.VendorSpecific_Type][i+1:]...)
+ }
+ }
+ return _FreeRADIUS_AddVendor(p, typ, attr)
+}
+
+func _FreeRADIUS_DelVendor(p *radius.Packet, typ byte) {
+vsaLoop:
+ for i := 0; i < len(p.Attributes[rfc2865.VendorSpecific_Type]); {
+ attr := p.Attributes[rfc2865.VendorSpecific_Type][i]
+ vendorID, vsa, err := radius.VendorSpecific(attr)
+ if err != nil || vendorID != _FreeRADIUS_VendorID {
+ i++
+ continue
+ }
+ offset := 0
+ for len(vsa[offset:]) >= 3 {
+ vsaTyp, vsaLen := vsa[offset], vsa[offset+1]
+ if int(vsaLen) > len(vsa) || vsaLen < 3 {
+ continue vsaLoop
+ }
+ if vsaTyp == typ {
+ copy(vsa[offset:], vsa[offset+int(vsaLen):])
+ vsa = vsa[:len(vsa)-int(vsaLen)]
+ } else {
+ offset += int(vsaLen)
+ }
+ }
+ if offset == 0 {
+ p.Attributes[rfc2865.VendorSpecific_Type] = append(p.Attributes[rfc2865.VendorSpecific_Type][:i], p.Attributes[rfc2865.VendorSpecific_Type][i+1:]...)
+ } else {
+ i++
+ }
+ }
+ return
+}
+
+type FreeRADIUSStatisticsType uint32
+
+const (
+ FreeRADIUSStatisticsType_Value_All FreeRADIUSStatisticsType = 31
+)
+
+var FreeRADIUSStatisticsType_Strings = map[FreeRADIUSStatisticsType]string{
+ FreeRADIUSStatisticsType_Value_All: "All",
+}
+
+func (a FreeRADIUSStatisticsType) String() string {
+ if str, ok := FreeRADIUSStatisticsType_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSStatisticsType(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSStatisticsType_Add(p *radius.Packet, value FreeRADIUSStatisticsType) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 127, a)
+}
+
+func FreeRADIUSStatisticsType_Get(p *radius.Packet) (value FreeRADIUSStatisticsType) {
+ value, _ = FreeRADIUSStatisticsType_Lookup(p)
+ return
+}
+
+func FreeRADIUSStatisticsType_Gets(p *radius.Packet) (values []FreeRADIUSStatisticsType, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 127) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSStatisticsType(i))
+ }
+ return
+}
+
+func FreeRADIUSStatisticsType_Lookup(p *radius.Packet) (value FreeRADIUSStatisticsType, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 127)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSStatisticsType(i)
+ return
+}
+
+func FreeRADIUSStatisticsType_Set(p *radius.Packet, value FreeRADIUSStatisticsType) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 127, a)
+}
+
+func FreeRADIUSStatisticsType_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 127)
+}
+
+type FreeRADIUSTotalAccessRequests uint32
+
+var FreeRADIUSTotalAccessRequests_Strings = map[FreeRADIUSTotalAccessRequests]string{}
+
+func (a FreeRADIUSTotalAccessRequests) String() string {
+ if str, ok := FreeRADIUSTotalAccessRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAccessRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAccessRequests_Add(p *radius.Packet, value FreeRADIUSTotalAccessRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 128, a)
+}
+
+func FreeRADIUSTotalAccessRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAccessRequests) {
+ value, _ = FreeRADIUSTotalAccessRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAccessRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccessRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 128) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAccessRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAccessRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccessRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 128)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAccessRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAccessRequests_Set(p *radius.Packet, value FreeRADIUSTotalAccessRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 128, a)
+}
+
+func FreeRADIUSTotalAccessRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 128)
+}
+
+type FreeRADIUSTotalAccessAccepts uint32
+
+var FreeRADIUSTotalAccessAccepts_Strings = map[FreeRADIUSTotalAccessAccepts]string{}
+
+func (a FreeRADIUSTotalAccessAccepts) String() string {
+ if str, ok := FreeRADIUSTotalAccessAccepts_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAccessAccepts(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAccessAccepts_Add(p *radius.Packet, value FreeRADIUSTotalAccessAccepts) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 129, a)
+}
+
+func FreeRADIUSTotalAccessAccepts_Get(p *radius.Packet) (value FreeRADIUSTotalAccessAccepts) {
+ value, _ = FreeRADIUSTotalAccessAccepts_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAccessAccepts_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccessAccepts, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 129) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAccessAccepts(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAccessAccepts_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccessAccepts, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 129)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAccessAccepts(i)
+ return
+}
+
+func FreeRADIUSTotalAccessAccepts_Set(p *radius.Packet, value FreeRADIUSTotalAccessAccepts) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 129, a)
+}
+
+func FreeRADIUSTotalAccessAccepts_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 129)
+}
+
+type FreeRADIUSTotalAccessRejects uint32
+
+var FreeRADIUSTotalAccessRejects_Strings = map[FreeRADIUSTotalAccessRejects]string{}
+
+func (a FreeRADIUSTotalAccessRejects) String() string {
+ if str, ok := FreeRADIUSTotalAccessRejects_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAccessRejects(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAccessRejects_Add(p *radius.Packet, value FreeRADIUSTotalAccessRejects) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 130, a)
+}
+
+func FreeRADIUSTotalAccessRejects_Get(p *radius.Packet) (value FreeRADIUSTotalAccessRejects) {
+ value, _ = FreeRADIUSTotalAccessRejects_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAccessRejects_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccessRejects, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 130) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAccessRejects(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAccessRejects_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccessRejects, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 130)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAccessRejects(i)
+ return
+}
+
+func FreeRADIUSTotalAccessRejects_Set(p *radius.Packet, value FreeRADIUSTotalAccessRejects) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 130, a)
+}
+
+func FreeRADIUSTotalAccessRejects_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 130)
+}
+
+type FreeRADIUSTotalAccessChallenges uint32
+
+var FreeRADIUSTotalAccessChallenges_Strings = map[FreeRADIUSTotalAccessChallenges]string{}
+
+func (a FreeRADIUSTotalAccessChallenges) String() string {
+ if str, ok := FreeRADIUSTotalAccessChallenges_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAccessChallenges(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAccessChallenges_Add(p *radius.Packet, value FreeRADIUSTotalAccessChallenges) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 131, a)
+}
+
+func FreeRADIUSTotalAccessChallenges_Get(p *radius.Packet) (value FreeRADIUSTotalAccessChallenges) {
+ value, _ = FreeRADIUSTotalAccessChallenges_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAccessChallenges_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccessChallenges, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 131) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAccessChallenges(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAccessChallenges_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccessChallenges, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 131)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAccessChallenges(i)
+ return
+}
+
+func FreeRADIUSTotalAccessChallenges_Set(p *radius.Packet, value FreeRADIUSTotalAccessChallenges) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 131, a)
+}
+
+func FreeRADIUSTotalAccessChallenges_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 131)
+}
+
+type FreeRADIUSTotalAuthResponses uint32
+
+var FreeRADIUSTotalAuthResponses_Strings = map[FreeRADIUSTotalAuthResponses]string{}
+
+func (a FreeRADIUSTotalAuthResponses) String() string {
+ if str, ok := FreeRADIUSTotalAuthResponses_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAuthResponses(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAuthResponses_Add(p *radius.Packet, value FreeRADIUSTotalAuthResponses) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 132, a)
+}
+
+func FreeRADIUSTotalAuthResponses_Get(p *radius.Packet) (value FreeRADIUSTotalAuthResponses) {
+ value, _ = FreeRADIUSTotalAuthResponses_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAuthResponses_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthResponses, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 132) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAuthResponses(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAuthResponses_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthResponses, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 132)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAuthResponses(i)
+ return
+}
+
+func FreeRADIUSTotalAuthResponses_Set(p *radius.Packet, value FreeRADIUSTotalAuthResponses) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 132, a)
+}
+
+func FreeRADIUSTotalAuthResponses_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 132)
+}
+
+type FreeRADIUSTotalAuthDuplicateRequests uint32
+
+var FreeRADIUSTotalAuthDuplicateRequests_Strings = map[FreeRADIUSTotalAuthDuplicateRequests]string{}
+
+func (a FreeRADIUSTotalAuthDuplicateRequests) String() string {
+ if str, ok := FreeRADIUSTotalAuthDuplicateRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAuthDuplicateRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAuthDuplicateRequests_Add(p *radius.Packet, value FreeRADIUSTotalAuthDuplicateRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 133, a)
+}
+
+func FreeRADIUSTotalAuthDuplicateRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAuthDuplicateRequests) {
+ value, _ = FreeRADIUSTotalAuthDuplicateRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAuthDuplicateRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthDuplicateRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 133) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAuthDuplicateRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAuthDuplicateRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthDuplicateRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 133)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAuthDuplicateRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAuthDuplicateRequests_Set(p *radius.Packet, value FreeRADIUSTotalAuthDuplicateRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 133, a)
+}
+
+func FreeRADIUSTotalAuthDuplicateRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 133)
+}
+
+type FreeRADIUSTotalAuthMalformedRequests uint32
+
+var FreeRADIUSTotalAuthMalformedRequests_Strings = map[FreeRADIUSTotalAuthMalformedRequests]string{}
+
+func (a FreeRADIUSTotalAuthMalformedRequests) String() string {
+ if str, ok := FreeRADIUSTotalAuthMalformedRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAuthMalformedRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAuthMalformedRequests_Add(p *radius.Packet, value FreeRADIUSTotalAuthMalformedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 134, a)
+}
+
+func FreeRADIUSTotalAuthMalformedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAuthMalformedRequests) {
+ value, _ = FreeRADIUSTotalAuthMalformedRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAuthMalformedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthMalformedRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 134) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAuthMalformedRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAuthMalformedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthMalformedRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 134)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAuthMalformedRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAuthMalformedRequests_Set(p *radius.Packet, value FreeRADIUSTotalAuthMalformedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 134, a)
+}
+
+func FreeRADIUSTotalAuthMalformedRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 134)
+}
+
+type FreeRADIUSTotalAuthInvalidRequests uint32
+
+var FreeRADIUSTotalAuthInvalidRequests_Strings = map[FreeRADIUSTotalAuthInvalidRequests]string{}
+
+func (a FreeRADIUSTotalAuthInvalidRequests) String() string {
+ if str, ok := FreeRADIUSTotalAuthInvalidRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAuthInvalidRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAuthInvalidRequests_Add(p *radius.Packet, value FreeRADIUSTotalAuthInvalidRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 135, a)
+}
+
+func FreeRADIUSTotalAuthInvalidRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAuthInvalidRequests) {
+ value, _ = FreeRADIUSTotalAuthInvalidRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAuthInvalidRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthInvalidRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 135) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAuthInvalidRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAuthInvalidRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthInvalidRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 135)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAuthInvalidRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAuthInvalidRequests_Set(p *radius.Packet, value FreeRADIUSTotalAuthInvalidRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 135, a)
+}
+
+func FreeRADIUSTotalAuthInvalidRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 135)
+}
+
+type FreeRADIUSTotalAuthDroppedRequests uint32
+
+var FreeRADIUSTotalAuthDroppedRequests_Strings = map[FreeRADIUSTotalAuthDroppedRequests]string{}
+
+func (a FreeRADIUSTotalAuthDroppedRequests) String() string {
+ if str, ok := FreeRADIUSTotalAuthDroppedRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAuthDroppedRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAuthDroppedRequests_Add(p *radius.Packet, value FreeRADIUSTotalAuthDroppedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 136, a)
+}
+
+func FreeRADIUSTotalAuthDroppedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAuthDroppedRequests) {
+ value, _ = FreeRADIUSTotalAuthDroppedRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAuthDroppedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthDroppedRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 136) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAuthDroppedRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAuthDroppedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthDroppedRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 136)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAuthDroppedRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAuthDroppedRequests_Set(p *radius.Packet, value FreeRADIUSTotalAuthDroppedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 136, a)
+}
+
+func FreeRADIUSTotalAuthDroppedRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 136)
+}
+
+type FreeRADIUSTotalAuthUnknownTypes uint32
+
+var FreeRADIUSTotalAuthUnknownTypes_Strings = map[FreeRADIUSTotalAuthUnknownTypes]string{}
+
+func (a FreeRADIUSTotalAuthUnknownTypes) String() string {
+ if str, ok := FreeRADIUSTotalAuthUnknownTypes_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAuthUnknownTypes(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAuthUnknownTypes_Add(p *radius.Packet, value FreeRADIUSTotalAuthUnknownTypes) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 137, a)
+}
+
+func FreeRADIUSTotalAuthUnknownTypes_Get(p *radius.Packet) (value FreeRADIUSTotalAuthUnknownTypes) {
+ value, _ = FreeRADIUSTotalAuthUnknownTypes_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAuthUnknownTypes_Gets(p *radius.Packet) (values []FreeRADIUSTotalAuthUnknownTypes, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 137) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAuthUnknownTypes(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAuthUnknownTypes_Lookup(p *radius.Packet) (value FreeRADIUSTotalAuthUnknownTypes, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 137)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAuthUnknownTypes(i)
+ return
+}
+
+func FreeRADIUSTotalAuthUnknownTypes_Set(p *radius.Packet, value FreeRADIUSTotalAuthUnknownTypes) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 137, a)
+}
+
+func FreeRADIUSTotalAuthUnknownTypes_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 137)
+}
+
+type FreeRADIUSTotalProxyAccessRequests uint32
+
+var FreeRADIUSTotalProxyAccessRequests_Strings = map[FreeRADIUSTotalProxyAccessRequests]string{}
+
+func (a FreeRADIUSTotalProxyAccessRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAccessRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAccessRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAccessRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccessRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 138, a)
+}
+
+func FreeRADIUSTotalProxyAccessRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccessRequests) {
+ value, _ = FreeRADIUSTotalProxyAccessRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAccessRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccessRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 138) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAccessRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAccessRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccessRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 138)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAccessRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAccessRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccessRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 138, a)
+}
+
+func FreeRADIUSTotalProxyAccessRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 138)
+}
+
+type FreeRADIUSTotalProxyAccessAccepts uint32
+
+var FreeRADIUSTotalProxyAccessAccepts_Strings = map[FreeRADIUSTotalProxyAccessAccepts]string{}
+
+func (a FreeRADIUSTotalProxyAccessAccepts) String() string {
+ if str, ok := FreeRADIUSTotalProxyAccessAccepts_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAccessAccepts(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAccessAccepts_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccessAccepts) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 139, a)
+}
+
+func FreeRADIUSTotalProxyAccessAccepts_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccessAccepts) {
+ value, _ = FreeRADIUSTotalProxyAccessAccepts_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAccessAccepts_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccessAccepts, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 139) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAccessAccepts(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAccessAccepts_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccessAccepts, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 139)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAccessAccepts(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAccessAccepts_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccessAccepts) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 139, a)
+}
+
+func FreeRADIUSTotalProxyAccessAccepts_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 139)
+}
+
+type FreeRADIUSTotalProxyAccessRejects uint32
+
+var FreeRADIUSTotalProxyAccessRejects_Strings = map[FreeRADIUSTotalProxyAccessRejects]string{}
+
+func (a FreeRADIUSTotalProxyAccessRejects) String() string {
+ if str, ok := FreeRADIUSTotalProxyAccessRejects_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAccessRejects(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAccessRejects_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccessRejects) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 140, a)
+}
+
+func FreeRADIUSTotalProxyAccessRejects_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccessRejects) {
+ value, _ = FreeRADIUSTotalProxyAccessRejects_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAccessRejects_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccessRejects, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 140) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAccessRejects(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAccessRejects_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccessRejects, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 140)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAccessRejects(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAccessRejects_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccessRejects) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 140, a)
+}
+
+func FreeRADIUSTotalProxyAccessRejects_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 140)
+}
+
+type FreeRADIUSTotalProxyAccessChallenges uint32
+
+var FreeRADIUSTotalProxyAccessChallenges_Strings = map[FreeRADIUSTotalProxyAccessChallenges]string{}
+
+func (a FreeRADIUSTotalProxyAccessChallenges) String() string {
+ if str, ok := FreeRADIUSTotalProxyAccessChallenges_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAccessChallenges(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAccessChallenges_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccessChallenges) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 141, a)
+}
+
+func FreeRADIUSTotalProxyAccessChallenges_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccessChallenges) {
+ value, _ = FreeRADIUSTotalProxyAccessChallenges_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAccessChallenges_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccessChallenges, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 141) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAccessChallenges(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAccessChallenges_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccessChallenges, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 141)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAccessChallenges(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAccessChallenges_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccessChallenges) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 141, a)
+}
+
+func FreeRADIUSTotalProxyAccessChallenges_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 141)
+}
+
+type FreeRADIUSTotalProxyAuthResponses uint32
+
+var FreeRADIUSTotalProxyAuthResponses_Strings = map[FreeRADIUSTotalProxyAuthResponses]string{}
+
+func (a FreeRADIUSTotalProxyAuthResponses) String() string {
+ if str, ok := FreeRADIUSTotalProxyAuthResponses_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAuthResponses(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAuthResponses_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthResponses) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 142, a)
+}
+
+func FreeRADIUSTotalProxyAuthResponses_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthResponses) {
+ value, _ = FreeRADIUSTotalProxyAuthResponses_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthResponses_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthResponses, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 142) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAuthResponses(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAuthResponses_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthResponses, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 142)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAuthResponses(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthResponses_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthResponses) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 142, a)
+}
+
+func FreeRADIUSTotalProxyAuthResponses_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 142)
+}
+
+type FreeRADIUSTotalProxyAuthDuplicateRequests uint32
+
+var FreeRADIUSTotalProxyAuthDuplicateRequests_Strings = map[FreeRADIUSTotalProxyAuthDuplicateRequests]string{}
+
+func (a FreeRADIUSTotalProxyAuthDuplicateRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAuthDuplicateRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAuthDuplicateRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAuthDuplicateRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthDuplicateRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 143, a)
+}
+
+func FreeRADIUSTotalProxyAuthDuplicateRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthDuplicateRequests) {
+ value, _ = FreeRADIUSTotalProxyAuthDuplicateRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthDuplicateRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthDuplicateRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 143) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAuthDuplicateRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAuthDuplicateRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthDuplicateRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 143)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAuthDuplicateRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthDuplicateRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthDuplicateRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 143, a)
+}
+
+func FreeRADIUSTotalProxyAuthDuplicateRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 143)
+}
+
+type FreeRADIUSTotalProxyAuthMalformedRequests uint32
+
+var FreeRADIUSTotalProxyAuthMalformedRequests_Strings = map[FreeRADIUSTotalProxyAuthMalformedRequests]string{}
+
+func (a FreeRADIUSTotalProxyAuthMalformedRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAuthMalformedRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAuthMalformedRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAuthMalformedRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthMalformedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 144, a)
+}
+
+func FreeRADIUSTotalProxyAuthMalformedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthMalformedRequests) {
+ value, _ = FreeRADIUSTotalProxyAuthMalformedRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthMalformedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthMalformedRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 144) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAuthMalformedRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAuthMalformedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthMalformedRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 144)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAuthMalformedRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthMalformedRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthMalformedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 144, a)
+}
+
+func FreeRADIUSTotalProxyAuthMalformedRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 144)
+}
+
+type FreeRADIUSTotalProxyAuthInvalidRequests uint32
+
+var FreeRADIUSTotalProxyAuthInvalidRequests_Strings = map[FreeRADIUSTotalProxyAuthInvalidRequests]string{}
+
+func (a FreeRADIUSTotalProxyAuthInvalidRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAuthInvalidRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAuthInvalidRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAuthInvalidRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthInvalidRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 145, a)
+}
+
+func FreeRADIUSTotalProxyAuthInvalidRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthInvalidRequests) {
+ value, _ = FreeRADIUSTotalProxyAuthInvalidRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthInvalidRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthInvalidRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 145) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAuthInvalidRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAuthInvalidRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthInvalidRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 145)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAuthInvalidRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthInvalidRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthInvalidRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 145, a)
+}
+
+func FreeRADIUSTotalProxyAuthInvalidRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 145)
+}
+
+type FreeRADIUSTotalProxyAuthDroppedRequests uint32
+
+var FreeRADIUSTotalProxyAuthDroppedRequests_Strings = map[FreeRADIUSTotalProxyAuthDroppedRequests]string{}
+
+func (a FreeRADIUSTotalProxyAuthDroppedRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAuthDroppedRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAuthDroppedRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAuthDroppedRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthDroppedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 146, a)
+}
+
+func FreeRADIUSTotalProxyAuthDroppedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthDroppedRequests) {
+ value, _ = FreeRADIUSTotalProxyAuthDroppedRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthDroppedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthDroppedRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 146) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAuthDroppedRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAuthDroppedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthDroppedRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 146)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAuthDroppedRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthDroppedRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthDroppedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 146, a)
+}
+
+func FreeRADIUSTotalProxyAuthDroppedRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 146)
+}
+
+type FreeRADIUSTotalProxyAuthUnknownTypes uint32
+
+var FreeRADIUSTotalProxyAuthUnknownTypes_Strings = map[FreeRADIUSTotalProxyAuthUnknownTypes]string{}
+
+func (a FreeRADIUSTotalProxyAuthUnknownTypes) String() string {
+ if str, ok := FreeRADIUSTotalProxyAuthUnknownTypes_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAuthUnknownTypes(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAuthUnknownTypes_Add(p *radius.Packet, value FreeRADIUSTotalProxyAuthUnknownTypes) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 147, a)
+}
+
+func FreeRADIUSTotalProxyAuthUnknownTypes_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAuthUnknownTypes) {
+ value, _ = FreeRADIUSTotalProxyAuthUnknownTypes_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthUnknownTypes_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAuthUnknownTypes, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 147) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAuthUnknownTypes(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAuthUnknownTypes_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAuthUnknownTypes, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 147)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAuthUnknownTypes(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAuthUnknownTypes_Set(p *radius.Packet, value FreeRADIUSTotalProxyAuthUnknownTypes) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 147, a)
+}
+
+func FreeRADIUSTotalProxyAuthUnknownTypes_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 147)
+}
+
+type FreeRADIUSTotalAccountingRequests uint32
+
+var FreeRADIUSTotalAccountingRequests_Strings = map[FreeRADIUSTotalAccountingRequests]string{}
+
+func (a FreeRADIUSTotalAccountingRequests) String() string {
+ if str, ok := FreeRADIUSTotalAccountingRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAccountingRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAccountingRequests_Add(p *radius.Packet, value FreeRADIUSTotalAccountingRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 148, a)
+}
+
+func FreeRADIUSTotalAccountingRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAccountingRequests) {
+ value, _ = FreeRADIUSTotalAccountingRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAccountingRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccountingRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 148) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAccountingRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAccountingRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccountingRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 148)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAccountingRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAccountingRequests_Set(p *radius.Packet, value FreeRADIUSTotalAccountingRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 148, a)
+}
+
+func FreeRADIUSTotalAccountingRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 148)
+}
+
+type FreeRADIUSTotalAccountingResponses uint32
+
+var FreeRADIUSTotalAccountingResponses_Strings = map[FreeRADIUSTotalAccountingResponses]string{}
+
+func (a FreeRADIUSTotalAccountingResponses) String() string {
+ if str, ok := FreeRADIUSTotalAccountingResponses_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAccountingResponses(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAccountingResponses_Add(p *radius.Packet, value FreeRADIUSTotalAccountingResponses) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 149, a)
+}
+
+func FreeRADIUSTotalAccountingResponses_Get(p *radius.Packet) (value FreeRADIUSTotalAccountingResponses) {
+ value, _ = FreeRADIUSTotalAccountingResponses_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAccountingResponses_Gets(p *radius.Packet) (values []FreeRADIUSTotalAccountingResponses, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 149) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAccountingResponses(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAccountingResponses_Lookup(p *radius.Packet) (value FreeRADIUSTotalAccountingResponses, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 149)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAccountingResponses(i)
+ return
+}
+
+func FreeRADIUSTotalAccountingResponses_Set(p *radius.Packet, value FreeRADIUSTotalAccountingResponses) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 149, a)
+}
+
+func FreeRADIUSTotalAccountingResponses_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 149)
+}
+
+type FreeRADIUSTotalAcctDuplicateRequests uint32
+
+var FreeRADIUSTotalAcctDuplicateRequests_Strings = map[FreeRADIUSTotalAcctDuplicateRequests]string{}
+
+func (a FreeRADIUSTotalAcctDuplicateRequests) String() string {
+ if str, ok := FreeRADIUSTotalAcctDuplicateRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAcctDuplicateRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAcctDuplicateRequests_Add(p *radius.Packet, value FreeRADIUSTotalAcctDuplicateRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 150, a)
+}
+
+func FreeRADIUSTotalAcctDuplicateRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAcctDuplicateRequests) {
+ value, _ = FreeRADIUSTotalAcctDuplicateRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAcctDuplicateRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctDuplicateRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 150) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAcctDuplicateRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAcctDuplicateRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctDuplicateRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 150)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAcctDuplicateRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAcctDuplicateRequests_Set(p *radius.Packet, value FreeRADIUSTotalAcctDuplicateRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 150, a)
+}
+
+func FreeRADIUSTotalAcctDuplicateRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 150)
+}
+
+type FreeRADIUSTotalAcctMalformedRequests uint32
+
+var FreeRADIUSTotalAcctMalformedRequests_Strings = map[FreeRADIUSTotalAcctMalformedRequests]string{}
+
+func (a FreeRADIUSTotalAcctMalformedRequests) String() string {
+ if str, ok := FreeRADIUSTotalAcctMalformedRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAcctMalformedRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAcctMalformedRequests_Add(p *radius.Packet, value FreeRADIUSTotalAcctMalformedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 151, a)
+}
+
+func FreeRADIUSTotalAcctMalformedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAcctMalformedRequests) {
+ value, _ = FreeRADIUSTotalAcctMalformedRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAcctMalformedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctMalformedRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 151) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAcctMalformedRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAcctMalformedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctMalformedRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 151)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAcctMalformedRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAcctMalformedRequests_Set(p *radius.Packet, value FreeRADIUSTotalAcctMalformedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 151, a)
+}
+
+func FreeRADIUSTotalAcctMalformedRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 151)
+}
+
+type FreeRADIUSTotalAcctInvalidRequests uint32
+
+var FreeRADIUSTotalAcctInvalidRequests_Strings = map[FreeRADIUSTotalAcctInvalidRequests]string{}
+
+func (a FreeRADIUSTotalAcctInvalidRequests) String() string {
+ if str, ok := FreeRADIUSTotalAcctInvalidRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAcctInvalidRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAcctInvalidRequests_Add(p *radius.Packet, value FreeRADIUSTotalAcctInvalidRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 152, a)
+}
+
+func FreeRADIUSTotalAcctInvalidRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAcctInvalidRequests) {
+ value, _ = FreeRADIUSTotalAcctInvalidRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAcctInvalidRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctInvalidRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 152) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAcctInvalidRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAcctInvalidRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctInvalidRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 152)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAcctInvalidRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAcctInvalidRequests_Set(p *radius.Packet, value FreeRADIUSTotalAcctInvalidRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 152, a)
+}
+
+func FreeRADIUSTotalAcctInvalidRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 152)
+}
+
+type FreeRADIUSTotalAcctDroppedRequests uint32
+
+var FreeRADIUSTotalAcctDroppedRequests_Strings = map[FreeRADIUSTotalAcctDroppedRequests]string{}
+
+func (a FreeRADIUSTotalAcctDroppedRequests) String() string {
+ if str, ok := FreeRADIUSTotalAcctDroppedRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAcctDroppedRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAcctDroppedRequests_Add(p *radius.Packet, value FreeRADIUSTotalAcctDroppedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 153, a)
+}
+
+func FreeRADIUSTotalAcctDroppedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalAcctDroppedRequests) {
+ value, _ = FreeRADIUSTotalAcctDroppedRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAcctDroppedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctDroppedRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 153) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAcctDroppedRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAcctDroppedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctDroppedRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 153)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAcctDroppedRequests(i)
+ return
+}
+
+func FreeRADIUSTotalAcctDroppedRequests_Set(p *radius.Packet, value FreeRADIUSTotalAcctDroppedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 153, a)
+}
+
+func FreeRADIUSTotalAcctDroppedRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 153)
+}
+
+type FreeRADIUSTotalAcctUnknownTypes uint32
+
+var FreeRADIUSTotalAcctUnknownTypes_Strings = map[FreeRADIUSTotalAcctUnknownTypes]string{}
+
+func (a FreeRADIUSTotalAcctUnknownTypes) String() string {
+ if str, ok := FreeRADIUSTotalAcctUnknownTypes_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalAcctUnknownTypes(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalAcctUnknownTypes_Add(p *radius.Packet, value FreeRADIUSTotalAcctUnknownTypes) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 154, a)
+}
+
+func FreeRADIUSTotalAcctUnknownTypes_Get(p *radius.Packet) (value FreeRADIUSTotalAcctUnknownTypes) {
+ value, _ = FreeRADIUSTotalAcctUnknownTypes_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalAcctUnknownTypes_Gets(p *radius.Packet) (values []FreeRADIUSTotalAcctUnknownTypes, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 154) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalAcctUnknownTypes(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalAcctUnknownTypes_Lookup(p *radius.Packet) (value FreeRADIUSTotalAcctUnknownTypes, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 154)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalAcctUnknownTypes(i)
+ return
+}
+
+func FreeRADIUSTotalAcctUnknownTypes_Set(p *radius.Packet, value FreeRADIUSTotalAcctUnknownTypes) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 154, a)
+}
+
+func FreeRADIUSTotalAcctUnknownTypes_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 154)
+}
+
+type FreeRADIUSTotalProxyAccountingRequests uint32
+
+var FreeRADIUSTotalProxyAccountingRequests_Strings = map[FreeRADIUSTotalProxyAccountingRequests]string{}
+
+func (a FreeRADIUSTotalProxyAccountingRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAccountingRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAccountingRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAccountingRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccountingRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 155, a)
+}
+
+func FreeRADIUSTotalProxyAccountingRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccountingRequests) {
+ value, _ = FreeRADIUSTotalProxyAccountingRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAccountingRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccountingRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 155) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAccountingRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAccountingRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccountingRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 155)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAccountingRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAccountingRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccountingRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 155, a)
+}
+
+func FreeRADIUSTotalProxyAccountingRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 155)
+}
+
+type FreeRADIUSTotalProxyAccountingResponses uint32
+
+var FreeRADIUSTotalProxyAccountingResponses_Strings = map[FreeRADIUSTotalProxyAccountingResponses]string{}
+
+func (a FreeRADIUSTotalProxyAccountingResponses) String() string {
+ if str, ok := FreeRADIUSTotalProxyAccountingResponses_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAccountingResponses(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAccountingResponses_Add(p *radius.Packet, value FreeRADIUSTotalProxyAccountingResponses) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 156, a)
+}
+
+func FreeRADIUSTotalProxyAccountingResponses_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAccountingResponses) {
+ value, _ = FreeRADIUSTotalProxyAccountingResponses_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAccountingResponses_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAccountingResponses, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 156) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAccountingResponses(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAccountingResponses_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAccountingResponses, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 156)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAccountingResponses(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAccountingResponses_Set(p *radius.Packet, value FreeRADIUSTotalProxyAccountingResponses) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 156, a)
+}
+
+func FreeRADIUSTotalProxyAccountingResponses_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 156)
+}
+
+type FreeRADIUSTotalProxyAcctDuplicateRequests uint32
+
+var FreeRADIUSTotalProxyAcctDuplicateRequests_Strings = map[FreeRADIUSTotalProxyAcctDuplicateRequests]string{}
+
+func (a FreeRADIUSTotalProxyAcctDuplicateRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAcctDuplicateRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAcctDuplicateRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAcctDuplicateRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctDuplicateRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 157, a)
+}
+
+func FreeRADIUSTotalProxyAcctDuplicateRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctDuplicateRequests) {
+ value, _ = FreeRADIUSTotalProxyAcctDuplicateRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctDuplicateRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctDuplicateRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 157) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAcctDuplicateRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAcctDuplicateRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctDuplicateRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 157)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAcctDuplicateRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctDuplicateRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctDuplicateRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 157, a)
+}
+
+func FreeRADIUSTotalProxyAcctDuplicateRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 157)
+}
+
+type FreeRADIUSTotalProxyAcctMalformedRequests uint32
+
+var FreeRADIUSTotalProxyAcctMalformedRequests_Strings = map[FreeRADIUSTotalProxyAcctMalformedRequests]string{}
+
+func (a FreeRADIUSTotalProxyAcctMalformedRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAcctMalformedRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAcctMalformedRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAcctMalformedRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctMalformedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 158, a)
+}
+
+func FreeRADIUSTotalProxyAcctMalformedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctMalformedRequests) {
+ value, _ = FreeRADIUSTotalProxyAcctMalformedRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctMalformedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctMalformedRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 158) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAcctMalformedRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAcctMalformedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctMalformedRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 158)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAcctMalformedRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctMalformedRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctMalformedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 158, a)
+}
+
+func FreeRADIUSTotalProxyAcctMalformedRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 158)
+}
+
+type FreeRADIUSTotalProxyAcctInvalidRequests uint32
+
+var FreeRADIUSTotalProxyAcctInvalidRequests_Strings = map[FreeRADIUSTotalProxyAcctInvalidRequests]string{}
+
+func (a FreeRADIUSTotalProxyAcctInvalidRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAcctInvalidRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAcctInvalidRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAcctInvalidRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctInvalidRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 159, a)
+}
+
+func FreeRADIUSTotalProxyAcctInvalidRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctInvalidRequests) {
+ value, _ = FreeRADIUSTotalProxyAcctInvalidRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctInvalidRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctInvalidRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 159) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAcctInvalidRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAcctInvalidRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctInvalidRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 159)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAcctInvalidRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctInvalidRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctInvalidRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 159, a)
+}
+
+func FreeRADIUSTotalProxyAcctInvalidRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 159)
+}
+
+type FreeRADIUSTotalProxyAcctDroppedRequests uint32
+
+var FreeRADIUSTotalProxyAcctDroppedRequests_Strings = map[FreeRADIUSTotalProxyAcctDroppedRequests]string{}
+
+func (a FreeRADIUSTotalProxyAcctDroppedRequests) String() string {
+ if str, ok := FreeRADIUSTotalProxyAcctDroppedRequests_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAcctDroppedRequests(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAcctDroppedRequests_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctDroppedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 160, a)
+}
+
+func FreeRADIUSTotalProxyAcctDroppedRequests_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctDroppedRequests) {
+ value, _ = FreeRADIUSTotalProxyAcctDroppedRequests_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctDroppedRequests_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctDroppedRequests, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 160) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAcctDroppedRequests(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAcctDroppedRequests_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctDroppedRequests, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 160)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAcctDroppedRequests(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctDroppedRequests_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctDroppedRequests) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 160, a)
+}
+
+func FreeRADIUSTotalProxyAcctDroppedRequests_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 160)
+}
+
+type FreeRADIUSTotalProxyAcctUnknownTypes uint32
+
+var FreeRADIUSTotalProxyAcctUnknownTypes_Strings = map[FreeRADIUSTotalProxyAcctUnknownTypes]string{}
+
+func (a FreeRADIUSTotalProxyAcctUnknownTypes) String() string {
+ if str, ok := FreeRADIUSTotalProxyAcctUnknownTypes_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSTotalProxyAcctUnknownTypes(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSTotalProxyAcctUnknownTypes_Add(p *radius.Packet, value FreeRADIUSTotalProxyAcctUnknownTypes) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 161, a)
+}
+
+func FreeRADIUSTotalProxyAcctUnknownTypes_Get(p *radius.Packet) (value FreeRADIUSTotalProxyAcctUnknownTypes) {
+ value, _ = FreeRADIUSTotalProxyAcctUnknownTypes_Lookup(p)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctUnknownTypes_Gets(p *radius.Packet) (values []FreeRADIUSTotalProxyAcctUnknownTypes, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 161) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSTotalProxyAcctUnknownTypes(i))
+ }
+ return
+}
+
+func FreeRADIUSTotalProxyAcctUnknownTypes_Lookup(p *radius.Packet) (value FreeRADIUSTotalProxyAcctUnknownTypes, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 161)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSTotalProxyAcctUnknownTypes(i)
+ return
+}
+
+func FreeRADIUSTotalProxyAcctUnknownTypes_Set(p *radius.Packet, value FreeRADIUSTotalProxyAcctUnknownTypes) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 161, a)
+}
+
+func FreeRADIUSTotalProxyAcctUnknownTypes_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 161)
+}
+
+type FreeRADIUSQueueLenInternal uint32
+
+var FreeRADIUSQueueLenInternal_Strings = map[FreeRADIUSQueueLenInternal]string{}
+
+func (a FreeRADIUSQueueLenInternal) String() string {
+ if str, ok := FreeRADIUSQueueLenInternal_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSQueueLenInternal(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSQueueLenInternal_Add(p *radius.Packet, value FreeRADIUSQueueLenInternal) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 162, a)
+}
+
+func FreeRADIUSQueueLenInternal_Get(p *radius.Packet) (value FreeRADIUSQueueLenInternal) {
+ value, _ = FreeRADIUSQueueLenInternal_Lookup(p)
+ return
+}
+
+func FreeRADIUSQueueLenInternal_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenInternal, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 162) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSQueueLenInternal(i))
+ }
+ return
+}
+
+func FreeRADIUSQueueLenInternal_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenInternal, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 162)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSQueueLenInternal(i)
+ return
+}
+
+func FreeRADIUSQueueLenInternal_Set(p *radius.Packet, value FreeRADIUSQueueLenInternal) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 162, a)
+}
+
+func FreeRADIUSQueueLenInternal_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 162)
+}
+
+type FreeRADIUSQueueLenProxy uint32
+
+var FreeRADIUSQueueLenProxy_Strings = map[FreeRADIUSQueueLenProxy]string{}
+
+func (a FreeRADIUSQueueLenProxy) String() string {
+ if str, ok := FreeRADIUSQueueLenProxy_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSQueueLenProxy(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSQueueLenProxy_Add(p *radius.Packet, value FreeRADIUSQueueLenProxy) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 163, a)
+}
+
+func FreeRADIUSQueueLenProxy_Get(p *radius.Packet) (value FreeRADIUSQueueLenProxy) {
+ value, _ = FreeRADIUSQueueLenProxy_Lookup(p)
+ return
+}
+
+func FreeRADIUSQueueLenProxy_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenProxy, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 163) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSQueueLenProxy(i))
+ }
+ return
+}
+
+func FreeRADIUSQueueLenProxy_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenProxy, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 163)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSQueueLenProxy(i)
+ return
+}
+
+func FreeRADIUSQueueLenProxy_Set(p *radius.Packet, value FreeRADIUSQueueLenProxy) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 163, a)
+}
+
+func FreeRADIUSQueueLenProxy_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 163)
+}
+
+type FreeRADIUSQueueLenAuth uint32
+
+var FreeRADIUSQueueLenAuth_Strings = map[FreeRADIUSQueueLenAuth]string{}
+
+func (a FreeRADIUSQueueLenAuth) String() string {
+ if str, ok := FreeRADIUSQueueLenAuth_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSQueueLenAuth(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSQueueLenAuth_Add(p *radius.Packet, value FreeRADIUSQueueLenAuth) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 164, a)
+}
+
+func FreeRADIUSQueueLenAuth_Get(p *radius.Packet) (value FreeRADIUSQueueLenAuth) {
+ value, _ = FreeRADIUSQueueLenAuth_Lookup(p)
+ return
+}
+
+func FreeRADIUSQueueLenAuth_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenAuth, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 164) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSQueueLenAuth(i))
+ }
+ return
+}
+
+func FreeRADIUSQueueLenAuth_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenAuth, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 164)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSQueueLenAuth(i)
+ return
+}
+
+func FreeRADIUSQueueLenAuth_Set(p *radius.Packet, value FreeRADIUSQueueLenAuth) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 164, a)
+}
+
+func FreeRADIUSQueueLenAuth_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 164)
+}
+
+type FreeRADIUSQueueLenAcct uint32
+
+var FreeRADIUSQueueLenAcct_Strings = map[FreeRADIUSQueueLenAcct]string{}
+
+func (a FreeRADIUSQueueLenAcct) String() string {
+ if str, ok := FreeRADIUSQueueLenAcct_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSQueueLenAcct(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSQueueLenAcct_Add(p *radius.Packet, value FreeRADIUSQueueLenAcct) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 165, a)
+}
+
+func FreeRADIUSQueueLenAcct_Get(p *radius.Packet) (value FreeRADIUSQueueLenAcct) {
+ value, _ = FreeRADIUSQueueLenAcct_Lookup(p)
+ return
+}
+
+func FreeRADIUSQueueLenAcct_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenAcct, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 165) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSQueueLenAcct(i))
+ }
+ return
+}
+
+func FreeRADIUSQueueLenAcct_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenAcct, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 165)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSQueueLenAcct(i)
+ return
+}
+
+func FreeRADIUSQueueLenAcct_Set(p *radius.Packet, value FreeRADIUSQueueLenAcct) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 165, a)
+}
+
+func FreeRADIUSQueueLenAcct_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 165)
+}
+
+type FreeRADIUSQueueLenDetail uint32
+
+var FreeRADIUSQueueLenDetail_Strings = map[FreeRADIUSQueueLenDetail]string{}
+
+func (a FreeRADIUSQueueLenDetail) String() string {
+ if str, ok := FreeRADIUSQueueLenDetail_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSQueueLenDetail(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSQueueLenDetail_Add(p *radius.Packet, value FreeRADIUSQueueLenDetail) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 166, a)
+}
+
+func FreeRADIUSQueueLenDetail_Get(p *radius.Packet) (value FreeRADIUSQueueLenDetail) {
+ value, _ = FreeRADIUSQueueLenDetail_Lookup(p)
+ return
+}
+
+func FreeRADIUSQueueLenDetail_Gets(p *radius.Packet) (values []FreeRADIUSQueueLenDetail, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 166) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSQueueLenDetail(i))
+ }
+ return
+}
+
+func FreeRADIUSQueueLenDetail_Lookup(p *radius.Packet) (value FreeRADIUSQueueLenDetail, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 166)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSQueueLenDetail(i)
+ return
+}
+
+func FreeRADIUSQueueLenDetail_Set(p *radius.Packet, value FreeRADIUSQueueLenDetail) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 166, a)
+}
+
+func FreeRADIUSQueueLenDetail_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 166)
+}
+
+func FreeRADIUSStatsStartTime_Add(p *radius.Packet, value time.Time) (err error) {
+ var a radius.Attribute
+ a, err = radius.NewDate(value)
+ if err != nil {
+ return
+ }
+ return _FreeRADIUS_AddVendor(p, 176, a)
+}
+
+func FreeRADIUSStatsStartTime_Get(p *radius.Packet) (value time.Time) {
+ value, _ = FreeRADIUSStatsStartTime_Lookup(p)
+ return
+}
+
+func FreeRADIUSStatsStartTime_Gets(p *radius.Packet) (values []time.Time, err error) {
+ var i time.Time
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 176) {
+ i, err = radius.Date(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, i)
+ }
+ return
+}
+
+func FreeRADIUSStatsStartTime_Lookup(p *radius.Packet) (value time.Time, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 176)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ value, err = radius.Date(a)
+ return
+}
+
+func FreeRADIUSStatsStartTime_Set(p *radius.Packet, value time.Time) (err error) {
+ var a radius.Attribute
+ a, err = radius.NewDate(value)
+ if err != nil {
+ return
+ }
+ return _FreeRADIUS_SetVendor(p, 176, a)
+}
+
+func FreeRADIUSStatsStartTime_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 176)
+}
+
+func FreeRADIUSStatsHUPTime_Add(p *radius.Packet, value time.Time) (err error) {
+ var a radius.Attribute
+ a, err = radius.NewDate(value)
+ if err != nil {
+ return
+ }
+ return _FreeRADIUS_AddVendor(p, 177, a)
+}
+
+func FreeRADIUSStatsHUPTime_Get(p *radius.Packet) (value time.Time) {
+ value, _ = FreeRADIUSStatsHUPTime_Lookup(p)
+ return
+}
+
+func FreeRADIUSStatsHUPTime_Gets(p *radius.Packet) (values []time.Time, err error) {
+ var i time.Time
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 177) {
+ i, err = radius.Date(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, i)
+ }
+ return
+}
+
+func FreeRADIUSStatsHUPTime_Lookup(p *radius.Packet) (value time.Time, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 177)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ value, err = radius.Date(a)
+ return
+}
+
+func FreeRADIUSStatsHUPTime_Set(p *radius.Packet, value time.Time) (err error) {
+ var a radius.Attribute
+ a, err = radius.NewDate(value)
+ if err != nil {
+ return
+ }
+ return _FreeRADIUS_SetVendor(p, 177, a)
+}
+
+func FreeRADIUSStatsHUPTime_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 177)
+}
+
+type FreeRADIUSQueuePPSIn uint32
+
+var FreeRADIUSQueuePPSIn_Strings = map[FreeRADIUSQueuePPSIn]string{}
+
+func (a FreeRADIUSQueuePPSIn) String() string {
+ if str, ok := FreeRADIUSQueuePPSIn_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSQueuePPSIn(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSQueuePPSIn_Add(p *radius.Packet, value FreeRADIUSQueuePPSIn) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 181, a)
+}
+
+func FreeRADIUSQueuePPSIn_Get(p *radius.Packet) (value FreeRADIUSQueuePPSIn) {
+ value, _ = FreeRADIUSQueuePPSIn_Lookup(p)
+ return
+}
+
+func FreeRADIUSQueuePPSIn_Gets(p *radius.Packet) (values []FreeRADIUSQueuePPSIn, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 181) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSQueuePPSIn(i))
+ }
+ return
+}
+
+func FreeRADIUSQueuePPSIn_Lookup(p *radius.Packet) (value FreeRADIUSQueuePPSIn, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 181)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSQueuePPSIn(i)
+ return
+}
+
+func FreeRADIUSQueuePPSIn_Set(p *radius.Packet, value FreeRADIUSQueuePPSIn) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 181, a)
+}
+
+func FreeRADIUSQueuePPSIn_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 181)
+}
+
+type FreeRADIUSQueuePPSOut uint32
+
+var FreeRADIUSQueuePPSOut_Strings = map[FreeRADIUSQueuePPSOut]string{}
+
+func (a FreeRADIUSQueuePPSOut) String() string {
+ if str, ok := FreeRADIUSQueuePPSOut_Strings[a]; ok {
+ return str
+ }
+ return "FreeRADIUSQueuePPSOut(" + strconv.FormatUint(uint64(a), 10) + ")"
+}
+
+func FreeRADIUSQueuePPSOut_Add(p *radius.Packet, value FreeRADIUSQueuePPSOut) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_AddVendor(p, 182, a)
+}
+
+func FreeRADIUSQueuePPSOut_Get(p *radius.Packet) (value FreeRADIUSQueuePPSOut) {
+ value, _ = FreeRADIUSQueuePPSOut_Lookup(p)
+ return
+}
+
+func FreeRADIUSQueuePPSOut_Gets(p *radius.Packet) (values []FreeRADIUSQueuePPSOut, err error) {
+ var i uint32
+ for _, attr := range _FreeRADIUS_GetsVendor(p, 182) {
+ i, err = radius.Integer(attr)
+ if err != nil {
+ return
+ }
+ values = append(values, FreeRADIUSQueuePPSOut(i))
+ }
+ return
+}
+
+func FreeRADIUSQueuePPSOut_Lookup(p *radius.Packet) (value FreeRADIUSQueuePPSOut, err error) {
+ a, ok := _FreeRADIUS_LookupVendor(p, 182)
+ if !ok {
+ err = radius.ErrNoAttribute
+ return
+ }
+ var i uint32
+ i, err = radius.Integer(a)
+ if err != nil {
+ return
+ }
+ value = FreeRADIUSQueuePPSOut(i)
+ return
+}
+
+func FreeRADIUSQueuePPSOut_Set(p *radius.Packet, value FreeRADIUSQueuePPSOut) (err error) {
+ a := radius.NewInteger(uint32(value))
+ return _FreeRADIUS_SetVendor(p, 182, a)
+}
+
+func FreeRADIUSQueuePPSOut_Del(p *radius.Packet) {
+ _FreeRADIUS_DelVendor(p, 182)
+}
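The generated accessors above all follow the same Add/Get/Gets/Lookup/Set/Del pattern over FreeRADIUS vendor attributes. A minimal client-side sketch of how such helpers are typically consumed follows; it assumes the layeh.com/radius client API that this kind of dictionary is generated for, uses only helper names defined above, and omits the package clause and imports (context, layeh.com/radius). The address, secret, and function name are illustrative placeholders, not part of this patch.

	// exampleReadStats sends a Status-Server request and reads two of the counters
	// defined above from the reply. Illustrative sketch only.
	func exampleReadStats(ctx context.Context) error {
		req := radius.New(radius.CodeStatusServer, []byte("adminsecret"))
		// A FreeRADIUS-Statistics-Type attribute (defined earlier in this dictionary)
		// would normally be set here to select which counters the server returns.

		client := &radius.Client{}
		reply, err := client.Exchange(ctx, req, "127.0.0.1:18121")
		if err != nil {
			return err
		}

		// Get returns the zero value when the attribute is missing;
		// Lookup reports radius.ErrNoAttribute instead.
		_ = FreeRADIUSTotalProxyAccessRequests_Get(reply)
		if _, err := FreeRADIUSQueueLenInternal_Lookup(reply); err != nil {
			return err
		}
		return nil
	}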
diff --git a/src/go/plugin/go.d/modules/freeradius/charts.go b/src/go/plugin/go.d/modules/freeradius/charts.go
new file mode 100644
index 000000000..a9df720fc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/charts.go
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package freeradius
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+)
+
+var charts = Charts{
+ {
+ ID: "authentication",
+ Title: "Authentication",
+ Units: "packets/s",
+ Fam: "authentication",
+ Ctx: "freeradius.authentication",
+ Dims: Dims{
+ {ID: "access-requests", Name: "requests", Algo: module.Incremental},
+ {ID: "auth-responses", Name: "responses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "authentication_responses",
+ Title: "Authentication Responses",
+ Units: "packets/s",
+ Fam: "authentication",
+ Ctx: "freeradius.authentication_access_responses",
+ Dims: Dims{
+ {ID: "access-accepts", Name: "accepts", Algo: module.Incremental},
+ {ID: "access-rejects", Name: "rejects", Algo: module.Incremental},
+ {ID: "access-challenges", Name: "challenges", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "bad_authentication_requests",
+ Title: "Bad Authentication Requests",
+ Units: "packets/s",
+ Fam: "authentication",
+ Ctx: "freeradius.bad_authentication",
+ Dims: Dims{
+ {ID: "auth-dropped-requests", Name: "dropped", Algo: module.Incremental},
+ {ID: "auth-duplicate-requests", Name: "duplicate", Algo: module.Incremental},
+ {ID: "auth-invalid-requests", Name: "invalid", Algo: module.Incremental},
+ {ID: "auth-malformed-requests", Name: "malformed", Algo: module.Incremental},
+ {ID: "auth-unknown-types", Name: "unknown-types", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "proxy_authentication",
+ Title: "Authentication",
+ Units: "packets/s",
+ Fam: "proxy authentication",
+ Ctx: "freeradius.proxy_authentication",
+ Dims: Dims{
+ {ID: "proxy-access-requests", Name: "requests", Algo: module.Incremental},
+ {ID: "proxy-auth-responses", Name: "responses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "proxy_authentication_responses",
+ Title: "Authentication Responses",
+ Units: "packets/s",
+ Fam: "proxy authentication",
+ Ctx: "freeradius.proxy_authentication_access_responses",
+ Dims: Dims{
+ {ID: "proxy-access-accepts", Name: "accepts", Algo: module.Incremental},
+ {ID: "proxy-access-rejects", Name: "rejects", Algo: module.Incremental},
+ {ID: "proxy-access-challenges", Name: "challenges", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "bad_proxy_authentication_requests",
+ Title: "Bad Authentication Requests",
+ Units: "packets/s",
+ Fam: "proxy authentication",
+ Ctx: "freeradius.proxy_bad_authentication",
+ Dims: Dims{
+ {ID: "proxy-auth-dropped-requests", Name: "dropped", Algo: module.Incremental},
+ {ID: "proxy-auth-duplicate-requests", Name: "duplicate", Algo: module.Incremental},
+ {ID: "proxy-auth-invalid-requests", Name: "invalid", Algo: module.Incremental},
+ {ID: "proxy-auth-malformed-requests", Name: "malformed", Algo: module.Incremental},
+ {ID: "proxy-auth-unknown-types", Name: "unknown-types", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "accounting",
+ Title: "Accounting",
+ Units: "packets/s",
+ Fam: "accounting",
+ Ctx: "freeradius.accounting",
+ Dims: Dims{
+ {ID: "accounting-requests", Name: "requests", Algo: module.Incremental},
+ {ID: "accounting-responses", Name: "responses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "bad_accounting_requests",
+ Title: "Bad Accounting Requests",
+ Units: "packets/s",
+ Fam: "accounting",
+ Ctx: "freeradius.bad_accounting",
+ Dims: Dims{
+ {ID: "acct-dropped-requests", Name: "dropped", Algo: module.Incremental},
+ {ID: "acct-duplicate-requests", Name: "duplicate", Algo: module.Incremental},
+ {ID: "acct-invalid-requests", Name: "invalid", Algo: module.Incremental},
+ {ID: "acct-malformed-requests", Name: "malformed", Algo: module.Incremental},
+ {ID: "acct-unknown-types", Name: "unknown-types", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "proxy_accounting",
+ Title: "Accounting",
+ Units: "packets/s",
+ Fam: "proxy accounting",
+ Ctx: "freeradius.proxy_accounting",
+ Dims: Dims{
+ {ID: "proxy-accounting-requests", Name: "requests", Algo: module.Incremental},
+ {ID: "proxy-accounting-responses", Name: "responses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "bad_proxy_accounting_requests",
+ Title: "Bad Accounting Requests",
+ Units: "packets/s",
+ Fam: "proxy accounting",
+ Ctx: "freeradius.proxy_bad_accounting",
+ Dims: Dims{
+ {ID: "proxy-acct-dropped-requests", Name: "dropped", Algo: module.Incremental},
+ {ID: "proxy-acct-duplicate-requests", Name: "duplicate", Algo: module.Incremental},
+ {ID: "proxy-acct-invalid-requests", Name: "invalid", Algo: module.Incremental},
+ {ID: "proxy-acct-malformed-requests", Name: "malformed", Algo: module.Incremental},
+ {ID: "proxy-acct-unknown-types", Name: "unknown-types", Algo: module.Incremental},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/freeradius/collect.go b/src/go/plugin/go.d/modules/freeradius/collect.go
new file mode 100644
index 000000000..05fd82322
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/collect.go
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package freeradius
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+func (f *FreeRADIUS) collect() (map[string]int64, error) {
+ status, err := f.client.Status()
+ if err != nil {
+ return nil, err
+ }
+
+ return stm.ToMap(status), nil
+}
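
Note: `stm.ToMap` flattens the `api.Status` struct into the `map[string]int64` that the framework expects, keyed by struct tags. A rough standalone sketch of the idea follows; it is not the actual `pkg/stm` implementation, and the `stm` tag name and the int-only handling are simplifying assumptions for illustration.

```go
package main

import (
	"fmt"
	"reflect"
)

// toMap copies exported integer fields into a map, using the "stm" struct tag as the key.
func toMap(s any) map[string]int64 {
	mx := make(map[string]int64)
	v := reflect.Indirect(reflect.ValueOf(s))
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		key := t.Field(i).Tag.Get("stm")
		if key == "" {
			continue
		}
		if f := v.Field(i); f.CanInt() {
			mx[key] = f.Int()
		}
	}
	return mx
}

func main() {
	type status struct {
		AccessRequests int64 `stm:"access-requests"`
		AccessAccepts  int64 `stm:"access-accepts"`
	}
	fmt.Println(toMap(status{AccessRequests: 1, AccessAccepts: 2}))
	// Output: map[access-accepts:2 access-requests:1]
}
```
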
diff --git a/src/go/plugin/go.d/modules/freeradius/config_schema.json b/src/go/plugin/go.d/modules/freeradius/config_schema.json
new file mode 100644
index 000000000..7e1a3a4e9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/config_schema.json
@@ -0,0 +1,60 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "FreeRADIUS collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "Server address.",
+ "type": "string",
+ "default": "127.0.0.1"
+ },
+ "port": {
+ "title": "Port",
+ "description": "Server port.",
+ "type": "integer",
+ "default": 18121
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "secret": {
+ "title": "Secret",
+ "description": "Shared secret key.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "address",
+ "port",
+ "secret"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "secret": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/freeradius/freeradius.go b/src/go/plugin/go.d/modules/freeradius/freeradius.go
new file mode 100644
index 000000000..e3c995b5e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/freeradius.go
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package freeradius
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/freeradius/api"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("freeradius", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *FreeRADIUS {
+ return &FreeRADIUS{
+ Config: Config{
+ Address: "127.0.0.1",
+ Port: 18121,
+ Secret: "adminsecret",
+ Timeout: web.Duration(time.Second),
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Port int `yaml:"port" json:"port"`
+ Secret string `yaml:"secret" json:"secret"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+}
+
+type (
+ FreeRADIUS struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ client
+ }
+ client interface {
+ Status() (*api.Status, error)
+ }
+)
+
+func (f *FreeRADIUS) Configuration() any {
+ return f.Config
+}
+
+func (f *FreeRADIUS) Init() error {
+ if err := f.validateConfig(); err != nil {
+ f.Errorf("config validation: %v", err)
+ return err
+ }
+
+ f.client = api.New(api.Config{
+ Address: f.Address,
+ Port: f.Port,
+ Secret: f.Secret,
+ Timeout: f.Timeout.Duration(),
+ })
+
+ return nil
+}
+
+func (f *FreeRADIUS) Check() error {
+ mx, err := f.collect()
+ if err != nil {
+ f.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (f *FreeRADIUS) Charts() *Charts {
+ return charts.Copy()
+}
+
+func (f *FreeRADIUS) Collect() map[string]int64 {
+ mx, err := f.collect()
+ if err != nil {
+ f.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (f *FreeRADIUS) Cleanup() {}
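
Note: the module implements the usual go.d lifecycle (`Init` → `Check` → `Charts` → periodic `Collect` → `Cleanup`). The sketch below is a hypothetical, heavily simplified driver loop meant only to show how these methods relate; the real scheduler lives in `agent/module` and also handles retries, chart updates, and dynamic configuration.

```go
package sketch

import "time"

// collector mirrors the subset of the module interface exercised below.
type collector interface {
	Init() error
	Check() error
	Collect() map[string]int64
	Cleanup()
}

// runJob is an illustrative driver, not the real go.d job scheduler.
func runJob(c collector, every time.Duration) {
	defer c.Cleanup()
	if err := c.Init(); err != nil {
		return // configuration or setup problem, give up
	}
	if err := c.Check(); err != nil {
		return // first collection failed; the real scheduler may retry
	}
	tick := time.NewTicker(every)
	defer tick.Stop()
	for range tick.C {
		_ = c.Collect() // the agent turns this map into chart dimension updates
	}
}
```
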
diff --git a/src/go/plugin/go.d/modules/freeradius/freeradius_test.go b/src/go/plugin/go.d/modules/freeradius/freeradius_test.go
new file mode 100644
index 000000000..58e2dce59
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/freeradius_test.go
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package freeradius
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/freeradius/api"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestFreeRADIUS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &FreeRADIUS{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestFreeRADIUS_Init(t *testing.T) {
+ freeRADIUS := New()
+
+ assert.NoError(t, freeRADIUS.Init())
+}
+
+func TestFreeRADIUS_Init_ReturnsFalseIfAddressNotSet(t *testing.T) {
+ freeRADIUS := New()
+ freeRADIUS.Address = ""
+
+ assert.Error(t, freeRADIUS.Init())
+}
+
+func TestFreeRADIUS_Init_ReturnsFalseIfPortNotSet(t *testing.T) {
+ freeRADIUS := New()
+ freeRADIUS.Port = 0
+
+ assert.Error(t, freeRADIUS.Init())
+}
+
+func TestFreeRADIUS_Init_ReturnsFalseIfSecretNotSet(t *testing.T) {
+ freeRADIUS := New()
+ freeRADIUS.Secret = ""
+
+ assert.Error(t, freeRADIUS.Init())
+}
+
+func TestFreeRADIUS_Check(t *testing.T) {
+ freeRADIUS := New()
+ freeRADIUS.client = newOKMockClient()
+
+ assert.NoError(t, freeRADIUS.Check())
+}
+
+func TestFreeRADIUS_Check_ReturnsFalseIfClientStatusReturnsError(t *testing.T) {
+ freeRADIUS := New()
+ freeRADIUS.client = newErrorMockClient()
+
+ assert.Error(t, freeRADIUS.Check())
+}
+
+func TestFreeRADIUS_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestFreeRADIUS_Collect(t *testing.T) {
+ freeRADIUS := New()
+ freeRADIUS.client = newOKMockClient()
+
+ expected := map[string]int64{
+ "access-requests": 1,
+ "access-accepts": 2,
+ "access-rejects": 3,
+ "access-challenges": 4,
+ "auth-responses": 5,
+ "auth-duplicate-requests": 6,
+ "auth-malformed-requests": 7,
+ "auth-invalid-requests": 8,
+ "auth-dropped-requests": 9,
+ "auth-unknown-types": 10,
+ "accounting-requests": 11,
+ "accounting-responses": 12,
+ "acct-duplicate-requests": 13,
+ "acct-malformed-requests": 14,
+ "acct-invalid-requests": 15,
+ "acct-dropped-requests": 16,
+ "acct-unknown-types": 17,
+ "proxy-access-requests": 18,
+ "proxy-access-accepts": 19,
+ "proxy-access-rejects": 20,
+ "proxy-access-challenges": 21,
+ "proxy-auth-responses": 22,
+ "proxy-auth-duplicate-requests": 23,
+ "proxy-auth-malformed-requests": 24,
+ "proxy-auth-invalid-requests": 25,
+ "proxy-auth-dropped-requests": 26,
+ "proxy-auth-unknown-types": 27,
+ "proxy-accounting-requests": 28,
+ "proxy-accounting-responses": 29,
+ "proxy-acct-duplicate-requests": 30,
+ "proxy-acct-malformed-requests": 31,
+ "proxy-acct-invalid-requests": 32,
+ "proxy-acct-dropped-requests": 33,
+ "proxy-acct-unknown-types": 34,
+ }
+ collected := freeRADIUS.Collect()
+
+ assert.Equal(t, expected, collected)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, freeRADIUS, collected)
+}
+
+func TestFreeRADIUS_Collect_ReturnsNilIfClientStatusReturnsError(t *testing.T) {
+ freeRADIUS := New()
+ freeRADIUS.client = newErrorMockClient()
+
+ assert.Nil(t, freeRADIUS.Collect())
+}
+
+func TestFreeRADIUS_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, f *FreeRADIUS, collected map[string]int64) {
+ for _, chart := range *f.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func newOKMockClient() *mockClient {
+ return &mockClient{}
+}
+
+func newErrorMockClient() *mockClient {
+ return &mockClient{errOnStatus: true}
+}
+
+type mockClient struct {
+ errOnStatus bool
+}
+
+func (m mockClient) Status() (*api.Status, error) {
+ if m.errOnStatus {
+ return nil, errors.New("mock Status error")
+ }
+
+ status := &api.Status{
+ AccessRequests: 1,
+ AccessAccepts: 2,
+ AccessRejects: 3,
+ AccessChallenges: 4,
+ AuthResponses: 5,
+ AuthDuplicateRequests: 6,
+ AuthMalformedRequests: 7,
+ AuthInvalidRequests: 8,
+ AuthDroppedRequests: 9,
+ AuthUnknownTypes: 10,
+ AccountingRequests: 11,
+ AccountingResponses: 12,
+ AcctDuplicateRequests: 13,
+ AcctMalformedRequests: 14,
+ AcctInvalidRequests: 15,
+ AcctDroppedRequests: 16,
+ AcctUnknownTypes: 17,
+ ProxyAccessRequests: 18,
+ ProxyAccessAccepts: 19,
+ ProxyAccessRejects: 20,
+ ProxyAccessChallenges: 21,
+ ProxyAuthResponses: 22,
+ ProxyAuthDuplicateRequests: 23,
+ ProxyAuthMalformedRequests: 24,
+ ProxyAuthInvalidRequests: 25,
+ ProxyAuthDroppedRequests: 26,
+ ProxyAuthUnknownTypes: 27,
+ ProxyAccountingRequests: 28,
+ ProxyAccountingResponses: 29,
+ ProxyAcctDuplicateRequests: 30,
+ ProxyAcctMalformedRequests: 31,
+ ProxyAcctInvalidRequests: 32,
+ ProxyAcctDroppedRequests: 33,
+ ProxyAcctUnknownTypes: 34,
+ }
+ return status, nil
+}
diff --git a/src/go/plugin/go.d/modules/freeradius/init.go b/src/go/plugin/go.d/modules/freeradius/init.go
new file mode 100644
index 000000000..9c14da0ea
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/init.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package freeradius
+
+import (
+ "errors"
+)
+
+func (f *FreeRADIUS) validateConfig() error {
+ if f.Address == "" {
+ return errors.New("address not set")
+ }
+ if f.Port == 0 {
+ return errors.New("port not set")
+ }
+ if f.Secret == "" {
+ return errors.New("secret not set")
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/freeradius/integrations/freeradius.md b/src/go/plugin/go.d/modules/freeradius/integrations/freeradius.md
new file mode 100644
index 000000000..59b124f7e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/integrations/freeradius.md
@@ -0,0 +1,234 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/freeradius/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/freeradius/metadata.yaml"
+sidebar_label: "FreeRADIUS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# FreeRADIUS
+
+
+<img src="https://netdata.cloud/img/freeradius.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: freeradius
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors FreeRADIUS servers.
+
+It collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It automatically detects FreeRADIUS instances running on localhost.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per FreeRADIUS instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| freeradius.authentication | requests, responses | packets/s |
+| freeradius.authentication_access_responses | accepts, rejects, challenges | packets/s |
+| freeradius.bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |
+| freeradius.proxy_authentication | requests, responses | packets/s |
+| freeradius.proxy_authentication_access_responses | accepts, rejects, challenges | packets/s |
+| freeradius.proxy_bad_authentication | dropped, duplicate, invalid, malformed, unknown-types | packets/s |
+| freeradius.accounting | requests, responses | packets/s |
+| freeradius.bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |
+| freeradius.proxy_accounting | requests, responses | packets/s |
+| freeradius.proxy_bad_accounting | dropped, duplicate, invalid, malformed, unknown-types | packets/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable status server
+
+To enable the status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).
+
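+For example, on a Debian/Ubuntu package install you can usually enable the shipped `status` virtual server by symlinking it into `sites-enabled` (paths vary by distribution; verify that the port and the `admin` client secret defined in that file match this collector's `port` and `secret` options):
+
+```bash
+cd /etc/freeradius/3.0/sites-enabled
+sudo ln -s ../sites-available/status status
+sudo systemctl restart freeradius
+```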
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/freeradius.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/freeradius.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Server address. | 127.0.0.1 | yes |
+| port | Server port. | 18121 | no |
+| secret | FreeRADIUS secret. | adminsecret | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1
+ port: 18121
+ secret: adminsecret
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1
+ port: 18121
+ secret: adminsecret
+
+ - name: remote
+ address: 192.0.2.1
+ port: 18121
+ secret: adminsecret
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `freeradius` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m freeradius
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `freeradius` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep freeradius
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep freeradius /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep freeradius
+```
+
+
diff --git a/src/go/plugin/go.d/modules/freeradius/metadata.yaml b/src/go/plugin/go.d/modules/freeradius/metadata.yaml
new file mode 100644
index 000000000..5ecdcf417
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/metadata.yaml
@@ -0,0 +1,206 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-freeradius
+ plugin_name: go.d.plugin
+ module_name: freeradius
+ monitored_instance:
+ name: FreeRADIUS
+ link: https://freeradius.org/
+ categories:
+ - data-collection.authentication-and-authorization
+ icon_filename: freeradius.svg
+ keywords:
+ - freeradius
+ - radius
+ most_popular: false
+ info_provided_to_referring_integrations:
+ description: ""
+ related_resources:
+ integrations:
+ list: []
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors FreeRADIUS servers.
+
+ It collects metrics by sending [status-server](https://wiki.freeradius.org/config/Status) messages to the server.
+ method_description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ It automatically detects FreeRADIUS instances running on localhost.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list:
+ - title: Enable status server
+ description: |
+ To enable the status server, follow the [official documentation](https://wiki.freeradius.org/config/Status).
+ configuration:
+ file:
+ name: go.d/freeradius.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: Server address.
+ default_value: 127.0.0.1
+ required: true
+ - name: port
+ description: Server port.
+ default_value: 18121
+ required: false
+ - name: secret
+ description: FreeRADIUS secret.
+ default_value: adminsecret
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1
+ port: 18121
+ secret: adminsecret
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1
+ port: 18121
+ secret: adminsecret
+
+ - name: remote
+ address: 192.0.2.1
+ port: 18121
+ secret: adminsecret
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: freeradius.authentication
+ description: Authentication
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: responses
+ - name: freeradius.authentication_access_responses
+ description: Authentication Responses
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: accepts
+ - name: rejects
+ - name: challenges
+ - name: freeradius.bad_authentication
+ description: Bad Authentication Requests
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: dropped
+ - name: duplicate
+ - name: invalid
+ - name: malformed
+ - name: unknown-types
+ - name: freeradius.proxy_authentication
+ description: Authentication
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: responses
+ - name: freeradius.proxy_authentication_access_responses
+ description: Authentication Responses
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: accepts
+ - name: rejects
+ - name: challenges
+ - name: freeradius.proxy_bad_authentication
+ description: Bad Authentication Requests
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: dropped
+ - name: duplicate
+ - name: invalid
+ - name: malformed
+ - name: unknown-types
+ - name: freeradius.accounting
+ description: Accounting
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: responses
+ - name: freeradius.bad_accounting
+ description: Bad Accounting Requests
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: dropped
+ - name: duplicate
+ - name: invalid
+ - name: malformed
+ - name: unknown-types
+ - name: freeradius.proxy_accounting
+ description: Accounting
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: responses
+ - name: freeradius.proxy_bad_accounting
+ description: Bad Accounting Requests
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: dropped
+ - name: duplicate
+ - name: invalid
+ - name: malformed
+ - name: unknown-types
diff --git a/src/go/plugin/go.d/modules/freeradius/testdata/config.json b/src/go/plugin/go.d/modules/freeradius/testdata/config.json
new file mode 100644
index 000000000..5a1939b60
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/testdata/config.json
@@ -0,0 +1,7 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "port": 123,
+ "secret": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/freeradius/testdata/config.yaml b/src/go/plugin/go.d/modules/freeradius/testdata/config.yaml
new file mode 100644
index 000000000..4a3d1f8cd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/freeradius/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+address: "ok"
+port: 123
+secret: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/gearman/README.md b/src/go/plugin/go.d/modules/gearman/README.md
new file mode 120000
index 000000000..70189d698
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/README.md
@@ -0,0 +1 @@
+integrations/gearman.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/gearman/charts.go b/src/go/plugin/go.d/modules/gearman/charts.go
new file mode 100644
index 000000000..425c00fd4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/charts.go
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package gearman
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioQueuedJobsByActivity = module.Priority + iota
+ prioQueuedJobsByPriority
+
+ prioFunctionQueuedJobsByActivity
+ prioFunctionQueuedJobsByPriority
+ prioFunctionAvailableWorkers
+)
+
+var summaryCharts = module.Charts{
+ chartQueuedJobsActivity.Copy(),
+ chartQueuedJobsPriority.Copy(),
+}
+
+var (
+ chartQueuedJobsActivity = module.Chart{
+ ID: "queued_jobs_by_activity",
+ Title: "Jobs Activity",
+ Units: "jobs",
+ Fam: "jobs",
+ Ctx: "gearman.queued_jobs_activity",
+ Priority: prioQueuedJobsByActivity,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "total_jobs_running", Name: "running"},
+ {ID: "total_jobs_waiting", Name: "waiting"},
+ },
+ }
+ chartQueuedJobsPriority = module.Chart{
+ ID: "queued_jobs_by_priority",
+ Title: "Jobs Priority",
+ Units: "jobs",
+ Fam: "jobs",
+ Ctx: "gearman.queued_jobs_priority",
+ Priority: prioQueuedJobsByPriority,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "total_high_priority_jobs", Name: "high"},
+ {ID: "total_normal_priority_jobs", Name: "normal"},
+ {ID: "total_low_priority_jobs", Name: "low"},
+ },
+ }
+)
+
+var functionStatusChartsTmpl = module.Charts{
+ functionQueuedJobsActivityChartTmpl.Copy(),
+ functionWorkersChartTmpl.Copy(),
+}
+
+var (
+ functionQueuedJobsActivityChartTmpl = module.Chart{
+ ID: "function_%s_queued_jobs_by_activity",
+ Title: "Function Jobs Activity",
+ Units: "jobs",
+ Fam: "fn jobs",
+ Ctx: "gearman.function_queued_jobs_activity",
+ Priority: prioFunctionQueuedJobsByActivity,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "function_%s_jobs_running", Name: "running"},
+ {ID: "function_%s_jobs_waiting", Name: "waiting"},
+ },
+ }
+ functionWorkersChartTmpl = module.Chart{
+ ID: "function_%s_workers",
+ Title: "Function Workers",
+ Units: "workers",
+ Fam: "fn workers",
+ Ctx: "gearman.function_workers",
+ Priority: prioFunctionAvailableWorkers,
+ Type: module.Line,
+ Dims: module.Dims{
+ {ID: "function_%s_workers_available", Name: "available"},
+ },
+ }
+)
+
+var functionPriorityStatusChartsTmpl = module.Charts{
+ functionQueuedJobsByPriorityChartTmpl.Copy(),
+}
+
+var (
+ functionQueuedJobsByPriorityChartTmpl = module.Chart{
+ ID: "prio_function_%s_queued_jobs_by_priority",
+ Title: "Function Jobs Priority",
+ Units: "jobs",
+ Fam: "fn jobs",
+ Ctx: "gearman.function_queued_jobs_priority",
+ Priority: prioFunctionQueuedJobsByPriority,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "function_%s_high_priority_jobs", Name: "high"},
+ {ID: "function_%s_normal_priority_jobs", Name: "normal"},
+ {ID: "function_%s_low_priority_jobs", Name: "low"},
+ },
+ }
+)
+
+func (g *Gearman) addFunctionStatusCharts(name string) {
+ g.addFunctionCharts(name, functionStatusChartsTmpl.Copy())
+}
+
+func (g *Gearman) removeFunctionStatusCharts(name string) {
+ px := fmt.Sprintf("function_%s_", cleanFunctionName(name))
+ g.removeCharts(px)
+}
+
+func (g *Gearman) addFunctionPriorityStatusCharts(name string) {
+ g.addFunctionCharts(name, functionPriorityStatusChartsTmpl.Copy())
+}
+
+func (g *Gearman) removeFunctionPriorityStatusCharts(name string) {
+ px := fmt.Sprintf("prio_function_%s_", cleanFunctionName(name))
+ g.removeCharts(px)
+}
+
+func (g *Gearman) addFunctionCharts(name string, charts *module.Charts) {
+ charts = charts.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanFunctionName(name))
+ chart.Labels = []module.Label{
+ {Key: "function_name", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := g.Charts().Add(*charts...); err != nil {
+ g.Warning(err)
+ }
+}
+
+func (g *Gearman) removeCharts(px string) {
+ for _, chart := range *g.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func cleanFunctionName(name string) string {
+ r := strings.NewReplacer(".", "_", ",", "_", " ", "_")
+ return r.Replace(name)
+}
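
Note: the `%s` placeholders in the template chart and dim IDs above are filled in with the function name when `addFunctionCharts` is called. A tiny standalone illustration of the resulting IDs (the function name `send_email` is hypothetical):

```go
package main

import "fmt"

func main() {
	name := "send_email" // hypothetical Gearman function name
	fmt.Printf("function_%s_queued_jobs_by_activity\n", name) // chart ID after templating
	fmt.Printf("function_%s_jobs_running\n", name)            // dim ID; must match a key in the collected metrics map
}
```
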
diff --git a/src/go/plugin/go.d/modules/gearman/client.go b/src/go/plugin/go.d/modules/gearman/client.go
new file mode 100644
index 000000000..dff9a1be4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/client.go
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package gearman
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
+type gearmanConn interface {
+ connect() error
+ disconnect()
+ queryStatus() ([]byte, error)
+ queryPriorityStatus() ([]byte, error)
+}
+
+func newGearmanConn(conf Config) gearmanConn {
+ return &gearmanClient{conn: socket.New(socket.Config{
+ Address: conf.Address,
+ ConnectTimeout: conf.Timeout.Duration(),
+ ReadTimeout: conf.Timeout.Duration(),
+ WriteTimeout: conf.Timeout.Duration(),
+ })}
+}
+
+type gearmanClient struct {
+ conn socket.Client
+}
+
+func (c *gearmanClient) connect() error {
+ return c.conn.Connect()
+}
+
+func (c *gearmanClient) disconnect() {
+ _ = c.conn.Disconnect()
+}
+
+func (c *gearmanClient) queryStatus() ([]byte, error) {
+ return c.query("status")
+}
+
+func (c *gearmanClient) queryPriorityStatus() ([]byte, error) {
+ return c.query("prioritystatus")
+}
+
+func (c *gearmanClient) query(cmd string) ([]byte, error) {
+ const limitReadLines = 10000
+ var num int
+ var err error
+ var b bytes.Buffer
+
+ clientErr := c.conn.Command(cmd+"\n", func(bs []byte) bool {
+ s := string(bs)
+
+ if strings.HasPrefix(s, "ERR") {
+ err = fmt.Errorf("command '%s': %s", cmd, s)
+ return false
+ }
+
+ b.WriteString(s)
+ b.WriteByte('\n')
+
+ if num++; num >= limitReadLines {
+ err = fmt.Errorf("command '%s': read line limit exceeded (%d)", cmd, limitReadLines)
+ return false
+ }
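+ // a line starting with "." marks the end of the response, so stop reading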
+ return !strings.HasPrefix(s, ".")
+ })
+ if clientErr != nil {
+ return nil, fmt.Errorf("command '%s' client error: %v", cmd, clientErr)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/src/go/plugin/go.d/modules/gearman/collect.go b/src/go/plugin/go.d/modules/gearman/collect.go
new file mode 100644
index 000000000..ddfd8c96b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/collect.go
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package gearman
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func (g *Gearman) collect() (map[string]int64, error) {
+ if g.conn == nil {
+ conn, err := g.establishConn()
+ if err != nil {
+ return nil, err
+ }
+ g.conn = conn
+ }
+
+ status, err := g.conn.queryStatus()
+ if err != nil {
+ g.Cleanup()
+ return nil, fmt.Errorf("couldn't query status: %v", err)
+ }
+
+ prioStatus, err := g.conn.queryPriorityStatus()
+ if err != nil {
+ g.Cleanup()
+ return nil, fmt.Errorf("couldn't query priority status: %v", err)
+ }
+
+ mx := make(map[string]int64)
+
+ if err := g.collectStatus(mx, status); err != nil {
+ return nil, fmt.Errorf("couldn't collect status: %v", err)
+ }
+ if err := g.collectPriorityStatus(mx, prioStatus); err != nil {
+ return nil, fmt.Errorf("couldn't collect priority status: %v", err)
+ }
+
+ return mx, nil
+
+}
+
+func (g *Gearman) collectStatus(mx map[string]int64, statusData []byte) error {
+ /*
+ Same output as the "gearadmin --status" command:
+
+ FUNCTION\tTOTAL\tRUNNING\tAVAILABLE_WORKERS
+
+ E.g.:
+
+ prefix generic_worker4 78 78 500
+ generic_worker2 78 78 500
+ generic_worker3 0 0 760
+ generic_worker1 0 0 500
+ */
+
+ seen := make(map[string]bool)
+ var foundEnd bool
+ sc := bufio.NewScanner(bytes.NewReader(statusData))
+
+ mx["total_jobs_queued"] = 0
+ mx["total_jobs_running"] = 0
+ mx["total_jobs_waiting"] = 0
+ mx["total_workers_avail"] = 0
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+
+ if foundEnd = line == "."; foundEnd {
+ break
+ }
+
+ parts := strings.Fields(line)
+
+ // Gearman does not remove old tasks. We are only interested in tasks that have stats.
+ if len(parts) < 4 {
+ continue
+ }
+
+ name := strings.Join(parts[:len(parts)-3], "_")
+ metrics := parts[len(parts)-3:]
+
+ var queued, running, availWorkers int64
+ var err error
+
+ if queued, err = strconv.ParseInt(metrics[0], 10, 64); err != nil {
+ return fmt.Errorf("couldn't parse queued count: %v", err)
+ }
+ if running, err = strconv.ParseInt(metrics[1], 10, 64); err != nil {
+ return fmt.Errorf("couldn't parse running count: %v", err)
+ }
+ if availWorkers, err = strconv.ParseInt(metrics[2], 10, 64); err != nil {
+ return fmt.Errorf("couldn't parse available count: %v", err)
+ }
+
+ px := fmt.Sprintf("function_%s_", name)
+
+ waiting := queued - running
+
+ mx[px+"jobs_queued"] = queued
+ mx[px+"jobs_running"] = running
+ mx[px+"jobs_waiting"] = waiting
+ mx[px+"workers_available"] = availWorkers
+
+ mx["total_jobs_queued"] += queued
+ mx["total_jobs_running"] += running
+ mx["total_jobs_waiting"] += waiting
+ mx["total_workers_available"] += availWorkers
+
+ seen[name] = true
+ }
+
+ if !foundEnd {
+ return errors.New("unexpected status response")
+ }
+
+ for name := range seen {
+ if !g.seenTasks[name] {
+ g.seenTasks[name] = true
+ g.addFunctionStatusCharts(name)
+ }
+ }
+ for name := range g.seenTasks {
+ if !seen[name] {
+ delete(g.seenTasks, name)
+ g.removeFunctionStatusCharts(name)
+ }
+ }
+
+ return nil
+}
+
+func (g *Gearman) collectPriorityStatus(mx map[string]int64, prioStatusData []byte) error {
+ /*
+ Same output as the "gearadmin --priority-status" command:
+
+ FUNCTION\tHIGH\tNORMAL\tLOW\tAVAILABLE_WORKERS
+ */
+
+ seen := make(map[string]bool)
+ var foundEnd bool
+ sc := bufio.NewScanner(bytes.NewReader(prioStatusData))
+
+ mx["total_high_priority_jobs"] = 0
+ mx["total_normal_priority_jobs"] = 0
+ mx["total_low_priority_jobs"] = 0
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+
+ if foundEnd = line == "."; foundEnd {
+ break
+ }
+
+ parts := strings.Fields(line)
+ if len(parts) < 5 {
+ continue
+ }
+
+ name := strings.Join(parts[:len(parts)-4], "_")
+ metrics := parts[len(parts)-4:]
+
+ var high, normal, low int64
+ var err error
+
+ if high, err = strconv.ParseInt(metrics[0], 10, 64); err != nil {
+ return fmt.Errorf("couldn't parse high count: %v", err)
+ }
+ if normal, err = strconv.ParseInt(metrics[1], 10, 64); err != nil {
+ return fmt.Errorf("couldn't parse normal count: %v", err)
+ }
+ if low, err = strconv.ParseInt(metrics[2], 10, 64); err != nil {
+ return fmt.Errorf("couldn't parse low count: %v", err)
+ }
+
+ px := fmt.Sprintf("function_%s_", name)
+
+ mx[px+"high_priority_jobs"] = high
+ mx[px+"normal_priority_jobs"] = normal
+ mx[px+"low_priority_jobs"] = low
+ mx["total_high_priority_jobs"] += high
+ mx["total_normal_priority_jobs"] += normal
+ mx["total_low_priority_jobs"] += low
+
+ seen[name] = true
+ }
+
+ if !foundEnd {
+ return errors.New("unexpected priority status response")
+ }
+
+ for name := range seen {
+ if !g.seenPriorityTasks[name] {
+ g.seenPriorityTasks[name] = true
+ g.addFunctionPriorityStatusCharts(name)
+ }
+ }
+ for name := range g.seenPriorityTasks {
+ if !seen[name] {
+ delete(g.seenPriorityTasks, name)
+ g.removeFunctionPriorityStatusCharts(name)
+ }
+ }
+
+ return nil
+}
+
+func (g *Gearman) establishConn() (gearmanConn, error) {
+ conn := g.newConn(g.Config)
+
+ if err := conn.connect(); err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
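
Note: to make the split above concrete, here is how a single `status` line (taken from `testdata/status.txt`) is decomposed; this standalone snippet mirrors the parsing in `collectStatus`:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "prefix generic_worker4 78 78 500"
	parts := strings.Fields(line)

	name := strings.Join(parts[:len(parts)-3], "_") // "prefix_generic_worker4"
	metrics := parts[len(parts)-3:]                 // queued="78", running="78", available workers="500"

	fmt.Println(name, metrics)
	// The collector then emits keys such as "function_prefix_generic_worker4_jobs_queued".
}
```
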
diff --git a/src/go/plugin/go.d/modules/gearman/config_schema.json b/src/go/plugin/go.d/modules/gearman/config_schema.json
new file mode 100644
index 000000000..dd5d3a0b8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/config_schema.json
@@ -0,0 +1,44 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Gearman collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the Gearman service listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:4730"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/gearman/gearman.go b/src/go/plugin/go.d/modules/gearman/gearman.go
new file mode 100644
index 000000000..e1780a95c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/gearman.go
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package gearman
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("gearman", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Gearman {
+ return &Gearman{
+ Config: Config{
+ Address: "127.0.0.1:4730",
+ Timeout: web.Duration(time.Second * 1),
+ },
+ newConn: newGearmanConn,
+ charts: summaryCharts.Copy(),
+ seenTasks: make(map[string]bool),
+ seenPriorityTasks: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+}
+
+type Gearman struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ newConn func(Config) gearmanConn
+ conn gearmanConn
+
+ seenTasks map[string]bool
+ seenPriorityTasks map[string]bool
+}
+
+func (g *Gearman) Configuration() any {
+ return g.Config
+}
+
+func (g *Gearman) Init() error {
+ if g.Address == "" {
+ g.Error("config: 'address' not set")
+ return errors.New("address not set")
+ }
+
+ return nil
+}
+
+func (g *Gearman) Check() error {
+ mx, err := g.collect()
+ if err != nil {
+ g.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (g *Gearman) Charts() *module.Charts {
+ return g.charts
+}
+
+func (g *Gearman) Collect() map[string]int64 {
+ mx, err := g.collect()
+ if err != nil {
+ g.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (g *Gearman) Cleanup() {
+ if g.conn != nil {
+ g.conn.disconnect()
+ g.conn = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/gearman/gearman_test.go b/src/go/plugin/go.d/modules/gearman/gearman_test.go
new file mode 100644
index 000000000..43069abce
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/gearman_test.go
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package gearman
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatus, _ = os.ReadFile("testdata/status.txt")
+ dataPriorityStatus, _ = os.ReadFile("testdata/priority-status.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataStatus": dataStatus,
+ "dataPriorityStatus": dataPriorityStatus,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestGearman_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Gearman{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestGearman_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success with default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if address not set": {
+ wantFail: true,
+ config: func() Config {
+ conf := New().Config
+ conf.Address = ""
+ return conf
+ }(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ gear := New()
+ gear.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, gear.Init())
+ } else {
+ assert.NoError(t, gear.Init())
+ }
+ })
+ }
+}
+
+func TestGearman_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Gearman
+ }{
+ "not initialized": {
+ prepare: func() *Gearman {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Gearman {
+ gear := New()
+ gear.newConn = func(config Config) gearmanConn { return prepareMockOk() }
+ _ = gear.Check()
+ return gear
+ },
+ },
+ "after collect": {
+ prepare: func() *Gearman {
+ gear := New()
+ gear.newConn = func(config Config) gearmanConn { return prepareMockOk() }
+ _ = gear.Collect()
+ return gear
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ gear := test.prepare()
+
+ assert.NotPanics(t, gear.Cleanup)
+ })
+ }
+}
+
+func TestGearman_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestGearman_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockGearmanConn
+ wantFail bool
+ }{
+ "success case": {
+ wantFail: false,
+ prepareMock: prepareMockOk,
+ },
+ "err on connect": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnConnect,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response": {
+ wantFail: false,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ gear := New()
+ mock := test.prepareMock()
+ gear.newConn = func(config Config) gearmanConn { return mock }
+
+ if test.wantFail {
+ assert.Error(t, gear.Check())
+ } else {
+ assert.NoError(t, gear.Check())
+ }
+ })
+ }
+}
+
+func TestGearman_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockGearmanConn
+ wantMetrics map[string]int64
+ wantCharts int
+ disconnectBeforeCleanup bool
+ disconnectAfterCleanup bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOk,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ wantCharts: len(summaryCharts) + len(functionStatusChartsTmpl)*4 + len(functionPriorityStatusChartsTmpl)*4,
+ wantMetrics: map[string]int64{
+ "function_generic_worker1_high_priority_jobs": 10,
+ "function_generic_worker1_jobs_queued": 4,
+ "function_generic_worker1_jobs_running": 3,
+ "function_generic_worker1_jobs_waiting": 1,
+ "function_generic_worker1_low_priority_jobs": 12,
+ "function_generic_worker1_normal_priority_jobs": 11,
+ "function_generic_worker1_workers_available": 500,
+ "function_generic_worker2_high_priority_jobs": 4,
+ "function_generic_worker2_jobs_queued": 78,
+ "function_generic_worker2_jobs_running": 78,
+ "function_generic_worker2_jobs_waiting": 0,
+ "function_generic_worker2_low_priority_jobs": 6,
+ "function_generic_worker2_normal_priority_jobs": 5,
+ "function_generic_worker2_workers_available": 500,
+ "function_generic_worker3_high_priority_jobs": 7,
+ "function_generic_worker3_jobs_queued": 2,
+ "function_generic_worker3_jobs_running": 1,
+ "function_generic_worker3_jobs_waiting": 1,
+ "function_generic_worker3_low_priority_jobs": 9,
+ "function_generic_worker3_normal_priority_jobs": 8,
+ "function_generic_worker3_workers_available": 760,
+ "function_prefix_generic_worker4_high_priority_jobs": 1,
+ "function_prefix_generic_worker4_jobs_queued": 78,
+ "function_prefix_generic_worker4_jobs_running": 78,
+ "function_prefix_generic_worker4_jobs_waiting": 0,
+ "function_prefix_generic_worker4_low_priority_jobs": 3,
+ "function_prefix_generic_worker4_normal_priority_jobs": 2,
+ "function_prefix_generic_worker4_workers_available": 500,
+ "total_high_priority_jobs": 22,
+ "total_jobs_queued": 162,
+ "total_jobs_running": 160,
+ "total_jobs_waiting": 2,
+ "total_low_priority_jobs": 30,
+ "total_normal_priority_jobs": 26,
+ "total_workers_avail": 0,
+ "total_workers_available": 2260,
+ },
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ wantCharts: len(summaryCharts),
+ wantMetrics: map[string]int64{
+ "total_high_priority_jobs": 0,
+ "total_jobs_queued": 0,
+ "total_jobs_running": 0,
+ "total_jobs_waiting": 0,
+ "total_low_priority_jobs": 0,
+ "total_normal_priority_jobs": 0,
+ "total_workers_avail": 0,
+ },
+ },
+ "err on connect": {
+ prepareMock: prepareMockErrOnConnect,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: false,
+ },
+ "err on query status": {
+ prepareMock: prepareMockErrOnQueryStatus,
+ disconnectBeforeCleanup: true,
+ disconnectAfterCleanup: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ gear := New()
+ mock := test.prepareMock()
+ gear.newConn = func(config Config) gearmanConn { return mock }
+
+ mx := gear.Collect()
+
+ require.Equal(t, test.wantMetrics, mx, "want metrics")
+
+ if len(test.wantMetrics) > 0 {
+ module.TestMetricsHasAllChartsDims(t, gear.Charts(), mx)
+ assert.Equal(t, test.wantCharts, len(*gear.Charts()), "want charts")
+ }
+
+ assert.Equal(t, test.disconnectBeforeCleanup, mock.disconnectCalled, "disconnect before cleanup")
+ gear.Cleanup()
+ assert.Equal(t, test.disconnectAfterCleanup, mock.disconnectCalled, "disconnect after cleanup")
+ })
+ }
+}
+
+func prepareMockOk() *mockGearmanConn {
+ return &mockGearmanConn{
+ responseStatus: dataStatus,
+ responsePriorityStatus: dataPriorityStatus,
+ }
+}
+
+func prepareMockErrOnConnect() *mockGearmanConn {
+ return &mockGearmanConn{
+ errOnConnect: true,
+ }
+}
+
+func prepareMockErrOnQueryStatus() *mockGearmanConn {
+ return &mockGearmanConn{
+ errOnQueryStatus: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockGearmanConn {
+ resp := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit.")
+ return &mockGearmanConn{
+ responseStatus: resp,
+ responsePriorityStatus: resp,
+ }
+}
+
+func prepareMockEmptyResponse() *mockGearmanConn {
+ return &mockGearmanConn{
+ responseStatus: []byte("."),
+ responsePriorityStatus: []byte("."),
+ }
+}
+
+type mockGearmanConn struct {
+ errOnConnect bool
+
+ responseStatus []byte
+ errOnQueryStatus bool
+
+ responsePriorityStatus []byte
+ errOnQueryPriorityStatus bool
+
+ disconnectCalled bool
+}
+
+func (m *mockGearmanConn) connect() error {
+ if m.errOnConnect {
+ return errors.New("mock.connect() error")
+ }
+ return nil
+}
+
+func (m *mockGearmanConn) disconnect() {
+ m.disconnectCalled = true
+}
+
+func (m *mockGearmanConn) queryStatus() ([]byte, error) {
+ if m.errOnQueryStatus {
+ return nil, errors.New("mock.queryStatus() error")
+ }
+ return m.responseStatus, nil
+}
+
+func (m *mockGearmanConn) queryPriorityStatus() ([]byte, error) {
+ if m.errOnQueryPriorityStatus {
+ return nil, errors.New("mock.queryPriorityStatus() error")
+ }
+ return m.responsePriorityStatus, nil
+}
diff --git a/src/go/plugin/go.d/modules/gearman/integrations/gearman.md b/src/go/plugin/go.d/modules/gearman/integrations/gearman.md
new file mode 100644
index 000000000..0a97a4cd4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/integrations/gearman.md
@@ -0,0 +1,235 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/gearman/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/gearman/metadata.yaml"
+sidebar_label: "Gearman"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Distributed Computing Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Gearman
+
+
+<img src="https://netdata.cloud/img/gearman.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: gearman
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitors jobs activity, priority and available workers. It collects summary and function-specific statistics.
+
+
+This collector connects to a Gearman instance via TCP socket and executes the following commands:
+
+- status
+- priority-status
+
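+You can reproduce these queries by hand to verify connectivity. The collector sends the plain-text `status` and `prioritystatus` commands over TCP (default port 4730); the `gearadmin` CLI, if installed, exposes the same information:
+
+```bash
+gearadmin --status            # FUNCTION  TOTAL  RUNNING  AVAILABLE_WORKERS
+gearadmin --priority-status   # FUNCTION  HIGH  NORMAL  LOW  AVAILABLE_WORKERS
+```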
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Gearman instances running on localhost that are listening on port 4730.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Gearman instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| gearman.queued_jobs_activity | running, waiting | jobs |
+| gearman.queued_jobs_priority | high, normal, low | jobs |
+
+### Per function
+
+These metrics refer to the Function (task).
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| function_name | Function name. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| gearman.function_queued_jobs_activity | running, waiting | jobs |
+| gearman.function_queued_jobs_priority | high, normal, low | jobs |
+| gearman.function_workers | available | workers |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/gearman.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/gearman.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | The IP address and port where the Gearman service listens for connections. | 127.0.0.1:4730 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:4730
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:4730
+
+ - name: remote
+ address: 203.0.113.0:4730
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `gearman` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m gearman
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `gearman` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep gearman
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep gearman /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep gearman
+```
+
+
diff --git a/src/go/plugin/go.d/modules/gearman/metadata.yaml b/src/go/plugin/go.d/modules/gearman/metadata.yaml
new file mode 100644
index 000000000..2312c9a53
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/metadata.yaml
@@ -0,0 +1,152 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-gearman
+ plugin_name: go.d.plugin
+ module_name: gearman
+ monitored_instance:
+ name: Gearman
+ link: https://gearman.org/
+ categories:
+ - data-collection.distributed-computing-systems
+ icon_filename: "gearman.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - gearman
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Monitors jobs activity, priority and available workers. It collects summary and function-specific statistics.
+ method_description: |
+ This collector connects to a Gearman instance via TCP socket and executes the following commands:
+
+ - status
+ - priority-status
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Gearman instances running on localhost that are listening on port 4730.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/gearman.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: The IP address and port where the Gearman service listens for connections.
+ default_value: 127.0.0.1:4730
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:4730
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:4730
+
+ - name: remote
+ address: 203.0.113.0:4730
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: gearman.queued_jobs_activity
+ description: Jobs Activity
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: waiting
+ - name: gearman.queued_jobs_priority
+ description: Jobs Priority
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: high
+ - name: normal
+ - name: low
+ - name: function
+ description: "These metrics refer to the Function (task)."
+ labels:
+ - name: function_name
+ description: Function name.
+ metrics:
+ - name: gearman.function_queued_jobs_activity
+ description: Function Jobs Activity
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: waiting
+ - name: gearman.function_queued_jobs_priority
+ description: Function Jobs Priority
+ unit: "jobs"
+ chart_type: stacked
+ dimensions:
+ - name: high
+ - name: normal
+ - name: low
+ - name: gearman.function_workers
+ description: Function Workers
+ unit: "workers"
+ chart_type: line
+ dimensions:
+ - name: available
diff --git a/src/go/plugin/go.d/modules/gearman/testdata/config.json b/src/go/plugin/go.d/modules/gearman/testdata/config.json
new file mode 100644
index 000000000..e86834720
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/gearman/testdata/config.yaml b/src/go/plugin/go.d/modules/gearman/testdata/config.yaml
new file mode 100644
index 000000000..1b81d09eb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/gearman/testdata/priority-status.txt b/src/go/plugin/go.d/modules/gearman/testdata/priority-status.txt
new file mode 100644
index 000000000..3cb669d10
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/testdata/priority-status.txt
@@ -0,0 +1,5 @@
+prefix generic_worker4 1 2 3 500
+generic_worker2 4 5 6 500
+generic_worker3 7 8 9 760
+generic_worker1 10 11 12 500
+.
diff --git a/src/go/plugin/go.d/modules/gearman/testdata/status.txt b/src/go/plugin/go.d/modules/gearman/testdata/status.txt
new file mode 100644
index 000000000..33d77ab83
--- /dev/null
+++ b/src/go/plugin/go.d/modules/gearman/testdata/status.txt
@@ -0,0 +1,5 @@
+prefix generic_worker4 78 78 500
+generic_worker2 78 78 500
+generic_worker3 2 1 760
+generic_worker1 4 3 500
+.
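
The two fixtures above mirror the plain-text replies of gearmand's administrative protocol that the collector parses: `status` reports one tab-separated record per function (name, total queued, running, available workers), `priority-status` adds per-priority queue counts, and a single `.` line terminates each reply. A minimal manual check, assuming gearmand is listening on 127.0.0.1:4730 and `nc` is available, looks like this:

```bash
# Query gearmand over its text-based admin protocol; expect
# FUNCTION<TAB>TOTAL<TAB>RUNNING<TAB>AVAILABLE_WORKERS lines terminated by "."
printf 'status\n' | nc 127.0.0.1 4730
```
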
diff --git a/src/go/plugin/go.d/modules/geth/README.md b/src/go/plugin/go.d/modules/geth/README.md
new file mode 120000
index 000000000..3a8eb0b68
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/README.md
@@ -0,0 +1 @@
+integrations/go-ethereum.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/geth/charts.go b/src/go/plugin/go.d/modules/geth/charts.go
new file mode 100644
index 000000000..5b87168a8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/charts.go
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package geth
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ Charts = module.Charts
+ Chart = module.Chart
+ Dims = module.Dims
+ Dim = module.Dim
+)
+
+var charts = Charts{
+ chartAncientChainData.Copy(),
+ chartChaindataDisk.Copy(),
+ chartAncientChainDataRate.Copy(),
+ chartChaindataDiskRate.Copy(),
+ chartChainDataSize.Copy(),
+ chartChainHead.Copy(),
+ chartP2PNetwork.Copy(),
+ chartNumberOfPeers.Copy(),
+ chartp2pDialsServes.Copy(),
+ chartReorgs.Copy(),
+ chartReorgsBlocks.Copy(),
+ chartGoRoutines.Copy(),
+ chartTxPoolCurrent.Copy(),
+ chartTxPoolQueued.Copy(),
+ chartTxPoolPending.Copy(),
+ chartRpcInformation.Copy(),
+}
+
+var (
+ chartAncientChainDataRate = Chart{
+ ID: "chaindata_ancient_rate",
+ Title: "Ancient Chaindata rate",
+ Units: "bytes/s",
+ Fam: "chaindata",
+ Ctx: "geth.eth_db_chaindata_ancient_io_rate",
+ Dims: Dims{
+ {ID: ethDbChainDataAncientRead, Name: "reads", Algo: "incremental"},
+ {ID: ethDbChainDataAncientWrite, Name: "writes", Mul: -1, Algo: "incremental"},
+ },
+ }
+
+ chartAncientChainData = Chart{
+ ID: "chaindata_ancient",
+ Title: "Session ancient Chaindata",
+ Units: "bytes",
+ Fam: "chaindata",
+ Ctx: "geth.eth_db_chaindata_ancient_io",
+ Dims: Dims{
+ {ID: ethDbChainDataAncientRead, Name: "reads"},
+ {ID: ethDbChainDataAncientWrite, Name: "writes", Mul: -1},
+ },
+ }
+ chartChaindataDisk = Chart{
+ ID: "chaindata_disk",
+ Title: "Session chaindata on disk",
+ Units: "bytes",
+ Fam: "chaindata",
+ Ctx: "geth.eth_db_chaindata_disk_io",
+ Dims: Dims{
+ {ID: ethDbChaindataDiskRead, Name: "reads"},
+ {ID: ethDbChainDataDiskWrite, Name: "writes", Mul: -1},
+ },
+ }
+ chartGoRoutines = Chart{
+ ID: "goroutines",
+ Title: "Number of goroutines",
+ Units: "goroutines",
+ Fam: "goroutines",
+ Ctx: "geth.goroutines",
+ Dims: Dims{
+ {ID: goRoutines, Name: "goroutines"},
+ },
+ }
+ chartChaindataDiskRate = Chart{
+ ID: "chaindata_disk_rate",
+ Title: "On disk Chaindata rate",
+ Units: "bytes/s",
+ Fam: "chaindata",
+ Ctx: "geth.eth_db_chaindata_disk_io_rate",
+ Dims: Dims{
+ {ID: ethDbChaindataDiskRead, Name: "reads", Algo: "incremental"},
+ {ID: ethDbChainDataDiskWrite, Name: "writes", Mul: -1, Algo: "incremental"},
+ },
+ }
+ chartChainDataSize = Chart{
+ ID: "chaindata_db_size",
+ Title: "Chaindata Size",
+ Units: "bytes",
+ Fam: "chaindata",
+ Ctx: "geth.chaindata_db_size",
+ Dims: Dims{
+ {ID: ethDbChainDataDiskSize, Name: "levelDB"},
+ {ID: ethDbChainDataAncientSize, Name: "ancientDB"},
+ },
+ }
+ chartChainHead = Chart{
+ ID: "chainhead_overall",
+ Title: "Chainhead",
+ Units: "block",
+ Fam: "chainhead",
+ Ctx: "geth.chainhead",
+ Dims: Dims{
+ {ID: chainHeadBlock, Name: "block"},
+ {ID: chainHeadReceipt, Name: "receipt"},
+ {ID: chainHeadHeader, Name: "header"},
+ },
+ }
+ chartTxPoolPending = Chart{
+ ID: "txpoolpending",
+ Title: "Pending Transaction Pool",
+ Units: "transactions",
+ Fam: "tx_pool",
+ Ctx: "geth.tx_pool_pending",
+ Dims: Dims{
+ {ID: txPoolInvalid, Name: "invalid"},
+ {ID: txPoolPending, Name: "pending"},
+ {ID: txPoolLocal, Name: "local"},
+ {ID: txPoolPendingDiscard, Name: "discard"},
+ {ID: txPoolNofunds, Name: "no funds"},
+ {ID: txPoolPendingRatelimit, Name: "ratelimit"},
+ {ID: txPoolPendingReplace, Name: "replace"},
+ },
+ }
+ chartTxPoolCurrent = Chart{
+ ID: "txpoolcurrent",
+ Title: "Transaction Pool",
+ Units: "transactions",
+ Fam: "tx_pool",
+ Ctx: "geth.tx_pool_current",
+ Dims: Dims{
+ {ID: txPoolInvalid, Name: "invalid"},
+ {ID: txPoolPending, Name: "pending"},
+ {ID: txPoolLocal, Name: "local"},
+ {ID: txPoolNofunds, Name: "pool"},
+ },
+ }
+ chartTxPoolQueued = Chart{
+ ID: "txpoolqueued",
+ Title: "Queued Transaction Pool",
+ Units: "transactions",
+ Fam: "tx_pool",
+ Ctx: "geth.tx_pool_queued",
+ Dims: Dims{
+ {ID: txPoolQueuedDiscard, Name: "discard"},
+ {ID: txPoolQueuedEviction, Name: "eviction"},
+ {ID: txPoolQueuedNofunds, Name: "no_funds"},
+ {ID: txPoolQueuedRatelimit, Name: "ratelimit"},
+ },
+ }
+ chartP2PNetwork = Chart{
+ ID: "p2p_network",
+ Title: "P2P bandwidth",
+ Units: "bytes/s",
+ Fam: "p2p_bandwidth",
+ Ctx: "geth.p2p_bandwidth",
+ Dims: Dims{
+ {ID: p2pIngress, Name: "ingress", Algo: "incremental"},
+ {ID: p2pEgress, Name: "egress", Mul: -1, Algo: "incremental"},
+ },
+ }
+ chartReorgs = Chart{
+ ID: "reorgs_executed",
+ Title: "Executed Reorgs",
+ Units: "reorgs",
+ Fam: "reorgs",
+ Ctx: "geth.reorgs",
+ Dims: Dims{
+ {ID: reorgsExecuted, Name: "executed"},
+ },
+ }
+ chartReorgsBlocks = Chart{
+ ID: "reorgs_blocks",
+ Title: "Blocks Added/Removed from Reorg",
+ Units: "blocks",
+ Fam: "reorgs",
+ Ctx: "geth.reorgs_blocks",
+ Dims: Dims{
+ {ID: reorgsAdd, Name: "added"},
+ {ID: reorgsDropped, Name: "dropped"},
+ },
+ }
+
+ chartNumberOfPeers = Chart{
+ ID: "p2p_peers_number",
+ Title: "Number of Peers",
+ Units: "peers",
+ Fam: "p2p_peers",
+ Ctx: "geth.p2p_peers",
+ Dims: Dims{
+ {ID: p2pPeers, Name: "peers"},
+ },
+ }
+
+ chartp2pDialsServes = Chart{
+ ID: "p2p_dials_serves",
+ Title: "P2P Serves and Dials",
+ Units: "calls/s",
+ Fam: "p2p_peers",
+ Ctx: "geth.p2p_peers_calls",
+ Dims: Dims{
+ {ID: p2pDials, Name: "dials", Algo: "incremental"},
+ {ID: p2pServes, Name: "serves", Algo: "incremental"},
+ },
+ }
+ chartRpcInformation = Chart{
+ ID: "rpc_calls",
+ Title: "RPC calls",
+ Units: "calls/s",
+ Fam: "rpc",
+ Ctx: "geth.rpc_calls",
+ Dims: Dims{
+ {ID: rpcFailure, Name: "failed", Algo: "incremental"},
+ {ID: rpcSuccess, Name: "successful", Algo: "incremental"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/geth/collect.go b/src/go/plugin/go.d/modules/geth/collect.go
new file mode 100644
index 000000000..bd7b1d5b3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/collect.go
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package geth
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+func (g *Geth) collect() (map[string]int64, error) {
+ pms, err := g.prom.ScrapeSeries()
+ if err != nil {
+ return nil, err
+ }
+ mx := g.collectGeth(pms)
+
+ return stm.ToMap(mx), nil
+}
+
+func (g *Geth) collectGeth(pms prometheus.Series) map[string]float64 {
+ mx := make(map[string]float64)
+ g.collectChainData(mx, pms)
+ g.collectP2P(mx, pms)
+ g.collectTxPool(mx, pms)
+ g.collectRpc(mx, pms)
+ return mx
+}
+
+func (g *Geth) collectChainData(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ chainValidation,
+ chainWrite,
+ ethDbChainDataAncientRead,
+ ethDbChainDataAncientWrite,
+ ethDbChaindataDiskRead,
+ ethDbChainDataDiskWrite,
+ chainHeadBlock,
+ chainHeadHeader,
+ chainHeadReceipt,
+ ethDbChainDataAncientSize,
+ ethDbChainDataDiskSize,
+ reorgsAdd,
+ reorgsDropped,
+ reorgsExecuted,
+ goRoutines,
+ )
+ g.collectEth(mx, pms)
+
+}
+
+func (g *Geth) collectRpc(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ rpcRequests,
+ rpcSuccess,
+ rpcFailure,
+ )
+ g.collectEth(mx, pms)
+}
+
+func (g *Geth) collectTxPool(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ txPoolInvalid,
+ txPoolPending,
+ txPoolLocal,
+ txPoolPendingDiscard,
+ txPoolNofunds,
+ txPoolPendingRatelimit,
+ txPoolPendingReplace,
+ txPoolQueuedDiscard,
+ txPoolQueuedEviction,
+ txPoolQueuedNofunds,
+ txPoolQueuedRatelimit,
+ )
+ g.collectEth(mx, pms)
+}
+
+func (g *Geth) collectP2P(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ p2pDials,
+ p2pEgress,
+ p2pIngress,
+ p2pPeers,
+ p2pServes,
+ )
+ g.collectEth(mx, pms)
+}
+
+func (g *Geth) collectEth(mx map[string]float64, pms prometheus.Series) {
+ for _, pm := range pms {
+ mx[pm.Name()] += pm.Value
+ }
+}
diff --git a/src/go/plugin/go.d/modules/geth/config_schema.json b/src/go/plugin/go.d/modules/geth/config_schema.json
new file mode 100644
index 000000000..00b3071d0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Geth collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Geth [Prometheus endpoint](https://geth.ethereum.org/docs/monitoring/metrics).",
+ "type": "string",
+ "default": "http://127.0.0.1:6060/debug/metrics/prometheus",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/geth/geth.go b/src/go/plugin/go.d/modules/geth/geth.go
new file mode 100644
index 000000000..6448965f5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/geth.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package geth
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("geth", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Geth {
+ return &Geth{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:6060/debug/metrics/prometheus",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
+
+type Geth struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ prom prometheus.Prometheus
+}
+
+func (g *Geth) Configuration() any {
+ return g.Config
+}
+
+func (g *Geth) Init() error {
+ if err := g.validateConfig(); err != nil {
+ g.Errorf("error on validating config: %v", err)
+ return err
+ }
+
+ prom, err := g.initPrometheusClient()
+ if err != nil {
+ g.Error(err)
+ return err
+ }
+ g.prom = prom
+
+ return nil
+}
+
+func (g *Geth) Check() error {
+ mx, err := g.collect()
+ if err != nil {
+ g.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (g *Geth) Charts() *Charts {
+ return g.charts
+}
+
+func (g *Geth) Collect() map[string]int64 {
+ mx, err := g.collect()
+ if err != nil {
+ g.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (g *Geth) Cleanup() {
+ if g.prom != nil && g.prom.HTTPClient() != nil {
+ g.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/geth/geth_test.go b/src/go/plugin/go.d/modules/geth/geth_test.go
new file mode 100644
index 000000000..c68701c14
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/geth_test.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package geth
+
+import (
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestGeth_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Geth{}, dataConfigJSON, dataConfigYAML)
+}
diff --git a/src/go/plugin/go.d/modules/geth/init.go b/src/go/plugin/go.d/modules/geth/init.go
new file mode 100644
index 000000000..da908560e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/init.go
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package geth
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (g *Geth) validateConfig() error {
+ if g.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (g *Geth) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(g.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(client, g.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/geth/integrations/go-ethereum.md b/src/go/plugin/go.d/modules/geth/integrations/go-ethereum.md
new file mode 100644
index 000000000..86f830529
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/integrations/go-ethereum.md
@@ -0,0 +1,252 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/geth/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/geth/metadata.yaml"
+sidebar_label: "Go-ethereum"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Blockchain Servers"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Go-ethereum
+
+
+<img src="https://netdata.cloud/img/geth.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: geth
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Go-ethereum instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Go-ethereum instances running on localhost.
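+
+Auto-detection only works if Geth actually exposes its metrics endpoint at that URL. As a rough sketch (flag names as found in recent Geth releases; verify against your installed version), the endpoint can be enabled when starting the node:
+
+```bash
+# Enable metrics collection and serve them on the default 127.0.0.1:6060 endpoint
+geth --metrics --metrics.addr 127.0.0.1 --metrics.port 6060
+```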
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Go-ethereum instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| geth.eth_db_chaindata_ancient_io_rate | reads, writes | bytes/s |
+| geth.eth_db_chaindata_ancient_io | reads, writes | bytes |
+| geth.eth_db_chaindata_disk_io | reads, writes | bytes |
+| geth.goroutines | goroutines | goroutines |
+| geth.eth_db_chaindata_disk_io_rate | reads, writes | bytes/s |
+| geth.chaindata_db_size | level_db, ancient_db | bytes |
+| geth.chainhead | block, receipt, header | block |
+| geth.tx_pool_pending | invalid, pending, local, discard, no_funds, ratelimit, replace | transactions |
+| geth.tx_pool_current | invalid, pending, local, pool | transactions |
+| geth.tx_pool_queued | discard, eviction, no_funds, ratelimit | transactions |
+| geth.p2p_bandwidth | ingress, egress | bytes/s |
+| geth.reorgs | executed | reorgs |
+| geth.reorgs_blocks | added, dropped | blocks |
+| geth.p2p_peers | peers | peers |
+| geth.p2p_peers_calls | dials, serves | calls/s |
+| geth.rpc_calls | failed, successful | calls/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/geth.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/geth.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:6060/debug/metrics/prometheus | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:6060/debug/metrics/prometheus
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:6060/debug/metrics/prometheus
+ username: username
+ password: password
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:6060/debug/metrics/prometheus
+
+ - name: remote
+ url: http://192.0.2.1:6060/debug/metrics/prometheus
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `geth` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m geth
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `geth` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep geth
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep geth /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep geth
+```
+
+
diff --git a/src/go/plugin/go.d/modules/geth/metadata.yaml b/src/go/plugin/go.d/modules/geth/metadata.yaml
new file mode 100644
index 000000000..ef131776a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/metadata.yaml
@@ -0,0 +1,291 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-geth
+ plugin_name: go.d.plugin
+ module_name: geth
+ monitored_instance:
+ name: Go-ethereum
+ link: https://github.com/ethereum/go-ethereum
+ icon_filename: geth.png
+ categories:
+ - data-collection.blockchain-servers
+ keywords:
+ - geth
+ - ethereum
+ - blockchain
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Go-ethereum instances.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Go-ethereum instances running on localhost.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/geth.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:6060/debug/metrics/prometheus
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:6060/debug/metrics/prometheus
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:6060/debug/metrics/prometheus
+ username: username
+ password: password
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:6060/debug/metrics/prometheus
+
+ - name: remote
+ url: http://192.0.2.1:6060/debug/metrics/prometheus
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: geth.eth_db_chaindata_ancient_io_rate
+ description: Ancient Chaindata rate
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: geth.eth_db_chaindata_ancient_io
+ description: Session ancient Chaindata
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: geth.eth_db_chaindata_disk_io
+ description: Session chaindata on disk
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: geth.goroutines
+ description: Number of goroutines
+ unit: goroutines
+ chart_type: line
+ dimensions:
+ - name: goroutines
+ - name: geth.eth_db_chaindata_disk_io_rate
+ description: On disk Chaindata rate
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: geth.chaindata_db_size
+ description: Chaindata Size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: level_db
+ - name: ancient_db
+ - name: geth.chainhead
+ description: Chainhead
+ unit: block
+ chart_type: line
+ dimensions:
+ - name: block
+ - name: receipt
+ - name: header
+ - name: geth.tx_pool_pending
+ description: Pending Transaction Pool
+ unit: transactions
+ chart_type: line
+ dimensions:
+ - name: invalid
+ - name: pending
+ - name: local
+ - name: discard
+ - name: no_funds
+ - name: ratelimit
+ - name: replace
+ - name: geth.tx_pool_current
+ description: Transaction Pool
+ unit: transactions
+ chart_type: line
+ dimensions:
+ - name: invalid
+ - name: pending
+ - name: local
+ - name: pool
+ - name: geth.tx_pool_queued
+ description: Queued Transaction Pool
+ unit: transactions
+ chart_type: line
+ dimensions:
+ - name: discard
+ - name: eviction
+ - name: no_funds
+ - name: ratelimit
+ - name: geth.p2p_bandwidth
+ description: P2P bandwidth
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: ingress
+ - name: egress
+ - name: geth.reorgs
+ description: Executed Reorgs
+ unit: reorgs
+ chart_type: line
+ dimensions:
+ - name: executed
+ - name: geth.reorgs_blocks
+ description: Blocks Added/Removed from Reorg
+ unit: blocks
+ chart_type: line
+ dimensions:
+ - name: added
+ - name: dropped
+ - name: geth.p2p_peers
+ description: Number of Peers
+ unit: peers
+ chart_type: line
+ dimensions:
+ - name: peers
+ - name: geth.p2p_peers_calls
+ description: P2P Serves and Dials
+ unit: calls/s
+ chart_type: line
+ dimensions:
+ - name: dials
+ - name: serves
+ - name: geth.rpc_calls
+ description: RPC calls
+ unit: calls/s
+ chart_type: line
+ dimensions:
+ - name: failed
+ - name: successful
diff --git a/src/go/plugin/go.d/modules/geth/metrics.go b/src/go/plugin/go.d/modules/geth/metrics.go
new file mode 100644
index 000000000..642973d69
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/metrics.go
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package geth
+
+// summary
+const (
+ chainValidation = "chain_validation"
+ chainWrite = "chain_write"
+ chainHeadBlock = "chain_head_block"
+ chainHeadHeader = "chain_head_header"
+ chainHeadReceipt = "chain_head_receipt"
+)
+
+// + rate
+const (
+ ethDbChainDataAncientRead = "eth_db_chaindata_ancient_read"
+ ethDbChainDataAncientWrite = "eth_db_chaindata_ancient_write"
+ ethDbChaindataDiskRead = "eth_db_chaindata_disk_read"
+ ethDbChainDataDiskWrite = "eth_db_chaindata_disk_write"
+ ethDbChainDataDiskSize = "eth_db_chaindata_disk_size"
+ ethDbChainDataAncientSize = "eth_db_chaindata_ancient_size"
+
+ txPoolInvalid = "txpool_invalid"
+ txPoolPending = "txpool_pending"
+ txPoolLocal = "txpool_local"
+ txPoolPendingDiscard = "txpool_pending_discard"
+ txPoolNofunds = "txpool_pending_nofunds"
+ txPoolPendingRatelimit = "txpool_pending_ratelimit"
+ txPoolPendingReplace = "txpool_pending_replace"
+ txPoolQueuedDiscard = "txpool_queued_discard"
+ txPoolQueuedEviction = "txpool_queued_eviction"
+ txPoolQueuedNofunds = "txpool_queued_nofunds"
+ txPoolQueuedRatelimit = "txpool_queued_ratelimit"
+)
+
+const (
+ // gauge
+ p2pEgress = "p2p_egress"
+ p2pIngress = "p2p_ingress"
+
+ p2pPeers = "p2p_peers"
+ p2pServes = "p2p_serves"
+ p2pDials = "p2p_dials"
+
+ rpcRequests = "rpc_requests"
+ rpcSuccess = "rpc_success"
+ rpcFailure = "rpc_failure"
+
+ reorgsAdd = "chain_reorg_add"
+ reorgsExecuted = "chain_reorg_executes"
+ reorgsDropped = "chain_reorg_drop"
+
+ goRoutines = "system_cpu_goroutines"
+)
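
The constants above are the raw Prometheus series names the collector scrapes from Geth's `/debug/metrics/prometheus` endpoint and sums by name in `collect.go`. To check what a running node reports for them, assuming the endpoint is reachable at the default address, a quick look is:

```bash
# List only the series this module consumes (names taken from metrics.go)
curl -s http://127.0.0.1:6060/debug/metrics/prometheus \
  | grep -E '^(chain_head_|chain_reorg_|eth_db_chaindata_|txpool_|p2p_|rpc_|system_cpu_goroutines)'
```
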
diff --git a/src/go/plugin/go.d/modules/geth/testdata/config.json b/src/go/plugin/go.d/modules/geth/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/geth/testdata/config.yaml b/src/go/plugin/go.d/modules/geth/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/geth/testdata/metrics_geth.txt b/src/go/plugin/go.d/modules/geth/testdata/metrics_geth.txt
new file mode 100644
index 000000000..055fea893
--- /dev/null
+++ b/src/go/plugin/go.d/modules/geth/testdata/metrics_geth.txt
@@ -0,0 +1,1569 @@
+# TYPE chain_account_commits_count counter
+chain_account_commits_count 0
+
+# TYPE chain_account_commits summary IMP
+chain_account_commits {quantile="0.5"} 0
+chain_account_commits {quantile="0.75"} 0
+chain_account_commits {quantile="0.95"} 0
+chain_account_commits {quantile="0.99"} 0
+chain_account_commits {quantile="0.999"} 0
+chain_account_commits {quantile="0.9999"} 0
+
+# TYPE chain_account_hashes_count counter
+chain_account_hashes_count 0
+
+# TYPE chain_account_hashes summary
+chain_account_hashes {quantile="0.5"} 0
+chain_account_hashes {quantile="0.75"} 0
+chain_account_hashes {quantile="0.95"} 0
+chain_account_hashes {quantile="0.99"} 0
+chain_account_hashes {quantile="0.999"} 0
+chain_account_hashes {quantile="0.9999"} 0
+
+# TYPE chain_account_reads_count counter
+chain_account_reads_count 0
+
+# TYPE chain_account_reads summary IMP
+chain_account_reads {quantile="0.5"} 0
+chain_account_reads {quantile="0.75"} 0
+chain_account_reads {quantile="0.95"} 0
+chain_account_reads {quantile="0.99"} 0
+chain_account_reads {quantile="0.999"} 0
+chain_account_reads {quantile="0.9999"} 0
+
+# TYPE chain_account_updates_count counter
+chain_account_updates_count 0
+
+# TYPE chain_account_updates summary IMP
+chain_account_updates {quantile="0.5"} 0
+chain_account_updates {quantile="0.75"} 0
+chain_account_updates {quantile="0.95"} 0
+chain_account_updates {quantile="0.99"} 0
+chain_account_updates {quantile="0.999"} 0
+chain_account_updates {quantile="0.9999"} 0
+
+# TYPE chain_execution_count counter
+chain_execution_count 0
+
+# TYPE chain_execution summary IMP
+chain_execution {quantile="0.5"} 0
+chain_execution {quantile="0.75"} 0
+chain_execution {quantile="0.95"} 0
+chain_execution {quantile="0.99"} 0
+chain_execution {quantile="0.999"} 0
+chain_execution {quantile="0.9999"} 0
+#---
+# TYPE chain_head_block gauge IMP
+chain_head_block 0
+
+# TYPE chain_head_header gauge IMP
+chain_head_header 24576
+
+# TYPE chain_head_receipt gauge IMP
+chain_head_receipt 24576
+#---
+# TYPE chain_inserts_count counter
+chain_inserts_count 0
+
+# TYPE chain_inserts summary
+chain_inserts {quantile="0.5"} 0
+chain_inserts {quantile="0.75"} 0
+chain_inserts {quantile="0.95"} 0
+chain_inserts {quantile="0.99"} 0
+chain_inserts {quantile="0.999"} 0
+chain_inserts {quantile="0.9999"} 0
+
+# TYPE chain_prefetch_executes_count counter
+chain_prefetch_executes_count 0
+
+# TYPE chain_prefetch_executes summary
+chain_prefetch_executes {quantile="0.5"} 0
+chain_prefetch_executes {quantile="0.75"} 0
+chain_prefetch_executes {quantile="0.95"} 0
+chain_prefetch_executes {quantile="0.99"} 0
+chain_prefetch_executes {quantile="0.999"} 0
+chain_prefetch_executes {quantile="0.9999"} 0
+
+# TYPE chain_prefetch_interrupts gauge
+chain_prefetch_interrupts 0
+
+# TYPE chain_reorg_add gauge
+chain_reorg_add 0
+
+# TYPE chain_reorg_drop gauge
+chain_reorg_drop 0
+
+# TYPE chain_reorg_executes gauge
+chain_reorg_executes 0
+
+# TYPE chain_reorg_invalidTx gauge
+chain_reorg_invalidTx 0
+
+# TYPE chain_snapshot_account_reads_count counter
+chain_snapshot_account_reads_count 0
+
+# TYPE chain_snapshot_account_reads summary
+chain_snapshot_account_reads {quantile="0.5"} 0
+chain_snapshot_account_reads {quantile="0.75"} 0
+chain_snapshot_account_reads {quantile="0.95"} 0
+chain_snapshot_account_reads {quantile="0.99"} 0
+chain_snapshot_account_reads {quantile="0.999"} 0
+chain_snapshot_account_reads {quantile="0.9999"} 0
+
+# TYPE chain_snapshot_commits_count counter
+chain_snapshot_commits_count 0
+
+# TYPE chain_snapshot_commits summary
+chain_snapshot_commits {quantile="0.5"} 0
+chain_snapshot_commits {quantile="0.75"} 0
+chain_snapshot_commits {quantile="0.95"} 0
+chain_snapshot_commits {quantile="0.99"} 0
+chain_snapshot_commits {quantile="0.999"} 0
+chain_snapshot_commits {quantile="0.9999"} 0
+
+# TYPE chain_snapshot_storage_reads_count counter
+chain_snapshot_storage_reads_count 0
+
+# TYPE chain_snapshot_storage_reads summary IMP
+chain_snapshot_storage_reads {quantile="0.5"} 0
+chain_snapshot_storage_reads {quantile="0.75"} 0
+chain_snapshot_storage_reads {quantile="0.95"} 0
+chain_snapshot_storage_reads {quantile="0.99"} 0
+chain_snapshot_storage_reads {quantile="0.999"} 0
+chain_snapshot_storage_reads {quantile="0.9999"} 0
+
+# TYPE chain_storage_commits_count counter
+chain_storage_commits_count 0
+
+# TYPE chain_storage_commits summary IMP
+chain_storage_commits {quantile="0.5"} 0
+chain_storage_commits {quantile="0.75"} 0
+chain_storage_commits {quantile="0.95"} 0
+chain_storage_commits {quantile="0.99"} 0
+chain_storage_commits {quantile="0.999"} 0
+chain_storage_commits {quantile="0.9999"} 0
+
+# TYPE chain_storage_hashes_count counter
+chain_storage_hashes_count 0
+
+# TYPE chain_storage_hashes summary IMP
+chain_storage_hashes {quantile="0.5"} 0
+chain_storage_hashes {quantile="0.75"} 0
+chain_storage_hashes {quantile="0.95"} 0
+chain_storage_hashes {quantile="0.99"} 0
+chain_storage_hashes {quantile="0.999"} 0
+chain_storage_hashes {quantile="0.9999"} 0
+
+# TYPE chain_storage_reads_count counter
+chain_storage_reads_count 0
+
+# TYPE chain_storage_reads summary
+chain_storage_reads {quantile="0.5"} 0
+chain_storage_reads {quantile="0.75"} 0
+chain_storage_reads {quantile="0.95"} 0
+chain_storage_reads {quantile="0.99"} 0
+chain_storage_reads {quantile="0.999"} 0
+chain_storage_reads {quantile="0.9999"} 0
+
+# TYPE chain_storage_updates_count counter
+chain_storage_updates_count 0
+
+# TYPE chain_storage_updates summary IMP
+chain_storage_updates {quantile="0.5"} 0
+chain_storage_updates {quantile="0.75"} 0
+chain_storage_updates {quantile="0.95"} 0
+chain_storage_updates {quantile="0.99"} 0
+chain_storage_updates {quantile="0.999"} 0
+chain_storage_updates {quantile="0.9999"} 0
+
+# TYPE chain_validation_count counter
+chain_validation_count 0
+
+# TYPE chain_validation summary IMP
+chain_validation {quantile="0.5"} 0
+chain_validation {quantile="0.75"} 0
+chain_validation {quantile="0.95"} 0
+chain_validation {quantile="0.99"} 0
+chain_validation {quantile="0.999"} 0
+chain_validation {quantile="0.9999"} 0
+
+# TYPE chain_write_count counter
+chain_write_count 0
+
+# TYPE chain_write summary IMP
+chain_write {quantile="0.5"} 0
+chain_write {quantile="0.75"} 0
+chain_write {quantile="0.95"} 0
+chain_write {quantile="0.99"} 0
+chain_write {quantile="0.999"} 0
+chain_write {quantile="0.9999"} 0
+
+# TYPE db_preimage_hits gauge
+db_preimage_hits 8893
+
+# TYPE db_preimage_total gauge
+db_preimage_total 8893
+
+# TYPE eth_db_chaindata_ancient_read gauge IMP + rate
+eth_db_chaindata_ancient_read 954
+
+# TYPE eth_db_chaindata_ancient_size gauge
+eth_db_chaindata_ancient_size 9901428
+
+# TYPE eth_db_chaindata_ancient_write gauge IMP + rate
+eth_db_chaindata_ancient_write 7256150
+
+# TYPE eth_db_chaindata_compact_input gauge
+eth_db_chaindata_compact_input 0
+
+# TYPE eth_db_chaindata_compact_level0 gauge
+eth_db_chaindata_compact_level0 0
+
+# TYPE eth_db_chaindata_compact_memory gauge
+eth_db_chaindata_compact_memory 0
+
+# TYPE eth_db_chaindata_compact_nonlevel0 gauge
+eth_db_chaindata_compact_nonlevel0 0
+
+# TYPE eth_db_chaindata_compact_output gauge
+eth_db_chaindata_compact_output 0
+
+# TYPE eth_db_chaindata_compact_seek gauge
+eth_db_chaindata_compact_seek 0
+
+# TYPE eth_db_chaindata_compact_time gauge
+eth_db_chaindata_compact_time 0
+
+# TYPE eth_db_chaindata_compact_writedelay_counter gauge
+eth_db_chaindata_compact_writedelay_counter 0
+
+# TYPE eth_db_chaindata_compact_writedelay_duration gauge
+eth_db_chaindata_compact_writedelay_duration 0
+
+# TYPE eth_db_chaindata_disk_read gauge IMP + rate
+eth_db_chaindata_disk_read 0
+
+# TYPE eth_db_chaindata_disk_size gauge
+eth_db_chaindata_disk_size 0
+
+# TYPE eth_db_chaindata_disk_write gauge IMP + rate
+eth_db_chaindata_disk_write 10028946
+
+# TYPE eth_downloader_bodies_drop gauge
+eth_downloader_bodies_drop 0
+
+# TYPE eth_downloader_bodies_in gauge
+eth_downloader_bodies_in 2061
+
+# TYPE eth_downloader_bodies_req_count counter
+eth_downloader_bodies_req_count 100
+
+# TYPE eth_downloader_bodies_req summary
+eth_downloader_bodies_req {quantile="0.5"} 1.73698035e+07
+eth_downloader_bodies_req {quantile="0.75"} 2.534998e+07
+eth_downloader_bodies_req {quantile="0.95"} 2.806964048999994e+08
+eth_downloader_bodies_req {quantile="0.99"} 7.47070292879998e+08
+eth_downloader_bodies_req {quantile="0.999"} 7.51141436e+08
+eth_downloader_bodies_req {quantile="0.9999"} 7.51141436e+08
+
+# TYPE eth_downloader_bodies_timeout gauge
+eth_downloader_bodies_timeout 0
+
+# TYPE eth_downloader_headers_drop gauge
+eth_downloader_headers_drop 0
+
+# TYPE eth_downloader_headers_in gauge
+eth_downloader_headers_in 20133
+
+# TYPE eth_downloader_headers_req_count counter
+eth_downloader_headers_req_count 129
+
+# TYPE eth_downloader_headers_req summary
+eth_downloader_headers_req {quantile="0.5"} 4.0981132e+07
+eth_downloader_headers_req {quantile="0.75"} 4.5769116e+07
+eth_downloader_headers_req {quantile="0.95"} 2.53663427e+08
+eth_downloader_headers_req {quantile="0.99"} 6.901528164999979e+08
+eth_downloader_headers_req {quantile="0.999"} 7.45691875e+08
+eth_downloader_headers_req {quantile="0.9999"} 7.45691875e+08
+
+# TYPE eth_downloader_headers_timeout gauge
+eth_downloader_headers_timeout 0
+
+# TYPE eth_downloader_receipts_drop gauge
+eth_downloader_receipts_drop 0
+
+# TYPE eth_downloader_receipts_in gauge
+eth_downloader_receipts_in 0
+
+# TYPE eth_downloader_receipts_req_count counter
+eth_downloader_receipts_req_count 0
+
+# TYPE eth_downloader_receipts_req summary
+eth_downloader_receipts_req {quantile="0.5"} 0
+eth_downloader_receipts_req {quantile="0.75"} 0
+eth_downloader_receipts_req {quantile="0.95"} 0
+eth_downloader_receipts_req {quantile="0.99"} 0
+eth_downloader_receipts_req {quantile="0.999"} 0
+eth_downloader_receipts_req {quantile="0.9999"} 0
+
+# TYPE eth_downloader_receipts_timeout gauge
+eth_downloader_receipts_timeout 0
+
+# TYPE eth_downloader_states_drop gauge
+eth_downloader_states_drop 0
+
+# TYPE eth_downloader_states_in gauge
+eth_downloader_states_in 0
+
+# TYPE eth_downloader_throttle gauge
+eth_downloader_throttle 0
+
+# TYPE eth_fetcher_block_announces_dos gauge
+eth_fetcher_block_announces_dos 0
+
+# TYPE eth_fetcher_block_announces_drop gauge
+eth_fetcher_block_announces_drop 0
+
+# TYPE eth_fetcher_block_announces_in gauge
+eth_fetcher_block_announces_in 0
+
+# TYPE eth_fetcher_block_announces_out_count counter
+eth_fetcher_block_announces_out_count 0
+
+# TYPE eth_fetcher_block_announces_out summary
+eth_fetcher_block_announces_out {quantile="0.5"} 0
+eth_fetcher_block_announces_out {quantile="0.75"} 0
+eth_fetcher_block_announces_out {quantile="0.95"} 0
+eth_fetcher_block_announces_out {quantile="0.99"} 0
+eth_fetcher_block_announces_out {quantile="0.999"} 0
+eth_fetcher_block_announces_out {quantile="0.9999"} 0
+
+# TYPE eth_fetcher_block_bodies gauge
+eth_fetcher_block_bodies 0
+
+# TYPE eth_fetcher_block_broadcasts_dos gauge
+eth_fetcher_block_broadcasts_dos 0
+
+# TYPE eth_fetcher_block_broadcasts_drop gauge
+eth_fetcher_block_broadcasts_drop 0
+
+# TYPE eth_fetcher_block_broadcasts_in gauge
+eth_fetcher_block_broadcasts_in 0
+
+# TYPE eth_fetcher_block_broadcasts_out_count counter
+eth_fetcher_block_broadcasts_out_count 0
+
+# TYPE eth_fetcher_block_broadcasts_out summary
+eth_fetcher_block_broadcasts_out {quantile="0.5"} 0
+eth_fetcher_block_broadcasts_out {quantile="0.75"} 0
+eth_fetcher_block_broadcasts_out {quantile="0.95"} 0
+eth_fetcher_block_broadcasts_out {quantile="0.99"} 0
+eth_fetcher_block_broadcasts_out {quantile="0.999"} 0
+eth_fetcher_block_broadcasts_out {quantile="0.9999"} 0
+
+# TYPE eth_fetcher_block_filter_bodies_in gauge
+eth_fetcher_block_filter_bodies_in 2061
+
+# TYPE eth_fetcher_block_filter_bodies_out gauge
+eth_fetcher_block_filter_bodies_out 2061
+
+# TYPE eth_fetcher_block_filter_headers_in gauge
+eth_fetcher_block_filter_headers_in 23
+
+# TYPE eth_fetcher_block_filter_headers_out gauge
+eth_fetcher_block_filter_headers_out 23
+
+# TYPE eth_fetcher_block_headers gauge
+eth_fetcher_block_headers 0
+
+# TYPE eth_fetcher_transaction_announces_dos gauge
+eth_fetcher_transaction_announces_dos 0
+
+# TYPE eth_fetcher_transaction_announces_in gauge
+eth_fetcher_transaction_announces_in 0
+
+# TYPE eth_fetcher_transaction_announces_known gauge
+eth_fetcher_transaction_announces_known 0
+
+# TYPE eth_fetcher_transaction_announces_underpriced gauge
+eth_fetcher_transaction_announces_underpriced 0
+
+# TYPE eth_fetcher_transaction_broadcasts_in gauge
+eth_fetcher_transaction_broadcasts_in 0
+
+# TYPE eth_fetcher_transaction_broadcasts_known gauge
+eth_fetcher_transaction_broadcasts_known 0
+
+# TYPE eth_fetcher_transaction_broadcasts_otherreject gauge
+eth_fetcher_transaction_broadcasts_otherreject 0
+
+# TYPE eth_fetcher_transaction_broadcasts_underpriced gauge
+eth_fetcher_transaction_broadcasts_underpriced 0
+
+# TYPE eth_fetcher_transaction_fetching_hashes gauge
+eth_fetcher_transaction_fetching_hashes 0
+
+# TYPE eth_fetcher_transaction_fetching_peers gauge
+eth_fetcher_transaction_fetching_peers 0
+
+# TYPE eth_fetcher_transaction_queueing_hashes gauge
+eth_fetcher_transaction_queueing_hashes 0
+
+# TYPE eth_fetcher_transaction_queueing_peers gauge
+eth_fetcher_transaction_queueing_peers 0
+
+# TYPE eth_fetcher_transaction_replies_in gauge
+eth_fetcher_transaction_replies_in 0
+
+# TYPE eth_fetcher_transaction_replies_known gauge
+eth_fetcher_transaction_replies_known 0
+
+# TYPE eth_fetcher_transaction_replies_otherreject gauge
+eth_fetcher_transaction_replies_otherreject 0
+
+# TYPE eth_fetcher_transaction_replies_underpriced gauge
+eth_fetcher_transaction_replies_underpriced 0
+
+# TYPE eth_fetcher_transaction_request_done gauge
+eth_fetcher_transaction_request_done 0
+
+# TYPE eth_fetcher_transaction_request_fail gauge
+eth_fetcher_transaction_request_fail 0
+
+# TYPE eth_fetcher_transaction_request_out gauge
+eth_fetcher_transaction_request_out 0
+
+# TYPE eth_fetcher_transaction_request_timeout gauge
+eth_fetcher_transaction_request_timeout 0
+
+# TYPE eth_fetcher_transaction_waiting_hashes gauge
+eth_fetcher_transaction_waiting_hashes 0
+
+# TYPE eth_fetcher_transaction_waiting_peers gauge
+eth_fetcher_transaction_waiting_peers 0
+
+# TYPE les_client_req_rtt_count counter
+les_client_req_rtt_count 0
+
+# TYPE les_client_req_rtt summary
+les_client_req_rtt {quantile="0.5"} 0
+les_client_req_rtt {quantile="0.75"} 0
+les_client_req_rtt {quantile="0.95"} 0
+les_client_req_rtt {quantile="0.99"} 0
+les_client_req_rtt {quantile="0.999"} 0
+les_client_req_rtt {quantile="0.9999"} 0
+
+# TYPE les_client_req_sendDelay_count counter
+les_client_req_sendDelay_count 0
+
+# TYPE les_client_req_sendDelay summary
+les_client_req_sendDelay {quantile="0.5"} 0
+les_client_req_sendDelay {quantile="0.75"} 0
+les_client_req_sendDelay {quantile="0.95"} 0
+les_client_req_sendDelay {quantile="0.99"} 0
+les_client_req_sendDelay {quantile="0.999"} 0
+les_client_req_sendDelay {quantile="0.9999"} 0
+
+# TYPE les_client_serverPool_connected gauge
+les_client_serverPool_connected 0
+
+# TYPE les_client_serverPool_dialed gauge
+les_client_serverPool_dialed 0
+
+# TYPE les_client_serverPool_selectable gauge
+les_client_serverPool_selectable 0
+
+# TYPE les_client_serverPool_sessionValue gauge
+les_client_serverPool_sessionValue 0
+
+# TYPE les_client_serverPool_timeout gauge
+les_client_serverPool_timeout 0
+
+# TYPE les_client_serverPool_totalValue gauge
+les_client_serverPool_totalValue 0
+
+# TYPE les_connection_duration_count counter
+les_connection_duration_count 0
+
+# TYPE les_connection_duration summary
+les_connection_duration {quantile="0.5"} 0
+les_connection_duration {quantile="0.75"} 0
+les_connection_duration {quantile="0.95"} 0
+les_connection_duration {quantile="0.99"} 0
+les_connection_duration {quantile="0.999"} 0
+les_connection_duration {quantile="0.9999"} 0
+
+# TYPE les_connection_server gauge
+les_connection_server 0
+
+# TYPE les_misc_in_packets_body gauge
+les_misc_in_packets_body 0
+
+# TYPE les_misc_in_packets_code gauge
+les_misc_in_packets_code 0
+
+# TYPE les_misc_in_packets_header gauge
+les_misc_in_packets_header 0
+
+# TYPE les_misc_in_packets_helperTrie gauge
+les_misc_in_packets_helperTrie 0
+
+# TYPE les_misc_in_packets_proof gauge
+les_misc_in_packets_proof 0
+
+# TYPE les_misc_in_packets_receipt gauge
+les_misc_in_packets_receipt 0
+
+# TYPE les_misc_in_packets_total gauge
+les_misc_in_packets_total 0
+
+# TYPE les_misc_in_packets_txStatus gauge
+les_misc_in_packets_txStatus 0
+
+# TYPE les_misc_in_packets_txs gauge
+les_misc_in_packets_txs 0
+
+# TYPE les_misc_in_traffic_body gauge
+les_misc_in_traffic_body 0
+
+# TYPE les_misc_in_traffic_code gauge
+les_misc_in_traffic_code 0
+
+# TYPE les_misc_in_traffic_header gauge
+les_misc_in_traffic_header 0
+
+# TYPE les_misc_in_traffic_helperTrie gauge
+les_misc_in_traffic_helperTrie 0
+
+# TYPE les_misc_in_traffic_proof gauge
+les_misc_in_traffic_proof 0
+
+# TYPE les_misc_in_traffic_receipt gauge
+les_misc_in_traffic_receipt 0
+
+# TYPE les_misc_in_traffic_total gauge
+les_misc_in_traffic_total 0
+
+# TYPE les_misc_in_traffic_txStatus gauge
+les_misc_in_traffic_txStatus 0
+
+# TYPE les_misc_in_traffic_txs gauge
+les_misc_in_traffic_txs 0
+
+# TYPE les_misc_out_packets_body gauge
+les_misc_out_packets_body 0
+
+# TYPE les_misc_out_packets_code gauge
+les_misc_out_packets_code 0
+
+# TYPE les_misc_out_packets_header gauge
+les_misc_out_packets_header 0
+
+# TYPE les_misc_out_packets_helperTrie gauge
+les_misc_out_packets_helperTrie 0
+
+# TYPE les_misc_out_packets_proof gauge
+les_misc_out_packets_proof 0
+
+# TYPE les_misc_out_packets_receipt gauge
+les_misc_out_packets_receipt 0
+
+# TYPE les_misc_out_packets_total gauge
+les_misc_out_packets_total 0
+
+# TYPE les_misc_out_packets_txStatus gauge
+les_misc_out_packets_txStatus 0
+
+# TYPE les_misc_out_packets_txs gauge
+les_misc_out_packets_txs 0
+
+# TYPE les_misc_out_traffic_body gauge
+les_misc_out_traffic_body 0
+
+# TYPE les_misc_out_traffic_code gauge
+les_misc_out_traffic_code 0
+
+# TYPE les_misc_out_traffic_header gauge
+les_misc_out_traffic_header 0
+
+# TYPE les_misc_out_traffic_helperTrie gauge
+les_misc_out_traffic_helperTrie 0
+
+# TYPE les_misc_out_traffic_proof gauge
+les_misc_out_traffic_proof 0
+
+# TYPE les_misc_out_traffic_receipt gauge
+les_misc_out_traffic_receipt 0
+
+# TYPE les_misc_out_traffic_total gauge
+les_misc_out_traffic_total 0
+
+# TYPE les_misc_out_traffic_txStatus gauge
+les_misc_out_traffic_txStatus 0
+
+# TYPE les_misc_out_traffic_txs gauge
+les_misc_out_traffic_txs 0
+
+# TYPE les_misc_serve_body_count counter
+les_misc_serve_body_count 0
+
+# TYPE les_misc_serve_body summary
+les_misc_serve_body {quantile="0.5"} 0
+les_misc_serve_body {quantile="0.75"} 0
+les_misc_serve_body {quantile="0.95"} 0
+les_misc_serve_body {quantile="0.99"} 0
+les_misc_serve_body {quantile="0.999"} 0
+les_misc_serve_body {quantile="0.9999"} 0
+
+# TYPE les_misc_serve_code_count counter
+les_misc_serve_code_count 0
+
+# TYPE les_misc_serve_code summary
+les_misc_serve_code {quantile="0.5"} 0
+les_misc_serve_code {quantile="0.75"} 0
+les_misc_serve_code {quantile="0.95"} 0
+les_misc_serve_code {quantile="0.99"} 0
+les_misc_serve_code {quantile="0.999"} 0
+les_misc_serve_code {quantile="0.9999"} 0
+
+# TYPE les_misc_serve_header_count counter
+les_misc_serve_header_count 0
+
+# TYPE les_misc_serve_header summary
+les_misc_serve_header {quantile="0.5"} 0
+les_misc_serve_header {quantile="0.75"} 0
+les_misc_serve_header {quantile="0.95"} 0
+les_misc_serve_header {quantile="0.99"} 0
+les_misc_serve_header {quantile="0.999"} 0
+les_misc_serve_header {quantile="0.9999"} 0
+
+# TYPE les_misc_serve_helperTrie_count counter
+les_misc_serve_helperTrie_count 0
+
+# TYPE les_misc_serve_helperTrie summary
+les_misc_serve_helperTrie {quantile="0.5"} 0
+les_misc_serve_helperTrie {quantile="0.75"} 0
+les_misc_serve_helperTrie {quantile="0.95"} 0
+les_misc_serve_helperTrie {quantile="0.99"} 0
+les_misc_serve_helperTrie {quantile="0.999"} 0
+les_misc_serve_helperTrie {quantile="0.9999"} 0
+
+# TYPE les_misc_serve_proof_count counter
+les_misc_serve_proof_count 0
+
+# TYPE les_misc_serve_proof summary
+les_misc_serve_proof {quantile="0.5"} 0
+les_misc_serve_proof {quantile="0.75"} 0
+les_misc_serve_proof {quantile="0.95"} 0
+les_misc_serve_proof {quantile="0.99"} 0
+les_misc_serve_proof {quantile="0.999"} 0
+les_misc_serve_proof {quantile="0.9999"} 0
+
+# TYPE les_misc_serve_receipt_count counter
+les_misc_serve_receipt_count 0
+
+# TYPE les_misc_serve_receipt summary
+les_misc_serve_receipt {quantile="0.5"} 0
+les_misc_serve_receipt {quantile="0.75"} 0
+les_misc_serve_receipt {quantile="0.95"} 0
+les_misc_serve_receipt {quantile="0.99"} 0
+les_misc_serve_receipt {quantile="0.999"} 0
+les_misc_serve_receipt {quantile="0.9999"} 0
+
+# TYPE les_misc_serve_txStatus_count counter
+les_misc_serve_txStatus_count 0
+
+# TYPE les_misc_serve_txStatus summary
+les_misc_serve_txStatus {quantile="0.5"} 0
+les_misc_serve_txStatus {quantile="0.75"} 0
+les_misc_serve_txStatus {quantile="0.95"} 0
+les_misc_serve_txStatus {quantile="0.99"} 0
+les_misc_serve_txStatus {quantile="0.999"} 0
+les_misc_serve_txStatus {quantile="0.9999"} 0
+
+# TYPE les_misc_serve_txs_count counter
+les_misc_serve_txs_count 0
+
+# TYPE les_misc_serve_txs summary
+les_misc_serve_txs {quantile="0.5"} 0
+les_misc_serve_txs {quantile="0.75"} 0
+les_misc_serve_txs {quantile="0.95"} 0
+les_misc_serve_txs {quantile="0.99"} 0
+les_misc_serve_txs {quantile="0.999"} 0
+les_misc_serve_txs {quantile="0.9999"} 0
+
+# TYPE les_server_blockProcessingTime_count counter
+les_server_blockProcessingTime_count 0
+
+# TYPE les_server_blockProcessingTime summary IMP
+les_server_blockProcessingTime {quantile="0.5"} 0
+les_server_blockProcessingTime {quantile="0.75"} 0
+les_server_blockProcessingTime {quantile="0.95"} 0
+les_server_blockProcessingTime {quantile="0.99"} 0
+les_server_blockProcessingTime {quantile="0.999"} 0
+les_server_blockProcessingTime {quantile="0.9999"} 0
+
+# TYPE les_server_clientEvent_error gauge
+les_server_clientEvent_error 0
+
+# TYPE les_server_clientEvent_freeze gauge
+les_server_clientEvent_freeze 0
+
+# TYPE les_server_globalFactor gauge
+les_server_globalFactor 0
+
+# TYPE les_server_recentRequestEstimated gauge
+les_server_recentRequestEstimated 0
+
+# TYPE les_server_recentRequestServed gauge
+les_server_recentRequestServed 0
+
+# TYPE les_server_req_avgEstimatedTime gauge
+les_server_req_avgEstimatedTime 0
+
+# TYPE les_server_req_avgServedTime gauge
+les_server_req_avgServedTime 0
+
+# TYPE les_server_req_estimatedTime_count counter
+les_server_req_estimatedTime_count 0
+
+# TYPE les_server_req_estimatedTime summary
+les_server_req_estimatedTime {quantile="0.5"} 0
+les_server_req_estimatedTime {quantile="0.75"} 0
+les_server_req_estimatedTime {quantile="0.95"} 0
+les_server_req_estimatedTime {quantile="0.99"} 0
+les_server_req_estimatedTime {quantile="0.999"} 0
+les_server_req_estimatedTime {quantile="0.9999"} 0
+
+# TYPE les_server_req_relative_count counter
+les_server_req_relative_count 0
+
+# TYPE les_server_req_relative summary
+les_server_req_relative {quantile="0.5"} 0
+les_server_req_relative {quantile="0.75"} 0
+les_server_req_relative {quantile="0.95"} 0
+les_server_req_relative {quantile="0.99"} 0
+les_server_req_relative {quantile="0.999"} 0
+les_server_req_relative {quantile="0.9999"} 0
+
+# TYPE les_server_req_relative_body_count counter
+les_server_req_relative_body_count 0
+
+# TYPE les_server_req_relative_body summary
+les_server_req_relative_body {quantile="0.5"} 0
+les_server_req_relative_body {quantile="0.75"} 0
+les_server_req_relative_body {quantile="0.95"} 0
+les_server_req_relative_body {quantile="0.99"} 0
+les_server_req_relative_body {quantile="0.999"} 0
+les_server_req_relative_body {quantile="0.9999"} 0
+
+# TYPE les_server_req_relative_code_count counter
+les_server_req_relative_code_count 0
+
+# TYPE les_server_req_relative_code summary
+les_server_req_relative_code {quantile="0.5"} 0
+les_server_req_relative_code {quantile="0.75"} 0
+les_server_req_relative_code {quantile="0.95"} 0
+les_server_req_relative_code {quantile="0.99"} 0
+les_server_req_relative_code {quantile="0.999"} 0
+les_server_req_relative_code {quantile="0.9999"} 0
+
+# TYPE les_server_req_relative_header_count counter
+les_server_req_relative_header_count 0
+
+# TYPE les_server_req_relative_header summary
+les_server_req_relative_header {quantile="0.5"} 0
+les_server_req_relative_header {quantile="0.75"} 0
+les_server_req_relative_header {quantile="0.95"} 0
+les_server_req_relative_header {quantile="0.99"} 0
+les_server_req_relative_header {quantile="0.999"} 0
+les_server_req_relative_header {quantile="0.9999"} 0
+
+# TYPE les_server_req_relative_helperTrie_count counter
+les_server_req_relative_helperTrie_count 0
+
+# TYPE les_server_req_relative_helperTrie summary
+les_server_req_relative_helperTrie {quantile="0.5"} 0
+les_server_req_relative_helperTrie {quantile="0.75"} 0
+les_server_req_relative_helperTrie {quantile="0.95"} 0
+les_server_req_relative_helperTrie {quantile="0.99"} 0
+les_server_req_relative_helperTrie {quantile="0.999"} 0
+les_server_req_relative_helperTrie {quantile="0.9999"} 0
+
+# TYPE les_server_req_relative_proof_count counter
+les_server_req_relative_proof_count 0
+
+# TYPE les_server_req_relative_proof summary
+les_server_req_relative_proof {quantile="0.5"} 0
+les_server_req_relative_proof {quantile="0.75"} 0
+les_server_req_relative_proof {quantile="0.95"} 0
+les_server_req_relative_proof {quantile="0.99"} 0
+les_server_req_relative_proof {quantile="0.999"} 0
+les_server_req_relative_proof {quantile="0.9999"} 0
+
+# TYPE les_server_req_relative_receipt_count counter
+les_server_req_relative_receipt_count 0
+
+# TYPE les_server_req_relative_receipt summary
+les_server_req_relative_receipt {quantile="0.5"} 0
+les_server_req_relative_receipt {quantile="0.75"} 0
+les_server_req_relative_receipt {quantile="0.95"} 0
+les_server_req_relative_receipt {quantile="0.99"} 0
+les_server_req_relative_receipt {quantile="0.999"} 0
+les_server_req_relative_receipt {quantile="0.9999"} 0
+
+# TYPE les_server_req_relative_txStatus_count counter
+les_server_req_relative_txStatus_count 0
+
+# TYPE les_server_req_relative_txStatus summary
+les_server_req_relative_txStatus {quantile="0.5"} 0
+les_server_req_relative_txStatus {quantile="0.75"} 0
+les_server_req_relative_txStatus {quantile="0.95"} 0
+les_server_req_relative_txStatus {quantile="0.99"} 0
+les_server_req_relative_txStatus {quantile="0.999"} 0
+les_server_req_relative_txStatus {quantile="0.9999"} 0
+
+# TYPE les_server_req_relative_txs_count counter
+les_server_req_relative_txs_count 0
+
+# TYPE les_server_req_relative_txs summary
+les_server_req_relative_txs {quantile="0.5"} 0
+les_server_req_relative_txs {quantile="0.75"} 0
+les_server_req_relative_txs {quantile="0.95"} 0
+les_server_req_relative_txs {quantile="0.99"} 0
+les_server_req_relative_txs {quantile="0.999"} 0
+les_server_req_relative_txs {quantile="0.9999"} 0
+
+# TYPE les_server_req_servedTime_count counter
+les_server_req_servedTime_count 0
+
+# TYPE les_server_req_servedTime summary
+les_server_req_servedTime {quantile="0.5"} 0
+les_server_req_servedTime {quantile="0.75"} 0
+les_server_req_servedTime {quantile="0.95"} 0
+les_server_req_servedTime {quantile="0.99"} 0
+les_server_req_servedTime {quantile="0.999"} 0
+les_server_req_servedTime {quantile="0.9999"} 0
+
+# TYPE les_server_servingQueue_queued gauge
+les_server_servingQueue_queued 0
+
+# TYPE les_server_servingQueue_served gauge
+les_server_servingQueue_served 0
+
+# TYPE les_server_totalCapacity gauge
+les_server_totalCapacity 0
+
+# TYPE les_server_totalRecharge gauge
+les_server_totalRecharge 0
+
+# TYPE p2p_dials gauge IMP
+p2p_dials 69
+
+#IMP all egress
+
+# TYPE p2p_egress gauge IMP
+p2p_egress 134834
+
+# TYPE p2p_egress_eth_65_0x00 gauge
+p2p_egress_eth_65_0x00 177
+
+# TYPE p2p_egress_eth_65_0x00_packets gauge
+p2p_egress_eth_65_0x00_packets 3
+
+# TYPE p2p_egress_eth_65_0x03 gauge
+p2p_egress_eth_65_0x03 1315
+
+# TYPE p2p_egress_eth_65_0x03_packets gauge
+p2p_egress_eth_65_0x03_packets 132
+
+# TYPE p2p_egress_eth_65_0x04 gauge
+p2p_egress_eth_65_0x04 3
+
+# TYPE p2p_egress_eth_65_0x04_packets gauge
+p2p_egress_eth_65_0x04_packets 1
+
+# TYPE p2p_egress_eth_65_0x05 gauge
+p2p_egress_eth_65_0x05 68658
+
+# TYPE p2p_egress_eth_65_0x05_packets gauge
+p2p_egress_eth_65_0x05_packets 83
+
+# TYPE p2p_egress_eth_66_0x00 gauge
+p2p_egress_eth_66_0x00 327
+
+# TYPE p2p_egress_eth_66_0x00_packets gauge
+p2p_egress_eth_66_0x00_packets 5
+
+# TYPE p2p_egress_eth_66_0x03 gauge
+p2p_egress_eth_66_0x03 20
+
+# TYPE p2p_egress_eth_66_0x03_packets gauge
+p2p_egress_eth_66_0x03_packets 1
+
+# TYPE p2p_egress_eth_66_0x04 gauge
+p2p_egress_eth_66_0x04 0
+
+# TYPE p2p_egress_eth_66_0x04_packets gauge
+p2p_egress_eth_66_0x04_packets 0
+
+# TYPE p2p_egress_eth_66_0x05 gauge
+p2p_egress_eth_66_0x05 0
+
+# TYPE p2p_egress_eth_66_0x05_packets gauge
+p2p_egress_eth_66_0x05_packets 0
+
+# TYPE p2p_egress_snap_1_0x00 gauge
+p2p_egress_snap_1_0x00 0
+
+# TYPE p2p_egress_snap_1_0x00_packets gauge
+p2p_egress_snap_1_0x00_packets 0
+
+# TYPE p2p_handle_eth_65_0x01_count counter
+p2p_handle_eth_65_0x01_count 1
+
+# TYPE p2p_handle_eth_65_0x01 summary
+p2p_handle_eth_65_0x01 {quantile="0.5"} 185
+p2p_handle_eth_65_0x01 {quantile="0.75"} 185
+p2p_handle_eth_65_0x01 {quantile="0.95"} 185
+p2p_handle_eth_65_0x01 {quantile="0.99"} 185
+p2p_handle_eth_65_0x01 {quantile="0.999"} 185
+p2p_handle_eth_65_0x01 {quantile="0.9999"} 185
+
+# TYPE p2p_handle_eth_65_0x03_count counter
+p2p_handle_eth_65_0x03_count 1
+
+# TYPE p2p_handle_eth_65_0x03 summary
+p2p_handle_eth_65_0x03 {quantile="0.5"} 126
+p2p_handle_eth_65_0x03 {quantile="0.75"} 126
+p2p_handle_eth_65_0x03 {quantile="0.95"} 126
+p2p_handle_eth_65_0x03 {quantile="0.99"} 126
+p2p_handle_eth_65_0x03 {quantile="0.999"} 126
+p2p_handle_eth_65_0x03 {quantile="0.9999"} 126
+
+# TYPE p2p_handle_eth_65_0x04_count counter
+p2p_handle_eth_65_0x04_count 154
+
+# TYPE p2p_handle_eth_65_0x04 summary
+p2p_handle_eth_65_0x04 {quantile="0.5"} 855
+p2p_handle_eth_65_0x04 {quantile="0.75"} 1172
+p2p_handle_eth_65_0x04 {quantile="0.95"} 1673.5
+p2p_handle_eth_65_0x04 {quantile="0.99"} 8296.449999999888
+p2p_handle_eth_65_0x04 {quantile="0.999"} 13775
+p2p_handle_eth_65_0x04 {quantile="0.9999"} 13775
+
+# TYPE p2p_handle_eth_65_0x06_count counter
+p2p_handle_eth_65_0x06_count 99
+
+# TYPE p2p_handle_eth_65_0x06 summary
+p2p_handle_eth_65_0x06 {quantile="0.5"} 180
+p2p_handle_eth_65_0x06 {quantile="0.75"} 250
+p2p_handle_eth_65_0x06 {quantile="0.95"} 2105
+p2p_handle_eth_65_0x06 {quantile="0.99"} 7910
+p2p_handle_eth_65_0x06 {quantile="0.999"} 7910
+p2p_handle_eth_65_0x06 {quantile="0.9999"} 7910
+
+# TYPE p2p_handle_eth_65_0x08_count counter
+p2p_handle_eth_65_0x08_count 17
+
+# TYPE p2p_handle_eth_65_0x08 summary
+p2p_handle_eth_65_0x08 {quantile="0.5"} 5
+p2p_handle_eth_65_0x08 {quantile="0.75"} 7
+p2p_handle_eth_65_0x08 {quantile="0.95"} 87
+p2p_handle_eth_65_0x08 {quantile="0.99"} 87
+p2p_handle_eth_65_0x08 {quantile="0.999"} 87
+p2p_handle_eth_65_0x08 {quantile="0.9999"} 87
+
+# TYPE p2p_handle_eth_66_0x03_count counter
+p2p_handle_eth_66_0x03_count 1
+
+# TYPE p2p_handle_eth_66_0x03 summary
+p2p_handle_eth_66_0x03 {quantile="0.5"} 405
+p2p_handle_eth_66_0x03 {quantile="0.75"} 405
+p2p_handle_eth_66_0x03 {quantile="0.95"} 405
+p2p_handle_eth_66_0x03 {quantile="0.99"} 405
+p2p_handle_eth_66_0x03 {quantile="0.999"} 405
+p2p_handle_eth_66_0x03 {quantile="0.9999"} 405
+
+# TYPE p2p_handle_eth_66_0x04_count counter
+p2p_handle_eth_66_0x04_count 2
+
+# TYPE p2p_handle_eth_66_0x04 summary
+p2p_handle_eth_66_0x04 {quantile="0.5"} 595.5
+p2p_handle_eth_66_0x04 {quantile="0.75"} 1091
+p2p_handle_eth_66_0x04 {quantile="0.95"} 1091
+p2p_handle_eth_66_0x04 {quantile="0.99"} 1091
+p2p_handle_eth_66_0x04 {quantile="0.999"} 1091
+p2p_handle_eth_66_0x04 {quantile="0.9999"} 1091
+
+# TYPE p2p_handle_eth_66_0x06_count counter
+p2p_handle_eth_66_0x06_count 1
+
+# TYPE p2p_handle_eth_66_0x06 summary
+p2p_handle_eth_66_0x06 {quantile="0.5"} 1309
+p2p_handle_eth_66_0x06 {quantile="0.75"} 1309
+p2p_handle_eth_66_0x06 {quantile="0.95"} 1309
+p2p_handle_eth_66_0x06 {quantile="0.99"} 1309
+p2p_handle_eth_66_0x06 {quantile="0.999"} 1309
+p2p_handle_eth_66_0x06 {quantile="0.9999"} 1309
+
+# TYPE p2p_handle_eth_66_0x08_count counter
+p2p_handle_eth_66_0x08_count 2
+
+# TYPE p2p_handle_eth_66_0x08 summary
+p2p_handle_eth_66_0x08 {quantile="0.5"} 16
+p2p_handle_eth_66_0x08 {quantile="0.75"} 28
+p2p_handle_eth_66_0x08 {quantile="0.95"} 28
+p2p_handle_eth_66_0x08 {quantile="0.99"} 28
+p2p_handle_eth_66_0x08 {quantile="0.999"} 28
+p2p_handle_eth_66_0x08 {quantile="0.9999"} 28
+
+# TYPE p2p_handle_snap_1_0x01_count counter
+p2p_handle_snap_1_0x01_count 1
+
+# TYPE p2p_handle_snap_1_0x01 summary
+p2p_handle_snap_1_0x01 {quantile="0.5"} 375
+p2p_handle_snap_1_0x01 {quantile="0.75"} 375
+p2p_handle_snap_1_0x01 {quantile="0.95"} 375
+p2p_handle_snap_1_0x01 {quantile="0.99"} 375
+p2p_handle_snap_1_0x01 {quantile="0.999"} 375
+p2p_handle_snap_1_0x01 {quantile="0.9999"} 375
+
+# TYPE p2p_ingress gauge #IMP
+p2p_ingress 3918214
+
+# TYPE p2p_ingress_eth_65_0x00 gauge #IMP
+p2p_ingress_eth_65_0x00 271
+
+# TYPE p2p_ingress_eth_65_0x00_packets gauge IMP
+p2p_ingress_eth_65_0x00_packets 3
+
+# TYPE p2p_ingress_eth_65_0x01 gauge IMP
+p2p_ingress_eth_65_0x01 0
+
+# TYPE p2p_ingress_eth_65_0x01_packets gauge IMP
+p2p_ingress_eth_65_0x01_packets 0
+
+# TYPE p2p_ingress_eth_65_0x03 gauge IMP
+p2p_ingress_eth_65_0x03 10
+
+# TYPE p2p_ingress_eth_65_0x03_packets gauge IMP
+p2p_ingress_eth_65_0x03_packets 1
+
+# TYPE p2p_ingress_eth_65_0x04 gauge IMP
+p2p_ingress_eth_65_0x04 3362209
+
+# TYPE p2p_ingress_eth_65_0x04_packets gauge IMP
+p2p_ingress_eth_65_0x04_packets 131
+
+# TYPE p2p_ingress_eth_65_0x06 gauge IMP
+p2p_ingress_eth_65_0x06 383458
+
+# TYPE p2p_ingress_eth_65_0x06_packets gauge IMP
+p2p_ingress_eth_65_0x06_packets 83
+
+# TYPE p2p_ingress_eth_65_0x08 gauge
+p2p_ingress_eth_65_0x08 96828
+
+# TYPE p2p_ingress_eth_65_0x08_packets gauge
+p2p_ingress_eth_65_0x08_packets 9
+
+# TYPE p2p_ingress_eth_66_0x00 gauge
+p2p_ingress_eth_66_0x00 436
+
+# TYPE p2p_ingress_eth_66_0x00_packets gauge
+p2p_ingress_eth_66_0x00_packets 5
+
+# TYPE p2p_ingress_eth_66_0x03 gauge
+p2p_ingress_eth_66_0x03 0
+
+# TYPE p2p_ingress_eth_66_0x03_packets gauge
+p2p_ingress_eth_66_0x03_packets 0
+
+# TYPE p2p_ingress_eth_66_0x04 gauge
+p2p_ingress_eth_66_0x04 0
+
+# TYPE p2p_ingress_eth_66_0x04_packets gauge
+p2p_ingress_eth_66_0x04_packets 0
+
+# TYPE p2p_ingress_eth_66_0x06 gauge
+p2p_ingress_eth_66_0x06 0
+
+# TYPE p2p_ingress_eth_66_0x06_packets gauge
+p2p_ingress_eth_66_0x06_packets 0
+
+# TYPE p2p_ingress_eth_66_0x08 gauge
+p2p_ingress_eth_66_0x08 0
+
+# TYPE p2p_ingress_eth_66_0x08_packets gauge
+p2p_ingress_eth_66_0x08_packets 0
+
+# TYPE p2p_ingress_snap_1_0x01 gauge
+p2p_ingress_snap_1_0x01 0
+
+# TYPE p2p_ingress_snap_1_0x01_packets gauge
+p2p_ingress_snap_1_0x01_packets 0
+
+# TYPE p2p_peers gauge IMP
+p2p_peers 8
+
+# TYPE p2p_serves gauge IMP
+p2p_serves 70
+
+# TYPE p2p_tracked_eth_66_0x03 gauge
+p2p_tracked_eth_66_0x03 2
+
+# TYPE p2p_tracked_eth_66_0x05 gauge
+p2p_tracked_eth_66_0x05 0
+
+# TYPE p2p_tracked_snap_1_0x00 gauge
+p2p_tracked_snap_1_0x00 0
+
+# TYPE p2p_wait_eth_66_0x03_count counter
+p2p_wait_eth_66_0x03_count 2
+
+# TYPE p2p_wait_eth_66_0x03 summary
+p2p_wait_eth_66_0x03 {quantile="0.5"} 567440.5
+p2p_wait_eth_66_0x03 {quantile="0.75"} 574606
+p2p_wait_eth_66_0x03 {quantile="0.95"} 574606
+p2p_wait_eth_66_0x03 {quantile="0.99"} 574606
+p2p_wait_eth_66_0x03 {quantile="0.999"} 574606
+p2p_wait_eth_66_0x03 {quantile="0.9999"} 574606
+
+# TYPE p2p_wait_eth_66_0x05_count counter
+p2p_wait_eth_66_0x05_count 1
+
+# TYPE p2p_wait_eth_66_0x05 summary
+p2p_wait_eth_66_0x05 {quantile="0.5"} 212272
+p2p_wait_eth_66_0x05 {quantile="0.75"} 212272
+p2p_wait_eth_66_0x05 {quantile="0.95"} 212272
+p2p_wait_eth_66_0x05 {quantile="0.99"} 212272
+p2p_wait_eth_66_0x05 {quantile="0.999"} 212272
+p2p_wait_eth_66_0x05 {quantile="0.9999"} 212272
+
+# TYPE p2p_wait_snap_1_0x00_count counter
+p2p_wait_snap_1_0x00_count 1
+
+# TYPE p2p_wait_snap_1_0x00 summary
+p2p_wait_snap_1_0x00 {quantile="0.5"} 574823
+p2p_wait_snap_1_0x00 {quantile="0.75"} 574823
+p2p_wait_snap_1_0x00 {quantile="0.95"} 574823
+p2p_wait_snap_1_0x00 {quantile="0.99"} 574823
+p2p_wait_snap_1_0x00 {quantile="0.999"} 574823
+p2p_wait_snap_1_0x00 {quantile="0.9999"} 574823
+
+# TYPE rpc_duration_all_count counter
+rpc_duration_all_count 0
+
+# TYPE rpc_duration_all summary
+rpc_duration_all {quantile="0.5"} 0
+rpc_duration_all {quantile="0.75"} 0
+rpc_duration_all {quantile="0.95"} 0
+rpc_duration_all {quantile="0.99"} 0
+rpc_duration_all {quantile="0.999"} 0
+rpc_duration_all {quantile="0.9999"} 0
+
+# TYPE rpc_failure gauge
+rpc_failure 0
+
+# TYPE rpc_requests gauge
+rpc_requests 0
+
+# TYPE rpc_success gauge
+rpc_success 0
+
+# TYPE state_snapshot_bloom_account_falsehit gauge
+state_snapshot_bloom_account_falsehit 0
+
+# TYPE state_snapshot_bloom_account_miss gauge
+state_snapshot_bloom_account_miss 0
+
+# TYPE state_snapshot_bloom_account_truehit gauge
+state_snapshot_bloom_account_truehit 0
+
+# TYPE state_snapshot_bloom_error gauge
+state_snapshot_bloom_error 0
+
+# TYPE state_snapshot_bloom_storage_falsehit gauge
+state_snapshot_bloom_storage_falsehit 0
+
+# TYPE state_snapshot_bloom_storage_miss gauge
+state_snapshot_bloom_storage_miss 0
+
+# TYPE state_snapshot_bloom_storage_truehit gauge
+state_snapshot_bloom_storage_truehit 0
+
+# TYPE state_snapshot_clean_account_hit gauge
+state_snapshot_clean_account_hit 0
+
+# TYPE state_snapshot_clean_account_inex gauge
+state_snapshot_clean_account_inex 0
+
+# TYPE state_snapshot_clean_account_miss gauge
+state_snapshot_clean_account_miss 0
+
+# TYPE state_snapshot_clean_account_read gauge
+state_snapshot_clean_account_read 0
+
+# TYPE state_snapshot_clean_account_write gauge
+state_snapshot_clean_account_write 0
+
+# TYPE state_snapshot_clean_storage_hit gauge
+state_snapshot_clean_storage_hit 0
+
+# TYPE state_snapshot_clean_storage_inex gauge
+state_snapshot_clean_storage_inex 0
+
+# TYPE state_snapshot_clean_storage_miss gauge
+state_snapshot_clean_storage_miss 0
+
+# TYPE state_snapshot_clean_storage_read gauge
+state_snapshot_clean_storage_read 0
+
+# TYPE state_snapshot_clean_storage_write gauge
+state_snapshot_clean_storage_write 0
+
+# TYPE state_snapshot_dirty_account_hit gauge
+state_snapshot_dirty_account_hit 0
+
+# TYPE state_snapshot_dirty_account_hit_depth_count counter
+state_snapshot_dirty_account_hit_depth_count 0
+
+# TYPE state_snapshot_dirty_account_hit_depth summary
+state_snapshot_dirty_account_hit_depth {quantile="0.5"} 0
+state_snapshot_dirty_account_hit_depth {quantile="0.75"} 0
+state_snapshot_dirty_account_hit_depth {quantile="0.95"} 0
+state_snapshot_dirty_account_hit_depth {quantile="0.99"} 0
+state_snapshot_dirty_account_hit_depth {quantile="0.999"} 0
+state_snapshot_dirty_account_hit_depth {quantile="0.9999"} 0
+
+# TYPE state_snapshot_dirty_account_inex gauge
+state_snapshot_dirty_account_inex 0
+
+# TYPE state_snapshot_dirty_account_miss gauge
+state_snapshot_dirty_account_miss 0
+
+# TYPE state_snapshot_dirty_account_read gauge
+state_snapshot_dirty_account_read 0
+
+# TYPE state_snapshot_dirty_account_write gauge
+state_snapshot_dirty_account_write 0
+
+# TYPE state_snapshot_dirty_storage_hit gauge
+state_snapshot_dirty_storage_hit 0
+
+# TYPE state_snapshot_dirty_storage_hit_depth_count counter
+state_snapshot_dirty_storage_hit_depth_count 0
+
+# TYPE state_snapshot_dirty_storage_hit_depth summary
+state_snapshot_dirty_storage_hit_depth {quantile="0.5"} 0
+state_snapshot_dirty_storage_hit_depth {quantile="0.75"} 0
+state_snapshot_dirty_storage_hit_depth {quantile="0.95"} 0
+state_snapshot_dirty_storage_hit_depth {quantile="0.99"} 0
+state_snapshot_dirty_storage_hit_depth {quantile="0.999"} 0
+state_snapshot_dirty_storage_hit_depth {quantile="0.9999"} 0
+
+# TYPE state_snapshot_dirty_storage_inex gauge
+state_snapshot_dirty_storage_inex 0
+
+# TYPE state_snapshot_dirty_storage_miss gauge
+state_snapshot_dirty_storage_miss 0
+
+# TYPE state_snapshot_dirty_storage_read gauge
+state_snapshot_dirty_storage_read 0
+
+# TYPE state_snapshot_dirty_storage_write gauge
+state_snapshot_dirty_storage_write 0
+
+# TYPE state_snapshot_flush_account_item gauge
+state_snapshot_flush_account_item 0
+
+# TYPE state_snapshot_flush_account_size gauge
+state_snapshot_flush_account_size 0
+
+# TYPE state_snapshot_flush_storage_item gauge
+state_snapshot_flush_storage_item 0
+
+# TYPE state_snapshot_flush_storage_size gauge
+state_snapshot_flush_storage_size 0
+
+# TYPE state_snapshot_generation_account_generated gauge
+state_snapshot_generation_account_generated 8893
+
+# TYPE state_snapshot_generation_account_missall gauge
+state_snapshot_generation_account_missall 1
+
+# TYPE state_snapshot_generation_account_recovered gauge
+state_snapshot_generation_account_recovered 0
+
+# TYPE state_snapshot_generation_account_wiped gauge
+state_snapshot_generation_account_wiped 0
+
+# TYPE state_snapshot_generation_duration_account_prove gauge
+state_snapshot_generation_duration_account_prove 16221
+
+# TYPE state_snapshot_generation_duration_account_snapread gauge
+state_snapshot_generation_duration_account_snapread 89448
+
+# TYPE state_snapshot_generation_duration_account_trieread gauge
+state_snapshot_generation_duration_account_trieread 78590307
+
+# TYPE state_snapshot_generation_duration_account_write gauge
+state_snapshot_generation_duration_account_write 84327092
+
+# TYPE state_snapshot_generation_duration_storage_prove gauge
+state_snapshot_generation_duration_storage_prove 0
+
+# TYPE state_snapshot_generation_duration_storage_snapread gauge
+state_snapshot_generation_duration_storage_snapread 0
+
+# TYPE state_snapshot_generation_duration_storage_trieread gauge
+state_snapshot_generation_duration_storage_trieread 0
+
+# TYPE state_snapshot_generation_duration_storage_write gauge
+state_snapshot_generation_duration_storage_write 0
+
+# TYPE state_snapshot_generation_proof_failure gauge
+state_snapshot_generation_proof_failure 1
+
+# TYPE state_snapshot_generation_proof_success gauge
+state_snapshot_generation_proof_success 0
+
+# TYPE state_snapshot_generation_storage_generated gauge
+state_snapshot_generation_storage_generated 0
+
+# TYPE state_snapshot_generation_storage_missall gauge
+state_snapshot_generation_storage_missall 0
+
+# TYPE state_snapshot_generation_storage_recovered gauge
+state_snapshot_generation_storage_recovered 0
+
+# TYPE state_snapshot_generation_storage_wiped gauge
+state_snapshot_generation_storage_wiped 0
+
+# TYPE system_cpu_goroutines gauge
+system_cpu_goroutines 129
+
+# TYPE system_cpu_procload gauge
+system_cpu_procload 47
+
+# TYPE system_cpu_sysload gauge
+system_cpu_sysload 215
+
+# TYPE system_cpu_syswait gauge
+system_cpu_syswait 25
+
+# TYPE system_cpu_threads gauge
+system_cpu_threads 13
+
+# TYPE system_disk_readbytes gauge
+system_disk_readbytes 5017534
+
+# TYPE system_disk_readcount gauge
+system_disk_readcount 913
+
+# TYPE system_disk_readdata gauge
+system_disk_readdata 1777439
+
+# TYPE system_disk_writebytes gauge
+system_disk_writebytes 36555070
+
+# TYPE system_disk_writecount gauge
+system_disk_writecount 72172
+
+# TYPE system_disk_writedata gauge
+system_disk_writedata 13225794
+
+# TYPE system_memory_allocs gauge
+system_memory_allocs 2144962
+
+# TYPE system_memory_frees gauge
+system_memory_frees 1268637
+
+# TYPE system_memory_held gauge
+system_memory_held 728506368
+
+# TYPE system_memory_pauses gauge
+system_memory_pauses 4199764
+
+# TYPE system_memory_used gauge
+system_memory_used 577212048
+
+# TYPE trie_bloom_add gauge
+trie_bloom_add 0
+
+# TYPE trie_bloom_error gauge
+trie_bloom_error 0
+
+# TYPE trie_bloom_fault gauge
+trie_bloom_fault 2
+
+# TYPE trie_bloom_load gauge
+trie_bloom_load 0
+
+# TYPE trie_bloom_miss gauge
+trie_bloom_miss 0
+
+# TYPE trie_bloom_test gauge
+trie_bloom_test 0
+
+# TYPE trie_memcache_clean_hit gauge
+trie_memcache_clean_hit 6
+
+# TYPE trie_memcache_clean_miss gauge
+trie_memcache_clean_miss 12356
+
+# TYPE trie_memcache_clean_read gauge
+trie_memcache_clean_read 2679
+
+# TYPE trie_memcache_clean_write gauge
+trie_memcache_clean_write 1483023
+
+# TYPE trie_memcache_commit_nodes gauge
+trie_memcache_commit_nodes 12356
+
+# TYPE trie_memcache_commit_size gauge
+trie_memcache_commit_size 1869429
+
+# TYPE trie_memcache_dirty_hit gauge
+trie_memcache_dirty_hit 0
+
+# TYPE trie_memcache_dirty_miss gauge
+trie_memcache_dirty_miss 12356
+
+# TYPE trie_memcache_dirty_read gauge
+trie_memcache_dirty_read 0
+
+# TYPE trie_memcache_dirty_write gauge
+trie_memcache_dirty_write 1474037
+
+# TYPE trie_memcache_flush_nodes gauge
+trie_memcache_flush_nodes 0
+
+# TYPE trie_memcache_flush_size gauge
+trie_memcache_flush_size 0
+
+# TYPE trie_memcache_gc_nodes gauge
+trie_memcache_gc_nodes 0
+
+# TYPE trie_memcache_gc_size gauge
+trie_memcache_gc_size 0
+
+# TYPE trie_prefetch_miner_account_dup gauge
+trie_prefetch_miner_account_dup 0
+
+# TYPE trie_prefetch_miner_account_load gauge
+trie_prefetch_miner_account_load 0
+
+# TYPE trie_prefetch_miner_account_skip gauge
+trie_prefetch_miner_account_skip 0
+
+# TYPE trie_prefetch_miner_account_waste gauge
+trie_prefetch_miner_account_waste 0
+
+# TYPE trie_prefetch_miner_deliverymiss gauge
+trie_prefetch_miner_deliverymiss 1
+
+# TYPE trie_prefetch_miner_storage_dup gauge
+trie_prefetch_miner_storage_dup 0
+
+# TYPE trie_prefetch_miner_storage_load gauge
+trie_prefetch_miner_storage_load 0
+
+# TYPE trie_prefetch_miner_storage_skip gauge
+trie_prefetch_miner_storage_skip 0
+
+# TYPE trie_prefetch_miner_storage_waste gauge
+trie_prefetch_miner_storage_waste 0
+
+# TYPE txpool_invalid gauge IMP + rate
+txpool_invalid 0
+
+# TYPE txpool_known gauge
+txpool_known 0
+
+
+# TYPE txpool_overflowed gauge
+txpool_overflowed 0
+
+#---
+# TYPE txpool_pending gauge IMP
+txpool_pending 0
+
+# TYPE txpool_local gauge IMP
+txpool_local 0
+
+# TYPE txpool_queued gauge
+txpool_queued 0
+#---
+
+# TYPE txpool_pending_discard gauge IMP + rate
+txpool_pending_discard 0
+
+# TYPE txpool_pending_nofunds gauge IMP + rate
+txpool_pending_nofunds 0
+
+# TYPE txpool_pending_ratelimit gauge IMP + rate
+txpool_pending_ratelimit 0
+
+# TYPE txpool_pending_replace gauge IMP + rate
+txpool_pending_replace 0
+
+
+
+# TYPE txpool_queued_discard gauge IMP + rate
+txpool_queued_discard 0
+
+# TYPE txpool_queued_eviction gauge IMP + rate
+txpool_queued_eviction 0
+
+# TYPE txpool_queued_nofunds gauge IMP + rate
+txpool_queued_nofunds 0
+
+# TYPE txpool_queued_ratelimit gauge IMP + rate
+txpool_queued_ratelimit 0
+
+# TYPE txpool_queued_replace gauge IMP + rate
+txpool_queued_replace 0
+
+# TYPE txpool_reheap_count counter
+txpool_reheap_count 0
+
+# TYPE txpool_reheap summary
+txpool_reheap {quantile="0.5"} 0
+txpool_reheap {quantile="0.75"} 0
+txpool_reheap {quantile="0.95"} 0
+txpool_reheap {quantile="0.99"} 0
+txpool_reheap {quantile="0.999"} 0
+txpool_reheap {quantile="0.9999"} 0
+
+# TYPE txpool_slots gauge
+txpool_slots 0
+
+# TYPE txpool_underpriced gauge IMP + rate
+txpool_underpriced 0
+
+# TYPE txpool_valid gauge IMP + rate
+txpool_valid 0
+
+# TYPE vflux_server_capQueryNonZero gauge
+vflux_server_capQueryNonZero 0
+
+# TYPE vflux_server_capQueryZero gauge
+vflux_server_capQueryZero 0
+
+# TYPE vflux_server_clientEvent_activated gauge
+vflux_server_clientEvent_activated 0
+
+# TYPE vflux_server_clientEvent_connected gauge
+vflux_server_clientEvent_connected 0
+
+# TYPE vflux_server_clientEvent_deactivated gauge
+vflux_server_clientEvent_deactivated 0
+
+# TYPE vflux_server_clientEvent_disconnected gauge
+vflux_server_clientEvent_disconnected 0
+
+# TYPE vflux_server_totalConnected gauge
+vflux_server_totalConnected 0
+
diff --git a/src/go/plugin/go.d/modules/haproxy/README.md b/src/go/plugin/go.d/modules/haproxy/README.md
new file mode 120000
index 000000000..2f52cf846
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/README.md
@@ -0,0 +1 @@
+integrations/haproxy.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/haproxy/charts.go b/src/go/plugin/go.d/modules/haproxy/charts.go
new file mode 100644
index 000000000..e7118a078
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/charts.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package haproxy
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
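+// charts is the base set of backend charts; per-proxy dimensions and per-proxy charts are added during collection.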
+var charts = module.Charts{
+ chartBackendCurrentSessions.Copy(),
+ chartBackendSessions.Copy(),
+
+ chartBackendResponseTimeAverage.Copy(),
+
+ chartBackendQueueTimeAverage.Copy(),
+ chartBackendCurrentQueue.Copy(),
+}
+
+var (
+ chartBackendCurrentSessions = module.Chart{
+ ID: "backend_current_sessions",
+ Title: "Current number of active sessions",
+ Units: "sessions",
+ Fam: "backend sessions",
+ Ctx: "haproxy.backend_current_sessions",
+ }
+ chartBackendSessions = module.Chart{
+ ID: "backend_sessions",
+ Title: "Sessions rate",
+ Units: "sessions/s",
+ Fam: "backend sessions",
+ Ctx: "haproxy.backend_sessions",
+ }
+)
+
+var (
+ chartBackendResponseTimeAverage = module.Chart{
+ ID: "backend_response_time_average",
+ Title: "Average response time for last 1024 successful connections",
+ Units: "milliseconds",
+ Fam: "backend responses",
+ Ctx: "haproxy.backend_response_time_average",
+ }
+ chartTemplateBackendHTTPResponses = module.Chart{
+ ID: "backend_http_responses_proxy_%s",
+ Title: "HTTP responses by code class for <code>%s</code> proxy",
+ Units: "responses/s",
+ Fam: "backend responses",
+ Ctx: "haproxy.backend_http_responses",
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "haproxy_backend_http_responses_1xx_proxy_%s", Name: "1xx", Algo: module.Incremental},
+ {ID: "haproxy_backend_http_responses_2xx_proxy_%s", Name: "2xx", Algo: module.Incremental},
+ {ID: "haproxy_backend_http_responses_3xx_proxy_%s", Name: "3xx", Algo: module.Incremental},
+ {ID: "haproxy_backend_http_responses_4xx_proxy_%s", Name: "4xx", Algo: module.Incremental},
+ {ID: "haproxy_backend_http_responses_5xx_proxy_%s", Name: "5xx", Algo: module.Incremental},
+ {ID: "haproxy_backend_http_responses_other_proxy_%s", Name: "other", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartBackendQueueTimeAverage = module.Chart{
+ ID: "backend_queue_time_average",
+ Title: "Average queue time for last 1024 successful connections",
+ Units: "milliseconds",
+ Fam: "backend queue",
+ Ctx: "haproxy.backend_queue_time_average",
+ }
+ chartBackendCurrentQueue = module.Chart{
+ ID: "backend_current_queue",
+ Title: "Current number of queued requests",
+ Units: "requests",
+ Fam: "backend queue",
+ Ctx: "haproxy.backend_current_queue",
+ }
+)
+
+var (
+ chartTemplateBackendNetworkIO = module.Chart{
+ ID: "backend_network_io_proxy_%s",
+ Title: "Network traffic for <code>%s</code> proxy",
+ Units: "bytes/s",
+ Fam: "backend network",
+ Ctx: "haproxy.backend_network_io",
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "haproxy_backend_bytes_in_proxy_%s", Name: "in", Algo: module.Incremental},
+ {ID: "haproxy_backend_bytes_out_proxy_%s", Name: "out", Algo: module.Incremental, Mul: -1},
+ },
+ }
+)
+
+func newChartBackendHTTPResponses(proxy string) *module.Chart {
+ return newBackendChartFromTemplate(chartTemplateBackendHTTPResponses, proxy)
+}
+
+func newChartBackendNetworkIO(proxy string) *module.Chart {
+ return newBackendChartFromTemplate(chartTemplateBackendNetworkIO, proxy)
+}
+
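+// newBackendChartFromTemplate instantiates a per-proxy chart from a template, filling the %s placeholders in the chart ID, title, and dimension IDs with the proxy name.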
+func newBackendChartFromTemplate(tpl module.Chart, proxy string) *module.Chart {
+ c := tpl.Copy()
+ c.ID = fmt.Sprintf(c.ID, proxy)
+ c.Title = fmt.Sprintf(c.Title, proxy)
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, proxy)
+ }
+ return c
+}
diff --git a/src/go/plugin/go.d/modules/haproxy/collect.go b/src/go/plugin/go.d/modules/haproxy/collect.go
new file mode 100644
index 000000000..e3ade66a5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/collect.go
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package haproxy
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricBackendSessionsTotal = "haproxy_backend_sessions_total"
+ metricBackendCurrentSessions = "haproxy_backend_current_sessions"
+ metricBackendHTTPResponsesTotal = "haproxy_backend_http_responses_total"
+ metricBackendResponseTimeAverageSeconds = "haproxy_backend_response_time_average_seconds"
+ metricBackendCurrentQueue = "haproxy_backend_current_queue"
+ metricBackendQueueTimeAverageSeconds = "haproxy_backend_queue_time_average_seconds"
+ metricBackendBytesInTotal = "haproxy_backend_bytes_in_total"
+ metricBackendBytesOutTotal = "haproxy_backend_bytes_out_total"
+)
+
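+// isHaproxyMetrics reports whether the scraped series look like HAProxy exporter output (any series name prefixed with "haproxy_").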
+func isHaproxyMetrics(pms prometheus.Series) bool {
+ for _, pm := range pms {
+ if strings.HasPrefix(pm.Name(), "haproxy_") {
+ return true
+ }
+ }
+ return false
+}
+
+func (h *Haproxy) collect() (map[string]int64, error) {
+ pms, err := h.prom.ScrapeSeries()
+ if err != nil {
+ return nil, err
+ }
+
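+ // Validate the response only until the first confirmed HAProxy scrape; later collections skip the check.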
+ if h.validateMetrics && !isHaproxyMetrics(pms) {
+ return nil, errors.New("unexpected metrics (not HAProxy)")
+ }
+ h.validateMetrics = false
+
+ mx := make(map[string]int64)
+ for _, pm := range pms {
+ proxy := pm.Labels.Get("proxy")
+ if proxy == "" {
+ continue
+ }
+
+ if !h.proxies[proxy] {
+ h.proxies[proxy] = true
+ h.addProxyToCharts(proxy)
+ }
+
+ mx[dimID(pm)] = int64(pm.Value * multiplier(pm))
+ }
+
+ return mx, nil
+}
+
+func (h *Haproxy) addProxyToCharts(proxy string) {
+ h.addDimToChart(chartBackendCurrentSessions.ID, &module.Dim{
+ ID: proxyDimID(metricBackendCurrentSessions, proxy),
+ Name: proxy,
+ })
+ h.addDimToChart(chartBackendSessions.ID, &module.Dim{
+ ID: proxyDimID(metricBackendSessionsTotal, proxy),
+ Name: proxy,
+ Algo: module.Incremental,
+ })
+
+ h.addDimToChart(chartBackendResponseTimeAverage.ID, &module.Dim{
+ ID: proxyDimID(metricBackendResponseTimeAverageSeconds, proxy),
+ Name: proxy,
+ })
+ if err := h.Charts().Add(newChartBackendHTTPResponses(proxy)); err != nil {
+ h.Warning(err)
+ }
+
+ h.addDimToChart(chartBackendCurrentQueue.ID, &module.Dim{
+ ID: proxyDimID(metricBackendCurrentQueue, proxy),
+ Name: proxy,
+ })
+ h.addDimToChart(chartBackendQueueTimeAverage.ID, &module.Dim{
+ ID: proxyDimID(metricBackendQueueTimeAverageSeconds, proxy),
+ Name: proxy,
+ })
+
+ if err := h.Charts().Add(newChartBackendNetworkIO(proxy)); err != nil {
+ h.Warning(err)
+ }
+}
+
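+// addDimToChart attaches a new dimension to an existing chart and marks the chart not created so its definition is resent with the new dimension.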
+func (h *Haproxy) addDimToChart(chartID string, dim *module.Dim) {
+ chart := h.Charts().Get(chartID)
+ if chart == nil {
+ h.Warningf("error on adding '%s' dimension: can not find '%s' chart", dim.ID, chartID)
+ return
+ }
+ if err := chart.AddDim(dim); err != nil {
+ h.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
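+// multiplier converts second-based time averages to milliseconds to match the chart units; all other metrics pass through unchanged.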
+func multiplier(pm prometheus.SeriesSample) float64 {
+ switch pm.Name() {
+ case metricBackendResponseTimeAverageSeconds,
+ metricBackendQueueTimeAverageSeconds:
+ // to milliseconds
+ return 1000
+ }
+ return 1
+}
+
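+// dimID builds the key used in the collected metrics map, e.g. "haproxy_backend_http_responses_2xx_proxy_<proxy>"; the "_total"/"_seconds" suffixes are stripped so keys line up with chart dimension IDs.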
+func dimID(pm prometheus.SeriesSample) string {
+ proxy := pm.Labels.Get("proxy")
+ if proxy == "" {
+ return ""
+ }
+
+ name := cleanMetricName(pm.Name())
+ if pm.Name() == metricBackendHTTPResponsesTotal {
+ name += "_" + pm.Labels.Get("code")
+ }
+ return proxyDimID(name, proxy)
+}
+
+func proxyDimID(metric, proxy string) string {
+ return cleanMetricName(metric) + "_proxy_" + proxy
+}
+
+func cleanMetricName(name string) string {
+ if strings.HasSuffix(name, "_total") {
+ return name[:len(name)-6]
+ }
+ if strings.HasSuffix(name, "_seconds") {
+ return name[:len(name)-8]
+ }
+ return name
+}
diff --git a/src/go/plugin/go.d/modules/haproxy/config_schema.json b/src/go/plugin/go.d/modules/haproxy/config_schema.json
new file mode 100644
index 000000000..6a794145e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "HAProxy collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the HAProxy [Prometheus endpoint](https://www.haproxy.com/documentation/haproxy-configuration-tutorials/alerts-and-monitoring/prometheus/).",
+ "type": "string",
+ "default": "http://127.0.0.1:8404/metrics",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/haproxy/haproxy.go b/src/go/plugin/go.d/modules/haproxy/haproxy.go
new file mode 100644
index 000000000..0e3f9f3d1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/haproxy.go
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package haproxy
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("haproxy", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Haproxy {
+ return &Haproxy{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8404/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+
+ charts: charts.Copy(),
+ proxies: make(map[string]bool),
+ validateMetrics: true,
+ }
+}
+
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
+
+type Haproxy struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prom prometheus.Prometheus
+
+ validateMetrics bool
+ proxies map[string]bool
+}
+
+func (h *Haproxy) Configuration() any {
+ return h.Config
+}
+
+func (h *Haproxy) Init() error {
+ if err := h.validateConfig(); err != nil {
+ h.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prom, err := h.initPrometheusClient()
+ if err != nil {
+ h.Errorf("prometheus client initialization: %v", err)
+ return err
+ }
+ h.prom = prom
+
+ return nil
+}
+
+func (h *Haproxy) Check() error {
+ mx, err := h.collect()
+ if err != nil {
+ h.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (h *Haproxy) Charts() *module.Charts {
+ return h.charts
+}
+
+func (h *Haproxy) Collect() map[string]int64 {
+ mx, err := h.collect()
+ if err != nil {
+ h.Error(err)
+ return nil
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (h *Haproxy) Cleanup() {
+ if h.prom != nil && h.prom.HTTPClient() != nil {
+ h.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/haproxy/haproxy_test.go b/src/go/plugin/go.d/modules/haproxy/haproxy_test.go
new file mode 100644
index 000000000..80a733ffb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/haproxy_test.go
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package haproxy
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer2310Metrics, _ = os.ReadFile("testdata/v2.3.10/metrics.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer2310Metrics": dataVer2310Metrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestHaproxy_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Haproxy{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestHaproxy_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset 'url'": {
+ wantFail: true,
+ config: Config{HTTP: web.HTTP{
+ Request: web.Request{},
+ }},
+ },
+ "fails on invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ }},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rdb := New()
+ rdb.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, rdb.Init())
+ } else {
+ assert.NoError(t, rdb.Init())
+ }
+ })
+ }
+}
+
+func TestHaproxy_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestHaproxy_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestHaproxy_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (h *Haproxy, cleanup func())
+ }{
+ "success on valid response v2.3.1": {
+ wantFail: false,
+ prepare: prepareCaseHaproxyV231Metrics,
+ },
+ "fails on response with unexpected metrics (not HAProxy)": {
+ wantFail: true,
+ prepare: prepareCaseNotHaproxyMetrics,
+ },
+ "fails on 404 response": {
+ wantFail: true,
+ prepare: prepareCase404Response,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ h, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, h.Check())
+ } else {
+ assert.NoError(t, h.Check())
+ }
+ })
+ }
+}
+
+func TestHaproxy_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (h *Haproxy, cleanup func())
+ wantCollected map[string]int64
+ }{
+ "success on valid response v2.3.1": {
+ prepare: prepareCaseHaproxyV231Metrics,
+ wantCollected: map[string]int64{
+ "haproxy_backend_bytes_in_proxy_proxy1": 21057046294,
+ "haproxy_backend_bytes_in_proxy_proxy2": 2493759083896,
+ "haproxy_backend_bytes_out_proxy_proxy1": 41352782609,
+ "haproxy_backend_bytes_out_proxy_proxy2": 5131407558,
+ "haproxy_backend_current_queue_proxy_proxy1": 1,
+ "haproxy_backend_current_queue_proxy_proxy2": 1,
+ "haproxy_backend_current_sessions_proxy_proxy1": 1,
+ "haproxy_backend_current_sessions_proxy_proxy2": 1322,
+ "haproxy_backend_http_responses_1xx_proxy_proxy1": 1,
+ "haproxy_backend_http_responses_1xx_proxy_proxy2": 4130401,
+ "haproxy_backend_http_responses_2xx_proxy_proxy1": 21338013,
+ "haproxy_backend_http_responses_2xx_proxy_proxy2": 1,
+ "haproxy_backend_http_responses_3xx_proxy_proxy1": 10004,
+ "haproxy_backend_http_responses_3xx_proxy_proxy2": 1,
+ "haproxy_backend_http_responses_4xx_proxy_proxy1": 10170758,
+ "haproxy_backend_http_responses_4xx_proxy_proxy2": 1,
+ "haproxy_backend_http_responses_5xx_proxy_proxy1": 3075,
+ "haproxy_backend_http_responses_5xx_proxy_proxy2": 1,
+ "haproxy_backend_http_responses_other_proxy_proxy1": 5657,
+ "haproxy_backend_http_responses_other_proxy_proxy2": 1,
+ "haproxy_backend_queue_time_average_proxy_proxy1": 0,
+ "haproxy_backend_queue_time_average_proxy_proxy2": 0,
+ "haproxy_backend_response_time_average_proxy_proxy1": 52,
+ "haproxy_backend_response_time_average_proxy_proxy2": 1,
+ "haproxy_backend_sessions_proxy_proxy1": 31527507,
+ "haproxy_backend_sessions_proxy_proxy2": 4131723,
+ },
+ },
+ "fails on response with unexpected metrics (not HAProxy)": {
+ prepare: prepareCaseNotHaproxyMetrics,
+ },
+ "fails on 404 response": {
+ prepare: prepareCase404Response,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ h, cleanup := test.prepare(t)
+ defer cleanup()
+
+ ms := h.Collect()
+
+ assert.Equal(t, test.wantCollected, ms)
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, h, ms)
+ }
+ })
+ }
+}
+
+func prepareCaseHaproxyV231Metrics(t *testing.T) (*Haproxy, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer2310Metrics)
+ }))
+ h := New()
+ h.URL = srv.URL
+ require.NoError(t, h.Init())
+
+ return h, srv.Close
+}
+
+func prepareCaseNotHaproxyMetrics(t *testing.T) (*Haproxy, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(`
+# HELP haproxy_backend_http_responses_total Total number of HTTP responses.
+# TYPE haproxy_backend_http_responses_total counter
+application_backend_http_responses_total{proxy="infra-traefik-web",code="1xx"} 0
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="1xx"} 4130401
+application_backend_http_responses_total{proxy="infra-traefik-web",code="2xx"} 21338013
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="2xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="3xx"} 10004
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="3xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="4xx"} 10170758
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="4xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="5xx"} 3075
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="5xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="other"} 5657
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"} 0
+`))
+ }))
+ h := New()
+ h.URL = srv.URL
+ require.NoError(t, h.Init())
+
+ return h, srv.Close
+}
+
+func prepareCase404Response(t *testing.T) (*Haproxy, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ h := New()
+ h.URL = srv.URL
+ require.NoError(t, h.Init())
+
+ return h, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*Haproxy, func()) {
+ t.Helper()
+ h := New()
+ h.URL = "http://127.0.0.1:38001"
+ require.NoError(t, h.Init())
+
+ return h, func() {}
+}
+
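+// ensureCollectedHasAllChartsDimsVarsIDs asserts that every chart dimension and variable ID has a matching key in the collected metrics map.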
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, h *Haproxy, ms map[string]int64) {
+ for _, chart := range *h.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := ms[dim.ID]
+ assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := ms[v.ID]
+ assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/haproxy/init.go b/src/go/plugin/go.d/modules/haproxy/init.go
new file mode 100644
index 000000000..0922a9b2d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/init.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package haproxy
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (h *Haproxy) validateConfig() error {
+ if h.URL == "" {
+ return errors.New("'url' is not set")
+ }
+ if _, err := web.NewHTTPRequest(h.Request); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (h *Haproxy) initPrometheusClient() (prometheus.Prometheus, error) {
+ httpClient, err := web.NewHTTPClient(h.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ prom := prometheus.NewWithSelector(httpClient, h.Request, sr)
+ return prom, nil
+}
+
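+// sr limits the parsed series to the backend metrics the collector charts.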
+var sr, _ = selector.Expr{
+ Allow: []string{
+ metricBackendHTTPResponsesTotal,
+ metricBackendCurrentQueue,
+ metricBackendQueueTimeAverageSeconds,
+ metricBackendBytesInTotal,
+ metricBackendResponseTimeAverageSeconds,
+ metricBackendSessionsTotal,
+ metricBackendCurrentSessions,
+ metricBackendBytesOutTotal,
+ },
+}.Parse()
diff --git a/src/go/plugin/go.d/modules/haproxy/integrations/haproxy.md b/src/go/plugin/go.d/modules/haproxy/integrations/haproxy.md
new file mode 100644
index 000000000..1619b9d70
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/integrations/haproxy.md
@@ -0,0 +1,276 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/haproxy/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/haproxy/metadata.yaml"
+sidebar_label: "HAProxy"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HAProxy
+
+
+<img src="https://netdata.cloud/img/haproxy.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: haproxy
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors HAProxy servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per HAProxy instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| haproxy.backend_current_sessions | a dimension per proxy | sessions |
+| haproxy.backend_sessions | a dimension per proxy | sessions/s |
+| haproxy.backend_response_time_average | a dimension per proxy | milliseconds |
+| haproxy.backend_queue_time_average | a dimension per proxy | milliseconds |
+| haproxy.backend_current_queue | a dimension per proxy | requests |
+
+### Per proxy
+
+These metrics refer to the Proxy.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| haproxy.backend_http_responses | 1xx, 2xx, 3xx, 4xx, 5xx, other | responses/s |
+| haproxy.backend_network_io | in, out | bytes/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable PROMEX addon.
+
+To enable the PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).
+
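+For reference, a minimal `haproxy.cfg` frontend exposing the built-in Prometheus exporter might look like the sketch below (based on the HAProxy documentation; it assumes HAProxy 2.0+ built with the PROMEX addon, and the bind address and path are examples to adjust for your environment):
+
+```
+frontend stats
+    bind *:8404
+    http-request use-service prometheus-exporter if { path /metrics }
+    stats enable
+    stats uri /stats
+    stats refresh 10s
+```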
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/haproxy.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/haproxy.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8404/metrics | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8404/metrics
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8404/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+HAProxy with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:8404/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8404/metrics
+
+ - name: remote
+ url: http://192.0.2.1:8404/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `haproxy` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m haproxy
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `haproxy` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep haproxy
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep haproxy /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep haproxy
+```
+
+
diff --git a/src/go/plugin/go.d/modules/haproxy/metadata.yaml b/src/go/plugin/go.d/modules/haproxy/metadata.yaml
new file mode 100644
index 000000000..adc879602
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/metadata.yaml
@@ -0,0 +1,231 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-haproxy
+ plugin_name: go.d.plugin
+ module_name: haproxy
+ monitored_instance:
+ name: HAProxy
+ link: https://www.haproxy.org/
+ icon_filename: haproxy.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - haproxy
+ - web
+ - webserver
+ - http
+ - proxy
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors HAProxy servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+        - title: Enable the PROMEX addon.
+ description: |
+            To enable the PROMEX addon, follow the [official documentation](https://github.com/haproxy/haproxy/tree/master/addons/promex).
+ configuration:
+ file:
+ name: go.d/haproxy.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8404/metrics
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8404/metrics
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+            description: HAProxy with HTTPS enabled and a self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:8404/metrics
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8404/metrics
+
+ - name: remote
+ url: http://192.0.2.1:8404/metrics
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: haproxy.backend_current_sessions
+ description: Current number of active sessions
+ unit: sessions
+ chart_type: line
+ dimensions:
+ - name: a dimension per proxy
+ - name: haproxy.backend_sessions
+ description: Sessions rate
+ unit: sessions/s
+ chart_type: line
+ dimensions:
+ - name: a dimension per proxy
+ - name: haproxy.backend_response_time_average
+ description: Average response time for last 1024 successful connections
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: a dimension per proxy
+ - name: haproxy.backend_queue_time_average
+ description: Average queue time for last 1024 successful connections
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: a dimension per proxy
+ - name: haproxy.backend_current_queue
+ description: Current number of queued requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: a dimension per proxy
+ - name: proxy
+ description: These metrics refer to the Proxy.
+ labels: []
+ metrics:
+ - name: haproxy.backend_http_responses
+ description: HTTP responses by code class
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: 1xx
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: other
+ - name: haproxy.backend_network_io
+ description: Network traffic
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
diff --git a/src/go/plugin/go.d/modules/haproxy/testdata/config.json b/src/go/plugin/go.d/modules/haproxy/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/haproxy/testdata/config.yaml b/src/go/plugin/go.d/modules/haproxy/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/haproxy/testdata/v2.3.10/metrics.txt b/src/go/plugin/go.d/modules/haproxy/testdata/v2.3.10/metrics.txt
new file mode 100644
index 000000000..a156485d9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/haproxy/testdata/v2.3.10/metrics.txt
@@ -0,0 +1,382 @@
+# HELP haproxy_frontend_status Current status of the service (frontend: 0=STOP, 1=UP - backend: 0=DOWN, 1=UP - server: 0=DOWN, 1=UP, 2=MAINT, 3=DRAIN, 4=NOLB).
+# TYPE haproxy_frontend_status gauge
+haproxy_frontend_status{proxy="healthz"} 1
+haproxy_frontend_status{proxy="http"} 1
+haproxy_frontend_status{proxy="https"} 1
+haproxy_frontend_status{proxy="stats"} 1
+# HELP haproxy_frontend_current_sessions Current number of active sessions.
+# TYPE haproxy_frontend_current_sessions gauge
+haproxy_frontend_current_sessions{proxy="healthz"} 1
+haproxy_frontend_current_sessions{proxy="http"} 1
+haproxy_frontend_current_sessions{proxy="https"} 1348
+haproxy_frontend_current_sessions{proxy="stats"} 2
+# HELP haproxy_frontend_max_sessions Maximum observed number of active sessions.
+# TYPE haproxy_frontend_max_sessions gauge
+haproxy_frontend_max_sessions{proxy="healthz"} 10
+haproxy_frontend_max_sessions{proxy="http"} 5
+haproxy_frontend_max_sessions{proxy="https"} 1389
+haproxy_frontend_max_sessions{proxy="stats"} 8
+# HELP haproxy_frontend_limit_sessions Configured session limit.
+# TYPE haproxy_frontend_limit_sessions gauge
+haproxy_frontend_limit_sessions{proxy="healthz"} 524181
+haproxy_frontend_limit_sessions{proxy="http"} 524181
+haproxy_frontend_limit_sessions{proxy="https"} 524181
+haproxy_frontend_limit_sessions{proxy="stats"} 524181
+# HELP haproxy_frontend_sessions_total Total number of sessions.
+# TYPE haproxy_frontend_sessions_total counter
+haproxy_frontend_sessions_total{proxy="healthz"} 723971
+haproxy_frontend_sessions_total{proxy="http"} 1392
+haproxy_frontend_sessions_total{proxy="https"} 23433914
+haproxy_frontend_sessions_total{proxy="stats"} 4207
+# HELP haproxy_frontend_limit_session_rate Configured limit on new sessions per second.
+# TYPE haproxy_frontend_limit_session_rate gauge
+haproxy_frontend_limit_session_rate{proxy="healthz"} 1
+haproxy_frontend_limit_session_rate{proxy="http"} 1
+haproxy_frontend_limit_session_rate{proxy="https"} 1
+haproxy_frontend_limit_session_rate{proxy="stats"} 1
+# HELP haproxy_frontend_max_session_rate Maximum observed number of sessions per second.
+# TYPE haproxy_frontend_max_session_rate gauge
+haproxy_frontend_max_session_rate{proxy="healthz"} 1
+haproxy_frontend_max_session_rate{proxy="http"} 12
+haproxy_frontend_max_session_rate{proxy="https"} 96
+haproxy_frontend_max_session_rate{proxy="stats"} 2
+# HELP haproxy_frontend_connections_rate_max Maximum observed number of connections per second.
+# TYPE haproxy_frontend_connections_rate_max gauge
+haproxy_frontend_connections_rate_max{proxy="healthz"} 1
+haproxy_frontend_connections_rate_max{proxy="http"} 12
+haproxy_frontend_connections_rate_max{proxy="https"} 85
+haproxy_frontend_connections_rate_max{proxy="stats"} 2
+# HELP haproxy_frontend_connections_total Total number of connections.
+# TYPE haproxy_frontend_connections_total counter
+haproxy_frontend_connections_total{proxy="healthz"} 723971
+haproxy_frontend_connections_total{proxy="http"} 1392
+haproxy_frontend_connections_total{proxy="https"} 23476808
+haproxy_frontend_connections_total{proxy="stats"} 4207
+# HELP haproxy_frontend_bytes_in_total Current total of incoming bytes.
+# TYPE haproxy_frontend_bytes_in_total counter
+haproxy_frontend_bytes_in_total{proxy="healthz"} 79636810
+haproxy_frontend_bytes_in_total{proxy="http"} 73990
+haproxy_frontend_bytes_in_total{proxy="https"} 2514816135823
+haproxy_frontend_bytes_in_total{proxy="stats"} 14694474
+# HELP haproxy_frontend_bytes_out_total Current total of outgoing bytes.
+# TYPE haproxy_frontend_bytes_out_total counter
+haproxy_frontend_bytes_out_total{proxy="healthz"} 112215505
+haproxy_frontend_bytes_out_total{proxy="http"} 260431
+haproxy_frontend_bytes_out_total{proxy="https"} 46485344378
+haproxy_frontend_bytes_out_total{proxy="stats"} 23646727611
+# HELP haproxy_frontend_requests_denied_total Total number of denied requests.
+# TYPE haproxy_frontend_requests_denied_total counter
+haproxy_frontend_requests_denied_total{proxy="healthz"} 1
+haproxy_frontend_requests_denied_total{proxy="http"} 1
+haproxy_frontend_requests_denied_total{proxy="https"} 1
+haproxy_frontend_requests_denied_total{proxy="stats"} 1
+# HELP haproxy_frontend_responses_denied_total Total number of denied responses.
+# TYPE haproxy_frontend_responses_denied_total counter
+haproxy_frontend_responses_denied_total{proxy="healthz"} 1
+haproxy_frontend_responses_denied_total{proxy="http"} 1
+haproxy_frontend_responses_denied_total{proxy="https"} 1
+haproxy_frontend_responses_denied_total{proxy="stats"} 1
+# HELP haproxy_frontend_request_errors_total Total number of request errors.
+# TYPE haproxy_frontend_request_errors_total counter
+haproxy_frontend_request_errors_total{proxy="healthz"} 1
+haproxy_frontend_request_errors_total{proxy="http"} 1107
+haproxy_frontend_request_errors_total{proxy="https"} 5922
+haproxy_frontend_request_errors_total{proxy="stats"} 12
+# HELP haproxy_frontend_denied_connections_total Total number of requests denied by "tcp-request connection" rules.
+# TYPE haproxy_frontend_denied_connections_total counter
+haproxy_frontend_denied_connections_total{proxy="healthz"} 1
+haproxy_frontend_denied_connections_total{proxy="http"} 1
+haproxy_frontend_denied_connections_total{proxy="https"} 1
+haproxy_frontend_denied_connections_total{proxy="stats"} 1
+# HELP haproxy_frontend_denied_sessions_total Total number of requests denied by "tcp-request session" rules.
+# TYPE haproxy_frontend_denied_sessions_total counter
+haproxy_frontend_denied_sessions_total{proxy="healthz"} 1
+haproxy_frontend_denied_sessions_total{proxy="http"} 1
+haproxy_frontend_denied_sessions_total{proxy="https"} 1
+haproxy_frontend_denied_sessions_total{proxy="stats"} 1
+# HELP haproxy_frontend_failed_header_rewriting_total Total number of failed header rewriting warnings.
+# TYPE haproxy_frontend_failed_header_rewriting_total counter
+haproxy_frontend_failed_header_rewriting_total{proxy="healthz"} 1
+haproxy_frontend_failed_header_rewriting_total{proxy="http"} 1
+haproxy_frontend_failed_header_rewriting_total{proxy="https"} 1
+haproxy_frontend_failed_header_rewriting_total{proxy="stats"} 1
+# HELP haproxy_frontend_internal_errors_total Total number of internal errors.
+# TYPE haproxy_frontend_internal_errors_total counter
+haproxy_frontend_internal_errors_total{proxy="healthz"} 1
+haproxy_frontend_internal_errors_total{proxy="http"} 1
+haproxy_frontend_internal_errors_total{proxy="https"} 1
+haproxy_frontend_internal_errors_total{proxy="stats"} 1
+# HELP haproxy_frontend_http_requests_rate_max Maximum observed number of HTTP requests per second.
+# TYPE haproxy_frontend_http_requests_rate_max gauge
+haproxy_frontend_http_requests_rate_max{proxy="healthz"} 1
+haproxy_frontend_http_requests_rate_max{proxy="http"} 12
+haproxy_frontend_http_requests_rate_max{proxy="https"} 101
+haproxy_frontend_http_requests_rate_max{proxy="stats"} 2
+# HELP haproxy_frontend_http_requests_total Total number of HTTP requests received.
+# TYPE haproxy_frontend_http_requests_total counter
+haproxy_frontend_http_requests_total{proxy="healthz"} 723971
+haproxy_frontend_http_requests_total{proxy="http"} 1402
+haproxy_frontend_http_requests_total{proxy="https"} 35664484
+haproxy_frontend_http_requests_total{proxy="stats"} 60011
+# HELP haproxy_frontend_http_responses_total Total number of HTTP responses.
+# TYPE haproxy_frontend_http_responses_total counter
+haproxy_frontend_http_responses_total{proxy="healthz",code="1xx"} 1
+haproxy_frontend_http_responses_total{proxy="http",code="1xx"} 1
+haproxy_frontend_http_responses_total{proxy="https",code="1xx"} 4130401
+haproxy_frontend_http_responses_total{proxy="stats",code="1xx"} 1
+haproxy_frontend_http_responses_total{proxy="healthz",code="2xx"} 723971
+haproxy_frontend_http_responses_total{proxy="http",code="2xx"} 1
+haproxy_frontend_http_responses_total{proxy="https",code="2xx"} 21338013
+haproxy_frontend_http_responses_total{proxy="stats",code="2xx"} 59998
+haproxy_frontend_http_responses_total{proxy="healthz",code="3xx"} 1
+haproxy_frontend_http_responses_total{proxy="http",code="3xx"} 147
+haproxy_frontend_http_responses_total{proxy="https",code="3xx"} 10004
+haproxy_frontend_http_responses_total{proxy="stats",code="3xx"} 1
+haproxy_frontend_http_responses_total{proxy="healthz",code="4xx"} 1
+haproxy_frontend_http_responses_total{proxy="http",code="4xx"} 1107
+haproxy_frontend_http_responses_total{proxy="https",code="4xx"} 10175979
+haproxy_frontend_http_responses_total{proxy="stats",code="4xx"} 12
+haproxy_frontend_http_responses_total{proxy="healthz",code="5xx"} 1
+haproxy_frontend_http_responses_total{proxy="http",code="5xx"} 148
+haproxy_frontend_http_responses_total{proxy="https",code="5xx"} 3108
+haproxy_frontend_http_responses_total{proxy="stats",code="5xx"} 1
+haproxy_frontend_http_responses_total{proxy="healthz",code="other"} 1
+haproxy_frontend_http_responses_total{proxy="http",code="other"} 1
+haproxy_frontend_http_responses_total{proxy="https",code="other"} 5657
+haproxy_frontend_http_responses_total{proxy="stats",code="other"} 1
+# HELP haproxy_frontend_intercepted_requests_total Total number of intercepted HTTP requests.
+# TYPE haproxy_frontend_intercepted_requests_total counter
+haproxy_frontend_intercepted_requests_total{proxy="healthz"} 723971
+haproxy_frontend_intercepted_requests_total{proxy="http"} 147
+haproxy_frontend_intercepted_requests_total{proxy="https"} 1
+haproxy_frontend_intercepted_requests_total{proxy="stats"} 59999
+# HELP haproxy_frontend_http_cache_lookups_total Total number of HTTP cache lookups.
+# TYPE haproxy_frontend_http_cache_lookups_total counter
+haproxy_frontend_http_cache_lookups_total{proxy="healthz"} 1
+haproxy_frontend_http_cache_lookups_total{proxy="http"} 1
+haproxy_frontend_http_cache_lookups_total{proxy="https"} 1
+haproxy_frontend_http_cache_lookups_total{proxy="stats"} 1
+# HELP haproxy_frontend_http_cache_hits_total Total number of HTTP cache hits.
+# TYPE haproxy_frontend_http_cache_hits_total counter
+haproxy_frontend_http_cache_hits_total{proxy="healthz"} 1
+haproxy_frontend_http_cache_hits_total{proxy="http"} 1
+haproxy_frontend_http_cache_hits_total{proxy="https"} 1
+haproxy_frontend_http_cache_hits_total{proxy="stats"} 1
+# HELP haproxy_frontend_http_comp_bytes_in_total Total number of HTTP response bytes fed to the compressor.
+# TYPE haproxy_frontend_http_comp_bytes_in_total counter
+haproxy_frontend_http_comp_bytes_in_total{proxy="healthz"} 1
+haproxy_frontend_http_comp_bytes_in_total{proxy="http"} 1
+haproxy_frontend_http_comp_bytes_in_total{proxy="https"} 1
+haproxy_frontend_http_comp_bytes_in_total{proxy="stats"} 1
+# HELP haproxy_frontend_http_comp_bytes_out_total Total number of HTTP response bytes emitted by the compressor.
+# TYPE haproxy_frontend_http_comp_bytes_out_total counter
+haproxy_frontend_http_comp_bytes_out_total{proxy="healthz"} 1
+haproxy_frontend_http_comp_bytes_out_total{proxy="http"} 1
+haproxy_frontend_http_comp_bytes_out_total{proxy="https"} 1
+haproxy_frontend_http_comp_bytes_out_total{proxy="stats"} 1
+# HELP haproxy_frontend_http_comp_bytes_bypassed_total Total number of bytes that bypassed the HTTP compressor (CPU/BW limit).
+# TYPE haproxy_frontend_http_comp_bytes_bypassed_total counter
+haproxy_frontend_http_comp_bytes_bypassed_total{proxy="healthz"} 1
+haproxy_frontend_http_comp_bytes_bypassed_total{proxy="http"} 1
+haproxy_frontend_http_comp_bytes_bypassed_total{proxy="https"} 1
+haproxy_frontend_http_comp_bytes_bypassed_total{proxy="stats"} 1
+# HELP haproxy_frontend_http_comp_responses_total Total number of HTTP responses that were compressed.
+# TYPE haproxy_frontend_http_comp_responses_total counter
+haproxy_frontend_http_comp_responses_total{proxy="healthz"} 1
+haproxy_frontend_http_comp_responses_total{proxy="http"} 1
+haproxy_frontend_http_comp_responses_total{proxy="https"} 1
+haproxy_frontend_http_comp_responses_total{proxy="stats"} 1
+# HELP haproxy_backend_status Current status of the service (frontend: 0=STOP, 1=UP - backend: 0=DOWN, 1=UP - server: 0=DOWN, 1=UP, 2=MAINT, 3=DRAIN, 4=NOLB).
+# TYPE haproxy_backend_status gauge
+haproxy_backend_status{proxy="proxy1"} 1
+haproxy_backend_status{proxy="proxy2"} 1
+# HELP haproxy_backend_current_sessions Current number of active sessions.
+# TYPE haproxy_backend_current_sessions gauge
+haproxy_backend_current_sessions{proxy="proxy1"} 1
+haproxy_backend_current_sessions{proxy="proxy2"} 1322
+# HELP haproxy_backend_max_sessions Maximum observed number of active sessions.
+# TYPE haproxy_backend_max_sessions gauge
+haproxy_backend_max_sessions{proxy="proxy1"} 112
+haproxy_backend_max_sessions{proxy="proxy2"} 1367
+# HELP haproxy_backend_limit_sessions Configured session limit.
+# TYPE haproxy_backend_limit_sessions gauge
+haproxy_backend_limit_sessions{proxy="proxy1"} 1
+haproxy_backend_limit_sessions{proxy="proxy2"} 1
+# HELP haproxy_backend_sessions_total Total number of sessions.
+# TYPE haproxy_backend_sessions_total counter
+haproxy_backend_sessions_total{proxy="proxy1"} 31527507
+haproxy_backend_sessions_total{proxy="proxy2"} 4131723
+# HELP haproxy_backend_max_session_rate Maximum observed number of sessions per second.
+# TYPE haproxy_backend_max_session_rate gauge
+haproxy_backend_max_session_rate{proxy="proxy1"} 82
+haproxy_backend_max_session_rate{proxy="proxy2"} 41
+# HELP haproxy_backend_last_session_seconds Number of seconds since last session assigned to server/backend.
+# TYPE haproxy_backend_last_session_seconds gauge
+haproxy_backend_last_session_seconds{proxy="proxy1"} 1
+haproxy_backend_last_session_seconds{proxy="proxy2"} 3
+# HELP haproxy_backend_current_queue Current number of queued requests.
+# TYPE haproxy_backend_current_queue gauge
+haproxy_backend_current_queue{proxy="proxy1"} 1
+haproxy_backend_current_queue{proxy="proxy2"} 1
+# HELP haproxy_backend_max_queue Maximum observed number of queued requests.
+# TYPE haproxy_backend_max_queue gauge
+haproxy_backend_max_queue{proxy="proxy1"} 1
+haproxy_backend_max_queue{proxy="proxy2"} 1
+# HELP haproxy_backend_connection_attempts_total Total number of connection establishment attempts.
+# TYPE haproxy_backend_connection_attempts_total counter
+haproxy_backend_connection_attempts_total{proxy="proxy1"} 19864884
+haproxy_backend_connection_attempts_total{proxy="proxy2"} 4131723
+# HELP haproxy_backend_connection_reuses_total Total number of connection reuses.
+# TYPE haproxy_backend_connection_reuses_total counter
+haproxy_backend_connection_reuses_total{proxy="proxy1"} 11661922
+haproxy_backend_connection_reuses_total{proxy="proxy2"} 1
+# HELP haproxy_backend_bytes_in_total Current total of incoming bytes.
+# TYPE haproxy_backend_bytes_in_total counter
+haproxy_backend_bytes_in_total{proxy="proxy1"} 21057046294
+haproxy_backend_bytes_in_total{proxy="proxy2"} 2493759083896
+# HELP haproxy_backend_bytes_out_total Current total of outgoing bytes.
+# TYPE haproxy_backend_bytes_out_total counter
+haproxy_backend_bytes_out_total{proxy="proxy1"} 41352782609
+haproxy_backend_bytes_out_total{proxy="proxy2"} 5131407558
+# HELP haproxy_backend_queue_time_average_seconds Avg. queue time for last 1024 successful connections.
+# TYPE haproxy_backend_queue_time_average_seconds gauge
+haproxy_backend_queue_time_average_seconds{proxy="proxy1"} 0.000000
+haproxy_backend_queue_time_average_seconds{proxy="proxy2"} 0.000000
+# HELP haproxy_backend_connect_time_average_seconds Avg. connect time for last 1024 successful connections.
+# TYPE haproxy_backend_connect_time_average_seconds gauge
+haproxy_backend_connect_time_average_seconds{proxy="proxy1"} 0.000000
+haproxy_backend_connect_time_average_seconds{proxy="proxy2"} 0.001000
+# HELP haproxy_backend_response_time_average_seconds Avg. response time for last 1024 successful connections.
+# TYPE haproxy_backend_response_time_average_seconds gauge
+haproxy_backend_response_time_average_seconds{proxy="proxy1"} 0.052000
+haproxy_backend_response_time_average_seconds{proxy="proxy2"} 0.001000
+# HELP haproxy_backend_total_time_average_seconds Avg. total time for last 1024 successful connections.
+# TYPE haproxy_backend_total_time_average_seconds gauge
+haproxy_backend_total_time_average_seconds{proxy="proxy1"} 1.746000
+haproxy_backend_total_time_average_seconds{proxy="proxy2"} 198.639000
+# HELP haproxy_backend_max_queue_time_seconds Maximum observed time spent in the queue
+# TYPE haproxy_backend_max_queue_time_seconds gauge
+haproxy_backend_max_queue_time_seconds{proxy="proxy1"} 0.000000
+haproxy_backend_max_queue_time_seconds{proxy="proxy2"} 0.000000
+# HELP haproxy_backend_max_connect_time_seconds Maximum observed time spent waiting for a connection to complete
+# TYPE haproxy_backend_max_connect_time_seconds gauge
+haproxy_backend_max_connect_time_seconds{proxy="proxy1"} 1.063000
+haproxy_backend_max_connect_time_seconds{proxy="proxy2"} 1.061000
+# HELP haproxy_backend_max_response_time_seconds Maximum observed time spent waiting for a server response
+# TYPE haproxy_backend_max_response_time_seconds gauge
+haproxy_backend_max_response_time_seconds{proxy="proxy1"} 74.050000
+haproxy_backend_max_response_time_seconds{proxy="proxy2"} 1.396000
+# HELP haproxy_backend_max_total_time_seconds Maximum observed total request+response time (request+queue+connect+response+processing)
+# TYPE haproxy_backend_max_total_time_seconds gauge
+haproxy_backend_max_total_time_seconds{proxy="proxy1"} 331.297000
+haproxy_backend_max_total_time_seconds{proxy="proxy2"} 3116820.243000
+# HELP haproxy_backend_requests_denied_total Total number of denied requests.
+# TYPE haproxy_backend_requests_denied_total counter
+haproxy_backend_requests_denied_total{proxy="proxy1"} 1
+haproxy_backend_requests_denied_total{proxy="proxy2"} 1
+# HELP haproxy_backend_responses_denied_total Total number of denied responses.
+# TYPE haproxy_backend_responses_denied_total counter
+haproxy_backend_responses_denied_total{proxy="proxy1"} 1
+haproxy_backend_responses_denied_total{proxy="proxy2"} 1
+# HELP haproxy_backend_connection_errors_total Total number of connection errors.
+# TYPE haproxy_backend_connection_errors_total counter
+haproxy_backend_connection_errors_total{proxy="proxy1"} 1
+haproxy_backend_connection_errors_total{proxy="proxy2"} 1
+# HELP haproxy_backend_response_errors_total Total number of response errors.
+# TYPE haproxy_backend_response_errors_total counter
+haproxy_backend_response_errors_total{proxy="proxy1"} 13
+haproxy_backend_response_errors_total{proxy="proxy2"} 4122625
+# HELP haproxy_backend_retry_warnings_total Total number of retry warnings.
+# TYPE haproxy_backend_retry_warnings_total counter
+haproxy_backend_retry_warnings_total{proxy="proxy1"} 1
+haproxy_backend_retry_warnings_total{proxy="proxy2"} 1
+# HELP haproxy_backend_redispatch_warnings_total Total number of redispatch warnings.
+# TYPE haproxy_backend_redispatch_warnings_total counter
+haproxy_backend_redispatch_warnings_total{proxy="proxy1"} 1
+haproxy_backend_redispatch_warnings_total{proxy="proxy2"} 1
+# HELP haproxy_backend_failed_header_rewriting_total Total number of failed header rewriting warnings.
+# TYPE haproxy_backend_failed_header_rewriting_total counter
+haproxy_backend_failed_header_rewriting_total{proxy="proxy1"} 1
+haproxy_backend_failed_header_rewriting_total{proxy="proxy2"} 1
+# HELP haproxy_backend_internal_errors_total Total number of internal errors.
+# TYPE haproxy_backend_internal_errors_total counter
+haproxy_backend_internal_errors_total{proxy="proxy1"} 1
+haproxy_backend_internal_errors_total{proxy="proxy2"} 1
+# HELP haproxy_backend_client_aborts_total Total number of data transfers aborted by the client.
+# TYPE haproxy_backend_client_aborts_total counter
+haproxy_backend_client_aborts_total{proxy="proxy1"} 27231
+haproxy_backend_client_aborts_total{proxy="proxy2"} 7777
+# HELP haproxy_backend_server_aborts_total Total number of data transfers aborted by the server.
+# TYPE haproxy_backend_server_aborts_total counter
+haproxy_backend_server_aborts_total{proxy="proxy1"} 1
+haproxy_backend_server_aborts_total{proxy="proxy2"} 4122625
+# HELP haproxy_backend_weight Service weight.
+# TYPE haproxy_backend_weight gauge
+haproxy_backend_weight{proxy="proxy1"} 256
+haproxy_backend_weight{proxy="proxy2"} 640
+# HELP haproxy_backend_active_servers Current number of active servers.
+# TYPE haproxy_backend_active_servers gauge
+haproxy_backend_active_servers{proxy="proxy1"} 2
+haproxy_backend_active_servers{proxy="proxy2"} 5
+# HELP haproxy_backend_backup_servers Current number of backup servers.
+# TYPE haproxy_backend_backup_servers gauge
+haproxy_backend_backup_servers{proxy="proxy1"} 1
+haproxy_backend_backup_servers{proxy="proxy2"} 1
+# HELP haproxy_backend_check_up_down_total Total number of UP->DOWN transitions.
+# TYPE haproxy_backend_check_up_down_total counter
+haproxy_backend_check_up_down_total{proxy="proxy1"} 1
+haproxy_backend_check_up_down_total{proxy="proxy2"} 1
+# HELP haproxy_backend_check_last_change_seconds Number of seconds since the last UP<->DOWN transition.
+# TYPE haproxy_backend_check_last_change_seconds gauge
+haproxy_backend_check_last_change_seconds{proxy="proxy1"} 3619864
+haproxy_backend_check_last_change_seconds{proxy="proxy2"} 3619864
+# HELP haproxy_backend_downtime_seconds_total Total downtime (in seconds) for the service.
+# TYPE haproxy_backend_downtime_seconds_total counter
+haproxy_backend_downtime_seconds_total{proxy="proxy1"} 1
+haproxy_backend_downtime_seconds_total{proxy="proxy2"} 1
+# HELP haproxy_backend_loadbalanced_total Total number of times a service was selected, either for new sessions, or when redispatching.
+# TYPE haproxy_backend_loadbalanced_total counter
+haproxy_backend_loadbalanced_total{proxy="proxy1"} 31526806
+haproxy_backend_loadbalanced_total{proxy="proxy2"} 4131723
+# HELP haproxy_backend_http_requests_total Total number of HTTP requests received.
+# TYPE haproxy_backend_http_requests_total counter
+haproxy_backend_http_requests_total{proxy="proxy1"} 31527507
+haproxy_backend_http_requests_total{proxy="proxy2"} 4130401
+# HELP haproxy_backend_http_responses_total Total number of HTTP responses.
+# TYPE haproxy_backend_http_responses_total counter
+haproxy_backend_http_responses_total{proxy="proxy1",code="1xx"} 1
+haproxy_backend_http_responses_total{proxy="proxy2",code="1xx"} 4130401
+haproxy_backend_http_responses_total{proxy="proxy1",code="2xx"} 21338013
+haproxy_backend_http_responses_total{proxy="proxy2",code="2xx"} 1
+haproxy_backend_http_responses_total{proxy="proxy1",code="3xx"} 10004
+haproxy_backend_http_responses_total{proxy="proxy2",code="3xx"} 1
+haproxy_backend_http_responses_total{proxy="proxy1",code="4xx"} 10170758
+haproxy_backend_http_responses_total{proxy="proxy2",code="4xx"} 1
+haproxy_backend_http_responses_total{proxy="proxy1",code="5xx"} 3075
+haproxy_backend_http_responses_total{proxy="proxy2",code="5xx"} 1
+haproxy_backend_http_responses_total{proxy="proxy1",code="other"} 5657
+haproxy_backend_http_responses_total{proxy="proxy2",code="other"} 1
+# HELP haproxy_backend_http_cache_lookups_total Total number of HTTP cache lookups.
+# TYPE haproxy_backend_http_cache_lookups_total counter
+haproxy_backend_http_cache_lookups_total{proxy="proxy1"} 1
+haproxy_backend_http_cache_lookups_total{proxy="proxy2"} 1
+# HELP haproxy_backend_http_cache_hits_total Total number of HTTP cache hits.
+# TYPE haproxy_backend_http_cache_hits_total counter
+haproxy_backend_http_cache_hits_total{proxy="proxy1"} 1
+haproxy_backend_http_cache_hits_total{proxy="proxy2"} 1
+# HELP haproxy_backend_http_comp_bytes_in_total Total number of HTTP response bytes fed to the compressor.
+# TYPE haproxy_backend_http_comp_bytes_in_total counter
+haproxy_backend_http_comp_bytes_in_total{proxy="proxy1"} 1
+haproxy_backend_http_comp_bytes_in_total{proxy="proxy2"} 1
+# HELP haproxy_backend_http_comp_bytes_out_total Total number of HTTP response bytes emitted by the compressor.
+# TYPE haproxy_backend_http_comp_bytes_out_total counter
+haproxy_backend_http_comp_bytes_out_total{proxy="proxy1"} 1
+haproxy_backend_http_comp_bytes_out_total{proxy="proxy2"} 1
+# HELP haproxy_backend_http_comp_bytes_bypassed_total Total number of bytes that bypassed the HTTP compressor (CPU/BW limit).
+# TYPE haproxy_backend_http_comp_bytes_bypassed_total counter
+haproxy_backend_http_comp_bytes_bypassed_total{proxy="proxy1"} 1
+haproxy_backend_http_comp_bytes_bypassed_total{proxy="proxy2"} 1
+# HELP haproxy_backend_http_comp_responses_total Total number of HTTP responses that were compressed.
+# TYPE haproxy_backend_http_comp_responses_total counter
+haproxy_backend_http_comp_responses_total{proxy="proxy1"} 1
+haproxy_backend_http_comp_responses_total{proxy="proxy2"} 1 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/hddtemp/README.md b/src/go/plugin/go.d/modules/hddtemp/README.md
new file mode 120000
index 000000000..95c7593f8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/README.md
@@ -0,0 +1 @@
+integrations/hdd_temperature.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/hddtemp/charts.go b/src/go/plugin/go.d/modules/hddtemp/charts.go
new file mode 100644
index 000000000..7e0766c4f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/charts.go
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hddtemp
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioDiskTemperature = module.Priority + iota
+ prioDiskTemperatureSensorStatus
+)
+
+var (
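+	// chart and dimension IDs below are templates: the "%s" placeholder is filled with the disk id in addDiskChart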
+ diskTemperatureChartsTmpl = module.Chart{
+ ID: "disk_%s_temperature",
+ Title: "Disk temperature",
+ Units: "Celsius",
+ Fam: "temperature",
+ Ctx: "hddtemp.disk_temperature",
+ Type: module.Line,
+ Priority: prioDiskTemperature,
+ Dims: module.Dims{
+ {ID: "disk_%s_temperature", Name: "temperature"},
+ },
+ }
+ diskTemperatureSensorChartsTmpl = module.Chart{
+ ID: "disk_%s_temperature_sensor_status",
+ Title: "Disk temperature sensor status",
+ Units: "status",
+ Fam: "sensor",
+ Ctx: "hddtemp.disk_temperature_sensor_status",
+ Type: module.Line,
+ Priority: prioDiskTemperatureSensorStatus,
+ Dims: module.Dims{
+ {ID: "disk_%s_temp_sensor_status_ok", Name: "ok"},
+ {ID: "disk_%s_temp_sensor_status_err", Name: "err"},
+ {ID: "disk_%s_temp_sensor_status_na", Name: "na"},
+ {ID: "disk_%s_temp_sensor_status_unk", Name: "unk"},
+ {ID: "disk_%s_temp_sensor_status_nos", Name: "nos"},
+ {ID: "disk_%s_temp_sensor_status_slp", Name: "slp"},
+ },
+ }
+)
+
+func (h *HddTemp) addDiskTempSensorStatusChart(id string, disk diskStats) {
+ h.addDiskChart(id, disk, diskTemperatureSensorChartsTmpl.Copy())
+}
+
+func (h *HddTemp) addDiskTempChart(id string, disk diskStats) {
+ h.addDiskChart(id, disk, diskTemperatureChartsTmpl.Copy())
+}
+
+func (h *HddTemp) addDiskChart(id string, disk diskStats, chart *module.Chart) {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(id))
+ chart.Labels = []module.Label{
+ {Key: "disk_id", Value: id},
+ {Key: "model", Value: disk.model},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, id)
+ }
+
+ if err := h.Charts().Add(chart); err != nil {
+ h.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hddtemp/client.go b/src/go/plugin/go.d/modules/hddtemp/client.go
new file mode 100644
index 000000000..b89be10a2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/client.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hddtemp
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
+func newHddTempConn(conf Config) hddtempConn {
+ return &hddtempClient{conn: socket.New(socket.Config{
+ Address: conf.Address,
+ ConnectTimeout: conf.Timeout.Duration(),
+ ReadTimeout: conf.Timeout.Duration(),
+ WriteTimeout: conf.Timeout.Duration(),
+ })}
+}
+
+type hddtempClient struct {
+ conn socket.Client
+}
+
+func (c *hddtempClient) connect() error {
+ return c.conn.Connect()
+}
+
+func (c *hddtempClient) disconnect() {
+ _ = c.conn.Disconnect()
+}
+
+func (c *hddtempClient) queryHddTemp() (string, error) {
+ var i int
+ var s string
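+	// the hddtemp daemon sends its whole status in a single response, so stop reading after the first chunk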
+ err := c.conn.Command("", func(bytes []byte) bool {
+ if i++; i > 1 {
+ return false
+ }
+ s = string(bytes)
+ return true
+ })
+ if err != nil {
+ return "", err
+ }
+ return s, nil
+}
diff --git a/src/go/plugin/go.d/modules/hddtemp/collect.go b/src/go/plugin/go.d/modules/hddtemp/collect.go
new file mode 100644
index 000000000..f5c75db04
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/collect.go
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hddtemp
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type diskStats struct {
+ devPath string
+ model string
+ temperature string
+ unit string
+}
+
+func (h *HddTemp) collect() (map[string]int64, error) {
+ conn := h.newHddTempConn(h.Config)
+
+ if err := conn.connect(); err != nil {
+ return nil, err
+ }
+
+ defer conn.disconnect()
+
+ msg, err := conn.queryHddTemp()
+ if err != nil {
+ return nil, err
+ }
+
+ h.Debugf("hddtemp daemon response: %s", msg)
+
+ disks, err := parseHddTempMessage(msg)
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ for _, disk := range disks {
+ id := getDiskID(disk)
+ if id == "" {
+ h.Debugf("can not extract disk id from '%s'", disk.devPath)
+ continue
+ }
+
+ if !h.disks[id] {
+ h.disks[id] = true
+ h.addDiskTempSensorStatusChart(id, disk)
+ }
+
+ px := fmt.Sprintf("disk_%s_", id)
+
+ for _, st := range []string{"ok", "na", "unk", "nos", "slp", "err"} {
+ mx[px+"temp_sensor_status_"+st] = 0
+ }
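+	// non-numeric values reported by hddtemp: NA (not applicable), UNK (unknown), NOS (no sensor), SLP (drive sleeping), ERR (error)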
+ switch disk.temperature {
+ case "NA":
+ mx[px+"temp_sensor_status_na"] = 1
+ case "UNK":
+ mx[px+"temp_sensor_status_unk"] = 1
+ case "NOS":
+ mx[px+"temp_sensor_status_nos"] = 1
+ case "SLP":
+ mx[px+"temp_sensor_status_slp"] = 1
+ case "ERR":
+ mx[px+"temp_sensor_status_err"] = 1
+ default:
+ if v, ok := getTemperature(disk); ok {
+ if !h.disksTemp[id] {
+ h.disksTemp[id] = true
+ h.addDiskTempChart(id, disk)
+ }
+ mx[px+"temp_sensor_status_ok"] = 1
+ mx[px+"temperature"] = v
+ } else {
+ mx[px+"temp_sensor_status_unk"] = 1
+ }
+ }
+ }
+
+ return mx, nil
+}
+
+func getDiskID(d diskStats) string {
+ i := strings.LastIndexByte(d.devPath, '/')
+ if i == -1 {
+ return ""
+ }
+ return d.devPath[i+1:]
+}
+
+func getTemperature(d diskStats) (int64, bool) {
+ v, err := strconv.ParseInt(d.temperature, 10, 64)
+ if err != nil {
+ return 0, false
+ }
+ if d.unit == "F" {
+ v = (v - 32) * 5 / 9
+ }
+ return v, true
+}
+
+func parseHddTempMessage(msg string) ([]diskStats, error) {
+ if msg == "" {
+ return nil, errors.New("empty hddtemp message")
+ }
+
+ // https://github.com/guzu/hddtemp/blob/e16aed6d0145d7ad8b3308dd0b9199fc701c0417/src/daemon.c#L165
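+	// the message is a sequence of "|dev path|model|temperature|unit|" records, e.g. "|/dev/sda|ST1000DM003|34|C|"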
+ parts := strings.Split(msg, "|")
+
+ var i int
+ // remove empty values
+ for _, v := range parts {
+ if v = strings.TrimSpace(v); v != "" {
+ parts[i] = v
+ i++
+ }
+ }
+ parts = parts[:i]
+
+ if len(parts) == 0 || len(parts)%4 != 0 {
+ return nil, errors.New("invalid hddtemp output format")
+ }
+
+ var disks []diskStats
+
+ for i := 0; i < len(parts); i += 4 {
+ disks = append(disks, diskStats{
+ devPath: parts[i],
+ model: parts[i+1],
+ temperature: parts[i+2],
+ unit: parts[i+3],
+ })
+ }
+
+ return disks, nil
+}
diff --git a/src/go/plugin/go.d/modules/hddtemp/config_schema.json b/src/go/plugin/go.d/modules/hddtemp/config_schema.json
new file mode 100644
index 000000000..2858fbe02
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/config_schema.json
@@ -0,0 +1,44 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "HddTemp collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the hddtemp daemon listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:7634"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hddtemp/hddtemp.go b/src/go/plugin/go.d/modules/hddtemp/hddtemp.go
new file mode 100644
index 000000000..ac283d6ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/hddtemp.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hddtemp
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("hddtemp", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *HddTemp {
+ return &HddTemp{
+ Config: Config{
+ Address: "127.0.0.1:7634",
+ Timeout: web.Duration(time.Second * 1),
+ },
+ newHddTempConn: newHddTempConn,
+ charts: &module.Charts{},
+ disks: make(map[string]bool),
+ disksTemp: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+}
+
+type (
+ HddTemp struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ newHddTempConn func(Config) hddtempConn
+
+ disks map[string]bool
+ disksTemp map[string]bool
+ }
+
+ hddtempConn interface {
+ connect() error
+ disconnect()
+ queryHddTemp() (string, error)
+ }
+)
+
+func (h *HddTemp) Configuration() any {
+ return h.Config
+}
+
+func (h *HddTemp) Init() error {
+ if h.Address == "" {
+ h.Error("config: 'address' not set")
+ return errors.New("address not set")
+ }
+
+ return nil
+}
+
+func (h *HddTemp) Check() error {
+ mx, err := h.collect()
+ if err != nil {
+ h.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (h *HddTemp) Charts() *module.Charts {
+ return h.charts
+}
+
+func (h *HddTemp) Collect() map[string]int64 {
+ mx, err := h.collect()
+ if err != nil {
+ h.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (h *HddTemp) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/hddtemp/hddtemp_test.go b/src/go/plugin/go.d/modules/hddtemp/hddtemp_test.go
new file mode 100644
index 000000000..d20d79edb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/hddtemp_test.go
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hddtemp
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataAllOK, _ = os.ReadFile("testdata/hddtemp-all-ok.txt")
+ dataAllSleep, _ = os.ReadFile("testdata/hddtemp-all-sleep.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataAllOK": dataAllOK,
+ "dataAllSleep": dataAllSleep,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestHddTemp_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &HddTemp{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestHddTemp_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success with default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if address not set": {
+ wantFail: true,
+ config: func() Config {
+ conf := New().Config
+ conf.Address = ""
+ return conf
+ }(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ hdd := New()
+ hdd.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, hdd.Init())
+ } else {
+ assert.NoError(t, hdd.Init())
+ }
+ })
+ }
+}
+
+func TestHddTemp_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *HddTemp
+ }{
+ "not initialized": {
+ prepare: func() *HddTemp {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *HddTemp {
+ hdd := New()
+ hdd.newHddTempConn = func(config Config) hddtempConn { return prepareMockAllDisksOk() }
+ _ = hdd.Check()
+ return hdd
+ },
+ },
+ "after collect": {
+ prepare: func() *HddTemp {
+ hdd := New()
+ hdd.newHddTempConn = func(config Config) hddtempConn { return prepareMockAllDisksOk() }
+ _ = hdd.Collect()
+ return hdd
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ hdd := test.prepare()
+
+ assert.NotPanics(t, hdd.Cleanup)
+ })
+ }
+}
+
+func TestHddTemp_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestHddTemp_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockHddTempConn
+ wantFail bool
+ }{
+ "all disks ok": {
+ wantFail: false,
+ prepareMock: prepareMockAllDisksOk,
+ },
+ "all disks sleep": {
+ wantFail: false,
+ prepareMock: prepareMockAllDisksSleep,
+ },
+ "err on connect": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnConnect,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ hdd := New()
+ mock := test.prepareMock()
+ hdd.newHddTempConn = func(config Config) hddtempConn { return mock }
+
+ if test.wantFail {
+ assert.Error(t, hdd.Check())
+ } else {
+ assert.NoError(t, hdd.Check())
+ }
+ })
+ }
+}
+
+func TestHddTemp_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockHddTempConn
+ wantMetrics map[string]int64
+ wantDisconnect bool
+ wantCharts int
+ }{
+ "all disks ok": {
+ prepareMock: prepareMockAllDisksOk,
+ wantDisconnect: true,
+ wantCharts: 2 * 4,
+ wantMetrics: map[string]int64{
+ "disk_sda_temp_sensor_status_err": 0,
+ "disk_sda_temp_sensor_status_na": 0,
+ "disk_sda_temp_sensor_status_nos": 0,
+ "disk_sda_temp_sensor_status_ok": 1,
+ "disk_sda_temp_sensor_status_slp": 0,
+ "disk_sda_temp_sensor_status_unk": 0,
+ "disk_sda_temperature": 50,
+ "disk_sdb_temp_sensor_status_err": 0,
+ "disk_sdb_temp_sensor_status_na": 0,
+ "disk_sdb_temp_sensor_status_nos": 0,
+ "disk_sdb_temp_sensor_status_ok": 1,
+ "disk_sdb_temp_sensor_status_slp": 0,
+ "disk_sdb_temp_sensor_status_unk": 0,
+ "disk_sdb_temperature": 49,
+ "disk_sdc_temp_sensor_status_err": 0,
+ "disk_sdc_temp_sensor_status_na": 0,
+ "disk_sdc_temp_sensor_status_nos": 0,
+ "disk_sdc_temp_sensor_status_ok": 1,
+ "disk_sdc_temp_sensor_status_slp": 0,
+ "disk_sdc_temp_sensor_status_unk": 0,
+ "disk_sdc_temperature": 27,
+ "disk_sdd_temp_sensor_status_err": 0,
+ "disk_sdd_temp_sensor_status_na": 0,
+ "disk_sdd_temp_sensor_status_nos": 0,
+ "disk_sdd_temp_sensor_status_ok": 1,
+ "disk_sdd_temp_sensor_status_slp": 0,
+ "disk_sdd_temp_sensor_status_unk": 0,
+ "disk_sdd_temperature": 29,
+ },
+ },
+ "all disks sleep": {
+ prepareMock: prepareMockAllDisksSleep,
+ wantDisconnect: true,
+ wantCharts: 3,
+ wantMetrics: map[string]int64{
+ "disk_ata-HUP722020APA330_BFGWU7WF_temp_sensor_status_err": 0,
+ "disk_ata-HUP722020APA330_BFGWU7WF_temp_sensor_status_na": 0,
+ "disk_ata-HUP722020APA330_BFGWU7WF_temp_sensor_status_nos": 0,
+ "disk_ata-HUP722020APA330_BFGWU7WF_temp_sensor_status_ok": 0,
+ "disk_ata-HUP722020APA330_BFGWU7WF_temp_sensor_status_slp": 1,
+ "disk_ata-HUP722020APA330_BFGWU7WF_temp_sensor_status_unk": 0,
+ "disk_ata-HUP722020APA330_BFJ0WS3F_temp_sensor_status_err": 0,
+ "disk_ata-HUP722020APA330_BFJ0WS3F_temp_sensor_status_na": 0,
+ "disk_ata-HUP722020APA330_BFJ0WS3F_temp_sensor_status_nos": 0,
+ "disk_ata-HUP722020APA330_BFJ0WS3F_temp_sensor_status_ok": 0,
+ "disk_ata-HUP722020APA330_BFJ0WS3F_temp_sensor_status_slp": 1,
+ "disk_ata-HUP722020APA330_BFJ0WS3F_temp_sensor_status_unk": 0,
+ "disk_ata-WDC_WD10EARS-00Y5B1_WD-WCAV5R693922_temp_sensor_status_err": 0,
+ "disk_ata-WDC_WD10EARS-00Y5B1_WD-WCAV5R693922_temp_sensor_status_na": 0,
+ "disk_ata-WDC_WD10EARS-00Y5B1_WD-WCAV5R693922_temp_sensor_status_nos": 0,
+ "disk_ata-WDC_WD10EARS-00Y5B1_WD-WCAV5R693922_temp_sensor_status_ok": 0,
+ "disk_ata-WDC_WD10EARS-00Y5B1_WD-WCAV5R693922_temp_sensor_status_slp": 1,
+ "disk_ata-WDC_WD10EARS-00Y5B1_WD-WCAV5R693922_temp_sensor_status_unk": 0,
+ },
+ },
+ "err on connect": {
+ prepareMock: prepareMockErrOnConnect,
+ wantDisconnect: false,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantDisconnect: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantDisconnect: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ hdd := New()
+ mock := test.prepareMock()
+ hdd.newHddTempConn = func(config Config) hddtempConn { return mock }
+
+ mx := hdd.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ assert.Len(t, *hdd.Charts(), test.wantCharts)
+ assert.Equal(t, test.wantDisconnect, mock.disconnectCalled)
+ testMetricsHasAllChartsDims(t, hdd, mx)
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, hdd *HddTemp, mx map[string]int64) {
+ for _, chart := range *hdd.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareMockAllDisksOk() *mockHddTempConn {
+ return &mockHddTempConn{
+ hddTempLine: string(dataAllOK),
+ }
+}
+
+func prepareMockAllDisksSleep() *mockHddTempConn {
+ return &mockHddTempConn{
+ hddTempLine: string(dataAllSleep),
+ }
+}
+
+func prepareMockErrOnConnect() *mockHddTempConn {
+ return &mockHddTempConn{
+ errOnConnect: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockHddTempConn {
+ return &mockHddTempConn{
+ hddTempLine: "Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
+ }
+}
+
+func prepareMockEmptyResponse() *mockHddTempConn {
+ return &mockHddTempConn{
+ hddTempLine: "",
+ }
+}
+
+type mockHddTempConn struct {
+ errOnConnect bool
+ errOnQueryHddTemp bool
+ hddTempLine string
+ disconnectCalled bool
+}
+
+func (m *mockHddTempConn) connect() error {
+ if m.errOnConnect {
+ return errors.New("mock.connect() error")
+ }
+ return nil
+}
+
+func (m *mockHddTempConn) disconnect() {
+ m.disconnectCalled = true
+}
+
+func (m *mockHddTempConn) queryHddTemp() (string, error) {
+ if m.errOnQueryHddTemp {
+ return "", errors.New("mock.queryHddTemp() error")
+ }
+ return m.hddTempLine, nil
+}
diff --git a/src/go/plugin/go.d/modules/hddtemp/integrations/hdd_temperature.md b/src/go/plugin/go.d/modules/hddtemp/integrations/hdd_temperature.md
new file mode 100644
index 000000000..3d5f3e71a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/integrations/hdd_temperature.md
@@ -0,0 +1,224 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/hddtemp/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/hddtemp/metadata.yaml"
+sidebar_label: "HDD temperature"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HDD temperature
+
+
+<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: hddtemp
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors disk temperatures.
+
+
+It retrieves temperature data for attached disks by querying the hddtemp daemon at regular intervals.
+
+
+This collector is only supported on the following platforms:
+
+- Linux
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`
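+
+A quick way to verify that the daemon is reachable is to query it manually (a sketch; requires netcat and assumes the default address):
+
+```bash
+nc 127.0.0.1 7634
+```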
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per disk
+
+These metrics refer to the Disk.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| disk_id | Disk identifier. It is derived from the device path (e.g. sda or ata-HUP722020APA330_BFJ0WS3F) |
+| model | Disk model |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hddtemp.disk_temperature | temperature | Celsius |
+| hddtemp.disk_temperature_sensor_status | ok, err, na, unk, nos, slp | status |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install hddtemp
+
+Install `hddtemp` using your distribution's package manager.
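+
+For example, on Debian or Ubuntu based systems (package name and availability may differ on other distributions):
+
+```bash
+sudo apt install hddtemp
+```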
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/hddtemp.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/hddtemp.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | The IP address and port where the hddtemp daemon listens for connections. | 127.0.0.1:7634 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:7634
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:7634
+
+ - name: remote
+ address: 203.0.113.0:7634
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `hddtemp` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m hddtemp
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `hddtemp` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep hddtemp
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep hddtemp /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep hddtemp
+```
+
+
diff --git a/src/go/plugin/go.d/modules/hddtemp/metadata.yaml b/src/go/plugin/go.d/modules/hddtemp/metadata.yaml
new file mode 100644
index 000000000..74206ebc9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/metadata.yaml
@@ -0,0 +1,134 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-hddtemp
+ plugin_name: go.d.plugin
+ module_name: hddtemp
+ monitored_instance:
+ name: HDD temperature
+ link: https://linux.die.net/man/8/hddtemp
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ icon_filename: "hard-drive.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - hardware
+ - hdd temperature
+ - disk temperature
+ - temperature
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors disk temperatures.
+ method_description: |
+ It retrieves temperature data for attached disks by querying the hddtemp daemon at regular intervals.
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: By default, this collector will attempt to connect to the `hddtemp` daemon on `127.0.0.1:7634`
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Install hddtemp
+ description: |
+ Install `hddtemp` using your distribution's package manager.
+ configuration:
+ file:
+ name: go.d/hddtemp.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: The IP address and port where the hddtemp daemon listens for connections.
+ default_value: 127.0.0.1:7634
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:7634
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:7634
+
+ - name: remote
+ address: 203.0.113.0:7634
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: disk
+ description: These metrics refer to the Disk.
+ labels:
+ - name: disk_id
+ description: Disk identifier. It is derived from the device path (e.g. sda or ata-HUP722020APA330_BFJ0WS3F)
+ - name: model
+ description: Disk model
+ metrics:
+ - name: hddtemp.disk_temperature
+ description: Disk temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: hddtemp.disk_temperature_sensor_status
+ description: Disk temperature sensor status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: err
+ - name: na
+ - name: unk
+ - name: nos
+ - name: slp
diff --git a/src/go/plugin/go.d/modules/hddtemp/testdata/config.json b/src/go/plugin/go.d/modules/hddtemp/testdata/config.json
new file mode 100644
index 000000000..e86834720
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/hddtemp/testdata/config.yaml b/src/go/plugin/go.d/modules/hddtemp/testdata/config.yaml
new file mode 100644
index 000000000..1b81d09eb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/hddtemp/testdata/hddtemp-all-ok.txt b/src/go/plugin/go.d/modules/hddtemp/testdata/hddtemp-all-ok.txt
new file mode 100644
index 000000000..5f6606e81
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/testdata/hddtemp-all-ok.txt
@@ -0,0 +1 @@
+|/dev/sda|WDC WD181KRYZ-01AGBB0|122|F||/dev/sdb|WDC WD181KRYZ-01AGBB0|49|C||/dev/sdc|WDC WDS400T1R0A-68A4W0|27|C||/dev/sdd|WDC WDS400T1R0A-68A4W0|29|C|
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/hddtemp/testdata/hddtemp-all-sleep.txt b/src/go/plugin/go.d/modules/hddtemp/testdata/hddtemp-all-sleep.txt
new file mode 100644
index 000000000..732b62c76
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hddtemp/testdata/hddtemp-all-sleep.txt
@@ -0,0 +1 @@
+|/dev/disk/by-id/ata-HUP722020APA330_BFJ0WS3F|HUP722020APA330|SLP|*||/dev/disk/by-id/ata-HUP722020APA330_BFGWU7WF|HUP722020APA330|SLP|*||/dev/disk/by-id/ata-WDC_WD10EARS-00Y5B1_WD-WCAV5R693922|WDC WD10EARS-00Y5B1|SLP|*|
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/hdfs/README.md b/src/go/plugin/go.d/modules/hdfs/README.md
new file mode 120000
index 000000000..38f428a06
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/README.md
@@ -0,0 +1 @@
+integrations/hadoop_distributed_file_system_hdfs.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/hdfs/charts.go b/src/go/plugin/go.d/modules/hdfs/charts.go
new file mode 100644
index 000000000..5b264c64c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/charts.go
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hdfs
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ Charts = module.Charts
+ Dims = module.Dims
+ Vars = module.Vars
+)
+
+var jvmCharts = Charts{
+ {
+ ID: "jvm_heap_memory",
+ Title: "Heap Memory",
+ Units: "MiB",
+ Fam: "jvm",
+ Ctx: "hdfs.heap_memory",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "jvm_mem_heap_committed", Name: "committed", Div: 1000},
+ {ID: "jvm_mem_heap_used", Name: "used", Div: 1000},
+ },
+ Vars: Vars{
+ {ID: "jvm_mem_heap_max"},
+ },
+ },
+ {
+ ID: "jvm_gc_count_total",
+ Title: "GC Events",
+ Units: "events/s",
+ Fam: "jvm",
+ Ctx: "hdfs.gc_count_total",
+ Dims: Dims{
+ {ID: "jvm_gc_count", Name: "gc", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "jvm_gc_time_total",
+ Title: "GC Time",
+ Units: "ms",
+ Fam: "jvm",
+ Ctx: "hdfs.gc_time_total",
+ Dims: Dims{
+ {ID: "jvm_gc_time_millis", Name: "time", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "jvm_gc_threshold",
+ Title: "Number of Times That the GC Threshold is Exceeded",
+ Units: "events/s",
+ Fam: "jvm",
+ Ctx: "hdfs.gc_threshold",
+ Dims: Dims{
+ {ID: "jvm_gc_num_info_threshold_exceeded", Name: "info", Algo: module.Incremental},
+ {ID: "jvm_gc_num_warn_threshold_exceeded", Name: "warn", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "jvm_threads",
+ Title: "Number of Threads",
+ Units: "num",
+ Fam: "jvm",
+ Ctx: "hdfs.threads",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "jvm_threads_new", Name: "new"},
+ {ID: "jvm_threads_runnable", Name: "runnable"},
+ {ID: "jvm_threads_blocked", Name: "blocked"},
+ {ID: "jvm_threads_waiting", Name: "waiting"},
+ {ID: "jvm_threads_timed_waiting", Name: "timed_waiting"},
+ {ID: "jvm_threads_terminated", Name: "terminated"},
+ },
+ },
+ {
+ ID: "jvm_logs_total",
+ Title: "Number of Logs",
+ Units: "logs/s",
+ Fam: "jvm",
+ Ctx: "hdfs.logs_total",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "jvm_log_info", Name: "info", Algo: module.Incremental},
+ {ID: "jvm_log_error", Name: "error", Algo: module.Incremental},
+ {ID: "jvm_log_warn", Name: "warn", Algo: module.Incremental},
+ {ID: "jvm_log_fatal", Name: "fatal", Algo: module.Incremental},
+ },
+ },
+}
+
+var rpcActivityCharts = Charts{
+ {
+ ID: "rpc_bandwidth",
+ Title: "RPC Bandwidth",
+ Units: "kilobits/s",
+ Fam: "rpc",
+ Ctx: "hdfs.rpc_bandwidth",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "rpc_received_bytes", Name: "received", Div: 1000, Algo: module.Incremental},
+ {ID: "rpc_sent_bytes", Name: "sent", Div: -1000, Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "rpc_calls",
+ Title: "RPC Calls",
+ Units: "calls/s",
+ Fam: "rpc",
+ Ctx: "hdfs.rpc_calls",
+ Dims: Dims{
+ {ID: "rpc_queue_time_num_ops", Name: "calls", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "rpc_open_connections",
+ Title: "RPC Open Connections",
+ Units: "connections",
+ Fam: "rpc",
+ Ctx: "hdfs.open_connections",
+ Dims: Dims{
+ {ID: "rpc_num_open_connections", Name: "open"},
+ },
+ },
+ {
+ ID: "rpc_call_queue_length",
+ Title: "RPC Call Queue Length",
+ Units: "num",
+ Fam: "rpc",
+ Ctx: "hdfs.call_queue_length",
+ Dims: Dims{
+ {ID: "rpc_call_queue_length", Name: "length"},
+ },
+ },
+ {
+ ID: "rpc_avg_queue_time",
+ Title: "RPC Avg Queue Time",
+ Units: "ms",
+ Fam: "rpc",
+ Ctx: "hdfs.avg_queue_time",
+ Dims: Dims{
+ {ID: "rpc_queue_time_avg_time", Name: "time", Div: 1000},
+ },
+ },
+ {
+ ID: "rpc_avg_processing_time",
+ Title: "RPC Avg Processing Time",
+ Units: "ms",
+ Fam: "rpc",
+ Ctx: "hdfs.avg_processing_time",
+ Dims: Dims{
+ {ID: "rpc_processing_time_avg_time", Name: "time", Div: 1000},
+ },
+ },
+}
+
+var fsNameSystemCharts = Charts{
+ {
+ ID: "fs_name_system_capacity",
+ Title: "Capacity Across All Datanodes",
+ Units: "KiB",
+ Fam: "fs name system",
+ Ctx: "hdfs.capacity",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "fsns_capacity_remaining", Name: "remaining", Div: 1024},
+ {ID: "fsns_capacity_used", Name: "used", Div: 1024},
+ },
+ Vars: Vars{
+ {ID: "fsns_capacity_total"},
+ },
+ },
+ {
+ ID: "fs_name_system_used_capacity",
+ Title: "Used Capacity Across All Datanodes",
+ Units: "KiB",
+ Fam: "fs name system",
+ Ctx: "hdfs.used_capacity",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "fsns_capacity_used_dfs", Name: "dfs", Div: 1024},
+ {ID: "fsns_capacity_used_non_dfs", Name: "non_dfs", Div: 1024},
+ },
+ },
+ {
+ ID: "fs_name_system_load",
+ Title: "Number of Concurrent File Accesses (read/write) Across All DataNodes",
+ Units: "load",
+ Fam: "fs name system",
+ Ctx: "hdfs.load",
+ Dims: Dims{
+ {ID: "fsns_total_load", Name: "load"},
+ },
+ },
+ {
+ ID: "fs_name_system_volume_failures_total",
+ Title: "Number of Volume Failures Across All Datanodes",
+ Units: "events/s",
+ Fam: "fs name system",
+ Ctx: "hdfs.volume_failures_total",
+ Dims: Dims{
+ {ID: "fsns_volume_failures_total", Name: "failures", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "fs_files_total",
+ Title: "Number of Tracked Files",
+ Units: "num",
+ Fam: "fs name system",
+ Ctx: "hdfs.files_total",
+ Dims: Dims{
+ {ID: "fsns_files_total", Name: "files"},
+ },
+ },
+ {
+ ID: "fs_blocks_total",
+ Title: "Number of Allocated Blocks in the System",
+ Units: "num",
+ Fam: "fs name system",
+ Ctx: "hdfs.blocks_total",
+ Dims: Dims{
+ {ID: "fsns_blocks_total", Name: "blocks"},
+ },
+ },
+ {
+ ID: "fs_problem_blocks",
+ Title: "Number of Problem Blocks (can point to an unhealthy cluster)",
+ Units: "num",
+ Fam: "fs name system",
+ Ctx: "hdfs.blocks",
+ Dims: Dims{
+ {ID: "fsns_corrupt_blocks", Name: "corrupt"},
+ {ID: "fsns_missing_blocks", Name: "missing"},
+ {ID: "fsns_under_replicated_blocks", Name: "under_replicated"},
+ },
+ },
+ {
+ ID: "fs_name_system_data_nodes",
+ Title: "Number of Data Nodes By Status",
+ Units: "num",
+ Fam: "fs name system",
+ Ctx: "hdfs.data_nodes",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "fsns_num_live_data_nodes", Name: "live"},
+ {ID: "fsns_num_dead_data_nodes", Name: "dead"},
+ {ID: "fsns_stale_data_nodes", Name: "stale"},
+ },
+ },
+}
+
+var fsDatasetStateCharts = Charts{
+ {
+ ID: "fs_dataset_state_capacity",
+ Title: "Capacity",
+ Units: "KiB",
+ Fam: "fs dataset",
+ Ctx: "hdfs.datanode_capacity",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "fsds_capacity_remaining", Name: "remaining", Div: 1024},
+ {ID: "fsds_capacity_used", Name: "used", Div: 1024},
+ },
+ Vars: Vars{
+ {ID: "fsds_capacity_total"},
+ },
+ },
+ {
+ ID: "fs_dataset_state_used_capacity",
+ Title: "Used Capacity",
+ Units: "KiB",
+ Fam: "fs dataset",
+ Ctx: "hdfs.datanode_used_capacity",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "fsds_capacity_used_dfs", Name: "dfs", Div: 1024},
+ {ID: "fsds_capacity_used_non_dfs", Name: "non_dfs", Div: 1024},
+ },
+ },
+ {
+ ID: "fs_dataset_state_num_failed_volumes",
+ Title: "Number of Failed Volumes",
+ Units: "num",
+ Fam: "fs dataset",
+ Ctx: "hdfs.datanode_failed_volumes",
+ Dims: Dims{
+ {ID: "fsds_num_failed_volumes", Name: "failed volumes"},
+ },
+ },
+}
+
+var fsDataNodeActivityCharts = Charts{
+ {
+ ID: "dna_bandwidth",
+ Title: "Bandwidth",
+ Units: "KiB/s",
+ Fam: "activity",
+ Ctx: "hdfs.datanode_bandwidth",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "dna_bytes_read", Name: "reads", Div: 1024, Algo: module.Incremental},
+ {ID: "dna_bytes_written", Name: "writes", Div: -1024, Algo: module.Incremental},
+ },
+ },
+}
+
+func dataNodeCharts() *Charts {
+ charts := Charts{}
+ panicIfError(charts.Add(*jvmCharts.Copy()...))
+ panicIfError(charts.Add(*rpcActivityCharts.Copy()...))
+ panicIfError(charts.Add(*fsDatasetStateCharts.Copy()...))
+ panicIfError(charts.Add(*fsDataNodeActivityCharts.Copy()...))
+ return &charts
+}
+
+func nameNodeCharts() *Charts {
+ charts := Charts{}
+ panicIfError(charts.Add(*jvmCharts.Copy()...))
+ panicIfError(charts.Add(*rpcActivityCharts.Copy()...))
+ panicIfError(charts.Add(*fsNameSystemCharts.Copy()...))
+ return &charts
+}
+
+func panicIfError(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/client.go b/src/go/plugin/go.d/modules/hdfs/client.go
new file mode 100644
index 000000000..3c43348be
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/client.go
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hdfs
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func newClient(httpClient *http.Client, request web.Request) *client {
+ return &client{
+ httpClient: httpClient,
+ request: request,
+ }
+}
+
+type client struct {
+ httpClient *http.Client
+ request web.Request
+}
+
+func (c *client) do() (*http.Response, error) {
+ req, err := web.NewHTTPRequest(c.request)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating http request to %s : %v", c.request.URL, err)
+ }
+
+ // req.Header.Add("Accept-Encoding", "gzip")
+ // req.Header.Set("User-Agent", "netdata/go.d.plugin")
+
+ return c.httpClient.Do(req)
+}
+
+func (c *client) doOK() (*http.Response, error) {
+ resp, err := c.do()
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return resp, fmt.Errorf("%s returned %d", c.request.URL, resp.StatusCode)
+ }
+ return resp, nil
+}
+
+func (c *client) doOKWithDecodeJSON(dst interface{}) error {
+ resp, err := c.doOK()
+ defer closeBody(resp)
+ if err != nil {
+ return err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(dst)
+ if err != nil {
+ return fmt.Errorf("error on decoding response from %s : %v", c.request.URL, err)
+ }
+ return nil
+}
+
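+// closeBody drains the response body before closing it so the underlying
+// connection can be reused by the HTTP client's keep-alive pool.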
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/collect.go b/src/go/plugin/go.d/modules/hdfs/collect.go
new file mode 100644
index 000000000..6ac022b87
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/collect.go
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hdfs
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+func (h *HDFS) collect() (map[string]int64, error) {
+ var raw rawJMX
+ err := h.client.doOKWithDecodeJSON(&raw)
+ if err != nil {
+ return nil, err
+ }
+
+ if raw.isEmpty() {
+ return nil, errors.New("empty response")
+ }
+
+ mx := h.collectRawJMX(raw)
+
+ return stm.ToMap(mx), nil
+}
+
+func (h *HDFS) determineNodeType() (nodeType, error) {
+ var raw rawJMX
+ err := h.client.doOKWithDecodeJSON(&raw)
+ if err != nil {
+ return "", err
+ }
+
+ if raw.isEmpty() {
+ return "", errors.New("empty response")
+ }
+
+ jvm := raw.findJvm()
+ if jvm == nil {
+ return "", errors.New("couldn't find jvm in response")
+ }
+
+ v, ok := jvm["tag.ProcessName"]
+ if !ok {
+ return "", errors.New("couldn't find process name in JvmMetrics")
+ }
+
+ t := nodeType(strings.Trim(string(v), "\""))
+ if t == nameNodeType || t == dataNodeType {
+ return t, nil
+ }
+ return "", errors.New("unknown node type")
+}
+
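+// collectRawJMX dispatches on the node type determined during Check(); any
+// other value is a programming error, hence the panic in the default branch.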
+func (h *HDFS) collectRawJMX(raw rawJMX) *metrics {
+ var mx metrics
+ switch h.nodeType {
+ default:
+ panic(fmt.Sprintf("unsupported node type : '%s'", h.nodeType))
+ case nameNodeType:
+ h.collectNameNode(&mx, raw)
+ case dataNodeType:
+ h.collectDataNode(&mx, raw)
+ }
+ return &mx
+}
+
+func (h *HDFS) collectNameNode(mx *metrics, raw rawJMX) {
+ err := h.collectJVM(mx, raw)
+ if err != nil {
+ h.Debugf("error on collecting jvm : %v", err)
+ }
+
+ err = h.collectRPCActivity(mx, raw)
+ if err != nil {
+ h.Debugf("error on collecting rpc activity : %v", err)
+ }
+
+ err = h.collectFSNameSystem(mx, raw)
+ if err != nil {
+ h.Debugf("error on collecting fs name system : %v", err)
+ }
+}
+
+func (h *HDFS) collectDataNode(mx *metrics, raw rawJMX) {
+ err := h.collectJVM(mx, raw)
+ if err != nil {
+ h.Debugf("error on collecting jvm : %v", err)
+ }
+
+ err = h.collectRPCActivity(mx, raw)
+ if err != nil {
+ h.Debugf("error on collecting rpc activity : %v", err)
+ }
+
+ err = h.collectFSDatasetState(mx, raw)
+ if err != nil {
+ h.Debugf("error on collecting fs dataset state : %v", err)
+ }
+
+ err = h.collectDataNodeActivity(mx, raw)
+ if err != nil {
+ h.Debugf("error on collecting datanode activity state : %v", err)
+ }
+}
+
+func (h *HDFS) collectJVM(mx *metrics, raw rawJMX) error {
+ v := raw.findJvm()
+ if v == nil {
+ return nil
+ }
+
+ var jvm jvmMetrics
+ err := writeJSONTo(&jvm, v)
+ if err != nil {
+ return err
+ }
+
+ mx.Jvm = &jvm
+ return nil
+}
+
+func (h *HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error {
+ v := raw.findRPCActivity()
+ if v == nil {
+ return nil
+ }
+
+ var rpc rpcActivityMetrics
+ err := writeJSONTo(&rpc, v)
+ if err != nil {
+ return err
+ }
+
+ mx.Rpc = &rpc
+ return nil
+}
+
+func (h *HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error {
+ v := raw.findFSNameSystem()
+ if v == nil {
+ return nil
+ }
+
+ var fs fsNameSystemMetrics
+ err := writeJSONTo(&fs, v)
+ if err != nil {
+ return err
+ }
+
+ fs.CapacityUsed = fs.CapacityDfsUsed + fs.CapacityUsedNonDFS
+
+ mx.FSNameSystem = &fs
+ return nil
+}
+
+func (h *HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error {
+ v := raw.findFSDatasetState()
+ if v == nil {
+ return nil
+ }
+
+ var fs fsDatasetStateMetrics
+ err := writeJSONTo(&fs, v)
+ if err != nil {
+ return err
+ }
+
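+	// The bean exposes Capacity, Remaining and DfsUsed; used and non-DFS
+	// used capacity are derived from them before charting.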
+ fs.CapacityUsed = fs.Capacity - fs.Remaining
+ fs.CapacityUsedNonDFS = fs.CapacityUsed - fs.DfsUsed
+
+ mx.FSDatasetState = &fs
+ return nil
+}
+
+func (h *HDFS) collectDataNodeActivity(mx *metrics, raw rawJMX) error {
+ v := raw.findDataNodeActivity()
+ if v == nil {
+ return nil
+ }
+
+ var dna dataNodeActivityMetrics
+ err := writeJSONTo(&dna, v)
+ if err != nil {
+ return err
+ }
+
+ mx.DataNodeActivity = &dna
+ return nil
+}
+
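+// writeJSONTo copies src into the typed dst struct by round-tripping it
+// through JSON encoding.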
+func writeJSONTo(dst interface{}, src interface{}) error {
+ b, err := json.Marshal(src)
+ if err != nil {
+ return err
+ }
+ return json.Unmarshal(b, dst)
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/config_schema.json b/src/go/plugin/go.d/modules/hdfs/config_schema.json
new file mode 100644
index 000000000..528cc4dbf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/config_schema.json
@@ -0,0 +1,186 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "HDFS collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the HDFS DataNode or NameNode JMX endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:9870/jmx",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "url": {
+ "ui:help": "By default, the DataNode's port is 9864, and the NameNode's port is 9870, as specified in the [HDFS configuration](https://hadoop.apache.org/docs/r3.1.3/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml)."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/hdfs.go b/src/go/plugin/go.d/modules/hdfs/hdfs.go
new file mode 100644
index 000000000..44b5840bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/hdfs.go
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hdfs
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("hdfs", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *HDFS {
+ config := Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:9870/jmx",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ }
+
+ return &HDFS{
+ Config: config,
+ }
+}
+
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
+
+type (
+ HDFS struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ client *client
+
+ nodeType
+ }
+ nodeType string
+)
+
+const (
+ dataNodeType nodeType = "DataNode"
+ nameNodeType nodeType = "NameNode"
+)
+
+func (h *HDFS) Configuration() any {
+ return h.Config
+}
+
+func (h *HDFS) Init() error {
+ if err := h.validateConfig(); err != nil {
+ h.Errorf("config validation: %v", err)
+ return err
+ }
+
+ cl, err := h.createClient()
+ if err != nil {
+ h.Errorf("error on creating client : %v", err)
+ return err
+ }
+ h.client = cl
+
+ return nil
+}
+
+func (h *HDFS) Check() error {
+ typ, err := h.determineNodeType()
+ if err != nil {
+ h.Errorf("error on node type determination : %v", err)
+ return err
+ }
+ h.nodeType = typ
+
+ mx, err := h.collect()
+ if err != nil {
+ h.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (h *HDFS) Charts() *Charts {
+ switch h.nodeType {
+ default:
+ return nil
+ case nameNodeType:
+ return nameNodeCharts()
+ case dataNodeType:
+ return dataNodeCharts()
+ }
+}
+
+func (h *HDFS) Collect() map[string]int64 {
+ mx, err := h.collect()
+
+ if err != nil {
+ h.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (h *HDFS) Cleanup() {
+ if h.client != nil && h.client.httpClient != nil {
+ h.client.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/hdfs_test.go b/src/go/plugin/go.d/modules/hdfs/hdfs_test.go
new file mode 100644
index 000000000..d24e50bb6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/hdfs_test.go
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hdfs
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataUnknownNodeMetrics, _ = os.ReadFile("testdata/unknownnode.json")
+ dataDataNodeMetrics, _ = os.ReadFile("testdata/datanode.json")
+ dataNameNodeMetrics, _ = os.ReadFile("testdata/namenode.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataUnknownNodeMetrics": dataUnknownNodeMetrics,
+ "dataDataNodeMetrics": dataDataNodeMetrics,
+ "dataNameNodeMetrics": dataNameNodeMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestHDFS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &HDFS{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestHDFS_Init(t *testing.T) {
+ job := New()
+
+ assert.NoError(t, job.Init())
+}
+
+func TestHDFS_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) {
+ job := New()
+ job.Client.TLSConfig.TLSCA = "testdata/tls"
+
+ assert.Error(t, job.Init())
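+// The stm struct tags define the key prefix (and optional multiplier/divisor)
+// used when these nested structs are flattened into the flat map returned by
+// collect(), e.g. Jvm.MemHeapUsedM becomes "jvm_mem_heap_used".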
+}
+
+func TestHDFS_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataNameNodeMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.NoError(t, job.Check())
+ assert.NotZero(t, job.nodeType)
+}
+
+func TestHDFS_CheckDataNode(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataDataNodeMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.NoError(t, job.Check())
+ assert.Equal(t, dataNodeType, job.nodeType)
+}
+
+func TestHDFS_CheckNameNode(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataNameNodeMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.NoError(t, job.Check())
+ assert.Equal(t, nameNodeType, job.nodeType)
+}
+
+func TestHDFS_CheckErrorOnNodeTypeDetermination(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataUnknownNodeMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.Error(t, job.Check())
+}
+
+func TestHDFS_CheckNoResponse(t *testing.T) {
+ job := New()
+ job.URL = "http://127.0.0.1:38001/jmx"
+ require.NoError(t, job.Init())
+
+ assert.Error(t, job.Check())
+}
+
+func TestHDFS_Charts(t *testing.T) {
+ assert.Nil(t, New().Charts())
+}
+
+func TestHDFS_ChartsUnknownNode(t *testing.T) {
+ job := New()
+
+ assert.Nil(t, job.Charts())
+}
+
+func TestHDFS_ChartsDataNode(t *testing.T) {
+ job := New()
+ job.nodeType = dataNodeType
+
+ assert.Equal(t, dataNodeCharts(), job.Charts())
+}
+
+func TestHDFS_ChartsNameNode(t *testing.T) {
+ job := New()
+ job.nodeType = nameNodeType
+
+ assert.Equal(t, nameNodeCharts(), job.Charts())
+}
+
+func TestHDFS_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestHDFS_CollectDataNode(t *testing.T) {
+ ts := httptest.NewServer(
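+			// The negative divisor plots "sent" below the zero line, mirroring it against "received".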
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataDataNodeMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "dna_bytes_read": 80689178,
+ "dna_bytes_written": 500960407,
+ "fsds_capacity_remaining": 32920760320,
+ "fsds_capacity_total": 53675536384,
+ "fsds_capacity_used": 20754776064,
+ "fsds_capacity_used_dfs": 1186058240,
+ "fsds_capacity_used_non_dfs": 19568717824,
+ "fsds_num_failed_volumes": 0,
+ "jvm_gc_count": 155,
+ "jvm_gc_num_info_threshold_exceeded": 0,
+ "jvm_gc_num_warn_threshold_exceeded": 0,
+ "jvm_gc_time_millis": 672,
+ "jvm_gc_total_extra_sleep_time": 8783,
+ "jvm_log_error": 1,
+ "jvm_log_fatal": 0,
+ "jvm_log_info": 257,
+ "jvm_log_warn": 2,
+ "jvm_mem_heap_committed": 60500,
+ "jvm_mem_heap_max": 843,
+ "jvm_mem_heap_used": 18885,
+ "jvm_threads_blocked": 0,
+ "jvm_threads_new": 0,
+ "jvm_threads_runnable": 11,
+ "jvm_threads_terminated": 0,
+ "jvm_threads_timed_waiting": 25,
+ "jvm_threads_waiting": 11,
+ "rpc_call_queue_length": 0,
+ "rpc_num_open_connections": 0,
+ "rpc_processing_time_avg_time": 0,
+ "rpc_queue_time_avg_time": 0,
+ "rpc_queue_time_num_ops": 0,
+ "rpc_received_bytes": 7,
+ "rpc_sent_bytes": 187,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestHDFS_CollectNameNode(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataNameNodeMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "fsns_blocks_total": 15,
+ "fsns_capacity_remaining": 65861697536,
+ "fsns_capacity_total": 107351072768,
+ "fsns_capacity_used": 41489375232,
+ "fsns_capacity_used_dfs": 2372116480,
+ "fsns_capacity_used_non_dfs": 39117258752,
+ "fsns_corrupt_blocks": 0,
+ "fsns_files_total": 12,
+ "fsns_missing_blocks": 0,
+ "fsns_num_dead_data_nodes": 0,
+ "fsns_num_live_data_nodes": 2,
+ "fsns_stale_data_nodes": 0,
+ "fsns_total_load": 2,
+ "fsns_under_replicated_blocks": 0,
+ "fsns_volume_failures_total": 0,
+ "jvm_gc_count": 1699,
+ "jvm_gc_num_info_threshold_exceeded": 0,
+ "jvm_gc_num_warn_threshold_exceeded": 0,
+ "jvm_gc_time_millis": 3483,
+ "jvm_gc_total_extra_sleep_time": 1944,
+ "jvm_log_error": 0,
+ "jvm_log_fatal": 0,
+ "jvm_log_info": 3382077,
+ "jvm_log_warn": 3378983,
+ "jvm_mem_heap_committed": 67000,
+ "jvm_mem_heap_max": 843,
+ "jvm_mem_heap_used": 26603,
+ "jvm_threads_blocked": 0,
+ "jvm_threads_new": 0,
+ "jvm_threads_runnable": 7,
+ "jvm_threads_terminated": 0,
+ "jvm_threads_timed_waiting": 34,
+ "jvm_threads_waiting": 6,
+ "rpc_call_queue_length": 0,
+ "rpc_num_open_connections": 2,
+ "rpc_processing_time_avg_time": 0,
+ "rpc_queue_time_avg_time": 58,
+ "rpc_queue_time_num_ops": 585402,
+ "rpc_received_bytes": 240431351,
+ "rpc_sent_bytes": 25067414,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestHDFS_CollectUnknownNode(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataUnknownNodeMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.Panics(t, func() { _ = job.Collect() })
+}
+
+func TestHDFS_CollectNoResponse(t *testing.T) {
+ job := New()
+ job.URL = "http://127.0.0.1:38001/jmx"
+ require.NoError(t, job.Init())
+
+ assert.Nil(t, job.Collect())
+}
+
+func TestHDFS_CollectReceiveInvalidResponse(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\ngoodbye!\n"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.Nil(t, job.Collect())
+}
+
+func TestHDFS_CollectReceive404(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.Nil(t, job.Collect())
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/init.go b/src/go/plugin/go.d/modules/hdfs/init.go
new file mode 100644
index 000000000..1159ab73b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/init.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hdfs
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (h *HDFS) validateConfig() error {
+ if h.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (h *HDFS) createClient() (*client, error) {
+ httpClient, err := web.NewHTTPClient(h.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return newClient(httpClient, h.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md b/src/go/plugin/go.d/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md
new file mode 100644
index 000000000..e37ccde0c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/integrations/hadoop_distributed_file_system_hdfs.md
@@ -0,0 +1,286 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/hdfs/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/hdfs/metadata.yaml"
+sidebar_label: "Hadoop Distributed File System (HDFS)"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Hadoop Distributed File System (HDFS)
+
+
+<img src="https://netdata.cloud/img/hadoop.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: hdfs
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors HDFS nodes.
+
+Netdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.
+
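+To verify the endpoint by hand, you can fetch the JMX JSON servlet directly. A minimal check, assuming the NameNode web interface listens on its default port `9870` (DataNodes default to `9864`):
+
+```bash
+# Dump all JMX beans exposed by the daemon's web interface as JSON
+curl -s http://127.0.0.1:9870/jmx
+```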
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Hadoop Distributed File System (HDFS) instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | DataNode | NameNode |
+|:------|:----------|:----|:---:|:---:|
+| hdfs.heap_memory | committed, used | MiB | • | • |
+| hdfs.gc_count_total | gc | events/s | • | • |
+| hdfs.gc_time_total | time | ms | • | • |
+| hdfs.gc_threshold | info, warn | events/s | • | • |
+| hdfs.threads | new, runnable, blocked, waiting, timed_waiting, terminated | num | • | • |
+| hdfs.logs_total | info, error, warn, fatal | logs/s | • | • |
+| hdfs.rpc_bandwidth | received, sent | kilobits/s | • | • |
+| hdfs.rpc_calls | calls | calls/s | • | • |
+| hdfs.open_connections | open | connections | • | • |
+| hdfs.call_queue_length | length | num | • | • |
+| hdfs.avg_queue_time | time | ms | • | • |
+| hdfs.avg_processing_time | time | ms | • | • |
+| hdfs.capacity | remaining, used | KiB | | • |
+| hdfs.used_capacity | dfs, non_dfs | KiB | | • |
+| hdfs.load | load | load | | • |
+| hdfs.volume_failures_total | failures | events/s | | • |
+| hdfs.files_total | files | num | | • |
+| hdfs.blocks_total | blocks | num | | • |
+| hdfs.blocks | corrupt, missing, under_replicated | num | | • |
+| hdfs.data_nodes | live, dead, stale | num | | • |
+| hdfs.datanode_capacity | remaining, used | KiB | • | |
+| hdfs.datanode_used_capacity | dfs, non_dfs | KiB | • | |
+| hdfs.datanode_failed_volumes | failed volumes | num | • | |
+| hdfs.datanode_bandwidth | reads, writes | KiB/s | • | |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ hdfs_capacity_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.capacity | summary datanodes space capacity utilization |
+| [ hdfs_missing_blocks ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.blocks | number of missing blocks |
+| [ hdfs_stale_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes marked stale due to delayed heartbeat |
+| [ hdfs_dead_nodes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.data_nodes | number of datanodes which are currently dead |
+| [ hdfs_num_failed_volumes ](https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf) | hdfs.num_failed_volumes | number of failed volumes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/hdfs.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/hdfs.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9870/jmx | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9870/jmx
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9870/jmx
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9870/jmx
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9870/jmx
+
+ - name: remote
+ url: http://192.0.2.1:9870/jmx
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `hdfs` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+  ./go.d.plugin -d -m hdfs
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `hdfs` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep hdfs
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep hdfs /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep hdfs
+```
+
+
diff --git a/src/go/plugin/go.d/modules/hdfs/metadata.yaml b/src/go/plugin/go.d/modules/hdfs/metadata.yaml
new file mode 100644
index 000000000..694868e01
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/metadata.yaml
@@ -0,0 +1,388 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+      id: collector-go.d.plugin-hdfs
+ plugin_name: go.d.plugin
+      module_name: hdfs
+ monitored_instance:
+ name: Hadoop Distributed File System (HDFS)
+ link: https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html
+ icon_filename: hadoop.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - hdfs
+ - hadoop
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors HDFS nodes.
+
+ Netdata accesses HDFS metrics over `Java Management Extensions` (JMX) through the web interface of an HDFS daemon.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/hdfs.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:9870/jmx
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9870/jmx
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9870/jmx
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: |
+ Do not validate server certificate chain and hostname.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:9870/jmx
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9870/jmx
+
+ - name: remote
+ url: http://192.0.2.1:9870/jmx
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: hdfs_capacity_usage
+ metric: hdfs.capacity
+ info: summary datanodes space capacity utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf
+ - name: hdfs_missing_blocks
+ metric: hdfs.blocks
+ info: number of missing blocks
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf
+ - name: hdfs_stale_nodes
+ metric: hdfs.data_nodes
+ info: number of datanodes marked stale due to delayed heartbeat
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf
+ - name: hdfs_dead_nodes
+ metric: hdfs.data_nodes
+ info: number of datanodes which are currently dead
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf
+ - name: hdfs_num_failed_volumes
+ metric: hdfs.num_failed_volumes
+ info: number of failed volumes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/hdfs.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability:
+ - DataNode
+ - NameNode
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: hdfs.heap_memory
+ description: Heap Memory
+ unit: MiB
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: hdfs.gc_count_total
+ description: GC Events
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: gc
+ - name: hdfs.gc_time_total
+ description: GC Time
+ unit: ms
+ chart_type: line
+ dimensions:
+                - name: time
+ - name: hdfs.gc_threshold
+ description: Number of Times That the GC Threshold is Exceeded
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: info
+ - name: warn
+ - name: hdfs.threads
+ description: Number of Threads
+ unit: num
+ chart_type: stacked
+ dimensions:
+ - name: new
+ - name: runnable
+ - name: blocked
+ - name: waiting
+ - name: timed_waiting
+ - name: terminated
+ - name: hdfs.logs_total
+ description: Number of Logs
+ unit: logs/s
+ chart_type: stacked
+ dimensions:
+ - name: info
+ - name: error
+ - name: warn
+ - name: fatal
+ - name: hdfs.rpc_bandwidth
+ description: RPC Bandwidth
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: hdfs.rpc_calls
+ description: RPC Calls
+ unit: calls/s
+ chart_type: line
+ dimensions:
+ - name: calls
+ - name: hdfs.open_connections
+ description: RPC Open Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: hdfs.call_queue_length
+ description: RPC Call Queue Length
+ unit: num
+ chart_type: line
+ dimensions:
+ - name: length
+ - name: hdfs.avg_queue_time
+ description: RPC Avg Queue Time
+ unit: ms
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: hdfs.avg_processing_time
+ description: RPC Avg Processing Time
+ unit: ms
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: hdfs.capacity
+ description: Capacity Across All Datanodes
+ unit: KiB
+ chart_type: stacked
+ availability:
+ - NameNode
+ dimensions:
+ - name: remaining
+ - name: used
+ - name: hdfs.used_capacity
+ description: Used Capacity Across All Datanodes
+ unit: KiB
+ chart_type: stacked
+ availability:
+ - NameNode
+ dimensions:
+ - name: dfs
+ - name: non_dfs
+ - name: hdfs.load
+ description: Number of Concurrent File Accesses (read/write) Across All DataNodes
+ unit: load
+ chart_type: line
+ availability:
+ - NameNode
+ dimensions:
+ - name: load
+ - name: hdfs.volume_failures_total
+ description: Number of Volume Failures Across All Datanodes
+ unit: events/s
+ chart_type: line
+ availability:
+ - NameNode
+ dimensions:
+ - name: failures
+ - name: hdfs.files_total
+ description: Number of Tracked Files
+ unit: num
+ chart_type: line
+ availability:
+ - NameNode
+ dimensions:
+ - name: files
+ - name: hdfs.blocks_total
+ description: Number of Allocated Blocks in the System
+ unit: num
+ chart_type: line
+ availability:
+ - NameNode
+ dimensions:
+ - name: blocks
+ - name: hdfs.blocks
+ description: Number of Problem Blocks (can point to an unhealthy cluster)
+ unit: num
+ chart_type: line
+ availability:
+ - NameNode
+ dimensions:
+ - name: corrupt
+ - name: missing
+ - name: under_replicated
+ - name: hdfs.data_nodes
+ description: Number of Data Nodes By Status
+ unit: num
+ chart_type: stacked
+ availability:
+ - NameNode
+ dimensions:
+ - name: live
+ - name: dead
+ - name: stale
+ - name: hdfs.datanode_capacity
+ description: Capacity
+ unit: KiB
+ chart_type: stacked
+ availability:
+ - DataNode
+ dimensions:
+ - name: remaining
+ - name: used
+ - name: hdfs.datanode_used_capacity
+ description: Used Capacity
+ unit: KiB
+ chart_type: stacked
+ availability:
+ - DataNode
+ dimensions:
+ - name: dfs
+ - name: non_dfs
+ - name: hdfs.datanode_failed_volumes
+ description: Number of Failed Volumes
+ unit: num
+ chart_type: line
+ availability:
+ - DataNode
+ dimensions:
+ - name: failed volumes
+ - name: hdfs.datanode_bandwidth
+ description: Bandwidth
+ unit: KiB/s
+ chart_type: area
+ availability:
+ - DataNode
+ dimensions:
+ - name: reads
+ - name: writes
diff --git a/src/go/plugin/go.d/modules/hdfs/metrics.go b/src/go/plugin/go.d/modules/hdfs/metrics.go
new file mode 100644
index 000000000..972436a5d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/metrics.go
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hdfs
+
+// HDFS Architecture
+// https://hadoop.apache.org/docs/r1.2.1/hdfs_design.html#NameNode+and+DataNodes
+
+// Metrics description
+// https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/Metrics.html
+
+// Good article
+// https://www.datadoghq.com/blog/monitor-hadoop-metrics/#hdfs-metrics
+
+type metrics struct {
+ Jvm *jvmMetrics `stm:"jvm"` // both
+ Rpc *rpcActivityMetrics `stm:"rpc"` // both
+ FSNameSystem *fsNameSystemMetrics `stm:"fsns"` // namenode
+ FSDatasetState *fsDatasetStateMetrics `stm:"fsds"` // datanode
+ DataNodeActivity *dataNodeActivityMetrics `stm:"dna"` // datanode
+}
+
+type jvmMetrics struct {
+ ProcessName string `json:"tag.ProcessName"`
+ HostName string `json:"tag.Hostname"`
+ //MemNonHeapUsedM float64 `stm:"mem_non_heap_used,1000,1"`
+ //MemNonHeapCommittedM float64 `stm:"mem_non_heap_committed,1000,1"`
+ //MemNonHeapMaxM float64 `stm:"mem_non_heap_max"`
+ MemHeapUsedM float64 `stm:"mem_heap_used,1000,1"`
+ MemHeapCommittedM float64 `stm:"mem_heap_committed,1000,1"`
+ MemHeapMaxM float64 `stm:"mem_heap_max"`
+ //MemMaxM float64 `stm:"mem_max"`
+ GcCount float64 `stm:"gc_count"`
+ GcTimeMillis float64 `stm:"gc_time_millis"`
+ GcNumWarnThresholdExceeded float64 `stm:"gc_num_warn_threshold_exceeded"`
+ GcNumInfoThresholdExceeded float64 `stm:"gc_num_info_threshold_exceeded"`
+ GcTotalExtraSleepTime float64 `stm:"gc_total_extra_sleep_time"`
+ ThreadsNew float64 `stm:"threads_new"`
+ ThreadsRunnable float64 `stm:"threads_runnable"`
+ ThreadsBlocked float64 `stm:"threads_blocked"`
+ ThreadsWaiting float64 `stm:"threads_waiting"`
+ ThreadsTimedWaiting float64 `stm:"threads_timed_waiting"`
+ ThreadsTerminated float64 `stm:"threads_terminated"`
+ LogFatal float64 `stm:"log_fatal"`
+ LogError float64 `stm:"log_error"`
+ LogWarn float64 `stm:"log_warn"`
+ LogInfo float64 `stm:"log_info"`
+}
+
+type rpcActivityMetrics struct {
+ ReceivedBytes float64 `stm:"received_bytes"`
+ SentBytes float64 `stm:"sent_bytes"`
+ RpcQueueTimeNumOps float64 `stm:"queue_time_num_ops"`
+ RpcQueueTimeAvgTime float64 `stm:"queue_time_avg_time,1000,1"`
+ //RpcProcessingTimeNumOps float64
+ RpcProcessingTimeAvgTime float64 `stm:"processing_time_avg_time,1000,1"`
+ //DeferredRpcProcessingTimeNumOps float64
+ //DeferredRpcProcessingTimeAvgTime float64
+ //RpcAuthenticationFailures float64
+ //RpcAuthenticationSuccesses float64
+ //RpcAuthorizationFailures float64
+ //RpcAuthorizationSuccesses float64
+ //RpcClientBackoff float64
+ //RpcSlowCalls float64
+ NumOpenConnections float64 `stm:"num_open_connections"`
+ CallQueueLength float64 `stm:"call_queue_length"`
+ //NumDroppedConnections float64
+}
+
+type fsNameSystemMetrics struct {
+ HostName string `json:"tag.Hostname"`
+ HAState string `json:"tag.HAState"`
+ //TotalSyncTimes float64 `json:"tag.tag.TotalSyncTimes"`
+ MissingBlocks float64 `stm:"missing_blocks"`
+ //MissingReplOneBlocks float64 `stm:"missing_repl_one_blocks"`
+ //ExpiredHeartbeats float64 `stm:"expired_heartbeats"`
+ //TransactionsSinceLastCheckpoint float64 `stm:"transactions_since_last_checkpoint"`
+ //TransactionsSinceLastLogRoll float64 `stm:"transactions_since_last_log_roll"`
+ //LastWrittenTransactionId float64 `stm:"last_written_transaction_id"`
+ //LastCheckpointTime float64 `stm:"last_checkpoint_time"`
+ CapacityTotal float64 `stm:"capacity_total"`
+ //CapacityTotalGB float64 `stm:"capacity_total_gb"`
+ CapacityDfsUsed float64 `json:"CapacityUsed" stm:"capacity_used_dfs"`
+ //CapacityUsedGB float64 `stm:"capacity_used_gb"`
+ CapacityRemaining float64 `stm:"capacity_remaining"`
+ //ProvidedCapacityTotal float64 `stm:"provided_capacity_total"`
+ //CapacityRemainingGB float64 `stm:"capacity_remaining_gb"`
+ CapacityUsedNonDFS float64 `stm:"capacity_used_non_dfs"`
+ TotalLoad float64 `stm:"total_load"`
+ //SnapshottableDirectories float64 `stm:"snapshottable_directories"`
+ //Snapshots float64 `stm:"snapshots"`
+ //NumEncryptionZones float64 `stm:"num_encryption_zones"`
+ //LockQueueLength float64 `stm:"lock_queue_length"`
+ BlocksTotal float64 `stm:"blocks_total"`
+ //NumFilesUnderConstruction float64 `stm:"num_files_under_construction"`
+ //NumActiveClients float64 `stm:"num_active_clients"`
+ FilesTotal float64 `stm:"files_total"`
+ //PendingReplicationBlocks float64 `stm:"pending_replication_blocks"`
+ //PendingReconstructionBlocks float64 `stm:"pending_reconstruction_blocks"`
+ UnderReplicatedBlocks float64 `stm:"under_replicated_blocks"`
+ //LowRedundancyBlocks float64 `stm:"low_redundancy_blocks"`
+ CorruptBlocks float64 `stm:"corrupt_blocks"`
+ //ScheduledReplicationBlocks float64 `stm:"scheduled_replication_blocks"`
+ //PendingDeletionBlocks float64 `stm:"pending_deletion_blocks"`
+ //LowRedundancyReplicatedBlocks float64 `stm:"low_redundancy_replicated_blocks"`
+ //CorruptReplicatedBlocks float64 `stm:"corrupt_replicated_blocks"`
+ //MissingReplicatedBlocks float64 `stm:"missing_replicated_blocks"`
+ //MissingReplicationOneBlocks float64 `stm:"missing_replication_one_blocks"`
+ //HighestPriorityLowRedundancyReplicatedBlocks float64 `stm:"highest_priority_low_redundancy_replicated_blocks"`
+ //HighestPriorityLowRedundancyECBlocks float64 `stm:"highest_priority_low_redundancy_ec_blocks"`
+ //BytesInFutureReplicatedBlocks float64 `stm:"bytes_in_future_replicated_blocks"`
+ //PendingDeletionReplicatedBlocks float64 `stm:"pending_deletion_replicated_blocks"`
+ //TotalReplicatedBlocks float64 `stm:"total_replicated_blocks"`
+ //LowRedundancyECBlockGroups float64 `stm:"low_redundancy_ec_block_groups"`
+ //CorruptECBlockGroups float64 `stm:"corrupt_ec_block_groups"`
+ //MissingECBlockGroups float64 `stm:"missing_ec_block_groups"`
+ //BytesInFutureECBlockGroups float64 `stm:"bytes_in_future_ec_block_groups"`
+ //PendingDeletionECBlocks float64 `stm:"pending_deletion_ec_blocks"`
+ //TotalECBlockGroups float64 `stm:"total_ec_block_groups"`
+ //ExcessBlocks float64 `stm:"excess_blocks"`
+ //NumTimedOutPendingReconstructions float64 `stm:"num_timed_out_pending_reconstructions"`
+ //PostponedMisreplicatedBlocks float64 `stm:"postponed_misreplicated_blocks"`
+ //PendingDataNodeMessageCount float64 `stm:"pending_data_node_message_count"`
+ //MillisSinceLastLoadedEdits float64 `stm:"millis_since_last_loaded_edits"`
+ //BlockCapacity float64 `stm:"block_capacity"`
+ NumLiveDataNodes float64 `stm:"num_live_data_nodes"`
+ NumDeadDataNodes float64 `stm:"num_dead_data_nodes"`
+ //NumDecomLiveDataNodes float64 `stm:"num_decom_live_data_nodes"`
+ //NumDecomDeadDataNodes float64 `stm:"num_decom_dead_data_nodes"`
+ VolumeFailuresTotal float64 `stm:"volume_failures_total"`
+ //EstimatedCapacityLostTotal float64 `stm:"estimated_capacity_lost_total"`
+ //NumDecommissioningDataNodes float64 `stm:"num_decommissioning_data_nodes"`
+ StaleDataNodes float64 `stm:"stale_data_nodes"`
+ //NumStaleStorages float64 `stm:"num_stale_storages"`
+ //TotalSyncCount float64 `stm:"total_sync_count"`
+ //NumInMaintenanceLiveDataNodes float64 `stm:"num_in_maintenance_live_data_nodes"`
+ //NumInMaintenanceDeadDataNodes float64 `stm:"num_in_maintenance_dead_data_nodes"`
+ //NumEnteringMaintenanceDataNodes float64 `stm:"num_entering_maintenance_data_nodes"`
+
+ // custom attributes
+ CapacityUsed float64 `json:"-" stm:"capacity_used"`
+}
+
+type fsDatasetStateMetrics struct {
+ HostName string `json:"tag.Hostname"`
+ Capacity float64 `stm:"capacity_total"`
+ DfsUsed float64 `stm:"capacity_used_dfs"`
+ Remaining float64 `stm:"capacity_remaining"`
+ NumFailedVolumes float64 `stm:"num_failed_volumes"`
+ //LastVolumeFailureDate float64 `stm:"LastVolumeFailureDate"`
+ //EstimatedCapacityLostTotal float64 `stm:"EstimatedCapacityLostTotal"`
+ //CacheUsed float64 `stm:"CacheUsed"`
+ //CacheCapacity float64 `stm:"CacheCapacity"`
+ //NumBlocksCached float64 `stm:"NumBlocksCached"`
+ //NumBlocksFailedToCache float64 `stm:"NumBlocksFailedToCache"`
+ //NumBlocksFailedToUnCache float64 `stm:"NumBlocksFailedToUnCache"`
+
+ // custom attributes
+ CapacityUsedNonDFS float64 `stm:"capacity_used_non_dfs"`
+ CapacityUsed float64 `stm:"capacity_used"`
+}
+
+type dataNodeActivityMetrics struct {
+ HostName string `json:"tag.Hostname"`
+ BytesWritten float64 `stm:"bytes_written"`
+ //TotalWriteTime float64
+ BytesRead float64 `stm:"bytes_read"`
+ //TotalReadTime float64
+ //BlocksWritten float64
+ //BlocksRead float64
+ //BlocksReplicated float64
+ //BlocksRemoved float64
+ //BlocksVerified float64
+ //BlockVerificationFailures float64
+ //BlocksCached float64
+ //BlocksUncached float64
+ //ReadsFromLocalClient float64
+ //ReadsFromRemoteClient float64
+ //WritesFromLocalClient float64
+ //WritesFromRemoteClient float64
+ //BlocksGetLocalPathInfo float64
+ //RemoteBytesRead float64
+ //RemoteBytesWritten float64
+ //RamDiskBlocksWrite float64
+ //RamDiskBlocksWriteFallback float64
+ //RamDiskBytesWrite float64
+ //RamDiskBlocksReadHits float64
+ //RamDiskBlocksEvicted float64
+ //RamDiskBlocksEvictedWithoutRead float64
+ //RamDiskBlocksEvictionWindowMsNumOps float64
+ //RamDiskBlocksEvictionWindowMsAvgTime float64
+ //RamDiskBlocksLazyPersisted float64
+ //RamDiskBlocksDeletedBeforeLazyPersisted float64
+ //RamDiskBytesLazyPersisted float64
+ //RamDiskBlocksLazyPersistWindowMsNumOps float64
+ //RamDiskBlocksLazyPersistWindowMsAvgTime float64
+ //FsyncCount float64
+ //VolumeFailures float64
+ //DatanodeNetworkErrors float64
+ //DataNodeActiveXceiversCount float64
+ //ReadBlockOpNumOps float64
+ //ReadBlockOpAvgTime float64
+ //WriteBlockOpNumOps float64
+ //WriteBlockOpAvgTime float64
+ //BlockChecksumOpNumOps float64
+ //BlockChecksumOpAvgTime float64
+ //CopyBlockOpNumOps float64
+ //CopyBlockOpAvgTime float64
+ //ReplaceBlockOpNumOps float64
+ //ReplaceBlockOpAvgTime float64
+ //HeartbeatsNumOps float64
+ //HeartbeatsAvgTime float64
+ //HeartbeatsTotalNumOps float64
+ //HeartbeatsTotalAvgTime float64
+ //LifelinesNumOps float64
+ //LifelinesAvgTime float64
+ //BlockReportsNumOps float64
+ //BlockReportsAvgTime float64
+ //IncrementalBlockReportsNumOps float64
+ //IncrementalBlockReportsAvgTime float64
+ //CacheReportsNumOps float64
+ //CacheReportsAvgTime float64
+ //PacketAckRoundTripTimeNanosNumOps float64
+ //PacketAckRoundTripTimeNanosAvgTime float64
+ //FlushNanosNumOps float64
+ //FlushNanosAvgTime float64
+ //FsyncNanosNumOps float64
+ //FsyncNanosAvgTime float64
+ //SendDataPacketBlockedOnNetworkNanosNumOps float64
+ //SendDataPacketBlockedOnNetworkNanosAvgTime float64
+ //SendDataPacketTransferNanosNumOps float64
+ //SendDataPacketTransferNanosAvgTime float64
+ //BlocksInPendingIBR float64
+ //BlocksReceivingInPendingIBR float64
+ //BlocksReceivedInPendingIBR float64
+ //BlocksDeletedInPendingIBR float64
+ //EcReconstructionTasks float64
+ //EcFailedReconstructionTasks float64
+ //EcDecodingTimeNanos float64
+ //EcReconstructionBytesRead float64
+ //EcReconstructionBytesWritten float64
+ //EcReconstructionRemoteBytesRead float64
+ //EcReconstructionReadTimeMillis float64
+ //EcReconstructionDecodingTimeMillis float64
+ //EcReconstructionWriteTimeMillis float64
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/raw_data.go b/src/go/plugin/go.d/modules/hdfs/raw_data.go
new file mode 100644
index 000000000..ab434ae17
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/raw_data.go
@@ -0,0 +1,51 @@
+package hdfs
+
+import (
+ "encoding/json"
+ "strings"
+)
+
+type (
+ rawData map[string]json.RawMessage
+ rawJMX struct {
+ Beans []rawData
+ }
+)
+
+func (r rawJMX) isEmpty() bool {
+ return len(r.Beans) == 0
+}
+
+func (r rawJMX) find(f func(rawData) bool) rawData {
+ for _, v := range r.Beans {
+ if f(v) {
+ return v
+ }
+ }
+ return nil
+}
+
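+// The matchers below compare against the raw, still-quoted JSON values (each bean
+// field is kept as json.RawMessage and is not unquoted before comparison), which is
+// why the expected strings include escaped quotes.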
+func (r rawJMX) findJvm() rawData {
+ f := func(data rawData) bool { return string(data["modelerType"]) == "\"JvmMetrics\"" }
+ return r.find(f)
+}
+
+func (r rawJMX) findRPCActivity() rawData {
+ f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"RpcActivityForPort") }
+ return r.find(f)
+}
+
+func (r rawJMX) findFSNameSystem() rawData {
+ f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSNamesystem\"" }
+ return r.find(f)
+}
+
+func (r rawJMX) findFSDatasetState() rawData {
+ f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSDatasetState\"" }
+ return r.find(f)
+}
+
+func (r rawJMX) findDataNodeActivity() rawData {
+ f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"DataNodeActivity") }
+ return r.find(f)
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/testdata/config.json b/src/go/plugin/go.d/modules/hdfs/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/hdfs/testdata/config.yaml b/src/go/plugin/go.d/modules/hdfs/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/hdfs/testdata/datanode.json b/src/go/plugin/go.d/modules/hdfs/testdata/datanode.json
new file mode 100644
index 000000000..0f657d560
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/testdata/datanode.json
@@ -0,0 +1,165 @@
+{
+ "beans":[
+ {
+ "name":"Hadoop:service=DataNode,name=JvmMetrics",
+ "modelerType":"JvmMetrics",
+ "tag.Context":"jvm",
+ "tag.ProcessName":"DataNode",
+ "tag.SessionId":null,
+ "tag.Hostname":"dev-slave-01.dev.local",
+ "MemNonHeapUsedM":53.67546,
+ "MemNonHeapCommittedM":54.9375,
+ "MemNonHeapMaxM":-1.0,
+ "MemHeapUsedM":18.885735,
+ "MemHeapCommittedM":60.5,
+ "MemHeapMaxM":843.0,
+ "MemMaxM":843.0,
+ "GcCount":155,
+ "GcTimeMillis":672,
+ "GcNumWarnThresholdExceeded":0,
+ "GcNumInfoThresholdExceeded":0,
+ "GcTotalExtraSleepTime":8783,
+ "ThreadsNew":0,
+ "ThreadsRunnable":11,
+ "ThreadsBlocked":0,
+ "ThreadsWaiting":11,
+ "ThreadsTimedWaiting":25,
+ "ThreadsTerminated":0,
+ "LogFatal":0,
+ "LogError":1,
+ "LogWarn":2,
+ "LogInfo":257
+ },
+ {
+ "name":"Hadoop:service=DataNode,name=FSDatasetState",
+ "modelerType":"FSDatasetState",
+ "tag.Context":"FSDatasetState",
+ "tag.StorageInfo":"FSDataset{dirpath='[/data/hdfs/data]'}",
+ "tag.Hostname":"dev-slave-01.dev.local",
+ "Capacity":53675536384,
+ "DfsUsed":1186058240,
+ "Remaining":32920760320,
+ "NumFailedVolumes":0,
+ "LastVolumeFailureDate":0,
+ "EstimatedCapacityLostTotal":0,
+ "CacheUsed":0,
+ "CacheCapacity":0,
+ "NumBlocksCached":0,
+ "NumBlocksFailedToCache":0,
+ "NumBlocksFailedToUnCache":4
+ },
+ {
+ "name":"Hadoop:service=DataNode,name=DataNodeActivity-dev-slave-01.dev.local-9866",
+ "modelerType":"DataNodeActivity-dev-slave-01.dev.local-9866",
+ "tag.SessionId":null,
+ "tag.Context":"dfs",
+ "tag.Hostname":"dev-slave-01.dev.local",
+ "BytesWritten":500960407,
+ "TotalWriteTime":463,
+ "BytesRead":80689178,
+ "TotalReadTime":41203,
+ "BlocksWritten":16,
+ "BlocksRead":16,
+ "BlocksReplicated":4,
+ "BlocksRemoved":4,
+ "BlocksVerified":0,
+ "BlockVerificationFailures":0,
+ "BlocksCached":0,
+ "BlocksUncached":0,
+ "ReadsFromLocalClient":0,
+ "ReadsFromRemoteClient":16,
+ "WritesFromLocalClient":0,
+ "WritesFromRemoteClient":12,
+ "BlocksGetLocalPathInfo":0,
+ "RemoteBytesRead":80689178,
+ "RemoteBytesWritten":97283223,
+ "RamDiskBlocksWrite":0,
+ "RamDiskBlocksWriteFallback":0,
+ "RamDiskBytesWrite":0,
+ "RamDiskBlocksReadHits":0,
+ "RamDiskBlocksEvicted":0,
+ "RamDiskBlocksEvictedWithoutRead":0,
+ "RamDiskBlocksEvictionWindowMsNumOps":0,
+ "RamDiskBlocksEvictionWindowMsAvgTime":0.0,
+ "RamDiskBlocksLazyPersisted":0,
+ "RamDiskBlocksDeletedBeforeLazyPersisted":0,
+ "RamDiskBytesLazyPersisted":0,
+ "RamDiskBlocksLazyPersistWindowMsNumOps":0,
+ "RamDiskBlocksLazyPersistWindowMsAvgTime":0.0,
+ "FsyncCount":0,
+ "VolumeFailures":0,
+ "DatanodeNetworkErrors":7,
+ "DataNodeActiveXceiversCount":0,
+ "ReadBlockOpNumOps":16,
+ "ReadBlockOpAvgTime":2258.2,
+ "WriteBlockOpNumOps":12,
+ "WriteBlockOpAvgTime":12640.666666666666,
+ "BlockChecksumOpNumOps":0,
+ "BlockChecksumOpAvgTime":0.0,
+ "CopyBlockOpNumOps":0,
+ "CopyBlockOpAvgTime":0.0,
+ "ReplaceBlockOpNumOps":0,
+ "ReplaceBlockOpAvgTime":0.0,
+ "HeartbeatsNumOps":285073,
+ "HeartbeatsAvgTime":1.2035398230088497,
+ "HeartbeatsTotalNumOps":285073,
+ "HeartbeatsTotalAvgTime":1.2035398230088497,
+ "LifelinesNumOps":0,
+ "LifelinesAvgTime":0.0,
+ "BlockReportsNumOps":41,
+ "BlockReportsAvgTime":2.0,
+ "IncrementalBlockReportsNumOps":20,
+ "IncrementalBlockReportsAvgTime":1.2,
+ "CacheReportsNumOps":0,
+ "CacheReportsAvgTime":0.0,
+ "PacketAckRoundTripTimeNanosNumOps":603,
+ "PacketAckRoundTripTimeNanosAvgTime":1733672.0,
+ "FlushNanosNumOps":7660,
+ "FlushNanosAvgTime":3988.858108108108,
+ "FsyncNanosNumOps":0,
+ "FsyncNanosAvgTime":0.0,
+ "SendDataPacketBlockedOnNetworkNanosNumOps":7091,
+ "SendDataPacketBlockedOnNetworkNanosAvgTime":2.4469053762711864E7,
+ "SendDataPacketTransferNanosNumOps":7091,
+ "SendDataPacketTransferNanosAvgTime":37130.05084745763,
+ "BlocksInPendingIBR":0,
+ "BlocksReceivingInPendingIBR":0,
+ "BlocksReceivedInPendingIBR":0,
+ "BlocksDeletedInPendingIBR":0,
+ "EcReconstructionTasks":0,
+ "EcFailedReconstructionTasks":0,
+ "EcDecodingTimeNanos":0,
+ "EcReconstructionBytesRead":0,
+ "EcReconstructionBytesWritten":0,
+ "EcReconstructionRemoteBytesRead":0,
+ "EcReconstructionReadTimeMillis":0,
+ "EcReconstructionDecodingTimeMillis":0,
+ "EcReconstructionWriteTimeMillis":0
+ },
+ {
+ "name":"Hadoop:service=DataNode,name=RpcActivityForPort9867",
+ "modelerType":"RpcActivityForPort9867",
+ "tag.port":"9867",
+ "tag.Context":"rpc",
+ "tag.NumOpenConnectionsPerUser":"{}",
+ "tag.Hostname":"dev-slave-01.dev.local",
+ "ReceivedBytes":7,
+ "SentBytes":187,
+ "RpcQueueTimeNumOps":0,
+ "RpcQueueTimeAvgTime":0.0,
+ "RpcProcessingTimeNumOps":0,
+ "RpcProcessingTimeAvgTime":0.0,
+ "DeferredRpcProcessingTimeNumOps":0,
+ "DeferredRpcProcessingTimeAvgTime":0.0,
+ "RpcAuthenticationFailures":0,
+ "RpcAuthenticationSuccesses":0,
+ "RpcAuthorizationFailures":0,
+ "RpcAuthorizationSuccesses":0,
+ "RpcClientBackoff":0,
+ "RpcSlowCalls":0,
+ "NumOpenConnections":0,
+ "CallQueueLength":0,
+ "NumDroppedConnections":0
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/hdfs/testdata/namenode.json b/src/go/plugin/go.d/modules/hdfs/testdata/namenode.json
new file mode 100644
index 000000000..2d33d32f3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/testdata/namenode.json
@@ -0,0 +1,132 @@
+{
+ "beans":[
+ {
+ "name":"Hadoop:service=NameNode,name=JvmMetrics",
+ "modelerType":"JvmMetrics",
+ "tag.Context":"jvm",
+ "tag.ProcessName":"NameNode",
+ "tag.SessionId":null,
+ "tag.Hostname":"dev-master-02.dev.local",
+ "MemNonHeapUsedM":66.170395,
+ "MemNonHeapCommittedM":67.75,
+ "MemNonHeapMaxM":-1.0,
+ "MemHeapUsedM":26.603287,
+ "MemHeapCommittedM":67.0,
+ "MemHeapMaxM":843.0,
+ "MemMaxM":843.0,
+ "GcCount":1699,
+ "GcTimeMillis":3483,
+ "GcNumWarnThresholdExceeded":0,
+ "GcNumInfoThresholdExceeded":0,
+ "GcTotalExtraSleepTime":1944,
+ "ThreadsNew":0,
+ "ThreadsRunnable":7,
+ "ThreadsBlocked":0,
+ "ThreadsWaiting":6,
+ "ThreadsTimedWaiting":34,
+ "ThreadsTerminated":0,
+ "LogFatal":0,
+ "LogError":0,
+ "LogWarn":3378983,
+ "LogInfo":3382077
+ },
+ {
+ "name":"Hadoop:service=NameNode,name=FSNamesystem",
+ "modelerType":"FSNamesystem",
+ "tag.Context":"dfs",
+ "tag.HAState":"active",
+ "tag.TotalSyncTimes":"98 ",
+ "tag.Hostname":"dev-master-02.dev.local",
+ "MissingBlocks":0,
+ "MissingReplOneBlocks":0,
+ "ExpiredHeartbeats":0,
+ "TransactionsSinceLastCheckpoint":1,
+ "TransactionsSinceLastLogRoll":1,
+ "LastWrittenTransactionId":624,
+ "LastCheckpointTime":1566814983890,
+ "CapacityTotal":107351072768,
+ "CapacityTotalGB":100.0,
+ "CapacityUsed":2372116480,
+ "CapacityUsedGB":2.0,
+ "CapacityRemaining":65861697536,
+ "ProvidedCapacityTotal":0,
+ "CapacityRemainingGB":61.0,
+ "CapacityUsedNonDFS":39117258752,
+ "TotalLoad":2,
+ "SnapshottableDirectories":0,
+ "Snapshots":0,
+ "NumEncryptionZones":0,
+ "LockQueueLength":0,
+ "BlocksTotal":15,
+ "NumFilesUnderConstruction":0,
+ "NumActiveClients":0,
+ "FilesTotal":12,
+ "PendingReplicationBlocks":0,
+ "PendingReconstructionBlocks":0,
+ "UnderReplicatedBlocks":0,
+ "LowRedundancyBlocks":0,
+ "CorruptBlocks":0,
+ "ScheduledReplicationBlocks":0,
+ "PendingDeletionBlocks":0,
+ "LowRedundancyReplicatedBlocks":0,
+ "CorruptReplicatedBlocks":0,
+ "MissingReplicatedBlocks":0,
+ "MissingReplicationOneBlocks":0,
+ "HighestPriorityLowRedundancyReplicatedBlocks":0,
+ "HighestPriorityLowRedundancyECBlocks":0,
+ "BytesInFutureReplicatedBlocks":0,
+ "PendingDeletionReplicatedBlocks":0,
+ "TotalReplicatedBlocks":15,
+ "LowRedundancyECBlockGroups":0,
+ "CorruptECBlockGroups":0,
+ "MissingECBlockGroups":0,
+ "BytesInFutureECBlockGroups":0,
+ "PendingDeletionECBlocks":0,
+ "TotalECBlockGroups":0,
+ "ExcessBlocks":0,
+ "NumTimedOutPendingReconstructions":0,
+ "PostponedMisreplicatedBlocks":0,
+ "PendingDataNodeMessageCount":0,
+ "MillisSinceLastLoadedEdits":0,
+ "BlockCapacity":2097152,
+ "NumLiveDataNodes":2,
+ "NumDeadDataNodes":0,
+ "NumDecomLiveDataNodes":0,
+ "NumDecomDeadDataNodes":0,
+ "VolumeFailuresTotal":0,
+ "EstimatedCapacityLostTotal":0,
+ "NumDecommissioningDataNodes":0,
+ "StaleDataNodes":0,
+ "NumStaleStorages":0,
+ "TotalSyncCount":2,
+ "NumInMaintenanceLiveDataNodes":0,
+ "NumInMaintenanceDeadDataNodes":0,
+ "NumEnteringMaintenanceDataNodes":0
+ },
+ {
+ "name":"Hadoop:service=NameNode,name=RpcActivityForPort9000",
+ "modelerType":"RpcActivityForPort9000",
+ "tag.port":"9000",
+ "tag.Context":"rpc",
+ "tag.NumOpenConnectionsPerUser":"{\"hadoop\":2}",
+ "tag.Hostname":"dev-master-02.dev.local",
+ "ReceivedBytes":240431351,
+ "SentBytes":25067414,
+ "RpcQueueTimeNumOps":585402,
+ "RpcQueueTimeAvgTime":0.05813953488372093,
+ "RpcProcessingTimeNumOps":585402,
+ "RpcProcessingTimeAvgTime":0.0,
+ "DeferredRpcProcessingTimeNumOps":0,
+ "DeferredRpcProcessingTimeAvgTime":0.0,
+ "RpcAuthenticationFailures":0,
+ "RpcAuthenticationSuccesses":0,
+ "RpcAuthorizationFailures":0,
+ "RpcAuthorizationSuccesses":14327,
+ "RpcClientBackoff":0,
+ "RpcSlowCalls":0,
+ "NumOpenConnections":2,
+ "CallQueueLength":0,
+ "NumDroppedConnections":0
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/hdfs/testdata/unknownnode.json b/src/go/plugin/go.d/modules/hdfs/testdata/unknownnode.json
new file mode 100644
index 000000000..7370a7a37
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hdfs/testdata/unknownnode.json
@@ -0,0 +1,34 @@
+{
+ "beans":[
+ {
+ "name":"Hadoop:service=UnknownNode,name=JvmMetrics",
+ "modelerType":"JvmMetrics",
+ "tag.Context":"jvm",
+ "tag.ProcessName":"UnknownNode",
+ "tag.SessionId":null,
+ "tag.Hostname":"dev-slave-01.dev.local",
+ "MemNonHeapUsedM":53.67546,
+ "MemNonHeapCommittedM":54.9375,
+ "MemNonHeapMaxM":-1.0,
+ "MemHeapUsedM":18.885735,
+ "MemHeapCommittedM":60.5,
+ "MemHeapMaxM":843.0,
+ "MemMaxM":843.0,
+ "GcCount":155,
+ "GcTimeMillis":672,
+ "GcNumWarnThresholdExceeded":0,
+ "GcNumInfoThresholdExceeded":0,
+ "GcTotalExtraSleepTime":8783,
+ "ThreadsNew":1,
+ "ThreadsRunnable":2,
+ "ThreadsBlocked":3,
+ "ThreadsWaiting":4,
+ "ThreadsTimedWaiting":5,
+ "ThreadsTerminated":6,
+ "LogFatal":10,
+ "LogError":11,
+ "LogWarn":12,
+ "LogInfo":13
+ }
+ ]
+}
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/hpssa/README.md b/src/go/plugin/go.d/modules/hpssa/README.md
new file mode 120000
index 000000000..dd12f5a9c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/README.md
@@ -0,0 +1 @@
+integrations/hpe_smart_arrays.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/hpssa/charts.go b/src/go/plugin/go.d/modules/hpssa/charts.go
new file mode 100644
index 000000000..14b032bd3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/charts.go
@@ -0,0 +1,403 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hpssa
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioControllerStatus = module.Priority + iota
+ prioControllerTemperature
+
+ prioControllerCacheModulePresenceStatus
+ prioControllerCacheModuleStatus
+ prioControllerCacheModuleTemperature
+ prioControllerCacheModuleBatteryStatus
+
+ prioArrayStatus
+
+ prioLogicalDriveStatus
+
+ prioPhysicalDriveStatus
+ prioPhysicalDriveTemperature
+)
+
+var controllerChartsTmpl = module.Charts{
+ controllerStatusChartTmpl.Copy(),
+ controllerTemperatureChartTmpl.Copy(),
+
+ controllerCacheModulePresenceStatusChartTmpl.Copy(),
+ controllerCacheModuleStatusChartTmpl.Copy(),
+ controllerCacheModuleTemperatureChartTmpl.Copy(),
+ controllerCacheModuleBatteryStatusChartTmpl.Copy(),
+}
+
+var (
+ controllerStatusChartTmpl = module.Chart{
+ ID: "cntrl_%s_slot_%s_status",
+ Title: "Controller status",
+ Units: "status",
+ Fam: "controllers",
+ Ctx: "hpssa.controller_status",
+ Type: module.Line,
+ Priority: prioControllerStatus,
+ Dims: module.Dims{
+ {ID: "cntrl_%s_slot_%s_status_ok", Name: "ok"},
+ {ID: "cntrl_%s_slot_%s_status_nok", Name: "nok"},
+ },
+ }
+ controllerTemperatureChartTmpl = module.Chart{
+ ID: "cntrl_%s_slot_%s_temperature",
+ Title: "Controller temperature",
+ Units: "Celsius",
+ Fam: "controllers",
+ Ctx: "hpssa.controller_temperature",
+ Type: module.Line,
+ Priority: prioControllerTemperature,
+ Dims: module.Dims{
+ {ID: "cntrl_%s_slot_%s_temperature", Name: "temperature"},
+ },
+ }
+
+ controllerCacheModulePresenceStatusChartTmpl = module.Chart{
+ ID: "cntrl_%s_slot_%s_cache_presence_status",
+ Title: "Controller cache module presence",
+ Units: "status",
+ Fam: "cache",
+ Ctx: "hpssa.controller_cache_module_presence_status",
+ Type: module.Line,
+ Priority: prioControllerCacheModulePresenceStatus,
+ Dims: module.Dims{
+ {ID: "cntrl_%s_slot_%s_cache_presence_status_present", Name: "present"},
+ {ID: "cntrl_%s_slot_%s_cache_presence_status_not_present", Name: "not_present"},
+ },
+ }
+ controllerCacheModuleStatusChartTmpl = module.Chart{
+ ID: "cntrl_%s_slot_%s_cache_status",
+ Title: "Controller cache module status",
+ Units: "status",
+ Fam: "cache",
+ Ctx: "hpssa.controller_cache_module_status",
+ Type: module.Line,
+ Priority: prioControllerCacheModuleStatus,
+ Dims: module.Dims{
+ {ID: "cntrl_%s_slot_%s_cache_status_ok", Name: "ok"},
+ {ID: "cntrl_%s_slot_%s_cache_status_nok", Name: "nok"},
+ },
+ }
+ controllerCacheModuleTemperatureChartTmpl = module.Chart{
+ ID: "cntrl_%s_slot_%s_cache_temperature",
+ Title: "Controller cache module temperature",
+ Units: "Celsius",
+ Fam: "cache",
+ Ctx: "hpssa.controller_cache_module_temperature",
+ Type: module.Line,
+ Priority: prioControllerCacheModuleTemperature,
+ Dims: module.Dims{
+ {ID: "cntrl_%s_slot_%s_cache_temperature", Name: "temperature"},
+ },
+ }
+ controllerCacheModuleBatteryStatusChartTmpl = module.Chart{
+ ID: "cntrl_%s_slot_%s_cache_battery_status",
+ Title: "Controller cache module battery status",
+ Units: "status",
+ Fam: "cache",
+ Ctx: "hpssa.controller_cache_module_battery_status",
+ Type: module.Line,
+ Priority: prioControllerCacheModuleBatteryStatus,
+ Dims: module.Dims{
+ {ID: "cntrl_%s_slot_%s_cache_battery_status_ok", Name: "ok"},
+ {ID: "cntrl_%s_slot_%s_cache_battery_status_nok", Name: "nok"},
+ },
+ }
+)
+
+var arrayChartsTmpl = module.Charts{
+ arrayStatusChartTmpl.Copy(),
+}
+
+var (
+ arrayStatusChartTmpl = module.Chart{
+ ID: "array_%s_cntrl_%s_slot_%s_status",
+ Title: "Array status",
+ Units: "status",
+ Fam: "arrays",
+ Ctx: "hpssa.array_status",
+ Type: module.Line,
+ Priority: prioArrayStatus,
+ Dims: module.Dims{
+ {ID: "array_%s_cntrl_%s_slot_%s_status_ok", Name: "ok"},
+ {ID: "array_%s_cntrl_%s_slot_%s_status_nok", Name: "nok"},
+ },
+ }
+)
+
+var logicalDriveChartsTmpl = module.Charts{
+ logicalDriveStatusChartTmpl.Copy(),
+}
+
+var (
+ logicalDriveStatusChartTmpl = module.Chart{
+ ID: "ld_%s_array_%s_cntrl_%s_slot_%s_status",
+ Title: "Logical Drive status",
+ Units: "status",
+ Fam: "logical drives",
+ Ctx: "hpssa.logical_drive_status",
+ Type: module.Line,
+ Priority: prioLogicalDriveStatus,
+ Dims: module.Dims{
+ {ID: "ld_%s_array_%s_cntrl_%s_slot_%s_status_ok", Name: "ok"},
+ {ID: "ld_%s_array_%s_cntrl_%s_slot_%s_status_nok", Name: "nok"},
+ },
+ }
+)
+
+var physicalDriveChartsTmpl = module.Charts{
+ physicalDriveStatusChartTmpl.Copy(),
+ physicalDriveTemperatureChartTmpl.Copy(),
+}
+
+var (
+ physicalDriveStatusChartTmpl = module.Chart{
+ ID: "pd_%s_ld_%s_array_%s_cntrl_%s_slot_%s_status",
+ Title: "Physical Drive status",
+ Units: "status",
+ Fam: "physical drives",
+ Ctx: "hpssa.physical_drive_status",
+ Type: module.Line,
+ Priority: prioPhysicalDriveStatus,
+ Dims: module.Dims{
+ {ID: "pd_%s_ld_%s_array_%s_cntrl_%s_slot_%s_status_ok", Name: "ok"},
+ {ID: "pd_%s_ld_%s_array_%s_cntrl_%s_slot_%s_status_nok", Name: "nok"},
+ },
+ }
+ physicalDriveTemperatureChartTmpl = module.Chart{
+ ID: "pd_%s_ld_%s_array_%s_cntrl_%s_slot_%s_temperature",
+ Title: "Physical Drive temperature",
+ Units: "Celsius",
+ Fam: "physical drives",
+ Ctx: "hpssa.physical_drive_temperature",
+ Type: module.Line,
+ Priority: prioPhysicalDriveTemperature,
+ Dims: module.Dims{
+ {ID: "pd_%s_ld_%s_array_%s_cntrl_%s_slot_%s_temperature", Name: "temperature"},
+ },
+ }
+)
+
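+// updateCharts reconciles the chart set with the topology reported by ssacli:
+// charts are added for controllers, arrays, logical and physical drives seen for the
+// first time, and charts for entities no longer present in the output are marked for removal.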
+func (h *Hpssa) updateCharts(controllers map[string]*hpssaController) {
+ seenControllers := make(map[string]bool)
+ seenArrays := make(map[string]bool)
+ seenLDrives := make(map[string]bool)
+ seenPDrives := make(map[string]bool)
+
+ for _, cntrl := range controllers {
+ key := cntrl.uniqueKey()
+ seenControllers[key] = true
+ if _, ok := h.seenControllers[key]; !ok {
+ h.seenControllers[key] = cntrl
+ h.addControllerCharts(cntrl)
+ }
+
+ for _, pd := range cntrl.unassignedDrives {
+ key := pd.uniqueKey()
+ seenPDrives[key] = true
+ if _, ok := h.seenPDrives[key]; !ok {
+ h.seenPDrives[key] = pd
+ h.addPhysicalDriveCharts(pd)
+ }
+ }
+
+ for _, arr := range cntrl.arrays {
+ key := arr.uniqueKey()
+ seenArrays[key] = true
+ if _, ok := h.seenArrays[key]; !ok {
+ h.seenArrays[key] = arr
+ h.addArrayCharts(arr)
+ }
+
+ for _, ld := range arr.logicalDrives {
+ key := ld.uniqueKey()
+ seenLDrives[key] = true
+ if _, ok := h.seenLDrives[key]; !ok {
+ h.seenLDrives[key] = ld
+ h.addLogicalDriveCharts(ld)
+ }
+
+ for _, pd := range ld.physicalDrives {
+ key := pd.uniqueKey()
+ seenPDrives[key] = true
+ if _, ok := h.seenPDrives[key]; !ok {
+ h.seenPDrives[key] = pd
+ h.addPhysicalDriveCharts(pd)
+ }
+ }
+ }
+ }
+ }
+
+ for k, cntrl := range h.seenControllers {
+ if !seenControllers[k] {
+ delete(h.seenControllers, k)
+ h.removeControllerCharts(cntrl)
+ }
+ }
+ for k, arr := range h.seenArrays {
+ if !seenArrays[k] {
+ delete(h.seenArrays, k)
+ h.removeArrayCharts(arr)
+ }
+ }
+ for k, ld := range h.seenLDrives {
+ if !seenLDrives[k] {
+ delete(h.seenLDrives, k)
+ h.removeLogicalDriveCharts(ld)
+ }
+ }
+ for k, pd := range h.seenPDrives {
+ if !seenPDrives[k] {
+ delete(h.seenPDrives, k)
+ h.removePhysicalDriveCharts(pd)
+ }
+ }
+}
+
+func (h *Hpssa) addControllerCharts(cntrl *hpssaController) {
+ charts := controllerChartsTmpl.Copy()
+
+ if cntrl.controllerTemperatureC == "" {
+ _ = charts.Remove(controllerTemperatureChartTmpl.ID)
+ }
+
+ if cntrl.cacheBoardPresent != "True" {
+ _ = charts.Remove(controllerCacheModuleStatusChartTmpl.ID)
+ _ = charts.Remove(controllerCacheModuleTemperatureChartTmpl.ID)
+ _ = charts.Remove(controllerCacheModuleBatteryStatusChartTmpl.ID)
+ }
+ if cntrl.cacheModuleTemperatureC == "" {
+ _ = charts.Remove(controllerCacheModuleTemperatureChartTmpl.ID)
+ }
+ if cntrl.batteryCapacitorStatus == "" {
+ _ = charts.Remove(controllerCacheModuleBatteryStatusChartTmpl.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(cntrl.model), cntrl.slot)
+ chart.Labels = []module.Label{
+ {Key: "slot", Value: cntrl.slot},
+ {Key: "model", Value: cntrl.model},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, cntrl.model, cntrl.slot)
+ }
+ }
+
+ if err := h.Charts().Add(*charts...); err != nil {
+ h.Warning(err)
+ }
+}
+
+func (h *Hpssa) removeControllerCharts(cntrl *hpssaController) {
+ px := fmt.Sprintf("cntrl_%s_slot_%s_", strings.ToLower(cntrl.model), cntrl.slot)
+ h.removeCharts(px)
+}
+
+func (h *Hpssa) addArrayCharts(arr *hpssaArray) {
+ charts := arrayChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, arr.id, strings.ToLower(arr.cntrl.model), arr.cntrl.slot)
+ chart.Labels = []module.Label{
+ {Key: "slot", Value: arr.cntrl.slot},
+ {Key: "array_id", Value: arr.id},
+ {Key: "interface_type", Value: arr.interfaceType},
+ {Key: "array_type", Value: arr.arrayType},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, arr.id, arr.cntrl.model, arr.cntrl.slot)
+ }
+ }
+
+ if err := h.Charts().Add(*charts...); err != nil {
+ h.Warning(err)
+ }
+}
+
+func (h *Hpssa) removeArrayCharts(arr *hpssaArray) {
+ px := fmt.Sprintf("array_%s_cntrl_%s_slot_%s_", arr.id, strings.ToLower(arr.cntrl.model), arr.cntrl.slot)
+ h.removeCharts(px)
+}
+
+func (h *Hpssa) addLogicalDriveCharts(ld *hpssaLogicalDrive) {
+ charts := logicalDriveChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, ld.id, ld.arr.id, strings.ToLower(ld.cntrl.model), ld.cntrl.slot)
+ chart.Labels = []module.Label{
+ {Key: "slot", Value: ld.cntrl.slot},
+ {Key: "array_id", Value: ld.arr.id},
+ {Key: "logical_drive_id", Value: ld.id},
+ {Key: "disk_name", Value: ld.diskName},
+ {Key: "drive_type", Value: ld.driveType},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, ld.id, ld.arr.id, ld.cntrl.model, ld.cntrl.slot)
+ }
+ }
+
+ if err := h.Charts().Add(*charts...); err != nil {
+ h.Warning(err)
+ }
+}
+
+func (h *Hpssa) removeLogicalDriveCharts(ld *hpssaLogicalDrive) {
+ px := fmt.Sprintf("ld_%s_array_%s_cntrl_%s_slot_%s_", ld.id, ld.arr.id, strings.ToLower(ld.cntrl.model), ld.cntrl.slot)
+ h.removeCharts(px)
+}
+
+func (h *Hpssa) addPhysicalDriveCharts(pd *hpssaPhysicalDrive) {
+ charts := physicalDriveChartsTmpl.Copy()
+
+ if pd.currentTemperatureC == "" {
+ _ = charts.Remove(physicalDriveTemperatureChartTmpl.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, pd.location, pd.ldId(), pd.arrId(), strings.ToLower(pd.cntrl.model), pd.cntrl.slot)
+ chart.Labels = []module.Label{
+ {Key: "slot", Value: pd.cntrl.slot},
+ {Key: "array_id", Value: pd.arrId()},
+ {Key: "logical_drive_id", Value: pd.ldId()},
+ {Key: "location", Value: pd.location},
+ {Key: "interface_type", Value: pd.interfaceType},
+ {Key: "drive_type", Value: pd.driveType},
+ {Key: "model", Value: pd.model},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, pd.location, pd.ldId(), pd.arrId(), pd.cntrl.model, pd.cntrl.slot)
+ }
+ }
+
+ if err := h.Charts().Add(*charts...); err != nil {
+ h.Warning(err)
+ }
+}
+
+func (h *Hpssa) removePhysicalDriveCharts(pd *hpssaPhysicalDrive) {
+ px := fmt.Sprintf("pd_%s_ld_%s_array_%s_cntrl_%s_slot_%s_",
+ pd.location, pd.ldId(), pd.arrId(), strings.ToLower(pd.cntrl.model), pd.cntrl.slot)
+ h.removeCharts(px)
+}
+
+func (h *Hpssa) removeCharts(prefix string) {
+ for _, chart := range *h.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hpssa/collect.go b/src/go/plugin/go.d/modules/hpssa/collect.go
new file mode 100644
index 000000000..a0ce7d0bc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/collect.go
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hpssa
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func (h *Hpssa) collect() (map[string]int64, error) {
+ data, err := h.exec.controllersInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ controllers, err := parseSsacliControllersInfo(data)
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ h.collectControllers(mx, controllers)
+ h.updateCharts(controllers)
+
+ return mx, nil
+}
+
+func (h *Hpssa) collectControllers(mx map[string]int64, controllers map[string]*hpssaController) {
+ for _, cntrl := range controllers {
+ h.collectController(mx, cntrl)
+
+ for _, pd := range cntrl.unassignedDrives {
+ h.collectPhysicalDrive(mx, pd)
+ }
+
+ for _, arr := range cntrl.arrays {
+ h.collectArray(mx, arr)
+
+ for _, ld := range arr.logicalDrives {
+ h.collectLogicalDrive(mx, ld)
+
+ for _, pd := range ld.physicalDrives {
+ h.collectPhysicalDrive(mx, pd)
+ }
+ }
+ }
+ }
+}
+
+func (h *Hpssa) collectController(mx map[string]int64, cntrl *hpssaController) {
+ px := fmt.Sprintf("cntrl_%s_slot_%s_", cntrl.model, cntrl.slot)
+
+ writeStatusOkNok(mx, px, cntrl.controllerStatus)
+
+ if v, ok := parseNumber(cntrl.controllerTemperatureC); ok {
+ mx[px+"temperature"] = v
+ }
+
+ mx[px+"cache_presence_status_present"] = 0
+ mx[px+"cache_presence_status_not_present"] = 0
+ if cntrl.cacheBoardPresent != "True" {
+ mx[px+"cache_presence_status_not_present"] = 1
+ return
+ }
+
+ mx[px+"cache_presence_status_present"] = 1
+
+ writeStatusOkNok(mx, px+"cache_", cntrl.cacheStatus)
+
+ if v, ok := parseNumber(cntrl.cacheModuleTemperatureC); ok {
+ mx[px+"cache_temperature"] = v
+ }
+
+ writeStatusOkNok(mx, px+"cache_battery_", cntrl.batteryCapacitorStatus)
+}
+
+func (h *Hpssa) collectArray(mx map[string]int64, arr *hpssaArray) {
+ if arr.cntrl == nil {
+ return
+ }
+
+ px := fmt.Sprintf("array_%s_cntrl_%s_slot_%s_",
+ arr.id, arr.cntrl.model, arr.cntrl.slot)
+
+ writeStatusOkNok(mx, px, arr.status)
+}
+
+func (h *Hpssa) collectLogicalDrive(mx map[string]int64, ld *hpssaLogicalDrive) {
+ if ld.cntrl == nil || ld.arr == nil {
+ return
+ }
+
+ px := fmt.Sprintf("ld_%s_array_%s_cntrl_%s_slot_%s_",
+ ld.id, ld.arr.id, ld.cntrl.model, ld.cntrl.slot)
+
+ writeStatusOkNok(mx, px, ld.status)
+}
+
+func (h *Hpssa) collectPhysicalDrive(mx map[string]int64, pd *hpssaPhysicalDrive) {
+ if pd.cntrl == nil {
+ return
+ }
+
+ px := fmt.Sprintf("pd_%s_ld_%s_array_%s_cntrl_%s_slot_%s_",
+ pd.location, pd.ldId(), pd.arrId(), pd.cntrl.model, pd.cntrl.slot)
+
+ writeStatusOkNok(mx, px, pd.status)
+
+ if v, ok := parseNumber(pd.currentTemperatureC); ok {
+ mx[px+"temperature"] = v
+ }
+}
+
+func parseNumber(s string) (int64, bool) {
+ v, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return 0, false
+ }
+ return int64(v), true
+}
+
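+// writeStatusOkNok translates a status string into a pair of flag dimensions:
+// "OK" sets <prefix>status_ok to 1, any other non-empty value sets <prefix>status_nok to 1,
+// and an empty status leaves both at 0.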
+func writeStatusOkNok(mx map[string]int64, prefix, status string) {
+ if !strings.HasSuffix(prefix, "_") {
+ prefix += "_"
+ }
+
+ mx[prefix+"status_ok"] = 0
+ mx[prefix+"status_nok"] = 0
+
+ switch status {
+ case "":
+ case "OK":
+ mx[prefix+"status_ok"] = 1
+ default:
+ mx[prefix+"status_nok"] = 1
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hpssa/config_schema.json b/src/go/plugin/go.d/modules/hpssa/config_schema.json
new file mode 100644
index 000000000..788d7685e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "HPSSA collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the `ssacli` binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hpssa/exec.go b/src/go/plugin/go.d/modules/hpssa/exec.go
new file mode 100644
index 000000000..510b7d654
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/exec.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hpssa
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newSsacliExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *ssacliExec {
+ return &ssacliExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ }
+}
+
+type ssacliExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+}
+
+func (e *ssacliExec) controllersInfo() ([]byte, error) {
+ return e.execute("ssacli-controllers-info")
+}
+
+func (e *ssacliExec) execute(args ...string) ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.ndsudoPath, args...)
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/hpssa/hpssa.go b/src/go/plugin/go.d/modules/hpssa/hpssa.go
new file mode 100644
index 000000000..1245f477f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/hpssa.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hpssa
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("hpssa", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Hpssa {
+ return &Hpssa{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ seenControllers: make(map[string]*hpssaController),
+ seenArrays: make(map[string]*hpssaArray),
+ seenLDrives: make(map[string]*hpssaLogicalDrive),
+ seenPDrives: make(map[string]*hpssaPhysicalDrive),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ Hpssa struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec ssacli
+
+ seenControllers map[string]*hpssaController
+ seenArrays map[string]*hpssaArray
+ seenLDrives map[string]*hpssaLogicalDrive
+ seenPDrives map[string]*hpssaPhysicalDrive
+ }
+ ssacli interface {
+ controllersInfo() ([]byte, error)
+ }
+)
+
+func (h *Hpssa) Configuration() any {
+ return h.Config
+}
+
+func (h *Hpssa) Init() error {
+ ssacliExec, err := h.initSsacliExec()
+ if err != nil {
+ h.Errorf("ssacli exec initialization: %v", err)
+ return err
+ }
+ h.exec = ssacliExec
+
+ return nil
+}
+
+func (h *Hpssa) Check() error {
+ mx, err := h.collect()
+ if err != nil {
+ h.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (h *Hpssa) Charts() *module.Charts {
+ return h.charts
+}
+
+func (h *Hpssa) Collect() map[string]int64 {
+ mx, err := h.collect()
+ if err != nil {
+ h.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (h *Hpssa) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/hpssa/hpssa_test.go b/src/go/plugin/go.d/modules/hpssa/hpssa_test.go
new file mode 100644
index 000000000..a3e90d2a7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/hpssa_test.go
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hpssa
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataP212andP410i, _ = os.ReadFile("testdata/ssacli-P212_P410i.txt")
+ dataP400ar, _ = os.ReadFile("testdata/ssacli-P400ar.txt")
+ dataP400iUnassigned, _ = os.ReadFile("testdata/ssacli-P400i-unassigned.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataP212andP410i": dataP212andP410i,
+ "dataP400ar": dataP400ar,
+ "dataP400iUnassigned": dataP400iUnassigned,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestHpssa_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if 'ndsudo' not found": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ hpe := New()
+
+ if test.wantFail {
+ assert.Error(t, hpe.Init())
+ } else {
+ assert.NoError(t, hpe.Init())
+ }
+ })
+ }
+}
+
+func TestHpssa_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Hpssa
+ }{
+ "not initialized exec": {
+ prepare: func() *Hpssa {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Hpssa {
+ hpe := New()
+ hpe.exec = prepareMockOkP212andP410i()
+ _ = hpe.Check()
+ return hpe
+ },
+ },
+ "after collect": {
+ prepare: func() *Hpssa {
+ hpe := New()
+ hpe.exec = prepareMockOkP212andP410i()
+ _ = hpe.Collect()
+ return hpe
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ hpe := test.prepare()
+
+ assert.NotPanics(t, hpe.Cleanup)
+ })
+ }
+}
+
+func TestHpssa_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestHpssa_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockSsacliExec
+ wantFail bool
+ }{
+ "success P212 and P410i": {
+ wantFail: false,
+ prepareMock: prepareMockOkP212andP410i,
+ },
+ "success P400ar": {
+ wantFail: false,
+ prepareMock: prepareMockOkP400ar,
+ },
+ "success P400i with Unassigned": {
+ wantFail: false,
+ prepareMock: prepareMockOkP400iUnassigned,
+ },
+ "fails if error on controllersInfo()": {
+ wantFail: true,
+ prepareMock: prepareMockErr,
+ },
+ "fails if empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ "fails if unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ hpe := New()
+ mock := test.prepareMock()
+ hpe.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, hpe.Check())
+ } else {
+ assert.NoError(t, hpe.Check())
+ }
+ })
+ }
+}
+
+func TestHpssa_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockSsacliExec
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success P212 and P410i": {
+ prepareMock: prepareMockOkP212andP410i,
+ wantCharts: (len(controllerChartsTmpl)*2 - 6) +
+ len(arrayChartsTmpl)*3 +
+ len(logicalDriveChartsTmpl)*3 +
+ len(physicalDriveChartsTmpl)*18,
+ wantMetrics: map[string]int64{
+ "array_A_cntrl_P212_slot_5_status_nok": 0,
+ "array_A_cntrl_P212_slot_5_status_ok": 1,
+ "array_A_cntrl_P410i_slot_0_status_nok": 0,
+ "array_A_cntrl_P410i_slot_0_status_ok": 1,
+ "array_B_cntrl_P410i_slot_0_status_nok": 0,
+ "array_B_cntrl_P410i_slot_0_status_ok": 1,
+ "cntrl_P212_slot_5_cache_battery_status_nok": 0,
+ "cntrl_P212_slot_5_cache_battery_status_ok": 0,
+ "cntrl_P212_slot_5_cache_presence_status_not_present": 0,
+ "cntrl_P212_slot_5_cache_presence_status_present": 1,
+ "cntrl_P212_slot_5_cache_status_nok": 0,
+ "cntrl_P212_slot_5_cache_status_ok": 1,
+ "cntrl_P212_slot_5_status_nok": 0,
+ "cntrl_P212_slot_5_status_ok": 1,
+ "cntrl_P410i_slot_0_cache_battery_status_nok": 0,
+ "cntrl_P410i_slot_0_cache_battery_status_ok": 0,
+ "cntrl_P410i_slot_0_cache_presence_status_not_present": 0,
+ "cntrl_P410i_slot_0_cache_presence_status_present": 1,
+ "cntrl_P410i_slot_0_cache_status_nok": 0,
+ "cntrl_P410i_slot_0_cache_status_ok": 1,
+ "cntrl_P410i_slot_0_status_nok": 0,
+ "cntrl_P410i_slot_0_status_ok": 1,
+ "ld_1_array_A_cntrl_P212_slot_5_status_nok": 0,
+ "ld_1_array_A_cntrl_P212_slot_5_status_ok": 1,
+ "ld_1_array_A_cntrl_P410i_slot_0_status_nok": 0,
+ "ld_1_array_A_cntrl_P410i_slot_0_status_ok": 1,
+ "ld_2_array_B_cntrl_P410i_slot_0_status_nok": 0,
+ "ld_2_array_B_cntrl_P410i_slot_0_status_ok": 1,
+ "pd_1I:1:1_ld_2_array_B_cntrl_P410i_slot_0_status_nok": 0,
+ "pd_1I:1:1_ld_2_array_B_cntrl_P410i_slot_0_status_ok": 1,
+ "pd_1I:1:1_ld_2_array_B_cntrl_P410i_slot_0_temperature": 37,
+ "pd_1I:1:2_ld_2_array_B_cntrl_P410i_slot_0_status_nok": 0,
+ "pd_1I:1:2_ld_2_array_B_cntrl_P410i_slot_0_status_ok": 1,
+ "pd_1I:1:2_ld_2_array_B_cntrl_P410i_slot_0_temperature": 37,
+ "pd_1I:1:3_ld_2_array_B_cntrl_P410i_slot_0_status_nok": 0,
+ "pd_1I:1:3_ld_2_array_B_cntrl_P410i_slot_0_status_ok": 1,
+ "pd_1I:1:3_ld_2_array_B_cntrl_P410i_slot_0_temperature": 43,
+ "pd_1I:1:4_ld_2_array_B_cntrl_P410i_slot_0_status_nok": 0,
+ "pd_1I:1:4_ld_2_array_B_cntrl_P410i_slot_0_status_ok": 1,
+ "pd_1I:1:4_ld_2_array_B_cntrl_P410i_slot_0_temperature": 44,
+ "pd_2E:1:10_ld_na_array_na_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:10_ld_na_array_na_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:10_ld_na_array_na_cntrl_P212_slot_5_temperature": 35,
+ "pd_2E:1:11_ld_na_array_na_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:11_ld_na_array_na_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:11_ld_na_array_na_cntrl_P212_slot_5_temperature": 34,
+ "pd_2E:1:12_ld_na_array_na_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:12_ld_na_array_na_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:12_ld_na_array_na_cntrl_P212_slot_5_temperature": 31,
+ "pd_2E:1:1_ld_1_array_A_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:1_ld_1_array_A_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:1_ld_1_array_A_cntrl_P212_slot_5_temperature": 33,
+ "pd_2E:1:2_ld_1_array_A_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:2_ld_1_array_A_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:2_ld_1_array_A_cntrl_P212_slot_5_temperature": 34,
+ "pd_2E:1:3_ld_1_array_A_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:3_ld_1_array_A_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:3_ld_1_array_A_cntrl_P212_slot_5_temperature": 35,
+ "pd_2E:1:4_ld_1_array_A_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:4_ld_1_array_A_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:4_ld_1_array_A_cntrl_P212_slot_5_temperature": 35,
+ "pd_2E:1:5_ld_1_array_A_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:5_ld_1_array_A_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:5_ld_1_array_A_cntrl_P212_slot_5_temperature": 34,
+ "pd_2E:1:6_ld_1_array_A_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:6_ld_1_array_A_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:6_ld_1_array_A_cntrl_P212_slot_5_temperature": 33,
+ "pd_2E:1:7_ld_na_array_na_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:7_ld_na_array_na_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:7_ld_na_array_na_cntrl_P212_slot_5_temperature": 30,
+ "pd_2E:1:8_ld_na_array_na_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:8_ld_na_array_na_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:8_ld_na_array_na_cntrl_P212_slot_5_temperature": 33,
+ "pd_2E:1:9_ld_na_array_na_cntrl_P212_slot_5_status_nok": 0,
+ "pd_2E:1:9_ld_na_array_na_cntrl_P212_slot_5_status_ok": 1,
+ "pd_2E:1:9_ld_na_array_na_cntrl_P212_slot_5_temperature": 30,
+ "pd_2I:1:5_ld_1_array_A_cntrl_P410i_slot_0_status_nok": 0,
+ "pd_2I:1:5_ld_1_array_A_cntrl_P410i_slot_0_status_ok": 1,
+ "pd_2I:1:5_ld_1_array_A_cntrl_P410i_slot_0_temperature": 38,
+ "pd_2I:1:6_ld_1_array_A_cntrl_P410i_slot_0_status_nok": 0,
+ "pd_2I:1:6_ld_1_array_A_cntrl_P410i_slot_0_status_ok": 1,
+ "pd_2I:1:6_ld_1_array_A_cntrl_P410i_slot_0_temperature": 36,
+ },
+ },
+ "success P400ar": {
+ prepareMock: prepareMockOkP400ar,
+ wantCharts: len(controllerChartsTmpl)*1 +
+ len(arrayChartsTmpl)*2 +
+ len(logicalDriveChartsTmpl)*2 +
+ len(physicalDriveChartsTmpl)*8,
+ wantMetrics: map[string]int64{
+ "array_A_cntrl_P440ar_slot_0_status_nok": 0,
+ "array_A_cntrl_P440ar_slot_0_status_ok": 1,
+ "array_B_cntrl_P440ar_slot_0_status_nok": 0,
+ "array_B_cntrl_P440ar_slot_0_status_ok": 1,
+ "cntrl_P440ar_slot_0_cache_battery_status_nok": 0,
+ "cntrl_P440ar_slot_0_cache_battery_status_ok": 1,
+ "cntrl_P440ar_slot_0_cache_presence_status_not_present": 0,
+ "cntrl_P440ar_slot_0_cache_presence_status_present": 1,
+ "cntrl_P440ar_slot_0_cache_status_nok": 0,
+ "cntrl_P440ar_slot_0_cache_status_ok": 1,
+ "cntrl_P440ar_slot_0_cache_temperature": 41,
+ "cntrl_P440ar_slot_0_status_nok": 0,
+ "cntrl_P440ar_slot_0_status_ok": 1,
+ "cntrl_P440ar_slot_0_temperature": 47,
+ "ld_1_array_A_cntrl_P440ar_slot_0_status_nok": 0,
+ "ld_1_array_A_cntrl_P440ar_slot_0_status_ok": 1,
+ "ld_2_array_B_cntrl_P440ar_slot_0_status_nok": 0,
+ "ld_2_array_B_cntrl_P440ar_slot_0_status_ok": 1,
+ "pd_1I:1:1_ld_1_array_A_cntrl_P440ar_slot_0_status_nok": 0,
+ "pd_1I:1:1_ld_1_array_A_cntrl_P440ar_slot_0_status_ok": 1,
+ "pd_1I:1:1_ld_1_array_A_cntrl_P440ar_slot_0_temperature": 27,
+ "pd_1I:1:2_ld_1_array_A_cntrl_P440ar_slot_0_status_nok": 0,
+ "pd_1I:1:2_ld_1_array_A_cntrl_P440ar_slot_0_status_ok": 1,
+ "pd_1I:1:2_ld_1_array_A_cntrl_P440ar_slot_0_temperature": 28,
+ "pd_1I:1:3_ld_1_array_A_cntrl_P440ar_slot_0_status_nok": 0,
+ "pd_1I:1:3_ld_1_array_A_cntrl_P440ar_slot_0_status_ok": 1,
+ "pd_1I:1:3_ld_1_array_A_cntrl_P440ar_slot_0_temperature": 27,
+ "pd_1I:1:4_ld_2_array_B_cntrl_P440ar_slot_0_status_nok": 0,
+ "pd_1I:1:4_ld_2_array_B_cntrl_P440ar_slot_0_status_ok": 1,
+ "pd_1I:1:4_ld_2_array_B_cntrl_P440ar_slot_0_temperature": 30,
+ "pd_2I:1:5_ld_1_array_A_cntrl_P440ar_slot_0_status_nok": 0,
+ "pd_2I:1:5_ld_1_array_A_cntrl_P440ar_slot_0_status_ok": 1,
+ "pd_2I:1:5_ld_1_array_A_cntrl_P440ar_slot_0_temperature": 26,
+ "pd_2I:1:6_ld_1_array_A_cntrl_P440ar_slot_0_status_nok": 0,
+ "pd_2I:1:6_ld_1_array_A_cntrl_P440ar_slot_0_status_ok": 1,
+ "pd_2I:1:6_ld_1_array_A_cntrl_P440ar_slot_0_temperature": 28,
+ "pd_2I:1:7_ld_1_array_A_cntrl_P440ar_slot_0_status_nok": 0,
+ "pd_2I:1:7_ld_1_array_A_cntrl_P440ar_slot_0_status_ok": 1,
+ "pd_2I:1:7_ld_1_array_A_cntrl_P440ar_slot_0_temperature": 27,
+ "pd_2I:1:8_ld_2_array_B_cntrl_P440ar_slot_0_status_nok": 0,
+ "pd_2I:1:8_ld_2_array_B_cntrl_P440ar_slot_0_status_ok": 1,
+ "pd_2I:1:8_ld_2_array_B_cntrl_P440ar_slot_0_temperature": 29,
+ },
+ },
+ "success P400i with Unassigned": {
+ prepareMock: prepareMockOkP400iUnassigned,
+ wantCharts: (len(controllerChartsTmpl)*1 - 2) +
+ len(arrayChartsTmpl)*1 +
+ len(logicalDriveChartsTmpl)*1 +
+ len(physicalDriveChartsTmpl)*4,
+ wantMetrics: map[string]int64{
+ "array_A_cntrl_P400i_slot_0_status_nok": 0,
+ "array_A_cntrl_P400i_slot_0_status_ok": 1,
+ "cntrl_P400i_slot_0_cache_battery_status_nok": 1,
+ "cntrl_P400i_slot_0_cache_battery_status_ok": 0,
+ "cntrl_P400i_slot_0_cache_presence_status_not_present": 0,
+ "cntrl_P400i_slot_0_cache_presence_status_present": 1,
+ "cntrl_P400i_slot_0_cache_status_nok": 1,
+ "cntrl_P400i_slot_0_cache_status_ok": 0,
+ "cntrl_P400i_slot_0_status_nok": 0,
+ "cntrl_P400i_slot_0_status_ok": 1,
+ "ld_1_array_A_cntrl_P400i_slot_0_status_nok": 0,
+ "ld_1_array_A_cntrl_P400i_slot_0_status_ok": 1,
+ "pd_1I:1:1_ld_na_array_na_cntrl_P400i_slot_0_status_nok": 0,
+ "pd_1I:1:1_ld_na_array_na_cntrl_P400i_slot_0_status_ok": 1,
+ "pd_1I:1:1_ld_na_array_na_cntrl_P400i_slot_0_temperature": 28,
+ "pd_1I:1:2_ld_na_array_na_cntrl_P400i_slot_0_status_nok": 0,
+ "pd_1I:1:2_ld_na_array_na_cntrl_P400i_slot_0_status_ok": 1,
+ "pd_1I:1:2_ld_na_array_na_cntrl_P400i_slot_0_temperature": 28,
+ "pd_1I:1:3_ld_1_array_A_cntrl_P400i_slot_0_status_nok": 0,
+ "pd_1I:1:3_ld_1_array_A_cntrl_P400i_slot_0_status_ok": 1,
+ "pd_1I:1:3_ld_1_array_A_cntrl_P400i_slot_0_temperature": 23,
+ "pd_1I:1:4_ld_1_array_A_cntrl_P400i_slot_0_status_nok": 0,
+ "pd_1I:1:4_ld_1_array_A_cntrl_P400i_slot_0_status_ok": 1,
+ "pd_1I:1:4_ld_1_array_A_cntrl_P400i_slot_0_temperature": 23,
+ },
+ },
+ "fails if error on controllersInfo()": {
+ prepareMock: prepareMockErr,
+ wantMetrics: nil,
+ wantCharts: 0,
+ },
+ "fails if empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ wantCharts: 0,
+ },
+ "fails if unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ wantCharts: 0,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ hpe := New()
+ mock := test.prepareMock()
+ hpe.exec = mock
+
+ mx := hpe.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ assert.Len(t, *hpe.Charts(), test.wantCharts)
+ testMetricsHasAllChartsDims(t, hpe, mx)
+ })
+ }
+}
+
+func TestHpssa_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Hpssa{}, dataConfigJSON, dataConfigYAML)
+}
+
+func prepareMockOkP212andP410i() *mockSsacliExec {
+ return &mockSsacliExec{
+ infoData: dataP212andP410i,
+ }
+}
+
+func prepareMockOkP400ar() *mockSsacliExec {
+ return &mockSsacliExec{
+ infoData: dataP400ar,
+ }
+}
+
+func prepareMockOkP400iUnassigned() *mockSsacliExec {
+ return &mockSsacliExec{
+ infoData: dataP400iUnassigned,
+ }
+}
+
+func prepareMockErr() *mockSsacliExec {
+ return &mockSsacliExec{
+ errOnInfo: true,
+ }
+}
+
+func prepareMockEmptyResponse() *mockSsacliExec {
+ return &mockSsacliExec{}
+}
+
+func prepareMockUnexpectedResponse() *mockSsacliExec {
+ resp := []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`)
+ return &mockSsacliExec{
+ infoData: resp,
+ }
+}
+
+type mockSsacliExec struct {
+ errOnInfo bool
+ infoData []byte
+}
+
+func (m *mockSsacliExec) controllersInfo() ([]byte, error) {
+ if m.errOnInfo {
+ return nil, errors.New("mock.controllersInfo() error")
+ }
+ return m.infoData, nil
+}
+
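+// testMetricsHasAllChartsDims asserts that every dimension and variable of every
+// non-obsolete chart has a corresponding value in the collected metrics map.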
+func testMetricsHasAllChartsDims(t *testing.T, hpe *Hpssa, mx map[string]int64) {
+ for _, chart := range *hpe.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/hpssa/init.go b/src/go/plugin/go.d/modules/hpssa/init.go
new file mode 100644
index 000000000..3e08c443b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/init.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hpssa
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
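+// initSsacliExec locates the ndsudo helper next to the plugin executable and,
+// if it is present, wraps it in an ssacli executor configured with the collector's timeout.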
+func (h *Hpssa) initSsacliExec() (ssacli, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ ssacliExec := newSsacliExec(ndsudoPath, h.Timeout.Duration(), h.Logger)
+
+ return ssacliExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/hpssa/integrations/hpe_smart_arrays.md b/src/go/plugin/go.d/modules/hpssa/integrations/hpe_smart_arrays.md
new file mode 100644
index 000000000..47fe74739
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/integrations/hpe_smart_arrays.md
@@ -0,0 +1,271 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/hpssa/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/hpssa/metadata.yaml"
+sidebar_label: "HPE Smart Arrays"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HPE Smart Arrays
+
+
+<img src="https://netdata.cloud/img/hp.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: hpssa
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitors the health of HPE Smart Arrays by tracking the status of controllers, arrays, logical and physical drives in your storage system.
+It relies on the `ssacli` CLI tool but avoids directly executing the binary.
+Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+Executed commands:
+- `ssacli ctrl all show config detail`
+
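+The following minimal Go sketch is for illustration only; it is not the collector's actual source, and `runWithTimeout` is a hypothetical helper. It shows the general pattern of executing the command with a timeout, similar in spirit to how the collector invokes it through `ndsudo`:
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"os/exec"
+	"time"
+)
+
+// runWithTimeout runs a command and aborts it if it exceeds the timeout.
+func runWithTimeout(timeout time.Duration, name string, args ...string) ([]byte, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), timeout)
+	defer cancel()
+	return exec.CommandContext(ctx, name, args...).Output()
+}
+
+func main() {
+	out, err := runWithTimeout(2*time.Second, "ssacli", "ctrl", "all", "show", "config", "detail")
+	if err != nil {
+		fmt.Println("error:", err)
+		return
+	}
+	fmt.Printf("collected %d bytes of controller info\n", len(out))
+}
+```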
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per controller
+
+These metrics refer to the Controller.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| slot | Slot number |
+| model | Controller model |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hpssa.controller_status | ok, nok | status |
+| hpssa.controller_temperature | temperature | Celsius |
+| hpssa.controller_cache_module_presence_status | present, not_present | status |
+| hpssa.controller_cache_module_status | ok, nok | status |
+| hpssa.controller_cache_module_temperature | temperature | Celsius |
+| hpssa.controller_cache_module_battery_status | ok, nok | status |
+
+### Per array
+
+These metrics refer to the Array.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| slot | Slot number |
+| array_id | Array id |
+| interface_type | Array interface type (e.g. SATA) |
+| array_type | Array type (e.g. Data) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hpssa.array_status | ok, nok | status |
+
+### Per logical drive
+
+These metrics refer to the Logical Drive.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| slot | Slot number |
+| array_id | Array id |
+| logical_drive_id | Logical Drive id (number) |
+| disk_name | Disk name (e.g. /dev/sda) |
+| drive_type | Drive type (e.g. Data) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hpssa.logical_drive_status | ok, nok | status |
+
+### Per physical drive
+
+These metrics refer to the Physical Drive.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| slot | Slot number |
+| array_id | Array id or "na" if unassigned |
+| logical_drive_id | Logical Drive id or "na" if unassigned |
+| location | Drive location in port:box:bay format (e.g. 1I:1:1) |
+| interface_type | Drive interface type (e.g. SATA) |
+| drive_type | Drive type (e.g. Data Drive, Unassigned Drive) |
+| model | Drive model |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hpssa.physical_drive_status | ok, nok | status |
+| hpssa.physical_drive_temperature | temperature | Celsius |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install ssacli
+
+See [official installation instructions](https://support.hpe.com/connect/s/softwaredetails?language=en_US&collectionId=MTX-0cb3f808e2514d3d).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/ssacli.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/ssacli.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | ssacli binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: hpssa
+ update_every: 5 # Collect HPE Smart Array statistics every 5 seconds
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `hpssa` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m hpssa
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `hpssa` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep hpssa
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep hpssa /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep hpssa
+```
+
+
diff --git a/src/go/plugin/go.d/modules/hpssa/metadata.yaml b/src/go/plugin/go.d/modules/hpssa/metadata.yaml
new file mode 100644
index 000000000..6cf7a6377
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/metadata.yaml
@@ -0,0 +1,213 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-hpssa
+ plugin_name: go.d.plugin
+ module_name: hpssa
+ monitored_instance:
+ name: HPE Smart Arrays
+ link: "https://buy.hpe.com/us/en/options/controller-controller-options/smart-array-controllers-smart-host-bus-adapters/c/7109730"
+ icon_filename: "hp.svg"
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - storage
+ - raid-controller
+ - hp
+ - hpssa
+ - array
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Monitors the health of HPE Smart Arrays by tracking the status of controllers, arrays, logical and physical drives in your storage system.
+ It relies on the `ssacli` CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+ Executed commands:
+ - `ssacli ctrl all show config detail`
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Install ssacli
+ description: |
+ See [official installation instructions](https://support.hpe.com/connect/s/softwaredetails?language=en_US&collectionId=MTX-0cb3f808e2514d3d).
+ configuration:
+ file:
+ name: go.d/ssacli.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: ssacli binary execution timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: hpssa
+ update_every: 5 # Collect HPE Smart Array statistics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: controller
+ description: These metrics refer to the Controller.
+ labels:
+ - name: slot
+ description: Slot number
+ - name: model
+ description: Controller model
+ metrics:
+ - name: hpssa.controller_status
+ description: Controller status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: nok
+ - name: hpssa.controller_temperature
+ description: Controller temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: hpssa.controller_cache_module_presence_status
+ description: Controller cache module presence
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: present
+ - name: not_present
+ - name: hpssa.controller_cache_module_status
+ description: Controller cache module status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: nok
+ - name: hpssa.controller_cache_module_temperature
+ description: Controller cache module temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: hpssa.controller_cache_module_battery_status
+ description: Controller cache module battery status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: nok
+ - name: array
+ description: These metrics refer to the Array.
+ labels:
+ - name: slot
+ description: Slot number
+ - name: array_id
+ description: Array id
+ - name: interface_type
+ description: Array interface type (e.g. SATA)
+ - name: array_type
+ description: Array type (e.g. Data)
+ metrics:
+ - name: hpssa.array_status
+ description: Array status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: nok
+ - name: logical drive
+ description: These metrics refer to the Logical Drive.
+ labels:
+ - name: slot
+ description: Slot number
+ - name: array_id
+ description: Array id
+ - name: logical_drive_id
+ description: Logical Drive id (number)
+ - name: disk_name
+ description: Disk name (e.g. /dev/sda)
+ - name: drive_type
+ description: Drive type (e.g. Data)
+ metrics:
+ - name: hpssa.logical_drive_status
+ description: Logical Drive status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: nok
+ - name: physical drive
+ description: These metrics refer to the Physical Drive.
+ labels:
+ - name: slot
+ description: Slot number
+ - name: array_id
+ description: Array id or "na" if unassigned
+ - name: logical_drive_id
+ description: Logical Drive id or "na" if unassigned
+ - name: location
+ description: Drive location in port:box:bay format (e.g. 1I:1:1)
+ - name: interface_type
+ description: Drive interface type (e.g. SATA)
+ - name: drive_type
+ description: Drive type (e.g. Data Drive, Unassigned Drive)
+ - name: model
+ description: Drive model
+ metrics:
+ - name: hpssa.physical_drive_status
+ description: Physical Drive status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: nok
+ - name: hpssa.physical_drive_temperature
+ description: Physical Drive temperature
+              unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
diff --git a/src/go/plugin/go.d/modules/hpssa/parse.go b/src/go/plugin/go.d/modules/hpssa/parse.go
new file mode 100644
index 000000000..64d1c8ae9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/parse.go
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hpssa
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+type hpssaController struct {
+ model string
+ slot string
+ serialNumber string
+ controllerStatus string
+ cacheBoardPresent string
+ cacheStatus string
+ cacheRatio string
+ batteryCapacitorCount string
+ batteryCapacitorStatus string
+ controllerTemperatureC string
+ cacheModuleTemperatureC string
+ numberOfPorts string
+ driverName string
+ arrays map[string]*hpssaArray
+ unassignedDrives map[string]*hpssaPhysicalDrive
+}
+
+func (c *hpssaController) uniqueKey() string {
+ return fmt.Sprintf("%s/%s", c.model, c.slot)
+}
+
+type hpssaArray struct {
+ cntrl *hpssaController
+
+ id string
+ interfaceType string
+ unusedSpace string
+ usedSpace string
+ status string
+ arrayType string
+ logicalDrives map[string]*hpssaLogicalDrive
+}
+
+func (a *hpssaArray) uniqueKey() string {
+ return fmt.Sprintf("%s/%s/%s", a.cntrl.model, a.cntrl.slot, a.id)
+}
+
+type hpssaLogicalDrive struct {
+ cntrl *hpssaController
+ arr *hpssaArray
+
+ id string
+ size string
+ status string
+ diskName string
+ uniqueIdentifier string
+ logicalDriveLabel string
+ driveType string
+ physicalDrives map[string]*hpssaPhysicalDrive
+}
+
+func (ld *hpssaLogicalDrive) uniqueKey() string {
+ return fmt.Sprintf("%s/%s/%s/%s", ld.cntrl.model, ld.cntrl.slot, ld.arr.id, ld.id)
+}
+
+type hpssaPhysicalDrive struct {
+ cntrl *hpssaController
+ arr *hpssaArray
+ ld *hpssaLogicalDrive
+
+ location string // port:box:bay
+ status string
+ driveType string
+ interfaceType string
+ size string
+ serialNumber string
+ wwid string
+ model string
+ currentTemperatureC string
+}
+
+func (pd *hpssaPhysicalDrive) uniqueKey() string {
+ return fmt.Sprintf("%s/%s/%s/%s/%s", pd.cntrl.model, pd.cntrl.slot, pd.arrId(), pd.ldId(), pd.location)
+}
+
+func (pd *hpssaPhysicalDrive) arrId() string {
+ if pd.arr == nil {
+ return "na"
+ }
+ return pd.arr.id
+}
+
+func (pd *hpssaPhysicalDrive) ldId() string {
+ if pd.ld == nil {
+ return "na"
+ }
+ return pd.ld.id
+}
+
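+// parseSsacliControllersInfo parses the output of `ssacli ctrl all show config detail`.
+// Section headers ("Smart Array ...", "Array:", "Logical Drive:", "physicaldrive ...")
+// switch the current parsing context, and the indented attribute lines that follow are
+// matched with fixed indentation (3 spaces for controllers, 6 for arrays, 9 for drives).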
+func parseSsacliControllersInfo(data []byte) (map[string]*hpssaController, error) {
+ var (
+ cntrl *hpssaController
+ arr *hpssaArray
+ ld *hpssaLogicalDrive
+ pd *hpssaPhysicalDrive
+
+ line string
+ prevLine string
+ section string
+ unassigned bool
+ )
+
+ controllers := make(map[string]*hpssaController)
+
+ sc := bufio.NewScanner(bytes.NewReader(data))
+
+ for sc.Scan() {
+ prevLine = line
+ line = sc.Text()
+
+ switch {
+ case line == "":
+ section = ""
+ continue
+ case strings.HasPrefix(line, "Smart Array"):
+ section = "controller"
+
+ v, err := parseControllerLine(line)
+ if err != nil {
+ return nil, err
+ }
+
+ cntrl = v
+ controllers[cntrl.slot] = cntrl
+
+ continue
+ case strings.HasPrefix(line, " Array:") && cntrl != nil:
+ section = "array"
+ unassigned = false
+
+ arr = parseArrayLine(line)
+ cntrl.arrays[arr.id] = arr
+
+ continue
+ case strings.HasPrefix(line, " Logical Drive:") && cntrl != nil && arr != nil:
+ section = "logical drive"
+
+ ld = parseLogicalDriveLine(line)
+			arr.logicalDrives[ld.id] = ld
+
+ continue
+ case strings.HasPrefix(line, " physicaldrive") && prevLine == "":
+ section = "physical drive"
+
+ if unassigned && cntrl == nil {
+ return nil, fmt.Errorf("unassigned drive but controller is nil (line '%s')", line)
+ }
+ if !unassigned && ld == nil {
+				return nil, fmt.Errorf("assigned drive but logical drive is nil (line '%s')", line)
+ }
+
+ v, err := parsePhysicalDriveLine(line)
+ if err != nil {
+ return nil, err
+ }
+
+ pd = v
+ if unassigned {
+ cntrl.unassignedDrives[pd.location] = pd
+ } else {
+ ld.physicalDrives[pd.location] = pd
+ }
+
+ continue
+ case strings.HasPrefix(line, " Unassigned"):
+ unassigned = true
+ continue
+ }
+
+ switch section {
+ case "controller":
+ parseControllerSectionLine(line, cntrl)
+ case "array":
+ parseArraySectionLine(line, arr)
+ case "logical drive":
+ parseLogicalDriveSectionLine(line, ld)
+ case "physical drive":
+ parsePhysicalDriveSectionLine(line, pd)
+ }
+ }
+
+ if len(controllers) == 0 {
+ return nil, fmt.Errorf("no controllers found")
+ }
+
+ updateHpssaHierarchy(controllers)
+
+ return controllers, nil
+}
+
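+// updateHpssaHierarchy backfills parent pointers (controller, array, logical drive)
+// on arrays, logical drives, and physical drives after parsing is complete.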
+func updateHpssaHierarchy(controllers map[string]*hpssaController) {
+ for _, cntrl := range controllers {
+ for _, pd := range cntrl.unassignedDrives {
+ pd.cntrl = cntrl
+ }
+ for _, arr := range cntrl.arrays {
+ arr.cntrl = cntrl
+ for _, ld := range arr.logicalDrives {
+ ld.cntrl = cntrl
+ ld.arr = arr
+ for _, pd := range ld.physicalDrives {
+ pd.cntrl = cntrl
+ pd.arr = arr
+ pd.ld = ld
+ }
+ }
+ }
+ }
+}
+
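+// parseControllerLine parses a controller header such as
+// "Smart Array P440ar in Slot 0 (Embedded)": the first field is the model
+// and the fourth field is the slot number.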
+func parseControllerLine(line string) (*hpssaController, error) {
+ parts := strings.Fields(strings.TrimPrefix(line, "Smart Array "))
+ if len(parts) < 4 {
+ return nil, fmt.Errorf("malformed Smart Array line: '%s'", line)
+ }
+
+ cntrl := &hpssaController{
+ model: parts[0],
+ slot: parts[3],
+ arrays: make(map[string]*hpssaArray),
+ unassignedDrives: make(map[string]*hpssaPhysicalDrive),
+ }
+
+ return cntrl, nil
+}
+
+func parseArrayLine(line string) *hpssaArray {
+ arr := &hpssaArray{
+ id: getColonSepValue(line),
+ logicalDrives: make(map[string]*hpssaLogicalDrive),
+ }
+
+ return arr
+}
+
+func parseLogicalDriveLine(line string) *hpssaLogicalDrive {
+ ld := &hpssaLogicalDrive{
+ id: getColonSepValue(line),
+ physicalDrives: make(map[string]*hpssaPhysicalDrive),
+ }
+
+ return ld
+}
+
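+// parsePhysicalDriveLine parses a physical drive detail header such as
+// "physicaldrive 1I:1:1" (exactly two fields); the second field is the
+// port:box:bay location.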
+func parsePhysicalDriveLine(line string) (*hpssaPhysicalDrive, error) {
+ parts := strings.Fields(strings.TrimSpace(line))
+ if len(parts) != 2 {
+ return nil, fmt.Errorf("malformed physicaldrive line: '%s'", line)
+ }
+
+ pd := &hpssaPhysicalDrive{
+ location: parts[1],
+ }
+
+ return pd, nil
+}
+
+func parseControllerSectionLine(line string, cntrl *hpssaController) {
+ indent := strings.Repeat(" ", 3)
+
+ switch {
+ case strings.HasPrefix(line, indent+"Serial Number:"):
+ cntrl.serialNumber = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Controller Status:"):
+ cntrl.controllerStatus = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Cache Board Present:"):
+ cntrl.cacheBoardPresent = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Cache Status:"):
+ cntrl.cacheStatus = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Cache Ratio:"):
+ cntrl.cacheRatio = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Controller Temperature (C):"):
+ cntrl.controllerTemperatureC = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Cache Module Temperature (C):"):
+ cntrl.cacheModuleTemperatureC = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Number of Ports:"):
+ cntrl.numberOfPorts = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Driver Name:"):
+ cntrl.driverName = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Battery/Capacitor Count:"):
+ cntrl.batteryCapacitorCount = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Battery/Capacitor Status:"):
+ cntrl.batteryCapacitorStatus = getColonSepValue(line)
+ }
+}
+
+func parseArraySectionLine(line string, arr *hpssaArray) {
+ indent := strings.Repeat(" ", 6)
+
+ switch {
+ case strings.HasPrefix(line, indent+"Interface Type:"):
+ arr.interfaceType = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Unused Space:"):
+ arr.unusedSpace = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Used Space:"):
+ arr.usedSpace = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Status:"):
+ arr.status = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Array Type:"):
+ arr.arrayType = getColonSepValue(line)
+ }
+}
+
+func parseLogicalDriveSectionLine(line string, ld *hpssaLogicalDrive) {
+ indent := strings.Repeat(" ", 9)
+
+ switch {
+ case strings.HasPrefix(line, indent+"Size:"):
+ ld.size = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Status:"):
+ ld.status = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Disk Name:"):
+ ld.diskName = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Unique Identifier:"):
+ ld.uniqueIdentifier = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Logical Drive Label:"):
+ ld.logicalDriveLabel = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Drive Type:"):
+ ld.driveType = getColonSepValue(line)
+ }
+}
+
+func parsePhysicalDriveSectionLine(line string, pd *hpssaPhysicalDrive) {
+ indent := strings.Repeat(" ", 9)
+
+ switch {
+ case strings.HasPrefix(line, indent+"Status:"):
+ pd.status = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Drive Type:"):
+ pd.driveType = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Interface Type:"):
+ pd.interfaceType = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Size:"):
+ pd.size = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Serial Number:"):
+ pd.serialNumber = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"WWID:"):
+ pd.wwid = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Model:"):
+ pd.model = getColonSepValue(line)
+ case strings.HasPrefix(line, indent+"Current Temperature (C):"):
+ pd.currentTemperatureC = getColonSepValue(line)
+ }
+}
+
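+// getColonSepValue returns the trimmed text after the first ':' in line,
+// or an empty string if the line contains no colon.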
+func getColonSepValue(line string) string {
+ i := strings.IndexByte(line, ':')
+ if i == -1 {
+ return ""
+ }
+ return strings.TrimSpace(line[i+1:])
+}
diff --git a/src/go/plugin/go.d/modules/hpssa/testdata/config.json b/src/go/plugin/go.d/modules/hpssa/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/hpssa/testdata/config.yaml b/src/go/plugin/go.d/modules/hpssa/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P212_P410i.txt b/src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P212_P410i.txt
new file mode 100644
index 000000000..c54cc10c7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P212_P410i.txt
@@ -0,0 +1,748 @@
+Smart Array P212 in Slot 5
+ Bus Interface: PCI
+ Slot: 5
+ Serial Number: REDACTED
+ Cache Serial Number: REDACTED
+ Controller Status: OK
+ Hardware Revision: C
+ Firmware Version: 6.60-0
+ Rebuild Priority: Medium
+ Expand Priority: Medium
+ Surface Scan Delay: 15 secs
+ Surface Scan Mode: Idle
+ Parallel Surface Scan Supported: No
+ Queue Depth: Automatic
+ Monitor and Performance Delay: 60 min
+ Elevator Sort: Enabled
+ Degraded Performance Optimization: Disabled
+ Wait for Cache Room: Disabled
+ Surface Analysis Inconsistency Notification: Disabled
+ Post Prompt Timeout: 0 secs
+ Cache Board Present: True
+ Cache Status: OK
+ Cache Ratio: 100% Read / 0% Write
+ Drive Write Cache: Disabled
+ Total Cache Size: 0.2
+ Total Cache Memory Available: 0.1
+ No-Battery Write Cache: Disabled
+ SATA NCQ Supported: True
+ Number of Ports: 2 (1 Internal / 1 External )
+ Encryption: Not Set
+ Driver Name: hpsa
+ Driver Version: 3.4.20
+ Driver Supports SSD Smart Path: True
+ PCI Address (Domain:Bus:Device.Function): 0000:14:00.0
+ Port Max Phy Rate Limiting Supported: False
+ Host Serial Number: REDACTED
+ Sanitize Erase Supported: False
+ Primary Boot Volume: None
+ Secondary Boot Volume: None
+
+
+ Port Name: 1I
+ Port ID: 0
+ Port Connection Number: 0
+ SAS Address: 5001438014623D00
+ Port Location: Internal
+
+ Port Name: 2E
+ Port ID: 1
+ Port Connection Number: 1
+ SAS Address: 5001438014623D04
+ Port Location: External
+
+
+ StorageWorks MSA 60 at Port 2E, Box 1, OK
+
+ Fan Status: OK
+ Temperature Status: OK
+ Power Supply Status: Redundant
+ Vendor ID: HP
+ Serial Number:
+ Firmware Version: 2.16
+ Drive Bays: 12
+ Port: 2E
+ Box: 1
+ Location: External
+
+ Expander 249
+ Device Number: 249
+ Firmware Version: 2.16
+ WWID: REDACTED
+ Port: 2E
+ Box: 1
+ Vendor ID: HP
+
+ Enclosure SEP (Vendor ID HP, Model MSA60) 248
+ Device Number: 248
+ Firmware Version: 2.16
+ WWID: REDACTED
+ Port: 2E
+ Box: 1
+ Vendor ID: HP
+ Model: MSA60
+ SEP: (1) 2.16
+ Backplane Module (BPM): (1) 2.06, (2) 2.06
+ Fan Control Module (FCM): (1) 1.08, (2) 1.08
+ Health Monitor Module (HMM): (1) 1.10, (2) 1.10
+ Seven Segment LED Display: (1) 0.10
+
+ Physical Drives
+ physicaldrive 2E:1:1 (port 2E:box 1:bay 1, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:2 (port 2E:box 1:bay 2, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:3 (port 2E:box 1:bay 3, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:4 (port 2E:box 1:bay 4, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:5 (port 2E:box 1:bay 5, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:6 (port 2E:box 1:bay 6, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:7 (port 2E:box 1:bay 7, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:8 (port 2E:box 1:bay 8, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:9 (port 2E:box 1:bay 9, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:10 (port 2E:box 1:bay 10, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:11 (port 2E:box 1:bay 11, SATA HDD, 1 TB, OK)
+ physicaldrive 2E:1:12 (port 2E:box 1:bay 12, SATA HDD, 1 TB, OK)
+
+
+ Array: A
+ Interface Type: SATA
+ Unused Space: 0 MB (0.00%)
+ Used Space: 5.46 TB (100.00%)
+ Status: OK
+ Array Type: Data
+ Smart Path: disable
+
+
+ Logical Drive: 1
+ Size: 4.55 TB
+ Fault Tolerance: 5
+ Heads: 255
+ Sectors Per Track: 32
+ Cylinders: 65535
+ Strip Size: 256 KB
+ Full Stripe Size: 1280 KB
+ Status: OK
+ Unrecoverable Media Errors: None
+ Caching: Enabled
+ Parity Initialization Status: Initialization Completed
+ Unique Identifier: 600508B1001C48AFDD414EB0F3004830
+ Disk Name: /dev/sdc
+ Mount Points: /srv/mnt2 4.5 TB Partition Number 1
+ OS Status: LOCKED
+ Logical Drive Label: A4A06DEEPACCPID11170BVJE8DB
+ Drive Type: Data
+ LD Acceleration Method: Controller Cache
+
+
+ physicaldrive 2E:1:1
+ Port: 2E
+ Box: 1
+ Bay: 1
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG2
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EBZQB
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 33
+ Maximum Temperature (C): 51
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:2
+ Port: 2E
+ Box: 1
+ Bay: 2
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG3
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EAMZE
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 34
+ Maximum Temperature (C): 39
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:3
+ Port: 2E
+ Box: 1
+ Bay: 3
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG1
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000ECWCQ
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 35
+ Maximum Temperature (C): 41
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:4
+ Port: 2E
+ Box: 1
+ Bay: 4
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG4
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EAMZE
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 35
+ Maximum Temperature (C): 44
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:5
+ Port: 2E
+ Box: 1
+ Bay: 5
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG1
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EBZQB
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 34
+ Maximum Temperature (C): 51
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:6
+ Port: 2E
+ Box: 1
+ Bay: 6
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG1
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EBZQB
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 33
+ Maximum Temperature (C): 50
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+
+ Unassigned
+
+ physicaldrive 2E:1:7
+ Port: 2E
+ Box: 1
+ Bay: 7
+ Status: OK
+ Drive Type: Unassigned Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG2
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EBZQB
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 30
+ Maximum Temperature (C): 50
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:8
+ Port: 2E
+ Box: 1
+ Bay: 8
+ Status: OK
+ Drive Type: Unassigned Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG1
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EAMZE
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 33
+ Maximum Temperature (C): 41
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:9
+ Port: 2E
+ Box: 1
+ Bay: 9
+ Status: OK
+ Drive Type: Unassigned Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG1
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EBZQB
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 30
+ Maximum Temperature (C): 50
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:10
+ Port: 2E
+ Box: 1
+ Bay: 10
+ Status: OK
+ Drive Type: Unassigned Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG2
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EBNCF
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 35
+ Maximum Temperature (C): 41
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:11
+ Port: 2E
+ Box: 1
+ Bay: 11
+ Status: OK
+ Drive Type: Unassigned Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG1
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA ST31000524NS
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 34
+ Maximum Temperature (C): 42
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2E:1:12
+ Port: 2E
+ Box: 1
+ Bay: 12
+ Status: OK
+ Drive Type: Unassigned Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 7200
+ Firmware Revision: HPG1
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA MB1000EBZQB
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 31
+ Maximum Temperature (C): 50
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+
+ Enclosure SEP (Vendor ID HP, Model MSA60) 248
+ Device Number: 248
+ Firmware Version: 2.16
+ WWID: REDACTED
+ Port: 2E
+ Box: 1
+ Vendor ID: HP
+ Model: MSA60
+ SEP: (1) 2.16
+ Backplane Module (BPM): (1) 2.06, (2) 2.06
+ Fan Control Module (FCM): (1) 1.08, (2) 1.08
+ Health Monitor Module (HMM): (1) 1.10, (2) 1.10
+ Seven Segment LED Display: (1) 0.10
+
+ Expander 249
+ Device Number: 249
+ Firmware Version: 2.16
+ WWID: REDACTED
+ Port: 2E
+ Box: 1
+ Vendor ID: HP
+
+ SEP (Vendor ID PMCSIERA, Model SRC 8x6G) 250
+ Device Number: 250
+ Firmware Version: RevC
+ WWID: REDACTED
+ Vendor ID: PMCSIERA
+ Model: SRC 8x6G
+
+
+Smart Array P410i in Slot 0 (Embedded)
+ Bus Interface: PCI
+ Slot: 0
+ Serial Number: REDACTED
+ Cache Serial Number: REDACTED
+ Controller Status: OK
+ Hardware Revision: C
+ Firmware Version: 6.40-0
+ Rebuild Priority: Medium
+ Expand Priority: Medium
+ Surface Scan Delay: 15 secs
+ Surface Scan Mode: Idle
+ Parallel Surface Scan Supported: No
+ Queue Depth: Automatic
+ Monitor and Performance Delay: 60 min
+ Elevator Sort: Enabled
+ Degraded Performance Optimization: Disabled
+ Wait for Cache Room: Disabled
+ Surface Analysis Inconsistency Notification: Disabled
+ Post Prompt Timeout: 0 secs
+ Cache Board Present: True
+ Cache Status: OK
+ Cache Ratio: 100% Read / 0% Write
+ Drive Write Cache: Disabled
+ Total Cache Size: 0.2
+ Total Cache Memory Available: 0.1
+ No-Battery Write Cache: Disabled
+ SATA NCQ Supported: True
+ Number of Ports: 2 Internal only
+ Encryption: Not Set
+ Driver Name: hpsa
+ Driver Version: 3.4.20
+ Driver Supports SSD Smart Path: True
+ PCI Address (Domain:Bus:Device.Function): 0000:04:00.0
+ Port Max Phy Rate Limiting Supported: False
+ Host Serial Number: REDACTED
+ Sanitize Erase Supported: False
+ Primary Boot Volume: logicaldrive 1 (600508B1001C8CBE468FB9524F39E535)
+ Secondary Boot Volume: None
+
+
+
+ Internal Drive Cage at Port 1I, Box 1, OK
+
+ Power Supply Status: Not Redundant
+ Drive Bays: 4
+ Port: 1I
+ Box: 1
+ Location: Internal
+
+ Physical Drives
+ physicaldrive 1I:1:1 (port 1I:box 1:bay 1, SAS HDD, 146 GB, OK)
+ physicaldrive 1I:1:2 (port 1I:box 1:bay 2, SAS HDD, 146 GB, OK)
+ physicaldrive 1I:1:3 (port 1I:box 1:bay 3, SAS HDD, 146 GB, OK)
+ physicaldrive 1I:1:4 (port 1I:box 1:bay 4, SAS HDD, 146 GB, OK)
+
+
+
+ Internal Drive Cage at Port 2I, Box 1, OK
+
+ Power Supply Status: Not Redundant
+ Drive Bays: 4
+ Port: 2I
+ Box: 1
+ Location: Internal
+
+ Physical Drives
+ physicaldrive 2I:1:5 (port 2I:box 1:bay 5, SAS HDD, 146 GB, OK)
+ physicaldrive 2I:1:6 (port 2I:box 1:bay 6, SAS HDD, 146 GB, OK)
+
+
+ Port Name: 1I
+ Port ID: 0
+ Port Connection Number: 0
+ SAS Address: 50123456789ABCDE
+ Port Location: Internal
+
+ Port Name: 2I
+ Port ID: 1
+ Port Connection Number: 1
+ SAS Address: 50123456789ABCE2
+ Port Location: Internal
+
+ Array: A
+ Interface Type: SAS
+ Unused Space: 6 MB (0.00%)
+ Used Space: 273.40 GB (100.00%)
+ Status: OK
+ Array Type: Data
+ Smart Path: disable
+
+
+ Logical Drive: 1
+ Size: 136.70 GB
+ Fault Tolerance: 1
+ Heads: 255
+ Sectors Per Track: 32
+ Cylinders: 35132
+ Strip Size: 256 KB
+ Full Stripe Size: 256 KB
+ Status: OK
+ Unrecoverable Media Errors: None
+ Caching: Enabled
+ Unique Identifier: 600508B1001C8CBE468FB9524F39E535
+ Disk Name: /dev/sda
+ Mount Points: /boot 243 MB Partition Number 1
+ OS Status: LOCKED
+ Boot Volume: Primary
+ Logical Drive Label: ADF758B150123456789ABCDEDC07
+ Mirror Group 1:
+ physicaldrive 2I:1:5 (port 2I:box 1:bay 5, SAS HDD, 146 GB, OK)
+ Mirror Group 2:
+ physicaldrive 2I:1:6 (port 2I:box 1:bay 6, SAS HDD, 146 GB, OK)
+ Drive Type: Data
+ LD Acceleration Method: Controller Cache
+
+
+ physicaldrive 2I:1:5
+ Port: 2I
+ Box: 1
+ Bay: 5
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SAS
+ Size: 146 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 15000
+ Firmware Revision: HPD5 (FW update is recommended to minimum version: HPDA)
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: HP EH0146FARWD
+ Current Temperature (C): 38
+ Maximum Temperature (C): 38
+ PHY Count: 2
+ PHY Transfer Rate: 6.0Gbps, Unknown
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2I:1:6
+ Port: 2I
+ Box: 1
+ Bay: 6
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SAS
+ Size: 146 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 15000
+ Firmware Revision: HPDF
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: HP EH0146FAWJB
+ Current Temperature (C): 36
+ Maximum Temperature (C): 43
+ PHY Count: 2
+ PHY Transfer Rate: 6.0Gbps, Unknown
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+
+
+ Array: B
+ Interface Type: SAS
+ Unused Space: 0 MB (0.00%)
+ Used Space: 546.81 GB (100.00%)
+ Status: OK
+ Array Type: Data
+ Smart Path: disable
+
+
+ Logical Drive: 2
+ Size: 273.40 GB
+ Fault Tolerance: 1+0
+ Heads: 255
+ Sectors Per Track: 32
+ Cylinders: 65535
+ Strip Size: 256 KB
+ Full Stripe Size: 512 KB
+ Status: OK
+ Unrecoverable Media Errors: None
+ Caching: Enabled
+ Unique Identifier: 600508B1001CE15640A74343E4DD1E18
+ Disk Name: /dev/sdb
+ Mount Points: /srv/mnt1 262.3 GB Partition Number 1
+ OS Status: LOCKED
+ Mirror Group 1:
+ physicaldrive 1I:1:1 (port 1I:box 1:bay 1, SAS HDD, 146 GB, OK)
+ physicaldrive 1I:1:2 (port 1I:box 1:bay 2, SAS HDD, 146 GB, OK)
+ Mirror Group 2:
+ physicaldrive 1I:1:3 (port 1I:box 1:bay 3, SAS HDD, 146 GB, OK)
+ physicaldrive 1I:1:4 (port 1I:box 1:bay 4, SAS HDD, 146 GB, OK)
+ Drive Type: Data
+ LD Acceleration Method: Controller Cache
+
+
+ physicaldrive 1I:1:1
+ Port: 1I
+ Box: 1
+ Bay: 1
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SAS
+ Size: 146 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 10000
+ Firmware Revision: HPDD
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: HP EG0146FAWHU
+ Current Temperature (C): 37
+ Maximum Temperature (C): 43
+ PHY Count: 2
+ PHY Transfer Rate: 6.0Gbps, Unknown
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 1I:1:2
+ Port: 1I
+ Box: 1
+ Bay: 2
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SAS
+ Size: 146 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 10000
+ Firmware Revision: HPDD
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: HP EG0146FAWHU
+ Current Temperature (C): 37
+ Maximum Temperature (C): 44
+ PHY Count: 2
+ PHY Transfer Rate: 6.0Gbps, Unknown
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 1I:1:3
+ Port: 1I
+ Box: 1
+ Bay: 3
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SAS
+ Size: 146 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 10000
+ Firmware Revision: HPDB
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: HP DG146BAAJB
+ Current Temperature (C): 43
+ Maximum Temperature (C): 52
+ PHY Count: 2
+ PHY Transfer Rate: 3.0Gbps, Unknown
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 1I:1:4
+ Port: 1I
+ Box: 1
+ Bay: 4
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SAS
+ Size: 146 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Rotational Speed: 10000
+ Firmware Revision: HPDB
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: HP DG146BAAJB
+ Current Temperature (C): 44
+ Maximum Temperature (C): 55
+ PHY Count: 2
+ PHY Transfer Rate: 3.0Gbps, Unknown
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+
+ SEP (Vendor ID PMCSIERA, Model SRC 8x6G) 250
+ Device Number: 250
+ Firmware Version: RevC
+ WWID: REDACTED
+ Vendor ID: PMCSIERA
+ Model: SRC 8x6G
diff --git a/src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P400ar.txt b/src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P400ar.txt
new file mode 100644
index 000000000..7abec7179
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P400ar.txt
@@ -0,0 +1,397 @@
+Smart Array P440ar in Slot 0 (Embedded)
+ Bus Interface: PCI
+ Slot: 0
+ Serial Number: REDACTED
+ Cache Serial Number: REDACTED
+ RAID 6 (ADG) Status: Enabled
+ Controller Status: OK
+ Hardware Revision: B
+ Firmware Version: 3.56-0
+ Rebuild Priority: Low
+ Expand Priority: Medium
+ Surface Scan Delay: 15 secs
+ Surface Scan Mode: Idle
+ Parallel Surface Scan Supported: Yes
+ Current Parallel Surface Scan Count: 4
+ Max Parallel Surface Scan Count: 16
+ Queue Depth: Automatic
+ Monitor and Performance Delay: 60 min
+ Elevator Sort: Enabled
+ Degraded Performance Optimization: Disabled
+ Inconsistency Repair Policy: Disabled
+ Wait for Cache Room: Disabled
+ Surface Analysis Inconsistency Notification: Disabled
+ Post Prompt Timeout: 0 secs
+ Cache Board Present: True
+ Cache Status: OK
+ Cache Ratio: 10% Read / 90% Write
+ Drive Write Cache: Enabled
+ Total Cache Size: 2.0 GB
+ Total Cache Memory Available: 1.8 GB
+ No-Battery Write Cache: Enabled
+ SSD Caching RAID5 WriteBack Enabled: True
+ SSD Caching Version: 2
+ Cache Backup Power Source: Batteries
+ Battery/Capacitor Count: 1
+ Battery/Capacitor Status: OK
+ SATA NCQ Supported: True
+ Spare Activation Mode: Activate on physical drive failure (default)
+ Controller Temperature (C): 47
+ Cache Module Temperature (C): 41
+ Number of Ports: 2 Internal only
+ Encryption: Disabled
+ Express Local Encryption: False
+ Driver Name: hpsa
+ Driver Version: 3.4.4
+ Driver Supports SSD Smart Path: True
+ PCI Address (Domain:Bus:Device.Function): 0000:03:00.0
+ Negotiated PCIe Data Rate: PCIe 3.0 x8 (7880 MB/s)
+ Controller Mode: RAID
+ Pending Controller Mode: RAID
+ Port Max Phy Rate Limiting Supported: False
+ Latency Scheduler Setting: Disabled
+ Current Power Mode: MaxPerformance
+ Survival Mode: Enabled
+ Host Serial Number: REDACTED
+ Sanitize Erase Supported: False
+ Primary Boot Volume: logicaldrive 1 (600508B1001C158B69C0104DA29E6FF7)
+ Secondary Boot Volume: logicaldrive 2 (600508B1001C6BBD22BCA12CEDF36CB0)
+
+
+ Port Name: 1I
+ Port ID: 0
+ Port Connection Number: 0
+ SAS Address: 5001438037D24990
+ Port Location: Internal
+ Managed Cable Connected: False
+
+ Port Name: 2I
+ Port ID: 1
+ Port Connection Number: 1
+ SAS Address: 5001438037D24994
+ Port Location: Internal
+ Managed Cable Connected: False
+
+
+ Internal Drive Cage at Port 1I, Box 1, OK
+
+ Power Supply Status: Not Redundant
+ Drive Bays: 4
+ Port: 1I
+ Box: 1
+ Location: Internal
+
+ Physical Drives
+ physicaldrive 1I:1:1 (port 1I:box 1:bay 1, SATA SSD, 1.9 TB, OK)
+ physicaldrive 1I:1:2 (port 1I:box 1:bay 2, SATA SSD, 1.9 TB, OK)
+ physicaldrive 1I:1:3 (port 1I:box 1:bay 3, SATA SSD, 1.9 TB, OK)
+ physicaldrive 1I:1:4 (port 1I:box 1:bay 4, SATA HDD, 1 TB, OK)
+
+
+
+ Internal Drive Cage at Port 2I, Box 1, OK
+
+ Power Supply Status: Not Redundant
+ Drive Bays: 4
+ Port: 2I
+ Box: 1
+ Location: Internal
+
+ Physical Drives
+ physicaldrive 2I:1:5 (port 2I:box 1:bay 5, SATA SSD, 1.9 TB, OK)
+ physicaldrive 2I:1:6 (port 2I:box 1:bay 6, SATA SSD, 1.9 TB, OK)
+ physicaldrive 2I:1:7 (port 2I:box 1:bay 7, SATA SSD, 1.9 TB, OK)
+ physicaldrive 2I:1:8 (port 2I:box 1:bay 8, SATA HDD, 1 TB, OK)
+
+
+ Array: A
+ Interface Type: Solid State SATA
+ Unused Space: 0 MB (0.0%)
+ Used Space: 10.5 TB (100.0%)
+ Status: OK
+ MultiDomain Status: OK
+ Array Type: Data
+ Smart Path: disable
+
+
+ Logical Drive: 1
+ Size: 5.2 TB
+ Fault Tolerance: 1+0
+ Heads: 255
+ Sectors Per Track: 32
+ Cylinders: 65535
+ Strip Size: 256 KB
+ Full Stripe Size: 768 KB
+ Status: OK
+ MultiDomain Status: OK
+ Caching: Enabled
+ Unique Identifier: 600508B1001C158B69C0104DA29E6FF7
+ Disk Name: /dev/sda
+ Mount Points: / 18.6 GB Partition Number 2, /data 5.2 TB Partition Number 4
+ OS Status: LOCKED
+ Boot Volume: primary
+ Logical Drive Label: A9255E2C50123456789ABCDE7239
+ Mirror Group 1:
+ physicaldrive 1I:1:1 (port 1I:box 1:bay 1, SATA SSD, 1.9 TB, OK)
+ physicaldrive 1I:1:2 (port 1I:box 1:bay 2, SATA SSD, 1.9 TB, OK)
+ physicaldrive 1I:1:3 (port 1I:box 1:bay 3, SATA SSD, 1.9 TB, OK)
+ Mirror Group 2:
+ physicaldrive 2I:1:5 (port 2I:box 1:bay 5, SATA SSD, 1.9 TB, OK)
+ physicaldrive 2I:1:6 (port 2I:box 1:bay 6, SATA SSD, 1.9 TB, OK)
+ physicaldrive 2I:1:7 (port 2I:box 1:bay 7, SATA SSD, 1.9 TB, OK)
+ Drive Type: Data
+ LD Acceleration Method: Controller Cache
+
+
+ physicaldrive 1I:1:1
+ Port: 1I
+ Box: 1
+ Bay: 1
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: Solid State SATA
+ Size: 1.9 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/4096
+ Firmware Revision: XCV10110
+ Serial Number:REDACTED
+ WWID: REDACTED
+ Model: ATA INTEL SSDSC2KB01
+ SATA NCQ Capable: True
+         SATA NCQ Enabled: True
+ Current Temperature (C): 27
+ Maximum Temperature (C): 33
+ SSD Smart Trip Wearout: Not Supported
+ PHY Count: 1
+ PHY Transfer Rate: 6.0Gbps
+ Drive Authentication Status: OK
+ Carrier Application Version: 11
+ Carrier Bootloader Version: 6
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 1I:1:2
+ Port: 1I
+ Box: 1
+ Bay: 2
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: Solid State SATA
+ Size: 1.9 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/4096
+ Firmware Revision: XCV10110
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA INTEL SSDSC2KB01
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 28
+ Maximum Temperature (C): 33
+ SSD Smart Trip Wearout: Not Supported
+ PHY Count: 1
+ PHY Transfer Rate: 6.0Gbps
+ Drive Authentication Status: OK
+ Carrier Application Version: 11
+ Carrier Bootloader Version: 6
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 1I:1:3
+ Port: 1I
+ Box: 1
+ Bay: 3
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: Solid State SATA
+ Size: 1.9 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/4096
+ Firmware Revision: XCV10110
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA INTEL SSDSC2KB01
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 27
+ Maximum Temperature (C): 30
+ SSD Smart Trip Wearout: Not Supported
+ PHY Count: 1
+ PHY Transfer Rate: 6.0Gbps
+ Drive Authentication Status: OK
+ Carrier Application Version: 11
+ Carrier Bootloader Version: 6
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2I:1:5
+ Port: 2I
+ Box: 1
+ Bay: 5
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: Solid State SATA
+ Size: 1.9 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/4096
+ Firmware Revision: XCV10110
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA INTEL SSDSC2KB01
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 26
+ Maximum Temperature (C): 29
+ SSD Smart Trip Wearout: Not Supported
+ PHY Count: 1
+ PHY Transfer Rate: 6.0Gbps
+ Drive Authentication Status: OK
+ Carrier Application Version: 11
+ Carrier Bootloader Version: 6
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2I:1:6
+ Port: 2I
+ Box: 1
+ Bay: 6
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: Solid State SATA
+ Size: 1.9 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/4096
+ Firmware Revision: XCV10110
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA INTEL SSDSC2KB01
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 28
+ Maximum Temperature (C): 32
+ SSD Smart Trip Wearout: Not Supported
+ PHY Count: 1
+ PHY Transfer Rate: 6.0Gbps
+ Drive Authentication Status: OK
+ Carrier Application Version: 11
+ Carrier Bootloader Version: 6
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2I:1:7
+ Port: 2I
+ Box: 1
+ Bay: 7
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: Solid State SATA
+ Size: 1.9 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/4096
+ Firmware Revision: XCV10110
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA INTEL SSDSC2KB01
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 27
+ Maximum Temperature (C): 32
+ SSD Smart Trip Wearout: Not Supported
+ PHY Count: 1
+ PHY Transfer Rate: 6.0Gbps
+ Drive Authentication Status: OK
+ Carrier Application Version: 11
+ Carrier Bootloader Version: 6
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+
+
+ Array: B
+ Interface Type: SATA
+ Unused Space: 0 MB (0.0%)
+ Used Space: 1.8 TB (100.0%)
+ Status: OK
+ MultiDomain Status: OK
+ Array Type: Data
+ Smart Path: disable
+
+
+ Logical Drive: 2
+ Size: 931.5 GB
+ Fault Tolerance: 1
+ Heads: 255
+ Sectors Per Track: 32
+ Cylinders: 65535
+ Strip Size: 256 KB
+ Full Stripe Size: 256 KB
+ Status: OK
+ MultiDomain Status: OK
+ Caching: Enabled
+ Unique Identifier: 600508B1001C6BBD22BCA12CEDF36CB0
+ Disk Name: /dev/sdb
+ Mount Points: /data/pgsql/spaces/big 931.5 GB Partition Number 1
+ OS Status: LOCKED
+ Boot Volume: secondary
+ Logical Drive Label: A9254E3850123456789ABCDE368D
+ Mirror Group 1:
+ physicaldrive 1I:1:4 (port 1I:box 1:bay 4, SATA HDD, 1 TB, OK)
+ Mirror Group 2:
+ physicaldrive 2I:1:8 (port 2I:box 1:bay 8, SATA HDD, 1 TB, OK)
+ Drive Type: Data
+ LD Acceleration Method: Controller Cache
+
+
+ physicaldrive 1I:1:4
+ Port: 1I
+ Box: 1
+ Bay: 4
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/4096
+ Rotational Speed: 5400
+ Firmware Revision: 2BA30001
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA ST1000LM024 HN-M
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 30
+ Maximum Temperature (C): 35
+ PHY Count: 1
+ PHY Transfer Rate: 6.0Gbps
+ Drive Authentication Status: OK
+ Carrier Application Version: 11
+ Carrier Bootloader Version: 6
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 2I:1:8
+ Port: 2I
+ Box: 1
+ Bay: 8
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 1 TB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/4096
+ Rotational Speed: 5400
+ Firmware Revision: 2BA30001
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA ST1000LM024 HN-M
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 29
+ Maximum Temperature (C): 34
+ PHY Count: 1
+ PHY Transfer Rate: 6.0Gbps
+ Drive Authentication Status: OK
+ Carrier Application Version: 11
+ Carrier Bootloader Version: 6
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
diff --git a/src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P400i-unassigned.txt b/src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P400i-unassigned.txt
new file mode 100644
index 000000000..b674f26c2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/hpssa/testdata/ssacli-P400i-unassigned.txt
@@ -0,0 +1,207 @@
+Smart Array P400i in Slot 0 (Embedded)
+ Bus Interface: PCI
+ Slot: 0
+ Serial Number: REDACTED
+ Cache Serial Number: REDACTED
+ RAID 6 (ADG) Status: Enabled
+ Controller Status: OK
+ Hardware Revision: E
+ Firmware Version: 7.24-0
+ Rebuild Priority: Medium
+ Expand Priority: Medium
+ Surface Scan Delay: 15 secs
+ Surface Scan Mode: Idle
+ Parallel Surface Scan Supported: No
+ Elevator Sort: Enabled
+ Wait for Cache Room: Disabled
+ Surface Analysis Inconsistency Notification: Disabled
+ Post Prompt Timeout: 0 secs
+ Cache Board Present: True
+ Cache Status: Temporarily Disabled
+ Cache Status Details: Cache disabled; low batteries.
+ Cache Ratio: 25% Read / 75% Write
+ Drive Write Cache: Disabled
+ Total Cache Size: 256 MB
+ Total Cache Memory Available: 208 MB
+ No-Battery Write Cache: Disabled
+ Cache Backup Power Source: Batteries
+ Battery/Capacitor Count: 1
+ Battery/Capacitor Status: Failed (Replace Batteries)
+ SATA NCQ Supported: True
+ Number of Ports: 2 Internal only
+ Driver Name: cciss
+ Driver Version: 3.6.26
+ PCI Address (Domain:Bus:Device.Function): 0000:06:00.0
+ Port Max Phy Rate Limiting Supported: False
+ Host Serial Number: REDACTED
+ Sanitize Erase Supported: False
+ Primary Boot Volume: None
+ Secondary Boot Volume: None
+
+
+ Port Name: 1I
+ Port ID: 0
+ Port Connection Number: 0
+ SAS Address: 0000000000000000
+ Port Location: Internal
+
+ Port Name: 2I
+ Port ID: 1
+ Port Connection Number: 1
+ SAS Address: 0000000000000000
+ Port Location: Internal
+
+
+ Internal Drive Cage at Port 1I, Box 1, OK
+
+ Power Supply Status: Not Redundant
+ Drive Bays: 4
+ Port: 1I
+ Box: 1
+ Location: Internal
+
+ Physical Drives
+ physicaldrive 1I:1:1 (port 1I:box 1:bay 1, SATA HDD, 250 GB, OK)
+ physicaldrive 1I:1:2 (port 1I:box 1:bay 2, SATA HDD, 250 GB, OK)
+ physicaldrive 1I:1:3 (port 1I:box 1:bay 3, SATA HDD, 100 GB, OK)
+ physicaldrive 1I:1:4 (port 1I:box 1:bay 4, SATA HDD, 100 GB, OK)
+
+
+
+ Internal Drive Cage at Port 2I, Box 1, OK
+
+ Power Supply Status: Not Redundant
+ Drive Bays: 2
+ Port: 2I
+ Box: 1
+ Location: Internal
+
+ Physical Drives
+ None attached
+
+
+ Array: A
+ Interface Type: SATA
+ Unused Space: 0 MB (0.0%)
+ Used Space: 186.3 GB (100.0%)
+ Status: OK
+ Array Type: Data
+
+
+ Logical Drive: 1
+ Size: 93.1 GB
+ Fault Tolerance: 1
+ Heads: 255
+ Sectors Per Track: 32
+ Cylinders: 23934
+ Strip Size: 128 KB
+ Full Stripe Size: 128 KB
+ Status: OK
+ Caching: Enabled
+ Unique Identifier: 600508B1001038333220202020200004
+ Disk Name: /dev/cciss/c0d0
+ Mount Points: /boot 94 MB Partition Number 1, / 91.2 GB Partition Number 3
+ OS Status: LOCKED
+ Logical Drive Label: A00AD958PH89MQ7832 7E6D
+ Mirror Group 1:
+ physicaldrive 1I:1:3 (port 1I:box 1:bay 3, SATA HDD, 100 GB, OK)
+ Mirror Group 2:
+ physicaldrive 1I:1:4 (port 1I:box 1:bay 4, SATA HDD, 100 GB, OK)
+ Drive Type: Data
+ LD Acceleration Method: Controller Cache
+
+
+ physicaldrive 1I:1:3
+ Port: 1I
+ Box: 1
+ Bay: 3
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 100 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Firmware Revision: 6PB10362
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA INTEL SSDSA2BZ10
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 23
+ Maximum Temperature (C): 32
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 1I:1:4
+ Port: 1I
+ Box: 1
+ Bay: 4
+ Status: OK
+ Drive Type: Data Drive
+ Interface Type: SATA
+ Size: 100 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Firmware Revision: 6PB10362
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA INTEL SSDSA2BZ10
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 23
+ Maximum Temperature (C): 33
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+
+ Unassigned
+
+ physicaldrive 1I:1:1
+ Port: 1I
+ Box: 1
+ Bay: 1
+ Status: OK
+ Drive Type: Unassigned Drive
+ Interface Type: SATA
+ Size: 250 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Firmware Revision: 0001EXM1
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA ST250LT021-1AF14
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 28
+ Maximum Temperature (C): 36
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
+
+ physicaldrive 1I:1:2
+ Port: 1I
+ Box: 1
+ Bay: 2
+ Status: OK
+ Drive Type: Unassigned Drive
+ Interface Type: SATA
+ Size: 250 GB
+ Drive exposed to OS: False
+ Logical/Physical Block Size: 512/512
+ Firmware Revision: 0001EXM1
+ Serial Number: REDACTED
+ WWID: REDACTED
+ Model: ATA ST250LT021-1AF14
+ SATA NCQ Capable: True
+ SATA NCQ Enabled: True
+ Current Temperature (C): 28
+ Maximum Temperature (C): 36
+ PHY Count: 1
+ PHY Transfer Rate: 1.5Gbps
+ Sanitize Erase Supported: False
+ Shingled Magnetic Recording Support: None
diff --git a/src/go/plugin/go.d/modules/httpcheck/README.md b/src/go/plugin/go.d/modules/httpcheck/README.md
new file mode 120000
index 000000000..69f056137
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/README.md
@@ -0,0 +1 @@
+integrations/http_endpoints.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/httpcheck/charts.go b/src/go/plugin/go.d/modules/httpcheck/charts.go
new file mode 100644
index 000000000..376ed99a4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/charts.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package httpcheck
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioResponseTime = module.Priority + iota
+ prioResponseLength
+ prioResponseStatus
+ prioResponseInStatusDuration
+)
+
+var httpCheckCharts = module.Charts{
+ responseTimeChart.Copy(),
+ responseLengthChart.Copy(),
+ responseStatusChart.Copy(),
+ responseInStatusDurationChart.Copy(),
+}
+
+var responseTimeChart = module.Chart{
+ ID: "response_time",
+ Title: "HTTP Response Time",
+ Units: "ms",
+ Fam: "response",
+ Ctx: "httpcheck.response_time",
+ Priority: prioResponseTime,
+ Dims: module.Dims{
+ {ID: "time"},
+ },
+}
+
+var responseLengthChart = module.Chart{
+ ID: "response_length",
+ Title: "HTTP Response Body Length",
+ Units: "characters",
+ Fam: "response",
+ Ctx: "httpcheck.response_length",
+ Priority: prioResponseLength,
+ Dims: module.Dims{
+ {ID: "length"},
+ },
+}
+
+var responseStatusChart = module.Chart{
+ ID: "request_status",
+ Title: "HTTP Check Status",
+ Units: "boolean",
+ Fam: "status",
+ Ctx: "httpcheck.status",
+ Priority: prioResponseStatus,
+ Dims: module.Dims{
+ {ID: "success"},
+ {ID: "no_connection"},
+ {ID: "timeout"},
+ {ID: "redirect"},
+ {ID: "bad_content"},
+ {ID: "bad_status"},
+ {ID: "bad_header"},
+ },
+}
+
+var responseInStatusDurationChart = module.Chart{
+ ID: "current_state_duration",
+ Title: "HTTP Current State Duration",
+ Units: "seconds",
+ Fam: "status",
+ Ctx: "httpcheck.in_state",
+ Priority: prioResponseInStatusDuration,
+ Dims: module.Dims{
+ {ID: "in_state", Name: "time"},
+ },
+}
diff --git a/src/go/plugin/go.d/modules/httpcheck/collect.go b/src/go/plugin/go.d/modules/httpcheck/collect.go
new file mode 100644
index 000000000..fa0c96bc3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/collect.go
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package httpcheck
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+type reqErrCode int
+
+const (
+ codeTimeout reqErrCode = iota
+ codeRedirect
+ codeNoConnection
+)
+
+func (hc *HTTPCheck) collect() (map[string]int64, error) {
+ req, err := web.NewHTTPRequest(hc.Request)
+ if err != nil {
+		return nil, fmt.Errorf("error on creating HTTP request to %s: %v", hc.Request.URL, err)
+ }
+
+ if hc.CookieFile != "" {
+ if err := hc.readCookieFile(); err != nil {
+ return nil, fmt.Errorf("error on reading cookie file '%s': %v", hc.CookieFile, err)
+ }
+ }
+
+ start := time.Now()
+ resp, err := hc.httpClient.Do(req)
+ dur := time.Since(start)
+
+ defer closeBody(resp)
+
+ var mx metrics
+
+ if hc.isError(err, resp) {
+ hc.Debug(err)
+ hc.collectErrResponse(&mx, err)
+ } else {
+ mx.ResponseTime = durationToMs(dur)
+ hc.collectOKResponse(&mx, resp)
+ }
+
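+	// in_state measures how long (in seconds) the endpoint has stayed in its current
+	// status; it resets to a single update interval whenever the status changes.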
+ if hc.metrics.Status != mx.Status {
+ mx.InState = hc.UpdateEvery
+ } else {
+ mx.InState = hc.metrics.InState + hc.UpdateEvery
+ }
+ hc.metrics = mx
+
+ return stm.ToMap(mx), nil
+}
+
+func (hc *HTTPCheck) isError(err error, resp *http.Response) bool {
+ return err != nil && !(errors.Is(err, web.ErrRedirectAttempted) && hc.acceptedStatuses[resp.StatusCode])
+}
+
+func (hc *HTTPCheck) collectErrResponse(mx *metrics, err error) {
+ switch code := decodeReqError(err); code {
+ case codeNoConnection:
+ mx.Status.NoConnection = true
+ case codeTimeout:
+ mx.Status.Timeout = true
+ case codeRedirect:
+ mx.Status.Redirect = true
+ default:
+		panic(fmt.Sprintf("unknown request error code: %d", code))
+ }
+}
+
+func (hc *HTTPCheck) collectOKResponse(mx *metrics, resp *http.Response) {
+ hc.Debugf("endpoint '%s' returned %d (%s) HTTP status code", hc.URL, resp.StatusCode, resp.Status)
+
+ if !hc.acceptedStatuses[resp.StatusCode] {
+ mx.Status.BadStatusCode = true
+ return
+ }
+
+ bs, err := io.ReadAll(resp.Body)
+ // golang net/http closes body on redirect
+ if err != nil && !errors.Is(err, io.EOF) && !strings.Contains(err.Error(), "read on closed response body") {
+		hc.Warningf("error on reading response body: %v", err)
+ mx.Status.BadContent = true
+ return
+ }
+
+ mx.ResponseLength = len(bs)
+
+ if hc.reResponse != nil && !hc.reResponse.Match(bs) {
+ mx.Status.BadContent = true
+ return
+ }
+
+ if ok := hc.checkHeader(resp); !ok {
+ mx.Status.BadHeader = true
+ return
+ }
+
+ mx.Status.Success = true
+}
+
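+// checkHeader evaluates the configured header_match rules against the response headers:
+// a missing header satisfies only exclude rules, a present header without a value matcher
+// satisfies only include rules, and otherwise the (possibly negated) value matcher decides.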
+func (hc *HTTPCheck) checkHeader(resp *http.Response) bool {
+ for _, m := range hc.headerMatch {
+ value := resp.Header.Get(m.key)
+
+ var ok bool
+ switch {
+ case value == "":
+ ok = m.exclude
+ case m.valMatcher == nil:
+ ok = !m.exclude
+ default:
+ ok = m.valMatcher.MatchString(value)
+ }
+
+ if !ok {
+			hc.Debugf("header match: bad header: exclude '%v' key '%s' value '%s'", m.exclude, m.key, value)
+ return false
+ }
+ }
+
+ return true
+}
+
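+// decodeReqError classifies a request error: redirect for web.ErrRedirectAttempted,
+// timeout for net.Error timeouts, and no connection for everything else.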
+func decodeReqError(err error) reqErrCode {
+ if err == nil {
+ panic("nil error")
+ }
+
+ if errors.Is(err, web.ErrRedirectAttempted) {
+ return codeRedirect
+ }
+ var v net.Error
+ if errors.As(err, &v) && v.Timeout() {
+ return codeTimeout
+ }
+ return codeNoConnection
+}
+
+func (hc *HTTPCheck) readCookieFile() error {
+ if hc.CookieFile == "" {
+ return nil
+ }
+
+ fi, err := os.Stat(hc.CookieFile)
+ if err != nil {
+ return err
+ }
+
+ if hc.cookieFileModTime.Equal(fi.ModTime()) {
+ hc.Debugf("cookie file '%s' modification time has not changed, using previously read data", hc.CookieFile)
+ return nil
+ }
+
+ hc.Debugf("reading cookie file '%s'", hc.CookieFile)
+
+ jar, err := loadCookieJar(hc.CookieFile)
+ if err != nil {
+ return err
+ }
+
+ hc.httpClient.Jar = jar
+ hc.cookieFileModTime = fi.ModTime()
+
+ return nil
+}
+
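+// closeBody drains and closes the response body so the underlying connection
+// can be reused by the HTTP client's keep-alive pool.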
+func closeBody(resp *http.Response) {
+ if resp == nil || resp.Body == nil {
+ return
+ }
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+}
+
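+// durationToMs converts a time.Duration to an integer number of whole milliseconds.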
+func durationToMs(duration time.Duration) int {
+ return int(duration) / (int(time.Millisecond) / int(time.Nanosecond))
+}
diff --git a/src/go/plugin/go.d/modules/httpcheck/config_schema.json b/src/go/plugin/go.d/modules/httpcheck/config_schema.json
new file mode 100644
index 000000000..82ffc7cb5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/config_schema.json
@@ -0,0 +1,270 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "HTTPCheck collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the HTTP endpoint.",
+ "type": "string",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "method": {
+ "title": "Method",
+ "description": "The [HTTP method](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods) to use for the request. An empty string means `GET`.",
+ "type": "string",
+ "default": "GET",
+ "examples": [
+ "GET",
+ "POST",
+ "PUT",
+ "PATCH"
+ ]
+ },
+ "body": {
+ "title": "Body",
+ "description": "The body content to send along with the HTTP request (if applicable).",
+ "type": "string"
+ },
+ "status_accepted": {
+ "title": "Status code check",
+ "description": "Specifies the list of **HTTP response status codes** that are considered **acceptable**. Responses with status codes not included in this list will be categorized as 'bad status' in the status chart.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Code",
+ "type": "integer",
+ "minimum": 100,
+ "default": 200
+ },
+ "minItems": 1,
+ "uniqueItems": true,
+ "default": [
+ 200
+ ]
+ },
+ "response_match": {
+ "title": "Content check",
+ "description": "Specifies a [regular expression](https://regex101.com/) pattern to match against the content (body) of the HTTP response. This check is performed only if the response's status code is accepted.",
+ "type": "string"
+ },
+ "header_match": {
+ "title": "Header check",
+ "description": "Specifies a set of rules to check for specific key-value pairs in the HTTP headers of the response.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "exclude": {
+ "title": "Exclude",
+ "description": "Determines whether the rule checks for the presence or absence of the specified key-value pair in the HTTP headers.",
+ "type": "boolean"
+ },
+ "key": {
+ "title": "Header key",
+ "description": "Specifies the exact name of the HTTP header to check for.",
+ "type": "string"
+ },
+ "value": {
+ "title": "Header value pattern",
+ "description": "Specifies the [matcher pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme) to match against the value of the specified header.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "key",
+ "value"
+ ]
+ }
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "cookie_file": {
+ "title": "Cookie file",
+ "description": "Specifies the path to the file containing cookies. For more information about the cookie file format, see [cookie file format](https://everything.curl.dev/http/cookies/fileformat).",
+ "type": "string"
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ }
+ },
+ "required": [
+ "url",
+ "status_accepted"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "method",
+ "body"
+ ]
+ },
+ {
+ "title": "Checks",
+ "fields": [
+ "status_accepted",
+ "response_match",
+ "header_match"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password",
+ "cookie_file"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "url": {
+ "ui:placeholder": "http://127.0.0.1"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "method": {
+ "ui:placeholder": "GET"
+ },
+ "body": {
+ "ui:widget": "textarea"
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/httpcheck/cookiejar.go b/src/go/plugin/go.d/modules/httpcheck/cookiejar.go
new file mode 100644
index 000000000..628867caa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/cookiejar.go
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package httpcheck
+
+import (
+ "bufio"
+ "fmt"
+ "net/http"
+ "net/http/cookiejar"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "golang.org/x/net/publicsuffix"
+)
+
+// TODO: implement proper cookie auth support
+// relevant forum topic: https://community.netdata.cloud/t/howto-http-endpoint-collector-with-cookie-and-user-pass/3981/5?u=ilyam8
+
+// cookie file format: https://everything.curl.dev/http/cookies/fileformat
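+//
+// Each non-comment line carries 6 or 7 tab-separated fields, e.g. (illustrative values):
+//
+//	.example.com	TRUE	/	FALSE	1718011200	JSESSIONID	abc123
+//
+// i.e. domain, include-subdomains flag, path, secure flag, expiry (unix time), name and,
+// optionally, value.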
+func loadCookieJar(path string) (http.CookieJar, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = file.Close() }()
+
+ jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
+ if err != nil {
+ return nil, err
+ }
+
+ sc := bufio.NewScanner(file)
+
+ for sc.Scan() {
+ line, httpOnly := strings.CutPrefix(strings.TrimSpace(sc.Text()), "#HttpOnly_")
+
+ if strings.HasPrefix(line, "#") || line == "" {
+ continue
+ }
+
+ parts := strings.Fields(line)
+ if len(parts) != 6 && len(parts) != 7 {
+ return nil, fmt.Errorf("got %d fields in line '%s', want 6 or 7", len(parts), line)
+ }
+
+ for i, v := range parts {
+ parts[i] = strings.TrimSpace(v)
+ }
+
+ cookie := &http.Cookie{
+ Domain: parts[0],
+ Path: parts[2],
+ Name: parts[5],
+ HttpOnly: httpOnly,
+ }
+ cookie.Secure, err = strconv.ParseBool(parts[3])
+ if err != nil {
+ return nil, err
+ }
+ expires, err := strconv.ParseInt(parts[4], 10, 64)
+ if err != nil {
+ return nil, err
+ }
+ if expires > 0 {
+ cookie.Expires = time.Unix(expires, 0)
+ }
+ if len(parts) == 7 {
+ cookie.Value = parts[6]
+ }
+
+ scheme := "http"
+ if cookie.Secure {
+ scheme = "https"
+ }
+ cookieURL := &url.URL{
+ Scheme: scheme,
+ Host: cookie.Domain,
+ }
+
+ cookies := jar.Cookies(cookieURL)
+ cookies = append(cookies, cookie)
+ jar.SetCookies(cookieURL, cookies)
+ }
+
+ return jar, nil
+}
diff --git a/src/go/plugin/go.d/modules/httpcheck/httpcheck.go b/src/go/plugin/go.d/modules/httpcheck/httpcheck.go
new file mode 100644
index 000000000..1c7b6b1c0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/httpcheck.go
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package httpcheck
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "regexp"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("httpcheck", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *HTTPCheck {
+ return &HTTPCheck{
+ Config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ AcceptedStatuses: []int{200},
+ },
+
+ acceptedStatuses: make(map[int]bool),
+ }
+}
+
+type (
+ Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ AcceptedStatuses []int `yaml:"status_accepted" json:"status_accepted"`
+ ResponseMatch string `yaml:"response_match,omitempty" json:"response_match"`
+ CookieFile string `yaml:"cookie_file,omitempty" json:"cookie_file"`
+ HeaderMatch []headerMatchConfig `yaml:"header_match,omitempty" json:"header_match"`
+ }
+ headerMatchConfig struct {
+ Exclude bool `yaml:"exclude" json:"exclude"`
+ Key string `yaml:"key" json:"key"`
+ Value string `yaml:"value" json:"value"`
+ }
+)
+
+type HTTPCheck struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ acceptedStatuses map[int]bool
+ reResponse *regexp.Regexp
+ headerMatch []headerMatch
+ cookieFileModTime time.Time
+
+ metrics metrics
+}
+
+func (hc *HTTPCheck) Configuration() any {
+ return hc.Config
+}
+
+func (hc *HTTPCheck) Init() error {
+ if err := hc.validateConfig(); err != nil {
+ hc.Errorf("config validation: %v", err)
+ return err
+ }
+
+ hc.charts = hc.initCharts()
+
+ httpClient, err := hc.initHTTPClient()
+ if err != nil {
+ hc.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ hc.httpClient = httpClient
+
+ re, err := hc.initResponseMatchRegexp()
+ if err != nil {
+ hc.Errorf("init response match regexp: %v", err)
+ return err
+ }
+ hc.reResponse = re
+
+ hm, err := hc.initHeaderMatch()
+ if err != nil {
+ hc.Errorf("init header match: %v", err)
+ return err
+ }
+ hc.headerMatch = hm
+
+ for _, v := range hc.AcceptedStatuses {
+ hc.acceptedStatuses[v] = true
+ }
+
+ hc.Debugf("using URL %s", hc.URL)
+ hc.Debugf("using HTTP timeout %s", hc.Timeout.Duration())
+ hc.Debugf("using accepted HTTP statuses %v", hc.AcceptedStatuses)
+ if hc.reResponse != nil {
+ hc.Debugf("using response match regexp %s", hc.reResponse)
+ }
+
+ return nil
+}
+
+func (hc *HTTPCheck) Check() error {
+ mx, err := hc.collect()
+ if err != nil {
+ hc.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (hc *HTTPCheck) Charts() *module.Charts {
+ return hc.charts
+}
+
+func (hc *HTTPCheck) Collect() map[string]int64 {
+ mx, err := hc.collect()
+ if err != nil {
+ hc.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (hc *HTTPCheck) Cleanup() {
+ if hc.httpClient != nil {
+ hc.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/httpcheck/httpcheck_test.go b/src/go/plugin/go.d/modules/httpcheck/httpcheck_test.go
new file mode 100644
index 000000000..9ae0cf4ed
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/httpcheck_test.go
@@ -0,0 +1,604 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package httpcheck
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestHTTPCheck_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &HTTPCheck{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestHTTPCheck_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success if url set": {
+ wantFail: false,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: "http://127.0.0.1:38001"},
+ },
+ },
+ },
+ "fail with default": {
+ wantFail: true,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ "fail if wrong response regex": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: "http://127.0.0.1:38001"},
+ },
+ ResponseMatch: "(?:qwe))",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ httpCheck := New()
+ httpCheck.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, httpCheck.Init())
+ } else {
+ assert.NoError(t, httpCheck.Init())
+ }
+ })
+ }
+}
+
+func TestHTTPCheck_Charts(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) *HTTPCheck
+ wantCharts bool
+ }{
+ "no charts if not inited": {
+ wantCharts: false,
+ prepare: func(t *testing.T) *HTTPCheck {
+ return New()
+ },
+ },
+ "charts if inited": {
+ wantCharts: true,
+ prepare: func(t *testing.T) *HTTPCheck {
+ httpCheck := New()
+ httpCheck.URL = "http://127.0.0.1:38001"
+ require.NoError(t, httpCheck.Init())
+
+ return httpCheck
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ httpCheck := test.prepare(t)
+
+ if test.wantCharts {
+ assert.NotNil(t, httpCheck.Charts())
+ } else {
+ assert.Nil(t, httpCheck.Charts())
+ }
+ })
+ }
+}
+
+func TestHTTPCheck_Cleanup(t *testing.T) {
+ httpCheck := New()
+ assert.NotPanics(t, httpCheck.Cleanup)
+
+ httpCheck.URL = "http://127.0.0.1:38001"
+ require.NoError(t, httpCheck.Init())
+ assert.NotPanics(t, httpCheck.Cleanup)
+}
+
+func TestHTTPCheck_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (httpCheck *HTTPCheck, cleanup func())
+ wantFail bool
+ }{
+ "success case": {wantFail: false, prepare: prepareSuccessCase},
+ "timeout case": {wantFail: false, prepare: prepareTimeoutCase},
+ "redirect success": {wantFail: false, prepare: prepareRedirectSuccessCase},
+ "redirect fail": {wantFail: false, prepare: prepareRedirectFailCase},
+ "bad status case": {wantFail: false, prepare: prepareBadStatusCase},
+ "bad content case": {wantFail: false, prepare: prepareBadContentCase},
+ "no connection case": {wantFail: false, prepare: prepareNoConnectionCase},
+ "cookie auth case": {wantFail: false, prepare: prepareCookieAuthCase},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ httpCheck, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, httpCheck.Init())
+
+ if test.wantFail {
+ assert.Error(t, httpCheck.Check())
+ } else {
+ assert.NoError(t, httpCheck.Check())
+ }
+ })
+ }
+
+}
+
+func TestHTTPCheck_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (httpCheck *HTTPCheck, cleanup func())
+ update func(check *HTTPCheck)
+ wantMetrics map[string]int64
+ }{
+ "success case": {
+ prepare: prepareSuccessCase,
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 5,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 1,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "timeout case": {
+ prepare: prepareTimeoutCase,
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 0,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 0,
+ "time": 0,
+ "timeout": 1,
+ },
+ },
+ "redirect success case": {
+ prepare: prepareRedirectSuccessCase,
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 0,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 1,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "redirect fail case": {
+ prepare: prepareRedirectFailCase,
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 0,
+ "no_connection": 0,
+ "redirect": 1,
+ "success": 0,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "bad status case": {
+ prepare: prepareBadStatusCase,
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 1,
+ "in_state": 2,
+ "length": 0,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 0,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "bad content case": {
+ prepare: prepareBadContentCase,
+ wantMetrics: map[string]int64{
+ "bad_content": 1,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 17,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 0,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "no connection case": {
+ prepare: prepareNoConnectionCase,
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 0,
+ "no_connection": 1,
+ "redirect": 0,
+ "success": 0,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "header match include no value success case": {
+ prepare: prepareSuccessCase,
+ update: func(httpCheck *HTTPCheck) {
+ httpCheck.HeaderMatch = []headerMatchConfig{
+ {Key: "header-key2"},
+ }
+ },
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 5,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 1,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "header match include with value success case": {
+ prepare: prepareSuccessCase,
+ update: func(httpCheck *HTTPCheck) {
+ httpCheck.HeaderMatch = []headerMatchConfig{
+ {Key: "header-key2", Value: "= header-value"},
+ }
+ },
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 5,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 1,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "header match include no value bad headers case": {
+ prepare: prepareSuccessCase,
+ update: func(httpCheck *HTTPCheck) {
+ httpCheck.HeaderMatch = []headerMatchConfig{
+ {Key: "header-key99"},
+ }
+ },
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 1,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 5,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 0,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "header match include with value bad headers case": {
+ prepare: prepareSuccessCase,
+ update: func(httpCheck *HTTPCheck) {
+ httpCheck.HeaderMatch = []headerMatchConfig{
+ {Key: "header-key2", Value: "= header-value99"},
+ }
+ },
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 1,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 5,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 0,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "header match exclude no value success case": {
+ prepare: prepareSuccessCase,
+ update: func(httpCheck *HTTPCheck) {
+ httpCheck.HeaderMatch = []headerMatchConfig{
+ {Exclude: true, Key: "header-key99"},
+ }
+ },
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 5,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 1,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "header match exclude with value success case": {
+ prepare: prepareSuccessCase,
+ update: func(httpCheck *HTTPCheck) {
+ httpCheck.HeaderMatch = []headerMatchConfig{
+ {Exclude: true, Key: "header-key2", Value: "= header-value99"},
+ }
+ },
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 5,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 1,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "header match exclude no value bad headers case": {
+ prepare: prepareSuccessCase,
+ update: func(httpCheck *HTTPCheck) {
+ httpCheck.HeaderMatch = []headerMatchConfig{
+ {Exclude: true, Key: "header-key2"},
+ }
+ },
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 1,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 5,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 0,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "header match exclude with value bad headers case": {
+ prepare: prepareSuccessCase,
+ update: func(httpCheck *HTTPCheck) {
+ httpCheck.HeaderMatch = []headerMatchConfig{
+ {Exclude: true, Key: "header-key2", Value: "= header-value"},
+ }
+ },
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 1,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 5,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 0,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ "cookie auth case": {
+ prepare: prepareCookieAuthCase,
+ wantMetrics: map[string]int64{
+ "bad_content": 0,
+ "bad_header": 0,
+ "bad_status": 0,
+ "in_state": 2,
+ "length": 0,
+ "no_connection": 0,
+ "redirect": 0,
+ "success": 1,
+ "time": 0,
+ "timeout": 0,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ httpCheck, cleanup := test.prepare()
+ defer cleanup()
+
+ if test.update != nil {
+ test.update(httpCheck)
+ }
+
+ require.NoError(t, httpCheck.Init())
+
+ var mx map[string]int64
+
+ for i := 0; i < 2; i++ {
+ mx = httpCheck.Collect()
+ time.Sleep(time.Duration(httpCheck.UpdateEvery) * time.Second)
+ }
+
+ copyResponseTime(test.wantMetrics, mx)
+
+ require.Equal(t, test.wantMetrics, mx)
+ })
+ }
+}
+
+func prepareSuccessCase() (*HTTPCheck, func()) {
+ httpCheck := New()
+ httpCheck.UpdateEvery = 1
+ httpCheck.ResponseMatch = "match"
+
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("header-key1", "header-value")
+ w.Header().Set("header-key2", "header-value")
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte("match"))
+ }))
+
+ httpCheck.URL = srv.URL
+
+ return httpCheck, srv.Close
+}
+
+func prepareTimeoutCase() (*HTTPCheck, func()) {
+ httpCheck := New()
+ httpCheck.UpdateEvery = 1
+ httpCheck.Timeout = web.Duration(time.Millisecond * 100)
+
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ time.Sleep(httpCheck.Timeout.Duration() + time.Millisecond*100)
+ }))
+
+ httpCheck.URL = srv.URL
+
+ return httpCheck, srv.Close
+}
+
+func prepareRedirectSuccessCase() (*HTTPCheck, func()) {
+ httpCheck := New()
+ httpCheck.UpdateEvery = 1
+ httpCheck.NotFollowRedirect = true
+ httpCheck.AcceptedStatuses = []int{301}
+
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, "https://example.com", http.StatusMovedPermanently)
+ }))
+
+ httpCheck.URL = srv.URL
+
+ return httpCheck, srv.Close
+}
+
+func prepareRedirectFailCase() (*HTTPCheck, func()) {
+ httpCheck := New()
+ httpCheck.UpdateEvery = 1
+ httpCheck.NotFollowRedirect = true
+
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ http.Redirect(w, r, "https://example.com", http.StatusMovedPermanently)
+ }))
+
+ httpCheck.URL = srv.URL
+
+ return httpCheck, srv.Close
+}
+
+func prepareBadStatusCase() (*HTTPCheck, func()) {
+ httpCheck := New()
+ httpCheck.UpdateEvery = 1
+
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusBadGateway)
+ }))
+
+ httpCheck.URL = srv.URL
+
+ return httpCheck, srv.Close
+}
+
+func prepareBadContentCase() (*HTTPCheck, func()) {
+ httpCheck := New()
+ httpCheck.UpdateEvery = 1
+ httpCheck.ResponseMatch = "no match"
+
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusOK)
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+
+ httpCheck.URL = srv.URL
+
+ return httpCheck, srv.Close
+}
+
+func prepareNoConnectionCase() (*HTTPCheck, func()) {
+ httpCheck := New()
+ httpCheck.UpdateEvery = 1
+ httpCheck.URL = "http://127.0.0.1:38001"
+
+ return httpCheck, func() {}
+}
+
+func prepareCookieAuthCase() (*HTTPCheck, func()) {
+ httpCheck := New()
+ httpCheck.UpdateEvery = 1
+ httpCheck.CookieFile = "testdata/cookie.txt"
+
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ if _, err := r.Cookie("JSESSIONID"); err != nil {
+ w.WriteHeader(http.StatusUnauthorized)
+ } else {
+ w.WriteHeader(http.StatusOK)
+ }
+ }))
+
+ httpCheck.URL = srv.URL
+
+ return httpCheck, srv.Close
+}
+
+func copyResponseTime(dst, src map[string]int64) {
+ if v, ok := src["time"]; ok {
+ if _, ok := dst["time"]; ok {
+ dst["time"] = v
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/httpcheck/init.go b/src/go/plugin/go.d/modules/httpcheck/init.go
new file mode 100644
index 000000000..a4a3ae27d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/init.go
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package httpcheck
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "regexp"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+type headerMatch struct {
+ exclude bool
+ key string
+ valMatcher matcher.Matcher
+}
+
+func (hc *HTTPCheck) validateConfig() error {
+ if hc.URL == "" {
+ return errors.New("'url' not set")
+ }
+ return nil
+}
+
+func (hc *HTTPCheck) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(hc.Client)
+}
+
+func (hc *HTTPCheck) initResponseMatchRegexp() (*regexp.Regexp, error) {
+ if hc.ResponseMatch == "" {
+ return nil, nil
+ }
+ return regexp.Compile(hc.ResponseMatch)
+}
+
+func (hc *HTTPCheck) initHeaderMatch() ([]headerMatch, error) {
+ if len(hc.HeaderMatch) == 0 {
+ return nil, nil
+ }
+
+ var hms []headerMatch
+
+ for _, v := range hc.HeaderMatch {
+ if v.Key == "" {
+ continue
+ }
+
+ hm := headerMatch{
+ exclude: v.Exclude,
+ key: v.Key,
+ valMatcher: nil,
+ }
+
+ if v.Value != "" {
+ m, err := matcher.Parse(v.Value)
+ if err != nil {
+				return nil, fmt.Errorf("parse key '%s' value '%s': %v", v.Key, v.Value, err)
+ }
+ if v.Exclude {
+ m = matcher.Not(m)
+ }
+ hm.valMatcher = m
+ }
+
+ hms = append(hms, hm)
+ }
+
+ return hms, nil
+}
+
+func (hc *HTTPCheck) initCharts() *module.Charts {
+ charts := httpCheckCharts.Copy()
+
+ for _, chart := range *charts {
+ chart.Labels = []module.Label{
+ {Key: "url", Value: hc.URL},
+ }
+ }
+
+ return charts
+}
diff --git a/src/go/plugin/go.d/modules/httpcheck/integrations/http_endpoints.md b/src/go/plugin/go.d/modules/httpcheck/integrations/http_endpoints.md
new file mode 100644
index 000000000..b94735dee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/integrations/http_endpoints.md
@@ -0,0 +1,364 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/httpcheck/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/httpcheck/metadata.yaml"
+sidebar_label: "HTTP Endpoints"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HTTP Endpoints
+
+
+<img src="https://netdata.cloud/img/globe.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: httpcheck
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the availability and response time of HTTP servers.
+
+Possible statuses:
+
+| Status | Description |
+|---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| success | HTTP request completed successfully with a status code matching the configured `status_accepted` range (default: 200), and the response body and headers (if configured) match expectations. |
+| timeout | HTTP request timed out before receiving a response (default: 1 second). |
+| no_connection | Failed to establish a connection to the target. |
+| redirect | Received a redirect response (3xx status code) while `not_follow_redirects` is configured. |
+| bad_status | HTTP request completed with a status code outside the configured `status_accepted` range (default: non-200). |
+| bad_content | HTTP request completed successfully but the response body does not match the expected content (when using `response_match`). |
+| bad_header    | HTTP request completed successfully but the response headers do not match the expected values (when using `header_match`). |
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per target
+
+The metrics refer to the monitored target.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| url | url value that is set in the configuration file. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| httpcheck.response_time | time | ms |
+| httpcheck.response_length | length | characters |
+| httpcheck.status | success, timeout, redirect, no_connection, bad_content, bad_header, bad_status | boolean |
+| httpcheck.in_state | time | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/httpcheck.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/httpcheck.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| status_accepted | HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart. | [200] | no |
+| response_match | If the status code is accepted, the content of the response will be matched against this regular expression. | | no |
+| header_match | This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response. | [] | no |
+| header_match.exclude | This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it. | no | no |
+| header_match.key | The exact name of the HTTP header to check for. | | yes |
+| header_match.value | The [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) to match against the value of the specified header. | | no |
+| cookie_file | Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat). | | no |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+
+```
+</details>
+
+##### With HTTP request headers
+
+Configuration with HTTP request headers that will be sent by the client.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ headers:
+ Host: localhost:8080
+ User-Agent: netdata/go.d.plugin
+ Accept: */*
+
+```
+</details>
+
+##### With `status_accepted`
+
+A basic example configuration with non-default status_accepted.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ status_accepted:
+ - 200
+ - 204
+
+```
+</details>
+
+##### With `header_match`
+
+Example configurations with `header_match`. See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) syntax.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ # The "X-Robots-Tag" header must be present in the HTTP response header,
+ # but the value of the header does not matter.
+ # This config checks for the presence of the header regardless of its value.
+ - name: local
+ url: http://127.0.0.1:8080
+ header_match:
+ - key: X-Robots-Tag
+
+ # The "X-Robots-Tag" header must be present in the HTTP response header
+ # only if its value is equal to "noindex, nofollow".
+ # This config checks both the presence of the header and its value.
+ - name: local
+ url: http://127.0.0.1:8080
+ header_match:
+ - key: X-Robots-Tag
+ value: '= noindex,nofollow'
+
+ # The "X-Robots-Tag" header must not be present in the HTTP response header
+ # but the value of the header does not matter.
+ # This config checks for the presence of the header regardless of its value.
+ - name: local
+ url: http://127.0.0.1:8080
+ header_match:
+ - key: X-Robots-Tag
+ exclude: yes
+
+ # The "X-Robots-Tag" header must not be present in the HTTP response header
+ # only if its value is equal to "noindex, nofollow".
+ # This config checks both the presence of the header and its value.
+ - name: local
+ url: http://127.0.0.1:8080
+ header_match:
+ - key: X-Robots-Tag
+ exclude: yes
+ value: '= noindex,nofollow'
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:8080
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+
+ - name: remote
+ url: http://192.0.2.1:8080
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `httpcheck` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m httpcheck
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `httpcheck` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep httpcheck
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep httpcheck /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep httpcheck
+```
+
+
diff --git a/src/go/plugin/go.d/modules/httpcheck/metadata.yaml b/src/go/plugin/go.d/modules/httpcheck/metadata.yaml
new file mode 100644
index 000000000..f34993b5e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/metadata.yaml
@@ -0,0 +1,303 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-httpcheck
+ plugin_name: go.d.plugin
+ module_name: httpcheck
+ monitored_instance:
+ name: HTTP Endpoints
+ link: ""
+ icon_filename: globe.svg
+ categories:
+ - data-collection.synthetic-checks
+ keywords:
+ - webserver
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+        This collector monitors the availability and response time of HTTP servers.
+
+ Possible statuses:
+
+ | Status | Description |
+ |---------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+ | success | HTTP request completed successfully with a status code matching the configured `status_accepted` range (default: 200), and the response body and headers (if configured) match expectations. |
+ | timeout | HTTP request timed out before receiving a response (default: 1 second). |
+ | no_connection | Failed to establish a connection to the target. |
+ | redirect | Received a redirect response (3xx status code) while `not_follow_redirects` is configured. |
+ | bad_status | HTTP request completed with a status code outside the configured `status_accepted` range (default: non-200). |
+ | bad_content | HTTP request completed successfully but the response body does not match the expected content (when using `response_match`). |
+        | bad_header    | HTTP request completed successfully but the response headers do not match the expected values (when using `header_match`). |
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/httpcheck.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: ""
+ required: true
+ - name: status_accepted
+ description: "HTTP accepted response statuses. Anything else will result in 'bad status' in the status chart."
+ default_value: "[200]"
+ required: false
+ - name: response_match
+ description: If the status code is accepted, the content of the response will be matched against this regular expression.
+ default_value: ""
+ required: false
+            - name: header_match
+ description: "This option defines a set of rules that check for specific key-value pairs in the HTTP headers of the response."
+ default_value: "[]"
+ required: false
+            - name: header_match.exclude
+ description: "This option determines whether the rule should check for the presence of the specified key-value pair or the absence of it."
+ default_value: false
+ required: false
+            - name: header_match.key
+ description: "The exact name of the HTTP header to check for."
+ default_value: ""
+ required: true
+            - name: header_match.value
+ description: "The [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) to match against the value of the specified header."
+ default_value: ""
+ required: false
+ - name: cookie_file
+ description: Path to cookie file. See [cookie file format](https://everything.curl.dev/http/cookies/fileformat).
+ default_value: ""
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ - name: With HTTP request headers
+ description: Configuration with HTTP request headers that will be sent by the client.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ headers:
+ Host: localhost:8080
+ User-Agent: netdata/go.d.plugin
+ Accept: */*
+ - name: With `status_accepted`
+ description: A basic example configuration with non-default status_accepted.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ status_accepted:
+ - 200
+ - 204
+ - name: With `header_match`
+ description: Example configurations with `header_match`. See the value [pattern](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format) syntax.
+ config: |
+ jobs:
+ # The "X-Robots-Tag" header must be present in the HTTP response header,
+ # but the value of the header does not matter.
+ # This config checks for the presence of the header regardless of its value.
+ - name: local
+ url: http://127.0.0.1:8080
+ header_match:
+ - key: X-Robots-Tag
+
+ # The "X-Robots-Tag" header must be present in the HTTP response header
+ # only if its value is equal to "noindex, nofollow".
+ # This config checks both the presence of the header and its value.
+ - name: local
+ url: http://127.0.0.1:8080
+ header_match:
+ - key: X-Robots-Tag
+ value: '= noindex,nofollow'
+
+ # The "X-Robots-Tag" header must not be present in the HTTP response header
+ # but the value of the header does not matter.
+ # This config checks for the presence of the header regardless of its value.
+ - name: local
+ url: http://127.0.0.1:8080
+ header_match:
+ - key: X-Robots-Tag
+ exclude: yes
+
+ # The "X-Robots-Tag" header must not be present in the HTTP response header
+ # only if its value is equal to "noindex, nofollow".
+ # This config checks both the presence of the header and its value.
+ - name: local
+ url: http://127.0.0.1:8080
+ header_match:
+ - key: X-Robots-Tag
+ exclude: yes
+ value: '= noindex,nofollow'
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: |
+ Do not validate server certificate chain and hostname.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:8080
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+
+ - name: remote
+ url: http://192.0.2.1:8080
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: target
+ description: The metrics refer to the monitored target.
+ labels:
+ - name: url
+ description: url value that is set in the configuration file.
+ metrics:
+ - name: httpcheck.response_time
+ description: HTTP Response Time
+ unit: ms
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: httpcheck.response_length
+ description: HTTP Response Body Length
+ unit: characters
+ chart_type: line
+ dimensions:
+ - name: length
+ - name: httpcheck.status
+ description: HTTP Check Status
+ unit: boolean
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: timeout
+ - name: redirect
+ - name: no_connection
+ - name: bad_content
+ - name: bad_header
+ - name: bad_status
+ - name: httpcheck.in_state
+ description: HTTP Current State Duration
+ unit: boolean
+ chart_type: line
+ dimensions:
+ - name: time
diff --git a/src/go/plugin/go.d/modules/httpcheck/metrics.go b/src/go/plugin/go.d/modules/httpcheck/metrics.go
new file mode 100644
index 000000000..676346fa0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/metrics.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package httpcheck
+
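+// metrics holds the values produced by a single check; the `stm` struct tags define
+// the keys (e.g. "time", "in_state", "length") that feed the chart dimensions.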
+type metrics struct {
+ Status status `stm:""`
+ InState int `stm:"in_state"`
+ ResponseTime int `stm:"time"`
+ ResponseLength int `stm:"length"`
+}
+
+type status struct {
+ Success bool `stm:"success"` // No error on the request, reading the body, or checking its content
+ Timeout bool `stm:"timeout"`
+ Redirect bool `stm:"redirect"`
+ BadContent bool `stm:"bad_content"`
+ BadStatusCode bool `stm:"bad_status"`
+ BadHeader bool `stm:"bad_header"`
+ NoConnection bool `stm:"no_connection"` // Any other error, treated as "no connection"
+}
diff --git a/src/go/plugin/go.d/modules/httpcheck/testdata/config.json b/src/go/plugin/go.d/modules/httpcheck/testdata/config.json
new file mode 100644
index 000000000..649393cdd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/testdata/config.json
@@ -0,0 +1,32 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "status_accepted": [
+ 123
+ ],
+ "response_match": "ok",
+ "cookie_file": "ok",
+ "header_match": [
+ {
+ "exclude": true,
+ "key": "ok",
+ "value": "ok"
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/httpcheck/testdata/config.yaml b/src/go/plugin/go.d/modules/httpcheck/testdata/config.yaml
new file mode 100644
index 000000000..1a66590e6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/testdata/config.yaml
@@ -0,0 +1,25 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+status_accepted:
+ - 123
+response_match: "ok"
+cookie_file: "ok"
+header_match:
+ - exclude: yes
+ key: "ok"
+ value: "ok"
diff --git a/src/go/plugin/go.d/modules/httpcheck/testdata/cookie.txt b/src/go/plugin/go.d/modules/httpcheck/testdata/cookie.txt
new file mode 100644
index 000000000..2504c6ffa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/httpcheck/testdata/cookie.txt
@@ -0,0 +1,5 @@
+# HTTP Cookie File
+# Generated by Wget on 2023-03-20 21:38:07.
+# Edit at your own risk.
+
+127.0.0.1 FALSE / FALSE 0 JSESSIONID 23B508B767344EA167A4EB9B4DA4E59F \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/icecast/README.md b/src/go/plugin/go.d/modules/icecast/README.md
new file mode 120000
index 000000000..db3c1b572
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/README.md
@@ -0,0 +1 @@
+integrations/icecast.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/icecast/charts.go b/src/go/plugin/go.d/modules/icecast/charts.go
new file mode 100644
index 000000000..26d3fe100
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/charts.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package icecast
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioSourceListeners = module.Priority + iota
+)
+
+var sourceChartsTmpl = module.Charts{
+ sourceListenersChartTmpl.Copy(),
+}
+
+var (
+ sourceListenersChartTmpl = module.Chart{
+ ID: "icecast_%s_listeners",
+ Title: "Icecast Listeners",
+ Units: "listeners",
+ Fam: "listeners",
+ Ctx: "icecast.listeners",
+ Type: module.Line,
+ Priority: prioSourceListeners,
+ Dims: module.Dims{
+ {ID: "source_%s_listeners", Name: "listeners"},
+ },
+ }
+)
+
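+// addSourceCharts instantiates the listeners chart template for a newly seen source
+// and labels it with the source name.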
+func (ic *Icecast) addSourceCharts(name string) {
+ chart := sourceListenersChartTmpl.Copy()
+
+ chart.ID = fmt.Sprintf(chart.ID, cleanSource(name))
+ chart.Labels = []module.Label{
+ {Key: "source", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+
+ if err := ic.Charts().Add(chart); err != nil {
+ ic.Warning(err)
+ }
+}
+
+func (ic *Icecast) removeSourceCharts(name string) {
+ px := fmt.Sprintf("icecast_%s_", cleanSource(name))
+ for _, chart := range *ic.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func cleanSource(name string) string {
+ r := strings.NewReplacer(" ", "_", ".", "_", ",", "_")
+ return r.Replace(name)
+}
diff --git a/src/go/plugin/go.d/modules/icecast/collect.go b/src/go/plugin/go.d/modules/icecast/collect.go
new file mode 100644
index 000000000..102ad31e5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/collect.go
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package icecast
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ urlPathServerStats = "/status-json.xsl" // https://icecast.org/docs/icecast-trunk/server_stats/
+)
+
+func (ic *Icecast) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := ic.collectServerStats(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (ic *Icecast) collectServerStats(mx map[string]int64) error {
+ stats, err := ic.queryServerStats()
+ if err != nil {
+ return err
+ }
+ if stats.IceStats == nil {
+ return fmt.Errorf("unexpected response: no icestats found")
+ }
+ if len(stats.IceStats.Source) == 0 {
+ return fmt.Errorf("no icecast sources found")
+ }
+
+ seen := make(map[string]bool)
+
+ for _, src := range stats.IceStats.Source {
+ name := src.ServerName
+ if name == "" {
+ continue
+ }
+
+ seen[name] = true
+
+ if !ic.seenSources[name] {
+ ic.seenSources[name] = true
+ ic.addSourceCharts(name)
+ }
+
+ px := fmt.Sprintf("source_%s_", name)
+
+ mx[px+"listeners"] = src.Listeners
+ }
+
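+ // drop charts for sources that are no longer reported by the server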
+ for name := range ic.seenSources {
+ if !seen[name] {
+ delete(ic.seenSources, name)
+ ic.removeSourceCharts(name)
+ }
+ }
+
+ return nil
+}
+
+func (ic *Icecast) queryServerStats() (*serverStats, error) {
+ req, err := web.NewHTTPRequestWithPath(ic.Request, urlPathServerStats)
+ if err != nil {
+ return nil, err
+ }
+
+ var stats serverStats
+
+ if err := ic.doOKDecode(req, &stats); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
+func (ic *Icecast) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := ic.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/icecast/config_schema.json b/src/go/plugin/go.d/modules/icecast/config_schema.json
new file mode 100644
index 000000000..3abda6e75
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/config_schema.json
@@ -0,0 +1,177 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Icecast collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL where the Icecast API can be accessed.",
+ "type": "string",
+ "default": "http://127.0.0.1:8000",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/icecast/icecast.go b/src/go/plugin/go.d/modules/icecast/icecast.go
new file mode 100644
index 000000000..e999421f7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/icecast.go
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package icecast
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("icecast", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Icecast {
+ return &Icecast{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8000",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 1),
+ },
+ },
+ },
+ charts: &module.Charts{},
+
+ seenSources: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Icecast struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ seenSources map[string]bool
+
+ httpClient *http.Client
+}
+
+func (ic *Icecast) Configuration() any {
+ return ic.Config
+}
+
+func (ic *Icecast) Init() error {
+ if ic.URL == "" {
+ ic.Error("URL not set")
+ return errors.New("url not set")
+ }
+
+ client, err := web.NewHTTPClient(ic.Client)
+ if err != nil {
+ ic.Error(err)
+ return err
+ }
+ ic.httpClient = client
+
+ ic.Debugf("using URL %s", ic.URL)
+ ic.Debugf("using timeout: %s", ic.Timeout)
+
+ return nil
+}
+
+func (ic *Icecast) Check() error {
+ mx, err := ic.collect()
+ if err != nil {
+ ic.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (ic *Icecast) Charts() *module.Charts {
+ return ic.charts
+}
+
+func (ic *Icecast) Collect() map[string]int64 {
+ mx, err := ic.collect()
+ if err != nil {
+ ic.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (ic *Icecast) Cleanup() {
+ if ic.httpClient != nil {
+ ic.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/icecast/icecast_test.go b/src/go/plugin/go.d/modules/icecast/icecast_test.go
new file mode 100644
index 000000000..40132986d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/icecast_test.go
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package icecast
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataServerStatsMultiSource, _ = os.ReadFile("testdata/stats_multi_source.json")
+ dataServerStatsSingleSource, _ = os.ReadFile("testdata/stats_single_source.json")
+ dataServerStatsNoSources, _ = os.ReadFile("testdata/stats_no_sources.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataServerStats": dataServerStatsMultiSource,
+ "dataServerStatsSingleSource": dataServerStatsSingleSource,
+ "dataServerStatsNoSources": dataServerStatsNoSources,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestIcecast_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Icecast{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestIcecast_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ icecast := New()
+ icecast.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, icecast.Init())
+ } else {
+ assert.NoError(t, icecast.Init())
+ }
+ })
+ }
+}
+
+func TestIcecast_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestIcecast_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (*Icecast, func())
+ }{
+ "success multiple sources": {
+ wantFail: false,
+ prepare: prepareCaseMultipleSources,
+ },
+ "success single source": {
+ wantFail: false,
+ prepare: prepareCaseSingleSource,
+ },
+ "fails on no sources": {
+ wantFail: true,
+ prepare: prepareCaseNoSources,
+ },
+ "fails on unexpected json response": {
+ wantFail: true,
+ prepare: prepareCaseUnexpectedJsonResponse,
+ },
+ "fails on invalid format response": {
+ wantFail: true,
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ icecast, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, icecast.Check())
+ } else {
+ assert.NoError(t, icecast.Check())
+ }
+ })
+ }
+}
+
+func TestIcecast_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (*Icecast, func())
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success multiple sources": {
+ prepare: prepareCaseMultipleSources,
+ wantCharts: len(sourceChartsTmpl) * 2,
+ wantMetrics: map[string]int64{
+ "source_abc_listeners": 1,
+ "source_efg_listeners": 10,
+ },
+ },
+ "success single source": {
+ prepare: prepareCaseSingleSource,
+ wantCharts: len(sourceChartsTmpl) * 1,
+ wantMetrics: map[string]int64{
+ "source_abc_listeners": 1,
+ },
+ },
+ "fails on no sources": {
+ prepare: prepareCaseNoSources,
+ },
+ "fails on unexpected json response": {
+ prepare: prepareCaseUnexpectedJsonResponse,
+ },
+ "fails on invalid format response": {
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ icecast, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := icecast.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ assert.Equal(t, test.wantCharts, len(*icecast.Charts()))
+ module.TestMetricsHasAllChartsDims(t, icecast.Charts(), mx)
+ }
+ })
+ }
+}
+
+func prepareCaseMultipleSources(t *testing.T) (*Icecast, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathServerStats:
+ _, _ = w.Write(dataServerStatsMultiSource)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ icecast := New()
+ icecast.URL = srv.URL
+ require.NoError(t, icecast.Init())
+
+ return icecast, srv.Close
+}
+
+func prepareCaseSingleSource(t *testing.T) (*Icecast, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathServerStats:
+ _, _ = w.Write(dataServerStatsSingleSource)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ icecast := New()
+ icecast.URL = srv.URL
+ require.NoError(t, icecast.Init())
+
+ return icecast, srv.Close
+}
+
+func prepareCaseNoSources(t *testing.T) (*Icecast, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathServerStats:
+ _, _ = w.Write(dataServerStatsNoSources)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ icecast := New()
+ icecast.URL = srv.URL
+ require.NoError(t, icecast.Init())
+
+ return icecast, srv.Close
+}
+
+func prepareCaseUnexpectedJsonResponse(t *testing.T) (*Icecast, func()) {
+ t.Helper()
+ resp := `
+{
+ "elephant": {
+ "burn": false,
+ "mountain": true,
+ "fog": false,
+ "skin": -1561907625,
+ "burst": "anyway",
+ "shadow": 1558616893
+ },
+ "start": "ever",
+ "base": 2093056027,
+ "mission": -2007590351,
+ "victory": 999053756,
+ "die": false
+}
+`
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(resp))
+ }))
+
+ icecast := New()
+ icecast.URL = srv.URL
+ require.NoError(t, icecast.Init())
+
+ return icecast, srv.Close
+}
+
+func prepareCaseInvalidFormatResponse(t *testing.T) (*Icecast, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ icecast := New()
+ icecast.URL = srv.URL
+ require.NoError(t, icecast.Init())
+
+ return icecast, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*Icecast, func()) {
+ t.Helper()
+ icecast := New()
+ icecast.URL = "http://127.0.0.1:65001"
+ require.NoError(t, icecast.Init())
+
+ return icecast, func() {}
+}
diff --git a/src/go/plugin/go.d/modules/icecast/integrations/icecast.md b/src/go/plugin/go.d/modules/icecast/integrations/icecast.md
new file mode 100644
index 000000000..9ff06a4dd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/integrations/icecast.md
@@ -0,0 +1,226 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/icecast/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/icecast/metadata.yaml"
+sidebar_label: "Icecast"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Media Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Icecast
+
+
+<img src="https://netdata.cloud/img/icecast.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: icecast
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Icecast listener counts.
+
+It uses the Icecast server statistics `status-json.xsl` endpoint to retrieve the metrics.
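+
+As a quick sanity check, you can query the same endpoint yourself (shown here for the default port 8000; adjust the host and port to match your setup):
+
+```bash
+curl http://127.0.0.1:8000/status-json.xsl
+```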
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Icecast instances running on localhost that are listening on port 8000.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Icecast source
+
+These metrics refer to an Icecast source.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| source | Source name. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| icecast.listeners | listeners | listeners |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Icecast minimum version
+
+Requires Icecast version 2.4.0 or later.
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/icecast.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/icecast.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8000 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8000
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8000
+
+ - name: remote
+ url: http://192.0.2.1:8000
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `icecast` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m icecast
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `icecast` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep icecast
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep icecast /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep icecast
+```
+
+
diff --git a/src/go/plugin/go.d/modules/icecast/metadata.yaml b/src/go/plugin/go.d/modules/icecast/metadata.yaml
new file mode 100644
index 000000000..bcaa4b07c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/metadata.yaml
@@ -0,0 +1,169 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ plugin_name: go.d.plugin
+ module_name: icecast
+ monitored_instance:
+ name: Icecast
+ link: "https://icecast.org/"
+ categories:
+ - data-collection.media-streaming-servers
+ icon_filename: "icecast.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - icecast
+ - streaming
+ - media
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors Icecast listener counts."
+ method_description: "It uses the Icecast server statistics `status-json.xsl` endpoint to retrieve the metrics."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: By default, it detects Icecast instances running on localhost that are listening on port 8000.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: "Icecast minimum version"
+ description: "Requires Icecast version 2.4.0 or later."
+ configuration:
+ file:
+ name: go.d/icecast.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8000
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: Config
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8000
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8000
+
+ - name: remote
+ url: http://192.0.2.1:8000
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: Icecast source
+ description: "These metrics refer to an Icecast source."
+ labels:
+ - name: source
+ description: Source name.
+ metrics:
+ - name: icecast.listeners
+ description: Icecast Listeners
+ unit: "listeners"
+ chart_type: line
+ dimensions:
+ - name: listeners
diff --git a/src/go/plugin/go.d/modules/icecast/server_stats.go b/src/go/plugin/go.d/modules/icecast/server_stats.go
new file mode 100644
index 000000000..404d12555
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/server_stats.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package icecast
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type (
+ serverStats struct {
+ IceStats *struct {
+ Source iceSource `json:"source"`
+ } `json:"icestats"`
+ }
+ iceSource []sourceStats
+ sourceStats struct {
+ ServerName string `json:"server_name"`
+ StreamStart string `json:"stream_start"`
+ Listeners int64 `json:"listeners"`
+ }
+)
+
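+// Icecast reports "source" as a single JSON object when one mount point is active and
+// as an array when there are several (compare testdata/stats_single_source.json and
+// testdata/stats_multi_source.json), so both shapes are normalized into a slice here.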
+func (i *iceSource) UnmarshalJSON(data []byte) error {
+ var v any
+ if err := json.Unmarshal(data, &v); err != nil {
+ return err
+ }
+
+ switch v.(type) {
+ case []any:
+ type plain iceSource
+ return json.Unmarshal(data, (*plain)(i))
+ case map[string]any:
+ var s sourceStats
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ *i = []sourceStats{s}
+ default:
+ return fmt.Errorf("invalid source data type: expected array or object")
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/icecast/testdata/config.json b/src/go/plugin/go.d/modules/icecast/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/icecast/testdata/config.yaml b/src/go/plugin/go.d/modules/icecast/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/icecast/testdata/stats_multi_source.json b/src/go/plugin/go.d/modules/icecast/testdata/stats_multi_source.json
new file mode 100644
index 000000000..0a9c45151
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/testdata/stats_multi_source.json
@@ -0,0 +1,46 @@
+{
+ "icestats": {
+ "admin": "icemaster@localhost",
+ "host": "localhost",
+ "location": "Earth",
+ "server_id": "Icecast 2.4.4",
+ "server_start": "Wed, 17 Jul 2024 11:27:40 +0300",
+ "server_start_iso8601": "2024-07-17T11:27:40+0300",
+ "source": [
+ {
+ "audio_info": "ice-bitrate=128;ice-channels=2;ice-samplerate=44100",
+ "genre": "(null)",
+ "ice-bitrate": 128,
+ "ice-channels": 2,
+ "ice-samplerate": 44100,
+ "listener_peak": 2,
+ "listeners": 1,
+ "listenurl": "http://localhost:8000/line.nsv",
+ "server_description": "(null)",
+ "server_name": "abc",
+ "server_type": "audio/mpeg",
+ "server_url": "(null)",
+ "stream_start": "Wed, 17 Jul 2024 12:10:20 +0300",
+ "stream_start_iso8601": "2024-07-17T12:10:20+0300",
+ "dummy": null
+ },
+ {
+ "audio_info": "ice-bitrate=128;ice-channels=2;ice-samplerate=44100",
+ "genre": "(null)",
+ "ice-bitrate": 128,
+ "ice-channels": 2,
+ "ice-samplerate": 44100,
+ "listener_peak": 10,
+ "listeners": 10,
+ "listenurl": "http://localhost:8000/lineb.nsv",
+ "server_description": "(null)",
+ "server_name": "efg",
+ "server_type": "audio/mpeg",
+ "server_url": "(null)",
+ "stream_start": "Wed, 17 Jul 2024 12:10:20 +0300",
+ "stream_start_iso8601": "2024-07-17T12:10:20+0300",
+ "dummy": null
+ }
+ ]
+ }
+}
diff --git a/src/go/plugin/go.d/modules/icecast/testdata/stats_no_sources.json b/src/go/plugin/go.d/modules/icecast/testdata/stats_no_sources.json
new file mode 100644
index 000000000..3af4fbe37
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/testdata/stats_no_sources.json
@@ -0,0 +1,11 @@
+{
+ "icestats": {
+ "admin": "icemaster@localhost",
+ "host": "localhost",
+ "location": "Earth",
+ "server_id": "Icecast 2.4.4",
+ "server_start": "Wed, 17 Jul 2024 11:27:40 +0300",
+ "server_start_iso8601": "2024-07-17T11:27:40+0300",
+ "dummy": null
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/icecast/testdata/stats_single_source.json b/src/go/plugin/go.d/modules/icecast/testdata/stats_single_source.json
new file mode 100644
index 000000000..9d14e7d64
--- /dev/null
+++ b/src/go/plugin/go.d/modules/icecast/testdata/stats_single_source.json
@@ -0,0 +1,27 @@
+{
+ "icestats": {
+ "admin": "icemaster@localhost",
+ "host": "localhost",
+ "location": "Earth",
+ "server_id": "Icecast 2.4.4",
+ "server_start": "Wed, 17 Jul 2024 11:27:40 +0300",
+ "server_start_iso8601": "2024-07-17T11:27:40+0300",
+ "source": {
+ "audio_info": "ice-bitrate=128;ice-channels=2;ice-samplerate=44100",
+ "genre": "(null)",
+ "ice-bitrate": 128,
+ "ice-channels": 2,
+ "ice-samplerate": 44100,
+ "listener_peak": 2,
+ "listeners": 1,
+ "listenurl": "http://localhost:8000/line.nsv",
+ "server_description": "(null)",
+ "server_name": "abc",
+ "server_type": "audio/mpeg",
+ "server_url": "(null)",
+ "stream_start": "Wed, 17 Jul 2024 12:10:20 +0300",
+ "stream_start_iso8601": "2024-07-17T12:10:20+0300",
+ "dummy": null
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/init.go b/src/go/plugin/go.d/modules/init.go
new file mode 100644
index 000000000..8271a70ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/init.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package modules
+
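+// Each collector is imported for its side effects only: the blank import runs the
+// module's init function, which registers it with the agent's module registry.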
+import (
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/activemq"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/adaptecraid"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/ap"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/apache"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/beanstalk"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/bind"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/cassandra"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/chrony"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/clickhouse"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/cockroachdb"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/consul"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/coredns"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/couchbase"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/couchdb"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/dmcache"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/dnsdist"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/dnsmasq"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/dnsmasq_dhcp"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/dnsquery"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/docker"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/docker_engine"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/dockerhub"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/dovecot"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/elasticsearch"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/envoy"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/example"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/exim"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/fail2ban"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/filecheck"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/fluentd"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/freeradius"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/gearman"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/geth"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/haproxy"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/hddtemp"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/hdfs"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/hpssa"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/httpcheck"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/icecast"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/intelgpu"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/ipfs"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/isc_dhcpd"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/k8s_kubelet"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/k8s_kubeproxy"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/k8s_state"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/lighttpd"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/litespeed"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/logind"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/logstash"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/lvm"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/megacli"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/memcached"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/mongodb"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/monit"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/mysql"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nginx"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nginxplus"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nginxvts"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nsd"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/ntpd"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nvidia_smi"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nvme"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn_status_log"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/pgbouncer"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/phpdaemon"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/phpfpm"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/pihole"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/pika"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/ping"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/portcheck"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/postfix"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/postgres"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/powerdns"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/powerdns_recursor"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/prometheus"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/proxysql"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/pulsar"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/puppet"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/rabbitmq"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/redis"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/rethinkdb"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/riakkv"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/rspamd"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/sensors"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/smartctl"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/snmp"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/squid"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/squidlog"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/storcli"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/supervisord"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/systemdunits"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/tengine"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/tomcat"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/tor"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/traefik"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/unbound"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/upsd"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/uwsgi"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vcsa"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vernemq"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/weblog"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/whoisquery"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/windows"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/wireguard"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/x509check"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/zfspool"
+ _ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/zookeeper"
+)
diff --git a/src/go/plugin/go.d/modules/intelgpu/README.md b/src/go/plugin/go.d/modules/intelgpu/README.md
new file mode 120000
index 000000000..44282e036
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/README.md
@@ -0,0 +1 @@
+integrations/intel_gpu.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/intelgpu/charts.go b/src/go/plugin/go.d/modules/intelgpu/charts.go
new file mode 100644
index 000000000..a73efc726
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/charts.go
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package intelgpu
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioGPUFrequency = module.Priority + iota
+ prioGPUPower
+ prioGPUEngineBusy
+)
+
+var charts = module.Charts{
+ intelGPUFrequencyChart.Copy(),
+ intelGPUPowerGPUChart.Copy(),
+}
+
+var intelGPUFrequencyChart = module.Chart{
+ ID: "igpu_frequency",
+ Title: "Intel GPU frequency",
+ Units: "MHz",
+ Fam: "frequency",
+ Ctx: "intelgpu.frequency",
+ Type: module.Line,
+ Priority: prioGPUFrequency,
+ Dims: module.Dims{
+ {ID: "frequency_actual", Name: "frequency", Div: precision},
+ },
+}
+
+var intelGPUPowerGPUChart = module.Chart{
+ ID: "igpu_power_gpu",
+ Title: "Intel GPU power",
+ Units: "Watts",
+ Fam: "power",
+ Ctx: "intelgpu.power",
+ Type: module.Line,
+ Priority: prioGPUPower,
+ Dims: module.Dims{
+ {ID: "power_gpu", Name: "gpu", Div: precision},
+ {ID: "power_package", Name: "package", Div: precision},
+ },
+}
+
+var intelGPUEngineBusyPercChartTmpl = module.Chart{
+ ID: "igpu_engine_%s_busy_percentage",
+ Title: "Intel GPU engine busy time percentage",
+ Units: "percentage",
+ Fam: "engines",
+ Ctx: "intelgpu.engine_busy_perc",
+ Type: module.Line,
+ Priority: prioGPUEngineBusy,
+ Dims: module.Dims{
+ {ID: "engine_%s_busy", Name: "busy", Div: precision},
+ },
+}
+
+func (ig *IntelGPU) addEngineCharts(engine string) {
+ chart := intelGPUEngineBusyPercChartTmpl.Copy()
+
+ s := strings.ToLower(engine)
+ s = strings.ReplaceAll(s, "/", "_")
+
+ chart.ID = fmt.Sprintf(chart.ID, s)
+ chart.Labels = []module.Label{
+ {Key: "engine_class", Value: engineClassName(engine)},
+ {Key: "engine_instance", Value: engine},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, engine)
+ }
+
+ if err := ig.Charts().Add(chart); err != nil {
+ ig.Warning(err)
+ }
+}
+
+func engineClassName(engine string) string {
+ // https://gitlab.freedesktop.org/drm/igt-gpu-tools/-/blob/master/tools/intel_gpu_top.c#L431
+ engines := []string{"Render/3D", "Blitter", "VideoEnhance", "Video", "Compute"}
+ for _, name := range engines {
+ if strings.HasPrefix(engine, name) {
+ return name
+ }
+ }
+ return "unknown"
+}
diff --git a/src/go/plugin/go.d/modules/intelgpu/collect.go b/src/go/plugin/go.d/modules/intelgpu/collect.go
new file mode 100644
index 000000000..38e8b305a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/collect.go
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package intelgpu
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+)
+
+type (
+ gpuSummaryStats struct {
+ Frequency struct {
+ Actual float64 `json:"actual"`
+ } `json:"frequency"`
+ Power struct {
+ GPU float64 `json:"gpu"`
+ Package float64 `json:"package"`
+ } `json:"power"`
+ Engines map[string]struct {
+ Busy float64 `json:"busy"`
+ } `json:"engines"`
+ }
+)
+
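+// precision scales float values by 100 before they are converted to int64, preserving
+// two decimal places; the chart dimensions divide by the same factor (Div: precision).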
+const precision = 100
+
+func (ig *IntelGPU) collect() (map[string]int64, error) {
+ if ig.exec == nil {
+ return nil, errors.New("collector not initialized")
+ }
+
+ stats, err := ig.getGPUSummaryStats()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ mx["frequency_actual"] = int64(stats.Frequency.Actual * precision)
+ mx["power_gpu"] = int64(stats.Power.GPU * precision)
+ mx["power_package"] = int64(stats.Power.Package * precision)
+
+ for name, es := range stats.Engines {
+ if !ig.engines[name] {
+ ig.addEngineCharts(name)
+ ig.engines[name] = true
+ }
+
+ key := fmt.Sprintf("engine_%s_busy", name)
+ mx[key] = int64(es.Busy * precision)
+ }
+
+ return mx, nil
+}
+
+func (ig *IntelGPU) getGPUSummaryStats() (*gpuSummaryStats, error) {
+ bs, err := ig.exec.queryGPUSummaryJson()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(bs) == 0 {
+ return nil, errors.New("query returned empty response")
+ }
+
+ var stats gpuSummaryStats
+ if err := json.Unmarshal(bs, &stats); err != nil {
+ return nil, err
+ }
+
+ if len(stats.Engines) == 0 {
+ return nil, errors.New("query returned unexpected response")
+ }
+
+ return &stats, nil
+}
diff --git a/src/go/plugin/go.d/modules/intelgpu/config_schema.json b/src/go/plugin/go.d/modules/intelgpu/config_schema.json
new file mode 100644
index 000000000..ac8183421
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/config_schema.json
@@ -0,0 +1,33 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Intel GPU collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "device": {
+ "title": "Device selector",
+ "description": "Select Intel GPU ([supported filters](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html#DEVICE_SELECTION)). Use `intel_gpu_top -L` for listing devices. If not set, the first device is used.",
+ "type": "string"
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "device": {
+ "ui:placeholder": "For systems with multiple GPUs, create separate data collection jobs and specify the device for each job."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/intelgpu/exec.go b/src/go/plugin/go.d/modules/intelgpu/exec.go
new file mode 100644
index 000000000..bdfb526ef
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/exec.go
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package intelgpu
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "os/exec"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newIntelGpuTopExec(log *logger.Logger, ndsudoPath string, updateEvery int, device string) (*intelGpuTopExec, error) {
+ topExec := &intelGpuTopExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ updateEvery: updateEvery,
+ device: device,
+ firstSampleTimeout: time.Second * 3,
+ }
+
+ if err := topExec.run(); err != nil {
+ return nil, err
+ }
+
+ return topExec, nil
+}
+
+type intelGpuTopExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ updateEvery int
+ device string
+ firstSampleTimeout time.Duration
+
+ cmd *exec.Cmd
+ done chan struct{}
+
+ mux sync.Mutex
+ lastSample string
+}
+
+func (e *intelGpuTopExec) run() error {
+ var cmd *exec.Cmd
+
+ if e.device != "" {
+ cmd = exec.Command(e.ndsudoPath, "igt-device-json", "--interval", e.calcIntervalArg(), "--device", e.device)
+ } else {
+ cmd = exec.Command(e.ndsudoPath, "igt-json", "--interval", e.calcIntervalArg())
+ }
+
+ e.Debugf("executing '%s'", cmd)
+
+ r, err := cmd.StdoutPipe()
+ if err != nil {
+ return err
+ }
+
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+
+ firstSample := make(chan struct{}, 1)
+ done := make(chan struct{})
+ e.cmd = cmd
+ e.done = done
+
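+ // intel_gpu_top emits a continuous stream of JSON samples; the goroutine below
+ // reassembles one object at a time from stdout and keeps only the most recent
+ // complete sample for queryGPUSummaryJson to return.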
+ go func() {
+ defer close(done)
+ sc := bufio.NewScanner(r)
+ var buf bytes.Buffer
+ var n int
+
+ for sc.Scan() {
+ if n++; n > 1000 {
+ break
+ }
+
+ text := sc.Text()
+
+ if buf.Len() == 0 && text != "{" || text == "" {
+ continue
+ }
+
+ if text == "}," {
+ text = "}"
+ }
+
+ buf.WriteString(text + "\n")
+
+ if text[0] == '}' {
+ e.mux.Lock()
+ e.lastSample = buf.String()
+ e.mux.Unlock()
+
+ select {
+ case firstSample <- struct{}{}:
+ default:
+ }
+
+ buf.Reset()
+ n = 0
+ }
+ }
+ }()
+
+ select {
+ case <-e.done:
+ _ = e.stop()
+ return errors.New("process exited before the first sample was collected")
+ case <-time.After(e.firstSampleTimeout):
+ _ = e.stop()
+ return errors.New("timed out waiting for first sample")
+ case <-firstSample:
+ return nil
+ }
+}
+
+func (e *intelGpuTopExec) queryGPUSummaryJson() ([]byte, error) {
+ select {
+ case <-e.done:
+ return nil, errors.New("process has already exited")
+ default:
+ }
+
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ return []byte(e.lastSample), nil
+}
+
+func (e *intelGpuTopExec) stop() error {
+ if e.cmd == nil || e.cmd.Process == nil {
+ return nil
+ }
+
+ _ = e.cmd.Process.Kill()
+ _ = e.cmd.Wait()
+ e.cmd = nil
+
+ select {
+ case <-e.done:
+ return nil
+ case <-time.After(time.Second * 2):
+ return errors.New("timed out waiting for process to exit")
+ }
+}
+
+func (e *intelGpuTopExec) calcIntervalArg() string {
+ // intel_gpu_top appends the end marker ("},\n") of the previous sample to the beginning of the next sample.
+ // interval must be shorter than 'firstSampleTimeout'
+ interval := 900
+ if m := min(e.updateEvery, int(e.firstSampleTimeout.Seconds())); m > 1 {
+ interval = m*1000 - 500 // milliseconds
+ }
+ return strconv.Itoa(interval)
+}
diff --git a/src/go/plugin/go.d/modules/intelgpu/init.go b/src/go/plugin/go.d/modules/intelgpu/init.go
new file mode 100644
index 000000000..df489686d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/init.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package intelgpu
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (ig *IntelGPU) initIntelGPUTopExec() (intelGpuTop, error) {
+ ndsudoPath := filepath.Join(executable.Directory, ig.ndsudoName)
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ return newIntelGpuTopExec(ig.Logger, ndsudoPath, ig.UpdateEvery, ig.Device)
+}
diff --git a/src/go/plugin/go.d/modules/intelgpu/integrations/intel_gpu.md b/src/go/plugin/go.d/modules/intelgpu/integrations/intel_gpu.md
new file mode 100644
index 000000000..696746601
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/integrations/intel_gpu.md
@@ -0,0 +1,213 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/intelgpu/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/intelgpu/metadata.yaml"
+sidebar_label: "Intel GPU"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Intel GPU
+
+
+<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: intelgpu
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector gathers performance metrics for Intel integrated GPUs.
+It relies on the [`intel_gpu_top`](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html) CLI tool but avoids directly executing the binary.
+Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+This approach eliminates the need to grant the CAP_PERFMON capability to `intel_gpu_top`, improving security and potentially simplifying permission management.
+
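+Under the hood, the collector invokes `ndsudo` (shipped in the Netdata plugins directory) with the `igt-json` command; the interval is in milliseconds and is derived from `update_every`. An illustrative invocation:
+
+```bash
+ndsudo igt-json --interval 900
+```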
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Intel GPU instance
+
+These metrics refer to the Intel GPU.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| intelgpu.frequency | frequency | MHz |
+| intelgpu.power | gpu, package | Watts |
+
+### Per engine
+
+These metrics refer to the GPU hardware engine.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| engine_class | Engine class (Render/3D, Blitter, VideoEnhance, Video, Compute). |
+| engine_instance | Engine instance (e.g. Render/3D/0, Video/0, Video/1). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| intelgpu.engine_busy_perc | busy | percentage |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install intel-gpu-tools
+
+Install `intel-gpu-tools` using your distribution's package manager.
+
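+For example (package names vary between distributions; the ones below are typical):
+
+```bash
+# Debian/Ubuntu
+sudo apt install intel-gpu-tools
+
+# Fedora
+sudo dnf install igt-gpu-tools
+```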
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/intelgpu.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/intelgpu.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| device | Select a specific GPU using a [supported filter](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html#DESCRIPTION). | | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: intelgpu
+ update_every: 5 # Collect Intel iGPU metrics every 5 seconds
+
+```
+</details>
+
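+##### Select a specific GPU
+
+Use the `device` option when the system has more than one GPU. The value is an `intel_gpu_top` device filter (the value below is only an example; see the man page for the full filter syntax):
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: intelgpu
+    device: "drm:/dev/dri/card1"
+
+```
+</details>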
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `intelgpu` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m intelgpu
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `intelgpu` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep intelgpu
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep intelgpu /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep intelgpu
+```
+
+
diff --git a/src/go/plugin/go.d/modules/intelgpu/intelgpu.go b/src/go/plugin/go.d/modules/intelgpu/intelgpu.go
new file mode 100644
index 000000000..8e98c688d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/intelgpu.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package intelgpu
+
+import (
+ _ "embed"
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("intelgpu", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *IntelGPU {
+ return &IntelGPU{
+ ndsudoName: "ndsudo",
+ charts: charts.Copy(),
+ engines: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Device string `yaml:"device,omitempty" json:"device"`
+}
+
+type (
+ IntelGPU struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec intelGpuTop
+ ndsudoName string
+
+ engines map[string]bool
+ }
+ intelGpuTop interface {
+ queryGPUSummaryJson() ([]byte, error)
+ stop() error
+ }
+)
+
+func (ig *IntelGPU) Configuration() any {
+ return ig.Config
+}
+
+func (ig *IntelGPU) Init() error {
+ topExec, err := ig.initIntelGPUTopExec()
+ if err != nil {
+ ig.Error(err)
+ return err
+ }
+
+ ig.exec = topExec
+
+ return nil
+}
+
+func (ig *IntelGPU) Check() error {
+ mx, err := ig.collect()
+ if err != nil {
+ ig.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (ig *IntelGPU) Charts() *module.Charts {
+ return ig.charts
+}
+
+func (ig *IntelGPU) Collect() map[string]int64 {
+ mx, err := ig.collect()
+ if err != nil {
+ ig.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (ig *IntelGPU) Cleanup() {
+ if ig.exec != nil {
+ if err := ig.exec.stop(); err != nil {
+ ig.Error(err)
+ }
+ ig.exec = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/intelgpu/intelgpu_test.go b/src/go/plugin/go.d/modules/intelgpu/intelgpu_test.go
new file mode 100644
index 000000000..e38adc284
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/intelgpu_test.go
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package intelgpu
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataIntelTopGpuJSON, _ = os.ReadFile("testdata/igt.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataIntelTopGpuJSON": dataIntelTopGpuJSON,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestIntelGPU_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &IntelGPU{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestIntelGPU_Init(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(igt *IntelGPU)
+ wantFail bool
+ }{
+ "fails if can't locate ndsudo": {
+ wantFail: true,
+ prepare: func(igt *IntelGPU) {
+ igt.ndsudoName += "!!!"
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ igt := New()
+
+ test.prepare(igt)
+
+ if test.wantFail {
+ assert.Error(t, igt.Init())
+ } else {
+ assert.NoError(t, igt.Init())
+ }
+ })
+ }
+}
+
+func TestIntelGPU_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockIntelGpuTop
+ wantFail bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantFail: false,
+ },
+ "fail on error": {
+ prepareMock: prepareMockErrOnGPUSummaryJson,
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ igt := New()
+ mock := test.prepareMock()
+ igt.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, igt.Check())
+ } else {
+ assert.NoError(t, igt.Check())
+ }
+ })
+ }
+}
+
+func TestIntelGPU_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockIntelGpuTop
+ wantMetrics map[string]int64
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantMetrics: map[string]int64{
+ "engine_Blitter/0_busy": 0,
+ "engine_Render/3D/0_busy": 9609,
+ "engine_Video/0_busy": 7295,
+ "engine_Video/1_busy": 7740,
+ "engine_VideoEnhance/0_busy": 0,
+ "frequency_actual": 125308,
+ "power_gpu": 323,
+ "power_package": 1665,
+ },
+ },
+ "fail on error": {
+ prepareMock: prepareMockErrOnGPUSummaryJson,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ igt := New()
+ mock := test.prepareMock()
+ igt.exec = mock
+
+ mx := igt.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ assert.Len(t, *igt.Charts(), len(charts)+len(igt.engines))
+ }
+ })
+ }
+}
+
+func TestIntelGPU_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *IntelGPU
+ }{
+ "not initialized exec": {
+ prepare: func() *IntelGPU {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *IntelGPU {
+ igt := New()
+ igt.exec = prepareMockOK()
+ _ = igt.Check()
+ return igt
+ },
+ },
+ "after collect": {
+ prepare: func() *IntelGPU {
+ igt := New()
+ igt.exec = prepareMockOK()
+ _ = igt.Collect()
+ return igt
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ igt := test.prepare()
+
+ mock, ok := igt.exec.(*mockIntelGpuTop)
+
+ assert.NotPanics(t, igt.Cleanup)
+
+ if ok {
+ assert.True(t, mock.stopCalled)
+ }
+ })
+ }
+}
+
+func prepareMockOK() *mockIntelGpuTop {
+ return &mockIntelGpuTop{
+ gpuSummaryJson: dataIntelTopGpuJSON,
+ }
+}
+
+func prepareMockErrOnGPUSummaryJson() *mockIntelGpuTop {
+ return &mockIntelGpuTop{
+ errOnQueryGPUSummaryJson: true,
+ }
+}
+
+type mockIntelGpuTop struct {
+ errOnQueryGPUSummaryJson bool
+ gpuSummaryJson []byte
+
+ stopCalled bool
+}
+
+func (m *mockIntelGpuTop) queryGPUSummaryJson() ([]byte, error) {
+ if m.errOnQueryGPUSummaryJson {
+ return nil, errors.New("error on mock.queryGPUSummaryJson()")
+ }
+ return m.gpuSummaryJson, nil
+}
+
+func (m *mockIntelGpuTop) stop() error {
+ m.stopCalled = true
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/intelgpu/metadata.yaml b/src/go/plugin/go.d/modules/intelgpu/metadata.yaml
new file mode 100644
index 000000000..3b5b39f25
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/metadata.yaml
@@ -0,0 +1,119 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-intelgpu
+ plugin_name: go.d.plugin
+ module_name: intelgpu
+ monitored_instance:
+ name: Intel GPU
+ link: https://www.intel.com/
+ icon_filename: microchip.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords:
+ - intel
+ - gpu
+ - hardware
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector gathers performance metrics for Intel integrated GPUs.
+ It relies on the [`intel_gpu_top`](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html) CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to grant the CAP_PERFMON capability to `intel_gpu_top`, improving security and potentially simplifying permission management.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Install intel-gpu-tools
+ description: Install `intel-gpu-tools` using your distribution's package manager.
+ configuration:
+ file:
+ name: go.d/intelgpu.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: device
+ description: 'Select a specific GPU using a [supported filter](https://manpages.debian.org/testing/intel-gpu-tools/intel_gpu_top.1.en.html#DESCRIPTION).'
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: intelgpu
+ update_every: 5 # Collect Intel iGPU metrics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the Intel GPU.
+ labels: []
+ metrics:
+ - name: intelgpu.frequency
+ description: Intel GPU frequency
+ unit: MHz
+ chart_type: line
+ dimensions:
+ - name: frequency
+ - name: intelgpu.power
+ description: Intel GPU power
+ unit: Watts
+ chart_type: line
+ dimensions:
+ - name: gpu
+ - name: package
+ - name: engine
+ description: These metrics refer to the GPU hardware engine.
+ labels:
+ - name: engine_class
+ description: Engine class (Render/3D, Blitter, VideoEnhance, Video, Compute).
+ - name: engine_instance
+ description: Engine instance (e.g. Render/3D/0, Video/0, Video/1).
+ metrics:
+ - name: intelgpu.engine_busy_perc
+ description: Intel GPU engine busy time percentage
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: busy
diff --git a/src/go/plugin/go.d/modules/intelgpu/testdata/config.json b/src/go/plugin/go.d/modules/intelgpu/testdata/config.json
new file mode 100644
index 000000000..167bd15fe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "device": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/intelgpu/testdata/config.yaml b/src/go/plugin/go.d/modules/intelgpu/testdata/config.yaml
new file mode 100644
index 000000000..f27729e3c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+device: "ok"
diff --git a/src/go/plugin/go.d/modules/intelgpu/testdata/igt.json b/src/go/plugin/go.d/modules/intelgpu/testdata/igt.json
new file mode 100644
index 000000000..4d43cbc5f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/intelgpu/testdata/igt.json
@@ -0,0 +1,80 @@
+{
+ "period": {
+ "duration": 1055.794145,
+ "unit": "ms"
+ },
+ "frequency": {
+ "requested": 1449.146131,
+ "actual": 1253.085184,
+ "unit": "MHz"
+ },
+ "interrupts": {
+ "count": 1757.918443,
+ "unit": "irq/s"
+ },
+ "rc6": {
+ "value": 0.000000,
+ "unit": "%"
+ },
+ "power": {
+ "GPU": 3.233528,
+ "Package": 16.658620,
+ "unit": "W"
+ },
+ "engines": {
+ "Render/3D/0": {
+ "busy": 96.092944,
+ "sema": 0.000000,
+ "wait": 0.000000,
+ "unit": "%"
+ },
+ "Blitter/0": {
+ "busy": 0.000000,
+ "sema": 0.000000,
+ "wait": 0.000000,
+ "unit": "%"
+ },
+ "Video/0": {
+ "busy": 72.950675,
+ "sema": 0.000000,
+ "wait": 0.000000,
+ "unit": "%"
+ },
+ "Video/1": {
+ "busy": 77.402254,
+ "sema": 0.000000,
+ "wait": 0.000000,
+ "unit": "%"
+ },
+ "VideoEnhance/0": {
+ "busy": 0.000000,
+ "sema": 0.000000,
+ "wait": 0.000000,
+ "unit": "%"
+ }
+ },
+ "clients": {
+ "4292239459": {
+ "name": "ffmpeg",
+ "pid": "2727837",
+ "engine-classes": {
+ "Render/3D": {
+ "busy": "101.396726",
+ "unit": "%"
+ },
+ "Blitter": {
+ "busy": "0.000000",
+ "unit": "%"
+ },
+ "Video": {
+ "busy": "159.292435",
+ "unit": "%"
+ },
+ "VideoEnhance": {
+ "busy": "0.000000",
+ "unit": "%"
+ }
+ }
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/README.md b/src/go/plugin/go.d/modules/ipfs/README.md
new file mode 120000
index 000000000..eee6a07b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/README.md
@@ -0,0 +1 @@
+integrations/ipfs.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/ipfs/charts.go b/src/go/plugin/go.d/modules/ipfs/charts.go
new file mode 100644
index 000000000..1f71c7b40
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/charts.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ipfs
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioBandwidth = module.Priority + iota
+ prioSwarmPeers
+ prioDatastoreSpaceUtilization
+ prioRepoSize
+ prioRepoObj
+ prioRepoPinnedObj
+)
+
+var charts = module.Charts{
+ bandwidthChart.Copy(),
+ peersChart.Copy(),
+ datastoreUtilizationChart.Copy(),
+ repoSizeChart.Copy(),
+ repoObjChart.Copy(),
+ repoPinnedObjChart.Copy(),
+}
+
+var (
+ bandwidthChart = module.Chart{
+ ID: "bandwidth",
+ Title: "IPFS Bandwidth",
+ Units: "bytes/s",
+ Fam: "bandwidth",
+ Ctx: "ipfs.bandwidth",
+ Type: module.Area,
+ Priority: prioBandwidth,
+ Dims: module.Dims{
+ {ID: "in", Algo: module.Incremental},
+ {ID: "out", Mul: -1, Algo: module.Incremental},
+ },
+ }
+
+ peersChart = module.Chart{
+ ID: "peers",
+ Title: "IPFS Peers",
+ Units: "peers",
+ Fam: "peers",
+ Ctx: "ipfs.peers",
+ Type: module.Line,
+ Priority: prioSwarmPeers,
+ Dims: module.Dims{
+ {ID: "peers"},
+ },
+ }
+
+ datastoreUtilizationChart = module.Chart{
+ ID: "datastore_space_utilization",
+ Title: "IPFS Datastore Space Utilization",
+ Units: "percent",
+ Fam: "size",
+ Ctx: "ipfs.datastore_space_utilization",
+ Type: module.Area,
+ Priority: prioDatastoreSpaceUtilization,
+ Dims: module.Dims{
+ {ID: "used_percent", Name: "used"},
+ },
+ }
+ repoSizeChart = module.Chart{
+ ID: "repo_size",
+ Title: "IPFS Repo Size",
+ Units: "bytes",
+ Fam: "size",
+ Ctx: "ipfs.repo_size",
+ Type: module.Line,
+ Priority: prioRepoSize,
+ Dims: module.Dims{
+ {ID: "size"},
+ },
+ }
+
+ repoObjChart = module.Chart{
+ ID: "repo_objects",
+ Title: "IPFS Repo Objects",
+ Units: "objects",
+ Fam: "objects",
+ Ctx: "ipfs.repo_objects",
+ Type: module.Line,
+ Priority: prioRepoObj,
+ Dims: module.Dims{
+ {ID: "objects"},
+ },
+ }
+ repoPinnedObjChart = module.Chart{
+ ID: "repo_pinned_objects",
+ Title: "IPFS Repo Pinned Objects",
+ Units: "objects",
+ Fam: "objects",
+ Ctx: "ipfs.repo_pinned_objects",
+ Type: module.Line,
+ Priority: prioRepoPinnedObj,
+ Dims: module.Dims{
+ {ID: "pinned"},
+ {ID: "recursive_pins"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/ipfs/collect.go b/src/go/plugin/go.d/modules/ipfs/collect.go
new file mode 100644
index 000000000..6bd0b128a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/collect.go
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ipfs
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+type (
+ ipfsStatsBw struct {
+ TotalIn int64 `json:"TotalIn"`
+ TotalOut int64 `json:"TotalOut"`
+ RateIn *float64 `json:"RateIn"`
+ RateOut *float64 `json:"RateOut"`
+ }
+ ipfsStatsRepo struct {
+ RepoSize int64 `json:"RepoSize"`
+ StorageMax int64 `json:"StorageMax"`
+ NumObjects int64 `json:"NumObjects"`
+ }
+ ipfsSwarmPeers struct {
+ Peers []any `json:"Peers"`
+ }
+ ipfsPinsLs struct {
+ Keys map[string]struct {
+ Type string `json:"type"`
+ } `json:"Keys"`
+ }
+)
+
+const (
+ urlPathStatsBandwidth = "/api/v0/stats/bw" // https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-bw
+ urlPathStatsRepo = "/api/v0/stats/repo" // https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-repo
+ urlPathSwarmPeers = "/api/v0/swarm/peers" // https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-swarm-peers
+ urlPathPinLs = "/api/v0/pin/ls" // https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls
+)
+
+func (ip *IPFS) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := ip.collectStatsBandwidth(mx); err != nil {
+ return nil, err
+ }
+ if err := ip.collectSwarmPeers(mx); err != nil {
+ return nil, err
+ }
+ if ip.QueryRepoApi {
+ // https://github.com/netdata/netdata/pull/9687
+ // TODO: collect by default with "size-only"
+ // https://github.com/ipfs/kubo/issues/7528#issuecomment-657398332
+ if err := ip.collectStatsRepo(mx); err != nil {
+ return nil, err
+ }
+ }
+ if ip.QueryPinApi {
+ if err := ip.collectPinLs(mx); err != nil {
+ return nil, err
+ }
+ }
+
+ return mx, nil
+}
+
+func (ip *IPFS) collectStatsBandwidth(mx map[string]int64) error {
+ stats, err := ip.queryStatsBandwidth()
+ if err != nil {
+ return err
+ }
+
+ mx["in"] = stats.TotalIn
+ mx["out"] = stats.TotalOut
+
+ return nil
+}
+
+func (ip *IPFS) collectSwarmPeers(mx map[string]int64) error {
+ stats, err := ip.querySwarmPeers()
+ if err != nil {
+ return err
+ }
+
+ mx["peers"] = int64(len(stats.Peers))
+
+ return nil
+}
+
+func (ip *IPFS) collectStatsRepo(mx map[string]int64) error {
+ stats, err := ip.queryStatsRepo()
+ if err != nil {
+ return err
+ }
+
+ mx["used_percent"] = 0
+ if stats.StorageMax > 0 {
+ mx["used_percent"] = stats.RepoSize * 100 / stats.StorageMax
+ }
+ mx["size"] = stats.RepoSize
+ mx["objects"] = stats.NumObjects
+
+ return nil
+}
+
+func (ip *IPFS) collectPinLs(mx map[string]int64) error {
+ stats, err := ip.queryPinLs()
+ if err != nil {
+ return err
+ }
+
+ var n int64
+ for _, v := range stats.Keys {
+ if v.Type == "recursive" {
+ n++
+ }
+ }
+
+ mx["pinned"] = int64(len(stats.Keys))
+ mx["recursive_pins"] = n
+
+ return nil
+}
+
+func (ip *IPFS) queryStatsBandwidth() (*ipfsStatsBw, error) {
+ req, err := web.NewHTTPRequestWithPath(ip.Request, urlPathStatsBandwidth)
+ if err != nil {
+ return nil, err
+ }
+
+ var stats ipfsStatsBw
+ if err := ip.doOKDecode(req, &stats); err != nil {
+ return nil, err
+ }
+
+ if stats.RateIn == nil || stats.RateOut == nil {
+ return nil, fmt.Errorf("unexpected response: not ipfs data")
+ }
+
+ return &stats, nil
+}
+
+func (ip *IPFS) querySwarmPeers() (*ipfsSwarmPeers, error) {
+ req, err := web.NewHTTPRequestWithPath(ip.Request, urlPathSwarmPeers)
+ if err != nil {
+ return nil, err
+ }
+
+ var stats ipfsSwarmPeers
+ if err := ip.doOKDecode(req, &stats); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
+func (ip *IPFS) queryStatsRepo() (*ipfsStatsRepo, error) {
+ req, err := web.NewHTTPRequestWithPath(ip.Request, urlPathStatsRepo)
+ if err != nil {
+ return nil, err
+ }
+
+ var stats ipfsStatsRepo
+ if err := ip.doOKDecode(req, &stats); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
+func (ip *IPFS) queryPinLs() (*ipfsPinsLs, error) {
+ req, err := web.NewHTTPRequestWithPath(ip.Request, urlPathPinLs)
+ if err != nil {
+ return nil, err
+ }
+
+ var stats ipfsPinsLs
+ if err := ip.doOKDecode(req, &stats); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
+func (ip *IPFS) doOKDecode(req *http.Request, in any) error {
+ resp, err := ip.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/config_schema.json b/src/go/plugin/go.d/modules/ipfs/config_schema.json
new file mode 100644
index 000000000..ce4921c3e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/config_schema.json
@@ -0,0 +1,195 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "IPFS collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL where the IPFS API can be accessed.",
+ "type": "string",
+ "default": "http://127.0.0.1:5001",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "repoapi": {
+ "title": "Query Repo API",
+ "description": "Enables querying the [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-repo-stat) endpoint for repository statistics. **Disabled by default** due to potential high CPU usage.",
+ "type": "boolean"
+ },
+ "pinapi": {
+ "title": "Query Pin API",
+ "description": "Enables querying the [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls) endpoint to retrieve a list of all pinned objects. **Consider enabling only if necessary**.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "repoapi": {
+ "ui:help": "This endpoint retrieves the number of objects in the local repository, which is not cached and can be computationally expensive for IPFS to calculate, especially with frequent collection intervals. See [#7528](https://github.com/ipfs/go-ipfs/issues/7528)."
+ },
+ "pinapi": {
+ "ui:help": "Performance may decrease as the number of pinned objects grows, as the entire list needs to be retrieved. See [#3874](https://github.com/ipfs/go-ipfs/issues/3874)."
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "repoapi",
+ "pinapi",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/integrations/ipfs.md b/src/go/plugin/go.d/modules/ipfs/integrations/ipfs.md
new file mode 100644
index 000000000..4357b8665
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/integrations/ipfs.md
@@ -0,0 +1,246 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/ipfs/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/ipfs/metadata.yaml"
+sidebar_label: "IPFS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IPFS
+
+
+<img src="https://netdata.cloud/img/ipfs.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: ipfs
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors IPFS daemon health and network activity.
+
+It uses [RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to collect metrics.
+
+Used endpoints:
+
+- [/api/v0/stats/bw](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-bw)
+- [/api/v0/swarm/peers](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-swarm-peers)
+- [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-repo)
+- [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls)
+
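+As a quick manual check that the RPC API is reachable, you can query one of these endpoints yourself (illustrative only; the Kubo RPC API expects POST requests):
+
+```bash
+curl -s -X POST http://127.0.0.1:5001/api/v0/stats/bw
+```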
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects IPFS instances running on localhost that are listening on port 5001.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Calls to the following endpoints are disabled by default due to IPFS bugs:
+
+- /api/v0/stats/repo ([#7528](https://github.com/ipfs/go-ipfs/issues/7528)).
+- /api/v0/pin/ls ([#3874](https://github.com/ipfs/go-ipfs/issues/3874)).
+
+**Disabled by default** due to potential high CPU usage. Consider enabling only if necessary.
+
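+If you do need repository and pin statistics, both endpoints can be enabled per job using the `repoapi` and `pinapi` options described below:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:5001
+    repoapi: yes
+    pinapi: yes
+```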
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per IPFS instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ipfs.bandwidth | in, out | bytes/s |
+| ipfs.peers | peers | peers |
+| ipfs.datastore_space_utilization | used | percent |
+| ipfs.repo_size | size | bytes |
+| ipfs.repo_objects | objects | objects |
+| ipfs.repo_pinned_objects | pinned, recursive_pins | objects |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.datastore_space_utilization | IPFS datastore utilization |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/ipfs.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/ipfs.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| repoapi | Enables querying the [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-repo-stat) endpoint for repository statistics. | no | no |
+| pinapi | Enables querying the [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls) endpoint to retrieve a list of all pinned objects. | no | no |
+| url | Server URL. | http://127.0.0.1:5001 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | POST | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:5001
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:5001
+
+ - name: remote
+ url: http://192.0.2.1:5001
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `ipfs` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m ipfs
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `ipfs` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep ipfs
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep ipfs /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep ipfs
+```
+
+
diff --git a/src/go/plugin/go.d/modules/ipfs/ipfs.go b/src/go/plugin/go.d/modules/ipfs/ipfs.go
new file mode 100644
index 000000000..0caed8d9b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/ipfs.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ipfs
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("ipfs", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *IPFS {
+ return &IPFS{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:5001",
+ Method: http.MethodPost,
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 1),
+ },
+ },
+ QueryRepoApi: false,
+ QueryPinApi: false,
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ QueryPinApi bool `yaml:"pinapi" json:"pinapi"`
+ QueryRepoApi bool `yaml:"repoapi" json:"repoapi"`
+}
+
+type IPFS struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (ip *IPFS) Configuration() any {
+ return ip.Config
+}
+
+func (ip *IPFS) Init() error {
+ if ip.URL == "" {
+ ip.Error("URL not set")
+ return errors.New("url not set")
+ }
+
+ client, err := web.NewHTTPClient(ip.Client)
+ if err != nil {
+ ip.Error(err)
+ return err
+ }
+ ip.httpClient = client
+
+ if !ip.QueryPinApi {
+ _ = ip.Charts().Remove(repoPinnedObjChart.ID)
+ }
+ if !ip.QueryRepoApi {
+ _ = ip.Charts().Remove(datastoreUtilizationChart.ID)
+ _ = ip.Charts().Remove(repoSizeChart.ID)
+ _ = ip.Charts().Remove(repoObjChart.ID)
+ }
+
+ ip.Debugf("using URL %s", ip.URL)
+ ip.Debugf("using timeout: %s", ip.Timeout)
+
+ return nil
+}
+
+func (ip *IPFS) Check() error {
+ mx, err := ip.collect()
+ if err != nil {
+ ip.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (ip *IPFS) Charts() *module.Charts {
+ return ip.charts
+}
+
+func (ip *IPFS) Collect() map[string]int64 {
+ mx, err := ip.collect()
+ if err != nil {
+ ip.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (ip *IPFS) Cleanup() {
+ if ip.httpClient != nil {
+ ip.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/ipfs_test.go b/src/go/plugin/go.d/modules/ipfs/ipfs_test.go
new file mode 100644
index 000000000..5e353a1bc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/ipfs_test.go
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ipfs
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ apiv0PinLsData, _ = os.ReadFile("testdata/api_v0_pin_ls.json")
+ apiv0StatsBwData, _ = os.ReadFile("testdata/api_v0_stats_bw.json")
+ apiv0StatsRepoData, _ = os.ReadFile("testdata/api_v0_stats_repo.json")
+ apiv0SwarmPeersData, _ = os.ReadFile("testdata/api_v0_swarm_peers.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "apiv0PinLsData": apiv0PinLsData,
+ "apiv0StatsBwData": apiv0StatsBwData,
+ "apiv0StatsRepoData": apiv0StatsRepoData,
+ "apiv0SwarmPeersData": apiv0SwarmPeersData,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestIPFS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &IPFS{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestIPFS_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ipfs := New()
+ ipfs.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, ipfs.Init())
+ } else {
+ assert.NoError(t, ipfs.Init())
+ }
+ })
+ }
+}
+
+func TestIPFS_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestIPFS_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (*IPFS, func())
+ }{
+ "success default config": {
+ wantFail: false,
+ prepare: prepareCaseOkDefault,
+ },
+ "success all queries enabled": {
+ wantFail: false,
+ prepare: prepareCaseOkDefault,
+ },
+ "fails on unexpected json response": {
+ wantFail: true,
+ prepare: prepareCaseUnexpectedJsonResponse,
+ },
+ "fails on invalid format response": {
+ wantFail: true,
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ipfs, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, ipfs.Check())
+ } else {
+ assert.NoError(t, ipfs.Check())
+ }
+ })
+ }
+}
+
+func TestIPFS_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (*IPFS, func())
+ wantMetrics map[string]int64
+ }{
+ "success default config": {
+ prepare: prepareCaseOkDefault,
+ wantMetrics: map[string]int64{
+ "in": 20113594,
+ "out": 3113852,
+ "peers": 6,
+ },
+ },
+ "success all queries enabled": {
+ prepare: prepareCaseOkAllQueriesEnabled,
+ wantMetrics: map[string]int64{
+ "in": 20113594,
+ "objects": 1,
+ "out": 3113852,
+ "peers": 6,
+ "pinned": 1,
+ "recursive_pins": 1,
+ "size": 25495,
+ "used_percent": 0,
+ },
+ },
+ "fails on unexpected json response": {
+ prepare: prepareCaseUnexpectedJsonResponse,
+ },
+ "fails on invalid format response": {
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ipfs, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := ipfs.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ testMetricsHasAllChartsDims(t, ipfs, mx)
+ }
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, ipfs *IPFS, mx map[string]int64) {
+ for _, chart := range *ipfs.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareCaseOkDefault(t *testing.T) (*IPFS, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathStatsBandwidth:
+ _, _ = w.Write(apiv0StatsBwData)
+ case urlPathStatsRepo:
+ _, _ = w.Write(apiv0StatsRepoData)
+ case urlPathSwarmPeers:
+ _, _ = w.Write(apiv0SwarmPeersData)
+ case urlPathPinLs:
+ _, _ = w.Write(apiv0PinLsData)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ ipfs := New()
+ ipfs.URL = srv.URL
+ require.NoError(t, ipfs.Init())
+
+ return ipfs, srv.Close
+}
+
+func prepareCaseOkAllQueriesEnabled(t *testing.T) (*IPFS, func()) {
+ t.Helper()
+ ipfs, cleanup := prepareCaseOkDefault(t)
+
+ ipfs.QueryRepoApi = true
+ ipfs.QueryPinApi = true
+
+ return ipfs, cleanup
+}
+
+func prepareCaseUnexpectedJsonResponse(t *testing.T) (*IPFS, func()) {
+ t.Helper()
+ resp := `
+{
+ "elephant": {
+ "burn": false,
+ "mountain": true,
+ "fog": false,
+ "skin": -1561907625,
+ "burst": "anyway",
+ "shadow": 1558616893
+ },
+ "start": "ever",
+ "base": 2093056027,
+ "mission": -2007590351,
+ "victory": 999053756,
+ "die": false
+}
+`
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(resp))
+ }))
+
+ ipfs := New()
+ ipfs.URL = srv.URL
+ require.NoError(t, ipfs.Init())
+
+ return ipfs, srv.Close
+}
+
+func prepareCaseInvalidFormatResponse(t *testing.T) (*IPFS, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ ipfs := New()
+ ipfs.URL = srv.URL
+ require.NoError(t, ipfs.Init())
+
+ return ipfs, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*IPFS, func()) {
+ t.Helper()
+ ipfs := New()
+ ipfs.URL = "http://127.0.0.1:65001"
+ require.NoError(t, ipfs.Init())
+
+ return ipfs, func() {}
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/metadata.yaml b/src/go/plugin/go.d/modules/ipfs/metadata.yaml
new file mode 100644
index 000000000..a37935785
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/metadata.yaml
@@ -0,0 +1,224 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-ipfs
+ plugin_name: go.d.plugin
+ module_name: ipfs
+ monitored_instance:
+ name: IPFS
+ link: "https://ipfs.tech/"
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ icon_filename: "ipfs.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - ipfs
+ - filesystem
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "This collector monitors IPFS daemon health and network activity."
+ method_description: |
+ It uses [RPC API](https://docs.ipfs.tech/reference/kubo/rpc/) to collect metrics.
+
+ Used endpoints:
+
+ - [/api/v0/stats/bw](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-bw)
+ - [/api/v0/swarm/peers](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-swarm-peers)
+ - [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-stats-repo)
+ - [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls)
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects IPFS instances running on localhost that are listening on port 5001.
+ limits:
+ description: ""
+ performance_impact:
+ description: |
+ Calls to the following endpoints are disabled by default due to IPFS bugs:
+
+ - /api/v0/stats/repo ([#7528](https://github.com/ipfs/go-ipfs/issues/7528)).
+ - /api/v0/pin/ls ([#3874](https://github.com/ipfs/go-ipfs/issues/3874)).
+
+ **Disabled by default** due to potential high CPU usage. Consider enabling only if necessary.
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/ipfs.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: repoapi
+ description: Enables querying the [/api/v0/stats/repo](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-repo-stat) endpoint for repository statistics.
+ default_value: false
+ required: false
+ - name: pinapi
+ description: Enables querying the [/api/v0/pin/ls](https://docs.ipfs.tech/reference/kubo/rpc/#api-v0-pin-ls) endpoint to retrieve a list of all pinned objects.
+ default_value: false
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:5001
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: POST
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:5001
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:5001
+
+ - name: remote
+ url: http://192.0.2.1:5001
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: ipfs_datastore_usage
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf
+ metric: ipfs.datastore_space_utilization
+ info: IPFS datastore utilization
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: ipfs.bandwidth
+ description: IPFS Bandwidth
+ unit: "bytes/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: ipfs.peers
+ description: IPFS Peers
+ unit: "peers"
+ chart_type: line
+ dimensions:
+ - name: peers
+ - name: ipfs.datastore_space_utilization
+ description: IPFS Datastore Space Utilization
+ unit: "percent"
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: ipfs.repo_size
+ description: IPFS Repo Size
+ unit: "bytes"
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: ipfs.repo_objects
+ description: IPFS Repo Objects
+ unit: "objects"
+ chart_type: line
+ dimensions:
+ - name: objects
+ - name: ipfs.repo_pinned_objects
+ description: IPFS Repo Pinned Objects
+ unit: "objects"
+ chart_type: line
+ dimensions:
+ - name: pinned
+ - name: recursive_pins
diff --git a/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_pin_ls.json b/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_pin_ls.json
new file mode 100644
index 000000000..b1d4d0192
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_pin_ls.json
@@ -0,0 +1,8 @@
+{
+ "Keys": {
+ "k1i2m3c4h5i6key": {
+ "Type": "recursive",
+ "Name": ""
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_stats_bw.json b/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_stats_bw.json
new file mode 100644
index 000000000..366cb8a2b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_stats_bw.json
@@ -0,0 +1,6 @@
+{
+ "TotalIn": 20113594,
+ "TotalOut": 3113852,
+ "RateIn": 1623.2181369394084,
+ "RateOut": 0.13743234792898051
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_stats_repo.json b/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_stats_repo.json
new file mode 100644
index 000000000..247fb29fa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_stats_repo.json
@@ -0,0 +1,7 @@
+{
+ "RepoSize": 25495,
+ "StorageMax": 10000000000,
+ "NumObjects": 1,
+ "RepoPath": "/home/fotis/.ipfs",
+ "Version": "fs-repo@15"
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_swarm_peers.json b/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_swarm_peers.json
new file mode 100644
index 000000000..8f8386e53
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/testdata/api_v0_swarm_peers.json
@@ -0,0 +1,70 @@
+{
+ "Peers": [
+ {
+ "Addr": "/ip4/1/tcp/27963",
+ "Peer": "a",
+ "Identify": {
+ "ID": "",
+ "PublicKey": "",
+ "Addresses": null,
+ "AgentVersion": "",
+ "Protocols": null
+ }
+ },
+ {
+ "Addr": "/ip4/1/udp/4001/quic-v1",
+ "Peer": "b",
+ "Identify": {
+ "ID": "",
+ "PublicKey": "",
+ "Addresses": null,
+ "AgentVersion": "",
+ "Protocols": null
+ }
+ },
+ {
+ "Addr": "/ip4/1/udp/4001/quic-v1/p2p/12D3KooWCqocoHdBANn2hH5acYAU4NdjEeBqERYk1MMTX49s1syY/p2p-circuit",
+ "Peer": "c",
+ "Identify": {
+ "ID": "",
+ "PublicKey": "",
+ "Addresses": null,
+ "AgentVersion": "",
+ "Protocols": null
+ }
+ },
+ {
+ "Addr": "/ip4/1/tcp/4001",
+ "Peer": "c",
+ "Identify": {
+ "ID": "",
+ "PublicKey": "",
+ "Addresses": null,
+ "AgentVersion": "",
+ "Protocols": null
+ }
+ },
+ {
+ "Addr": "/ip4/1/udp/33556/quic-v1",
+ "Peer": "e",
+ "Identify": {
+ "ID": "",
+ "PublicKey": "",
+ "Addresses": null,
+ "AgentVersion": "",
+ "Protocols": null
+ }
+ },
+ {
+ "Addr": "/ip6/1::1/udp/4001/quic-v1",
+ "Peer": "f",
+ "Identify": {
+ "ID": "",
+ "PublicKey": "",
+ "Addresses": null,
+ "AgentVersion": "",
+ "Protocols": null
+ }
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/testdata/config.json b/src/go/plugin/go.d/modules/ipfs/testdata/config.json
new file mode 100644
index 000000000..b99928ca6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/testdata/config.json
@@ -0,0 +1,22 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "pinapi": false,
+ "repoapi": false
+}
diff --git a/src/go/plugin/go.d/modules/ipfs/testdata/config.yaml b/src/go/plugin/go.d/modules/ipfs/testdata/config.yaml
new file mode 100644
index 000000000..271695e64
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ipfs/testdata/config.yaml
@@ -0,0 +1,19 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+pinapi: no
+repoapi: no
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/README.md b/src/go/plugin/go.d/modules/isc_dhcpd/README.md
new file mode 120000
index 000000000..3385a00a4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/README.md
@@ -0,0 +1 @@
+integrations/isc_dhcp.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/charts.go b/src/go/plugin/go.d/modules/isc_dhcpd/charts.go
new file mode 100644
index 000000000..a8b3581ea
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/charts.go
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package isc_dhcpd
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioLeasesTotal = module.Priority + iota
+
+ prioDHCPPoolUtilization
+ prioDHCPPoolActiveLeases
+)
+
+var activeLeasesTotalChart = module.Chart{
+ ID: "active_leases_total",
+ Title: "Active Leases Total",
+ Units: "leases",
+ Fam: "summary",
+ Ctx: "isc_dhcpd.active_leases_total",
+ Priority: prioLeasesTotal,
+ Dims: module.Dims{
+ {ID: "active_leases_total", Name: "active"},
+ },
+}
+
+var dhcpPoolChartsTmpl = module.Charts{
+ dhcpPoolActiveLeasesChartTmpl.Copy(),
+ dhcpPoolUtilizationChartTmpl.Copy(),
+}
+
+var (
+ dhcpPoolUtilizationChartTmpl = module.Chart{
+ ID: "dhcp_pool_%s_utilization",
+ Title: "DHCP Pool Utilization",
+ Units: "percent",
+ Fam: "pools",
+ Ctx: "isc_dhcpd.dhcp_pool_utilization",
+ Priority: prioDHCPPoolUtilization,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "dhcp_pool_%s_utilization", Name: "utilization"},
+ },
+ }
+ dhcpPoolActiveLeasesChartTmpl = module.Chart{
+ ID: "dhcp_pool_%s_active_leases",
+ Title: "DHCP Pool Active Leases",
+ Units: "leases",
+ Fam: "pools",
+ Ctx: "isc_dhcpd.dhcp_pool_active_leases",
+ Priority: prioDHCPPoolActiveLeases,
+ Dims: module.Dims{
+ {ID: "dhcp_pool_%s_active_leases", Name: "active"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/collect.go b/src/go/plugin/go.d/modules/isc_dhcpd/collect.go
new file mode 100644
index 000000000..08716a108
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/collect.go
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package isc_dhcpd
+
+import (
+ "os"
+)
+
+/*
+dhcpd.leases db (file), see details: https://kb.isc.org/docs/en/isc-dhcp-44-manual-pages-dhcpdleases#dhcpdleases
+
+Every time a lease is acquired, renewed or released, its new value is recorded at the end of the lease file.
+So if more than one declaration appears for a given lease, the last one in the file is the current one.
+
+In order to prevent the lease database from growing without bound, the file is rewritten from time to time.
+First, a temporary lease database is created and all known leases are dumped to it.
+Then, the old lease database is renamed DBDIR/dhcpd.leases~.
+Finally, the newly written lease database is moved into place.
+
+In order to process both DHCPv4 and DHCPv6 messages you will need to run two separate instances of the dhcpd process.
+Each of these instances will need its own lease file.
+*/
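+
+// For reference, a DHCPv4 lease declaration in dhcpd.leases looks like the entry below
+// (abridged from testdata/dhcpd.leases_ipv4; only the "lease <ip>" and "binding state"
+// lines are used by this collector):
+//
+//	lease 192.168.3.11 {
+//	  starts 6 2020/10/03 07:52:36;
+//	  ends 6 2020/10/03 09:32:36;
+//	  binding state active;
+//	  hardware ethernet 60:a4:4c:3f:6e:78;
+//	}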
+
+func (d *DHCPd) collect() (map[string]int64, error) {
+ fi, err := os.Stat(d.LeasesPath)
+ if err != nil {
+ return nil, err
+ }
+
+ if d.leasesModTime.Equal(fi.ModTime()) {
+ d.Debugf("leases file is not modified, returning cached metrics ('%s')", d.LeasesPath)
+ return d.collected, nil
+ }
+
+ d.leasesModTime = fi.ModTime()
+
+ leases, err := parseDHCPdLeasesFile(d.LeasesPath)
+ if err != nil {
+ return nil, err
+ }
+
+ activeLeases := removeInactiveLeases(leases)
+ d.Debugf("found total/active %d/%d leases ('%s')", len(leases), len(activeLeases), d.LeasesPath)
+
+ for _, pool := range d.pools {
+ collectPool(d.collected, pool, activeLeases)
+ }
+ d.collected["active_leases_total"] = int64(len(activeLeases))
+
+ return d.collected, nil
+}
+
+// precision preserves two decimal places when pool utilization percentages are stored as int64.
+const precision = 100
+
+func collectPool(collected map[string]int64, pool ipPool, leases []leaseEntry) {
+ n := calcPoolActiveLeases(pool, leases)
+ collected["dhcp_pool_"+pool.name+"_active_leases"] = n
+ collected["dhcp_pool_"+pool.name+"_utilization"] = int64(calcPoolUtilizationPercentage(pool, n) * precision)
+}
+
+func calcPoolActiveLeases(pool ipPool, leases []leaseEntry) (num int64) {
+ for _, l := range leases {
+ if pool.addresses.Contains(l.ip) {
+ num++
+ }
+ }
+ return num
+}
+
+func calcPoolUtilizationPercentage(pool ipPool, leases int64) float64 {
+ size := pool.addresses.Size()
+ if leases == 0 || !size.IsInt64() {
+ return 0
+ }
+ if size.Int64() == 0 {
+ return 100
+ }
+ return float64(leases) / float64(size.Int64()) * 100
+}
+
+func removeInactiveLeases(leases []leaseEntry) (active []leaseEntry) {
+ active = leases[:0]
+ for _, l := range leases {
+ if l.bindingState == "active" {
+ active = append(active, l)
+ }
+ }
+ return active
+}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/config_schema.json b/src/go/plugin/go.d/modules/isc_dhcpd/config_schema.json
new file mode 100644
index 000000000..a34e79c70
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/config_schema.json
@@ -0,0 +1,70 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ISC DHCP collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "leases_path": {
+ "title": "Leases file",
+ "description": "File path to the ISC DHCP server's lease database.",
+ "type": "string",
+ "default": "/var/lib/dhcp/dhcpd.leases",
+ "pattern": "^$|^/"
+ },
+ "pools": {
+ "title": "IP pools",
+ "description": "A list of IP pools to monitor. Each pool consists of a descriptive name and corresponding IP ranges.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "IP pool",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "name": {
+ "title": "Name",
+ "description": "A descriptive name for the IP pool.",
+ "type": "string"
+ },
+ "networks": {
+ "title": "Networks",
+ "description": "A space-separated list of [IP ranges](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/iprange#supported-formats) for the pool.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "networks"
+ ]
+ },
+ "minItems": 1,
+ "uniqueItems": true,
+ "additionalItems": false
+ }
+ },
+ "required": [
+ "leases_path",
+ "pools"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/init.go b/src/go/plugin/go.d/modules/isc_dhcpd/init.go
new file mode 100644
index 000000000..d103a223c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/init.go
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package isc_dhcpd
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/iprange"
+)
+
+type ipPool struct {
+ name string
+ addresses iprange.Pool
+}
+
+func (d *DHCPd) validateConfig() error {
+ if d.Config.LeasesPath == "" {
+ return errors.New("'lease_path' parameter not set")
+ }
+ if len(d.Config.Pools) == 0 {
+ return errors.New("'pools' parameter not set")
+ }
+ for i, cfg := range d.Config.Pools {
+ if cfg.Name == "" {
+ return fmt.Errorf("'pools[%d]->pool.name' parameter not set", i+1)
+ }
+ if cfg.Networks == "" {
+ return fmt.Errorf("'pools[%d]->pool.networks' parameter not set", i+1)
+ }
+ }
+ return nil
+}
+
+func (d *DHCPd) initPools() ([]ipPool, error) {
+ var pools []ipPool
+
+ for i, cfg := range d.Pools {
+ ipRange, err := iprange.ParseRanges(cfg.Networks)
+ if err != nil {
+ return nil, fmt.Errorf("parse pools[%d]->pool.networks '%s' ('%s'): %v", i+1, cfg.Name, cfg.Networks, err)
+ }
+ if len(ipRange) == 0 {
+ continue
+ }
+
+ pool := ipPool{name: cfg.Name, addresses: ipRange}
+ pools = append(pools, pool)
+ }
+
+ return pools, nil
+}
+
+func (d *DHCPd) initCharts(pools []ipPool) (*module.Charts, error) {
+ charts := &module.Charts{}
+
+ if err := charts.Add(activeLeasesTotalChart.Copy()); err != nil {
+ return nil, err
+ }
+
+ for _, pool := range pools {
+ poolCharts := dhcpPoolChartsTmpl.Copy()
+
+ for _, chart := range *poolCharts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanPoolNameForChart(pool.name))
+ chart.Labels = []module.Label{
+ {Key: "dhcp_pool_name", Value: pool.name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, pool.name)
+ }
+ }
+
+ if err := charts.Add(*poolCharts...); err != nil {
+ return nil, err
+ }
+ }
+
+ return charts, nil
+}
+
+func cleanPoolNameForChart(name string) string {
+ name = strings.ReplaceAll(name, " ", "_")
+ name = strings.ReplaceAll(name, ".", "_")
+ return name
+}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/integrations/isc_dhcp.md b/src/go/plugin/go.d/modules/isc_dhcpd/integrations/isc_dhcp.md
new file mode 100644
index 000000000..4607c1a5a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/integrations/isc_dhcp.md
@@ -0,0 +1,228 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/isc_dhcpd/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml"
+sidebar_label: "ISC DHCP"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ISC DHCP
+
+
+<img src="https://netdata.cloud/img/isc.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: isc_dhcpd
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors ISC DHCP lease usage by reading the DHCP server's lease database (dhcpd.leases).
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ISC DHCP instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| isc_dhcpd.active_leases_total | active | leases |
+
+### Per DHCP pool
+
+These metrics refer to the DHCP pool.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| dhcp_pool_name | The DHCP pool name defined in the collector configuration. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| isc_dhcpd.dhcp_pool_utilization | utilization | percent |
+| isc_dhcpd.dhcp_pool_active_leases | active | leases |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/isc_dhcpd.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/isc_dhcpd.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| leases_path | Path to the DHCP server's lease database. | /var/lib/dhcp/dhcpd.leases | no |
+| pools | List of IP pools to monitor. | | yes |
+
+##### pools
+
+List of IP pools to monitor.
+
+- IP range syntax: see [supported formats](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/iprange#supported-formats).
+- Syntax:
+
+```yaml
+pools:
+ - name: "POOL_NAME1"
+ networks: "SPACE SEPARATED LIST OF IP RANGES"
+ - name: "POOL_NAME2"
+ networks: "SPACE SEPARATED LIST OF IP RANGES"
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ pools:
+ - name: lan
+ networks: "192.168.0.0/24 192.168.1.0/24 192.168.2.0/24"
+ - name: wifi
+ networks: "10.0.0.0/24"
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `isc_dhcpd` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m isc_dhcpd
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `isc_dhcpd` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep isc_dhcpd
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep isc_dhcpd /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep isc_dhcpd
+```
+
+
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd.go b/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd.go
new file mode 100644
index 000000000..1733cb221
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd.go
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package isc_dhcpd
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("isc_dhcpd", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 1,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *DHCPd {
+ return &DHCPd{
+ Config: Config{
+ LeasesPath: "/var/lib/dhcp/dhcpd.leases",
+ },
+
+ collected: make(map[string]int64),
+ }
+}
+
+type (
+ Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ LeasesPath string `yaml:"leases_path" json:"leases_path"`
+ // TODO: parse config file to extract configured pools
+ Pools []PoolConfig `yaml:"pools" json:"pools"`
+ }
+ PoolConfig struct {
+ Name string `yaml:"name" json:"name"`
+ Networks string `yaml:"networks" json:"networks"`
+ }
+)
+
+type DHCPd struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ pools []ipPool
+ leasesModTime time.Time
+ collected map[string]int64
+}
+
+func (d *DHCPd) Configuration() any {
+ return d.Config
+}
+
+func (d *DHCPd) Init() error {
+ err := d.validateConfig()
+ if err != nil {
+ d.Errorf("config validation: %v", err)
+ return err
+ }
+
+ pools, err := d.initPools()
+ if err != nil {
+ d.Errorf("ip pools init: %v", err)
+ return err
+ }
+ d.pools = pools
+
+ charts, err := d.initCharts(pools)
+ if err != nil {
+ d.Errorf("charts init: %v", err)
+ return err
+ }
+ d.charts = charts
+
+ d.Debugf("monitoring leases file: %v", d.Config.LeasesPath)
+ d.Debugf("monitoring ip pools: %v", d.Config.Pools)
+
+ return nil
+}
+
+func (d *DHCPd) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (d *DHCPd) Charts() *module.Charts {
+ return d.charts
+}
+
+func (d *DHCPd) Collect() map[string]int64 {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (d *DHCPd) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd_test.go b/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd_test.go
new file mode 100644
index 000000000..24540ea2f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/isc_dhcpd_test.go
@@ -0,0 +1,345 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package isc_dhcpd
+
+import (
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDHCPd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DHCPd{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestDHCPd_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestDHCPd_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default": {
+ wantFail: true,
+ config: New().Config,
+ },
+ "'leases_path' not set": {
+ wantFail: true,
+ config: Config{
+ LeasesPath: "",
+ Pools: []PoolConfig{
+ {Name: "test", Networks: "10.220.252.0/24"},
+ },
+ },
+ },
+ "'pools' not set": {
+ wantFail: true,
+ config: Config{
+ LeasesPath: "testdata/dhcpd.leases_ipv4",
+ },
+ },
+ "'pools->pool.networks' invalid syntax": {
+ wantFail: true,
+ config: Config{
+ LeasesPath: "testdata/dhcpd.leases_ipv4",
+ Pools: []PoolConfig{
+ {Name: "test", Networks: "10.220.252./24"},
+ },
+ }},
+ "ok config ('leases_path' and 'pools' are set)": {
+ config: Config{
+ LeasesPath: "testdata/dhcpd.leases_ipv4",
+ Pools: []PoolConfig{
+ {Name: "test", Networks: "10.220.252.0/24"},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dhcpd := New()
+ dhcpd.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, dhcpd.Init())
+ } else {
+ assert.NoError(t, dhcpd.Init())
+ }
+ })
+ }
+}
+
+func TestDHCPd_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *DHCPd
+ wantFail bool
+ }{
+ "lease db not exists": {prepare: prepareDHCPdLeasesNotExists, wantFail: true},
+ "lease db is an empty file": {prepare: prepareDHCPdLeasesEmpty},
+ "lease db ipv4": {prepare: prepareDHCPdLeasesIPv4},
+ "lease db ipv4 with only inactive leases": {prepare: prepareDHCPdLeasesIPv4Inactive},
+ "lease db ipv4 with backup leases": {prepare: prepareDHCPdLeasesIPv4Backup},
+ "lease db ipv6": {prepare: prepareDHCPdLeasesIPv6},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dhcpd := test.prepare()
+ require.NoError(t, dhcpd.Init())
+
+ if test.wantFail {
+ assert.Error(t, dhcpd.Check())
+ } else {
+ assert.NoError(t, dhcpd.Check())
+ }
+ })
+ }
+}
+
+func TestDHCPd_Charts(t *testing.T) {
+ dhcpd := New()
+ dhcpd.LeasesPath = "leases_path"
+ dhcpd.Pools = []PoolConfig{
+ {Name: "name", Networks: "192.0.2.0/24"},
+ }
+ require.NoError(t, dhcpd.Init())
+
+ assert.NotNil(t, dhcpd.Charts())
+}
+
+func TestDHCPd_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *DHCPd
+ wantCollected map[string]int64
+ }{
+ "lease db not exists": {
+ prepare: prepareDHCPdLeasesNotExists,
+ wantCollected: nil,
+ },
+ "lease db is an empty file": {
+ prepare: prepareDHCPdLeasesEmpty,
+ wantCollected: map[string]int64{
+ "active_leases_total": 0,
+ "dhcp_pool_net1_active_leases": 0,
+ "dhcp_pool_net1_utilization": 0,
+ "dhcp_pool_net2_active_leases": 0,
+ "dhcp_pool_net2_utilization": 0,
+ "dhcp_pool_net3_active_leases": 0,
+ "dhcp_pool_net3_utilization": 0,
+ "dhcp_pool_net4_active_leases": 0,
+ "dhcp_pool_net4_utilization": 0,
+ "dhcp_pool_net5_active_leases": 0,
+ "dhcp_pool_net5_utilization": 0,
+ "dhcp_pool_net6_active_leases": 0,
+ "dhcp_pool_net6_utilization": 0,
+ },
+ },
+ "lease db ipv4": {
+ prepare: prepareDHCPdLeasesIPv4,
+ wantCollected: map[string]int64{
+ "active_leases_total": 5,
+ "dhcp_pool_net1_active_leases": 2,
+ "dhcp_pool_net1_utilization": 158,
+ "dhcp_pool_net2_active_leases": 1,
+ "dhcp_pool_net2_utilization": 39,
+ "dhcp_pool_net3_active_leases": 0,
+ "dhcp_pool_net3_utilization": 0,
+ "dhcp_pool_net4_active_leases": 1,
+ "dhcp_pool_net4_utilization": 79,
+ "dhcp_pool_net5_active_leases": 0,
+ "dhcp_pool_net5_utilization": 0,
+ "dhcp_pool_net6_active_leases": 1,
+ "dhcp_pool_net6_utilization": 39,
+ },
+ },
+ "lease db ipv4 with only inactive leases": {
+ prepare: prepareDHCPdLeasesIPv4Inactive,
+ wantCollected: map[string]int64{
+ "active_leases_total": 0,
+ "dhcp_pool_net1_active_leases": 0,
+ "dhcp_pool_net1_utilization": 0,
+ "dhcp_pool_net2_active_leases": 0,
+ "dhcp_pool_net2_utilization": 0,
+ "dhcp_pool_net3_active_leases": 0,
+ "dhcp_pool_net3_utilization": 0,
+ "dhcp_pool_net4_active_leases": 0,
+ "dhcp_pool_net4_utilization": 0,
+ "dhcp_pool_net5_active_leases": 0,
+ "dhcp_pool_net5_utilization": 0,
+ "dhcp_pool_net6_active_leases": 0,
+ "dhcp_pool_net6_utilization": 0,
+ },
+ },
+ "lease db ipv4 with backup leases": {
+ prepare: prepareDHCPdLeasesIPv4Backup,
+ wantCollected: map[string]int64{
+ "active_leases_total": 2,
+ "dhcp_pool_net1_active_leases": 1,
+ "dhcp_pool_net1_utilization": 79,
+ "dhcp_pool_net2_active_leases": 0,
+ "dhcp_pool_net2_utilization": 0,
+ "dhcp_pool_net3_active_leases": 0,
+ "dhcp_pool_net3_utilization": 0,
+ "dhcp_pool_net4_active_leases": 1,
+ "dhcp_pool_net4_utilization": 79,
+ "dhcp_pool_net5_active_leases": 0,
+ "dhcp_pool_net5_utilization": 0,
+ "dhcp_pool_net6_active_leases": 0,
+ "dhcp_pool_net6_utilization": 0,
+ },
+ },
+ "lease db ipv6": {
+ prepare: prepareDHCPdLeasesIPv6,
+ wantCollected: map[string]int64{
+ "active_leases_total": 6,
+ "dhcp_pool_net1_active_leases": 6,
+ "dhcp_pool_net1_utilization": 5454,
+ "dhcp_pool_net2_active_leases": 0,
+ "dhcp_pool_net2_utilization": 0,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ dhcpd := test.prepare()
+ require.NoError(t, dhcpd.Init())
+
+ collected := dhcpd.Collect()
+
+ assert.Equal(t, test.wantCollected, collected)
+ if len(collected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, dhcpd, collected)
+ }
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, dhcpd *DHCPd, collected map[string]int64) {
+ for _, chart := range *dhcpd.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareDHCPdLeasesNotExists() *DHCPd {
+ dhcpd := New()
+ dhcpd.Config = Config{
+ LeasesPath: "testdata/dhcpd.leases_not_exists",
+ Pools: []PoolConfig{
+ {Name: "net1", Networks: "192.168.3.0/25"},
+ {Name: "net2", Networks: "10.254.251.0/24"},
+ {Name: "net3", Networks: "10.254.252.0/24"},
+ {Name: "net4", Networks: "10.254.253.0/25"},
+ {Name: "net5", Networks: "10.254.254.0/25"},
+ {Name: "net6", Networks: "10.254.255.0/24"},
+ },
+ }
+ return dhcpd
+}
+
+func prepareDHCPdLeasesEmpty() *DHCPd {
+ dhcpd := New()
+ dhcpd.Config = Config{
+ LeasesPath: "testdata/dhcpd.leases_empty",
+ Pools: []PoolConfig{
+ {Name: "net1", Networks: "192.168.3.0/25"},
+ {Name: "net2", Networks: "10.254.251.0/24"},
+ {Name: "net3", Networks: "10.254.252.0/24"},
+ {Name: "net4", Networks: "10.254.253.0/25"},
+ {Name: "net5", Networks: "10.254.254.0/25"},
+ {Name: "net6", Networks: "10.254.255.0/24"},
+ },
+ }
+ return dhcpd
+}
+
+func prepareDHCPdLeasesIPv4() *DHCPd {
+ dhcpd := New()
+ dhcpd.Config = Config{
+ LeasesPath: "testdata/dhcpd.leases_ipv4",
+ Pools: []PoolConfig{
+ {Name: "net1", Networks: "192.168.3.0/25"},
+ {Name: "net2", Networks: "10.254.251.0/24"},
+ {Name: "net3", Networks: "10.254.252.0/24"},
+ {Name: "net4", Networks: "10.254.253.0/25"},
+ {Name: "net5", Networks: "10.254.254.0/25"},
+ {Name: "net6", Networks: "10.254.255.0/24"},
+ },
+ }
+ return dhcpd
+}
+
+func prepareDHCPdLeasesIPv4Backup() *DHCPd {
+ dhcpd := New()
+ dhcpd.Config = Config{
+ LeasesPath: "testdata/dhcpd.leases_ipv4_backup",
+ Pools: []PoolConfig{
+ {Name: "net1", Networks: "192.168.3.0/25"},
+ {Name: "net2", Networks: "10.254.251.0/24"},
+ {Name: "net3", Networks: "10.254.252.0/24"},
+ {Name: "net4", Networks: "10.254.253.0/25"},
+ {Name: "net5", Networks: "10.254.254.0/25"},
+ {Name: "net6", Networks: "10.254.255.0/24"},
+ },
+ }
+ return dhcpd
+}
+
+func prepareDHCPdLeasesIPv4Inactive() *DHCPd {
+ dhcpd := New()
+ dhcpd.Config = Config{
+ LeasesPath: "testdata/dhcpd.leases_ipv4_inactive",
+ Pools: []PoolConfig{
+ {Name: "net1", Networks: "192.168.3.0/25"},
+ {Name: "net2", Networks: "10.254.251.0/24"},
+ {Name: "net3", Networks: "10.254.252.0/24"},
+ {Name: "net4", Networks: "10.254.253.0/25"},
+ {Name: "net5", Networks: "10.254.254.0/25"},
+ {Name: "net6", Networks: "10.254.255.0/24"},
+ },
+ }
+ return dhcpd
+}
+
+func prepareDHCPdLeasesIPv6() *DHCPd {
+ dhcpd := New()
+ dhcpd.Config = Config{
+ LeasesPath: "testdata/dhcpd.leases_ipv6",
+ Pools: []PoolConfig{
+ {Name: "net1", Networks: "2001:db8::-2001:db8::a"},
+ {Name: "net2", Networks: "2001:db8:0:1::-2001:db8:0:1::a"},
+ },
+ }
+ return dhcpd
+}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml b/src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml
new file mode 100644
index 000000000..09eee81d0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/metadata.yaml
@@ -0,0 +1,135 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-isc_dhcpd
+ plugin_name: go.d.plugin
+ module_name: isc_dhcpd
+ monitored_instance:
+ name: ISC DHCP
+ link: https://www.isc.org/dhcp/
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ icon_filename: isc.png
+ keywords:
+ - dhcpd
+ - dhcp
+ most_popular: false
+ info_provided_to_referring_integrations:
+ description: ""
+ related_resources:
+ integrations:
+ list: []
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors ISC DHCP lease usage by reading the DHCP server's lease database (dhcpd.leases).
+ method_description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/isc_dhcpd.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: leases_path
+ description: Path to the DHCP server's lease database.
+ default_value: /var/lib/dhcp/dhcpd.leases
+ required: false
+ - name: pools
+ description: List of IP pools to monitor.
+ default_value: ""
+ required: true
+ detailed_description: |
+ List of IP pools to monitor.
+
+ - IP range syntax: see [supported formats](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/iprange#supported-formats).
+ - Syntax:
+
+ ```yaml
+ pools:
+ - name: "POOL_NAME1"
+ networks: "SPACE SEPARATED LIST OF IP RANGES"
+ - name: "POOL_NAME2"
+ networks: "SPACE SEPARATED LIST OF IP RANGES"
+ ```
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ pools:
+ - name: lan
+ networks: "192.168.0.0/24 192.168.1.0/24 192.168.2.0/24"
+ - name: wifi
+ networks: "10.0.0.0/24"
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: isc_dhcpd.active_leases_total
+ description: Active Leases Total
+ unit: leases
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: global
+ description: These metrics refer to the DHCP pool.
+ labels:
+ - name: dhcp_pool_name
+ description: The DHCP pool name defined in the collector configuration.
+ metrics:
+ - name: isc_dhcpd.dhcp_pool_utilization
+ description: DHCP Pool Utilization
+ unit: percent
+ chart_type: area
+ dimensions:
+ - name: utilization
+ - name: isc_dhcpd.dhcp_pool_active_leases
+ description: DHCP Pool Active Leases
+ unit: leases
+ chart_type: line
+ dimensions:
+ - name: active
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/parse.go b/src/go/plugin/go.d/modules/isc_dhcpd/parse.go
new file mode 100644
index 000000000..cb4161745
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/parse.go
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package isc_dhcpd
+
+import (
+ "bufio"
+ "bytes"
+ "net"
+ "os"
+)
+
+/*
+Documentation (v4.4): https://kb.isc.org/docs/en/isc-dhcp-44-manual-pages-dhcpdleases
+
+DHCPv4 lease declaration:
+ lease ip-address {
+ statements...
+ }
+
+DHCPv6 lease declaration:
+ ia_ta IAID_DUID {
+ cltt date;
+ iaaddr ipv6-address {
+ statements...
+ }
+ }
+ ia_na IAID_DUID {
+ cltt date;
+ iaaddr ipv6-address {
+ statements...
+ }
+ }
+ ia_pd IAID_DUID {
+ cltt date;
+ iaprefix ipv6-address/prefix-length {
+ statements...
+ }
+ }
+*/
+
+type leaseEntry struct {
+ ip net.IP
+ bindingState string
+}
+
+func (l leaseEntry) hasIP() bool { return l.ip != nil }
+func (l leaseEntry) hasBindingState() bool { return l.bindingState != "" }
+
+func parseDHCPdLeasesFile(filepath string) ([]leaseEntry, error) {
+ f, err := os.Open(filepath)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = f.Close() }()
+
+ leasesSet := make(map[string]leaseEntry)
+ l := leaseEntry{}
+ sc := bufio.NewScanner(f)
+
+ for sc.Scan() {
+ bs := bytes.TrimSpace(sc.Bytes())
+ switch {
+ case !l.hasIP() && bytes.HasPrefix(bs, []byte("lease")):
+ // "lease 192.168.0.1 {" => "192.168.0.1"
+ s := string(bs)
+ l.ip = net.ParseIP(s[6 : len(s)-2])
+ case !l.hasIP() && bytes.HasPrefix(bs, []byte("iaaddr")):
+ // "iaaddr 1985:470:1f0b:c9a::001 {" => "1985:470:1f0b:c9a::001"
+ s := string(bs)
+ l.ip = net.ParseIP(s[7 : len(s)-2])
+ case l.hasIP() && !l.hasBindingState() && bytes.HasPrefix(bs, []byte("binding state")):
+ // "binding state active;" => "active"
+ s := string(bs)
+ l.bindingState = s[14 : len(s)-1]
+ case bytes.HasPrefix(bs, []byte("}")):
+ if l.hasIP() && l.hasBindingState() {
+ leasesSet[l.ip.String()] = l
+ }
+ l = leaseEntry{}
+ }
+ }
+
+ if len(leasesSet) == 0 {
+ return nil, nil
+ }
+
+ leases := make([]leaseEntry, 0, len(leasesSet))
+ for _, l := range leasesSet {
+ leases = append(leases, l)
+ }
+ return leases, nil
+}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/testdata/config.json b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/config.json
new file mode 100644
index 000000000..945f8865e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/config.json
@@ -0,0 +1,10 @@
+{
+ "update_every": 123,
+ "leases_path": "ok",
+ "pools": [
+ {
+ "name": "ok",
+ "networks": "ok"
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/testdata/config.yaml b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/config.yaml
new file mode 100644
index 000000000..a33defc55
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+leases_path: "ok"
+pools:
+ - name: "ok"
+ networks: "ok"
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_empty b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_empty
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_empty
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4 b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4
new file mode 100644
index 000000000..08e0e3f20
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4
@@ -0,0 +1,370 @@
+# The format of this file is documented in the dhcpd.leases(5) manual page.
+# This lease file was written by isc-dhcp-4.3.1
+
+lease 10.254.252.2 {
+ starts 3 2014/07/23 07:32:16;
+ ends 3 2014/07/23 09:12:16;
+ tstp 3 2014/07/23 09:12:16;
+ cltt 3 2014/07/23 07:32:16;
+ binding state free;
+ hardware ethernet f0:de:f1:89:24:1f;
+ uid "\001\360\336\361\211$\037";
+}
+lease 10.254.252.3 {
+ starts 5 2014/11/28 05:49:01;
+ ends 5 2014/11/28 07:29:01;
+ tstp 5 2014/11/28 07:29:01;
+ cltt 5 2014/11/28 05:49:01;
+ binding state free;
+ hardware ethernet c0:4a:00:00:f5:fa;
+ uid "\001\300J\000\000\365\372";
+}
+lease 10.254.252.4 {
+ starts 5 2016/03/11 01:03:59;
+ ends 5 2016/03/11 02:33:20;
+ tstp 5 2016/03/11 02:33:20;
+ cltt 5 2016/03/11 01:12:33;
+ binding state free;
+ hardware ethernet 00:1c:c0:7a:38:3f;
+ uid "\001\000\034\300z8?";
+ set vendor-class-identifier = "MSFT 5.0";
+}
+lease 10.254.252.5 {
+ starts 1 2016/09/05 23:53:19;
+ ends 2 2016/09/06 01:33:19;
+ tstp 2 2016/09/06 01:33:19;
+ cltt 1 2016/09/05 23:53:19;
+ binding state free;
+ hardware ethernet 28:28:5d:65:30:ef;
+ uid "\001((]e0\357";
+}
+lease 10.254.252.6 {
+ starts 4 2016/09/29 01:41:23;
+ ends 4 2016/09/29 03:21:23;
+ tstp 4 2016/09/29 03:21:23;
+ cltt 4 2016/09/29 01:41:23;
+ binding state free;
+ hardware ethernet 04:bf:6d:94:1b:0d;
+ uid "\001\004\277m\224\033\015";
+}
+lease 10.254.252.7 {
+ starts 1 2016/10/03 08:23:14;
+ ends 1 2016/10/03 10:03:14;
+ tstp 1 2016/10/03 10:03:14;
+ cltt 1 2016/10/03 08:23:14;
+ binding state free;
+ hardware ethernet ec:22:80:f7:3f:44;
+ uid "\001\354\"\200\367?D";
+}
+lease 10.254.252.8 {
+ starts 5 2016/10/07 05:43:11;
+ ends 5 2016/10/07 05:58:31;
+ tstp 5 2016/10/07 05:58:31;
+ cltt 5 2016/10/07 05:43:11;
+ binding state free;
+ hardware ethernet 70:62:b8:bf:b5:b3;
+ uid "\001pb\270\277\265\263";
+}
+lease 192.168.3.15 {
+ starts 2 2019/01/08 06:29:58;
+ ends 2 2019/01/08 08:09:58;
+ tstp 2 2019/01/08 08:09:58;
+ cltt 2 2019/01/08 06:29:58;
+ binding state free;
+ hardware ethernet a8:f9:4b:20:99:9c;
+ uid "\001\250\371K \231\234";
+}
+lease 192.168.3.18 {
+ starts 2 2020/03/10 01:46:07;
+ ends 2 2020/03/10 03:22:21;
+ tstp 2 2020/03/10 03:22:21;
+ cltt 2 2020/03/10 01:46:08;
+ binding state free;
+ hardware ethernet 04:bf:6d:0d:e2:35;
+ uid "\001\004\277m\015\3425";
+ set vendor-class-identifier = "ndhcpc";
+}
+lease 192.168.3.11 {
+ starts 6 2020/10/03 07:52:36;
+ ends 6 2020/10/03 09:32:36;
+ cltt 6 2020/10/03 07:52:36;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 60:a4:4c:3f:6e:78;
+ uid "\001`\244L?nx";
+}
+lease 192.168.3.10 {
+ starts 6 2020/10/03 08:18:50;
+ ends 6 2020/10/03 09:58:50;
+ cltt 6 2020/10/03 08:18:50;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 70:62:b8:bf:b5:b3;
+ uid "\001pb\270\277\265\263";
+ set vendor-class-identifier = "dslforum.org";
+}
+lease 10.254.251.101 {
+ starts 0 2017/03/12 22:11:59;
+ ends 0 2017/03/12 23:51:58;
+ tstp 0 2017/03/12 23:51:58;
+ cltt 0 2017/03/12 22:11:59;
+ binding state free;
+ hardware ethernet b4:ce:f6:01:83:73;
+ set vendor-class-identifier = "dhcpcd-5.5.6";
+}
+lease 10.254.251.102 {
+ starts 5 2017/05/19 06:07:39;
+ ends 5 2017/05/19 07:47:39;
+ tstp 5 2017/05/19 07:47:39;
+ cltt 5 2017/05/19 06:07:39;
+ binding state free;
+ hardware ethernet 34:51:c9:4c:40:c9;
+ uid "\0014Q\311L@\311";
+}
+lease 10.254.251.103 {
+ starts 2 2018/04/24 13:18:00;
+ ends 2 2018/04/24 14:58:00;
+ tstp 2 2018/04/24 14:58:00;
+ cltt 2 2018/04/24 13:18:00;
+ binding state free;
+ hardware ethernet 70:8a:09:da:74:d0;
+ set vendor-class-identifier = "dhcpcd-5.5.6";
+}
+lease 10.254.251.104 {
+ starts 2 2018/04/24 12:54:27;
+ ends 3 2018/04/25 06:47:20;
+ tstp 3 2018/04/25 06:47:20;
+ cltt 2 2018/04/24 12:54:28;
+ binding state free;
+ hardware ethernet 78:a3:e4:e8:12:1f;
+ uid "\001x\243\344\350\022\037";
+}
+lease 10.254.251.100 {
+ starts 6 2020/10/03 07:58:45;
+ ends 6 2020/10/03 09:38:45;
+ cltt 6 2020/10/03 07:58:45;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 74:ea:3a:a6:a9:c7;
+ uid "\001t\352:\246\251\307";
+ set vendor-class-identifier = "MSFT 5.0";
+ client-hostname "TL-WR741N";
+}
+lease 10.254.255.104 {
+ starts 1 2017/07/10 09:35:24;
+ ends 1 2017/07/10 09:37:24;
+ tstp 1 2017/07/10 09:37:24;
+ cltt 1 2017/07/10 09:35:24;
+ binding state free;
+ hardware ethernet 50:85:69:11:b6:ff;
+ uid "\001P\205i\021\266\377";
+}
+lease 10.254.255.102 {
+ starts 3 2017/08/16 22:01:09;
+ ends 3 2017/08/16 23:41:09;
+ tstp 3 2017/08/16 23:41:09;
+ cltt 3 2017/08/16 22:01:09;
+ binding state free;
+ hardware ethernet c8:d3:a3:54:31:3a;
+ uid "\001\310\323\243T1:";
+}
+lease 10.254.255.103 {
+ starts 0 2018/12/16 00:54:07;
+ ends 0 2018/12/16 02:34:07;
+ tstp 0 2018/12/16 02:34:07;
+ cltt 0 2018/12/16 00:54:07;
+ binding state free;
+ hardware ethernet 08:c6:b3:01:e8:18;
+ uid "\001\010\306\263\001\350\030";
+ set vendor-class-identifier = "QTCH-QBR1041WUV2";
+}
+lease 10.254.255.100 {
+ starts 2 2018/12/18 09:21:24;
+ ends 2 2018/12/18 10:32:36;
+ tstp 2 2018/12/18 10:32:36;
+ cltt 2 2018/12/18 09:21:30;
+ binding state free;
+ hardware ethernet 70:62:b8:c3:51:a3;
+ uid "\001pb\270\303Q\243";
+}
+lease 10.254.255.105 {
+ starts 5 2019/03/22 07:42:55;
+ ends 5 2019/03/22 09:22:55;
+ tstp 5 2019/03/22 09:22:55;
+ cltt 5 2019/03/22 07:42:55;
+ binding state free;
+ hardware ethernet 58:d5:6e:95:88:30;
+ uid "\001X\325n\225\2100";
+ set vendor-class-identifier = "dslforum.org";
+}
+lease 10.254.255.101 {
+ starts 6 2020/10/03 07:29:24;
+ ends 6 2020/10/03 09:09:24;
+ cltt 6 2020/10/03 07:29:24;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 28:3b:82:58:f4:58;
+ uid "\001(;\202X\364X";
+ set vendor-class-identifier = "dslforum.org";
+}
+lease 10.254.253.104 {
+ starts 4 2018/03/15 12:01:12;
+ ends 4 2018/03/15 12:34:35;
+ tstp 4 2018/03/15 12:34:35;
+ cltt 4 2018/03/15 12:02:58;
+ binding state free;
+ hardware ethernet 50:64:2b:4f:fd:3d;
+ uid "\001Pd+O\375=";
+ set vendor-class-identifier = "udhcp 1.19.4";
+}
+lease 10.254.253.105 {
+ starts 4 2018/03/15 12:39:46;
+ ends 4 2018/03/15 14:17:39;
+ tstp 4 2018/03/15 14:17:39;
+ cltt 4 2018/03/15 12:39:47;
+ binding state free;
+ hardware ethernet 50:64:2b:4f:fd:3d;
+ set vendor-class-identifier = "udhcp 1.19.4";
+}
+lease 10.254.253.101 {
+ starts 5 2018/03/16 11:00:43;
+ ends 5 2018/03/16 12:40:15;
+ tstp 5 2018/03/16 12:40:15;
+ cltt 5 2018/03/16 11:00:43;
+ binding state free;
+ hardware ethernet d0:66:7b:8b:e5:ff;
+ uid "\001\320f{\213\345\377";
+ set vendor-class-identifier = "udhcp 1.14.3-VD Linux VDLinux.1.2.1.x";
+}
+lease 10.254.253.102 {
+ starts 5 2018/03/16 11:26:21;
+ ends 5 2018/03/16 13:06:21;
+ tstp 5 2018/03/16 13:06:21;
+ cltt 5 2018/03/16 11:26:21;
+ binding state free;
+ hardware ethernet 50:64:2b:4f:fd:3f;
+ uid "\001Pd+O\375?";
+}
+lease 10.254.253.100 {
+ starts 2 2018/08/21 05:48:43;
+ ends 2 2018/08/21 07:23:13;
+ tstp 2 2018/08/21 07:23:13;
+ cltt 2 2018/08/21 05:48:44;
+ binding state free;
+ hardware ethernet 20:cf:30:ef:8e:a4;
+ uid "\001 \3170\357\216\244";
+ set vendor-class-identifier = "udhcp 0.9.8-asus";
+}
+lease 10.254.253.103 {
+ starts 6 2020/10/03 08:07:02;
+ ends 6 2020/10/03 09:47:02;
+ cltt 6 2020/10/03 08:07:02;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 34:ce:00:03:08:57;
+ uid "\0014\316\000\003\010W";
+ set vendor-class-identifier = "udhcp 1.24.2";
+}
+lease 10.254.254.103 {
+ starts 3 2015/11/11 09:03:11;
+ ends 3 2015/11/11 09:05:11;
+ tstp 3 2015/11/11 09:05:11;
+ cltt 3 2015/11/11 09:03:11;
+ binding state free;
+ hardware ethernet 74:d0:2b:0e:9b:d6;
+}
+lease 10.254.254.104 {
+ starts 0 2017/12/03 15:57:29;
+ ends 0 2017/12/03 17:37:29;
+ tstp 0 2017/12/03 17:37:29;
+ cltt 0 2017/12/03 15:57:29;
+ binding state free;
+ hardware ethernet ac:22:0b:78:00:78;
+ uid "\377\3139\012\307\000\002\000\000\253\021(CC\252e\021\000\017";
+}
+lease 10.254.254.105 {
+ starts 2 2018/06/26 12:30:04;
+ ends 2 2018/06/26 13:09:10;
+ tstp 2 2018/06/26 13:09:10;
+ cltt 2 2018/06/26 12:30:04;
+ binding state free;
+ hardware ethernet cc:2d:e0:3f:bc:5c;
+ uid "\001\314-\340?\274\\";
+}
+lease 10.254.254.101 {
+ starts 3 2018/07/25 09:33:10;
+ ends 3 2018/07/25 11:13:10;
+ tstp 3 2018/07/25 11:13:10;
+ cltt 3 2018/07/25 09:33:10;
+ binding state free;
+ hardware ethernet 74:d0:2b:0e:9b:d6;
+ uid "\001t\320+\016\233\326";
+ set vendor-class-identifier = "MSFT 5.0";
+}
+lease 10.254.254.100 {
+ starts 2 2020/09/22 11:19:29;
+ ends 2 2020/09/22 11:21:29;
+ cltt 2 2020/09/22 11:19:29;
+ binding state free;
+ hardware ethernet 30:45:96:6a:f3:de;
+ uid "\0010E\226j\363\336";
+ client-hostname "Honor_7C-bb23201389a3c44";
+}
+lease 10.254.254.102 {
+ starts 2 2020/09/22 11:25:14;
+ ends 2 2020/09/22 11:27:14;
+ cltt 2 2020/09/22 11:25:14;
+ binding state free;
+ hardware ethernet c8:3d:dc:be:d2:cf;
+ uid "\001\310=\334\276\322\317";
+ client-hostname "Redmi7A-Redmi";
+}
+lease 10.254.255.101 {
+ starts 6 2020/10/03 08:19:24;
+ ends 6 2020/10/03 09:59:24;
+ cltt 6 2020/10/03 08:19:24;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 28:3b:82:58:f4:58;
+ uid "\001(;\202X\364X";
+ set vendor-class-identifier = "dslforum.org";
+}
+lease 10.254.251.100 {
+ starts 6 2020/10/03 08:48:45;
+ ends 6 2020/10/03 10:28:45;
+ cltt 6 2020/10/03 08:48:45;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 74:ea:3a:a6:a9:c7;
+ uid "\001t\352:\246\251\307";
+ set vendor-class-identifier = "MSFT 5.0";
+ client-hostname "TL-WR741N";
+}
+lease 10.254.253.103 {
+ starts 6 2020/10/03 08:57:02;
+ ends 6 2020/10/03 10:37:02;
+ cltt 6 2020/10/03 08:57:02;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 34:ce:00:03:08:57;
+ uid "\0014\316\000\003\010W";
+ set vendor-class-identifier = "udhcp 1.24.2";
+}
+lease 192.168.3.11 {
+ starts 6 2020/10/03 09:01:22;
+ ends 6 2020/10/03 10:41:22;
+ cltt 6 2020/10/03 09:01:22;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 60:a4:4c:3f:6e:78;
+ uid "\001`\244L?nx";
+}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_backup b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_backup
new file mode 100644
index 000000000..e822ca846
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_backup
@@ -0,0 +1,39 @@
+# The format of this file is documented in the dhcpd.leases(5) manual page.
+# This lease file was written by isc-dhcp-4.4.2
+
+# authoring-byte-order entry is generated, DO NOT DELETE
+authoring-byte-order little-endian;
+
+lease 10.254.253.103 {
+ starts 6 2020/10/03 08:57:02;
+ ends 6 2020/10/03 10:37:02;
+ cltt 6 2020/10/03 08:57:02;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 34:ce:00:03:08:57;
+ uid "\0014\316\000\003\010W";
+ set vendor-class-identifier = "udhcp 1.24.2";
+}
+lease 192.168.3.1 {
+ starts 6 2018/02/17 01:13:21;
+ tsfp 6 2018/02/17 01:13:21;
+ atsfp 6 2018/02/17 01:13:21;
+ binding state backup;
+}
+lease 192.168.3.11 {
+ starts 6 2020/10/03 09:01:22;
+ ends 6 2020/10/03 10:41:22;
+ cltt 6 2020/10/03 09:01:22;
+ binding state active;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 60:a4:4c:3f:6e:78;
+ uid "\001`\244L?nx";
+}
+lease 192.168.3.2 {
+ starts 6 2018/02/17 01:13:21;
+ tsfp 6 2018/02/17 01:13:21;
+ atsfp 6 2018/02/17 01:13:21;
+ binding state backup;
+}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_inactive b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_inactive
new file mode 100644
index 000000000..c5aed080f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv4_inactive
@@ -0,0 +1,370 @@
+# The format of this file is documented in the dhcpd.leases(5) manual page.
+# This lease file was written by isc-dhcp-4.3.1
+
+lease 10.254.252.2 {
+ starts 3 2014/07/23 07:32:16;
+ ends 3 2014/07/23 09:12:16;
+ tstp 3 2014/07/23 09:12:16;
+ cltt 3 2014/07/23 07:32:16;
+ binding state free;
+ hardware ethernet f0:de:f1:89:24:1f;
+ uid "\001\360\336\361\211$\037";
+}
+lease 10.254.252.3 {
+ starts 5 2014/11/28 05:49:01;
+ ends 5 2014/11/28 07:29:01;
+ tstp 5 2014/11/28 07:29:01;
+ cltt 5 2014/11/28 05:49:01;
+ binding state free;
+ hardware ethernet c0:4a:00:00:f5:fa;
+ uid "\001\300J\000\000\365\372";
+}
+lease 10.254.252.4 {
+ starts 5 2016/03/11 01:03:59;
+ ends 5 2016/03/11 02:33:20;
+ tstp 5 2016/03/11 02:33:20;
+ cltt 5 2016/03/11 01:12:33;
+ binding state free;
+ hardware ethernet 00:1c:c0:7a:38:3f;
+ uid "\001\000\034\300z8?";
+ set vendor-class-identifier = "MSFT 5.0";
+}
+lease 10.254.252.5 {
+ starts 1 2016/09/05 23:53:19;
+ ends 2 2016/09/06 01:33:19;
+ tstp 2 2016/09/06 01:33:19;
+ cltt 1 2016/09/05 23:53:19;
+ binding state free;
+ hardware ethernet 28:28:5d:65:30:ef;
+ uid "\001((]e0\357";
+}
+lease 10.254.252.6 {
+ starts 4 2016/09/29 01:41:23;
+ ends 4 2016/09/29 03:21:23;
+ tstp 4 2016/09/29 03:21:23;
+ cltt 4 2016/09/29 01:41:23;
+ binding state free;
+ hardware ethernet 04:bf:6d:94:1b:0d;
+ uid "\001\004\277m\224\033\015";
+}
+lease 10.254.252.7 {
+ starts 1 2016/10/03 08:23:14;
+ ends 1 2016/10/03 10:03:14;
+ tstp 1 2016/10/03 10:03:14;
+ cltt 1 2016/10/03 08:23:14;
+ binding state free;
+ hardware ethernet ec:22:80:f7:3f:44;
+ uid "\001\354\"\200\367?D";
+}
+lease 10.254.252.8 {
+ starts 5 2016/10/07 05:43:11;
+ ends 5 2016/10/07 05:58:31;
+ tstp 5 2016/10/07 05:58:31;
+ cltt 5 2016/10/07 05:43:11;
+ binding state free;
+ hardware ethernet 70:62:b8:bf:b5:b3;
+ uid "\001pb\270\277\265\263";
+}
+lease 192.168.3.15 {
+ starts 2 2019/01/08 06:29:58;
+ ends 2 2019/01/08 08:09:58;
+ tstp 2 2019/01/08 08:09:58;
+ cltt 2 2019/01/08 06:29:58;
+ binding state free;
+ hardware ethernet a8:f9:4b:20:99:9c;
+ uid "\001\250\371K \231\234";
+}
+lease 192.168.3.18 {
+ starts 2 2020/03/10 01:46:07;
+ ends 2 2020/03/10 03:22:21;
+ tstp 2 2020/03/10 03:22:21;
+ cltt 2 2020/03/10 01:46:08;
+ binding state free;
+ hardware ethernet 04:bf:6d:0d:e2:35;
+ uid "\001\004\277m\015\3425";
+ set vendor-class-identifier = "ndhcpc";
+}
+lease 192.168.3.11 {
+ starts 6 2020/10/03 07:52:36;
+ ends 6 2020/10/03 09:32:36;
+ cltt 6 2020/10/03 07:52:36;
+ binding state free;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 60:a4:4c:3f:6e:78;
+ uid "\001`\244L?nx";
+}
+lease 192.168.3.10 {
+ starts 6 2020/10/03 08:18:50;
+ ends 6 2020/10/03 09:58:50;
+ cltt 6 2020/10/03 08:18:50;
+ binding state free;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 70:62:b8:bf:b5:b3;
+ uid "\001pb\270\277\265\263";
+ set vendor-class-identifier = "dslforum.org";
+}
+lease 10.254.251.101 {
+ starts 0 2017/03/12 22:11:59;
+ ends 0 2017/03/12 23:51:58;
+ tstp 0 2017/03/12 23:51:58;
+ cltt 0 2017/03/12 22:11:59;
+ binding state free;
+ hardware ethernet b4:ce:f6:01:83:73;
+ set vendor-class-identifier = "dhcpcd-5.5.6";
+}
+lease 10.254.251.102 {
+ starts 5 2017/05/19 06:07:39;
+ ends 5 2017/05/19 07:47:39;
+ tstp 5 2017/05/19 07:47:39;
+ cltt 5 2017/05/19 06:07:39;
+ binding state free;
+ hardware ethernet 34:51:c9:4c:40:c9;
+ uid "\0014Q\311L@\311";
+}
+lease 10.254.251.103 {
+ starts 2 2018/04/24 13:18:00;
+ ends 2 2018/04/24 14:58:00;
+ tstp 2 2018/04/24 14:58:00;
+ cltt 2 2018/04/24 13:18:00;
+ binding state free;
+ hardware ethernet 70:8a:09:da:74:d0;
+ set vendor-class-identifier = "dhcpcd-5.5.6";
+}
+lease 10.254.251.104 {
+ starts 2 2018/04/24 12:54:27;
+ ends 3 2018/04/25 06:47:20;
+ tstp 3 2018/04/25 06:47:20;
+ cltt 2 2018/04/24 12:54:28;
+ binding state free;
+ hardware ethernet 78:a3:e4:e8:12:1f;
+ uid "\001x\243\344\350\022\037";
+}
+lease 10.254.251.100 {
+ starts 6 2020/10/03 07:58:45;
+ ends 6 2020/10/03 09:38:45;
+ cltt 6 2020/10/03 07:58:45;
+ binding state free;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 74:ea:3a:a6:a9:c7;
+ uid "\001t\352:\246\251\307";
+ set vendor-class-identifier = "MSFT 5.0";
+ client-hostname "TL-WR741N";
+}
+lease 10.254.255.104 {
+ starts 1 2017/07/10 09:35:24;
+ ends 1 2017/07/10 09:37:24;
+ tstp 1 2017/07/10 09:37:24;
+ cltt 1 2017/07/10 09:35:24;
+ binding state free;
+ hardware ethernet 50:85:69:11:b6:ff;
+ uid "\001P\205i\021\266\377";
+}
+lease 10.254.255.102 {
+ starts 3 2017/08/16 22:01:09;
+ ends 3 2017/08/16 23:41:09;
+ tstp 3 2017/08/16 23:41:09;
+ cltt 3 2017/08/16 22:01:09;
+ binding state free;
+ hardware ethernet c8:d3:a3:54:31:3a;
+ uid "\001\310\323\243T1:";
+}
+lease 10.254.255.103 {
+ starts 0 2018/12/16 00:54:07;
+ ends 0 2018/12/16 02:34:07;
+ tstp 0 2018/12/16 02:34:07;
+ cltt 0 2018/12/16 00:54:07;
+ binding state free;
+ hardware ethernet 08:c6:b3:01:e8:18;
+ uid "\001\010\306\263\001\350\030";
+ set vendor-class-identifier = "QTCH-QBR1041WUV2";
+}
+lease 10.254.255.100 {
+ starts 2 2018/12/18 09:21:24;
+ ends 2 2018/12/18 10:32:36;
+ tstp 2 2018/12/18 10:32:36;
+ cltt 2 2018/12/18 09:21:30;
+ binding state free;
+ hardware ethernet 70:62:b8:c3:51:a3;
+ uid "\001pb\270\303Q\243";
+}
+lease 10.254.255.105 {
+ starts 5 2019/03/22 07:42:55;
+ ends 5 2019/03/22 09:22:55;
+ tstp 5 2019/03/22 09:22:55;
+ cltt 5 2019/03/22 07:42:55;
+ binding state free;
+ hardware ethernet 58:d5:6e:95:88:30;
+ uid "\001X\325n\225\2100";
+ set vendor-class-identifier = "dslforum.org";
+}
+lease 10.254.255.101 {
+ starts 6 2020/10/03 07:29:24;
+ ends 6 2020/10/03 09:09:24;
+ cltt 6 2020/10/03 07:29:24;
+ binding state free;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 28:3b:82:58:f4:58;
+ uid "\001(;\202X\364X";
+ set vendor-class-identifier = "dslforum.org";
+}
+lease 10.254.253.104 {
+ starts 4 2018/03/15 12:01:12;
+ ends 4 2018/03/15 12:34:35;
+ tstp 4 2018/03/15 12:34:35;
+ cltt 4 2018/03/15 12:02:58;
+ binding state free;
+ hardware ethernet 50:64:2b:4f:fd:3d;
+ uid "\001Pd+O\375=";
+ set vendor-class-identifier = "udhcp 1.19.4";
+}
+lease 10.254.253.105 {
+ starts 4 2018/03/15 12:39:46;
+ ends 4 2018/03/15 14:17:39;
+ tstp 4 2018/03/15 14:17:39;
+ cltt 4 2018/03/15 12:39:47;
+ binding state free;
+ hardware ethernet 50:64:2b:4f:fd:3d;
+ set vendor-class-identifier = "udhcp 1.19.4";
+}
+lease 10.254.253.101 {
+ starts 5 2018/03/16 11:00:43;
+ ends 5 2018/03/16 12:40:15;
+ tstp 5 2018/03/16 12:40:15;
+ cltt 5 2018/03/16 11:00:43;
+ binding state free;
+ hardware ethernet d0:66:7b:8b:e5:ff;
+ uid "\001\320f{\213\345\377";
+ set vendor-class-identifier = "udhcp 1.14.3-VD Linux VDLinux.1.2.1.x";
+}
+lease 10.254.253.102 {
+ starts 5 2018/03/16 11:26:21;
+ ends 5 2018/03/16 13:06:21;
+ tstp 5 2018/03/16 13:06:21;
+ cltt 5 2018/03/16 11:26:21;
+ binding state free;
+ hardware ethernet 50:64:2b:4f:fd:3f;
+ uid "\001Pd+O\375?";
+}
+lease 10.254.253.100 {
+ starts 2 2018/08/21 05:48:43;
+ ends 2 2018/08/21 07:23:13;
+ tstp 2 2018/08/21 07:23:13;
+ cltt 2 2018/08/21 05:48:44;
+ binding state free;
+ hardware ethernet 20:cf:30:ef:8e:a4;
+ uid "\001 \3170\357\216\244";
+ set vendor-class-identifier = "udhcp 0.9.8-asus";
+}
+lease 10.254.253.103 {
+ starts 6 2020/10/03 08:07:02;
+ ends 6 2020/10/03 09:47:02;
+ cltt 6 2020/10/03 08:07:02;
+ binding state free;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 34:ce:00:03:08:57;
+ uid "\0014\316\000\003\010W";
+ set vendor-class-identifier = "udhcp 1.24.2";
+}
+lease 10.254.254.103 {
+ starts 3 2015/11/11 09:03:11;
+ ends 3 2015/11/11 09:05:11;
+ tstp 3 2015/11/11 09:05:11;
+ cltt 3 2015/11/11 09:03:11;
+ binding state free;
+ hardware ethernet 74:d0:2b:0e:9b:d6;
+}
+lease 10.254.254.104 {
+ starts 0 2017/12/03 15:57:29;
+ ends 0 2017/12/03 17:37:29;
+ tstp 0 2017/12/03 17:37:29;
+ cltt 0 2017/12/03 15:57:29;
+ binding state free;
+ hardware ethernet ac:22:0b:78:00:78;
+ uid "\377\3139\012\307\000\002\000\000\253\021(CC\252e\021\000\017";
+}
+lease 10.254.254.105 {
+ starts 2 2018/06/26 12:30:04;
+ ends 2 2018/06/26 13:09:10;
+ tstp 2 2018/06/26 13:09:10;
+ cltt 2 2018/06/26 12:30:04;
+ binding state free;
+ hardware ethernet cc:2d:e0:3f:bc:5c;
+ uid "\001\314-\340?\274\\";
+}
+lease 10.254.254.101 {
+ starts 3 2018/07/25 09:33:10;
+ ends 3 2018/07/25 11:13:10;
+ tstp 3 2018/07/25 11:13:10;
+ cltt 3 2018/07/25 09:33:10;
+ binding state free;
+ hardware ethernet 74:d0:2b:0e:9b:d6;
+ uid "\001t\320+\016\233\326";
+ set vendor-class-identifier = "MSFT 5.0";
+}
+lease 10.254.254.100 {
+ starts 2 2020/09/22 11:19:29;
+ ends 2 2020/09/22 11:21:29;
+ cltt 2 2020/09/22 11:19:29;
+ binding state free;
+ hardware ethernet 30:45:96:6a:f3:de;
+ uid "\0010E\226j\363\336";
+ client-hostname "Honor_7C-bb23201389a3c44";
+}
+lease 10.254.254.102 {
+ starts 2 2020/09/22 11:25:14;
+ ends 2 2020/09/22 11:27:14;
+ cltt 2 2020/09/22 11:25:14;
+ binding state free;
+ hardware ethernet c8:3d:dc:be:d2:cf;
+ uid "\001\310=\334\276\322\317";
+ client-hostname "Redmi7A-Redmi";
+}
+lease 10.254.255.101 {
+ starts 6 2020/10/03 08:19:24;
+ ends 6 2020/10/03 09:59:24;
+ cltt 6 2020/10/03 08:19:24;
+ binding state free;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 28:3b:82:58:f4:58;
+ uid "\001(;\202X\364X";
+ set vendor-class-identifier = "dslforum.org";
+}
+lease 10.254.251.100 {
+ starts 6 2020/10/03 08:48:45;
+ ends 6 2020/10/03 10:28:45;
+ cltt 6 2020/10/03 08:48:45;
+ binding state free;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 74:ea:3a:a6:a9:c7;
+ uid "\001t\352:\246\251\307";
+ set vendor-class-identifier = "MSFT 5.0";
+ client-hostname "TL-WR741N";
+}
+lease 10.254.253.103 {
+ starts 6 2020/10/03 08:57:02;
+ ends 6 2020/10/03 10:37:02;
+ cltt 6 2020/10/03 08:57:02;
+ binding state free;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 34:ce:00:03:08:57;
+ uid "\0014\316\000\003\010W";
+ set vendor-class-identifier = "udhcp 1.24.2";
+}
+lease 192.168.3.11 {
+ starts 6 2020/10/03 09:01:22;
+ ends 6 2020/10/03 10:41:22;
+ cltt 6 2020/10/03 09:01:22;
+ binding state free;
+ next binding state free;
+ rewind binding state free;
+ hardware ethernet 60:a4:4c:3f:6e:78;
+ uid "\001`\244L?nx";
+}
diff --git a/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv6 b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv6
new file mode 100644
index 000000000..3a4f1520e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/isc_dhcpd/testdata/dhcpd.leases_ipv6
@@ -0,0 +1,67 @@
+# The format of this file is documented in the dhcpd.leases(5) manual page.
+# This lease file was written by isc-dhcp-4.3.6b1
+
+# authoring-byte-order entry is generated, DO NOT DELETE
+authoring-byte-order little-endian;
+
+server-duid "\000\001\002\003!\004\005\006\007\008)^6\257";
+
+ia-na "'\000\010\016\000\001\000\001!\320\263\003\010\000'\327\337\354" {
+ cltt 0 2017/12/24 10:53:29;
+ iaaddr 2001:db8:: {
+ binding state active;
+ preferred-life 604800;
+ max-life 2592000;
+ ends 2 2020/09/30 10:53:29;
+ }
+}
+
+ia-na "#\2340\000\000\000\000\000!\300\021]0\234#e\212\261" {
+ cltt 6 2017/12/23 23:59:58;
+ iaaddr 2001:db8::1 {
+ binding state active;
+ preferred-life 604800;
+ max-life 2592000;
+ ends 2 2020/09/30 23:59:58;
+ }
+}
+
+ia-na "\000\000\000\000\000\001\000\000 \000\301\267xOCl\313\310" {
+ cltt 0 2017/12/24 02:11:08;
+ iaaddr 2001:db8::2 {
+ binding state active;
+ preferred-life 604800;
+ max-life 2592000;
+ ends 2 2020/09/30 02:11:08;
+ }
+}
+
+ia-na "'\000\000\000\000\000\000\001\027.\010\225\010\000'C8\353" {
+ cltt 0 2017/12/24 00:48:39;
+ iaaddr 2001:db8::3 {
+ binding state active;
+ preferred-life 604800;
+ max-life 2592000;
+ ends 2 2020/09/30 18:48:39;
+ }
+}
+
+ia-na "\000\000\000\000\000\000\000\265H\006n\305F\351\270i\014\326q\023J\347" {
+ cltt 0 2017/12/24 01:53:15;
+ iaaddr 2001:db8::4 {
+ binding state active;
+ preferred-life 604800;
+ max-life 2592000;
+ ends 2 2020/09/30 14:53:15;
+ }
+}
+
+ia-na "\000\000\000\000\000\000\000\000 \010\351\267xOCl\313\310" {
+ cltt 0 2017/12/24 11:33:17;
+ iaaddr 2001:db8::5 {
+ binding state active;
+ preferred-life 604800;
+ max-life 2592000;
+ ends 2 2020/09/30 11:33:17;
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/README.md b/src/go/plugin/go.d/modules/k8s_kubelet/README.md
new file mode 120000
index 000000000..036630b3e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/README.md
@@ -0,0 +1 @@
+integrations/kubelet.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/charts.go b/src/go/plugin/go.d/modules/k8s_kubelet/charts.go
new file mode 100644
index 000000000..e2848ea3e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/charts.go
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubelet
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Chart is an alias for module.Chart
+ Chart = module.Chart
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+ // Dim is an alias for module.Dim
+ Dim = module.Dim
+)
+
+var charts = Charts{
+ {
+ ID: "apiserver_audit_requests_rejected_total",
+ Title: "API Server Audit Requests",
+ Units: "requests/s",
+ Fam: "api server",
+ Ctx: "k8s_kubelet.apiserver_audit_requests_rejected",
+ Dims: Dims{
+ {ID: "apiserver_audit_requests_rejected_total", Name: "rejected", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "apiserver_storage_data_key_generation_failures_total",
+		Title: "API Server Failed Data Encryption Key (DEK) Generation Operations",
+ Units: "events/s",
+ Fam: "api server",
+ Ctx: "k8s_kubelet.apiserver_storage_data_key_generation_failures",
+ Dims: Dims{
+ {ID: "apiserver_storage_data_key_generation_failures_total", Name: "failures", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "apiserver_storage_data_key_generation_latencies",
+		Title: "API Server Latencies Of Data Encryption Key (DEK) Generation Operations",
+ Units: "observes/s",
+ Fam: "api server",
+ Ctx: "k8s_kubelet.apiserver_storage_data_key_generation_latencies",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "apiserver_storage_data_key_generation_bucket_5", Name: "5 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_10", Name: "10 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_20", Name: "20 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_40", Name: "40 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_80", Name: "80 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_160", Name: "160 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_320", Name: "320 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_640", Name: "640 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_1280", Name: "1280 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_2560", Name: "2560 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_5120", Name: "5120 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_10240", Name: "10240 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_20480", Name: "20480 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_40960", Name: "40960 µs", Algo: module.Incremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_+Inf", Name: "+Inf", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "apiserver_storage_data_key_generation_latencies_percentage",
+		Title: "API Server Latencies Of Data Encryption Key (DEK) Generation Operations Percentage",
+ Units: "%",
+ Fam: "api server",
+ Ctx: "k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "apiserver_storage_data_key_generation_bucket_5", Name: "5 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_10", Name: "10 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_20", Name: "20 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_40", Name: "40 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_80", Name: "80 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_160", Name: "160 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_320", Name: "320 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_640", Name: "640 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_1280", Name: "1280 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_2560", Name: "2560 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_5120", Name: "5120 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_10240", Name: "10240 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_20480", Name: "20480 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_40960", Name: "40960 µs", Algo: module.PercentOfIncremental},
+ {ID: "apiserver_storage_data_key_generation_bucket_+Inf", Name: "+Inf", Algo: module.PercentOfIncremental},
+ },
+ },
+ {
+ ID: "apiserver_storage_envelope_transformation_cache_misses_total",
+ Title: "API Server Storage Envelope Transformation Cache Misses",
+ Units: "events/s",
+ Fam: "api server",
+ Ctx: "k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses",
+ Dims: Dims{
+ {ID: "apiserver_storage_envelope_transformation_cache_misses_total", Name: "cache misses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "kubelet_containers_running",
+ Title: "Number Of Containers Currently Running",
+ Units: "running containers",
+ Fam: "containers",
+ Ctx: "k8s_kubelet.kubelet_containers_running",
+ Dims: Dims{
+ {ID: "kubelet_running_container", Name: "total"},
+ },
+ },
+ {
+ ID: "kubelet_pods_running",
+ Title: "Number Of Pods Currently Running",
+ Units: "running pods",
+ Fam: "pods",
+ Ctx: "k8s_kubelet.kubelet_pods_running",
+ Dims: Dims{
+ {ID: "kubelet_running_pod", Name: "total"},
+ },
+ },
+ {
+ ID: "kubelet_pods_log_filesystem_used_bytes",
+ Title: "Bytes Used By The Pod Logs On The Filesystem",
+ Units: "B",
+ Fam: "pods",
+ Ctx: "k8s_kubelet.kubelet_pods_log_filesystem_used_bytes",
+ Type: module.Stacked,
+ },
+ {
+ ID: "kubelet_runtime_operations",
+ Title: "Runtime Operations By Type",
+ Units: "operations/s",
+ Fam: "operations",
+ Ctx: "k8s_kubelet.kubelet_runtime_operations",
+ Type: module.Stacked,
+ },
+ {
+ ID: "kubelet_runtime_operations_errors",
+ Title: "Runtime Operations Errors By Type",
+ Units: "errors/s",
+ Fam: "operations",
+ Ctx: "k8s_kubelet.kubelet_runtime_operations_errors",
+ Type: module.Stacked,
+ },
+ {
+ ID: "kubelet_docker_operations",
+ Title: "Docker Operations By Type",
+ Units: "operations/s",
+ Fam: "operations",
+ Ctx: "k8s_kubelet.kubelet_docker_operations",
+ Type: module.Stacked,
+ },
+ {
+ ID: "kubelet_docker_operations_errors",
+ Title: "Docker Operations Errors By Type",
+ Units: "errors/s",
+ Fam: "operations",
+ Ctx: "k8s_kubelet.kubelet_docker_operations_errors",
+ Type: module.Stacked,
+ },
+ {
+ ID: "kubelet_node_config_error",
+ Title: "Node Configuration-Related Error",
+ Units: "bool",
+ Fam: "config error",
+ Ctx: "k8s_kubelet.kubelet_node_config_error",
+ Dims: Dims{
+ {ID: "kubelet_node_config_error", Name: "experiencing_error"},
+ },
+ },
+ {
+ ID: "kubelet_pleg_relist_interval_microseconds",
+ Title: "PLEG Relisting Interval Summary",
+ Units: "microseconds",
+ Fam: "pleg relisting",
+ Ctx: "k8s_kubelet.kubelet_pleg_relist_interval_microseconds",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "kubelet_pleg_relist_interval_05", Name: "0.5"},
+ {ID: "kubelet_pleg_relist_interval_09", Name: "0.9"},
+ {ID: "kubelet_pleg_relist_interval_099", Name: "0.99"},
+ },
+ },
+ {
+ ID: "kubelet_pleg_relist_latency_microseconds",
+ Title: "PLEG Relisting Latency Summary",
+ Units: "microseconds",
+ Fam: "pleg relisting",
+ Ctx: "k8s_kubelet.kubelet_pleg_relist_latency_microseconds",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "kubelet_pleg_relist_latency_05", Name: "0.5"},
+ {ID: "kubelet_pleg_relist_latency_09", Name: "0.9"},
+ {ID: "kubelet_pleg_relist_latency_099", Name: "0.99"},
+ },
+ },
+ {
+ ID: "kubelet_token_requests",
+ Title: "Token() Requests To The Alternate Token Source",
+ Units: "token requests/s",
+ Fam: "token",
+ Ctx: "k8s_kubelet.kubelet_token_requests",
+ Dims: Dims{
+ {ID: "token_count", Name: "total", Algo: module.Incremental},
+ {ID: "token_fail_count", Name: "failed", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "rest_client_requests_by_code",
+ Title: "HTTP Requests By Status Code",
+ Units: "requests/s",
+ Fam: "rest client",
+ Ctx: "k8s_kubelet.rest_client_requests_by_code",
+ Type: module.Stacked,
+ },
+ {
+ ID: "rest_client_requests_by_method",
+		Title: "HTTP Requests By Method",
+ Units: "requests/s",
+ Fam: "rest client",
+ Ctx: "k8s_kubelet.rest_client_requests_by_method",
+ Type: module.Stacked,
+ },
+}
+
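+// newVolumeManagerChart builds the per volume plugin chart; collectVolumeManager adds one lazily the first time a plugin_name is seen.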
+func newVolumeManagerChart(name string) *Chart {
+ return &Chart{
+ ID: "volume_manager_total_volumes_" + name,
+ Title: "Volume Manager State Of The World, Plugin " + name,
+ Units: "state",
+ Fam: "volume manager",
+ Ctx: "k8s_kubelet.volume_manager_total_volumes",
+ Dims: Dims{
+ {ID: "volume_manager_plugin_" + name + "_state_actual", Name: "actual"},
+ {ID: "volume_manager_plugin_" + name + "_state_desired", Name: "desired"},
+ },
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/collect.go b/src/go/plugin/go.d/modules/k8s_kubelet/collect.go
new file mode 100644
index 000000000..f014617fc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/collect.go
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubelet
+
+import (
+ "math"
+
+ mtx "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
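+// collect scrapes the kubelet metrics endpoint and flattens the parsed series into a metrics map via stm.ToMap.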
+func (k *Kubelet) collect() (map[string]int64, error) {
+ raw, err := k.prom.ScrapeSeries()
+
+ if err != nil {
+ return nil, err
+ }
+
+ mx := newMetrics()
+
+ k.collectToken(raw, mx)
+ k.collectRESTClientHTTPRequests(raw, mx)
+ k.collectAPIServer(raw, mx)
+ k.collectKubelet(raw, mx)
+ k.collectVolumeManager(raw, mx)
+
+ return stm.ToMap(mx), nil
+}
+
+func (k *Kubelet) collectLogsUsagePerPod(raw prometheus.Series, mx *metrics) {
+ chart := k.charts.Get("kubelet_pods_log_filesystem_used_bytes")
+ seen := make(map[string]bool)
+
+ for _, metric := range raw.FindByName("kubelet_container_log_filesystem_used_bytes") {
+ pod := metric.Labels.Get("pod")
+ namespace := metric.Labels.Get("namespace")
+
+ if pod == "" || namespace == "" {
+ continue
+ }
+
+ key := namespace + "_" + pod
+ dimID := "kubelet_log_file_system_usage_" + key
+
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: pod})
+ chart.MarkNotCreated()
+ }
+
+ seen[dimID] = true
+ v := mx.Kubelet.PodLogFileSystemUsage[key]
+ v.Add(metric.Value)
+ mx.Kubelet.PodLogFileSystemUsage[key] = v
+ }
+
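+	// drop dimensions for pods that were not present in this scrape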
+ for _, dim := range chart.Dims {
+ if seen[dim.ID] {
+ continue
+ }
+ _ = chart.MarkDimRemove(dim.ID, false)
+ chart.MarkNotCreated()
+ }
+}
+
+func (k *Kubelet) collectVolumeManager(raw prometheus.Series, mx *metrics) {
+ vmPlugins := make(map[string]*volumeManagerPlugin)
+
+ for _, metric := range raw.FindByName("volume_manager_total_volumes") {
+ pluginName := metric.Labels.Get("plugin_name")
+ state := metric.Labels.Get("state")
+
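+		// register a chart for this volume plugin the first time its plugin_name appears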
+ if !k.collectedVMPlugins[pluginName] {
+ _ = k.charts.Add(newVolumeManagerChart(pluginName))
+ k.collectedVMPlugins[pluginName] = true
+ }
+ if _, ok := vmPlugins[pluginName]; !ok {
+ vmPlugins[pluginName] = &volumeManagerPlugin{}
+ }
+
+ switch state {
+ case "actual_state_of_world":
+ vmPlugins[pluginName].State.Actual.Set(metric.Value)
+ case "desired_state_of_world":
+ vmPlugins[pluginName].State.Desired.Set(metric.Value)
+ }
+ }
+
+ mx.VolumeManager.Plugins = vmPlugins
+}
+
+func (k *Kubelet) collectKubelet(raw prometheus.Series, mx *metrics) {
+ value := raw.FindByName("kubelet_node_config_error").Max()
+ mx.Kubelet.NodeConfigError.Set(value)
+
+ /*
+ # HELP kubelet_running_containers [ALPHA] Number of containers currently running
+ # TYPE kubelet_running_containers gauge
+ kubelet_running_containers{container_state="created"} 1
+ kubelet_running_containers{container_state="exited"} 13
+ kubelet_running_containers{container_state="running"} 42
+ kubelet_running_containers{container_state="unknown"} 1
+ */
+
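+	// prefer the kubelet_running_container_count gauge; fall back to the labeled kubelet_running_containers series (running state) when the count gauge is absent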
+ ms := raw.FindByName("kubelet_running_container_count")
+ value = ms.Max()
+ if ms.Len() == 0 {
+ for _, m := range raw.FindByName("kubelet_running_containers") {
+ if m.Labels.Get("container_state") == "running" {
+ value = m.Value
+ break
+ }
+ }
+ }
+ mx.Kubelet.RunningContainerCount.Set(value)
+
+ /*
+ # HELP kubelet_running_pods [ALPHA] Number of pods currently running
+ # TYPE kubelet_running_pods gauge
+ kubelet_running_pods 37
+ */
+ value = raw.FindByNames("kubelet_running_pod_count", "kubelet_running_pods").Max()
+ mx.Kubelet.RunningPodCount.Set(value)
+
+ k.collectRuntimeOperations(raw, mx)
+ k.collectRuntimeOperationsErrors(raw, mx)
+ k.collectDockerOperations(raw, mx)
+ k.collectDockerOperationsErrors(raw, mx)
+ k.collectPLEGRelisting(raw, mx)
+ k.collectLogsUsagePerPod(raw, mx)
+}
+
+func (k *Kubelet) collectAPIServer(raw prometheus.Series, mx *metrics) {
+ value := raw.FindByName("apiserver_audit_requests_rejected_total").Max()
+ mx.APIServer.Audit.Requests.Rejected.Set(value)
+
+ value = raw.FindByName("apiserver_storage_data_key_generation_failures_total").Max()
+ mx.APIServer.Storage.DataKeyGeneration.Failures.Set(value)
+
+ value = raw.FindByName("apiserver_storage_envelope_transformation_cache_misses_total").Max()
+ mx.APIServer.Storage.EnvelopeTransformation.CacheMisses.Set(value)
+
+ k.collectStorageDataKeyGenerationLatencies(raw, mx)
+}
+
+func (k *Kubelet) collectToken(raw prometheus.Series, mx *metrics) {
+ value := raw.FindByName("get_token_count").Max()
+ mx.Token.Count.Set(value)
+
+ value = raw.FindByName("get_token_fail_count").Max()
+ mx.Token.FailCount.Set(value)
+}
+
+func (k *Kubelet) collectPLEGRelisting(raw prometheus.Series, mx *metrics) {
+ // Summary
+ for _, metric := range raw.FindByName("kubelet_pleg_relist_interval_microseconds") {
+ if math.IsNaN(metric.Value) {
+ continue
+ }
+ quantile := metric.Labels.Get("quantile")
+ switch quantile {
+ case "0.5":
+ mx.Kubelet.PLEG.Relist.Interval.Quantile05.Set(metric.Value)
+ case "0.9":
+ mx.Kubelet.PLEG.Relist.Interval.Quantile09.Set(metric.Value)
+ case "0.99":
+ mx.Kubelet.PLEG.Relist.Interval.Quantile099.Set(metric.Value)
+ }
+ }
+ for _, metric := range raw.FindByName("kubelet_pleg_relist_latency_microseconds") {
+ if math.IsNaN(metric.Value) {
+ continue
+ }
+ quantile := metric.Labels.Get("quantile")
+ switch quantile {
+ case "0.5":
+ mx.Kubelet.PLEG.Relist.Latency.Quantile05.Set(metric.Value)
+ case "0.9":
+ mx.Kubelet.PLEG.Relist.Latency.Quantile09.Set(metric.Value)
+ case "0.99":
+ mx.Kubelet.PLEG.Relist.Latency.Quantile099.Set(metric.Value)
+ }
+ }
+}
+
+func (k *Kubelet) collectStorageDataKeyGenerationLatencies(raw prometheus.Series, mx *metrics) {
+ latencies := &mx.APIServer.Storage.DataKeyGeneration.Latencies
+ metricName := "apiserver_storage_data_key_generation_latencies_microseconds_bucket"
+
+ for _, metric := range raw.FindByName(metricName) {
+ value := metric.Value
+ bucket := metric.Labels.Get("le")
+ switch bucket {
+ case "5":
+ latencies.LE5.Set(value)
+ case "10":
+ latencies.LE10.Set(value)
+ case "20":
+ latencies.LE20.Set(value)
+ case "40":
+ latencies.LE40.Set(value)
+ case "80":
+ latencies.LE80.Set(value)
+ case "160":
+ latencies.LE160.Set(value)
+ case "320":
+ latencies.LE320.Set(value)
+ case "640":
+ latencies.LE640.Set(value)
+ case "1280":
+ latencies.LE1280.Set(value)
+ case "2560":
+ latencies.LE2560.Set(value)
+ case "5120":
+ latencies.LE5120.Set(value)
+ case "10240":
+ latencies.LE10240.Set(value)
+ case "20480":
+ latencies.LE20480.Set(value)
+ case "40960":
+ latencies.LE40960.Set(value)
+ case "+Inf":
+ latencies.LEInf.Set(value)
+ }
+ }
+
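+	// the le buckets are cumulative; convert them to per-bucket counts by subtracting the next lower bucket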
+ latencies.LEInf.Sub(latencies.LE40960.Value())
+ latencies.LE40960.Sub(latencies.LE20480.Value())
+ latencies.LE20480.Sub(latencies.LE10240.Value())
+ latencies.LE10240.Sub(latencies.LE5120.Value())
+ latencies.LE5120.Sub(latencies.LE2560.Value())
+ latencies.LE2560.Sub(latencies.LE1280.Value())
+ latencies.LE1280.Sub(latencies.LE640.Value())
+ latencies.LE640.Sub(latencies.LE320.Value())
+ latencies.LE320.Sub(latencies.LE160.Value())
+ latencies.LE160.Sub(latencies.LE80.Value())
+ latencies.LE80.Sub(latencies.LE40.Value())
+ latencies.LE40.Sub(latencies.LE20.Value())
+ latencies.LE20.Sub(latencies.LE10.Value())
+ latencies.LE10.Sub(latencies.LE5.Value())
+}
+
+func (k *Kubelet) collectRESTClientHTTPRequests(raw prometheus.Series, mx *metrics) {
+ metricName := "rest_client_requests_total"
+ chart := k.charts.Get("rest_client_requests_by_code")
+
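+	// first pass: group rest_client_requests_total by HTTP status code; the second pass below groups the same series by method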
+ for _, metric := range raw.FindByName(metricName) {
+ code := metric.Labels.Get("code")
+ if code == "" {
+ continue
+ }
+ dimID := "rest_client_requests_" + code
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: code, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ mx.RESTClient.Requests.ByStatusCode[code] = mtx.Gauge(metric.Value)
+ }
+
+ chart = k.charts.Get("rest_client_requests_by_method")
+
+ for _, metric := range raw.FindByName(metricName) {
+ method := metric.Labels.Get("method")
+ if method == "" {
+ continue
+ }
+ dimID := "rest_client_requests_" + method
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: method, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ mx.RESTClient.Requests.ByMethod[method] = mtx.Gauge(metric.Value)
+ }
+}
+
+func (k *Kubelet) collectRuntimeOperations(raw prometheus.Series, mx *metrics) {
+ chart := k.charts.Get("kubelet_runtime_operations")
+
+ // kubelet_runtime_operations_total
+ for _, metric := range raw.FindByNames("kubelet_runtime_operations", "kubelet_runtime_operations_total") {
+ opType := metric.Labels.Get("operation_type")
+ if opType == "" {
+ continue
+ }
+ dimID := "kubelet_runtime_operations_" + opType
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: opType, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ mx.Kubelet.Runtime.Operations[opType] = mtx.Gauge(metric.Value)
+ }
+}
+
+func (k *Kubelet) collectRuntimeOperationsErrors(raw prometheus.Series, mx *metrics) {
+ chart := k.charts.Get("kubelet_runtime_operations_errors")
+
+ // kubelet_runtime_operations_errors_total
+ for _, metric := range raw.FindByNames("kubelet_runtime_operations_errors", "kubelet_runtime_operations_errors_total") {
+ opType := metric.Labels.Get("operation_type")
+ if opType == "" {
+ continue
+ }
+ dimID := "kubelet_runtime_operations_errors_" + opType
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: opType, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ mx.Kubelet.Runtime.OperationsErrors[opType] = mtx.Gauge(metric.Value)
+ }
+}
+
+func (k *Kubelet) collectDockerOperations(raw prometheus.Series, mx *metrics) {
+ chart := k.charts.Get("kubelet_docker_operations")
+
+ // kubelet_docker_operations_total
+ for _, metric := range raw.FindByNames("kubelet_docker_operations", "kubelet_docker_operations_total") {
+ opType := metric.Labels.Get("operation_type")
+ if opType == "" {
+ continue
+ }
+ dimID := "kubelet_docker_operations_" + opType
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: opType, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ mx.Kubelet.Docker.Operations[opType] = mtx.Gauge(metric.Value)
+ }
+}
+
+func (k *Kubelet) collectDockerOperationsErrors(raw prometheus.Series, mx *metrics) {
+ chart := k.charts.Get("kubelet_docker_operations_errors")
+
+ // kubelet_docker_operations_errors_total
+ for _, metric := range raw.FindByNames("kubelet_docker_operations_errors", "kubelet_docker_operations_errors_total") {
+ opType := metric.Labels.Get("operation_type")
+ if opType == "" {
+ continue
+ }
+ dimID := "kubelet_docker_operations_errors_" + opType
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: opType, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ mx.Kubelet.Docker.OperationsErrors[opType] = mtx.Gauge(metric.Value)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/config_schema.json b/src/go/plugin/go.d/modules/k8s_kubelet/config_schema.json
new file mode 100644
index 000000000..16f9029a6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Kubelet collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Kubelet metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:10255/metrics",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/init.go b/src/go/plugin/go.d/modules/k8s_kubelet/init.go
new file mode 100644
index 000000000..803cd984c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/init.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubelet
+
+import (
+ "errors"
+ "os"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (k *Kubelet) validateConfig() error {
+ if k.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
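+// initAuthToken reads the service account token; a read failure only logs a warning, and the collector falls back to unauthenticated requests.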
+func (k *Kubelet) initAuthToken() string {
+ bs, err := os.ReadFile(k.TokenPath)
+ if err != nil {
+ k.Warningf("error on reading service account token from '%s': %v", k.TokenPath, err)
+ }
+ return string(bs)
+}
+
+func (k *Kubelet) initPrometheusClient() (prometheus.Prometheus, error) {
+ httpClient, err := web.NewHTTPClient(k.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(httpClient, k.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/integrations/kubelet.md b/src/go/plugin/go.d/modules/k8s_kubelet/integrations/kubelet.md
new file mode 100644
index 000000000..d92f82be7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/integrations/kubelet.md
@@ -0,0 +1,254 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/k8s_kubelet/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/k8s_kubelet/metadata.yaml"
+sidebar_label: "Kubelet"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Kubernetes"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kubelet
+
+
+<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: k8s_kubelet
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Kubelet instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Kubelet instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s_kubelet.apiserver_audit_requests_rejected | rejected | requests/s |
+| k8s_kubelet.apiserver_storage_data_key_generation_failures | failures | events/s |
+| k8s_kubelet.apiserver_storage_data_key_generation_latencies | 5_µs, 10_µs, 20_µs, 40_µs, 80_µs, 160_µs, 320_µs, 640_µs, 1280_µs, 2560_µs, 5120_µs, 10240_µs, 20480_µs, 40960_µs, +Inf | observes/s |
+| k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent | 5_µs, 10_µs, 20_µs, 40_µs, 80_µs, 160_µs, 320_µs, 640_µs, 1280_µs, 2560_µs, 5120_µs, 10240_µs, 20480_µs, 40960_µs, +Inf | percentage |
+| k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses | cache misses | events/s |
+| k8s_kubelet.kubelet_containers_running | total | running_containers |
+| k8s_kubelet.kubelet_pods_running | total | running_pods |
+| k8s_kubelet.kubelet_pods_log_filesystem_used_bytes | a dimension per namespace and pod | B |
+| k8s_kubelet.kubelet_runtime_operations | a dimension per operation type | operations/s |
+| k8s_kubelet.kubelet_runtime_operations_errors | a dimension per operation type | errors/s |
+| k8s_kubelet.kubelet_docker_operations | a dimension per operation type | operations/s |
+| k8s_kubelet.kubelet_docker_operations_errors | a dimension per operation type | errors/s |
+| k8s_kubelet.kubelet_node_config_error | experiencing_error | bool |
+| k8s_kubelet.kubelet_pleg_relist_interval_microseconds | 0.5, 0.9, 0.99 | microseconds |
+| k8s_kubelet.kubelet_pleg_relist_latency_microseconds | 0.5, 0.9, 0.99 | microseconds |
+| k8s_kubelet.kubelet_token_requests | total, failed | token_requests/s |
+| k8s_kubelet.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |
+| k8s_kubelet.rest_client_requests_by_method | a dimension per HTTP method | requests/s |
+
+### Per volume manager
+
+These metrics refer to the Volume Manager.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s_kubelet.volume_manager_total_volumes | actual, desired | state |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ kubelet_node_config_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_node_config_error | the node is experiencing a configuration-related error (0: false, 1: true) |
+| [ kubelet_token_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_token_requests | number of failed Token() requests to the alternate token source |
+| [ kubelet_operations_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf) | k8s_kubelet.kubelet_operations_errors | number of Docker or runtime operation errors |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/k8s_kubelet.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/k8s_kubelet.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:10255/metrics | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:10255/metrics
+
+```
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:10250/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `k8s_kubelet` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m k8s_kubelet
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `k8s_kubelet` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep k8s_kubelet
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep k8s_kubelet /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep k8s_kubelet
+```
+
+
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/kubelet.go b/src/go/plugin/go.d/modules/k8s_kubelet/kubelet.go
new file mode 100644
index 000000000..19fb9dd9e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/kubelet.go
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubelet
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("k8s_kubelet", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ // NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000
+ Priority: 50000,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Kubelet {
+ return &Kubelet{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:10255/metrics",
+ Headers: make(map[string]string),
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token",
+ },
+
+ charts: charts.Copy(),
+ collectedVMPlugins: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ TokenPath string `yaml:"token_path,omitempty" json:"token_path"`
+}
+
+type Kubelet struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ prom prometheus.Prometheus
+
+ collectedVMPlugins map[string]bool // volume_manager_total_volumes
+}
+
+func (k *Kubelet) Configuration() any {
+ return k.Config
+}
+
+func (k *Kubelet) Init() error {
+ if err := k.validateConfig(); err != nil {
+ k.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prom, err := k.initPrometheusClient()
+ if err != nil {
+ k.Error(err)
+ return err
+ }
+ k.prom = prom
+
+ if tok := k.initAuthToken(); tok != "" {
+ k.Request.Headers["Authorization"] = "Bearer " + tok
+ }
+
+ return nil
+}
+
+func (k *Kubelet) Check() error {
+ mx, err := k.collect()
+ if err != nil {
+ k.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (k *Kubelet) Charts() *Charts {
+ return k.charts
+}
+
+func (k *Kubelet) Collect() map[string]int64 {
+ mx, err := k.collect()
+
+ if err != nil {
+ k.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (k *Kubelet) Cleanup() {
+ if k.prom != nil && k.prom.HTTPClient() != nil {
+ k.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/kubelet_test.go b/src/go/plugin/go.d/modules/k8s_kubelet/kubelet_test.go
new file mode 100644
index 000000000..d55ee31a3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/kubelet_test.go
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubelet
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataMetrics, _ = os.ReadFile("testdata/metrics.txt")
+ dataServiceAccountToken, _ = os.ReadFile("testdata/token.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataMetrics": dataMetrics,
+ "dataServiceAccountToken": dataServiceAccountToken,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestKubelet_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Kubelet{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestKubelet_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestKubelet_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestKubelet_Init(t *testing.T) {
+ assert.NoError(t, New().Init())
+}
+
+func TestKubelet_Init_ReadServiceAccountToken(t *testing.T) {
+ job := New()
+ job.TokenPath = "testdata/token.txt"
+
+ assert.NoError(t, job.Init())
+ assert.Equal(t, "Bearer "+string(dataServiceAccountToken), job.Request.Headers["Authorization"])
+}
+
+func TestKubelet_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) {
+ job := New()
+ job.Client.TLSConfig.TLSCA = "testdata/tls"
+
+ assert.Error(t, job.Init())
+}
+
+func TestKubelet_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+}
+
+func TestKubelet_Check_ConnectionRefused(t *testing.T) {
+ job := New()
+ job.URL = "http://127.0.0.1:38001/metrics"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestKubelet_Collect(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "apiserver_audit_requests_rejected_total": 0,
+ "apiserver_storage_data_key_generation_bucket_+Inf": 1,
+ "apiserver_storage_data_key_generation_bucket_10": 1,
+ "apiserver_storage_data_key_generation_bucket_10240": 1,
+ "apiserver_storage_data_key_generation_bucket_1280": 1,
+ "apiserver_storage_data_key_generation_bucket_160": 1,
+ "apiserver_storage_data_key_generation_bucket_20": 1,
+ "apiserver_storage_data_key_generation_bucket_20480": 1,
+ "apiserver_storage_data_key_generation_bucket_2560": 1,
+ "apiserver_storage_data_key_generation_bucket_320": 1,
+ "apiserver_storage_data_key_generation_bucket_40": 1,
+ "apiserver_storage_data_key_generation_bucket_40960": 1,
+ "apiserver_storage_data_key_generation_bucket_5": 6,
+ "apiserver_storage_data_key_generation_bucket_5120": 1,
+ "apiserver_storage_data_key_generation_bucket_640": 1,
+ "apiserver_storage_data_key_generation_bucket_80": 1,
+ "apiserver_storage_data_key_generation_failures_total": 0,
+ "apiserver_storage_envelope_transformation_cache_misses_total": 0,
+ "kubelet_docker_operations_create_container": 19,
+ "kubelet_docker_operations_errors_inspect_container": 14,
+ "kubelet_docker_operations_errors_remove_container": 4,
+ "kubelet_docker_operations_info": 2,
+ "kubelet_docker_operations_inspect_container": 223,
+ "kubelet_docker_operations_inspect_image": 110,
+ "kubelet_docker_operations_list_containers": 5157,
+ "kubelet_docker_operations_list_images": 195,
+ "kubelet_docker_operations_remove_container": 23,
+ "kubelet_docker_operations_start_container": 19,
+ "kubelet_docker_operations_stop_container": 23,
+ "kubelet_docker_operations_version": 472,
+ "kubelet_log_file_system_usage_kube-system_coredns-86c58d9df4-d22hv": 28672,
+ "kubelet_log_file_system_usage_kube-system_coredns-86c58d9df4-ks5dj": 28672,
+ "kubelet_log_file_system_usage_kube-system_etcd-minikube": 36864,
+ "kubelet_log_file_system_usage_kube-system_kube-addon-manager-minikube": 45056,
+ "kubelet_log_file_system_usage_kube-system_kube-apiserver-minikube": 36864,
+ "kubelet_log_file_system_usage_kube-system_kube-controller-manager-minikube": 57344,
+ "kubelet_log_file_system_usage_kube-system_kube-proxy-q2fvs": 28672,
+ "kubelet_log_file_system_usage_kube-system_kube-scheduler-minikube": 40960,
+ "kubelet_log_file_system_usage_kube-system_storage-provisioner": 24576,
+ "kubelet_node_config_error": 1,
+ "kubelet_pleg_relist_interval_05": 1013125,
+ "kubelet_pleg_relist_interval_09": 1016820,
+ "kubelet_pleg_relist_interval_099": 1032022,
+ "kubelet_pleg_relist_latency_05": 12741,
+ "kubelet_pleg_relist_latency_09": 16211,
+ "kubelet_pleg_relist_latency_099": 31234,
+ "kubelet_running_container": 9,
+ "kubelet_running_pod": 9,
+ "kubelet_runtime_operations_container_status": 90,
+ "kubelet_runtime_operations_create_container": 10,
+ "kubelet_runtime_operations_errors_container_status": 14,
+ "kubelet_runtime_operations_errors_remove_container": 4,
+ "kubelet_runtime_operations_exec_sync": 138,
+ "kubelet_runtime_operations_image_status": 25,
+ "kubelet_runtime_operations_list_containers": 2586,
+ "kubelet_runtime_operations_list_images": 195,
+ "kubelet_runtime_operations_list_podsandbox": 2562,
+ "kubelet_runtime_operations_podsandbox_status": 77,
+ "kubelet_runtime_operations_remove_container": 14,
+ "kubelet_runtime_operations_run_podsandbox": 9,
+ "kubelet_runtime_operations_start_container": 10,
+ "kubelet_runtime_operations_status": 279,
+ "kubelet_runtime_operations_stop_podsandbox": 14,
+ "kubelet_runtime_operations_version": 190,
+ "rest_client_requests_200": 177,
+ "rest_client_requests_201": 43,
+ "rest_client_requests_403": 2,
+ "rest_client_requests_409": 1,
+ "rest_client_requests_<error>": 8,
+ "rest_client_requests_GET": 37,
+ "rest_client_requests_PATCH": 177,
+ "rest_client_requests_POST": 8,
+ "token_count": 0,
+ "token_fail_count": 0,
+ "volume_manager_plugin_kubernetes.io/configmap_state_actual": 3,
+ "volume_manager_plugin_kubernetes.io/configmap_state_desired": 3,
+ "volume_manager_plugin_kubernetes.io/host-path_state_actual": 15,
+ "volume_manager_plugin_kubernetes.io/host-path_state_desired": 15,
+ "volume_manager_plugin_kubernetes.io/secret_state_actual": 4,
+ "volume_manager_plugin_kubernetes.io/secret_state_desired": 4,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestKubelet_Collect_ReceiveInvalidResponse(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestKubelet_Collect_Receive404(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/metadata.yaml b/src/go/plugin/go.d/modules/k8s_kubelet/metadata.yaml
new file mode 100644
index 000000000..0d5229bb5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/metadata.yaml
@@ -0,0 +1,331 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-k8s_kubelet
+ plugin_name: go.d.plugin
+ module_name: k8s_kubelet
+ monitored_instance:
+ name: Kubelet
+ link: https://kubernetes.io/docs/concepts/overview/components/#kubelet
+ icon_filename: kubernetes.svg
+ categories:
+ - data-collection.kubernetes
+ keywords:
+ - kubelet
+ - kubernetes
+ - k8s
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Kubelet instances.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/k8s_kubelet.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:10255/metrics
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:10255/metrics
+ - name: HTTPS with self-signed certificate
+ description: |
+ Do not validate server certificate chain and hostname.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:10250/metrics
+ tls_skip_verify: yes
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: kubelet_node_config_error
+ metric: k8s_kubelet.kubelet_node_config_error
+ info: "the node is experiencing a configuration-related error (0: false, 1: true)"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf
+ - name: kubelet_token_requests
+ metric: k8s_kubelet.kubelet_token_requests
+ info: "number of failed Token() requests to the alternate token source"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf
+ - name: kubelet_operations_error
+ metric: k8s_kubelet.kubelet_operations_errors
+ info: number of Docker or runtime operation errors
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/kubelet.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: k8s_kubelet.apiserver_audit_requests_rejected
+ description: API Server Audit Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: rejected
+ - name: k8s_kubelet.apiserver_storage_data_key_generation_failures
+              description: API Server Failed Data Encryption Key (DEK) Generation Operations
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: failures
+ - name: k8s_kubelet.apiserver_storage_data_key_generation_latencies
+              description: API Server Latencies Of Data Encryption Key (DEK) Generation Operations
+ unit: observes/s
+ chart_type: stacked
+ dimensions:
+ - name: 5_µs
+ - name: 10_µs
+ - name: 20_µs
+ - name: 40_µs
+ - name: 80_µs
+ - name: 160_µs
+ - name: 320_µs
+ - name: 640_µs
+ - name: 1280_µs
+ - name: 2560_µs
+ - name: 5120_µs
+ - name: 10240_µs
+ - name: 20480_µs
+ - name: 40960_µs
+ - name: +Inf
+ - name: k8s_kubelet.apiserver_storage_data_key_generation_latencies_percent
+              description: API Server Latencies Of Data Encryption Key (DEK) Generation Operations Percentage
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: 5_µs
+ - name: 10_µs
+ - name: 20_µs
+ - name: 40_µs
+ - name: 80_µs
+ - name: 160_µs
+ - name: 320_µs
+ - name: 640_µs
+ - name: 1280_µs
+ - name: 2560_µs
+ - name: 5120_µs
+ - name: 10240_µs
+ - name: 20480_µs
+ - name: 40960_µs
+ - name: +Inf
+ - name: k8s_kubelet.apiserver_storage_envelope_transformation_cache_misses
+ description: API Server Storage Envelope Transformation Cache Misses
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: cache misses
+ - name: k8s_kubelet.kubelet_containers_running
+ description: Number Of Containers Currently Running
+ unit: running_containers
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: k8s_kubelet.kubelet_pods_running
+ description: Number Of Pods Currently Running
+ unit: running_pods
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: k8s_kubelet.kubelet_pods_log_filesystem_used_bytes
+ description: Bytes Used By The Pod Logs On The Filesystem
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per namespace and pod
+ - name: k8s_kubelet.kubelet_runtime_operations
+ description: Runtime Operations By Type
+ unit: operations/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per operation type
+ - name: k8s_kubelet.kubelet_runtime_operations_errors
+ description: Runtime Operations Errors By Type
+ unit: errors/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per operation type
+ - name: k8s_kubelet.kubelet_docker_operations
+ description: Docker Operations By Type
+ unit: operations/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per operation type
+ - name: k8s_kubelet.kubelet_docker_operations_errors
+ description: Docker Operations Errors By Type
+ unit: errors/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per operation type
+ - name: k8s_kubelet.kubelet_node_config_error
+ description: Node Configuration-Related Error
+ unit: bool
+ chart_type: line
+ dimensions:
+ - name: experiencing_error
+ - name: k8s_kubelet.kubelet_pleg_relist_interval_microseconds
+ description: PLEG Relisting Interval Summary
+ unit: microseconds
+ chart_type: stacked
+ dimensions:
+ - name: "0.5"
+ - name: "0.9"
+ - name: "0.99"
+ - name: k8s_kubelet.kubelet_pleg_relist_latency_microseconds
+ description: PLEG Relisting Latency Summary
+ unit: microseconds
+ chart_type: stacked
+ dimensions:
+ - name: "0.5"
+ - name: "0.9"
+ - name: "0.99"
+ - name: k8s_kubelet.kubelet_token_requests
+ description: Token() Requests To The Alternate Token Source
+ unit: token_requests/s
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: failed
+ - name: k8s_kubelet.rest_client_requests_by_code
+ description: HTTP Requests By Status Code
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per HTTP status code
+ - name: k8s_kubelet.rest_client_requests_by_method
+              description: HTTP Requests By Method
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per HTTP method
+ - name: volume manager
+ description: These metrics refer to the Volume Manager.
+ labels: []
+ metrics:
+ - name: k8s_kubelet.volume_manager_total_volumes
+ description: Volume Manager State Of The World
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: actual
+ - name: desired
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/metrics.go b/src/go/plugin/go.d/modules/k8s_kubelet/metrics.go
new file mode 100644
index 000000000..f8a4c5c57
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/metrics.go
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubelet
+
+import (
+ mtx "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+)
+
+func newMetrics() *metrics {
+ var mx metrics
+ mx.RESTClient.Requests.ByStatusCode = make(map[string]mtx.Gauge)
+ mx.RESTClient.Requests.ByMethod = make(map[string]mtx.Gauge)
+ mx.Kubelet.Runtime.Operations = make(map[string]mtx.Gauge)
+ mx.Kubelet.Runtime.OperationsErrors = make(map[string]mtx.Gauge)
+ mx.Kubelet.Docker.Operations = make(map[string]mtx.Gauge)
+ mx.Kubelet.Docker.OperationsErrors = make(map[string]mtx.Gauge)
+ mx.Kubelet.PodLogFileSystemUsage = make(map[string]mtx.Gauge)
+
+ return &mx
+}
+
+type metrics struct {
+ Token tokenMetrics `stm:"token"`
+ RESTClient restClientMetrics `stm:"rest_client"`
+ APIServer apiServerMetrics `stm:"apiserver"`
+ Kubelet kubeletMetrics `stm:"kubelet"`
+ VolumeManager volumeManagerMetrics `stm:"volume_manager"`
+}
+
+type tokenMetrics struct {
+ Count mtx.Gauge `stm:"count"`
+ FailCount mtx.Gauge `stm:"fail_count"`
+}
+
+type restClientMetrics struct {
+ Requests struct {
+ ByStatusCode map[string]mtx.Gauge `stm:""`
+ ByMethod map[string]mtx.Gauge `stm:""`
+ } `stm:"requests"`
+}
+
+type apiServerMetrics struct {
+ Audit struct {
+ Requests struct {
+ Rejected mtx.Gauge `stm:"rejected_total"`
+ } `stm:"requests"`
+ } `stm:"audit"`
+ Storage struct {
+ EnvelopeTransformation struct {
+ CacheMisses mtx.Gauge `stm:"cache_misses_total"`
+ } `stm:"envelope_transformation"`
+ DataKeyGeneration struct {
+ Failures mtx.Gauge `stm:"failures_total"`
+ Latencies struct {
+ LE5 mtx.Gauge `stm:"5"`
+ LE10 mtx.Gauge `stm:"10"`
+ LE20 mtx.Gauge `stm:"20"`
+ LE40 mtx.Gauge `stm:"40"`
+ LE80 mtx.Gauge `stm:"80"`
+ LE160 mtx.Gauge `stm:"160"`
+ LE320 mtx.Gauge `stm:"320"`
+ LE640 mtx.Gauge `stm:"640"`
+ LE1280 mtx.Gauge `stm:"1280"`
+ LE2560 mtx.Gauge `stm:"2560"`
+ LE5120 mtx.Gauge `stm:"5120"`
+ LE10240 mtx.Gauge `stm:"10240"`
+ LE20480 mtx.Gauge `stm:"20480"`
+ LE40960 mtx.Gauge `stm:"40960"`
+ LEInf mtx.Gauge `stm:"+Inf"`
+ } `stm:"bucket"`
+ } `stm:"data_key_generation"`
+ } `stm:"storage"`
+}
+
+type kubeletMetrics struct {
+ NodeConfigError mtx.Gauge `stm:"node_config_error"`
+ RunningContainerCount mtx.Gauge `stm:"running_container"`
+ RunningPodCount mtx.Gauge `stm:"running_pod"`
+ PLEG struct {
+ Relist struct {
+ Interval struct {
+ Quantile05 mtx.Gauge `stm:"05"`
+ Quantile09 mtx.Gauge `stm:"09"`
+ Quantile099 mtx.Gauge `stm:"099"`
+ } `stm:"interval"`
+ Latency struct {
+ Quantile05 mtx.Gauge `stm:"05"`
+ Quantile09 mtx.Gauge `stm:"09"`
+ Quantile099 mtx.Gauge `stm:"099"`
+ } `stm:"latency"`
+ } `stm:"relist"`
+ } `stm:"pleg"`
+ Runtime struct {
+ Operations map[string]mtx.Gauge `stm:"operations"`
+ OperationsErrors map[string]mtx.Gauge `stm:"operations_errors"`
+ } `stm:"runtime"`
+ Docker struct {
+ Operations map[string]mtx.Gauge `stm:"operations"`
+ OperationsErrors map[string]mtx.Gauge `stm:"operations_errors"`
+ } `stm:"docker"`
+ PodLogFileSystemUsage map[string]mtx.Gauge `stm:"log_file_system_usage"`
+}
+
+type volumeManagerMetrics struct {
+ Plugins map[string]*volumeManagerPlugin `stm:"plugin"`
+}
+
+type volumeManagerPlugin struct {
+ State struct {
+ Actual mtx.Gauge `stm:"actual"`
+ Desired mtx.Gauge `stm:"desired"`
+ } `stm:"state"`
+}
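Note on the file above: the nested struct fields rely on `stm` tags to build flat metric keys (for example, the DEK latency buckets become keys like apiserver_storage_data_key_generation_bucket_5). The following is a minimal, self-contained sketch of that flattening idea using plain reflection over float64 fields; the real collector uses its own stm package, so the flatten helper and types here are illustrative assumptions, not the module's API.

package main

import (
	"fmt"
	"reflect"
)

// flatten walks a struct and joins the non-empty `stm` tags of nested fields
// with "_" to form a metric key; float64 leaves become values in out.
func flatten(prefix string, v reflect.Value, out map[string]float64) {
	t := v.Type()
	for i := 0; i < t.NumField(); i++ {
		tag := t.Field(i).Tag.Get("stm")
		key := prefix
		if tag != "" {
			if key != "" {
				key += "_"
			}
			key += tag
		}
		f := v.Field(i)
		switch f.Kind() {
		case reflect.Struct:
			flatten(key, f, out)
		case reflect.Float64:
			out[key] = f.Float()
		}
	}
}

func main() {
	// Hypothetical miniature of the apiServerMetrics shape from metrics.go.
	type latencies struct {
		LE5  float64 `stm:"5"`
		LE10 float64 `stm:"10"`
	}
	type dataKeyGeneration struct {
		Failures  float64   `stm:"failures_total"`
		Latencies latencies `stm:"bucket"`
	}
	type storage struct {
		DataKeyGeneration dataKeyGeneration `stm:"data_key_generation"`
	}
	type apiServer struct {
		Storage storage `stm:"storage"`
	}

	var mx apiServer
	mx.Storage.DataKeyGeneration.Latencies.LE5 = 6

	out := map[string]float64{}
	flatten("apiserver", reflect.ValueOf(mx), out)
	fmt.Println(out["apiserver_storage_data_key_generation_bucket_5"]) // prints 6
}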
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/testdata/config.json b/src/go/plugin/go.d/modules/k8s_kubelet/testdata/config.json
new file mode 100644
index 000000000..d85483953
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "token_path": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/testdata/config.yaml b/src/go/plugin/go.d/modules/k8s_kubelet/testdata/config.yaml
new file mode 100644
index 000000000..9e4f3fdc4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+token_path: "ok"
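The JSON and YAML fixtures above carry identical placeholder values, which suggests they back a config round-trip test. Below is a hedged sketch of such a test; the partialConfig struct and test name are hypothetical stand-ins, not the module's real Config type.

package k8s_kubelet_sketch

import (
	"encoding/json"
	"os"
	"testing"

	"gopkg.in/yaml.v3"
)

// partialConfig covers only a few of the fixture keys, for illustration.
type partialConfig struct {
	UpdateEvery int    `yaml:"update_every" json:"update_every"`
	URL         string `yaml:"url" json:"url"`
	TokenPath   string `yaml:"token_path" json:"token_path"`
}

func TestConfigFixtures(t *testing.T) {
	var fromJSON, fromYAML partialConfig

	j, err := os.ReadFile("testdata/config.json")
	if err != nil {
		t.Fatal(err)
	}
	if err := json.Unmarshal(j, &fromJSON); err != nil {
		t.Fatal(err)
	}

	y, err := os.ReadFile("testdata/config.yaml")
	if err != nil {
		t.Fatal(err)
	}
	if err := yaml.Unmarshal(y, &fromYAML); err != nil {
		t.Fatal(err)
	}

	// Both fixtures carry the same placeholder values, so the decoded structs should match.
	if fromJSON != fromYAML {
		t.Fatalf("fixtures diverge: %+v vs %+v", fromJSON, fromYAML)
	}
}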
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/testdata/metrics.txt b/src/go/plugin/go.d/modules/k8s_kubelet/testdata/metrics.txt
new file mode 100644
index 000000000..47b63bd55
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/testdata/metrics.txt
@@ -0,0 +1,574 @@
+# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend.
+# TYPE apiserver_audit_event_total counter
+apiserver_audit_event_total 0
+# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend.
+# TYPE apiserver_audit_requests_rejected_total counter
+apiserver_audit_requests_rejected_total 0
+# HELP apiserver_client_certificate_expiration_seconds Distribution of the remaining lifetime on the certificate used to authenticate a request.
+# TYPE apiserver_client_certificate_expiration_seconds histogram
+apiserver_client_certificate_expiration_seconds_bucket{le="0"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="21600"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="43200"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="86400"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="172800"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="345600"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="604800"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="2.592e+06"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="7.776e+06"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="1.5552e+07"} 0
+apiserver_client_certificate_expiration_seconds_bucket{le="3.1104e+07"} 2
+apiserver_client_certificate_expiration_seconds_bucket{le="+Inf"} 2
+apiserver_client_certificate_expiration_seconds_sum 6.198359653913356e+07
+apiserver_client_certificate_expiration_seconds_count 2
+# HELP apiserver_storage_data_key_generation_failures_total Total number of failed data encryption key(DEK) generation operations.
+# TYPE apiserver_storage_data_key_generation_failures_total counter
+apiserver_storage_data_key_generation_failures_total 0
+# HELP apiserver_storage_data_key_generation_latencies_microseconds Latencies in microseconds of data encryption key(DEK) generation operations.
+# TYPE apiserver_storage_data_key_generation_latencies_microseconds histogram
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="5"} 6
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="10"} 7
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="20"} 8
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="40"} 9
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="80"} 10
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="160"} 11
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="320"} 12
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="640"} 13
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="1280"} 14
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="2560"} 15
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="5120"} 16
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="10240"} 17
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="20480"} 18
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="40960"} 19
+apiserver_storage_data_key_generation_latencies_microseconds_bucket{le="+Inf"} 20
+apiserver_storage_data_key_generation_latencies_microseconds_sum 0
+apiserver_storage_data_key_generation_latencies_microseconds_count 0
+# HELP apiserver_storage_envelope_transformation_cache_misses_total Total number of cache misses while accessing key decryption key(KEK).
+# TYPE apiserver_storage_envelope_transformation_cache_misses_total counter
+apiserver_storage_envelope_transformation_cache_misses_total 0
+# HELP get_token_count Counter of total Token() requests to the alternate token source
+# TYPE get_token_count counter
+get_token_count 0
+# HELP get_token_fail_count Counter of failed Token() requests to the alternate token source
+# TYPE get_token_fail_count counter
+get_token_fail_count 0
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 3.1848e-05
+go_gc_duration_seconds{quantile="0.25"} 6.1739e-05
+go_gc_duration_seconds{quantile="0.5"} 9.1641e-05
+go_gc_duration_seconds{quantile="0.75"} 0.000143403
+go_gc_duration_seconds{quantile="1"} 0.003400982
+go_gc_duration_seconds_sum 0.041302468
+go_gc_duration_seconds_count 252
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 282
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 2.2614512e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 2.851571192e+09
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.81591e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 1.9710993e+07
+# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
+# TYPE go_memstats_gc_cpu_fraction gauge
+go_memstats_gc_cpu_fraction 0.0005851177440973569
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 2.41664e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 2.2614512e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 3.8526976e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 2.5796608e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 114479
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 0
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 6.4323584e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.552938975118211e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 1.9825472e+07
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 3456
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16384
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 361304
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 409600
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 2.612264e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 517010
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 2.78528e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 2.78528e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 7.2284408e+07
+# HELP go_threads Number of OS threads created
+# TYPE go_threads gauge
+go_threads 19
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 4933.921
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 4933.921
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 4933.921
+http_request_duration_microseconds_sum{handler="prometheus"} 283201.29
+http_request_duration_microseconds_count{handler="prometheus"} 31
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="prometheus",quantile="0.5"} 423
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 423
+http_request_size_bytes{handler="prometheus",quantile="0.99"} 423
+http_request_size_bytes_sum{handler="prometheus"} 11711
+http_request_size_bytes_count{handler="prometheus"} 31
+# HELP http_requests_total Total number of HTTP requests made.
+# TYPE http_requests_total counter
+http_requests_total{code="200",handler="prometheus",method="get"} 31
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 5678
+http_response_size_bytes{handler="prometheus",quantile="0.9"} 5678
+http_response_size_bytes{handler="prometheus",quantile="0.99"} 5678
+http_response_size_bytes_sum{handler="prometheus"} 178006
+http_response_size_bytes_count{handler="prometheus"} 31
+# HELP kubelet_cgroup_manager_latency_microseconds Latency in microseconds for cgroup manager operations. Broken down by method.
+# TYPE kubelet_cgroup_manager_latency_microseconds summary
+kubelet_cgroup_manager_latency_microseconds{operation_type="create",quantile="0.5"} NaN
+kubelet_cgroup_manager_latency_microseconds{operation_type="create",quantile="0.9"} NaN
+kubelet_cgroup_manager_latency_microseconds{operation_type="create",quantile="0.99"} NaN
+kubelet_cgroup_manager_latency_microseconds_sum{operation_type="create"} 96365
+kubelet_cgroup_manager_latency_microseconds_count{operation_type="create"} 12
+kubelet_cgroup_manager_latency_microseconds{operation_type="update",quantile="0.5"} 91
+kubelet_cgroup_manager_latency_microseconds{operation_type="update",quantile="0.9"} 193
+kubelet_cgroup_manager_latency_microseconds{operation_type="update",quantile="0.99"} 208
+kubelet_cgroup_manager_latency_microseconds_sum{operation_type="update"} 12921
+kubelet_cgroup_manager_latency_microseconds_count{operation_type="update"} 79
+# HELP kubelet_container_log_filesystem_used_bytes Bytes used by the container's logs on the filesystem.
+# TYPE kubelet_container_log_filesystem_used_bytes gauge
+kubelet_container_log_filesystem_used_bytes{container="coredns",namespace="kube-system",pod="coredns-86c58d9df4-d22hv"} 28672
+kubelet_container_log_filesystem_used_bytes{container="coredns",namespace="kube-system",pod="coredns-86c58d9df4-ks5dj"} 28672
+kubelet_container_log_filesystem_used_bytes{container="etcd",namespace="kube-system",pod="etcd-minikube"} 36864
+kubelet_container_log_filesystem_used_bytes{container="kube-addon-manager",namespace="kube-system",pod="kube-addon-manager-minikube"} 45056
+kubelet_container_log_filesystem_used_bytes{container="kube-apiserver",namespace="kube-system",pod="kube-apiserver-minikube"} 36864
+kubelet_container_log_filesystem_used_bytes{container="kube-controller-manager",namespace="kube-system",pod="kube-controller-manager-minikube"} 57344
+kubelet_container_log_filesystem_used_bytes{container="kube-proxy",namespace="kube-system",pod="kube-proxy-q2fvs"} 28672
+kubelet_container_log_filesystem_used_bytes{container="kube-scheduler",namespace="kube-system",pod="kube-scheduler-minikube"} 40960
+kubelet_container_log_filesystem_used_bytes{container="storage-provisioner",namespace="kube-system",pod="storage-provisioner"} 24576
+# HELP kubelet_containers_per_pod_count The number of containers per pod.
+# TYPE kubelet_containers_per_pod_count summary
+kubelet_containers_per_pod_count{quantile="0.5"} NaN
+kubelet_containers_per_pod_count{quantile="0.9"} NaN
+kubelet_containers_per_pod_count{quantile="0.99"} NaN
+kubelet_containers_per_pod_count_sum 9
+kubelet_containers_per_pod_count_count 9
+# HELP kubelet_docker_operations Cumulative number of Docker operations by operation type.
+# TYPE kubelet_docker_operations counter
+kubelet_docker_operations{operation_type="create_container"} 19
+kubelet_docker_operations{operation_type="info"} 2
+kubelet_docker_operations{operation_type="inspect_container"} 223
+kubelet_docker_operations{operation_type="inspect_image"} 110
+kubelet_docker_operations{operation_type="list_containers"} 5157
+kubelet_docker_operations{operation_type="list_images"} 195
+kubelet_docker_operations{operation_type="remove_container"} 23
+kubelet_docker_operations{operation_type="start_container"} 19
+kubelet_docker_operations{operation_type="stop_container"} 23
+kubelet_docker_operations{operation_type="version"} 472
+# HELP kubelet_docker_operations_errors Cumulative number of Docker operation errors by operation type.
+# TYPE kubelet_docker_operations_errors counter
+kubelet_docker_operations_errors{operation_type="inspect_container"} 14
+kubelet_docker_operations_errors{operation_type="remove_container"} 4
+# HELP kubelet_docker_operations_latency_microseconds Latency in microseconds of Docker operations. Broken down by operation type.
+# TYPE kubelet_docker_operations_latency_microseconds summary
+kubelet_docker_operations_latency_microseconds{operation_type="create_container",quantile="0.5"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="create_container",quantile="0.9"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="create_container",quantile="0.99"} NaN
+kubelet_docker_operations_latency_microseconds_sum{operation_type="create_container"} 1.157649e+07
+kubelet_docker_operations_latency_microseconds_count{operation_type="create_container"} 19
+kubelet_docker_operations_latency_microseconds{operation_type="info",quantile="0.5"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="info",quantile="0.9"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="info",quantile="0.99"} NaN
+kubelet_docker_operations_latency_microseconds_sum{operation_type="info"} 15754
+kubelet_docker_operations_latency_microseconds_count{operation_type="info"} 2
+kubelet_docker_operations_latency_microseconds{operation_type="inspect_container",quantile="0.5"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="inspect_container",quantile="0.9"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="inspect_container",quantile="0.99"} NaN
+kubelet_docker_operations_latency_microseconds_sum{operation_type="inspect_container"} 6.320335e+06
+kubelet_docker_operations_latency_microseconds_count{operation_type="inspect_container"} 223
+kubelet_docker_operations_latency_microseconds{operation_type="inspect_image",quantile="0.5"} 1112
+kubelet_docker_operations_latency_microseconds{operation_type="inspect_image",quantile="0.9"} 1112
+kubelet_docker_operations_latency_microseconds{operation_type="inspect_image",quantile="0.99"} 1112
+kubelet_docker_operations_latency_microseconds_sum{operation_type="inspect_image"} 276071
+kubelet_docker_operations_latency_microseconds_count{operation_type="inspect_image"} 110
+kubelet_docker_operations_latency_microseconds{operation_type="list_containers",quantile="0.5"} 3368
+kubelet_docker_operations_latency_microseconds{operation_type="list_containers",quantile="0.9"} 9003
+kubelet_docker_operations_latency_microseconds{operation_type="list_containers",quantile="0.99"} 16951
+kubelet_docker_operations_latency_microseconds_sum{operation_type="list_containers"} 2.2912964e+07
+kubelet_docker_operations_latency_microseconds_count{operation_type="list_containers"} 5157
+kubelet_docker_operations_latency_microseconds{operation_type="list_images",quantile="0.5"} 3579
+kubelet_docker_operations_latency_microseconds{operation_type="list_images",quantile="0.9"} 5431
+kubelet_docker_operations_latency_microseconds{operation_type="list_images",quantile="0.99"} 7136
+kubelet_docker_operations_latency_microseconds_sum{operation_type="list_images"} 798789
+kubelet_docker_operations_latency_microseconds_count{operation_type="list_images"} 195
+kubelet_docker_operations_latency_microseconds{operation_type="remove_container",quantile="0.5"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="remove_container",quantile="0.9"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="remove_container",quantile="0.99"} NaN
+kubelet_docker_operations_latency_microseconds_sum{operation_type="remove_container"} 5.297973e+06
+kubelet_docker_operations_latency_microseconds_count{operation_type="remove_container"} 23
+kubelet_docker_operations_latency_microseconds{operation_type="start_container",quantile="0.5"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="start_container",quantile="0.9"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="start_container",quantile="0.99"} NaN
+kubelet_docker_operations_latency_microseconds_sum{operation_type="start_container"} 1.5755618e+07
+kubelet_docker_operations_latency_microseconds_count{operation_type="start_container"} 19
+kubelet_docker_operations_latency_microseconds{operation_type="stop_container",quantile="0.5"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="stop_container",quantile="0.9"} NaN
+kubelet_docker_operations_latency_microseconds{operation_type="stop_container",quantile="0.99"} NaN
+kubelet_docker_operations_latency_microseconds_sum{operation_type="stop_container"} 18810
+kubelet_docker_operations_latency_microseconds_count{operation_type="stop_container"} 23
+kubelet_docker_operations_latency_microseconds{operation_type="version",quantile="0.5"} 869
+kubelet_docker_operations_latency_microseconds{operation_type="version",quantile="0.9"} 1482
+kubelet_docker_operations_latency_microseconds{operation_type="version",quantile="0.99"} 2426
+kubelet_docker_operations_latency_microseconds_sum{operation_type="version"} 455522
+kubelet_docker_operations_latency_microseconds_count{operation_type="version"} 472
+# HELP kubelet_network_plugin_operations_latency_microseconds Latency in microseconds of network plugin operations. Broken down by operation type.
+# TYPE kubelet_network_plugin_operations_latency_microseconds summary
+kubelet_network_plugin_operations_latency_microseconds{operation_type="get_pod_network_status",quantile="0.5"} NaN
+kubelet_network_plugin_operations_latency_microseconds{operation_type="get_pod_network_status",quantile="0.9"} NaN
+kubelet_network_plugin_operations_latency_microseconds{operation_type="get_pod_network_status",quantile="0.99"} NaN
+kubelet_network_plugin_operations_latency_microseconds_sum{operation_type="get_pod_network_status"} 47
+kubelet_network_plugin_operations_latency_microseconds_count{operation_type="get_pod_network_status"} 11
+kubelet_network_plugin_operations_latency_microseconds{operation_type="set_up_pod",quantile="0.5"} NaN
+kubelet_network_plugin_operations_latency_microseconds{operation_type="set_up_pod",quantile="0.9"} NaN
+kubelet_network_plugin_operations_latency_microseconds{operation_type="set_up_pod",quantile="0.99"} NaN
+kubelet_network_plugin_operations_latency_microseconds_sum{operation_type="set_up_pod"} 23
+kubelet_network_plugin_operations_latency_microseconds_count{operation_type="set_up_pod"} 2
+kubelet_network_plugin_operations_latency_microseconds{operation_type="tear_down_pod",quantile="0.5"} NaN
+kubelet_network_plugin_operations_latency_microseconds{operation_type="tear_down_pod",quantile="0.9"} NaN
+kubelet_network_plugin_operations_latency_microseconds{operation_type="tear_down_pod",quantile="0.99"} NaN
+kubelet_network_plugin_operations_latency_microseconds_sum{operation_type="tear_down_pod"} 29
+kubelet_network_plugin_operations_latency_microseconds_count{operation_type="tear_down_pod"} 4
+# HELP kubelet_node_config_error This metric is true (1) if the node is experiencing a configuration-related error, false (0) otherwise.
+# TYPE kubelet_node_config_error gauge
+kubelet_node_config_error 1
+# HELP kubelet_pleg_relist_interval_microseconds Interval in microseconds between relisting in PLEG.
+# TYPE kubelet_pleg_relist_interval_microseconds summary
+kubelet_pleg_relist_interval_microseconds{quantile="0.5"} 1.013125e+06
+kubelet_pleg_relist_interval_microseconds{quantile="0.9"} 1.01682e+06
+kubelet_pleg_relist_interval_microseconds{quantile="0.99"} 1.032022e+06
+kubelet_pleg_relist_interval_microseconds_sum 1.392954348e+09
+kubelet_pleg_relist_interval_microseconds_count 1368
+# HELP kubelet_pleg_relist_latency_microseconds Latency in microseconds for relisting pods in PLEG.
+# TYPE kubelet_pleg_relist_latency_microseconds summary
+kubelet_pleg_relist_latency_microseconds{quantile="0.5"} 12741
+kubelet_pleg_relist_latency_microseconds{quantile="0.9"} 16211
+kubelet_pleg_relist_latency_microseconds{quantile="0.99"} 31234
+kubelet_pleg_relist_latency_microseconds_sum 2.4227856e+07
+kubelet_pleg_relist_latency_microseconds_count 1369
+# HELP kubelet_pod_start_latency_microseconds Latency in microseconds for a single pod to go from pending to running.
+# TYPE kubelet_pod_start_latency_microseconds summary
+kubelet_pod_start_latency_microseconds{quantile="0.5"} NaN
+kubelet_pod_start_latency_microseconds{quantile="0.9"} NaN
+kubelet_pod_start_latency_microseconds{quantile="0.99"} NaN
+kubelet_pod_start_latency_microseconds_sum 2.884769e+06
+kubelet_pod_start_latency_microseconds_count 9
+# HELP kubelet_pod_worker_latency_microseconds Latency in microseconds to sync a single pod. Broken down by operation type: create, update, or sync
+# TYPE kubelet_pod_worker_latency_microseconds summary
+kubelet_pod_worker_latency_microseconds{operation_type="sync",quantile="0.5"} NaN
+kubelet_pod_worker_latency_microseconds{operation_type="sync",quantile="0.9"} NaN
+kubelet_pod_worker_latency_microseconds{operation_type="sync",quantile="0.99"} NaN
+kubelet_pod_worker_latency_microseconds_sum{operation_type="sync"} 412
+kubelet_pod_worker_latency_microseconds_count{operation_type="sync"} 1
+# HELP kubelet_pod_worker_start_latency_microseconds Latency in microseconds from seeing a pod to starting a worker.
+# TYPE kubelet_pod_worker_start_latency_microseconds summary
+kubelet_pod_worker_start_latency_microseconds{quantile="0.5"} NaN
+kubelet_pod_worker_start_latency_microseconds{quantile="0.9"} NaN
+kubelet_pod_worker_start_latency_microseconds{quantile="0.99"} NaN
+kubelet_pod_worker_start_latency_microseconds_sum 2.85589e+06
+kubelet_pod_worker_start_latency_microseconds_count 9
+# HELP kubelet_running_container_count Number of containers currently running
+# TYPE kubelet_running_container_count gauge
+kubelet_running_container_count 9
+# HELP kubelet_running_pod_count Number of pods currently running
+# TYPE kubelet_running_pod_count gauge
+kubelet_running_pod_count 9
+# HELP kubelet_runtime_operations Cumulative number of runtime operations by operation type.
+# TYPE kubelet_runtime_operations counter
+kubelet_runtime_operations{operation_type="container_status"} 90
+kubelet_runtime_operations{operation_type="create_container"} 10
+kubelet_runtime_operations{operation_type="exec_sync"} 138
+kubelet_runtime_operations{operation_type="image_status"} 25
+kubelet_runtime_operations{operation_type="list_containers"} 2586
+kubelet_runtime_operations{operation_type="list_images"} 195
+kubelet_runtime_operations{operation_type="list_podsandbox"} 2562
+kubelet_runtime_operations{operation_type="podsandbox_status"} 77
+kubelet_runtime_operations{operation_type="remove_container"} 14
+kubelet_runtime_operations{operation_type="run_podsandbox"} 9
+kubelet_runtime_operations{operation_type="start_container"} 10
+kubelet_runtime_operations{operation_type="status"} 279
+kubelet_runtime_operations{operation_type="stop_podsandbox"} 14
+kubelet_runtime_operations{operation_type="version"} 190
+# HELP kubelet_runtime_operations_errors Cumulative number of runtime operation errors by operation type.
+# TYPE kubelet_runtime_operations_errors counter
+kubelet_runtime_operations_errors{operation_type="container_status"} 14
+kubelet_runtime_operations_errors{operation_type="remove_container"} 4
+# HELP kubelet_runtime_operations_latency_microseconds Latency in microseconds of runtime operations. Broken down by operation type.
+# TYPE kubelet_runtime_operations_latency_microseconds summary
+kubelet_runtime_operations_latency_microseconds{operation_type="container_status",quantile="0.5"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="container_status",quantile="0.9"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="container_status",quantile="0.99"} NaN
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="container_status"} 5.830434e+06
+kubelet_runtime_operations_latency_microseconds_count{operation_type="container_status"} 90
+kubelet_runtime_operations_latency_microseconds{operation_type="create_container",quantile="0.5"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="create_container",quantile="0.9"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="create_container",quantile="0.99"} NaN
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="create_container"} 6.237513e+06
+kubelet_runtime_operations_latency_microseconds_count{operation_type="create_container"} 10
+kubelet_runtime_operations_latency_microseconds{operation_type="exec_sync",quantile="0.5"} 77674
+kubelet_runtime_operations_latency_microseconds{operation_type="exec_sync",quantile="0.9"} 84801
+kubelet_runtime_operations_latency_microseconds{operation_type="exec_sync",quantile="0.99"} 91057
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="exec_sync"} 1.1581846e+07
+kubelet_runtime_operations_latency_microseconds_count{operation_type="exec_sync"} 138
+kubelet_runtime_operations_latency_microseconds{operation_type="image_status",quantile="0.5"} 1379
+kubelet_runtime_operations_latency_microseconds{operation_type="image_status",quantile="0.9"} 1379
+kubelet_runtime_operations_latency_microseconds{operation_type="image_status",quantile="0.99"} 1379
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="image_status"} 84242
+kubelet_runtime_operations_latency_microseconds_count{operation_type="image_status"} 25
+kubelet_runtime_operations_latency_microseconds{operation_type="list_containers",quantile="0.5"} 2860
+kubelet_runtime_operations_latency_microseconds{operation_type="list_containers",quantile="0.9"} 5131
+kubelet_runtime_operations_latency_microseconds{operation_type="list_containers",quantile="0.99"} 15491
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="list_containers"} 8.583973e+06
+kubelet_runtime_operations_latency_microseconds_count{operation_type="list_containers"} 2586
+kubelet_runtime_operations_latency_microseconds{operation_type="list_images",quantile="0.5"} 4206
+kubelet_runtime_operations_latency_microseconds{operation_type="list_images",quantile="0.9"} 6102
+kubelet_runtime_operations_latency_microseconds{operation_type="list_images",quantile="0.99"} 7592
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="list_images"} 915822
+kubelet_runtime_operations_latency_microseconds_count{operation_type="list_images"} 195
+kubelet_runtime_operations_latency_microseconds{operation_type="list_podsandbox",quantile="0.5"} 6645
+kubelet_runtime_operations_latency_microseconds{operation_type="list_podsandbox",quantile="0.9"} 11038
+kubelet_runtime_operations_latency_microseconds{operation_type="list_podsandbox",quantile="0.99"} 21220
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="list_podsandbox"} 1.7650737e+07
+kubelet_runtime_operations_latency_microseconds_count{operation_type="list_podsandbox"} 2562
+kubelet_runtime_operations_latency_microseconds{operation_type="podsandbox_status",quantile="0.5"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="podsandbox_status",quantile="0.9"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="podsandbox_status",quantile="0.99"} NaN
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="podsandbox_status"} 673056
+kubelet_runtime_operations_latency_microseconds_count{operation_type="podsandbox_status"} 77
+kubelet_runtime_operations_latency_microseconds{operation_type="remove_container",quantile="0.5"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="remove_container",quantile="0.9"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="remove_container",quantile="0.99"} NaN
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="remove_container"} 1.781569e+06
+kubelet_runtime_operations_latency_microseconds_count{operation_type="remove_container"} 14
+kubelet_runtime_operations_latency_microseconds{operation_type="run_podsandbox",quantile="0.5"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="run_podsandbox",quantile="0.9"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="run_podsandbox",quantile="0.99"} NaN
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="run_podsandbox"} 9.284403e+06
+kubelet_runtime_operations_latency_microseconds_count{operation_type="run_podsandbox"} 9
+kubelet_runtime_operations_latency_microseconds{operation_type="start_container",quantile="0.5"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="start_container",quantile="0.9"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="start_container",quantile="0.99"} NaN
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="start_container"} 1.1913088e+07
+kubelet_runtime_operations_latency_microseconds_count{operation_type="start_container"} 10
+kubelet_runtime_operations_latency_microseconds{operation_type="status",quantile="0.5"} 1555
+kubelet_runtime_operations_latency_microseconds{operation_type="status",quantile="0.9"} 2438
+kubelet_runtime_operations_latency_microseconds{operation_type="status",quantile="0.99"} 4376
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="status"} 496865
+kubelet_runtime_operations_latency_microseconds_count{operation_type="status"} 279
+kubelet_runtime_operations_latency_microseconds{operation_type="stop_podsandbox",quantile="0.5"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="stop_podsandbox",quantile="0.9"} NaN
+kubelet_runtime_operations_latency_microseconds{operation_type="stop_podsandbox",quantile="0.99"} NaN
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="stop_podsandbox"} 41502
+kubelet_runtime_operations_latency_microseconds_count{operation_type="stop_podsandbox"} 14
+kubelet_runtime_operations_latency_microseconds{operation_type="version",quantile="0.5"} 933
+kubelet_runtime_operations_latency_microseconds{operation_type="version",quantile="0.9"} 1515
+kubelet_runtime_operations_latency_microseconds{operation_type="version",quantile="0.99"} 1674
+kubelet_runtime_operations_latency_microseconds_sum{operation_type="version"} 216328
+kubelet_runtime_operations_latency_microseconds_count{operation_type="version"} 190
+# HELP kubernetes_build_info A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.
+# TYPE kubernetes_build_info gauge
+kubernetes_build_info{buildDate="2019-02-28T13:35:32Z",compiler="gc",gitCommit="c27b913fddd1a6c480c229191a087698aa92f0b1",gitTreeState="clean",gitVersion="v1.13.4",goVersion="go1.11.5",major="1",minor="13",platform="linux/amd64"} 1
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 44.55
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1e+06
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 33
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 9.2401664e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.55293758654e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 1.379233792e+09
+# HELP rest_client_request_latency_seconds Request latency in seconds. Broken down by verb and URL.
+# TYPE rest_client_request_latency_seconds histogram
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.001"} 44
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.002"} 124
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.004"} 181
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.008"} 183
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.016"} 190
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.032"} 195
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.064"} 195
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.128"} 199
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.256"} 199
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="0.512"} 199
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="GET",le="+Inf"} 202
+rest_client_request_latency_seconds_sum{url="https://localhost:8443/%7Bprefix%7D",verb="GET"} 24.538311267
+rest_client_request_latency_seconds_count{url="https://localhost:8443/%7Bprefix%7D",verb="GET"} 202
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.001"} 0
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.002"} 0
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.004"} 23
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.008"} 160
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.016"} 172
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.032"} 175
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.064"} 176
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.128"} 177
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.256"} 177
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="0.512"} 177
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH",le="+Inf"} 177
+rest_client_request_latency_seconds_sum{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH"} 1.1527289999999994
+rest_client_request_latency_seconds_count{url="https://localhost:8443/%7Bprefix%7D",verb="PATCH"} 177
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.001"} 8
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.002"} 10
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.004"} 17
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.008"} 49
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.016"} 49
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.032"} 49
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.064"} 49
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.128"} 49
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.256"} 49
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="0.512"} 49
+rest_client_request_latency_seconds_bucket{url="https://localhost:8443/%7Bprefix%7D",verb="POST",le="+Inf"} 52
+rest_client_request_latency_seconds_sum{url="https://localhost:8443/%7Bprefix%7D",verb="POST"} 17.43416557
+rest_client_request_latency_seconds_count{url="https://localhost:8443/%7Bprefix%7D",verb="POST"} 52
+# HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host.
+# TYPE rest_client_requests_total counter
+rest_client_requests_total{code="200",host="localhost:8443",method="GET"} 191
+rest_client_requests_total{code="200",host="localhost:8443",method="PATCH"} 177
+rest_client_requests_total{code="201",host="localhost:8443",method="POST"} 43
+rest_client_requests_total{code="403",host="localhost:8443",method="GET"} 2
+rest_client_requests_total{code="409",host="localhost:8443",method="POST"} 1
+rest_client_requests_total{code="<error>",host="localhost:8443",method="GET"} 37
+rest_client_requests_total{code="<error>",host="localhost:8443",method="POST"} 8
+# HELP storage_operation_duration_seconds Storage operation duration
+# TYPE storage_operation_duration_seconds histogram
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="0.1"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="0.25"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="0.5"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="1"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="2.5"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="5"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="10"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="15"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="25"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="50"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap",le="+Inf"} 3
+storage_operation_duration_seconds_sum{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap"} 0.00147889
+storage_operation_duration_seconds_count{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/configmap"} 3
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="0.1"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="0.25"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="0.5"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="1"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="2.5"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="5"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="10"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="15"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="25"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="50"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path",le="+Inf"} 15
+storage_operation_duration_seconds_sum{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path"} 0.002347783
+storage_operation_duration_seconds_count{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/host-path"} 15
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="0.1"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="0.25"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="0.5"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="1"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="2.5"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="5"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="10"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="15"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="25"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="50"} 4
+storage_operation_duration_seconds_bucket{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret",le="+Inf"} 4
+storage_operation_duration_seconds_sum{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret"} 0.001769817
+storage_operation_duration_seconds_count{operation_name="verify_controller_attached_volume",volume_plugin="kubernetes.io/secret"} 4
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="0.1"} 59
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="0.25"} 60
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="0.5"} 60
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="1"} 62
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="2.5"} 62
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="5"} 62
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="10"} 62
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="15"} 62
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="25"} 62
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="50"} 62
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap",le="+Inf"} 62
+storage_operation_duration_seconds_sum{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap"} 2.039342002999999
+storage_operation_duration_seconds_count{operation_name="volume_mount",volume_plugin="kubernetes.io/configmap"} 62
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="0.1"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="0.25"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="0.5"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="1"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="2.5"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="5"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="10"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="15"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="25"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="50"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path",le="+Inf"} 15
+storage_operation_duration_seconds_sum{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path"} 0.006827130000000001
+storage_operation_duration_seconds_count{operation_name="volume_mount",volume_plugin="kubernetes.io/host-path"} 15
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="0.1"} 83
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="0.25"} 83
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="0.5"} 83
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="1"} 85
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="2.5"} 85
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="5"} 85
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="10"} 85
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="15"} 85
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="25"} 85
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="50"} 85
+storage_operation_duration_seconds_bucket{operation_name="volume_mount",volume_plugin="kubernetes.io/secret",le="+Inf"} 85
+storage_operation_duration_seconds_sum{operation_name="volume_mount",volume_plugin="kubernetes.io/secret"} 1.9201849530000006
+storage_operation_duration_seconds_count{operation_name="volume_mount",volume_plugin="kubernetes.io/secret"} 85
+# HELP volume_manager_total_volumes Number of volumes in Volume Manager
+# TYPE volume_manager_total_volumes gauge
+volume_manager_total_volumes{plugin_name="kubernetes.io/configmap",state="actual_state_of_world"} 3
+volume_manager_total_volumes{plugin_name="kubernetes.io/configmap",state="desired_state_of_world"} 3
+volume_manager_total_volumes{plugin_name="kubernetes.io/host-path",state="actual_state_of_world"} 15
+volume_manager_total_volumes{plugin_name="kubernetes.io/host-path",state="desired_state_of_world"} 15
+volume_manager_total_volumes{plugin_name="kubernetes.io/secret",state="actual_state_of_world"} 4
+volume_manager_total_volumes{plugin_name="kubernetes.io/secret",state="desired_state_of_world"} 4 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/k8s_kubelet/testdata/token.txt b/src/go/plugin/go.d/modules/k8s_kubelet/testdata/token.txt
new file mode 100644
index 000000000..e769c538e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubelet/testdata/token.txt
@@ -0,0 +1 @@
+8zU5Emm58tPGShVkwTK3ZLn0d4I
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/README.md b/src/go/plugin/go.d/modules/k8s_kubeproxy/README.md
new file mode 120000
index 000000000..020405250
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/README.md
@@ -0,0 +1 @@
+integrations/kubeproxy.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/charts.go b/src/go/plugin/go.d/modules/k8s_kubeproxy/charts.go
new file mode 100644
index 000000000..3eea903fc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/charts.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubeproxy
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+ // Dim is an alias for module.Dim
+ Dim = module.Dim
+)
+
+var charts = Charts{
+ {
+ ID: "kubeproxy_sync_proxy_rules",
+ Title: "Sync Proxy Rules",
+ Units: "events/s",
+ Fam: "sync proxy rules",
+ Ctx: "k8s_kubeproxy.kubeproxy_sync_proxy_rules",
+ Dims: Dims{
+ {ID: "sync_proxy_rules_count", Name: "sync proxy rules", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "kubeproxy_sync_proxy_rules_latency",
+ Title: "Sync Proxy Rules Latency",
+ Units: "observes/s",
+ Fam: "sync proxy rules",
+ Ctx: "k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microseconds",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "sync_proxy_rules_bucket_1000", Name: "0.001 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_2000", Name: "0.002 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_4000", Name: "0.004 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_8000", Name: "0.008 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_16000", Name: "0.016 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_32000", Name: "0.032 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_64000", Name: "0.064 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_128000", Name: "0.128 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_256000", Name: "0.256 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_512000", Name: "0.512 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_1024000", Name: "1.024 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_2048000", Name: "2.048 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_4096000", Name: "4.096 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_8192000", Name: "8.192 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_16384000", Name: "16.384 sec", Algo: module.Incremental},
+ {ID: "sync_proxy_rules_bucket_+Inf", Name: "+Inf", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "kubeproxy_sync_proxy_rules_latency_percentage",
+ Title: "Sync Proxy Rules Latency Percentage",
+ Units: "%",
+ Fam: "sync proxy rules",
+ Ctx: "k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "sync_proxy_rules_bucket_1000", Name: "0.001 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_2000", Name: "0.002 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_4000", Name: "0.004 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_8000", Name: "0.008 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_16000", Name: "0.016 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_32000", Name: "0.032 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_64000", Name: "0.064 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_128000", Name: "0.128 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_256000", Name: "0.256 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_512000", Name: "0.512 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_1024000", Name: "1.024 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_2048000", Name: "2.048 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_4096000", Name: "4.096 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_8192000", Name: "8.192 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_16384000", Name: "16.384 sec", Algo: module.PercentOfIncremental},
+ {ID: "sync_proxy_rules_bucket_+Inf", Name: "+Inf", Algo: module.PercentOfIncremental},
+ },
+ },
+ {
+ ID: "rest_client_requests_by_code",
+ Title: "HTTP Requests By Status Code",
+ Units: "requests/s",
+ Fam: "rest client",
+ Ctx: "k8s_kubeproxy.rest_client_requests_by_code",
+ Type: module.Stacked,
+ },
+ {
+ ID: "rest_client_requests_by_method",
+ Title: "HTTP Requests By Method",
+ Units: "requests/s",
+ Fam: "rest client",
+ Ctx: "k8s_kubeproxy.rest_client_requests_by_method",
+ Type: module.Stacked,
+ },
+ {
+ ID: "http_request_duration",
+ Title: "HTTP Requests Duration",
+ Units: "microseconds",
+ Fam: "http",
+ Ctx: "k8s_kubeproxy.http_request_duration",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "http_request_duration_05", Name: "0.5"},
+ {ID: "http_request_duration_09", Name: "0.9"},
+ {ID: "http_request_duration_099", Name: "0.99"},
+ },
+ },
+}
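
The two REST client charts above are declared without static dimensions on purpose: collect.go (the next file) appends one dimension per HTTP status code and per method as they appear in `rest_client_requests_total`. The latency charts, by contrast, ship with a fixed dimension per histogram bucket. A rough illustration of what those two latency charts would render between consecutive scrapes, assuming Netdata's standard dimension algorithms (`Incremental` charts the per-interval delta of a counter, `PercentOfIncremental` charts each dimension's delta as a share of the row total) and made-up bucket values:

```go
package main

import "fmt"

func main() {
	// Hypothetical per-bucket counters from two consecutive collections.
	prev := map[string]int64{"sync_proxy_rules_bucket_16000": 23, "sync_proxy_rules_bucket_32000": 2510}
	curr := map[string]int64{"sync_proxy_rules_bucket_16000": 25, "sync_proxy_rules_bucket_32000": 2518}

	deltas := make(map[string]int64, len(curr))
	var total int64
	for k, v := range curr {
		deltas[k] = v - prev[k]
		total += deltas[k]
	}
	for k, d := range deltas {
		// Incremental dim -> rate (observes/s); PercentOfIncremental dim -> share of all buckets.
		fmt.Printf("%s: %d observes/s, %.0f%%\n", k, d, float64(d)/float64(total)*100)
	}
}
```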
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/collect.go b/src/go/plugin/go.d/modules/k8s_kubeproxy/collect.go
new file mode 100644
index 000000000..8664efaae
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/collect.go
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubeproxy
+
+import (
+ "math"
+
+ mtx "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (kp *KubeProxy) collect() (map[string]int64, error) {
+ raw, err := kp.prom.ScrapeSeries()
+
+ if err != nil {
+ return nil, err
+ }
+
+ mx := newMetrics()
+
+ kp.collectSyncProxyRules(raw, mx)
+ kp.collectRESTClientHTTPRequests(raw, mx)
+ kp.collectHTTPRequestDuration(raw, mx)
+
+ return stm.ToMap(mx), nil
+}
+
+func (kp *KubeProxy) collectSyncProxyRules(raw prometheus.Series, mx *metrics) {
+ m := raw.FindByName("kubeproxy_sync_proxy_rules_latency_microseconds_count")
+ mx.SyncProxyRules.Count.Set(m.Max())
+ kp.collectSyncProxyRulesLatency(raw, mx)
+}
+
+func (kp *KubeProxy) collectSyncProxyRulesLatency(raw prometheus.Series, mx *metrics) {
+ metricName := "kubeproxy_sync_proxy_rules_latency_microseconds_bucket"
+ latency := &mx.SyncProxyRules.Latency
+
+ for _, metric := range raw.FindByName(metricName) {
+ bucket := metric.Labels.Get("le")
+ value := metric.Value
+ switch bucket {
+ case "1000":
+ latency.LE1000.Set(value)
+ case "2000":
+ latency.LE2000.Set(value)
+ case "4000":
+ latency.LE4000.Set(value)
+ case "8000":
+ latency.LE8000.Set(value)
+ case "16000":
+ latency.LE16000.Set(value)
+ case "32000":
+ latency.LE32000.Set(value)
+ case "64000":
+ latency.LE64000.Set(value)
+ case "128000":
+ latency.LE128000.Set(value)
+ case "256000":
+ latency.LE256000.Set(value)
+ case "512000":
+ latency.LE512000.Set(value)
+ case "1.024e+06":
+ latency.LE1024000.Set(value)
+ case "2.048e+06":
+ latency.LE2048000.Set(value)
+ case "4.096e+06":
+ latency.LE4096000.Set(value)
+ case "8.192e+06":
+ latency.LE8192000.Set(value)
+ case "1.6384e+07":
+ latency.LE16384000.Set(value)
+ case "+Inf":
+ latency.Inf.Set(value)
+ }
+ }
+
+ latency.Inf.Sub(latency.LE16384000.Value())
+ latency.LE16384000.Sub(latency.LE8192000.Value())
+ latency.LE8192000.Sub(latency.LE4096000.Value())
+ latency.LE4096000.Sub(latency.LE2048000.Value())
+ latency.LE2048000.Sub(latency.LE1024000.Value())
+ latency.LE1024000.Sub(latency.LE512000.Value())
+ latency.LE512000.Sub(latency.LE256000.Value())
+ latency.LE256000.Sub(latency.LE128000.Value())
+ latency.LE128000.Sub(latency.LE64000.Value())
+ latency.LE64000.Sub(latency.LE32000.Value())
+ latency.LE32000.Sub(latency.LE16000.Value())
+ latency.LE16000.Sub(latency.LE8000.Value())
+ latency.LE8000.Sub(latency.LE4000.Value())
+ latency.LE4000.Sub(latency.LE2000.Value())
+ latency.LE2000.Sub(latency.LE1000.Value())
+}
+
+func (kp *KubeProxy) collectRESTClientHTTPRequests(raw prometheus.Series, mx *metrics) {
+ metricName := "rest_client_requests_total"
+ chart := kp.charts.Get("rest_client_requests_by_code")
+
+ for _, metric := range raw.FindByName(metricName) {
+ code := metric.Labels.Get("code")
+ if code == "" {
+ continue
+ }
+ dimID := "rest_client_requests_" + code
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: code, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ mx.RESTClient.Requests.ByStatusCode[code] = mtx.Gauge(metric.Value)
+ }
+
+ chart = kp.charts.Get("rest_client_requests_by_method")
+
+ for _, metric := range raw.FindByName(metricName) {
+ method := metric.Labels.Get("method")
+ if method == "" {
+ continue
+ }
+ dimID := "rest_client_requests_" + method
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: method, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ mx.RESTClient.Requests.ByMethod[method] = mtx.Gauge(metric.Value)
+ }
+}
+
+func (kp *KubeProxy) collectHTTPRequestDuration(raw prometheus.Series, mx *metrics) {
+ // Summary
+ for _, metric := range raw.FindByName("http_request_duration_microseconds") {
+ if math.IsNaN(metric.Value) {
+ continue
+ }
+ quantile := metric.Labels.Get("quantile")
+ switch quantile {
+ case "0.5":
+ mx.HTTP.Request.Duration.Quantile05.Set(metric.Value)
+ case "0.9":
+ mx.HTTP.Request.Duration.Quantile09.Set(metric.Value)
+ case "0.99":
+ mx.HTTP.Request.Duration.Quantile099.Set(metric.Value)
+ }
+ }
+}
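
`collectSyncProxyRulesLatency` reads the cumulative `le` buckets of the kubeproxy histogram and then runs the chain of `Sub` calls so that each gauge ends up holding only its own bucket's observations rather than the cumulative count. A minimal sketch of that cumulative-to-per-bucket conversion, using a subset of the histogram from `testdata/metrics.txt` further down:

```go
package main

import "fmt"

// cumulativeToPerBucket mirrors the Sub chain above: walk from the largest
// bucket down, subtracting the next-smaller cumulative count, so each slot
// keeps only the observations that fell into that particular bucket.
func cumulativeToPerBucket(cumulative []float64) []float64 {
	perBucket := make([]float64, len(cumulative))
	copy(perBucket, cumulative)
	for i := len(perBucket) - 1; i > 0; i-- {
		perBucket[i] -= cumulative[i-1]
	}
	return perBucket
}

func main() {
	// Cumulative counts for le="1000", le="16000", le="32000", le="+Inf".
	fmt.Println(cumulativeToPerBucket([]float64{1, 24, 2534, 2669}))
	// Output: [1 23 2510 135]
}
```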
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/config_schema.json b/src/go/plugin/go.d/modules/k8s_kubeproxy/config_schema.json
new file mode 100644
index 000000000..f5d2d3424
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Kubeproxy collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Kubeproxy metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:10249/metrics",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/init.go b/src/go/plugin/go.d/modules/k8s_kubeproxy/init.go
new file mode 100644
index 000000000..93e4427e3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/init.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubeproxy
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (kp *KubeProxy) validateConfig() error {
+ if kp.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (kp *KubeProxy) initPrometheusClient() (prometheus.Prometheus, error) {
+ httpClient, err := web.NewHTTPClient(kp.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(httpClient, kp.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/integrations/kubeproxy.md b/src/go/plugin/go.d/modules/k8s_kubeproxy/integrations/kubeproxy.md
new file mode 100644
index 000000000..bfeb00b54
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/integrations/kubeproxy.md
@@ -0,0 +1,221 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/k8s_kubeproxy/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/k8s_kubeproxy/metadata.yaml"
+sidebar_label: "Kubeproxy"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Kubernetes"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kubeproxy
+
+
+<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: k8s_kubeproxy
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Kubeproxy instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Kubeproxy instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s_kubeproxy.kubeproxy_sync_proxy_rules | sync_proxy_rules | events/s |
+| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microseconds | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | observes/s |
+| k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency | 0.001, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 2.048, 4.096, 8.192, 16.384, +Inf | percentage |
+| k8s_kubeproxy.rest_client_requests_by_code | a dimension per HTTP status code | requests/s |
+| k8s_kubeproxy.rest_client_requests_by_method | a dimension per HTTP method | requests/s |
+| k8s_kubeproxy.http_request_duration | 0.5, 0.9, 0.99 | microseconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/k8s_kubeproxy.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/k8s_kubeproxy.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:10249/metrics | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:10249/metrics
+
+```
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:10249/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `k8s_kubeproxy` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m k8s_kubeproxy
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `k8s_kubeproxy` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep k8s_kubeproxy
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep k8s_kubeproxy /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep k8s_kubeproxy
+```
+
+
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy.go b/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy.go
new file mode 100644
index 000000000..3c9848431
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubeproxy
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("k8s_kubeproxy", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ // NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000
+ Priority: 50000,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *KubeProxy {
+ return &KubeProxy{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:10249/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type KubeProxy struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ prom prometheus.Prometheus
+}
+
+func (kp *KubeProxy) Configuration() any {
+ return kp.Config
+}
+
+func (kp *KubeProxy) Init() error {
+ if err := kp.validateConfig(); err != nil {
+ kp.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prom, err := kp.initPrometheusClient()
+ if err != nil {
+ kp.Error(err)
+ return err
+ }
+ kp.prom = prom
+
+ return nil
+}
+
+func (kp *KubeProxy) Check() error {
+ mx, err := kp.collect()
+ if err != nil {
+ kp.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (kp *KubeProxy) Charts() *Charts {
+ return kp.charts
+}
+
+func (kp *KubeProxy) Collect() map[string]int64 {
+ mx, err := kp.collect()
+
+ if err != nil {
+ kp.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (kp *KubeProxy) Cleanup() {
+ if kp.prom != nil && kp.prom.HTTPClient() != nil {
+ kp.prom.HTTPClient().CloseIdleConnections()
+ }
+}
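
kubeproxy.go wires the collector into the go.d agent: `Init` validates the URL and builds the Prometheus client, `Check` performs a trial collection, `Collect` returns the flattened metric map, and `Cleanup` closes idle HTTP connections. A minimal sketch of the call order those methods imply (this is not the agent's actual scheduling loop, just the sequence the tests below also follow):

```go
package main

import "fmt"

// collector captures the lifecycle methods KubeProxy implements above.
type collector interface {
	Init() error
	Check() error
	Collect() map[string]int64
	Cleanup()
}

// runOnce drives one pass: init, sanity check, a single collection, cleanup.
func runOnce(c collector) (map[string]int64, error) {
	if err := c.Init(); err != nil {
		return nil, fmt.Errorf("init: %w", err)
	}
	defer c.Cleanup()
	if err := c.Check(); err != nil {
		return nil, fmt.Errorf("check: %w", err)
	}
	return c.Collect(), nil
}

func main() {
	// Inside the k8s_kubeproxy package this would be runOnce(New());
	// kubeproxy_test.go below exercises the same flow against httptest servers.
	_ = runOnce
	fmt.Println("lifecycle: Init -> Check -> Collect (repeated) -> Cleanup")
}
```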
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy_test.go b/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy_test.go
new file mode 100644
index 000000000..206528a23
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/kubeproxy_test.go
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubeproxy
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataMetrics, _ = os.ReadFile("testdata/metrics.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataMetrics": dataMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestKubeProxy_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &KubeProxy{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestKubeProxy_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestKubeProxy_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestKubeProxy_Init(t *testing.T) {
+ assert.NoError(t, New().Init())
+}
+
+func TestKubeProxy_InitNG(t *testing.T) {
+ job := New()
+ job.URL = ""
+ assert.Error(t, job.Init())
+}
+
+func TestKubeProxy_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+}
+
+func TestKubeProxy_CheckNG(t *testing.T) {
+ job := New()
+ job.URL = "http://127.0.0.1:38001/metrics"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestKubeProxy_Collect(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "sync_proxy_rules_count": 2669,
+ "sync_proxy_rules_bucket_1000": 1,
+ "sync_proxy_rules_bucket_2000": 0,
+ "sync_proxy_rules_bucket_4000": 0,
+ "sync_proxy_rules_bucket_8000": 0,
+ "sync_proxy_rules_bucket_16000": 23,
+ "sync_proxy_rules_bucket_32000": 2510,
+ "sync_proxy_rules_bucket_64000": 126,
+ "sync_proxy_rules_bucket_128000": 8,
+ "sync_proxy_rules_bucket_256000": 0,
+ "sync_proxy_rules_bucket_512000": 1,
+ "sync_proxy_rules_bucket_1024000": 0,
+ "sync_proxy_rules_bucket_4096000": 0,
+ "sync_proxy_rules_bucket_8192000": 0,
+ "sync_proxy_rules_bucket_2048000": 0,
+ "sync_proxy_rules_bucket_16384000": 0,
+ "sync_proxy_rules_bucket_+Inf": 0,
+ "rest_client_requests_201": 1,
+ "rest_client_requests_200": 362,
+ "rest_client_requests_GET": 362,
+ "rest_client_requests_POST": 1,
+ "http_request_duration_05": 1515,
+ "http_request_duration_09": 3939,
+ "http_request_duration_099": 9464,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestKubeProxy_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestKubeProxy_404(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/metrics"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/metadata.yaml b/src/go/plugin/go.d/modules/k8s_kubeproxy/metadata.yaml
new file mode 100644
index 000000000..0f8d0d72a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/metadata.yaml
@@ -0,0 +1,227 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-k8s_kubeproxy
+ plugin_name: go.d.plugin
+ module_name: k8s_kubeproxy
+ monitored_instance:
+ name: Kubeproxy
+ link: https://kubernetes.io/docs/concepts/overview/components/#kube-proxy
+ icon_filename: kubernetes.svg
+ categories:
+ - data-collection.kubernetes
+ keywords:
+ - kubeproxy
+ - kubernetes
+ - k8s
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Kubeproxy instances.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/k8s_kubeproxy.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:10249/metrics
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:10249/metrics
+ - name: HTTPS with self-signed certificate
+ description: |
+ Do not validate server certificate chain and hostname.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:10249/metrics
+ tls_skip_verify: yes
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: k8s_kubeproxy.kubeproxy_sync_proxy_rules
+ description: Sync Proxy Rules
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: sync_proxy_rules
+ - name: k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency_microseconds
+ description: Sync Proxy Rules Latency
+ unit: observes/s
+ chart_type: stacked
+ dimensions:
+ - name: "0.001"
+ - name: "0.002"
+ - name: "0.004"
+ - name: "0.008"
+ - name: "0.016"
+ - name: "0.032"
+ - name: "0.064"
+ - name: "0.128"
+ - name: "0.256"
+ - name: "0.512"
+ - name: "1.024"
+ - name: "2.048"
+ - name: "4.096"
+ - name: "8.192"
+ - name: "16.384"
+ - name: +Inf
+ - name: k8s_kubeproxy.kubeproxy_sync_proxy_rules_latency
+ description: Sync Proxy Rules Latency Percentage
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: "0.001"
+ - name: "0.002"
+ - name: "0.004"
+ - name: "0.008"
+ - name: "0.016"
+ - name: "0.032"
+ - name: "0.064"
+ - name: "0.128"
+ - name: "0.256"
+ - name: "0.512"
+ - name: "1.024"
+ - name: "2.048"
+ - name: "4.096"
+ - name: "8.192"
+ - name: "16.384"
+ - name: +Inf
+ - name: k8s_kubeproxy.rest_client_requests_by_code
+ description: HTTP Requests By Status Code
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per HTTP status code
+ - name: k8s_kubeproxy.rest_client_requests_by_method
+ description: HTTP Requests By Method
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per HTTP method
+ - name: k8s_kubeproxy.http_request_duration
+ description: HTTP Requests Duration
+ unit: microseconds
+ chart_type: stacked
+ dimensions:
+ - name: "0.5"
+ - name: "0.9"
+ - name: "0.99"
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/metrics.go b/src/go/plugin/go.d/modules/k8s_kubeproxy/metrics.go
new file mode 100644
index 000000000..f5c587a23
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/metrics.go
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubeproxy
+
+import (
+ mtx "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+)
+
+func newMetrics() *metrics {
+ var mx metrics
+ mx.RESTClient.Requests.ByStatusCode = make(map[string]mtx.Gauge)
+ mx.RESTClient.Requests.ByMethod = make(map[string]mtx.Gauge)
+
+ return &mx
+}
+
+type metrics struct {
+ SyncProxyRules struct {
+ Count mtx.Gauge `stm:"count"`
+ Latency struct {
+ LE1000 mtx.Gauge `stm:"1000"`
+ LE2000 mtx.Gauge `stm:"2000"`
+ LE4000 mtx.Gauge `stm:"4000"`
+ LE8000 mtx.Gauge `stm:"8000"`
+ LE16000 mtx.Gauge `stm:"16000"`
+ LE32000 mtx.Gauge `stm:"32000"`
+ LE64000 mtx.Gauge `stm:"64000"`
+ LE128000 mtx.Gauge `stm:"128000"`
+ LE256000 mtx.Gauge `stm:"256000"`
+ LE512000 mtx.Gauge `stm:"512000"`
+ LE1024000 mtx.Gauge `stm:"1024000"`
+ LE2048000 mtx.Gauge `stm:"2048000"`
+ LE4096000 mtx.Gauge `stm:"4096000"`
+ LE8192000 mtx.Gauge `stm:"8192000"`
+ LE16384000 mtx.Gauge `stm:"16384000"`
+ Inf mtx.Gauge `stm:"+Inf"`
+ } `stm:"bucket"`
+ } `stm:"sync_proxy_rules"`
+ RESTClient struct {
+ Requests struct {
+ ByStatusCode map[string]mtx.Gauge `stm:""`
+ ByMethod map[string]mtx.Gauge `stm:""`
+ } `stm:"requests"`
+ } `stm:"rest_client"`
+ HTTP struct {
+ Request struct {
+ Duration struct {
+ Quantile05 mtx.Gauge `stm:"05"`
+ Quantile09 mtx.Gauge `stm:"09"`
+ Quantile099 mtx.Gauge `stm:"099"`
+ } `stm:"duration"`
+ } `stm:"request"`
+ } `stm:"http"`
+}
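
The nested `stm` tags above are what produce the flat keys that charts.go and the test expectations reference: parent tags and the leaf tag appear to be joined with underscores, with empty tags skipped. A tiny sketch of that key construction (an assumption about stm's joining rule for illustration, not its implementation):

```go
package main

import (
	"fmt"
	"strings"
)

// flattenKey mimics the assumed stm behaviour: non-empty struct tags from the
// outermost field down to the leaf, concatenated with underscores.
func flattenKey(tags ...string) string {
	var parts []string
	for _, t := range tags {
		if t != "" {
			parts = append(parts, t)
		}
	}
	return strings.Join(parts, "_")
}

func main() {
	fmt.Println(flattenKey("sync_proxy_rules", "bucket", "1000")) // sync_proxy_rules_bucket_1000
	fmt.Println(flattenKey("rest_client", "requests", "", "200")) // rest_client_requests_200
	fmt.Println(flattenKey("http", "request", "duration", "05"))  // http_request_duration_05
}
```

These are exactly the dimension IDs used in charts.go and the keys asserted in kubeproxy_test.go's expected map.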
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/config.json b/src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/config.yaml b/src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/metrics.txt b/src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/metrics.txt
new file mode 100644
index 000000000..7a10d8477
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_kubeproxy/testdata/metrics.txt
@@ -0,0 +1,190 @@
+# HELP apiserver_audit_event_total Counter of audit events generated and sent to the audit backend.
+# TYPE apiserver_audit_event_total counter
+apiserver_audit_event_total 0
+# HELP apiserver_audit_requests_rejected_total Counter of apiserver requests rejected due to an error in audit logging backend.
+# TYPE apiserver_audit_requests_rejected_total counter
+apiserver_audit_requests_rejected_total 0
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 2.2652e-05
+go_gc_duration_seconds{quantile="0.25"} 5.9037e-05
+go_gc_duration_seconds{quantile="0.5"} 0.000113147
+go_gc_duration_seconds{quantile="0.75"} 0.000232939
+go_gc_duration_seconds{quantile="1"} 0.009002756
+go_gc_duration_seconds_sum 0.294305823
+go_gc_duration_seconds_count 755
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 46
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 6.14748e+06
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 9.53406048e+08
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.535744e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 8.247964e+06
+# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
+# TYPE go_memstats_gc_cpu_fraction gauge
+go_memstats_gc_cpu_fraction 7.826953112615371e-06
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 2.387968e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 6.14748e+06
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 5.8466304e+07
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 7.82336e+06
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 29543
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 0
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 6.6289664e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.5530903816542802e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 8.277507e+06
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 3456
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16384
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 89832
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 114688
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 7.132208e+06
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 596472
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 819200
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 819200
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 7.176012e+07
+# HELP go_threads Number of OS threads created
+# TYPE go_threads gauge
+go_threads 10
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1515.864
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 3939.871
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 9464.15
+http_request_duration_microseconds_sum{handler="prometheus"} 837819.5429999996
+http_request_duration_microseconds_count{handler="prometheus"} 378
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="prometheus",quantile="0.5"} 377
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 377
+http_request_size_bytes{handler="prometheus",quantile="0.99"} 377
+http_request_size_bytes_sum{handler="prometheus"} 142462
+http_request_size_bytes_count{handler="prometheus"} 378
+# HELP http_requests_total Total number of HTTP requests made.
+# TYPE http_requests_total counter
+http_requests_total{code="200",handler="prometheus",method="get"} 378
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 2414
+http_response_size_bytes{handler="prometheus",quantile="0.9"} 2419
+http_response_size_bytes{handler="prometheus",quantile="0.99"} 2423
+http_response_size_bytes_sum{handler="prometheus"} 911969
+http_response_size_bytes_count{handler="prometheus"} 378
+# HELP kubeproxy_sync_proxy_rules_latency_microseconds SyncProxyRules latency
+# TYPE kubeproxy_sync_proxy_rules_latency_microseconds histogram
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="1000"} 1
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="2000"} 1
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="4000"} 1
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="8000"} 1
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="16000"} 24
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="32000"} 2534
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="64000"} 2660
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="128000"} 2668
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="256000"} 2668
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="512000"} 2669
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="1.024e+06"} 2669
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="2.048e+06"} 2669
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="4.096e+06"} 2669
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="8.192e+06"} 2669
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="1.6384e+07"} 2669
+kubeproxy_sync_proxy_rules_latency_microseconds_bucket{le="+Inf"} 2669
+kubeproxy_sync_proxy_rules_latency_microseconds_sum 6.2885705e+07
+kubeproxy_sync_proxy_rules_latency_microseconds_count 2669
+# HELP kubernetes_build_info A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.
+# TYPE kubernetes_build_info gauge
+kubernetes_build_info{buildDate="2019-02-28T13:35:32Z",compiler="gc",gitCommit="c27b913fddd1a6c480c229191a087698aa92f0b1",gitTreeState="clean",gitVersion="v1.13.4",goVersion="go1.11.5",major="1",minor="13",platform="linux/amd64"} 1
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 156.15
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1.048576e+06
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 11
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 3.5467264e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.5530103809e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 1.4047232e+08
+# HELP rest_client_request_latency_seconds Request latency in seconds. Broken down by verb and URL.
+# TYPE rest_client_request_latency_seconds histogram
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.001"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.002"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.004"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.008"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.016"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.032"} 2
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.064"} 2
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.128"} 2
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.256"} 3
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="0.512"} 3
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET",le="+Inf"} 3
+rest_client_request_latency_seconds_sum{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET"} 0.28126861
+rest_client_request_latency_seconds_count{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="GET"} 3
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.001"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.002"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.004"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.008"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.016"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.032"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.064"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.128"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.256"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="0.512"} 0
+rest_client_request_latency_seconds_bucket{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST",le="+Inf"} 1
+rest_client_request_latency_seconds_sum{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST"} 4.008446017
+rest_client_request_latency_seconds_count{url="https://192.168.99.124:8443/%7Bprefix%7D",verb="POST"} 1
+# HELP rest_client_requests_total Number of HTTP requests, partitioned by status code, method, and host.
+# TYPE rest_client_requests_total counter
+rest_client_requests_total{code="200",host="192.168.99.124:8443",method="GET"} 362
+rest_client_requests_total{code="201",host="192.168.99.124:8443",method="POST"} 1 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/k8s_state/README.md b/src/go/plugin/go.d/modules/k8s_state/README.md
new file mode 120000
index 000000000..72c4e5cab
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/README.md
@@ -0,0 +1 @@
+integrations/kubernetes_cluster_state.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/k8s_state/charts.go b/src/go/plugin/go.d/modules/k8s_state/charts.go
new file mode 100644
index 000000000..471d12577
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/charts.go
@@ -0,0 +1,785 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+// NETDATA_CHART_PRIO_CGROUPS_CONTAINERS 40000
+const prioDiscoveryDiscovererState = 50999
+
+const (
+ prioNodeAllocatableCPURequestsUtil = 50100 + iota
+ prioNodeAllocatableCPURequestsUsed
+ prioNodeAllocatableCPULimitsUtil
+ prioNodeAllocatableCPULimitsUsed
+ prioNodeAllocatableMemRequestsUtil
+ prioNodeAllocatableMemRequestsUsed
+ prioNodeAllocatableMemLimitsUtil
+ prioNodeAllocatableMemLimitsUsed
+ prioNodeAllocatablePodsUtil
+ prioNodeAllocatablePodsUsage
+ prioNodeConditions
+ prioNodeSchedulability
+ prioNodePodsReadiness
+ prioNodePodsReadinessState
+ prioNodePodsCondition
+ prioNodePodsPhase
+ prioNodeContainersCount
+ prioNodeContainersState
+ prioNodeInitContainersState
+ prioNodeAge
+)
+
+const (
+ prioPodCPURequestsUsed = 50300 + iota
+ prioPodCPULimitsUsed
+ prioPodMemRequestsUsed
+ prioPodMemLimitsUsed
+ prioPodCondition
+ prioPodPhase
+ prioPodAge
+ prioPodContainersCount
+ prioPodContainersState
+ prioPodInitContainersState
+ prioPodContainerReadinessState
+ prioPodContainerRestarts
+ prioPodContainerState
+ prioPodContainerWaitingStateReason
+ prioPodContainerTerminatedStateReason
+)
+
+const (
+ labelKeyPrefix = "k8s_"
+ //labelKeyLabelPrefix = labelKeyPrefix + "label_"
+ //labelKeyAnnotationPrefix = labelKeyPrefix + "annotation_"
+ labelKeyClusterID = labelKeyPrefix + "cluster_id"
+ labelKeyClusterName = labelKeyPrefix + "cluster_name"
+ labelKeyNamespace = labelKeyPrefix + "namespace"
+ labelKeyKind = labelKeyPrefix + "kind"
+ labelKeyPodName = labelKeyPrefix + "pod_name"
+ labelKeyNodeName = labelKeyPrefix + "node_name"
+ labelKeyPodUID = labelKeyPrefix + "pod_uid"
+ labelKeyControllerKind = labelKeyPrefix + "controller_kind"
+ labelKeyControllerName = labelKeyPrefix + "controller_name"
+ labelKeyContainerName = labelKeyPrefix + "container_name"
+ labelKeyContainerID = labelKeyPrefix + "container_id"
+ labelKeyQoSClass = labelKeyPrefix + "qos_class"
+)
+
+var baseCharts = module.Charts{
+ discoveryStatusChart.Copy(),
+}
+
+var nodeChartsTmpl = module.Charts{
+ nodeAllocatableCPURequestsUtilChartTmpl.Copy(),
+ nodeAllocatableCPURequestsUsedChartTmpl.Copy(),
+ nodeAllocatableCPULimitsUtilChartTmpl.Copy(),
+ nodeAllocatableCPULimitsUsedChartTmpl.Copy(),
+ nodeAllocatableMemRequestsUtilChartTmpl.Copy(),
+ nodeAllocatableMemRequestsUsedChartTmpl.Copy(),
+ nodeAllocatableMemLimitsUtilChartTmpl.Copy(),
+ nodeAllocatableMemLimitsUsedChartTmpl.Copy(),
+ nodeAllocatablePodsUtilizationChartTmpl.Copy(),
+ nodeAllocatablePodsUsageChartTmpl.Copy(),
+ nodeConditionsChartTmpl.Copy(),
+ nodeSchedulabilityChartTmpl.Copy(),
+ nodePodsReadinessChartTmpl.Copy(),
+ nodePodsReadinessStateChartTmpl.Copy(),
+ nodePodsConditionChartTmpl.Copy(),
+ nodePodsPhaseChartTmpl.Copy(),
+ nodeContainersChartTmpl.Copy(),
+ nodeContainersStateChartTmpl.Copy(),
+ nodeInitContainersStateChartTmpl.Copy(),
+ nodeAgeChartTmpl.Copy(),
+}
+
+var podChartsTmpl = module.Charts{
+ podCPURequestsUsedChartTmpl.Copy(),
+ podCPULimitsUsedChartTmpl.Copy(),
+ podMemRequestsUsedChartTmpl.Copy(),
+ podMemLimitsUsedChartTmpl.Copy(),
+ podConditionChartTmpl.Copy(),
+ podPhaseChartTmpl.Copy(),
+ podAgeChartTmpl.Copy(),
+ podContainersCountChartTmpl.Copy(),
+ podContainersStateChartTmpl.Copy(),
+ podInitContainersStateChartTmpl.Copy(),
+}
+
+var containerChartsTmpl = module.Charts{
+ containerReadinessStateChartTmpl.Copy(),
+ containerRestartsChartTmpl.Copy(),
+ containersStateChartTmpl.Copy(),
+ containersStateWaitingChartTmpl.Copy(),
+ containersStateTerminatedChartTmpl.Copy(),
+}
+
+var (
+ // CPU resource
+ nodeAllocatableCPURequestsUtilChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocatable_cpu_requests_utilization",
+ Title: "CPU requests utilization",
+ Units: "%",
+ Fam: "node cpu resource",
+ Ctx: "k8s_state.node_allocatable_cpu_requests_utilization",
+ Priority: prioNodeAllocatableCPURequestsUtil,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_cpu_requests_util", Name: "requests", Div: precision},
+ },
+ }
+ nodeAllocatableCPURequestsUsedChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocatable_cpu_requests_used",
+ Title: "CPU requests used",
+ Units: "millicpu",
+ Fam: "node cpu resource",
+ Ctx: "k8s_state.node_allocatable_cpu_requests_used",
+ Priority: prioNodeAllocatableCPURequestsUsed,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_cpu_requests_used", Name: "requests"},
+ },
+ }
+ nodeAllocatableCPULimitsUtilChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocatable_cpu_limits_utilization",
+ Title: "CPU limits utilization",
+ Units: "%",
+ Fam: "node cpu resource",
+ Ctx: "k8s_state.node_allocatable_cpu_limits_utilization",
+ Priority: prioNodeAllocatableCPULimitsUtil,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_cpu_limits_util", Name: "limits", Div: precision},
+ },
+ }
+ nodeAllocatableCPULimitsUsedChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocatable_cpu_limits_used",
+ Title: "CPU limits used",
+ Units: "millicpu",
+ Fam: "node cpu resource",
+ Ctx: "k8s_state.node_allocatable_cpu_limits_used",
+ Priority: prioNodeAllocatableCPULimitsUsed,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_cpu_limits_used", Name: "limits"},
+ },
+ }
+ // memory resource
+ nodeAllocatableMemRequestsUtilChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocatable_mem_requests_utilization",
+ Title: "Memory requests utilization",
+ Units: "%",
+ Fam: "node mem resource",
+ Ctx: "k8s_state.node_allocatable_mem_requests_utilization",
+ Priority: prioNodeAllocatableMemRequestsUtil,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_mem_requests_util", Name: "requests", Div: precision},
+ },
+ }
+ nodeAllocatableMemRequestsUsedChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocatable_mem_requests_used",
+ Title: "Memory requests used",
+ Units: "bytes",
+ Fam: "node mem resource",
+ Ctx: "k8s_state.node_allocatable_mem_requests_used",
+ Priority: prioNodeAllocatableMemRequestsUsed,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_mem_requests_used", Name: "requests"},
+ },
+ }
+ nodeAllocatableMemLimitsUtilChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocatable_mem_limits_utilization",
+ Title: "Memory limits utilization",
+ Units: "%",
+ Fam: "node mem resource",
+ Ctx: "k8s_state.node_allocatable_mem_limits_utilization",
+ Priority: prioNodeAllocatableMemLimitsUtil,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_mem_limits_util", Name: "limits", Div: precision},
+ },
+ }
+ nodeAllocatableMemLimitsUsedChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocatable_mem_limits_used",
+ Title: "Memory limits used",
+ Units: "bytes",
+ Fam: "node mem resource",
+ Ctx: "k8s_state.node_allocatable_mem_limits_used",
+ Priority: prioNodeAllocatableMemLimitsUsed,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_mem_limits_used", Name: "limits"},
+ },
+ }
+ // pods resource
+ nodeAllocatablePodsUtilizationChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocatable_pods_utilization",
+ Title: "Pods resource utilization",
+ Units: "%",
+ Fam: "node pods resource",
+ Ctx: "k8s_state.node_allocatable_pods_utilization",
+ Priority: prioNodeAllocatablePodsUtil,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_pods_util", Name: "allocated", Div: precision},
+ },
+ }
+ nodeAllocatablePodsUsageChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.allocated_pods_usage",
+ Title: "Pods resource usage",
+ Units: "pods",
+ Fam: "node pods resource",
+ Ctx: "k8s_state.node_allocatable_pods_usage",
+ Type: module.Stacked,
+ Priority: prioNodeAllocatablePodsUsage,
+ Dims: module.Dims{
+ {ID: "node_%s_alloc_pods_available", Name: "available"},
+ {ID: "node_%s_alloc_pods_allocated", Name: "allocated"},
+ },
+ }
+ // condition
+ nodeConditionsChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.condition_status",
+ Title: "Condition status",
+ Units: "status",
+ Fam: "node condition",
+ Ctx: "k8s_state.node_condition",
+ Priority: prioNodeConditions,
+ }
+ nodeSchedulabilityChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.schedulability",
+ Title: "Schedulability",
+ Units: "state",
+ Fam: "node schedulability",
+ Ctx: "k8s_state.node_schedulability",
+ Priority: prioNodeSchedulability,
+ Dims: module.Dims{
+ {ID: "node_%s_schedulability_schedulable", Name: "schedulable"},
+ {ID: "node_%s_schedulability_unschedulable", Name: "unschedulable"},
+ },
+ }
+ // pods readiness
+ nodePodsReadinessChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.pods_readiness",
+ Title: "Pods readiness",
+ Units: "%",
+ Fam: "node pods readiness",
+ Ctx: "k8s_state.node_pods_readiness",
+ Priority: prioNodePodsReadiness,
+ Dims: module.Dims{
+ {ID: "node_%s_pods_readiness", Name: "ready", Div: precision},
+ },
+ }
+ nodePodsReadinessStateChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.pods_readiness_state",
+ Title: "Pods readiness state",
+ Units: "pods",
+ Fam: "node pods readiness",
+ Ctx: "k8s_state.node_pods_readiness_state",
+ Type: module.Stacked,
+ Priority: prioNodePodsReadinessState,
+ Dims: module.Dims{
+ {ID: "node_%s_pods_readiness_ready", Name: "ready"},
+ {ID: "node_%s_pods_readiness_unready", Name: "unready"},
+ },
+ }
+ // pods condition
+ nodePodsConditionChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.pods_condition",
+ Title: "Pods condition",
+ Units: "pods",
+ Fam: "node pods condition",
+ Ctx: "k8s_state.node_pods_condition",
+ Priority: prioNodePodsCondition,
+ Dims: module.Dims{
+ {ID: "node_%s_pods_cond_podready", Name: "pod_ready"},
+ {ID: "node_%s_pods_cond_podscheduled", Name: "pod_scheduled"},
+ {ID: "node_%s_pods_cond_podinitialized", Name: "pod_initialized"},
+ {ID: "node_%s_pods_cond_containersready", Name: "containers_ready"},
+ },
+ }
+ // pods phase
+ nodePodsPhaseChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.pods_phase",
+ Title: "Pods phase",
+ Units: "pods",
+ Fam: "node pods phase",
+ Ctx: "k8s_state.node_pods_phase",
+ Type: module.Stacked,
+ Priority: prioNodePodsPhase,
+ Dims: module.Dims{
+ {ID: "node_%s_pods_phase_running", Name: "running"},
+ {ID: "node_%s_pods_phase_failed", Name: "failed"},
+ {ID: "node_%s_pods_phase_succeeded", Name: "succeeded"},
+ {ID: "node_%s_pods_phase_pending", Name: "pending"},
+ },
+ }
+ // containers
+ nodeContainersChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.containers",
+ Title: "Containers",
+ Units: "containers",
+ Fam: "node containers",
+ Ctx: "k8s_state.node_containers",
+ Priority: prioNodeContainersCount,
+ Dims: module.Dims{
+ {ID: "node_%s_containers", Name: "containers"},
+ {ID: "node_%s_init_containers", Name: "init_containers"},
+ },
+ }
+ nodeContainersStateChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.containers_state",
+ Title: "Containers state",
+ Units: "containers",
+ Fam: "node containers",
+ Ctx: "k8s_state.node_containers_state",
+ Type: module.Stacked,
+ Priority: prioNodeContainersState,
+ Dims: module.Dims{
+ {ID: "node_%s_containers_state_running", Name: "running"},
+ {ID: "node_%s_containers_state_waiting", Name: "waiting"},
+ {ID: "node_%s_containers_state_terminated", Name: "terminated"},
+ },
+ }
+ nodeInitContainersStateChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.init_containers_state",
+ Title: "Init containers state",
+ Units: "containers",
+ Fam: "node containers",
+ Ctx: "k8s_state.node_init_containers_state",
+ Type: module.Stacked,
+ Priority: prioNodeInitContainersState,
+ Dims: module.Dims{
+ {ID: "node_%s_init_containers_state_running", Name: "running"},
+ {ID: "node_%s_init_containers_state_waiting", Name: "waiting"},
+ {ID: "node_%s_init_containers_state_terminated", Name: "terminated"},
+ },
+ }
+ // age
+ nodeAgeChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "node_%s.age",
+ Title: "Age",
+ Units: "seconds",
+ Fam: "node age",
+ Ctx: "k8s_state.node_age",
+ Priority: prioNodeAge,
+ Dims: module.Dims{
+ {ID: "node_%s_age", Name: "age"},
+ },
+ }
+)
+
+func (ks *KubeState) newNodeCharts(ns *nodeState) *module.Charts {
+ cs := nodeChartsTmpl.Copy()
+ for _, c := range *cs {
+ c.ID = fmt.Sprintf(c.ID, replaceDots(ns.id()))
+ c.Labels = ks.newNodeChartLabels(ns)
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, ns.id())
+ }
+ }
+ return cs
+}
+
+func (ks *KubeState) newNodeChartLabels(ns *nodeState) []module.Label {
+ labels := []module.Label{
+ {Key: labelKeyNodeName, Value: ns.name, Source: module.LabelSourceK8s},
+ {Key: labelKeyClusterID, Value: ks.kubeClusterID, Source: module.LabelSourceK8s},
+ {Key: labelKeyClusterName, Value: ks.kubeClusterName, Source: module.LabelSourceK8s},
+ }
+ return labels
+}
+
+func (ks *KubeState) addNodeCharts(ns *nodeState) {
+ cs := ks.newNodeCharts(ns)
+ if err := ks.Charts().Add(*cs...); err != nil {
+ ks.Warning(err)
+ }
+}
+
+func (ks *KubeState) removeNodeCharts(ns *nodeState) {
+ prefix := fmt.Sprintf("node_%s", replaceDots(ns.id()))
+ for _, c := range *ks.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+func (ks *KubeState) addNodeConditionToCharts(ns *nodeState, cond string) {
+ id := fmt.Sprintf(nodeConditionsChartTmpl.ID, replaceDots(ns.id()))
+ c := ks.Charts().Get(id)
+ if c == nil {
+ ks.Warningf("chart '%s' does not exist", id)
+ return
+ }
+ dim := &module.Dim{
+ ID: fmt.Sprintf("node_%s_cond_%s", ns.id(), strings.ToLower(cond)),
+ Name: cond,
+ }
+ if err := c.AddDim(dim); err != nil {
+ ks.Warning(err)
+ return
+ }
+ c.MarkNotCreated()
+}
+
+var (
+ podCPURequestsUsedChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.cpu_requests_used",
+ Title: "CPU requests used",
+ Units: "millicpu",
+ Fam: "pod allocated cpu",
+ Ctx: "k8s_state.pod_cpu_requests_used",
+ Priority: prioPodCPURequestsUsed,
+ Dims: module.Dims{
+ {ID: "pod_%s_cpu_requests_used", Name: "requests"},
+ },
+ }
+ podCPULimitsUsedChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.cpu_limits_used",
+ Title: "CPU limits used",
+ Units: "millicpu",
+ Fam: "pod allocated cpu",
+ Ctx: "k8s_state.pod_cpu_limits_used",
+ Priority: prioPodCPULimitsUsed,
+ Dims: module.Dims{
+ {ID: "pod_%s_cpu_limits_used", Name: "limits"},
+ },
+ }
+ podMemRequestsUsedChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.mem_requests_used",
+ Title: "Memory requests used",
+ Units: "bytes",
+ Fam: "pod allocated mem",
+ Ctx: "k8s_state.pod_mem_requests_used",
+ Priority: prioPodMemRequestsUsed,
+ Dims: module.Dims{
+ {ID: "pod_%s_mem_requests_used", Name: "requests"},
+ },
+ }
+ podMemLimitsUsedChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.mem_limits_used",
+ Title: "Memory limits used",
+ Units: "bytes",
+ Fam: "pod allocated mem",
+ Ctx: "k8s_state.pod_mem_limits_used",
+ Priority: prioPodMemLimitsUsed,
+ Dims: module.Dims{
+ {ID: "pod_%s_mem_limits_used", Name: "limits"},
+ },
+ }
+ podConditionChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.condition",
+ Title: "Condition",
+ Units: "state",
+ Fam: "pod condition",
+ Ctx: "k8s_state.pod_condition",
+ Priority: prioPodCondition,
+ Dims: module.Dims{
+ {ID: "pod_%s_cond_podready", Name: "pod_ready"},
+ {ID: "pod_%s_cond_podscheduled", Name: "pod_scheduled"},
+ {ID: "pod_%s_cond_podinitialized", Name: "pod_initialized"},
+ {ID: "pod_%s_cond_containersready", Name: "containers_ready"},
+ },
+ }
+ podPhaseChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.phase",
+ Title: "Phase",
+ Units: "state",
+ Fam: "pod phase",
+ Ctx: "k8s_state.pod_phase",
+ Priority: prioPodPhase,
+ Dims: module.Dims{
+ {ID: "pod_%s_phase_running", Name: "running"},
+ {ID: "pod_%s_phase_failed", Name: "failed"},
+ {ID: "pod_%s_phase_succeeded", Name: "succeeded"},
+ {ID: "pod_%s_phase_pending", Name: "pending"},
+ },
+ }
+ podAgeChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.age",
+ Title: "Age",
+ Units: "seconds",
+ Fam: "pod age",
+ Ctx: "k8s_state.pod_age",
+ Priority: prioPodAge,
+ Dims: module.Dims{
+ {ID: "pod_%s_age", Name: "age"},
+ },
+ }
+ podContainersCountChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.containers_count",
+ Title: "Containers",
+ Units: "containers",
+ Fam: "pod containers",
+ Ctx: "k8s_state.pod_containers",
+ Priority: prioPodContainersCount,
+ Dims: module.Dims{
+ {ID: "pod_%s_containers", Name: "containers"},
+ {ID: "pod_%s_init_containers", Name: "init_containers"},
+ },
+ }
+ podContainersStateChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.containers_state",
+ Title: "Containers state",
+ Units: "containers",
+ Fam: "pod containers",
+ Ctx: "k8s_state.pod_containers_state",
+ Type: module.Stacked,
+ Priority: prioPodContainersState,
+ Dims: module.Dims{
+ {ID: "pod_%s_containers_state_running", Name: "running"},
+ {ID: "pod_%s_containers_state_waiting", Name: "waiting"},
+ {ID: "pod_%s_containers_state_terminated", Name: "terminated"},
+ },
+ }
+ podInitContainersStateChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s.init_containers_state",
+ Title: "Init containers state",
+ Units: "containers",
+ Fam: "pod containers",
+ Ctx: "k8s_state.pod_init_containers_state",
+ Type: module.Stacked,
+ Priority: prioPodInitContainersState,
+ Dims: module.Dims{
+ {ID: "pod_%s_init_containers_state_running", Name: "running"},
+ {ID: "pod_%s_init_containers_state_waiting", Name: "waiting"},
+ {ID: "pod_%s_init_containers_state_terminated", Name: "terminated"},
+ },
+ }
+)
+
+func (ks *KubeState) newPodCharts(ps *podState) *module.Charts {
+ charts := podChartsTmpl.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, replaceDots(ps.id()))
+ c.Labels = ks.newPodChartLabels(ps)
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, ps.id())
+ }
+ }
+ return charts
+}
+
+func (ks *KubeState) newPodChartLabels(ps *podState) []module.Label {
+ labels := []module.Label{
+ {Key: labelKeyNamespace, Value: ps.namespace, Source: module.LabelSourceK8s},
+ {Key: labelKeyPodName, Value: ps.name, Source: module.LabelSourceK8s},
+ {Key: labelKeyNodeName, Value: ps.nodeName, Source: module.LabelSourceK8s},
+ {Key: labelKeyQoSClass, Value: ps.qosClass, Source: module.LabelSourceK8s},
+ {Key: labelKeyControllerKind, Value: ps.controllerKind, Source: module.LabelSourceK8s},
+ {Key: labelKeyControllerName, Value: ps.controllerName, Source: module.LabelSourceK8s},
+ {Key: labelKeyClusterID, Value: ks.kubeClusterID, Source: module.LabelSourceK8s},
+ {Key: labelKeyClusterName, Value: ks.kubeClusterName, Source: module.LabelSourceK8s},
+ }
+ return labels
+}
+
+func (ks *KubeState) addPodCharts(ps *podState) {
+ charts := ks.newPodCharts(ps)
+ if err := ks.Charts().Add(*charts...); err != nil {
+ ks.Warning(err)
+ }
+}
+
+func (ks *KubeState) updatePodChartsNodeLabel(ps *podState) {
+ prefix := fmt.Sprintf("pod_%s", replaceDots(ps.id()))
+ for _, c := range *ks.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ updateNodeLabel(c, ps.nodeName)
+ c.MarkNotCreated()
+ }
+ }
+}
+
+func updateNodeLabel(c *module.Chart, nodeName string) {
+ for i, l := range c.Labels {
+ if l.Key == labelKeyNodeName {
+ c.Labels[i].Value = nodeName
+ break
+ }
+ }
+}
+
+func (ks *KubeState) removePodCharts(ps *podState) {
+ prefix := fmt.Sprintf("pod_%s", replaceDots(ps.id()))
+ for _, c := range *ks.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ containerReadinessStateChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s_container_%s.readiness_state",
+ Title: "Readiness state",
+ Units: "state",
+ Fam: "container readiness",
+ Ctx: "k8s_state.pod_container_readiness_state",
+ Priority: prioPodContainerReadinessState,
+ Dims: module.Dims{
+ {ID: "pod_%s_container_%s_readiness", Name: "ready"},
+ },
+ }
+ containerRestartsChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s_container_%s.restarts",
+ Title: "Restarts",
+ Units: "restarts",
+ Fam: "container restarts",
+ Ctx: "k8s_state.pod_container_restarts",
+ Priority: prioPodContainerRestarts,
+ Dims: module.Dims{
+ {ID: "pod_%s_container_%s_restarts", Name: "restarts"},
+ },
+ }
+ containersStateChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s_container_%s.state",
+ Title: "Container state",
+ Units: "state",
+ Fam: "container state",
+ Ctx: "k8s_state.pod_container_state",
+ Priority: prioPodContainerState,
+ Dims: module.Dims{
+ {ID: "pod_%s_container_%s_state_running", Name: "running"},
+ {ID: "pod_%s_container_%s_state_waiting", Name: "waiting"},
+ {ID: "pod_%s_container_%s_state_terminated", Name: "terminated"},
+ },
+ }
+ containersStateWaitingChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s_container_%s.state_waiting_reason",
+ Title: "Container waiting state reason",
+ Units: "state",
+ Fam: "container waiting reason",
+ Ctx: "k8s_state.pod_container_waiting_state_reason",
+ Priority: prioPodContainerWaitingStateReason,
+ }
+ containersStateTerminatedChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "pod_%s_container_%s.state_terminated_reason",
+ Title: "Container terminated state reason",
+ Units: "state",
+ Fam: "container terminated reason",
+ Ctx: "k8s_state.pod_container_terminated_state_reason",
+ Priority: prioPodContainerTerminatedStateReason,
+ }
+)
+
+func (ks *KubeState) newContainerCharts(ps *podState, cs *containerState) *module.Charts {
+ charts := containerChartsTmpl.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, replaceDots(ps.id()), cs.name)
+ c.Labels = ks.newContainerChartLabels(ps, cs)
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, ps.id(), cs.name)
+ }
+ }
+ return charts
+}
+
+func (ks *KubeState) newContainerChartLabels(ps *podState, cs *containerState) []module.Label {
+ labels := ks.newPodChartLabels(ps)
+ labels = append(
+ labels, module.Label{Key: labelKeyContainerName, Value: cs.name, Source: module.LabelSourceK8s},
+ )
+ return labels
+}
+
+func (ks *KubeState) addContainerCharts(ps *podState, cs *containerState) {
+ charts := ks.newContainerCharts(ps, cs)
+ if err := ks.Charts().Add(*charts...); err != nil {
+ ks.Warning(err)
+ }
+}
+
+func (ks *KubeState) addContainerWaitingStateReasonToChart(ps *podState, cs *containerState, reason string) {
+ id := fmt.Sprintf(containersStateWaitingChartTmpl.ID, replaceDots(ps.id()), cs.name)
+ c := ks.Charts().Get(id)
+ if c == nil {
+ ks.Warningf("chart '%s' does not exist", id)
+ return
+ }
+ dim := &module.Dim{
+ ID: fmt.Sprintf("pod_%s_container_%s_state_waiting_reason_%s", ps.id(), cs.name, reason),
+ Name: reason,
+ }
+ if err := c.AddDim(dim); err != nil {
+ ks.Warning(err)
+ return
+ }
+ c.MarkNotCreated()
+}
+
+func (ks *KubeState) addContainerTerminatedStateReasonToChart(ps *podState, cs *containerState, reason string) {
+ id := fmt.Sprintf(containersStateTerminatedChartTmpl.ID, replaceDots(ps.id()), cs.name)
+ c := ks.Charts().Get(id)
+ if c == nil {
+ ks.Warningf("chart '%s' does not exist", id)
+ return
+ }
+ dim := &module.Dim{
+ ID: fmt.Sprintf("pod_%s_container_%s_state_terminated_reason_%s", ps.id(), cs.name, reason),
+ Name: reason,
+ }
+ if err := c.AddDim(dim); err != nil {
+ ks.Warning(err)
+ return
+ }
+ c.MarkNotCreated()
+}
+
+var discoveryStatusChart = module.Chart{
+ ID: "discovery_discoverers_state",
+ Title: "Running discoverers state",
+ Units: "state",
+ Fam: "discovery",
+ Ctx: "k8s_state.discovery_discoverers_state",
+ Priority: prioDiscoveryDiscovererState,
+ Opts: module.Opts{Hidden: true},
+ Dims: module.Dims{
+ {ID: "discovery_node_discoverer_state", Name: "node"},
+ {ID: "discovery_pod_discoverer_state", Name: "pod"},
+ },
+}
+
+var reDots = regexp.MustCompile(`\.`)
+
+func replaceDots(v string) string {
+ return reDots.ReplaceAllString(v, "-")
+}
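
For reference, this is how the `node_%s` / `pod_%s` templates above get instantiated: the chart ID is filled with the resource ID after dots are replaced with dashes (chart IDs treat `.` as a separator), while dimension IDs are filled with the raw ID. A minimal sketch, assuming the node ID is simply the node name:

```go
package main

import (
	"fmt"
	"regexp"
)

var reDots = regexp.MustCompile(`\.`)

func replaceDots(v string) string { return reDots.ReplaceAllString(v, "-") }

func main() {
	// Hypothetical node name; assumes ns.id() returns the node name.
	nodeID := "node01.example.com"
	chartID := fmt.Sprintf("node_%s.age", replaceDots(nodeID)) // chart IDs: dots -> dashes
	dimID := fmt.Sprintf("node_%s_age", nodeID)                // dimension IDs: raw ID
	fmt.Println(chartID) // node_node01-example-com.age
	fmt.Println(dimID)   // node_node01.example.com_age
}
```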
diff --git a/src/go/plugin/go.d/modules/k8s_state/client.go b/src/go/plugin/go.d/modules/k8s_state/client.go
new file mode 100644
index 000000000..315e823fe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/client.go
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+
+ _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+
+ "github.com/mattn/go-isatty"
+)
+
+const (
+ envKubeServiceHost = "KUBERNETES_SERVICE_HOST"
+ envKubeServicePort = "KUBERNETES_SERVICE_PORT"
+)
+
+func newKubeClient() (kubernetes.Interface, error) {
+ if os.Getenv(envKubeServiceHost) != "" && os.Getenv(envKubeServicePort) != "" {
+ return newKubeClientInCluster()
+ }
+ if isatty.IsTerminal(os.Stdout.Fd()) {
+ return newKubeClientOutOfCluster()
+ }
+ return nil, errors.New("can not create Kubernetes client: not inside a cluster")
+}
+
+func newKubeClientInCluster() (*kubernetes.Clientset, error) {
+ config, err := rest.InClusterConfig()
+ if err != nil {
+ return nil, err
+ }
+ config.UserAgent = "Netdata/kube-state"
+ return kubernetes.NewForConfig(config)
+}
+
+func newKubeClientOutOfCluster() (*kubernetes.Clientset, error) {
+ home := homeDir()
+ if home == "" {
+ return nil, errors.New("couldn't find home directory")
+ }
+
+ configPath := filepath.Join(home, ".kube", "config")
+ config, err := clientcmd.BuildConfigFromFlags("", configPath)
+ if err != nil {
+ return nil, err
+ }
+
+ config.UserAgent = "Netdata/kube-state"
+ return kubernetes.NewForConfig(config)
+}
+
+func homeDir() string {
+ if h := os.Getenv("HOME"); h != "" {
+ return h
+ }
+ return os.Getenv("USERPROFILE") // windows
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/cluster_meta.go b/src/go/plugin/go.d/modules/k8s_state/cluster_meta.go
new file mode 100644
index 000000000..e7eb809cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/cluster_meta.go
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func (ks *KubeState) getKubeClusterID() string {
+ ns, err := ks.client.CoreV1().Namespaces().Get(ks.ctx, "kube-system", metav1.GetOptions{})
+ if err != nil {
+ ks.Warningf("error on getting 'kube-system' namespace UID: %v", err)
+ return ""
+ }
+ return string(ns.UID)
+}
+
+func (ks *KubeState) getKubeClusterName() string {
+ client := http.Client{Timeout: time.Second}
+ n, err := getGKEKubeClusterName(client)
+ if err != nil {
+ ks.Debugf("error on getting GKE cluster name: %v", err)
+ }
+ return n
+}
+
+func getGKEKubeClusterName(client http.Client) (string, error) {
+ id, err := doMetaGKEHTTPReq(client, "http://metadata/computeMetadata/v1/project/project-id")
+ if err != nil {
+ return "", err
+ }
+ loc, err := doMetaGKEHTTPReq(client, "http://metadata/computeMetadata/v1/instance/attributes/cluster-location")
+ if err != nil {
+ return "", err
+ }
+ name, err := doMetaGKEHTTPReq(client, "http://metadata/computeMetadata/v1/instance/attributes/cluster-name")
+ if err != nil {
+ return "", err
+ }
+
+ return fmt.Sprintf("gke_%s_%s_%s", id, loc, name), nil
+}
+
+func doMetaGKEHTTPReq(client http.Client, url string) (string, error) {
+ req, err := http.NewRequest(http.MethodGet, url, nil)
+ if err != nil {
+ return "", err
+ }
+
+ req.Header.Add("Metadata-Flavor", "Google")
+ resp, err := client.Do(req)
+ if err != nil {
+ return "", err
+ }
+ defer closeHTTPRespBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("'%s' returned HTTP status code %d", url, resp.StatusCode)
+ }
+
+ bs, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ s := string(bs)
+ if s == "" {
+ return "", fmt.Errorf("an empty response from '%s'", url)
+ }
+
+ return s, nil
+}
+
+func closeHTTPRespBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
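
The three metadata lookups above are joined into a GKE-style cluster name of the form `gke_<project-id>_<location>_<cluster-name>`. A quick illustration with hypothetical values:

```go
package main

import "fmt"

func main() {
	// Hypothetical project ID, location, and cluster name; the collector reads
	// the real values from the GCE metadata server ("Metadata-Flavor: Google").
	fmt.Printf("gke_%s_%s_%s\n", "my-project", "us-central1-a", "prod-cluster")
	// gke_my-project_us-central1-a_prod-cluster
}
```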
diff --git a/src/go/plugin/go.d/modules/k8s_state/collect.go b/src/go/plugin/go.d/modules/k8s_state/collect.go
new file mode 100644
index 000000000..081a0fdf1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/collect.go
@@ -0,0 +1,271 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+const precision = 1000
+
+func (ks *KubeState) collect() (map[string]int64, error) {
+ if ks.discoverer == nil {
+ return nil, errors.New("nil discoverer")
+ }
+
+ ks.once.Do(func() {
+ ks.startTime = time.Now()
+ in := make(chan resource)
+
+ ks.wg.Add(1)
+ go func() { defer ks.wg.Done(); ks.runUpdateState(in) }()
+
+ ks.wg.Add(1)
+ go func() { defer ks.wg.Done(); ks.discoverer.run(ks.ctx, in) }()
+
+ ks.kubeClusterID = ks.getKubeClusterID()
+ ks.kubeClusterName = ks.getKubeClusterName()
+ if chart := ks.Charts().Get(discoveryStatusChart.ID); chart != nil {
+ chart.Labels = []module.Label{
+ {Key: labelKeyClusterID, Value: ks.kubeClusterID, Source: module.LabelSourceK8s},
+ {Key: labelKeyClusterName, Value: ks.kubeClusterName, Source: module.LabelSourceK8s},
+ }
+ }
+ })
+
+ mx := map[string]int64{
+ "discovery_node_discoverer_state": 1,
+ "discovery_pod_discoverer_state": 1,
+ }
+
+ if !ks.discoverer.ready() || time.Since(ks.startTime) < ks.initDelay {
+ return mx, nil
+ }
+
+ ks.state.Lock()
+ defer ks.state.Unlock()
+
+ ks.collectKubeState(mx)
+
+ return mx, nil
+}
+
+func (ks *KubeState) collectKubeState(mx map[string]int64) {
+ for _, ns := range ks.state.nodes {
+ ns.resetStats()
+ }
+ ks.collectPodsState(mx)
+ ks.collectNodesState(mx)
+}
+
+func (ks *KubeState) collectPodsState(mx map[string]int64) {
+ now := time.Now()
+ for _, ps := range ks.state.pods {
+		// Skip Job-controlled pods (including CronJob runs): each run creates a uniquely
+		// named pod (the name contains a hash), which would overwhelm Netdata with
+		// high-cardinality metrics.
+		// Related issue https://github.com/netdata/netdata/issues/16412
+ if ps.controllerKind == "Job" {
+ continue
+ }
+
+ if ps.deleted {
+ delete(ks.state.pods, podSource(ps.namespace, ps.name))
+ ks.removePodCharts(ps)
+ continue
+ }
+ if ps.new {
+ ps.new = false
+ ks.addPodCharts(ps)
+ ps.unscheduled = ps.nodeName == ""
+ } else if ps.unscheduled && ps.nodeName != "" {
+ ps.unscheduled = false
+ ks.updatePodChartsNodeLabel(ps)
+ }
+
+ ns := ks.state.nodes[nodeSource(ps.nodeName)]
+ if ns != nil {
+ ns.stats.pods++
+ ns.stats.reqCPU += ps.reqCPU
+ ns.stats.limitCPU += ps.limitCPU
+ ns.stats.reqMem += ps.reqMem
+ ns.stats.limitMem += ps.limitMem
+ ns.stats.podsCondPodReady += condStatusToInt(ps.condPodReady)
+ ns.stats.podsCondPodScheduled += condStatusToInt(ps.condPodScheduled)
+ ns.stats.podsCondPodInitialized += condStatusToInt(ps.condPodInitialized)
+ ns.stats.podsCondContainersReady += condStatusToInt(ps.condContainersReady)
+ ns.stats.podsReadinessReady += boolToInt(ps.condPodReady == corev1.ConditionTrue)
+ ns.stats.podsReadinessUnready += boolToInt(ps.condPodReady != corev1.ConditionTrue)
+ ns.stats.podsPhasePending += boolToInt(ps.phase == corev1.PodPending)
+ ns.stats.podsPhaseRunning += boolToInt(ps.phase == corev1.PodRunning)
+ ns.stats.podsPhaseSucceeded += boolToInt(ps.phase == corev1.PodSucceeded)
+ ns.stats.podsPhaseFailed += boolToInt(ps.phase == corev1.PodFailed)
+ for _, cs := range ps.initContainers {
+ ns.stats.initContainers++
+ ns.stats.initContStateRunning += boolToInt(cs.stateRunning)
+ ns.stats.initContStateWaiting += boolToInt(cs.stateWaiting)
+ ns.stats.initContStateTerminated += boolToInt(cs.stateTerminated)
+ }
+ for _, cs := range ps.containers {
+ ns.stats.containers++
+ ns.stats.contStateRunning += boolToInt(cs.stateRunning)
+ ns.stats.contStateWaiting += boolToInt(cs.stateWaiting)
+ ns.stats.contStateTerminated += boolToInt(cs.stateTerminated)
+ }
+ }
+
+ px := fmt.Sprintf("pod_%s_", ps.id())
+
+ mx[px+"cond_podready"] = condStatusToInt(ps.condPodReady)
+ mx[px+"cond_podscheduled"] = condStatusToInt(ps.condPodScheduled)
+ mx[px+"cond_podinitialized"] = condStatusToInt(ps.condPodInitialized)
+ mx[px+"cond_containersready"] = condStatusToInt(ps.condContainersReady)
+ mx[px+"phase_running"] = boolToInt(ps.phase == corev1.PodRunning)
+ mx[px+"phase_failed"] = boolToInt(ps.phase == corev1.PodFailed)
+ mx[px+"phase_succeeded"] = boolToInt(ps.phase == corev1.PodSucceeded)
+ mx[px+"phase_pending"] = boolToInt(ps.phase == corev1.PodPending)
+ mx[px+"age"] = int64(now.Sub(ps.creationTime).Seconds())
+ mx[px+"cpu_requests_used"] = ps.reqCPU
+ mx[px+"cpu_limits_used"] = ps.limitCPU
+ mx[px+"mem_requests_used"] = ps.reqMem
+ mx[px+"mem_limits_used"] = ps.limitMem
+
+ mx[px+"init_containers"] = int64(len(ps.initContainers))
+ mx[px+"containers"] = int64(len(ps.containers))
+
+ mx[px+"init_containers_state_running"] = 0
+ mx[px+"init_containers_state_waiting"] = 0
+ mx[px+"init_containers_state_terminated"] = 0
+ for _, cs := range ps.initContainers {
+ mx[px+"init_containers_state_running"] += boolToInt(cs.stateRunning)
+ mx[px+"init_containers_state_waiting"] += boolToInt(cs.stateWaiting)
+ mx[px+"init_containers_state_terminated"] += boolToInt(cs.stateTerminated)
+ }
+ mx[px+"containers_state_running"] = 0
+ mx[px+"containers_state_waiting"] = 0
+ mx[px+"containers_state_terminated"] = 0
+ for _, cs := range ps.containers {
+ if cs.new {
+ cs.new = false
+ ks.addContainerCharts(ps, cs)
+ }
+ mx[px+"containers_state_running"] += boolToInt(cs.stateRunning)
+ mx[px+"containers_state_waiting"] += boolToInt(cs.stateWaiting)
+ mx[px+"containers_state_terminated"] += boolToInt(cs.stateTerminated)
+
+ ppx := fmt.Sprintf("%scontainer_%s_", px, cs.name)
+ mx[ppx+"state_running"] = boolToInt(cs.stateRunning)
+ mx[ppx+"state_waiting"] = boolToInt(cs.stateWaiting)
+ mx[ppx+"state_terminated"] = boolToInt(cs.stateTerminated)
+ mx[ppx+"readiness"] = boolToInt(cs.ready)
+ mx[ppx+"restarts"] = cs.restarts
+ for _, r := range cs.stateWaitingReasons {
+ if r.new {
+ r.new = false
+ ks.addContainerWaitingStateReasonToChart(ps, cs, r.reason)
+ }
+ mx[ppx+"state_waiting_reason_"+r.reason] = boolToInt(r.active)
+ }
+ for _, r := range cs.stateTerminatedReasons {
+ if r.new {
+ r.new = false
+ ks.addContainerTerminatedStateReasonToChart(ps, cs, r.reason)
+ }
+ mx[ppx+"state_terminated_reason_"+r.reason] = boolToInt(r.active)
+ }
+ }
+ }
+}
+
+func (ks *KubeState) collectNodesState(mx map[string]int64) {
+ now := time.Now()
+ for _, ns := range ks.state.nodes {
+ if ns.deleted {
+ delete(ks.state.nodes, nodeSource(ns.name))
+ ks.removeNodeCharts(ns)
+ continue
+ }
+ if ns.new {
+ ns.new = false
+ ks.addNodeCharts(ns)
+ }
+
+ px := fmt.Sprintf("node_%s_", ns.id())
+
+ for typ, cond := range ns.conditions {
+ if cond.new {
+ cond.new = false
+ ks.addNodeConditionToCharts(ns, typ)
+ }
+ mx[px+"cond_"+strings.ToLower(typ)] = condStatusToInt(cond.status)
+ }
+
+ mx[px+"age"] = int64(now.Sub(ns.creationTime).Seconds())
+ mx[px+"alloc_pods_util"] = calcPercentage(ns.stats.pods, ns.allocatablePods)
+ mx[px+"pods_readiness_ready"] = ns.stats.podsReadinessReady
+ mx[px+"pods_readiness_unready"] = ns.stats.podsReadinessUnready
+ mx[px+"pods_readiness"] = calcPercentage(ns.stats.podsReadinessReady, ns.stats.pods)
+ mx[px+"pods_phase_running"] = ns.stats.podsPhaseRunning
+ mx[px+"pods_phase_failed"] = ns.stats.podsPhaseFailed
+ mx[px+"pods_phase_succeeded"] = ns.stats.podsPhaseSucceeded
+ mx[px+"pods_phase_pending"] = ns.stats.podsPhasePending
+ mx[px+"pods_cond_podready"] = ns.stats.podsCondPodReady
+ mx[px+"pods_cond_podscheduled"] = ns.stats.podsCondPodScheduled
+ mx[px+"pods_cond_podinitialized"] = ns.stats.podsCondPodInitialized
+ mx[px+"pods_cond_containersready"] = ns.stats.podsCondContainersReady
+ mx[px+"pods_cond_containersready"] = ns.stats.podsCondContainersReady
+ mx[px+"schedulability_schedulable"] = boolToInt(!ns.unSchedulable)
+ mx[px+"schedulability_unschedulable"] = boolToInt(ns.unSchedulable)
+ mx[px+"alloc_pods_available"] = ns.allocatablePods - ns.stats.pods
+ mx[px+"alloc_pods_allocated"] = ns.stats.pods
+ mx[px+"alloc_cpu_requests_util"] = calcPercentage(ns.stats.reqCPU, ns.allocatableCPU)
+ mx[px+"alloc_cpu_limits_util"] = calcPercentage(ns.stats.limitCPU, ns.allocatableCPU)
+ mx[px+"alloc_mem_requests_util"] = calcPercentage(ns.stats.reqMem, ns.allocatableMem)
+ mx[px+"alloc_mem_limits_util"] = calcPercentage(ns.stats.limitMem, ns.allocatableMem)
+ mx[px+"alloc_cpu_requests_used"] = ns.stats.reqCPU
+ mx[px+"alloc_cpu_limits_used"] = ns.stats.limitCPU
+ mx[px+"alloc_mem_requests_used"] = ns.stats.reqMem
+ mx[px+"alloc_mem_limits_used"] = ns.stats.limitMem
+ mx[px+"init_containers"] = ns.stats.initContainers
+ mx[px+"containers"] = ns.stats.containers
+ mx[px+"containers_state_running"] = ns.stats.contStateRunning
+ mx[px+"containers_state_waiting"] = ns.stats.contStateWaiting
+ mx[px+"containers_state_terminated"] = ns.stats.contStateTerminated
+ mx[px+"init_containers_state_running"] = ns.stats.initContStateRunning
+ mx[px+"init_containers_state_waiting"] = ns.stats.initContStateWaiting
+ mx[px+"init_containers_state_terminated"] = ns.stats.initContStateTerminated
+ }
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
+
+func condStatusToInt(cs corev1.ConditionStatus) int64 {
+ switch cs {
+ case corev1.ConditionFalse:
+ return 0
+ case corev1.ConditionTrue:
+ return 1
+ case corev1.ConditionUnknown:
+ return 0
+ default:
+ return 0
+ }
+}
+
+func calcPercentage(value, total int64) int64 {
+ if total == 0 {
+ return 0
+ }
+ return int64(float64(value) / float64(total) * 100 * precision)
+}
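
Note that `calcPercentage` returns an integer scaled by `precision` (1000); the utilization chart dimensions declare `Div: precision`, so the stored value is rendered back as a percentage with three decimal places. A small worked sketch of that round trip:

```go
package main

import "fmt"

const precision = 1000

func calcPercentage(value, total int64) int64 {
	if total == 0 {
		return 0
	}
	return int64(float64(value) / float64(total) * 100 * precision)
}

func main() {
	// 55 pods allocated out of 110 allocatable -> stored as 50000;
	// the chart dimension's Div: precision renders it as 50.000 %.
	fmt.Println(calcPercentage(55, 110)) // 50000
}
```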
diff --git a/src/go/plugin/go.d/modules/k8s_state/config_schema.json b/src/go/plugin/go.d/modules/k8s_state/config_schema.json
new file mode 100644
index 000000000..ae66d7cb5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/config_schema.json
@@ -0,0 +1,25 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Kubernetes Cluster State collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/discover_kubernetes.go b/src/go/plugin/go.d/modules/k8s_state/discover_kubernetes.go
new file mode 100644
index 000000000..5d435871a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/discover_kubernetes.go
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "context"
+ "os"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+)
+
+func newKubeDiscovery(client kubernetes.Interface, l *logger.Logger) *kubeDiscovery {
+ return &kubeDiscovery{
+ client: client,
+ Logger: l,
+ readyCh: make(chan struct{}),
+ stopCh: make(chan struct{}),
+ }
+}
+
+type kubeDiscovery struct {
+ *logger.Logger
+ client kubernetes.Interface
+ discoverers []discoverer
+ readyCh chan struct{}
+ stopCh chan struct{}
+}
+
+func (d *kubeDiscovery) run(ctx context.Context, in chan<- resource) {
+ d.Info("kube_discoverer is started")
+ defer func() { close(d.stopCh); d.Info("kube_discoverer is stopped") }()
+
+ d.discoverers = d.setupDiscoverers(ctx)
+
+ var wg sync.WaitGroup
+ updates := make(chan resource)
+
+ for _, dd := range d.discoverers {
+ wg.Add(1)
+ go func(dd discoverer) { defer wg.Done(); dd.run(ctx, updates) }(dd)
+ }
+
+ wg.Add(1)
+ go func() { defer wg.Done(); d.runDiscover(ctx, updates, in) }()
+
+ close(d.readyCh)
+ wg.Wait()
+ <-ctx.Done()
+}
+
+func (d *kubeDiscovery) ready() bool {
+ if !isChanClosed(d.readyCh) {
+ return false
+ }
+ for _, dd := range d.discoverers {
+ if !dd.ready() {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *kubeDiscovery) stopped() bool {
+ if !isChanClosed(d.stopCh) {
+ return false
+ }
+ for _, dd := range d.discoverers {
+ if !dd.stopped() {
+ return false
+ }
+ }
+ return true
+}
+
+func (d *kubeDiscovery) runDiscover(ctx context.Context, updates chan resource, in chan<- resource) {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case r := <-updates:
+ select {
+ case <-ctx.Done():
+ return
+ case in <- r:
+ }
+ }
+ }
+}
+
+const resyncPeriod = 10 * time.Minute
+
+var (
+ myNodeName = os.Getenv("MY_NODE_NAME")
+)
+
+func (d *kubeDiscovery) setupDiscoverers(ctx context.Context) []discoverer {
+ node := d.client.CoreV1().Nodes()
+ nodeWatcher := &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return node.List(ctx, options) },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { return node.Watch(ctx, options) },
+ }
+
+ pod := d.client.CoreV1().Pods(corev1.NamespaceAll)
+ podWatcher := &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if myNodeName != "" {
+ options.FieldSelector = "spec.nodeName=" + myNodeName
+ }
+ return pod.List(ctx, options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if myNodeName != "" {
+ options.FieldSelector = "spec.nodeName=" + myNodeName
+ }
+ return pod.Watch(ctx, options)
+ },
+ }
+
+ return []discoverer{
+ newNodeDiscoverer(cache.NewSharedInformer(nodeWatcher, &corev1.Node{}, resyncPeriod), d.Logger),
+ newPodDiscoverer(cache.NewSharedInformer(podWatcher, &corev1.Pod{}, resyncPeriod), d.Logger),
+ }
+}
+
+func enqueue(queue *workqueue.Type, obj interface{}) {
+ key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
+ if err != nil {
+ return
+ }
+ queue.Add(key)
+}
+
+func send(ctx context.Context, in chan<- resource, r resource) {
+ if r == nil {
+ return
+ }
+ select {
+ case <-ctx.Done():
+ case in <- r:
+ }
+}
+
+func isChanClosed(ch chan struct{}) bool {
+ select {
+ case <-ch:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/discover_node.go b/src/go/plugin/go.d/modules/k8s_state/discover_node.go
new file mode 100644
index 000000000..1d91436c8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/discover_node.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "context"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+)
+
+func newNodeDiscoverer(si cache.SharedInformer, l *logger.Logger) *nodeDiscoverer {
+ if si == nil {
+ panic("nil node shared informer")
+ }
+
+ queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "node"})
+ _, _ = si.AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) { enqueue(queue, obj) },
+ UpdateFunc: func(_, obj interface{}) { enqueue(queue, obj) },
+ DeleteFunc: func(obj interface{}) { enqueue(queue, obj) },
+ })
+
+ return &nodeDiscoverer{
+ Logger: l,
+ informer: si,
+ queue: queue,
+ readyCh: make(chan struct{}),
+ stopCh: make(chan struct{}),
+ }
+}
+
+type nodeResource struct {
+ src string
+ val interface{}
+}
+
+func (r nodeResource) source() string { return r.src }
+func (r nodeResource) kind() kubeResourceKind { return kubeResourceNode }
+func (r nodeResource) value() interface{} { return r.val }
+
+type nodeDiscoverer struct {
+ *logger.Logger
+ informer cache.SharedInformer
+ queue *workqueue.Type
+ readyCh chan struct{}
+ stopCh chan struct{}
+}
+
+func (d *nodeDiscoverer) run(ctx context.Context, in chan<- resource) {
+ d.Info("node_discoverer is started")
+ defer func() { close(d.stopCh); d.Info("node_discoverer is stopped") }()
+
+ defer d.queue.ShutDown()
+
+ go d.informer.Run(ctx.Done())
+
+ if !cache.WaitForCacheSync(ctx.Done(), d.informer.HasSynced) {
+ return
+ }
+
+ go d.runDiscover(ctx, in)
+ close(d.readyCh)
+
+ <-ctx.Done()
+}
+
+func (d *nodeDiscoverer) ready() bool { return isChanClosed(d.readyCh) }
+func (d *nodeDiscoverer) stopped() bool { return isChanClosed(d.stopCh) }
+
+func (d *nodeDiscoverer) runDiscover(ctx context.Context, in chan<- resource) {
+ for {
+ item, shutdown := d.queue.Get()
+ if shutdown {
+ return
+ }
+
+ func() {
+ defer d.queue.Done(item)
+
+ key := item.(string)
+ _, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ return
+ }
+
+ item, exists, err := d.informer.GetStore().GetByKey(key)
+ if err != nil {
+ return
+ }
+
+ r := &nodeResource{src: nodeSource(name)}
+ if exists {
+ r.val = item
+ }
+ send(ctx, in, r)
+ }()
+ }
+}
+
+func nodeSource(name string) string {
+ return "k8s/node/" + name
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/discover_pod.go b/src/go/plugin/go.d/modules/k8s_state/discover_pod.go
new file mode 100644
index 000000000..53e9ceb92
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/discover_pod.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "context"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/client-go/util/workqueue"
+)
+
+func newPodDiscoverer(si cache.SharedInformer, l *logger.Logger) *podDiscoverer {
+ if si == nil {
+ panic("nil pod shared informer")
+ }
+
+ queue := workqueue.NewWithConfig(workqueue.QueueConfig{Name: "pod"})
+ _, _ = si.AddEventHandler(cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) { enqueue(queue, obj) },
+ UpdateFunc: func(_, obj interface{}) { enqueue(queue, obj) },
+ DeleteFunc: func(obj interface{}) { enqueue(queue, obj) },
+ })
+
+ return &podDiscoverer{
+ Logger: l,
+ informer: si,
+ queue: queue,
+ readyCh: make(chan struct{}),
+ stopCh: make(chan struct{}),
+ }
+}
+
+type podResource struct {
+ src string
+ val interface{}
+}
+
+func (r podResource) source() string { return r.src }
+func (r podResource) kind() kubeResourceKind { return kubeResourcePod }
+func (r podResource) value() interface{} { return r.val }
+
+type podDiscoverer struct {
+ *logger.Logger
+ informer cache.SharedInformer
+ queue *workqueue.Type
+ readyCh chan struct{}
+ stopCh chan struct{}
+}
+
+func (d *podDiscoverer) run(ctx context.Context, in chan<- resource) {
+ d.Info("pod_discoverer is started")
+ defer func() { close(d.stopCh); d.Info("pod_discoverer is stopped") }()
+
+ defer d.queue.ShutDown()
+
+ go d.informer.Run(ctx.Done())
+
+ if !cache.WaitForCacheSync(ctx.Done(), d.informer.HasSynced) {
+ return
+ }
+
+ go d.runDiscover(ctx, in)
+ close(d.readyCh)
+
+ <-ctx.Done()
+}
+
+func (d *podDiscoverer) ready() bool { return isChanClosed(d.readyCh) }
+func (d *podDiscoverer) stopped() bool { return isChanClosed(d.stopCh) }
+
+func (d *podDiscoverer) runDiscover(ctx context.Context, in chan<- resource) {
+ for {
+ item, shutdown := d.queue.Get()
+ if shutdown {
+ return
+ }
+
+ func() {
+ defer d.queue.Done(item)
+
+ key := item.(string)
+ ns, name, err := cache.SplitMetaNamespaceKey(key)
+ if err != nil {
+ return
+ }
+
+ item, exists, err := d.informer.GetStore().GetByKey(key)
+ if err != nil {
+ return
+ }
+
+ r := &podResource{src: podSource(ns, name)}
+ if exists {
+ r.val = item
+ }
+ send(ctx, in, r)
+ }()
+ }
+}
+
+func podSource(namespace, name string) string {
+ return "k8s/pod/" + namespace + "/" + name
+}
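
The `nodeSource`/`podSource` keys above are what tie discovery back to collection: `collectPodsState` looks up a pod's node via `ks.state.nodes[nodeSource(ps.nodeName)]`, and deleted resources are dropped from state by the same keys. A minimal sketch of the key format:

```go
package main

import "fmt"

func nodeSource(name string) string { return "k8s/node/" + name }

func podSource(namespace, name string) string { return "k8s/pod/" + namespace + "/" + name }

func main() {
	fmt.Println(nodeSource("node01"))          // k8s/node/node01
	fmt.Println(podSource("default", "pod01")) // k8s/pod/default/pod01
}
```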
diff --git a/src/go/plugin/go.d/modules/k8s_state/init.go b/src/go/plugin/go.d/modules/k8s_state/init.go
new file mode 100644
index 000000000..998131394
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/init.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "k8s.io/client-go/kubernetes"
+)
+
+func (ks *KubeState) initClient() (kubernetes.Interface, error) {
+ return ks.newKubeClient()
+}
+
+func (ks *KubeState) initDiscoverer(client kubernetes.Interface) discoverer {
+ return newKubeDiscovery(client, ks.Logger)
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/integrations/kubernetes_cluster_state.md b/src/go/plugin/go.d/modules/k8s_state/integrations/kubernetes_cluster_state.md
new file mode 100644
index 000000000..5f5e36f87
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/integrations/kubernetes_cluster_state.md
@@ -0,0 +1,253 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/k8s_state/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/k8s_state/metadata.yaml"
+sidebar_label: "Kubernetes Cluster State"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Kubernetes"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kubernetes Cluster State
+
+
+<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: k8s_state
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Kubernetes Nodes, Pods and Containers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per node
+
+These metrics refer to the Node.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |
+| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |
+| k8s_node_name | Node name. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s_state.node_allocatable_cpu_requests_utilization | requests | % |
+| k8s_state.node_allocatable_cpu_requests_used | requests | millicpu |
+| k8s_state.node_allocatable_cpu_limits_utilization | limits | % |
+| k8s_state.node_allocatable_cpu_limits_used | limits | millicpu |
+| k8s_state.node_allocatable_mem_requests_utilization | requests | % |
+| k8s_state.node_allocatable_mem_requests_used | requests | bytes |
+| k8s_state.node_allocatable_mem_limits_utilization | limits | % |
+| k8s_state.node_allocatable_mem_limits_used | limits | bytes |
+| k8s_state.node_allocatable_pods_utilization | allocated | % |
+| k8s_state.node_allocatable_pods_usage | available, allocated | pods |
+| k8s_state.node_condition | a dimension per condition | status |
+| k8s_state.node_schedulability | schedulable, unschedulable | state |
+| k8s_state.node_pods_readiness | ready | % |
+| k8s_state.node_pods_readiness_state | ready, unready | pods |
+| k8s_state.node_pods_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | pods |
+| k8s_state.node_pods_phase | running, failed, succeeded, pending | pods |
+| k8s_state.node_containers | containers, init_containers | containers |
+| k8s_state.node_containers_state | running, waiting, terminated | containers |
+| k8s_state.node_init_containers_state | running, waiting, terminated | containers |
+| k8s_state.node_age | age | seconds |
+
+### Per pod
+
+These metrics refer to the Pod.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |
+| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |
+| k8s_node_name | Node name. |
+| k8s_namespace | Namespace. |
+| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |
+| k8s_controller_name | Controller name. |
+| k8s_pod_name | Pod name. |
+| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s_state.pod_cpu_requests_used | requests | millicpu |
+| k8s_state.pod_cpu_limits_used | limits | millicpu |
+| k8s_state.pod_mem_requests_used | requests | bytes |
+| k8s_state.pod_mem_limits_used | limits | bytes |
+| k8s_state.pod_condition | pod_ready, pod_scheduled, pod_initialized, containers_ready | state |
+| k8s_state.pod_phase | running, failed, succeeded, pending | state |
+| k8s_state.pod_age | age | seconds |
+| k8s_state.pod_containers | containers, init_containers | containers |
+| k8s_state.pod_containers_state | running, waiting, terminated | containers |
+| k8s_state.pod_init_containers_state | running, waiting, terminated | containers |
+
+### Per container
+
+These metrics refer to the Pod container.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| k8s_cluster_id | Cluster ID. This is equal to the kube-system namespace UID. |
+| k8s_cluster_name | Cluster name. Cluster name discovery only works in GKE. |
+| k8s_node_name | Node name. |
+| k8s_namespace | Namespace. |
+| k8s_controller_kind | Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.). |
+| k8s_controller_name | Controller name. |
+| k8s_pod_name | Pod name. |
+| k8s_qos_class | Pod QOS class (burstable, guaranteed, besteffort). |
+| k8s_container_name | Container name. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| k8s_state.pod_container_readiness_state | ready | state |
+| k8s_state.pod_container_restarts | restarts | restarts |
+| k8s_state.pod_container_state | running, waiting, terminated | state |
+| k8s_state.pod_container_waiting_state_reason | a dimension per reason | state |
+| k8s_state.pod_container_terminated_state_reason | a dimension per reason | state |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/k8s_state.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/k8s_state.conf
+```
+
+#### Options
+
+
+
+There are no configuration options.
+
+#### Examples
+There are no configuration examples.
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `k8s_state` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m k8s_state
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `k8s_state` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep k8s_state
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep k8s_state /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep k8s_state
+```
+
+
diff --git a/src/go/plugin/go.d/modules/k8s_state/kube_state.go b/src/go/plugin/go.d/modules/k8s_state/kube_state.go
new file mode 100644
index 000000000..26962928e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/kube_state.go
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "context"
+ _ "embed"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "k8s.io/client-go/kubernetes"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("k8s_state", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ Disabled: true,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *KubeState {
+ return &KubeState{
+ initDelay: time.Second * 3,
+ newKubeClient: newKubeClient,
+ charts: baseCharts.Copy(),
+ once: &sync.Once{},
+ wg: &sync.WaitGroup{},
+ state: newKubeState(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+}
+
+type (
+ KubeState struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ client kubernetes.Interface
+ newKubeClient func() (kubernetes.Interface, error)
+
+ startTime time.Time
+ initDelay time.Duration
+ once *sync.Once
+ wg *sync.WaitGroup
+ discoverer discoverer
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ kubeClusterID string
+ kubeClusterName string
+
+ state *kubeState
+ }
+ discoverer interface {
+ run(ctx context.Context, in chan<- resource)
+ ready() bool
+ stopped() bool
+ }
+)
+
+func (ks *KubeState) Configuration() any {
+ return ks.Config
+}
+
+func (ks *KubeState) Init() error {
+ client, err := ks.initClient()
+ if err != nil {
+ ks.Errorf("client initialization: %v", err)
+ return err
+ }
+ ks.client = client
+
+ ks.ctx, ks.ctxCancel = context.WithCancel(context.Background())
+
+ ks.discoverer = ks.initDiscoverer(ks.client)
+
+ return nil
+}
+
+func (ks *KubeState) Check() error {
+ if ks.client == nil || ks.discoverer == nil {
+ ks.Error("not initialized job")
+ return errors.New("not initialized")
+ }
+
+ ver, err := ks.client.Discovery().ServerVersion()
+ if err != nil {
+ err := fmt.Errorf("failed to connect to K8s API server: %v", err)
+ ks.Error(err)
+ return err
+ }
+
+ ks.Infof("successfully connected to the Kubernetes API server '%s'", ver)
+
+ return nil
+}
+
+func (ks *KubeState) Charts() *module.Charts {
+ return ks.charts
+}
+
+func (ks *KubeState) Collect() map[string]int64 {
+ ms, err := ks.collect()
+ if err != nil {
+ ks.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+
+func (ks *KubeState) Cleanup() {
+ if ks.ctxCancel == nil {
+ return
+ }
+ ks.ctxCancel()
+
+ c := make(chan struct{})
+ go func() { defer close(c); ks.wg.Wait() }()
+
+ t := time.NewTimer(time.Second * 5)
+ defer t.Stop()
+
+ select {
+ case <-c:
+ return
+ case <-t.C:
+ return
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/kube_state_test.go b/src/go/plugin/go.d/modules/k8s_state/kube_state_test.go
new file mode 100644
index 000000000..cf52c08b6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/kube_state_test.go
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ apiresource "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/version"
+ "k8s.io/client-go/discovery"
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/fake"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestKubeState_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &KubeState{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestKubeState_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func() *KubeState
+ }{
+ "success when no error on initializing K8s client": {
+ wantFail: false,
+ prepare: func() *KubeState {
+ ks := New()
+ ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil }
+ return ks
+ },
+ },
+ "fail when get an error on initializing K8s client": {
+ wantFail: true,
+ prepare: func() *KubeState {
+ ks := New()
+ ks.newKubeClient = func() (kubernetes.Interface, error) { return nil, errors.New("newKubeClient() error") }
+ return ks
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ks := test.prepare()
+
+ if test.wantFail {
+ assert.Error(t, ks.Init())
+ } else {
+ assert.NoError(t, ks.Init())
+ }
+ })
+ }
+}
+
+func TestKubeState_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func() *KubeState
+ }{
+ "success when connected to the K8s API": {
+ wantFail: false,
+ prepare: func() *KubeState {
+ ks := New()
+ ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil }
+ return ks
+ },
+ },
+ "fail when not connected to the K8s API": {
+ wantFail: true,
+ prepare: func() *KubeState {
+ ks := New()
+ client := &brokenInfoKubeClient{fake.NewSimpleClientset()}
+ ks.newKubeClient = func() (kubernetes.Interface, error) { return client, nil }
+ return ks
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ks := test.prepare()
+ require.NoError(t, ks.Init())
+
+ if test.wantFail {
+ assert.Error(t, ks.Check())
+ } else {
+ assert.NoError(t, ks.Check())
+ }
+ })
+ }
+}
+
+func TestKubeState_Charts(t *testing.T) {
+ ks := New()
+
+ assert.NotEmpty(t, *ks.Charts())
+}
+
+func TestKubeState_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *KubeState
+ doInit bool
+ doCollect bool
+ }{
+ "before init": {
+ doInit: false,
+ doCollect: false,
+ prepare: func() *KubeState {
+ ks := New()
+ ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil }
+ return ks
+ },
+ },
+ "after init": {
+ doInit: true,
+ doCollect: false,
+ prepare: func() *KubeState {
+ ks := New()
+ ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil }
+ return ks
+ },
+ },
+ "after collect": {
+ doInit: true,
+ doCollect: true,
+ prepare: func() *KubeState {
+ ks := New()
+ ks.newKubeClient = func() (kubernetes.Interface, error) { return fake.NewSimpleClientset(), nil }
+ return ks
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ks := test.prepare()
+
+ if test.doInit {
+ _ = ks.Init()
+ }
+ if test.doCollect {
+ _ = ks.Collect()
+ time.Sleep(ks.initDelay)
+ }
+
+ assert.NotPanics(t, ks.Cleanup)
+ time.Sleep(time.Second)
+ if test.doCollect {
+ assert.True(t, ks.discoverer.stopped())
+ }
+ })
+ }
+}
+
+func TestKubeState_Collect(t *testing.T) {
+ type (
+ testCaseStep func(t *testing.T, ks *KubeState)
+ testCase struct {
+ client kubernetes.Interface
+ steps []testCaseStep
+ }
+ )
+
+ tests := map[string]struct {
+ create func(t *testing.T) testCase
+ }{
+ "Node only": {
+ create: func(t *testing.T) testCase {
+ client := fake.NewSimpleClientset(
+ newNode("node01"),
+ )
+
+ step1 := func(t *testing.T, ks *KubeState) {
+ mx := ks.Collect()
+ expected := map[string]int64{
+ "discovery_node_discoverer_state": 1,
+ "discovery_pod_discoverer_state": 1,
+ "node_node01_age": 3,
+ "node_node01_alloc_cpu_limits_used": 0,
+ "node_node01_alloc_cpu_limits_util": 0,
+ "node_node01_alloc_cpu_requests_used": 0,
+ "node_node01_alloc_cpu_requests_util": 0,
+ "node_node01_alloc_mem_limits_used": 0,
+ "node_node01_alloc_mem_limits_util": 0,
+ "node_node01_alloc_mem_requests_used": 0,
+ "node_node01_alloc_mem_requests_util": 0,
+ "node_node01_alloc_pods_allocated": 0,
+ "node_node01_alloc_pods_available": 110,
+ "node_node01_alloc_pods_util": 0,
+ "node_node01_cond_diskpressure": 0,
+ "node_node01_cond_memorypressure": 0,
+ "node_node01_cond_networkunavailable": 0,
+ "node_node01_cond_pidpressure": 0,
+ "node_node01_cond_ready": 1,
+ "node_node01_schedulability_schedulable": 1,
+ "node_node01_schedulability_unschedulable": 0,
+ "node_node01_containers": 0,
+ "node_node01_containers_state_running": 0,
+ "node_node01_containers_state_terminated": 0,
+ "node_node01_containers_state_waiting": 0,
+ "node_node01_init_containers": 0,
+ "node_node01_init_containers_state_running": 0,
+ "node_node01_init_containers_state_terminated": 0,
+ "node_node01_init_containers_state_waiting": 0,
+ "node_node01_pods_cond_containersready": 0,
+ "node_node01_pods_cond_podinitialized": 0,
+ "node_node01_pods_cond_podready": 0,
+ "node_node01_pods_cond_podscheduled": 0,
+ "node_node01_pods_phase_failed": 0,
+ "node_node01_pods_phase_pending": 0,
+ "node_node01_pods_phase_running": 0,
+ "node_node01_pods_phase_succeeded": 0,
+ "node_node01_pods_readiness": 0,
+ "node_node01_pods_readiness_ready": 0,
+ "node_node01_pods_readiness_unready": 0,
+ }
+ copyAge(expected, mx)
+ assert.Equal(t, expected, mx)
+ assert.Equal(t,
+ len(nodeChartsTmpl)+len(baseCharts),
+ len(*ks.Charts()),
+ )
+ }
+
+ return testCase{
+ client: client,
+ steps: []testCaseStep{step1},
+ }
+ },
+ },
+ "Pod only": {
+ create: func(t *testing.T) testCase {
+ pod := newPod("node01", "pod01")
+ client := fake.NewSimpleClientset(
+ pod,
+ )
+
+ step1 := func(t *testing.T, ks *KubeState) {
+ mx := ks.Collect()
+ expected := map[string]int64{
+ "discovery_node_discoverer_state": 1,
+ "discovery_pod_discoverer_state": 1,
+ "pod_default_pod01_age": 3,
+ "pod_default_pod01_cpu_limits_used": 400,
+ "pod_default_pod01_cpu_requests_used": 200,
+ "pod_default_pod01_mem_limits_used": 419430400,
+ "pod_default_pod01_mem_requests_used": 209715200,
+ "pod_default_pod01_cond_containersready": 1,
+ "pod_default_pod01_cond_podinitialized": 1,
+ "pod_default_pod01_cond_podready": 1,
+ "pod_default_pod01_cond_podscheduled": 1,
+ "pod_default_pod01_container_container1_readiness": 1,
+ "pod_default_pod01_container_container1_restarts": 0,
+ "pod_default_pod01_container_container1_state_running": 1,
+ "pod_default_pod01_container_container1_state_terminated": 0,
+ "pod_default_pod01_container_container1_state_waiting": 0,
+ "pod_default_pod01_container_container2_readiness": 1,
+ "pod_default_pod01_container_container2_restarts": 0,
+ "pod_default_pod01_container_container2_state_running": 1,
+ "pod_default_pod01_container_container2_state_terminated": 0,
+ "pod_default_pod01_container_container2_state_waiting": 0,
+ "pod_default_pod01_containers": 2,
+ "pod_default_pod01_containers_state_running": 2,
+ "pod_default_pod01_containers_state_terminated": 0,
+ "pod_default_pod01_containers_state_waiting": 0,
+ "pod_default_pod01_init_containers": 1,
+ "pod_default_pod01_init_containers_state_running": 0,
+ "pod_default_pod01_init_containers_state_terminated": 1,
+ "pod_default_pod01_init_containers_state_waiting": 0,
+ "pod_default_pod01_phase_failed": 0,
+ "pod_default_pod01_phase_pending": 0,
+ "pod_default_pod01_phase_running": 1,
+ "pod_default_pod01_phase_succeeded": 0,
+ }
+ copyAge(expected, mx)
+
+ assert.Equal(t, expected, mx)
+ assert.Equal(t,
+ len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers)+len(baseCharts),
+ len(*ks.Charts()),
+ )
+ }
+
+ return testCase{
+ client: client,
+ steps: []testCaseStep{step1},
+ }
+ },
+ },
+ "Nodes and Pods": {
+ create: func(t *testing.T) testCase {
+ node := newNode("node01")
+ pod := newPod(node.Name, "pod01")
+ client := fake.NewSimpleClientset(
+ node,
+ pod,
+ )
+
+ step1 := func(t *testing.T, ks *KubeState) {
+ mx := ks.Collect()
+ expected := map[string]int64{
+ "discovery_node_discoverer_state": 1,
+ "discovery_pod_discoverer_state": 1,
+ "node_node01_age": 3,
+ "node_node01_alloc_cpu_limits_used": 400,
+ "node_node01_alloc_cpu_limits_util": 11428,
+ "node_node01_alloc_cpu_requests_used": 200,
+ "node_node01_alloc_cpu_requests_util": 5714,
+ "node_node01_alloc_mem_limits_used": 419430400,
+ "node_node01_alloc_mem_limits_util": 11428,
+ "node_node01_alloc_mem_requests_used": 209715200,
+ "node_node01_alloc_mem_requests_util": 5714,
+ "node_node01_alloc_pods_allocated": 1,
+ "node_node01_alloc_pods_available": 109,
+ "node_node01_alloc_pods_util": 909,
+ "node_node01_cond_diskpressure": 0,
+ "node_node01_cond_memorypressure": 0,
+ "node_node01_cond_networkunavailable": 0,
+ "node_node01_cond_pidpressure": 0,
+ "node_node01_cond_ready": 1,
+ "node_node01_schedulability_schedulable": 1,
+ "node_node01_schedulability_unschedulable": 0,
+ "node_node01_containers": 2,
+ "node_node01_containers_state_running": 2,
+ "node_node01_containers_state_terminated": 0,
+ "node_node01_containers_state_waiting": 0,
+ "node_node01_init_containers": 1,
+ "node_node01_init_containers_state_running": 0,
+ "node_node01_init_containers_state_terminated": 1,
+ "node_node01_init_containers_state_waiting": 0,
+ "node_node01_pods_cond_containersready": 1,
+ "node_node01_pods_cond_podinitialized": 1,
+ "node_node01_pods_cond_podready": 1,
+ "node_node01_pods_cond_podscheduled": 1,
+ "node_node01_pods_phase_failed": 0,
+ "node_node01_pods_phase_pending": 0,
+ "node_node01_pods_phase_running": 1,
+ "node_node01_pods_phase_succeeded": 0,
+ "node_node01_pods_readiness": 100000,
+ "node_node01_pods_readiness_ready": 1,
+ "node_node01_pods_readiness_unready": 0,
+ "pod_default_pod01_age": 3,
+ "pod_default_pod01_cpu_limits_used": 400,
+ "pod_default_pod01_cpu_requests_used": 200,
+ "pod_default_pod01_mem_limits_used": 419430400,
+ "pod_default_pod01_mem_requests_used": 209715200,
+ "pod_default_pod01_cond_containersready": 1,
+ "pod_default_pod01_cond_podinitialized": 1,
+ "pod_default_pod01_cond_podready": 1,
+ "pod_default_pod01_cond_podscheduled": 1,
+ "pod_default_pod01_container_container1_readiness": 1,
+ "pod_default_pod01_container_container1_restarts": 0,
+ "pod_default_pod01_container_container1_state_running": 1,
+ "pod_default_pod01_container_container1_state_terminated": 0,
+ "pod_default_pod01_container_container1_state_waiting": 0,
+ "pod_default_pod01_container_container2_readiness": 1,
+ "pod_default_pod01_container_container2_restarts": 0,
+ "pod_default_pod01_container_container2_state_running": 1,
+ "pod_default_pod01_container_container2_state_terminated": 0,
+ "pod_default_pod01_container_container2_state_waiting": 0,
+ "pod_default_pod01_containers": 2,
+ "pod_default_pod01_containers_state_running": 2,
+ "pod_default_pod01_containers_state_terminated": 0,
+ "pod_default_pod01_containers_state_waiting": 0,
+ "pod_default_pod01_init_containers": 1,
+ "pod_default_pod01_init_containers_state_running": 0,
+ "pod_default_pod01_init_containers_state_terminated": 1,
+ "pod_default_pod01_init_containers_state_waiting": 0,
+ "pod_default_pod01_phase_failed": 0,
+ "pod_default_pod01_phase_pending": 0,
+ "pod_default_pod01_phase_running": 1,
+ "pod_default_pod01_phase_succeeded": 0,
+ }
+ copyAge(expected, mx)
+
+ assert.Equal(t, expected, mx)
+ assert.Equal(t,
+ len(nodeChartsTmpl)+len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers)+len(baseCharts),
+ len(*ks.Charts()),
+ )
+ }
+
+ return testCase{
+ client: client,
+ steps: []testCaseStep{step1},
+ }
+ },
+ },
+ "delete a Pod in runtime": {
+ create: func(t *testing.T) testCase {
+ ctx := context.Background()
+ node := newNode("node01")
+ pod := newPod(node.Name, "pod01")
+ client := fake.NewSimpleClientset(
+ node,
+ pod,
+ )
+ step1 := func(t *testing.T, ks *KubeState) {
+ _ = ks.Collect()
+ _ = client.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
+ }
+
+ step2 := func(t *testing.T, ks *KubeState) {
+ mx := ks.Collect()
+ expected := map[string]int64{
+ "discovery_node_discoverer_state": 1,
+ "discovery_pod_discoverer_state": 1,
+ "node_node01_age": 4,
+ "node_node01_alloc_cpu_limits_used": 0,
+ "node_node01_alloc_cpu_limits_util": 0,
+ "node_node01_alloc_cpu_requests_used": 0,
+ "node_node01_alloc_cpu_requests_util": 0,
+ "node_node01_alloc_mem_limits_used": 0,
+ "node_node01_alloc_mem_limits_util": 0,
+ "node_node01_alloc_mem_requests_used": 0,
+ "node_node01_alloc_mem_requests_util": 0,
+ "node_node01_alloc_pods_allocated": 0,
+ "node_node01_alloc_pods_available": 110,
+ "node_node01_alloc_pods_util": 0,
+ "node_node01_cond_diskpressure": 0,
+ "node_node01_cond_memorypressure": 0,
+ "node_node01_cond_networkunavailable": 0,
+ "node_node01_cond_pidpressure": 0,
+ "node_node01_cond_ready": 1,
+ "node_node01_schedulability_schedulable": 1,
+ "node_node01_schedulability_unschedulable": 0,
+ "node_node01_containers": 0,
+ "node_node01_containers_state_running": 0,
+ "node_node01_containers_state_terminated": 0,
+ "node_node01_containers_state_waiting": 0,
+ "node_node01_init_containers": 0,
+ "node_node01_init_containers_state_running": 0,
+ "node_node01_init_containers_state_terminated": 0,
+ "node_node01_init_containers_state_waiting": 0,
+ "node_node01_pods_cond_containersready": 0,
+ "node_node01_pods_cond_podinitialized": 0,
+ "node_node01_pods_cond_podready": 0,
+ "node_node01_pods_cond_podscheduled": 0,
+ "node_node01_pods_phase_failed": 0,
+ "node_node01_pods_phase_pending": 0,
+ "node_node01_pods_phase_running": 0,
+ "node_node01_pods_phase_succeeded": 0,
+ "node_node01_pods_readiness": 0,
+ "node_node01_pods_readiness_ready": 0,
+ "node_node01_pods_readiness_unready": 0,
+ }
+ copyAge(expected, mx)
+
+ assert.Equal(t, expected, mx)
+ assert.Equal(t,
+ len(nodeChartsTmpl)+len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers)+len(baseCharts),
+ len(*ks.Charts()),
+ )
+ assert.Equal(t,
+ len(podChartsTmpl)+len(containerChartsTmpl)*len(pod.Spec.Containers),
+ calcObsoleteCharts(*ks.Charts()),
+ )
+ }
+
+ return testCase{
+ client: client,
+ steps: []testCaseStep{step1, step2},
+ }
+ },
+ },
+ "slow spec.NodeName set": {
+ create: func(t *testing.T) testCase {
+ ctx := context.Background()
+ node := newNode("node01")
+ podOrig := newPod(node.Name, "pod01")
+ podOrig.Spec.NodeName = ""
+ client := fake.NewSimpleClientset(
+ node,
+ podOrig,
+ )
+ podUpdated := newPod(node.Name, "pod01") // with Spec.NodeName set
+
+ step1 := func(t *testing.T, ks *KubeState) {
+ _ = ks.Collect()
+ for _, c := range *ks.Charts() {
+ if strings.HasPrefix(c.ID, "pod_") {
+ ok := isLabelValueSet(c, labelKeyNodeName)
+ assert.Falsef(t, ok, "chart '%s' has a non-empty %s label", c.ID, labelKeyNodeName)
+ }
+ }
+ }
+ step2 := func(t *testing.T, ks *KubeState) {
+ _, _ = client.CoreV1().Pods(podOrig.Namespace).Update(ctx, podUpdated, metav1.UpdateOptions{})
+ time.Sleep(time.Millisecond * 50)
+ _ = ks.Collect()
+
+ for _, c := range *ks.Charts() {
+ if strings.HasPrefix(c.ID, "pod_") {
+ ok := isLabelValueSet(c, labelKeyNodeName)
+ assert.Truef(t, ok, "chart '%s' has an empty %s label", c.ID, labelKeyNodeName)
+ }
+ }
+ }
+
+ return testCase{
+ client: client,
+ steps: []testCaseStep{step1, step2},
+ }
+ },
+ },
+ "add a Pod in runtime": {
+ create: func(t *testing.T) testCase {
+ ctx := context.Background()
+ node := newNode("node01")
+ pod1 := newPod(node.Name, "pod01")
+ pod2 := newPod(node.Name, "pod02")
+ client := fake.NewSimpleClientset(
+ node,
+ pod1,
+ )
+ step1 := func(t *testing.T, ks *KubeState) {
+ _ = ks.Collect()
+ _, _ = client.CoreV1().Pods(pod1.Namespace).Create(ctx, pod2, metav1.CreateOptions{})
+ }
+
+ step2 := func(t *testing.T, ks *KubeState) {
+ mx := ks.Collect()
+ expected := map[string]int64{
+ "discovery_node_discoverer_state": 1,
+ "discovery_pod_discoverer_state": 1,
+ "node_node01_age": 4,
+ "node_node01_alloc_cpu_limits_used": 800,
+ "node_node01_alloc_cpu_limits_util": 22857,
+ "node_node01_alloc_cpu_requests_used": 400,
+ "node_node01_alloc_cpu_requests_util": 11428,
+ "node_node01_alloc_mem_limits_used": 838860800,
+ "node_node01_alloc_mem_limits_util": 22857,
+ "node_node01_alloc_mem_requests_used": 419430400,
+ "node_node01_alloc_mem_requests_util": 11428,
+ "node_node01_alloc_pods_allocated": 2,
+ "node_node01_alloc_pods_available": 108,
+ "node_node01_alloc_pods_util": 1818,
+ "node_node01_cond_diskpressure": 0,
+ "node_node01_cond_memorypressure": 0,
+ "node_node01_cond_networkunavailable": 0,
+ "node_node01_cond_pidpressure": 0,
+ "node_node01_cond_ready": 1,
+ "node_node01_schedulability_schedulable": 1,
+ "node_node01_schedulability_unschedulable": 0,
+ "node_node01_containers": 4,
+ "node_node01_containers_state_running": 4,
+ "node_node01_containers_state_terminated": 0,
+ "node_node01_containers_state_waiting": 0,
+ "node_node01_init_containers": 2,
+ "node_node01_init_containers_state_running": 0,
+ "node_node01_init_containers_state_terminated": 2,
+ "node_node01_init_containers_state_waiting": 0,
+ "node_node01_pods_cond_containersready": 2,
+ "node_node01_pods_cond_podinitialized": 2,
+ "node_node01_pods_cond_podready": 2,
+ "node_node01_pods_cond_podscheduled": 2,
+ "node_node01_pods_phase_failed": 0,
+ "node_node01_pods_phase_pending": 0,
+ "node_node01_pods_phase_running": 2,
+ "node_node01_pods_phase_succeeded": 0,
+ "node_node01_pods_readiness": 100000,
+ "node_node01_pods_readiness_ready": 2,
+ "node_node01_pods_readiness_unready": 0,
+ "pod_default_pod01_age": 4,
+ "pod_default_pod01_cpu_limits_used": 400,
+ "pod_default_pod01_cpu_requests_used": 200,
+ "pod_default_pod01_mem_limits_used": 419430400,
+ "pod_default_pod01_mem_requests_used": 209715200,
+ "pod_default_pod01_cond_containersready": 1,
+ "pod_default_pod01_cond_podinitialized": 1,
+ "pod_default_pod01_cond_podready": 1,
+ "pod_default_pod01_cond_podscheduled": 1,
+ "pod_default_pod01_container_container1_readiness": 1,
+ "pod_default_pod01_container_container1_restarts": 0,
+ "pod_default_pod01_container_container1_state_running": 1,
+ "pod_default_pod01_container_container1_state_terminated": 0,
+ "pod_default_pod01_container_container1_state_waiting": 0,
+ "pod_default_pod01_container_container2_readiness": 1,
+ "pod_default_pod01_container_container2_restarts": 0,
+ "pod_default_pod01_container_container2_state_running": 1,
+ "pod_default_pod01_container_container2_state_terminated": 0,
+ "pod_default_pod01_container_container2_state_waiting": 0,
+ "pod_default_pod01_containers": 2,
+ "pod_default_pod01_containers_state_running": 2,
+ "pod_default_pod01_containers_state_terminated": 0,
+ "pod_default_pod01_containers_state_waiting": 0,
+ "pod_default_pod01_init_containers": 1,
+ "pod_default_pod01_init_containers_state_running": 0,
+ "pod_default_pod01_init_containers_state_terminated": 1,
+ "pod_default_pod01_init_containers_state_waiting": 0,
+ "pod_default_pod01_phase_failed": 0,
+ "pod_default_pod01_phase_pending": 0,
+ "pod_default_pod01_phase_running": 1,
+ "pod_default_pod01_phase_succeeded": 0,
+ "pod_default_pod02_age": 4,
+ "pod_default_pod02_cpu_limits_used": 400,
+ "pod_default_pod02_cpu_requests_used": 200,
+ "pod_default_pod02_mem_limits_used": 419430400,
+ "pod_default_pod02_mem_requests_used": 209715200,
+ "pod_default_pod02_cond_containersready": 1,
+ "pod_default_pod02_cond_podinitialized": 1,
+ "pod_default_pod02_cond_podready": 1,
+ "pod_default_pod02_cond_podscheduled": 1,
+ "pod_default_pod02_container_container1_readiness": 1,
+ "pod_default_pod02_container_container1_restarts": 0,
+ "pod_default_pod02_container_container1_state_running": 1,
+ "pod_default_pod02_container_container1_state_terminated": 0,
+ "pod_default_pod02_container_container1_state_waiting": 0,
+ "pod_default_pod02_container_container2_readiness": 1,
+ "pod_default_pod02_container_container2_restarts": 0,
+ "pod_default_pod02_container_container2_state_running": 1,
+ "pod_default_pod02_container_container2_state_terminated": 0,
+ "pod_default_pod02_container_container2_state_waiting": 0,
+ "pod_default_pod02_containers": 2,
+ "pod_default_pod02_containers_state_running": 2,
+ "pod_default_pod02_containers_state_terminated": 0,
+ "pod_default_pod02_containers_state_waiting": 0,
+ "pod_default_pod02_init_containers": 1,
+ "pod_default_pod02_init_containers_state_running": 0,
+ "pod_default_pod02_init_containers_state_terminated": 1,
+ "pod_default_pod02_init_containers_state_waiting": 0,
+ "pod_default_pod02_phase_failed": 0,
+ "pod_default_pod02_phase_pending": 0,
+ "pod_default_pod02_phase_running": 1,
+ "pod_default_pod02_phase_succeeded": 0,
+ }
+ copyAge(expected, mx)
+
+ assert.Equal(t, expected, mx)
+ assert.Equal(t,
+ len(nodeChartsTmpl)+
+ len(podChartsTmpl)*2+
+ len(containerChartsTmpl)*len(pod1.Spec.Containers)+
+ len(containerChartsTmpl)*len(pod2.Spec.Containers)+
+ len(baseCharts),
+ len(*ks.Charts()),
+ )
+ }
+
+ return testCase{
+ client: client,
+ steps: []testCaseStep{step1, step2},
+ }
+ },
+ },
+ }
+
+ for name, creator := range tests {
+ t.Run(name, func(t *testing.T) {
+ test := creator.create(t)
+
+ ks := New()
+ ks.newKubeClient = func() (kubernetes.Interface, error) { return test.client, nil }
+
+ require.NoError(t, ks.Init())
+ require.NoError(t, ks.Check())
+ defer ks.Cleanup()
+
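+ // the first Collect only starts discovery, so wait initDelay before executing the step;
+ // subsequent steps give the informers an extra second to catch up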
+ for i, executeStep := range test.steps {
+ if i == 0 {
+ _ = ks.Collect()
+ time.Sleep(ks.initDelay)
+ } else {
+ time.Sleep(time.Second)
+ }
+ executeStep(t, ks)
+ }
+ })
+ }
+}
+
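+// newNode returns a Node fixture with 3500m CPU and 3500Mi memory allocatable and capacity for 110 pods.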
+func newNode(name string) *corev1.Node {
+ return &corev1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ CreationTimestamp: metav1.Time{Time: time.Now()},
+ },
+ Status: corev1.NodeStatus{
+ Capacity: corev1.ResourceList{
+ corev1.ResourceCPU: mustQuantity("4000m"),
+ corev1.ResourceMemory: mustQuantity("4000Mi"),
+ "pods": mustQuantity("110"),
+ },
+ Allocatable: corev1.ResourceList{
+ corev1.ResourceCPU: mustQuantity("3500m"),
+ corev1.ResourceMemory: mustQuantity("3500Mi"),
+ "pods": mustQuantity("110"),
+ },
+ Conditions: []corev1.NodeCondition{
+ {Type: corev1.NodeReady, Status: corev1.ConditionTrue},
+ {Type: corev1.NodeMemoryPressure, Status: corev1.ConditionFalse},
+ {Type: corev1.NodeDiskPressure, Status: corev1.ConditionFalse},
+ {Type: corev1.NodePIDPressure, Status: corev1.ConditionFalse},
+ {Type: corev1.NodeNetworkUnavailable, Status: corev1.ConditionFalse},
+ },
+ },
+ }
+}
+
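+// newPod returns a running Pod fixture with two ready containers (100m/100Mi requests, 200m/200Mi limits each)
+// and one terminated init container.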
+func newPod(nodeName, name string) *corev1.Pod {
+ return &corev1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: corev1.NamespaceDefault,
+ CreationTimestamp: metav1.Time{Time: time.Now()},
+ },
+ Spec: corev1.PodSpec{
+ NodeName: nodeName,
+ InitContainers: []corev1.Container{
+ {
+ Name: "init-container1",
+ Resources: corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: mustQuantity("50m"),
+ corev1.ResourceMemory: mustQuantity("50Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: mustQuantity("10m"),
+ corev1.ResourceMemory: mustQuantity("10Mi"),
+ },
+ },
+ },
+ },
+ Containers: []corev1.Container{
+ {
+ Name: "container1",
+ Resources: corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: mustQuantity("200m"),
+ corev1.ResourceMemory: mustQuantity("200Mi"),
+ },
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: mustQuantity("100m"),
+ corev1.ResourceMemory: mustQuantity("100Mi"),
+ },
+ },
+ },
+ {
+ Name: "container2",
+ Resources: corev1.ResourceRequirements{
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: mustQuantity("200m"),
+ corev1.ResourceMemory: mustQuantity("200Mi")},
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: mustQuantity("100m"),
+ corev1.ResourceMemory: mustQuantity("100Mi"),
+ },
+ },
+ },
+ },
+ },
+ Status: corev1.PodStatus{
+ Phase: corev1.PodRunning,
+ Conditions: []corev1.PodCondition{
+ {Type: corev1.PodReady, Status: corev1.ConditionTrue},
+ {Type: corev1.PodScheduled, Status: corev1.ConditionTrue},
+ {Type: corev1.PodInitialized, Status: corev1.ConditionTrue},
+ {Type: corev1.ContainersReady, Status: corev1.ConditionTrue},
+ },
+ InitContainerStatuses: []corev1.ContainerStatus{
+ {
+ Name: "init-container1",
+ State: corev1.ContainerState{Terminated: &corev1.ContainerStateTerminated{}},
+ },
+ },
+ ContainerStatuses: []corev1.ContainerStatus{
+ {
+ Name: "container1",
+ Ready: true,
+ State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}},
+ },
+ {
+ Name: "container2",
+ Ready: true,
+ State: corev1.ContainerState{Running: &corev1.ContainerStateRunning{}},
+ },
+ },
+ },
+ }
+}
+
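+// brokenInfoKubeClient wraps a client whose Discovery().ServerVersion() always fails,
+// simulating an unreachable Kubernetes API server.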
+type brokenInfoKubeClient struct {
+ kubernetes.Interface
+}
+
+func (kc *brokenInfoKubeClient) Discovery() discovery.DiscoveryInterface {
+ return &brokenInfoDiscovery{kc.Interface.Discovery()}
+}
+
+type brokenInfoDiscovery struct {
+ discovery.DiscoveryInterface
+}
+
+func (d *brokenInfoDiscovery) ServerVersion() (*version.Info, error) {
+ return nil, errors.New("brokenInfoDiscovery.ServerVersion() error")
+}
+
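+// calcObsoleteCharts counts the charts that have been marked obsolete.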
+func calcObsoleteCharts(charts module.Charts) (num int) {
+ for _, c := range charts {
+ if c.Obsolete {
+ num++
+ }
+ }
+ return num
+}
+
+func mustQuantity(s string) apiresource.Quantity {
+ q, err := apiresource.ParseQuantity(s)
+ if err != nil {
+ panic(fmt.Sprintf("failed to create resource quantity: %v", err))
+ }
+ return q
+}
+
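+// copyAge copies the non-deterministic *_age values from the collected metrics into the expected map
+// so the comparison stays stable.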
+func copyAge(dst, src map[string]int64) {
+ for k, v := range src {
+ if !strings.HasSuffix(k, "_age") {
+ continue
+ }
+ if _, ok := dst[k]; ok {
+ dst[k] = v
+ }
+ }
+}
+
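+// isLabelValueSet reports whether the chart has a non-empty value for the given label key.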
+func isLabelValueSet(c *module.Chart, name string) bool {
+ for _, l := range c.Labels {
+ if l.Key == name {
+ return l.Value != ""
+ }
+ }
+ return false
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/metadata.yaml b/src/go/plugin/go.d/modules/k8s_state/metadata.yaml
new file mode 100644
index 000000000..7617b297f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/metadata.yaml
@@ -0,0 +1,356 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-k8s_state
+ plugin_name: go.d.plugin
+ module_name: k8s_state
+ monitored_instance:
+ name: Kubernetes Cluster State
+ link: https://kubernetes.io/
+ icon_filename: kubernetes.svg
+ categories:
+ - data-collection.kubernetes
+ keywords:
+ - kubernetes
+ - k8s
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Kubernetes Nodes, Pods and Containers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/k8s_state.conf
+ options:
+ description: ""
+ folding:
+ title: Config options
+ enabled: true
+ list: []
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: node
+ description: These metrics refer to the Node.
+ labels:
+ - name: k8s_cluster_id
+ description: Cluster ID. This is equal to the kube-system namespace UID.
+ - name: k8s_cluster_name
+ description: Cluster name. Cluster name discovery only works in GKE.
+ - name: k8s_node_name
+ description: Node name.
+ metrics:
+ - name: k8s_state.node_allocatable_cpu_requests_utilization
+ description: CPU requests utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: k8s_state.node_allocatable_cpu_requests_used
+ description: CPU requests used
+ unit: millicpu
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: k8s_state.node_allocatable_cpu_limits_utilization
+ description: CPU limits utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: limits
+ - name: k8s_state.node_allocatable_cpu_limits_used
+ description: CPU limits used
+ unit: millicpu
+ chart_type: line
+ dimensions:
+ - name: limits
+ - name: k8s_state.node_allocatable_mem_requests_utilization
+ description: Memory requests utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: k8s_state.node_allocatable_mem_requests_used
+ description: Memory requests used
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: k8s_state.node_allocatable_mem_limits_utilization
+ description: Memory limits utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: limits
+ - name: k8s_state.node_allocatable_mem_limits_used
+ description: Memory limits used
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: limits
+ - name: k8s_state.node_allocatable_pods_utilization
+ description: Pods resource utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: k8s_state.node_allocatable_pods_usage
+ description: Pods resource usage
+ unit: pods
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: allocated
+ - name: k8s_state.node_condition
+ description: Condition status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: a dimension per condition
+ - name: k8s_state.node_schedulability
+ description: Schedulability
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: schedulable
+ - name: unschedulable
+ - name: k8s_state.node_pods_readiness
+ description: Pods readiness
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: ready
+ - name: k8s_state.node_pods_readiness_state
+ description: Pods readiness state
+ unit: pods
+ chart_type: line
+ dimensions:
+ - name: ready
+ - name: unready
+ - name: k8s_state.node_pods_condition
+ description: Pods condition
+ unit: pods
+ chart_type: line
+ dimensions:
+ - name: pod_ready
+ - name: pod_scheduled
+ - name: pod_initialized
+ - name: containers_ready
+ - name: k8s_state.node_pods_phase
+ description: Pods phase
+ unit: pods
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: failed
+ - name: succeeded
+ - name: pending
+ - name: k8s_state.node_containers
+ description: Containers
+ unit: containers
+ chart_type: line
+ dimensions:
+ - name: containers
+ - name: init_containers
+ - name: k8s_state.node_containers_state
+ description: Containers state
+ unit: containers
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: waiting
+ - name: terminated
+ - name: k8s_state.node_init_containers_state
+ description: Init containers state
+ unit: containers
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: waiting
+ - name: terminated
+ - name: k8s_state.node_age
+ description: Age
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: age
+ - name: pod
+ description: These metrics refer to the Pod.
+ labels:
+ - name: k8s_cluster_id
+ description: Cluster ID. This is equal to the kube-system namespace UID.
+ - name: k8s_cluster_name
+ description: Cluster name. Cluster name discovery only works in GKE.
+ - name: k8s_node_name
+ description: Node name.
+ - name: k8s_namespace
+ description: Namespace.
+ - name: k8s_controller_kind
+ description: Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.).
+ - name: k8s_controller_name
+ description: Controller name.
+ - name: k8s_pod_name
+ description: Pod name.
+ - name: k8s_qos_class
+ description: Pod QOS class (burstable, guaranteed, besteffort).
+ metrics:
+ - name: k8s_state.pod_cpu_requests_used
+ description: CPU requests used
+ unit: millicpu
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: k8s_state.pod_cpu_limits_used
+ description: CPU limits used
+ unit: millicpu
+ chart_type: line
+ dimensions:
+ - name: limits
+ - name: k8s_state.pod_mem_requests_used
+ description: Memory requests used
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: k8s_state.pod_mem_limits_used
+ description: Memory limits used
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: limits
+ - name: k8s_state.pod_condition
+ description: Condition
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: pod_ready
+ - name: pod_scheduled
+ - name: pod_initialized
+ - name: containers_ready
+ - name: k8s_state.pod_phase
+ description: Phase
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: running
+ - name: failed
+ - name: succeeded
+ - name: pending
+ - name: k8s_state.pod_age
+ description: Age
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: age
+ - name: k8s_state.pod_containers
+ description: Containers
+ unit: containers
+ chart_type: line
+ dimensions:
+ - name: containers
+ - name: init_containers
+ - name: k8s_state.pod_containers_state
+ description: Containers state
+ unit: containers
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: waiting
+ - name: terminated
+ - name: k8s_state.pod_init_containers_state
+ description: Init containers state
+ unit: containers
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: waiting
+ - name: terminated
+ - name: container
+ description: These metrics refer to the Pod container.
+ labels:
+ - name: k8s_cluster_id
+ description: Cluster ID. This is equal to the kube-system namespace UID.
+ - name: k8s_cluster_name
+ description: Cluster name. Cluster name discovery only works in GKE.
+ - name: k8s_node_name
+ description: Node name.
+ - name: k8s_namespace
+ description: Namespace.
+ - name: k8s_controller_kind
+ description: Controller kind (ReplicaSet, DaemonSet, StatefulSet, Job, etc.).
+ - name: k8s_controller_name
+ description: Controller name.
+ - name: k8s_pod_name
+ description: Pod name.
+ - name: k8s_qos_class
+ description: Pod QOS class (burstable, guaranteed, besteffort).
+ - name: k8s_container_name
+ description: Container name.
+ metrics:
+ - name: k8s_state.pod_container_readiness_state
+ description: Readiness state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: ready
+ - name: k8s_state.pod_container_restarts
+ description: Restarts
+ unit: restarts
+ chart_type: line
+ dimensions:
+ - name: restarts
+ - name: k8s_state.pod_container_state
+ description: Container state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: running
+ - name: waiting
+ - name: terminated
+ - name: k8s_state.pod_container_waiting_state_reason
+ description: Container waiting state reason
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: a dimension per reason
+ - name: k8s_state.pod_container_terminated_state_reason
+ description: Container terminated state reason
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: a dimension per reason
diff --git a/src/go/plugin/go.d/modules/k8s_state/resource.go b/src/go/plugin/go.d/modules/k8s_state/resource.go
new file mode 100644
index 000000000..cabd41a67
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/resource.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+type resource interface {
+ source() string
+ kind() kubeResourceKind
+ value() interface{}
+}
+
+type kubeResourceKind uint8
+
+const (
+ kubeResourceNode kubeResourceKind = iota + 1
+ kubeResourcePod
+)
+
+func toNode(i interface{}) (*corev1.Node, error) {
+ switch v := i.(type) {
+ case *corev1.Node:
+ return v, nil
+ case resource:
+ return toNode(v.value())
+ default:
+ return nil, fmt.Errorf("unexpected type: %T (expected %T or %T)", v, &corev1.Node{}, resource(nil))
+ }
+}
+
+func toPod(i interface{}) (*corev1.Pod, error) {
+ switch v := i.(type) {
+ case *corev1.Pod:
+ return v, nil
+ case resource:
+ return toPod(v.value())
+ default:
+ return nil, fmt.Errorf("unexpected type: %T (expected %T or %T)", v, &corev1.Pod{}, resource(nil))
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/state.go b/src/go/plugin/go.d/modules/k8s_state/state.go
new file mode 100644
index 000000000..72bac88ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/state.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "sync"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+func newKubeState() *kubeState {
+ return &kubeState{
+ Mutex: &sync.Mutex{},
+ nodes: make(map[string]*nodeState),
+ pods: make(map[string]*podState),
+ }
+}
+
+func newNodeState() *nodeState {
+ return &nodeState{
+ new: true,
+ labels: make(map[string]string),
+ conditions: make(map[string]*nodeStateCondition),
+ }
+}
+
+func newPodState() *podState {
+ return &podState{
+ new: true,
+ labels: make(map[string]string),
+ initContainers: make(map[string]*containerState),
+ containers: make(map[string]*containerState),
+ }
+}
+
+func newContainerState() *containerState {
+ return &containerState{
+ new: true,
+ stateWaitingReasons: make(map[string]*containerStateReason),
+ stateTerminatedReasons: make(map[string]*containerStateReason),
+ }
+}
+
+type kubeState struct {
+ *sync.Mutex
+ nodes map[string]*nodeState
+ pods map[string]*podState
+}
+
+type (
+ nodeState struct {
+ new bool
+ deleted bool
+
+ name string
+ unSchedulable bool
+ labels map[string]string
+ creationTime time.Time
+ allocatableCPU int64
+ allocatableMem int64
+ allocatablePods int64
+ conditions map[string]*nodeStateCondition
+
+ stats nodeStateStats
+ }
+ nodeStateCondition struct {
+ new bool
+ // https://kubernetes.io/docs/concepts/architecture/nodes/#condition
+ //typ corev1.NodeConditionType
+ status corev1.ConditionStatus
+ }
+ nodeStateStats struct {
+ reqCPU int64
+ limitCPU int64
+ reqMem int64
+ limitMem int64
+ pods int64
+
+ podsCondPodReady int64
+ podsCondPodScheduled int64
+ podsCondPodInitialized int64
+ podsCondContainersReady int64
+
+ podsReadinessReady int64
+ podsReadinessUnready int64
+
+ podsPhaseRunning int64
+ podsPhaseFailed int64
+ podsPhaseSucceeded int64
+ podsPhasePending int64
+
+ containers int64
+ initContainers int64
+ initContStateRunning int64
+ initContStateWaiting int64
+ initContStateTerminated int64
+ contStateRunning int64
+ contStateWaiting int64
+ contStateTerminated int64
+ }
+)
+
+func (ns *nodeState) id() string { return ns.name }
+func (ns *nodeState) resetStats() { ns.stats = nodeStateStats{} }
+
+type (
+ podState struct {
+ new bool
+ deleted bool
+ unscheduled bool
+
+ name string
+ nodeName string
+ namespace string
+ uid string
+ labels map[string]string
+ controllerKind string
+ controllerName string
+ qosClass string
+ creationTime time.Time
+ reqCPU int64
+ reqMem int64
+ limitCPU int64
+ limitMem int64
+ // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-conditions
+ condPodScheduled corev1.ConditionStatus
+ condContainersReady corev1.ConditionStatus
+ condPodInitialized corev1.ConditionStatus
+ condPodReady corev1.ConditionStatus
+ // https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase
+ phase corev1.PodPhase
+
+ initContainers map[string]*containerState
+ containers map[string]*containerState
+ }
+)
+
+func (ps podState) id() string { return ps.namespace + "_" + ps.name }
+
+type (
+ containerState struct {
+ new bool
+
+ name string
+ uid string
+
+ podName string
+ nodeName string
+ namespace string
+
+ ready bool
+ restarts int64
+ stateRunning bool
+ stateWaiting bool
+ stateTerminated bool
+ stateWaitingReasons map[string]*containerStateReason
+ stateTerminatedReasons map[string]*containerStateReason
+ }
+ containerStateReason struct {
+ new bool
+ reason string
+ active bool
+ }
+)
diff --git a/src/go/plugin/go.d/modules/k8s_state/testdata/config.json b/src/go/plugin/go.d/modules/k8s_state/testdata/config.json
new file mode 100644
index 000000000..0e3f7c403
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/testdata/config.json
@@ -0,0 +1,3 @@
+{
+ "update_every": 123
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/testdata/config.yaml b/src/go/plugin/go.d/modules/k8s_state/testdata/config.yaml
new file mode 100644
index 000000000..f21a3a7a0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/testdata/config.yaml
@@ -0,0 +1 @@
+update_every: 123
diff --git a/src/go/plugin/go.d/modules/k8s_state/update_node_state.go b/src/go/plugin/go.d/modules/k8s_state/update_node_state.go
new file mode 100644
index 000000000..80f5c26c8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/update_node_state.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+func (ks *KubeState) updateNodeState(r resource) {
+ if r.value() == nil {
+ if ns, ok := ks.state.nodes[r.source()]; ok {
+ ns.deleted = true
+ }
+ return
+ }
+
+ node, err := toNode(r)
+ if err != nil {
+ ks.Warning(err)
+ return
+ }
+
+ if myNodeName != "" && node.Name != myNodeName {
+ return
+ }
+
+ ns, ok := ks.state.nodes[r.source()]
+ if !ok {
+ ns = newNodeState()
+ ks.state.nodes[r.source()] = ns
+ }
+
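+ // fill in static node metadata only the first time the node is seen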
+ if !ok {
+ ns.name = node.Name
+ ns.creationTime = node.CreationTimestamp.Time
+ ns.allocatableCPU = int64(node.Status.Allocatable.Cpu().AsApproximateFloat64() * 1000)
+ ns.allocatableMem = node.Status.Allocatable.Memory().Value()
+ ns.allocatablePods = node.Status.Allocatable.Pods().Value()
+ copyLabels(ns.labels, node.Labels)
+ }
+
+ ns.unSchedulable = node.Spec.Unschedulable
+
+ for _, c := range node.Status.Conditions {
+ if v, ok := ns.conditions[string(c.Type)]; !ok {
+ ns.conditions[string(c.Type)] = &nodeStateCondition{new: true, status: c.Status}
+ } else {
+ v.status = c.Status
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/update_pod_state.go b/src/go/plugin/go.d/modules/k8s_state/update_pod_state.go
new file mode 100644
index 000000000..16b0f433b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/update_pod_state.go
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
+import (
+ "strings"
+
+ corev1 "k8s.io/api/core/v1"
+)
+
+func (ks *KubeState) updatePodState(r resource) {
+ if r.value() == nil {
+ if ps, ok := ks.state.pods[r.source()]; ok {
+ ps.deleted = true
+ }
+ return
+ }
+
+ pod, err := toPod(r)
+ if err != nil {
+ ks.Warning(err)
+ return
+ }
+
+ ps, ok := ks.state.pods[r.source()]
+ if !ok {
+ ps = newPodState()
+ ks.state.pods[r.source()] = ps
+ }
+
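+ // fill in immutable pod metadata and resource totals only the first time the pod is seen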
+ if !ok {
+ ps.name = pod.Name
+ ps.nodeName = pod.Spec.NodeName
+ ps.namespace = pod.Namespace
+ ps.creationTime = pod.CreationTimestamp.Time
+ ps.uid = string(pod.UID)
+ ps.qosClass = strings.ToLower(string(pod.Status.QOSClass))
+ copyLabels(ps.labels, pod.Labels)
+ for _, ref := range pod.OwnerReferences {
+ if ref.Controller != nil && *ref.Controller {
+ ps.controllerKind = ref.Kind
+ ps.controllerName = ref.Name
+ }
+ }
+ var res struct{ rCPU, lCPU, rMem, lMem, irCPU, ilCPU, irMem, ilMem int64 }
+ for _, cntr := range pod.Spec.Containers {
+ res.rCPU += int64(cntr.Resources.Requests.Cpu().AsApproximateFloat64() * 1000)
+ res.lCPU += int64(cntr.Resources.Limits.Cpu().AsApproximateFloat64() * 1000)
+ res.rMem += cntr.Resources.Requests.Memory().Value()
+ res.lMem += cntr.Resources.Limits.Memory().Value()
+ }
+ for _, cntr := range pod.Spec.InitContainers {
+ res.irCPU += int64(cntr.Resources.Requests.Cpu().AsApproximateFloat64() * 1000)
+ res.ilCPU += int64(cntr.Resources.Limits.Cpu().AsApproximateFloat64() * 1000)
+ res.irMem += cntr.Resources.Requests.Memory().Value()
+ res.ilMem += cntr.Resources.Limits.Memory().Value()
+ }
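+ // use the larger of the app container totals and the init container totals as the pod's requests/limits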
+ ps.reqCPU = max(res.rCPU, res.irCPU)
+ ps.limitCPU = max(res.lCPU, res.ilCPU)
+ ps.reqMem = max(res.rMem, res.irMem)
+ ps.limitMem = max(res.lMem, res.ilMem)
+ }
+ if ps.nodeName == "" {
+ ps.nodeName = pod.Spec.NodeName
+ }
+
+ for _, c := range pod.Status.Conditions {
+ switch c.Type {
+ case corev1.ContainersReady:
+ ps.condContainersReady = c.Status
+ case corev1.PodInitialized:
+ ps.condPodInitialized = c.Status
+ case corev1.PodReady:
+ ps.condPodReady = c.Status
+ case corev1.PodScheduled:
+ ps.condPodScheduled = c.Status
+ }
+ }
+
+ ps.phase = pod.Status.Phase
+
+ for _, cs := range ps.containers {
+ for _, r := range cs.stateWaitingReasons {
+ r.active = false
+ }
+ for _, r := range cs.stateTerminatedReasons {
+ r.active = false
+ }
+ }
+
+ for _, cntr := range pod.Status.ContainerStatuses {
+ cs, ok := ps.containers[cntr.Name]
+ if !ok {
+ cs = newContainerState()
+ ps.containers[cntr.Name] = cs
+ }
+ if !ok {
+ cs.name = cntr.Name
+ cs.podName = pod.Name
+ cs.namespace = pod.Namespace
+ cs.nodeName = pod.Spec.NodeName
+ cs.uid = extractContainerID(cntr.ContainerID)
+ }
+ cs.ready = cntr.Ready
+ cs.restarts = int64(cntr.RestartCount)
+ cs.stateRunning = cntr.State.Running != nil
+ cs.stateWaiting = cntr.State.Waiting != nil
+ cs.stateTerminated = cntr.State.Terminated != nil
+
+ if cntr.State.Waiting != nil {
+ reason := cntr.State.Waiting.Reason
+ r, ok := cs.stateWaitingReasons[reason]
+ if !ok {
+ r = &containerStateReason{new: true, reason: reason}
+ cs.stateWaitingReasons[reason] = r
+ }
+ r.active = true
+ }
+
+ if cntr.State.Terminated != nil {
+ reason := cntr.State.Terminated.Reason
+ r, ok := cs.stateTerminatedReasons[reason]
+ if !ok {
+ r = &containerStateReason{new: true, reason: reason}
+ cs.stateTerminatedReasons[reason] = r
+ }
+ r.active = true
+ }
+ }
+
+ for _, cntr := range pod.Status.InitContainerStatuses {
+ cs, ok := ps.initContainers[cntr.Name]
+ if !ok {
+ cs = newContainerState()
+ ps.initContainers[cntr.Name] = cs
+ }
+ if !ok {
+ cs.name = cntr.Name
+ cs.podName = pod.Name
+ cs.namespace = pod.Namespace
+ cs.nodeName = pod.Spec.NodeName
+ cs.uid = extractContainerID(cntr.ContainerID)
+ }
+ cs.ready = cntr.Ready
+ cs.restarts = int64(cntr.RestartCount)
+ cs.stateRunning = cntr.State.Running != nil
+ cs.stateWaiting = cntr.State.Waiting != nil
+ cs.stateTerminated = cntr.State.Terminated != nil
+ }
+}
+
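+// extractContainerID strips the runtime prefix (e.g. "docker://") and returns the bare container ID.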
+func extractContainerID(id string) string {
+ // docker://d98...
+ if i := strings.LastIndexByte(id, '/'); i != -1 {
+ id = id[i+1:]
+ }
+ return id
+}
diff --git a/src/go/plugin/go.d/modules/k8s_state/update_state.go b/src/go/plugin/go.d/modules/k8s_state/update_state.go
new file mode 100644
index 000000000..88f3272c1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/k8s_state/update_state.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_state
+
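+// runUpdateState applies discovered Node and Pod resources to the shared state, holding the state lock,
+// until the context is cancelled.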
+func (ks *KubeState) runUpdateState(in <-chan resource) {
+ for {
+ select {
+ case <-ks.ctx.Done():
+ return
+ case r := <-in:
+ ks.state.Lock()
+ switch r.kind() {
+ case kubeResourceNode:
+ ks.updateNodeState(r)
+ case kubeResourcePod:
+ ks.updatePodState(r)
+ }
+ ks.state.Unlock()
+ }
+ }
+}
+
+func copyLabels(dst, src map[string]string) {
+ for k, v := range src {
+ dst[k] = v
+ }
+}
diff --git a/src/go/plugin/go.d/modules/lighttpd/README.md b/src/go/plugin/go.d/modules/lighttpd/README.md
new file mode 120000
index 000000000..b0d3613bf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/README.md
@@ -0,0 +1 @@
+integrations/lighttpd.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/lighttpd/apiclient.go b/src/go/plugin/go.d/modules/lighttpd/apiclient.go
new file mode 100644
index 000000000..1686272cd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/apiclient.go
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ busyWorkers = "BusyWorkers"
+ idleWorkers = "IdleWorkers"
+
+ busyServers = "BusyServers"
+ idleServers = "IdleServers"
+ totalAccesses = "Total Accesses"
+ totalkBytes = "Total kBytes"
+ uptime = "Uptime"
+ scoreBoard = "Scoreboard"
+)
+
+func newAPIClient(client *http.Client, request web.Request) *apiClient {
+ return &apiClient{httpClient: client, request: request}
+}
+
+type apiClient struct {
+ httpClient *http.Client
+ request web.Request
+}
+
+func (a apiClient) getServerStatus() (*serverStatus, error) {
+ req, err := web.NewHTTPRequest(a.request)
+
+ if err != nil {
+ return nil, fmt.Errorf("error on creating request : %v", err)
+ }
+
+ resp, err := a.doRequestOK(req)
+
+ defer closeBody(resp)
+
+ if err != nil {
+ return nil, err
+ }
+
+ status, err := parseResponse(resp.Body)
+
+ if err != nil {
+ return nil, fmt.Errorf("error on parsing response from %s : %v", req.URL, err)
+ }
+
+ return status, nil
+}
+
+func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
+ resp, err := a.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on request : %v", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+ }
+ return resp, nil
+}
+
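+// parseResponse parses the machine-readable ("?auto") status output, one "key: value" pair per line.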
+func parseResponse(r io.Reader) (*serverStatus, error) {
+ s := bufio.NewScanner(r)
+ var status serverStatus
+
+ for s.Scan() {
+ parts := strings.Split(s.Text(), ":")
+ if len(parts) != 2 {
+ continue
+ }
+ key, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
+
+ switch key {
+ default:
+ case busyWorkers, idleWorkers:
+ return nil, fmt.Errorf("found '%s', looks like Apache data", key)
+ case busyServers:
+ status.Servers.Busy = mustParseInt(value)
+ case idleServers:
+ status.Servers.Idle = mustParseInt(value)
+ case totalAccesses:
+ status.Total.Accesses = mustParseInt(value)
+ case totalkBytes:
+ status.Total.KBytes = mustParseInt(value)
+ case uptime:
+ status.Uptime = mustParseInt(value)
+ case scoreBoard:
+ status.Scoreboard = parseScoreboard(value)
+ }
+ }
+
+ return &status, nil
+}
+
+func parseScoreboard(value string) *scoreboard {
+ // Descriptions from https://blog.serverdensity.com/monitor-lighttpd/
+ //
+ // “.” = Opening the TCP connection (connect)
+ // “C” = Closing the TCP connection if no other HTTP request will use it (close)
+ // “E” = hard error
+ // “k” = Keeping the TCP connection open for more HTTP requests from the same client to avoid the TCP handling overhead (keep-alive)
+ // “r” = Read the content of the HTTP request (read)
+ // “R” = Read the content of the HTTP request (read-POST)
+ // “W” = Write the HTTP response to the socket (write)
+ // “h” = Decide action to take with the request (handle-request)
+ // “q” = Start of HTTP request (request-start)
+ // “Q” = End of HTTP request (request-end)
+ // “s” = Start of the HTTP request response (response-start)
+ // “S” = End of the HTTP request response (response-end)
+ // “_” = Waiting for connection (NOTE: not sure, copied the description from the Apache scoreboard)
+
+ var sb scoreboard
+ for _, s := range strings.Split(value, "") {
+ switch s {
+ case "_":
+ sb.Waiting++
+ case ".":
+ sb.Open++
+ case "C":
+ sb.Close++
+ case "E":
+ sb.HardError++
+ case "k":
+ sb.KeepAlive++
+ case "r":
+ sb.Read++
+ case "R":
+ sb.ReadPost++
+ case "W":
+ sb.Write++
+ case "h":
+ sb.HandleRequest++
+ case "q":
+ sb.RequestStart++
+ case "Q":
+ sb.RequestEnd++
+ case "s":
+ sb.ResponseStart++
+ case "S":
+ sb.ResponseEnd++
+ }
+ }
+
+ return &sb
+}
+
+func mustParseInt(value string) *int64 {
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ return &v
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/lighttpd/charts.go b/src/go/plugin/go.d/modules/lighttpd/charts.go
new file mode 100644
index 000000000..4780384c8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/charts.go
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+)
+
+var charts = Charts{
+ {
+ ID: "requests",
+ Title: "Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "lighttpd.requests",
+ Dims: Dims{
+ {ID: "total_accesses", Name: "requests", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "net",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "bandwidth",
+ Ctx: "lighttpd.net",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "total_kBytes", Name: "sent", Algo: module.Incremental, Mul: 8},
+ },
+ },
+ {
+ ID: "servers",
+ Title: "Servers",
+ Units: "servers",
+ Fam: "servers",
+ Ctx: "lighttpd.workers",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "idle_servers", Name: "idle"},
+ {ID: "busy_servers", Name: "busy"},
+ },
+ },
+ {
+ ID: "scoreboard",
+ Title: "ScoreBoard",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "lighttpd.scoreboard",
+ Dims: Dims{
+ {ID: "scoreboard_waiting", Name: "waiting"},
+ {ID: "scoreboard_open", Name: "open"},
+ {ID: "scoreboard_close", Name: "close"},
+ {ID: "scoreboard_hard_error", Name: "hard error"},
+ {ID: "scoreboard_keepalive", Name: "keepalive"},
+ {ID: "scoreboard_read", Name: "read"},
+ {ID: "scoreboard_read_post", Name: "read post"},
+ {ID: "scoreboard_write", Name: "write"},
+ {ID: "scoreboard_handle_request", Name: "handle request"},
+ {ID: "scoreboard_request_start", Name: "request start"},
+ {ID: "scoreboard_request_end", Name: "request end"},
+ {ID: "scoreboard_response_start", Name: "response start"},
+ {ID: "scoreboard_response_end", Name: "response end"},
+ },
+ },
+ {
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "lighttpd.uptime",
+ Dims: Dims{
+ {ID: "uptime"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/lighttpd/collect.go b/src/go/plugin/go.d/modules/lighttpd/collect.go
new file mode 100644
index 000000000..84c88af45
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/collect.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+func (l *Lighttpd) collect() (map[string]int64, error) {
+ status, err := l.apiClient.getServerStatus()
+
+ if err != nil {
+ return nil, err
+ }
+
+ mx := stm.ToMap(status)
+
+ if len(mx) == 0 {
+ return nil, fmt.Errorf("nothing was collected from %s", l.URL)
+ }
+
+ return mx, nil
+}
diff --git a/src/go/plugin/go.d/modules/lighttpd/config_schema.json b/src/go/plugin/go.d/modules/lighttpd/config_schema.json
new file mode 100644
index 000000000..32700b3b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Lighttpd collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Lighttpd machine-readable [status page](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).",
+ "type": "string",
+ "default": "http://127.0.0.1/server-status?auto",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/lighttpd/init.go b/src/go/plugin/go.d/modules/lighttpd/init.go
new file mode 100644
index 000000000..0923262c3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/init.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (l *Lighttpd) validateConfig() error {
+ if l.URL == "" {
+ return errors.New("url not set")
+ }
+ if !strings.HasSuffix(l.URL, "?auto") {
+ return fmt.Errorf("bad URL '%s', should end in '?auto'", l.URL)
+ }
+ return nil
+}
+
+func (l *Lighttpd) initApiClient() (*apiClient, error) {
+ client, err := web.NewHTTPClient(l.Client)
+ if err != nil {
+ return nil, err
+ }
+ return newAPIClient(client, l.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/lighttpd/integrations/lighttpd.md b/src/go/plugin/go.d/modules/lighttpd/integrations/lighttpd.md
new file mode 100644
index 000000000..bcf434fc5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/integrations/lighttpd.md
@@ -0,0 +1,266 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/lighttpd/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/lighttpd/metadata.yaml"
+sidebar_label: "Lighttpd"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Lighttpd
+
+
+<img src="https://netdata.cloud/img/lighttpd.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: lighttpd
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.
+
+
+It sends HTTP requests to the Lighttpd [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status) location,
+a built-in endpoint that provides metrics about the Lighttpd server.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Lighttpd instances running on localhost that are listening on port 80.
+On startup, it tries to collect metrics from:
+
+- http://localhost/server-status?auto
+- http://127.0.0.1/server-status?auto
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Lighttpd instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| lighttpd.requests | requests | requests/s |
+| lighttpd.net | sent | kilobits/s |
+| lighttpd.workers | idle, busy | servers |
+| lighttpd.scoreboard | waiting, open, close, hard_error, keepalive, read, read_post, write, handle_request, request_start, request_end | connections |
+| lighttpd.uptime | uptime | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable Lighttpd status support
+
+To enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).
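+
+Once status support is enabled, you can verify that the machine-readable endpoint the collector relies on responds (a quick check assuming a default local setup; adjust the host and path to match your configuration):
+
+```bash
+curl http://127.0.0.1/server-status?auto
+```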
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/lighttpd.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/lighttpd.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/server-status?auto | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Lighttpd with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1/server-status?auto
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+
+ - name: remote
+ url: http://192.0.2.1/server-status?auto
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `lighttpd` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m lighttpd
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `lighttpd` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep lighttpd
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep lighttpd /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep lighttpd
+```
+
+
diff --git a/src/go/plugin/go.d/modules/lighttpd/lighttpd.go b/src/go/plugin/go.d/modules/lighttpd/lighttpd.go
new file mode 100644
index 000000000..1b17833e9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/lighttpd.go
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
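+// init registers the collector with the go.d module registry so the agent can create lighttpd jobs from go.d/lighttpd.conf.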
+func init() {
+ module.Register("lighttpd", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Lighttpd {
+ return &Lighttpd{Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1/server-status?auto",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ },
+ }}
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Lighttpd struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ apiClient *apiClient
+}
+
+func (l *Lighttpd) Configuration() any {
+ return l.Config
+}
+
+func (l *Lighttpd) Init() error {
+ if err := l.validateConfig(); err != nil {
+ l.Errorf("config validation: %v", err)
+ return err
+ }
+
+ client, err := l.initApiClient()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+ l.apiClient = client
+
+ l.Debugf("using URL %s", l.URL)
+ l.Debugf("using timeout: %s", l.Timeout.Duration())
+
+ return nil
+}
+
+func (l *Lighttpd) Check() error {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (l *Lighttpd) Charts() *Charts {
+ return charts.Copy()
+}
+
+func (l *Lighttpd) Collect() map[string]int64 {
+ mx, err := l.collect()
+
+ if err != nil {
+ l.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (l *Lighttpd) Cleanup() {
+ if l.apiClient != nil && l.apiClient.httpClient != nil {
+ l.apiClient.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/lighttpd/lighttpd_test.go b/src/go/plugin/go.d/modules/lighttpd/lighttpd_test.go
new file mode 100644
index 000000000..05c7504ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/lighttpd_test.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatusMetrics, _ = os.ReadFile("testdata/status.txt")
+ dataApacheStatusMetrics, _ = os.ReadFile("testdata/apache-status.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestLighttpd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Lighttpd{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestLighttpd_Cleanup(t *testing.T) { New().Cleanup() }
+
+func TestLighttpd_Init(t *testing.T) {
+ job := New()
+
+ require.NoError(t, job.Init())
+ assert.NotNil(t, job.apiClient)
+}
+
+func TestLighttpd_InitNG(t *testing.T) {
+ job := New()
+
+ job.URL = ""
+ assert.Error(t, job.Init())
+}
+
+func TestLighttpd_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/server-status?auto"
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+}
+
+func TestLighttpd_CheckNG(t *testing.T) {
+ job := New()
+
+ job.URL = "http://127.0.0.1:38001/server-status?auto"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestLighttpd_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
+
+func TestLighttpd_Collect(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/server-status?auto"
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "scoreboard_waiting": 125,
+ "scoreboard_request_end": 0,
+ "busy_servers": 3,
+ "scoreboard_keepalive": 1,
+ "scoreboard_read": 1,
+ "scoreboard_request_start": 0,
+ "scoreboard_response_start": 0,
+ "scoreboard_close": 0,
+ "scoreboard_open": 0,
+ "scoreboard_hard_error": 0,
+ "scoreboard_handle_request": 1,
+ "idle_servers": 125,
+ "total_kBytes": 4,
+ "uptime": 11,
+ "scoreboard_read_post": 0,
+ "scoreboard_write": 0,
+ "scoreboard_response_end": 0,
+ "total_accesses": 12,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestLighttpd_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/server-status?auto"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestLighttpd_ApacheData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataApacheStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/server-status?auto"
+ require.NoError(t, job.Init())
+ require.Error(t, job.Check())
+}
+
+func TestLighttpd_404(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/server-status?auto"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
diff --git a/src/go/plugin/go.d/modules/lighttpd/metadata.yaml b/src/go/plugin/go.d/modules/lighttpd/metadata.yaml
new file mode 100644
index 000000000..a90ac05ed
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/metadata.yaml
@@ -0,0 +1,231 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-lighttpd
+ plugin_name: go.d.plugin
+ module_name: lighttpd
+ monitored_instance:
+ name: Lighttpd
+ link: https://www.lighttpd.net/
+ icon_filename: lighttpd.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - webserver
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: go.d.plugin
+ module_name: weblog
+ - plugin_name: go.d.plugin
+ module_name: httpcheck
+ - plugin_name: apps.plugin
+ module_name: apps
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the activity and performance of Lighttpd servers, and collects metrics such as the number of connections, workers, requests and more.
+ method_description: |
+ It sends HTTP requests to the Lighttpd location [server-status](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status),
+ which is a built-in location that provides metrics about the Lighttpd server.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Lighttpd instances running on localhost that are listening on port 80.
+ On startup, it tries to collect metrics from:
+
+ - http://localhost/server-status?auto
+ - http://127.0.0.1/server-status?auto
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable Lighttpd status support
+ description: |
+ To enable status support, see the [official documentation](https://redmine.lighttpd.net/projects/lighttpd/wiki/Mod_status).
+ configuration:
+ file:
+ name: go.d/lighttpd.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1/server-status?auto
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: Lighttpd with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1/server-status?auto
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/server-status?auto
+
+ - name: remote
+ url: http://192.0.2.1/server-status?auto
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: lighttpd.requests
+ description: Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: lighttpd.net
+ description: Bandwidth
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: sent
+ - name: lighttpd.workers
+ description: Servers
+ unit: servers
+ chart_type: stacked
+ dimensions:
+ - name: idle
+ - name: busy
+ - name: lighttpd.scoreboard
+ description: ScoreBoard
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: waiting
+ - name: open
+ - name: close
+ - name: hard_error
+ - name: keepalive
+ - name: read
+ - name: read_post
+ - name: write
+ - name: handle_request
+ - name: request_start
+ - name: request_end
+ - name: lighttpd.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
diff --git a/src/go/plugin/go.d/modules/lighttpd/metrics.go b/src/go/plugin/go.d/modules/lighttpd/metrics.go
new file mode 100644
index 000000000..6c39d2d06
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/metrics.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
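+// serverStatus mirrors the fields of the lighttpd "?auto" status output; the stm tags define the keys of the collected metrics map (e.g. "busy_servers", "scoreboard_waiting").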
+type (
+ serverStatus struct {
+ Total struct {
+ Accesses *int64 `stm:"accesses"`
+ KBytes *int64 `stm:"kBytes"`
+ } `stm:"total"`
+ Servers struct {
+ Busy *int64 `stm:"busy_servers"`
+ Idle *int64 `stm:"idle_servers"`
+ } `stm:""`
+ Uptime *int64 `stm:"uptime"`
+ Scoreboard *scoreboard `stm:"scoreboard"`
+ }
+ scoreboard struct {
+ Waiting int64 `stm:"waiting"`
+ Open int64 `stm:"open"`
+ Close int64 `stm:"close"`
+ HardError int64 `stm:"hard_error"`
+ KeepAlive int64 `stm:"keepalive"`
+ Read int64 `stm:"read"`
+ ReadPost int64 `stm:"read_post"`
+ Write int64 `stm:"write"`
+ HandleRequest int64 `stm:"handle_request"`
+ RequestStart int64 `stm:"request_start"`
+ RequestEnd int64 `stm:"request_end"`
+ ResponseStart int64 `stm:"response_start"`
+ ResponseEnd int64 `stm:"response_end"`
+ }
+)
diff --git a/src/go/plugin/go.d/modules/lighttpd/testdata/apache-status.txt b/src/go/plugin/go.d/modules/lighttpd/testdata/apache-status.txt
new file mode 100644
index 000000000..136b69363
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/testdata/apache-status.txt
@@ -0,0 +1,39 @@
+127.0.0.1
+ServerVersion: Apache/2.4.37 (Unix)
+ServerMPM: event
+Server Built: Oct 23 2018 18:27:46
+CurrentTime: Sunday, 13-Jan-2019 20:39:30 MSK
+RestartTime: Sunday, 13-Jan-2019 20:35:13 MSK
+ParentServerConfigGeneration: 1
+ParentServerMPMGeneration: 0
+ServerUptimeSeconds: 256
+ServerUptime: 4 minutes 16 seconds
+Load1: 1.02
+Load5: 1.30
+Load15: 1.41
+Total Accesses: 9
+Total kBytes: 12
+Total Duration: 1
+CPUUser: 0
+CPUSystem: .01
+CPUChildrenUser: 0
+CPUChildrenSystem: 0
+CPULoad: .00390625
+Uptime: 256
+ReqPerSec: .0351563
+BytesPerSec: 48
+BytesPerReq: 1365.33
+DurationPerReq: .111111
+BusyWorkers: 1
+IdleWorkers: 99
+Processes: 4
+Stopping: 0
+BusyWorkers: 1
+IdleWorkers: 99
+ConnsTotal: 0
+ConnsAsyncWriting: 0
+ConnsAsyncKeepAlive: 0
+ConnsAsyncClosing: 0
+Scoreboard: ____________________________________________________________W_______________________________________............................................................................................................................................................................................................................................................................................................
+Using GnuTLS version: 3.6.5
+Built against GnuTLS version: 3.5.19 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/lighttpd/testdata/config.json b/src/go/plugin/go.d/modules/lighttpd/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/lighttpd/testdata/config.yaml b/src/go/plugin/go.d/modules/lighttpd/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/lighttpd/testdata/status.txt b/src/go/plugin/go.d/modules/lighttpd/testdata/status.txt
new file mode 100644
index 000000000..07d8e06e8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lighttpd/testdata/status.txt
@@ -0,0 +1,6 @@
+Total Accesses: 12
+Total kBytes: 4
+Uptime: 11
+BusyServers: 3
+IdleServers: 125
+Scoreboard: khr_____________________________________________________________________________________________________________________________ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/litespeed/README.md b/src/go/plugin/go.d/modules/litespeed/README.md
new file mode 120000
index 000000000..e7418b3dc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/README.md
@@ -0,0 +1 @@
+integrations/litespeed.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/litespeed/charts.go b/src/go/plugin/go.d/modules/litespeed/charts.go
new file mode 100644
index 000000000..b7309f287
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/charts.go
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package litespeed
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+const (
+ prioRequests = module.Priority + iota
+ prioRequestsProcessing
+ prioNetThroughputHttp
+ prioNetThroughputHttps
+ prioConnectionsHttp
+ prioConnectionsHttps
+ prioPublicCacheHits
+ prioPrivateCacheHits
+ prioStaticHits
+)
+
+var charts = module.Charts{
+ requestsChart.Copy(),
+ requestsProcessingChart.Copy(),
+
+ netThroughputHttpChart.Copy(),
+ netThroughputHttpsChart.Copy(),
+
+ connectionsHttpChart.Copy(),
+ connectionsHttpsChart.Copy(),
+
+ publicCacheHitsChart.Copy(),
+ privateCacheHitsChart.Copy(),
+ staticCacheHitsChart.Copy(),
+}
+
+var (
+ requestsChart = module.Chart{
+ ID: "requests",
+ Title: "Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "litespeed.requests",
+ Priority: prioRequests,
+ Dims: module.Dims{
+ {ID: "req_per_sec", Name: "requests", Div: precision},
+ },
+ }
+ requestsProcessingChart = module.Chart{
+ ID: "requests_processing",
+ Title: "Processing requests",
+ Units: "requests",
+ Fam: "requests",
+ Ctx: "litespeed.requests_processing",
+ Priority: prioRequestsProcessing,
+ Dims: module.Dims{
+ {ID: "req_processing", Name: "processing"},
+ },
+ }
+)
+
+var (
+ netThroughputHttpChart = module.Chart{
+ ID: "net_throughput_http",
+ Title: "HTTP throughput",
+ Units: "kilobits/s",
+ Fam: "throughput",
+ Ctx: "litespeed.net_throughput",
+ Type: module.Area,
+ Priority: prioNetThroughputHttp,
+ Dims: module.Dims{
+ {ID: "bps_in", Name: "in"},
+ {ID: "bps_out", Name: "out", Div: -1},
+ },
+ }
+ netThroughputHttpsChart = module.Chart{
+ ID: "net_throughput_https",
+ Title: "HTTPs throughput",
+ Units: "kilobits/s",
+ Fam: "throughput",
+ Ctx: "litespeed.net_ssl_throughput",
+ Type: module.Area,
+ Priority: prioNetThroughputHttps,
+ Dims: module.Dims{
+ {ID: "ssl_bps_in", Name: "in"},
+ {ID: "ssl_bps_out", Name: "out", Div: -1},
+ },
+ }
+)
+
+var (
+ connectionsHttpChart = module.Chart{
+ ID: "connections_http",
+ Title: "HTTP connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "litespeed.connections",
+ Type: module.Stacked,
+ Priority: prioConnectionsHttp,
+ Dims: module.Dims{
+ {ID: "availconn", Name: "free"},
+ {ID: "plainconn", Name: "used"},
+ },
+ }
+ connectionsHttpsChart = module.Chart{
+ ID: "connections_https",
+ Title: "HTTPs connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "litespeed.ssl_connections",
+ Type: module.Stacked,
+ Priority: prioConnectionsHttps,
+ Dims: module.Dims{
+ {ID: "availssl", Name: "free"},
+ {ID: "sslconn", Name: "used"},
+ },
+ }
+)
+
+var (
+ publicCacheHitsChart = module.Chart{
+ ID: "pub_cache_hits",
+ Title: "Public cache hits",
+ Units: "hits/s",
+ Fam: "cache",
+ Ctx: "litespeed.public_cache",
+ Priority: prioPublicCacheHits,
+ Dims: module.Dims{
+ {ID: "pub_cache_hits_per_sec", Name: "hits", Div: precision},
+ },
+ }
+ privateCacheHitsChart = module.Chart{
+ ID: "private_cache_hits",
+ Title: "Private cache hits",
+ Units: "hits/s",
+ Fam: "cache",
+ Ctx: "litespeed.private_cache",
+ Priority: prioPrivateCacheHits,
+ Dims: module.Dims{
+ {ID: "private_cache_hits_per_sec", Name: "hits", Div: precision},
+ },
+ }
+
+ staticCacheHitsChart = module.Chart{
+ ID: "static_hits",
+ Title: "Static hits",
+ Units: "hits/s",
+ Fam: "static",
+ Ctx: "litespeed.static",
+ Priority: prioStaticHits,
+ Dims: module.Dims{
+ {ID: "static_hits_per_sec", Name: "hits", Div: precision},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/litespeed/collect.go b/src/go/plugin/go.d/modules/litespeed/collect.go
new file mode 100644
index 000000000..a68cf119c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/collect.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package litespeed
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
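+// precision scales fractional per-second rates into integers; the corresponding charts divide by the same factor to restore the original value.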
+const precision = 100
+
+func (l *Litespeed) collect() (map[string]int64, error) {
+ if l.checkDir {
+ _, err := os.Stat(l.ReportsDir)
+ if err != nil {
+ return nil, err
+ }
+ l.checkDir = false
+ }
+ reports, err := filepath.Glob(filepath.Join(l.ReportsDir, ".rtreport*"))
+ if err != nil {
+ return nil, err
+ }
+
+ l.Debugf("found %d reports: %v", len(reports), reports)
+
+ if len(reports) == 0 {
+ return nil, errors.New("no reports found")
+ }
+
+ mx := make(map[string]int64)
+
+ for _, report := range reports {
+ if err := l.collectReport(mx, report); err != nil {
+ return nil, err
+ }
+ }
+
+ return mx, nil
+}
+
+func (l *Litespeed) collectReport(mx map[string]int64, filename string) error {
+ bs, err := os.ReadFile(filename)
+ if err != nil {
+ return err
+ }
+
+ sc := bufio.NewScanner(bytes.NewReader(bs))
+
+ var valid bool
+
+ for sc.Scan() {
+ line := sc.Text()
+
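+ // Keep only the report lines that carry the collected metrics: bandwidth ("BPS_IN: ..."), connections ("MAXCONN:"/"PLAINCONN:"), and the server-wide request rate line ("REQ_RATE []:"); per-vhost REQ_RATE and EXTAPP lines are skipped.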
+ switch {
+ default:
+ continue
+ case strings.HasPrefix(line, "BPS_IN:"):
+ case strings.HasPrefix(line, "PLAINCONN:"):
+ case strings.HasPrefix(line, "MAXCONN:"):
+ case strings.HasPrefix(line, "REQ_RATE []:"):
+ line = strings.TrimPrefix(line, "REQ_RATE []:")
+ }
+
+ parts := strings.Split(line, ",")
+
+ for _, part := range parts {
+ i := strings.IndexByte(part, ':')
+ if i == -1 {
+ l.Debugf("Skipping metric '%s': missing colon separator", part)
+ continue
+ }
+
+ metric, sVal := strings.TrimSpace(part[:i]), strings.TrimSpace(part[i+1:])
+
+ val, err := strconv.ParseFloat(sVal, 64)
+ if err != nil {
+ l.Debugf("Skipping metric '%s': invalid value", part)
+ continue
+ }
+
+ key := strings.ToLower(metric)
+
+ switch metric {
+ default:
+ continue
+ case "REQ_PER_SEC",
+ "PUB_CACHE_HITS_PER_SEC",
+ "PRIVATE_CACHE_HITS_PER_SEC",
+ "STATIC_HITS_PER_SEC":
+ mx[key] += int64(val * precision)
+ case "BPS_IN",
+ "BPS_OUT",
+ "SSL_BPS_IN",
+ "SSL_BPS_OUT":
+ mx[key] += int64(val) * 8
+ case "REQ_PROCESSING",
+ "PLAINCONN",
+ "AVAILCONN",
+ "SSLCONN",
+ "AVAILSSL":
+ mx[key] += int64(val)
+ }
+ valid = true
+
+ }
+ }
+
+ if !valid {
+ return errors.New("unexpected file: not a litespeed report")
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/litespeed/config_schema.json b/src/go/plugin/go.d/modules/litespeed/config_schema.json
new file mode 100644
index 000000000..2ec13468f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/config_schema.json
@@ -0,0 +1,37 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Litespeed collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "reports_dir": {
+ "title": "Reports directory",
+ "description": "Directory containing Litespeed's real-time statistics files (`.rtreport`)",
+ "type": "string",
+ "default": "/tmp/lshttpd/"
+ }
+ },
+ "required": [
+ "reports_dir"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "update_every": {
+ "ui:help": "Data is collected by reading Litespeed's report files, which are updated every 10 seconds. Setting the data collection interval to less than 10 seconds wouldn't provide more recent data."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/litespeed/integrations/litespeed.md b/src/go/plugin/go.d/modules/litespeed/integrations/litespeed.md
new file mode 100644
index 000000000..96858fdab
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/integrations/litespeed.md
@@ -0,0 +1,193 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/litespeed/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/litespeed/metadata.yaml"
+sidebar_label: "Litespeed"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Litespeed
+
+
+<img src="https://netdata.cloud/img/litespeed.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: litespeed
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Examine Litespeed metrics for insights into web server operations. Analyze request rates, network throughput, connections, and cache hits for efficient web service delivery.
+
+The collector reads Litespeed's real-time report files (`.rtreport*`) under /tmp/lshttpd/ to gather the metrics.
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is present, the collector will attempt to read files under /tmp/lshttpd/.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Litespeed instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| litespeed.requests | requests | requests/s |
+| litespeed.requests_processing | processing | requests |
+| litespeed.net_throughput | in, out | kilobits/s |
+| litespeed.net_ssl_throughput | in, out | kilobits/s |
+| litespeed.connections | free, used | conns |
+| litespeed.ssl_connections | free, used | conns |
+| litespeed.public_cache | hits | hits/s |
+| litespeed.private_cache | hits | hits/s |
+| litespeed.static | hits | hits/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/litespeed.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/litespeed.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| reports_dir | Directory containing Litespeed's real-time statistics files. | /tmp/lshttpd/ | no |
+
+</details>
+
+#### Examples
+
+##### Set the path to statistics
+
+Change the directory that contains the Litespeed statistics files.
+
+```yaml
+jobs:
+ - name: local
+ reports_dir: '/tmp/lshttpd'
+
+```
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `litespeed` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m litespeed
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `litespeed` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep litespeed
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep litespeed /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep litespeed
+```
+
+
diff --git a/src/go/plugin/go.d/modules/litespeed/litespeed.go b/src/go/plugin/go.d/modules/litespeed/litespeed.go
new file mode 100644
index 000000000..f57c0eed5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/litespeed.go
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package litespeed
+
+import (
+ _ "embed"
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("litespeed", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10, // The .rtreport files are generated per worker, and updated every 10 seconds.
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Litespeed {
+ return &Litespeed{
+ Config: Config{
+ ReportsDir: "/tmp/lshttpd/",
+ },
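+ // checkDir makes the first collection verify that reports_dir exists before globbing for report files.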
+ checkDir: true,
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ ReportsDir string `yaml:"reports_dir" json:"reports_dir"`
+}
+
+type Litespeed struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ checkDir bool
+
+ charts *module.Charts
+}
+
+func (l *Litespeed) Configuration() any {
+ return l.Config
+}
+
+func (l *Litespeed) Init() error {
+ if l.ReportsDir == "" {
+ return errors.New("reports_dir is required")
+ }
+ return nil
+}
+
+func (l *Litespeed) Check() error {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (l *Litespeed) Charts() *module.Charts {
+ return l.charts
+}
+
+func (l *Litespeed) Collect() map[string]int64 {
+ mx, err := l.collect()
+
+ if err != nil {
+ l.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (l *Litespeed) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/litespeed/litespeed_test.go b/src/go/plugin/go.d/modules/litespeed/litespeed_test.go
new file mode 100644
index 000000000..576609dca
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/litespeed_test.go
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package litespeed
+
+import (
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestLitespeed_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Litespeed{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestLitespeed_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if reports_dir not set": {
+ wantFail: true,
+ config: Config{
+ ReportsDir: "",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ lite := New()
+ lite.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, lite.Init())
+ } else {
+ assert.NoError(t, lite.Init())
+ }
+ })
+ }
+}
+
+func TestLitespeed_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestLitespeed_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareLitespeed func() *Litespeed
+ wantFail bool
+ }{
+ "success": {
+ wantFail: false,
+ prepareLitespeed: prepareLitespeedOk,
+ },
+ "fails if reports dir not exist": {
+ wantFail: true,
+ prepareLitespeed: prepareLitespeedDirNotExist,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ lite := test.prepareLitespeed()
+
+ if test.wantFail {
+ assert.Error(t, lite.Check())
+ } else {
+ assert.NoError(t, lite.Check())
+ }
+ })
+ }
+}
+
+func TestLitespeed_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareLitespeed func() *Litespeed
+ wantMetrics map[string]int64
+ }{
+ "success": {
+ prepareLitespeed: prepareLitespeedOk,
+ wantMetrics: map[string]int64{
+ "availconn": 3804,
+ "availssl": 3814,
+ "bps_in": 0,
+ "bps_out": 240,
+ "plainconn": 10,
+ "private_cache_hits_per_sec": 0,
+ "pub_cache_hits_per_sec": 0,
+ "req_per_sec": 1560,
+ "req_processing": 168,
+ "ssl_bps_in": 16,
+ "ssl_bps_out": 3120,
+ "sslconn": 186,
+ "static_hits_per_sec": 760,
+ },
+ },
+ "fails if reports dir not exist": {
+ prepareLitespeed: prepareLitespeedDirNotExist,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ lite := test.prepareLitespeed()
+
+ mx := lite.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ testMetricsHasAllChartsDims(t, lite, mx)
+ }
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, lite *Litespeed, mx map[string]int64) {
+ for _, chart := range *lite.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareLitespeedOk() *Litespeed {
+ lite := New()
+ lite.ReportsDir = "testdata"
+ return lite
+}
+
+func prepareLitespeedDirNotExist() *Litespeed {
+ lite := prepareLitespeedOk()
+ lite.ReportsDir += "!"
+ return lite
+}
diff --git a/src/go/plugin/go.d/modules/litespeed/metadata.yaml b/src/go/plugin/go.d/modules/litespeed/metadata.yaml
new file mode 100644
index 000000000..1c7957532
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/metadata.yaml
@@ -0,0 +1,148 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-litespeed
+ plugin_name: go.d.plugin
+ module_name: litespeed
+ monitored_instance:
+ name: Litespeed
+ link: "https://www.litespeedtech.com/products/litespeed-web-server"
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: "litespeed.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - litespeed
+ - web
+ - server
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Examine Litespeed metrics for insights into web server operations. Analyze request rates, response times, and error rates for efficient web service delivery."
+ method_description: "The collector uses the statistics under /tmp/lshttpd to gather the metrics."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: "If no configuration is present, the collector will attempt to read files under /tmp/lshttpd/."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/litespeed.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: autodetection_retry
+ description: Sets the job re-check interval in seconds.
+ default_value: 0
+ required: false
+ - name: reports_dir
+ description: Directory containing Litespeed's real-time statistics files.
+ default_value: "/tmp/lshttpd/"
+ required: false
+ examples:
+ folding:
+ enabled: false
+ title: "Config"
+ list:
+ - name: Set the path to statistics
+ description: Change the directory that contains the Litespeed statistics files.
+ config: |
+ jobs:
+ - name: local
+ reports_dir: '/tmp/lshttpd'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: litespeed.requests
+ description: Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: litespeed.requests_processing
+ description: Requests In Processing
+ unit: "requests"
+ chart_type: line
+ dimensions:
+ - name: processing
+ - name: litespeed.net_throughput
+ description: Network Throughput HTTP
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: litespeed.net_ssl_throughput
+ description: Network Throughput HTTPS
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: litespeed.connections
+ description: Connections HTTP
+ unit: "conns"
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: litespeed.ssl_connections
+ description: Connections HTTPS
+ unit: "conns"
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: litespeed.public_cache
+ description: Public Cache Hits
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: litespeed.private_cache
+ description: Private Cache Hits
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: litespeed.static
+ description: Static Hits
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hits
diff --git a/src/go/plugin/go.d/modules/litespeed/testdata/.rtreport b/src/go/plugin/go.d/modules/litespeed/testdata/.rtreport
new file mode 100644
index 000000000..e262cf3cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/testdata/.rtreport
@@ -0,0 +1,8 @@
+VERSION: LiteSpeed Web Server/Enterprise/5.2.7
+UPTIME: 02:31:56
+BPS_IN: 0, BPS_OUT: 15, SSL_BPS_IN: 1, SSL_BPS_OUT: 195
+MAXCONN: 2000, MAXSSL_CONN: 2000, PLAINCONN: 5, AVAILCONN: 1902, IDLECONN: 14, SSLCONN: 93, AVAILSSL: 1907
+REQ_RATE []: REQ_PROCESSING: 84, REQ_PER_SEC: 7.8, TOT_REQS: 92899, PUB_CACHE_HITS_PER_SEC: 0.0, TOTAL_PUB_CACHE_HITS: 9, PRIVATE_CACHE_HITS_PER_SEC: 0.0, TOTAL_PRIVATE_CACHE_HITS: 0, STATIC_HITS_PER_SEC: 3.8, TOTAL_STATIC_HITS: 53244
+REQ_RATE [APVH_149.202.xxx.xxxx:443_example.com]: REQ_PROCESSING: 0, REQ_PER_SEC: 0.0, TOT_REQS: 0, PUB_CACHE_HITS_PER_SEC: 0.0, TOTAL_PUB_CACHE_HITS: 0, PRIVATE_CACHE_HITS_PER_SEC: 0.0, TOTAL_PRIVATE_CACHE_HITS: 0, STATIC_HITS_PER_SEC: 0.0, TOTAL_STATIC_HITS: 0
+EXTAPP [CGI] [] [lscgid]: CMAXCONN: 200, EMAXCONN: 200, POOL_SIZE: 1, INUSE_CONN: 0, IDLE_CONN: 1, WAITQUE_DEPTH: 0, REQ_PER_SEC: 0.0, TOT_REQS: 41
+EXTAPP [Proxy] [] [http://127.0.0.1]: CMAXCONN: 10, EMAXCONN: 10, POOL_SIZE: 1, INUSE_CONN: 0, IDLE_CONN: 1, WAITQUE_DEPTH: 0, REQ_PER_SEC: 0.0, TOT_REQS: 20
diff --git a/src/go/plugin/go.d/modules/litespeed/testdata/.rtreport.2 b/src/go/plugin/go.d/modules/litespeed/testdata/.rtreport.2
new file mode 100644
index 000000000..e262cf3cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/testdata/.rtreport.2
@@ -0,0 +1,8 @@
+VERSION: LiteSpeed Web Server/Enterprise/5.2.7
+UPTIME: 02:31:56
+BPS_IN: 0, BPS_OUT: 15, SSL_BPS_IN: 1, SSL_BPS_OUT: 195
+MAXCONN: 2000, MAXSSL_CONN: 2000, PLAINCONN: 5, AVAILCONN: 1902, IDLECONN: 14, SSLCONN: 93, AVAILSSL: 1907
+REQ_RATE []: REQ_PROCESSING: 84, REQ_PER_SEC: 7.8, TOT_REQS: 92899, PUB_CACHE_HITS_PER_SEC: 0.0, TOTAL_PUB_CACHE_HITS: 9, PRIVATE_CACHE_HITS_PER_SEC: 0.0, TOTAL_PRIVATE_CACHE_HITS: 0, STATIC_HITS_PER_SEC: 3.8, TOTAL_STATIC_HITS: 53244
+REQ_RATE [APVH_149.202.xxx.xxxx:443_example.com]: REQ_PROCESSING: 0, REQ_PER_SEC: 0.0, TOT_REQS: 0, PUB_CACHE_HITS_PER_SEC: 0.0, TOTAL_PUB_CACHE_HITS: 0, PRIVATE_CACHE_HITS_PER_SEC: 0.0, TOTAL_PRIVATE_CACHE_HITS: 0, STATIC_HITS_PER_SEC: 0.0, TOTAL_STATIC_HITS: 0
+EXTAPP [CGI] [] [lscgid]: CMAXCONN: 200, EMAXCONN: 200, POOL_SIZE: 1, INUSE_CONN: 0, IDLE_CONN: 1, WAITQUE_DEPTH: 0, REQ_PER_SEC: 0.0, TOT_REQS: 41
+EXTAPP [Proxy] [] [http://127.0.0.1]: CMAXCONN: 10, EMAXCONN: 10, POOL_SIZE: 1, INUSE_CONN: 0, IDLE_CONN: 1, WAITQUE_DEPTH: 0, REQ_PER_SEC: 0.0, TOT_REQS: 20
diff --git a/src/go/plugin/go.d/modules/litespeed/testdata/config.json b/src/go/plugin/go.d/modules/litespeed/testdata/config.json
new file mode 100644
index 000000000..309245495
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "reports_dir": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/litespeed/testdata/config.yaml b/src/go/plugin/go.d/modules/litespeed/testdata/config.yaml
new file mode 100644
index 000000000..03905e5ce
--- /dev/null
+++ b/src/go/plugin/go.d/modules/litespeed/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+reports_dir: "ok"
diff --git a/src/go/plugin/go.d/modules/logind/README.md b/src/go/plugin/go.d/modules/logind/README.md
new file mode 120000
index 000000000..22c20d705
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/README.md
@@ -0,0 +1 @@
+integrations/systemd-logind_users.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/logind/charts.go b/src/go/plugin/go.d/modules/logind/charts.go
new file mode 100644
index 000000000..61fa0490c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/charts.go
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package logind
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+const (
+ prioSessions = module.Priority + iota
+ prioSessionsType
+ prioSessionsState
+ prioUsersState
+)
+
+var charts = module.Charts{
+ sessionsChart.Copy(),
+ sessionsTypeChart.Copy(),
+ sessionsStateChart.Copy(),
+ usersStateChart.Copy(),
+}
+
+var sessionsChart = module.Chart{
+ ID: "sessions",
+ Title: "Logind Sessions",
+ Units: "sessions",
+ Fam: "sessions",
+ Ctx: "logind.sessions",
+ Priority: prioSessions,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "sessions_remote", Name: "remote"},
+ {ID: "sessions_local", Name: "local"},
+ },
+}
+
+var sessionsTypeChart = module.Chart{
+ ID: "sessions_type",
+ Title: "Logind Sessions By Type",
+ Units: "sessions",
+ Fam: "sessions",
+ Ctx: "logind.sessions_type",
+ Priority: prioSessionsType,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "sessions_type_console", Name: "console"},
+ {ID: "sessions_type_graphical", Name: "graphical"},
+ {ID: "sessions_type_other", Name: "other"},
+ },
+}
+
+var sessionsStateChart = module.Chart{
+ ID: "sessions_state",
+ Title: "Logind Sessions By State",
+ Units: "sessions",
+ Fam: "sessions",
+ Ctx: "logind.sessions_state",
+ Priority: prioSessionsState,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "sessions_state_online", Name: "online"},
+ {ID: "sessions_state_closing", Name: "closing"},
+ {ID: "sessions_state_active", Name: "active"},
+ },
+}
+
+var usersStateChart = module.Chart{
+ ID: "users_state",
+ Title: "Logind Users By State",
+ Units: "users",
+ Fam: "users",
+ Ctx: "logind.users_state",
+ Priority: prioUsersState,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "users_state_offline", Name: "offline"},
+ {ID: "users_state_closing", Name: "closing"},
+ {ID: "users_state_online", Name: "online"},
+ {ID: "users_state_lingering", Name: "lingering"},
+ {ID: "users_state_active", Name: "active"},
+ },
+}
diff --git a/src/go/plugin/go.d/modules/logind/collect.go b/src/go/plugin/go.d/modules/logind/collect.go
new file mode 100644
index 000000000..1f22478b1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/collect.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package logind
+
+import (
+ "strings"
+)
+
+func (l *Logind) collect() (map[string]int64, error) {
+ if l.conn == nil {
+ conn, err := l.newLogindConn(l.Config)
+ if err != nil {
+ return nil, err
+ }
+ l.conn = conn
+ }
+
+ mx := make(map[string]int64)
+
+ // https://www.freedesktop.org/wiki/Software/systemd/logind/ (Session Objects)
+ if err := l.collectSessions(mx); err != nil {
+ return nil, err
+ }
+ // https://www.freedesktop.org/wiki/Software/systemd/logind/ (User Objects)
+ if err := l.collectUsers(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (l *Logind) collectSessions(mx map[string]int64) error {
+ sessions, err := l.conn.ListSessions()
+ if err != nil {
+ return err
+ }
+
+ mx["sessions_remote"] = 0
+ mx["sessions_local"] = 0
+ mx["sessions_type_graphical"] = 0
+ mx["sessions_type_console"] = 0
+ mx["sessions_type_other"] = 0
+ mx["sessions_state_online"] = 0
+ mx["sessions_state_active"] = 0
+ mx["sessions_state_closing"] = 0
+
+ for _, session := range sessions {
+ props, err := l.conn.GetSessionProperties(session.Path)
+ if err != nil {
+ return err
+ }
+
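+ // dbus.Variant.String() renders a boolean property as "true"/"false", so a plain string comparison is sufficient here.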
+ if v, ok := props["Remote"]; ok && v.String() == "true" {
+ mx["sessions_remote"]++
+ } else {
+ mx["sessions_local"]++
+ }
+
+ if v, ok := props["Type"]; ok {
+ typ := strings.Trim(v.String(), "\"")
+ switch typ {
+ case "x11", "mir", "wayland":
+ mx["sessions_type_graphical"]++
+ case "tty":
+ mx["sessions_type_console"]++
+ case "unspecified":
+ mx["sessions_type_other"]++
+ default:
+ l.Debugf("unknown session type '%s' for session '%s/%s'", typ, session.User, session.ID)
+ mx["sessions_type_other"]++
+ }
+ }
+
+ if v, ok := props["State"]; ok {
+ state := strings.Trim(v.String(), "\"")
+ switch state {
+ case "online":
+ mx["sessions_state_online"]++
+ case "active":
+ mx["sessions_state_active"]++
+ case "closing":
+ mx["sessions_state_closing"]++
+ default:
+ l.Debugf("unknown session state '%s' for session '%s/%s'", state, session.User, session.ID)
+ }
+ }
+ }
+ return nil
+}
+
+func (l *Logind) collectUsers(mx map[string]int64) error {
+ users, err := l.conn.ListUsers()
+ if err != nil {
+ return err
+ }
+
+ // https://www.freedesktop.org/software/systemd/man/sd_uid_get_state.html
+ mx["users_state_offline"] = 0
+ mx["users_state_lingering"] = 0
+ mx["users_state_online"] = 0
+ mx["users_state_active"] = 0
+ mx["users_state_closing"] = 0
+
+ for _, user := range users {
+ v, err := l.conn.GetUserProperty(user.Path, "State")
+ if err != nil {
+ return err
+ }
+
+ state := strings.Trim(v.String(), "\"")
+ switch state {
+ case "offline":
+ mx["users_state_offline"]++
+ case "lingering":
+ mx["users_state_lingering"]++
+ case "online":
+ mx["users_state_online"]++
+ case "active":
+ mx["users_state_active"]++
+ case "closing":
+ mx["users_state_closing"]++
+ default:
+ l.Debugf("unknown user state '%s' for user '%s/%d'", state, user.Name, user.UID)
+ }
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/logind/config_schema.json b/src/go/plugin/go.d/modules/logind/config_schema.json
new file mode 100644
index 000000000..0a8618538
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Logind collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for a connection to systemds dbus endpoint.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logind/connection.go b/src/go/plugin/go.d/modules/logind/connection.go
new file mode 100644
index 000000000..b97387acf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/connection.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package logind
+
+import (
+ "context"
+ "time"
+
+ "github.com/coreos/go-systemd/v22/login1"
+ "github.com/godbus/dbus/v5"
+)
+
+type logindConnection interface {
+ Close()
+
+ ListSessions() ([]login1.Session, error)
+ GetSessionProperties(dbus.ObjectPath) (map[string]dbus.Variant, error)
+
+ ListUsers() ([]login1.User, error)
+ GetUserProperty(dbus.ObjectPath, string) (*dbus.Variant, error)
+}
+
+func newLogindConnection(timeout time.Duration) (logindConnection, error) {
+ conn, err := login1.New()
+ if err != nil {
+ return nil, err
+ }
+ return &logindDBusConnection{
+ conn: conn,
+ timeout: timeout,
+ }, nil
+}
+
+type logindDBusConnection struct {
+ conn *login1.Conn
+ timeout time.Duration
+}
+
+func (c *logindDBusConnection) Close() {
+ if c.conn != nil {
+ c.conn.Close()
+ c.conn = nil
+ }
+}
+
+func (c *logindDBusConnection) ListSessions() ([]login1.Session, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ return c.conn.ListSessionsContext(ctx)
+}
+
+func (c *logindDBusConnection) ListUsers() ([]login1.User, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ return c.conn.ListUsersContext(ctx)
+}
+
+func (c *logindDBusConnection) GetSessionProperties(path dbus.ObjectPath) (map[string]dbus.Variant, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ return c.conn.GetSessionPropertiesContext(ctx, path)
+}
+
+func (c *logindDBusConnection) GetUserProperty(path dbus.ObjectPath, property string) (*dbus.Variant, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ return c.conn.GetUserPropertyContext(ctx, path, property)
+}
diff --git a/src/go/plugin/go.d/modules/logind/doc.go b/src/go/plugin/go.d/modules/logind/doc.go
new file mode 100644
index 000000000..90aa8b4ef
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/doc.go
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logind
diff --git a/src/go/plugin/go.d/modules/logind/integrations/systemd-logind_users.md b/src/go/plugin/go.d/modules/logind/integrations/systemd-logind_users.md
new file mode 100644
index 000000000..3450ff669
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/integrations/systemd-logind_users.md
@@ -0,0 +1,170 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/logind/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/logind/metadata.yaml"
+sidebar_label: "systemd-logind users"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Systemd"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# systemd-logind users
+
+
+<img src="https://netdata.cloud/img/users.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: logind
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the number of sessions and users as reported by the `org.freedesktop.login1` DBus API.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per systemd-logind users instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| logind.sessions | remote, local | sessions |
+| logind.sessions_type | console, graphical, other | sessions |
+| logind.sessions_state | online, closing, active | sessions |
+| logind.users_state | offline, closing, online, lingering, active | users |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
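+
+If the collector reports no data, you can verify that `systemd-logind` is reachable over D-Bus by querying it directly. This is only a verification sketch and assumes the standard `loginctl` client shipped with systemd is installed:
+
+```bash
+loginctl list-sessions
+loginctl list-users
+```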
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/logind.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/logind.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+#### Examples
+There are no configuration examples.
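+
+That said, a minimal job definition is sketched below. The values are illustrative assumptions; the collector works with an empty configuration because every option has a default:
+
+```yaml
+jobs:
+  - name: logind
+    update_every: 5
+    timeout: 2
+```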
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `logind` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m logind
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `logind` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep logind
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep logind /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep logind
+```
+
+
diff --git a/src/go/plugin/go.d/modules/logind/logind.go b/src/go/plugin/go.d/modules/logind/logind.go
new file mode 100644
index 000000000..ff2866349
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/logind.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package logind
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("logind", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ Priority: 59999, // copied from the python collector
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Logind {
+ return &Logind{
+ Config: Config{
+ Timeout: web.Duration(time.Second),
+ },
+ newLogindConn: func(cfg Config) (logindConnection, error) {
+ return newLogindConnection(cfg.Timeout.Duration())
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type Logind struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ conn logindConnection
+ newLogindConn func(config Config) (logindConnection, error)
+}
+
+func (l *Logind) Configuration() any {
+ return l.Config
+}
+
+func (l *Logind) Init() error {
+ return nil
+}
+
+func (l *Logind) Check() error {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (l *Logind) Charts() *module.Charts {
+ return l.charts
+}
+
+func (l *Logind) Collect() map[string]int64 {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (l *Logind) Cleanup() {
+ if l.conn != nil {
+ l.conn.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logind/logind_test.go b/src/go/plugin/go.d/modules/logind/logind_test.go
new file mode 100644
index 000000000..21cbba871
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/logind_test.go
@@ -0,0 +1,350 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package logind
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/coreos/go-systemd/v22/login1"
+ "github.com/godbus/dbus/v5"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestLogind_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Logind{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestLogind_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ l := New()
+ l.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, l.Init())
+ } else {
+ assert.NoError(t, l.Init())
+ }
+ })
+ }
+}
+
+func TestLogind_Charts(t *testing.T) {
+ assert.Equal(t, len(charts), len(*New().Charts()))
+}
+
+func TestLogind_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ wantClose bool
+ prepare func(l *Logind)
+ }{
+ "after New": {
+ wantClose: false,
+ prepare: func(l *Logind) {},
+ },
+ "after Init": {
+ wantClose: false,
+ prepare: func(l *Logind) { _ = l.Init() },
+ },
+ "after Check": {
+ wantClose: true,
+ prepare: func(l *Logind) { _ = l.Init(); _ = l.Check() },
+ },
+ "after Collect": {
+ wantClose: true,
+ prepare: func(l *Logind) { _ = l.Init(); l.Collect() },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ l := New()
+ m := prepareConnOK()
+ l.newLogindConn = func(Config) (logindConnection, error) { return m, nil }
+ test.prepare(l)
+
+ require.NotPanics(t, l.Cleanup)
+
+ if test.wantClose {
+ assert.True(t, m.closeCalled)
+ } else {
+ assert.False(t, m.closeCalled)
+ }
+ })
+ }
+}
+
+func TestLogind_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func() *mockConn
+ }{
+ "success when response contains sessions and users": {
+ wantFail: false,
+ prepare: prepareConnOK,
+ },
+ "success when response does not contain sessions and users": {
+ wantFail: false,
+ prepare: prepareConnOKNoSessionsNoUsers,
+ },
+ "fail when error on list sessions": {
+ wantFail: true,
+ prepare: prepareConnErrOnListSessions,
+ },
+ "fail when error on get session properties": {
+ wantFail: true,
+ prepare: prepareConnErrOnGetSessionProperties,
+ },
+ "fail when error on list users": {
+ wantFail: true,
+ prepare: prepareConnErrOnListUsers,
+ },
+ "fail when error on get user property": {
+ wantFail: true,
+ prepare: prepareConnErrOnGetUserProperty,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ l := New()
+ require.NoError(t, l.Init())
+ l.conn = test.prepare()
+
+ if test.wantFail {
+ assert.Error(t, l.Check())
+ } else {
+ assert.NoError(t, l.Check())
+ }
+ })
+ }
+}
+
+func TestLogind_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *mockConn
+ expected map[string]int64
+ }{
+ "success when response contains sessions and users": {
+ prepare: prepareConnOK,
+ expected: map[string]int64{
+ "sessions_local": 3,
+ "sessions_remote": 0,
+ "sessions_state_active": 0,
+ "sessions_state_closing": 0,
+ "sessions_state_online": 3,
+ "sessions_type_console": 3,
+ "sessions_type_graphical": 0,
+ "sessions_type_other": 0,
+ "users_state_active": 3,
+ "users_state_closing": 0,
+ "users_state_lingering": 0,
+ "users_state_offline": 0,
+ "users_state_online": 0,
+ },
+ },
+ "success when response does not contain sessions and users": {
+ prepare: prepareConnOKNoSessionsNoUsers,
+ expected: map[string]int64{
+ "sessions_local": 0,
+ "sessions_remote": 0,
+ "sessions_state_active": 0,
+ "sessions_state_closing": 0,
+ "sessions_state_online": 0,
+ "sessions_type_console": 0,
+ "sessions_type_graphical": 0,
+ "sessions_type_other": 0,
+ "users_state_active": 0,
+ "users_state_closing": 0,
+ "users_state_lingering": 0,
+ "users_state_offline": 0,
+ "users_state_online": 0,
+ },
+ },
+ "fail when error on list sessions": {
+ prepare: prepareConnErrOnListSessions,
+ expected: map[string]int64(nil),
+ },
+ "fail when error on get session properties": {
+ prepare: prepareConnErrOnGetSessionProperties,
+ expected: map[string]int64(nil),
+ },
+ "fail when error on list users": {
+ prepare: prepareConnErrOnListUsers,
+ expected: map[string]int64(nil),
+ },
+ "fail when error on get user property": {
+ prepare: prepareConnErrOnGetUserProperty,
+ expected: map[string]int64(nil),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ l := New()
+ require.NoError(t, l.Init())
+ l.conn = test.prepare()
+
+ mx := l.Collect()
+
+ assert.Equal(t, test.expected, mx)
+ })
+ }
+}
+
+func prepareConnOK() *mockConn {
+ return &mockConn{
+ sessions: []login1.Session{
+ {Path: "/org/freedesktop/login1/session/_3156", User: "user1", ID: "123"},
+ {Path: "/org/freedesktop/login1/session/_3157", User: "user2", ID: "124"},
+ {Path: "/org/freedesktop/login1/session/_3158", User: "user3", ID: "125"},
+ },
+ users: []login1.User{
+ {Path: "/org/freedesktop/login1/user/_1000", Name: "user1", UID: 123},
+ {Path: "/org/freedesktop/login1/user/_1001", Name: "user2", UID: 124},
+ {Path: "/org/freedesktop/login1/user/_1002", Name: "user3", UID: 125},
+ },
+ errOnListSessions: false,
+ errOnGetSessionProperties: false,
+ errOnListUsers: false,
+ errOnGetUserProperty: false,
+ closeCalled: false,
+ }
+}
+
+func prepareConnOKNoSessionsNoUsers() *mockConn {
+ conn := prepareConnOK()
+ conn.sessions = nil
+ conn.users = nil
+ return conn
+}
+
+func prepareConnErrOnListSessions() *mockConn {
+ conn := prepareConnOK()
+ conn.errOnListSessions = true
+ return conn
+}
+
+func prepareConnErrOnGetSessionProperties() *mockConn {
+ conn := prepareConnOK()
+ conn.errOnGetSessionProperties = true
+ return conn
+}
+
+func prepareConnErrOnListUsers() *mockConn {
+ conn := prepareConnOK()
+ conn.errOnListUsers = true
+ return conn
+}
+
+func prepareConnErrOnGetUserProperty() *mockConn {
+ conn := prepareConnOK()
+ conn.errOnGetUserProperty = true
+ return conn
+}
+
+type mockConn struct {
+ sessions []login1.Session
+ users []login1.User
+
+ errOnListSessions bool
+ errOnGetSessionProperties bool
+ errOnListUsers bool
+ errOnGetUserProperty bool
+ closeCalled bool
+}
+
+func (m *mockConn) Close() {
+ m.closeCalled = true
+}
+
+func (m *mockConn) ListSessions() ([]login1.Session, error) {
+ if m.errOnListSessions {
+ return nil, errors.New("mock.ListSessions() error")
+ }
+ return m.sessions, nil
+}
+
+func (m *mockConn) GetSessionProperties(path dbus.ObjectPath) (map[string]dbus.Variant, error) {
+ if m.errOnGetSessionProperties {
+ return nil, errors.New("mock.GetSessionProperties() error")
+ }
+
+ var found bool
+ for _, s := range m.sessions {
+ if s.Path == path {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return nil, errors.New("mock.GetUserProperty(): session is not found")
+ }
+
+ return map[string]dbus.Variant{
+ "Remote": dbus.MakeVariant("true"),
+ "Type": dbus.MakeVariant("tty"),
+ "State": dbus.MakeVariant("online"),
+ }, nil
+}
+
+func (m *mockConn) ListUsers() ([]login1.User, error) {
+ if m.errOnListUsers {
+ return nil, errors.New("mock.ListUsers() error")
+ }
+ return m.users, nil
+}
+
+func (m *mockConn) GetUserProperty(path dbus.ObjectPath, _ string) (*dbus.Variant, error) {
+ if m.errOnGetUserProperty {
+ return nil, errors.New("mock.GetUserProperty() error")
+ }
+
+ var found bool
+ for _, u := range m.users {
+ if u.Path == path {
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ return nil, errors.New("mock.GetUserProperty(): user is not found")
+ }
+
+ v := dbus.MakeVariant("active")
+ return &v, nil
+}
diff --git a/src/go/plugin/go.d/modules/logind/metadata.yaml b/src/go/plugin/go.d/modules/logind/metadata.yaml
new file mode 100644
index 000000000..792a515fe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/metadata.yaml
@@ -0,0 +1,105 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-logind
+ plugin_name: go.d.plugin
+ module_name: logind
+ monitored_instance:
+ name: systemd-logind users
+ link: https://www.freedesktop.org/software/systemd/man/systemd-logind.service.html
+ icon_filename: users.svg
+ categories:
+ - data-collection.systemd
+ keywords:
+ - logind
+ - systemd
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+        This collector monitors the number of sessions and users as reported by the `org.freedesktop.login1` DBus API.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/logind.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list: []
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: logind.sessions
+ description: Logind Sessions
+ unit: sessions
+ chart_type: stacked
+ dimensions:
+ - name: remote
+ - name: local
+ - name: logind.sessions_type
+ description: Logind Sessions By Type
+ unit: sessions
+ chart_type: stacked
+ dimensions:
+ - name: console
+ - name: graphical
+ - name: other
+ - name: logind.sessions_state
+ description: Logind Sessions By State
+ unit: sessions
+ chart_type: stacked
+ dimensions:
+ - name: online
+ - name: closing
+ - name: active
+ - name: logind.users_state
+ description: Logind Users By State
+ unit: users
+ chart_type: stacked
+ dimensions:
+ - name: offline
+ - name: closing
+ - name: online
+ - name: lingering
+ - name: active
diff --git a/src/go/plugin/go.d/modules/logind/testdata/config.json b/src/go/plugin/go.d/modules/logind/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/logind/testdata/config.yaml b/src/go/plugin/go.d/modules/logind/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logind/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/logstash/README.md b/src/go/plugin/go.d/modules/logstash/README.md
new file mode 120000
index 000000000..7a35ae8ff
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/README.md
@@ -0,0 +1 @@
+integrations/logstash.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/logstash/charts.go b/src/go/plugin/go.d/modules/logstash/charts.go
new file mode 100644
index 000000000..3fed45f4a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/charts.go
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioJVMThreads = module.Priority + iota
+ prioJVMMemHeapUsed
+ prioJVMMemHeap
+ prioJVMMemPoolsEden
+ prioJVMMemPoolsSurvivor
+ prioJVMMemPoolsOld
+ prioJVMGCCollectorCount
+ prioJVMGCCollectorTime
+ prioOpenFileDescriptors
+ prioEvent
+ prioEventDuration
+ prioPipelineEvent
+ prioPipelineEventDurations
+ prioUptime
+)
+
+var charts = module.Charts{
+ // thread
+ {
+ ID: "jvm_threads",
+ Title: "JVM Threads",
+ Units: "count",
+ Fam: "threads",
+ Ctx: "logstash.jvm_threads",
+ Priority: prioJVMThreads,
+ Dims: module.Dims{
+ {ID: "jvm_threads_count", Name: "threads"},
+ },
+ },
+ // memory
+ {
+ ID: "jvm_mem_heap_used",
+ Title: "JVM Heap Memory Percentage",
+ Units: "percentage",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_heap_used",
+ Priority: prioJVMMemHeapUsed,
+ Dims: module.Dims{
+ {ID: "jvm_mem_heap_used_percent", Name: "in use"},
+ },
+ },
+ {
+ ID: "jvm_mem_heap",
+ Title: "JVM Heap Memory",
+ Units: "KiB",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_heap",
+ Type: module.Area,
+ Priority: prioJVMMemHeap,
+ Dims: module.Dims{
+ {ID: "jvm_mem_heap_committed_in_bytes", Name: "committed", Div: 1024},
+ {ID: "jvm_mem_heap_used_in_bytes", Name: "used", Div: 1024},
+ },
+ },
+ {
+ ID: "jvm_mem_pools_eden",
+ Title: "JVM Pool Eden Memory",
+ Units: "KiB",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_pools_eden",
+ Type: module.Area,
+ Priority: prioJVMMemPoolsEden,
+ Dims: module.Dims{
+ {ID: "jvm_mem_pools_eden_committed_in_bytes", Name: "committed", Div: 1024},
+ {ID: "jvm_mem_pools_eden_used_in_bytes", Name: "used", Div: 1024},
+ },
+ },
+ {
+ ID: "jvm_mem_pools_survivor",
+ Title: "JVM Pool Survivor Memory",
+ Units: "KiB",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_pools_survivor",
+ Type: module.Area,
+ Priority: prioJVMMemPoolsSurvivor,
+ Dims: module.Dims{
+ {ID: "jvm_mem_pools_survivor_committed_in_bytes", Name: "committed", Div: 1024},
+ {ID: "jvm_mem_pools_survivor_used_in_bytes", Name: "used", Div: 1024},
+ },
+ },
+ {
+ ID: "jvm_mem_pools_old",
+ Title: "JVM Pool Old Memory",
+ Units: "KiB",
+ Fam: "memory",
+ Ctx: "logstash.jvm_mem_pools_old",
+ Type: module.Area,
+ Priority: prioJVMMemPoolsOld,
+ Dims: module.Dims{
+ {ID: "jvm_mem_pools_old_committed_in_bytes", Name: "committed", Div: 1024},
+ {ID: "jvm_mem_pools_old_used_in_bytes", Name: "used", Div: 1024},
+ },
+ },
+ // garbage collection
+ {
+ ID: "jvm_gc_collector_count",
+ Title: "Garbage Collection Count",
+ Units: "counts/s",
+ Fam: "garbage collection",
+ Ctx: "logstash.jvm_gc_collector_count",
+ Priority: prioJVMGCCollectorCount,
+ Dims: module.Dims{
+ {ID: "jvm_gc_collectors_eden_collection_count", Name: "eden", Algo: module.Incremental},
+ {ID: "jvm_gc_collectors_old_collection_count", Name: "old", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "jvm_gc_collector_time",
+ Title: "Time Spent On Garbage Collection",
+ Units: "ms",
+ Fam: "garbage collection",
+ Ctx: "logstash.jvm_gc_collector_time",
+ Priority: prioJVMGCCollectorTime,
+ Dims: module.Dims{
+ {ID: "jvm_gc_collectors_eden_collection_time_in_millis", Name: "eden", Algo: module.Incremental},
+ {ID: "jvm_gc_collectors_old_collection_time_in_millis", Name: "old", Algo: module.Incremental},
+ },
+ },
+ // processes
+ {
+ ID: "open_file_descriptors",
+ Title: "Open File Descriptors",
+ Units: "fd",
+ Fam: "processes",
+ Ctx: "logstash.open_file_descriptors",
+ Priority: prioOpenFileDescriptors,
+ Dims: module.Dims{
+ {ID: "process_open_file_descriptors", Name: "open"},
+ },
+ },
+ // events
+ {
+ ID: "event",
+ Title: "Events Overview",
+ Units: "events/s",
+ Fam: "events",
+ Ctx: "logstash.event",
+ Priority: prioEvent,
+ Dims: module.Dims{
+ {ID: "event_in", Name: "in", Algo: module.Incremental},
+ {ID: "event_filtered", Name: "filtered", Algo: module.Incremental},
+ {ID: "event_out", Name: "out", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "event_duration",
+ Title: "Events Duration",
+ Units: "seconds",
+ Fam: "events",
+ Ctx: "logstash.event_duration",
+ Priority: prioEventDuration,
+ Dims: module.Dims{
+ {ID: "event_duration_in_millis", Name: "event", Div: 1000, Algo: module.Incremental},
+ {ID: "event_queue_push_duration_in_millis", Name: "queue", Div: 1000, Algo: module.Incremental},
+ },
+ },
+ // uptime
+ {
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "logstash.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "jvm_uptime_in_millis", Name: "uptime", Div: 1000},
+ },
+ },
+}
+
+var pipelineChartsTmpl = module.Charts{
+ {
+ ID: "pipeline_%s_event",
+ Title: "Pipeline Events",
+ Units: "events/s",
+ Fam: "pipeline events",
+ Ctx: "logstash.pipeline_event",
+ Priority: prioPipelineEvent,
+ Dims: module.Dims{
+ {ID: "pipelines_%s_event_in", Name: "in", Algo: module.Incremental},
+ {ID: "pipelines_%s_event_filtered", Name: "filtered", Algo: module.Incremental},
+ {ID: "pipelines_%s_event_out", Name: "out", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "pipeline_%s_event_duration",
+ Title: "Pipeline Events Duration",
+ Units: "seconds",
+ Fam: "pipeline events duration",
+ Ctx: "logstash.pipeline_event_duration",
+ Priority: prioPipelineEventDurations,
+ Dims: module.Dims{
+ {ID: "pipelines_%s_event_duration_in_millis", Name: "event", Div: 1000, Algo: module.Incremental},
+ {ID: "pipelines_%s_event_queue_push_duration_in_millis", Name: "queue", Div: 1000, Algo: module.Incremental},
+ },
+ },
+}
+
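+// addPipelineCharts instantiates the pipeline chart templates for the given pipeline id, labels them with the pipeline name, and adds them to the collector's charts.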
+func (l *Logstash) addPipelineCharts(id string) {
+ charts := pipelineChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, id)
+ chart.Labels = []module.Label{
+ {Key: "pipeline", Value: id},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, id)
+ }
+ }
+
+ if err := l.Charts().Add(*charts...); err != nil {
+ l.Warning(err)
+ }
+}
+
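+// removePipelineCharts marks every chart that belongs to the given pipeline for removal.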
+func (l *Logstash) removePipelineCharts(id string) {
+ for _, chart := range *l.Charts() {
+ if strings.HasPrefix(chart.ID, "pipeline_"+id) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logstash/collect.go b/src/go/plugin/go.d/modules/logstash/collect.go
new file mode 100644
index 000000000..ff506d640
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/collect.go
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const urlPathNodeStatsAPI = "/_node/stats"
+
+func (l *Logstash) collect() (map[string]int64, error) {
+ stats, err := l.queryNodeStats()
+ if err != nil {
+ return nil, err
+ }
+
+ l.updateCharts(stats.Pipelines)
+
+ return stm.ToMap(stats), nil
+}
+
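+// updateCharts adds charts for pipelines seen for the first time and removes charts for pipelines that are no longer reported by the API.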
+func (l *Logstash) updateCharts(pipelines map[string]pipelineStats) {
+ seen := make(map[string]bool)
+
+ for id := range pipelines {
+ seen[id] = true
+ if !l.pipelines[id] {
+ l.pipelines[id] = true
+ l.addPipelineCharts(id)
+ }
+ }
+
+ for id := range l.pipelines {
+ if !seen[id] {
+ delete(l.pipelines, id)
+ l.removePipelineCharts(id)
+ }
+ }
+}
+
+func (l *Logstash) queryNodeStats() (*nodeStats, error) {
+ req, err := web.NewHTTPRequestWithPath(l.Request, urlPathNodeStatsAPI)
+ if err != nil {
+ return nil, err
+ }
+
+ var stats nodeStats
+
+ if err := l.doWithDecode(&stats, req); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
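+// doWithDecode executes the request and decodes the JSON response body into dst, treating any non-200 status code as an error.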
+func (l *Logstash) doWithDecode(dst interface{}, req *http.Request) error {
+ l.Debugf("executing %s '%s'", req.Method, req.URL)
+ resp, err := l.httpClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("%s returned %d status code (%s)", req.URL, resp.StatusCode, resp.Status)
+ }
+
+ content, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("error on reading response from %s : %v", req.URL, err)
+ }
+
+ if err := json.Unmarshal(content, dst); err != nil {
+ return fmt.Errorf("error on parsing response from %s : %v", req.URL, err)
+ }
+
+ return nil
+}
+
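+// closeBody drains and closes the response body so the underlying connection can be reused by the HTTP client.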
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logstash/config_schema.json b/src/go/plugin/go.d/modules/logstash/config_schema.json
new file mode 100644
index 000000000..c08d136f1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Logstash collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Logstash [monitoring API](https://www.elastic.co/guide/en/logstash/current/monitoring-logstash.html#monitoring).",
+ "type": "string",
+ "default": "http://localhost:9600",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logstash/integrations/logstash.md b/src/go/plugin/go.d/modules/logstash/integrations/logstash.md
new file mode 100644
index 000000000..0ca751ebf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/integrations/logstash.md
@@ -0,0 +1,283 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/logstash/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/logstash/metadata.yaml"
+sidebar_label: "Logstash"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Logs Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Logstash
+
+
+<img src="https://netdata.cloud/img/elastic-logstash.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: logstash
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Logstash instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Logstash instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| logstash.jvm_threads | threads | count |
+| logstash.jvm_mem_heap_used | in_use | percentage |
+| logstash.jvm_mem_heap | committed, used | KiB |
+| logstash.jvm_mem_pools_eden | committed, used | KiB |
+| logstash.jvm_mem_pools_survivor | committed, used | KiB |
+| logstash.jvm_mem_pools_old | committed, used | KiB |
+| logstash.jvm_gc_collector_count | eden, old | counts/s |
+| logstash.jvm_gc_collector_time | eden, old | ms |
+| logstash.open_file_descriptors | open | fd |
+| logstash.event | in, filtered, out | events/s |
+| logstash.event_duration | event, queue | seconds |
+| logstash.uptime | uptime | seconds |
+
+### Per pipeline
+
+These metrics refer to the pipeline.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| pipeline | pipeline name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| logstash.pipeline_event | in, filtered, out | events/s |
+| logstash.pipeline_event_duration | event, queue | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
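+
+The collector reads the Logstash [node stats API](https://www.elastic.co/guide/en/logstash/current/node-stats-api.html) at `/_node/stats`. As a quick sanity check (a sketch that assumes `curl` is available and the default monitoring port is used), you can query the endpoint yourself:
+
+```bash
+curl http://localhost:9600/_node/stats
+```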
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/logstash.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/logstash.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://localhost:9600 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://localhost:9600
+
+```
+</details>
+
+##### HTTP authentication
+
+HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://localhost:9600
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://localhost:9600
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://localhost:9600
+
+ - name: remote
+ url: http://192.0.2.1:9600
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `logstash` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m logstash
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `logstash` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep logstash
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep logstash /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep logstash
+```
+
+
diff --git a/src/go/plugin/go.d/modules/logstash/logstash.go b/src/go/plugin/go.d/modules/logstash/logstash.go
new file mode 100644
index 000000000..3ee95594e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/logstash.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("logstash", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Logstash {
+ return &Logstash{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://localhost:9600",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ pipelines: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Logstash struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ pipelines map[string]bool
+}
+
+func (l *Logstash) Configuration() any {
+ return l.Config
+}
+
+func (l *Logstash) Init() error {
+ if l.URL == "" {
+ l.Error("config validation: 'url' cannot be empty")
+ return errors.New("url not set")
+ }
+
+ httpClient, err := web.NewHTTPClient(l.Client)
+ if err != nil {
+ l.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ l.httpClient = httpClient
+
+ l.Debugf("using URL %s", l.URL)
+ l.Debugf("using timeout: %s", l.Timeout.Duration())
+
+ return nil
+}
+
+func (l *Logstash) Check() error {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (l *Logstash) Charts() *module.Charts {
+ return l.charts
+}
+
+func (l *Logstash) Collect() map[string]int64 {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (l *Logstash) Cleanup() {
+ if l.httpClient != nil {
+ l.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/logstash/logstash_test.go b/src/go/plugin/go.d/modules/logstash/logstash_test.go
new file mode 100644
index 000000000..166d39815
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/logstash_test.go
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNodeStatsMetrics, _ = os.ReadFile("testdata/stats.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNodeStatsMetrics": dataNodeStatsMetrics,
+ } {
+ require.NotNilf(t, data, name)
+	}
+}
+
+func TestLogstash_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Logstash{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestLogstash_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ls := New()
+ ls.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, ls.Init())
+ } else {
+ assert.NoError(t, ls.Init())
+ }
+ })
+ }
+}
+
+func TestLogstash_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestLogstash_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestLogstash_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (ls *Logstash, cleanup func())
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: caseValidResponse,
+ },
+ "fail on invalid data response": {
+ wantFail: true,
+ prepare: caseInvalidDataResponse,
+ },
+ "fail on connection refused": {
+ wantFail: true,
+ prepare: caseConnectionRefused,
+ },
+ "fail on 404 response": {
+ wantFail: true,
+ prepare: case404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ls, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, ls.Check())
+ } else {
+ assert.NoError(t, ls.Check())
+ }
+ })
+ }
+}
+
+func TestLogstash_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (ls *Logstash, cleanup func())
+ wantNumOfCharts int
+ wantMetrics map[string]int64
+ }{
+ "success on valid response": {
+ prepare: caseValidResponse,
+ wantNumOfCharts: len(charts) + len(pipelineChartsTmpl),
+ wantMetrics: map[string]int64{
+ "event_duration_in_millis": 0,
+ "event_filtered": 0,
+ "event_in": 0,
+ "event_out": 0,
+ "event_queue_push_duration_in_millis": 0,
+ "jvm_gc_collectors_eden_collection_count": 5796,
+ "jvm_gc_collectors_eden_collection_time_in_millis": 45008,
+ "jvm_gc_collectors_old_collection_count": 7,
+ "jvm_gc_collectors_old_collection_time_in_millis": 3263,
+ "jvm_mem_heap_committed_in_bytes": 528154624,
+ "jvm_mem_heap_used_in_bytes": 189973480,
+ "jvm_mem_heap_used_percent": 35,
+ "jvm_mem_pools_eden_committed_in_bytes": 69795840,
+ "jvm_mem_pools_eden_used_in_bytes": 2600120,
+ "jvm_mem_pools_old_committed_in_bytes": 449642496,
+ "jvm_mem_pools_old_used_in_bytes": 185944824,
+ "jvm_mem_pools_survivor_committed_in_bytes": 8716288,
+ "jvm_mem_pools_survivor_used_in_bytes": 1428536,
+ "jvm_threads_count": 28,
+ "jvm_uptime_in_millis": 699809475,
+ "pipelines_pipeline-1_event_duration_in_millis": 5027018,
+ "pipelines_pipeline-1_event_filtered": 567639,
+ "pipelines_pipeline-1_event_in": 567639,
+ "pipelines_pipeline-1_event_out": 567639,
+ "pipelines_pipeline-1_event_queue_push_duration_in_millis": 84241,
+ "process_open_file_descriptors": 101,
+ },
+ },
+ "fail on invalid data response": {
+ prepare: caseInvalidDataResponse,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on connection refused": {
+ prepare: caseConnectionRefused,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on 404 response": {
+ prepare: case404,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ls, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := ls.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ assert.Equal(t, test.wantNumOfCharts, len(*ls.Charts()))
+ ensureCollectedHasAllChartsDimsVarsIDs(t, ls, mx)
+ }
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, ls *Logstash, mx map[string]int64) {
+ for _, chart := range *ls.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func caseValidResponse(t *testing.T) (*Logstash, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathNodeStatsAPI:
+ _, _ = w.Write(dataNodeStatsMetrics)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ ls := New()
+ ls.URL = srv.URL
+ require.NoError(t, ls.Init())
+
+ return ls, srv.Close
+}
+
+func caseInvalidDataResponse(t *testing.T) (*Logstash, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ ls := New()
+ ls.URL = srv.URL
+ require.NoError(t, ls.Init())
+
+ return ls, srv.Close
+}
+
+func caseConnectionRefused(t *testing.T) (*Logstash, func()) {
+ t.Helper()
+ ls := New()
+ ls.URL = "http://127.0.0.1:65001"
+ require.NoError(t, ls.Init())
+
+ return ls, func() {}
+}
+
+func case404(t *testing.T) (*Logstash, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ ls := New()
+ ls.URL = srv.URL
+ require.NoError(t, ls.Init())
+
+ return ls, srv.Close
+}
diff --git a/src/go/plugin/go.d/modules/logstash/metadata.yaml b/src/go/plugin/go.d/modules/logstash/metadata.yaml
new file mode 100644
index 000000000..00d92db2a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/metadata.yaml
@@ -0,0 +1,274 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-logstash
+ plugin_name: go.d.plugin
+ module_name: logstash
+ monitored_instance:
+ name: Logstash
+ link: https://www.elastic.co/products/logstash
+ icon_filename: elastic-logstash.svg
+ categories:
+ - data-collection.logs-servers
+ keywords:
+      - logstash
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Logstash instances.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+          name: go.d/logstash.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://localhost:9600
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://localhost:9600
+ - name: HTTP authentication
+ description: HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://localhost:9600
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://localhost:9600
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://localhost:9600
+
+ - name: remote
+ url: http://192.0.2.1:9600
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: logstash.jvm_threads
+ description: JVM Threads
+ unit: count
+ chart_type: line
+ dimensions:
+ - name: threads
+ - name: logstash.jvm_mem_heap_used
+ description: JVM Heap Memory Percentage
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: in_use
+ - name: logstash.jvm_mem_heap
+ description: JVM Heap Memory
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: logstash.jvm_mem_pools_eden
+ description: JVM Pool Eden Memory
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: logstash.jvm_mem_pools_survivor
+ description: JVM Pool Survivor Memory
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: logstash.jvm_mem_pools_old
+ description: JVM Pool Old Memory
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: logstash.jvm_gc_collector_count
+ description: Garbage Collection Count
+ unit: counts/s
+ chart_type: line
+ dimensions:
+ - name: eden
+ - name: old
+ - name: logstash.jvm_gc_collector_time
+ description: Time Spent On Garbage Collection
+ unit: ms
+ chart_type: line
+ dimensions:
+ - name: eden
+ - name: old
+ - name: logstash.open_file_descriptors
+ description: Open File Descriptors
+ unit: fd
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: logstash.event
+ description: Events Overview
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: filtered
+ - name: out
+ - name: logstash.event_duration
+ description: Events Duration
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: event
+ - name: queue
+ - name: logstash.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: pipeline
+ description: These metrics refer to the pipeline.
+ labels:
+ - name: pipeline
+ description: pipeline name
+ metrics:
+ - name: logstash.pipeline_event
+ description: Pipeline Events
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: filtered
+ - name: out
+ - name: logstash.pipeline_event_duration
+ description: Pipeline Events Duration
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: event
+ - name: queue
diff --git a/src/go/plugin/go.d/modules/logstash/node_stats.go b/src/go/plugin/go.d/modules/logstash/node_stats.go
new file mode 100644
index 000000000..1687f333d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/node_stats.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logstash
+
+// https://www.elastic.co/guide/en/logstash/current/node-stats-api.html
+
+type nodeStats struct {
+ JVM jvmStats `json:"jvm" stm:"jvm"`
+ Process processStats `json:"process" stm:"process"`
+ Event eventsStats `json:"event" stm:"event"`
+ Pipelines map[string]pipelineStats `json:"pipelines" stm:"pipelines"`
+}
+
+type pipelineStats struct {
+ Event eventsStats `json:"events" stm:"event"`
+}
+
+type eventsStats struct {
+ In int `json:"in" stm:"in"`
+ Filtered int `json:"filtered" stm:"filtered"`
+ Out int `json:"out" stm:"out"`
+ DurationInMillis int `json:"duration_in_millis" stm:"duration_in_millis"`
+ QueuePushDurationInMillis int `json:"queue_push_duration_in_millis" stm:"queue_push_duration_in_millis"`
+}
+
+type processStats struct {
+ OpenFileDescriptors int `json:"open_file_descriptors" stm:"open_file_descriptors"`
+}
+
+type jvmStats struct {
+ Threads struct {
+ Count int `stm:"count"`
+ } `stm:"threads"`
+ Mem jvmMemStats `stm:"mem"`
+ GC jvmGCStats `stm:"gc"`
+ UptimeInMillis int `json:"uptime_in_millis" stm:"uptime_in_millis"`
+}
+
+type jvmMemStats struct {
+ HeapUsedPercent int `json:"heap_used_percent" stm:"heap_used_percent"`
+ HeapCommittedInBytes int `json:"heap_committed_in_bytes" stm:"heap_committed_in_bytes"`
+ HeapUsedInBytes int `json:"heap_used_in_bytes" stm:"heap_used_in_bytes"`
+ Pools struct {
+ Survivor jvmPoolStats `stm:"survivor"`
+ Old jvmPoolStats `stm:"old"`
+ Young jvmPoolStats `stm:"eden"`
+ } `stm:"pools"`
+}
+
+type jvmPoolStats struct {
+ UsedInBytes int `json:"used_in_bytes" stm:"used_in_bytes"`
+ CommittedInBytes int `json:"committed_in_bytes" stm:"committed_in_bytes"`
+}
+
+type jvmGCStats struct {
+ Collectors struct {
+ Old gcCollectorStats `stm:"old"`
+ Young gcCollectorStats `stm:"eden"`
+ } `stm:"collectors"`
+}
+
+type gcCollectorStats struct {
+ CollectionTimeInMillis int `json:"collection_time_in_millis" stm:"collection_time_in_millis"`
+ CollectionCount int `json:"collection_count" stm:"collection_count"`
+}
diff --git a/src/go/plugin/go.d/modules/logstash/testdata/config.json b/src/go/plugin/go.d/modules/logstash/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/logstash/testdata/config.yaml b/src/go/plugin/go.d/modules/logstash/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/logstash/testdata/stats.json b/src/go/plugin/go.d/modules/logstash/testdata/stats.json
new file mode 100644
index 000000000..50fd7b071
--- /dev/null
+++ b/src/go/plugin/go.d/modules/logstash/testdata/stats.json
@@ -0,0 +1,252 @@
+{
+ "host" : "<replaced>",
+ "version" : "7.3.0",
+ "http_address" : "0.0.0.0:9600",
+ "id" : "<replaced>",
+ "name" : "<replaced>",
+ "ephemeral_id" : "339d4ddb-8a6e-4ddc-b843-efd4abf4bf73",
+ "status" : "green",
+ "snapshot" : false,
+ "pipeline" : {
+ "workers" : 1,
+ "batch_size" : 125,
+ "batch_delay" : 50
+ },
+ "jvm" : {
+ "threads" : {
+ "count" : 28,
+ "peak_count" : 32
+ },
+ "mem" : {
+ "heap_used_percent" : 35,
+ "heap_committed_in_bytes" : 528154624,
+ "heap_max_in_bytes" : 528154624,
+ "heap_used_in_bytes" : 189973480,
+ "non_heap_used_in_bytes" : 178053280,
+ "non_heap_committed_in_bytes" : 235200512,
+ "pools" : {
+ "young" : {
+ "committed_in_bytes" : 69795840,
+ "peak_max_in_bytes" : 69795840,
+ "max_in_bytes" : 69795840,
+ "peak_used_in_bytes" : 69795840,
+ "used_in_bytes" : 2600120
+ },
+ "old" : {
+ "committed_in_bytes" : 449642496,
+ "peak_max_in_bytes" : 449642496,
+ "max_in_bytes" : 449642496,
+ "peak_used_in_bytes" : 185944824,
+ "used_in_bytes" : 185944824
+ },
+ "survivor" : {
+ "committed_in_bytes" : 8716288,
+ "peak_max_in_bytes" : 8716288,
+ "max_in_bytes" : 8716288,
+ "peak_used_in_bytes" : 8716288,
+ "used_in_bytes" : 1428536
+ }
+ }
+ },
+ "gc" : {
+ "collectors" : {
+ "young" : {
+ "collection_count" : 5796,
+ "collection_time_in_millis" : 45008
+ },
+ "old" : {
+ "collection_count" : 7,
+ "collection_time_in_millis" : 3263
+ }
+ }
+ },
+ "uptime_in_millis" : 699809475
+ },
+ "process" : {
+ "open_file_descriptors" : 101,
+ "peak_open_file_descriptors" : 105,
+ "max_file_descriptors" : 1048576,
+ "mem" : {
+ "total_virtual_in_bytes" : 5074657280
+ },
+ "cpu" : {
+ "total_in_millis" : 7304550,
+ "percent" : 0,
+ "load_average" : {
+ "1m" : 0.73,
+ "5m" : 1.13,
+ "15m" : 1.06
+ }
+ }
+ },
+ "events" : {
+ "in" : 567639,
+ "filtered" : 567639,
+ "out" : 567639,
+ "duration_in_millis" : 5027018,
+ "queue_push_duration_in_millis" : 84241
+ },
+ "pipelines" : {
+ "pipeline-1" : {
+ "events" : {
+ "queue_push_duration_in_millis" : 84241,
+ "filtered" : 567639,
+ "duration_in_millis" : 5027018,
+ "in" : 567639,
+ "out" : 567639
+ },
+ "plugins" : {
+ "inputs" : [ {
+ "id" : "kafka input",
+ "events" : {
+ "queue_push_duration_in_millis" : 84241,
+ "out" : 567639
+ },
+ "name" : "kafka"
+ } ],
+ "codecs" : [ {
+ "id" : "json_9562e6c4-7a1a-4c18-919f-f012e58923dd",
+ "decode" : {
+ "writes_in" : 567639,
+ "duration_in_millis" : 86778,
+ "out" : 567639
+ },
+ "name" : "json",
+ "encode" : {
+ "writes_in" : 0,
+ "duration_in_millis" : 0
+ }
+ }, {
+ "id" : "plain_13e28721-e681-43ec-aa2c-c0a4d856b9ed",
+ "decode" : {
+ "writes_in" : 0,
+ "duration_in_millis" : 0,
+ "out" : 0
+ },
+ "name" : "plain",
+ "encode" : {
+ "writes_in" : 0,
+ "duration_in_millis" : 0
+ }
+ } ],
+ "filters" : [ {
+ "id" : "set default timezone",
+ "events" : {
+ "duration_in_millis" : 340,
+ "in" : 326901,
+ "out" : 326901
+ },
+ "name" : "mutate"
+ }, {
+ "id" : "assign index (filebeat)",
+ "events" : {
+ "duration_in_millis" : 858,
+ "in" : 567639,
+ "out" : 567639
+ },
+ "name" : "mutate"
+ }, {
+ "id" : "parse JSON",
+ "events" : {
+ "duration_in_millis" : 112,
+ "in" : 0,
+ "out" : 0
+ },
+ "name" : "json"
+ }, {
+ "id" : "parse LTSV",
+ "events" : {
+ "duration_in_millis" : 130,
+ "in" : 0,
+ "out" : 0
+ },
+ "name" : "kv"
+ }, {
+ "id" : "assign document_id",
+ "events" : {
+ "duration_in_millis" : 2406,
+ "in" : 567639,
+ "out" : 567639
+ },
+ "name" : "fingerprint"
+ }, {
+ "id" : "assign index (fluentd)",
+ "events" : {
+ "duration_in_millis" : 140,
+ "in" : 0,
+ "out" : 0
+ },
+ "name" : "mutate"
+ }, {
+ "id" : "parse timestamp",
+ "events" : {
+ "duration_in_millis" : 7261,
+ "in" : 326901,
+ "out" : 326901
+ },
+ "name" : "date",
+ "failures" : 1,
+ "matches" : 326900
+ } ],
+ "outputs" : [ {
+ "id" : "0f72afb28c5ff3a3897d87b04fc1b0a5fe8358cb55bbc29b995056fd868e612b",
+ "events" : {
+ "duration_in_millis" : 4063485,
+ "in" : 567639,
+ "out" : 567639
+ },
+ "name" : "elasticsearch",
+ "documents" : {
+ "successes" : 567639
+ },
+ "bulk_requests" : {
+ "responses" : {
+ "200" : 50735
+ },
+ "successes" : 50735
+ }
+ } ]
+ },
+ "reloads" : {
+ "last_error" : null,
+ "last_failure_timestamp" : null,
+ "last_success_timestamp" : null,
+ "failures" : 0,
+ "successes" : 0
+ },
+ "queue" : {
+ "type" : "persisted",
+ "events_count" : 0,
+ "queue_size_in_bytes" : 45085456,
+ "max_queue_size_in_bytes" : 1073741824
+ },
+ "hash" : "46f5c757f55a52d08ed841e9f51698653cf228ff9be41b7372f20a1b699bf129",
+ "ephemeral_id" : "c43b3a8e-882c-4e3a-a2f2-8515a5ef4ecc"
+ }
+ },
+ "reloads" : {
+ "failures" : 0,
+ "successes" : 0
+ },
+ "os" : {
+ "cgroup" : {
+ "cpuacct" : {
+ "control_group" : "/",
+ "usage_nanos" : 7304416115351
+ },
+ "cpu" : {
+ "control_group" : "/",
+ "cfs_quota_micros" : 100000,
+ "cfs_period_micros" : 100000,
+ "stat" : {
+ "time_throttled_nanos" : 124716913549,
+ "number_of_elapsed_periods" : 5875889,
+ "number_of_times_throttled" : 1219
+ }
+ }
+ }
+ },
+ "queue" : {
+ "events_count" : 0
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/lvm/README.md b/src/go/plugin/go.d/modules/lvm/README.md
new file mode 120000
index 000000000..9b86695a2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/README.md
@@ -0,0 +1 @@
+integrations/lvm_logical_volumes.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/lvm/charts.go b/src/go/plugin/go.d/modules/lvm/charts.go
new file mode 100644
index 000000000..8d2f0fa19
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/charts.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lvm
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioLVDataPercent = 2920 + iota
+ prioLVMetadataPercent
+)
+
+var lvThinPoolChartsTmpl = module.Charts{
+ lvDataSpaceUtilizationChartTmpl.Copy(),
+ lvMetadataSpaceUtilizationChartTmpl.Copy(),
+}
+
+var (
+ lvDataSpaceUtilizationChartTmpl = module.Chart{
+ ID: "lv_%s_vg_%s_lv_data_space_utilization",
+ Title: "Logical volume space allocated for data",
+ Units: "percentage",
+ Fam: "lv space usage",
+ Ctx: "lvm.lv_data_space_utilization",
+ Type: module.Area,
+ Priority: prioLVDataPercent,
+ Dims: module.Dims{
+ {ID: "lv_%s_vg_%s_data_percent", Name: "utilization", Div: 100},
+ },
+ }
+ lvMetadataSpaceUtilizationChartTmpl = module.Chart{
+ ID: "lv_%s_vg_%s_lv_metadata_space_utilization",
+ Title: "Logical volume space allocated for metadata",
+ Units: "percentage",
+ Fam: "lv space usage",
+ Ctx: "lvm.lv_metadata_space_utilization",
+ Type: module.Area,
+ Priority: prioLVMetadataPercent,
+ Dims: module.Dims{
+ {ID: "lv_%s_vg_%s_metadata_percent", Name: "utilization", Div: 100},
+ },
+ }
+)
+
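+// addLVMThinPoolCharts instantiates the per-thin-pool chart templates,
+// substituting the logical volume and volume group names into the chart and dimension IDs.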
+func (l *LVM) addLVMThinPoolCharts(lvName, vgName string) {
+ charts := lvThinPoolChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, lvName, vgName)
+ chart.Labels = []module.Label{
+ {Key: "lv_name", Value: lvName},
+ {Key: "vg_name", Value: vgName},
+ {Key: "volume_type", Value: "thin_pool"},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, lvName, vgName)
+ }
+ }
+
+ if err := l.Charts().Add(*charts...); err != nil {
+ l.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/lvm/collect.go b/src/go/plugin/go.d/modules/lvm/collect.go
new file mode 100644
index 000000000..8f57a1a80
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/collect.go
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lvm
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+)
+
+type lvsReport struct {
+ Report []struct {
+ Lv []struct {
+ VGName string `json:"vg_name"`
+ LVName string `json:"lv_name"`
+ LVSize string `json:"lv_size"`
+ DataPercent string `json:"data_percent"`
+ MetadataPercent string `json:"metadata_percent"`
+ LVAttr string `json:"lv_attr"`
+ } `json:"lv"`
+ } `json:"report"`
+}
+
+func (l *LVM) collect() (map[string]int64, error) {
+ bs, err := l.exec.lvsReportJson()
+ if err != nil {
+ return nil, err
+ }
+
+ var report lvsReport
+ if err = json.Unmarshal(bs, &report); err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ for _, r := range report.Report {
+ for _, lv := range r.Lv {
+ if lv.VGName == "" || lv.LVName == "" {
+ continue
+ }
+
+ if !isThinPool(lv.LVAttr) {
+ l.Debugf("skipping lv '%s' vg '%s': not a thin pool", lv.LVName, lv.VGName)
+ continue
+ }
+
+ key := fmt.Sprintf("lv_%s_vg_%s", lv.LVName, lv.VGName)
+ if !l.lvmThinPools[key] {
+ l.addLVMThinPoolCharts(lv.LVName, lv.VGName)
+ l.lvmThinPools[key] = true
+ }
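+ // lvs reports percentages as decimal strings (e.g. "78.89"); they are stored x100 as
+ // integers here, and the chart dimensions divide by 100 to restore the fractional part.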
+ if v, ok := parseFloat(lv.DataPercent); ok {
+ mx[key+"_data_percent"] = int64(v * 100)
+ }
+ if v, ok := parseFloat(lv.MetadataPercent); ok {
+ mx[key+"_metadata_percent"] = int64(v * 100)
+ }
+ }
+ }
+
+ return mx, nil
+}
+
+func isThinPool(lvAttr string) bool {
+ return getLVType(lvAttr) == "thin_pool"
+}
+
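+// getLVType maps the first character of lv_attr to a volume type,
+// e.g. lv_attr "twi-ao----" starts with 't', so the volume is a thin pool.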
+func getLVType(lvAttr string) string {
+ if len(lvAttr) == 0 {
+ return ""
+ }
+
+ // https://man7.org/linux/man-pages/man8/lvs.8.html#NOTES
+ switch lvAttr[0] {
+ case 'C':
+ return "cache"
+ case 'm':
+ return "mirrored"
+ case 'M':
+ return "mirrored_without_initial_sync"
+ case 'o':
+ return "origin"
+ case 'O':
+ return "origin_with_merging_snapshot"
+ case 'g':
+ return "integrity"
+ case 'r':
+ return "raid"
+ case 'R':
+ return "raid_without_initial_sync"
+ case 's':
+ return "snapshot"
+ case 'S':
+ return "merging_snapshot"
+ case 'p':
+ return "pvmove"
+ case 'v':
+ return "virtual"
+ case 'i':
+ return "mirror_or_raid_image"
+ case 'I':
+ return "mirror_or_raid_mage_out_of_sync"
+ case 'l':
+ return "log_device"
+ case 'c':
+ return "under_conversion"
+ case 'V':
+ return "thin_volume"
+ case 't':
+ return "thin_pool"
+ case 'T':
+ return "thin_pool_data"
+ case 'd':
+ return "vdo_pool"
+ case 'D':
+ return "vdo_pool_data"
+ case 'e':
+ return "raid_or_pool_metadata"
+ default:
+ return ""
+ }
+}
+
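+// parseFloat treats the lvs placeholder "-" (and any non-numeric value, including an empty string) as "no data".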
+func parseFloat(s string) (float64, bool) {
+ if s == "-" {
+ return 0, false
+ }
+ v, err := strconv.ParseFloat(s, 64)
+ return v, err == nil
+}
diff --git a/src/go/plugin/go.d/modules/lvm/config_schema.json b/src/go/plugin/go.d/modules/lvm/config_schema.json
new file mode 100644
index 000000000..1e0788074
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "LVM collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/lvm/exec.go b/src/go/plugin/go.d/modules/lvm/exec.go
new file mode 100644
index 000000000..66863a051
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/exec.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lvm
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newLVMCLIExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *lvmCLIExec {
+ return &lvmCLIExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ }
+}
+
+type lvmCLIExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+}
+
+func (e *lvmCLIExec) lvsReportJson() ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
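+ // "lvs-report-json" is a whitelisted ndsudo command; ndsudo locates and runs the
+ // system lvs binary with a JSON report limited to the options listed below.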
+ cmd := exec.CommandContext(ctx,
+ e.ndsudoPath,
+ "lvs-report-json",
+ "--options",
+ "vg_name,lv_name,lv_size,data_percent,metadata_percent,lv_attr",
+ )
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/lvm/init.go b/src/go/plugin/go.d/modules/lvm/init.go
new file mode 100644
index 000000000..5c4db1add
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/init.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lvm
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (l *LVM) initLVMCLIExec() (lvmCLI, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ lvmExec := newLVMCLIExec(ndsudoPath, l.Timeout.Duration(), l.Logger)
+
+ return lvmExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/lvm/integrations/lvm_logical_volumes.md b/src/go/plugin/go.d/modules/lvm/integrations/lvm_logical_volumes.md
new file mode 100644
index 000000000..1d76c3635
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/integrations/lvm_logical_volumes.md
@@ -0,0 +1,202 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/lvm/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/lvm/metadata.yaml"
+sidebar_label: "LVM logical volumes"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# LVM logical volumes
+
+
+<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: lvm
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the health of LVM logical volumes. It relies on the [`lvs`](https://man7.org/linux/man-pages/man8/lvs.8.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
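+
+For reference, the exact `ndsudo` invocation this collector performs (as defined in its `exec.go`) is:
+
+```bash
+ndsudo lvs-report-json --options vg_name,lv_name,lv_size,data_percent,metadata_percent,lv_attr
+```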
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per logical volume
+
+These metrics refer to the LVM logical volume.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| lv_name | Logical volume name |
+| vg_name | Volume group name |
+| volume_type | Type of the volume |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| lvm.lv_data_space_utilization | utilization | % |
+| lvm.lv_metadata_space_utilization | utilization | % |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ lvm_lv_data_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_data_space_utilization | LVM logical volume high data space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |
+| [ lvm_lv_metadata_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf) | lvm.lv_metadata_space_utilization | LVM logical volume high metadata space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type}) |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/lvm.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/lvm.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | lvs binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: lvm
+ update_every: 5 # Collect logical volume statistics every 5 seconds
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `lvm` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m lvm
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `lvm` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep lvm
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep lvm /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep lvm
+```
+
+
diff --git a/src/go/plugin/go.d/modules/lvm/lvm.go b/src/go/plugin/go.d/modules/lvm/lvm.go
new file mode 100644
index 000000000..c6754e06a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/lvm.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lvm
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("lvm", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *LVM {
+ return &LVM{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ lvmThinPools: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ LVM struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec lvmCLI
+
+ lvmThinPools map[string]bool
+ }
+ lvmCLI interface {
+ lvsReportJson() ([]byte, error)
+ }
+)
+
+func (l *LVM) Configuration() any {
+ return l.Config
+}
+
+func (l *LVM) Init() error {
+ lvmExec, err := l.initLVMCLIExec()
+ if err != nil {
+ l.Errorf("lvm exec initialization: %v", err)
+ return err
+ }
+ l.exec = lvmExec
+
+ return nil
+}
+
+func (l *LVM) Check() error {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (l *LVM) Charts() *module.Charts {
+ return l.charts
+}
+
+func (l *LVM) Collect() map[string]int64 {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (l *LVM) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/lvm/lvm_test.go b/src/go/plugin/go.d/modules/lvm/lvm_test.go
new file mode 100644
index 000000000..a3c072837
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/lvm_test.go
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lvm
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataLvsReportJson, _ = os.ReadFile("testdata/lvs-report.json")
+ dataLvsReportNoThinJson, _ = os.ReadFile("testdata/lvs-report-no-thin.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataLvsReportJson": dataLvsReportJson,
+ "dataLvsReportNoThinJson": dataLvsReportNoThinJson,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestLVM_Configuration(t *testing.T) {
+ module.TestConfigurationSerialize(t, &LVM{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestLVM_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if failed to locate ndsudo": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ lvm := New()
+ lvm.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, lvm.Init())
+ } else {
+ assert.NoError(t, lvm.Init())
+ }
+ })
+ }
+}
+
+func TestLVM_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *LVM
+ }{
+ "not initialized exec": {
+ prepare: func() *LVM {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *LVM {
+ lvm := New()
+ lvm.exec = prepareMockOK()
+ _ = lvm.Check()
+ return lvm
+ },
+ },
+ "after collect": {
+ prepare: func() *LVM {
+ lvm := New()
+ lvm.exec = prepareMockOK()
+ _ = lvm.Collect()
+ return lvm
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ lvm := test.prepare()
+
+ assert.NotPanics(t, lvm.Cleanup)
+ })
+ }
+}
+
+func TestLVM_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestLVM_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockLvmCliExec
+ wantFail bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantFail: false,
+ },
+ "no thin volumes": {
+ prepareMock: prepareMockNoThinVolumes,
+ wantFail: true,
+ },
+ "error on lvs report call": {
+ prepareMock: prepareMockErrOnLvsReportJson,
+ wantFail: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantFail: true,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ lvm := New()
+ mock := test.prepareMock()
+ lvm.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, lvm.Check())
+ } else {
+ assert.NoError(t, lvm.Check())
+ }
+ })
+ }
+}
+
+func TestLVM_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockLvmCliExec
+ wantMetrics map[string]int64
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantMetrics: map[string]int64{
+ "lv_root_vg_cm-vg_data_percent": 7889,
+ "lv_root_vg_cm-vg_metadata_percent": 1925,
+ },
+ },
+ "no thin volumes": {
+ prepareMock: prepareMockNoThinVolumes,
+ wantMetrics: nil,
+ },
+ "error on lvs report call": {
+ prepareMock: prepareMockErrOnLvsReportJson,
+ wantMetrics: nil,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ lvm := New()
+ mock := test.prepareMock()
+ lvm.exec = mock
+
+ mx := lvm.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ assert.Len(t, *lvm.Charts(), len(lvThinPoolChartsTmpl)*len(lvm.lvmThinPools))
+ }
+ })
+ }
+}
+
+func prepareMockOK() *mockLvmCliExec {
+ return &mockLvmCliExec{
+ lvsReportJsonData: dataLvsReportJson,
+ }
+}
+
+func prepareMockNoThinVolumes() *mockLvmCliExec {
+ return &mockLvmCliExec{
+ lvsReportJsonData: dataLvsReportNoThinJson,
+ }
+}
+
+func prepareMockErrOnLvsReportJson() *mockLvmCliExec {
+ return &mockLvmCliExec{
+ errOnLvsReportJson: true,
+ }
+}
+
+func prepareMockEmptyResponse() *mockLvmCliExec {
+ return &mockLvmCliExec{}
+}
+
+func prepareMockUnexpectedResponse() *mockLvmCliExec {
+ return &mockLvmCliExec{
+ lvsReportJsonData: []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`),
+ }
+}
+
+type mockLvmCliExec struct {
+ errOnLvsReportJson bool
+ lvsReportJsonData []byte
+}
+
+func (m *mockLvmCliExec) lvsReportJson() ([]byte, error) {
+ if m.errOnLvsReportJson {
+ return nil, errors.New("mock.lvsReportJson() error")
+ }
+
+ return m.lvsReportJsonData, nil
+}
diff --git a/src/go/plugin/go.d/modules/lvm/metadata.yaml b/src/go/plugin/go.d/modules/lvm/metadata.yaml
new file mode 100644
index 000000000..46d036946
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/metadata.yaml
@@ -0,0 +1,115 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-lvm
+ plugin_name: go.d.plugin
+ module_name: lvm
+ monitored_instance:
+ name: LVM logical volumes
+ link: ""
+ icon_filename: filesystem.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - lvm
+ - lvs
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ This collector monitors the health of LVM logical volumes.
+ It relies on the [`lvs`](https://man7.org/linux/man-pages/man8/lvs.8.html) CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/lvm.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: lvs binary execution timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: lvm
+ update_every: 5 # Collect logical volume statistics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: lvm_lv_data_space_utilization
+ metric: lvm.lv_data_space_utilization
+ info: LVM logical volume high data space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type})
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf
+ - name: lvm_lv_metadata_space_utilization
+ metric: lvm.lv_metadata_space_utilization
+ info: LVM logical volume high metadata space usage (LV ${label:lv_name} VG ${label:vg_name} Type ${label:volume_type})
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/lvm.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: logical volume
+ description: These metrics refer to the LVM logical volume.
+ labels:
+ - name: lv_name
+ description: Logical volume name
+ - name: vg_name
+ description: Volume group name
+ - name: volume_type
+ description: Type of the volume
+ metrics:
+ - name: lvm.lv_data_space_utilization
+ description: Logical volume space allocated for data
+ unit: '%'
+ chart_type: area
+ dimensions:
+ - name: utilization
+ - name: lvm.lv_metadata_space_utilization
+ description: Logical volume space allocated for metadata
+ unit: '%'
+ chart_type: area
+ dimensions:
+ - name: utilization
diff --git a/src/go/plugin/go.d/modules/lvm/testdata/config.json b/src/go/plugin/go.d/modules/lvm/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/lvm/testdata/config.yaml b/src/go/plugin/go.d/modules/lvm/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/lvm/testdata/lvs-report-no-thin.json b/src/go/plugin/go.d/modules/lvm/testdata/lvs-report-no-thin.json
new file mode 100644
index 000000000..1fe8ec44f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/testdata/lvs-report-no-thin.json
@@ -0,0 +1,16 @@
+{
+ "report": [
+ {
+ "lv": [
+ {
+ "vg_name": "cm-vg",
+ "lv_name": "root",
+ "lv_size": "214232465408",
+ "data_percent": "",
+ "metadata_percent": "",
+ "lv_attr": "-wi-ao----"
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/lvm/testdata/lvs-report.json b/src/go/plugin/go.d/modules/lvm/testdata/lvs-report.json
new file mode 100644
index 000000000..bd04fad75
--- /dev/null
+++ b/src/go/plugin/go.d/modules/lvm/testdata/lvs-report.json
@@ -0,0 +1,16 @@
+{
+ "report": [
+ {
+ "lv": [
+ {
+ "vg_name": "cm-vg",
+ "lv_name": "root",
+ "lv_size": "214232465408",
+ "data_percent": "78.89",
+ "metadata_percent": "19.25",
+ "lv_attr": "twi-ao----"
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/megacli/README.md b/src/go/plugin/go.d/modules/megacli/README.md
new file mode 120000
index 000000000..bf0d30985
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/README.md
@@ -0,0 +1 @@
+integrations/megacli_megaraid.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/megacli/charts.go b/src/go/plugin/go.d/modules/megacli/charts.go
new file mode 100644
index 000000000..c479d5677
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/charts.go
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package megacli
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioAdapterHealthState = module.Priority + iota
+
+ prioPhysDriveMediaErrorsRate
+ prioPhysDrivePredictiveFailuresRate
+
+ prioBBURelativeCharge
+ prioBBURechargeCycles
+ prioBBUCapDegradationPerc
+ prioBBUTemperature
+)
+
+var adapterChartsTmpl = module.Charts{
+ adapterHealthStateChartTmpl.Copy(),
+}
+
+var (
+ adapterHealthStateChartTmpl = module.Chart{
+ ID: "adapter_%s_health_state",
+ Title: "Adapter health state",
+ Units: "state",
+ Fam: "adapter health",
+ Ctx: "megacli.adapter_health_state",
+ Type: module.Line,
+ Priority: prioAdapterHealthState,
+ Dims: module.Dims{
+ {ID: "adapter_%s_health_state_optimal", Name: "optimal"},
+ {ID: "adapter_%s_health_state_degraded", Name: "degraded"},
+ {ID: "adapter_%s_health_state_partially_degraded", Name: "partially_degraded"},
+ {ID: "adapter_%s_health_state_failed", Name: "failed"},
+ },
+ }
+)
+
+var physDriveChartsTmpl = module.Charts{
+ physDriveMediaErrorsRateChartTmpl.Copy(),
+ physDrivePredictiveFailuresRateChartTmpl.Copy(),
+}
+
+var (
+ physDriveMediaErrorsRateChartTmpl = module.Chart{
+ ID: "phys_drive_%s_media_errors_rate",
+ Title: "Physical Drive media errors rate",
+ Units: "errors/s",
+ Fam: "phys drive errors",
+ Ctx: "megacli.phys_drive_media_errors",
+ Type: module.Line,
+ Priority: prioPhysDriveMediaErrorsRate,
+ Dims: module.Dims{
+ {ID: "phys_drive_%s_media_error_count", Name: "media_errors"},
+ },
+ }
+ physDrivePredictiveFailuresRateChartTmpl = module.Chart{
+ ID: "phys_drive_%s_predictive_failures_rate",
+ Title: "Physical Drive predictive failures rate",
+ Units: "failures/s",
+ Fam: "phys drive errors",
+ Ctx: "megacli.phys_drive_predictive_failures",
+ Type: module.Line,
+ Priority: prioPhysDrivePredictiveFailuresRate,
+ Dims: module.Dims{
+ {ID: "phys_drive_%s_predictive_failure_count", Name: "predictive_failures"},
+ },
+ }
+)
+
+var bbuChartsTmpl = module.Charts{
+ bbuRelativeChargeChartsTmpl.Copy(),
+ bbuRechargeCyclesChartsTmpl.Copy(),
+ bbuCapacityDegradationChartsTmpl.Copy(),
+ bbuTemperatureChartsTmpl.Copy(),
+}
+
+var (
+ bbuRelativeChargeChartsTmpl = module.Chart{
+ ID: "bbu_adapter_%s_relative_charge",
+ Title: "BBU relative charge",
+ Units: "percentage",
+ Fam: "bbu charge",
+ Ctx: "megacli.bbu_charge",
+ Type: module.Area,
+ Priority: prioBBURelativeCharge,
+ Dims: module.Dims{
+ {ID: "bbu_adapter_%s_relative_state_of_charge", Name: "charge"},
+ },
+ }
+ bbuRechargeCyclesChartsTmpl = module.Chart{
+ ID: "bbu_adapter_%s_recharge_cycles",
+ Title: "BBU recharge cycles",
+ Units: "cycles",
+ Fam: "bbu charge",
+ Ctx: "megacli.bbu_recharge_cycles",
+ Type: module.Line,
+ Priority: prioBBURechargeCycles,
+ Dims: module.Dims{
+ {ID: "bbu_adapter_%s_cycle_count", Name: "recharge"},
+ },
+ }
+ bbuCapacityDegradationChartsTmpl = module.Chart{
+ ID: "bbu_adapter_%s_capacity_degradation",
+ Title: "BBU capacity degradation",
+ Units: "percent",
+ Fam: "bbu charge",
+ Ctx: "megacli.bbu_capacity_degradation",
+ Type: module.Line,
+ Priority: prioBBUCapDegradationPerc,
+ Dims: module.Dims{
+ {ID: "bbu_adapter_%s_capacity_degradation_perc", Name: "cap_degradation"},
+ },
+ }
+ bbuTemperatureChartsTmpl = module.Chart{
+ ID: "bbu_adapter_%s_temperature",
+ Title: "BBU temperature",
+ Units: "Celsius",
+ Fam: "bbu temperature",
+ Ctx: "megacli.bbu_temperature",
+ Type: module.Line,
+ Priority: prioBBUTemperature,
+ Dims: module.Dims{
+ {ID: "bbu_adapter_%s_temperature", Name: "temperature"},
+ },
+ }
+)
+
+func (m *MegaCli) addAdapterCharts(ad *megaAdapter) {
+ charts := adapterChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, ad.number)
+ chart.Labels = []module.Label{
+ {Key: "adapter_number", Value: ad.number},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, ad.number)
+ }
+ }
+
+ if err := m.Charts().Add(*charts...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *MegaCli) addPhysDriveCharts(pd *megaPhysDrive) {
+ charts := physDriveChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, pd.wwn)
+ chart.Labels = []module.Label{
+ {Key: "adapter_number", Value: pd.adapterNumber},
+ {Key: "wwn", Value: pd.wwn},
+ {Key: "slot_number", Value: pd.slotNumber},
+ {Key: "drive_position", Value: pd.drivePosition},
+ {Key: "drive_type", Value: pd.pdType},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, pd.wwn)
+ }
+ }
+
+ if err := m.Charts().Add(*charts...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *MegaCli) addBBUCharts(bbu *megaBBU) {
+ charts := bbuChartsTmpl.Copy()
+
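+ // drop the capacity degradation chart when full charge/design capacity are not reported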
+ if _, ok := calcCapDegradationPerc(bbu); !ok {
+ _ = charts.Remove(bbuCapacityDegradationChartsTmpl.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, bbu.adapterNumber)
+ chart.Labels = []module.Label{
+ {Key: "adapter_number", Value: bbu.adapterNumber},
+ {Key: "battery_type", Value: bbu.batteryType},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, bbu.adapterNumber)
+ }
+ }
+
+ if err := m.Charts().Add(*charts...); err != nil {
+ m.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/megacli/collect.go b/src/go/plugin/go.d/modules/megacli/collect.go
new file mode 100644
index 000000000..c4e74b78b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/collect.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package megacli
+
+import (
+ "strconv"
+ "strings"
+)
+
+func (m *MegaCli) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := m.collectPhysDrives(mx); err != nil {
+ return nil, err
+ }
+ if err := m.collectBBU(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func writeInt(mx map[string]int64, key, value string) {
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return
+ }
+ mx[key] = v
+}
+
+func getColonSepValue(line string) string {
+ i := strings.IndexByte(line, ':')
+ if i == -1 {
+ return ""
+ }
+ return strings.TrimSpace(line[i+1:])
+}
+
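+// getColonSepNumValue keeps only the leading token of the value, e.g. "Temperature: 22 C" -> "22".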
+func getColonSepNumValue(line string) string {
+ v := getColonSepValue(line)
+ i := strings.IndexByte(v, ' ')
+ if i == -1 {
+ return v
+ }
+ return v[:i]
+}
diff --git a/src/go/plugin/go.d/modules/megacli/collect_bbu.go b/src/go/plugin/go.d/modules/megacli/collect_bbu.go
new file mode 100644
index 000000000..33b048e64
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/collect_bbu.go
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package megacli
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type megaBBU struct {
+ adapterNumber string
+ batteryType string
+ temperature string
+ rsoc string
+ asoc string // apparently can be 0 while relative > 0 (e.g. relative 91%, absolute 0%)
+ cycleCount string
+ fullChargeCap string
+ designCap string
+}
+
+func (m *MegaCli) collectBBU(mx map[string]int64) error {
+ bs, err := m.exec.bbuInfo()
+ if err != nil {
+ return err
+ }
+
+ bbus, err := parseBBUInfo(bs)
+ if err != nil {
+ return err
+ }
+
+ if len(bbus) == 0 {
+ m.Debugf("no BBUs found")
+ return nil
+ }
+
+ for _, bbu := range bbus {
+ if !m.bbu[bbu.adapterNumber] {
+ m.bbu[bbu.adapterNumber] = true
+ m.addBBUCharts(bbu)
+ }
+
+ px := fmt.Sprintf("bbu_adapter_%s_", bbu.adapterNumber)
+
+ writeInt(mx, px+"temperature", bbu.temperature)
+ writeInt(mx, px+"relative_state_of_charge", bbu.rsoc)
+ writeInt(mx, px+"absolute_state_of_charge", bbu.asoc)
+ writeInt(mx, px+"cycle_count", bbu.cycleCount)
+ if v, ok := calcCapDegradationPerc(bbu); ok {
+ mx[px+"capacity_degradation_perc"] = v
+ }
+ }
+
+ m.Debugf("found %d BBUs", len(m.bbu))
+
+ return nil
+}
+
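+// parseBBUInfo walks the `megacli -AdpBbuCmd -aAll -NoLog` text output section by section.
+// Illustrative input (not exhaustive), matching the prefixes handled below:
+//
+//	BBU status for Adapter: 0
+//	BatteryType: BBU
+//	Temperature: 22 C
+//	BBU Capacity Info for Adapter: 0
+//	Relative State of Charge: 91 %
+//	Full Charge Capacity: 1345 mAh
+//	Cycle Count: 4
+//	BBU Design Info for Adapter: 0
+//	Design Capacity: 1700 mAh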
+func parseBBUInfo(bs []byte) (map[string]*megaBBU, error) {
+ bbus := make(map[string]*megaBBU)
+
+ var section string
+ var bbu *megaBBU
+
+ sc := bufio.NewScanner(bytes.NewReader(bs))
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+
+ switch {
+ case strings.HasPrefix(line, "BBU status for Adapter"):
+ section = "status"
+ ad := getColonSepValue(line)
+ if _, ok := bbus[ad]; !ok {
+ bbu = &megaBBU{adapterNumber: ad}
+ bbus[ad] = bbu
+ }
+ continue
+ case strings.HasPrefix(line, "BBU Capacity Info for Adapter"):
+ section = "capacity"
+ continue
+ case strings.HasPrefix(line, "BBU Design Info for Adapter"):
+ section = "design"
+ continue
+ case strings.HasPrefix(line, "BBU Firmware Status"),
+ strings.HasPrefix(line, "BBU GasGauge Status"),
+ strings.HasPrefix(line, "BBU Properties for Adapter"):
+ section = ""
+ continue
+ }
+
+ if bbu == nil {
+ continue
+ }
+
+ switch section {
+ case "status":
+ switch {
+ case strings.HasPrefix(line, "BatteryType:"):
+ bbu.batteryType = getColonSepValue(line)
+ case strings.HasPrefix(line, "Temperature:"):
+ bbu.temperature = getColonSepNumValue(line)
+ }
+ case "capacity":
+ switch {
+ case strings.HasPrefix(line, "Relative State of Charge:"):
+ bbu.rsoc = getColonSepNumValue(line)
+ case strings.HasPrefix(line, "Absolute State of charge:"):
+ bbu.asoc = getColonSepNumValue(line)
+ case strings.HasPrefix(line, "Full Charge Capacity:"):
+ bbu.fullChargeCap = getColonSepNumValue(line)
+ case strings.HasPrefix(line, "Cycle Count:"):
+ bbu.cycleCount = getColonSepNumValue(line)
+ }
+ case "design":
+ if strings.HasPrefix(line, "Design Capacity:") {
+ bbu.designCap = getColonSepNumValue(line)
+ }
+ }
+ }
+
+ return bbus, nil
+}
+
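+// calcCapDegradationPerc derives battery capacity degradation as 100 - full_charge_capacity/design_capacity*100.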
+func calcCapDegradationPerc(bbu *megaBBU) (int64, bool) {
+ full, err := strconv.ParseInt(bbu.fullChargeCap, 10, 64)
+ if err != nil || full == 0 {
+ return 0, false
+ }
+ design, err := strconv.ParseInt(bbu.designCap, 10, 64)
+ if err != nil || design == 0 {
+ return 0, false
+ }
+
+ v := 100 - float64(full)/float64(design)*100
+
+ return int64(v), true
+}
diff --git a/src/go/plugin/go.d/modules/megacli/collect_phys_drives.go b/src/go/plugin/go.d/modules/megacli/collect_phys_drives.go
new file mode 100644
index 000000000..71d4546e3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/collect_phys_drives.go
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package megacli
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+)
+
+type (
+ megaAdapter struct {
+ number string
+ name string
+ state string
+ physDrives map[string]*megaPhysDrive
+ }
+ megaPhysDrive struct {
+ adapterNumber string
+ number string
+ wwn string
+ slotNumber string
+ drivePosition string
+ pdType string
+ mediaErrorCount string
+ predictiveFailureCount string
+ }
+)
+
+var adapterStates = []string{
+ "optimal",
+ "degraded",
+ "partially_degraded",
+ "failed",
+}
+
+func (m *MegaCli) collectPhysDrives(mx map[string]int64) error {
+ bs, err := m.exec.physDrivesInfo()
+ if err != nil {
+ return err
+ }
+
+ adapters, err := parsePhysDrivesInfo(bs)
+ if err != nil {
+ return err
+ }
+ if len(adapters) == 0 {
+ return errors.New("no adapters found")
+ }
+
+ var drives int
+
+ for _, ad := range adapters {
+ if !m.adapters[ad.number] {
+ m.adapters[ad.number] = true
+ m.addAdapterCharts(ad)
+ }
+
+ px := fmt.Sprintf("adapter_%s_health_state_", ad.number)
+ for _, st := range adapterStates {
+ mx[px+st] = 0
+ }
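+ // e.g. "Partially Degraded" -> "partially_degraded", matching the adapterStates dimension names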
+ st := strings.ReplaceAll(strings.ToLower(ad.state), " ", "_")
+ mx[px+st] = 1
+
+ for _, pd := range ad.physDrives {
+ if !m.drives[pd.wwn] {
+ m.drives[pd.wwn] = true
+ m.addPhysDriveCharts(pd)
+ }
+ drives++
+
+ px := fmt.Sprintf("phys_drive_%s_", pd.wwn)
+
+ writeInt(mx, px+"media_error_count", pd.mediaErrorCount)
+ writeInt(mx, px+"predictive_failure_count", pd.predictiveFailureCount)
+ }
+ }
+
+ m.Debugf("found %d adapters, %d physical drives", len(m.adapters), drives)
+
+ return nil
+}
+
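+// parsePhysDrivesInfo walks the `megacli -LDPDInfo -aAll -NoLog` text output.
+// Illustrative input (not exhaustive), matching the prefixes handled below:
+//
+//	Adapter #0
+//	State               : Optimal
+//	PD: 0 Information
+//	Slot Number: 0
+//	WWN: 5000C500AABBCCDD
+//	PD Type: SATA
+//	Media Error Count: 0
+//	Predictive Failure Count: 0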
+func parsePhysDrivesInfo(bs []byte) (map[string]*megaAdapter, error) {
+ adapters := make(map[string]*megaAdapter)
+
+ var ad *megaAdapter
+ var pd *megaPhysDrive
+
+ sc := bufio.NewScanner(bytes.NewReader(bs))
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+
+ switch {
+ case strings.HasPrefix(line, "Adapter #"):
+ idx := strings.TrimPrefix(line, "Adapter #")
+ ad = &megaAdapter{number: idx, physDrives: make(map[string]*megaPhysDrive)}
+ adapters[idx] = ad
+ case strings.HasPrefix(line, "Name") && ad != nil:
+ ad.name = getColonSepValue(line)
+ case strings.HasPrefix(line, "State") && ad != nil:
+ ad.state = getColonSepValue(line)
+ case strings.HasPrefix(line, "PD:") && ad != nil:
+ if parts := strings.Fields(line); len(parts) == 3 {
+ idx := parts[1]
+ pd = &megaPhysDrive{number: idx, adapterNumber: ad.number}
+ ad.physDrives[idx] = pd
+ }
+ case strings.HasPrefix(line, "Slot Number:") && pd != nil:
+ pd.slotNumber = getColonSepValue(line)
+ case strings.HasPrefix(line, "Drive's position:") && pd != nil:
+ pd.drivePosition = getColonSepValue(line)
+ case strings.HasPrefix(line, "WWN:") && pd != nil:
+ pd.wwn = getColonSepValue(line)
+ case strings.HasPrefix(line, "PD Type:") && pd != nil:
+ pd.pdType = getColonSepValue(line)
+ case strings.HasPrefix(line, "Media Error Count:") && pd != nil:
+ pd.mediaErrorCount = getColonSepNumValue(line)
+ case strings.HasPrefix(line, "Predictive Failure Count:") && pd != nil:
+ pd.predictiveFailureCount = getColonSepNumValue(line)
+ }
+ }
+
+ return adapters, nil
+}
diff --git a/src/go/plugin/go.d/modules/megacli/config_schema.json b/src/go/plugin/go.d/modules/megacli/config_schema.json
new file mode 100644
index 000000000..6eb36519d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MegaCli collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/megacli/exec.go b/src/go/plugin/go.d/modules/megacli/exec.go
new file mode 100644
index 000000000..846952b25
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/exec.go
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package megacli
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newMegaCliExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *megaCliExec {
+ return &megaCliExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ }
+}
+
+type megaCliExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+}
+
+func (e *megaCliExec) physDrivesInfo() ([]byte, error) {
+ return e.execute("megacli-disk-info")
+}
+
+func (e *megaCliExec) bbuInfo() ([]byte, error) {
+ return e.execute("megacli-battery-info")
+}
+
+func (e *megaCliExec) execute(args ...string) ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.ndsudoPath, args...)
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/megacli/init.go b/src/go/plugin/go.d/modules/megacli/init.go
new file mode 100644
index 000000000..78b7bf482
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/init.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package megacli
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (m *MegaCli) initMegaCliExec() (megaCli, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ megaExec := newMegaCliExec(ndsudoPath, m.Timeout.Duration(), m.Logger)
+
+ return megaExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/megacli/integrations/megacli_megaraid.md b/src/go/plugin/go.d/modules/megacli/integrations/megacli_megaraid.md
new file mode 100644
index 000000000..d1efa7df1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/integrations/megacli_megaraid.md
@@ -0,0 +1,250 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/megacli/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/megacli/metadata.yaml"
+sidebar_label: "MegaCLI MegaRAID"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MegaCLI MegaRAID
+
+
+<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: megacli
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitors the health of MegaCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.
+It relies on the `megacli` CLI tool but avoids directly executing the binary.
+Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+Executed commands:
+- `megacli -LDPDInfo -aAll -NoLog`
+- `megacli -AdpBbuCmd -aAll -NoLog`
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per adapter
+
+These metrics refer to the MegaCLI Adapter.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| adapter_number | Adapter number |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| megacli.adapter_health_state | optimal, degraded, partially_degraded, failed | state |
+
+### Per physical drive
+
+These metrics refer to the MegaCLI Physical Drive.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| adapter_number | Adapter number |
+| wwn | World Wide Name |
+| slot_number | Slot number |
+| drive_position | Position (e.g. DiskGroup: 0, Span: 0, Arm: 2) |
+| drive_type | Type (e.g. SATA) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| megacli.phys_drive_media_errors_rate | media_errors | errors/s |
+| megacli.phys_drive_predictive_failures_rate | predictive_failures | failures/s |
+
+### Per backup battery unit
+
+These metrics refer to the MegaCLI Backup Battery Unit.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| adapter_number | Adapter number |
+| battery_type | Battery type (e.g. BBU) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| megacli.bbu_charge | charge | percentage |
+| megacli.bbu_recharge_cycles | recharge | cycles |
+| megacli.bbu_capacity_degradation | cap_degradation | percent |
+| megacli.bbu_temperature | temperature | Celsius |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ megacli_adapter_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.adapter_health_state | MegaCLI adapter ${label:adapter_number} is in the degraded state |
+| [ megacli_phys_drive_media_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_media_errors | MegaCLI physical drive adapter ${label:adapter_number} slot ${label:slot_number} media errors |
+| [ megacli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.phys_drive_predictive_failures | MegaCLI physical drive (adapter ${label:adapter_number} slot ${label:slot_number}) predictive failures |
+| [ megacli_bbu_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_charge | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average charge over the last minute |
+| [ megacli_bbu_recharge_cycles ](https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf) | megacli.bbu_recharge_cycles | MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average number of recharge cycles over the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/megacli.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/megacli.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | megacli binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: megacli
+ update_every: 5 # Collect MegaCli Hardware RAID statistics every 5 seconds
+
+```
+</details>
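+
+##### Custom timeout
+
+Allows you to extend the `megacli` binary execution timeout. The 10 seconds used here is only an illustrative value.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: megacli
+ timeout: 10 # Allow up to 10 seconds for megacli to respond
+
+```
+</details>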
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `megacli` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m megacli
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `megacli` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep megacli
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep megacli /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep megacli
+```
+
+
diff --git a/src/go/plugin/go.d/modules/megacli/megacli.go b/src/go/plugin/go.d/modules/megacli/megacli.go
new file mode 100644
index 000000000..41abd7a12
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/megacli.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package megacli
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("megacli", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *MegaCli {
+ return &MegaCli{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ adapters: make(map[string]bool),
+ drives: make(map[string]bool),
+ bbu: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ MegaCli struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec megaCli
+
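+ // seen adapters, physical drives, and backup battery units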
+ adapters map[string]bool
+ drives map[string]bool
+ bbu map[string]bool
+ }
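+ // megaCli abstracts the megacli binary queries (run via ndsudo in production, mocked in tests)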
+ megaCli interface {
+ physDrivesInfo() ([]byte, error)
+ bbuInfo() ([]byte, error)
+ }
+)
+
+func (m *MegaCli) Configuration() any {
+ return m.Config
+}
+
+func (m *MegaCli) Init() error {
+ megacliExec, err := m.initMegaCliExec()
+ if err != nil {
+ m.Errorf("megacli exec initialization: %v", err)
+ return err
+ }
+ m.exec = megacliExec
+
+ return nil
+}
+
+func (m *MegaCli) Check() error {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (m *MegaCli) Charts() *module.Charts {
+ return m.charts
+}
+
+func (m *MegaCli) Collect() map[string]int64 {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (m *MegaCli) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/megacli/megacli_test.go b/src/go/plugin/go.d/modules/megacli/megacli_test.go
new file mode 100644
index 000000000..4991a28ce
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/megacli_test.go
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package megacli
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataBBUInfoOld, _ = os.ReadFile("testdata/mega-bbu-info-old.txt")
+ dataBBUInfoRecent, _ = os.ReadFile("testdata/mega-bbu-info-recent.txt")
+ dataPhysDrivesInfo, _ = os.ReadFile("testdata/mega-phys-drives-info.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataBBUInfoOld": dataBBUInfoOld,
+ "dataBBUInfoRecent": dataBBUInfoRecent,
+ "dataPhysDrivesInfo": dataPhysDrivesInfo,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestMegaCli_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &MegaCli{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestMegaCli_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if 'ndsudo' not found": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mega := New()
+
+ if test.wantFail {
+ assert.Error(t, mega.Init())
+ } else {
+ assert.NoError(t, mega.Init())
+ }
+ })
+ }
+}
+
+func TestMegaCli_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *MegaCli
+ }{
+ "not initialized exec": {
+ prepare: func() *MegaCli {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *MegaCli {
+ mega := New()
+ mega.exec = prepareMockOK()
+ _ = mega.Check()
+ return mega
+ },
+ },
+ "after collect": {
+ prepare: func() *MegaCli {
+ mega := New()
+ mega.exec = prepareMockOK()
+ _ = mega.Collect()
+ return mega
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mega := test.prepare()
+
+ assert.NotPanics(t, mega.Cleanup)
+ })
+ }
+}
+
+func TestMegaCli_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestMegaCli_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockMegaCliExec
+ wantFail bool
+ }{
+ "success case": {
+ wantFail: false,
+ prepareMock: prepareMockOK,
+ },
+ "success case old bbu": {
+ wantFail: false,
+ prepareMock: prepareMockOldBbuOK,
+ },
+ "err on exec": {
+ wantFail: true,
+ prepareMock: prepareMockErr,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mega := New()
+ mock := test.prepareMock()
+ mega.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, mega.Check())
+ } else {
+ assert.NoError(t, mega.Check())
+ }
+ })
+ }
+}
+
+func TestMegaCli_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockMegaCliExec
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantCharts: len(adapterChartsTmpl)*1 + len(physDriveChartsTmpl)*8 + len(bbuChartsTmpl)*1,
+ wantMetrics: map[string]int64{
+ "adapter_0_health_state_degraded": 0,
+ "adapter_0_health_state_failed": 0,
+ "adapter_0_health_state_optimal": 1,
+ "adapter_0_health_state_partially_degraded": 0,
+ "bbu_adapter_0_absolute_state_of_charge": 63,
+ "bbu_adapter_0_capacity_degradation_perc": 10,
+ "bbu_adapter_0_cycle_count": 4,
+ "bbu_adapter_0_relative_state_of_charge": 71,
+ "bbu_adapter_0_temperature": 33,
+ "phys_drive_5002538c00019b96_media_error_count": 0,
+ "phys_drive_5002538c00019b96_predictive_failure_count": 0,
+ "phys_drive_5002538c4002da83_media_error_count": 0,
+ "phys_drive_5002538c4002da83_predictive_failure_count": 0,
+ "phys_drive_5002538c4002dade_media_error_count": 0,
+ "phys_drive_5002538c4002dade_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e6e9_media_error_count": 0,
+ "phys_drive_5002538c4002e6e9_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e707_media_error_count": 0,
+ "phys_drive_5002538c4002e707_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e70f_media_error_count": 0,
+ "phys_drive_5002538c4002e70f_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e712_media_error_count": 0,
+ "phys_drive_5002538c4002e712_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e713_media_error_count": 0,
+ "phys_drive_5002538c4002e713_predictive_failure_count": 0,
+ },
+ },
+ "success case old bbu": {
+ prepareMock: prepareMockOldBbuOK,
+ wantCharts: len(adapterChartsTmpl)*1 + len(physDriveChartsTmpl)*8 + len(bbuChartsTmpl)*1,
+ wantMetrics: map[string]int64{
+ "adapter_0_health_state_degraded": 0,
+ "adapter_0_health_state_failed": 0,
+ "adapter_0_health_state_optimal": 1,
+ "adapter_0_health_state_partially_degraded": 0,
+ "bbu_adapter_0_absolute_state_of_charge": 83,
+ "bbu_adapter_0_capacity_degradation_perc": 17,
+ "bbu_adapter_0_cycle_count": 61,
+ "bbu_adapter_0_relative_state_of_charge": 100,
+ "bbu_adapter_0_temperature": 31,
+ "phys_drive_5002538c00019b96_media_error_count": 0,
+ "phys_drive_5002538c00019b96_predictive_failure_count": 0,
+ "phys_drive_5002538c4002da83_media_error_count": 0,
+ "phys_drive_5002538c4002da83_predictive_failure_count": 0,
+ "phys_drive_5002538c4002dade_media_error_count": 0,
+ "phys_drive_5002538c4002dade_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e6e9_media_error_count": 0,
+ "phys_drive_5002538c4002e6e9_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e707_media_error_count": 0,
+ "phys_drive_5002538c4002e707_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e70f_media_error_count": 0,
+ "phys_drive_5002538c4002e70f_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e712_media_error_count": 0,
+ "phys_drive_5002538c4002e712_predictive_failure_count": 0,
+ "phys_drive_5002538c4002e713_media_error_count": 0,
+ "phys_drive_5002538c4002e713_predictive_failure_count": 0,
+ },
+ },
+ "err on exec": {
+ prepareMock: prepareMockErr,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mega := New()
+ mock := test.prepareMock()
+ mega.exec = mock
+
+ mx := mega.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ assert.Len(t, *mega.Charts(), test.wantCharts)
+ if len(test.wantMetrics) > 0 {
+ module.TestMetricsHasAllChartsDims(t, mega.Charts(), mx)
+ }
+ })
+ }
+}
+
+func prepareMockOK() *mockMegaCliExec {
+ return &mockMegaCliExec{
+ physDrivesInfoData: dataPhysDrivesInfo,
+ bbuInfoData: dataBBUInfoRecent,
+ }
+}
+
+func prepareMockOldBbuOK() *mockMegaCliExec {
+ return &mockMegaCliExec{
+ physDrivesInfoData: dataPhysDrivesInfo,
+ bbuInfoData: dataBBUInfoOld,
+ }
+}
+
+func prepareMockErr() *mockMegaCliExec {
+ return &mockMegaCliExec{
+ errOnInfo: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockMegaCliExec {
+ resp := []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`)
+ return &mockMegaCliExec{
+ physDrivesInfoData: resp,
+ bbuInfoData: resp,
+ }
+}
+
+func prepareMockEmptyResponse() *mockMegaCliExec {
+ return &mockMegaCliExec{}
+}
+
+type mockMegaCliExec struct {
+ errOnInfo bool
+ physDrivesInfoData []byte
+ bbuInfoData []byte
+}
+
+func (m *mockMegaCliExec) physDrivesInfo() ([]byte, error) {
+ if m.errOnInfo {
+ return nil, errors.New("mock.physDrivesInfo() error")
+ }
+ return m.physDrivesInfoData, nil
+}
+
+func (m *mockMegaCliExec) bbuInfo() ([]byte, error) {
+ if m.errOnInfo {
+ return nil, errors.New("mock.bbuInfo() error")
+ }
+ return m.bbuInfoData, nil
+}
diff --git a/src/go/plugin/go.d/modules/megacli/metadata.yaml b/src/go/plugin/go.d/modules/megacli/metadata.yaml
new file mode 100644
index 000000000..da5f4fefa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/metadata.yaml
@@ -0,0 +1,183 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-megacli
+ plugin_name: go.d.plugin
+ module_name: megacli
+ monitored_instance:
+ name: MegaCLI MegaRAID
+ link: "https://wikitech.wikimedia.org/wiki/MegaCli"
+ icon_filename: "hard-drive.svg"
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - storage
+ - raid-controller
+ - manage-disks
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Monitors the health of MegaCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.
+ It relies on the `megacli` CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+ Executed commands:
+ - `megacli -LDPDInfo -aAll -NoLog`
+ - `megacli -AdpBbuCmd -aAll -NoLog`
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/megacli.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: megacli binary execution timeout, in seconds.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: megacli
+ update_every: 5 # Collect MegaCli Hardware RAID statistics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: megacli_adapter_health_state
+ metric: megacli.adapter_health_state
+ info: MegaCLI adapter ${label:adapter_number} is in the degraded state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf
+ - name: megacli_phys_drive_media_errors
+ metric: megacli.phys_drive_media_errors
+ info: MegaCLI physical drive adapter ${label:adapter_number} slot ${label:slot_number} media errors
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf
+ - name: megacli_phys_drive_predictive_failures
+ metric: megacli.phys_drive_predictive_failures
+ info: MegaCLI physical drive (adapter ${label:adapter_number} slot ${label:slot_number}) predictive failures
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf
+ - name: megacli_bbu_charge
+ metric: megacli.bbu_charge
+ info: MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average charge over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf
+ - name: megacli_bbu_recharge_cycles
+ metric: megacli.bbu_recharge_cycles
+ info: MegaCLI Backup Battery Unit (adapter ${label:adapter_number}) average number of recharge cycles over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/megacli.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: adapter
+ description: These metrics refer to the MegaCLI Adapter.
+ labels:
+ - name: adapter_number
+ description: Adapter number
+ metrics:
+ - name: megacli.adapter_health_state
+ description: Adapter health state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: optimal
+ - name: degraded
+ - name: partially_degraded
+ - name: failed
+ - name: physical drive
+ description: These metrics refer to the MegaCLI Physical Drive.
+ labels:
+ - name: adapter_number
+ description: Adapter number
+ - name: wwn
+ description: World Wide Name
+ - name: slot_number
+ description: Slot number
+ - name: drive_position
+ description: "Position (e.g. DiskGroup: 0, Span: 0, Arm: 2)"
+ - name: drive_type
+ description: Type (e.g. SATA)
+ metrics:
+ - name: megacli.phys_drive_media_errors_rate
+ description: Physical Drive media errors rate
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: media_errors
+ - name: megacli.phys_drive_predictive_failures_rate
+ description: Physical Drive predictive failures rate
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: predictive_failures
+ - name: backup battery unit
+ description: These metrics refer to the MegaCLI Backup Battery Unit.
+ labels:
+ - name: adapter_number
+ description: Adapter number
+ - name: battery_type
+ description: Battery type (e.g. BBU)
+ metrics:
+ - name: megacli.bbu_charge
+ description: BBU relative charge
+ unit: percentage
+ chart_type: area
+ dimensions:
+ - name: charge
+ - name: megacli.bbu_recharge_cycles
+ description: BBU recharge cycles
+ unit: cycles
+ chart_type: line
+ dimensions:
+ - name: recharge
+ - name: megacli.bbu_capacity_degradation
+ description: BBU capacity degradation
+ unit: percent
+ chart_type: area
+ dimensions:
+ - name: cap_degradation
+ - name: megacli.bbu_temperature
+ description: BBU temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
diff --git a/src/go/plugin/go.d/modules/megacli/testdata/config.json b/src/go/plugin/go.d/modules/megacli/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/megacli/testdata/config.yaml b/src/go/plugin/go.d/modules/megacli/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/megacli/testdata/mega-bbu-info-old.txt b/src/go/plugin/go.d/modules/megacli/testdata/mega-bbu-info-old.txt
new file mode 100644
index 000000000..054ce54df
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/testdata/mega-bbu-info-old.txt
@@ -0,0 +1,84 @@
+BBU status for Adapter: 0
+
+BatteryType: BBU
+Voltage: 4073 mV
+Current: 0 mA
+Temperature: 31 C
+Battery State: Optimal
+BBU Firmware Status:
+
+ Charging Status : None
+ Voltage : OK
+ Temperature : OK
+ Learn Cycle Requested : No
+ Learn Cycle Active : No
+ Learn Cycle Status : OK
+ Learn Cycle Timeout : No
+ I2c Errors Detected : No
+ Battery Pack Missing : No
+ Battery Replacement required : No
+ Remaining Capacity Low : No
+ Periodic Learn Required : No
+ Transparent Learn : No
+ No space to cache offload : No
+ Pack is about to fail & should be replaced : No
+ Cache Offload premium feature required : No
+ Module microcode update required : No
+
+
+GasGuageStatus:
+ Fully Discharged : No
+ Fully Charged : Yes
+ Discharging : Yes
+ Initialized : Yes
+ Remaining Time Alarm : No
+ Discharge Terminated : No
+ Over Temperature : No
+ Charging Terminated : Yes
+ Over Charged : No
+Relative State of Charge: 100 %
+Charger Status: Complete
+Remaining Capacity: 1477 mAh
+Full Charge Capacity: 1477 mAh
+isSOHGood: Yes
+ Battery backup charge time : 0 hours
+
+BBU Capacity Info for Adapter: 0
+
+ Relative State of Charge: 100 %
+ Absolute State of charge: 83 %
+ Remaining Capacity: 1477 mAh
+ Full Charge Capacity: 1477 mAh
+ Run time to empty: Battery is not being charged.
+ Average time to empty: Battery is not being charged.
+ Estimated Time to full recharge: Battery is not being charged.
+ Cycle Count: 61
+Max Error = 2 %
+Remaining Capacity Alarm = 180 mAh
+Remining Time Alarm = 10 Min
+
+BBU Design Info for Adapter: 0
+
+ Date of Manufacture: 07/08, 2010
+ Design Capacity: 1800 mAh
+ Design Voltage: 3700 mV
+ Specification Info: 49
+ Serial Number: 4069
+ Pack Stat Configuration: 0x0014
+ Manufacture Name: SMP-PA1.9
+ Firmware Version : �
+ Device Name: DLFR463
+ Device Chemistry: LION
+ Battery FRU: N/A
+Module Version = �
+ Transparent Learn = 0
+ App Data = 0
+
+BBU Properties for Adapter: 0
+
+ Auto Learn Period: 90 Days
+ Next Learn time: Fri Jan 28 13:07:56 2022
+ Learn Delay Interval:0 Hours
+ Auto-Learn Mode: Enabled
+
+Exit Code: 0x00
diff --git a/src/go/plugin/go.d/modules/megacli/testdata/mega-bbu-info-recent.txt b/src/go/plugin/go.d/modules/megacli/testdata/mega-bbu-info-recent.txt
new file mode 100644
index 000000000..948be372b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/testdata/mega-bbu-info-recent.txt
@@ -0,0 +1,74 @@
+BBU status for Adapter: 0
+
+BatteryType: iBBU08
+Voltage: 3922 mV
+Current: 0 mA
+Temperature: 33 C
+Battery State: Optimal
+Design Mode : 48+ Hrs retention with a non-transparent learn cycle and balanced service life.
+
+BBU Firmware Status:
+
+ Charging Status : None
+ Voltage : OK
+ Temperature : OK
+ Learn Cycle Requested : No
+ Learn Cycle Active : No
+ Learn Cycle Status : OK
+ Learn Cycle Timeout : No
+ I2c Errors Detected : No
+ Battery Pack Missing : No
+ Battery Replacement required : No
+ Remaining Capacity Low : No
+ Periodic Learn Required : No
+ Transparent Learn : No
+ No space to cache offload : No
+ Pack is about to fail & should be replaced : No
+ Cache Offload premium feature required : No
+ Module microcode update required : No
+
+BBU GasGauge Status: 0x0100
+ Relative State of Charge: 71 %
+ Charger System State: 1
+ Charger System Ctrl: 0
+ Charging current: 0 mA
+ Absolute state of charge: 63 %
+ Max Error: 0 %
+ Battery backup charge time : 48 hours +
+
+BBU Capacity Info for Adapter: 0
+
+ Relative State of Charge: 71 %
+ Absolute State of charge: 63 %
+ Remaining Capacity: 969 mAh
+ Full Charge Capacity: 1365 mAh
+ Run time to empty: Battery is not being charged.
+ Average time to empty: 1 Hour, 56 Min.
+ Estimated Time to full recharge: Battery is not being charged.
+ Cycle Count: 4
+
+BBU Design Info for Adapter: 0
+
+ Date of Manufacture: 03/18, 2011
+ Design Capacity: 1530 mAh
+ Design Voltage: 4100 mV
+ Specification Info: 0
+ Serial Number: 5164
+ Pack Stat Configuration: 0x0000
+ Manufacture Name: LS36681
+ Firmware Version :
+ Device Name: bq27541
+ Device Chemistry: LPMR
+ Battery FRU: N/A
+ Transparent Learn = 0
+ App Data = 0
+
+BBU Properties for Adapter: 0
+
+ Auto Learn Period: 28 Days
+ Next Learn time: Thu Dec 21 18:32:56 2023
+ Learn Delay Interval:0 Hours
+ Auto-Learn Mode: Enabled
+ BBU Mode = 4
+
+Exit Code: 0x00
diff --git a/src/go/plugin/go.d/modules/megacli/testdata/mega-phys-drives-info.txt b/src/go/plugin/go.d/modules/megacli/testdata/mega-phys-drives-info.txt
new file mode 100644
index 000000000..142ddc822
--- /dev/null
+++ b/src/go/plugin/go.d/modules/megacli/testdata/mega-phys-drives-info.txt
@@ -0,0 +1,433 @@
+Adapter #0
+
+Number of Virtual Disks: 1
+Virtual Drive: 0 (Target Id: 0)
+Name :Virtual Disk 0
+RAID Level : Primary-1, Secondary-0, RAID Level Qualifier-0
+Size : 3.491 TB
+Sector Size : 512
+Is VD emulated : No
+Mirror Data : 3.491 TB
+State : Optimal
+Strip Size : 64 KB
+Number Of Drives : 8
+Span Depth : 1
+Default Cache Policy: WriteBack, ReadAhead, Direct, No Write Cache if Bad BBU
+Current Cache Policy: WriteBack, ReadAhead, Direct, No Write Cache if Bad BBU
+Default Access Policy: Read/Write
+Current Access Policy: Read/Write
+Disk Cache Policy : Disk's Default
+Encryption Type : None
+Default Power Savings Policy: Controller Defined
+Current Power Savings Policy: None
+Can spin up in 1 minute: No
+LD has drives that support T10 power conditions: No
+LD's IO profile supports MAX power savings with cached writes: No
+Bad Blocks Exist: No
+PI type: No PI
+
+Is VD Cached: No
+Number of Spans: 1
+Span: 0 - Number of PDs: 8
+
+PD: 0 Information
+Enclosure Device ID: 32
+Slot Number: 0
+Drive's position: DiskGroup: 0, Span: 0, Arm: 0
+Enclosure position: 1
+Device Id: 0
+WWN: 5002538c4002e713
+Sequence Number: 2
+Media Error Count: 0
+Other Error Count: 0
+Predictive Failure Count: 0
+Last Predictive Failure Event Seq Number: 0
+PD Type: SATA
+
+Raw Size: 894.252 GB [0x6fc81ab0 Sectors]
+Non Coerced Size: 893.752 GB [0x6fb81ab0 Sectors]
+Coerced Size: 893.75 GB [0x6fb80000 Sectors]
+Sector Size: 512
+Logical Sector Size: 512
+Physical Sector Size: 512
+Firmware state: Online, Spun Up
+Device Firmware Level: 003Q
+Shield Counter: 0
+Successful diagnostics completion on : N/A
+SAS Address(0): 0x4433221104000000
+Connected Port Number: 4(path0)
+Inquiry Data: S1YHNXAG804005 SAMSUNG MZ7LM960HCHP-00003 GXT3003Q
+FDE Capable: Not Capable
+FDE Enable: Disable
+Secured: Unsecured
+Locked: Unlocked
+Needs EKM Attention: No
+Foreign State: None
+Device Speed: 6.0Gb/s
+Link Speed: 6.0Gb/s
+Media Type: Solid State Device
+Drive: Not Certified
+Drive Temperature :33C (91.40 F)
+PI Eligibility: No
+Drive is formatted for PI information: No
+PI: No PI
+Drive's NCQ setting : N/A
+Port-0 :
+Port status: Active
+Port's Linkspeed: 6.0Gb/s
+Drive has flagged a S.M.A.R.T alert : No
+
+
+
+
+PD: 1 Information
+Enclosure Device ID: 32
+Slot Number: 2
+Drive's position: DiskGroup: 0, Span: 0, Arm: 1
+Enclosure position: 1
+Device Id: 2
+WWN: 5002538c00019b96
+Sequence Number: 2
+Media Error Count: 0
+Other Error Count: 0
+Predictive Failure Count: 0
+Last Predictive Failure Event Seq Number: 0
+PD Type: SATA
+
+Raw Size: 894.252 GB [0x6fc81ab0 Sectors]
+Non Coerced Size: 893.752 GB [0x6fb81ab0 Sectors]
+Coerced Size: 893.75 GB [0x6fb80000 Sectors]
+Sector Size: 512
+Logical Sector Size: 512
+Physical Sector Size: 512
+Firmware state: Online, Spun Up
+Device Firmware Level: 003Q
+Shield Counter: 0
+Successful diagnostics completion on : N/A
+SAS Address(0): 0x4433221106000000
+Connected Port Number: 6(path0)
+Inquiry Data: S1YHNYAG600061 SAMSUNG MZ7LM960HCHP-00003 GXT3003Q
+FDE Capable: Not Capable
+FDE Enable: Disable
+Secured: Unsecured
+Locked: Unlocked
+Needs EKM Attention: No
+Foreign State: None
+Device Speed: 6.0Gb/s
+Link Speed: 6.0Gb/s
+Media Type: Solid State Device
+Drive: Not Certified
+Drive Temperature :33C (91.40 F)
+PI Eligibility: No
+Drive is formatted for PI information: No
+PI: No PI
+Drive's NCQ setting : N/A
+Port-0 :
+Port status: Active
+Port's Linkspeed: 6.0Gb/s
+Drive has flagged a S.M.A.R.T alert : No
+
+
+
+
+PD: 2 Information
+Enclosure Device ID: 32
+Slot Number: 1
+Drive's position: DiskGroup: 0, Span: 0, Arm: 2
+Enclosure position: 1
+Device Id: 1
+WWN: 5002538c4002e707
+Sequence Number: 2
+Media Error Count: 0
+Other Error Count: 0
+Predictive Failure Count: 0
+Last Predictive Failure Event Seq Number: 0
+PD Type: SATA
+
+Raw Size: 894.252 GB [0x6fc81ab0 Sectors]
+Non Coerced Size: 893.752 GB [0x6fb81ab0 Sectors]
+Coerced Size: 893.75 GB [0x6fb80000 Sectors]
+Sector Size: 512
+Logical Sector Size: 512
+Physical Sector Size: 512
+Firmware state: Online, Spun Up
+Device Firmware Level: 003Q
+Shield Counter: 0
+Successful diagnostics completion on : N/A
+SAS Address(0): 0x4433221100000000
+Connected Port Number: 0(path0)
+Inquiry Data: S1YHNXAG803993 SAMSUNG MZ7LM960HCHP-00003 GXT3003Q
+FDE Capable: Not Capable
+FDE Enable: Disable
+Secured: Unsecured
+Locked: Unlocked
+Needs EKM Attention: No
+Foreign State: None
+Device Speed: 6.0Gb/s
+Link Speed: 6.0Gb/s
+Media Type: Solid State Device
+Drive: Not Certified
+Drive Temperature :34C (93.20 F)
+PI Eligibility: No
+Drive is formatted for PI information: No
+PI: No PI
+Drive's NCQ setting : N/A
+Port-0 :
+Port status: Active
+Port's Linkspeed: 6.0Gb/s
+Drive has flagged a S.M.A.R.T alert : No
+
+
+
+
+PD: 3 Information
+Enclosure Device ID: 32
+Slot Number: 3
+Drive's position: DiskGroup: 0, Span: 0, Arm: 3
+Enclosure position: 1
+Device Id: 3
+WWN: 5002538c4002e70f
+Sequence Number: 2
+Media Error Count: 0
+Other Error Count: 0
+Predictive Failure Count: 0
+Last Predictive Failure Event Seq Number: 0
+PD Type: SATA
+
+Raw Size: 894.252 GB [0x6fc81ab0 Sectors]
+Non Coerced Size: 893.752 GB [0x6fb81ab0 Sectors]
+Coerced Size: 893.75 GB [0x6fb80000 Sectors]
+Sector Size: 512
+Logical Sector Size: 512
+Physical Sector Size: 512
+Firmware state: Online, Spun Up
+Device Firmware Level: 003Q
+Shield Counter: 0
+Successful diagnostics completion on : N/A
+SAS Address(0): 0x4433221102000000
+Connected Port Number: 2(path0)
+Inquiry Data: S1YHNXAG804001 SAMSUNG MZ7LM960HCHP-00003 GXT3003Q
+FDE Capable: Not Capable
+FDE Enable: Disable
+Secured: Unsecured
+Locked: Unlocked
+Needs EKM Attention: No
+Foreign State: None
+Device Speed: 6.0Gb/s
+Link Speed: 6.0Gb/s
+Media Type: Solid State Device
+Drive: Not Certified
+Drive Temperature :34C (93.20 F)
+PI Eligibility: No
+Drive is formatted for PI information: No
+PI: No PI
+Drive's NCQ setting : N/A
+Port-0 :
+Port status: Active
+Port's Linkspeed: 6.0Gb/s
+Drive has flagged a S.M.A.R.T alert : No
+
+
+
+
+PD: 4 Information
+Enclosure Device ID: 32
+Slot Number: 5
+Drive's position: DiskGroup: 0, Span: 0, Arm: 4
+Enclosure position: 1
+Device Id: 5
+WWN: 5002538c4002e712
+Sequence Number: 2
+Media Error Count: 0
+Other Error Count: 0
+Predictive Failure Count: 0
+Last Predictive Failure Event Seq Number: 0
+PD Type: SATA
+
+Raw Size: 894.252 GB [0x6fc81ab0 Sectors]
+Non Coerced Size: 893.752 GB [0x6fb81ab0 Sectors]
+Coerced Size: 893.75 GB [0x6fb80000 Sectors]
+Sector Size: 512
+Logical Sector Size: 512
+Physical Sector Size: 512
+Firmware state: Online, Spun Up
+Device Firmware Level: 003Q
+Shield Counter: 0
+Successful diagnostics completion on : N/A
+SAS Address(0): 0x4433221101000000
+Connected Port Number: 1(path0)
+Inquiry Data: S1YHNXAG804004 SAMSUNG MZ7LM960HCHP-00003 GXT3003Q
+FDE Capable: Not Capable
+FDE Enable: Disable
+Secured: Unsecured
+Locked: Unlocked
+Needs EKM Attention: No
+Foreign State: None
+Device Speed: 6.0Gb/s
+Link Speed: 6.0Gb/s
+Media Type: Solid State Device
+Drive: Not Certified
+Drive Temperature :34C (93.20 F)
+PI Eligibility: No
+Drive is formatted for PI information: No
+PI: No PI
+Drive's NCQ setting : N/A
+Port-0 :
+Port status: Active
+Port's Linkspeed: 6.0Gb/s
+Drive has flagged a S.M.A.R.T alert : No
+
+
+
+
+PD: 5 Information
+Enclosure Device ID: 32
+Slot Number: 4
+Drive's position: DiskGroup: 0, Span: 0, Arm: 5
+Enclosure position: 1
+Device Id: 4
+WWN: 5002538c4002e6e9
+Sequence Number: 2
+Media Error Count: 0
+Other Error Count: 0
+Predictive Failure Count: 0
+Last Predictive Failure Event Seq Number: 0
+PD Type: SATA
+
+Raw Size: 894.252 GB [0x6fc81ab0 Sectors]
+Non Coerced Size: 893.752 GB [0x6fb81ab0 Sectors]
+Coerced Size: 893.75 GB [0x6fb80000 Sectors]
+Sector Size: 512
+Logical Sector Size: 512
+Physical Sector Size: 512
+Firmware state: Online, Spun Up
+Device Firmware Level: 003Q
+Shield Counter: 0
+Successful diagnostics completion on : N/A
+SAS Address(0): 0x4433221105000000
+Connected Port Number: 5(path0)
+Inquiry Data: S1YHNXAG803963 SAMSUNG MZ7LM960HCHP-00003 GXT3003Q
+FDE Capable: Not Capable
+FDE Enable: Disable
+Secured: Unsecured
+Locked: Unlocked
+Needs EKM Attention: No
+Foreign State: None
+Device Speed: 6.0Gb/s
+Link Speed: 6.0Gb/s
+Media Type: Solid State Device
+Drive: Not Certified
+Drive Temperature :33C (91.40 F)
+PI Eligibility: No
+Drive is formatted for PI information: No
+PI: No PI
+Drive's NCQ setting : N/A
+Port-0 :
+Port status: Active
+Port's Linkspeed: 6.0Gb/s
+Drive has flagged a S.M.A.R.T alert : No
+
+
+
+
+PD: 6 Information
+Enclosure Device ID: 32
+Slot Number: 6
+Drive's position: DiskGroup: 0, Span: 0, Arm: 6
+Enclosure position: 1
+Device Id: 6
+WWN: 5002538c4002da83
+Sequence Number: 2
+Media Error Count: 0
+Other Error Count: 0
+Predictive Failure Count: 0
+Last Predictive Failure Event Seq Number: 0
+PD Type: SATA
+
+Raw Size: 894.252 GB [0x6fc81ab0 Sectors]
+Non Coerced Size: 893.752 GB [0x6fb81ab0 Sectors]
+Coerced Size: 893.75 GB [0x6fb80000 Sectors]
+Sector Size: 512
+Logical Sector Size: 512
+Physical Sector Size: 512
+Firmware state: Online, Spun Up
+Device Firmware Level: 003Q
+Shield Counter: 0
+Successful diagnostics completion on : N/A
+SAS Address(0): 0x4433221107000000
+Connected Port Number: 7(path0)
+Inquiry Data: S1YHNXAG801029 SAMSUNG MZ7LM960HCHP-00003 GXT3003Q
+FDE Capable: Not Capable
+FDE Enable: Disable
+Secured: Unsecured
+Locked: Unlocked
+Needs EKM Attention: No
+Foreign State: None
+Device Speed: 6.0Gb/s
+Link Speed: 6.0Gb/s
+Media Type: Solid State Device
+Drive: Not Certified
+Drive Temperature :33C (91.40 F)
+PI Eligibility: No
+Drive is formatted for PI information: No
+PI: No PI
+Drive's NCQ setting : N/A
+Port-0 :
+Port status: Active
+Port's Linkspeed: 6.0Gb/s
+Drive has flagged a S.M.A.R.T alert : No
+
+
+
+
+PD: 7 Information
+Enclosure Device ID: 32
+Slot Number: 7
+Drive's position: DiskGroup: 0, Span: 0, Arm: 7
+Enclosure position: 1
+Device Id: 7
+WWN: 5002538c4002dade
+Sequence Number: 2
+Media Error Count: 0
+Other Error Count: 0
+Predictive Failure Count: 0
+Last Predictive Failure Event Seq Number: 0
+PD Type: SATA
+
+Raw Size: 894.252 GB [0x6fc81ab0 Sectors]
+Non Coerced Size: 893.752 GB [0x6fb81ab0 Sectors]
+Coerced Size: 893.75 GB [0x6fb80000 Sectors]
+Sector Size: 512
+Logical Sector Size: 512
+Physical Sector Size: 512
+Firmware state: Online, Spun Up
+Device Firmware Level: 003Q
+Shield Counter: 0
+Successful diagnostics completion on : N/A
+SAS Address(0): 0x4433221103000000
+Connected Port Number: 3(path0)
+Inquiry Data: S1YHNXAG801120 SAMSUNG MZ7LM960HCHP-00003 GXT3003Q
+FDE Capable: Not Capable
+FDE Enable: Disable
+Secured: Unsecured
+Locked: Unlocked
+Needs EKM Attention: No
+Foreign State: None
+Device Speed: 6.0Gb/s
+Link Speed: 6.0Gb/s
+Media Type: Solid State Device
+Drive: Not Certified
+Drive Temperature :34C (93.20 F)
+PI Eligibility: No
+Drive is formatted for PI information: No
+PI: No PI
+Drive's NCQ setting : N/A
+Port-0 :
+Port status: Active
+Port's Linkspeed: 6.0Gb/s
+Drive has flagged a S.M.A.R.T alert : No
+
+
+
+
+Exit Code: 0x00 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/memcached/README.md b/src/go/plugin/go.d/modules/memcached/README.md
new file mode 120000
index 000000000..2cb76d33c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/README.md
@@ -0,0 +1 @@
+integrations/memcached.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/memcached/charts.go b/src/go/plugin/go.d/modules/memcached/charts.go
new file mode 100644
index 000000000..14cb1bf11
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/charts.go
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package memcached
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioCache = module.Priority + iota
+ prioNet
+ prioConnections
+ prioItems
+ prioEvictedReclaimed
+ prioGet
+ prioGetRate
+ prioSetRate
+ prioDelete
+ prioCas
+ prioIncrement
+ prioDecrement
+ prioTouch
+ prioTouchRate
+)
+
+var charts = module.Charts{
+ cacheChart.Copy(),
+ netChart.Copy(),
+ connectionsChart.Copy(),
+ itemsChart.Copy(),
+ evictedReclaimedChart.Copy(),
+ getChart.Copy(),
+ getRateChart.Copy(),
+ setRateChart.Copy(),
+ deleteChart.Copy(),
+ casChart.Copy(),
+ incrementChart.Copy(),
+ decrementChart.Copy(),
+ touchChart.Copy(),
+ touchRateChart.Copy(),
+}
+
+const (
+ byteToMiB = 1 << 20
+)
+
+var (
+ cacheChart = module.Chart{
+ ID: "cache",
+ Title: "Cache Size",
+ Units: "MiB",
+ Fam: "cache",
+ Ctx: "memcached.cache",
+ Type: module.Stacked,
+ Priority: prioCache,
+ Dims: module.Dims{
+ {ID: "avail", Div: byteToMiB},
+ {ID: "bytes", Name: "used", Div: byteToMiB},
+ },
+ }
+ netChart = module.Chart{
+ ID: "net",
+ Title: "Network",
+ Units: "kilobits/s",
+ Fam: "network",
+ Ctx: "memcached.net",
+ Type: module.Area,
+ Priority: prioNet,
+ Dims: module.Dims{
+ {ID: "bytes_read", Name: "in", Mul: 8, Div: 1000, Algo: module.Incremental},
+ {ID: "bytes_written", Name: "out", Mul: -8, Div: 1000, Algo: module.Incremental},
+ },
+ }
+ connectionsChart = module.Chart{
+ ID: "connections",
+ Title: "Connections",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "memcached.connections",
+ Type: module.Line,
+ Priority: prioConnections,
+ Dims: module.Dims{
+ {ID: "curr_connections", Name: "current", Algo: module.Incremental},
+ {ID: "rejected_connections", Name: "rejected", Algo: module.Incremental},
+ {ID: "total_connections", Name: "total", Algo: module.Incremental},
+ },
+ }
+ itemsChart = module.Chart{
+ ID: "items",
+ Title: "Items",
+ Units: "items",
+ Fam: "items",
+ Ctx: "memcached.items",
+ Type: module.Line,
+ Priority: prioItems,
+ Dims: module.Dims{
+ {ID: "curr_items", Name: "current"},
+ {ID: "total_items", Name: "total"},
+ },
+ }
+ evictedReclaimedChart = module.Chart{
+ ID: "evicted_reclaimed",
+ Title: "Evicted and Reclaimed Items",
+ Units: "items",
+ Fam: "items",
+ Ctx: "memcached.evicted_reclaimed",
+ Type: module.Line,
+ Priority: prioEvictedReclaimed,
+ Dims: module.Dims{
+ {ID: "reclaimed"},
+ {ID: "evictions", Name: "evicted"},
+ },
+ }
+ getChart = module.Chart{
+ ID: "get",
+ Title: "Get Requests",
+ Units: "requests",
+ Fam: "get ops",
+ Ctx: "memcached.get",
+ Type: module.Stacked,
+ Priority: prioGet,
+ Dims: module.Dims{
+ {ID: "get_hits", Name: "hits", Algo: module.PercentOfAbsolute},
+ {ID: "get_misses", Name: "misses", Algo: module.PercentOfAbsolute},
+ },
+ }
+ getRateChart = module.Chart{
+ ID: "get_rate",
+ Title: "Get Request Rate",
+ Units: "requests/s",
+ Fam: "get ops",
+ Ctx: "memcached.get_rate",
+ Type: module.Line,
+ Priority: prioGetRate,
+ Dims: module.Dims{
+ {ID: "cmd_get", Name: "rate", Algo: module.Incremental},
+ },
+ }
+ setRateChart = module.Chart{
+ ID: "set_rate",
+ Title: "Set Request Rate",
+ Units: "requests/s",
+ Fam: "set ops",
+ Ctx: "memcached.set_rate",
+ Type: module.Line,
+ Priority: prioSetRate,
+ Dims: module.Dims{
+ {ID: "cmd_set", Name: "rate", Algo: module.Incremental},
+ },
+ }
+ deleteChart = module.Chart{
+ ID: "delete",
+ Title: "Delete Requests",
+ Units: "requests",
+ Fam: "delete ops",
+ Ctx: "memcached.delete",
+ Type: module.Stacked,
+ Priority: prioDelete,
+ Dims: module.Dims{
+ {ID: "delete_hits", Name: "hits", Algo: module.PercentOfAbsolute},
+ {ID: "delete_misses", Name: "misses", Algo: module.PercentOfAbsolute},
+ },
+ }
+ casChart = module.Chart{
+ ID: "cas",
+ Title: "Check and Set Requests",
+ Units: "requests",
+ Fam: "check and set ops",
+ Ctx: "memcached.cas",
+ Type: module.Stacked,
+ Priority: prioCas,
+ Dims: module.Dims{
+ {ID: "cas_hits", Name: "hits", Algo: module.PercentOfAbsolute},
+ {ID: "cas_misses", Name: "misses", Algo: module.PercentOfAbsolute},
+ {ID: "cas_badval", Name: "bad value", Algo: module.PercentOfAbsolute},
+ },
+ }
+ incrementChart = module.Chart{
+ ID: "increment",
+ Title: "Increment Requests",
+ Units: "requests",
+ Fam: "increment ops",
+ Ctx: "memcached.increment",
+ Type: module.Stacked,
+ Priority: prioIncrement,
+ Dims: module.Dims{
+ {ID: "incr_hits", Name: "hits", Algo: module.PercentOfAbsolute},
+ {ID: "incr_misses", Name: "misses", Algo: module.PercentOfAbsolute},
+ },
+ }
+ decrementChart = module.Chart{
+ ID: "decrement",
+ Title: "Decrement Requests",
+ Units: "requests",
+ Fam: "decrement ops",
+ Ctx: "memcached.decrement",
+ Type: module.Stacked,
+ Priority: prioDecrement,
+ Dims: module.Dims{
+ {ID: "decr_hits", Name: "hits", Algo: module.PercentOfAbsolute},
+ {ID: "decr_misses", Name: "misses", Algo: module.PercentOfAbsolute},
+ },
+ }
+ touchChart = module.Chart{
+ ID: "touch",
+ Title: "Touch Requests",
+ Units: "requests",
+ Fam: "touch ops",
+ Ctx: "memcached.touch",
+ Type: module.Stacked,
+ Priority: prioTouch,
+ Dims: module.Dims{
+ {ID: "touch_hits", Name: "hits", Algo: module.PercentOfAbsolute},
+ {ID: "touch_misses", Name: "misses", Algo: module.PercentOfAbsolute},
+ },
+ }
+ touchRateChart = module.Chart{
+ ID: "touch_rate",
+ Title: "Touch Requests Rate",
+ Units: "requests/s",
+ Fam: "touch ops",
+ Ctx: "memcached.touch_rate",
+ Type: module.Line,
+ Priority: prioTouchRate,
+ Dims: module.Dims{
+ {ID: "cmd_touch", Name: "rate", Algo: module.Incremental},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/memcached/client.go b/src/go/plugin/go.d/modules/memcached/client.go
new file mode 100644
index 000000000..679e3eb0f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/client.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package memcached
+
+import (
+ "bytes"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
+func newMemcachedConn(conf Config) memcachedConn {
+ return &memcachedClient{conn: socket.New(socket.Config{
+ Address: conf.Address,
+ ConnectTimeout: conf.Timeout.Duration(),
+ ReadTimeout: conf.Timeout.Duration(),
+ WriteTimeout: conf.Timeout.Duration(),
+ })}
+}
+
+type memcachedClient struct {
+ conn socket.Client
+}
+
+func (c *memcachedClient) connect() error {
+ return c.conn.Connect()
+}
+
+func (c *memcachedClient) disconnect() {
+ _ = c.conn.Disconnect()
+}
+
+func (c *memcachedClient) queryStats() ([]byte, error) {
+ var b bytes.Buffer
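+ // read response lines until the server terminates the stats listing with "END" (or replies "ERROR")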
+ err := c.conn.Command("stats\r\n", func(bytes []byte) bool {
+ s := strings.TrimSpace(string(bytes))
+ b.WriteString(s)
+ b.WriteByte('\n')
+ return !(strings.HasPrefix(s, "END") || strings.HasPrefix(s, "ERROR"))
+ })
+ if err != nil {
+ return nil, err
+ }
+ return b.Bytes(), nil
+}
diff --git a/src/go/plugin/go.d/modules/memcached/collect.go b/src/go/plugin/go.d/modules/memcached/collect.go
new file mode 100644
index 000000000..9ead8f47b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/collect.go
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package memcached
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "strconv"
+ "strings"
+)
+
+// https://github.com/memcached/memcached/blob/b1aefcdf8a265f8a5126e8aa107a50988fa1ec35/doc/protocol.txt#L1267
+var statsMetrics = map[string]bool{
+ "limit_maxbytes": true,
+ "bytes": true,
+ "bytes_read": true,
+ "bytes_written": true,
+ "cas_badval": true,
+ "cas_hits": true,
+ "cas_misses": true,
+ "cmd_get": true,
+ "cmd_set": true,
+ "cmd_touch": true,
+ "curr_connections": true,
+ "curr_items": true,
+ "decr_hits": true,
+ "decr_misses": true,
+ "delete_hits": true,
+ "delete_misses": true,
+ "evictions": true,
+ "get_hits": true,
+ "get_misses": true,
+ "incr_hits": true,
+ "incr_misses": true,
+ "reclaimed": true,
+ "rejected_connections": true,
+ "total_connections": true,
+ "total_items": true,
+ "touch_hits": true,
+ "touch_misses": true,
+}
+
+func (m *Memcached) collect() (map[string]int64, error) {
+ if m.conn == nil {
+ conn, err := m.establishConn()
+ if err != nil {
+ return nil, err
+ }
+ m.conn = conn
+ }
+
+ stats, err := m.conn.queryStats()
+ if err != nil {
+ m.conn.disconnect()
+ m.conn = nil
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ if err := m.collectStats(mx, stats); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (m *Memcached) collectStats(mx map[string]int64, stats []byte) error {
+ if len(stats) == 0 {
+ return errors.New("empty stats response")
+ }
+
+ var n int
+ sc := bufio.NewScanner(bytes.NewReader(stats))
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+
+ switch {
+ case strings.HasPrefix(line, "STAT"):
+ key, value := getStatKeyValue(line)
+ if !statsMetrics[key] {
+ continue
+ }
+ if v, err := strconv.ParseInt(value, 10, 64); err == nil {
+ mx[key] = v
+ n++
+ }
+ case strings.HasPrefix(line, "ERROR"):
+ return errors.New("received ERROR response")
+ }
+ }
+
+ if n == 0 {
+ return errors.New("unexpected memcached response")
+ }
+
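+ // "avail" is derived: configured memory limit (limit_maxbytes) minus bytes currently in use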
+ mx["avail"] = mx["limit_maxbytes"] - mx["bytes"]
+
+ return nil
+}
+
+func (m *Memcached) establishConn() (memcachedConn, error) {
+ conn := m.newMemcachedConn(m.Config)
+
+ if err := conn.connect(); err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func getStatKeyValue(line string) (string, string) {
+ line = strings.TrimPrefix(line, "STAT ")
+ i := strings.IndexByte(line, ' ')
+ if i < 0 {
+ return "", ""
+ }
+ return line[:i], line[i+1:]
+}
diff --git a/src/go/plugin/go.d/modules/memcached/config_schema.json b/src/go/plugin/go.d/modules/memcached/config_schema.json
new file mode 100644
index 000000000..f92a8eee9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/config_schema.json
@@ -0,0 +1,44 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Memcached collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the memcached service listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:11211"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/memcached/integrations/memcached.md b/src/go/plugin/go.d/modules/memcached/integrations/memcached.md
new file mode 100644
index 000000000..1e653902f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/integrations/memcached.md
@@ -0,0 +1,231 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/memcached/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/memcached/metadata.yaml"
+sidebar_label: "Memcached"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Memcached
+
+
+<img src="https://netdata.cloud/img/memcached.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: memcached
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching.
+
+It reads the server's response to the `stats` command.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is given, the collector will attempt to connect to the Memcached instance at `127.0.0.1:11211`.
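+
+To verify that the service is reachable before enabling the collector, you can query it manually. This quick check assumes `nc` (netcat) is available and that the default address is used:
+
+```bash
+printf 'stats\r\nquit\r\n' | nc 127.0.0.1 11211 | head
+```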
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Memcached instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| memcached.cache | available, used | MiB |
+| memcached.net | in, out | kilobits/s |
+| memcached.connections | current, rejected, total | connections/s |
+| memcached.items | current, total | items |
+| memcached.evicted_reclaimed | reclaimed, evicted | items |
+| memcached.get | hits, misses | requests |
+| memcached.get_rate | rate | requests/s |
+| memcached.set_rate | rate | requests/s |
+| memcached.delete | hits, misses | requests |
+| memcached.cas | hits, misses, bad value | requests |
+| memcached.increment | hits, misses | requests |
+| memcached.decrement | hits, misses | requests |
+| memcached.touch | hits, misses | requests |
+| memcached.touch_rate | rate | requests/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ memcached_cache_memory_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | cache memory utilization |
+| [ memcached_cache_fill_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | average rate the cache fills up (positive), or frees up (negative) space over the last hour |
+| [ memcached_out_of_cache_space_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf) | memcached.cache | estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/memcached.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/memcached.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | The IP address and port where the memcached service listens for connections. | 127.0.0.1:11211 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:11211
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:11211
+
+ - name: remote
+ address: 203.0.113.0:11211
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `memcached` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m memcached
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `memcached` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep memcached
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep memcached /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep memcached
+```
+
+
diff --git a/src/go/plugin/go.d/modules/memcached/memcached.go b/src/go/plugin/go.d/modules/memcached/memcached.go
new file mode 100644
index 000000000..bd6039aee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/memcached.go
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package memcached
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("memcached", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Memcached {
+ return &Memcached{
+ Config: Config{
+ Address: "127.0.0.1:11211",
+ Timeout: web.Duration(time.Second * 1),
+ },
+ newMemcachedConn: newMemcachedConn,
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+}
+
+type (
+ Memcached struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ newMemcachedConn func(Config) memcachedConn
+ conn memcachedConn
+ }
+ memcachedConn interface {
+ connect() error
+ disconnect()
+ queryStats() ([]byte, error)
+ }
+)
+
+func (m *Memcached) Configuration() any {
+ return m.Config
+}
+
+func (m *Memcached) Init() error {
+ if m.Address == "" {
+ m.Error("config: 'address' not set")
+ return errors.New("address not set")
+ }
+
+ return nil
+}
+
+func (m *Memcached) Check() error {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (m *Memcached) Charts() *module.Charts {
+ return m.charts
+}
+
+func (m *Memcached) Collect() map[string]int64 {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (m *Memcached) Cleanup() {
+ if m.conn != nil {
+ m.conn.disconnect()
+ m.conn = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/memcached/memcached_test.go b/src/go/plugin/go.d/modules/memcached/memcached_test.go
new file mode 100644
index 000000000..33a85d330
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/memcached_test.go
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package memcached
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataMemcachedStats, _ = os.ReadFile("testdata/stats.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataMemcachedStats": dataMemcachedStats,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestMemcached_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Memcached{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestMemcached_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success with default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if address not set": {
+ wantFail: true,
+ config: func() Config {
+ conf := New().Config
+ conf.Address = ""
+ return conf
+ }(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mem := New()
+ mem.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, mem.Init())
+ } else {
+ assert.NoError(t, mem.Init())
+ }
+ })
+ }
+}
+
+func TestMemcached_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Memcached
+ }{
+ "not initialized": {
+ prepare: func() *Memcached {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Memcached {
+ mem := New()
+ mem.newMemcachedConn = func(config Config) memcachedConn { return prepareMockOk() }
+ _ = mem.Check()
+ return mem
+ },
+ },
+ "after collect": {
+ prepare: func() *Memcached {
+ mem := New()
+ mem.newMemcachedConn = func(config Config) memcachedConn { return prepareMockOk() }
+ _ = mem.Collect()
+ return mem
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mem := test.prepare()
+
+ assert.NotPanics(t, mem.Cleanup)
+ })
+ }
+}
+
+func TestMemcached_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestMemcached_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockMemcachedConn
+ wantFail bool
+ }{
+ "success case": {
+ wantFail: false,
+ prepareMock: prepareMockOk,
+ },
+ "err on connect": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnConnect,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mem := New()
+ mock := test.prepareMock()
+ mem.newMemcachedConn = func(config Config) memcachedConn { return mock }
+
+ if test.wantFail {
+ assert.Error(t, mem.Check())
+ } else {
+ assert.NoError(t, mem.Check())
+ }
+ })
+ }
+}
+
+func TestMemcached_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockMemcachedConn
+ wantMetrics map[string]int64
+ disconnectBeforeCleanup bool
+ disconnectAfterCleanup bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOk,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ wantMetrics: map[string]int64{
+ "avail": 67108831,
+ "bytes": 33,
+ "bytes_read": 108662,
+ "bytes_written": 9761348,
+ "cas_badval": 0,
+ "cas_hits": 0,
+ "cas_misses": 0,
+ "cmd_get": 1,
+ "cmd_set": 1,
+ "cmd_touch": 0,
+ "curr_connections": 3,
+ "curr_items": 0,
+ "decr_hits": 0,
+ "decr_misses": 0,
+ "delete_hits": 0,
+ "delete_misses": 0,
+ "evictions": 0,
+ "get_hits": 0,
+ "get_misses": 1,
+ "incr_hits": 0,
+ "incr_misses": 0,
+ "limit_maxbytes": 67108864,
+ "reclaimed": 1,
+ "rejected_connections": 0,
+ "total_connections": 39,
+ "total_items": 1,
+ "touch_hits": 0,
+ "touch_misses": 0,
+ },
+ },
+ "error response": {
+ prepareMock: prepareMockErrorResponse,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: true,
+ },
+ "err on connect": {
+ prepareMock: prepareMockErrOnConnect,
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: false,
+ },
+ "err on query stats": {
+ prepareMock: prepareMockErrOnQueryStats,
+ disconnectBeforeCleanup: true,
+ disconnectAfterCleanup: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mem := New()
+ mock := test.prepareMock()
+ mem.newMemcachedConn = func(config Config) memcachedConn { return mock }
+
+ mx := mem.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ module.TestMetricsHasAllChartsDims(t, mem.Charts(), mx)
+ }
+
+ assert.Equal(t, test.disconnectBeforeCleanup, mock.disconnectCalled, "disconnect before cleanup")
+ mem.Cleanup()
+ assert.Equal(t, test.disconnectAfterCleanup, mock.disconnectCalled, "disconnect after cleanup")
+ })
+ }
+}
+
+func prepareMockOk() *mockMemcachedConn {
+ return &mockMemcachedConn{
+ statsResponse: dataMemcachedStats,
+ }
+}
+
+func prepareMockErrorResponse() *mockMemcachedConn {
+ return &mockMemcachedConn{
+ statsResponse: []byte("ERROR"),
+ }
+}
+
+func prepareMockErrOnConnect() *mockMemcachedConn {
+ return &mockMemcachedConn{
+ errOnConnect: true,
+ }
+}
+
+func prepareMockErrOnQueryStats() *mockMemcachedConn {
+ return &mockMemcachedConn{
+ errOnQueryStats: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockMemcachedConn {
+ return &mockMemcachedConn{
+ statsResponse: []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit."),
+ }
+}
+
+func prepareMockEmptyResponse() *mockMemcachedConn {
+ return &mockMemcachedConn{}
+}
+
+type mockMemcachedConn struct {
+ errOnConnect bool
+ errOnQueryStats bool
+ statsResponse []byte
+ disconnectCalled bool
+}
+
+func (m *mockMemcachedConn) connect() error {
+ if m.errOnConnect {
+ return errors.New("mock.connect() error")
+ }
+ return nil
+}
+
+func (m *mockMemcachedConn) disconnect() {
+ m.disconnectCalled = true
+}
+
+func (m *mockMemcachedConn) queryStats() ([]byte, error) {
+ if m.errOnQueryStats {
+ return nil, errors.New("mock.queryStats() error")
+ }
+ return m.statsResponse, nil
+}
diff --git a/src/go/plugin/go.d/modules/memcached/metadata.yaml b/src/go/plugin/go.d/modules/memcached/metadata.yaml
new file mode 100644
index 000000000..c307ef018
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/metadata.yaml
@@ -0,0 +1,217 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-memcached
+ plugin_name: go.d.plugin
+ module_name: memcached
+ monitored_instance:
+ name: Memcached
+ link: https://memcached.org/
+ categories:
+ - data-collection.database-servers
+ icon_filename: "memcached.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - memcached
+ - memcache
+ - cache
+ - database
+ most_popular: false
+ overview:
+ data_collection:
+        metrics_description: "Monitor Memcached metrics for efficient in-memory key-value store operations. Track cache hits, misses, and memory usage for effective data caching."
+ method_description: "It reads the server's response to the `stats` command."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+            If no configuration is given, the collector will attempt to connect to the memcached instance at `127.0.0.1:11211`.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/memcached.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: The IP address and port where the memcached service listens for connections.
+ default_value: 127.0.0.1:11211
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:11211
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:11211
+
+ - name: remote
+ address: 203.0.113.0:11211
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: memcached_cache_memory_usage
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf
+ metric: memcached.cache
+ info: cache memory utilization
+ - name: memcached_cache_fill_rate
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf
+ metric: memcached.cache
+        info: average rate at which the cache fills up (positive) or frees up (negative) space over the last hour
+ - name: memcached_out_of_cache_space_time
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/memcached.conf
+ metric: memcached.cache
+ info: estimated time the cache will run out of space if the system continues to add data at the same rate as the past hour
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: memcached.cache
+ description: Cache Size
+ unit: "MiB"
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: memcached.net
+ description: Network
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: memcached.connections
+ description: Connections
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: current
+ - name: rejected
+ - name: total
+ - name: memcached.items
+ description: Items
+ unit: "items"
+ chart_type: line
+ dimensions:
+ - name: current
+ - name: total
+ - name: memcached.evicted_reclaimed
+ description: Evicted and Reclaimed Items
+ unit: "items"
+ chart_type: line
+ dimensions:
+ - name: reclaimed
+ - name: evicted
+ - name: memcached.get
+ description: Get Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hints
+ - name: misses
+ - name: memcached.get_rate
+ description: Get Request Rate
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: rate
+ - name: memcached.set_rate
+ description: Set Request Rate
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: rate
+ - name: memcached.delete
+ description: Delete Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: memcached.cas
+ description: Check and Set Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: bad value
+ - name: memcached.increment
+ description: Increment Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: memcached.decrement
+ description: Decrement Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: memcached.touch
+ description: Touch Requests
+ unit: "requests"
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: memcached.touch_rate
+ description: Touch Request Rate
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: rate
diff --git a/src/go/plugin/go.d/modules/memcached/testdata/config.json b/src/go/plugin/go.d/modules/memcached/testdata/config.json
new file mode 100644
index 000000000..e86834720
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/memcached/testdata/config.yaml b/src/go/plugin/go.d/modules/memcached/testdata/config.yaml
new file mode 100644
index 000000000..1b81d09eb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/memcached/testdata/stats.txt b/src/go/plugin/go.d/modules/memcached/testdata/stats.txt
new file mode 100644
index 000000000..b9647cc1d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/memcached/testdata/stats.txt
@@ -0,0 +1,93 @@
+STAT pid 30783
+STAT uptime 5028
+STAT time 1721297802
+STAT version 1.6.24
+STAT libevent 2.1.12-stable
+STAT pointer_size 64
+STAT rusage_user 1.026626
+STAT rusage_system 0.685365
+STAT max_connections 1024
+STAT curr_connections 3
+STAT total_connections 39
+STAT rejected_connections 0
+STAT connection_structures 6
+STAT response_obj_oom 0
+STAT response_obj_count 1
+STAT response_obj_bytes 65536
+STAT read_buf_count 9
+STAT read_buf_bytes 147456
+STAT read_buf_bytes_free 65536
+STAT read_buf_oom 0
+STAT reserved_fds 20
+STAT cmd_get 1
+STAT cmd_set 1
+STAT cmd_flush 0
+STAT cmd_touch 0
+STAT cmd_meta 0
+STAT get_hits 0
+STAT get_misses 1
+STAT get_expired 0
+STAT get_flushed 0
+STAT delete_misses 0
+STAT delete_hits 0
+STAT incr_misses 0
+STAT incr_hits 0
+STAT decr_misses 0
+STAT decr_hits 0
+STAT cas_misses 0
+STAT cas_hits 0
+STAT cas_badval 0
+STAT touch_hits 0
+STAT touch_misses 0
+STAT store_too_large 0
+STAT store_no_memory 0
+STAT auth_cmds 0
+STAT auth_errors 0
+STAT bytes_read 108662
+STAT bytes_written 9761348
+STAT limit_maxbytes 67108864
+STAT accepting_conns 1
+STAT listen_disabled_num 0
+STAT time_in_listen_disabled_us 0
+STAT threads 4
+STAT conn_yields 0
+STAT hash_power_level 16
+STAT hash_bytes 524288
+STAT hash_is_expanding 0
+STAT slab_reassign_rescues 0
+STAT slab_reassign_chunk_rescues 0
+STAT slab_reassign_evictions_nomem 0
+STAT slab_reassign_inline_reclaim 0
+STAT slab_reassign_busy_items 0
+STAT slab_reassign_busy_deletes 0
+STAT slab_reassign_running 0
+STAT slabs_moved 0
+STAT lru_crawler_running 0
+STAT lru_crawler_starts 13
+STAT lru_maintainer_juggles 9280
+STAT malloc_fails 0
+STAT log_worker_dropped 0
+STAT log_worker_written 0
+STAT log_watcher_skipped 0
+STAT log_watcher_sent 0
+STAT log_watchers 0
+STAT unexpected_napi_ids 0
+STAT round_robin_fallback 0
+STAT bytes 33
+STAT curr_items 0
+STAT total_items 1
+STAT slab_global_page_pool 0
+STAT expired_unfetched 1
+STAT evicted_unfetched 0
+STAT evicted_active 0
+STAT evictions 0
+STAT reclaimed 1
+STAT crawler_reclaimed 0
+STAT crawler_items_checked 0
+STAT lrutail_reflocked 0
+STAT moves_to_cold 1
+STAT moves_to_warm 0
+STAT moves_within_lru 0
+STAT direct_reclaims 0
+STAT lru_bumps_dropped 0
+END
diff --git a/src/go/plugin/go.d/modules/mongodb/README.md b/src/go/plugin/go.d/modules/mongodb/README.md
new file mode 120000
index 000000000..a28253054
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/README.md
@@ -0,0 +1 @@
+integrations/mongodb.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mongodb/charts.go b/src/go/plugin/go.d/modules/mongodb/charts.go
new file mode 100644
index 000000000..af9dfcefc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/charts.go
@@ -0,0 +1,1036 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioOperationsRate = module.Priority + iota
+ prioOperationsLatencyTime
+ prioOperationsByTypeRate
+ prioDocumentOperationsRate
+ prioScannedIndexesRate
+ prioScannedDocumentsRate
+
+ prioActiveClientsCount
+ prioQueuedOperationsCount
+
+ prioGlobalLockAcquisitionsRate
+ prioDatabaseLockAcquisitionsRate
+ prioCollectionLockAcquisitionsRate
+ prioMutexLockAcquisitionsRate
+ prioMetadataLockAcquisitionsRate
+ prioOpLogLockAcquisitionsRate
+
+ prioCursorsOpenCount
+ prioCursorsOpenNoTimeoutCount
+ prioCursorsOpenedRate
+ prioTimedOutCursorsRate
+ prioCursorsByLifespanCount
+
+ prioTransactionsCount
+ prioTransactionsRate
+ prioTransactionsNoShardsCommitsRate
+ prioTransactionsNoShardsCommitsDurationTime
+ prioTransactionsSingleShardCommitsRate
+ prioTransactionsSingleShardCommitsDurationTime
+ prioTransactionsSingleWriteShardCommitsRate
+ prioTransactionsSingleWriteShardCommitsDurationTime
+ prioTransactionsReadOnlyCommitsRate
+ prioTransactionsReadOnlyCommitsDurationTime
+ prioTransactionsTwoPhaseCommitCommitsRate
+ prioTransactionsTwoPhaseCommitCommitsDurationTime
+ prioTransactionsRecoverWithTokenCommitsRate
+ prioTransactionsRecoverWithTokenCommitsDurationTime
+
+ prioConnectionsUsage
+ prioConnectionsByStateCount
+ prioConnectionsRate
+
+ prioAssertsRate
+
+ prioNetworkTrafficRate
+ prioNetworkRequestsRate
+ prioNetworkSlowDNSResolutionsRate
+ prioNetworkSlowSSLHandshakesRate
+
+ prioMemoryResidentSize
+ prioMemoryVirtualSize
+ prioMemoryPageFaultsRate
+ prioMemoryTCMallocStats
+
+ prioWiredTigerConcurrentReadTransactionsUsage
+ prioWiredTigerConcurrentWriteTransactionsUsage
+ prioWiredTigerCacheUsage
+ prioWiredTigerCacheDirtySpaceSize
+ prioWiredTigerCacheIORate
+ prioWiredTigerCacheEvictionsRate
+
+ prioDatabaseCollectionsCount
+ prioDatabaseIndexesCount
+ prioDatabaseViewsCount
+ prioDatabaseDocumentsCount
+ prioDatabaseDataSize
+ prioDatabaseStorageSize
+ prioDatabaseIndexSize
+
+ prioReplSetMemberState
+ prioReplSetMemberHealthStatus
+ prioReplSetMemberReplicationLagTime
+ prioReplSetMemberHeartbeatLatencyTime
+ prioReplSetMemberPingRTTTime
+ prioReplSetMemberUptime
+
+ prioShardingNodesCount
+ prioShardingShardedDatabasesCount
+ prioShardingShardedCollectionsCount
+ prioShardChunks
+)
+
+const (
+ chartPxDatabase = "database_"
+ chartPxReplSetMember = "replica_set_member_"
+ chartPxShard = "sharding_shard_"
+)
+
+// these charts are expected to be available in many versions
+var chartsServerStatus = module.Charts{
+ chartOperationsByTypeRate.Copy(),
+ chartDocumentOperationsRate.Copy(),
+ chartScannedIndexesRate.Copy(),
+ chartScannedDocumentsRate.Copy(),
+
+ chartConnectionsUsage.Copy(),
+ chartConnectionsByStateCount.Copy(),
+ chartConnectionsRate.Copy(),
+
+ chartNetworkTrafficRate.Copy(),
+ chartNetworkRequestsRate.Copy(),
+
+ chartMemoryResidentSize.Copy(),
+ chartMemoryVirtualSize.Copy(),
+ chartMemoryPageFaultsRate.Copy(),
+
+ chartAssertsRate.Copy(),
+}
+
+var chartsTmplDatabase = module.Charts{
+ chartTmplDatabaseCollectionsCount.Copy(),
+ chartTmplDatabaseIndexesCount.Copy(),
+ chartTmplDatabaseViewsCount.Copy(),
+ chartTmplDatabaseDocumentsCount.Copy(),
+ chartTmplDatabaseDataSize.Copy(),
+ chartTmplDatabaseStorageSize.Copy(),
+ chartTmplDatabaseIndexSize.Copy(),
+}
+
+var chartsTmplReplSetMember = module.Charts{
+ chartTmplReplSetMemberState.Copy(),
+ chartTmplReplSetMemberHealthStatus.Copy(),
+ chartTmplReplSetMemberReplicationLagTime.Copy(),
+ chartTmplReplSetMemberHeartbeatLatencyTime.Copy(),
+ chartTmplReplSetMemberPingRTTTime.Copy(),
+ chartTmplReplSetMemberUptime.Copy(),
+}
+
+var chartsSharding = module.Charts{
+ chartShardingNodesCount.Copy(),
+ chartShardingShardedDatabases.Copy(),
+ chartShardingShardedCollectionsCount.Copy(),
+}
+
+var chartsTmplShardingShard = module.Charts{
+ chartTmplShardChunks.Copy(),
+}
+
+var (
+ chartOperationsRate = module.Chart{
+ ID: "operations_rate",
+ Title: "Operations rate",
+ Units: "operations/s",
+ Fam: "operations",
+ Ctx: "mongodb.operations_rate",
+ Priority: prioOperationsRate,
+ Dims: module.Dims{
+ {ID: "operations_latencies_reads_ops", Name: "reads", Algo: module.Incremental},
+ {ID: "operations_latencies_writes_ops", Name: "writes", Algo: module.Incremental},
+ {ID: "operations_latencies_commands_ops", Name: "commands", Algo: module.Incremental},
+ },
+ }
+ chartOperationsLatencyTime = module.Chart{
+ ID: "operations_latency_time",
+ Title: "Operations Latency",
+ Units: "milliseconds",
+ Fam: "operations",
+ Ctx: "mongodb.operations_latency_time",
+ Priority: prioOperationsLatencyTime,
+ Dims: module.Dims{
+ {ID: "operations_latencies_reads_latency", Name: "reads", Algo: module.Incremental, Div: 1000},
+ {ID: "operations_latencies_writes_latency", Name: "writes", Algo: module.Incremental, Div: 1000},
+ {ID: "operations_latencies_commands_latency", Name: "commands", Algo: module.Incremental, Div: 1000},
+ },
+ }
+ chartOperationsByTypeRate = module.Chart{
+ ID: "operations_by_type_rate",
+ Title: "Operations by type",
+ Units: "operations/s",
+ Fam: "operations",
+ Ctx: "mongodb.operations_by_type_rate",
+ Priority: prioOperationsByTypeRate,
+ Dims: module.Dims{
+ {ID: "operations_insert", Name: "insert", Algo: module.Incremental},
+ {ID: "operations_query", Name: "query", Algo: module.Incremental},
+ {ID: "operations_update", Name: "update", Algo: module.Incremental},
+ {ID: "operations_delete", Name: "delete", Algo: module.Incremental},
+ {ID: "operations_getmore", Name: "getmore", Algo: module.Incremental},
+ {ID: "operations_command", Name: "command", Algo: module.Incremental},
+ },
+ }
+ chartDocumentOperationsRate = module.Chart{
+ ID: "document_operations_rate",
+ Title: "Document operations",
+ Units: "operations/s",
+ Fam: "operations",
+ Ctx: "mongodb.document_operations_rate",
+ Type: module.Stacked,
+ Priority: prioDocumentOperationsRate,
+ Dims: module.Dims{
+ {ID: "metrics_document_inserted", Name: "inserted", Algo: module.Incremental},
+ {ID: "metrics_document_deleted", Name: "deleted", Algo: module.Incremental},
+ {ID: "metrics_document_returned", Name: "returned", Algo: module.Incremental},
+ {ID: "metrics_document_updated", Name: "updated", Algo: module.Incremental},
+ },
+ }
+ chartScannedIndexesRate = module.Chart{
+ ID: "scanned_indexes_rate",
+ Title: "Scanned indexes",
+ Units: "indexes/s",
+ Fam: "operations",
+ Ctx: "mongodb.scanned_indexes_rate",
+ Priority: prioScannedIndexesRate,
+ Dims: module.Dims{
+ {ID: "metrics_query_executor_scanned", Name: "scanned", Algo: module.Incremental},
+ },
+ }
+ chartScannedDocumentsRate = module.Chart{
+ ID: "scanned_documents_rate",
+ Title: "Scanned documents",
+ Units: "documents/s",
+ Fam: "operations",
+ Ctx: "mongodb.scanned_documents_rate",
+ Priority: prioScannedDocumentsRate,
+ Dims: module.Dims{
+ {ID: "metrics_query_executor_scanned_objects", Name: "scanned", Algo: module.Incremental},
+ },
+ }
+
+ chartGlobalLockActiveClientsCount = module.Chart{
+ ID: "active_clients_count",
+ Title: "Connected clients",
+ Units: "clients",
+ Fam: "clients",
+ Ctx: "mongodb.active_clients_count",
+ Priority: prioActiveClientsCount,
+ Dims: module.Dims{
+ {ID: "global_lock_active_clients_readers", Name: "readers"},
+ {ID: "global_lock_active_clients_writers", Name: "writers"},
+ },
+ }
+ chartGlobalLockCurrentQueueCount = module.Chart{
+ ID: "queued_operations",
+ Title: "Queued operations because of a lock",
+ Units: "operations",
+ Fam: "clients",
+ Ctx: "mongodb.queued_operations_count",
+ Priority: prioQueuedOperationsCount,
+ Dims: module.Dims{
+ {ID: "global_lock_current_queue_readers", Name: "readers"},
+ {ID: "global_lock_current_queue_writers", Name: "writers"},
+ },
+ }
+
+ chartConnectionsUsage = module.Chart{
+ ID: "connections_usage",
+ Title: "Connections usage",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "mongodb.connections_usage",
+ Type: module.Stacked,
+ Priority: prioConnectionsUsage,
+ Dims: module.Dims{
+ {ID: "connections_available", Name: "available"},
+ {ID: "connections_current", Name: "used"},
+ },
+ }
+ chartConnectionsByStateCount = module.Chart{
+ ID: "connections_by_state_count",
+ Title: "Connections By State",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "mongodb.connections_by_state_count",
+ Priority: prioConnectionsByStateCount,
+ Dims: module.Dims{
+ {ID: "connections_active", Name: "active"},
+ {ID: "connections_threaded", Name: "threaded"},
+ {ID: "connections_exhaust_is_master", Name: "exhaust_is_master"},
+ {ID: "connections_exhaust_hello", Name: "exhaust_hello"},
+ {ID: "connections_awaiting_topology_changes", Name: "awaiting_topology_changes"},
+ },
+ }
+ chartConnectionsRate = module.Chart{
+ ID: "connections_rate",
+ Title: "Connections Rate",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "mongodb.connections_rate",
+ Priority: prioConnectionsRate,
+ Dims: module.Dims{
+ {ID: "connections_total_created", Name: "created", Algo: module.Incremental},
+ },
+ }
+
+ chartNetworkTrafficRate = module.Chart{
+ ID: "network_traffic",
+ Title: "Network traffic",
+ Units: "bytes/s",
+ Fam: "network",
+ Ctx: "mongodb.network_traffic_rate",
+ Priority: prioNetworkTrafficRate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "network_bytes_in", Name: "in", Algo: module.Incremental},
+ {ID: "network_bytes_out", Name: "out", Algo: module.Incremental},
+ },
+ }
+ chartNetworkRequestsRate = module.Chart{
+ ID: "network_requests_rate",
+ Title: "Network Requests",
+ Units: "requests/s",
+ Fam: "network",
+ Ctx: "mongodb.network_requests_rate",
+ Priority: prioNetworkRequestsRate,
+ Dims: module.Dims{
+ {ID: "network_requests", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ chartNetworkSlowDNSResolutionsRate = module.Chart{
+ ID: "network_slow_dns_resolutions_rate",
+ Title: "Slow DNS resolution operations",
+ Units: "resolutions/s",
+ Fam: "network",
+ Ctx: "mongodb.network_slow_dns_resolutions_rate",
+ Priority: prioNetworkSlowDNSResolutionsRate,
+ Dims: module.Dims{
+ {ID: "network_slow_dns_operations", Name: "slow_dns", Algo: module.Incremental},
+ },
+ }
+ chartNetworkSlowSSLHandshakesRate = module.Chart{
+ ID: "network_slow_ssl_handshakes_rate",
+ Title: "Slow SSL handshake operations",
+ Units: "handshakes/s",
+ Fam: "network",
+ Ctx: "mongodb.network_slow_ssl_handshakes_rate",
+ Priority: prioNetworkSlowSSLHandshakesRate,
+ Dims: module.Dims{
+ {ID: "network_slow_ssl_operations", Name: "slow_ssl", Algo: module.Incremental},
+ },
+ }
+
+ chartMemoryResidentSize = module.Chart{
+ ID: "memory_resident_size",
+ Title: "Used resident memory",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "mongodb.memory_resident_size",
+ Priority: prioMemoryResidentSize,
+ Dims: module.Dims{
+ {ID: "memory_resident", Name: "used"},
+ },
+ }
+ chartMemoryVirtualSize = module.Chart{
+ ID: "memory_virtual_size",
+ Title: "Used virtual memory",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "mongodb.memory_virtual_size",
+ Priority: prioMemoryVirtualSize,
+ Dims: module.Dims{
+ {ID: "memory_virtual", Name: "used"},
+ },
+ }
+ chartMemoryPageFaultsRate = module.Chart{
+ ID: "memory_page_faults",
+ Title: "Memory page faults",
+ Units: "pgfaults/s",
+ Fam: "memory",
+ Ctx: "mongodb.memory_page_faults_rate",
+ Priority: prioMemoryPageFaultsRate,
+ Dims: module.Dims{
+ {ID: "extra_info_page_faults", Name: "pgfaults", Algo: module.Incremental},
+ },
+ }
+ chartMemoryTCMallocStatsChart = module.Chart{
+ ID: "memory_tcmalloc_stats",
+ Title: "TCMalloc statistics",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "mongodb.memory_tcmalloc_stats",
+ Priority: prioMemoryTCMallocStats,
+ Dims: module.Dims{
+ {ID: "tcmalloc_generic_current_allocated_bytes", Name: "allocated"},
+ {ID: "tcmalloc_central_cache_free_bytes", Name: "central_cache_freelist"},
+ {ID: "tcmalloc_transfer_cache_free_bytes", Name: "transfer_cache_freelist"},
+ {ID: "tcmalloc_thread_cache_free_bytes", Name: "thread_cache_freelists"},
+ {ID: "tcmalloc_pageheap_free_bytes", Name: "pageheap_freelist"},
+ {ID: "tcmalloc_pageheap_unmapped_bytes", Name: "pageheap_unmapped"},
+ },
+ }
+
+ chartAssertsRate = module.Chart{
+ ID: "asserts_rate",
+ Title: "Raised assertions",
+ Units: "asserts/s",
+ Fam: "asserts",
+ Ctx: "mongodb.asserts_rate",
+ Type: module.Stacked,
+ Priority: prioAssertsRate,
+ Dims: module.Dims{
+ {ID: "asserts_regular", Name: "regular", Algo: module.Incremental},
+ {ID: "asserts_warning", Name: "warning", Algo: module.Incremental},
+ {ID: "asserts_msg", Name: "msg", Algo: module.Incremental},
+ {ID: "asserts_user", Name: "user", Algo: module.Incremental},
+ {ID: "asserts_tripwire", Name: "tripwire", Algo: module.Incremental},
+ {ID: "asserts_rollovers", Name: "rollovers", Algo: module.Incremental},
+ },
+ }
+
+ chartTransactionsCount = module.Chart{
+ ID: "transactions_count",
+ Title: "Current transactions",
+ Units: "transactions",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_count",
+ Priority: prioTransactionsCount,
+ Dims: module.Dims{
+ {ID: "txn_active", Name: "active"},
+ {ID: "txn_inactive", Name: "inactive"},
+ {ID: "txn_open", Name: "open"},
+ {ID: "txn_prepared", Name: "prepared"},
+ },
+ }
+ chartTransactionsRate = module.Chart{
+ ID: "transactions_rate",
+ Title: "Transactions rate",
+ Units: "transactions/s",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_rate",
+ Priority: prioTransactionsRate,
+ Dims: module.Dims{
+ {ID: "txn_total_started", Name: "started", Algo: module.Incremental},
+ {ID: "txn_total_aborted", Name: "aborted", Algo: module.Incremental},
+ {ID: "txn_total_committed", Name: "committed", Algo: module.Incremental},
+ {ID: "txn_total_prepared", Name: "prepared", Algo: module.Incremental},
+ },
+ }
+ chartTransactionsNoShardsCommitsRate = module.Chart{
+ ID: "transactions_no_shards_commits_rate",
+ Title: "Transactions commits",
+ Units: "commits/s",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_rate",
+ Priority: prioTransactionsNoShardsCommitsRate,
+ Type: module.Stacked,
+ Labels: []module.Label{{Key: "commit_type", Value: "noShards"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_no_shards_successful", Name: "success", Algo: module.Incremental},
+ {ID: "txn_commit_types_no_shards_unsuccessful", Name: "fail", Algo: module.Incremental},
+ },
+ }
+ chartTransactionsNoShardsCommitsDurationTime = module.Chart{
+ ID: "transactions_no_shards_commits_duration_time",
+ Title: "Transactions successful commits duration",
+ Units: "milliseconds",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_duration_time",
+ Priority: prioTransactionsNoShardsCommitsDurationTime,
+ Labels: []module.Label{{Key: "commit_type", Value: "noShards"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_no_shards_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000},
+ },
+ }
+ chartTransactionsSingleShardCommitsRate = module.Chart{
+ ID: "transactions_single_shard_commits_rate",
+ Title: "Transactions commits",
+ Units: "commits/s",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_rate",
+ Priority: prioTransactionsSingleShardCommitsRate,
+ Type: module.Stacked,
+ Labels: []module.Label{{Key: "commit_type", Value: "singleShard"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_single_shard_successful", Name: "success", Algo: module.Incremental},
+ {ID: "txn_commit_types_single_shard_unsuccessful", Name: "fail", Algo: module.Incremental},
+ },
+ }
+ chartTransactionsSingleShardCommitsDurationTime = module.Chart{
+ ID: "transactions_single_shard_commits_duration_time",
+ Title: "Transactions successful commits duration",
+ Units: "milliseconds",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_duration_time",
+ Priority: prioTransactionsSingleShardCommitsDurationTime,
+ Labels: []module.Label{{Key: "commit_type", Value: "singleShard"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_single_shard_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000},
+ },
+ }
+ chartTransactionsSingleWriteShardCommitsRate = module.Chart{
+ ID: "transactions_single_write_shard_commits_rate",
+ Title: "Transactions commits",
+ Units: "commits/s",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_rate",
+ Priority: prioTransactionsSingleWriteShardCommitsRate,
+ Type: module.Stacked,
+ Labels: []module.Label{{Key: "commit_type", Value: "singleWriteShard"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_single_write_shard_successful", Name: "success", Algo: module.Incremental},
+ {ID: "txn_commit_types_single_write_shard_unsuccessful", Name: "fail", Algo: module.Incremental},
+ },
+ }
+ chartTransactionsSingleWriteShardCommitsDurationTime = module.Chart{
+ ID: "transactions_single_write_shard_commits_duration_time",
+ Title: "Transactions successful commits duration",
+ Units: "milliseconds",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_duration_time",
+ Priority: prioTransactionsSingleWriteShardCommitsDurationTime,
+ Labels: []module.Label{{Key: "commit_type", Value: "singleWriteShard"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_single_write_shard_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000},
+ },
+ }
+ chartTransactionsReadOnlyCommitsRate = module.Chart{
+ ID: "transactions_read_only_commits_rate",
+ Title: "Transactions commits",
+ Units: "commits/s",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_rate",
+ Priority: prioTransactionsReadOnlyCommitsRate,
+ Type: module.Stacked,
+ Labels: []module.Label{{Key: "commit_type", Value: "readOnly"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_read_only_successful", Name: "success", Algo: module.Incremental},
+ {ID: "txn_commit_types_read_only_unsuccessful", Name: "fail", Algo: module.Incremental},
+ },
+ }
+ chartTransactionsReadOnlyCommitsDurationTime = module.Chart{
+ ID: "transactions_read_only_commits_duration_time",
+ Title: "Transactions successful commits duration",
+ Units: "milliseconds",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_duration_time",
+ Priority: prioTransactionsReadOnlyCommitsDurationTime,
+ Labels: []module.Label{{Key: "commit_type", Value: "readOnly"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_read_only_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000},
+ },
+ }
+ chartTransactionsTwoPhaseCommitCommitsRate = module.Chart{
+ ID: "transactions_two_phase_commit_commits_rate",
+ Title: "Transactions commits",
+ Units: "commits/s",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_rate",
+ Priority: prioTransactionsTwoPhaseCommitCommitsRate,
+ Type: module.Stacked,
+ Labels: []module.Label{{Key: "commit_type", Value: "twoPhaseCommit"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_two_phase_commit_successful", Name: "success", Algo: module.Incremental},
+ {ID: "txn_commit_types_two_phase_commit_unsuccessful", Name: "fail", Algo: module.Incremental},
+ },
+ }
+ chartTransactionsTwoPhaseCommitCommitsDurationTime = module.Chart{
+ ID: "transactions_two_phase_commit_commits_duration_time",
+ Title: "Transactions successful commits duration",
+ Units: "milliseconds",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_duration_time",
+ Priority: prioTransactionsTwoPhaseCommitCommitsDurationTime,
+ Labels: []module.Label{{Key: "commit_type", Value: "twoPhaseCommit"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_two_phase_commit_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000},
+ },
+ }
+ chartTransactionsRecoverWithTokenCommitsRate = module.Chart{
+ ID: "transactions_recover_with_token_commits_rate",
+ Title: "Transactions commits",
+ Units: "commits/s",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_rate",
+ Priority: prioTransactionsRecoverWithTokenCommitsRate,
+ Type: module.Stacked,
+ Labels: []module.Label{{Key: "commit_type", Value: "recoverWithToken"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_recover_with_token_successful", Name: "success", Algo: module.Incremental},
+ {ID: "txn_commit_types_recover_with_token_unsuccessful", Name: "fail", Algo: module.Incremental},
+ },
+ }
+ chartTransactionsRecoverWithTokenCommitsDurationTime = module.Chart{
+ ID: "transactions_recover_with_token_commits_duration_time",
+ Title: "Transactions successful commits duration",
+ Units: "milliseconds",
+ Fam: "transactions",
+ Ctx: "mongodb.transactions_commits_duration_time",
+ Priority: prioTransactionsRecoverWithTokenCommitsDurationTime,
+ Labels: []module.Label{{Key: "commit_type", Value: "recoverWithToken"}},
+ Dims: module.Dims{
+ {ID: "txn_commit_types_recover_with_token_successful_duration_micros", Name: "commits", Algo: module.Incremental, Div: 1000},
+ },
+ }
+
+ chartGlobalLockAcquisitionsRate = module.Chart{
+ ID: "global_lock_acquisitions_rate",
+ Title: "Global lock acquisitions",
+ Units: "acquisitions/s",
+ Fam: "locks",
+ Ctx: "mongodb.lock_acquisitions_rate",
+ Priority: prioGlobalLockAcquisitionsRate,
+ Labels: []module.Label{{Key: "lock_type", Value: "global"}},
+ Dims: module.Dims{
+ {ID: "locks_global_acquire_shared", Name: "shared", Algo: module.Incremental},
+ {ID: "locks_global_acquire_exclusive", Name: "exclusive", Algo: module.Incremental},
+ {ID: "locks_global_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental},
+ {ID: "locks_global_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental},
+ },
+ }
+ chartDatabaseLockAcquisitionsRate = module.Chart{
+ ID: "database_lock_acquisitions_rate",
+ Title: "Database lock acquisitions",
+ Units: "acquisitions/s",
+ Fam: "locks",
+ Ctx: "mongodb.lock_acquisitions_rate",
+ Priority: prioDatabaseLockAcquisitionsRate,
+ Labels: []module.Label{{Key: "lock_type", Value: "database"}},
+ Dims: module.Dims{
+ {ID: "locks_database_acquire_shared", Name: "shared", Algo: module.Incremental},
+ {ID: "locks_database_acquire_exclusive", Name: "exclusive", Algo: module.Incremental},
+ {ID: "locks_database_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental},
+ {ID: "locks_database_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental},
+ },
+ }
+ chartCollectionLockAcquisitionsRate = module.Chart{
+ ID: "collection_lock_acquisitions_rate",
+ Title: "Collection lock acquisitions",
+ Units: "acquisitions/s",
+ Fam: "locks",
+ Ctx: "mongodb.lock_acquisitions_rate",
+ Priority: prioCollectionLockAcquisitionsRate,
+ Labels: []module.Label{{Key: "lock_type", Value: "collection"}},
+ Dims: module.Dims{
+ {ID: "locks_collection_acquire_shared", Name: "shared", Algo: module.Incremental},
+ {ID: "locks_collection_acquire_exclusive", Name: "exclusive", Algo: module.Incremental},
+ {ID: "locks_collection_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental},
+ {ID: "locks_collection_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental},
+ },
+ }
+ chartMutexLockAcquisitionsRate = module.Chart{
+ ID: "mutex_lock_acquisitions_rate",
+ Title: "Mutex lock acquisitions",
+ Units: "acquisitions/s",
+ Fam: "locks",
+ Ctx: "mongodb.lock_acquisitions_rate",
+ Priority: prioMutexLockAcquisitionsRate,
+ Labels: []module.Label{{Key: "lock_type", Value: "mutex"}},
+ Dims: module.Dims{
+ {ID: "locks_mutex_acquire_shared", Name: "shared", Algo: module.Incremental},
+ {ID: "locks_mutex_acquire_exclusive", Name: "exclusive", Algo: module.Incremental},
+ {ID: "locks_mutex_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental},
+ {ID: "locks_mutex_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental},
+ },
+ }
+ chartMetadataLockAcquisitionsRate = module.Chart{
+ ID: "metadata_lock_acquisitions_rate",
+ Title: "Metadata lock acquisitions",
+ Units: "acquisitions/s",
+ Fam: "locks",
+ Ctx: "mongodb.lock_acquisitions_rate",
+ Priority: prioMetadataLockAcquisitionsRate,
+ Labels: []module.Label{{Key: "lock_type", Value: "metadata"}},
+ Dims: module.Dims{
+ {ID: "locks_metadata_acquire_shared", Name: "shared", Algo: module.Incremental},
+ {ID: "locks_metadata_acquire_exclusive", Name: "exclusive", Algo: module.Incremental},
+ {ID: "locks_metadata_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental},
+ {ID: "locks_metadata_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental},
+ },
+ }
+ chartOpLogLockAcquisitionsRate = module.Chart{
+ ID: "oplog_lock_acquisitions_rate",
+ Title: "Operations log lock acquisitions",
+ Units: "acquisitions/s",
+ Fam: "locks",
+ Ctx: "mongodb.lock_acquisitions_rate",
+ Priority: prioOpLogLockAcquisitionsRate,
+ Labels: []module.Label{{Key: "lock_type", Value: "oplog"}},
+ Dims: module.Dims{
+ {ID: "locks_oplog_acquire_shared", Name: "shared", Algo: module.Incremental},
+ {ID: "locks_oplog_acquire_exclusive", Name: "exclusive", Algo: module.Incremental},
+ {ID: "locks_oplog_acquire_intent_shared", Name: "intent_shared", Algo: module.Incremental},
+ {ID: "locks_oplog_acquire_intent_exclusive", Name: "intent_exclusive", Algo: module.Incremental},
+ },
+ }
+
+ chartCursorsOpenCount = module.Chart{
+ ID: "cursors_open_count",
+ Title: "Open cursors",
+ Units: "cursors",
+ Fam: "cursors",
+ Ctx: "mongodb.cursors_open_count",
+ Priority: prioCursorsOpenCount,
+ Dims: module.Dims{
+ {ID: "metrics_cursor_open_total", Name: "open"},
+ },
+ }
+ chartCursorsOpenNoTimeoutCount = module.Chart{
+ ID: "cursors_open_no_timeout_count",
+ Title: "Open cursors with disabled timeout",
+ Units: "cursors",
+ Fam: "cursors",
+ Ctx: "mongodb.cursors_open_no_timeout_count",
+ Priority: prioCursorsOpenNoTimeoutCount,
+ Dims: module.Dims{
+ {ID: "metrics_cursor_open_no_timeout", Name: "open_no_timeout"},
+ },
+ }
+ chartCursorsOpenedRate = module.Chart{
+ ID: "cursors_opened_rate",
+ Title: "Opened cursors rate",
+ Units: "cursors/s",
+ Fam: "cursors",
+ Ctx: "mongodb.cursors_opened_rate",
+ Priority: prioCursorsOpenedRate,
+ Dims: module.Dims{
+ {ID: "metrics_cursor_total_opened", Name: "opened"},
+ },
+ }
+ chartCursorsTimedOutRate = module.Chart{
+ ID: "cursors_timed_out_rate",
+ Title: "Timed-out cursors",
+ Units: "cursors/s",
+ Fam: "cursors",
+ Ctx: "mongodb.cursors_timed_out_rate",
+ Priority: prioTimedOutCursorsRate,
+ Dims: module.Dims{
+ {ID: "metrics_cursor_timed_out", Name: "timed_out"},
+ },
+ }
+ chartCursorsByLifespanCount = module.Chart{
+ ID: "cursors_by_lifespan_count",
+ Title: "Cursors lifespan",
+ Units: "cursors",
+ Fam: "cursors",
+ Ctx: "mongodb.cursors_by_lifespan_count",
+ Priority: prioCursorsByLifespanCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "metrics_cursor_lifespan_less_than_1_second", Name: "le_1s"},
+ {ID: "metrics_cursor_lifespan_less_than_5_seconds", Name: "1s_5s"},
+ {ID: "metrics_cursor_lifespan_less_than_15_seconds", Name: "5s_15s"},
+ {ID: "metrics_cursor_lifespan_less_than_30_seconds", Name: "15s_30s"},
+ {ID: "metrics_cursor_lifespan_less_than_1_minute", Name: "30s_1m"},
+ {ID: "metrics_cursor_lifespan_less_than_10_minutes", Name: "1m_10m"},
+ {ID: "metrics_cursor_lifespan_greater_than_or_equal_10_minutes", Name: "ge_10m"},
+ },
+ }
+
+ chartWiredTigerConcurrentReadTransactionsUsage = module.Chart{
+ ID: "wiredtiger_concurrent_read_transactions_usage",
+ Title: "Wired Tiger concurrent read transactions usage",
+ Units: "transactions",
+ Fam: "wiredtiger",
+ Ctx: "mongodb.wiredtiger_concurrent_read_transactions_usage",
+ Priority: prioWiredTigerConcurrentReadTransactionsUsage,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "wiredtiger_concurrent_txn_read_available", Name: "available"},
+ {ID: "wiredtiger_concurrent_txn_read_out", Name: "used"},
+ },
+ }
+ chartWiredTigerConcurrentWriteTransactionsUsage = module.Chart{
+ ID: "wiredtiger_concurrent_write_transactions_usage",
+ Title: "Wired Tiger concurrent write transactions usage",
+ Units: "transactions",
+ Fam: "wiredtiger",
+ Ctx: "mongodb.wiredtiger_concurrent_write_transactions_usage",
+ Priority: prioWiredTigerConcurrentWriteTransactionsUsage,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "wiredtiger_concurrent_txn_write_available", Name: "available"},
+ {ID: "wiredtiger_concurrent_txn_write_out", Name: "used"},
+ },
+ }
+ chartWiredTigerCacheUsage = module.Chart{
+ ID: "wiredtiger_cache_usage",
+ Title: "Wired Tiger cache usage",
+ Units: "bytes",
+ Fam: "wiredtiger",
+ Ctx: "mongodb.wiredtiger_cache_usage",
+ Priority: prioWiredTigerCacheUsage,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "wiredtiger_cache_currently_in_cache_bytes", Name: "used"},
+ },
+ }
+ chartWiredTigerCacheDirtySpaceSize = module.Chart{
+ ID: "wiredtiger_cache_dirty_space_size",
+ Title: "Wired Tiger cache dirty space size",
+ Units: "bytes",
+ Fam: "wiredtiger",
+ Ctx: "mongodb.wiredtiger_cache_dirty_space_size",
+ Priority: prioWiredTigerCacheDirtySpaceSize,
+ Dims: module.Dims{
+ {ID: "wiredtiger_cache_tracked_dirty_in_the_cache_bytes", Name: "dirty"},
+ },
+ }
+ chartWiredTigerCacheIORate = module.Chart{
+ ID: "wiredtiger_cache_io_rate",
+ Title: "Wired Tiger IO activity",
+ Units: "pages/s",
+ Fam: "wiredtiger",
+ Ctx: "mongodb.wiredtiger_cache_io_rate",
+ Priority: prioWiredTigerCacheIORate,
+ Dims: module.Dims{
+ {ID: "wiredtiger_cache_read_into_cache_pages", Name: "read", Algo: module.Incremental},
+ {ID: "wiredtiger_cache_written_from_cache_pages", Name: "written", Algo: module.Incremental},
+ },
+ }
+ chartWiredTigerCacheEvictionsRate = module.Chart{
+ ID: "wiredtiger_cache_eviction_rate",
+ Title: "Wired Tiger cache evictions",
+ Units: "pages/s",
+ Fam: "wiredtiger",
+ Ctx: "mongodb.wiredtiger_cache_evictions_rate",
+ Type: module.Stacked,
+ Priority: prioWiredTigerCacheEvictionsRate,
+ Dims: module.Dims{
+ {ID: "wiredtiger_cache_unmodified_evicted_pages", Name: "unmodified", Algo: module.Incremental},
+ {ID: "wiredtiger_cache_modified_evicted_pages", Name: "modified", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartTmplDatabaseCollectionsCount = &module.Chart{
+ ID: chartPxDatabase + "%s_collections_count",
+ Title: "Database collections",
+ Units: "collections",
+ Fam: "databases",
+ Ctx: "mongodb.database_collections_count",
+ Priority: prioDatabaseCollectionsCount,
+ Dims: module.Dims{
+ {ID: "database_%s_collections", Name: "collections"},
+ },
+ }
+ chartTmplDatabaseIndexesCount = &module.Chart{
+ ID: chartPxDatabase + "%s_indexes_count",
+ Title: "Database indexes",
+ Units: "indexes",
+ Fam: "databases",
+ Ctx: "mongodb.database_indexes_count",
+ Priority: prioDatabaseIndexesCount,
+ Dims: module.Dims{
+ {ID: "database_%s_indexes", Name: "indexes"},
+ },
+ }
+ chartTmplDatabaseViewsCount = &module.Chart{
+ ID: chartPxDatabase + "%s_views_count",
+ Title: "Database views",
+ Units: "views",
+ Fam: "databases",
+ Ctx: "mongodb.database_views_count",
+ Priority: prioDatabaseViewsCount,
+ Dims: module.Dims{
+ {ID: "database_%s_views", Name: "views"},
+ },
+ }
+ chartTmplDatabaseDocumentsCount = &module.Chart{
+ ID: chartPxDatabase + "%s_documents_count",
+ Title: "Database documents",
+ Units: "documents",
+ Fam: "databases",
+ Ctx: "mongodb.database_documents_count",
+ Priority: prioDatabaseDocumentsCount,
+ Dims: module.Dims{
+ {ID: "database_%s_documents", Name: "documents"},
+ },
+ }
+ chartTmplDatabaseDataSize = &module.Chart{
+ ID: chartPxDatabase + "%s_data_size",
+ Title: "Database data size",
+ Units: "bytes",
+ Fam: "databases",
+ Ctx: "mongodb.database_data_size",
+ Priority: prioDatabaseDataSize,
+ Dims: module.Dims{
+ {ID: "database_%s_data_size", Name: "data_size"},
+ },
+ }
+ chartTmplDatabaseStorageSize = &module.Chart{
+ ID: chartPxDatabase + "%s_storage_size",
+ Title: "Database storage size",
+ Units: "bytes",
+ Fam: "databases",
+ Ctx: "mongodb.database_storage_size",
+ Priority: prioDatabaseStorageSize,
+ Dims: module.Dims{
+ {ID: "database_%s_storage_size", Name: "storage_size"},
+ },
+ }
+ chartTmplDatabaseIndexSize = &module.Chart{
+ ID: chartPxDatabase + "%s_index_size",
+ Title: "Database index size",
+ Units: "bytes",
+ Fam: "databases",
+ Ctx: "mongodb.database_index_size",
+ Priority: prioDatabaseIndexSize,
+ Dims: module.Dims{
+ {ID: "database_%s_index_size", Name: "index_size"},
+ },
+ }
+)
+
+var (
+ chartTmplReplSetMemberState = &module.Chart{
+ ID: chartPxReplSetMember + "%s_state",
+ Title: "Replica Set member state",
+ Units: "state",
+ Fam: "replica sets",
+ Ctx: "mongodb.repl_set_member_state",
+ Priority: prioReplSetMemberState,
+ Dims: module.Dims{
+ {ID: "repl_set_member_%s_state_primary", Name: "primary"},
+ {ID: "repl_set_member_%s_state_startup", Name: "startup"},
+ {ID: "repl_set_member_%s_state_secondary", Name: "secondary"},
+ {ID: "repl_set_member_%s_state_recovering", Name: "recovering"},
+ {ID: "repl_set_member_%s_state_startup2", Name: "startup2"},
+ {ID: "repl_set_member_%s_state_unknown", Name: "unknown"},
+ {ID: "repl_set_member_%s_state_arbiter", Name: "arbiter"},
+ {ID: "repl_set_member_%s_state_down", Name: "down"},
+ {ID: "repl_set_member_%s_state_rollback", Name: "rollback"},
+ {ID: "repl_set_member_%s_state_removed", Name: "removed"},
+ },
+ }
+ chartTmplReplSetMemberHealthStatus = &module.Chart{
+ ID: chartPxReplSetMember + "%s_health_status",
+ Title: "Replica Set member health status",
+ Units: "status",
+ Fam: "replica sets",
+ Ctx: "mongodb.repl_set_member_health_status",
+ Priority: prioReplSetMemberHealthStatus,
+ Dims: module.Dims{
+ {ID: "repl_set_member_%s_health_status_up", Name: "up"},
+ {ID: "repl_set_member_%s_health_status_down", Name: "down"},
+ },
+ }
+ chartTmplReplSetMemberReplicationLagTime = &module.Chart{
+ ID: chartPxReplSetMember + "%s_replication_lag_time",
+ Title: "Replica Set member replication lag",
+ Units: "milliseconds",
+ Fam: "replica sets",
+ Ctx: "mongodb.repl_set_member_replication_lag_time",
+ Priority: prioReplSetMemberReplicationLagTime,
+ Dims: module.Dims{
+ {ID: "repl_set_member_%s_replication_lag", Name: "replication_lag"},
+ },
+ }
+ chartTmplReplSetMemberHeartbeatLatencyTime = &module.Chart{
+ ID: chartPxReplSetMember + "%s_heartbeat_latency_time",
+ Title: "Replica Set member heartbeat latency",
+ Units: "milliseconds",
+ Fam: "replica sets",
+ Ctx: "mongodb.repl_set_member_heartbeat_latency_time",
+ Priority: prioReplSetMemberHeartbeatLatencyTime,
+ Dims: module.Dims{
+ {ID: "repl_set_member_%s_heartbeat_latency", Name: "heartbeat_latency"},
+ },
+ }
+ chartTmplReplSetMemberPingRTTTime = &module.Chart{
+ ID: chartPxReplSetMember + "%s_ping_rtt_time",
+ Title: "Replica Set member ping RTT",
+ Units: "milliseconds",
+ Fam: "replica sets",
+ Ctx: "mongodb.repl_set_member_ping_rtt_time",
+ Priority: prioReplSetMemberPingRTTTime,
+ Dims: module.Dims{
+ {ID: "repl_set_member_%s_ping_rtt", Name: "ping_rtt"},
+ },
+ }
+ chartTmplReplSetMemberUptime = &module.Chart{
+ ID: chartPxReplSetMember + "%s_uptime",
+ Title: "Replica Set member uptime",
+ Units: "seconds",
+ Fam: "replica sets",
+ Ctx: "mongodb.repl_set_member_uptime",
+ Priority: prioReplSetMemberUptime,
+ Dims: module.Dims{
+ {ID: "repl_set_member_%s_uptime", Name: "uptime"},
+ },
+ }
+)
+
+var (
+ chartShardingNodesCount = &module.Chart{
+ ID: "sharding_nodes_count",
+ Title: "Sharding Nodes",
+ Units: "nodes",
+ Fam: "sharding",
+ Ctx: "mongodb.sharding_nodes_count",
+ Type: module.Stacked,
+ Priority: prioShardingNodesCount,
+ Dims: module.Dims{
+ {ID: "shard_nodes_aware", Name: "shard_aware"},
+ {ID: "shard_nodes_unaware", Name: "shard_unaware"},
+ },
+ }
+ chartShardingShardedDatabases = &module.Chart{
+ ID: "sharding_sharded_databases_count",
+ Title: "Sharded databases",
+ Units: "databases",
+ Fam: "sharding",
+ Ctx: "mongodb.sharding_sharded_databases_count",
+ Type: module.Stacked,
+ Priority: prioShardingShardedDatabasesCount,
+ Dims: module.Dims{
+ {ID: "shard_databases_partitioned", Name: "partitioned"},
+ {ID: "shard_databases_unpartitioned", Name: "unpartitioned"},
+ },
+ }
+
+ chartShardingShardedCollectionsCount = &module.Chart{
+ ID: "sharding_sharded_collections_count",
+ Title: "Sharded collections",
+ Units: "collections",
+ Fam: "sharding",
+ Ctx: "mongodb.sharding_sharded_collections_count",
+ Type: module.Stacked,
+ Priority: prioShardingShardedCollectionsCount,
+ Dims: module.Dims{
+ {ID: "shard_collections_partitioned", Name: "partitioned"},
+ {ID: "shard_collections_unpartitioned", Name: "unpartitioned"},
+ },
+ }
+
+ chartTmplShardChunks = &module.Chart{
+ ID: chartPxShard + "%s_chunks",
+ Title: "Shard chunks",
+ Units: "chunks",
+ Fam: "sharding",
+ Ctx: "mongodb.sharding_shard_chunks_count",
+ Priority: prioShardChunks,
+ Dims: module.Dims{
+ {ID: "shard_id_%s_chunks", Name: "chunks"},
+ },
+ }
+)
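
The `%s` placeholders in the template chart IDs and dimension IDs above are filled in once per discovered database, replica set member, or shard. A minimal sketch of that instantiation step, assuming a helper roughly like the following exists elsewhere in the module (it is not part of this hunk):

```go
// Hypothetical sketch; the module's actual per-database chart creation may differ.
package mongo

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
)

func newDatabaseCharts(dbName string) *module.Charts {
	charts := chartsTmplDatabase.Copy()
	for _, chart := range *charts {
		// Fill the "%s" placeholders in the chart ID and in every dimension ID.
		chart.ID = fmt.Sprintf(chart.ID, dbName)
		chart.Labels = append(chart.Labels, module.Label{Key: "database", Value: dbName})
		for _, dim := range chart.Dims {
			dim.ID = fmt.Sprintf(dim.ID, dbName)
		}
	}
	return charts
}
```

Charts built this way would then be added to the running chart set, giving every database its own `database_<name>_*` family.
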
diff --git a/src/go/plugin/go.d/modules/mongodb/client.go b/src/go/plugin/go.d/modules/mongodb/client.go
new file mode 100644
index 000000000..eb36fa8ac
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/client.go
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "go.mongodb.org/mongo-driver/bson"
+ "go.mongodb.org/mongo-driver/mongo"
+ "go.mongodb.org/mongo-driver/mongo/options"
+)
+
+const (
+ mongos = "mongos"
+)
+
+type mongoConn interface {
+ serverStatus() (*documentServerStatus, error)
+ listDatabaseNames() ([]string, error)
+ dbStats(name string) (*documentDBStats, error)
+ isReplicaSet() bool
+ isMongos() bool
+ replSetGetStatus() (*documentReplSetStatus, error)
+ shardNodes() (*documentShardNodesResult, error)
+ shardDatabasesPartitioning() (*documentPartitionedResult, error)
+ shardCollectionsPartitioning() (*documentPartitionedResult, error)
+ shardChunks() (map[string]int64, error)
+ initClient(uri string, timeout time.Duration) error
+ close() error
+}
+
+type mongoClient struct {
+ client *mongo.Client
+ timeout time.Duration
+ replicaSetFlag *bool
+ mongosFlag *bool
+}
+
+func (c *mongoClient) serverStatus() (*documentServerStatus, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ cmd := bson.D{
+ {Key: "serverStatus", Value: 1},
+ {Key: "repl", Value: 1},
+ {Key: "metrics",
+ Value: bson.D{
+ {Key: "document", Value: true},
+ {Key: "cursor", Value: true},
+ {Key: "queryExecutor", Value: true},
+ {Key: "apiVersions", Value: false},
+ {Key: "aggStageCounters", Value: false},
+ {Key: "commands", Value: false},
+ {Key: "dotsAndDollarsFields", Value: false},
+ {Key: "getLastError", Value: false},
+ {Key: "mongos", Value: false},
+ {Key: "operation", Value: false},
+ {Key: "operatorCounters", Value: false},
+ {Key: "query", Value: false},
+ {Key: "record", Value: false},
+ {Key: "repl", Value: false},
+ {Key: "storage", Value: false},
+ {Key: "ttl", Value: false},
+ },
+ },
+ }
+ var status *documentServerStatus
+
+ err := c.client.Database("admin").RunCommand(ctx, cmd).Decode(&status)
+ if err != nil {
+ return nil, err
+ }
+
+ isReplSet := status.Repl != nil
+ c.replicaSetFlag = &isReplSet
+
+ isMongos := status.Process == mongos
+ c.mongosFlag = &isMongos
+
+ return status, err
+}
+
+func (c *mongoClient) listDatabaseNames() ([]string, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ return c.client.ListDatabaseNames(ctx, bson.M{})
+}
+
+func (c *mongoClient) dbStats(name string) (*documentDBStats, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ cmd := bson.M{"dbStats": 1}
+ var stats documentDBStats
+
+ if err := c.client.Database(name).RunCommand(ctx, cmd).Decode(&stats); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
+func (c *mongoClient) isReplicaSet() bool {
+ if c.replicaSetFlag != nil {
+ return *c.replicaSetFlag
+ }
+
+ status, err := c.serverStatus()
+ if err != nil {
+ return false
+ }
+
+ return status.Repl != nil
+}
+
+func (c *mongoClient) isMongos() bool {
+ if c.mongosFlag != nil {
+ return *c.mongosFlag
+ }
+
+ status, err := c.serverStatus()
+ if err != nil {
+ return false
+ }
+
+ return status.Process == mongos
+}
+
+func (c *mongoClient) replSetGetStatus() (*documentReplSetStatus, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ var status *documentReplSetStatus
+ cmd := bson.M{"replSetGetStatus": 1}
+
+ err := c.client.Database("admin").RunCommand(ctx, cmd).Decode(&status)
+ if err != nil {
+ return nil, err
+ }
+
+ return status, err
+}
+
+func (c *mongoClient) shardNodes() (*documentShardNodesResult, error) {
+ collection := "shards"
+ groupStage := bson.D{{Key: "$sortByCount", Value: "$state"}}
+
+ nodesByState, err := c.shardCollectAggregation(collection, []bson.D{groupStage})
+ if err != nil {
+ return nil, err
+ }
+
+ return &documentShardNodesResult{nodesByState.True, nodesByState.False}, nil
+}
+
+func (c *mongoClient) shardDatabasesPartitioning() (*documentPartitionedResult, error) {
+ collection := "databases"
+ groupStage := bson.D{{Key: "$sortByCount", Value: "$partitioned"}}
+
+ partitioning, err := c.shardCollectAggregation(collection, []bson.D{groupStage})
+ if err != nil {
+ return nil, err
+ }
+
+ return &documentPartitionedResult{partitioning.True, partitioning.False}, nil
+}
+
+func (c *mongoClient) shardCollectionsPartitioning() (*documentPartitionedResult, error) {
+ collection := "collections"
+ matchStage := bson.D{{Key: "$match", Value: bson.D{{Key: "dropped", Value: false}}}}
+ countStage := bson.D{{Key: "$sortByCount", Value: bson.D{{Key: "$eq", Value: bson.A{"$distributionMode", "sharded"}}}}}
+
+ partitioning, err := c.shardCollectAggregation(collection, []bson.D{matchStage, countStage})
+ if err != nil {
+ return nil, err
+ }
+
+ return &documentPartitionedResult{partitioning.True, partitioning.False}, nil
+}
+
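+// shardCollectAggregation runs an aggregation against a collection in the "config" database and splits the resulting counts into true/false buckets.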
+func (c *mongoClient) shardCollectAggregation(collection string, aggr []bson.D) (*documentAggrResult, error) {
+ rows, err := c.dbAggregate(collection, aggr)
+ if err != nil {
+ return nil, err
+ }
+
+ result := &documentAggrResult{}
+
+ for _, row := range rows {
+ if row.Bool {
+ result.True = row.Count
+ } else {
+ result.False = row.Count
+ }
+ }
+
+ return result, err
+}
+
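+// shardChunks returns the number of chunks per shard, aggregated from the config.chunks collection with $sortByCount.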
+func (c *mongoClient) shardChunks() (map[string]int64, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ col := c.client.Database("config").Collection("chunks")
+
+ cursor, err := col.Aggregate(ctx, mongo.Pipeline{bson.D{{Key: "$sortByCount", Value: "$shard"}}})
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() { _ = cursor.Close(ctx) }()
+
+ var shards []bson.M
+ if err = cursor.All(ctx, &shards); err != nil {
+ return nil, err
+ }
+
+ result := map[string]int64{}
+
+ for _, row := range shards {
+ k, ok := row["_id"].(string)
+ if !ok {
+ return nil, fmt.Errorf("shard name is not a string: %v", row["_id"])
+ }
+ v, ok := row["count"].(int32)
+ if !ok {
+ return nil, fmt.Errorf("shard chunk count is not an int32: %v", row["count"])
+ }
+ result[k] = int64(v)
+ }
+
+ return result, err
+}
+
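+// initClient lazily establishes the connection and verifies it with a ping; subsequent calls reuse the existing client.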
+func (c *mongoClient) initClient(uri string, timeout time.Duration) error {
+ if c.client != nil {
+ return nil
+ }
+
+ c.timeout = timeout
+
+ ctxConn, cancelConn := context.WithTimeout(context.Background(), c.timeout)
+ defer cancelConn()
+
+ client, err := mongo.Connect(ctxConn, options.Client().ApplyURI(uri))
+ if err != nil {
+ return err
+ }
+
+ ctxPing, cancelPing := context.WithTimeout(context.Background(), c.timeout)
+ defer cancelPing()
+
+ if err := client.Ping(ctxPing, nil); err != nil {
+ return err
+ }
+
+ c.client = client
+
+ return nil
+}
+
+func (c *mongoClient) close() error {
+ if c.client == nil {
+ return nil
+ }
+
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ if err := c.client.Disconnect(ctx); err != nil {
+ return err
+ }
+
+ c.client = nil
+
+ return nil
+}
+
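+// dbAggregate runs the given aggregation pipeline against a collection in the "config" database and decodes all result rows.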
+func (c *mongoClient) dbAggregate(collection string, aggr []bson.D) ([]documentAggrResults, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ cursor, err := c.client.Database("config").Collection(collection).Aggregate(ctx, aggr)
+ if err != nil {
+ return nil, err
+ }
+
+ defer func() { _ = cursor.Close(ctx) }()
+
+ var rows []documentAggrResults
+ if err := cursor.All(ctx, &rows); err != nil {
+ return nil, err
+ }
+
+ return rows, nil
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/collect.go b/src/go/plugin/go.d/modules/mongodb/collect.go
new file mode 100644
index 000000000..232145de3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/collect.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import "fmt"
+
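+// collect gathers serverStatus and dbStats metrics on every run, and additionally
+// replica set or sharding metrics when the server is a replica set member or a mongos.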
+func (m *Mongo) collect() (map[string]int64, error) {
+ if err := m.conn.initClient(m.URI, m.Timeout.Duration()); err != nil {
+ return nil, fmt.Errorf("init mongo conn: %v", err)
+ }
+
+ mx := make(map[string]int64)
+
+ if err := m.collectServerStatus(mx); err != nil {
+ return nil, fmt.Errorf("couldn't collect server status metrics: %v", err)
+ }
+
+ if err := m.collectDbStats(mx); err != nil {
+ return mx, fmt.Errorf("couldn't collect dbstats metrics: %v", err)
+ }
+
+ if m.conn.isReplicaSet() {
+ if err := m.collectReplSetStatus(mx); err != nil {
+ return mx, fmt.Errorf("couldn't collect documentReplSetStatus metrics: %v", err)
+ }
+ }
+
+ if m.conn.isMongos() {
+ m.addShardingChartsOnce.Do(m.addShardingCharts)
+ if err := m.collectSharding(mx); err != nil {
+ return mx, fmt.Errorf("couldn't collect sharding metrics: %v", err)
+ }
+ }
+
+ return mx, nil
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/collect_dbstats.go b/src/go/plugin/go.d/modules/mongodb/collect_dbstats.go
new file mode 100644
index 000000000..3a20bee7f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/collect_dbstats.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
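+// collectDbStats lists databases, filters them with the configured selector, runs dbStats
+// for each match, and adds/removes per-database charts as databases appear or disappear.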
+func (m *Mongo) collectDbStats(mx map[string]int64) error {
+ if m.dbSelector == nil {
+ m.Debug("'databases' selector not set, skipping database statistics collection")
+ return nil
+ }
+
+ allDBs, err := m.conn.listDatabaseNames()
+ if err != nil {
+ return fmt.Errorf("cannot get database names: %v", err)
+ }
+
+ m.Debugf("all databases on the server: '%v'", allDBs)
+
+ var dbs []string
+ for _, db := range allDBs {
+ if m.dbSelector.MatchString(db) {
+ dbs = append(dbs, db)
+ }
+ }
+
+ if len(allDBs) != len(dbs) {
+ m.Debugf("databases remaining after filtering: %v", dbs)
+ }
+
+ seen := make(map[string]bool)
+ for _, db := range dbs {
+ s, err := m.conn.dbStats(db)
+ if err != nil {
+ return fmt.Errorf("dbStats command failed: %v", err)
+ }
+
+ seen[db] = true
+
+ mx["database_"+db+"_collections"] = s.Collections
+ mx["database_"+db+"_views"] = s.Views
+ mx["database_"+db+"_indexes"] = s.Indexes
+ mx["database_"+db+"_documents"] = s.Objects
+ mx["database_"+db+"_data_size"] = s.DataSize
+ mx["database_"+db+"_index_size"] = s.IndexSize
+ mx["database_"+db+"_storage_size"] = s.StorageSize
+ }
+
+ for db := range seen {
+ if !m.databases[db] {
+ m.databases[db] = true
+ m.Debugf("new database '%s': creating charts", db)
+ m.addDatabaseCharts(db)
+ }
+ }
+
+ for db := range m.databases {
+ if !seen[db] {
+ delete(m.databases, db)
+ m.Debugf("stale database '%s': removing charts", db)
+ m.removeDatabaseCharts(db)
+ }
+ }
+
+ return nil
+}
+
+func (m *Mongo) addDatabaseCharts(name string) {
+ charts := chartsTmplDatabase.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Labels = []module.Label{
+ {Key: "database", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := m.Charts().Add(*charts...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *Mongo) removeDatabaseCharts(name string) {
+ px := fmt.Sprintf("%s%s_", chartPxDatabase, name)
+
+ for _, chart := range *m.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/collect_replsetgetstatus.go b/src/go/plugin/go.d/modules/mongodb/collect_replsetgetstatus.go
new file mode 100644
index 000000000..43d4168db
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/collect_replsetgetstatus.go
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+// https://www.mongodb.com/docs/manual/reference/replica-states/#replica-set-member-states
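+// The numeric values correspond to MongoDB's replica set member state codes (members[n].state).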
+var replicaSetMemberStates = map[string]int{
+ "startup": 0,
+ "primary": 1,
+ "secondary": 2,
+ "recovering": 3,
+ "startup2": 5,
+ "unknown": 6,
+ "arbiter": 7,
+ "down": 8,
+ "rollback": 9,
+ "removed": 10,
+}
+
+// TODO: deal with duplicates if we collect metrics from all cluster nodes
+// should we only collect ReplSetStatus (at least by default) from primary nodes? (db.runCommand( { isMaster: 1 } ))
+func (m *Mongo) collectReplSetStatus(mx map[string]int64) error {
+ s, err := m.conn.replSetGetStatus()
+ if err != nil {
+ return fmt.Errorf("failed to get replica set status from mongo: %s", err)
+ }
+
+ seen := make(map[string]documentReplSetMember)
+
+ for _, member := range s.Members {
+ seen[member.Name] = member
+
+ px := fmt.Sprintf("repl_set_member_%s_", member.Name)
+
+ mx[px+"replication_lag"] = s.Date.Sub(member.OptimeDate).Milliseconds()
+
+ for k, v := range replicaSetMemberStates {
+ mx[px+"state_"+k] = boolToInt(member.State == v)
+ }
+
+ mx[px+"health_status_up"] = boolToInt(member.Health == 1)
+ mx[px+"health_status_down"] = boolToInt(member.Health == 0)
+
+ if member.Self == nil {
+ mx[px+"uptime"] = member.Uptime
+ if v := member.LastHeartbeatRecv; v != nil && !v.IsZero() {
+ mx[px+"heartbeat_latency"] = s.Date.Sub(*v).Milliseconds()
+ }
+ if v := member.PingMs; v != nil {
+ mx[px+"ping_rtt"] = *v
+ }
+ }
+ }
+
+ for name, member := range seen {
+ if !m.replSetMembers[name] {
+ m.replSetMembers[name] = true
+ m.Debugf("new replica set member '%s': adding charts", name)
+ m.addReplSetMemberCharts(member)
+ }
+ }
+
+ for name := range m.replSetMembers {
+ if _, ok := seen[name]; !ok {
+ delete(m.replSetMembers, name)
+ m.Debugf("stale replica set member '%s': removing charts", name)
+ m.removeReplSetMemberCharts(name)
+ }
+ }
+
+ return nil
+}
+
+func (m *Mongo) addReplSetMemberCharts(v documentReplSetMember) {
+ charts := chartsTmplReplSetMember.Copy()
+
+ if v.Self != nil {
+ _ = charts.Remove(chartTmplReplSetMemberHeartbeatLatencyTime.ID)
+ _ = charts.Remove(chartTmplReplSetMemberPingRTTTime.ID)
+ _ = charts.Remove(chartTmplReplSetMemberUptime.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, v.Name)
+ chart.Labels = []module.Label{
+ {Key: "repl_set_member", Value: v.Name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, v.Name)
+ }
+ }
+
+ if err := m.Charts().Add(*charts...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *Mongo) removeReplSetMemberCharts(name string) {
+ px := fmt.Sprintf("%s%s_", chartPxReplSetMember, name)
+
+ for _, chart := range *m.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/collect_serverstatus.go b/src/go/plugin/go.d/modules/mongodb/collect_serverstatus.go
new file mode 100644
index 000000000..861726386
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/collect_serverstatus.go
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+// collectServerStatus creates the map[string]int64 for the available dims.
+// nil values are ignored and not added to the map, so the corresponding metrics do not appear on the dashboard.
+// Because MongoDB reports a metric only after it first appears, some dims might take a while to show up.
+// For example, in order to report the number of create commands, a document must be created first.
+func (m *Mongo) collectServerStatus(mx map[string]int64) error {
+ s, err := m.conn.serverStatus()
+ if err != nil {
+ return fmt.Errorf("serverStatus command failed: %s", err)
+ }
+
+ m.addOptionalCharts(s)
+
+ for k, v := range stm.ToMap(s) {
+ mx[k] = v
+ }
+
+ if s.Transactions != nil && s.Transactions.CommitTypes != nil {
+ px := "txn_commit_types_"
+ v := s.Transactions.CommitTypes
+ mx[px+"no_shards_unsuccessful"] = v.NoShards.Initiated - v.NoShards.Successful
+ mx[px+"single_shard_unsuccessful"] = v.SingleShard.Initiated - v.SingleShard.Successful
+ mx[px+"single_write_shard_unsuccessful"] = v.SingleWriteShard.Initiated - v.SingleWriteShard.Successful
+ mx[px+"read_only_unsuccessful"] = v.ReadOnly.Initiated - v.ReadOnly.Successful
+ mx[px+"two_phase_commit_unsuccessful"] = v.TwoPhaseCommit.Initiated - v.TwoPhaseCommit.Successful
+ mx[px+"recover_with_token_unsuccessful"] = v.RecoverWithToken.Initiated - v.RecoverWithToken.Successful
+ }
+
+ return nil
+}
+
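+// addOptionalCharts adds charts only for the serverStatus sections that are present (non-nil) in the response.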
+func (m *Mongo) addOptionalCharts(s *documentServerStatus) {
+ m.addOptionalChart(s.OpLatencies,
+ &chartOperationsRate,
+ &chartOperationsLatencyTime,
+ )
+ m.addOptionalChart(s.WiredTiger,
+ &chartWiredTigerConcurrentReadTransactionsUsage,
+ &chartWiredTigerConcurrentWriteTransactionsUsage,
+ &chartWiredTigerCacheUsage,
+ &chartWiredTigerCacheDirtySpaceSize,
+ &chartWiredTigerCacheIORate,
+ &chartWiredTigerCacheEvictionsRate,
+ )
+ m.addOptionalChart(s.Tcmalloc,
+ &chartMemoryTCMallocStatsChart,
+ )
+ m.addOptionalChart(s.GlobalLock,
+ &chartGlobalLockActiveClientsCount,
+ &chartGlobalLockCurrentQueueCount,
+ )
+ m.addOptionalChart(s.Network.NumSlowDNSOperations,
+ &chartNetworkSlowDNSResolutionsRate,
+ )
+ m.addOptionalChart(s.Network.NumSlowSSLOperations,
+ &chartNetworkSlowSSLHandshakesRate,
+ )
+ m.addOptionalChart(s.Metrics.Cursor.TotalOpened,
+ &chartCursorsOpenedRate,
+ )
+ m.addOptionalChart(s.Metrics.Cursor.TimedOut,
+ &chartCursorsTimedOutRate,
+ )
+ m.addOptionalChart(s.Metrics.Cursor.Open.Total,
+ &chartCursorsOpenCount,
+ )
+ m.addOptionalChart(s.Metrics.Cursor.Open.NoTimeout,
+ &chartCursorsOpenNoTimeoutCount,
+ )
+ m.addOptionalChart(s.Metrics.Cursor.Lifespan,
+ &chartCursorsByLifespanCount,
+ )
+
+ if s.Transactions != nil {
+ m.addOptionalChart(s.Transactions,
+ &chartTransactionsCount,
+ &chartTransactionsRate,
+ )
+ m.addOptionalChart(s.Transactions.CommitTypes,
+ &chartTransactionsNoShardsCommitsRate,
+ &chartTransactionsNoShardsCommitsDurationTime,
+ &chartTransactionsSingleShardCommitsRate,
+ &chartTransactionsSingleShardCommitsDurationTime,
+ &chartTransactionsSingleWriteShardCommitsRate,
+ &chartTransactionsSingleWriteShardCommitsDurationTime,
+ &chartTransactionsReadOnlyCommitsRate,
+ &chartTransactionsReadOnlyCommitsDurationTime,
+ &chartTransactionsTwoPhaseCommitCommitsRate,
+ &chartTransactionsTwoPhaseCommitCommitsDurationTime,
+ &chartTransactionsRecoverWithTokenCommitsRate,
+ &chartTransactionsRecoverWithTokenCommitsDurationTime,
+ )
+ }
+ if s.Locks != nil {
+ m.addOptionalChart(s.Locks.Global, &chartGlobalLockAcquisitionsRate)
+ m.addOptionalChart(s.Locks.Database, &chartDatabaseLockAcquisitionsRate)
+ m.addOptionalChart(s.Locks.Collection, &chartCollectionLockAcquisitionsRate)
+ m.addOptionalChart(s.Locks.Mutex, &chartMutexLockAcquisitionsRate)
+ m.addOptionalChart(s.Locks.Metadata, &chartMetadataLockAcquisitionsRate)
+ m.addOptionalChart(s.Locks.Oplog, &chartOpLogLockAcquisitionsRate)
+ }
+}
+
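+// addOptionalChart adds each chart at most once, and only when the corresponding serverStatus field is non-nil.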
+func (m *Mongo) addOptionalChart(iface any, charts ...*module.Chart) {
+ if reflect.ValueOf(iface).IsNil() {
+ return
+ }
+ for _, chart := range charts {
+ if m.optionalCharts[chart.ID] {
+ continue
+ }
+ m.optionalCharts[chart.ID] = true
+
+ if err := m.charts.Add(chart.Copy()); err != nil {
+ m.Warning(err)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/collect_sharding.go b/src/go/plugin/go.d/modules/mongodb/collect_sharding.go
new file mode 100644
index 000000000..43e9ae8bd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/collect_sharding.go
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
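+// collectSharding collects shard node, partitioning, and per-shard chunk metrics from the config database,
+// adding and removing per-shard charts as shards appear or disappear.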
+func (m *Mongo) collectSharding(mx map[string]int64) error {
+ nodes, err := m.conn.shardNodes()
+ if err != nil {
+ return err
+ }
+
+ mx["shard_nodes_aware"] = nodes.ShardAware
+ mx["shard_nodes_unaware"] = nodes.ShardUnaware
+
+ dbPart, err := m.conn.shardDatabasesPartitioning()
+ if err != nil {
+ return err
+ }
+
+ mx["shard_databases_partitioned"] = dbPart.Partitioned
+ mx["shard_databases_unpartitioned"] = dbPart.UnPartitioned
+
+ collPart, err := m.conn.shardCollectionsPartitioning()
+ if err != nil {
+ return err
+ }
+
+ mx["shard_collections_partitioned"] = collPart.Partitioned
+ mx["shard_collections_unpartitioned"] = collPart.UnPartitioned
+
+ chunksPerShard, err := m.conn.shardChunks()
+ if err != nil {
+ return err
+ }
+
+ seen := make(map[string]bool)
+
+ for shard, count := range chunksPerShard {
+ seen[shard] = true
+ mx["shard_id_"+shard+"_chunks"] = count
+ }
+
+ for id := range seen {
+ if !m.shards[id] {
+ m.shards[id] = true
+ m.addShardCharts(id)
+ }
+ }
+
+ for id := range m.shards {
+ if !seen[id] {
+ delete(m.shards, id)
+ m.removeShardCharts(id)
+ }
+ }
+
+ return nil
+}
+
+func (m *Mongo) addShardCharts(id string) {
+ charts := chartsTmplShardingShard.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, id)
+ chart.Labels = []module.Label{
+ {Key: "shard_id", Value: id},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, id)
+ }
+ }
+
+ if err := m.Charts().Add(*charts...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *Mongo) removeShardCharts(id string) {
+ px := fmt.Sprintf("%s%s_", chartPxShard, id)
+
+ for _, chart := range *m.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func (m *Mongo) addShardingCharts() {
+ charts := chartsSharding.Copy()
+
+ if err := m.Charts().Add(*charts...); err != nil {
+ m.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/config_schema.json b/src/go/plugin/go.d/modules/mongodb/config_schema.json
new file mode 100644
index 000000000..fc5c42eff
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/config_schema.json
@@ -0,0 +1,105 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MongoDB collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "uri": {
+ "title": "URI",
+ "description": "The MongoDB connection string in the [standard connection string format](https://www.mongodb.com/docs/manual/reference/connection-string/#std-label-connections-standard-connection-string-format).",
+ "type": "string",
+ "default": "mongodb://localhost:27017"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for queries, in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "databases": {
+ "title": "Database selector",
+ "description": "Configuration for monitoring specific databases. If left empty, no [database stats](https://docs.mongodb.com/manual/reference/command/dbStats/) will be collected.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "includes": {
+ "title": "Include",
+ "description": "Include databases that match any of the specified include [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "excludes": {
+ "title": "Exclude",
+ "description": "Exclude databases that match any of the specified exclude [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ }
+ },
+ "required": [
+ "uri"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "uri": {
+ "ui:placeholder": "mongodb://username:password@host:port"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "databases": {
+ "ui:help": "The logic for inclusion and exclusion is as follows: `(include1 OR include2) AND !(exclude1 OR exclude2)`."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "uri",
+ "timeout"
+ ]
+ },
+ {
+ "title": "Database stats",
+ "fields": [
+ "databases"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/documents.go b/src/go/plugin/go.d/modules/mongodb/documents.go
new file mode 100644
index 000000000..5c95e952e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/documents.go
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import "time"
+
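+// The `stm` struct tags below define the metric key prefixes used when these documents
+// are flattened into the metrics map (see pkg/stm).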
+// https://www.mongodb.com/docs/manual/reference/command/serverStatus
+type documentServerStatus struct {
+ Process string `bson:"process"` // mongod|mongos
+ OpCounters documentOpCounters `bson:"opcounters" stm:"operations"`
+ OpLatencies *documentOpLatencies `bson:"opLatencies" stm:"operations_latencies"` // mongod only
+ Connections documentConnections `bson:"connections" stm:"connections"`
+ Network documentNetwork `bson:"network" stm:"network"`
+ Memory documentMemory `bson:"mem" stm:"memory"`
+ Metrics documentMetrics `bson:"metrics" stm:"metrics"`
+ ExtraInfo documentExtraInfo `bson:"extra_info" stm:"extra_info"`
+ Asserts documentAsserts `bson:"asserts" stm:"asserts"`
+ Transactions *documentTransactions `bson:"transactions" stm:"txn"` // mongod in 3.6.3+ and on mongos in 4.2+
+ GlobalLock *documentGlobalLock `bson:"globalLock" stm:"global_lock"`
+ Tcmalloc *documentTCMallocStatus `bson:"tcmalloc" stm:"tcmalloc"`
+ Locks *documentLocks `bson:"locks" stm:"locks"`
+ WiredTiger *documentWiredTiger `bson:"wiredTiger" stm:"wiredtiger"`
+ Repl interface{} `bson:"repl"`
+}
+
+type (
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#opcounters
+ documentOpCounters struct {
+ Insert int64 `bson:"insert" stm:"insert"`
+ Query int64 `bson:"query" stm:"query"`
+ Update int64 `bson:"update" stm:"update"`
+ Delete int64 `bson:"delete" stm:"delete"`
+ GetMore int64 `bson:"getmore" stm:"getmore"`
+ Command int64 `bson:"command" stm:"command"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#oplatencies
+ documentOpLatencies struct {
+ Reads documentLatencyStats `bson:"reads" stm:"reads"`
+ Writes documentLatencyStats `bson:"writes" stm:"writes"`
+ Commands documentLatencyStats `bson:"commands" stm:"commands"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/operator/aggregation/collStats/#latencystats-document
+ documentLatencyStats struct {
+ Latency int64 `bson:"latency" stm:"latency"`
+ Ops int64 `bson:"ops" stm:"ops"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#connections
+ documentConnections struct {
+ Current int64 `bson:"current" stm:"current"`
+ Available int64 `bson:"available" stm:"available"`
+ TotalCreated int64 `bson:"totalCreated" stm:"total_created"`
+ Active *int64 `bson:"active" stm:"active"`
+ Threaded *int64 `bson:"threaded" stm:"threaded"`
+ ExhaustIsMaster *int64 `bson:"exhaustIsMaster" stm:"exhaust_is_master"`
+ ExhaustHello *int64 `bson:"exhaustHello" stm:"exhaust_hello"`
+ AwaitingTopologyChanges *int64 `bson:"awaitingTopologyChanges" stm:"awaiting_topology_changes"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#network
+ documentNetwork struct {
+ BytesIn int64 `bson:"bytesIn" stm:"bytes_in"`
+ BytesOut int64 `bson:"bytesOut" stm:"bytes_out"`
+ NumRequests int64 `bson:"numRequests" stm:"requests"`
+ NumSlowDNSOperations *int64 `bson:"numSlowDNSOperations" stm:"slow_dns_operations"` // 4.4+
+ NumSlowSSLOperations *int64 `bson:"numSlowSSLOperations" stm:"slow_ssl_operations"` // 4.4+
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#mem
+ documentMemory struct {
+ Resident int64 `bson:"resident" stm:"resident,1048576,1"`
+ Virtual int64 `bson:"virtual" stm:"virtual,1048576,1"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#extra_info
+ documentExtraInfo struct {
+ PageFaults int64 `bson:"page_faults" stm:"page_faults"`
+ }
+ // Values:
+ // - mongodb: https://github.com/mongodb/mongo/blob/54e1be7d98aa154e1676d6d652b4d2d1a1073b07/src/mongo/util/tcmalloc_server_status_section.cpp#L88
+ // - tcmalloc: https://github.com/google/tcmalloc/blob/927c1433141daa1f0bcf920e6d71bf64795cc2c2/tcmalloc/global_stats.cc#L582
+ // formattedString:
+ // - https://github.com/google/tcmalloc/blob/master/docs/stats.md
+ // - https://github.com/google/tcmalloc/blob/927c1433141daa1f0bcf920e6d71bf64795cc2c2/tcmalloc/global_stats.cc#L208
+ documentTCMallocStatus struct {
+ Generic *struct {
+ CurrentAllocatedBytes int64 `bson:"current_allocated_bytes" stm:"current_allocated_bytes"`
+ HeapSize int64 `bson:"heap_size" stm:"heap_size"`
+ } `bson:"generic" stm:"generic"`
+ Tcmalloc *struct {
+ PageheapFreeBytes int64 `bson:"pageheap_free_bytes" stm:"pageheap_free_bytes"`
+ PageheapUnmappedBytes int64 `bson:"pageheap_unmapped_bytes" stm:"pageheap_unmapped_bytes"`
+ MaxTotalThreadCacheBytes int64 `bson:"max_total_thread_cache_bytes" stm:"max_total_thread_cache_bytes"`
+ CurrentTotalThreadCacheBytes int64 `bson:"current_total_thread_cache_bytes" stm:"current_total_thread_cache_bytes"`
+ TotalFreeBytes int64 `bson:"total_free_bytes" stm:"total_free_bytes"`
+ CentralCacheFreeBytes int64 `bson:"central_cache_free_bytes" stm:"central_cache_free_bytes"`
+ TransferCacheFreeBytes int64 `bson:"transfer_cache_free_bytes" stm:"transfer_cache_free_bytes"`
+ ThreadCacheFreeBytes int64 `bson:"thread_cache_free_bytes" stm:"thread_cache_free_bytes"`
+ AggressiveMemoryDecommit int64 `bson:"aggressive_memory_decommit" stm:"aggressive_memory_decommit"`
+ PageheapCommittedBytes int64 `bson:"pageheap_committed_bytes" stm:"pageheap_committed_bytes"`
+ PageheapScavengeBytes int64 `bson:"pageheap_scavenge_bytes" stm:"pageheap_scavenge_bytes"`
+ PageheapCommitCount int64 `bson:"pageheap_commit_count" stm:"pageheap_commit_count"`
+ PageheapTotalCommitBytes int64 `bson:"pageheap_total_commit_bytes" stm:"pageheap_total_commit_bytes"`
+ PageheapDecommitCount int64 `bson:"pageheap_decommit_count" stm:"pageheap_decommit_count"`
+ PageheapTotalDecommitBytes int64 `bson:"pageheap_total_decommit_bytes" stm:"pageheap_total_decommit_bytes"`
+ PageheapReserveCount int64 `bson:"pageheap_reserve_count" stm:"pageheap_reserve_count"`
+ PageheapTotalReserveBytes int64 `bson:"pageheap_total_reserve_bytes" stm:"pageheap_total_reserve_bytes"`
+ SpinlockTotalDelayNs int64 `bson:"spinlock_total_delay_ns" stm:"spinlock_total_delay_ns"`
+ } `bson:"tcmalloc" stm:""`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#metrics
+ documentMetrics struct {
+ Cursor struct {
+ TotalOpened *int64 `bson:"totalOpened" stm:"total_opened"`
+ TimedOut *int64 `bson:"timedOut" stm:"timed_out"`
+ Open struct {
+ NoTimeout *int64 `bson:"noTimeout" stm:"no_timeout"`
+ Total *int64 `bson:"total" stm:"total"`
+ } `bson:"open" stm:"open"`
+ Lifespan *struct {
+ GreaterThanOrEqual10Minutes int64 `bson:"greaterThanOrEqual10Minutes" stm:"greater_than_or_equal_10_minutes"`
+ LessThan10Minutes int64 `bson:"lessThan10Minutes" stm:"less_than_10_minutes"`
+ LessThan15Seconds int64 `bson:"lessThan15Seconds" stm:"less_than_15_seconds"`
+ LessThan1Minute int64 `bson:"lessThan1Minute" stm:"less_than_1_minute"`
+ LessThan1Second int64 `bson:"lessThan1Second" stm:"less_than_1_second"`
+ LessThan30Seconds int64 `bson:"lessThan30Seconds" stm:"less_than_30_seconds"`
+ LessThan5Seconds int64 `bson:"lessThan5Seconds" stm:"less_than_5_seconds"`
+ } `bson:"lifespan" stm:"lifespan"`
+ } `bson:"cursor" stm:"cursor"`
+ Document struct {
+ Deleted int64 `bson:"deleted" stm:"deleted"`
+ Inserted int64 `bson:"inserted" stm:"inserted"`
+ Returned int64 `bson:"returned" stm:"returned"`
+ Updated int64 `bson:"updated" stm:"updated"`
+ } `bson:"document" stm:"document"`
+ QueryExecutor struct {
+ Scanned int64 `bson:"scanned" stm:"scanned"`
+ ScannedObjects int64 `bson:"scannedObjects" stm:"scanned_objects"`
+ } `bson:"queryExecutor" stm:"query_executor"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#asserts
+ documentAsserts struct {
+ Regular int64 `bson:"regular" stm:"regular"`
+ Warning int64 `bson:"warning" stm:"warning"`
+ Msg int64 `bson:"msg" stm:"msg"`
+ User int64 `bson:"user" stm:"user"`
+ Tripwire int64 `bson:"tripwire" stm:"tripwire"`
+ Rollovers int64 `bson:"rollovers" stm:"rollovers"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#transactions
+ documentTransactions struct {
+ CurrentActive *int64 `bson:"currentActive" stm:"active"` // mongod in 4.0.2+ and mongos in 4.2.1+
+ CurrentInactive *int64 `bson:"currentInactive" stm:"inactive"` // mongod in 4.0.2+ and mongos in 4.2.1+
+ CurrentOpen *int64 `bson:"currentOpen" stm:"open"` // mongod in 4.0.2+ and mongos in 4.2.1+
+ CurrentPrepared *int64 `bson:"currentPrepared" stm:"prepared"` // 4.2+ mongod only
+ TotalAborted *int64 `bson:"totalAborted" stm:"total_aborted"` // mongod in 4.0.2+ and mongos in 4.2+
+ TotalCommitted *int64 `bson:"totalCommitted" stm:"total_committed"` // mongod in 4.0.2+ and mongos in 4.2+
+ TotalStarted *int64 `bson:"totalStarted" stm:"total_started"` // mongod in 4.0.2+ and mongos in 4.2+
+ TotalPrepared *int64 `bson:"totalPrepared" stm:"total_prepared"` // mongod in 4.0.2+ and mongos in 4.2+
+ CommitTypes *documentTransactionsCommitTypes `bson:"commitTypes" stm:"commit_types"` // mongos only
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#mongodb-serverstatus-serverstatus.transactions.commitTypes
+ documentTransactionsCommitTypes struct {
+ NoShards documentTransactionsCommitType `bson:"noShards" stm:"no_shards"`
+ SingleShard documentTransactionsCommitType `bson:"singleShard" stm:"single_shard"`
+ SingleWriteShard documentTransactionsCommitType `bson:"singleWriteShard" stm:"single_write_shard"`
+ ReadOnly documentTransactionsCommitType `bson:"readOnly" stm:"read_only"`
+ TwoPhaseCommit documentTransactionsCommitType `bson:"twoPhaseCommit" stm:"two_phase_commit"`
+ RecoverWithToken documentTransactionsCommitType `bson:"recoverWithToken" stm:"recover_with_token"`
+ }
+ documentTransactionsCommitType struct {
+ Initiated int64 `json:"initiated" stm:"initiated"`
+ Successful int64 `json:"successful" stm:"successful"`
+ SuccessfulDurationMicros int64 `json:"successfulDurationMicros" stm:"successful_duration_micros"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#globallock
+ documentGlobalLock struct {
+ CurrentQueue *struct {
+ Readers int64 `bson:"readers" stm:"readers"`
+ Writers int64 `bson:"writers" stm:"writers"`
+ } `bson:"currentQueue" stm:"current_queue"`
+ ActiveClients *struct {
+ Readers int64 `bson:"readers" stm:"readers"`
+ Writers int64 `bson:"writers" stm:"writers"`
+ } `bson:"activeClients" stm:"active_clients"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#mongodb-serverstatus-serverstatus.locks
+ documentLocks struct {
+ Global *documentLockType `bson:"Global" stm:"global"`
+ Database *documentLockType `bson:"Database" stm:"database"`
+ Collection *documentLockType `bson:"Collection" stm:"collection"`
+ Mutex *documentLockType `bson:"Mutex" stm:"mutex"`
+ Metadata *documentLockType `bson:"Metadata" stm:"metadata"`
+ Oplog *documentLockType `bson:"oplog" stm:"oplog"`
+ }
+ documentLockType struct {
+ AcquireCount documentLockModes `bson:"acquireCount" stm:"acquire"`
+ }
+ documentLockModes struct {
+ Shared int64 `bson:"R" stm:"shared"`
+ Exclusive int64 `bson:"W" stm:"exclusive"`
+ IntentShared int64 `bson:"r" stm:"intent_shared"`
+ IntentExclusive int64 `bson:"w" stm:"intent_exclusive"`
+ }
+ // https://www.mongodb.com/docs/manual/reference/command/serverStatus/#wiredtiger
+ documentWiredTiger struct {
+ ConcurrentTransaction struct {
+ Write struct {
+ Out int `bson:"out" stm:"out"`
+ Available int `bson:"available" stm:"available"`
+ } `bson:"write" stm:"write"`
+ Read struct {
+ Out int `bson:"out" stm:"out"`
+ Available int `bson:"available" stm:"available"`
+ } `bson:"read" stm:"read"`
+ } `bson:"concurrentTransactions" stm:"concurrent_txn"`
+ Cache struct {
+ BytesCurrentlyInCache int `bson:"bytes currently in the cache" stm:"currently_in_cache_bytes"`
+ MaximumBytesConfigured int `bson:"maximum bytes configured" stm:"maximum_configured_bytes"`
+ TrackedDirtyBytesInCache int `bson:"tracked dirty bytes in the cache" stm:"tracked_dirty_in_the_cache_bytes"`
+ UnmodifiedPagesEvicted int `bson:"unmodified pages evicted" stm:"unmodified_evicted_pages"`
+ ModifiedPagesEvicted int `bson:"modified pages evicted" stm:"modified_evicted_pages"`
+ PagesReadIntoCache int `bson:"pages read into cache" stm:"read_into_cache_pages"`
+ PagesWrittenFromCache int `bson:"pages written from cache" stm:"written_from_cache_pages"`
+ } `bson:"cache" stm:"cache"`
+ }
+)
+
+// https://www.mongodb.com/docs/manual/reference/command/dbStats/
+type documentDBStats struct {
+ Collections int64 `bson:"collections"`
+ Views int64 `bson:"views"`
+ Indexes int64 `bson:"indexes"`
+ Objects int64 `bson:"objects"`
+ DataSize int64 `bson:"dataSize"`
+ IndexSize int64 `bson:"indexSize"`
+ StorageSize int64 `bson:"storageSize"`
+}
+
+// https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/
+type documentReplSetStatus struct {
+ Date time.Time `bson:"date"`
+ Members []documentReplSetMember `bson:"members"`
+}
+
+type (
+ documentReplSetMember struct {
+ Name string `bson:"name"`
+ Self *bool `bson:"self"`
+ State int `bson:"state"`
+ Health int `bson:"health"`
+ OptimeDate time.Time `bson:"optimeDate"`
+ LastHeartbeat *time.Time `bson:"lastHeartbeat"`
+ LastHeartbeatRecv *time.Time `bson:"lastHeartbeatRecv"`
+ PingMs *int64 `bson:"pingMs"`
+ Uptime int64 `bson:"uptime"`
+ }
+)
+
+type documentAggrResults struct {
+ Bool bool `bson:"_id"`
+ Count int64 `bson:"count"`
+}
+
+type (
+ documentAggrResult struct {
+ True int64
+ False int64
+ }
+)
+
+type documentPartitionedResult struct {
+ Partitioned int64
+ UnPartitioned int64
+}
+
+type documentShardNodesResult struct {
+ ShardAware int64
+ ShardUnaware int64
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/init.go b/src/go/plugin/go.d/modules/mongodb/init.go
new file mode 100644
index 000000000..b881e8711
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/init.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+ "errors"
+)
+
+func (m *Mongo) verifyConfig() error {
+ if m.URI == "" {
+ return errors.New("connection URI is empty")
+ }
+
+ return nil
+}
+
+func (m *Mongo) initDatabaseSelector() error {
+ if m.Databases.Empty() {
+ return nil
+ }
+
+ sr, err := m.Databases.Parse()
+ if err != nil {
+ return err
+ }
+ m.dbSelector = sr
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/integrations/mongodb.md b/src/go/plugin/go.d/modules/mongodb/integrations/mongodb.md
new file mode 100644
index 000000000..e47c3865d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/integrations/mongodb.md
@@ -0,0 +1,391 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/mongodb/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/mongodb/metadata.yaml"
+sidebar_label: "MongoDB"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MongoDB
+
+
+<img src="https://netdata.cloud/img/mongodb.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: mongodb
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors MongoDB servers.
+
+Executed queries:
+
+- [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)
+- [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)
+- [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+- WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the storage engine.
+- Sharding metrics are available only when connected to a [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/) instance.
+
+
+### Per MongoDB instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mongodb.operations_rate | reads, writes, commands | operations/s |
+| mongodb.operations_latency_time | reads, writes, commands | milliseconds |
+| mongodb.operations_by_type_rate | insert, query, update, delete, getmore, command | operations/s |
+| mongodb.document_operations_rate | inserted, deleted, returned, updated | operations/s |
+| mongodb.scanned_indexes_rate | scanned | indexes/s |
+| mongodb.scanned_documents_rate | scanned | documents/s |
+| mongodb.active_clients_count | readers, writers | clients |
+| mongodb.queued_operations_count | reads, writes | operations |
+| mongodb.cursors_open_count | open | cursors |
+| mongodb.cursors_open_no_timeout_count | open_no_timeout | cursors |
+| mongodb.cursors_opened_rate | opened | cursors/s |
+| mongodb.cursors_timed_out_rate | timed_out | cursors/s |
+| mongodb.cursors_by_lifespan_count | le_1s, 1s_5s, 5s_15s, 15s_30s, 30s_1m, 1m_10m, ge_10m | cursors |
+| mongodb.transactions_count | active, inactive, open, prepared | transactions |
+| mongodb.transactions_rate | started, aborted, committed, prepared | transactions/s |
+| mongodb.connections_usage | available, used | connections |
+| mongodb.connections_by_state_count | active, threaded, exhaust_is_master, exhaust_hello, awaiting_topology_changes | connections |
+| mongodb.connections_rate | created | connections/s |
+| mongodb.asserts_rate | regular, warning, msg, user, tripwire, rollovers | asserts/s |
+| mongodb.network_traffic_rate | in, out | bytes/s |
+| mongodb.network_requests_rate | requests | requests/s |
+| mongodb.network_slow_dns_resolutions_rate | slow_dns | resolutions/s |
+| mongodb.network_slow_ssl_handshakes_rate | slow_ssl | handshakes/s |
+| mongodb.memory_resident_size | used | bytes |
+| mongodb.memory_virtual_size | used | bytes |
+| mongodb.memory_page_faults_rate | pgfaults | pgfaults/s |
+| mongodb.memory_tcmalloc_stats | allocated, central_cache_freelist, transfer_cache_freelist, thread_cache_freelists, pageheap_freelist, pageheap_unmapped | bytes |
+| mongodb.wiredtiger_concurrent_read_transactions_usage | available, used | transactions |
+| mongodb.wiredtiger_concurrent_write_transactions_usage | available, used | transactions |
+| mongodb.wiredtiger_cache_usage | used | bytes |
+| mongodb.wiredtiger_cache_dirty_space_size | dirty | bytes |
+| mongodb.wiredtiger_cache_io_rate | read, written | pages/s |
+| mongodb.wiredtiger_cache_evictions_rate | unmodified, modified | pages/s |
+| mongodb.sharding_nodes_count | shard_aware, shard_unaware | nodes |
+| mongodb.sharding_sharded_databases_count | partitioned, unpartitioned | databases |
+| mongodb.sharding_sharded_collections_count | partitioned, unpartitioned | collections |
+
+### Per lock type
+
+These metrics refer to the lock type.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| lock_type | lock type (e.g. global, database, collection, mutex) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mongodb.lock_acquisitions_rate | shared, exclusive, intent_shared, intent_exclusive | acquisitions/s |
+
+### Per commit type
+
+These metrics refer to the commit type.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| commit_type | commit type (e.g. noShards, singleShard, singleWriteShard) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mongodb.transactions_commits_rate | success, fail | commits/s |
+| mongodb.transactions_commits_duration_time | commits | milliseconds |
+
+### Per database
+
+These metrics refer to the database.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | database name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mongodb.database_collection_count | collections | collections |
+| mongodb.database_indexes_count | indexes | indexes |
+| mongodb.database_views_count | views | views |
+| mongodb.database_documents_count | documents | documents |
+| mongodb.database_data_size | data_size | bytes |
+| mongodb.database_storage_size | storage_size | bytes |
+| mongodb.database_index_size | index_size | bytes |
+
+### Per replica set member
+
+These metrics refer to the replica set member.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| repl_set_member | replica set member name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mongodb.repl_set_member_state | primary, startup, secondary, recovering, startup2, unknown, arbiter, down, rollback, removed | state |
+| mongodb.repl_set_member_health_status | up, down | status |
+| mongodb.repl_set_member_replication_lag_time | replication_lag | milliseconds |
+| mongodb.repl_set_member_heartbeat_latency_time | heartbeat_latency | milliseconds |
+| mongodb.repl_set_member_ping_rtt_time | ping_rtt | milliseconds |
+| mongodb.repl_set_member_uptime | uptime | seconds |
+
+### Per shard
+
+These metrics refer to the shard.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| shard_id | shard id |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mongodb.sharding_shard_chunks_count | chunks | chunks |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Create a read-only user
+
+Create a read-only user for Netdata in the admin database.
+
+- Authenticate as the admin user:
+
+ ```bash
+ use admin
+ db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")
+ ```
+
+- Create a user:
+
+ ```bash
+ db.createUser({
+ "user":"netdata",
+ "pwd": "<UNIQUE_PASSWORD>",
+ "roles" : [
+ {role: 'read', db: 'admin' },
+ {role: 'clusterMonitor', db: 'admin'},
+ {role: 'read', db: 'local' }
+ ]
+ })
+ ```
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/mongodb.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/mongodb.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| uri | MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/). | mongodb://localhost:27017 | yes |
+| timeout | Query timeout in seconds. | 1 | no |
+| databases | Databases selector. Determines which database metrics will be collected. | | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ uri: mongodb://netdata:password@localhost:27017
+
+```
+</details>
+
+##### With databases metrics
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ uri: mongodb://netdata:password@localhost:27017
+ databases:
+ includes:
+ - "* *"
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ uri: mongodb://netdata:password@localhost:27017
+
+ - name: remote
+ uri: mongodb://netdata:password@203.0.113.0:27017
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `mongodb` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m mongodb
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `mongodb` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep mongodb
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep mongodb /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep mongodb
+```
+
+
diff --git a/src/go/plugin/go.d/modules/mongodb/metadata.yaml b/src/go/plugin/go.d/modules/mongodb/metadata.yaml
new file mode 100644
index 000000000..ae013539f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/metadata.yaml
@@ -0,0 +1,580 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-mongodb
+ plugin_name: go.d.plugin
+ module_name: mongodb
+ monitored_instance:
+ name: MongoDB
+ link: https://www.mongodb.com/
+ icon_filename: mongodb.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - mongodb
+ - databases
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors MongoDB servers.
+
+ Executed queries:
+
+ - [serverStatus](https://docs.mongodb.com/manual/reference/command/serverStatus/)
+ - [dbStats](https://docs.mongodb.com/manual/reference/command/dbStats/)
+ - [replSetGetStatus](https://www.mongodb.com/docs/manual/reference/command/replSetGetStatus/)
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Create a read-only user
+ description: |
+ Create a read-only user for Netdata in the admin database.
+
+ - Authenticate as the admin user:
+
+ ```bash
+ use admin
+ db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")
+ ```
+
+ - Create a user:
+
+ ```bash
+ db.createUser({
+ "user":"netdata",
+ "pwd": "<UNIQUE_PASSWORD>",
+ "roles" : [
+ {role: 'read', db: 'admin' },
+ {role: 'clusterMonitor', db: 'admin'},
+ {role: 'read', db: 'local' }
+ ]
+ })
+ ```
+ configuration:
+ file:
+ name: go.d/mongodb.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: uri
+ description: MongoDB connection string. See [URI syntax](https://www.mongodb.com/docs/manual/reference/connection-string/).
+ default_value: mongodb://localhost:27017
+ required: true
+ - name: timeout
+ description: Query timeout in seconds.
+ default_value: 1
+ required: false
+ - name: databases
+ description: Databases selector. Determines which database metrics will be collected.
+ default_value: ""
+ required: false
+ details: |
+ Metrics of databases matching the selector will be collected.
+
+ - Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+ - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).
+ - Syntax:
+
+ ```yaml
+ databases:
+ includes:
+ - pattern1
+ - pattern2
+ excludes:
+ - pattern3
+ - pattern4
+ ```
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: TCP socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ uri: mongodb://netdata:password@localhost:27017
+ - name: With databases metrics
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ uri: mongodb://netdata:password@localhost:27017
+ databases:
+ includes:
+ - "* *"
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ uri: mongodb://netdata:password@localhost:27017
+
+ - name: remote
+ uri: mongodb://netdata:password@203.0.113.0:27017
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ availability: []
+ description: |
+ - WiredTiger metrics are available only if [WiredTiger](https://docs.mongodb.com/v6.0/core/wiredtiger/) is used as the storage engine.
+ - Sharding metrics are available only when connected to a [mongos](https://www.mongodb.com/docs/manual/reference/program/mongos/) instance.
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: mongodb.operations_rate
+ description: Operations rate
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: commands
+ - name: mongodb.operations_latency_time
+ description: Operations Latency
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: commands
+ - name: mongodb.operations_by_type_rate
+ description: Operations by type
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: insert
+ - name: query
+ - name: update
+ - name: delete
+ - name: getmore
+ - name: command
+ - name: mongodb.document_operations_rate
+ description: Document operations
+ unit: operations/s
+ chart_type: stacked
+ dimensions:
+ - name: inserted
+ - name: deleted
+ - name: returned
+ - name: updated
+ - name: mongodb.scanned_indexes_rate
+ description: Scanned indexes
+ unit: indexes/s
+ chart_type: line
+ dimensions:
+ - name: scanned
+ - name: mongodb.scanned_documents_rate
+ description: Scanned documents
+ unit: documents/s
+ chart_type: line
+ dimensions:
+ - name: scanned
+ - name: mongodb.active_clients_count
+ description: Connected clients
+ unit: clients
+ chart_type: line
+ dimensions:
+ - name: readers
+ - name: writers
+ - name: mongodb.queued_operations_count
+ description: Queued operations because of a lock
+ unit: operations
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: mongodb.cursors_open_count
+ description: Open cursors
+ unit: cursors
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: mongodb.cursors_open_no_timeout_count
+ description: Open cursors with disabled timeout
+ unit: cursors
+ chart_type: line
+ dimensions:
+ - name: open_no_timeout
+ - name: mongodb.cursors_opened_rate
+ description: Opened cursors rate
+ unit: cursors/s
+ chart_type: line
+ dimensions:
+ - name: opened
+ - name: mongodb.cursors_timed_out_rate
+ description: Timed-out cursors
+ unit: cursors/s
+ chart_type: line
+ dimensions:
+ - name: timed_out
+ - name: mongodb.cursors_by_lifespan_count
+ description: Cursors lifespan
+ unit: cursors
+ chart_type: stacked
+ dimensions:
+ - name: le_1s
+ - name: 1s_5s
+ - name: 5s_15s
+ - name: 15s_30s
+ - name: 30s_1m
+ - name: 1m_10m
+ - name: ge_10m
+ - name: mongodb.transactions_count
+ description: Current transactions
+ unit: transactions
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: open
+ - name: prepared
+ - name: mongodb.transactions_rate
+ description: Transactions rate
+ unit: transactions/s
+ chart_type: line
+ dimensions:
+ - name: started
+ - name: aborted
+ - name: committed
+ - name: prepared
+ - name: mongodb.connections_usage
+ description: Connections usage
+ unit: connections
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: mongodb.connections_by_state_count
+ description: Connections By State
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: threaded
+ - name: exhaust_is_master
+ - name: exhaust_hello
+ - name: awaiting_topology_changes
+ - name: mongodb.connections_rate
+ description: Connections Rate
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: mongodb.asserts_rate
+ description: Raised assertions
+ unit: asserts/s
+ chart_type: stacked
+ dimensions:
+ - name: regular
+ - name: warning
+ - name: msg
+ - name: user
+ - name: tripwire
+ - name: rollovers
+ - name: mongodb.network_traffic_rate
+ description: Network traffic
+ unit: bytes/s
+ chart_type: stacked
+ dimensions:
+ - name: in
+ - name: out
+ - name: mongodb.network_requests_rate
+ description: Network Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: mongodb.network_slow_dns_resolutions_rate
+ description: Slow DNS resolution operations
+ unit: resolutions/s
+ chart_type: line
+ dimensions:
+ - name: slow_dns
+ - name: mongodb.network_slow_ssl_handshakes_rate
+ description: Slow SSL handshake operations
+ unit: handshakes/s
+ chart_type: line
+ dimensions:
+ - name: slow_ssl
+ - name: mongodb.memory_resident_size
+ description: Used resident memory
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: mongodb.memory_virtual_size
+ description: Used virtual memory
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: mongodb.memory_page_faults_rate
+ description: Memory page faults
+ unit: pgfaults/s
+ chart_type: line
+ dimensions:
+ - name: pgfaults
+ - name: mongodb.memory_tcmalloc_stats
+ description: TCMalloc statistics
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: central_cache_freelist
+ - name: transfer_cache_freelist
+ - name: thread_cache_freelists
+ - name: pageheap_freelist
+ - name: pageheap_unmapped
+ - name: mongodb.wiredtiger_concurrent_read_transactions_usage
+ description: Wired Tiger concurrent read transactions usage
+ unit: transactions
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: mongodb.wiredtiger_concurrent_write_transactions_usage
+ description: Wired Tiger concurrent write transactions usage
+ unit: transactions
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: mongodb.wiredtiger_cache_usage
+ description: Wired Tiger cache usage
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: mongodb.wiredtiger_cache_dirty_space_size
+ description: Wired Tiger cache dirty space size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: dirty
+ - name: mongodb.wiredtiger_cache_io_rate
+ description: Wired Tiger IO activity
+ unit: pages/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: written
+ - name: mongodb.wiredtiger_cache_evictions_rate
+ description: Wired Tiger cache evictions
+ unit: pages/s
+ chart_type: stacked
+ dimensions:
+ - name: unmodified
+ - name: modified
+ - name: mongodb.sharding_nodes_count
+ description: Sharding Nodes
+ unit: nodes
+ chart_type: stacked
+ dimensions:
+ - name: shard_aware
+ - name: shard_unaware
+ - name: mongodb.sharding_sharded_databases_count
+ description: Sharded databases
+ unit: databases
+ chart_type: stacked
+ dimensions:
+ - name: partitioned
+ - name: unpartitioned
+ - name: mongodb.sharding_sharded_collections_count
+ description: Sharded collections
+ unit: collections
+ chart_type: stacked
+ dimensions:
+ - name: partitioned
+ - name: unpartitioned
+ - name: lock type
+ description: These metrics refer to the lock type.
+ labels:
+ - name: lock_type
+ description: lock type (e.g. global, database, collection, mutex)
+ metrics:
+ - name: mongodb.lock_acquisitions_rate
+ description: Lock acquisitions
+ unit: acquisitions/s
+ chart_type: line
+ dimensions:
+ - name: shared
+ - name: exclusive
+ - name: intent_shared
+ - name: intent_exclusive
+ - name: commit type
+ description: These metrics refer to the commit type.
+ labels:
+ - name: commit_type
+ description: commit type (e.g. noShards, singleShard, singleWriteShard)
+ metrics:
+ - name: mongodb.transactions_commits_rate
+ description: Transactions commits
+ unit: commits/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: fail
+ - name: mongodb.transactions_commits_duration_time
+ description: Transactions successful commits duration
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: commits
+ - name: database
+ description: These metrics refer to the database.
+ labels:
+ - name: database
+ description: database name
+ metrics:
+ - name: mongodb.database_collection_count
+ description: Database collections
+ unit: collections
+ chart_type: line
+ dimensions:
+ - name: collections
+ - name: mongodb.database_indexes_count
+ description: Database indexes
+ unit: indexes
+ chart_type: line
+ dimensions:
+ - name: indexes
+ - name: mongodb.database_views_count
+ description: Database views
+ unit: views
+ chart_type: line
+ dimensions:
+ - name: views
+ - name: mongodb.database_documents_count
+ description: Database documents
+ unit: documents
+ chart_type: line
+ dimensions:
+ - name: documents
+ - name: mongodb.database_data_size
+ description: Database data size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: data_size
+ - name: mongodb.database_storage_size
+ description: Database storage size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: storage_size
+ - name: mongodb.database_index_size
+ description: Database index size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: index_size
+ - name: replica set member
+ description: These metrics refer to the replica set member.
+ labels:
+ - name: repl_set_member
+ description: replica set member name
+ metrics:
+ - name: mongodb.repl_set_member_state
+ description: Replica Set member state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: primary
+ - name: startup
+ - name: secondary
+ - name: recovering
+ - name: startup2
+ - name: unknown
+ - name: arbiter
+ - name: down
+ - name: rollback
+ - name: removed
+ - name: mongodb.repl_set_member_health_status
+ description: Replica Set member health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: up
+ - name: down
+ - name: mongodb.repl_set_member_replication_lag_time
+ description: Replica Set member replication lag
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: replication_lag
+ - name: mongodb.repl_set_member_heartbeat_latency_time
+ description: Replica Set member heartbeat latency
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: heartbeat_latency
+ - name: mongodb.repl_set_member_ping_rtt_time
+ description: Replica Set member ping RTT
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: ping_rtt
+ - name: mongodb.repl_set_member_uptime
+ description: Replica Set member uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: shard
+ description: These metrics refer to the shard.
+ labels:
+ - name: shard_id
+ description: shard id
+ metrics:
+ - name: mongodb.sharding_shard_chunks_count
+ description: Shard chunks
+ unit: chunks
+ chart_type: line
+ dimensions:
+ - name: chunks
diff --git a/src/go/plugin/go.d/modules/mongodb/mongodb.go b/src/go/plugin/go.d/modules/mongodb/mongodb.go
new file mode 100644
index 000000000..7b8550251
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/mongodb.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+ _ "embed"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("mongodb", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Mongo {
+ return &Mongo{
+ Config: Config{
+ URI: "mongodb://localhost:27017",
+ Timeout: web.Duration(time.Second),
+ Databases: matcher.SimpleExpr{
+ Includes: []string{},
+ Excludes: []string{},
+ },
+ },
+
+ conn: &mongoClient{},
+
+ charts: chartsServerStatus.Copy(),
+ addShardingChartsOnce: &sync.Once{},
+
+ optionalCharts: make(map[string]bool),
+ replSetMembers: make(map[string]bool),
+ databases: make(map[string]bool),
+ shards: make(map[string]bool),
+ }
+}
+
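+// Config is the configuration of the mongodb collector job.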
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ URI string `yaml:"uri" json:"uri"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ Databases matcher.SimpleExpr `yaml:"databases,omitempty" json:"databases"`
+}
+
+type Mongo struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+ addShardingChartsOnce *sync.Once
+
+ conn mongoConn
+
+ dbSelector matcher.Matcher
+ optionalCharts map[string]bool
+ databases map[string]bool
+ replSetMembers map[string]bool
+ shards map[string]bool
+}
+
+func (m *Mongo) Configuration() any {
+ return m.Config
+}
+
+func (m *Mongo) Init() error {
+ if err := m.verifyConfig(); err != nil {
+ m.Errorf("config validation: %v", err)
+ return err
+ }
+
+ if err := m.initDatabaseSelector(); err != nil {
+ m.Errorf("init database selector: %v", err)
+ return err
+ }
+
+ return nil
+}
+
+func (m *Mongo) Check() error {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (m *Mongo) Charts() *module.Charts {
+ return m.charts
+}
+
+func (m *Mongo) Collect() map[string]int64 {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ }
+
+ if len(mx) == 0 {
+ m.Warning("no values collected")
+ return nil
+ }
+
+ return mx
+}
+
+func (m *Mongo) Cleanup() {
+ if m.conn == nil {
+ return
+ }
+ if err := m.conn.close(); err != nil {
+ m.Warningf("cleanup: error on closing mongo conn: %v", err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/mongodb_test.go b/src/go/plugin/go.d/modules/mongodb/mongodb_test.go
new file mode 100644
index 000000000..835ea20e2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/mongodb_test.go
@@ -0,0 +1,816 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mongo
+
+import (
+ "encoding/json"
+ "errors"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer6MongodServerStatus, _ = os.ReadFile("testdata/v6.0.3/mongod-serverStatus.json")
+ dataVer6MongosServerStatus, _ = os.ReadFile("testdata/v6.0.3/mongos-serverStatus.json")
+ dataVer6DbStats, _ = os.ReadFile("testdata/v6.0.3/dbStats.json")
+ dataVer6ReplSetGetStatus, _ = os.ReadFile("testdata/v6.0.3/replSetGetStatus.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer6MongodServerStatus": dataVer6MongodServerStatus,
+ "dataVer6MongosServerStatus": dataVer6MongosServerStatus,
+ "dataVer6DbStats": dataVer6DbStats,
+ "dataVer6ReplSetGetStatus": dataVer6ReplSetGetStatus,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestMongo_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Mongo{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestMongo_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails on unset 'address'": {
+ wantFail: true,
+ config: Config{
+ URI: "",
+ },
+ },
+ "fails on invalid database selector": {
+ wantFail: true,
+ config: Config{
+ URI: "mongodb://localhost:27017",
+ Databases: matcher.SimpleExpr{
+ Includes: []string{"!@#"},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mongo := New()
+ mongo.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, mongo.Init())
+ } else {
+ assert.NoError(t, mongo.Init())
+ }
+ })
+ }
+}
+
+func TestMongo_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestMongo_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) *Mongo
+ wantClose bool
+ }{
+ "client not initialized": {
+ wantClose: false,
+ prepare: func(t *testing.T) *Mongo {
+ return New()
+ },
+ },
+ "client initialized": {
+ wantClose: true,
+ prepare: func(t *testing.T) *Mongo {
+ mongo := New()
+ mongo.conn = caseMongod()
+ _ = mongo.conn.initClient("", 0)
+
+ return mongo
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mongo := test.prepare(t)
+
+ require.NotPanics(t, mongo.Cleanup)
+ if test.wantClose {
+ mock, ok := mongo.conn.(*mockMongoClient)
+ require.True(t, ok)
+ assert.True(t, mock.closeCalled)
+ }
+ })
+ }
+}
+
+func TestMongo_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *mockMongoClient
+ wantFail bool
+ }{
+ "success on Mongod (v6)": {
+ wantFail: false,
+ prepare: caseMongod,
+ },
+ "success on Mongod Replicas Set(v6)": {
+ wantFail: false,
+ prepare: caseMongodReplicaSet,
+ },
+ "success on Mongos (v6)": {
+ wantFail: false,
+ prepare: caseMongos,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mongo := prepareMongo()
+ defer mongo.Cleanup()
+ mongo.conn = test.prepare()
+
+ require.NoError(t, mongo.Init())
+
+ if test.wantFail {
+ assert.Error(t, mongo.Check())
+ } else {
+ assert.NoError(t, mongo.Check())
+ }
+ })
+ }
+}
+
+func TestMongo_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *mockMongoClient
+ wantCollected map[string]int64
+ }{
+ "success on Mongod (v6)": {
+ prepare: caseMongod,
+ wantCollected: map[string]int64{
+ "asserts_msg": 0,
+ "asserts_regular": 0,
+ "asserts_rollovers": 0,
+ "asserts_tripwire": 0,
+ "asserts_user": 246,
+ "asserts_warning": 0,
+ "connections_active": 7,
+ "connections_available": 838841,
+ "connections_awaiting_topology_changes": 5,
+ "connections_current": 19,
+ "connections_exhaust_hello": 2,
+ "connections_exhaust_is_master": 1,
+ "connections_threaded": 19,
+ "connections_total_created": 77,
+ "database_admin_collections": 3,
+ "database_admin_data_size": 796,
+ "database_admin_documents": 5,
+ "database_admin_index_size": 81920,
+ "database_admin_indexes": 4,
+ "database_admin_storage_size": 61440,
+ "database_admin_views": 0,
+ "database_config_collections": 3,
+ "database_config_data_size": 796,
+ "database_config_documents": 5,
+ "database_config_index_size": 81920,
+ "database_config_indexes": 4,
+ "database_config_storage_size": 61440,
+ "database_config_views": 0,
+ "database_local_collections": 3,
+ "database_local_data_size": 796,
+ "database_local_documents": 5,
+ "database_local_index_size": 81920,
+ "database_local_indexes": 4,
+ "database_local_storage_size": 61440,
+ "database_local_views": 0,
+ "extra_info_page_faults": 0,
+ "global_lock_active_clients_readers": 0,
+ "global_lock_active_clients_writers": 0,
+ "global_lock_current_queue_readers": 0,
+ "global_lock_current_queue_writers": 0,
+ "locks_collection_acquire_exclusive": 6,
+ "locks_collection_acquire_intent_exclusive": 172523,
+ "locks_collection_acquire_intent_shared": 336370,
+ "locks_collection_acquire_shared": 0,
+ "locks_database_acquire_exclusive": 3,
+ "locks_database_acquire_intent_exclusive": 172539,
+ "locks_database_acquire_intent_shared": 50971,
+ "locks_database_acquire_shared": 0,
+ "locks_global_acquire_exclusive": 6,
+ "locks_global_acquire_intent_exclusive": 174228,
+ "locks_global_acquire_intent_shared": 437905,
+ "locks_global_acquire_shared": 0,
+ "locks_mutex_acquire_exclusive": 0,
+ "locks_mutex_acquire_intent_exclusive": 0,
+ "locks_mutex_acquire_intent_shared": 245077,
+ "locks_mutex_acquire_shared": 0,
+ "locks_oplog_acquire_exclusive": 0,
+ "locks_oplog_acquire_intent_exclusive": 1,
+ "locks_oplog_acquire_intent_shared": 16788,
+ "locks_oplog_acquire_shared": 0,
+ "memory_resident": 193986560,
+ "memory_virtual": 3023044608,
+ "metrics_cursor_lifespan_greater_than_or_equal_10_minutes": 0,
+ "metrics_cursor_lifespan_less_than_10_minutes": 0,
+ "metrics_cursor_lifespan_less_than_15_seconds": 0,
+ "metrics_cursor_lifespan_less_than_1_minute": 0,
+ "metrics_cursor_lifespan_less_than_1_second": 0,
+ "metrics_cursor_lifespan_less_than_30_seconds": 0,
+ "metrics_cursor_lifespan_less_than_5_seconds": 0,
+ "metrics_cursor_open_no_timeout": 0,
+ "metrics_cursor_open_total": 1,
+ "metrics_cursor_timed_out": 0,
+ "metrics_cursor_total_opened": 1,
+ "metrics_document_deleted": 7,
+ "metrics_document_inserted": 0,
+ "metrics_document_returned": 1699,
+ "metrics_document_updated": 52,
+ "metrics_query_executor_scanned": 61,
+ "metrics_query_executor_scanned_objects": 1760,
+ "network_bytes_in": 38851356,
+ "network_bytes_out": 706335836,
+ "network_requests": 130530,
+ "network_slow_dns_operations": 0,
+ "network_slow_ssl_operations": 0,
+ "operations_command": 125531,
+ "operations_delete": 7,
+ "operations_getmore": 5110,
+ "operations_insert": 0,
+ "operations_latencies_commands_latency": 46432082,
+ "operations_latencies_commands_ops": 125412,
+ "operations_latencies_reads_latency": 1009868,
+ "operations_latencies_reads_ops": 5111,
+ "operations_latencies_writes_latency": 0,
+ "operations_latencies_writes_ops": 0,
+ "operations_query": 76,
+ "operations_update": 59,
+ "tcmalloc_aggressive_memory_decommit": 0,
+ "tcmalloc_central_cache_free_bytes": 406680,
+ "tcmalloc_current_total_thread_cache_bytes": 2490832,
+ "tcmalloc_generic_current_allocated_bytes": 109050648,
+ "tcmalloc_generic_heap_size": 127213568,
+ "tcmalloc_max_total_thread_cache_bytes": 1073741824,
+ "tcmalloc_pageheap_commit_count": 376,
+ "tcmalloc_pageheap_committed_bytes": 127086592,
+ "tcmalloc_pageheap_decommit_count": 122,
+ "tcmalloc_pageheap_free_bytes": 13959168,
+ "tcmalloc_pageheap_reserve_count": 60,
+ "tcmalloc_pageheap_scavenge_bytes": 0,
+ "tcmalloc_pageheap_total_commit_bytes": 229060608,
+ "tcmalloc_pageheap_total_decommit_bytes": 101974016,
+ "tcmalloc_pageheap_total_reserve_bytes": 127213568,
+ "tcmalloc_pageheap_unmapped_bytes": 126976,
+ "tcmalloc_spinlock_total_delay_ns": 33426251,
+ "tcmalloc_thread_cache_free_bytes": 2490832,
+ "tcmalloc_total_free_bytes": 4076776,
+ "tcmalloc_transfer_cache_free_bytes": 1179264,
+ "txn_active": 0,
+ "txn_inactive": 0,
+ "txn_open": 0,
+ "txn_prepared": 0,
+ "txn_total_aborted": 0,
+ "txn_total_committed": 0,
+ "txn_total_prepared": 0,
+ "txn_total_started": 0,
+ "wiredtiger_cache_currently_in_cache_bytes": 814375,
+ "wiredtiger_cache_maximum_configured_bytes": 7854882816,
+ "wiredtiger_cache_modified_evicted_pages": 0,
+ "wiredtiger_cache_read_into_cache_pages": 108,
+ "wiredtiger_cache_tracked_dirty_in_the_cache_bytes": 456446,
+ "wiredtiger_cache_unmodified_evicted_pages": 0,
+ "wiredtiger_cache_written_from_cache_pages": 3177,
+ "wiredtiger_concurrent_txn_read_available": 128,
+ "wiredtiger_concurrent_txn_read_out": 0,
+ "wiredtiger_concurrent_txn_write_available": 128,
+ "wiredtiger_concurrent_txn_write_out": 0,
+ },
+ },
+ "success on Mongod Replica Set (v6)": {
+ prepare: caseMongodReplicaSet,
+ wantCollected: map[string]int64{
+ "asserts_msg": 0,
+ "asserts_regular": 0,
+ "asserts_rollovers": 0,
+ "asserts_tripwire": 0,
+ "asserts_user": 246,
+ "asserts_warning": 0,
+ "connections_active": 7,
+ "connections_available": 838841,
+ "connections_awaiting_topology_changes": 5,
+ "connections_current": 19,
+ "connections_exhaust_hello": 2,
+ "connections_exhaust_is_master": 1,
+ "connections_threaded": 19,
+ "connections_total_created": 77,
+ "database_admin_collections": 3,
+ "database_admin_data_size": 796,
+ "database_admin_documents": 5,
+ "database_admin_index_size": 81920,
+ "database_admin_indexes": 4,
+ "database_admin_storage_size": 61440,
+ "database_admin_views": 0,
+ "database_config_collections": 3,
+ "database_config_data_size": 796,
+ "database_config_documents": 5,
+ "database_config_index_size": 81920,
+ "database_config_indexes": 4,
+ "database_config_storage_size": 61440,
+ "database_config_views": 0,
+ "database_local_collections": 3,
+ "database_local_data_size": 796,
+ "database_local_documents": 5,
+ "database_local_index_size": 81920,
+ "database_local_indexes": 4,
+ "database_local_storage_size": 61440,
+ "database_local_views": 0,
+ "extra_info_page_faults": 0,
+ "global_lock_active_clients_readers": 0,
+ "global_lock_active_clients_writers": 0,
+ "global_lock_current_queue_readers": 0,
+ "global_lock_current_queue_writers": 0,
+ "locks_collection_acquire_exclusive": 6,
+ "locks_collection_acquire_intent_exclusive": 172523,
+ "locks_collection_acquire_intent_shared": 336370,
+ "locks_collection_acquire_shared": 0,
+ "locks_database_acquire_exclusive": 3,
+ "locks_database_acquire_intent_exclusive": 172539,
+ "locks_database_acquire_intent_shared": 50971,
+ "locks_database_acquire_shared": 0,
+ "locks_global_acquire_exclusive": 6,
+ "locks_global_acquire_intent_exclusive": 174228,
+ "locks_global_acquire_intent_shared": 437905,
+ "locks_global_acquire_shared": 0,
+ "locks_mutex_acquire_exclusive": 0,
+ "locks_mutex_acquire_intent_exclusive": 0,
+ "locks_mutex_acquire_intent_shared": 245077,
+ "locks_mutex_acquire_shared": 0,
+ "locks_oplog_acquire_exclusive": 0,
+ "locks_oplog_acquire_intent_exclusive": 1,
+ "locks_oplog_acquire_intent_shared": 16788,
+ "locks_oplog_acquire_shared": 0,
+ "memory_resident": 193986560,
+ "memory_virtual": 3023044608,
+ "metrics_cursor_lifespan_greater_than_or_equal_10_minutes": 0,
+ "metrics_cursor_lifespan_less_than_10_minutes": 0,
+ "metrics_cursor_lifespan_less_than_15_seconds": 0,
+ "metrics_cursor_lifespan_less_than_1_minute": 0,
+ "metrics_cursor_lifespan_less_than_1_second": 0,
+ "metrics_cursor_lifespan_less_than_30_seconds": 0,
+ "metrics_cursor_lifespan_less_than_5_seconds": 0,
+ "metrics_cursor_open_no_timeout": 0,
+ "metrics_cursor_open_total": 1,
+ "metrics_cursor_timed_out": 0,
+ "metrics_cursor_total_opened": 1,
+ "metrics_document_deleted": 7,
+ "metrics_document_inserted": 0,
+ "metrics_document_returned": 1699,
+ "metrics_document_updated": 52,
+ "metrics_query_executor_scanned": 61,
+ "metrics_query_executor_scanned_objects": 1760,
+ "network_bytes_in": 38851356,
+ "network_bytes_out": 706335836,
+ "network_requests": 130530,
+ "network_slow_dns_operations": 0,
+ "network_slow_ssl_operations": 0,
+ "operations_command": 125531,
+ "operations_delete": 7,
+ "operations_getmore": 5110,
+ "operations_insert": 0,
+ "operations_latencies_commands_latency": 46432082,
+ "operations_latencies_commands_ops": 125412,
+ "operations_latencies_reads_latency": 1009868,
+ "operations_latencies_reads_ops": 5111,
+ "operations_latencies_writes_latency": 0,
+ "operations_latencies_writes_ops": 0,
+ "operations_query": 76,
+ "operations_update": 59,
+ "repl_set_member_mongodb-primary:27017_health_status_down": 0,
+ "repl_set_member_mongodb-primary:27017_health_status_up": 1,
+ "repl_set_member_mongodb-primary:27017_replication_lag": 4572,
+ "repl_set_member_mongodb-primary:27017_state_arbiter": 0,
+ "repl_set_member_mongodb-primary:27017_state_down": 0,
+ "repl_set_member_mongodb-primary:27017_state_primary": 1,
+ "repl_set_member_mongodb-primary:27017_state_recovering": 0,
+ "repl_set_member_mongodb-primary:27017_state_removed": 0,
+ "repl_set_member_mongodb-primary:27017_state_rollback": 0,
+ "repl_set_member_mongodb-primary:27017_state_secondary": 0,
+ "repl_set_member_mongodb-primary:27017_state_startup": 0,
+ "repl_set_member_mongodb-primary:27017_state_startup2": 0,
+ "repl_set_member_mongodb-primary:27017_state_unknown": 0,
+ "repl_set_member_mongodb-secondary:27017_health_status_down": 0,
+ "repl_set_member_mongodb-secondary:27017_health_status_up": 1,
+ "repl_set_member_mongodb-secondary:27017_heartbeat_latency": 1359,
+ "repl_set_member_mongodb-secondary:27017_ping_rtt": 0,
+ "repl_set_member_mongodb-secondary:27017_replication_lag": 4572,
+ "repl_set_member_mongodb-secondary:27017_state_arbiter": 0,
+ "repl_set_member_mongodb-secondary:27017_state_down": 0,
+ "repl_set_member_mongodb-secondary:27017_state_primary": 0,
+ "repl_set_member_mongodb-secondary:27017_state_recovering": 0,
+ "repl_set_member_mongodb-secondary:27017_state_removed": 0,
+ "repl_set_member_mongodb-secondary:27017_state_rollback": 0,
+ "repl_set_member_mongodb-secondary:27017_state_secondary": 1,
+ "repl_set_member_mongodb-secondary:27017_state_startup": 0,
+ "repl_set_member_mongodb-secondary:27017_state_startup2": 0,
+ "repl_set_member_mongodb-secondary:27017_state_unknown": 0,
+ "repl_set_member_mongodb-secondary:27017_uptime": 192370,
+ "tcmalloc_aggressive_memory_decommit": 0,
+ "tcmalloc_central_cache_free_bytes": 406680,
+ "tcmalloc_current_total_thread_cache_bytes": 2490832,
+ "tcmalloc_generic_current_allocated_bytes": 109050648,
+ "tcmalloc_generic_heap_size": 127213568,
+ "tcmalloc_max_total_thread_cache_bytes": 1073741824,
+ "tcmalloc_pageheap_commit_count": 376,
+ "tcmalloc_pageheap_committed_bytes": 127086592,
+ "tcmalloc_pageheap_decommit_count": 122,
+ "tcmalloc_pageheap_free_bytes": 13959168,
+ "tcmalloc_pageheap_reserve_count": 60,
+ "tcmalloc_pageheap_scavenge_bytes": 0,
+ "tcmalloc_pageheap_total_commit_bytes": 229060608,
+ "tcmalloc_pageheap_total_decommit_bytes": 101974016,
+ "tcmalloc_pageheap_total_reserve_bytes": 127213568,
+ "tcmalloc_pageheap_unmapped_bytes": 126976,
+ "tcmalloc_spinlock_total_delay_ns": 33426251,
+ "tcmalloc_thread_cache_free_bytes": 2490832,
+ "tcmalloc_total_free_bytes": 4076776,
+ "tcmalloc_transfer_cache_free_bytes": 1179264,
+ "txn_active": 0,
+ "txn_inactive": 0,
+ "txn_open": 0,
+ "txn_prepared": 0,
+ "txn_total_aborted": 0,
+ "txn_total_committed": 0,
+ "txn_total_prepared": 0,
+ "txn_total_started": 0,
+ "wiredtiger_cache_currently_in_cache_bytes": 814375,
+ "wiredtiger_cache_maximum_configured_bytes": 7854882816,
+ "wiredtiger_cache_modified_evicted_pages": 0,
+ "wiredtiger_cache_read_into_cache_pages": 108,
+ "wiredtiger_cache_tracked_dirty_in_the_cache_bytes": 456446,
+ "wiredtiger_cache_unmodified_evicted_pages": 0,
+ "wiredtiger_cache_written_from_cache_pages": 3177,
+ "wiredtiger_concurrent_txn_read_available": 128,
+ "wiredtiger_concurrent_txn_read_out": 0,
+ "wiredtiger_concurrent_txn_write_available": 128,
+ "wiredtiger_concurrent_txn_write_out": 0,
+ },
+ },
+ "success on Mongos (v6)": {
+ prepare: caseMongos,
+ wantCollected: map[string]int64{
+ "asserts_msg": 0,
+ "asserts_regular": 0,
+ "asserts_rollovers": 0,
+ "asserts_tripwire": 0,
+ "asserts_user": 352,
+ "asserts_warning": 0,
+ "connections_active": 5,
+ "connections_available": 838842,
+ "connections_awaiting_topology_changes": 4,
+ "connections_current": 18,
+ "connections_exhaust_hello": 3,
+ "connections_exhaust_is_master": 0,
+ "connections_threaded": 18,
+ "connections_total_created": 89,
+ "database_admin_collections": 3,
+ "database_admin_data_size": 796,
+ "database_admin_documents": 5,
+ "database_admin_index_size": 81920,
+ "database_admin_indexes": 4,
+ "database_admin_storage_size": 61440,
+ "database_admin_views": 0,
+ "database_config_collections": 3,
+ "database_config_data_size": 796,
+ "database_config_documents": 5,
+ "database_config_index_size": 81920,
+ "database_config_indexes": 4,
+ "database_config_storage_size": 61440,
+ "database_config_views": 0,
+ "database_local_collections": 3,
+ "database_local_data_size": 796,
+ "database_local_documents": 5,
+ "database_local_index_size": 81920,
+ "database_local_indexes": 4,
+ "database_local_storage_size": 61440,
+ "database_local_views": 0,
+ "extra_info_page_faults": 526,
+ "memory_resident": 84934656,
+ "memory_virtual": 2596274176,
+ "metrics_document_deleted": 0,
+ "metrics_document_inserted": 0,
+ "metrics_document_returned": 0,
+ "metrics_document_updated": 0,
+ "metrics_query_executor_scanned": 0,
+ "metrics_query_executor_scanned_objects": 0,
+ "network_bytes_in": 57943348,
+ "network_bytes_out": 247343709,
+ "network_requests": 227310,
+ "network_slow_dns_operations": 0,
+ "network_slow_ssl_operations": 0,
+ "operations_command": 227283,
+ "operations_delete": 0,
+ "operations_getmore": 0,
+ "operations_insert": 0,
+ "operations_query": 10,
+ "operations_update": 0,
+ "shard_collections_partitioned": 1,
+ "shard_collections_unpartitioned": 1,
+ "shard_databases_partitioned": 1,
+ "shard_databases_unpartitioned": 1,
+ "shard_id_shard0_chunks": 1,
+ "shard_id_shard1_chunks": 1,
+ "shard_nodes_aware": 1,
+ "shard_nodes_unaware": 1,
+ "tcmalloc_aggressive_memory_decommit": 0,
+ "tcmalloc_central_cache_free_bytes": 736960,
+ "tcmalloc_current_total_thread_cache_bytes": 1638104,
+ "tcmalloc_generic_current_allocated_bytes": 13519784,
+ "tcmalloc_generic_heap_size": 24576000,
+ "tcmalloc_max_total_thread_cache_bytes": 1042284544,
+ "tcmalloc_pageheap_commit_count": 480,
+ "tcmalloc_pageheap_committed_bytes": 24518656,
+ "tcmalloc_pageheap_decommit_count": 127,
+ "tcmalloc_pageheap_free_bytes": 5697536,
+ "tcmalloc_pageheap_reserve_count": 15,
+ "tcmalloc_pageheap_scavenge_bytes": 0,
+ "tcmalloc_pageheap_total_commit_bytes": 84799488,
+ "tcmalloc_pageheap_total_decommit_bytes": 60280832,
+ "tcmalloc_pageheap_total_reserve_bytes": 24576000,
+ "tcmalloc_pageheap_unmapped_bytes": 57344,
+ "tcmalloc_spinlock_total_delay_ns": 96785212,
+ "tcmalloc_thread_cache_free_bytes": 1638104,
+ "tcmalloc_total_free_bytes": 5301336,
+ "tcmalloc_transfer_cache_free_bytes": 2926272,
+ "txn_active": 0,
+ "txn_commit_types_no_shards_initiated": 0,
+ "txn_commit_types_no_shards_successful": 0,
+ "txn_commit_types_no_shards_successful_duration_micros": 0,
+ "txn_commit_types_no_shards_unsuccessful": 0,
+ "txn_commit_types_read_only_initiated": 0,
+ "txn_commit_types_read_only_successful": 0,
+ "txn_commit_types_read_only_successful_duration_micros": 0,
+ "txn_commit_types_read_only_unsuccessful": 0,
+ "txn_commit_types_recover_with_token_initiated": 0,
+ "txn_commit_types_recover_with_token_successful": 0,
+ "txn_commit_types_recover_with_token_successful_duration_micros": 0,
+ "txn_commit_types_recover_with_token_unsuccessful": 0,
+ "txn_commit_types_single_shard_initiated": 0,
+ "txn_commit_types_single_shard_successful": 0,
+ "txn_commit_types_single_shard_successful_duration_micros": 0,
+ "txn_commit_types_single_shard_unsuccessful": 0,
+ "txn_commit_types_single_write_shard_initiated": 0,
+ "txn_commit_types_single_write_shard_successful": 0,
+ "txn_commit_types_single_write_shard_successful_duration_micros": 0,
+ "txn_commit_types_single_write_shard_unsuccessful": 0,
+ "txn_commit_types_two_phase_commit_initiated": 0,
+ "txn_commit_types_two_phase_commit_successful": 0,
+ "txn_commit_types_two_phase_commit_successful_duration_micros": 0,
+ "txn_commit_types_two_phase_commit_unsuccessful": 0,
+ "txn_inactive": 0,
+ "txn_open": 0,
+ "txn_total_aborted": 0,
+ "txn_total_committed": 0,
+ "txn_total_started": 0,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mongo := prepareMongo()
+ defer mongo.Cleanup()
+ mongo.conn = test.prepare()
+
+ require.NoError(t, mongo.Init())
+
+ mx := mongo.Collect()
+
+ assert.Equal(t, test.wantCollected, mx)
+ })
+ }
+}
+
+func prepareMongo() *Mongo {
+ m := New()
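+ // "* *" is a glob matcher expression that selects all databases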
+ m.Databases = matcher.SimpleExpr{Includes: []string{"* *"}}
+ return m
+}
+
+func caseMongodReplicaSet() *mockMongoClient {
+ return &mockMongoClient{replicaSet: true}
+}
+
+func caseMongod() *mockMongoClient {
+ return &mockMongoClient{}
+}
+
+func caseMongos() *mockMongoClient {
+ return &mockMongoClient{mongos: true}
+}
+
+type mockMongoClient struct {
+ replicaSet bool
+ mongos bool
+ errOnServerStatus bool
+ errOnListDatabaseNames bool
+ errOnDbStats bool
+ errOnReplSetGetStatus bool
+ errOnShardNodes bool
+ errOnShardDatabasesPartitioning bool
+ errOnShardCollectionsPartitioning bool
+ errOnShardChunks bool
+ errOnInitClient bool
+ clientInited bool
+ closeCalled bool
+}
+
+func (m *mockMongoClient) serverStatus() (*documentServerStatus, error) {
+ if !m.clientInited {
+ return nil, errors.New("mock.serverStatus() error: mongo client not inited")
+ }
+ if m.errOnServerStatus {
+ return nil, errors.New("mock.serverStatus() error")
+ }
+
+ data := dataVer6MongodServerStatus
+ if m.mongos {
+ data = dataVer6MongosServerStatus
+ }
+
+ var s documentServerStatus
+ if err := json.Unmarshal(data, &s); err != nil {
+ return nil, err
+ }
+
+ return &s, nil
+}
+
+func (m *mockMongoClient) listDatabaseNames() ([]string, error) {
+ if !m.clientInited {
+ return nil, errors.New("mock.listDatabaseNames() error: mongo client not inited")
+ }
+ if m.errOnListDatabaseNames {
+ return nil, errors.New("mock.listDatabaseNames() error")
+ }
+ return []string{"admin", "config", "local"}, nil
+}
+
+func (m *mockMongoClient) dbStats(_ string) (*documentDBStats, error) {
+ if !m.clientInited {
+ return nil, errors.New("mock.dbStats() error: mongo client not inited")
+ }
+ if m.errOnDbStats {
+ return nil, errors.New("mock.dbStats() error")
+ }
+
+ var s documentDBStats
+ if err := json.Unmarshal(dataVer6DbStats, &s); err != nil {
+ return nil, err
+ }
+
+ return &s, nil
+}
+
+func (m *mockMongoClient) isReplicaSet() bool {
+ return m.replicaSet
+}
+
+func (m *mockMongoClient) isMongos() bool {
+ return m.mongos
+}
+
+func (m *mockMongoClient) replSetGetStatus() (*documentReplSetStatus, error) {
+ if !m.clientInited {
+ return nil, errors.New("mock.replSetGetStatus() error: mongo client not inited")
+ }
+ if m.mongos {
+ return nil, errors.New("mock.replSetGetStatus() error: shouldn't be called for mongos")
+ }
+ if !m.replicaSet {
+ return nil, errors.New("mock.replSetGetStatus() error: should be called for replica set")
+ }
+ if m.errOnReplSetGetStatus {
+ return nil, errors.New("mock.replSetGetStatus() error")
+ }
+
+ var s documentReplSetStatus
+ if err := json.Unmarshal(dataVer6ReplSetGetStatus, &s); err != nil {
+ return nil, err
+ }
+
+ return &s, nil
+}
+
+func (m *mockMongoClient) shardNodes() (*documentShardNodesResult, error) {
+ if !m.clientInited {
+ return nil, errors.New("mock.shardNodes() error: mongo client not inited")
+ }
+ if m.replicaSet {
+ return nil, errors.New("mock.replSetGetStatus() error: shouldn't be called for replica set")
+ }
+ if !m.mongos {
+ return nil, errors.New("mock.shardNodes() error: should be called for mongos")
+ }
+ if m.errOnShardNodes {
+ return nil, errors.New("mock.shardNodes() error")
+ }
+
+ return &documentShardNodesResult{
+ ShardAware: 1,
+ ShardUnaware: 1,
+ }, nil
+}
+
+func (m *mockMongoClient) shardDatabasesPartitioning() (*documentPartitionedResult, error) {
+ if !m.clientInited {
+ return nil, errors.New("mock.shardDatabasesPartitioning() error: mongo client not inited")
+ }
+ if m.replicaSet {
+ return nil, errors.New("mock.shardDatabasesPartitioning() error: shouldn't be called for replica set")
+ }
+ if !m.mongos {
+ return nil, errors.New("mock.shardDatabasesPartitioning() error: should be called for mongos")
+ }
+ if m.errOnShardDatabasesPartitioning {
+ return nil, errors.New("mock.shardDatabasesPartitioning() error")
+ }
+
+ return &documentPartitionedResult{
+ Partitioned: 1,
+ UnPartitioned: 1,
+ }, nil
+}
+
+func (m *mockMongoClient) shardCollectionsPartitioning() (*documentPartitionedResult, error) {
+ if !m.clientInited {
+ return nil, errors.New("mock.shardCollectionsPartitioning() error: mongo client not inited")
+ }
+ if m.replicaSet {
+ return nil, errors.New("mock.shardCollectionsPartitioning() error: shouldn't be called for replica set")
+ }
+ if !m.mongos {
+ return nil, errors.New("mock.shardCollectionsPartitioning() error: should be called for mongos")
+ }
+ if m.errOnShardCollectionsPartitioning {
+ return nil, errors.New("mock.shardCollectionsPartitioning() error")
+ }
+
+ return &documentPartitionedResult{
+ Partitioned: 1,
+ UnPartitioned: 1,
+ }, nil
+}
+
+func (m *mockMongoClient) shardChunks() (map[string]int64, error) {
+ if !m.clientInited {
+ return nil, errors.New("mock.shardChunks() error: mongo client not inited")
+ }
+ if m.replicaSet {
+ return nil, errors.New("mock.shardChunks() error: shouldn't be called for replica set")
+ }
+ if !m.mongos {
+ return nil, errors.New("mock.shardChunks() error: should be called for mongos")
+ }
+ if m.errOnShardChunks {
+ return nil, errors.New("mock.shardChunks() error")
+ }
+
+ return map[string]int64{
+ "shard0": 1,
+ "shard1": 1,
+ }, nil
+}
+
+func (m *mockMongoClient) initClient(_ string, _ time.Duration) error {
+ if m.errOnInitClient {
+ return errors.New("mock.initClient() error")
+ }
+ m.clientInited = true
+ return nil
+}
+
+func (m *mockMongoClient) close() error {
+ if m.clientInited {
+ m.closeCalled = true
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/testdata/config.json b/src/go/plugin/go.d/modules/mongodb/testdata/config.json
new file mode 100644
index 000000000..bc3f94d81
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/testdata/config.json
@@ -0,0 +1,13 @@
+{
+ "update_every": 1,
+ "uri": "ok",
+ "timeout": 123.123,
+ "databases": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/testdata/config.yaml b/src/go/plugin/go.d/modules/mongodb/testdata/config.yaml
new file mode 100644
index 000000000..03a11029c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/testdata/config.yaml
@@ -0,0 +1,8 @@
+update_every: 1
+uri: "ok"
+timeout: 123.123
+databases:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/dbStats.json b/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/dbStats.json
new file mode 100644
index 000000000..52a513203
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/dbStats.json
@@ -0,0 +1,9 @@
+{
+ "Collections": 3,
+ "Views": 0,
+ "Indexes": 4,
+ "Objects": 5,
+ "DataSize": 796,
+ "IndexSize": 81920,
+ "StorageSize": 61440
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/mongod-serverStatus.json b/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/mongod-serverStatus.json
new file mode 100644
index 000000000..77f083923
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/mongod-serverStatus.json
@@ -0,0 +1,497 @@
+{
+ "Process": "mongod",
+ "OpCounters": {
+ "Insert": 0,
+ "Query": 76,
+ "Update": 59,
+ "Delete": 7,
+ "GetMore": 5110,
+ "Command": 125531
+ },
+ "OpLatencies": {
+ "Reads": {
+ "Latency": 1009868,
+ "Ops": 5111
+ },
+ "Writes": {
+ "Latency": 0,
+ "Ops": 0
+ },
+ "Commands": {
+ "Latency": 46432082,
+ "Ops": 125412
+ }
+ },
+ "Connections": {
+ "Current": 19,
+ "Available": 838841,
+ "TotalCreated": 77,
+ "Active": 7,
+ "Threaded": 19,
+ "ExhaustIsMaster": 1,
+ "ExhaustHello": 2,
+ "AwaitingTopologyChanges": 5
+ },
+ "Network": {
+ "BytesIn": 38851356,
+ "BytesOut": 706335836,
+ "NumRequests": 130530,
+ "NumSlowDNSOperations": 0,
+ "NumSlowSSLOperations": 0
+ },
+ "Memory": {
+ "Resident": 185,
+ "Virtual": 2883
+ },
+ "Metrics": {
+ "Cursor": {
+ "TotalOpened": 1,
+ "TimedOut": 0,
+ "Open": {
+ "NoTimeout": 0,
+ "Total": 1
+ },
+ "Lifespan": {
+ "GreaterThanOrEqual10Minutes": 0,
+ "LessThan10Minutes": 0,
+ "LessThan15Seconds": 0,
+ "LessThan1Minute": 0,
+ "LessThan1Second": 0,
+ "LessThan30Seconds": 0,
+ "LessThan5Seconds": 0
+ }
+ },
+ "Document": {
+ "Deleted": 7,
+ "Inserted": 0,
+ "Returned": 1699,
+ "Updated": 52
+ },
+ "QueryExecutor": {
+ "Scanned": 61,
+ "ScannedObjects": 1760
+ }
+ },
+ "ExtraInfo": {
+ "PageFaults": 0
+ },
+ "Asserts": {
+ "Regular": 0,
+ "Warning": 0,
+ "Msg": 0,
+ "User": 246,
+ "Tripwire": 0,
+ "Rollovers": 0
+ },
+ "Transactions": {
+ "CurrentActive": 0,
+ "CurrentInactive": 0,
+ "CurrentOpen": 0,
+ "CurrentPrepared": 0,
+ "TotalAborted": 0,
+ "TotalCommitted": 0,
+ "TotalStarted": 0,
+ "TotalPrepared": 0,
+ "CommitTypes": null
+ },
+ "GlobalLock": {
+ "CurrentQueue": {
+ "Readers": 0,
+ "Writers": 0
+ },
+ "ActiveClients": {
+ "Readers": 0,
+ "Writers": 0
+ }
+ },
+ "Tcmalloc": {
+ "Generic": {
+ "CurrentAllocatedBytes": 109050648,
+ "HeapSize": 127213568
+ },
+ "Tcmalloc": {
+ "PageheapFreeBytes": 13959168,
+ "PageheapUnmappedBytes": 126976,
+ "MaxTotalThreadCacheBytes": 1073741824,
+ "CurrentTotalThreadCacheBytes": 2490832,
+ "TotalFreeBytes": 4076776,
+ "CentralCacheFreeBytes": 406680,
+ "TransferCacheFreeBytes": 1179264,
+ "ThreadCacheFreeBytes": 2490832,
+ "AggressiveMemoryDecommit": 0,
+ "PageheapCommittedBytes": 127086592,
+ "PageheapScavengeBytes": 0,
+ "PageheapCommitCount": 376,
+ "PageheapTotalCommitBytes": 229060608,
+ "PageheapDecommitCount": 122,
+ "PageheapTotalDecommitBytes": 101974016,
+ "PageheapReserveCount": 60,
+ "PageheapTotalReserveBytes": 127213568,
+ "SpinlockTotalDelayNs": 33426251
+ }
+ },
+ "Locks": {
+ "Global": {
+ "AcquireCount": {
+ "Shared": 0,
+ "Exclusive": 6,
+ "IntentShared": 437905,
+ "IntentExclusive": 174228
+ }
+ },
+ "Database": {
+ "AcquireCount": {
+ "Shared": 0,
+ "Exclusive": 3,
+ "IntentShared": 50971,
+ "IntentExclusive": 172539
+ }
+ },
+ "Collection": {
+ "AcquireCount": {
+ "Shared": 0,
+ "Exclusive": 6,
+ "IntentShared": 336370,
+ "IntentExclusive": 172523
+ }
+ },
+ "Mutex": {
+ "AcquireCount": {
+ "Shared": 0,
+ "Exclusive": 0,
+ "IntentShared": 245077,
+ "IntentExclusive": 0
+ }
+ },
+ "Metadata": null,
+ "Oplog": {
+ "AcquireCount": {
+ "Shared": 0,
+ "Exclusive": 0,
+ "IntentShared": 16788,
+ "IntentExclusive": 1
+ }
+ }
+ },
+ "WiredTiger": {
+ "ConcurrentTransaction": {
+ "Write": {
+ "Out": 0,
+ "Available": 128
+ },
+ "Read": {
+ "Out": 0,
+ "Available": 128
+ }
+ },
+ "Cache": {
+ "BytesCurrentlyInCache": 814375,
+ "MaximumBytesConfigured": 7854882816,
+ "TrackedDirtyBytesInCache": 456446,
+ "UnmodifiedPagesEvicted": 0,
+ "ModifiedPagesEvicted": 0,
+ "PagesReadIntoCache": 108,
+ "PagesWrittenFromCache": 3177
+ }
+ },
+ "Repl": [
+ {
+ "Key": "topologyVersion",
+ "Value": [
+ {
+ "Key": "processId",
+ "Value": "63b043be562288304ad3b4fe"
+ },
+ {
+ "Key": "counter",
+ "Value": 7
+ }
+ ]
+ },
+ {
+ "Key": "hosts",
+ "Value": [
+ "mongodb-primary:27017",
+ "mongodb-secondary:27017"
+ ]
+ },
+ {
+ "Key": "setName",
+ "Value": "replicaset"
+ },
+ {
+ "Key": "setVersion",
+ "Value": 4
+ },
+ {
+ "Key": "isWritablePrimary",
+ "Value": true
+ },
+ {
+ "Key": "secondary",
+ "Value": false
+ },
+ {
+ "Key": "primary",
+ "Value": "mongodb-primary:27017"
+ },
+ {
+ "Key": "me",
+ "Value": "mongodb-primary:27017"
+ },
+ {
+ "Key": "electionId",
+ "Value": "7fffffff0000000000000006"
+ },
+ {
+ "Key": "lastWrite",
+ "Value": [
+ {
+ "Key": "opTime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 1672512884,
+ "I": 1
+ }
+ },
+ {
+ "Key": "t",
+ "Value": 6
+ }
+ ]
+ },
+ {
+ "Key": "lastWriteDate",
+ "Value": "2022-12-31T20:54:44+02:00"
+ },
+ {
+ "Key": "majorityOpTime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 1672512884,
+ "I": 1
+ }
+ },
+ {
+ "Key": "t",
+ "Value": 6
+ }
+ ]
+ },
+ {
+ "Key": "majorityWriteDate",
+ "Value": "2022-12-31T20:54:44+02:00"
+ }
+ ]
+ },
+ {
+ "Key": "replicationProgress",
+ "Value": [
+ [
+ {
+ "Key": "host",
+ "Value": "mongodb-primary:27017"
+ },
+ {
+ "Key": "optime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 1672512884,
+ "I": 1
+ }
+ },
+ {
+ "Key": "t",
+ "Value": 6
+ }
+ ]
+ },
+ {
+ "Key": "lastAppliedOpTime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 1672512884,
+ "I": 1
+ }
+ },
+ {
+ "Key": "t",
+ "Value": 6
+ }
+ ]
+ },
+ {
+ "Key": "heartbeatAppliedOpTime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 0,
+ "I": 0
+ }
+ },
+ {
+ "Key": "t",
+ "Value": -1
+ }
+ ]
+ },
+ {
+ "Key": "heartbeatDurableOpTime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 0,
+ "I": 0
+ }
+ },
+ {
+ "Key": "t",
+ "Value": -1
+ }
+ ]
+ },
+ {
+ "Key": "memberId",
+ "Value": 0
+ }
+ ],
+ [
+ {
+ "Key": "host",
+ "Value": "mongodb-secondary:27017"
+ },
+ {
+ "Key": "optime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 1672512884,
+ "I": 1
+ }
+ },
+ {
+ "Key": "t",
+ "Value": 6
+ }
+ ]
+ },
+ {
+ "Key": "lastAppliedOpTime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 1672512884,
+ "I": 1
+ }
+ },
+ {
+ "Key": "t",
+ "Value": 6
+ }
+ ]
+ },
+ {
+ "Key": "heartbeatAppliedOpTime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 1672512884,
+ "I": 1
+ }
+ },
+ {
+ "Key": "t",
+ "Value": 6
+ }
+ ]
+ },
+ {
+ "Key": "heartbeatDurableOpTime",
+ "Value": [
+ {
+ "Key": "ts",
+ "Value": {
+ "T": 1672512884,
+ "I": 1
+ }
+ },
+ {
+ "Key": "t",
+ "Value": 6
+ }
+ ]
+ },
+ {
+ "Key": "memberId",
+ "Value": 1
+ }
+ ]
+ ]
+ },
+ {
+ "Key": "primaryOnlyServices",
+ "Value": [
+ {
+ "Key": "ShardSplitDonorService",
+ "Value": [
+ {
+ "Key": "state",
+ "Value": "running"
+ },
+ {
+ "Key": "numInstances",
+ "Value": 0
+ }
+ ]
+ },
+ {
+ "Key": "TenantMigrationRecipientService",
+ "Value": [
+ {
+ "Key": "state",
+ "Value": "running"
+ },
+ {
+ "Key": "numInstances",
+ "Value": 0
+ }
+ ]
+ },
+ {
+ "Key": "TenantMigrationDonorService",
+ "Value": [
+ {
+ "Key": "state",
+ "Value": "running"
+ },
+ {
+ "Key": "numInstances",
+ "Value": 0
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Key": "rbid",
+ "Value": 2
+ },
+ {
+ "Key": "userWriteBlockMode",
+ "Value": 1
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/mongos-serverStatus.json b/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/mongos-serverStatus.json
new file mode 100644
index 000000000..ecf766715
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/mongos-serverStatus.json
@@ -0,0 +1,129 @@
+{
+ "Process": "mongos",
+ "OpCounters": {
+ "Insert": 0,
+ "Query": 10,
+ "Update": 0,
+ "Delete": 0,
+ "GetMore": 0,
+ "Command": 227283
+ },
+ "OpLatencies": null,
+ "Connections": {
+ "Current": 18,
+ "Available": 838842,
+ "TotalCreated": 89,
+ "Active": 5,
+ "Threaded": 18,
+ "ExhaustIsMaster": 0,
+ "ExhaustHello": 3,
+ "AwaitingTopologyChanges": 4
+ },
+ "Network": {
+ "BytesIn": 57943348,
+ "BytesOut": 247343709,
+ "NumRequests": 227310,
+ "NumSlowDNSOperations": 0,
+ "NumSlowSSLOperations": 0
+ },
+ "Memory": {
+ "Resident": 81,
+ "Virtual": 2476
+ },
+ "Metrics": {
+ "Cursor": {},
+ "Document": {
+ "Deleted": 0,
+ "Inserted": 0,
+ "Returned": 0,
+ "Updated": 0
+ },
+ "QueryExecutor": {
+ "Scanned": 0,
+ "ScannedObjects": 0
+ }
+ },
+ "ExtraInfo": {
+ "PageFaults": 526
+ },
+ "Asserts": {
+ "Regular": 0,
+ "Warning": 0,
+ "Msg": 0,
+ "User": 352,
+ "Tripwire": 0,
+ "Rollovers": 0
+ },
+ "Transactions": {
+ "CurrentActive": 0,
+ "CurrentInactive": 0,
+ "CurrentOpen": 0,
+ "CurrentPrepared": null,
+ "TotalAborted": 0,
+ "TotalCommitted": 0,
+ "TotalStarted": 0,
+ "TotalPrepared": null,
+ "CommitTypes": {
+ "NoShards": {
+ "initiated": 0,
+ "successful": 0,
+ "successfulDurationMicros": 0
+ },
+ "SingleShard": {
+ "initiated": 0,
+ "successful": 0,
+ "successfulDurationMicros": 0
+ },
+ "SingleWriteShard": {
+ "initiated": 0,
+ "successful": 0,
+ "successfulDurationMicros": 0
+ },
+ "ReadOnly": {
+ "initiated": 0,
+ "successful": 0,
+ "successfulDurationMicros": 0
+ },
+ "TwoPhaseCommit": {
+ "initiated": 0,
+ "successful": 0,
+ "successfulDurationMicros": 0
+ },
+ "RecoverWithToken": {
+ "initiated": 0,
+ "successful": 0,
+ "successfulDurationMicros": 0
+ }
+ }
+ },
+ "GlobalLock": null,
+ "Tcmalloc": {
+ "Generic": {
+ "CurrentAllocatedBytes": 13519784,
+ "HeapSize": 24576000
+ },
+ "Tcmalloc": {
+ "PageheapFreeBytes": 5697536,
+ "PageheapUnmappedBytes": 57344,
+ "MaxTotalThreadCacheBytes": 1042284544,
+ "CurrentTotalThreadCacheBytes": 1638104,
+ "TotalFreeBytes": 5301336,
+ "CentralCacheFreeBytes": 736960,
+ "TransferCacheFreeBytes": 2926272,
+ "ThreadCacheFreeBytes": 1638104,
+ "AggressiveMemoryDecommit": 0,
+ "PageheapCommittedBytes": 24518656,
+ "PageheapScavengeBytes": 0,
+ "PageheapCommitCount": 480,
+ "PageheapTotalCommitBytes": 84799488,
+ "PageheapDecommitCount": 127,
+ "PageheapTotalDecommitBytes": 60280832,
+ "PageheapReserveCount": 15,
+ "PageheapTotalReserveBytes": 24576000,
+ "SpinlockTotalDelayNs": 96785212
+ }
+ },
+ "Locks": null,
+ "WiredTiger": null,
+ "Repl": null
+}
diff --git a/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/replSetGetStatus.json b/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/replSetGetStatus.json
new file mode 100644
index 000000000..c97a77f31
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mongodb/testdata/v6.0.3/replSetGetStatus.json
@@ -0,0 +1,27 @@
+{
+ "Date": "2022-12-30T22:19:29.572Z",
+ "Members": [
+ {
+ "Name": "mongodb-primary:27017",
+ "Self": true,
+ "State": 1,
+ "Health": 1,
+ "OptimeDate": "2022-12-30T22:19:25Z",
+ "LastHeartbeat": null,
+ "LastHeartbeatRecv": null,
+ "PingMs": null,
+ "Uptime": 192588
+ },
+ {
+ "Name": "mongodb-secondary:27017",
+ "Self": null,
+ "State": 2,
+ "Health": 1,
+ "OptimeDate": "2022-12-30T22:19:25Z",
+ "LastHeartbeat": "2022-12-30T22:19:28.214Z",
+ "LastHeartbeatRecv": "2022-12-30T22:19:28.213Z",
+ "PingMs": 0,
+ "Uptime": 192370
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/monit/README.md b/src/go/plugin/go.d/modules/monit/README.md
new file mode 120000
index 000000000..ac69496f4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/README.md
@@ -0,0 +1 @@
+integrations/monit.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/monit/charts.go b/src/go/plugin/go.d/modules/monit/charts.go
new file mode 100644
index 000000000..58fcf6c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/charts.go
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package monit
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioServiceCheckStatus = module.Priority + iota
+ prioUptime
+)
+
+var baseCharts = module.Charts{
+ uptimeChart.Copy(),
+}
+
+var (
+ uptimeChart = module.Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "monit.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "uptime"},
+ },
+ }
+)
+
+var serviceCheckChartsTmpl = module.Charts{
+ serviceCheckStatusChartTmpl.Copy(),
+}
+
+var (
+ serviceCheckStatusChartTmpl = module.Chart{
+ ID: "service_check_type_%s_name_%s_status",
+ Title: "Service Check Status",
+ Units: "status",
+ Fam: "service status",
+ Ctx: "monit.service_check_status",
+ Priority: prioServiceCheckStatus,
+ Dims: module.Dims{
+ {ID: "service_check_type_%s_name_%s_status_ok", Name: "ok"},
+ {ID: "service_check_type_%s_name_%s_status_error", Name: "error"},
+ {ID: "service_check_type_%s_name_%s_status_initializing", Name: "initializing"},
+ {ID: "service_check_type_%s_name_%s_status_not_monitored", Name: "not_monitored"},
+ },
+ }
+)
+
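+// addServiceCheckCharts adds the status chart for a newly discovered service check,
+// expanding the chart and dimension ID templates with the service type and name.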
+func (m *Monit) addServiceCheckCharts(svc statusServiceCheck, srv *statusServer) {
+ charts := serviceCheckChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = cleanChartId(fmt.Sprintf(chart.ID, svc.svcType(), svc.Name))
+ chart.Labels = []module.Label{
+ {Key: "server_hostname", Value: srv.LocalHostname},
+ {Key: "service_check_name", Value: svc.Name},
+ {Key: "service_check_type", Value: svc.svcType()},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, svc.svcType(), svc.Name)
+ }
+ }
+
+ if err := m.Charts().Add(*charts...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *Monit) removeServiceCharts(svc statusServiceCheck) {
+ px := fmt.Sprintf("service_check_type_%s_name_%s_", svc.svcType(), svc.Name)
+ px = cleanChartId(px)
+
+ for _, chart := range *m.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
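+// cleanChartId replaces characters not allowed in chart IDs (spaces, dots, commas) with underscores.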
+func cleanChartId(s string) string {
+ r := strings.NewReplacer(" ", "_", ".", "_", ",", "_")
+ return r.Replace(s)
+}
diff --git a/src/go/plugin/go.d/modules/monit/collect.go b/src/go/plugin/go.d/modules/monit/collect.go
new file mode 100644
index 000000000..580aa6d99
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/collect.go
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package monit
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "golang.org/x/net/html/charset"
+)
+
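+// Monit serves its status document at /_status; format=xml&level=full requests the full XML report.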
+var (
+ urlPathStatus = "/_status"
+ urlQueryStatus = url.Values{"format": {"xml"}, "level": {"full"}}.Encode()
+)
+
+func (m *Monit) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := m.collectStatus(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (m *Monit) collectStatus(mx map[string]int64) error {
+ status, err := m.fetchStatus()
+ if err != nil {
+ return err
+ }
+
+ if status.Server == nil {
+ // not Monit
+ return errors.New("invalid Monit status response: missing server data")
+ }
+
+ mx["uptime"] = status.Server.Uptime
+
+ seen := make(map[string]bool)
+
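+ // register newly discovered services (adding their charts) and update per-service status values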
+ for _, svc := range status.Services {
+ seen[svc.id()] = true
+
+ if _, ok := m.seenServices[svc.id()]; !ok {
+ m.seenServices[svc.id()] = svc
+ m.addServiceCheckCharts(svc, status.Server)
+ }
+
+ px := fmt.Sprintf("service_check_type_%s_name_%s_status_", svc.svcType(), svc.Name)
+
+ for _, v := range []string{"not_monitored", "ok", "initializing", "error"} {
+ mx[px+v] = 0
+ if svc.status() == v {
+ mx[px+v] = 1
+ }
+ }
+ }
+
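+ // drop charts for services that are no longer present in the status output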
+ for id, svc := range m.seenServices {
+ if !seen[id] {
+ delete(m.seenServices, id)
+ m.removeServiceCharts(svc)
+ }
+ }
+
+ return nil
+}
+
+func (m *Monit) fetchStatus() (*monitStatus, error) {
+ req, err := web.NewHTTPRequestWithPath(m.Request, urlPathStatus)
+ if err != nil {
+ return nil, err
+ }
+ req.URL.RawQuery = urlQueryStatus
+
+ var status monitStatus
+ if err := m.doOKDecode(req, &status); err != nil {
+ return nil, err
+ }
+
+ return &status, nil
+}
+
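+// doOKDecode performs the request, verifies a 200 OK response and decodes the XML body into in,
+// honoring the charset declared in the response document.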
+func (m *Monit) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := m.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ dec := xml.NewDecoder(resp.Body)
+ dec.CharsetReader = charset.NewReaderLabel
+
+ if err := dec.Decode(in); err != nil {
+ return fmt.Errorf("error on decoding XML response from '%s': %v", req.URL, err)
+ }
+
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/monit/config_schema.json b/src/go/plugin/go.d/modules/monit/config_schema.json
new file mode 100644
index 000000000..4d23760b3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/config_schema.json
@@ -0,0 +1,185 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Monit collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the Monit server.",
+ "type": "string",
+ "default": "http://127.0.0.1:2812",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true,
+ "default": "admin"
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true,
+ "default": "monit"
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/monit/integrations/monit.md b/src/go/plugin/go.d/modules/monit/integrations/monit.md
new file mode 100644
index 000000000..8d3739ac4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/integrations/monit.md
@@ -0,0 +1,255 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/monit/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/monit/metadata.yaml"
+sidebar_label: "Monit"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Monit
+
+
+<img src="https://netdata.cloud/img/monit.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: monit
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the status of Monit's service checks.
+
+
+It sends HTTP requests to the Monit `/_status?format=xml&level=full` endpoint.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Monit instances running on localhost that are listening on port 2812.
+On startup, it tries to collect metrics from:
+
+- http://127.0.0.1:2812
+
+
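+If your Monit instance listens on a different host or port, auto-detection will not find it; a job with an explicit `url` can be added instead (illustrative sketch, adjust the address to your setup):
+
+```yaml
+jobs:
+  - name: remote
+    url: http://203.0.113.10:2812
+```
+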
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per service
+
+These metrics refer to the monitored service.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| server_hostname | Hostname of the Monit server. |
+| service_check_name | Service check name. |
+| service_check_type | Service check type. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| monit.service_check_status | ok, error, initializing, not_monitored | status |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable TCP PORT
+
+See [Syntax for TCP port](https://mmonit.com/monit/documentation/monit.html#TCP-PORT) for details.
+
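+A minimal sketch of what this looks like in the Monit control file (the `/etc/monit/monitrc` path and the `allow` credentials are assumptions, adjust them to your setup):
+
+```bash
+# Append the TCP port settings to monitrc and reload Monit to apply them.
+cat <<'EOF' | sudo tee -a /etc/monit/monitrc
+set httpd port 2812
+    use address 127.0.0.1
+    allow admin:monit
+EOF
+sudo monit reload
+```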
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/monit.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/monit.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:2812 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | admin | no |
+| password | Password for basic HTTP authentication. | monit | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:2812
+ username: admin
+ password: monit
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Monit with HTTPS enabled and a self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:2812
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:2812
+
+ - name: remote
+ url: http://192.0.2.1:2812
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `monit` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m monit
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `monit` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep monit
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep monit /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep monit
+```
+
+
diff --git a/src/go/plugin/go.d/modules/monit/metadata.yaml b/src/go/plugin/go.d/modules/monit/metadata.yaml
new file mode 100644
index 000000000..d54793984
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/metadata.yaml
@@ -0,0 +1,193 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-monit
+ plugin_name: go.d.plugin
+ module_name: monit
+ monitored_instance:
+ name: Monit
+ link: https://mmonit.com/monit/
+ categories:
+ - data-collection.synthetic-checks
+ icon_filename: monit.png
+ related_resources:
+ integrations:
+ list: []
+ alternative_monitored_instances: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - monit
+ - mmonit
+ - supervision tool
+ - monitrc
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the status of Monit's service checks.
+ method_description: |
+ It sends HTTP requests to the Monit `/_status?format=xml&level=full` endpoint.
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Monit instances running on localhost that are listening on port 2812.
+ On startup, it tries to collect metrics from:
+
+ - http://127.0.0.1:2812
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list:
+ - title: Enable TCP PORT
+ description:
+ See [Syntax for TCP port](https://mmonit.com/monit/documentation/monit.html#TCP-PORT) for details.
+ configuration:
+ file:
+ name: go.d/monit.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:2812
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: "admin"
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: "monit"
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:2812
+ username: admin
+ password: monit
+ - name: HTTPS with self-signed certificate
+ description: Monit with HTTPS enabled and a self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:2812
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:2812
+
+ - name: remote
+ url: http://192.0.2.1:2812
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: service
+ description: These metrics refer to the monitored service.
+ labels:
+ - name: server_hostname
+ description: Hostname of the Monit server.
+ - name: service_check_name
+ description: Service check name.
+ - name: service_check_type
+ description: Service check type.
+ metrics:
+ - name: monit.service_check_status
+ description: Service Check Status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: error
+ - name: initializing
+ - name: not_monitored
diff --git a/src/go/plugin/go.d/modules/monit/monit.go b/src/go/plugin/go.d/modules/monit/monit.go
new file mode 100644
index 000000000..d0fe90b14
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/monit.go
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package monit
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("monit", module.Creator{
+ Create: func() module.Module { return New() },
+ JobConfigSchema: configSchema,
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Monit {
+ return &Monit{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:2812",
+ Username: "admin",
+ Password: "monit",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: baseCharts.Copy(),
+ seenServices: make(map[string]statusServiceCheck),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Monit struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ seenServices map[string]statusServiceCheck
+}
+
+func (m *Monit) Configuration() any {
+ return m.Config
+}
+
+func (m *Monit) Init() error {
+ if m.URL == "" {
+ m.Error("config: monit url is required but not set")
+ return errors.New("config: missing URL")
+ }
+
+ httpClient, err := web.NewHTTPClient(m.Client)
+ if err != nil {
+ m.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ m.httpClient = httpClient
+
+ m.Debugf("using URL %s", m.URL)
+ m.Debugf("using timeout: %s", m.Timeout)
+
+ return nil
+}
+
+func (m *Monit) Check() error {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (m *Monit) Charts() *module.Charts {
+ return m.charts
+}
+
+func (m *Monit) Collect() map[string]int64 {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (m *Monit) Cleanup() {
+ if m.httpClient != nil {
+ m.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/monit/monit_test.go b/src/go/plugin/go.d/modules/monit/monit_test.go
new file mode 100644
index 000000000..7735dcdc2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/monit_test.go
@@ -0,0 +1,371 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package monit
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatus, _ = os.ReadFile("testdata/v5.33.0/status.xml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStatus": dataStatus,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestMonit_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Monit{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestMonit_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ monit := New()
+ monit.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, monit.Init())
+ } else {
+ assert.NoError(t, monit.Init())
+ }
+ })
+ }
+}
+
+func TestMonit_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (monit *Monit, cleanup func())
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: caseOk,
+ },
+ "fail on unexpected XML response": {
+ wantFail: true,
+ prepare: caseUnexpectedXMLResponse,
+ },
+ "fail on invalid data response": {
+ wantFail: true,
+ prepare: caseInvalidDataResponse,
+ },
+ "fail on connection refused": {
+ wantFail: true,
+ prepare: caseConnectionRefused,
+ },
+ "fail on 404 response": {
+ wantFail: true,
+ prepare: case404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ monit, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, monit.Check())
+ } else {
+ assert.NoError(t, monit.Check())
+ }
+ })
+ }
+}
+
+func TestMonit_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestMonit_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (monit *Monit, cleanup func())
+ wantNumOfCharts int
+ wantMetrics map[string]int64
+ }{
+ "success on valid response": {
+ prepare: caseOk,
+ wantNumOfCharts: len(baseCharts) + len(serviceCheckChartsTmpl)*25,
+ wantMetrics: map[string]int64{
+ "service_check_type_directory_name_directoryAlert_status_error": 1,
+ "service_check_type_directory_name_directoryAlert_status_initializing": 0,
+ "service_check_type_directory_name_directoryAlert_status_not_monitored": 0,
+ "service_check_type_directory_name_directoryAlert_status_ok": 0,
+ "service_check_type_directory_name_directoryDisabled_status_error": 0,
+ "service_check_type_directory_name_directoryDisabled_status_initializing": 0,
+ "service_check_type_directory_name_directoryDisabled_status_not_monitored": 1,
+ "service_check_type_directory_name_directoryDisabled_status_ok": 0,
+ "service_check_type_directory_name_directoryNotExists_status_error": 1,
+ "service_check_type_directory_name_directoryNotExists_status_initializing": 0,
+ "service_check_type_directory_name_directoryNotExists_status_not_monitored": 0,
+ "service_check_type_directory_name_directoryNotExists_status_ok": 0,
+ "service_check_type_directory_name_directoryOk_status_error": 0,
+ "service_check_type_directory_name_directoryOk_status_initializing": 0,
+ "service_check_type_directory_name_directoryOk_status_not_monitored": 0,
+ "service_check_type_directory_name_directoryOk_status_ok": 1,
+ "service_check_type_file_name_fileAlert_status_error": 1,
+ "service_check_type_file_name_fileAlert_status_initializing": 0,
+ "service_check_type_file_name_fileAlert_status_not_monitored": 0,
+ "service_check_type_file_name_fileAlert_status_ok": 0,
+ "service_check_type_file_name_fileDisabled_status_error": 0,
+ "service_check_type_file_name_fileDisabled_status_initializing": 0,
+ "service_check_type_file_name_fileDisabled_status_not_monitored": 1,
+ "service_check_type_file_name_fileDisabled_status_ok": 0,
+ "service_check_type_file_name_fileNotExists_status_error": 1,
+ "service_check_type_file_name_fileNotExists_status_initializing": 0,
+ "service_check_type_file_name_fileNotExists_status_not_monitored": 0,
+ "service_check_type_file_name_fileNotExists_status_ok": 0,
+ "service_check_type_file_name_fileOk_status_error": 0,
+ "service_check_type_file_name_fileOk_status_initializing": 0,
+ "service_check_type_file_name_fileOk_status_not_monitored": 0,
+ "service_check_type_file_name_fileOk_status_ok": 1,
+ "service_check_type_filesystem_name_filesystemAlert_status_error": 1,
+ "service_check_type_filesystem_name_filesystemAlert_status_initializing": 0,
+ "service_check_type_filesystem_name_filesystemAlert_status_not_monitored": 0,
+ "service_check_type_filesystem_name_filesystemAlert_status_ok": 0,
+ "service_check_type_filesystem_name_filesystemDisabled_status_error": 0,
+ "service_check_type_filesystem_name_filesystemDisabled_status_initializing": 0,
+ "service_check_type_filesystem_name_filesystemDisabled_status_not_monitored": 1,
+ "service_check_type_filesystem_name_filesystemDisabled_status_ok": 0,
+ "service_check_type_filesystem_name_filesystemNotExists_status_error": 1,
+ "service_check_type_filesystem_name_filesystemNotExists_status_initializing": 0,
+ "service_check_type_filesystem_name_filesystemNotExists_status_not_monitored": 0,
+ "service_check_type_filesystem_name_filesystemNotExists_status_ok": 0,
+ "service_check_type_filesystem_name_filsystemOk_status_error": 0,
+ "service_check_type_filesystem_name_filsystemOk_status_initializing": 0,
+ "service_check_type_filesystem_name_filsystemOk_status_not_monitored": 0,
+ "service_check_type_filesystem_name_filsystemOk_status_ok": 1,
+ "service_check_type_host_name_hostAlert_status_error": 1,
+ "service_check_type_host_name_hostAlert_status_initializing": 0,
+ "service_check_type_host_name_hostAlert_status_not_monitored": 0,
+ "service_check_type_host_name_hostAlert_status_ok": 0,
+ "service_check_type_host_name_hostDisabled_status_error": 0,
+ "service_check_type_host_name_hostDisabled_status_initializing": 0,
+ "service_check_type_host_name_hostDisabled_status_not_monitored": 1,
+ "service_check_type_host_name_hostDisabled_status_ok": 0,
+ "service_check_type_host_name_hostNotExists_status_error": 1,
+ "service_check_type_host_name_hostNotExists_status_initializing": 0,
+ "service_check_type_host_name_hostNotExists_status_not_monitored": 0,
+ "service_check_type_host_name_hostNotExists_status_ok": 0,
+ "service_check_type_host_name_hostOk_status_error": 0,
+ "service_check_type_host_name_hostOk_status_initializing": 0,
+ "service_check_type_host_name_hostOk_status_not_monitored": 0,
+ "service_check_type_host_name_hostOk_status_ok": 1,
+ "service_check_type_network_name_networkAlert_status_error": 1,
+ "service_check_type_network_name_networkAlert_status_initializing": 0,
+ "service_check_type_network_name_networkAlert_status_not_monitored": 0,
+ "service_check_type_network_name_networkAlert_status_ok": 0,
+ "service_check_type_network_name_networkDisabled_status_error": 0,
+ "service_check_type_network_name_networkDisabled_status_initializing": 0,
+ "service_check_type_network_name_networkDisabled_status_not_monitored": 1,
+ "service_check_type_network_name_networkDisabled_status_ok": 0,
+ "service_check_type_network_name_networkNotExists_status_error": 1,
+ "service_check_type_network_name_networkNotExists_status_initializing": 0,
+ "service_check_type_network_name_networkNotExists_status_not_monitored": 0,
+ "service_check_type_network_name_networkNotExists_status_ok": 0,
+ "service_check_type_network_name_networkOk_status_error": 0,
+ "service_check_type_network_name_networkOk_status_initializing": 0,
+ "service_check_type_network_name_networkOk_status_not_monitored": 0,
+ "service_check_type_network_name_networkOk_status_ok": 1,
+ "service_check_type_process_name_processAlert_status_error": 1,
+ "service_check_type_process_name_processAlert_status_initializing": 0,
+ "service_check_type_process_name_processAlert_status_not_monitored": 0,
+ "service_check_type_process_name_processAlert_status_ok": 0,
+ "service_check_type_process_name_processDisabled_status_error": 0,
+ "service_check_type_process_name_processDisabled_status_initializing": 0,
+ "service_check_type_process_name_processDisabled_status_not_monitored": 1,
+ "service_check_type_process_name_processDisabled_status_ok": 0,
+ "service_check_type_process_name_processNotExists_status_error": 1,
+ "service_check_type_process_name_processNotExists_status_initializing": 0,
+ "service_check_type_process_name_processNotExists_status_not_monitored": 0,
+ "service_check_type_process_name_processNotExists_status_ok": 0,
+ "service_check_type_process_name_processOk_status_error": 0,
+ "service_check_type_process_name_processOk_status_initializing": 0,
+ "service_check_type_process_name_processOk_status_not_monitored": 0,
+ "service_check_type_process_name_processOk_status_ok": 1,
+ "service_check_type_system_name_pve-deb-work_status_error": 0,
+ "service_check_type_system_name_pve-deb-work_status_initializing": 0,
+ "service_check_type_system_name_pve-deb-work_status_not_monitored": 0,
+ "service_check_type_system_name_pve-deb-work_status_ok": 1,
+ "uptime": 33,
+ },
+ },
+ "fail on unexpected XML response": {
+ prepare: caseUnexpectedXMLResponse,
+ wantNumOfCharts: len(baseCharts),
+ wantMetrics: nil,
+ },
+ "fail on invalid data response": {
+ prepare: caseInvalidDataResponse,
+ wantNumOfCharts: len(baseCharts),
+ wantMetrics: nil,
+ },
+ "fail on connection refused": {
+ prepare: caseConnectionRefused,
+ wantNumOfCharts: len(baseCharts),
+ wantMetrics: nil,
+ },
+ "fail on 404 response": {
+ prepare: case404,
+ wantNumOfCharts: len(baseCharts),
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ monit, cleanup := test.prepare(t)
+ defer cleanup()
+
+ _ = monit.Check()
+
+ mx := monit.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ module.TestMetricsHasAllChartsDims(t, monit.Charts(), mx)
+ assert.Equal(t, test.wantNumOfCharts, len(*monit.Charts()), "want number of charts")
+ }
+ })
+ }
+}
+
+func caseOk(t *testing.T) (*Monit, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != urlPathStatus || r.URL.RawQuery != urlQueryStatus {
+ w.WriteHeader(http.StatusNotFound)
+ return
+ }
+ _, _ = w.Write(dataStatus)
+ }))
+ monit := New()
+ monit.URL = srv.URL
+ require.NoError(t, monit.Init())
+
+ return monit, srv.Close
+}
+
+func caseUnexpectedXMLResponse(t *testing.T) (*Monit, func()) {
+ t.Helper()
+ data := `<?xml version="1.0" encoding="UTF-8"?>
+<Response>
+ <Status>
+ <Code>200</Code>
+ <Message>Success</Message>
+ </Status>
+ <Data>
+ <User>
+ <ID>12345</ID>
+ <Name>John Doe</Name>
+ <Email>johndoe@example.com</Email>
+ <Roles>
+ <Role>Admin</Role>
+ <Role>User</Role>
+ </Roles>
+ </User>
+ <Order>
+ <OrderID>98765</OrderID>
+ <Date>2024-08-15</Date>
+ <Items>
+ <Item>
+ <Name>Widget A</Name>
+ <Quantity>2</Quantity>
+ <Price>19.99</Price>
+ </Item>
+ <Item>
+ <Name>Gadget B</Name>
+ <Quantity>1</Quantity>
+ <Price>99.99</Price>
+ </Item>
+ </Items>
+ <Total>139.97</Total>
+ </Order>
+ </Data>
+</Response>
+`
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(data))
+ }))
+ monit := New()
+ monit.URL = srv.URL
+ require.NoError(t, monit.Init())
+
+ return monit, srv.Close
+}
+
+func caseInvalidDataResponse(t *testing.T) (*Monit, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ monit := New()
+ monit.URL = srv.URL
+ require.NoError(t, monit.Init())
+
+ return monit, srv.Close
+}
+
+func caseConnectionRefused(t *testing.T) (*Monit, func()) {
+ t.Helper()
+ monit := New()
+ monit.URL = "http://127.0.0.1:65001"
+ require.NoError(t, monit.Init())
+
+ return monit, func() {}
+}
+
+func case404(t *testing.T) (*Monit, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ monit := New()
+ monit.URL = srv.URL
+ require.NoError(t, monit.Init())
+
+ return monit, srv.Close
+}
diff --git a/src/go/plugin/go.d/modules/monit/status.go b/src/go/plugin/go.d/modules/monit/status.go
new file mode 100644
index 000000000..4a87e8c90
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/status.go
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package monit
+
+// status_xml(): https://bitbucket.org/tildeslash/monit/src/5467d37d70c3c63c5760cddb93831bde4e17c14b/src/http/xml.c#lines-631
+type monitStatus struct {
+ Server *statusServer `xml:"server"`
+ Services []statusServiceCheck `xml:"service"`
+}
+
+type statusServer struct {
+ ID string `xml:"id"`
+ Version string `xml:"version"`
+ Uptime int64 `xml:"uptime"`
+ LocalHostname string `xml:"localhostname"`
+}
+
+// status_service(): https://bitbucket.org/tildeslash/monit/src/5467d37d70c3c63c5760cddb93831bde4e17c14b/src/http/xml.c#lines-196
+// struct Service_T: https://bitbucket.org/tildeslash/monit/src/5467d37d70c3c63c5760cddb93831bde4e17c14b/src/monit.h#lines-1212
+type statusServiceCheck struct {
+ Type string `xml:"type,attr"`
+ Name string `xml:"name"`
+
+ Status int `xml:"status"` // Error flags bitmap
+
+ // https://bitbucket.org/tildeslash/monit/src/5467d37d70c3c63c5760cddb93831bde4e17c14b/src/monit.h#lines-269
+ MonitoringStatus int `xml:"monitor"`
+
+ // https://bitbucket.org/tildeslash/monit/src/5467d37d70c3c63c5760cddb93831bde4e17c14b/src/monit.h#lines-254
+ MonitorMode int `xml:"monitormode"`
+
+ // https://bitbucket.org/tildeslash/monit/src/5467d37d70c3c63c5760cddb93831bde4e17c14b/src/monit.h#lines-261
+ OnReboot int `xml:"onreboot"`
+
+ // https://bitbucket.org/tildeslash/monit/src/5467d37d70c3c63c5760cddb93831bde4e17c14b/src/monit.h#lines-248
+ PendingAction int `xml:"pendingaction"`
+}
+
+func (s *statusServiceCheck) id() string {
+ return s.svcType() + ":" + s.Name
+}
+
+func (s *statusServiceCheck) svcType() string {
+ // See enum Service_Type https://bitbucket.org/tildeslash/monit/src/master/src/monit.h
+
+ switch s.Type {
+ case "0":
+ return "filesystem"
+ case "1":
+ return "directory"
+ case "2":
+ return "file"
+ case "3":
+ return "process"
+ case "4":
+ return "host"
+ case "5":
+ return "system"
+ case "6":
+ return "fifo"
+ case "7":
+ return "program"
+ case "8":
+ return "network"
+ default:
+ return "unknown"
+ }
+}
+
+func (s *statusServiceCheck) status() string {
+ // https://bitbucket.org/tildeslash/monit/src/5467d37d70c3c63c5760cddb93831bde4e17c14b/src/http/cervlet.c#lines-2866
+
+ switch st := s.monitoringStatus(); st {
+ case "not_monitored", "initializing":
+ return st
+ default:
+ if s.Status != 0 {
+ return "error"
+ }
+ return "ok"
+ }
+}
+
+func (s *statusServiceCheck) monitoringStatus() string {
+ switch s.MonitoringStatus {
+ case 0:
+ return "not_monitored"
+ case 1:
+ return "monitored"
+ case 2:
+ return "initializing"
+ case 4:
+ return "waiting"
+ default:
+ return "unknown"
+ }
+}
+
+func (s *statusServiceCheck) monitorMode() string {
+ switch s.MonitorMode {
+ case 0:
+ return "active"
+ case 1:
+ return "passive"
+ default:
+ return "unknown"
+ }
+}
+
+func (s *statusServiceCheck) onReboot() string {
+ switch s.OnReboot {
+ case 0:
+ return "start"
+ case 1:
+ return "no_start"
+ default:
+ return "unknown"
+ }
+}
+
+func (s *statusServiceCheck) pendingAction() string {
+ switch s.PendingAction {
+ case 0:
+ return "ignored"
+ case 1:
+ return "alert"
+ case 2:
+ return "restart"
+ case 3:
+ return "stop"
+ case 4:
+ return "exec"
+ case 5:
+ return "unmonitor"
+ case 6:
+ return "start"
+ case 7:
+ return "monitor"
+ default:
+ return "unknown"
+ }
+}
+
+func (s *statusServiceCheck) hasServiceStatus() bool {
+ // https://bitbucket.org/tildeslash/monit/src/5467d37d70c3c63c5760cddb93831bde4e17c14b/src/util.c#lines-1721
+
+ const eventNonExist = 512
+ const eventData = 2048
+
+ return s.monitoringStatus() == "monitored" &&
+ !(s.Status&eventNonExist != 0) &&
+ !(s.Status&eventData != 0)
+}
diff --git a/src/go/plugin/go.d/modules/monit/testdata/config.json b/src/go/plugin/go.d/modules/monit/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/monit/testdata/config.yaml b/src/go/plugin/go.d/modules/monit/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/monit/testdata/v5.33.0/status.xml b/src/go/plugin/go.d/modules/monit/testdata/v5.33.0/status.xml
new file mode 100644
index 000000000..ca4178c6c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/monit/testdata/v5.33.0/status.xml
@@ -0,0 +1,688 @@
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<monit>
+ <server>
+ <id>309dc5d56ccd5964cef9b42d1d8305e7</id>
+ <incarnation>1723810534</incarnation>
+ <version>5.33.0</version>
+ <uptime>33</uptime>
+ <poll>120</poll>
+ <startdelay>0</startdelay>
+ <localhostname>pve-deb-work</localhostname>
+ <controlfile>/etc/monit/monitrc</controlfile>
+ <httpd>
+ <address>127.0.0.1</address>
+ <port>2812</port>
+ <ssl>0</ssl>
+ </httpd>
+ </server>
+ <platform>
+ <name>Linux</name>
+ <release>6.1.0-23-amd64</release>
+ <version>#1 SMP PREEMPT_DYNAMIC Debian 6.1.99-1 (2024-07-15)</version>
+ <machine>x86_64</machine>
+ <cpu>16</cpu>
+ <memory>32864100</memory>
+ <swap>262140</swap>
+ </platform>
+ <service type="3">
+ <name>processOk</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>86510</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <pid>843</pid>
+ <ppid>1</ppid>
+ <uid>0</uid>
+ <euid>0</euid>
+ <gid>0</gid>
+ <uptime>66112</uptime>
+ <threads>1</threads>
+ <children>2</children>
+ <memory>
+ <percent>0.0</percent>
+ <percenttotal>0.1</percenttotal>
+ <kilobyte>5036</kilobyte>
+ <kilobytetotal>34156</kilobytetotal>
+ </memory>
+ <cpu>
+ <percent>-1.0</percent>
+ <percenttotal>-1.0</percenttotal>
+ </cpu>
+ <filedescriptors>
+ <open>9</open>
+ <opentotal>34</opentotal>
+ <limit>
+ <soft>8192</soft>
+ <hard>8192</hard>
+ </limit>
+ </filedescriptors>
+ <read>
+ <bytesgeneric>
+ <count>0</count>
+ <total>1733465</total>
+ </bytesgeneric>
+ <bytes>
+ <count>0</count>
+ <total>135168</total>
+ </bytes>
+ <operations>
+ <count>0</count>
+ <total>23145</total>
+ </operations>
+ </read>
+ <write>
+ <bytesgeneric>
+ <count>0</count>
+ <total>8842272</total>
+ </bytesgeneric>
+ <bytes>
+ <count>0</count>
+ <total>9150464</total>
+ </bytes>
+ <operations>
+ <count>0</count>
+ <total>22890</total>
+ </operations>
+ </write>
+ </service>
+ <service type="3">
+ <name>processDisabled</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>68402</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>0</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="3">
+ <name>processAlert</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>86548</collected_usec>
+ <status>2</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <pid>843</pid>
+ <ppid>1</ppid>
+ <uid>0</uid>
+ <euid>0</euid>
+ <gid>0</gid>
+ <uptime>66112</uptime>
+ <threads>1</threads>
+ <children>2</children>
+ <memory>
+ <percent>0.0</percent>
+ <percenttotal>0.1</percenttotal>
+ <kilobyte>5036</kilobyte>
+ <kilobytetotal>34156</kilobytetotal>
+ </memory>
+ <cpu>
+ <percent>-1.0</percent>
+ <percenttotal>-1.0</percenttotal>
+ </cpu>
+ <filedescriptors>
+ <open>9</open>
+ <opentotal>34</opentotal>
+ <limit>
+ <soft>8192</soft>
+ <hard>8192</hard>
+ </limit>
+ </filedescriptors>
+ <read>
+ <bytesgeneric>
+ <count>0</count>
+ <total>1733465</total>
+ </bytesgeneric>
+ <bytes>
+ <count>0</count>
+ <total>135168</total>
+ </bytes>
+ <operations>
+ <count>0</count>
+ <total>23145</total>
+ </operations>
+ </read>
+ <write>
+ <bytesgeneric>
+ <count>0</count>
+ <total>8842272</total>
+ </bytesgeneric>
+ <bytes>
+ <count>0</count>
+ <total>9150464</total>
+ </bytes>
+ <operations>
+ <count>0</count>
+ <total>22890</total>
+ </operations>
+ </write>
+ </service>
+ <service type="3">
+ <name>processNotExists</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>86595</collected_usec>
+ <status>4608</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="0">
+ <name>filsystemOk</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>86891</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <fstype>ext2</fstype>
+ <fsflags>rw,relatime</fsflags>
+ <mode>660</mode>
+ <uid>0</uid>
+ <gid>6</gid>
+ <block>
+ <percent>19.6</percent>
+ <usage>92.0</usage>
+ <total>469.4</total>
+ </block>
+ <inode>
+ <percent>0.3</percent>
+ <usage>356</usage>
+ <total>124928</total>
+ </inode>
+ <read>
+ <bytes>
+ <count>0</count>
+ <total>5706752</total>
+ </bytes>
+ <operations>
+ <count>0</count>
+ <total>210</total>
+ </operations>
+ </read>
+ <write>
+ <bytes>
+ <count>0</count>
+ <total>1024</total>
+ </bytes>
+ <operations>
+ <count>0</count>
+ <total>1</total>
+ </operations>
+ </write>
+ <servicetime>
+ <read>0.000</read>
+ <write>0.000</write>
+ </servicetime>
+ </service>
+ <service type="0">
+ <name>filesystemDisabled</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>68613</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>0</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="0">
+ <name>filesystemAlert</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>87124</collected_usec>
+ <status>384</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <fstype>ext2</fstype>
+ <fsflags>rw,relatime</fsflags>
+ <mode>660</mode>
+ <uid>0</uid>
+ <gid>6</gid>
+ <block>
+ <percent>19.6</percent>
+ <usage>92.0</usage>
+ <total>469.4</total>
+ </block>
+ <inode>
+ <percent>0.3</percent>
+ <usage>356</usage>
+ <total>124928</total>
+ </inode>
+ <read>
+ <bytes>
+ <count>0</count>
+ <total>5706752</total>
+ </bytes>
+ <operations>
+ <count>0</count>
+ <total>210</total>
+ </operations>
+ </read>
+ <write>
+ <bytes>
+ <count>0</count>
+ <total>1024</total>
+ </bytes>
+ <operations>
+ <count>0</count>
+ <total>1</total>
+ </operations>
+ </write>
+ <servicetime>
+ <read>0.000</read>
+ <write>0.000</write>
+ </servicetime>
+ </service>
+ <service type="0">
+ <name>filesystemNotExists</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>87334</collected_usec>
+ <status>512</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="2">
+ <name>fileOk</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>87339</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <mode>755</mode>
+ <uid>0</uid>
+ <gid>0</gid>
+ <timestamps>
+ <access>1723744256</access>
+ <change>1723744256</change>
+ <modify>1723744256</modify>
+ </timestamps>
+ <size>84820392</size>
+ </service>
+ <service type="2">
+ <name>fileDisabled</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>68835</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>0</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="2">
+ <name>fileAlert</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>87356</collected_usec>
+ <status>384</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <mode>755</mode>
+ <uid>0</uid>
+ <gid>0</gid>
+ <timestamps>
+ <access>1723744256</access>
+ <change>1723744256</change>
+ <modify>1723744256</modify>
+ </timestamps>
+ <size>84820392</size>
+ </service>
+ <service type="2">
+ <name>fileNotExists</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>87371</collected_usec>
+ <status>512</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="1">
+ <name>directoryOk</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>87375</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <mode>775</mode>
+ <uid>0</uid>
+ <gid>0</gid>
+ <timestamps>
+ <access>1723740545</access>
+ <change>1720694060</change>
+ <modify>1720694060</modify>
+ </timestamps>
+ </service>
+ <service type="1">
+ <name>directoryDisabled</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>68957</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>0</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="1">
+ <name>directoryAlert</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>87385</collected_usec>
+ <status>64</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <mode>775</mode>
+ <uid>0</uid>
+ <gid>0</gid>
+ <timestamps>
+ <access>1723740545</access>
+ <change>1720694060</change>
+ <modify>1720694060</modify>
+ </timestamps>
+ </service>
+ <service type="1">
+ <name>directoryNotExists</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>87400</collected_usec>
+ <status>512</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="4">
+ <name>hostOk</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>89652</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <icmp>
+ <type>Ping</type>
+ <responsetime>0.000144</responsetime>
+ </icmp>
+ <port>
+ <hostname>10.20.4.200</hostname>
+ <portnumber>19999</portnumber>
+ <request><![CDATA[/api/v1/info]]></request>
+ <protocol>HTTP</protocol>
+ <type>TCP</type>
+ <responsetime>0.002077</responsetime>
+ </port>
+ </service>
+ <service type="4">
+ <name>hostDisabled</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>69066</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>0</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="4">
+ <name>hostAlert</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>89857</collected_usec>
+ <status>32</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <icmp>
+ <type>Ping</type>
+ <responsetime>0.000069</responsetime>
+ </icmp>
+ <port>
+ <hostname>10.20.4.200</hostname>
+ <portnumber>19991</portnumber>
+ <request><![CDATA[/api/v1/info]]></request>
+ <protocol>HTTP</protocol>
+ <type>TCP</type>
+ <responsetime>-1.000000</responsetime>
+ </port>
+ </service>
+ <service type="4">
+ <name>hostNotExists</name>
+ <collected_sec>1723810549</collected_sec>
+ <collected_usec>94459</collected_usec>
+ <status>16384</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <icmp>
+ <type>Ping</type>
+ <responsetime>-1.000000</responsetime>
+ </icmp>
+ <port>
+ <hostname>10.20.4.233</hostname>
+ <portnumber>19999</portnumber>
+ <request><![CDATA[/api/v1/info]]></request>
+ <protocol>HTTP</protocol>
+ <type>TCP</type>
+ <responsetime>-1.000000</responsetime>
+ </port>
+ </service>
+ <service type="8">
+ <name>networkOk</name>
+ <collected_sec>1723810549</collected_sec>
+ <collected_usec>94801</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <link>
+ <state>1</state>
+ <speed>-1000000</speed>
+ <duplex>-1</duplex>
+ <download>
+ <packets>
+ <now>0</now>
+ <total>319258</total>
+ </packets>
+ <bytes>
+ <now>0</now>
+ <total>714558077</total>
+ </bytes>
+ <errors>
+ <now>0</now>
+ <total>0</total>
+ </errors>
+ </download>
+ <upload>
+ <packets>
+ <now>0</now>
+ <total>172909</total>
+ </packets>
+ <bytes>
+ <now>0</now>
+ <total>25128489</total>
+ </bytes>
+ <errors>
+ <now>0</now>
+ <total>0</total>
+ </errors>
+ </upload>
+ </link>
+ </service>
+ <service type="8">
+ <name>networkDisabled</name>
+ <collected_sec>1723810534</collected_sec>
+ <collected_usec>69103</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>0</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ </service>
+ <service type="8">
+ <name>networkAlert</name>
+ <collected_sec>1723810549</collected_sec>
+ <collected_usec>94969</collected_usec>
+ <status>8388608</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <link>
+ <state>0</state>
+ <speed>-1</speed>
+ <duplex>-1</duplex>
+ <download>
+ <packets>
+ <now>-1</now>
+ <total>-1</total>
+ </packets>
+ <bytes>
+ <now>-1</now>
+ <total>-1</total>
+ </bytes>
+ <errors>
+ <now>-1</now>
+ <total>-1</total>
+ </errors>
+ </download>
+ <upload>
+ <packets>
+ <now>-1</now>
+ <total>-1</total>
+ </packets>
+ <bytes>
+ <now>-1</now>
+ <total>-1</total>
+ </bytes>
+ <errors>
+ <now>-1</now>
+ <total>-1</total>
+ </errors>
+ </upload>
+ </link>
+ </service>
+ <service type="8">
+ <name>networkNotExists</name>
+ <collected_sec>1723810549</collected_sec>
+ <collected_usec>94992</collected_usec>
+ <status>8388608</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <link>
+ <state>-1</state>
+ <speed>-1</speed>
+ <duplex>-1</duplex>
+ <download>
+ <packets>
+ <now>-1</now>
+ <total>-1</total>
+ </packets>
+ <bytes>
+ <now>-1</now>
+ <total>-1</total>
+ </bytes>
+ <errors>
+ <now>-1</now>
+ <total>-1</total>
+ </errors>
+ </download>
+ <upload>
+ <packets>
+ <now>-1</now>
+ <total>-1</total>
+ </packets>
+ <bytes>
+ <now>-1</now>
+ <total>-1</total>
+ </bytes>
+ <errors>
+ <now>-1</now>
+ <total>-1</total>
+ </errors>
+ </upload>
+ </link>
+ </service>
+ <service type="5">
+ <name>pve-deb-work</name>
+ <collected_sec>1723810549</collected_sec>
+ <collected_usec>94992</collected_usec>
+ <status>0</status>
+ <status_hint>0</status_hint>
+ <monitor>1</monitor>
+ <monitormode>0</monitormode>
+ <onreboot>0</onreboot>
+ <pendingaction>0</pendingaction>
+ <filedescriptors>
+ <allocated>1664</allocated>
+ <unused>0</unused>
+ <maximum>9223372036854775807</maximum>
+ </filedescriptors>
+ <system>
+ <load>
+ <avg01>0.00</avg01>
+ <avg05>0.04</avg05>
+ <avg15>0.03</avg15>
+ </load>
+ <cpu>
+ <user>0.0</user>
+ <system>0.0</system>
+ <nice>0.0</nice>
+ <wait>0.0</wait>
+ <hardirq>0.0</hardirq>
+ <softirq>0.0</softirq>
+ <steal>0.0</steal>
+ <guest>0.0</guest>
+ <guestnice>0.0</guestnice>
+ </cpu>
+ <memory>
+ <percent>3.1</percent>
+ <kilobyte>1020120</kilobyte>
+ </memory>
+ <swap>
+ <percent>0.0</percent>
+ <kilobyte>0</kilobyte>
+ </swap>
+ </system>
+ </service>
+</monit>
diff --git a/src/go/plugin/go.d/modules/mysql/README.md b/src/go/plugin/go.d/modules/mysql/README.md
new file mode 120000
index 000000000..edf116dee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/README.md
@@ -0,0 +1 @@
+integrations/mysql.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/charts.go b/src/go/plugin/go.d/modules/mysql/charts.go
new file mode 100644
index 000000000..bb5089114
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/charts.go
@@ -0,0 +1,1239 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioBandwidth = module.Priority + iota
+ prioQueries
+ prioQueriesType
+ prioHandlers
+ prioTableOpenCacheOverflows
+ prioTableLocks
+ prioTableJoinIssues
+ prioTableSortIssues
+ prioTmpOperations
+ prioConnections
+ prioActiveConnections
+ prioBinlogCache
+ prioBinlogStatementCache
+ prioThreads
+ prioThreadsCreated
+ prioThreadCacheMisses
+ prioInnoDBIO
+ prioInnoDBIOOperations
+ prioInnoDBIOPendingOperations
+ prioInnoDBLog
+ prioInnoDBOSLog
+ prioInnoDBOSLogFsyncWrites
+ prioInnoDBOSLogIO
+ prioInnoDBCurRowLock
+ prioInnoDBRows
+ prioInnoDBBufferPoolPages
+ prioInnoDBBufferPoolPagesFlushed
+ prioInnoDBBufferPoolBytes
+ prioInnoDBBufferPoolReadAhead
+ prioInnoDBBufferPoolReadAheadRnd
+ prioInnoDBBufferPoolOperations
+ prioMyISAMKeyBlocks
+ prioMyISAMKeyRequests
+ prioMyISAMKeyDiskOperations
+ prioOpenFiles
+ prioOpenFilesRate
+ prioConnectionErrors
+ prioOpenedTables
+ prioOpenTables
+ prioProcessListFetchQueryDuration
+ prioProcessListQueries
+ prioProcessListLongestQueryDuration
+ prioInnoDBDeadlocks
+ prioQCacheOperations
+ prioQCacheQueries
+ prioQCacheFreeMem
+ prioQCacheMemBlocks
+ prioGaleraWriteSets
+ prioGaleraBytes
+ prioGaleraQueue
+ prioGaleraConflicts
+ prioGaleraFlowControl
+ prioGaleraClusterStatus
+ prioGaleraClusterState
+ prioGaleraClusterSize
+ prioGaleraClusterWeight
+ prioGaleraClusterConnectionStatus
+ prioGaleraReadinessState
+ prioGaleraOpenTransactions
+ prioGaleraThreadCount
+ prioSlaveSecondsBehindMaster
+ prioSlaveSQLIOThreadRunningState
+ prioUserStatsCPUTime
+ prioUserStatsRows
+ prioUserStatsCommands
+ prioUserStatsDeniedCommands
+ prioUserStatsTransactions
+ prioUserStatsBinlogWritten
+ prioUserStatsEmptyQueries
+ prioUserStatsConnections
+ prioUserStatsLostConnections
+ prioUserStatsDeniedConnections
+)
+
+var baseCharts = module.Charts{
+ chartBandwidth.Copy(),
+ chartQueries.Copy(),
+ chartQueriesType.Copy(),
+ chartHandlers.Copy(),
+ chartTableLocks.Copy(),
+ chartTableJoinIssues.Copy(),
+ chartTableSortIssues.Copy(),
+ chartTmpOperations.Copy(),
+ chartConnections.Copy(),
+ chartActiveConnections.Copy(),
+ chartThreads.Copy(),
+ chartThreadCreationRate.Copy(),
+ chartThreadsCacheMisses.Copy(),
+ chartInnoDBIO.Copy(),
+ chartInnoDBIOOperations.Copy(),
+ chartInnoDBPendingIOOperations.Copy(),
+ chartInnoDBLogOperations.Copy(),
+ chartInnoDBCurrentRowLocks.Copy(),
+ chartInnoDBRowsOperations.Copy(),
+ chartInnoDBBufferPoolPages.Copy(),
+ chartInnoDBBufferPoolPagesFlushed.Copy(),
+ chartInnoDBBufferPoolBytes.Copy(),
+ chartInnoDBBufferPoolReadAhead.Copy(),
+ chartInnoDBBufferPoolReadAheadRnd.Copy(),
+ chartInnoDBBufferPoolOperations.Copy(),
+ chartOpenFiles.Copy(),
+ chartOpenedFilesRate.Copy(),
+ chartConnectionErrors.Copy(),
+ chartOpenedTables.Copy(),
+ chartOpenTables.Copy(),
+ chartProcessListFetchQueryDuration.Copy(),
+ chartProcessListQueries.Copy(),
+ chartProcessListLongestQueryDuration.Copy(),
+}
+
+var (
+ chartBandwidth = module.Chart{
+ ID: "net",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "bandwidth",
+ Ctx: "mysql.net",
+ Type: module.Area,
+ Priority: prioBandwidth,
+ Dims: module.Dims{
+ {ID: "bytes_received", Name: "in", Algo: module.Incremental, Mul: 8, Div: 1000},
+ {ID: "bytes_sent", Name: "out", Algo: module.Incremental, Mul: -8, Div: 1000},
+ },
+ }
+ chartQueries = module.Chart{
+ ID: "queries",
+ Title: "Queries",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "mysql.queries",
+ Priority: prioQueries,
+ Dims: module.Dims{
+ {ID: "queries", Name: "queries", Algo: module.Incremental},
+ {ID: "questions", Name: "questions", Algo: module.Incremental},
+ {ID: "slow_queries", Name: "slow_queries", Algo: module.Incremental},
+ },
+ }
+ chartQueriesType = module.Chart{
+ ID: "queries_type",
+ Title: "Queries By Type",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "mysql.queries_type",
+ Type: module.Stacked,
+ Priority: prioQueriesType,
+ Dims: module.Dims{
+ {ID: "com_select", Name: "select", Algo: module.Incremental},
+ {ID: "com_delete", Name: "delete", Algo: module.Incremental},
+ {ID: "com_update", Name: "update", Algo: module.Incremental},
+ {ID: "com_insert", Name: "insert", Algo: module.Incremental},
+ {ID: "com_replace", Name: "replace", Algo: module.Incremental},
+ },
+ }
+ chartHandlers = module.Chart{
+ ID: "handlers",
+ Title: "Handlers",
+ Units: "handlers/s",
+ Fam: "handlers",
+ Ctx: "mysql.handlers",
+ Priority: prioHandlers,
+ Dims: module.Dims{
+ {ID: "handler_commit", Name: "commit", Algo: module.Incremental},
+ {ID: "handler_delete", Name: "delete", Algo: module.Incremental},
+ {ID: "handler_prepare", Name: "prepare", Algo: module.Incremental},
+ {ID: "handler_read_first", Name: "read first", Algo: module.Incremental},
+ {ID: "handler_read_key", Name: "read key", Algo: module.Incremental},
+ {ID: "handler_read_next", Name: "read next", Algo: module.Incremental},
+ {ID: "handler_read_prev", Name: "read prev", Algo: module.Incremental},
+ {ID: "handler_read_rnd", Name: "read rnd", Algo: module.Incremental},
+ {ID: "handler_read_rnd_next", Name: "read rnd next", Algo: module.Incremental},
+ {ID: "handler_rollback", Name: "rollback", Algo: module.Incremental},
+ {ID: "handler_savepoint", Name: "savepoint", Algo: module.Incremental},
+ {ID: "handler_savepoint_rollback", Name: "savepointrollback", Algo: module.Incremental},
+ {ID: "handler_update", Name: "update", Algo: module.Incremental},
+ {ID: "handler_write", Name: "write", Algo: module.Incremental},
+ },
+ }
+ chartTableOpenCacheOverflows = module.Chart{
+ ID: "table_open_cache_overflows",
+ Title: "Table open cache overflows",
+ Units: "overflows/s",
+ Fam: "open cache",
+ Ctx: "mysql.table_open_cache_overflows",
+ Priority: prioTableOpenCacheOverflows,
+ Dims: module.Dims{
+ {ID: "table_open_cache_overflows", Name: "open_cache", Algo: module.Incremental},
+ },
+ }
+ chartTableLocks = module.Chart{
+ ID: "table_locks",
+ Title: "Table Locks",
+ Units: "locks/s",
+ Fam: "locks",
+ Ctx: "mysql.table_locks",
+ Priority: prioTableLocks,
+ Dims: module.Dims{
+ {ID: "table_locks_immediate", Name: "immediate", Algo: module.Incremental},
+ {ID: "table_locks_waited", Name: "waited", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartTableJoinIssues = module.Chart{
+ ID: "join_issues",
+ Title: "Table Select Join Issues",
+ Units: "joins/s",
+ Fam: "issues",
+ Ctx: "mysql.join_issues",
+ Priority: prioTableJoinIssues,
+ Dims: module.Dims{
+ {ID: "select_full_join", Name: "full join", Algo: module.Incremental},
+ {ID: "select_full_range_join", Name: "full range join", Algo: module.Incremental},
+ {ID: "select_range", Name: "range", Algo: module.Incremental},
+ {ID: "select_range_check", Name: "range check", Algo: module.Incremental},
+ {ID: "select_scan", Name: "scan", Algo: module.Incremental},
+ },
+ }
+ chartTableSortIssues = module.Chart{
+ ID: "sort_issues",
+ Title: "Table Sort Issues",
+ Units: "issues/s",
+ Fam: "issues",
+ Ctx: "mysql.sort_issues",
+ Priority: prioTableSortIssues,
+ Dims: module.Dims{
+ {ID: "sort_merge_passes", Name: "merge passes", Algo: module.Incremental},
+ {ID: "sort_range", Name: "range", Algo: module.Incremental},
+ {ID: "sort_scan", Name: "scan", Algo: module.Incremental},
+ },
+ }
+ chartTmpOperations = module.Chart{
+ ID: "tmp",
+ Title: "Tmp Operations",
+ Units: "events/s",
+ Fam: "temporaries",
+ Ctx: "mysql.tmp",
+ Priority: prioTmpOperations,
+ Dims: module.Dims{
+ {ID: "created_tmp_disk_tables", Name: "disk tables", Algo: module.Incremental},
+ {ID: "created_tmp_files", Name: "files", Algo: module.Incremental},
+ {ID: "created_tmp_tables", Name: "tables", Algo: module.Incremental},
+ },
+ }
+ chartConnections = module.Chart{
+ ID: "connections",
+ Title: "Connections",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "mysql.connections",
+ Priority: prioConnections,
+ Dims: module.Dims{
+ {ID: "connections", Name: "all", Algo: module.Incremental},
+ {ID: "aborted_connects", Name: "aborted", Algo: module.Incremental},
+ },
+ }
+ chartActiveConnections = module.Chart{
+ ID: "connections_active",
+ Title: "Active Connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "mysql.connections_active",
+ Priority: prioActiveConnections,
+ Dims: module.Dims{
+ {ID: "threads_connected", Name: "active"},
+ {ID: "max_connections", Name: "limit"},
+ {ID: "max_used_connections", Name: "max active"},
+ },
+ }
+ chartThreads = module.Chart{
+ ID: "threads",
+ Title: "Threads",
+ Units: "threads",
+ Fam: "threads",
+ Ctx: "mysql.threads",
+ Priority: prioThreads,
+ Dims: module.Dims{
+ {ID: "threads_connected", Name: "connected"},
+ {ID: "threads_cached", Name: "cached", Mul: -1},
+ {ID: "threads_running", Name: "running"},
+ },
+ }
+ chartThreadCreationRate = module.Chart{
+ ID: "threads_creation_rate",
+ Title: "Threads Creation Rate",
+ Units: "threads/s",
+ Fam: "threads",
+ Ctx: "mysql.threads_created",
+ Priority: prioThreadsCreated,
+ Dims: module.Dims{
+ {ID: "threads_created", Name: "created", Algo: module.Incremental},
+ },
+ }
+ chartThreadsCacheMisses = module.Chart{
+ ID: "thread_cache_misses",
+ Title: "Threads Cache Misses",
+ Units: "misses",
+ Fam: "threads",
+ Ctx: "mysql.thread_cache_misses",
+ Type: module.Area,
+ Priority: prioThreadCacheMisses,
+ Dims: module.Dims{
+ {ID: "thread_cache_misses", Name: "misses", Div: 100},
+ },
+ }
+ chartInnoDBIO = module.Chart{
+ ID: "innodb_io",
+ Title: "InnoDB I/O Bandwidth",
+ Units: "KiB/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_io",
+ Type: module.Area,
+ Priority: prioInnoDBIO,
+ Dims: module.Dims{
+ {ID: "innodb_data_read", Name: "read", Algo: module.Incremental, Div: 1024},
+ {ID: "innodb_data_written", Name: "write", Algo: module.Incremental, Div: 1024},
+ },
+ }
+ chartInnoDBIOOperations = module.Chart{
+ ID: "innodb_io_ops",
+ Title: "InnoDB I/O Operations",
+ Units: "operations/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_io_ops",
+ Priority: prioInnoDBIOOperations,
+ Dims: module.Dims{
+ {ID: "innodb_data_reads", Name: "reads", Algo: module.Incremental},
+ {ID: "innodb_data_writes", Name: "writes", Algo: module.Incremental, Mul: -1},
+ {ID: "innodb_data_fsyncs", Name: "fsyncs", Algo: module.Incremental},
+ },
+ }
+ chartInnoDBPendingIOOperations = module.Chart{
+ ID: "innodb_io_pending_ops",
+ Title: "InnoDB Pending I/O Operations",
+ Units: "operations",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_io_pending_ops",
+ Priority: prioInnoDBIOPendingOperations,
+ Dims: module.Dims{
+ {ID: "innodb_data_pending_reads", Name: "reads"},
+ {ID: "innodb_data_pending_writes", Name: "writes", Mul: -1},
+ {ID: "innodb_data_pending_fsyncs", Name: "fsyncs"},
+ },
+ }
+ chartInnoDBLogOperations = module.Chart{
+ ID: "innodb_log",
+ Title: "InnoDB Log Operations",
+ Units: "operations/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_log",
+ Priority: prioInnoDBLog,
+ Dims: module.Dims{
+ {ID: "innodb_log_waits", Name: "waits", Algo: module.Incremental},
+ {ID: "innodb_log_write_requests", Name: "write requests", Algo: module.Incremental, Mul: -1},
+ {ID: "innodb_log_writes", Name: "writes", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartInnoDBCurrentRowLocks = module.Chart{
+ ID: "innodb_cur_row_lock",
+ Title: "InnoDB Current Row Locks",
+ Units: "operations",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_cur_row_lock",
+ Type: module.Area,
+ Priority: prioInnoDBCurRowLock,
+ Dims: module.Dims{
+ {ID: "innodb_row_lock_current_waits", Name: "current waits"},
+ },
+ }
+ chartInnoDBRowsOperations = module.Chart{
+ ID: "innodb_rows",
+ Title: "InnoDB Row Operations",
+ Units: "operations/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_rows",
+ Type: module.Area,
+ Priority: prioInnoDBRows,
+ Dims: module.Dims{
+ {ID: "innodb_rows_inserted", Name: "inserted", Algo: module.Incremental},
+ {ID: "innodb_rows_read", Name: "read", Algo: module.Incremental},
+ {ID: "innodb_rows_updated", Name: "updated", Algo: module.Incremental},
+ {ID: "innodb_rows_deleted", Name: "deleted", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartInnoDBBufferPoolPages = module.Chart{
+ ID: "innodb_buffer_pool_pages",
+ Title: "InnoDB Buffer Pool Pages",
+ Units: "pages",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_buffer_pool_pages",
+ Priority: prioInnoDBBufferPoolPages,
+ Dims: module.Dims{
+ {ID: "innodb_buffer_pool_pages_data", Name: "data"},
+ {ID: "innodb_buffer_pool_pages_dirty", Name: "dirty", Mul: -1},
+ {ID: "innodb_buffer_pool_pages_free", Name: "free"},
+ {ID: "innodb_buffer_pool_pages_misc", Name: "misc", Mul: -1},
+ {ID: "innodb_buffer_pool_pages_total", Name: "total"},
+ },
+ }
+ chartInnoDBBufferPoolPagesFlushed = module.Chart{
+ ID: "innodb_buffer_pool_flush_pages_requests",
+ Title: "InnoDB Buffer Pool Flush Pages Requests",
+ Units: "requests/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_buffer_pool_pages_flushed",
+ Priority: prioInnoDBBufferPoolPagesFlushed,
+ Dims: module.Dims{
+ {ID: "innodb_buffer_pool_pages_flushed", Name: "flush pages", Algo: module.Incremental},
+ },
+ }
+ chartInnoDBBufferPoolBytes = module.Chart{
+ ID: "innodb_buffer_pool_bytes",
+ Title: "InnoDB Buffer Pool Bytes",
+ Units: "MiB",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_buffer_pool_bytes",
+ Type: module.Area,
+ Priority: prioInnoDBBufferPoolBytes,
+ Dims: module.Dims{
+ {ID: "innodb_buffer_pool_bytes_data", Name: "data", Div: 1024 * 1024},
+ {ID: "innodb_buffer_pool_bytes_dirty", Name: "dirty", Mul: -1, Div: 1024 * 1024},
+ },
+ }
+ chartInnoDBBufferPoolReadAhead = module.Chart{
+ ID: "innodb_buffer_pool_read_ahead",
+ Title: "InnoDB Buffer Pool Read Pages",
+ Units: "pages/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_buffer_pool_read_ahead",
+ Type: module.Area,
+ Priority: prioInnoDBBufferPoolReadAhead,
+ Dims: module.Dims{
+ {ID: "innodb_buffer_pool_read_ahead", Name: "all", Algo: module.Incremental},
+ {ID: "innodb_buffer_pool_read_ahead_evicted", Name: "evicted", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartInnoDBBufferPoolReadAheadRnd = module.Chart{
+ ID: "innodb_buffer_pool_read_ahead_rnd",
+ Title: "InnoDB Buffer Pool Random Read-Aheads",
+ Units: "operations/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_buffer_pool_read_ahead_rnd",
+ Priority: prioInnoDBBufferPoolReadAheadRnd,
+ Dims: module.Dims{
+ {ID: "innodb_buffer_pool_read_ahead_rnd", Name: "read-ahead", Algo: module.Incremental},
+ },
+ }
+ chartInnoDBBufferPoolOperations = module.Chart{
+ ID: "innodb_buffer_pool_ops",
+ Title: "InnoDB Buffer Pool Operations",
+ Units: "operations/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_buffer_pool_ops",
+ Type: module.Area,
+ Priority: prioInnoDBBufferPoolOperations,
+ Dims: module.Dims{
+ {ID: "innodb_buffer_pool_reads", Name: "disk reads", Algo: module.Incremental},
+ {ID: "innodb_buffer_pool_wait_free", Name: "wait free", Algo: module.Incremental, Mul: -1, Div: 1},
+ },
+ }
+ chartOpenFiles = module.Chart{
+ ID: "files",
+ Title: "Open Files",
+ Units: "files",
+ Fam: "files",
+ Ctx: "mysql.files",
+ Priority: prioOpenFiles,
+ Dims: module.Dims{
+ {ID: "open_files", Name: "files"},
+ },
+ }
+ chartOpenedFilesRate = module.Chart{
+ ID: "files_rate",
+ Title: "Opened Files Rate",
+ Units: "files/s",
+ Fam: "files",
+ Ctx: "mysql.files_rate",
+ Priority: prioOpenFilesRate,
+ Dims: module.Dims{
+ {ID: "opened_files", Name: "files", Algo: module.Incremental},
+ },
+ }
+ chartConnectionErrors = module.Chart{
+ ID: "connection_errors",
+ Title: "Connection Errors",
+ Units: "errors/s",
+ Fam: "connections",
+ Ctx: "mysql.connection_errors",
+ Priority: prioConnectionErrors,
+ Dims: module.Dims{
+ {ID: "connection_errors_accept", Name: "accept", Algo: module.Incremental},
+ {ID: "connection_errors_internal", Name: "internal", Algo: module.Incremental},
+ {ID: "connection_errors_max_connections", Name: "max", Algo: module.Incremental},
+ {ID: "connection_errors_peer_address", Name: "peer addr", Algo: module.Incremental},
+ {ID: "connection_errors_select", Name: "select", Algo: module.Incremental},
+ {ID: "connection_errors_tcpwrap", Name: "tcpwrap", Algo: module.Incremental},
+ },
+ }
+ chartOpenedTables = module.Chart{
+ ID: "opened_tables",
+ Title: "Opened Tables",
+ Units: "tables/s",
+ Fam: "open tables",
+ Ctx: "mysql.opened_tables",
+ Priority: prioOpenedTables,
+ Dims: module.Dims{
+ {ID: "opened_tables", Name: "tables", Algo: module.Incremental},
+ },
+ }
+ chartOpenTables = module.Chart{
+ ID: "open_tables",
+ Title: "Open Tables",
+ Units: "tables",
+ Fam: "open tables",
+ Ctx: "mysql.open_tables",
+ Type: module.Area,
+ Priority: prioOpenTables,
+ Dims: module.Dims{
+ {ID: "table_open_cache", Name: "cache"},
+ {ID: "open_tables", Name: "tables"},
+ },
+ }
+ chartProcessListFetchQueryDuration = module.Chart{
+ ID: "process_list_fetch_duration",
+ Title: "Process List Fetch Duration",
+ Units: "milliseconds",
+ Fam: "process list",
+ Ctx: "mysql.process_list_fetch_query_duration",
+ Priority: prioProcessListFetchQueryDuration,
+ Dims: module.Dims{
+ {ID: "process_list_fetch_query_duration", Name: "duration"},
+ },
+ }
+ chartProcessListQueries = module.Chart{
+ ID: "process_list_queries_count",
+ Title: "Queries Count",
+ Units: "queries",
+ Fam: "process list",
+ Ctx: "mysql.process_list_queries_count",
+ Type: module.Stacked,
+ Priority: prioProcessListQueries,
+ Dims: module.Dims{
+ {ID: "process_list_queries_count_system", Name: "system"},
+ {ID: "process_list_queries_count_user", Name: "user"},
+ },
+ }
+ chartProcessListLongestQueryDuration = module.Chart{
+ ID: "process_list_longest_query_duration",
+ Title: "Longest Query Duration",
+ Units: "seconds",
+ Fam: "process list",
+ Ctx: "mysql.process_list_longest_query_duration",
+ Priority: prioProcessListLongestQueryDuration,
+ Dims: module.Dims{
+ {ID: "process_list_longest_query_duration", Name: "duration"},
+ },
+ }
+)
+
+var chartsInnoDBOSLog = module.Charts{
+ chartInnoDBOSLogPendingOperations.Copy(),
+ chartInnoDBOSLogOperations.Copy(),
+ chartInnoDBOSLogIO.Copy(),
+}
+
+var (
+ chartInnoDBOSLogPendingOperations = module.Chart{
+ ID: "innodb_os_log",
+ Title: "InnoDB OS Log Pending Operations",
+ Units: "operations",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_os_log",
+ Priority: prioInnoDBOSLog,
+ Dims: module.Dims{
+ {ID: "innodb_os_log_pending_fsyncs", Name: "fsyncs"},
+ {ID: "innodb_os_log_pending_writes", Name: "writes", Mul: -1},
+ },
+ }
+ chartInnoDBOSLogOperations = module.Chart{
+ ID: "innodb_os_log_fsync_writes",
+ Title: "InnoDB OS Log Operations",
+ Units: "operations/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_os_log_fsync_writes",
+ Priority: prioInnoDBOSLogFsyncWrites,
+ Dims: module.Dims{
+ {ID: "innodb_os_log_fsyncs", Name: "fsyncs", Algo: module.Incremental},
+ },
+ }
+ chartInnoDBOSLogIO = module.Chart{
+ ID: "innodb_os_log_io",
+ Title: "InnoDB OS Log Bandwidth",
+ Units: "KiB/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_os_log_io",
+ Type: module.Area,
+ Priority: prioInnoDBOSLogIO,
+ Dims: module.Dims{
+ {ID: "innodb_os_log_written", Name: "write", Algo: module.Incremental, Mul: -1, Div: 1024},
+ },
+ }
+)
+
+var chartInnoDBDeadlocks = module.Chart{
+ ID: "innodb_deadlocks",
+ Title: "InnoDB Deadlocks",
+ Units: "operations/s",
+ Fam: "innodb",
+ Ctx: "mysql.innodb_deadlocks",
+ Type: module.Area,
+ Priority: prioInnoDBDeadlocks,
+ Dims: module.Dims{
+ {ID: "innodb_deadlocks", Name: "deadlocks", Algo: module.Incremental},
+ },
+}
+
+var chartsQCache = module.Charts{
+ chartQCacheOperations.Copy(),
+ chartQCacheQueries.Copy(),
+ chartQCacheFreeMemory.Copy(),
+ chartQCacheMemoryBlocks.Copy(),
+}
+
+var (
+ chartQCacheOperations = module.Chart{
+ ID: "qcache_ops",
+ Title: "QCache Operations",
+ Units: "queries/s",
+ Fam: "qcache",
+ Ctx: "mysql.qcache_ops",
+ Priority: prioQCacheOperations,
+ Dims: module.Dims{
+ {ID: "qcache_hits", Name: "hits", Algo: module.Incremental},
+ {ID: "qcache_lowmem_prunes", Name: "lowmem prunes", Algo: module.Incremental, Mul: -1},
+ {ID: "qcache_inserts", Name: "inserts", Algo: module.Incremental},
+ {ID: "qcache_not_cached", Name: "not cached", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartQCacheQueries = module.Chart{
+ ID: "qcache",
+ Title: "QCache Queries in Cache",
+ Units: "queries",
+ Fam: "qcache",
+ Ctx: "mysql.qcache",
+ Priority: prioQCacheQueries,
+ Dims: module.Dims{
+ {ID: "qcache_queries_in_cache", Name: "queries", Algo: module.Absolute},
+ },
+ }
+ chartQCacheFreeMemory = module.Chart{
+ ID: "qcache_freemem",
+ Title: "QCache Free Memory",
+ Units: "MiB",
+ Fam: "qcache",
+ Ctx: "mysql.qcache_freemem",
+ Type: module.Area,
+ Priority: prioQCacheFreeMem,
+ Dims: module.Dims{
+ {ID: "qcache_free_memory", Name: "free", Div: 1024 * 1024},
+ },
+ }
+ chartQCacheMemoryBlocks = module.Chart{
+ ID: "qcache_memblocks",
+ Title: "QCache Memory Blocks",
+ Units: "blocks",
+ Fam: "qcache",
+ Ctx: "mysql.qcache_memblocks",
+ Priority: prioQCacheMemBlocks,
+ Dims: module.Dims{
+ {ID: "qcache_free_blocks", Name: "free"},
+ {ID: "qcache_total_blocks", Name: "total"},
+ },
+ }
+)
+
+var chartsGalera = module.Charts{
+ chartGaleraWriteSets.Copy(),
+ chartGaleraBytes.Copy(),
+ chartGaleraQueue.Copy(),
+ chartGaleraConflicts.Copy(),
+ chartGaleraFlowControl.Copy(),
+ chartGaleraClusterStatus.Copy(),
+ chartGaleraClusterState.Copy(),
+ chartGaleraClusterSize.Copy(),
+ chartGaleraClusterWeight.Copy(),
+ chartGaleraClusterConnectionStatus.Copy(),
+ chartGaleraReadinessState.Copy(),
+ chartGaleraOpenTransactions.Copy(),
+ chartGaleraThreads.Copy(),
+}
+var (
+ chartGaleraWriteSets = module.Chart{
+ ID: "galera_writesets",
+ Title: "Replicated Writesets",
+ Units: "writesets/s",
+ Fam: "galera",
+ Ctx: "mysql.galera_writesets",
+ Priority: prioGaleraWriteSets,
+ Dims: module.Dims{
+ {ID: "wsrep_received", Name: "rx", Algo: module.Incremental},
+ {ID: "wsrep_replicated", Name: "tx", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartGaleraBytes = module.Chart{
+ ID: "galera_bytes",
+ Title: "Replicated Bytes",
+ Units: "KiB/s",
+ Fam: "galera",
+ Ctx: "mysql.galera_bytes",
+ Type: module.Area,
+ Priority: prioGaleraBytes,
+ Dims: module.Dims{
+ {ID: "wsrep_received_bytes", Name: "rx", Algo: module.Incremental, Div: 1024},
+ {ID: "wsrep_replicated_bytes", Name: "tx", Algo: module.Incremental, Mul: -1, Div: 1024},
+ },
+ }
+ chartGaleraQueue = module.Chart{
+ ID: "galera_queue",
+ Title: "Galera Queue",
+ Units: "writesets",
+ Fam: "galera",
+ Ctx: "mysql.galera_queue",
+ Priority: prioGaleraQueue,
+ Dims: module.Dims{
+ {ID: "wsrep_local_recv_queue", Name: "rx"},
+ {ID: "wsrep_local_send_queue", Name: "tx", Mul: -1},
+ },
+ }
+ chartGaleraConflicts = module.Chart{
+ ID: "galera_conflicts",
+ Title: "Replication Conflicts",
+ Units: "transactions",
+ Fam: "galera",
+ Ctx: "mysql.galera_conflicts",
+ Type: module.Area,
+ Priority: prioGaleraConflicts,
+ Dims: module.Dims{
+ {ID: "wsrep_local_bf_aborts", Name: "bf aborts", Algo: module.Incremental},
+ {ID: "wsrep_local_cert_failures", Name: "cert fails", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartGaleraFlowControl = module.Chart{
+ ID: "galera_flow_control",
+ Title: "Flow Control",
+ Units: "ms",
+ Fam: "galera",
+ Ctx: "mysql.galera_flow_control",
+ Type: module.Area,
+ Priority: prioGaleraFlowControl,
+ Dims: module.Dims{
+ {ID: "wsrep_flow_control_paused_ns", Name: "paused", Algo: module.Incremental, Div: 1000000},
+ },
+ }
+ chartGaleraClusterStatus = module.Chart{
+ ID: "galera_cluster_status",
+ Title: "Cluster Component Status",
+ Units: "status",
+ Fam: "galera",
+ Ctx: "mysql.galera_cluster_status",
+ Priority: prioGaleraClusterStatus,
+ Dims: module.Dims{
+ {ID: "wsrep_cluster_status_primary", Name: "primary"},
+ {ID: "wsrep_cluster_status_non_primary", Name: "non_primary"},
+ {ID: "wsrep_cluster_status_disconnected", Name: "disconnected"},
+ },
+ }
+ chartGaleraClusterState = module.Chart{
+ ID: "galera_cluster_state",
+ Title: "Cluster Component State",
+ Units: "state",
+ Fam: "galera",
+ Ctx: "mysql.galera_cluster_state",
+ Priority: prioGaleraClusterState,
+ Dims: module.Dims{
+ {ID: "wsrep_local_state_undefined", Name: "undefined"},
+ {ID: "wsrep_local_state_joiner", Name: "joining"},
+ {ID: "wsrep_local_state_donor", Name: "donor"},
+ {ID: "wsrep_local_state_joined", Name: "joined"},
+ {ID: "wsrep_local_state_synced", Name: "synced"},
+ {ID: "wsrep_local_state_error", Name: "error"},
+ },
+ }
+ chartGaleraClusterSize = module.Chart{
+ ID: "galera_cluster_size",
+ Title: "Number of Nodes in the Cluster",
+ Units: "nodes",
+ Fam: "galera",
+ Ctx: "mysql.galera_cluster_size",
+ Priority: prioGaleraClusterSize,
+ Dims: module.Dims{
+ {ID: "wsrep_cluster_size", Name: "nodes"},
+ },
+ }
+ chartGaleraClusterWeight = module.Chart{
+ ID: "galera_cluster_weight",
+ Title: "The Total Weight of the Current Members in the Cluster",
+ Units: "weight",
+ Fam: "galera",
+ Ctx: "mysql.galera_cluster_weight",
+ Priority: prioGaleraClusterWeight,
+ Dims: module.Dims{
+ {ID: "wsrep_cluster_weight", Name: "weight"},
+ },
+ }
+ chartGaleraClusterConnectionStatus = module.Chart{
+ ID: "galera_connected",
+ Title: "Cluster Connection Status",
+ Units: "boolean",
+ Fam: "galera",
+ Ctx: "mysql.galera_connected",
+ Priority: prioGaleraClusterConnectionStatus,
+ Dims: module.Dims{
+ {ID: "wsrep_connected", Name: "connected"},
+ },
+ }
+ chartGaleraReadinessState = module.Chart{
+ ID: "galera_ready",
+ Title: "Accept Queries Readiness Status",
+ Units: "boolean",
+ Fam: "galera",
+ Ctx: "mysql.galera_ready",
+ Priority: prioGaleraReadinessState,
+ Dims: module.Dims{
+ {ID: "wsrep_ready", Name: "ready"},
+ },
+ }
+ chartGaleraOpenTransactions = module.Chart{
+ ID: "galera_open_transactions",
+ Title: "Open Transactions",
+ Units: "transactions",
+ Fam: "galera",
+ Ctx: "mysql.galera_open_transactions",
+ Priority: prioGaleraOpenTransactions,
+ Dims: module.Dims{
+ {ID: "wsrep_open_transactions", Name: "open"},
+ },
+ }
+ chartGaleraThreads = module.Chart{
+ ID: "galera_thread_count",
+ Title: "Total Number of WSRep (applier/rollbacker) Threads",
+ Units: "threads",
+ Fam: "galera",
+ Ctx: "mysql.galera_thread_count",
+ Priority: prioGaleraThreadCount,
+ Dims: module.Dims{
+ {ID: "wsrep_thread_count", Name: "threads"},
+ },
+ }
+)
+
+var chartsMyISAM = module.Charts{
+ chartMyISAMKeyCacheBlocks.Copy(),
+ chartMyISAMKeyCacheRequests.Copy(),
+ chartMyISAMKeyCacheDiskOperations.Copy(),
+}
+var (
+ chartMyISAMKeyCacheBlocks = module.Chart{
+ ID: "key_blocks",
+ Title: "MyISAM Key Cache Blocks",
+ Units: "blocks",
+ Fam: "myisam",
+ Ctx: "mysql.key_blocks",
+ Priority: prioMyISAMKeyBlocks,
+ Dims: module.Dims{
+ {ID: "key_blocks_unused", Name: "unused"},
+ {ID: "key_blocks_used", Name: "used", Mul: -1},
+ {ID: "key_blocks_not_flushed", Name: "not flushed"},
+ },
+ }
+ chartMyISAMKeyCacheRequests = module.Chart{
+ ID: "key_requests",
+ Title: "MyISAM Key Cache Requests",
+ Units: "requests/s",
+ Fam: "myisam",
+ Ctx: "mysql.key_requests",
+ Type: module.Area,
+ Priority: prioMyISAMKeyRequests,
+ Dims: module.Dims{
+ {ID: "key_read_requests", Name: "reads", Algo: module.Incremental},
+ {ID: "key_write_requests", Name: "writes", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMyISAMKeyCacheDiskOperations = module.Chart{
+ ID: "key_disk_ops",
+ Title: "MyISAM Key Cache Disk Operations",
+ Units: "operations/s",
+ Fam: "myisam",
+ Ctx: "mysql.key_disk_ops",
+ Priority: prioMyISAMKeyDiskOperations,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "key_reads", Name: "reads", Algo: module.Incremental},
+ {ID: "key_writes", Name: "writes", Algo: module.Incremental, Mul: -1},
+ },
+ }
+)
+
+var chartsBinlog = module.Charts{
+ chartBinlogCache.Copy(),
+ chartBinlogStatementCache.Copy(),
+}
+
+var (
+ chartBinlogCache = module.Chart{
+ ID: "binlog_cache",
+ Title: "Binlog Cache",
+ Units: "transactions/s",
+ Fam: "binlog",
+ Ctx: "mysql.binlog_cache",
+ Priority: prioBinlogCache,
+ Dims: module.Dims{
+ {ID: "binlog_cache_disk_use", Name: "disk", Algo: module.Incremental},
+ {ID: "binlog_cache_use", Name: "all", Algo: module.Incremental},
+ },
+ }
+ chartBinlogStatementCache = module.Chart{
+ ID: "binlog_stmt_cache",
+ Title: "Binlog Statement Cache",
+ Units: "statements/s",
+ Fam: "binlog",
+ Ctx: "mysql.binlog_stmt_cache",
+ Priority: prioBinlogStatementCache,
+ Dims: module.Dims{
+ {ID: "binlog_stmt_cache_disk_use", Name: "disk", Algo: module.Incremental},
+ {ID: "binlog_stmt_cache_use", Name: "all", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartsSlaveReplication = module.Charts{
+ chartSlaveBehindSeconds.Copy(),
+ chartSlaveSQLIOThreadRunningState.Copy(),
+ }
+
+ chartSlaveBehindSeconds = module.Chart{
+ ID: "slave_behind",
+ Title: "Slave Behind Seconds",
+ Units: "seconds",
+ Fam: "slave",
+ Ctx: "mysql.slave_behind",
+ Priority: prioSlaveSecondsBehindMaster,
+ Dims: module.Dims{
+ {ID: "seconds_behind_master", Name: "seconds"},
+ },
+ }
+ chartSlaveSQLIOThreadRunningState = module.Chart{
+ ID: "slave_thread_running",
+ Title: "I/O / SQL Thread Running State",
+ Units: "boolean",
+ Fam: "slave",
+ Ctx: "mysql.slave_status",
+ Priority: prioSlaveSQLIOThreadRunningState,
+ Dims: module.Dims{
+ {ID: "slave_sql_running", Name: "sql_running"},
+ {ID: "slave_io_running", Name: "io_running"},
+ },
+ }
+)
+
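+// newSlaveReplConnCharts instantiates the slave replication chart templates for a named
+// replication connection (MariaDB multi-source replication): chart and dimension IDs get
+// a lowercased connection-name suffix, while titles keep the original connection name.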
+func newSlaveReplConnCharts(conn string) *module.Charts {
+ orig := conn
+ conn = strings.ToLower(conn)
+ cs := chartsSlaveReplication.Copy()
+ for _, chart := range *cs {
+ chart.ID += "_" + conn
+ chart.Title += " Connection " + orig
+ for _, dim := range chart.Dims {
+ dim.ID += "_" + conn
+ }
+ }
+ return cs
+}
+
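+// newMariaDBUserStatisticsCharts instantiates the per-user statistics chart templates,
+// substituting the lowercased username into chart and dimension IDs and attaching a
+// "user" label with the original username.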
+func newMariaDBUserStatisticsCharts(user string) *module.Charts {
+ lcUser := strings.ToLower(user)
+ charts := chartsTmplUserStats.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, lcUser)
+ c.Labels = []module.Label{
+ {Key: "user", Value: user},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, lcUser)
+ }
+ }
+ return charts
+}
+
+func newPerconaUserStatisticsCharts(user string) *module.Charts {
+ lcUser := strings.ToLower(user)
+ charts := chartsTmplPerconaUserStats.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, lcUser)
+ c.Labels = []module.Label{
+ {Key: "user", Value: user},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, lcUser)
+ }
+ }
+ return charts
+}
+
+var (
+ chartsTmplUserStats = module.Charts{
+ chartUserStatsCPU.Copy(),
+ chartTmplUserStatsRowsOperations.Copy(),
+ chartTmplUserStatsCommands.Copy(),
+ chartTmplUserStatsDeniedCommands.Copy(),
+ chartTmplUserStatsTransactions.Copy(),
+ chartTmplUserStatsBinlogWritten.Copy(),
+ chartTmplUserStatsEmptyQueries.Copy(),
+ chartTmplUserStatsCreatedConnections.Copy(),
+ chartTmplUserStatsLostConnections.Copy(),
+ chartTmplUserStatsDeniedConnections.Copy(),
+ }
+ chartsTmplPerconaUserStats = module.Charts{
+ chartUserStatsCPU.Copy(),
+ chartTmplPerconaUserStatsRowsOperations.Copy(),
+ chartTmplUserStatsCommands.Copy(),
+ chartTmplUserStatsDeniedCommands.Copy(),
+ chartTmplUserStatsTransactions.Copy(),
+ chartTmplUserStatsBinlogWritten.Copy(),
+ chartTmplUserStatsEmptyQueries.Copy(),
+ chartTmplUserStatsCreatedConnections.Copy(),
+ chartTmplUserStatsLostConnections.Copy(),
+ chartTmplUserStatsDeniedConnections.Copy(),
+ }
+
+ chartUserStatsCPU = module.Chart{
+ ID: "userstats_cpu_%s",
+ Title: "User CPU Time",
+ Units: "percentage",
+ Fam: "user cpu time",
+ Ctx: "mysql.userstats_cpu",
+ Priority: prioUserStatsCPUTime,
+ Dims: module.Dims{
+ {ID: "userstats_%s_cpu_time", Name: "used", Mul: 100, Div: 1000, Algo: module.Incremental},
+ },
+ }
+ chartTmplUserStatsRowsOperations = module.Chart{
+ ID: "userstats_rows_%s",
+ Title: "User Rows Operations",
+ Units: "operations/s",
+ Fam: "user operations",
+ Ctx: "mysql.userstats_rows",
+ Type: module.Stacked,
+ Priority: prioUserStatsRows,
+ Dims: module.Dims{
+ {ID: "userstats_%s_rows_read", Name: "read", Algo: module.Incremental},
+ {ID: "userstats_%s_rows_sent", Name: "sent", Algo: module.Incremental},
+ {ID: "userstats_%s_rows_updated", Name: "updated", Algo: module.Incremental},
+ {ID: "userstats_%s_rows_inserted", Name: "inserted", Algo: module.Incremental},
+ {ID: "userstats_%s_rows_deleted", Name: "deleted", Algo: module.Incremental},
+ },
+ }
+ chartTmplPerconaUserStatsRowsOperations = module.Chart{
+ ID: "userstats_rows_%s",
+ Title: "User Rows Operations",
+ Units: "operations/s",
+ Fam: "user operations",
+ Ctx: "mysql.userstats_rows",
+ Type: module.Stacked,
+ Priority: prioUserStatsRows,
+ Dims: module.Dims{
+ {ID: "userstats_%s_rows_fetched", Name: "fetched", Algo: module.Incremental},
+ {ID: "userstats_%s_rows_updated", Name: "updated", Algo: module.Incremental},
+ },
+ }
+ chartTmplUserStatsCommands = module.Chart{
+ ID: "userstats_commands_%s",
+ Title: "User Commands",
+ Units: "commands/s",
+ Fam: "user commands",
+ Ctx: "mysql.userstats_commands",
+ Type: module.Stacked,
+ Priority: prioUserStatsCommands,
+ Dims: module.Dims{
+ {ID: "userstats_%s_select_commands", Name: "select", Algo: module.Incremental},
+ {ID: "userstats_%s_update_commands", Name: "update", Algo: module.Incremental},
+ {ID: "userstats_%s_other_commands", Name: "other", Algo: module.Incremental},
+ },
+ }
+ chartTmplUserStatsDeniedCommands = module.Chart{
+ ID: "userstats_denied_commands_%s",
+ Title: "User Denied Commands",
+ Units: "commands/s",
+ Fam: "user commands denied",
+ Ctx: "mysql.userstats_denied_commands",
+ Priority: prioUserStatsDeniedCommands,
+ Dims: module.Dims{
+ {ID: "userstats_%s_access_denied", Name: "denied", Algo: module.Incremental},
+ },
+ }
+ chartTmplUserStatsTransactions = module.Chart{
+ ID: "userstats_transactions_%s",
+ Title: "User Transactions",
+ Units: "transactions/s",
+ Fam: "user transactions",
+ Ctx: "mysql.userstats_created_transactions",
+ Type: module.Area,
+ Priority: prioUserStatsTransactions,
+ Dims: module.Dims{
+ {ID: "userstats_%s_commit_transactions", Name: "commit", Algo: module.Incremental},
+ {ID: "userstats_%s_rollback_transactions", Name: "rollback", Algo: module.Incremental},
+ },
+ }
+ chartTmplUserStatsBinlogWritten = module.Chart{
+ ID: "userstats_binlog_written_%s",
+ Title: "User Binlog Written",
+ Units: "B/s",
+ Fam: "user binlog written",
+ Ctx: "mysql.userstats_binlog_written",
+ Priority: prioUserStatsBinlogWritten,
+ Dims: module.Dims{
+ {ID: "userstats_%s_binlog_bytes_written", Name: "written", Algo: module.Incremental},
+ },
+ }
+ chartTmplUserStatsEmptyQueries = module.Chart{
+ ID: "userstats_empty_queries_%s",
+ Title: "User Empty Queries",
+ Units: "queries/s",
+ Fam: "user empty queries",
+ Ctx: "mysql.userstats_empty_queries",
+ Priority: prioUserStatsEmptyQueries,
+ Dims: module.Dims{
+ {ID: "userstats_%s_empty_queries", Name: "empty", Algo: module.Incremental},
+ },
+ }
+ chartTmplUserStatsCreatedConnections = module.Chart{
+ ID: "userstats_connections_%s",
+ Title: "User Created Connections",
+ Units: "connections/s",
+ Fam: "user connections created",
+ Ctx: "mysql.userstats_connections",
+ Priority: prioUserStatsConnections,
+ Dims: module.Dims{
+ {ID: "userstats_%s_total_connections", Name: "created", Algo: module.Incremental},
+ },
+ }
+ chartTmplUserStatsLostConnections = module.Chart{
+ ID: "userstats_lost_connections_%s",
+ Title: "User Lost Connections",
+ Units: "connections/s",
+ Fam: "user connections lost",
+ Ctx: "mysql.userstats_lost_connections",
+ Priority: prioUserStatsLostConnections,
+ Dims: module.Dims{
+ {ID: "userstats_%s_lost_connections", Name: "lost", Algo: module.Incremental},
+ },
+ }
+ chartTmplUserStatsDeniedConnections = module.Chart{
+ ID: "userstats_denied_connections_%s",
+ Title: "User Denied Connections",
+ Units: "connections/s",
+ Fam: "user connections denied",
+ Ctx: "mysql.userstats_denied_connections",
+ Priority: prioUserStatsDeniedConnections,
+ Dims: module.Dims{
+ {ID: "userstats_%s_denied_connections", Name: "denied", Algo: module.Incremental},
+ },
+ }
+)
+
+func (m *MySQL) addSlaveReplicationConnCharts(conn string) {
+ var charts *module.Charts
+ if conn == "" {
+ charts = chartsSlaveReplication.Copy()
+ } else {
+ charts = newSlaveReplConnCharts(conn)
+ }
+ if err := m.Charts().Add(*charts...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *MySQL) addUserStatisticsCharts(user string) {
+ if m.isPercona {
+ if err := m.Charts().Add(*newPerconaUserStatisticsCharts(user)...); err != nil {
+ m.Warning(err)
+ }
+ } else {
+ if err := m.Charts().Add(*newMariaDBUserStatisticsCharts(user)...); err != nil {
+ m.Warning(err)
+ }
+ }
+}
+
+func (m *MySQL) addInnoDBOSLogCharts() {
+ if err := m.Charts().Add(*chartsInnoDBOSLog.Copy()...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *MySQL) addMyISAMCharts() {
+ if err := m.Charts().Add(*chartsMyISAM.Copy()...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *MySQL) addBinlogCharts() {
+ if err := m.Charts().Add(*chartsBinlog.Copy()...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *MySQL) addInnodbDeadlocksChart() {
+ if err := m.Charts().Add(chartInnoDBDeadlocks.Copy()); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *MySQL) addQCacheCharts() {
+ if err := m.Charts().Add(*chartsQCache.Copy()...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *MySQL) addGaleraCharts() {
+ if err := m.Charts().Add(*chartsGalera.Copy()...); err != nil {
+ m.Warning(err)
+ }
+}
+
+func (m *MySQL) addTableOpenCacheOverflowChart() {
+ if err := m.Charts().Add(chartTableOpenCacheOverflows.Copy()); err != nil {
+ m.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mysql/collect.go b/src/go/plugin/go.d/modules/mysql/collect.go
new file mode 100644
index 000000000..5f28cd139
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/collect.go
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/blang/semver/v4"
+)
+
+func (m *MySQL) collect() (map[string]int64, error) {
+ if m.db == nil {
+ if err := m.openConnection(); err != nil {
+ return nil, err
+ }
+ }
+ if m.version == nil {
+ if err := m.collectVersion(); err != nil {
+ return nil, fmt.Errorf("error on collecting version: %v", err)
+ }
+ // https://mariadb.com/kb/en/user-statistics/
+ m.doUserStatistics = m.isPercona || (m.isMariaDB && m.version.GTE(semver.Version{Major: 10, Minor: 1, Patch: 1}))
+ }
+
+ m.disableSessionQueryLog()
+
+ mx := make(map[string]int64)
+
+ if err := m.collectGlobalStatus(mx); err != nil {
+ return nil, fmt.Errorf("error on collecting global status: %v", err)
+ }
+
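+ // optional charts are added lazily, only once the corresponding metrics first appear
+ // in the SHOW GLOBAL STATUS output (availability varies by flavor and version)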
+ if hasInnodbOSLog(mx) {
+ m.addInnoDBOSLogOnce.Do(m.addInnoDBOSLogCharts)
+ }
+ if hasInnodbDeadlocks(mx) {
+ m.addInnodbDeadlocksOnce.Do(m.addInnodbDeadlocksChart)
+ }
+ if hasQCacheMetrics(mx) {
+ m.addQCacheOnce.Do(m.addQCacheCharts)
+ }
+ if hasGaleraMetrics(mx) {
+ m.addGaleraOnce.Do(m.addGaleraCharts)
+ }
+ if hasTableOpenCacheOverflowsMetrics(mx) {
+ m.addTableOpenCacheOverflowsOnce.Do(m.addTableOpenCacheOverflowChart)
+ }
+
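+ // global variables change rarely, so they are re-read only every recheckGlobalVarsEvery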
+ now := time.Now()
+ if now.Sub(m.recheckGlobalVarsTime) > m.recheckGlobalVarsEvery {
+ if err := m.collectGlobalVariables(); err != nil {
+ return nil, fmt.Errorf("error on collecting global variables: %v", err)
+ }
+ }
+ mx["max_connections"] = m.varMaxConns
+ mx["table_open_cache"] = m.varTableOpenCache
+
+ if m.isMariaDB || !strings.Contains(m.varDisabledStorageEngine, "MyISAM") {
+ m.addMyISAMOnce.Do(m.addMyISAMCharts)
+ }
+ if m.varLogBin != "OFF" {
+ m.addBinlogOnce.Do(m.addBinlogCharts)
+ }
+
+ // TODO: perhaps make a decision based on privileges? (SHOW GRANTS FOR CURRENT_USER();)
+ if m.doSlaveStatus {
+ if err := m.collectSlaveStatus(mx); err != nil {
+ m.Warningf("error on collecting slave status: %v", err)
+ m.doSlaveStatus = errors.Is(err, context.DeadlineExceeded)
+ }
+ }
+
+ if m.doUserStatistics {
+ if err := m.collectUserStatistics(mx); err != nil {
+ m.Warningf("error on collecting user statistics: %v", err)
+ m.doUserStatistics = errors.Is(err, context.DeadlineExceeded)
+ }
+ }
+
+ if err := m.collectProcessListStatistics(mx); err != nil {
+ m.Errorf("error on collecting process list statistics: %v", err)
+ }
+
+ calcThreadCacheMisses(mx)
+ return mx, nil
+}
+
+func (m *MySQL) openConnection() error {
+ db, err := sql.Open("mysql", m.DSN)
+ if err != nil {
+ return fmt.Errorf("error on opening a connection with the mysql database [%s]: %v", m.safeDSN, err)
+ }
+
+ db.SetConnMaxLifetime(10 * time.Minute)
+
+ ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration())
+ defer cancel()
+
+ if err := db.PingContext(ctx); err != nil {
+ _ = db.Close()
+ return fmt.Errorf("error on pinging the mysql database [%s]: %v", m.safeDSN, err)
+ }
+
+ m.db = db
+ return nil
+}
+
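+// calcThreadCacheMisses stores the thread cache miss ratio (threads_created / connections)
+// scaled by 10000; the corresponding chart divides the value by 100, effectively showing
+// the miss rate as a percentage with two decimal places of precision.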
+func calcThreadCacheMisses(collected map[string]int64) {
+ threads, cons := collected["threads_created"], collected["connections"]
+ if threads == 0 || cons == 0 {
+ collected["thread_cache_misses"] = 0
+ } else {
+ collected["thread_cache_misses"] = int64(float64(threads) / float64(cons) * 10000)
+ }
+}
+
+func hasInnodbOSLog(collected map[string]int64) bool {
+ // removed in MariaDB 10.8 (https://mariadb.com/kb/en/innodb-status-variables/#innodb_os_log_fsyncs)
+ _, ok := collected["innodb_os_log_fsyncs"]
+ return ok
+}
+
+func hasInnodbDeadlocks(collected map[string]int64) bool {
+ _, ok := collected["innodb_deadlocks"]
+ return ok
+}
+
+func hasGaleraMetrics(collected map[string]int64) bool {
+ _, ok := collected["wsrep_received"]
+ return ok
+}
+
+func hasQCacheMetrics(collected map[string]int64) bool {
+ _, ok := collected["qcache_hits"]
+ return ok
+}
+
+func hasTableOpenCacheOverflowsMetrics(collected map[string]int64) bool {
+ _, ok := collected["table_open_cache_overflows"]
+ return ok
+}
+
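+// collectQuery executes the given query and calls assign for every column of every row;
+// lineEnd is true for the last column of a row. It returns the query execution time in milliseconds.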
+func (m *MySQL) collectQuery(query string, assign func(column, value string, lineEnd bool)) (duration int64, err error) {
+ ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration())
+ defer cancel()
+
+ s := time.Now()
+ rows, err := m.db.QueryContext(ctx, query)
+ if err != nil {
+ return 0, err
+ }
+ duration = time.Since(s).Milliseconds()
+ defer func() { _ = rows.Close() }()
+
+ columns, err := rows.Columns()
+ if err != nil {
+ return duration, err
+ }
+
+ vs := makeValues(len(columns))
+ for rows.Next() {
+ if err := rows.Scan(vs...); err != nil {
+ return duration, err
+ }
+ for i, l := 0, len(vs); i < l; i++ {
+ assign(columns[i], valueToString(vs[i]), i == l-1)
+ }
+ }
+ return duration, rows.Err()
+}
+
+func makeValues(size int) []any {
+ vs := make([]any, size)
+ for i := range vs {
+ vs[i] = &sql.NullString{}
+ }
+ return vs
+}
+
+func valueToString(value any) string {
+ v, ok := value.(*sql.NullString)
+ if !ok || !v.Valid {
+ return ""
+ }
+ return v.String
+}
+
+func parseInt(s string) int64 {
+ v, _ := strconv.ParseInt(s, 10, 64)
+ return v
+}
+
+func parseFloat(s string) float64 {
+ v, _ := strconv.ParseFloat(s, 64)
+ return v
+}
diff --git a/src/go/plugin/go.d/modules/mysql/collect_global_status.go b/src/go/plugin/go.d/modules/mysql/collect_global_status.go
new file mode 100644
index 000000000..c6dff9e93
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/collect_global_status.go
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "strings"
+)
+
+const queryShowGlobalStatus = "SHOW GLOBAL STATUS;"
+
+func (m *MySQL) collectGlobalStatus(mx map[string]int64) error {
+ // MariaDB: https://mariadb.com/kb/en/server-status-variables/
+ // MySQL: https://dev.mysql.com/doc/refman/8.0/en/server-status-variable-reference.html
+ q := queryShowGlobalStatus
+ m.Debugf("executing query: '%s'", q)
+
+ var name string
+ _, err := m.collectQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "Variable_name":
+ name = value
+ case "Value":
+ if !globalStatusKeys[name] {
+ return
+ }
+ switch name {
+ case "wsrep_connected":
+ mx[name] = parseInt(convertWsrepConnected(value))
+ case "wsrep_ready":
+ mx[name] = parseInt(convertWsrepReady(value))
+ case "wsrep_local_state":
+ // https://mariadb.com/kb/en/galera-cluster-status-variables/#wsrep_local_state
+ // https://github.com/codership/wsrep-API/blob/eab2d5d5a31672c0b7d116ef1629ff18392fd7d0/wsrep_api.h#L256
+ mx[name+"_undefined"] = boolToInt(value == "0")
+ mx[name+"_joiner"] = boolToInt(value == "1")
+ mx[name+"_donor"] = boolToInt(value == "2")
+ mx[name+"_joined"] = boolToInt(value == "3")
+ mx[name+"_synced"] = boolToInt(value == "4")
+ mx[name+"_error"] = boolToInt(parseInt(value) >= 5)
+ case "wsrep_cluster_status":
+ // https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_cluster_status
+ // https://github.com/codership/wsrep-API/blob/eab2d5d5a31672c0b7d116ef1629ff18392fd7d0/wsrep_api.h
+ // https://github.com/codership/wsrep-API/blob/f71cd270414ee70dde839cfc59c1731eea4230ea/examples/node/wsrep.c#L80
+ value = strings.ToUpper(value)
+ mx[name+"_primary"] = boolToInt(value == "PRIMARY")
+ mx[name+"_non_primary"] = boolToInt(value == "NON-PRIMARY")
+ mx[name+"_disconnected"] = boolToInt(value == "DISCONNECTED")
+ default:
+ mx[strings.ToLower(name)] = parseInt(value)
+ }
+ }
+ })
+ return err
+}
+
+func convertWsrepConnected(val string) string {
+ // https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_connected
+ switch val {
+ case "OFF":
+ return "0"
+ case "ON":
+ return "1"
+ default:
+ return "-1"
+ }
+}
+
+func convertWsrepReady(val string) string {
+ // https://www.percona.com/doc/percona-xtradb-cluster/LATEST/wsrep-status-index.html#wsrep_ready
+ switch val {
+ case "OFF":
+ return "0"
+ case "ON":
+ return "1"
+ default:
+ return "-1"
+ }
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
+
+var globalStatusKeys = map[string]bool{
+ "Bytes_received": true,
+ "Bytes_sent": true,
+ "Queries": true,
+ "Questions": true,
+ "Slow_queries": true,
+ "Handler_commit": true,
+ "Handler_delete": true,
+ "Handler_prepare": true,
+ "Handler_read_first": true,
+ "Handler_read_key": true,
+ "Handler_read_next": true,
+ "Handler_read_prev": true,
+ "Handler_read_rnd": true,
+ "Handler_read_rnd_next": true,
+ "Handler_rollback": true,
+ "Handler_savepoint": true,
+ "Handler_savepoint_rollback": true,
+ "Handler_update": true,
+ "Handler_write": true,
+ "Table_locks_immediate": true,
+ "Table_locks_waited": true,
+ "Table_open_cache_overflows": true,
+ "Select_full_join": true,
+ "Select_full_range_join": true,
+ "Select_range": true,
+ "Select_range_check": true,
+ "Select_scan": true,
+ "Sort_merge_passes": true,
+ "Sort_range": true,
+ "Sort_scan": true,
+ "Created_tmp_disk_tables": true,
+ "Created_tmp_files": true,
+ "Created_tmp_tables": true,
+ "Connections": true,
+ "Aborted_connects": true,
+ "Max_used_connections": true,
+ "Binlog_cache_disk_use": true,
+ "Binlog_cache_use": true,
+ "Threads_connected": true,
+ "Threads_created": true,
+ "Threads_cached": true,
+ "Threads_running": true,
+ "Thread_cache_misses": true,
+ "Innodb_data_read": true,
+ "Innodb_data_written": true,
+ "Innodb_data_reads": true,
+ "Innodb_data_writes": true,
+ "Innodb_data_fsyncs": true,
+ "Innodb_data_pending_reads": true,
+ "Innodb_data_pending_writes": true,
+ "Innodb_data_pending_fsyncs": true,
+ "Innodb_log_waits": true,
+ "Innodb_log_write_requests": true,
+ "Innodb_log_writes": true,
+ "Innodb_os_log_fsyncs": true,
+ "Innodb_os_log_pending_fsyncs": true,
+ "Innodb_os_log_pending_writes": true,
+ "Innodb_os_log_written": true,
+ "Innodb_row_lock_current_waits": true,
+ "Innodb_rows_inserted": true,
+ "Innodb_rows_read": true,
+ "Innodb_rows_updated": true,
+ "Innodb_rows_deleted": true,
+ "Innodb_buffer_pool_pages_data": true,
+ "Innodb_buffer_pool_pages_dirty": true,
+ "Innodb_buffer_pool_pages_free": true,
+ "Innodb_buffer_pool_pages_flushed": true,
+ "Innodb_buffer_pool_pages_misc": true,
+ "Innodb_buffer_pool_pages_total": true,
+ "Innodb_buffer_pool_bytes_data": true,
+ "Innodb_buffer_pool_bytes_dirty": true,
+ "Innodb_buffer_pool_read_ahead": true,
+ "Innodb_buffer_pool_read_ahead_evicted": true,
+ "Innodb_buffer_pool_read_ahead_rnd": true,
+ "Innodb_buffer_pool_read_requests": true,
+ "Innodb_buffer_pool_write_requests": true,
+ "Innodb_buffer_pool_reads": true,
+ "Innodb_buffer_pool_wait_free": true,
+ "Innodb_deadlocks": true,
+ "Qcache_hits": true,
+ "Qcache_lowmem_prunes": true,
+ "Qcache_inserts": true,
+ "Qcache_not_cached": true,
+ "Qcache_queries_in_cache": true,
+ "Qcache_free_memory": true,
+ "Qcache_free_blocks": true,
+ "Qcache_total_blocks": true,
+ "Key_blocks_unused": true,
+ "Key_blocks_used": true,
+ "Key_blocks_not_flushed": true,
+ "Key_read_requests": true,
+ "Key_write_requests": true,
+ "Key_reads": true,
+ "Key_writes": true,
+ "Open_files": true,
+ "Opened_files": true,
+ "Binlog_stmt_cache_disk_use": true,
+ "Binlog_stmt_cache_use": true,
+ "Connection_errors_accept": true,
+ "Connection_errors_internal": true,
+ "Connection_errors_max_connections": true,
+ "Connection_errors_peer_address": true,
+ "Connection_errors_select": true,
+ "Connection_errors_tcpwrap": true,
+ "Com_delete": true,
+ "Com_insert": true,
+ "Com_select": true,
+ "Com_update": true,
+ "Com_replace": true,
+ "Opened_tables": true,
+ "Open_tables": true,
+ "wsrep_local_recv_queue": true,
+ "wsrep_local_send_queue": true,
+ "wsrep_received": true,
+ "wsrep_replicated": true,
+ "wsrep_received_bytes": true,
+ "wsrep_replicated_bytes": true,
+ "wsrep_local_bf_aborts": true,
+ "wsrep_local_cert_failures": true,
+ "wsrep_flow_control_paused_ns": true,
+ "wsrep_cluster_weight": true,
+ "wsrep_cluster_size": true,
+ "wsrep_local_state": true,
+ "wsrep_open_transactions": true,
+ "wsrep_thread_count": true,
+ "wsrep_connected": true,
+ "wsrep_ready": true,
+ "wsrep_cluster_status": true,
+}
diff --git a/src/go/plugin/go.d/modules/mysql/collect_global_vars.go b/src/go/plugin/go.d/modules/mysql/collect_global_vars.go
new file mode 100644
index 000000000..ae6278088
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/collect_global_vars.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+const (
+ queryShowGlobalVariables = `
+SHOW GLOBAL VARIABLES
+WHERE
+ Variable_name LIKE 'max_connections'
+ OR Variable_name LIKE 'table_open_cache'
+ OR Variable_name LIKE 'disabled_storage_engines'
+ OR Variable_name LIKE 'log_bin'
+ OR Variable_name LIKE 'performance_schema';`
+)
+
+func (m *MySQL) collectGlobalVariables() error {
+ // MariaDB: https://mariadb.com/kb/en/server-system-variables/
+ // MySQL: https://dev.mysql.com/doc/refman/8.0/en/server-system-variable-reference.html
+ q := queryShowGlobalVariables
+ m.Debugf("executing query: '%s'", q)
+
+ var name string
+ _, err := m.collectQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "Variable_name":
+ name = value
+ case "Value":
+ switch name {
+ case "disabled_storage_engines":
+ m.varDisabledStorageEngine = value
+ case "log_bin":
+ m.varLogBin = value
+ case "max_connections":
+ m.varMaxConns = parseInt(value)
+ case "performance_schema":
+ m.varPerformanceSchema = value
+ case "table_open_cache":
+ m.varTableOpenCache = parseInt(value)
+ }
+ }
+ })
+ return err
+}
diff --git a/src/go/plugin/go.d/modules/mysql/collect_process_list.go b/src/go/plugin/go.d/modules/mysql/collect_process_list.go
new file mode 100644
index 000000000..08c08c6d5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/collect_process_list.go
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "github.com/blang/semver/v4"
+)
+
+// Table Schema:
+// (MariaDB) https://mariadb.com/kb/en/information-schema-processlist-table/
+// (MySQL) https://dev.mysql.com/doc/refman/5.7/en/information-schema-processlist-table.html
+const (
+ queryShowProcessList = `
+SELECT
+ time,
+ user
+FROM
+ information_schema.processlist
+WHERE
+ info IS NOT NULL
+ AND info NOT LIKE '%PROCESSLIST%'
+ORDER BY
+ time;`
+)
+
+// Performance Schema
+// (MySQL) https://dev.mysql.com/doc/refman/8.0/en/performance-schema-processlist-table.html
+const (
+ queryShowProcessListPS = `
+SELECT
+ time,
+ user
+FROM
+ performance_schema.processlist
+WHERE
+ info IS NOT NULL
+ AND info NOT LIKE '%PROCESSLIST%'
+ORDER BY
+ time;`
+)
+
+func (m *MySQL) collectProcessListStatistics(mx map[string]int64) error {
+ var q string
+ mysqlMinVer := semver.Version{Major: 8, Minor: 0, Patch: 22}
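+ // MySQL 8.0.22+ exposes the process list via the Performance Schema, which avoids the
+ // mutex required by information_schema.processlist, so prefer it when it is enabled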
+ if !m.isMariaDB && m.version.GTE(mysqlMinVer) && m.varPerformanceSchema == "ON" {
+ q = queryShowProcessListPS
+ } else {
+ q = queryShowProcessList
+ }
+ m.Debugf("executing query: '%s'", q)
+
+ var maxTime int64 // time of the slowest query in the process list, in seconds
+
+ duration, err := m.collectQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "time":
+ maxTime = parseInt(value)
+ case "user":
+ // system user refers to non-client threads
+ // event_scheduler is the thread used to monitor scheduled events
+ // system user and event_scheduler threads are grouped as system/database threads
+ // authenticated and unauthenticated users are grouped as users
+ // please see USER section in
+ // https://dev.mysql.com/doc/refman/8.0/en/information-schema-processlist-table.html
+ switch value {
+ case "system user", "event_scheduler":
+ mx["process_list_queries_count_system"] += 1
+ default:
+ mx["process_list_queries_count_user"] += 1
+ }
+ }
+ })
+ if err != nil {
+ return err
+ }
+
+ if _, ok := mx["process_list_queries_count_system"]; !ok {
+ mx["process_list_queries_count_system"] = 0
+ }
+ if _, ok := mx["process_list_queries_count_user"]; !ok {
+ mx["process_list_queries_count_user"] = 0
+ }
+ mx["process_list_fetch_query_duration"] = duration
+ mx["process_list_longest_query_duration"] = maxTime
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/mysql/collect_slave_status.go b/src/go/plugin/go.d/modules/mysql/collect_slave_status.go
new file mode 100644
index 000000000..37d4bf59b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/collect_slave_status.go
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "strings"
+
+ "github.com/blang/semver/v4"
+)
+
+const (
+ queryShowReplicaStatus = "SHOW REPLICA STATUS;"
+ queryShowSlaveStatus = "SHOW SLAVE STATUS;"
+ queryShowAllSlavesStatus = "SHOW ALL SLAVES STATUS;"
+)
+
+func (m *MySQL) collectSlaveStatus(mx map[string]int64) error {
+ // https://mariadb.com/docs/reference/es/sql-statements/SHOW_ALL_SLAVES_STATUS/
+ mariaDBMinVer := semver.Version{Major: 10, Minor: 2, Patch: 0}
+ mysqlMinVer := semver.Version{Major: 8, Minor: 0, Patch: 22}
+ var q string
+ if m.isMariaDB && m.version.GTE(mariaDBMinVer) {
+ q = queryShowAllSlavesStatus
+ } else if !m.isMariaDB && m.version.GTE(mysqlMinVer) {
+ q = queryShowReplicaStatus
+ } else {
+ q = queryShowSlaveStatus
+ }
+ m.Debugf("executing query: '%s'", q)
+
+ v := struct {
+ name string
+ behindMaster int64
+ sqlRunning int64
+ ioRunning int64
+ }{}
+
+ _, err := m.collectQuery(q, func(column, value string, lineEnd bool) {
+ switch column {
+ case "Connection_name", "Channel_Name":
+ v.name = value
+ case "Seconds_Behind_Master", "Seconds_Behind_Source":
+ v.behindMaster = parseInt(value)
+ case "Slave_SQL_Running", "Replica_SQL_Running":
+ v.sqlRunning = parseInt(convertSlaveSQLRunning(value))
+ case "Slave_IO_Running", "Replica_IO_Running":
+ v.ioRunning = parseInt(convertSlaveIORunning(value))
+ }
+ if lineEnd {
+ if !m.collectedReplConns[v.name] {
+ m.collectedReplConns[v.name] = true
+ m.addSlaveReplicationConnCharts(v.name)
+ }
+ s := strings.ToLower(slaveMetricSuffix(v.name))
+ mx["seconds_behind_master"+s] = v.behindMaster
+ mx["slave_sql_running"+s] = v.sqlRunning
+ mx["slave_io_running"+s] = v.ioRunning
+ }
+ })
+ return err
+}
+
+func convertSlaveSQLRunning(value string) string {
+ switch value {
+ case "Yes":
+ return "1"
+ default:
+ return "0"
+ }
+}
+
+func convertSlaveIORunning(value string) string {
+ // NOTE: there is also a 'Connecting' state, and possibly others
+ switch value {
+ case "Yes":
+ return "1"
+ default:
+ return "0"
+ }
+}
+
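+// slaveMetricSuffix returns the per-connection metric suffix used with MariaDB
+// multi-source replication; the default, unnamed connection gets no suffix.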
+func slaveMetricSuffix(conn string) string {
+ if conn == "" {
+ return ""
+ }
+ return "_" + conn
+}
diff --git a/src/go/plugin/go.d/modules/mysql/collect_user_statistics.go b/src/go/plugin/go.d/modules/mysql/collect_user_statistics.go
new file mode 100644
index 000000000..b00703a46
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/collect_user_statistics.go
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "strings"
+)
+
+const queryShowUserStatistics = "SHOW USER_STATISTICS;"
+
+func (m *MySQL) collectUserStatistics(mx map[string]int64) error {
+ // https://mariadb.com/kb/en/user-statistics/
+ // https://mariadb.com/kb/en/information-schema-user_statistics-table/
+ q := queryShowUserStatistics
+ m.Debugf("executing query: '%s'", q)
+
+ var user, prefix string
+ _, err := m.collectQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "User":
+ user = value
+ prefix = "userstats_" + user + "_"
+ if !m.collectedUsers[user] {
+ m.collectedUsers[user] = true
+ m.addUserStatisticsCharts(user)
+ }
+ case "Cpu_time":
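+ // Cpu_time is reported in seconds; store it as milliseconds
+ // (the CPU chart converts it to a percentage via its Mul/Div settings)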
+ mx[strings.ToLower(prefix+column)] = int64(parseFloat(value) * 1000)
+ case
+ "Total_connections",
+ "Lost_connections",
+ "Denied_connections",
+ "Empty_queries",
+ "Binlog_bytes_written",
+ "Rows_read",
+ "Rows_sent",
+ "Rows_deleted",
+ "Rows_inserted",
+ "Rows_updated",
+ "Rows_fetched", // Percona
+ "Select_commands",
+ "Update_commands",
+ "Other_commands",
+ "Access_denied",
+ "Commit_transactions",
+ "Rollback_transactions":
+ mx[strings.ToLower(prefix+column)] = parseInt(value)
+ }
+ })
+ return err
+}
diff --git a/src/go/plugin/go.d/modules/mysql/collect_version.go b/src/go/plugin/go.d/modules/mysql/collect_version.go
new file mode 100644
index 000000000..b85922e2c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/collect_version.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/blang/semver/v4"
+)
+
+const queryShowVersion = `
+SHOW GLOBAL VARIABLES
+WHERE
+ Variable_name LIKE 'version'
+ OR Variable_name LIKE 'version_comment';`
+
+var reVersionCore = regexp.MustCompile(`^\d+\.\d+\.\d+`)
+
+func (m *MySQL) collectVersion() error {
+ // https://mariadb.com/kb/en/version/
+ q := queryShowVersion
+ m.Debugf("executing query: '%s'", queryShowVersion)
+
+ var name, version, versionComment string
+ _, err := m.collectQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "Variable_name":
+ name = value
+ case "Value":
+ switch name {
+ case "version":
+ version = value
+ case "version_comment":
+ versionComment = value
+ }
+ }
+ })
+ if err != nil {
+ return err
+ }
+
+ m.Infof("application version: '%s', version_comment: '%s'", version, versionComment)
+
+ // the version string is not always valid semver (e.g. 8.0.22-0ubuntu0.20.04.2)
+ s := reVersionCore.FindString(version)
+ if s == "" {
+ return fmt.Errorf("couldn't parse version string '%s'", version)
+ }
+
+ ver, err := semver.New(s)
+ if err != nil {
+ return fmt.Errorf("couldn't parse version string '%s': %v", s, err)
+ }
+
+ m.version = ver
+ m.isMariaDB = strings.Contains(version, "MariaDB") || strings.Contains(versionComment, "mariadb")
+ m.isPercona = strings.Contains(versionComment, "Percona")
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/mysql/config_schema.json b/src/go/plugin/go.d/modules/mysql/config_schema.json
new file mode 100644
index 000000000..20bb265c0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/config_schema.json
@@ -0,0 +1,52 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MySQL collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "dsn": {
+ "title": "DSN",
+ "description": "MySQL server [Data Source Name (DSN)](https://github.com/go-sql-driver/mysql#dsn-data-source-name) specifying the connection details.",
+ "type": "string",
+ "default": "netdata@tcp(localhost:3306)/"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for queries, in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "my.cnf": {
+ "title": "my.cnf path",
+ "description": "Optional. Specifies the path to the my.cnf file containing connection settings under the [client] section.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "dsn"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "dsn": {
+ "ui:placeholder": "username:password@protocol(address)/dbname"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mysql/disable_logging.go b/src/go/plugin/go.d/modules/mysql/disable_logging.go
new file mode 100644
index 000000000..3a2eea6a1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/disable_logging.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+const (
+ queryShowSessionVariables = `
+SHOW SESSION VARIABLES
+WHERE
+ Variable_name LIKE 'sql_log_off'
+ OR Variable_name LIKE 'slow_query_log';`
+)
+
+const (
+ queryDisableSessionQueryLog = "SET SESSION sql_log_off='ON';"
+ queryDisableSessionSlowQueryLog = "SET SESSION slow_query_log='OFF';"
+)
+
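+// disableSessionQueryLog turns off general and slow query logging for the collector's own
+// session, so that the frequent monitoring queries do not flood the server logs.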
+func (m *MySQL) disableSessionQueryLog() {
+ q := queryShowSessionVariables
+ m.Debugf("executing query: '%s'", q)
+
+ var sqlLogOff, slowQueryLog string
+ var name string
+ _, err := m.collectQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "Variable_name":
+ name = value
+ case "Value":
+ switch name {
+ case "sql_log_off":
+ sqlLogOff = value
+ case "slow_query_log":
+ slowQueryLog = value
+ }
+ }
+ })
+ if err != nil {
+ m.Debug(err)
+ return
+ }
+
+ if sqlLogOff == "OFF" && m.doDisableSessionQueryLog {
+ // requires SUPER privileges
+ q = queryDisableSessionQueryLog
+ m.Debugf("executing query: '%s'", q)
+ if _, err := m.collectQuery(q, func(_, _ string, _ bool) {}); err != nil {
+ m.Infof("failed to disable session query log (sql_log_off): %v", err)
+ m.doDisableSessionQueryLog = false
+ }
+ }
+ if slowQueryLog == "ON" {
+ q = queryDisableSessionSlowQueryLog
+ m.Debugf("executing query: '%s'", q)
+ if _, err := m.collectQuery(q, func(_, _ string, _ bool) {}); err != nil {
+ m.Debugf("failed to disable session slow query log (slow_query_log): %v", err)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md b/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md
new file mode 100644
index 000000000..b10e84b2a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md
@@ -0,0 +1,401 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/mysql/integrations/mariadb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/mysql/metadata.yaml"
+sidebar_label: "MariaDB"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MariaDB
+
+
+<img src="https://netdata.cloud/img/mariadb.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: mysql
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.
+
+
+It connects to the MySQL instance via a TCP or UNIX socket and executes the following queries:
+
+- `SELECT VERSION();`
+- `SHOW GLOBAL STATUS;`
+- `SHOW GLOBAL VARIABLES;`
+- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)
+- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)
+- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users over the known MySQL TCP sockets:
+
+- 127.0.0.1:3306
+- [::1]:3306
+
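+For reference, connecting as the `netdata` user to the first socket corresponds to a [DSN](https://github.com/go-sql-driver/mysql#dsn-data-source-name) of the form `netdata@tcp(127.0.0.1:3306)/` (illustrative example).
+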
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per MariaDB instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |
+|:------|:----------|:----|:---:|:---:|:---:|
+| mysql.net | in, out | kilobits/s | • | • | • |
+| mysql.queries | queries, questions, slow_queries | queries/s | • | • | • |
+| mysql.queries_type | select, delete, update, insert, replace | queries/s | • | • | • |
+| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | • | • | • |
+| mysql.table_open_cache_overflows | open_cache | overflows/s | • | • | • |
+| mysql.table_locks | immediate, waited | locks/s | • | • | • |
+| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | • | • | • |
+| mysql.sort_issues | merge_passes, range, scan | issues/s | • | • | • |
+| mysql.tmp | disk_tables, files, tables | events/s | • | • | • |
+| mysql.connections | all, aborted | connections/s | • | • | • |
+| mysql.connections_active | active, limit, max_active | connections | • | • | • |
+| mysql.threads | connected, cached, running | threads | • | • | • |
+| mysql.threads_created | created | threads/s | • | • | • |
+| mysql.thread_cache_misses | misses | misses | • | • | • |
+| mysql.innodb_io | read, write | KiB/s | • | • | • |
+| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | • | • | • |
+| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | • | • | • |
+| mysql.innodb_log | waits, write_requests, writes | operations/s | • | • | • |
+| mysql.innodb_cur_row_lock | current waits | operations | • | • | • |
+| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | • | • | • |
+| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | • | • | • |
+| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | • | • | • |
+| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | • | • | • |
+| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | • | • | • |
+| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | • | • | • |
+| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | • | • | • |
+| mysql.innodb_os_log | fsyncs, writes | operations | • | • | • |
+| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | • | • | • |
+| mysql.innodb_os_log_io | write | KiB/s | • | • | • |
+| mysql.innodb_deadlocks | deadlocks | operations/s | • | • | • |
+| mysql.files | files | files | • | • | • |
+| mysql.files_rate | files | files/s | • | • | • |
+| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | • | • | • |
+| mysql.opened_tables | tables | tables/s | • | • | • |
+| mysql.open_tables | cache, tables | tables | • | • | • |
+| mysql.process_list_fetch_query_duration | duration | milliseconds | • | • | • |
+| mysql.process_list_queries_count | system, user | queries | • | • | • |
+| mysql.process_list_longest_query_duration | duration | seconds | • | • | • |
+| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | • | • | • |
+| mysql.qcache | queries | queries | • | • | • |
+| mysql.qcache_freemem | free | MiB | • | • | • |
+| mysql.qcache_memblocks | free, total | blocks | • | • | • |
+| mysql.galera_writesets | rx, tx | writesets/s | • | • | • |
+| mysql.galera_bytes | rx, tx | KiB/s | • | • | • |
+| mysql.galera_queue | rx, tx | writesets | • | • | • |
+| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | • | • | • |
+| mysql.galera_flow_control | paused | ms | • | • | • |
+| mysql.galera_cluster_status | primary, non_primary, disconnected | status | • | • | • |
+| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | • | • | • |
+| mysql.galera_cluster_size | nodes | nodes | • | • | • |
+| mysql.galera_cluster_weight | weight | weight | • | • | • |
+| mysql.galera_connected | connected | boolean | • | • | • |
+| mysql.galera_ready | ready | boolean | • | • | • |
+| mysql.galera_open_transactions | open | transactions | • | • | • |
+| mysql.galera_thread_count | threads | threads | • | • | • |
+| mysql.key_blocks | unused, used, not_flushed | blocks | • | • | • |
+| mysql.key_requests | reads, writes | requests/s | • | • | • |
+| mysql.key_disk_ops | reads, writes | operations/s | • | • | • |
+| mysql.binlog_cache | disk, all | transactions/s | • | • | • |
+| mysql.binlog_stmt_cache | disk, all | statements/s | • | • | • |
+
+### Per connection
+
+These metrics refer to the replication connection.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |
+|:------|:----------|:----|:---:|:---:|:---:|
+| mysql.slave_behind | seconds | seconds | • | • | • |
+| mysql.slave_status | sql_running, io_running | boolean | • | • | • |
+
+### Per user
+
+These metrics refer to the MySQL user.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| user | username |
+
+Metrics:
+
+| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |
+|:------|:----------|:----|:---:|:---:|:---:|
+| mysql.userstats_cpu | used | percentage | | • | • |
+| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | • | • |
+| mysql.userstats_commands | select, update, other | commands/s | | • | • |
+| mysql.userstats_denied_commands | denied | commands/s | | • | • |
+| mysql.userstats_created_transactions | commit, rollback | transactions/s | | • | • |
+| mysql.userstats_binlog_written | written | B/s | | • | • |
+| mysql.userstats_empty_queries | empty | queries/s | | • | • |
+| mysql.userstats_connections | created | connections/s | | • | • |
+| mysql.userstats_lost_connections | lost | connections/s | | • | • |
+| mysql.userstats_denied_connections | denied | connections/s | | • | • |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |
+| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |
+| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |
+| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |
+| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |
+| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |
+| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |
+| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |
+| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |
+| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |
+| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |
+| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |
+
+
+## Setup
+
+### Prerequisites
+
+#### Create netdata user
+
+A user account should have the
+following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):
+
+- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)
+- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)
+- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)
+
+To create the `netdata` user with these permissions, execute the following in the MySQL shell:
+
+```mysql
+CREATE USER 'netdata'@'localhost';
+GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';
+FLUSH PRIVILEGES;
+```
+
+The `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only
+be able to gather statistics without being able to alter or affect operations in any way.
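+
+To confirm that the account exists and has the expected privileges, you can optionally run:
+
+```mysql
+SHOW GRANTS FOR 'netdata'@'localhost';
+```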
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/mysql.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/mysql.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |
+| my.cnf | Specifies the my.cnf file from which connection settings are read ([client] section). | | no |
+| timeout | Query timeout in seconds. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netdata@tcp(127.0.0.1:3306)/
+
+```
+</details>
+
+##### Unix socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netdata@unix(/var/lib/mysql/mysql.sock)/
+
+```
+</details>
+
+##### Connection with password
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netconfig:password@tcp(127.0.0.1:3306)/
+
+```
+</details>
+
+##### my.cnf
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ my.cnf: '/etc/my.cnf'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netdata@tcp(127.0.0.1:3306)/
+
+ - name: remote
+ dsn: netconfig:password@tcp(203.0.113.0:3306)/
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m mysql
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep mysql
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep mysql /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep mysql
+```
+
+
diff --git a/src/go/plugin/go.d/modules/mysql/integrations/mysql.md b/src/go/plugin/go.d/modules/mysql/integrations/mysql.md
new file mode 100644
index 000000000..f4f8a423a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/integrations/mysql.md
@@ -0,0 +1,401 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/mysql/integrations/mysql.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/mysql/metadata.yaml"
+sidebar_label: "MySQL"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MySQL
+
+
+<img src="https://netdata.cloud/img/mysql.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: mysql
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.
+
+
+It connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:
+
+Executed queries:
+
+- `SELECT VERSION();`
+- `SHOW GLOBAL STATUS;`
+- `SHOW GLOBAL VARIABLES;`
+- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)
+- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)
+- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users via the known MySQL TCP sockets:
+
+- 127.0.0.1:3306
+- "[::1]:3306"
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per MySQL instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |
+|:------|:----------|:----|:---:|:---:|:---:|
+| mysql.net | in, out | kilobits/s | • | • | • |
+| mysql.queries | queries, questions, slow_queries | queries/s | • | • | • |
+| mysql.queries_type | select, delete, update, insert, replace | queries/s | • | • | • |
+| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | • | • | • |
+| mysql.table_open_cache_overflows | open_cache | overflows/s | • | • | • |
+| mysql.table_locks | immediate, waited | locks/s | • | • | • |
+| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | • | • | • |
+| mysql.sort_issues | merge_passes, range, scan | issues/s | • | • | • |
+| mysql.tmp | disk_tables, files, tables | events/s | • | • | • |
+| mysql.connections | all, aborted | connections/s | • | • | • |
+| mysql.connections_active | active, limit, max_active | connections | • | • | • |
+| mysql.threads | connected, cached, running | threads | • | • | • |
+| mysql.threads_created | created | threads/s | • | • | • |
+| mysql.thread_cache_misses | misses | misses | • | • | • |
+| mysql.innodb_io | read, write | KiB/s | • | • | • |
+| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | • | • | • |
+| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | • | • | • |
+| mysql.innodb_log | waits, write_requests, writes | operations/s | • | • | • |
+| mysql.innodb_cur_row_lock | current waits | operations | • | • | • |
+| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | • | • | • |
+| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | • | • | • |
+| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | • | • | • |
+| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | • | • | • |
+| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | • | • | • |
+| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | • | • | • |
+| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | • | • | • |
+| mysql.innodb_os_log | fsyncs, writes | operations | • | • | • |
+| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | • | • | • |
+| mysql.innodb_os_log_io | write | KiB/s | • | • | • |
+| mysql.innodb_deadlocks | deadlocks | operations/s | • | • | • |
+| mysql.files | files | files | • | • | • |
+| mysql.files_rate | files | files/s | • | • | • |
+| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | • | • | • |
+| mysql.opened_tables | tables | tables/s | • | • | • |
+| mysql.open_tables | cache, tables | tables | • | • | • |
+| mysql.process_list_fetch_query_duration | duration | milliseconds | • | • | • |
+| mysql.process_list_queries_count | system, user | queries | • | • | • |
+| mysql.process_list_longest_query_duration | duration | seconds | • | • | • |
+| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | • | • | • |
+| mysql.qcache | queries | queries | • | • | • |
+| mysql.qcache_freemem | free | MiB | • | • | • |
+| mysql.qcache_memblocks | free, total | blocks | • | • | • |
+| mysql.galera_writesets | rx, tx | writesets/s | • | • | • |
+| mysql.galera_bytes | rx, tx | KiB/s | • | • | • |
+| mysql.galera_queue | rx, tx | writesets | • | • | • |
+| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | • | • | • |
+| mysql.galera_flow_control | paused | ms | • | • | • |
+| mysql.galera_cluster_status | primary, non_primary, disconnected | status | • | • | • |
+| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | • | • | • |
+| mysql.galera_cluster_size | nodes | nodes | • | • | • |
+| mysql.galera_cluster_weight | weight | weight | • | • | • |
+| mysql.galera_connected | connected | boolean | • | • | • |
+| mysql.galera_ready | ready | boolean | • | • | • |
+| mysql.galera_open_transactions | open | transactions | • | • | • |
+| mysql.galera_thread_count | threads | threads | • | • | • |
+| mysql.key_blocks | unused, used, not_flushed | blocks | • | • | • |
+| mysql.key_requests | reads, writes | requests/s | • | • | • |
+| mysql.key_disk_ops | reads, writes | operations/s | • | • | • |
+| mysql.binlog_cache | disk, all | transactions/s | • | • | • |
+| mysql.binlog_stmt_cache | disk, all | statements/s | • | • | • |
+
+### Per connection
+
+These metrics refer to the replication connection.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |
+|:------|:----------|:----|:---:|:---:|:---:|
+| mysql.slave_behind | seconds | seconds | • | • | • |
+| mysql.slave_status | sql_running, io_running | boolean | • | • | • |
+
+### Per user
+
+These metrics refer to the MySQL user.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| user | username |
+
+Metrics:
+
+| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |
+|:------|:----------|:----|:---:|:---:|:---:|
+| mysql.userstats_cpu | used | percentage | | • | • |
+| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | • | • |
+| mysql.userstats_commands | select, update, other | commands/s | | • | • |
+| mysql.userstats_denied_commands | denied | commands/s | | • | • |
+| mysql.userstats_created_transactions | commit, rollback | transactions/s | | • | • |
+| mysql.userstats_binlog_written | written | B/s | | • | • |
+| mysql.userstats_empty_queries | empty | queries/s | | • | • |
+| mysql.userstats_connections | created | connections/s | | • | • |
+| mysql.userstats_lost_connections | lost | connections/s | | • | • |
+| mysql.userstats_denied_connections | denied | connections/s | | • | • |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |
+| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |
+| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |
+| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |
+| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |
+| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |
+| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |
+| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |
+| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |
+| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |
+| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |
+| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |
+
+
+## Setup
+
+### Prerequisites
+
+#### Create netdata user
+
+A user account should have the
+following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):
+
+- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)
+- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)
+- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)
+
+To create the `netdata` user with these permissions, execute the following in the MySQL shell:
+
+```mysql
+CREATE USER 'netdata'@'localhost';
+GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';
+FLUSH PRIVILEGES;
+```
+
+The `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only
+be able to gather statistics without being able to alter or affect operations in any way.
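+
+Optionally, you can verify that the `netdata` user can reach the server before configuring the collector. A quick check, assuming the `mysql` command-line client is installed on the same host:
+
+```bash
+mysql -u netdata -h 127.0.0.1 -P 3306 -e 'SELECT VERSION();'
+```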
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/mysql.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/mysql.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |
+| my.cnf | Specifies the my.cnf file from which connection settings are read ([client] section). | | no |
+| timeout | Query timeout in seconds. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netdata@tcp(127.0.0.1:3306)/
+
+```
+</details>
+
+##### Unix socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netdata@unix(/var/lib/mysql/mysql.sock)/
+
+```
+</details>
+
+##### Connection with password
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netconfig:password@tcp(127.0.0.1:3306)/
+
+```
+</details>
+
+##### my.cnf
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ my.cnf: '/etc/my.cnf'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netdata@tcp(127.0.0.1:3306)/
+
+ - name: remote
+ dsn: netconfig:password@tcp(203.0.113.0:3306)/
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m mysql
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep mysql
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep mysql /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep mysql
+```
+
+
diff --git a/src/go/plugin/go.d/modules/mysql/integrations/percona_mysql.md b/src/go/plugin/go.d/modules/mysql/integrations/percona_mysql.md
new file mode 100644
index 000000000..2c967e229
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/integrations/percona_mysql.md
@@ -0,0 +1,401 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/mysql/integrations/percona_mysql.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/mysql/metadata.yaml"
+sidebar_label: "Percona MySQL"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Percona MySQL
+
+
+<img src="https://netdata.cloud/img/percona.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: mysql
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.
+
+
+It connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:
+
+Executed queries:
+
+- `SELECT VERSION();`
+- `SHOW GLOBAL STATUS;`
+- `SHOW GLOBAL VARIABLES;`
+- `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)
+- `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)
+- `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users via the known MySQL TCP sockets:
+
+- 127.0.0.1:3306
+- "[::1]:3306"
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Percona MySQL instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |
+|:------|:----------|:----|:---:|:---:|:---:|
+| mysql.net | in, out | kilobits/s | • | • | • |
+| mysql.queries | queries, questions, slow_queries | queries/s | • | • | • |
+| mysql.queries_type | select, delete, update, insert, replace | queries/s | • | • | • |
+| mysql.handlers | commit, delete, prepare, read_first, read_key, read_next, read_prev, read_rnd, read_rnd_next, rollback, savepoint, savepointrollback, update, write | handlers/s | • | • | • |
+| mysql.table_open_cache_overflows | open_cache | overflows/s | • | • | • |
+| mysql.table_locks | immediate, waited | locks/s | • | • | • |
+| mysql.join_issues | full_join, full_range_join, range, range_check, scan | joins/s | • | • | • |
+| mysql.sort_issues | merge_passes, range, scan | issues/s | • | • | • |
+| mysql.tmp | disk_tables, files, tables | events/s | • | • | • |
+| mysql.connections | all, aborted | connections/s | • | • | • |
+| mysql.connections_active | active, limit, max_active | connections | • | • | • |
+| mysql.threads | connected, cached, running | threads | • | • | • |
+| mysql.threads_created | created | threads/s | • | • | • |
+| mysql.thread_cache_misses | misses | misses | • | • | • |
+| mysql.innodb_io | read, write | KiB/s | • | • | • |
+| mysql.innodb_io_ops | reads, writes, fsyncs | operations/s | • | • | • |
+| mysql.innodb_io_pending_ops | reads, writes, fsyncs | operations | • | • | • |
+| mysql.innodb_log | waits, write_requests, writes | operations/s | • | • | • |
+| mysql.innodb_cur_row_lock | current waits | operations | • | • | • |
+| mysql.innodb_rows | inserted, read, updated, deleted | operations/s | • | • | • |
+| mysql.innodb_buffer_pool_pages | data, dirty, free, misc, total | pages | • | • | • |
+| mysql.innodb_buffer_pool_pages_flushed | flush_pages | requests/s | • | • | • |
+| mysql.innodb_buffer_pool_bytes | data, dirty | MiB | • | • | • |
+| mysql.innodb_buffer_pool_read_ahead | all, evicted | pages/s | • | • | • |
+| mysql.innodb_buffer_pool_read_ahead_rnd | read-ahead | operations/s | • | • | • |
+| mysql.innodb_buffer_pool_ops | disk_reads, wait_free | operations/s | • | • | • |
+| mysql.innodb_os_log | fsyncs, writes | operations | • | • | • |
+| mysql.innodb_os_log_fsync_writes | fsyncs | operations/s | • | • | • |
+| mysql.innodb_os_log_io | write | KiB/s | • | • | • |
+| mysql.innodb_deadlocks | deadlocks | operations/s | • | • | • |
+| mysql.files | files | files | • | • | • |
+| mysql.files_rate | files | files/s | • | • | • |
+| mysql.connection_errors | accept, internal, max, peer_addr, select, tcpwrap | errors/s | • | • | • |
+| mysql.opened_tables | tables | tables/s | • | • | • |
+| mysql.open_tables | cache, tables | tables | • | • | • |
+| mysql.process_list_fetch_query_duration | duration | milliseconds | • | • | • |
+| mysql.process_list_queries_count | system, user | queries | • | • | • |
+| mysql.process_list_longest_query_duration | duration | seconds | • | • | • |
+| mysql.qcache_ops | hits, lowmem_prunes, inserts, not_cached | queries/s | • | • | • |
+| mysql.qcache | queries | queries | • | • | • |
+| mysql.qcache_freemem | free | MiB | • | • | • |
+| mysql.qcache_memblocks | free, total | blocks | • | • | • |
+| mysql.galera_writesets | rx, tx | writesets/s | • | • | • |
+| mysql.galera_bytes | rx, tx | KiB/s | • | • | • |
+| mysql.galera_queue | rx, tx | writesets | • | • | • |
+| mysql.galera_conflicts | bf_aborts, cert_fails | transactions | • | • | • |
+| mysql.galera_flow_control | paused | ms | • | • | • |
+| mysql.galera_cluster_status | primary, non_primary, disconnected | status | • | • | • |
+| mysql.galera_cluster_state | undefined, joining, donor, joined, synced, error | state | • | • | • |
+| mysql.galera_cluster_size | nodes | nodes | • | • | • |
+| mysql.galera_cluster_weight | weight | weight | • | • | • |
+| mysql.galera_connected | connected | boolean | • | • | • |
+| mysql.galera_ready | ready | boolean | • | • | • |
+| mysql.galera_open_transactions | open | transactions | • | • | • |
+| mysql.galera_thread_count | threads | threads | • | • | • |
+| mysql.key_blocks | unused, used, not_flushed | blocks | • | • | • |
+| mysql.key_requests | reads, writes | requests/s | • | • | • |
+| mysql.key_disk_ops | reads, writes | operations/s | • | • | • |
+| mysql.binlog_cache | disk, all | transactions/s | • | • | • |
+| mysql.binlog_stmt_cache | disk, all | statements/s | • | • | • |
+
+### Per connection
+
+These metrics refer to the replication connection.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |
+|:------|:----------|:----|:---:|:---:|:---:|
+| mysql.slave_behind | seconds | seconds | • | • | • |
+| mysql.slave_status | sql_running, io_running | boolean | • | • | • |
+
+### Per user
+
+These metrics refer to the MySQL user.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| user | username |
+
+Metrics:
+
+| Metric | Dimensions | Unit | MySQL | MariaDB | Percona |
+|:------|:----------|:----|:---:|:---:|:---:|
+| mysql.userstats_cpu | used | percentage | | • | • |
+| mysql.userstats_rows | read, sent, updated, inserted, deleted | operations/s | | • | • |
+| mysql.userstats_commands | select, update, other | commands/s | | • | • |
+| mysql.userstats_denied_commands | denied | commands/s | | • | • |
+| mysql.userstats_created_transactions | commit, rollback | transactions/s | | • | • |
+| mysql.userstats_binlog_written | written | B/s | | • | • |
+| mysql.userstats_empty_queries | empty | queries/s | | • | • |
+| mysql.userstats_connections | created | connections/s | | • | • |
+| mysql.userstats_lost_connections | lost | connections/s | | • | • |
+| mysql.userstats_denied_connections | denied | connections/s | | • | • |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ mysql_10s_slow_queries ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.queries | number of slow queries in the last 10 seconds |
+| [ mysql_10s_table_locks_immediate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table immediate locks in the last 10 seconds |
+| [ mysql_10s_table_locks_waited ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | number of table waited locks in the last 10 seconds |
+| [ mysql_10s_waited_locks_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.table_locks | ratio of waited table locks over the last 10 seconds |
+| [ mysql_connections ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.connections_active | client connections utilization |
+| [ mysql_replication ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_status | replication status (0: stopped, 1: working) |
+| [ mysql_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.slave_behind | difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master |
+| [ mysql_galera_cluster_size_max_2m ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | maximum galera cluster size in the last 2 minutes starting one minute ago |
+| [ mysql_galera_cluster_size ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_size | current galera cluster size, compared to the maximum size in the last 2 minutes |
+| [ mysql_galera_cluster_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Donor/Desynced or Joined |
+| [ mysql_galera_cluster_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_state | galera node state is either Undefined or Joining or Error |
+| [ mysql_galera_cluster_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf) | mysql.galera_cluster_status | galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations. |
+
+
+## Setup
+
+### Prerequisites
+
+#### Create netdata user
+
+A user account should have the
+following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):
+
+- [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)
+- [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)
+- [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)
+
+To create the `netdata` user with these permissions, execute the following in the MySQL shell:
+
+```mysql
+CREATE USER 'netdata'@'localhost';
+GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';
+FLUSH PRIVILEGES;
+```
+
+The `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only
+be able to gather statistics without being able to alter or affect operations in any way.
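+
+If you plan to monitor a remote server (see the multi-instance example below), the passwordless localhost-only account above will not work. A minimal sketch of a password-protected account allowed to connect from other hosts (adjust the host pattern and password to your environment):
+
+```mysql
+CREATE USER 'netdata'@'%' IDENTIFIED BY 'password';
+GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'%';
+FLUSH PRIVILEGES;
+```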
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/mysql.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/mysql.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| dsn | MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | root@tcp(localhost:3306)/ | yes |
+| my.cnf | Specifies the my.cnf file from which connection settings are read ([client] section). | | no |
+| timeout | Query timeout in seconds. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netdata@tcp(127.0.0.1:3306)/
+
+```
+</details>
+
+##### Unix socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netdata@unix(/var/lib/mysql/mysql.sock)/
+
+```
+</details>
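+
+If you are not sure where the server's UNIX socket lives, you can look it up on the server and use the reported path in the DSN:
+
+```mysql
+SHOW VARIABLES LIKE 'socket';
+```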
+
+##### Connection with password
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netconfig:password@tcp(127.0.0.1:3306)/
+
+```
+</details>
+
+##### my.cnf
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ my.cnf: '/etc/my.cnf'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: netdata@tcp(127.0.0.1:3306)/
+
+ - name: remote
+ dsn: netconfig:password@tcp(203.0.113.0:3306)/
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `mysql` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m mysql
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `mysql` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep mysql
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep mysql /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep mysql
+```
+
+
diff --git a/src/go/plugin/go.d/modules/mysql/metadata.yaml b/src/go/plugin/go.d/modules/mysql/metadata.yaml
new file mode 100644
index 000000000..6e0d1b6b7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/metadata.yaml
@@ -0,0 +1,802 @@
+plugin_name: go.d.plugin
+modules:
+ - &module
+ meta: &meta
+ id: collector-go.d.plugin-mysql
+ plugin_name: go.d.plugin
+ module_name: mysql
+ monitored_instance:
+ name: MySQL
+ link: https://www.mysql.com/
+ categories:
+ - data-collection.database-servers
+ icon_filename: mysql.svg
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - "db"
+ - "database"
+ - "mysql"
+ - "maria"
+ - "mariadb"
+ - "sql"
+ most_popular: true
+ overview:
+ multi_instance: true
+ data_collection:
+ metrics_description: |
+ This collector monitors the health and performance of MySQL servers and collects general statistics, replication and user metrics.
+ method_description: |
+ It connects to the MySQL instance via a TCP or UNIX socket and executes the following commands:
+
+ Executed queries:
+
+ - `SELECT VERSION();`
+ - `SHOW GLOBAL STATUS;`
+ - `SHOW GLOBAL VARIABLES;`
+          - `SHOW SLAVE STATUS;` or `SHOW ALL SLAVES STATUS;` (MariaDB v10.2+) or `SHOW REPLICA STATUS;` (MySQL 8.0.22+)
+          - `SHOW USER_STATISTICS;` (MariaDB v10.1.1+)
+ - `SELECT TIME,USER FROM INFORMATION_SCHEMA.PROCESSLIST;`
+ default_behavior:
+ auto_detection:
+ description: |
+            By default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users via the known MySQL TCP sockets:
+
+ - 127.0.0.1:3306
+ - "[::1]:3306"
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list:
+ - title: Create netdata user
+ description: |
+ A user account should have the
+ following [permissions](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html):
+
+ - [`USAGE`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_usage)
+ - [`REPLICATION CLIENT`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_replication-client)
+ - [`PROCESS`](https://dev.mysql.com/doc/refman/8.0/en/privileges-provided.html#priv_process)
+
+ To create the `netdata` user with these permissions, execute the following in the MySQL shell:
+
+ ```mysql
+ CREATE USER 'netdata'@'localhost';
+ GRANT USAGE, REPLICATION CLIENT, PROCESS ON *.* TO 'netdata'@'localhost';
+ FLUSH PRIVILEGES;
+ ```
+
+ The `netdata` user will have the ability to connect to the MySQL server on localhost without a password. It will only
+ be able to gather statistics without being able to alter or affect operations in any way.
+ configuration:
+ file:
+ name: go.d/mysql.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: dsn
+ description: MySQL server DSN (Data Source Name). See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name).
+ default_value: root@tcp(localhost:3306)/
+ required: true
+ - name: my.cnf
+            description: Specifies the my.cnf file from which connection settings are read ([client] section).
+ default_value: ""
+ required: false
+ - name: timeout
+ description: Query timeout in seconds.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: TCP socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ dsn: netdata@tcp(127.0.0.1:3306)/
+ - name: Unix socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ dsn: netdata@unix(/var/lib/mysql/mysql.sock)/
+ - name: Connection with password
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ dsn: netconfig:password@tcp(127.0.0.1:3306)/
+ - name: my.cnf
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ my.cnf: '/etc/my.cnf'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ dsn: netdata@tcp(127.0.0.1:3306)/
+
+ - name: remote
+ dsn: netconfig:password@tcp(203.0.113.0:3306)/
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: mysql_10s_slow_queries
+ metric: mysql.queries
+ info: number of slow queries in the last 10 seconds
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_10s_table_locks_immediate
+ metric: mysql.table_locks
+ info: number of table immediate locks in the last 10 seconds
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_10s_table_locks_waited
+ metric: mysql.table_locks
+ info: number of table waited locks in the last 10 seconds
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_10s_waited_locks_ratio
+ metric: mysql.table_locks
+ info: ratio of waited table locks over the last 10 seconds
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_connections
+ metric: mysql.connections_active
+ info: client connections utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_replication
+ metric: mysql.slave_status
+ info: "replication status (0: stopped, 1: working)"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_replication_lag
+ metric: mysql.slave_behind
+ info: difference between the timestamp of the latest transaction processed by the SQL thread and the timestamp of the same transaction when it was processed on the master
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_galera_cluster_size_max_2m
+ metric: mysql.galera_cluster_size
+ info: maximum galera cluster size in the last 2 minutes starting one minute ago
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_galera_cluster_size
+ metric: mysql.galera_cluster_size
+ info: current galera cluster size, compared to the maximum size in the last 2 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_galera_cluster_state_warn
+ metric: mysql.galera_cluster_state
+ info: galera node state is either Donor/Desynced or Joined
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_galera_cluster_state_crit
+ metric: mysql.galera_cluster_state
+ info: galera node state is either Undefined or Joining or Error
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ - name: mysql_galera_cluster_status
+ metric: mysql.galera_cluster_status
+ info: galera node is part of a nonoperational component. This occurs in cases of multiple membership changes that result in a loss of Quorum or in cases of split-brain situations.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/mysql.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability:
+ - MySQL
+ - MariaDB
+ - Percona
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: mysql.net
+ description: Bandwidth
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: mysql.queries
+ description: Queries
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: questions
+ - name: slow_queries
+ - name: mysql.queries_type
+ description: Queries By Type
+ unit: queries/s
+ chart_type: stacked
+ dimensions:
+ - name: select
+ - name: delete
+ - name: update
+ - name: insert
+ - name: replace
+ - name: mysql.handlers
+ description: Handlers
+ unit: handlers/s
+ chart_type: line
+ dimensions:
+ - name: commit
+ - name: delete
+ - name: prepare
+ - name: read_first
+ - name: read_key
+ - name: read_next
+ - name: read_prev
+ - name: read_rnd
+ - name: read_rnd_next
+ - name: rollback
+ - name: savepoint
+ - name: savepointrollback
+ - name: update
+ - name: write
+ - name: mysql.table_open_cache_overflows
+ description: Table open cache overflows
+ unit: overflows/s
+ chart_type: line
+ dimensions:
+ - name: open_cache
+ - name: mysql.table_locks
+ description: Table Locks
+ unit: locks/s
+ chart_type: line
+ dimensions:
+ - name: immediate
+ - name: waited
+ - name: mysql.join_issues
+ description: Table Select Join Issues
+ unit: joins/s
+ chart_type: line
+ dimensions:
+ - name: full_join
+ - name: full_range_join
+ - name: range
+ - name: range_check
+ - name: scan
+ - name: mysql.sort_issues
+ description: Table Sort Issues
+ unit: issues/s
+ chart_type: line
+ dimensions:
+ - name: merge_passes
+ - name: range
+ - name: scan
+ - name: mysql.tmp
+ description: Tmp Operations
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: disk_tables
+ - name: files
+ - name: tables
+ - name: mysql.connections
+ description: Connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: all
+ - name: aborted
+ - name: mysql.connections_active
+ description: Active Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: limit
+ - name: max_active
+ - name: mysql.threads
+ description: Threads
+ unit: threads
+ chart_type: line
+ dimensions:
+ - name: connected
+ - name: cached
+ - name: running
+ - name: mysql.threads_created
+ description: Threads Creation Rate
+ unit: threads/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: mysql.thread_cache_misses
+ description: Threads Cache Misses
+ unit: misses
+ chart_type: line
+ dimensions:
+ - name: misses
+ - name: mysql.innodb_io
+ description: InnoDB I/O Bandwidth
+ unit: KiB/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: mysql.innodb_io_ops
+ description: InnoDB I/O Operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: fsyncs
+ - name: mysql.innodb_io_pending_ops
+ description: InnoDB Pending I/O Operations
+ unit: operations
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: fsyncs
+ - name: mysql.innodb_log
+ description: InnoDB Log Operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: waits
+ - name: write_requests
+ - name: writes
+ - name: mysql.innodb_cur_row_lock
+ description: InnoDB Current Row Locks
+ unit: operations
+ chart_type: line
+ dimensions:
+ - name: current waits
+ - name: mysql.innodb_rows
+ description: InnoDB Row Operations
+ unit: operations/s
+ chart_type: area
+ dimensions:
+ - name: inserted
+ - name: read
+ - name: updated
+ - name: deleted
+ - name: mysql.innodb_buffer_pool_pages
+ description: InnoDB Buffer Pool Pages
+ unit: pages
+ chart_type: line
+ dimensions:
+ - name: data
+ - name: dirty
+ - name: free
+ - name: misc
+ - name: total
+ - name: mysql.innodb_buffer_pool_pages_flushed
+ description: InnoDB Buffer Pool Flush Pages Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: flush_pages
+ - name: mysql.innodb_buffer_pool_bytes
+ description: InnoDB Buffer Pool Bytes
+ unit: MiB
+ chart_type: line
+ dimensions:
+ - name: data
+ - name: dirty
+ - name: mysql.innodb_buffer_pool_read_ahead
+ description: InnoDB Buffer Pool Read Pages
+ unit: pages/s
+ chart_type: line
+ dimensions:
+ - name: all
+ - name: evicted
+ - name: mysql.innodb_buffer_pool_read_ahead_rnd
+ description: InnoDB Buffer Pool Random Read-Aheads
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: read-ahead
+ - name: mysql.innodb_buffer_pool_ops
+ description: InnoDB Buffer Pool Operations
+ unit: operations/s
+ chart_type: area
+ dimensions:
+ - name: disk_reads
+ - name: wait_free
+ - name: mysql.innodb_os_log
+ description: InnoDB OS Log Pending Operations
+ unit: operations
+ chart_type: line
+ dimensions:
+ - name: fsyncs
+ - name: writes
+ - name: mysql.innodb_os_log_fsync_writes
+ description: InnoDB OS Log Operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: fsyncs
+ - name: mysql.innodb_os_log_io
+ description: InnoDB OS Log Bandwidth
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: write
+ - name: mysql.innodb_deadlocks
+ description: InnoDB Deadlocks
+ unit: operations/s
+ chart_type: area
+ dimensions:
+ - name: deadlocks
+ - name: mysql.files
+ description: Open Files
+ unit: files
+ chart_type: line
+ dimensions:
+ - name: files
+ - name: mysql.files_rate
+ description: Opened Files Rate
+ unit: files/s
+ chart_type: line
+ dimensions:
+ - name: files
+ - name: mysql.connection_errors
+ description: Connection Errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: accept
+ - name: internal
+ - name: max
+ - name: peer_addr
+ - name: select
+ - name: tcpwrap
+ - name: mysql.opened_tables
+ description: Opened Tables
+ unit: tables/s
+ chart_type: line
+ dimensions:
+ - name: tables
+ - name: mysql.open_tables
+ description: Open Tables
+ unit: tables
+ chart_type: area
+ dimensions:
+ - name: cache
+ - name: tables
+ - name: mysql.process_list_fetch_query_duration
+ description: Process List Fetch Duration
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: duration
+ - name: mysql.process_list_queries_count
+ description: Queries Count
+ unit: queries
+ chart_type: stacked
+ dimensions:
+ - name: system
+ - name: user
+ - name: mysql.process_list_longest_query_duration
+ description: Longest Query Duration
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: duration
+ - name: mysql.qcache_ops
+ description: QCache Operations
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: lowmem_prunes
+ - name: inserts
+ - name: not_cached
+ - name: mysql.qcache
+ description: QCache Queries in Cache
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: mysql.qcache_freemem
+ description: QCache Free Memory
+ unit: MiB
+ chart_type: area
+ dimensions:
+ - name: free
+ - name: mysql.qcache_memblocks
+ description: QCache Memory Blocks
+ unit: blocks
+ chart_type: line
+ dimensions:
+ - name: free
+ - name: total
+ - name: mysql.galera_writesets
+ description: Replicated Writesets
+ unit: writesets/s
+ chart_type: line
+ dimensions:
+ - name: rx
+ - name: tx
+ - name: mysql.galera_bytes
+ description: Replicated Bytes
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: rx
+ - name: tx
+ - name: mysql.galera_queue
+ description: Galera Queue
+ unit: writesets
+ chart_type: line
+ dimensions:
+ - name: rx
+ - name: tx
+ - name: mysql.galera_conflicts
+ description: Replication Conflicts
+ unit: transactions
+ chart_type: area
+ dimensions:
+ - name: bf_aborts
+ - name: cert_fails
+ - name: mysql.galera_flow_control
+ description: Flow Control
+ unit: ms
+ chart_type: area
+ dimensions:
+ - name: paused
+ - name: mysql.galera_cluster_status
+ description: Cluster Component Status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: primary
+ - name: non_primary
+ - name: disconnected
+ - name: mysql.galera_cluster_state
+ description: Cluster Component State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: undefined
+ - name: joining
+ - name: donor
+ - name: joined
+ - name: synced
+ - name: error
+ - name: mysql.galera_cluster_size
+ description: Number of Nodes in the Cluster
+ unit: nodes
+ chart_type: line
+ dimensions:
+ - name: nodes
+ - name: mysql.galera_cluster_weight
+ description: The Total Weight of the Current Members in the Cluster
+ unit: weight
+ chart_type: line
+ dimensions:
+ - name: weight
+ - name: mysql.galera_connected
+ description: Cluster Connection Status
+ unit: boolean
+ chart_type: line
+ dimensions:
+ - name: connected
+ - name: mysql.galera_ready
+ description: Accept Queries Readiness Status
+ unit: boolean
+ chart_type: line
+ dimensions:
+ - name: ready
+ - name: mysql.galera_open_transactions
+ description: Open Transactions
+ unit: transactions
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: mysql.galera_thread_count
+ description: Total Number of WSRep (applier/rollbacker) Threads
+ unit: threads
+ chart_type: line
+ dimensions:
+ - name: threads
+ - name: mysql.key_blocks
+ description: MyISAM Key Cache Blocks
+ unit: blocks
+ chart_type: line
+ dimensions:
+ - name: unused
+ - name: used
+ - name: not_flushed
+ - name: mysql.key_requests
+ description: MyISAM Key Cache Requests
+ unit: requests/s
+ chart_type: area
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: mysql.key_disk_ops
+ description: MyISAM Key Cache Disk Operations
+ unit: operations/s
+ chart_type: area
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: mysql.binlog_cache
+ description: Binlog Cache
+ unit: transactions/s
+ chart_type: line
+ dimensions:
+ - name: disk
+ - name: all
+ - name: mysql.binlog_stmt_cache
+ description: Binlog Statement Cache
+ unit: statements/s
+ chart_type: line
+ dimensions:
+ - name: disk
+ - name: all
+ - name: connection
+ description: These metrics refer to the replication connection.
+ labels: []
+ metrics:
+ - name: mysql.slave_behind
+ description: Slave Behind Seconds
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: seconds
+ - name: mysql.slave_status
+ description: I/O / SQL Thread Running State
+ unit: boolean
+ chart_type: line
+ dimensions:
+ - name: sql_running
+ - name: io_running
+ - name: user
+ description: These metrics refer to the MySQL user.
+ labels:
+ - name: user
+ description: username
+ metrics:
+ - name: mysql.userstats_cpu
+ description: User CPU Time
+ unit: percentage
+ chart_type: line
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: used
+ - name: mysql.userstats_rows
+ description: User Rows Operations
+ unit: operations/s
+ chart_type: stacked
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: read
+ - name: sent
+ - name: updated
+ - name: inserted
+ - name: deleted
+ - name: mysql.userstats_commands
+ description: User Commands
+ unit: commands/s
+ chart_type: stacked
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: select
+ - name: update
+ - name: other
+ - name: mysql.userstats_denied_commands
+ description: User Denied Commands
+ unit: commands/s
+ chart_type: stacked
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: denied
+ - name: mysql.userstats_created_transactions
+ description: User Transactions
+ unit: transactions/s
+ chart_type: area
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: commit
+ - name: rollback
+ - name: mysql.userstats_binlog_written
+ description: User Binlog Written
+ unit: B/s
+ chart_type: line
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: written
+ - name: mysql.userstats_empty_queries
+ description: User Empty Queries
+ unit: queries/s
+ chart_type: line
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: empty
+ - name: mysql.userstats_connections
+ description: User Created Connections
+ unit: connections/s
+ chart_type: line
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: created
+ - name: mysql.userstats_lost_connections
+ description: User Lost Connections
+ unit: connections/s
+ chart_type: line
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: lost
+ - name: mysql.userstats_denied_connections
+ description: User Denied Connections
+ unit: connections/s
+ chart_type: line
+ availability:
+ - MariaDB
+ - Percona
+ dimensions:
+ - name: denied
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-mariadb
+ monitored_instance:
+ name: MariaDB
+ link: https://mariadb.org/
+ icon_filename: mariadb.svg
+ categories:
+ - data-collection.database-servers
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-percona_mysql
+ most_popular: false
+ monitored_instance:
+ name: Percona MySQL
+ link: https://www.percona.com/software/mysql-database/percona-server
+ icon_filename: percona.svg
+ categories:
+ - data-collection.database-servers
diff --git a/src/go/plugin/go.d/modules/mysql/mycnf.go b/src/go/plugin/go.d/modules/mysql/mycnf.go
new file mode 100644
index 000000000..2069af80d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/mycnf.go
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "fmt"
+ "os"
+
+ "gopkg.in/ini.v1"
+)
+
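+// dsnFromFile builds a go-sql-driver/mysql DSN from the [client] section of a
+// my.cnf style file. Unset values fall back to the current OS user, localhost
+// and port 3306; a configured socket takes precedence over host and port.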
+func dsnFromFile(filename string) (string, error) {
+ f, err := ini.Load(filename)
+ if err != nil {
+ return "", err
+ }
+
+ section, err := f.GetSection("client")
+ if err != nil {
+ return "", err
+ }
+
+ defaultUser := getUser()
+ defaultHost := "localhost"
+ defaultPort := "3306"
+
+ user := section.Key("user").String()
+ password := section.Key("password").String()
+ socket := section.Key("socket").String()
+ host := section.Key("host").String()
+ port := section.Key("port").String()
+ database := section.Key("database").String()
+
+ var dsn string
+
+ if user != "" {
+ dsn = user
+ } else {
+ dsn = defaultUser
+ }
+
+ if password != "" {
+ dsn += ":" + password
+ }
+
+ switch {
+ case socket != "":
+ dsn += fmt.Sprintf("@unix(%s)/", socket)
+ case host != "" && port != "":
+ dsn += fmt.Sprintf("@tcp(%s:%s)/", host, port)
+ case host != "":
+ dsn += fmt.Sprintf("@tcp(%s:%s)/", host, defaultPort)
+ case port != "":
+ dsn += fmt.Sprintf("@tcp(%s:%s)/", defaultHost, port)
+ default:
+ dsn += "@/"
+ }
+
+ if database != "" {
+ dsn += database
+ }
+ return dsn, nil
+}
+
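+// getUser returns the name of the current user, taken from the first
+// non-empty of the LOGNAME, USER, LNAME and USERNAME environment variables.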
+func getUser() (user string) {
+ if user = os.Getenv("LOGNAME"); user != "" {
+ return user
+ }
+ if user = os.Getenv("USER"); user != "" {
+ return user
+ }
+ if user = os.Getenv("LNAME"); user != "" {
+ return user
+ }
+ if user = os.Getenv("USERNAME"); user != "" {
+ return user
+ }
+ return ""
+}
diff --git a/src/go/plugin/go.d/modules/mysql/mycnf_test.go b/src/go/plugin/go.d/modules/mysql/mycnf_test.go
new file mode 100644
index 000000000..f68680272
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/mycnf_test.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_dsnFromFile(t *testing.T) {
+ user := getUser()
+ tests := map[string]struct {
+ config string
+ expectedDSN string
+ wantErr bool
+ }{
+ "socket": {
+ config: `
+[client]
+socket=/opt/bitnami/mariadb/tmp/mysql.sock
+`,
+ expectedDSN: user + "@unix(/opt/bitnami/mariadb/tmp/mysql.sock)/",
+ },
+ "socket, host, port": {
+ config: `
+[client]
+host=10.0.0.0
+port=3307
+socket=/opt/bitnami/mariadb/tmp/mysql.sock
+`,
+ expectedDSN: user + "@unix(/opt/bitnami/mariadb/tmp/mysql.sock)/",
+ },
+ "host, port": {
+ config: `
+[client]
+host=10.0.0.0
+port=3307
+`,
+ expectedDSN: user + "@tcp(10.0.0.0:3307)/",
+ },
+ "only host": {
+ config: `
+[client]
+host=10.0.0.0
+`,
+ expectedDSN: user + "@tcp(10.0.0.0:3306)/",
+ },
+ "only port": {
+ config: `
+[client]
+port=3307
+`,
+ expectedDSN: user + "@tcp(localhost:3307)/",
+ },
+ "user, password": {
+ config: `
+[client]
+user=user
+password=password
+`,
+ expectedDSN: "user:password@/",
+ },
+ "empty": {
+ config: `
+[client]
+`,
+ expectedDSN: user + "@/",
+ },
+ "no client section": {
+ config: `
+[no_client]
+`,
+ wantErr: true,
+ },
+ }
+ pattern := "netdata-godplugin-mysql-dsnFromFile-*"
+ dir, err := os.MkdirTemp(os.TempDir(), pattern)
+ require.NoError(t, err)
+ defer func() { _ = os.RemoveAll(dir) }()
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ f, err := os.CreateTemp(dir, name)
+ require.NoError(t, err)
+ _ = f.Close()
+ defer func() { _ = os.Remove(f.Name()) }()
+ _ = os.WriteFile(f.Name(), []byte(test.config), 0644)
+
+ if dsn, err := dsnFromFile(f.Name()); test.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, test.expectedDSN, dsn)
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/modules/mysql/mysql.go b/src/go/plugin/go.d/modules/mysql/mysql.go
new file mode 100644
index 000000000..1e11de39e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/mysql.go
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "database/sql"
+ _ "embed"
+ "errors"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/blang/semver/v4"
+ "github.com/go-sql-driver/mysql"
+ _ "github.com/go-sql-driver/mysql"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("mysql", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
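+// New returns a MySQL collector with defaults: a local root DSN, a one-second
+// query timeout and a ten-minute recheck interval for global variables.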
+func New() *MySQL {
+ return &MySQL{
+ Config: Config{
+ DSN: "root@tcp(localhost:3306)/",
+ Timeout: web.Duration(time.Second),
+ },
+
+ charts: baseCharts.Copy(),
+ addInnoDBOSLogOnce: &sync.Once{},
+ addBinlogOnce: &sync.Once{},
+ addMyISAMOnce: &sync.Once{},
+ addInnodbDeadlocksOnce: &sync.Once{},
+ addGaleraOnce: &sync.Once{},
+ addQCacheOnce: &sync.Once{},
+ addTableOpenCacheOverflowsOnce: &sync.Once{},
+ doDisableSessionQueryLog: true,
+ doSlaveStatus: true,
+ doUserStatistics: true,
+ collectedReplConns: make(map[string]bool),
+ collectedUsers: make(map[string]bool),
+
+ recheckGlobalVarsEvery: time.Minute * 10,
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ DSN string `yaml:"dsn" json:"dsn"`
+ MyCNF string `yaml:"my.cnf,omitempty" json:"my.cnf"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type MySQL struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+ addInnoDBOSLogOnce *sync.Once
+ addBinlogOnce *sync.Once
+ addMyISAMOnce *sync.Once
+ addInnodbDeadlocksOnce *sync.Once
+ addGaleraOnce *sync.Once
+ addQCacheOnce *sync.Once
+ addTableOpenCacheOverflowsOnce *sync.Once
+
+ db *sql.DB
+
+ safeDSN string
+ version *semver.Version
+ isMariaDB bool
+ isPercona bool
+
+ doDisableSessionQueryLog bool
+
+ doSlaveStatus bool
+ collectedReplConns map[string]bool
+ doUserStatistics bool
+ collectedUsers map[string]bool
+
+ recheckGlobalVarsTime time.Time
+ recheckGlobalVarsEvery time.Duration
+ varMaxConns int64
+ varTableOpenCache int64
+ varDisabledStorageEngine string
+ varLogBin string
+ varPerformanceSchema string
+}
+
+func (m *MySQL) Configuration() any {
+ return m.Config
+}
+
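+// Init resolves the DSN (from my.cnf when configured, overriding dsn),
+// validates it and keeps a password-masked copy for logging.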
+func (m *MySQL) Init() error {
+ if m.MyCNF != "" {
+ dsn, err := dsnFromFile(m.MyCNF)
+ if err != nil {
+ m.Error(err)
+ return err
+ }
+ m.DSN = dsn
+ }
+
+ if m.DSN == "" {
+ m.Error("dsn not set")
+ return errors.New("dsn not set")
+ }
+
+ cfg, err := mysql.ParseDSN(m.DSN)
+ if err != nil {
+ m.Errorf("error on parsing DSN: %v", err)
+ return err
+ }
+
+ cfg.Passwd = strings.Repeat("*", len(cfg.Passwd))
+ m.safeDSN = cfg.FormatDSN()
+
+ m.Debugf("using DSN [%s]", m.DSN)
+
+ return nil
+}
+
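+// Check runs a single collection cycle and fails if it yields no metrics.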
+func (m *MySQL) Check() error {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (m *MySQL) Charts() *module.Charts {
+ return m.charts
+}
+
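+// Collect gathers metrics; on error it logs and returns whatever was
+// collected, or nil if nothing was.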
+func (m *MySQL) Collect() map[string]int64 {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
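+// Cleanup closes the database connection if one was opened.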
+func (m *MySQL) Cleanup() {
+ if m.db == nil {
+ return
+ }
+ if err := m.db.Close(); err != nil {
+ m.Errorf("cleanup: error on closing the mysql database [%s]: %v", m.safeDSN, err)
+ }
+ m.db = nil
+}
diff --git a/src/go/plugin/go.d/modules/mysql/mysql_test.go b/src/go/plugin/go.d/modules/mysql/mysql_test.go
new file mode 100644
index 000000000..300f8dabe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/mysql_test.go
@@ -0,0 +1,1759 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package mysql
+
+import (
+ "bufio"
+ "bytes"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/blang/semver/v4"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataSessionVariables, _ = os.ReadFile("testdata/session_variables.txt")
+
+ dataMySQLVer8030Version, _ = os.ReadFile("testdata/mysql/v8.0.30/version.txt")
+ dataMySQLVer8030GlobalStatus, _ = os.ReadFile("testdata/mysql/v8.0.30/global_status.txt")
+ dataMySQLVer8030GlobalVariables, _ = os.ReadFile("testdata/mysql/v8.0.30/global_variables.txt")
+ dataMySQLVer8030ReplicaStatusMultiSource, _ = os.ReadFile("testdata/mysql/v8.0.30/replica_status_multi_source.txt")
+ dataMySQLVer8030ProcessList, _ = os.ReadFile("testdata/mysql/v8.0.30/process_list.txt")
+
+ dataPerconaVer8029Version, _ = os.ReadFile("testdata/percona/v8.0.29/version.txt")
+ dataPerconaVer8029GlobalStatus, _ = os.ReadFile("testdata/percona/v8.0.29/global_status.txt")
+ dataPerconaVer8029GlobalVariables, _ = os.ReadFile("testdata/percona/v8.0.29/global_variables.txt")
+ dataPerconaVer8029UserStatistics, _ = os.ReadFile("testdata/percona/v8.0.29/user_statistics.txt")
+ dataPerconaV8029ProcessList, _ = os.ReadFile("testdata/percona/v8.0.29/process_list.txt")
+
+ dataMariaVer5564Version, _ = os.ReadFile("testdata/mariadb/v5.5.64/version.txt")
+ dataMariaVer5564GlobalStatus, _ = os.ReadFile("testdata/mariadb/v5.5.64/global_status.txt")
+ dataMariaVer5564GlobalVariables, _ = os.ReadFile("testdata/mariadb/v5.5.64/global_variables.txt")
+ dataMariaVer5564ProcessList, _ = os.ReadFile("testdata/mariadb/v5.5.64/process_list.txt")
+
+ dataMariaVer1084Version, _ = os.ReadFile("testdata/mariadb/v10.8.4/version.txt")
+ dataMariaVer1084GlobalStatus, _ = os.ReadFile("testdata/mariadb/v10.8.4/global_status.txt")
+ dataMariaVer1084GlobalVariables, _ = os.ReadFile("testdata/mariadb/v10.8.4/global_variables.txt")
+ dataMariaVer1084AllSlavesStatusSingleSource, _ = os.ReadFile("testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt")
+ dataMariaVer1084AllSlavesStatusMultiSource, _ = os.ReadFile("testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt")
+ dataMariaVer1084UserStatistics, _ = os.ReadFile("testdata/mariadb/v10.8.4/user_statistics.txt")
+ dataMariaVer1084ProcessList, _ = os.ReadFile("testdata/mariadb/v10.8.4/process_list.txt")
+
+ dataMariaGaleraClusterVer1084Version, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/version.txt")
+ dataMariaGaleraClusterVer1084GlobalStatus, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/global_status.txt")
+ dataMariaGaleraClusterVer1084GlobalVariables, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt")
+ dataMariaGaleraClusterVer1084UserStatistics, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt")
+ dataMariaGaleraClusterVer1084ProcessList, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/process_list.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataSessionVariables": dataSessionVariables,
+ "dataMySQLVer8030Version": dataMySQLVer8030Version,
+ "dataMySQLVer8030GlobalStatus": dataMySQLVer8030GlobalStatus,
+ "dataMySQLVer8030GlobalVariables": dataMySQLVer8030GlobalVariables,
+ "dataMySQLVer8030ReplicaStatusMultiSource": dataMySQLVer8030ReplicaStatusMultiSource,
+ "dataMySQLVer8030ProcessList": dataMySQLVer8030ProcessList,
+ "dataPerconaVer8029Version": dataPerconaVer8029Version,
+ "dataPerconaVer8029GlobalStatus": dataPerconaVer8029GlobalStatus,
+ "dataPerconaVer8029GlobalVariables": dataPerconaVer8029GlobalVariables,
+ "dataPerconaVer8029UserStatistics": dataPerconaVer8029UserStatistics,
+ "dataPerconaV8029ProcessList": dataPerconaV8029ProcessList,
+ "dataMariaVer5564Version": dataMariaVer5564Version,
+ "dataMariaVer5564GlobalStatus": dataMariaVer5564GlobalStatus,
+ "dataMariaVer5564GlobalVariables": dataMariaVer5564GlobalVariables,
+ "dataMariaVer5564ProcessList": dataMariaVer5564ProcessList,
+ "dataMariaVer1084Version": dataMariaVer1084Version,
+ "dataMariaVer1084GlobalStatus": dataMariaVer1084GlobalStatus,
+ "dataMariaVer1084GlobalVariables": dataMariaVer1084GlobalVariables,
+ "dataMariaVer1084AllSlavesStatusSingleSource": dataMariaVer1084AllSlavesStatusSingleSource,
+ "dataMariaVer1084AllSlavesStatusMultiSource": dataMariaVer1084AllSlavesStatusMultiSource,
+ "dataMariaVer1084UserStatistics": dataMariaVer1084UserStatistics,
+ "dataMariaVer1084ProcessList": dataMariaVer1084ProcessList,
+ "dataMariaGaleraClusterVer1084Version": dataMariaGaleraClusterVer1084Version,
+ "dataMariaGaleraClusterVer1084GlobalStatus": dataMariaGaleraClusterVer1084GlobalStatus,
+ "dataMariaGaleraClusterVer1084GlobalVariables": dataMariaGaleraClusterVer1084GlobalVariables,
+ "dataMariaGaleraClusterVer1084UserStatistics": dataMariaGaleraClusterVer1084UserStatistics,
+ "dataMariaGaleraClusterVer1084ProcessList": dataMariaGaleraClusterVer1084ProcessList,
+ } {
+ require.NotNil(t, data, fmt.Sprintf("read data: %s", name))
+ _, err := prepareMockRows(data)
+ require.NoError(t, err, fmt.Sprintf("prepare mock rows: %s", name))
+ }
+}
+
+func TestMySQL_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &MySQL{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestMySQL_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "empty DSN": {
+ config: Config{DSN: ""},
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mySQL := New()
+ mySQL.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, mySQL.Init())
+ } else {
+ assert.NoError(t, mySQL.Init())
+ }
+ })
+ }
+}
+
+func TestMySQL_Cleanup(t *testing.T) {
+ tests := map[string]func(t *testing.T) (mySQL *MySQL, cleanup func()){
+ "db connection not initialized": func(t *testing.T) (mySQL *MySQL, cleanup func()) {
+ return New(), func() {}
+ },
+ "db connection initialized": func(t *testing.T) (mySQL *MySQL, cleanup func()) {
+ db, mock, err := sqlmock.New()
+ require.NoError(t, err)
+
+ mock.ExpectClose()
+ mySQL = New()
+ mySQL.db = db
+ cleanup = func() { _ = db.Close() }
+
+ return mySQL, cleanup
+ },
+ }
+
+ for name, prepare := range tests {
+ t.Run(name, func(t *testing.T) {
+ mySQL, cleanup := prepare(t)
+ defer cleanup()
+
+ assert.NotPanics(t, mySQL.Cleanup)
+ assert.Nil(t, mySQL.db)
+ })
+ }
+}
+
+func TestMySQL_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestMySQL_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func(t *testing.T, m sqlmock.Sqlmock)
+ wantFail bool
+ }{
+ "success on all queries": {
+ wantFail: false,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusMultiSource)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
+ },
+ },
+ "fails when error on querying version": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpectErr(m, queryShowVersion)
+ },
+ },
+ "fails when error on querying global status": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpectErr(m, queryShowGlobalStatus)
+ },
+ },
+ "fails when error on querying global variables": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpectErr(m, queryShowGlobalVariables)
+ },
+ },
+ "success when error on querying slave status": {
+ wantFail: false,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpectErr(m, queryShowAllSlavesStatus)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
+ },
+ },
+ "success when error on querying user statistics": {
+ wantFail: false,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusMultiSource)
+ mockExpectErr(m, queryShowUserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
+ },
+ },
+ "success when error on querying process list": {
+ wantFail: false,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusMultiSource)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpectErr(m, queryShowProcessList)
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ my := New()
+ my.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, my.Init())
+
+ test.prepareMock(t, mock)
+
+ if test.wantFail {
+ assert.Error(t, my.Check())
+ } else {
+ assert.NoError(t, my.Check())
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
+func TestMySQL_Collect(t *testing.T) {
+ type testCaseStep struct {
+ prepareMock func(t *testing.T, m sqlmock.Sqlmock)
+ check func(t *testing.T, my *MySQL)
+ }
+ tests := map[string][]testCaseStep{
+ "MariaDB-Standalone[v5.5.46]: success on all queries": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer5564Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer5564GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer5564GlobalVariables)
+ mockExpect(t, m, queryShowSlaveStatus, nil)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer5564ProcessList)
+ },
+ check: func(t *testing.T, my *MySQL) {
+ mx := my.Collect()
+
+ expected := map[string]int64{
+ "aborted_connects": 0,
+ "binlog_cache_disk_use": 0,
+ "binlog_cache_use": 0,
+ "binlog_stmt_cache_disk_use": 0,
+ "binlog_stmt_cache_use": 0,
+ "bytes_received": 639,
+ "bytes_sent": 41620,
+ "com_delete": 0,
+ "com_insert": 0,
+ "com_replace": 0,
+ "com_select": 4,
+ "com_update": 0,
+ "connections": 4,
+ "created_tmp_disk_tables": 0,
+ "created_tmp_files": 6,
+ "created_tmp_tables": 5,
+ "handler_commit": 0,
+ "handler_delete": 0,
+ "handler_prepare": 0,
+ "handler_read_first": 0,
+ "handler_read_key": 0,
+ "handler_read_next": 0,
+ "handler_read_prev": 0,
+ "handler_read_rnd": 0,
+ "handler_read_rnd_next": 1264,
+ "handler_rollback": 0,
+ "handler_savepoint": 0,
+ "handler_savepoint_rollback": 0,
+ "handler_update": 0,
+ "handler_write": 0,
+ "innodb_buffer_pool_bytes_data": 2342912,
+ "innodb_buffer_pool_bytes_dirty": 0,
+ "innodb_buffer_pool_pages_data": 143,
+ "innodb_buffer_pool_pages_dirty": 0,
+ "innodb_buffer_pool_pages_flushed": 0,
+ "innodb_buffer_pool_pages_free": 16240,
+ "innodb_buffer_pool_pages_misc": 0,
+ "innodb_buffer_pool_pages_total": 16383,
+ "innodb_buffer_pool_read_ahead": 0,
+ "innodb_buffer_pool_read_ahead_evicted": 0,
+ "innodb_buffer_pool_read_ahead_rnd": 0,
+ "innodb_buffer_pool_read_requests": 459,
+ "innodb_buffer_pool_reads": 144,
+ "innodb_buffer_pool_wait_free": 0,
+ "innodb_buffer_pool_write_requests": 0,
+ "innodb_data_fsyncs": 3,
+ "innodb_data_pending_fsyncs": 0,
+ "innodb_data_pending_reads": 0,
+ "innodb_data_pending_writes": 0,
+ "innodb_data_read": 4542976,
+ "innodb_data_reads": 155,
+ "innodb_data_writes": 3,
+ "innodb_data_written": 1536,
+ "innodb_deadlocks": 0,
+ "innodb_log_waits": 0,
+ "innodb_log_write_requests": 0,
+ "innodb_log_writes": 1,
+ "innodb_os_log_fsyncs": 3,
+ "innodb_os_log_pending_fsyncs": 0,
+ "innodb_os_log_pending_writes": 0,
+ "innodb_os_log_written": 512,
+ "innodb_row_lock_current_waits": 0,
+ "innodb_rows_deleted": 0,
+ "innodb_rows_inserted": 0,
+ "innodb_rows_read": 0,
+ "innodb_rows_updated": 0,
+ "key_blocks_not_flushed": 0,
+ "key_blocks_unused": 107171,
+ "key_blocks_used": 0,
+ "key_read_requests": 0,
+ "key_reads": 0,
+ "key_write_requests": 0,
+ "key_writes": 0,
+ "max_connections": 100,
+ "max_used_connections": 1,
+ "open_files": 21,
+ "open_tables": 26,
+ "opened_files": 84,
+ "opened_tables": 0,
+ "process_list_fetch_query_duration": 0,
+ "process_list_longest_query_duration": 9,
+ "process_list_queries_count_system": 0,
+ "process_list_queries_count_user": 2,
+ "qcache_free_blocks": 1,
+ "qcache_free_memory": 67091120,
+ "qcache_hits": 0,
+ "qcache_inserts": 0,
+ "qcache_lowmem_prunes": 0,
+ "qcache_not_cached": 4,
+ "qcache_queries_in_cache": 0,
+ "qcache_total_blocks": 1,
+ "queries": 12,
+ "questions": 11,
+ "select_full_join": 0,
+ "select_full_range_join": 0,
+ "select_range": 0,
+ "select_range_check": 0,
+ "select_scan": 5,
+ "slow_queries": 0,
+ "sort_merge_passes": 0,
+ "sort_range": 0,
+ "sort_scan": 0,
+ "table_locks_immediate": 36,
+ "table_locks_waited": 0,
+ "table_open_cache": 400,
+ "thread_cache_misses": 2500,
+ "threads_cached": 0,
+ "threads_connected": 1,
+ "threads_created": 1,
+ "threads_running": 1,
+ }
+
+ copyProcessListQueryDuration(mx, expected)
+ require.Equal(t, expected, mx)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx)
+ },
+ },
+ },
+ "MariaDB-Standalone[v10.8.4]: success on all queries": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, nil)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
+ },
+ check: func(t *testing.T, my *MySQL) {
+ mx := my.Collect()
+
+ expected := map[string]int64{
+
+ "aborted_connects": 2,
+ "binlog_cache_disk_use": 0,
+ "binlog_cache_use": 0,
+ "binlog_stmt_cache_disk_use": 0,
+ "binlog_stmt_cache_use": 0,
+ "bytes_received": 81392,
+ "bytes_sent": 56794,
+ "com_delete": 0,
+ "com_insert": 0,
+ "com_replace": 0,
+ "com_select": 6,
+ "com_update": 0,
+ "connection_errors_accept": 0,
+ "connection_errors_internal": 0,
+ "connection_errors_max_connections": 0,
+ "connection_errors_peer_address": 0,
+ "connection_errors_select": 0,
+ "connection_errors_tcpwrap": 0,
+ "connections": 12,
+ "created_tmp_disk_tables": 0,
+ "created_tmp_files": 5,
+ "created_tmp_tables": 2,
+ "handler_commit": 30,
+ "handler_delete": 0,
+ "handler_prepare": 0,
+ "handler_read_first": 7,
+ "handler_read_key": 7,
+ "handler_read_next": 3,
+ "handler_read_prev": 0,
+ "handler_read_rnd": 0,
+ "handler_read_rnd_next": 626,
+ "handler_rollback": 0,
+ "handler_savepoint": 0,
+ "handler_savepoint_rollback": 0,
+ "handler_update": 3,
+ "handler_write": 13,
+ "innodb_buffer_pool_bytes_data": 5062656,
+ "innodb_buffer_pool_bytes_dirty": 475136,
+ "innodb_buffer_pool_pages_data": 309,
+ "innodb_buffer_pool_pages_dirty": 29,
+ "innodb_buffer_pool_pages_flushed": 0,
+ "innodb_buffer_pool_pages_free": 7755,
+ "innodb_buffer_pool_pages_misc": 0,
+ "innodb_buffer_pool_pages_total": 8064,
+ "innodb_buffer_pool_read_ahead": 0,
+ "innodb_buffer_pool_read_ahead_evicted": 0,
+ "innodb_buffer_pool_read_ahead_rnd": 0,
+ "innodb_buffer_pool_read_requests": 1911,
+ "innodb_buffer_pool_reads": 171,
+ "innodb_buffer_pool_wait_free": 0,
+ "innodb_buffer_pool_write_requests": 148,
+ "innodb_data_fsyncs": 17,
+ "innodb_data_pending_fsyncs": 0,
+ "innodb_data_pending_reads": 0,
+ "innodb_data_pending_writes": 0,
+ "innodb_data_read": 2801664,
+ "innodb_data_reads": 185,
+ "innodb_data_writes": 16,
+ "innodb_data_written": 0,
+ "innodb_deadlocks": 0,
+ "innodb_log_waits": 0,
+ "innodb_log_write_requests": 109,
+ "innodb_log_writes": 15,
+ "innodb_os_log_written": 6097,
+ "innodb_row_lock_current_waits": 0,
+ "innodb_rows_deleted": 0,
+ "innodb_rows_inserted": 0,
+ "innodb_rows_read": 0,
+ "innodb_rows_updated": 0,
+ "key_blocks_not_flushed": 0,
+ "key_blocks_unused": 107163,
+ "key_blocks_used": 0,
+ "key_read_requests": 0,
+ "key_reads": 0,
+ "key_write_requests": 0,
+ "key_writes": 0,
+ "max_connections": 151,
+ "max_used_connections": 1,
+ "open_files": 29,
+ "open_tables": 10,
+ "opened_files": 100,
+ "opened_tables": 16,
+ "process_list_fetch_query_duration": 0,
+ "process_list_longest_query_duration": 9,
+ "process_list_queries_count_system": 0,
+ "process_list_queries_count_user": 2,
+ "qcache_free_blocks": 1,
+ "qcache_free_memory": 1031272,
+ "qcache_hits": 0,
+ "qcache_inserts": 0,
+ "qcache_lowmem_prunes": 0,
+ "qcache_not_cached": 0,
+ "qcache_queries_in_cache": 0,
+ "qcache_total_blocks": 1,
+ "queries": 33,
+ "questions": 24,
+ "select_full_join": 0,
+ "select_full_range_join": 0,
+ "select_range": 0,
+ "select_range_check": 0,
+ "select_scan": 2,
+ "slow_queries": 0,
+ "sort_merge_passes": 0,
+ "sort_range": 0,
+ "sort_scan": 0,
+ "table_locks_immediate": 60,
+ "table_locks_waited": 0,
+ "table_open_cache": 2000,
+ "table_open_cache_overflows": 0,
+ "thread_cache_misses": 1666,
+ "threads_cached": 0,
+ "threads_connected": 1,
+ "threads_created": 2,
+ "threads_running": 3,
+ "userstats_netdata_access_denied": 33,
+ "userstats_netdata_binlog_bytes_written": 0,
+ "userstats_netdata_commit_transactions": 0,
+ "userstats_netdata_cpu_time": 77,
+ "userstats_netdata_denied_connections": 49698,
+ "userstats_netdata_empty_queries": 66,
+ "userstats_netdata_lost_connections": 0,
+ "userstats_netdata_other_commands": 0,
+ "userstats_netdata_rollback_transactions": 0,
+ "userstats_netdata_rows_deleted": 0,
+ "userstats_netdata_rows_inserted": 0,
+ "userstats_netdata_rows_read": 0,
+ "userstats_netdata_rows_sent": 99,
+ "userstats_netdata_rows_updated": 0,
+ "userstats_netdata_select_commands": 33,
+ "userstats_netdata_total_connections": 1,
+ "userstats_netdata_update_commands": 0,
+ "userstats_root_access_denied": 0,
+ "userstats_root_binlog_bytes_written": 0,
+ "userstats_root_commit_transactions": 0,
+ "userstats_root_cpu_time": 0,
+ "userstats_root_denied_connections": 0,
+ "userstats_root_empty_queries": 0,
+ "userstats_root_lost_connections": 0,
+ "userstats_root_other_commands": 0,
+ "userstats_root_rollback_transactions": 0,
+ "userstats_root_rows_deleted": 0,
+ "userstats_root_rows_inserted": 0,
+ "userstats_root_rows_read": 0,
+ "userstats_root_rows_sent": 2,
+ "userstats_root_rows_updated": 0,
+ "userstats_root_select_commands": 0,
+ "userstats_root_total_connections": 1,
+ "userstats_root_update_commands": 0,
+ "wsrep_cluster_size": 0,
+ "wsrep_cluster_status_disconnected": 1,
+ "wsrep_cluster_status_non_primary": 0,
+ "wsrep_cluster_status_primary": 0,
+ "wsrep_connected": 0,
+ "wsrep_local_bf_aborts": 0,
+ "wsrep_ready": 0,
+ "wsrep_thread_count": 0,
+ }
+
+ copyProcessListQueryDuration(mx, expected)
+ require.Equal(t, expected, mx)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx)
+ },
+ },
+ },
+ "MariaDB-SingleSourceReplication[v10.8.4]: success on all queries": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusSingleSource)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
+ },
+ check: func(t *testing.T, my *MySQL) {
+ mx := my.Collect()
+
+ expected := map[string]int64{
+ "aborted_connects": 2,
+ "binlog_cache_disk_use": 0,
+ "binlog_cache_use": 0,
+ "binlog_stmt_cache_disk_use": 0,
+ "binlog_stmt_cache_use": 0,
+ "bytes_received": 81392,
+ "bytes_sent": 56794,
+ "com_delete": 0,
+ "com_insert": 0,
+ "com_replace": 0,
+ "com_select": 6,
+ "com_update": 0,
+ "connection_errors_accept": 0,
+ "connection_errors_internal": 0,
+ "connection_errors_max_connections": 0,
+ "connection_errors_peer_address": 0,
+ "connection_errors_select": 0,
+ "connection_errors_tcpwrap": 0,
+ "connections": 12,
+ "created_tmp_disk_tables": 0,
+ "created_tmp_files": 5,
+ "created_tmp_tables": 2,
+ "handler_commit": 30,
+ "handler_delete": 0,
+ "handler_prepare": 0,
+ "handler_read_first": 7,
+ "handler_read_key": 7,
+ "handler_read_next": 3,
+ "handler_read_prev": 0,
+ "handler_read_rnd": 0,
+ "handler_read_rnd_next": 626,
+ "handler_rollback": 0,
+ "handler_savepoint": 0,
+ "handler_savepoint_rollback": 0,
+ "handler_update": 3,
+ "handler_write": 13,
+ "innodb_buffer_pool_bytes_data": 5062656,
+ "innodb_buffer_pool_bytes_dirty": 475136,
+ "innodb_buffer_pool_pages_data": 309,
+ "innodb_buffer_pool_pages_dirty": 29,
+ "innodb_buffer_pool_pages_flushed": 0,
+ "innodb_buffer_pool_pages_free": 7755,
+ "innodb_buffer_pool_pages_misc": 0,
+ "innodb_buffer_pool_pages_total": 8064,
+ "innodb_buffer_pool_read_ahead": 0,
+ "innodb_buffer_pool_read_ahead_evicted": 0,
+ "innodb_buffer_pool_read_ahead_rnd": 0,
+ "innodb_buffer_pool_read_requests": 1911,
+ "innodb_buffer_pool_reads": 171,
+ "innodb_buffer_pool_wait_free": 0,
+ "innodb_buffer_pool_write_requests": 148,
+ "innodb_data_fsyncs": 17,
+ "innodb_data_pending_fsyncs": 0,
+ "innodb_data_pending_reads": 0,
+ "innodb_data_pending_writes": 0,
+ "innodb_data_read": 2801664,
+ "innodb_data_reads": 185,
+ "innodb_data_writes": 16,
+ "innodb_data_written": 0,
+ "innodb_deadlocks": 0,
+ "innodb_log_waits": 0,
+ "innodb_log_write_requests": 109,
+ "innodb_log_writes": 15,
+ "innodb_os_log_written": 6097,
+ "innodb_row_lock_current_waits": 0,
+ "innodb_rows_deleted": 0,
+ "innodb_rows_inserted": 0,
+ "innodb_rows_read": 0,
+ "innodb_rows_updated": 0,
+ "key_blocks_not_flushed": 0,
+ "key_blocks_unused": 107163,
+ "key_blocks_used": 0,
+ "key_read_requests": 0,
+ "key_reads": 0,
+ "key_write_requests": 0,
+ "key_writes": 0,
+ "max_connections": 151,
+ "max_used_connections": 1,
+ "open_files": 29,
+ "open_tables": 10,
+ "opened_files": 100,
+ "opened_tables": 16,
+ "process_list_fetch_query_duration": 0,
+ "process_list_longest_query_duration": 9,
+ "process_list_queries_count_system": 0,
+ "process_list_queries_count_user": 2,
+ "qcache_free_blocks": 1,
+ "qcache_free_memory": 1031272,
+ "qcache_hits": 0,
+ "qcache_inserts": 0,
+ "qcache_lowmem_prunes": 0,
+ "qcache_not_cached": 0,
+ "qcache_queries_in_cache": 0,
+ "qcache_total_blocks": 1,
+ "queries": 33,
+ "questions": 24,
+ "seconds_behind_master": 0,
+ "select_full_join": 0,
+ "select_full_range_join": 0,
+ "select_range": 0,
+ "select_range_check": 0,
+ "select_scan": 2,
+ "slave_io_running": 1,
+ "slave_sql_running": 1,
+ "slow_queries": 0,
+ "sort_merge_passes": 0,
+ "sort_range": 0,
+ "sort_scan": 0,
+ "table_locks_immediate": 60,
+ "table_locks_waited": 0,
+ "table_open_cache": 2000,
+ "table_open_cache_overflows": 0,
+ "thread_cache_misses": 1666,
+ "threads_cached": 0,
+ "threads_connected": 1,
+ "threads_created": 2,
+ "threads_running": 3,
+ "userstats_netdata_access_denied": 33,
+ "userstats_netdata_binlog_bytes_written": 0,
+ "userstats_netdata_commit_transactions": 0,
+ "userstats_netdata_cpu_time": 77,
+ "userstats_netdata_denied_connections": 49698,
+ "userstats_netdata_empty_queries": 66,
+ "userstats_netdata_lost_connections": 0,
+ "userstats_netdata_other_commands": 0,
+ "userstats_netdata_rollback_transactions": 0,
+ "userstats_netdata_rows_deleted": 0,
+ "userstats_netdata_rows_inserted": 0,
+ "userstats_netdata_rows_read": 0,
+ "userstats_netdata_rows_sent": 99,
+ "userstats_netdata_rows_updated": 0,
+ "userstats_netdata_select_commands": 33,
+ "userstats_netdata_total_connections": 1,
+ "userstats_netdata_update_commands": 0,
+ "userstats_root_access_denied": 0,
+ "userstats_root_binlog_bytes_written": 0,
+ "userstats_root_commit_transactions": 0,
+ "userstats_root_cpu_time": 0,
+ "userstats_root_denied_connections": 0,
+ "userstats_root_empty_queries": 0,
+ "userstats_root_lost_connections": 0,
+ "userstats_root_other_commands": 0,
+ "userstats_root_rollback_transactions": 0,
+ "userstats_root_rows_deleted": 0,
+ "userstats_root_rows_inserted": 0,
+ "userstats_root_rows_read": 0,
+ "userstats_root_rows_sent": 2,
+ "userstats_root_rows_updated": 0,
+ "userstats_root_select_commands": 0,
+ "userstats_root_total_connections": 1,
+ "userstats_root_update_commands": 0,
+ "wsrep_cluster_size": 0,
+ "wsrep_cluster_status_disconnected": 1,
+ "wsrep_cluster_status_non_primary": 0,
+ "wsrep_cluster_status_primary": 0,
+ "wsrep_connected": 0,
+ "wsrep_local_bf_aborts": 0,
+ "wsrep_ready": 0,
+ "wsrep_thread_count": 0,
+ }
+
+ copyProcessListQueryDuration(mx, expected)
+ require.Equal(t, expected, mx)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx)
+ },
+ },
+ },
+ "MariaDB-MultiSourceReplication[v10.8.4]: success on all queries": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusMultiSource)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
+ },
+ check: func(t *testing.T, my *MySQL) {
+ mx := my.Collect()
+
+ expected := map[string]int64{
+ "aborted_connects": 2,
+ "binlog_cache_disk_use": 0,
+ "binlog_cache_use": 0,
+ "binlog_stmt_cache_disk_use": 0,
+ "binlog_stmt_cache_use": 0,
+ "bytes_received": 81392,
+ "bytes_sent": 56794,
+ "com_delete": 0,
+ "com_insert": 0,
+ "com_replace": 0,
+ "com_select": 6,
+ "com_update": 0,
+ "connection_errors_accept": 0,
+ "connection_errors_internal": 0,
+ "connection_errors_max_connections": 0,
+ "connection_errors_peer_address": 0,
+ "connection_errors_select": 0,
+ "connection_errors_tcpwrap": 0,
+ "connections": 12,
+ "created_tmp_disk_tables": 0,
+ "created_tmp_files": 5,
+ "created_tmp_tables": 2,
+ "handler_commit": 30,
+ "handler_delete": 0,
+ "handler_prepare": 0,
+ "handler_read_first": 7,
+ "handler_read_key": 7,
+ "handler_read_next": 3,
+ "handler_read_prev": 0,
+ "handler_read_rnd": 0,
+ "handler_read_rnd_next": 626,
+ "handler_rollback": 0,
+ "handler_savepoint": 0,
+ "handler_savepoint_rollback": 0,
+ "handler_update": 3,
+ "handler_write": 13,
+ "innodb_buffer_pool_bytes_data": 5062656,
+ "innodb_buffer_pool_bytes_dirty": 475136,
+ "innodb_buffer_pool_pages_data": 309,
+ "innodb_buffer_pool_pages_dirty": 29,
+ "innodb_buffer_pool_pages_flushed": 0,
+ "innodb_buffer_pool_pages_free": 7755,
+ "innodb_buffer_pool_pages_misc": 0,
+ "innodb_buffer_pool_pages_total": 8064,
+ "innodb_buffer_pool_read_ahead": 0,
+ "innodb_buffer_pool_read_ahead_evicted": 0,
+ "innodb_buffer_pool_read_ahead_rnd": 0,
+ "innodb_buffer_pool_read_requests": 1911,
+ "innodb_buffer_pool_reads": 171,
+ "innodb_buffer_pool_wait_free": 0,
+ "innodb_buffer_pool_write_requests": 148,
+ "innodb_data_fsyncs": 17,
+ "innodb_data_pending_fsyncs": 0,
+ "innodb_data_pending_reads": 0,
+ "innodb_data_pending_writes": 0,
+ "innodb_data_read": 2801664,
+ "innodb_data_reads": 185,
+ "innodb_data_writes": 16,
+ "innodb_data_written": 0,
+ "innodb_deadlocks": 0,
+ "innodb_log_waits": 0,
+ "innodb_log_write_requests": 109,
+ "innodb_log_writes": 15,
+ "innodb_os_log_written": 6097,
+ "innodb_row_lock_current_waits": 0,
+ "innodb_rows_deleted": 0,
+ "innodb_rows_inserted": 0,
+ "innodb_rows_read": 0,
+ "innodb_rows_updated": 0,
+ "key_blocks_not_flushed": 0,
+ "key_blocks_unused": 107163,
+ "key_blocks_used": 0,
+ "key_read_requests": 0,
+ "key_reads": 0,
+ "key_write_requests": 0,
+ "key_writes": 0,
+ "max_connections": 151,
+ "max_used_connections": 1,
+ "open_files": 29,
+ "open_tables": 10,
+ "opened_files": 100,
+ "opened_tables": 16,
+ "process_list_fetch_query_duration": 0,
+ "process_list_longest_query_duration": 9,
+ "process_list_queries_count_system": 0,
+ "process_list_queries_count_user": 2,
+ "qcache_free_blocks": 1,
+ "qcache_free_memory": 1031272,
+ "qcache_hits": 0,
+ "qcache_inserts": 0,
+ "qcache_lowmem_prunes": 0,
+ "qcache_not_cached": 0,
+ "qcache_queries_in_cache": 0,
+ "qcache_total_blocks": 1,
+ "queries": 33,
+ "questions": 24,
+ "seconds_behind_master_master1": 0,
+ "seconds_behind_master_master2": 0,
+ "select_full_join": 0,
+ "select_full_range_join": 0,
+ "select_range": 0,
+ "select_range_check": 0,
+ "select_scan": 2,
+ "slave_io_running_master1": 1,
+ "slave_io_running_master2": 1,
+ "slave_sql_running_master1": 1,
+ "slave_sql_running_master2": 1,
+ "slow_queries": 0,
+ "sort_merge_passes": 0,
+ "sort_range": 0,
+ "sort_scan": 0,
+ "table_locks_immediate": 60,
+ "table_locks_waited": 0,
+ "table_open_cache": 2000,
+ "table_open_cache_overflows": 0,
+ "thread_cache_misses": 1666,
+ "threads_cached": 0,
+ "threads_connected": 1,
+ "threads_created": 2,
+ "threads_running": 3,
+ "userstats_netdata_access_denied": 33,
+ "userstats_netdata_binlog_bytes_written": 0,
+ "userstats_netdata_commit_transactions": 0,
+ "userstats_netdata_cpu_time": 77,
+ "userstats_netdata_denied_connections": 49698,
+ "userstats_netdata_empty_queries": 66,
+ "userstats_netdata_lost_connections": 0,
+ "userstats_netdata_other_commands": 0,
+ "userstats_netdata_rollback_transactions": 0,
+ "userstats_netdata_rows_deleted": 0,
+ "userstats_netdata_rows_inserted": 0,
+ "userstats_netdata_rows_read": 0,
+ "userstats_netdata_rows_sent": 99,
+ "userstats_netdata_rows_updated": 0,
+ "userstats_netdata_select_commands": 33,
+ "userstats_netdata_total_connections": 1,
+ "userstats_netdata_update_commands": 0,
+ "userstats_root_access_denied": 0,
+ "userstats_root_binlog_bytes_written": 0,
+ "userstats_root_commit_transactions": 0,
+ "userstats_root_cpu_time": 0,
+ "userstats_root_denied_connections": 0,
+ "userstats_root_empty_queries": 0,
+ "userstats_root_lost_connections": 0,
+ "userstats_root_other_commands": 0,
+ "userstats_root_rollback_transactions": 0,
+ "userstats_root_rows_deleted": 0,
+ "userstats_root_rows_inserted": 0,
+ "userstats_root_rows_read": 0,
+ "userstats_root_rows_sent": 2,
+ "userstats_root_rows_updated": 0,
+ "userstats_root_select_commands": 0,
+ "userstats_root_total_connections": 1,
+ "userstats_root_update_commands": 0,
+ "wsrep_cluster_size": 0,
+ "wsrep_cluster_status_disconnected": 1,
+ "wsrep_cluster_status_non_primary": 0,
+ "wsrep_cluster_status_primary": 0,
+ "wsrep_connected": 0,
+ "wsrep_local_bf_aborts": 0,
+ "wsrep_ready": 0,
+ "wsrep_thread_count": 0,
+ }
+
+ copyProcessListQueryDuration(mx, expected)
+ require.Equal(t, expected, mx)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx)
+ },
+ },
+ },
+ "MariaDB-MultiSourceReplication[v10.8.4]: error on slaves status (no permissions)": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpectErr(m, queryShowAllSlavesStatus)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
+ },
+ check: func(t *testing.T, my *MySQL) {
+ mx := my.Collect()
+
+ expected := map[string]int64{
+ "aborted_connects": 2,
+ "binlog_cache_disk_use": 0,
+ "binlog_cache_use": 0,
+ "binlog_stmt_cache_disk_use": 0,
+ "binlog_stmt_cache_use": 0,
+ "bytes_received": 81392,
+ "bytes_sent": 56794,
+ "com_delete": 0,
+ "com_insert": 0,
+ "com_replace": 0,
+ "com_select": 6,
+ "com_update": 0,
+ "connection_errors_accept": 0,
+ "connection_errors_internal": 0,
+ "connection_errors_max_connections": 0,
+ "connection_errors_peer_address": 0,
+ "connection_errors_select": 0,
+ "connection_errors_tcpwrap": 0,
+ "connections": 12,
+ "created_tmp_disk_tables": 0,
+ "created_tmp_files": 5,
+ "created_tmp_tables": 2,
+ "handler_commit": 30,
+ "handler_delete": 0,
+ "handler_prepare": 0,
+ "handler_read_first": 7,
+ "handler_read_key": 7,
+ "handler_read_next": 3,
+ "handler_read_prev": 0,
+ "handler_read_rnd": 0,
+ "handler_read_rnd_next": 626,
+ "handler_rollback": 0,
+ "handler_savepoint": 0,
+ "handler_savepoint_rollback": 0,
+ "handler_update": 3,
+ "handler_write": 13,
+ "innodb_buffer_pool_bytes_data": 5062656,
+ "innodb_buffer_pool_bytes_dirty": 475136,
+ "innodb_buffer_pool_pages_data": 309,
+ "innodb_buffer_pool_pages_dirty": 29,
+ "innodb_buffer_pool_pages_flushed": 0,
+ "innodb_buffer_pool_pages_free": 7755,
+ "innodb_buffer_pool_pages_misc": 0,
+ "innodb_buffer_pool_pages_total": 8064,
+ "innodb_buffer_pool_read_ahead": 0,
+ "innodb_buffer_pool_read_ahead_evicted": 0,
+ "innodb_buffer_pool_read_ahead_rnd": 0,
+ "innodb_buffer_pool_read_requests": 1911,
+ "innodb_buffer_pool_reads": 171,
+ "innodb_buffer_pool_wait_free": 0,
+ "innodb_buffer_pool_write_requests": 148,
+ "innodb_data_fsyncs": 17,
+ "innodb_data_pending_fsyncs": 0,
+ "innodb_data_pending_reads": 0,
+ "innodb_data_pending_writes": 0,
+ "innodb_data_read": 2801664,
+ "innodb_data_reads": 185,
+ "innodb_data_writes": 16,
+ "innodb_data_written": 0,
+ "innodb_deadlocks": 0,
+ "innodb_log_waits": 0,
+ "innodb_log_write_requests": 109,
+ "innodb_log_writes": 15,
+ "innodb_os_log_written": 6097,
+ "innodb_row_lock_current_waits": 0,
+ "innodb_rows_deleted": 0,
+ "innodb_rows_inserted": 0,
+ "innodb_rows_read": 0,
+ "innodb_rows_updated": 0,
+ "key_blocks_not_flushed": 0,
+ "key_blocks_unused": 107163,
+ "key_blocks_used": 0,
+ "key_read_requests": 0,
+ "key_reads": 0,
+ "key_write_requests": 0,
+ "key_writes": 0,
+ "max_connections": 151,
+ "max_used_connections": 1,
+ "open_files": 29,
+ "open_tables": 10,
+ "opened_files": 100,
+ "opened_tables": 16,
+ "process_list_fetch_query_duration": 0,
+ "process_list_longest_query_duration": 9,
+ "process_list_queries_count_system": 0,
+ "process_list_queries_count_user": 2,
+ "qcache_free_blocks": 1,
+ "qcache_free_memory": 1031272,
+ "qcache_hits": 0,
+ "qcache_inserts": 0,
+ "qcache_lowmem_prunes": 0,
+ "qcache_not_cached": 0,
+ "qcache_queries_in_cache": 0,
+ "qcache_total_blocks": 1,
+ "queries": 33,
+ "questions": 24,
+ "select_full_join": 0,
+ "select_full_range_join": 0,
+ "select_range": 0,
+ "select_range_check": 0,
+ "select_scan": 2,
+ "slow_queries": 0,
+ "sort_merge_passes": 0,
+ "sort_range": 0,
+ "sort_scan": 0,
+ "table_locks_immediate": 60,
+ "table_locks_waited": 0,
+ "table_open_cache": 2000,
+ "table_open_cache_overflows": 0,
+ "thread_cache_misses": 1666,
+ "threads_cached": 0,
+ "threads_connected": 1,
+ "threads_created": 2,
+ "threads_running": 3,
+ "userstats_netdata_access_denied": 33,
+ "userstats_netdata_binlog_bytes_written": 0,
+ "userstats_netdata_commit_transactions": 0,
+ "userstats_netdata_cpu_time": 77,
+ "userstats_netdata_denied_connections": 49698,
+ "userstats_netdata_empty_queries": 66,
+ "userstats_netdata_lost_connections": 0,
+ "userstats_netdata_other_commands": 0,
+ "userstats_netdata_rollback_transactions": 0,
+ "userstats_netdata_rows_deleted": 0,
+ "userstats_netdata_rows_inserted": 0,
+ "userstats_netdata_rows_read": 0,
+ "userstats_netdata_rows_sent": 99,
+ "userstats_netdata_rows_updated": 0,
+ "userstats_netdata_select_commands": 33,
+ "userstats_netdata_total_connections": 1,
+ "userstats_netdata_update_commands": 0,
+ "userstats_root_access_denied": 0,
+ "userstats_root_binlog_bytes_written": 0,
+ "userstats_root_commit_transactions": 0,
+ "userstats_root_cpu_time": 0,
+ "userstats_root_denied_connections": 0,
+ "userstats_root_empty_queries": 0,
+ "userstats_root_lost_connections": 0,
+ "userstats_root_other_commands": 0,
+ "userstats_root_rollback_transactions": 0,
+ "userstats_root_rows_deleted": 0,
+ "userstats_root_rows_inserted": 0,
+ "userstats_root_rows_read": 0,
+ "userstats_root_rows_sent": 2,
+ "userstats_root_rows_updated": 0,
+ "userstats_root_select_commands": 0,
+ "userstats_root_total_connections": 1,
+ "userstats_root_update_commands": 0,
+ "wsrep_cluster_size": 0,
+ "wsrep_cluster_status_disconnected": 1,
+ "wsrep_cluster_status_non_primary": 0,
+ "wsrep_cluster_status_primary": 0,
+ "wsrep_connected": 0,
+ "wsrep_local_bf_aborts": 0,
+ "wsrep_ready": 0,
+ "wsrep_thread_count": 0,
+ }
+
+ copyProcessListQueryDuration(mx, expected)
+ require.Equal(t, expected, mx)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx)
+ },
+ },
+ },
+ "MariaDB-GaleraCluster[v10.8.4]: success on all queries": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMariaGaleraClusterVer1084Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaGaleraClusterVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaGaleraClusterVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, nil)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaGaleraClusterVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaGaleraClusterVer1084ProcessList)
+ },
+ check: func(t *testing.T, my *MySQL) {
+ mx := my.Collect()
+
+ expected := map[string]int64{
+ "aborted_connects": 0,
+ "binlog_cache_disk_use": 0,
+ "binlog_cache_use": 0,
+ "binlog_stmt_cache_disk_use": 0,
+ "binlog_stmt_cache_use": 0,
+ "bytes_received": 3009,
+ "bytes_sent": 228856,
+ "com_delete": 6,
+ "com_insert": 0,
+ "com_replace": 0,
+ "com_select": 12,
+ "com_update": 0,
+ "connection_errors_accept": 0,
+ "connection_errors_internal": 0,
+ "connection_errors_max_connections": 0,
+ "connection_errors_peer_address": 0,
+ "connection_errors_select": 0,
+ "connection_errors_tcpwrap": 0,
+ "connections": 15,
+ "created_tmp_disk_tables": 4,
+ "created_tmp_files": 5,
+ "created_tmp_tables": 17,
+ "handler_commit": 37,
+ "handler_delete": 7,
+ "handler_prepare": 0,
+ "handler_read_first": 3,
+ "handler_read_key": 9,
+ "handler_read_next": 1,
+ "handler_read_prev": 0,
+ "handler_read_rnd": 0,
+ "handler_read_rnd_next": 6222,
+ "handler_rollback": 0,
+ "handler_savepoint": 0,
+ "handler_savepoint_rollback": 0,
+ "handler_update": 0,
+ "handler_write": 9,
+ "innodb_buffer_pool_bytes_data": 5193728,
+ "innodb_buffer_pool_bytes_dirty": 2260992,
+ "innodb_buffer_pool_pages_data": 317,
+ "innodb_buffer_pool_pages_dirty": 138,
+ "innodb_buffer_pool_pages_flushed": 0,
+ "innodb_buffer_pool_pages_free": 7747,
+ "innodb_buffer_pool_pages_misc": 0,
+ "innodb_buffer_pool_pages_total": 8064,
+ "innodb_buffer_pool_read_ahead": 0,
+ "innodb_buffer_pool_read_ahead_evicted": 0,
+ "innodb_buffer_pool_read_ahead_rnd": 0,
+ "innodb_buffer_pool_read_requests": 2298,
+ "innodb_buffer_pool_reads": 184,
+ "innodb_buffer_pool_wait_free": 0,
+ "innodb_buffer_pool_write_requests": 203,
+ "innodb_data_fsyncs": 15,
+ "innodb_data_pending_fsyncs": 0,
+ "innodb_data_pending_reads": 0,
+ "innodb_data_pending_writes": 0,
+ "innodb_data_read": 3014656,
+ "innodb_data_reads": 201,
+ "innodb_data_writes": 14,
+ "innodb_data_written": 0,
+ "innodb_deadlocks": 0,
+ "innodb_log_waits": 0,
+ "innodb_log_write_requests": 65,
+ "innodb_log_writes": 13,
+ "innodb_os_log_written": 4785,
+ "innodb_row_lock_current_waits": 0,
+ "innodb_rows_deleted": 0,
+ "innodb_rows_inserted": 0,
+ "innodb_rows_read": 0,
+ "innodb_rows_updated": 0,
+ "key_blocks_not_flushed": 0,
+ "key_blocks_unused": 107163,
+ "key_blocks_used": 0,
+ "key_read_requests": 0,
+ "key_reads": 0,
+ "key_write_requests": 0,
+ "key_writes": 0,
+ "max_connections": 151,
+ "max_used_connections": 1,
+ "open_files": 7,
+ "open_tables": 0,
+ "opened_files": 125,
+ "opened_tables": 24,
+ "process_list_fetch_query_duration": 0,
+ "process_list_longest_query_duration": 9,
+ "process_list_queries_count_system": 0,
+ "process_list_queries_count_user": 2,
+ "qcache_free_blocks": 1,
+ "qcache_free_memory": 1031272,
+ "qcache_hits": 0,
+ "qcache_inserts": 0,
+ "qcache_lowmem_prunes": 0,
+ "qcache_not_cached": 0,
+ "qcache_queries_in_cache": 0,
+ "qcache_total_blocks": 1,
+ "queries": 75,
+ "questions": 62,
+ "select_full_join": 0,
+ "select_full_range_join": 0,
+ "select_range": 0,
+ "select_range_check": 0,
+ "select_scan": 17,
+ "slow_queries": 0,
+ "sort_merge_passes": 0,
+ "sort_range": 0,
+ "sort_scan": 0,
+ "table_locks_immediate": 17,
+ "table_locks_waited": 0,
+ "table_open_cache": 2000,
+ "table_open_cache_overflows": 0,
+ "thread_cache_misses": 4000,
+ "threads_cached": 0,
+ "threads_connected": 1,
+ "threads_created": 6,
+ "threads_running": 1,
+ "userstats_netdata_access_denied": 33,
+ "userstats_netdata_binlog_bytes_written": 0,
+ "userstats_netdata_commit_transactions": 0,
+ "userstats_netdata_cpu_time": 77,
+ "userstats_netdata_denied_connections": 49698,
+ "userstats_netdata_empty_queries": 66,
+ "userstats_netdata_lost_connections": 0,
+ "userstats_netdata_other_commands": 0,
+ "userstats_netdata_rollback_transactions": 0,
+ "userstats_netdata_rows_deleted": 0,
+ "userstats_netdata_rows_inserted": 0,
+ "userstats_netdata_rows_read": 0,
+ "userstats_netdata_rows_sent": 99,
+ "userstats_netdata_rows_updated": 0,
+ "userstats_netdata_select_commands": 33,
+ "userstats_netdata_total_connections": 1,
+ "userstats_netdata_update_commands": 0,
+ "userstats_root_access_denied": 0,
+ "userstats_root_binlog_bytes_written": 0,
+ "userstats_root_commit_transactions": 0,
+ "userstats_root_cpu_time": 0,
+ "userstats_root_denied_connections": 0,
+ "userstats_root_empty_queries": 0,
+ "userstats_root_lost_connections": 0,
+ "userstats_root_other_commands": 0,
+ "userstats_root_rollback_transactions": 0,
+ "userstats_root_rows_deleted": 0,
+ "userstats_root_rows_inserted": 0,
+ "userstats_root_rows_read": 0,
+ "userstats_root_rows_sent": 2,
+ "userstats_root_rows_updated": 0,
+ "userstats_root_select_commands": 0,
+ "userstats_root_total_connections": 1,
+ "userstats_root_update_commands": 0,
+ "wsrep_cluster_size": 3,
+ "wsrep_cluster_status_disconnected": 0,
+ "wsrep_cluster_status_non_primary": 0,
+ "wsrep_cluster_status_primary": 1,
+ "wsrep_cluster_weight": 3,
+ "wsrep_connected": 1,
+ "wsrep_flow_control_paused_ns": 0,
+ "wsrep_local_bf_aborts": 0,
+ "wsrep_local_cert_failures": 0,
+ "wsrep_local_recv_queue": 0,
+ "wsrep_local_send_queue": 0,
+ "wsrep_local_state_donor": 0,
+ "wsrep_local_state_error": 0,
+ "wsrep_local_state_joined": 0,
+ "wsrep_local_state_joiner": 0,
+ "wsrep_local_state_synced": 1,
+ "wsrep_local_state_undefined": 0,
+ "wsrep_open_transactions": 0,
+ "wsrep_ready": 1,
+ "wsrep_received": 11,
+ "wsrep_received_bytes": 1410,
+ "wsrep_replicated": 0,
+ "wsrep_replicated_bytes": 0,
+ "wsrep_thread_count": 5,
+ }
+
+ copyProcessListQueryDuration(mx, expected)
+ require.Equal(t, expected, mx)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx)
+ },
+ },
+ },
+ "MySQL-MultiSourceReplication[v8.0.30]: success on all queries": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataMySQLVer8030Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataMySQLVer8030GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMySQLVer8030GlobalVariables)
+ mockExpect(t, m, queryShowReplicaStatus, dataMySQLVer8030ReplicaStatusMultiSource)
+ mockExpect(t, m, queryShowProcessListPS, dataMySQLVer8030ProcessList)
+ },
+ check: func(t *testing.T, my *MySQL) {
+ mx := my.Collect()
+
+ expected := map[string]int64{
+ "aborted_connects": 0,
+ "binlog_cache_disk_use": 0,
+ "binlog_cache_use": 6,
+ "binlog_stmt_cache_disk_use": 0,
+ "binlog_stmt_cache_use": 0,
+ "bytes_received": 5584,
+ "bytes_sent": 70700,
+ "com_delete": 0,
+ "com_insert": 0,
+ "com_replace": 0,
+ "com_select": 2,
+ "com_update": 0,
+ "connection_errors_accept": 0,
+ "connection_errors_internal": 0,
+ "connection_errors_max_connections": 0,
+ "connection_errors_peer_address": 0,
+ "connection_errors_select": 0,
+ "connection_errors_tcpwrap": 0,
+ "connections": 25,
+ "created_tmp_disk_tables": 0,
+ "created_tmp_files": 5,
+ "created_tmp_tables": 6,
+ "handler_commit": 720,
+ "handler_delete": 8,
+ "handler_prepare": 24,
+ "handler_read_first": 50,
+ "handler_read_key": 1914,
+ "handler_read_next": 4303,
+ "handler_read_prev": 0,
+ "handler_read_rnd": 0,
+ "handler_read_rnd_next": 4723,
+ "handler_rollback": 1,
+ "handler_savepoint": 0,
+ "handler_savepoint_rollback": 0,
+ "handler_update": 373,
+ "handler_write": 1966,
+ "innodb_buffer_pool_bytes_data": 17121280,
+ "innodb_buffer_pool_bytes_dirty": 0,
+ "innodb_buffer_pool_pages_data": 1045,
+ "innodb_buffer_pool_pages_dirty": 0,
+ "innodb_buffer_pool_pages_flushed": 361,
+ "innodb_buffer_pool_pages_free": 7143,
+ "innodb_buffer_pool_pages_misc": 4,
+ "innodb_buffer_pool_pages_total": 8192,
+ "innodb_buffer_pool_read_ahead": 0,
+ "innodb_buffer_pool_read_ahead_evicted": 0,
+ "innodb_buffer_pool_read_ahead_rnd": 0,
+ "innodb_buffer_pool_read_requests": 16723,
+ "innodb_buffer_pool_reads": 878,
+ "innodb_buffer_pool_wait_free": 0,
+ "innodb_buffer_pool_write_requests": 2377,
+ "innodb_data_fsyncs": 255,
+ "innodb_data_pending_fsyncs": 0,
+ "innodb_data_pending_reads": 0,
+ "innodb_data_pending_writes": 0,
+ "innodb_data_read": 14453760,
+ "innodb_data_reads": 899,
+ "innodb_data_writes": 561,
+ "innodb_data_written": 6128128,
+ "innodb_log_waits": 0,
+ "innodb_log_write_requests": 1062,
+ "innodb_log_writes": 116,
+ "innodb_os_log_fsyncs": 69,
+ "innodb_os_log_pending_fsyncs": 0,
+ "innodb_os_log_pending_writes": 0,
+ "innodb_os_log_written": 147968,
+ "innodb_row_lock_current_waits": 0,
+ "innodb_rows_deleted": 0,
+ "innodb_rows_inserted": 0,
+ "innodb_rows_read": 0,
+ "innodb_rows_updated": 0,
+ "key_blocks_not_flushed": 0,
+ "key_blocks_unused": 6698,
+ "key_blocks_used": 0,
+ "key_read_requests": 0,
+ "key_reads": 0,
+ "key_write_requests": 0,
+ "key_writes": 0,
+ "max_connections": 151,
+ "max_used_connections": 2,
+ "open_files": 8,
+ "open_tables": 127,
+ "opened_files": 8,
+ "opened_tables": 208,
+ "process_list_fetch_query_duration": 0,
+ "process_list_longest_query_duration": 9,
+ "process_list_queries_count_system": 0,
+ "process_list_queries_count_user": 2,
+ "queries": 27,
+ "questions": 15,
+ "seconds_behind_master_master1": 0,
+ "seconds_behind_master_master2": 0,
+ "select_full_join": 0,
+ "select_full_range_join": 0,
+ "select_range": 0,
+ "select_range_check": 0,
+ "select_scan": 12,
+ "slave_io_running_master1": 1,
+ "slave_io_running_master2": 1,
+ "slave_sql_running_master1": 1,
+ "slave_sql_running_master2": 1,
+ "slow_queries": 0,
+ "sort_merge_passes": 0,
+ "sort_range": 0,
+ "sort_scan": 0,
+ "table_locks_immediate": 6,
+ "table_locks_waited": 0,
+ "table_open_cache": 4000,
+ "table_open_cache_overflows": 0,
+ "thread_cache_misses": 800,
+ "threads_cached": 1,
+ "threads_connected": 1,
+ "threads_created": 2,
+ "threads_running": 2,
+ }
+
+ copyProcessListQueryDuration(mx, expected)
+ require.Equal(t, expected, mx)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx)
+ },
+ },
+ },
+ "Percona-Standalone[v8.0.29]: success on all queries": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataPerconaVer8029Version)
+ mockExpect(t, m, queryShowSessionVariables, dataSessionVariables)
+ mockExpect(t, m, queryDisableSessionQueryLog, nil)
+ mockExpect(t, m, queryDisableSessionSlowQueryLog, nil)
+ mockExpect(t, m, queryShowGlobalStatus, dataPerconaVer8029GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataPerconaVer8029GlobalVariables)
+ mockExpect(t, m, queryShowReplicaStatus, nil)
+ mockExpect(t, m, queryShowUserStatistics, dataPerconaVer8029UserStatistics)
+ mockExpect(t, m, queryShowProcessListPS, dataPerconaV8029ProcessList)
+ },
+ check: func(t *testing.T, my *MySQL) {
+ mx := my.Collect()
+
+ expected := map[string]int64{
+ "aborted_connects": 1,
+ "binlog_cache_disk_use": 0,
+ "binlog_cache_use": 0,
+ "binlog_stmt_cache_disk_use": 0,
+ "binlog_stmt_cache_use": 0,
+ "bytes_received": 682970,
+ "bytes_sent": 33668405,
+ "com_delete": 0,
+ "com_insert": 0,
+ "com_replace": 0,
+ "com_select": 1687,
+ "com_update": 0,
+ "connection_errors_accept": 0,
+ "connection_errors_internal": 0,
+ "connection_errors_max_connections": 0,
+ "connection_errors_peer_address": 0,
+ "connection_errors_select": 0,
+ "connection_errors_tcpwrap": 0,
+ "connections": 13,
+ "created_tmp_disk_tables": 1683,
+ "created_tmp_files": 5,
+ "created_tmp_tables": 5054,
+ "handler_commit": 576,
+ "handler_delete": 0,
+ "handler_prepare": 0,
+ "handler_read_first": 1724,
+ "handler_read_key": 3439,
+ "handler_read_next": 4147,
+ "handler_read_prev": 0,
+ "handler_read_rnd": 0,
+ "handler_read_rnd_next": 2983285,
+ "handler_rollback": 0,
+ "handler_savepoint": 0,
+ "handler_savepoint_rollback": 0,
+ "handler_update": 317,
+ "handler_write": 906501,
+ "innodb_buffer_pool_bytes_data": 18399232,
+ "innodb_buffer_pool_bytes_dirty": 49152,
+ "innodb_buffer_pool_pages_data": 1123,
+ "innodb_buffer_pool_pages_dirty": 3,
+ "innodb_buffer_pool_pages_flushed": 205,
+ "innodb_buffer_pool_pages_free": 7064,
+ "innodb_buffer_pool_pages_misc": 5,
+ "innodb_buffer_pool_pages_total": 8192,
+ "innodb_buffer_pool_read_ahead": 0,
+ "innodb_buffer_pool_read_ahead_evicted": 0,
+ "innodb_buffer_pool_read_ahead_rnd": 0,
+ "innodb_buffer_pool_read_requests": 109817,
+ "innodb_buffer_pool_reads": 978,
+ "innodb_buffer_pool_wait_free": 0,
+ "innodb_buffer_pool_write_requests": 77412,
+ "innodb_data_fsyncs": 50,
+ "innodb_data_pending_fsyncs": 0,
+ "innodb_data_pending_reads": 0,
+ "innodb_data_pending_writes": 0,
+ "innodb_data_read": 16094208,
+ "innodb_data_reads": 1002,
+ "innodb_data_writes": 288,
+ "innodb_data_written": 3420160,
+ "innodb_log_waits": 0,
+ "innodb_log_write_requests": 651,
+ "innodb_log_writes": 47,
+ "innodb_os_log_fsyncs": 13,
+ "innodb_os_log_pending_fsyncs": 0,
+ "innodb_os_log_pending_writes": 0,
+ "innodb_os_log_written": 45568,
+ "innodb_row_lock_current_waits": 0,
+ "innodb_rows_deleted": 0,
+ "innodb_rows_inserted": 5055,
+ "innodb_rows_read": 5055,
+ "innodb_rows_updated": 0,
+ "key_blocks_not_flushed": 0,
+ "key_blocks_unused": 6698,
+ "key_blocks_used": 0,
+ "key_read_requests": 0,
+ "key_reads": 0,
+ "key_write_requests": 0,
+ "key_writes": 0,
+ "max_connections": 151,
+ "max_used_connections": 3,
+ "open_files": 2,
+ "open_tables": 77,
+ "opened_files": 2,
+ "opened_tables": 158,
+ "process_list_fetch_query_duration": 0,
+ "process_list_longest_query_duration": 9,
+ "process_list_queries_count_system": 0,
+ "process_list_queries_count_user": 2,
+ "queries": 6748,
+ "questions": 6746,
+ "select_full_join": 0,
+ "select_full_range_join": 0,
+ "select_range": 0,
+ "select_range_check": 0,
+ "select_scan": 8425,
+ "slow_queries": 0,
+ "sort_merge_passes": 0,
+ "sort_range": 0,
+ "sort_scan": 1681,
+ "table_locks_immediate": 3371,
+ "table_locks_waited": 0,
+ "table_open_cache": 4000,
+ "table_open_cache_overflows": 0,
+ "thread_cache_misses": 2307,
+ "threads_cached": 1,
+ "threads_connected": 2,
+ "threads_created": 3,
+ "threads_running": 2,
+ "userstats_netdata_access_denied": 0,
+ "userstats_netdata_binlog_bytes_written": 0,
+ "userstats_netdata_commit_transactions": 0,
+ "userstats_netdata_cpu_time": 0,
+ "userstats_netdata_denied_connections": 0,
+ "userstats_netdata_empty_queries": 0,
+ "userstats_netdata_lost_connections": 0,
+ "userstats_netdata_other_commands": 1,
+ "userstats_netdata_rollback_transactions": 0,
+ "userstats_netdata_rows_fetched": 1,
+ "userstats_netdata_rows_updated": 0,
+ "userstats_netdata_select_commands": 1,
+ "userstats_netdata_total_connections": 1,
+ "userstats_netdata_update_commands": 0,
+ "userstats_root_access_denied": 0,
+ "userstats_root_binlog_bytes_written": 0,
+ "userstats_root_commit_transactions": 0,
+ "userstats_root_cpu_time": 151,
+ "userstats_root_denied_connections": 1,
+ "userstats_root_empty_queries": 36,
+ "userstats_root_lost_connections": 0,
+ "userstats_root_other_commands": 110,
+ "userstats_root_rollback_transactions": 0,
+ "userstats_root_rows_fetched": 1,
+ "userstats_root_rows_updated": 0,
+ "userstats_root_select_commands": 37,
+ "userstats_root_total_connections": 2,
+ "userstats_root_update_commands": 0,
+ }
+
+ copyProcessListQueryDuration(mx, expected)
+ require.Equal(t, expected, mx)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, my, mx)
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ my := New()
+ my.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, my.Init())
+
+ for i, step := range test {
+ t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
+ step.prepareMock(t, mock)
+ step.check(t, my)
+ })
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
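+// ensureCollectedHasAllChartsDimsVarsIDs verifies that every dimension and variable of every
+// chart created by the collector has a matching key in the collected metrics map, skipping
+// charts that do not apply to the detected server flavor/version.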
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, mySQL *MySQL, collected map[string]int64) {
+ for _, chart := range *mySQL.Charts() {
+ if mySQL.isMariaDB {
+ // https://mariadb.com/kb/en/server-status-variables/#connection_errors_accept
+ if mySQL.version.LT(semver.Version{Major: 10, Minor: 0, Patch: 4}) && chart.ID == "connection_errors" {
+ continue
+ }
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
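+// copyProcessListQueryDuration copies the timing-dependent process_list_fetch_query_duration
+// value from src into dst (when both maps contain it) so the equality assertion is not
+// affected by how long the process list query actually took.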
+func copyProcessListQueryDuration(dst, src map[string]int64) {
+ if _, ok := dst["process_list_fetch_query_duration"]; !ok {
+ return
+ }
+ if _, ok := src["process_list_fetch_query_duration"]; !ok {
+ return
+ }
+ dst["process_list_fetch_query_duration"] = src["process_list_fetch_query_duration"]
+}
+
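+// mustMockRows converts a testdata table dump into sqlmock rows, failing the test on parse errors.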
+func mustMockRows(t *testing.T, data []byte) *sqlmock.Rows {
+ rows, err := prepareMockRows(data)
+ require.NoError(t, err)
+ return rows
+}
+
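+// mockExpect registers an expectation that the given query returns the rows parsed from data.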
+func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) {
+ mock.ExpectQuery(query).WillReturnRows(mustMockRows(t, rows)).RowsWillBeClosed()
+}
+
+func mockExpectErr(mock sqlmock.Sqlmock, query string) {
+ mock.ExpectQuery(query).WillReturnError(fmt.Errorf("mock error (%s)", query))
+}
+
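+// prepareMockRows parses a pipe-delimited console-style table dump: the first non-separator
+// line supplies the column names, each following line a row of values; empty input yields an
+// empty result set.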
+func prepareMockRows(data []byte) (*sqlmock.Rows, error) {
+ if len(data) == 0 {
+ return sqlmock.NewRows(nil), nil
+ }
+
+ r := bytes.NewReader(data)
+ sc := bufio.NewScanner(r)
+
+ var numColumns int
+ var rows *sqlmock.Rows
+
+ for sc.Scan() {
+ s := strings.TrimSpace(strings.Trim(sc.Text(), "|"))
+ switch {
+ case s == "",
+ strings.HasPrefix(s, "+"),
+ strings.HasPrefix(s, "ft_boolean_syntax"):
+ continue
+ }
+
+ parts := strings.Split(s, "|")
+ for i, v := range parts {
+ parts[i] = strings.TrimSpace(v)
+ }
+
+ if rows == nil {
+ numColumns = len(parts)
+ rows = sqlmock.NewRows(parts)
+ continue
+ }
+
+ if len(parts) != numColumns {
+ return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts))
+ }
+
+ values := make([]driver.Value, len(parts))
+ for i, v := range parts {
+ values[i] = v
+ }
+ rows.AddRow(values...)
+ }
+
+ if rows == nil {
+ return nil, errors.New("prepareMockRows(): nil rows result")
+ }
+
+ return rows, sc.Err()
+}
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/config.json b/src/go/plugin/go.d/modules/mysql/testdata/config.json
new file mode 100644
index 000000000..92a65cb5c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "dsn": "ok",
+ "my.cnf": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/config.yaml b/src/go/plugin/go.d/modules/mysql/testdata/config.yaml
new file mode 100644
index 000000000..9bb474b94
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+dsn: "ok"
+my.cnf: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_status.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_status.txt
new file mode 100644
index 000000000..8a6b691cd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_status.txt
@@ -0,0 +1,621 @@
++--------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+
+| Variable_name | Value |
++--------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+
+| Aborted_clients | 0 |
+| Aborted_connects | 0 |
+| Aborted_connects_preauth | 0 |
+| Access_denied_errors | 0 |
+| Acl_column_grants | 0 |
+| Acl_database_grants | 3 |
+| Acl_function_grants | 0 |
+| Acl_procedure_grants | 0 |
+| Acl_package_spec_grants | 0 |
+| Acl_package_body_grants | 0 |
+| Acl_proxy_users | 1 |
+| Acl_role_grants | 0 |
+| Acl_roles | 0 |
+| Acl_table_grants | 1 |
+| Acl_users | 6 |
+| Aria_pagecache_blocks_not_flushed | 0 |
+| Aria_pagecache_blocks_unused | 15647 |
+| Aria_pagecache_blocks_used | 13 |
+| Aria_pagecache_read_requests | 306 |
+| Aria_pagecache_reads | 17 |
+| Aria_pagecache_write_requests | 8 |
+| Aria_pagecache_writes | 8 |
+| Aria_transaction_log_syncs | 0 |
+| Binlog_commits | 0 |
+| Binlog_group_commits | 0 |
+| Binlog_group_commit_trigger_count | 0 |
+| Binlog_group_commit_trigger_lock_wait | 0 |
+| Binlog_group_commit_trigger_timeout | 0 |
+| Binlog_snapshot_file | mysql-bin.000005 |
+| Binlog_snapshot_position | 385 |
+| Binlog_bytes_written | 183 |
+| Binlog_cache_disk_use | 0 |
+| Binlog_cache_use | 0 |
+| Binlog_stmt_cache_disk_use | 0 |
+| Binlog_stmt_cache_use | 0 |
+| Busy_time | 0.000000 |
+| Bytes_received | 3009 |
+| Bytes_sent | 228856 |
+| Column_compressions | 0 |
+| Column_decompressions | 0 |
+| Com_admin_commands | 0 |
+| Com_alter_db | 0 |
+| Com_alter_db_upgrade | 0 |
+| Com_alter_event | 0 |
+| Com_alter_function | 0 |
+| Com_alter_procedure | 0 |
+| Com_alter_server | 0 |
+| Com_alter_sequence | 0 |
+| Com_alter_table | 3 |
+| Com_alter_user | 0 |
+| Com_analyze | 0 |
+| Com_assign_to_keycache | 0 |
+| Com_backup | 6 |
+| Com_backup_lock | 0 |
+| Com_begin | 0 |
+| Com_binlog | 0 |
+| Com_call_procedure | 0 |
+| Com_change_db | 0 |
+| Com_change_master | 0 |
+| Com_check | 0 |
+| Com_checksum | 0 |
+| Com_commit | 0 |
+| Com_compound_sql | 0 |
+| Com_create_db | 1 |
+| Com_create_event | 0 |
+| Com_create_function | 0 |
+| Com_create_index | 0 |
+| Com_create_package | 0 |
+| Com_create_package_body | 0 |
+| Com_create_procedure | 0 |
+| Com_create_role | 0 |
+| Com_create_sequence | 0 |
+| Com_create_server | 0 |
+| Com_create_table | 3 |
+| Com_create_temporary_table | 0 |
+| Com_create_trigger | 0 |
+| Com_create_udf | 0 |
+| Com_create_user | 0 |
+| Com_create_view | 0 |
+| Com_dealloc_sql | 0 |
+| Com_delete | 6 |
+| Com_delete_multi | 0 |
+| Com_do | 0 |
+| Com_drop_db | 0 |
+| Com_drop_event | 0 |
+| Com_drop_function | 0 |
+| Com_drop_index | 0 |
+| Com_drop_procedure | 0 |
+| Com_drop_package | 0 |
+| Com_drop_package_body | 0 |
+| Com_drop_role | 0 |
+| Com_drop_server | 0 |
+| Com_drop_sequence | 0 |
+| Com_drop_table | 0 |
+| Com_drop_temporary_table | 0 |
+| Com_drop_trigger | 0 |
+| Com_drop_user | 0 |
+| Com_drop_view | 0 |
+| Com_empty_query | 0 |
+| Com_execute_immediate | 0 |
+| Com_execute_sql | 0 |
+| Com_flush | 4 |
+| Com_get_diagnostics | 0 |
+| Com_grant | 0 |
+| Com_grant_role | 0 |
+| Com_ha_close | 0 |
+| Com_ha_open | 0 |
+| Com_ha_read | 0 |
+| Com_help | 0 |
+| Com_insert | 0 |
+| Com_insert_select | 0 |
+| Com_install_plugin | 0 |
+| Com_kill | 0 |
+| Com_load | 0 |
+| Com_lock_tables | 0 |
+| Com_optimize | 0 |
+| Com_preload_keys | 0 |
+| Com_prepare_sql | 0 |
+| Com_purge | 0 |
+| Com_purge_before_date | 0 |
+| Com_release_savepoint | 0 |
+| Com_rename_table | 0 |
+| Com_rename_user | 0 |
+| Com_repair | 0 |
+| Com_replace | 0 |
+| Com_replace_select | 0 |
+| Com_reset | 0 |
+| Com_resignal | 0 |
+| Com_revoke | 0 |
+| Com_revoke_all | 0 |
+| Com_revoke_role | 0 |
+| Com_rollback | 0 |
+| Com_rollback_to_savepoint | 0 |
+| Com_savepoint | 0 |
+| Com_select | 12 |
+| Com_set_option | 6 |
+| Com_show_authors | 0 |
+| Com_show_binlog_events | 0 |
+| Com_show_binlogs | 0 |
+| Com_show_charsets | 0 |
+| Com_show_collations | 0 |
+| Com_show_contributors | 0 |
+| Com_show_create_db | 0 |
+| Com_show_create_event | 0 |
+| Com_show_create_func | 0 |
+| Com_show_create_package | 0 |
+| Com_show_create_package_body | 0 |
+| Com_show_create_proc | 0 |
+| Com_show_create_table | 0 |
+| Com_show_create_trigger | 0 |
+| Com_show_create_user | 0 |
+| Com_show_databases | 1 |
+| Com_show_engine_logs | 0 |
+| Com_show_engine_mutex | 0 |
+| Com_show_engine_status | 2 |
+| Com_show_errors | 0 |
+| Com_show_events | 0 |
+| Com_show_explain | 0 |
+| Com_show_fields | 0 |
+| Com_show_function_status | 0 |
+| Com_show_generic | 0 |
+| Com_show_grants | 2 |
+| Com_show_keys | 0 |
+| Com_show_binlog_status | 6 |
+| Com_show_open_tables | 0 |
+| Com_show_package_status | 0 |
+| Com_show_package_body_status | 0 |
+| Com_show_plugins | 0 |
+| Com_show_privileges | 0 |
+| Com_show_procedure_status | 0 |
+| Com_show_processlist | 0 |
+| Com_show_profile | 0 |
+| Com_show_profiles | 0 |
+| Com_show_relaylog_events | 0 |
+| Com_show_slave_hosts | 0 |
+| Com_show_slave_status | 6 |
+| Com_show_status | 6 |
+| Com_show_storage_engines | 0 |
+| Com_show_table_status | 0 |
+| Com_show_tables | 0 |
+| Com_show_triggers | 0 |
+| Com_show_variables | 6 |
+| Com_show_warnings | 0 |
+| Com_shutdown | 0 |
+| Com_signal | 0 |
+| Com_start_all_slaves | 0 |
+| Com_start_slave | 0 |
+| Com_stmt_close | 0 |
+| Com_stmt_execute | 0 |
+| Com_stmt_fetch | 0 |
+| Com_stmt_prepare | 0 |
+| Com_stmt_reprepare | 0 |
+| Com_stmt_reset | 0 |
+| Com_stmt_send_long_data | 0 |
+| Com_stop_all_slaves | 0 |
+| Com_stop_slave | 0 |
+| Com_truncate | 0 |
+| Com_uninstall_plugin | 0 |
+| Com_unlock_tables | 0 |
+| Com_update | 0 |
+| Com_update_multi | 0 |
+| Com_xa_commit | 0 |
+| Com_xa_end | 0 |
+| Com_xa_prepare | 0 |
+| Com_xa_recover | 0 |
+| Com_xa_rollback | 0 |
+| Com_xa_start | 0 |
+| Compression | OFF |
+| Connection_errors_accept | 0 |
+| Connection_errors_internal | 0 |
+| Connection_errors_max_connections | 0 |
+| Connection_errors_peer_address | 0 |
+| Connection_errors_select | 0 |
+| Connection_errors_tcpwrap | 0 |
+| Connections | 15 |
+| Cpu_time | 0.000000 |
+| Created_tmp_disk_tables | 4 |
+| Created_tmp_files | 5 |
+| Created_tmp_tables | 17 |
+| Delayed_errors | 0 |
+| Delayed_insert_threads | 0 |
+| Delayed_writes | 0 |
+| Delete_scan | 6 |
+| Empty_queries | 2 |
+| Executed_events | 0 |
+| Executed_triggers | 0 |
+| Feature_application_time_periods | 0 |
+| Feature_check_constraint | 1 |
+| Feature_custom_aggregate_functions | 0 |
+| Feature_delay_key_write | 0 |
+| Feature_dynamic_columns | 0 |
+| Feature_fulltext | 0 |
+| Feature_gis | 0 |
+| Feature_insert_returning | 0 |
+| Feature_invisible_columns | 0 |
+| Feature_json | 1 |
+| Feature_locale | 0 |
+| Feature_subquery | 0 |
+| Feature_system_versioning | 0 |
+| Feature_timezone | 0 |
+| Feature_trigger | 0 |
+| Feature_window_functions | 0 |
+| Feature_xml | 0 |
+| Handler_commit | 37 |
+| Handler_delete | 7 |
+| Handler_discover | 0 |
+| Handler_external_lock | 0 |
+| Handler_icp_attempts | 0 |
+| Handler_icp_match | 0 |
+| Handler_mrr_init | 0 |
+| Handler_mrr_key_refills | 0 |
+| Handler_mrr_rowid_refills | 0 |
+| Handler_prepare | 0 |
+| Handler_read_first | 3 |
+| Handler_read_key | 9 |
+| Handler_read_last | 0 |
+| Handler_read_next | 1 |
+| Handler_read_prev | 0 |
+| Handler_read_retry | 0 |
+| Handler_read_rnd | 0 |
+| Handler_read_rnd_deleted | 0 |
+| Handler_read_rnd_next | 6222 |
+| Handler_rollback | 0 |
+| Handler_savepoint | 0 |
+| Handler_savepoint_rollback | 0 |
+| Handler_tmp_delete | 0 |
+| Handler_tmp_update | 0 |
+| Handler_tmp_write | 6165 |
+| Handler_update | 0 |
+| Handler_write | 9 |
+| Innodb_adaptive_hash_hash_searches | 0 |
+| Innodb_adaptive_hash_non_hash_searches | 0 |
+| Innodb_background_log_sync | 896 |
+| Innodb_buffer_pool_dump_status | |
+| Innodb_buffer_pool_load_status | Buffer pool(s) load completed at 220817 19:46:29 |
+| Innodb_buffer_pool_resize_status | |
+| Innodb_buffer_pool_load_incomplete | OFF |
+| Innodb_buffer_pool_pages_data | 317 |
+| Innodb_buffer_pool_bytes_data | 5193728 |
+| Innodb_buffer_pool_pages_dirty | 138 |
+| Innodb_buffer_pool_bytes_dirty | 2260992 |
+| Innodb_buffer_pool_pages_flushed | 0 |
+| Innodb_buffer_pool_pages_free | 7747 |
+| Innodb_buffer_pool_pages_made_not_young | 0 |
+| Innodb_buffer_pool_pages_made_young | 0 |
+| Innodb_buffer_pool_pages_misc | 0 |
+| Innodb_buffer_pool_pages_old | 0 |
+| Innodb_buffer_pool_pages_total | 8064 |
+| Innodb_buffer_pool_pages_lru_flushed | 0 |
+| Innodb_buffer_pool_pages_lru_freed | 0 |
+| Innodb_buffer_pool_read_ahead_rnd | 0 |
+| Innodb_buffer_pool_read_ahead | 0 |
+| Innodb_buffer_pool_read_ahead_evicted | 0 |
+| Innodb_buffer_pool_read_requests | 2298 |
+| Innodb_buffer_pool_reads | 184 |
+| Innodb_buffer_pool_wait_free | 0 |
+| Innodb_buffer_pool_write_requests | 203 |
+| Innodb_checkpoint_age | 4785 |
+| Innodb_checkpoint_max_age | 80819529 |
+| Innodb_data_fsyncs | 15 |
+| Innodb_data_pending_fsyncs | 0 |
+| Innodb_data_pending_reads | 0 |
+| Innodb_data_pending_writes | 0 |
+| Innodb_data_read | 3014656 |
+| Innodb_data_reads | 201 |
+| Innodb_data_writes | 14 |
+| Innodb_data_written | 0 |
+| Innodb_dblwr_pages_written | 0 |
+| Innodb_dblwr_writes | 0 |
+| Innodb_deadlocks | 0 |
+| Innodb_history_list_length | 1 |
+| Innodb_ibuf_discarded_delete_marks | 0 |
+| Innodb_ibuf_discarded_deletes | 0 |
+| Innodb_ibuf_discarded_inserts | 0 |
+| Innodb_ibuf_free_list | 0 |
+| Innodb_ibuf_merged_delete_marks | 0 |
+| Innodb_ibuf_merged_deletes | 0 |
+| Innodb_ibuf_merged_inserts | 0 |
+| Innodb_ibuf_merges | 0 |
+| Innodb_ibuf_segment_size | 2 |
+| Innodb_ibuf_size | 1 |
+| Innodb_log_waits | 0 |
+| Innodb_log_write_requests | 65 |
+| Innodb_log_writes | 13 |
+| Innodb_lsn_current | 73172 |
+| Innodb_lsn_flushed | 73172 |
+| Innodb_lsn_last_checkpoint | 68387 |
+| Innodb_master_thread_active_loops | 0 |
+| Innodb_master_thread_idle_loops | 896 |
+| Innodb_max_trx_id | 38 |
+| Innodb_mem_adaptive_hash | 0 |
+| Innodb_mem_dictionary | 862248 |
+| Innodb_os_log_written | 4785 |
+| Innodb_page_size | 16384 |
+| Innodb_pages_created | 133 |
+| Innodb_pages_read | 184 |
+| Innodb_pages_written | 0 |
+| Innodb_row_lock_current_waits | 0 |
+| Innodb_row_lock_time | 0 |
+| Innodb_row_lock_time_avg | 0 |
+| Innodb_row_lock_time_max | 0 |
+| Innodb_row_lock_waits | 0 |
+| Innodb_rows_deleted | 0 |
+| Innodb_rows_inserted | 0 |
+| Innodb_rows_read | 0 |
+| Innodb_rows_updated | 0 |
+| Innodb_system_rows_deleted | 7 |
+| Innodb_system_rows_inserted | 9 |
+| Innodb_system_rows_read | 15 |
+| Innodb_system_rows_updated | 0 |
+| Innodb_num_open_files | 9 |
+| Innodb_truncated_status_writes | 0 |
+| Innodb_available_undo_logs | 128 |
+| Innodb_undo_truncations | 0 |
+| Innodb_page_compression_saved | 0 |
+| Innodb_num_pages_page_compressed | 0 |
+| Innodb_num_page_compressed_trim_op | 0 |
+| Innodb_num_pages_page_decompressed | 0 |
+| Innodb_num_pages_page_compression_error | 0 |
+| Innodb_num_pages_encrypted | 0 |
+| Innodb_num_pages_decrypted | 0 |
+| Innodb_have_lz4 | OFF |
+| Innodb_have_lzo | OFF |
+| Innodb_have_lzma | OFF |
+| Innodb_have_bzip2 | OFF |
+| Innodb_have_snappy | OFF |
+| Innodb_have_punch_hole | ON |
+| Innodb_defragment_compression_failures | 0 |
+| Innodb_defragment_failures | 0 |
+| Innodb_defragment_count | 0 |
+| Innodb_instant_alter_column | 0 |
+| Innodb_onlineddl_rowlog_rows | 0 |
+| Innodb_onlineddl_rowlog_pct_used | 0 |
+| Innodb_onlineddl_pct_progress | 0 |
+| Innodb_secondary_index_triggered_cluster_reads | 0 |
+| Innodb_secondary_index_triggered_cluster_reads_avoided | 0 |
+| Innodb_encryption_rotation_pages_read_from_cache | 0 |
+| Innodb_encryption_rotation_pages_read_from_disk | 0 |
+| Innodb_encryption_rotation_pages_modified | 0 |
+| Innodb_encryption_rotation_pages_flushed | 0 |
+| Innodb_encryption_rotation_estimated_iops | 0 |
+| Innodb_encryption_n_merge_blocks_encrypted | 0 |
+| Innodb_encryption_n_merge_blocks_decrypted | 0 |
+| Innodb_encryption_n_rowlog_blocks_encrypted | 0 |
+| Innodb_encryption_n_rowlog_blocks_decrypted | 0 |
+| Innodb_encryption_n_temp_blocks_encrypted | 0 |
+| Innodb_encryption_n_temp_blocks_decrypted | 0 |
+| Innodb_encryption_num_key_requests | 0 |
+| Key_blocks_not_flushed | 0 |
+| Key_blocks_unused | 107163 |
+| Key_blocks_used | 0 |
+| Key_blocks_warm | 0 |
+| Key_read_requests | 0 |
+| Key_reads | 0 |
+| Key_write_requests | 0 |
+| Key_writes | 0 |
+| Last_query_cost | 0.000000 |
+| Master_gtid_wait_count | 0 |
+| Master_gtid_wait_time | 0 |
+| Master_gtid_wait_timeouts | 0 |
+| Max_statement_time_exceeded | 0 |
+| Max_used_connections | 1 |
+| Memory_used | 35590104 |
+| Memory_used_initial | 35583712 |
+| Not_flushed_delayed_rows | 0 |
+| Open_files | 7 |
+| Open_streams | 4 |
+| Open_table_definitions | 0 |
+| Open_tables | 0 |
+| Opened_files | 125 |
+| Opened_plugin_libraries | 1 |
+| Opened_table_definitions | 27 |
+| Opened_tables | 24 |
+| Opened_views | 0 |
+| Performance_schema_accounts_lost | 0 |
+| Performance_schema_cond_classes_lost | 0 |
+| Performance_schema_cond_instances_lost | 0 |
+| Performance_schema_digest_lost | 0 |
+| Performance_schema_file_classes_lost | 0 |
+| Performance_schema_file_handles_lost | 0 |
+| Performance_schema_file_instances_lost | 0 |
+| Performance_schema_hosts_lost | 0 |
+| Performance_schema_index_stat_lost | 0 |
+| Performance_schema_locker_lost | 0 |
+| Performance_schema_memory_classes_lost | 0 |
+| Performance_schema_metadata_lock_lost | 0 |
+| Performance_schema_mutex_classes_lost | 0 |
+| Performance_schema_mutex_instances_lost | 0 |
+| Performance_schema_nested_statement_lost | 0 |
+| Performance_schema_prepared_statements_lost | 0 |
+| Performance_schema_program_lost | 0 |
+| Performance_schema_rwlock_classes_lost | 0 |
+| Performance_schema_rwlock_instances_lost | 0 |
+| Performance_schema_session_connect_attrs_lost | 0 |
+| Performance_schema_socket_classes_lost | 0 |
+| Performance_schema_socket_instances_lost | 0 |
+| Performance_schema_stage_classes_lost | 0 |
+| Performance_schema_statement_classes_lost | 0 |
+| Performance_schema_table_handles_lost | 0 |
+| Performance_schema_table_instances_lost | 0 |
+| Performance_schema_table_lock_stat_lost | 0 |
+| Performance_schema_thread_classes_lost | 0 |
+| Performance_schema_thread_instances_lost | 0 |
+| Performance_schema_users_lost | 0 |
+| Prepared_stmt_count | 0 |
+| Qcache_free_blocks | 1 |
+| Qcache_free_memory | 1031272 |
+| Qcache_hits | 0 |
+| Qcache_inserts | 0 |
+| Qcache_lowmem_prunes | 0 |
+| Qcache_not_cached | 0 |
+| Qcache_queries_in_cache | 0 |
+| Qcache_total_blocks | 1 |
+| Queries | 75 |
+| Questions | 62 |
+| Resultset_metadata_skipped | 0 |
+| Rows_read | 27 |
+| Rows_sent | 5888 |
+| Rows_tmp_read | 6162 |
+| Rpl_semi_sync_master_clients | 0 |
+| Rpl_semi_sync_master_get_ack | 0 |
+| Rpl_semi_sync_master_net_avg_wait_time | 0 |
+| Rpl_semi_sync_master_net_wait_time | 0 |
+| Rpl_semi_sync_master_net_waits | 0 |
+| Rpl_semi_sync_master_no_times | 0 |
+| Rpl_semi_sync_master_no_tx | 0 |
+| Rpl_semi_sync_master_request_ack | 0 |
+| Rpl_semi_sync_master_status | OFF |
+| Rpl_semi_sync_master_timefunc_failures | 0 |
+| Rpl_semi_sync_master_tx_avg_wait_time | 0 |
+| Rpl_semi_sync_master_tx_wait_time | 0 |
+| Rpl_semi_sync_master_tx_waits | 0 |
+| Rpl_semi_sync_master_wait_pos_backtraverse | 0 |
+| Rpl_semi_sync_master_wait_sessions | 0 |
+| Rpl_semi_sync_master_yes_tx | 0 |
+| Rpl_semi_sync_slave_send_ack | 0 |
+| Rpl_semi_sync_slave_status | OFF |
+| Rpl_status | AUTH_MASTER |
+| Rpl_transactions_multi_engine | 0 |
+| Select_full_join | 0 |
+| Select_full_range_join | 0 |
+| Select_range | 0 |
+| Select_range_check | 0 |
+| Select_scan | 17 |
+| Slave_connections | 0 |
+| Slave_heartbeat_period | 0.000 |
+| Slave_open_temp_tables | 0 |
+| Slave_received_heartbeats | 0 |
+| Slave_retried_transactions | 0 |
+| Slave_running | OFF |
+| Slave_skipped_errors | 0 |
+| Slaves_connected | 0 |
+| Slaves_running | 0 |
+| Slow_launch_threads | 0 |
+| Slow_queries | 0 |
+| Sort_merge_passes | 0 |
+| Sort_priority_queue_sorts | 0 |
+| Sort_range | 0 |
+| Sort_rows | 0 |
+| Sort_scan | 0 |
+| Ssl_accept_renegotiates | 0 |
+| Ssl_accepts | 0 |
+| Ssl_callback_cache_hits | 0 |
+| Ssl_cipher | |
+| Ssl_cipher_list | |
+| Ssl_client_connects | 0 |
+| Ssl_connect_renegotiates | 0 |
+| Ssl_ctx_verify_depth | 0 |
+| Ssl_ctx_verify_mode | 0 |
+| Ssl_default_timeout | 0 |
+| Ssl_finished_accepts | 0 |
+| Ssl_finished_connects | 0 |
+| Ssl_server_not_after | |
+| Ssl_server_not_before | |
+| Ssl_session_cache_hits | 0 |
+| Ssl_session_cache_misses | 0 |
+| Ssl_session_cache_mode | NONE |
+| Ssl_session_cache_overflows | 0 |
+| Ssl_session_cache_size | 0 |
+| Ssl_session_cache_timeouts | 0 |
+| Ssl_sessions_reused | 0 |
+| Ssl_used_session_cache_entries | 0 |
+| Ssl_verify_depth | 0 |
+| Ssl_verify_mode | 0 |
+| Ssl_version | |
+| Subquery_cache_hit | 0 |
+| Subquery_cache_miss | 0 |
+| Syncs | 87 |
+| Table_locks_immediate | 17 |
+| Table_locks_waited | 0 |
+| Table_open_cache_active_instances | 1 |
+| Table_open_cache_hits | 16 |
+| Table_open_cache_misses | 24 |
+| Table_open_cache_overflows | 0 |
+| Tc_log_max_pages_used | 0 |
+| Tc_log_page_size | 0 |
+| Tc_log_page_waits | 0 |
+| Threadpool_idle_threads | 0 |
+| Threadpool_threads | 0 |
+| Threads_cached | 0 |
+| Threads_connected | 1 |
+| Threads_created | 6 |
+| Threads_running | 1 |
+| Transactions_gtid_foreign_engine | 0 |
+| Transactions_multi_engine | 0 |
+| Update_scan | 0 |
+| Uptime | 895 |
+| Uptime_since_flush_status | 895 |
+| wsrep_local_state_uuid | 479ce105-1e65-11ed-b2c3-8ac44f1dd1c9 |
+| wsrep_protocol_version | 10 |
+| wsrep_last_committed | 18 |
+| wsrep_replicated | 0 |
+| wsrep_replicated_bytes | 0 |
+| wsrep_repl_keys | 0 |
+| wsrep_repl_keys_bytes | 0 |
+| wsrep_repl_data_bytes | 0 |
+| wsrep_repl_other_bytes | 0 |
+| wsrep_received | 11 |
+| wsrep_received_bytes | 1410 |
+| wsrep_local_commits | 0 |
+| wsrep_local_cert_failures | 0 |
+| wsrep_local_replays | 0 |
+| wsrep_local_send_queue | 0 |
+| wsrep_local_send_queue_max | 2 |
+| wsrep_local_send_queue_min | 0 |
+| wsrep_local_send_queue_avg | 0.25 |
+| wsrep_local_recv_queue | 0 |
+| wsrep_local_recv_queue_max | 1 |
+| wsrep_local_recv_queue_min | 0 |
+| wsrep_local_recv_queue_avg | 0 |
+| wsrep_local_cached_downto | 1 |
+| wsrep_flow_control_paused_ns | 0 |
+| wsrep_flow_control_paused | 0 |
+| wsrep_flow_control_sent | 0 |
+| wsrep_flow_control_recv | 0 |
+| wsrep_flow_control_active | false |
+| wsrep_flow_control_requested | false |
+| wsrep_cert_deps_distance | 1 |
+| wsrep_apply_oooe | 0 |
+| wsrep_apply_oool | 0 |
+| wsrep_apply_window | 1 |
+| wsrep_apply_waits | 0 |
+| wsrep_commit_oooe | 0 |
+| wsrep_commit_oool | 0 |
+| wsrep_commit_window | 1 |
+| wsrep_local_state | 4 |
+| wsrep_local_state_comment | Synced |
+| wsrep_cert_index_size | 1 |
+| wsrep_causal_reads | 0 |
+| wsrep_cert_interval | 0 |
+| wsrep_open_transactions | 0 |
+| wsrep_open_connections | 0 |
+| wsrep_incoming_addresses | 172.17.0.2:3306,172.17.0.4:3306,172.17.0.3:3306 |
+| wsrep_cluster_weight | 3 |
+| wsrep_desync_count | 0 |
+| wsrep_evs_delayed | |
+| wsrep_evs_evict_list | |
+| wsrep_evs_repl_latency | 0.000200973/0.125339/1.00029/0.330702/8 |
+| wsrep_evs_state | OPERATIONAL |
+| wsrep_gcomm_uuid | 49826f19-1e65-11ed-8435-a308cd7c3ccc |
+| wsrep_gmcast_segment | 0 |
+| wsrep_applier_thread_count | 4 |
+| wsrep_cluster_capabilities | |
+| wsrep_cluster_conf_id | 3 |
+| wsrep_cluster_size | 3 |
+| wsrep_cluster_state_uuid | 479ce105-1e65-11ed-b2c3-8ac44f1dd1c9 |
+| wsrep_cluster_status | Primary |
+| wsrep_connected | ON |
+| wsrep_local_bf_aborts | 0 |
+| wsrep_local_index | 0 |
+| wsrep_provider_capabilities | :MULTI_MASTER:CERTIFICATION:PARALLEL_APPLYING:TRX_REPLAY:ISOLATION:PAUSE:CAUSAL_READS:INCREMENTAL_WRITESET:UNORDERED:PREORDERED:STREAMING:NBO: |
+| wsrep_provider_name | Galera |
+| wsrep_provider_vendor | Codership Oy <info@codership.com> |
+| wsrep_provider_version | 4.12(r6311685) |
+| wsrep_ready | ON |
+| wsrep_rollbacker_thread_count | 1 |
+| wsrep_thread_count | 5 |
++--------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt
new file mode 100644
index 000000000..96591afdf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt
@@ -0,0 +1,8 @@
++--------------------+-------+
+| Variable_name | Value |
++--------------------+-------+
+| log_bin | ON |
+| max_connections | 151 |
+| performance_schema | ON |
+| table_open_cache | 2000 |
++--------------------+-------+
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/process_list.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/process_list.txt
new file mode 100644
index 000000000..a44ce5e70
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/process_list.txt
@@ -0,0 +1,6 @@
++------+---------+
+| time | user |
++------+---------+
+| 1 | netdata |
+| 9 | root |
++------+---------+
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt
new file mode 100644
index 000000000..7a44b8b5a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt
@@ -0,0 +1,6 @@
++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+
+| User | Total_connections | Concurrent_connections | Connected_time | Busy_time | Cpu_time | Bytes_received | Bytes_sent | Binlog_bytes_written | Rows_read | Rows_sent | Rows_deleted | Rows_inserted | Rows_updated | Select_commands | Update_commands | Other_commands | Commit_transactions | Rollback_transactions | Denied_connections | Lost_connections | Access_denied | Empty_queries | Total_ssl_connections | Max_statement_time_exceeded |
++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+
+| root | 1 | 0 | 9 | 0.000156 | 0.0001541 | 25 | 2799 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| netdata | 1 | 0 | 32 | 0.09262200000000004 | 0.07723410000000001 | 13440 | 105432 | 0 | 0 | 99 | 0 | 0 | 0 | 33 | 0 | 0 | 0 | 0 | 49698 | 0 | 33 | 66 | 0 | 0 |
++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/version.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/version.txt
new file mode 100644
index 000000000..ee5e77d9a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4-galera-cluster/version.txt
@@ -0,0 +1,6 @@
++-----------------+---------------------+
+| Variable_name | Value |
++-----------------+---------------------+
+| version | 10.8.4-MariaDB-log |
+| version_comment | Source distribution |
++-----------------+---------------------+
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt
new file mode 100644
index 000000000..b117cb6c7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt
@@ -0,0 +1,6 @@
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+
+| Connection_name | Slave_SQL_State | Slave_IO_State | Master_Host | Master_User | Master_Port | Connect_Retry | Master_Log_File | Read_Master_Log_Pos | Relay_Log_File | Relay_Log_Pos | Relay_Master_Log_File | Slave_IO_Running | Slave_SQL_Running | Replicate_Do_DB | Replicate_Ignore_DB | Replicate_Do_Table | Replicate_Ignore_Table | Replicate_Wild_Do_Table | Replicate_Wild_Ignore_Table | Last_Errno | Last_Error | Skip_Counter | Exec_Master_Log_Pos | Relay_Log_Space | Until_Condition | Until_Log_File | Until_Log_Pos | Master_SSL_Allowed | Master_SSL_CA_File | Master_SSL_CA_Path | Master_SSL_Cert | Master_SSL_Cipher | Master_SSL_Key | Seconds_Behind_Master | Master_SSL_Verify_Server_Cert | Last_IO_Errno | Last_IO_Error | Last_SQL_Errno | Last_SQL_Error | Replicate_Ignore_Server_Ids | Master_Server_Id | Master_SSL_Crl | Master_SSL_Crlpath | Using_Gtid | Gtid_IO_Pos | Replicate_Do_Domain_Ids | Replicate_Ignore_Domain_Ids | Parallel_Mode | SQL_Delay | SQL_Remaining_Delay | Slave_SQL_Running_State | Slave_DDL_Groups | Slave_Non_Transactional_Groups | Slave_Transactional_Groups | Retried_transactions | Max_relay_log_size | Executed_log_entries | Slave_received_heartbeats | Slave_heartbeat_period | Gtid_Slave_Pos |
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+
+| Master1 | Slave has read all relay log; waiting for more updates | Waiting for master to send event | master | my_repl_user | 3306 | 10 | mysql-bin.000002 | 342 | mysql-relay-bin.000004 | 641 | mysql-bin.000002 | Yes | Yes | | | | | | | 0 | | 0 | 342 | 2785 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 765 | | | No | | | | optimistic | 0 | NULL | Slave has read all relay log; waiting for more updates | 7 | 2 | 0 | 0 | 1073741824 | 34 | 1767 | 30.000 | 0-308-9 |
+| Master2 | Slave has read all relay log; waiting for more updates | Waiting for master to send event | master | my_repl_user | 3306 | 10 | mysql-bin.000002 | 342 | mysql-relay-bin.000004 | 641 | mysql-bin.000002 | Yes | Yes | | | | | | | 0 | | 0 | 342 | 2785 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 765 | | | No | | | | optimistic | 0 | NULL | Slave has read all relay log; waiting for more updates | 7 | 2 | 0 | 0 | 1073741824 | 34 | 1767 | 30.000 | 0-308-9 |
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt
new file mode 100644
index 000000000..61428f084
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt
@@ -0,0 +1,5 @@
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+
+| Connection_name | Slave_SQL_State | Slave_IO_State | Master_Host | Master_User | Master_Port | Connect_Retry | Master_Log_File | Read_Master_Log_Pos | Relay_Log_File | Relay_Log_Pos | Relay_Master_Log_File | Slave_IO_Running | Slave_SQL_Running | Replicate_Do_DB | Replicate_Ignore_DB | Replicate_Do_Table | Replicate_Ignore_Table | Replicate_Wild_Do_Table | Replicate_Wild_Ignore_Table | Last_Errno | Last_Error | Skip_Counter | Exec_Master_Log_Pos | Relay_Log_Space | Until_Condition | Until_Log_File | Until_Log_Pos | Master_SSL_Allowed | Master_SSL_CA_File | Master_SSL_CA_Path | Master_SSL_Cert | Master_SSL_Cipher | Master_SSL_Key | Seconds_Behind_Master | Master_SSL_Verify_Server_Cert | Last_IO_Errno | Last_IO_Error | Last_SQL_Errno | Last_SQL_Error | Replicate_Ignore_Server_Ids | Master_Server_Id | Master_SSL_Crl | Master_SSL_Crlpath | Using_Gtid | Gtid_IO_Pos | Replicate_Do_Domain_Ids | Replicate_Ignore_Domain_Ids | Parallel_Mode | SQL_Delay | SQL_Remaining_Delay | Slave_SQL_Running_State | Slave_DDL_Groups | Slave_Non_Transactional_Groups | Slave_Transactional_Groups | Retried_transactions | Max_relay_log_size | Executed_log_entries | Slave_received_heartbeats | Slave_heartbeat_period | Gtid_Slave_Pos |
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+
+| | Slave has read all relay log; waiting for more updates | Waiting for master to send event | master | my_repl_user | 3306 | 10 | mysql-bin.000002 | 342 | mysql-relay-bin.000004 | 641 | mysql-bin.000002 | Yes | Yes | | | | | | | 0 | | 0 | 342 | 2785 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 765 | | | No | | | | optimistic | 0 | NULL | Slave has read all relay log; waiting for more updates | 7 | 2 | 0 | 0 | 1073741824 | 34 | 1767 | 30.000 | 0-308-9 |
++-----------------+--------------------------------------------------------+----------------------------------+-------------+--------------+-------------+---------------+------------------+---------------------+------------------------+---------------+-----------------------+------------------+-------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+----------------+--------------------+------------+-------------+-------------------------+-----------------------------+---------------+-----------+---------------------+--------------------------------------------------------+------------------+--------------------------------+----------------------------+----------------------+--------------------+----------------------+---------------------------+------------------------+----------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/global_status.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/global_status.txt
new file mode 100644
index 000000000..c82531c74
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/global_status.txt
@@ -0,0 +1,569 @@
++--------------------------------------------------------+--------------------------------------------------+
+| Variable_name | Value |
++--------------------------------------------------------+--------------------------------------------------+
+| Aborted_clients | 1 |
+| Aborted_connects | 2 |
+| Aborted_connects_preauth | 0 |
+| Access_denied_errors | 2 |
+| Acl_column_grants | 0 |
+| Acl_database_grants | 3 |
+| Acl_function_grants | 0 |
+| Acl_procedure_grants | 0 |
+| Acl_package_spec_grants | 0 |
+| Acl_package_body_grants | 0 |
+| Acl_proxy_users | 1 |
+| Acl_role_grants | 0 |
+| Acl_roles | 0 |
+| Acl_table_grants | 1 |
+| Acl_users | 4 |
+| Aria_pagecache_blocks_not_flushed | 7 |
+| Aria_pagecache_blocks_unused | 15630 |
+| Aria_pagecache_blocks_used | 17 |
+| Aria_pagecache_read_requests | 78 |
+| Aria_pagecache_reads | 19 |
+| Aria_pagecache_write_requests | 8 |
+| Aria_pagecache_writes | 0 |
+| Aria_transaction_log_syncs | 6 |
+| Binlog_commits | 0 |
+| Binlog_group_commits | 0 |
+| Binlog_group_commit_trigger_count | 0 |
+| Binlog_group_commit_trigger_lock_wait | 0 |
+| Binlog_group_commit_trigger_timeout | 0 |
+| Binlog_snapshot_file | mysql-bin.000002 |
+| Binlog_snapshot_position | 1806 |
+| Binlog_bytes_written | 1842 |
+| Binlog_cache_disk_use | 0 |
+| Binlog_cache_use | 0 |
+| Binlog_stmt_cache_disk_use | 0 |
+| Binlog_stmt_cache_use | 0 |
+| Busy_time | 0.000000 |
+| Bytes_received | 81392 |
+| Bytes_sent | 56794 |
+| Column_compressions | 0 |
+| Column_decompressions | 0 |
+| Com_admin_commands | 0 |
+| Com_alter_db | 0 |
+| Com_alter_db_upgrade | 0 |
+| Com_alter_event | 0 |
+| Com_alter_function | 0 |
+| Com_alter_procedure | 0 |
+| Com_alter_server | 0 |
+| Com_alter_sequence | 0 |
+| Com_alter_table | 0 |
+| Com_alter_user | 0 |
+| Com_analyze | 0 |
+| Com_assign_to_keycache | 0 |
+| Com_backup | 0 |
+| Com_backup_lock | 0 |
+| Com_begin | 0 |
+| Com_binlog | 0 |
+| Com_call_procedure | 0 |
+| Com_change_db | 0 |
+| Com_change_master | 0 |
+| Com_check | 0 |
+| Com_checksum | 0 |
+| Com_commit | 0 |
+| Com_compound_sql | 0 |
+| Com_create_db | 1 |
+| Com_create_event | 0 |
+| Com_create_function | 0 |
+| Com_create_index | 0 |
+| Com_create_package | 0 |
+| Com_create_package_body | 0 |
+| Com_create_procedure | 0 |
+| Com_create_role | 0 |
+| Com_create_sequence | 0 |
+| Com_create_server | 0 |
+| Com_create_table | 0 |
+| Com_create_temporary_table | 0 |
+| Com_create_trigger | 0 |
+| Com_create_udf | 0 |
+| Com_create_user | 3 |
+| Com_create_view | 0 |
+| Com_dealloc_sql | 0 |
+| Com_delete | 0 |
+| Com_delete_multi | 0 |
+| Com_do | 0 |
+| Com_drop_db | 0 |
+| Com_drop_event | 0 |
+| Com_drop_function | 0 |
+| Com_drop_index | 0 |
+| Com_drop_procedure | 0 |
+| Com_drop_package | 0 |
+| Com_drop_package_body | 0 |
+| Com_drop_role | 0 |
+| Com_drop_server | 0 |
+| Com_drop_sequence | 0 |
+| Com_drop_table | 0 |
+| Com_drop_temporary_table | 0 |
+| Com_drop_trigger | 0 |
+| Com_drop_user | 0 |
+| Com_drop_view | 0 |
+| Com_empty_query | 0 |
+| Com_execute_immediate | 0 |
+| Com_execute_sql | 0 |
+| Com_flush | 2 |
+| Com_get_diagnostics | 0 |
+| Com_grant | 3 |
+| Com_grant_role | 0 |
+| Com_ha_close | 0 |
+| Com_ha_open | 0 |
+| Com_ha_read | 0 |
+| Com_help | 0 |
+| Com_insert | 0 |
+| Com_insert_select | 0 |
+| Com_install_plugin | 0 |
+| Com_kill | 0 |
+| Com_load | 0 |
+| Com_lock_tables | 0 |
+| Com_optimize | 0 |
+| Com_preload_keys | 0 |
+| Com_prepare_sql | 0 |
+| Com_purge | 0 |
+| Com_purge_before_date | 0 |
+| Com_release_savepoint | 0 |
+| Com_rename_table | 0 |
+| Com_rename_user | 0 |
+| Com_repair | 0 |
+| Com_replace | 0 |
+| Com_replace_select | 0 |
+| Com_reset | 0 |
+| Com_resignal | 0 |
+| Com_revoke | 0 |
+| Com_revoke_all | 0 |
+| Com_revoke_role | 0 |
+| Com_rollback | 0 |
+| Com_rollback_to_savepoint | 0 |
+| Com_savepoint | 0 |
+| Com_select | 6 |
+| Com_set_option | 0 |
+| Com_show_authors | 0 |
+| Com_show_binlog_events | 0 |
+| Com_show_binlogs | 0 |
+| Com_show_charsets | 0 |
+| Com_show_collations | 0 |
+| Com_show_contributors | 0 |
+| Com_show_create_db | 0 |
+| Com_show_create_event | 0 |
+| Com_show_create_func | 0 |
+| Com_show_create_package | 0 |
+| Com_show_create_package_body | 0 |
+| Com_show_create_proc | 0 |
+| Com_show_create_table | 0 |
+| Com_show_create_trigger | 0 |
+| Com_show_create_user | 0 |
+| Com_show_databases | 0 |
+| Com_show_engine_logs | 0 |
+| Com_show_engine_mutex | 0 |
+| Com_show_engine_status | 0 |
+| Com_show_errors | 0 |
+| Com_show_events | 0 |
+| Com_show_explain | 0 |
+| Com_show_fields | 0 |
+| Com_show_function_status | 0 |
+| Com_show_generic | 0 |
+| Com_show_grants | 0 |
+| Com_show_keys | 0 |
+| Com_show_binlog_status | 0 |
+| Com_show_open_tables | 0 |
+| Com_show_package_status | 0 |
+| Com_show_package_body_status | 0 |
+| Com_show_plugins | 0 |
+| Com_show_privileges | 0 |
+| Com_show_procedure_status | 0 |
+| Com_show_processlist | 0 |
+| Com_show_profile | 0 |
+| Com_show_profiles | 0 |
+| Com_show_relaylog_events | 0 |
+| Com_show_slave_hosts | 0 |
+| Com_show_slave_status | 14 |
+| Com_show_status | 2 |
+| Com_show_storage_engines | 0 |
+| Com_show_table_status | 0 |
+| Com_show_tables | 0 |
+| Com_show_triggers | 0 |
+| Com_show_variables | 0 |
+| Com_show_warnings | 0 |
+| Com_shutdown | 0 |
+| Com_signal | 0 |
+| Com_start_all_slaves | 0 |
+| Com_start_slave | 0 |
+| Com_stmt_close | 0 |
+| Com_stmt_execute | 0 |
+| Com_stmt_fetch | 0 |
+| Com_stmt_prepare | 0 |
+| Com_stmt_reprepare | 0 |
+| Com_stmt_reset | 0 |
+| Com_stmt_send_long_data | 0 |
+| Com_stop_all_slaves | 0 |
+| Com_stop_slave | 0 |
+| Com_truncate | 0 |
+| Com_uninstall_plugin | 0 |
+| Com_unlock_tables | 0 |
+| Com_update | 0 |
+| Com_update_multi | 0 |
+| Com_xa_commit | 0 |
+| Com_xa_end | 0 |
+| Com_xa_prepare | 0 |
+| Com_xa_recover | 0 |
+| Com_xa_rollback | 0 |
+| Com_xa_start | 0 |
+| Compression | OFF |
+| Connection_errors_accept | 0 |
+| Connection_errors_internal | 0 |
+| Connection_errors_max_connections | 0 |
+| Connection_errors_peer_address | 0 |
+| Connection_errors_select | 0 |
+| Connection_errors_tcpwrap | 0 |
+| Connections | 12 |
+| Cpu_time | 0.000000 |
+| Created_tmp_disk_tables | 0 |
+| Created_tmp_files | 5 |
+| Created_tmp_tables | 2 |
+| Delayed_errors | 0 |
+| Delayed_insert_threads | 0 |
+| Delayed_writes | 0 |
+| Delete_scan | 0 |
+| Empty_queries | 0 |
+| Executed_events | 0 |
+| Executed_triggers | 0 |
+| Feature_application_time_periods | 0 |
+| Feature_check_constraint | 1 |
+| Feature_custom_aggregate_functions | 0 |
+| Feature_delay_key_write | 0 |
+| Feature_dynamic_columns | 0 |
+| Feature_fulltext | 0 |
+| Feature_gis | 0 |
+| Feature_insert_returning | 0 |
+| Feature_invisible_columns | 0 |
+| Feature_json | 1 |
+| Feature_locale | 0 |
+| Feature_subquery | 0 |
+| Feature_system_versioning | 0 |
+| Feature_timezone | 0 |
+| Feature_trigger | 0 |
+| Feature_window_functions | 0 |
+| Feature_xml | 0 |
+| Handler_commit | 30 |
+| Handler_delete | 0 |
+| Handler_discover | 0 |
+| Handler_external_lock | 0 |
+| Handler_icp_attempts | 0 |
+| Handler_icp_match | 0 |
+| Handler_mrr_init | 0 |
+| Handler_mrr_key_refills | 0 |
+| Handler_mrr_rowid_refills | 0 |
+| Handler_prepare | 0 |
+| Handler_read_first | 7 |
+| Handler_read_key | 7 |
+| Handler_read_last | 0 |
+| Handler_read_next | 3 |
+| Handler_read_prev | 0 |
+| Handler_read_retry | 0 |
+| Handler_read_rnd | 0 |
+| Handler_read_rnd_deleted | 0 |
+| Handler_read_rnd_next | 626 |
+| Handler_rollback | 0 |
+| Handler_savepoint | 0 |
+| Handler_savepoint_rollback | 0 |
+| Handler_tmp_delete | 0 |
+| Handler_tmp_update | 0 |
+| Handler_tmp_write | 568 |
+| Handler_update | 3 |
+| Handler_write | 13 |
+| Innodb_adaptive_hash_hash_searches | 0 |
+| Innodb_adaptive_hash_non_hash_searches | 0 |
+| Innodb_background_log_sync | 52300 |
+| Innodb_buffer_pool_dump_status | |
+| Innodb_buffer_pool_load_status | Buffer pool(s) load completed at 220817 21:14:57 |
+| Innodb_buffer_pool_resize_status | |
+| Innodb_buffer_pool_load_incomplete | OFF |
+| Innodb_buffer_pool_pages_data | 309 |
+| Innodb_buffer_pool_bytes_data | 5062656 |
+| Innodb_buffer_pool_pages_dirty | 29 |
+| Innodb_buffer_pool_bytes_dirty | 475136 |
+| Innodb_buffer_pool_pages_flushed | 0 |
+| Innodb_buffer_pool_pages_free | 7755 |
+| Innodb_buffer_pool_pages_made_not_young | 0 |
+| Innodb_buffer_pool_pages_made_young | 0 |
+| Innodb_buffer_pool_pages_misc | 0 |
+| Innodb_buffer_pool_pages_old | 0 |
+| Innodb_buffer_pool_pages_total | 8064 |
+| Innodb_buffer_pool_pages_lru_flushed | 0 |
+| Innodb_buffer_pool_pages_lru_freed | 0 |
+| Innodb_buffer_pool_read_ahead_rnd | 0 |
+| Innodb_buffer_pool_read_ahead | 0 |
+| Innodb_buffer_pool_read_ahead_evicted | 0 |
+| Innodb_buffer_pool_read_requests | 1911 |
+| Innodb_buffer_pool_reads | 171 |
+| Innodb_buffer_pool_wait_free | 0 |
+| Innodb_buffer_pool_write_requests | 148 |
+| Innodb_checkpoint_age | 6097 |
+| Innodb_checkpoint_max_age | 80819529 |
+| Innodb_data_fsyncs | 17 |
+| Innodb_data_pending_fsyncs | 0 |
+| Innodb_data_pending_reads | 0 |
+| Innodb_data_pending_writes | 0 |
+| Innodb_data_read | 2801664 |
+| Innodb_data_reads | 185 |
+| Innodb_data_writes | 16 |
+| Innodb_data_written | 0 |
+| Innodb_dblwr_pages_written | 0 |
+| Innodb_dblwr_writes | 0 |
+| Innodb_deadlocks | 0 |
+| Innodb_history_list_length | 0 |
+| Innodb_ibuf_discarded_delete_marks | 0 |
+| Innodb_ibuf_discarded_deletes | 0 |
+| Innodb_ibuf_discarded_inserts | 0 |
+| Innodb_ibuf_free_list | 0 |
+| Innodb_ibuf_merged_delete_marks | 0 |
+| Innodb_ibuf_merged_deletes | 0 |
+| Innodb_ibuf_merged_inserts | 0 |
+| Innodb_ibuf_merges | 0 |
+| Innodb_ibuf_segment_size | 2 |
+| Innodb_ibuf_size | 1 |
+| Innodb_log_waits | 0 |
+| Innodb_log_write_requests | 109 |
+| Innodb_log_writes | 15 |
+| Innodb_lsn_current | 52826 |
+| Innodb_lsn_flushed | 52826 |
+| Innodb_lsn_last_checkpoint | 46729 |
+| Innodb_master_thread_active_loops | 0 |
+| Innodb_master_thread_idle_loops | 52301 |
+| Innodb_max_trx_id | 37 |
+| Innodb_mem_adaptive_hash | 0 |
+| Innodb_mem_dictionary | 855336 |
+| Innodb_os_log_written | 6097 |
+| Innodb_page_size | 16384 |
+| Innodb_pages_created | 138 |
+| Innodb_pages_read | 171 |
+| Innodb_pages_written | 0 |
+| Innodb_row_lock_current_waits | 0 |
+| Innodb_row_lock_time | 0 |
+| Innodb_row_lock_time_avg | 0 |
+| Innodb_row_lock_time_max | 0 |
+| Innodb_row_lock_waits | 0 |
+| Innodb_rows_deleted | 0 |
+| Innodb_rows_inserted | 0 |
+| Innodb_rows_read | 0 |
+| Innodb_rows_updated | 0 |
+| Innodb_system_rows_deleted | 0 |
+| Innodb_system_rows_inserted | 9 |
+| Innodb_system_rows_read | 0 |
+| Innodb_system_rows_updated | 0 |
+| Innodb_num_open_files | 6 |
+| Innodb_truncated_status_writes | 0 |
+| Innodb_available_undo_logs | 128 |
+| Innodb_undo_truncations | 0 |
+| Innodb_page_compression_saved | 0 |
+| Innodb_num_pages_page_compressed | 0 |
+| Innodb_num_page_compressed_trim_op | 0 |
+| Innodb_num_pages_page_decompressed | 0 |
+| Innodb_num_pages_page_compression_error | 0 |
+| Innodb_num_pages_encrypted | 0 |
+| Innodb_num_pages_decrypted | 0 |
+| Innodb_have_lz4 | OFF |
+| Innodb_have_lzo | OFF |
+| Innodb_have_lzma | OFF |
+| Innodb_have_bzip2 | OFF |
+| Innodb_have_snappy | OFF |
+| Innodb_have_punch_hole | ON |
+| Innodb_defragment_compression_failures | 0 |
+| Innodb_defragment_failures | 0 |
+| Innodb_defragment_count | 0 |
+| Innodb_instant_alter_column | 0 |
+| Innodb_onlineddl_rowlog_rows | 0 |
+| Innodb_onlineddl_rowlog_pct_used | 0 |
+| Innodb_onlineddl_pct_progress | 0 |
+| Innodb_secondary_index_triggered_cluster_reads | 0 |
+| Innodb_secondary_index_triggered_cluster_reads_avoided | 0 |
+| Innodb_encryption_rotation_pages_read_from_cache | 0 |
+| Innodb_encryption_rotation_pages_read_from_disk | 0 |
+| Innodb_encryption_rotation_pages_modified | 0 |
+| Innodb_encryption_rotation_pages_flushed | 0 |
+| Innodb_encryption_rotation_estimated_iops | 0 |
+| Innodb_encryption_n_merge_blocks_encrypted | 0 |
+| Innodb_encryption_n_merge_blocks_decrypted | 0 |
+| Innodb_encryption_n_rowlog_blocks_encrypted | 0 |
+| Innodb_encryption_n_rowlog_blocks_decrypted | 0 |
+| Innodb_encryption_n_temp_blocks_encrypted | 0 |
+| Innodb_encryption_n_temp_blocks_decrypted | 0 |
+| Innodb_encryption_num_key_requests | 0 |
+| Key_blocks_not_flushed | 0 |
+| Key_blocks_unused | 107163 |
+| Key_blocks_used | 0 |
+| Key_blocks_warm | 0 |
+| Key_read_requests | 0 |
+| Key_reads | 0 |
+| Key_write_requests | 0 |
+| Key_writes | 0 |
+| Last_query_cost | 0.000000 |
+| Master_gtid_wait_count | 0 |
+| Master_gtid_wait_time | 0 |
+| Master_gtid_wait_timeouts | 0 |
+| Max_statement_time_exceeded | 0 |
+| Max_used_connections | 1 |
+| Memory_used | 35982280 |
+| Memory_used_initial | 35441456 |
+| Not_flushed_delayed_rows | 0 |
+| Open_files | 29 |
+| Open_streams | 4 |
+| Open_table_definitions | 17 |
+| Open_tables | 10 |
+| Opened_files | 100 |
+| Opened_plugin_libraries | 0 |
+| Opened_table_definitions | 16 |
+| Opened_tables | 16 |
+| Opened_views | 0 |
+| Performance_schema_accounts_lost | 0 |
+| Performance_schema_cond_classes_lost | 0 |
+| Performance_schema_cond_instances_lost | 0 |
+| Performance_schema_digest_lost | 0 |
+| Performance_schema_file_classes_lost | 0 |
+| Performance_schema_file_handles_lost | 0 |
+| Performance_schema_file_instances_lost | 0 |
+| Performance_schema_hosts_lost | 0 |
+| Performance_schema_index_stat_lost | 0 |
+| Performance_schema_locker_lost | 0 |
+| Performance_schema_memory_classes_lost | 0 |
+| Performance_schema_metadata_lock_lost | 0 |
+| Performance_schema_mutex_classes_lost | 0 |
+| Performance_schema_mutex_instances_lost | 0 |
+| Performance_schema_nested_statement_lost | 0 |
+| Performance_schema_prepared_statements_lost | 0 |
+| Performance_schema_program_lost | 0 |
+| Performance_schema_rwlock_classes_lost | 0 |
+| Performance_schema_rwlock_instances_lost | 0 |
+| Performance_schema_session_connect_attrs_lost | 0 |
+| Performance_schema_socket_classes_lost | 0 |
+| Performance_schema_socket_instances_lost | 0 |
+| Performance_schema_stage_classes_lost | 0 |
+| Performance_schema_statement_classes_lost | 0 |
+| Performance_schema_table_handles_lost | 0 |
+| Performance_schema_table_instances_lost | 0 |
+| Performance_schema_table_lock_stat_lost | 0 |
+| Performance_schema_thread_classes_lost | 0 |
+| Performance_schema_thread_instances_lost | 0 |
+| Performance_schema_users_lost | 0 |
+| Prepared_stmt_count | 0 |
+| Qcache_free_blocks | 1 |
+| Qcache_free_memory | 1031272 |
+| Qcache_hits | 0 |
+| Qcache_inserts | 0 |
+| Qcache_lowmem_prunes | 0 |
+| Qcache_not_cached | 0 |
+| Qcache_queries_in_cache | 0 |
+| Qcache_total_blocks | 1 |
+| Queries | 33 |
+| Questions | 24 |
+| Resultset_metadata_skipped | 0 |
+| Rows_read | 36 |
+| Rows_sent | 571 |
+| Rows_tmp_read | 565 |
+| Rpl_semi_sync_master_clients | 0 |
+| Rpl_semi_sync_master_get_ack | 0 |
+| Rpl_semi_sync_master_net_avg_wait_time | 0 |
+| Rpl_semi_sync_master_net_wait_time | 0 |
+| Rpl_semi_sync_master_net_waits | 0 |
+| Rpl_semi_sync_master_no_times | 0 |
+| Rpl_semi_sync_master_no_tx | 0 |
+| Rpl_semi_sync_master_request_ack | 0 |
+| Rpl_semi_sync_master_status | OFF |
+| Rpl_semi_sync_master_timefunc_failures | 0 |
+| Rpl_semi_sync_master_tx_avg_wait_time | 0 |
+| Rpl_semi_sync_master_tx_wait_time | 0 |
+| Rpl_semi_sync_master_tx_waits | 0 |
+| Rpl_semi_sync_master_wait_pos_backtraverse | 0 |
+| Rpl_semi_sync_master_wait_sessions | 0 |
+| Rpl_semi_sync_master_yes_tx | 0 |
+| Rpl_semi_sync_slave_send_ack | 0 |
+| Rpl_semi_sync_slave_status | OFF |
+| Rpl_status | AUTH_MASTER |
+| Rpl_transactions_multi_engine | 0 |
+| Select_full_join | 0 |
+| Select_full_range_join | 0 |
+| Select_range | 0 |
+| Select_range_check | 0 |
+| Select_scan | 2 |
+| Slave_connections | 0 |
+| Slave_heartbeat_period | 30.000 |
+| Slave_open_temp_tables | 0 |
+| Slave_received_heartbeats | 1743 |
+| Slave_retried_transactions | 0 |
+| Slave_running | ON |
+| Slave_skipped_errors | 0 |
+| Slaves_connected | 0 |
+| Slaves_running | 1 |
+| Slow_launch_threads | 0 |
+| Slow_queries | 0 |
+| Sort_merge_passes | 0 |
+| Sort_priority_queue_sorts | 0 |
+| Sort_range | 0 |
+| Sort_rows | 0 |
+| Sort_scan | 0 |
+| Ssl_accept_renegotiates | 0 |
+| Ssl_accepts | 0 |
+| Ssl_callback_cache_hits | 0 |
+| Ssl_cipher | |
+| Ssl_cipher_list | |
+| Ssl_client_connects | 0 |
+| Ssl_connect_renegotiates | 0 |
+| Ssl_ctx_verify_depth | 0 |
+| Ssl_ctx_verify_mode | 0 |
+| Ssl_default_timeout | 0 |
+| Ssl_finished_accepts | 0 |
+| Ssl_finished_connects | 0 |
+| Ssl_server_not_after | |
+| Ssl_server_not_before | |
+| Ssl_session_cache_hits | 0 |
+| Ssl_session_cache_misses | 0 |
+| Ssl_session_cache_mode | NONE |
+| Ssl_session_cache_overflows | 0 |
+| Ssl_session_cache_size | 0 |
+| Ssl_session_cache_timeouts | 0 |
+| Ssl_sessions_reused | 0 |
+| Ssl_used_session_cache_entries | 0 |
+| Ssl_verify_depth | 0 |
+| Ssl_verify_mode | 0 |
+| Ssl_version | |
+| Subquery_cache_hit | 0 |
+| Subquery_cache_miss | 0 |
+| Syncs | 56 |
+| Table_locks_immediate | 60 |
+| Table_locks_waited | 0 |
+| Table_open_cache_active_instances | 1 |
+| Table_open_cache_hits | 54 |
+| Table_open_cache_misses | 16 |
+| Table_open_cache_overflows | 0 |
+| Tc_log_max_pages_used | 0 |
+| Tc_log_page_size | 0 |
+| Tc_log_page_waits | 0 |
+| Threadpool_idle_threads | 0 |
+| Threadpool_threads | 0 |
+| Threads_cached | 0 |
+| Threads_connected | 1 |
+| Threads_created | 2 |
+| Threads_running | 3 |
+| Transactions_gtid_foreign_engine | 0 |
+| Transactions_multi_engine | 0 |
+| Update_scan | 0 |
+| Uptime | 52310 |
+| Uptime_since_flush_status | 52310 |
+| wsrep | 0 |
+| wsrep_applier_thread_count | 0 |
+| wsrep_cluster_capabilities | |
+| wsrep_cluster_conf_id | 18446744073709551615 |
+| wsrep_cluster_size | 0 |
+| wsrep_cluster_state_uuid | |
+| wsrep_cluster_status | Disconnected |
+| wsrep_connected | OFF |
+| wsrep_local_bf_aborts | 0 |
+| wsrep_local_index | 18446744073709551615 |
+| wsrep_provider_capabilities | |
+| wsrep_provider_name | |
+| wsrep_provider_vendor | |
+| wsrep_provider_version | |
+| wsrep_ready | OFF |
+| wsrep_rollbacker_thread_count | 0 |
+| wsrep_thread_count | 0 |
++--------------------------------------------------------+--------------------------------------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/global_variables.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/global_variables.txt
new file mode 100644
index 000000000..96591afdf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/global_variables.txt
@@ -0,0 +1,8 @@
++--------------------+-------+
+| Variable_name | Value |
++--------------------+-------+
+| log_bin | ON |
+| max_connections | 151 |
+| performance_schema | ON |
+| table_open_cache | 2000 |
++--------------------+-------+
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/process_list.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/process_list.txt
new file mode 100644
index 000000000..a44ce5e70
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/process_list.txt
@@ -0,0 +1,6 @@
++------+---------+
+| time | user |
++------+---------+
+| 1 | netdata |
+| 9 | root |
++------+---------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/user_statistics.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/user_statistics.txt
new file mode 100644
index 000000000..7a44b8b5a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/user_statistics.txt
@@ -0,0 +1,6 @@
++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+
+| User | Total_connections | Concurrent_connections | Connected_time | Busy_time | Cpu_time | Bytes_received | Bytes_sent | Binlog_bytes_written | Rows_read | Rows_sent | Rows_deleted | Rows_inserted | Rows_updated | Select_commands | Update_commands | Other_commands | Commit_transactions | Rollback_transactions | Denied_connections | Lost_connections | Access_denied | Empty_queries | Total_ssl_connections | Max_statement_time_exceeded |
++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+
+| root | 1 | 0 | 9 | 0.000156 | 0.0001541 | 25 | 2799 | 0 | 0 | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| netdata | 1 | 0 | 32 | 0.09262200000000004 | 0.07723410000000001 | 13440 | 105432 | 0 | 0 | 99 | 0 | 0 | 0 | 33 | 0 | 0 | 0 | 0 | 49698 | 0 | 33 | 66 | 0 | 0 |
++---------+-------------------+------------------------+----------------+---------------------+---------------------+----------------+------------+----------------------+-----------+-----------+--------------+---------------+--------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+-----------------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/version.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/version.txt
new file mode 100644
index 000000000..2e7ca5b02
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v10.8.4/version.txt
@@ -0,0 +1,6 @@
++-----------------+---------------------+
+| Variable_name | Value |
++-----------------+---------------------+
+| version | 10.8.4-MariaDB |
+| version_comment | Source distribution |
++-----------------+---------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/global_status.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/global_status.txt
new file mode 100644
index 000000000..7c75f0619
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/global_status.txt
@@ -0,0 +1,423 @@
++------------------------------------------+-------------+
+| Variable_name | Value |
++------------------------------------------+-------------+
+| Aborted_clients | 0 |
+| Aborted_connects | 0 |
+| Access_denied_errors | 0 |
+| Aria_pagecache_blocks_not_flushed | 0 |
+| Aria_pagecache_blocks_unused | 15737 |
+| Aria_pagecache_blocks_used | 0 |
+| Aria_pagecache_read_requests | 0 |
+| Aria_pagecache_reads | 0 |
+| Aria_pagecache_write_requests | 0 |
+| Aria_pagecache_writes | 0 |
+| Aria_transaction_log_syncs | 0 |
+| Binlog_commits | 0 |
+| Binlog_group_commits | 0 |
+| Binlog_snapshot_file | |
+| Binlog_snapshot_position | 0 |
+| Binlog_bytes_written | 0 |
+| Binlog_cache_disk_use | 0 |
+| Binlog_cache_use | 0 |
+| Binlog_stmt_cache_disk_use | 0 |
+| Binlog_stmt_cache_use | 0 |
+| Busy_time | 0.000000 |
+| Bytes_received | 639 |
+| Bytes_sent | 41620 |
+| Com_admin_commands | 0 |
+| Com_alter_db | 0 |
+| Com_alter_db_upgrade | 0 |
+| Com_alter_event | 0 |
+| Com_alter_function | 0 |
+| Com_alter_procedure | 0 |
+| Com_alter_server | 0 |
+| Com_alter_table | 0 |
+| Com_alter_tablespace | 0 |
+| Com_analyze | 0 |
+| Com_assign_to_keycache | 0 |
+| Com_begin | 0 |
+| Com_binlog | 0 |
+| Com_call_procedure | 0 |
+| Com_change_db | 0 |
+| Com_change_master | 0 |
+| Com_check | 0 |
+| Com_checksum | 0 |
+| Com_commit | 0 |
+| Com_create_db | 0 |
+| Com_create_event | 0 |
+| Com_create_function | 0 |
+| Com_create_index | 0 |
+| Com_create_procedure | 0 |
+| Com_create_server | 0 |
+| Com_create_table | 0 |
+| Com_create_trigger | 0 |
+| Com_create_udf | 0 |
+| Com_create_user | 0 |
+| Com_create_view | 0 |
+| Com_dealloc_sql | 0 |
+| Com_delete | 0 |
+| Com_delete_multi | 0 |
+| Com_do | 0 |
+| Com_drop_db | 0 |
+| Com_drop_event | 0 |
+| Com_drop_function | 0 |
+| Com_drop_index | 0 |
+| Com_drop_procedure | 0 |
+| Com_drop_server | 0 |
+| Com_drop_table | 0 |
+| Com_drop_trigger | 0 |
+| Com_drop_user | 0 |
+| Com_drop_view | 0 |
+| Com_empty_query | 0 |
+| Com_execute_sql | 0 |
+| Com_flush | 0 |
+| Com_grant | 0 |
+| Com_ha_close | 0 |
+| Com_ha_open | 0 |
+| Com_ha_read | 0 |
+| Com_help | 0 |
+| Com_insert | 0 |
+| Com_insert_select | 0 |
+| Com_install_plugin | 0 |
+| Com_kill | 0 |
+| Com_load | 0 |
+| Com_lock_tables | 0 |
+| Com_optimize | 0 |
+| Com_preload_keys | 0 |
+| Com_prepare_sql | 0 |
+| Com_purge | 0 |
+| Com_purge_before_date | 0 |
+| Com_release_savepoint | 0 |
+| Com_rename_table | 0 |
+| Com_rename_user | 0 |
+| Com_repair | 0 |
+| Com_replace | 0 |
+| Com_replace_select | 0 |
+| Com_reset | 0 |
+| Com_resignal | 0 |
+| Com_revoke | 0 |
+| Com_revoke_all | 0 |
+| Com_rollback | 0 |
+| Com_rollback_to_savepoint | 0 |
+| Com_savepoint | 0 |
+| Com_select | 4 |
+| Com_set_option | 0 |
+| Com_show_authors | 0 |
+| Com_show_binlog_events | 0 |
+| Com_show_binlogs | 0 |
+| Com_show_charsets | 0 |
+| Com_show_client_statistics | 0 |
+| Com_show_collations | 0 |
+| Com_show_contributors | 0 |
+| Com_show_create_db | 0 |
+| Com_show_create_event | 0 |
+| Com_show_create_func | 0 |
+| Com_show_create_proc | 0 |
+| Com_show_create_table | 0 |
+| Com_show_create_trigger | 0 |
+| Com_show_databases | 0 |
+| Com_show_engine_logs | 0 |
+| Com_show_engine_mutex | 0 |
+| Com_show_engine_status | 0 |
+| Com_show_errors | 0 |
+| Com_show_events | 0 |
+| Com_show_fields | 0 |
+| Com_show_function_status | 0 |
+| Com_show_grants | 0 |
+| Com_show_index_statistics | 0 |
+| Com_show_keys | 0 |
+| Com_show_master_status | 0 |
+| Com_show_open_tables | 0 |
+| Com_show_plugins | 0 |
+| Com_show_privileges | 0 |
+| Com_show_procedure_status | 0 |
+| Com_show_processlist | 0 |
+| Com_show_profile | 0 |
+| Com_show_profiles | 0 |
+| Com_show_relaylog_events | 0 |
+| Com_show_slave_hosts | 0 |
+| Com_show_slave_status | 0 |
+| Com_show_status | 1 |
+| Com_show_storage_engines | 0 |
+| Com_show_table_statistics | 0 |
+| Com_show_table_status | 0 |
+| Com_show_tables | 0 |
+| Com_show_triggers | 0 |
+| Com_show_user_statistics | 0 |
+| Com_show_variables | 4 |
+| Com_show_warnings | 0 |
+| Com_signal | 0 |
+| Com_slave_start | 0 |
+| Com_slave_stop | 0 |
+| Com_stmt_close | 0 |
+| Com_stmt_execute | 0 |
+| Com_stmt_fetch | 0 |
+| Com_stmt_prepare | 0 |
+| Com_stmt_reprepare | 0 |
+| Com_stmt_reset | 0 |
+| Com_stmt_send_long_data | 0 |
+| Com_truncate | 0 |
+| Com_uninstall_plugin | 0 |
+| Com_unlock_tables | 0 |
+| Com_update | 0 |
+| Com_update_multi | 0 |
+| Com_xa_commit | 0 |
+| Com_xa_end | 0 |
+| Com_xa_prepare | 0 |
+| Com_xa_recover | 0 |
+| Com_xa_rollback | 0 |
+| Com_xa_start | 0 |
+| Compression | OFF |
+| Connections | 4 |
+| Cpu_time | 0.000000 |
+| Created_tmp_disk_tables | 0 |
+| Created_tmp_files | 6 |
+| Created_tmp_tables | 5 |
+| Delayed_errors | 0 |
+| Delayed_insert_threads | 0 |
+| Delayed_writes | 0 |
+| Empty_queries | 0 |
+| Executed_events | 0 |
+| Executed_triggers | 0 |
+| Feature_dynamic_columns | 0 |
+| Feature_fulltext | 0 |
+| Feature_gis | 0 |
+| Feature_locale | 0 |
+| Feature_subquery | 0 |
+| Feature_timezone | 0 |
+| Feature_trigger | 0 |
+| Feature_xml | 0 |
+| Flush_commands | 2 |
+| Handler_commit | 0 |
+| Handler_delete | 0 |
+| Handler_discover | 0 |
+| Handler_icp_attempts | 0 |
+| Handler_icp_match | 0 |
+| Handler_mrr_init | 0 |
+| Handler_mrr_key_refills | 0 |
+| Handler_mrr_rowid_refills | 0 |
+| Handler_prepare | 0 |
+| Handler_read_first | 0 |
+| Handler_read_key | 0 |
+| Handler_read_last | 0 |
+| Handler_read_next | 0 |
+| Handler_read_prev | 0 |
+| Handler_read_rnd | 0 |
+| Handler_read_rnd_deleted | 0 |
+| Handler_read_rnd_next | 1264 |
+| Handler_rollback | 0 |
+| Handler_savepoint | 0 |
+| Handler_savepoint_rollback | 0 |
+| Handler_tmp_update | 0 |
+| Handler_tmp_write | 1260 |
+| Handler_update | 0 |
+| Handler_write | 0 |
+| Innodb_adaptive_hash_cells | 553229 |
+| Innodb_adaptive_hash_hash_searches | 0 |
+| Innodb_adaptive_hash_heap_buffers | 0 |
+| Innodb_adaptive_hash_non_hash_searches | 19 |
+| Innodb_background_log_sync | 1 |
+| Innodb_buffer_pool_bytes_data | 2342912 |
+| Innodb_buffer_pool_bytes_dirty | 0 |
+| Innodb_buffer_pool_pages_data | 143 |
+| Innodb_buffer_pool_pages_dirty | 0 |
+| Innodb_buffer_pool_pages_flushed | 0 |
+| Innodb_buffer_pool_pages_free | 16240 |
+| Innodb_buffer_pool_pages_LRU_flushed | 0 |
+| Innodb_buffer_pool_pages_made_not_young | 0 |
+| Innodb_buffer_pool_pages_made_young | 0 |
+| Innodb_buffer_pool_pages_misc | 0 |
+| Innodb_buffer_pool_pages_old | 0 |
+| Innodb_buffer_pool_pages_total | 16383 |
+| Innodb_buffer_pool_read_ahead | 0 |
+| Innodb_buffer_pool_read_ahead_evicted | 0 |
+| Innodb_buffer_pool_read_ahead_rnd | 0 |
+| Innodb_buffer_pool_read_requests | 459 |
+| Innodb_buffer_pool_reads | 144 |
+| Innodb_buffer_pool_wait_free | 0 |
+| Innodb_buffer_pool_write_requests | 0 |
+| Innodb_checkpoint_age | 0 |
+| Innodb_checkpoint_max_age | 7782360 |
+| Innodb_checkpoint_target_age | 7539162 |
+| Innodb_current_row_locks | 0 |
+| Innodb_data_fsyncs | 3 |
+| Innodb_data_pending_fsyncs | 0 |
+| Innodb_data_pending_reads | 0 |
+| Innodb_data_pending_writes | 0 |
+| Innodb_data_read | 4542976 |
+| Innodb_data_reads | 155 |
+| Innodb_data_writes | 3 |
+| Innodb_data_written | 1536 |
+| Innodb_dblwr_pages_written | 0 |
+| Innodb_dblwr_writes | 0 |
+| Innodb_deadlocks | 0 |
+| Innodb_descriptors_memory | 8000 |
+| Innodb_dict_tables | 8 |
+| Innodb_have_atomic_builtins | ON |
+| Innodb_history_list_length | 0 |
+| Innodb_ibuf_discarded_delete_marks | 0 |
+| Innodb_ibuf_discarded_deletes | 0 |
+| Innodb_ibuf_discarded_inserts | 0 |
+| Innodb_ibuf_free_list | 0 |
+| Innodb_ibuf_merged_delete_marks | 0 |
+| Innodb_ibuf_merged_deletes | 0 |
+| Innodb_ibuf_merged_inserts | 0 |
+| Innodb_ibuf_merges | 0 |
+| Innodb_ibuf_segment_size | 2 |
+| Innodb_ibuf_size | 1 |
+| Innodb_log_waits | 0 |
+| Innodb_log_write_requests | 0 |
+| Innodb_log_writes | 1 |
+| Innodb_lsn_current | 1597945 |
+| Innodb_lsn_flushed | 1597945 |
+| Innodb_lsn_last_checkpoint | 1597945 |
+| Innodb_master_thread_1_second_loops | 1 |
+| Innodb_master_thread_10_second_loops | 0 |
+| Innodb_master_thread_background_loops | 1 |
+| Innodb_master_thread_main_flush_loops | 1 |
+| Innodb_master_thread_sleeps | 1 |
+| Innodb_max_trx_id | 1280 |
+| Innodb_mem_adaptive_hash | 4430048 |
+| Innodb_mem_dictionary | 1146964 |
+| Innodb_mem_total | 275513344 |
+| Innodb_mutex_os_waits | 0 |
+| Innodb_mutex_spin_rounds | 2 |
+| Innodb_mutex_spin_waits | 1 |
+| Innodb_oldest_view_low_limit_trx_id | 1280 |
+| Innodb_os_log_fsyncs | 3 |
+| Innodb_os_log_pending_fsyncs | 0 |
+| Innodb_os_log_pending_writes | 0 |
+| Innodb_os_log_written | 512 |
+| Innodb_page_size | 16384 |
+| Innodb_pages_created | 0 |
+| Innodb_pages_read | 143 |
+| Innodb_pages_written | 0 |
+| Innodb_purge_trx_id | 0 |
+| Innodb_purge_undo_no | 0 |
+| Innodb_read_views_memory | 88 |
+| Innodb_row_lock_current_waits | 0 |
+| Innodb_row_lock_time | 0 |
+| Innodb_row_lock_time_avg | 0 |
+| Innodb_row_lock_time_max | 0 |
+| Innodb_row_lock_waits | 0 |
+| Innodb_rows_deleted | 0 |
+| Innodb_rows_inserted | 0 |
+| Innodb_rows_read | 0 |
+| Innodb_rows_updated | 0 |
+| Innodb_s_lock_os_waits | 2 |
+| Innodb_s_lock_spin_rounds | 60 |
+| Innodb_s_lock_spin_waits | 2 |
+| Innodb_truncated_status_writes | 0 |
+| Innodb_x_lock_os_waits | 0 |
+| Innodb_x_lock_spin_rounds | 0 |
+| Innodb_x_lock_spin_waits | 0 |
+| Key_blocks_not_flushed | 0 |
+| Key_blocks_unused | 107171 |
+| Key_blocks_used | 0 |
+| Key_blocks_warm | 0 |
+| Key_read_requests | 0 |
+| Key_reads | 0 |
+| Key_write_requests | 0 |
+| Key_writes | 0 |
+| Last_query_cost | 0.000000 |
+| Max_used_connections | 1 |
+| Not_flushed_delayed_rows | 0 |
+| Open_files | 21 |
+| Open_streams | 0 |
+| Open_table_definitions | 33 |
+| Open_tables | 26 |
+| Opened_files | 84 |
+| Opened_table_definitions | 0 |
+| Opened_tables | 0 |
+| Opened_views | 0 |
+| Performance_schema_cond_classes_lost | 0 |
+| Performance_schema_cond_instances_lost | 0 |
+| Performance_schema_file_classes_lost | 0 |
+| Performance_schema_file_handles_lost | 0 |
+| Performance_schema_file_instances_lost | 0 |
+| Performance_schema_locker_lost | 0 |
+| Performance_schema_mutex_classes_lost | 0 |
+| Performance_schema_mutex_instances_lost | 0 |
+| Performance_schema_rwlock_classes_lost | 0 |
+| Performance_schema_rwlock_instances_lost | 0 |
+| Performance_schema_table_handles_lost | 0 |
+| Performance_schema_table_instances_lost | 0 |
+| Performance_schema_thread_classes_lost | 0 |
+| Performance_schema_thread_instances_lost | 0 |
+| Prepared_stmt_count | 0 |
+| Qcache_free_blocks | 1 |
+| Qcache_free_memory | 67091120 |
+| Qcache_hits | 0 |
+| Qcache_inserts | 0 |
+| Qcache_lowmem_prunes | 0 |
+| Qcache_not_cached | 4 |
+| Qcache_queries_in_cache | 0 |
+| Qcache_total_blocks | 1 |
+| Queries | 12 |
+| Questions | 11 |
+| Rows_read | 0 |
+| Rows_sent | 1264 |
+| Rows_tmp_read | 1260 |
+| Rpl_status | AUTH_MASTER |
+| Select_full_join | 0 |
+| Select_full_range_join | 0 |
+| Select_range | 0 |
+| Select_range_check | 0 |
+| Select_scan | 5 |
+| Slave_heartbeat_period | 0.000 |
+| Slave_open_temp_tables | 0 |
+| Slave_received_heartbeats | 0 |
+| Slave_retried_transactions | 0 |
+| Slave_running | OFF |
+| Slow_launch_threads | 0 |
+| Slow_queries | 0 |
+| Sort_merge_passes | 0 |
+| Sort_range | 0 |
+| Sort_rows | 0 |
+| Sort_scan | 0 |
+| Sphinx_error | |
+| Sphinx_time | |
+| Sphinx_total | |
+| Sphinx_total_found | |
+| Sphinx_word_count | |
+| Sphinx_words | |
+| Ssl_accept_renegotiates | 0 |
+| Ssl_accepts | 0 |
+| Ssl_callback_cache_hits | 0 |
+| Ssl_cipher | |
+| Ssl_cipher_list | |
+| Ssl_client_connects | 0 |
+| Ssl_connect_renegotiates | 0 |
+| Ssl_ctx_verify_depth | 0 |
+| Ssl_ctx_verify_mode | 0 |
+| Ssl_default_timeout | 0 |
+| Ssl_finished_accepts | 0 |
+| Ssl_finished_connects | 0 |
+| Ssl_session_cache_hits | 0 |
+| Ssl_session_cache_misses | 0 |
+| Ssl_session_cache_mode | NONE |
+| Ssl_session_cache_overflows | 0 |
+| Ssl_session_cache_size | 0 |
+| Ssl_session_cache_timeouts | 0 |
+| Ssl_sessions_reused | 0 |
+| Ssl_used_session_cache_entries | 0 |
+| Ssl_verify_depth | 0 |
+| Ssl_verify_mode | 0 |
+| Ssl_version | |
+| Subquery_cache_hit | 0 |
+| Subquery_cache_miss | 0 |
+| Syncs | 0 |
+| Table_locks_immediate | 36 |
+| Table_locks_waited | 0 |
+| Tc_log_max_pages_used | 0 |
+| Tc_log_page_size | 0 |
+| Tc_log_page_waits | 0 |
+| Threadpool_idle_threads | 0 |
+| Threadpool_threads | 0 |
+| Threads_cached | 0 |
+| Threads_connected | 1 |
+| Threads_created | 1 |
+| Threads_running | 1 |
+| Uptime | 113 |
+| Uptime_since_flush_status | 113 |
++------------------------------------------+-------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/global_variables.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/global_variables.txt
new file mode 100644
index 000000000..5f0906eed
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/global_variables.txt
@@ -0,0 +1,7 @@
++------------------+-------+
+| Variable_name | Value |
++------------------+-------+
+| log_bin | OFF |
+| max_connections | 100 |
+| table_open_cache | 400 |
++------------------+-------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/process_list.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/process_list.txt
new file mode 100644
index 000000000..a44ce5e70
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/process_list.txt
@@ -0,0 +1,6 @@
++------+---------+
+| time | user |
++------+---------+
+| 1 | netdata |
+| 9 | root |
++------+---------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/version.txt b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/version.txt
new file mode 100644
index 000000000..de684279d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mariadb/v5.5.64/version.txt
@@ -0,0 +1,6 @@
++-----------------+---------------------------------+
+| Variable_name | Value |
++-----------------+---------------------------------+
+| version | 5.5.64-MariaDB-1~trusty |
+| version_comment | mariadb.org binary distribution |
++-----------------+---------------------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/global_status.txt b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/global_status.txt
new file mode 100644
index 000000000..a4b2f2f93
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/global_status.txt
@@ -0,0 +1,490 @@
++-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Variable_name | Value |
++-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Aborted_clients | 0 |
+| Aborted_connects | 0 |
+| Acl_cache_items_count | 0 |
+| Binlog_cache_disk_use | 0 |
+| Binlog_cache_use | 6 |
+| Binlog_stmt_cache_disk_use | 0 |
+| Binlog_stmt_cache_use | 0 |
+| Bytes_received | 5584 |
+| Bytes_sent | 70700 |
+| Com_admin_commands | 5 |
+| Com_assign_to_keycache | 0 |
+| Com_alter_db | 0 |
+| Com_alter_event | 0 |
+| Com_alter_function | 0 |
+| Com_alter_instance | 0 |
+| Com_alter_procedure | 0 |
+| Com_alter_resource_group | 0 |
+| Com_alter_server | 0 |
+| Com_alter_table | 0 |
+| Com_alter_tablespace | 0 |
+| Com_alter_user | 2 |
+| Com_alter_user_default_role | 0 |
+| Com_analyze | 0 |
+| Com_begin | 0 |
+| Com_binlog | 0 |
+| Com_call_procedure | 0 |
+| Com_change_db | 1 |
+| Com_change_master | 0 |
+| Com_change_repl_filter | 0 |
+| Com_change_replication_source | 0 |
+| Com_check | 0 |
+| Com_checksum | 0 |
+| Com_clone | 0 |
+| Com_commit | 0 |
+| Com_create_db | 1 |
+| Com_create_event | 0 |
+| Com_create_function | 0 |
+| Com_create_index | 0 |
+| Com_create_procedure | 0 |
+| Com_create_role | 0 |
+| Com_create_server | 0 |
+| Com_create_table | 35 |
+| Com_create_resource_group | 0 |
+| Com_create_trigger | 0 |
+| Com_create_udf | 0 |
+| Com_create_user | 2 |
+| Com_create_view | 0 |
+| Com_create_spatial_reference_system | 0 |
+| Com_dealloc_sql | 0 |
+| Com_delete | 0 |
+| Com_delete_multi | 0 |
+| Com_do | 0 |
+| Com_drop_db | 0 |
+| Com_drop_event | 0 |
+| Com_drop_function | 0 |
+| Com_drop_index | 0 |
+| Com_drop_procedure | 0 |
+| Com_drop_resource_group | 0 |
+| Com_drop_role | 0 |
+| Com_drop_server | 0 |
+| Com_drop_spatial_reference_system | 0 |
+| Com_drop_table | 0 |
+| Com_drop_trigger | 0 |
+| Com_drop_user | 0 |
+| Com_drop_view | 0 |
+| Com_empty_query | 0 |
+| Com_execute_sql | 0 |
+| Com_explain_other | 0 |
+| Com_flush | 1 |
+| Com_get_diagnostics | 0 |
+| Com_grant | 2 |
+| Com_grant_roles | 0 |
+| Com_ha_close | 0 |
+| Com_ha_open | 0 |
+| Com_ha_read | 0 |
+| Com_help | 0 |
+| Com_import | 0 |
+| Com_insert | 0 |
+| Com_insert_select | 0 |
+| Com_install_component | 0 |
+| Com_install_plugin | 0 |
+| Com_kill | 0 |
+| Com_load | 0 |
+| Com_lock_instance | 0 |
+| Com_lock_tables | 0 |
+| Com_optimize | 0 |
+| Com_preload_keys | 0 |
+| Com_prepare_sql | 0 |
+| Com_purge | 0 |
+| Com_purge_before_date | 0 |
+| Com_release_savepoint | 0 |
+| Com_rename_table | 0 |
+| Com_rename_user | 0 |
+| Com_repair | 0 |
+| Com_replace | 0 |
+| Com_replace_select | 0 |
+| Com_reset | 0 |
+| Com_resignal | 0 |
+| Com_restart | 0 |
+| Com_revoke | 0 |
+| Com_revoke_all | 0 |
+| Com_revoke_roles | 0 |
+| Com_rollback | 0 |
+| Com_rollback_to_savepoint | 0 |
+| Com_savepoint | 0 |
+| Com_select | 2 |
+| Com_set_option | 4 |
+| Com_set_password | 0 |
+| Com_set_resource_group | 0 |
+| Com_set_role | 0 |
+| Com_signal | 0 |
+| Com_show_binlog_events | 0 |
+| Com_show_binlogs | 0 |
+| Com_show_charsets | 0 |
+| Com_show_collations | 0 |
+| Com_show_create_db | 0 |
+| Com_show_create_event | 0 |
+| Com_show_create_func | 0 |
+| Com_show_create_proc | 0 |
+| Com_show_create_table | 0 |
+| Com_show_create_trigger | 0 |
+| Com_show_databases | 0 |
+| Com_show_engine_logs | 0 |
+| Com_show_engine_mutex | 0 |
+| Com_show_engine_status | 0 |
+| Com_show_events | 0 |
+| Com_show_errors | 0 |
+| Com_show_fields | 0 |
+| Com_show_function_code | 0 |
+| Com_show_function_status | 0 |
+| Com_show_grants | 0 |
+| Com_show_keys | 0 |
+| Com_show_master_status | 0 |
+| Com_show_open_tables | 0 |
+| Com_show_plugins | 0 |
+| Com_show_privileges | 0 |
+| Com_show_procedure_code | 0 |
+| Com_show_procedure_status | 0 |
+| Com_show_processlist | 0 |
+| Com_show_profile | 0 |
+| Com_show_profiles | 0 |
+| Com_show_relaylog_events | 0 |
+| Com_show_replicas | 0 |
+| Com_show_slave_hosts | 0 |
+| Com_show_replica_status | 2 |
+| Com_show_slave_status | 2 |
+| Com_show_status | 5 |
+| Com_show_storage_engines | 0 |
+| Com_show_table_status | 0 |
+| Com_show_tables | 0 |
+| Com_show_triggers | 0 |
+| Com_show_variables | 1 |
+| Com_show_warnings | 0 |
+| Com_show_create_user | 0 |
+| Com_shutdown | 0 |
+| Com_replica_start | 0 |
+| Com_slave_start | 0 |
+| Com_replica_stop | 0 |
+| Com_slave_stop | 0 |
+| Com_group_replication_start | 0 |
+| Com_group_replication_stop | 0 |
+| Com_stmt_execute | 0 |
+| Com_stmt_close | 0 |
+| Com_stmt_fetch | 0 |
+| Com_stmt_prepare | 0 |
+| Com_stmt_reset | 0 |
+| Com_stmt_send_long_data | 0 |
+| Com_truncate | 0 |
+| Com_uninstall_component | 0 |
+| Com_uninstall_plugin | 0 |
+| Com_unlock_instance | 0 |
+| Com_unlock_tables | 0 |
+| Com_update | 0 |
+| Com_update_multi | 0 |
+| Com_xa_commit | 0 |
+| Com_xa_end | 0 |
+| Com_xa_prepare | 0 |
+| Com_xa_recover | 0 |
+| Com_xa_rollback | 0 |
+| Com_xa_start | 0 |
+| Com_stmt_reprepare | 0 |
+| Connection_errors_accept | 0 |
+| Connection_errors_internal | 0 |
+| Connection_errors_max_connections | 0 |
+| Connection_errors_peer_address | 0 |
+| Connection_errors_select | 0 |
+| Connection_errors_tcpwrap | 0 |
+| Connections | 25 |
+| Created_tmp_disk_tables | 0 |
+| Created_tmp_files | 5 |
+| Created_tmp_tables | 6 |
+| Current_tls_ca | ca.pem |
+| Current_tls_capath | |
+| Current_tls_cert | server-cert.pem |
+| Current_tls_cipher | |
+| Current_tls_ciphersuites | |
+| Current_tls_crl | |
+| Current_tls_crlpath | |
+| Current_tls_key | server-key.pem |
+| Current_tls_version | TLSv1.2,TLSv1.3 |
+| Delayed_errors | 0 |
+| Delayed_insert_threads | 0 |
+| Delayed_writes | 0 |
+| Error_log_buffered_bytes | 2752 |
+| Error_log_buffered_events | 15 |
+| Error_log_expired_events | 0 |
+| Error_log_latest_write | 1660827046947930 |
+| Flush_commands | 3 |
+| Global_connection_memory | 0 |
+| Handler_commit | 720 |
+| Handler_delete | 8 |
+| Handler_discover | 0 |
+| Handler_external_lock | 6779 |
+| Handler_mrr_init | 0 |
+| Handler_prepare | 24 |
+| Handler_read_first | 50 |
+| Handler_read_key | 1914 |
+| Handler_read_last | 0 |
+| Handler_read_next | 4303 |
+| Handler_read_prev | 0 |
+| Handler_read_rnd | 0 |
+| Handler_read_rnd_next | 4723 |
+| Handler_rollback | 1 |
+| Handler_savepoint | 0 |
+| Handler_savepoint_rollback | 0 |
+| Handler_update | 373 |
+| Handler_write | 1966 |
+| Innodb_buffer_pool_dump_status | Dumping of buffer pool not started |
+| Innodb_buffer_pool_load_status | Buffer pool(s) load completed at 220818 12:50:46 |
+| Innodb_buffer_pool_resize_status | |
+| Innodb_buffer_pool_pages_data | 1045 |
+| Innodb_buffer_pool_bytes_data | 17121280 |
+| Innodb_buffer_pool_pages_dirty | 0 |
+| Innodb_buffer_pool_bytes_dirty | 0 |
+| Innodb_buffer_pool_pages_flushed | 361 |
+| Innodb_buffer_pool_pages_free | 7143 |
+| Innodb_buffer_pool_pages_misc | 4 |
+| Innodb_buffer_pool_pages_total | 8192 |
+| Innodb_buffer_pool_read_ahead_rnd | 0 |
+| Innodb_buffer_pool_read_ahead | 0 |
+| Innodb_buffer_pool_read_ahead_evicted | 0 |
+| Innodb_buffer_pool_read_requests | 16723 |
+| Innodb_buffer_pool_reads | 878 |
+| Innodb_buffer_pool_wait_free | 0 |
+| Innodb_buffer_pool_write_requests | 2377 |
+| Innodb_data_fsyncs | 255 |
+| Innodb_data_pending_fsyncs | 0 |
+| Innodb_data_pending_reads | 0 |
+| Innodb_data_pending_writes | 0 |
+| Innodb_data_read | 14453760 |
+| Innodb_data_reads | 899 |
+| Innodb_data_writes | 561 |
+| Innodb_data_written | 6128128 |
+| Innodb_dblwr_pages_written | 220 |
+| Innodb_dblwr_writes | 58 |
+| Innodb_redo_log_read_only | OFF |
+| Innodb_redo_log_uuid | 1075899837 |
+| Innodb_redo_log_checkpoint_lsn | 31758453 |
+| Innodb_redo_log_current_lsn | 31758453 |
+| Innodb_redo_log_flushed_to_disk_lsn | 31758453 |
+| Innodb_redo_log_logical_size | 512 |
+| Innodb_redo_log_physical_size | 3276800 |
+| Innodb_redo_log_capacity_resized | 104857600 |
+| Innodb_redo_log_resize_status | OK |
+| Innodb_log_waits | 0 |
+| Innodb_log_write_requests | 1062 |
+| Innodb_log_writes | 116 |
+| Innodb_os_log_fsyncs | 69 |
+| Innodb_os_log_pending_fsyncs | 0 |
+| Innodb_os_log_pending_writes | 0 |
+| Innodb_os_log_written | 147968 |
+| Innodb_page_size | 16384 |
+| Innodb_pages_created | 168 |
+| Innodb_pages_read | 877 |
+| Innodb_pages_written | 361 |
+| Innodb_redo_log_enabled | ON |
+| Innodb_row_lock_current_waits | 0 |
+| Innodb_row_lock_time | 0 |
+| Innodb_row_lock_time_avg | 0 |
+| Innodb_row_lock_time_max | 0 |
+| Innodb_row_lock_waits | 0 |
+| Innodb_rows_deleted | 0 |
+| Innodb_rows_inserted | 0 |
+| Innodb_rows_read | 0 |
+| Innodb_rows_updated | 0 |
+| Innodb_system_rows_deleted | 8 |
+| Innodb_system_rows_inserted | 12 |
+| Innodb_system_rows_read | 5134 |
+| Innodb_system_rows_updated | 373 |
+| Innodb_sampled_pages_read | 0 |
+| Innodb_sampled_pages_skipped | 0 |
+| Innodb_num_open_files | 15 |
+| Innodb_truncated_status_writes | 0 |
+| Innodb_undo_tablespaces_total | 2 |
+| Innodb_undo_tablespaces_implicit | 2 |
+| Innodb_undo_tablespaces_explicit | 0 |
+| Innodb_undo_tablespaces_active | 2 |
+| Key_blocks_not_flushed | 0 |
+| Key_blocks_unused | 6698 |
+| Key_blocks_used | 0 |
+| Key_read_requests | 0 |
+| Key_reads | 0 |
+| Key_write_requests | 0 |
+| Key_writes | 0 |
+| Locked_connects | 0 |
+| Max_execution_time_exceeded | 0 |
+| Max_execution_time_set | 0 |
+| Max_execution_time_set_failed | 0 |
+| Max_used_connections | 2 |
+| Max_used_connections_time | 2022-08-18 12:51:46 |
+| Mysqlx_aborted_clients | 0 |
+| Mysqlx_address | :: |
+| Mysqlx_bytes_received | 0 |
+| Mysqlx_bytes_received_compressed_payload | 0 |
+| Mysqlx_bytes_received_uncompressed_frame | 0 |
+| Mysqlx_bytes_sent | 0 |
+| Mysqlx_bytes_sent_compressed_payload | 0 |
+| Mysqlx_bytes_sent_uncompressed_frame | 0 |
+| Mysqlx_compression_algorithm | |
+| Mysqlx_compression_level | |
+| Mysqlx_connection_accept_errors | 0 |
+| Mysqlx_connection_errors | 0 |
+| Mysqlx_connections_accepted | 0 |
+| Mysqlx_connections_closed | 0 |
+| Mysqlx_connections_rejected | 0 |
+| Mysqlx_crud_create_view | 0 |
+| Mysqlx_crud_delete | 0 |
+| Mysqlx_crud_drop_view | 0 |
+| Mysqlx_crud_find | 0 |
+| Mysqlx_crud_insert | 0 |
+| Mysqlx_crud_modify_view | 0 |
+| Mysqlx_crud_update | 0 |
+| Mysqlx_cursor_close | 0 |
+| Mysqlx_cursor_fetch | 0 |
+| Mysqlx_cursor_open | 0 |
+| Mysqlx_errors_sent | 0 |
+| Mysqlx_errors_unknown_message_type | 0 |
+| Mysqlx_expect_close | 0 |
+| Mysqlx_expect_open | 0 |
+| Mysqlx_init_error | 0 |
+| Mysqlx_messages_sent | 0 |
+| Mysqlx_notice_global_sent | 0 |
+| Mysqlx_notice_other_sent | 0 |
+| Mysqlx_notice_warning_sent | 0 |
+| Mysqlx_notified_by_group_replication | 0 |
+| Mysqlx_port | 33060 |
+| Mysqlx_prep_deallocate | 0 |
+| Mysqlx_prep_execute | 0 |
+| Mysqlx_prep_prepare | 0 |
+| Mysqlx_rows_sent | 0 |
+| Mysqlx_sessions | 0 |
+| Mysqlx_sessions_accepted | 0 |
+| Mysqlx_sessions_closed | 0 |
+| Mysqlx_sessions_fatal_error | 0 |
+| Mysqlx_sessions_killed | 0 |
+| Mysqlx_sessions_rejected | 0 |
+| Mysqlx_socket | /var/run/mysqld/mysqlx.sock |
+| Mysqlx_ssl_accepts | 0 |
+| Mysqlx_ssl_active | |
+| Mysqlx_ssl_cipher | |
+| Mysqlx_ssl_cipher_list | |
+| Mysqlx_ssl_ctx_verify_depth | 18446744073709551615 |
+| Mysqlx_ssl_ctx_verify_mode | 5 |
+| Mysqlx_ssl_finished_accepts | 0 |
+| Mysqlx_ssl_server_not_after | Aug 15 12:43:39 2032 GMT |
+| Mysqlx_ssl_server_not_before | Aug 18 12:43:39 2022 GMT |
+| Mysqlx_ssl_verify_depth | |
+| Mysqlx_ssl_verify_mode | |
+| Mysqlx_ssl_version | |
+| Mysqlx_stmt_create_collection | 0 |
+| Mysqlx_stmt_create_collection_index | 0 |
+| Mysqlx_stmt_disable_notices | 0 |
+| Mysqlx_stmt_drop_collection | 0 |
+| Mysqlx_stmt_drop_collection_index | 0 |
+| Mysqlx_stmt_enable_notices | 0 |
+| Mysqlx_stmt_ensure_collection | 0 |
+| Mysqlx_stmt_execute_mysqlx | 0 |
+| Mysqlx_stmt_execute_sql | 0 |
+| Mysqlx_stmt_execute_xplugin | 0 |
+| Mysqlx_stmt_get_collection_options | 0 |
+| Mysqlx_stmt_kill_client | 0 |
+| Mysqlx_stmt_list_clients | 0 |
+| Mysqlx_stmt_list_notices | 0 |
+| Mysqlx_stmt_list_objects | 0 |
+| Mysqlx_stmt_modify_collection_options | 0 |
+| Mysqlx_stmt_ping | 0 |
+| Mysqlx_worker_threads | 2 |
+| Mysqlx_worker_threads_active | 0 |
+| Not_flushed_delayed_rows | 0 |
+| Ongoing_anonymous_transaction_count | 0 |
+| Open_files | 8 |
+| Open_streams | 0 |
+| Open_table_definitions | 48 |
+| Open_tables | 127 |
+| Opened_files | 8 |
+| Opened_table_definitions | 77 |
+| Opened_tables | 208 |
+| Performance_schema_accounts_lost | 0 |
+| Performance_schema_cond_classes_lost | 0 |
+| Performance_schema_cond_instances_lost | 0 |
+| Performance_schema_digest_lost | 0 |
+| Performance_schema_file_classes_lost | 0 |
+| Performance_schema_file_handles_lost | 0 |
+| Performance_schema_file_instances_lost | 0 |
+| Performance_schema_hosts_lost | 0 |
+| Performance_schema_index_stat_lost | 0 |
+| Performance_schema_locker_lost | 0 |
+| Performance_schema_memory_classes_lost | 0 |
+| Performance_schema_metadata_lock_lost | 0 |
+| Performance_schema_mutex_classes_lost | 0 |
+| Performance_schema_mutex_instances_lost | 0 |
+| Performance_schema_nested_statement_lost | 0 |
+| Performance_schema_prepared_statements_lost | 0 |
+| Performance_schema_program_lost | 0 |
+| Performance_schema_rwlock_classes_lost | 0 |
+| Performance_schema_rwlock_instances_lost | 0 |
+| Performance_schema_session_connect_attrs_longest_seen | 112 |
+| Performance_schema_session_connect_attrs_lost | 0 |
+| Performance_schema_socket_classes_lost | 0 |
+| Performance_schema_socket_instances_lost | 0 |
+| Performance_schema_stage_classes_lost | 0 |
+| Performance_schema_statement_classes_lost | 0 |
+| Performance_schema_table_handles_lost | 0 |
+| Performance_schema_table_instances_lost | 0 |
+| Performance_schema_table_lock_stat_lost | 0 |
+| Performance_schema_thread_classes_lost | 0 |
+| Performance_schema_thread_instances_lost | 0 |
+| Performance_schema_users_lost | 0 |
+| Prepared_stmt_count | 0 |
+| Queries | 27 |
+| Questions | 15 |
+| Replica_open_temp_tables | 0 |
+| Secondary_engine_execution_count | 0 |
+| Select_full_join | 0 |
+| Select_full_range_join | 0 |
+| Select_range | 0 |
+| Select_range_check | 0 |
+| Select_scan | 12 |
+| Slave_open_temp_tables | 0 |
+| Slow_launch_threads | 0 |
+| Slow_queries | 0 |
+| Sort_merge_passes | 0 |
+| Sort_range | 0 |
+| Sort_rows | 0 |
+| Sort_scan | 0 |
+| Ssl_accept_renegotiates | 0 |
+| Ssl_accepts | 0 |
+| Ssl_callback_cache_hits | 0 |
+| Ssl_cipher | |
+| Ssl_cipher_list | |
+| Ssl_client_connects | 0 |
+| Ssl_connect_renegotiates | 0 |
+| Ssl_ctx_verify_depth | 18446744073709551615 |
+| Ssl_ctx_verify_mode | 5 |
+| Ssl_default_timeout | 0 |
+| Ssl_finished_accepts | 0 |
+| Ssl_finished_connects | 0 |
+| Ssl_server_not_after | Aug 15 12:43:39 2032 GMT |
+| Ssl_server_not_before | Aug 18 12:43:39 2022 GMT |
+| Ssl_session_cache_hits | 0 |
+| Ssl_session_cache_misses | 0 |
+| Ssl_session_cache_mode | SERVER |
+| Ssl_session_cache_overflows | 0 |
+| Ssl_session_cache_size | 128 |
+| Ssl_session_cache_timeout | 300 |
+| Ssl_session_cache_timeouts | 0 |
+| Ssl_sessions_reused | 0 |
+| Ssl_used_session_cache_entries | 0 |
+| Ssl_verify_depth | 0 |
+| Ssl_verify_mode | 0 |
+| Ssl_version | |
+| Table_locks_immediate | 6 |
+| Table_locks_waited | 0 |
+| Table_open_cache_hits | 3182 |
+| Table_open_cache_misses | 208 |
+| Table_open_cache_overflows | 0 |
+| Tc_log_max_pages_used | 0 |
+| Tc_log_page_size | 0 |
+| Tc_log_page_waits | 0 |
+| Threads_cached | 1 |
+| Threads_connected | 1 |
+| Threads_created | 2 |
+| Threads_running | 2 |
+| Tls_library_version | OpenSSL 1.1.1k FIPS 25 Mar 2021 |
+| Uptime | 152 |
+| Uptime_since_flush_status | 152 |
++-------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/global_variables.txt b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/global_variables.txt
new file mode 100644
index 000000000..02be0ae8e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/global_variables.txt
@@ -0,0 +1,9 @@
++--------------------------+-------+
+| Variable_name | Value |
++--------------------------+-------+
+| disabled_storage_engines | |
+| log_bin | ON |
+| max_connections | 151 |
+| performance_schema | ON |
+| table_open_cache | 4000 |
++--------------------------+-------+
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/process_list.txt b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/process_list.txt
new file mode 100644
index 000000000..a44ce5e70
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/process_list.txt
@@ -0,0 +1,6 @@
++------+---------+
+| time | user |
++------+---------+
+| 1 | netdata |
+| 9 | root |
++------+---------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/replica_status_multi_source.txt b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/replica_status_multi_source.txt
new file mode 100644
index 000000000..8a5e06836
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/replica_status_multi_source.txt
@@ -0,0 +1,6 @@
++----------------------------------+---------------+-------------+-------------+---------------+--------------------+---------------------+--------------------------------------+---------------+-----------------------+--------------------+---------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+--------------------------------------+-------------------------+-----------+---------------------+----------------------------------------------------------+--------------------+-------------+-------------------------+--------------------------+----------------+--------------------+------------------------------------------+-----------------------------------------------------------------------------------+---------------+----------------------+--------------+--------------------+------------------------+-----------------------+-------------------+
+| Replica_IO_State | Source_Host | Source_User | Source_Port | Connect_Retry | Source_Log_File | Read_Source_Log_Pos | Relay_Log_File | Relay_Log_Pos | Relay_Source_Log_File | Replica_IO_Running | Replica_SQL_Running | Replicate_Do_DB | Replicate_Ignore_DB | Replicate_Do_Table | Replicate_Ignore_Table | Replicate_Wild_Do_Table | Replicate_Wild_Ignore_Table | Last_Errno | Last_Error | Skip_Counter | Exec_Source_Log_Pos | Relay_Log_Space | Until_Condition | Until_Log_File | Until_Log_Pos | Source_SSL_Allowed | Source_SSL_CA_File | Source_SSL_CA_Path | Source_SSL_Cert | Source_SSL_Cipher | Source_SSL_Key | Seconds_Behind_Source | Source_SSL_Verify_Server_Cert | Last_IO_Errno | Last_IO_Error | Last_SQL_Errno | Last_SQL_Error | Replicate_Ignore_Server_Ids | Source_Server_Id | Master_UUID | Source_Info_File | SQL_Delay | SQL_Remaining_Delay | Replica_SQL_Running_State | Source_Retry_Count | Source_Bind | Last_IO_Error_Timestamp | Last_SQL_Error_Timestamp | Source_SSL_Crl | Source_SSL_Crlpath | Retrieved_Gtid_Set | Executed_Gtid_Set | Auto_Position | Replicate_Rewrite_DB | Channel_Name | Source_TLS_Version | Source_public_key_path | Get_Source_public_key | Network_Namespace |
++----------------------------------+---------------+-------------+-------------+---------------+--------------------+---------------------+--------------------------------------+---------------+-----------------------+--------------------+---------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+--------------------------------------+-------------------------+-----------+---------------------+----------------------------------------------------------+--------------------+-------------+-------------------------+--------------------------+----------------+--------------------+------------------------------------------+-----------------------------------------------------------------------------------+---------------+----------------------+--------------+--------------------+------------------------+-----------------------+-------------------+
+| Waiting for source to send event | mysql-master1 | repl1 | 3306 | 60 | mysql-bin-1.000003 | 975 | mysql-slave-relay-bin-master1.000003 | 1195 | mysql-bin-1.000003 | Yes | Yes | | | | | | | 0 | | 0 | 975 | 1599 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 1 | 61221e31-1ef3-11ed-a56a-0242ac120002 | mysql.slave_master_info | 0 | NULL | Replica has read all relay log; waiting for more updates | 86400 | | | | | | 61221e31-1ef3-11ed-a56a-0242ac120002:1-3 | 61221e31-1ef3-11ed-a56a-0242ac120002:1-3,6151d979-1ef3-11ed-a509-0242ac120003:1-3 | 1 | | master1 | | | 0 | |
+| Waiting for source to send event | mysql-master2 | repl2 | 3306 | 60 | mysql-bin-1.000003 | 974 | mysql-slave-relay-bin-master2.000003 | 1194 | mysql-bin-1.000003 | Yes | Yes | | | | | | | 0 | | 0 | 974 | 1598 | None | | 0 | No | | | | | | 0 | No | 0 | | 0 | | | 2 | 6151d979-1ef3-11ed-a509-0242ac120003 | mysql.slave_master_info | 0 | NULL | Replica has read all relay log; waiting for more updates | 86400 | | | | | | 6151d979-1ef3-11ed-a509-0242ac120003:1-3 | 61221e31-1ef3-11ed-a56a-0242ac120002:1-3,6151d979-1ef3-11ed-a509-0242ac120003:1-3 | 1 | | master2 | | | 0 | |
++----------------------------------+---------------+-------------+-------------+---------------+--------------------+---------------------+--------------------------------------+---------------+-----------------------+--------------------+---------------------+-----------------+---------------------+--------------------+------------------------+-------------------------+-----------------------------+------------+------------+--------------+---------------------+-----------------+-----------------+----------------+---------------+--------------------+--------------------+--------------------+-----------------+-------------------+----------------+-----------------------+-------------------------------+---------------+---------------+----------------+----------------+-----------------------------+------------------+--------------------------------------+-------------------------+-----------+---------------------+----------------------------------------------------------+--------------------+-------------+-------------------------+--------------------------+----------------+--------------------+------------------------------------------+-----------------------------------------------------------------------------------+---------------+----------------------+--------------+--------------------+------------------------+-----------------------+-------------------+
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/version.txt b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/version.txt
new file mode 100644
index 000000000..5c553b1ad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/mysql/v8.0.30/version.txt
@@ -0,0 +1,6 @@
++-----------------+------------------------------+
+| Variable_name | Value |
++-----------------+------------------------------+
+| version | 8.0.30 |
+| version_comment | MySQL Community Server - GPL |
++-----------------+------------------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/global_status.txt b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/global_status.txt
new file mode 100644
index 000000000..d7ee5741a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/global_status.txt
@@ -0,0 +1,533 @@
++--------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Variable_name | Value |
++--------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
+| Aborted_clients | 0 |
+| Aborted_connects | 1 |
+| Acl_cache_items_count | 0 |
+| Binlog_snapshot_file | |
+| Binlog_snapshot_position | 0 |
+| Binlog_cache_disk_use | 0 |
+| Binlog_cache_use | 0 |
+| Binlog_snapshot_gtid_executed | not-in-consistent-snapshot |
+| Binlog_stmt_cache_disk_use | 0 |
+| Binlog_stmt_cache_use | 0 |
+| Bytes_received | 682970 |
+| Bytes_sent | 33668405 |
+| Com_admin_commands | 1 |
+| Com_assign_to_keycache | 0 |
+| Com_alter_db | 0 |
+| Com_alter_event | 0 |
+| Com_alter_function | 0 |
+| Com_alter_instance | 0 |
+| Com_alter_procedure | 0 |
+| Com_alter_resource_group | 0 |
+| Com_alter_server | 0 |
+| Com_alter_table | 0 |
+| Com_alter_tablespace | 0 |
+| Com_alter_user | 0 |
+| Com_alter_user_default_role | 0 |
+| Com_analyze | 0 |
+| Com_begin | 0 |
+| Com_binlog | 0 |
+| Com_call_procedure | 0 |
+| Com_change_db | 1 |
+| Com_change_master | 0 |
+| Com_change_repl_filter | 0 |
+| Com_change_replication_source | 0 |
+| Com_check | 0 |
+| Com_checksum | 0 |
+| Com_clone | 0 |
+| Com_commit | 0 |
+| Com_create_compression_dictionary | 0 |
+| Com_create_db | 1 |
+| Com_create_event | 0 |
+| Com_create_function | 0 |
+| Com_create_index | 0 |
+| Com_create_procedure | 0 |
+| Com_create_role | 0 |
+| Com_create_server | 0 |
+| Com_create_table | 34 |
+| Com_create_resource_group | 0 |
+| Com_create_trigger | 0 |
+| Com_create_udf | 0 |
+| Com_create_user | 0 |
+| Com_create_view | 0 |
+| Com_create_spatial_reference_system | 0 |
+| Com_dealloc_sql | 0 |
+| Com_delete | 0 |
+| Com_delete_multi | 0 |
+| Com_do | 0 |
+| Com_drop_compression_dictionary | 0 |
+| Com_drop_db | 0 |
+| Com_drop_event | 0 |
+| Com_drop_function | 0 |
+| Com_drop_index | 0 |
+| Com_drop_procedure | 0 |
+| Com_drop_resource_group | 0 |
+| Com_drop_role | 0 |
+| Com_drop_server | 0 |
+| Com_drop_spatial_reference_system | 0 |
+| Com_drop_table | 0 |
+| Com_drop_trigger | 0 |
+| Com_drop_user | 0 |
+| Com_drop_view | 0 |
+| Com_empty_query | 0 |
+| Com_execute_sql | 0 |
+| Com_explain_other | 0 |
+| Com_flush | 1 |
+| Com_get_diagnostics | 0 |
+| Com_grant | 0 |
+| Com_grant_roles | 0 |
+| Com_ha_close | 0 |
+| Com_ha_open | 0 |
+| Com_ha_read | 0 |
+| Com_help | 0 |
+| Com_import | 0 |
+| Com_insert | 0 |
+| Com_insert_select | 0 |
+| Com_install_component | 0 |
+| Com_install_plugin | 0 |
+| Com_kill | 0 |
+| Com_load | 0 |
+| Com_lock_instance | 0 |
+| Com_lock_tables | 0 |
+| Com_lock_tables_for_backup | 0 |
+| Com_optimize | 0 |
+| Com_preload_keys | 0 |
+| Com_prepare_sql | 0 |
+| Com_purge | 0 |
+| Com_purge_before_date | 0 |
+| Com_release_savepoint | 0 |
+| Com_rename_table | 0 |
+| Com_rename_user | 0 |
+| Com_repair | 0 |
+| Com_replace | 0 |
+| Com_replace_select | 0 |
+| Com_reset | 0 |
+| Com_resignal | 0 |
+| Com_restart | 0 |
+| Com_revoke | 0 |
+| Com_revoke_all | 0 |
+| Com_revoke_roles | 0 |
+| Com_rollback | 0 |
+| Com_rollback_to_savepoint | 0 |
+| Com_savepoint | 0 |
+| Com_select | 1687 |
+| Com_set_option | 4 |
+| Com_set_password | 0 |
+| Com_set_resource_group | 0 |
+| Com_set_role | 0 |
+| Com_signal | 0 |
+| Com_show_binlog_events | 0 |
+| Com_show_binlogs | 0 |
+| Com_show_charsets | 0 |
+| Com_show_client_statistics | 0 |
+| Com_show_collations | 0 |
+| Com_show_create_db | 0 |
+| Com_show_create_event | 0 |
+| Com_show_create_func | 0 |
+| Com_show_create_proc | 0 |
+| Com_show_create_table | 0 |
+| Com_show_create_trigger | 0 |
+| Com_show_databases | 0 |
+| Com_show_engine_logs | 0 |
+| Com_show_engine_mutex | 0 |
+| Com_show_engine_status | 0 |
+| Com_show_events | 0 |
+| Com_show_errors | 0 |
+| Com_show_fields | 0 |
+| Com_show_function_code | 0 |
+| Com_show_function_status | 0 |
+| Com_show_grants | 0 |
+| Com_show_index_statistics | 0 |
+| Com_show_keys | 0 |
+| Com_show_master_status | 0 |
+| Com_show_open_tables | 0 |
+| Com_show_plugins | 2 |
+| Com_show_privileges | 0 |
+| Com_show_procedure_code | 0 |
+| Com_show_procedure_status | 0 |
+| Com_show_processlist | 0 |
+| Com_show_profile | 0 |
+| Com_show_profiles | 0 |
+| Com_show_relaylog_events | 0 |
+| Com_show_replicas | 0 |
+| Com_show_slave_hosts | 0 |
+| Com_show_replica_status | 1681 |
+| Com_show_slave_status | 1681 |
+| Com_show_status | 1682 |
+| Com_show_storage_engines | 0 |
+| Com_show_table_statistics | 0 |
+| Com_show_table_status | 0 |
+| Com_show_tables | 0 |
+| Com_show_thread_statistics | 0 |
+| Com_show_triggers | 0 |
+| Com_show_user_statistics | 0 |
+| Com_show_variables | 1689 |
+| Com_show_warnings | 0 |
+| Com_show_create_user | 0 |
+| Com_shutdown | 0 |
+| Com_replica_start | 0 |
+| Com_slave_start | 0 |
+| Com_replica_stop | 0 |
+| Com_slave_stop | 0 |
+| Com_group_replication_start | 0 |
+| Com_group_replication_stop | 0 |
+| Com_stmt_execute | 0 |
+| Com_stmt_close | 0 |
+| Com_stmt_fetch | 0 |
+| Com_stmt_prepare | 0 |
+| Com_stmt_reset | 0 |
+| Com_stmt_send_long_data | 0 |
+| Com_truncate | 0 |
+| Com_uninstall_component | 0 |
+| Com_uninstall_plugin | 0 |
+| Com_unlock_instance | 0 |
+| Com_unlock_tables | 0 |
+| Com_update | 0 |
+| Com_update_multi | 0 |
+| Com_xa_commit | 0 |
+| Com_xa_end | 0 |
+| Com_xa_prepare | 0 |
+| Com_xa_recover | 0 |
+| Com_xa_rollback | 0 |
+| Com_xa_start | 0 |
+| Com_stmt_reprepare | 0 |
+| Connection_errors_accept | 0 |
+| Connection_errors_internal | 0 |
+| Connection_errors_max_connections | 0 |
+| Connection_errors_peer_address | 0 |
+| Connection_errors_select | 0 |
+| Connection_errors_tcpwrap | 0 |
+| Connections | 13 |
+| Created_tmp_disk_tables | 1683 |
+| Created_tmp_files | 5 |
+| Created_tmp_tables | 5054 |
+| Current_tls_ca | ca.pem |
+| Current_tls_capath | |
+| Current_tls_cert | server-cert.pem |
+| Current_tls_cipher | |
+| Current_tls_ciphersuites | |
+| Current_tls_crl | |
+| Current_tls_crlpath | |
+| Current_tls_key | server-key.pem |
+| Current_tls_version | TLSv1.2,TLSv1.3 |
+| Delayed_errors | 0 |
+| Delayed_insert_threads | 0 |
+| Delayed_writes | 0 |
+| Error_log_buffered_bytes | 1304 |
+| Error_log_buffered_events | 9 |
+| Error_log_expired_events | 0 |
+| Error_log_latest_write | 1660920303043759 |
+| Flush_commands | 3 |
+| Global_connection_memory | 0 |
+| Handler_commit | 576 |
+| Handler_delete | 0 |
+| Handler_discover | 0 |
+| Handler_external_lock | 13215 |
+| Handler_mrr_init | 0 |
+| Handler_prepare | 0 |
+| Handler_read_first | 1724 |
+| Handler_read_key | 3439 |
+| Handler_read_last | 0 |
+| Handler_read_next | 4147 |
+| Handler_read_prev | 0 |
+| Handler_read_rnd | 0 |
+| Handler_read_rnd_next | 2983285 |
+| Handler_rollback | 0 |
+| Handler_savepoint | 0 |
+| Handler_savepoint_rollback | 0 |
+| Handler_update | 317 |
+| Handler_write | 906501 |
+| Innodb_background_log_sync | 0 |
+| Innodb_buffer_pool_dump_status | Dumping of buffer pool not started |
+| Innodb_buffer_pool_load_status | Buffer pool(s) load completed at 220819 14:45:02 |
+| Innodb_buffer_pool_resize_status | |
+| Innodb_buffer_pool_pages_data | 1123 |
+| Innodb_buffer_pool_bytes_data | 18399232 |
+| Innodb_buffer_pool_pages_dirty | 3 |
+| Innodb_buffer_pool_bytes_dirty | 49152 |
+| Innodb_buffer_pool_pages_flushed | 205 |
+| Innodb_buffer_pool_pages_free | 7064 |
+| Innodb_buffer_pool_pages_LRU_flushed | 0 |
+| Innodb_buffer_pool_pages_made_not_young | 27 |
+| Innodb_buffer_pool_pages_made_young | 6342 |
+| Innodb_buffer_pool_pages_misc | 5 |
+| Innodb_buffer_pool_pages_old | 421 |
+| Innodb_buffer_pool_pages_total | 8192 |
+| Innodb_buffer_pool_read_ahead_rnd | 0 |
+| Innodb_buffer_pool_read_ahead | 0 |
+| Innodb_buffer_pool_read_ahead_evicted | 0 |
+| Innodb_buffer_pool_read_requests | 109817 |
+| Innodb_buffer_pool_reads | 978 |
+| Innodb_buffer_pool_wait_free | 0 |
+| Innodb_buffer_pool_write_requests | 77412 |
+| Innodb_checkpoint_age | 0 |
+| Innodb_checkpoint_max_age | 80576000 |
+| Innodb_data_fsyncs | 50 |
+| Innodb_data_pending_fsyncs | 0 |
+| Innodb_data_pending_reads | 0 |
+| Innodb_data_pending_writes | 0 |
+| Innodb_data_read | 16094208 |
+| Innodb_data_reads | 1002 |
+| Innodb_data_writes | 288 |
+| Innodb_data_written | 3420160 |
+| Innodb_dblwr_pages_written | 30 |
+| Innodb_dblwr_writes | 8 |
+| Innodb_ibuf_free_list | 0 |
+| Innodb_ibuf_segment_size | 2 |
+| Innodb_log_waits | 0 |
+| Innodb_log_write_requests | 651 |
+| Innodb_log_writes | 47 |
+| Innodb_lsn_current | 31778525 |
+| Innodb_lsn_flushed | 31778525 |
+| Innodb_lsn_last_checkpoint | 31778525 |
+| Innodb_master_thread_active_loops | 1674 |
+| Innodb_master_thread_idle_loops | 36 |
+| Innodb_max_trx_id | 1803 |
+| Innodb_oldest_view_low_limit_trx_id | 0 |
+| Innodb_os_log_fsyncs | 13 |
+| Innodb_os_log_pending_fsyncs | 0 |
+| Innodb_os_log_pending_writes | 0 |
+| Innodb_os_log_written | 45568 |
+| Innodb_page_size | 16384 |
+| Innodb_pages_created | 155 |
+| Innodb_pages_read | 977 |
+| Innodb_pages0_read | 7 |
+| Innodb_pages_written | 205 |
+| Innodb_purge_trx_id | 1801 |
+| Innodb_purge_undo_no | 0 |
+| Innodb_redo_log_enabled | ON |
+| Innodb_row_lock_current_waits | 0 |
+| Innodb_row_lock_time | 0 |
+| Innodb_row_lock_time_avg | 0 |
+| Innodb_row_lock_time_max | 0 |
+| Innodb_row_lock_waits | 0 |
+| Innodb_rows_deleted | 0 |
+| Innodb_rows_inserted | 5055 |
+| Innodb_rows_read | 5055 |
+| Innodb_rows_updated | 0 |
+| Innodb_system_rows_deleted | 0 |
+| Innodb_system_rows_inserted | 0 |
+| Innodb_system_rows_read | 4881 |
+| Innodb_system_rows_updated | 317 |
+| Innodb_sampled_pages_read | 0 |
+| Innodb_sampled_pages_skipped | 0 |
+| Innodb_num_open_files | 17 |
+| Innodb_truncated_status_writes | 0 |
+| Innodb_undo_tablespaces_total | 2 |
+| Innodb_undo_tablespaces_implicit | 2 |
+| Innodb_undo_tablespaces_explicit | 0 |
+| Innodb_undo_tablespaces_active | 2 |
+| Innodb_secondary_index_triggered_cluster_reads | 2098 |
+| Innodb_secondary_index_triggered_cluster_reads_avoided | 0 |
+| Innodb_buffered_aio_submitted | 0 |
+| Innodb_scan_pages_contiguous | 0 |
+| Innodb_scan_pages_disjointed | 0 |
+| Innodb_scan_pages_total_seek_distance | 0 |
+| Innodb_scan_data_size | 0 |
+| Innodb_scan_deleted_recs_size | 0 |
+| Innodb_scrub_log | 0 |
+| Innodb_scrub_background_page_reorganizations | 0 |
+| Innodb_scrub_background_page_splits | 0 |
+| Innodb_scrub_background_page_split_failures_underflow | 0 |
+| Innodb_scrub_background_page_split_failures_out_of_filespace | 0 |
+| Innodb_scrub_background_page_split_failures_missing_index | 0 |
+| Innodb_scrub_background_page_split_failures_unknown | 0 |
+| Innodb_encryption_n_merge_blocks_encrypted | 0 |
+| Innodb_encryption_n_merge_blocks_decrypted | 0 |
+| Innodb_encryption_n_rowlog_blocks_encrypted | 0 |
+| Innodb_encryption_n_rowlog_blocks_decrypted | 0 |
+| Innodb_encryption_redo_key_version | 0 |
+| Key_blocks_not_flushed | 0 |
+| Key_blocks_unused | 6698 |
+| Key_blocks_used | 0 |
+| Key_read_requests | 0 |
+| Key_reads | 0 |
+| Key_write_requests | 0 |
+| Key_writes | 0 |
+| Locked_connects | 0 |
+| Max_execution_time_exceeded | 0 |
+| Max_execution_time_set | 0 |
+| Max_execution_time_set_failed | 0 |
+| Max_used_connections | 3 |
+| Max_used_connections_time | 2022-08-19 15:05:34 |
+| Mysqlx_aborted_clients | 0 |
+| Mysqlx_address | :: |
+| Mysqlx_bytes_received | 0 |
+| Mysqlx_bytes_received_compressed_payload | 0 |
+| Mysqlx_bytes_received_uncompressed_frame | 0 |
+| Mysqlx_bytes_sent | 0 |
+| Mysqlx_bytes_sent_compressed_payload | 0 |
+| Mysqlx_bytes_sent_uncompressed_frame | 0 |
+| Mysqlx_compression_algorithm | |
+| Mysqlx_compression_level | |
+| Mysqlx_connection_accept_errors | 0 |
+| Mysqlx_connection_errors | 0 |
+| Mysqlx_connections_accepted | 0 |
+| Mysqlx_connections_closed | 0 |
+| Mysqlx_connections_rejected | 0 |
+| Mysqlx_crud_create_view | 0 |
+| Mysqlx_crud_delete | 0 |
+| Mysqlx_crud_drop_view | 0 |
+| Mysqlx_crud_find | 0 |
+| Mysqlx_crud_insert | 0 |
+| Mysqlx_crud_modify_view | 0 |
+| Mysqlx_crud_update | 0 |
+| Mysqlx_cursor_close | 0 |
+| Mysqlx_cursor_fetch | 0 |
+| Mysqlx_cursor_open | 0 |
+| Mysqlx_errors_sent | 0 |
+| Mysqlx_errors_unknown_message_type | 0 |
+| Mysqlx_expect_close | 0 |
+| Mysqlx_expect_open | 0 |
+| Mysqlx_init_error | 0 |
+| Mysqlx_messages_sent | 0 |
+| Mysqlx_notice_global_sent | 0 |
+| Mysqlx_notice_other_sent | 0 |
+| Mysqlx_notice_warning_sent | 0 |
+| Mysqlx_notified_by_group_replication | 0 |
+| Mysqlx_port | 33060 |
+| Mysqlx_prep_deallocate | 0 |
+| Mysqlx_prep_execute | 0 |
+| Mysqlx_prep_prepare | 0 |
+| Mysqlx_rows_sent | 0 |
+| Mysqlx_sessions | 0 |
+| Mysqlx_sessions_accepted | 0 |
+| Mysqlx_sessions_closed | 0 |
+| Mysqlx_sessions_fatal_error | 0 |
+| Mysqlx_sessions_killed | 0 |
+| Mysqlx_sessions_rejected | 0 |
+| Mysqlx_socket | /var/lib/mysql/mysqlx.sock |
+| Mysqlx_ssl_accepts | 0 |
+| Mysqlx_ssl_active | |
+| Mysqlx_ssl_cipher | |
+| Mysqlx_ssl_cipher_list | |
+| Mysqlx_ssl_ctx_verify_depth | 18446744073709551615 |
+| Mysqlx_ssl_ctx_verify_mode | 5 |
+| Mysqlx_ssl_finished_accepts | 0 |
+| Mysqlx_ssl_server_not_after | Aug 16 14:44:56 2032 GMT |
+| Mysqlx_ssl_server_not_before | Aug 19 14:44:56 2022 GMT |
+| Mysqlx_ssl_verify_depth | |
+| Mysqlx_ssl_verify_mode | |
+| Mysqlx_ssl_version | |
+| Mysqlx_stmt_create_collection | 0 |
+| Mysqlx_stmt_create_collection_index | 0 |
+| Mysqlx_stmt_disable_notices | 0 |
+| Mysqlx_stmt_drop_collection | 0 |
+| Mysqlx_stmt_drop_collection_index | 0 |
+| Mysqlx_stmt_enable_notices | 0 |
+| Mysqlx_stmt_ensure_collection | 0 |
+| Mysqlx_stmt_execute_mysqlx | 0 |
+| Mysqlx_stmt_execute_sql | 0 |
+| Mysqlx_stmt_execute_xplugin | 0 |
+| Mysqlx_stmt_get_collection_options | 0 |
+| Mysqlx_stmt_kill_client | 0 |
+| Mysqlx_stmt_list_clients | 0 |
+| Mysqlx_stmt_list_notices | 0 |
+| Mysqlx_stmt_list_objects | 0 |
+| Mysqlx_stmt_modify_collection_options | 0 |
+| Mysqlx_stmt_ping | 0 |
+| Mysqlx_worker_threads | 2 |
+| Mysqlx_worker_threads_active | 0 |
+| Net_buffer_length | 32768 |
+| Not_flushed_delayed_rows | 0 |
+| Ongoing_anonymous_transaction_count | 0 |
+| Open_files | 2 |
+| Open_streams | 0 |
+| Open_table_definitions | 44 |
+| Open_tables | 77 |
+| Opened_files | 2 |
+| Opened_table_definitions | 73 |
+| Opened_tables | 158 |
+| Performance_schema_accounts_lost | 0 |
+| Performance_schema_cond_classes_lost | 0 |
+| Performance_schema_cond_instances_lost | 0 |
+| Performance_schema_digest_lost | 0 |
+| Performance_schema_file_classes_lost | 0 |
+| Performance_schema_file_handles_lost | 0 |
+| Performance_schema_file_instances_lost | 0 |
+| Performance_schema_hosts_lost | 0 |
+| Performance_schema_index_stat_lost | 0 |
+| Performance_schema_locker_lost | 0 |
+| Performance_schema_memory_classes_lost | 0 |
+| Performance_schema_metadata_lock_lost | 0 |
+| Performance_schema_mutex_classes_lost | 0 |
+| Performance_schema_mutex_instances_lost | 0 |
+| Performance_schema_nested_statement_lost | 0 |
+| Performance_schema_prepared_statements_lost | 0 |
+| Performance_schema_program_lost | 0 |
+| Performance_schema_rwlock_classes_lost | 0 |
+| Performance_schema_rwlock_instances_lost | 0 |
+| Performance_schema_session_connect_attrs_longest_seen | 117 |
+| Performance_schema_session_connect_attrs_lost | 0 |
+| Performance_schema_socket_classes_lost | 0 |
+| Performance_schema_socket_instances_lost | 0 |
+| Performance_schema_stage_classes_lost | 0 |
+| Performance_schema_statement_classes_lost | 0 |
+| Performance_schema_table_handles_lost | 0 |
+| Performance_schema_table_instances_lost | 0 |
+| Performance_schema_table_lock_stat_lost | 0 |
+| Performance_schema_thread_classes_lost | 0 |
+| Performance_schema_thread_instances_lost | 0 |
+| Performance_schema_users_lost | 0 |
+| Prepared_stmt_count | 0 |
+| Queries | 6748 |
+| Questions | 6746 |
+| Replica_open_temp_tables | 0 |
+| Secondary_engine_execution_count | 0 |
+| Select_full_join | 0 |
+| Select_full_range_join | 0 |
+| Select_range | 0 |
+| Select_range_check | 0 |
+| Select_scan | 8425 |
+| Slave_open_temp_tables | 0 |
+| Slow_launch_threads | 0 |
+| Slow_queries | 0 |
+| Sort_merge_passes | 0 |
+| Sort_range | 0 |
+| Sort_rows | 0 |
+| Sort_scan | 1681 |
+| Ssl_accept_renegotiates | 0 |
+| Ssl_accepts | 0 |
+| Ssl_callback_cache_hits | 0 |
+| Ssl_cipher | |
+| Ssl_cipher_list | |
+| Ssl_client_connects | 0 |
+| Ssl_connect_renegotiates | 0 |
+| Ssl_ctx_verify_depth | 18446744073709551615 |
+| Ssl_ctx_verify_mode | 5 |
+| Ssl_default_timeout | 0 |
+| Ssl_finished_accepts | 0 |
+| Ssl_finished_connects | 0 |
+| Ssl_server_not_after | Aug 16 14:44:56 2032 GMT |
+| Ssl_server_not_before | Aug 19 14:44:56 2022 GMT |
+| Ssl_session_cache_hits | 0 |
+| Ssl_session_cache_misses | 0 |
+| Ssl_session_cache_mode | SERVER |
+| Ssl_session_cache_overflows | 0 |
+| Ssl_session_cache_size | 128 |
+| Ssl_session_cache_timeout | 300 |
+| Ssl_session_cache_timeouts | 0 |
+| Ssl_sessions_reused | 0 |
+| Ssl_used_session_cache_entries | 0 |
+| Ssl_verify_depth | 0 |
+| Ssl_verify_mode | 0 |
+| Ssl_version | |
+| Table_locks_immediate | 3371 |
+| Table_locks_waited | 0 |
+| Table_open_cache_hits | 6450 |
+| Table_open_cache_misses | 158 |
+| Table_open_cache_overflows | 0 |
+| Tc_log_max_pages_used | 0 |
+| Tc_log_page_size | 0 |
+| Tc_log_page_waits | 0 |
+| Threadpool_idle_threads | 0 |
+| Threadpool_threads | 0 |
+| Threads_cached | 1 |
+| Threads_connected | 2 |
+| Threads_created | 3 |
+| Threads_running | 2 |
+| Uptime | 1711 |
+| Uptime_since_flush_status | 1711 |
++--------------------------------------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/global_variables.txt b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/global_variables.txt
new file mode 100644
index 000000000..02be0ae8e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/global_variables.txt
@@ -0,0 +1,9 @@
++--------------------------+-------+
+| Variable_name | Value |
++--------------------------+-------+
+| disabled_storage_engines | |
+| log_bin | ON |
+| max_connections | 151 |
+| performance_schema | ON |
+| table_open_cache | 4000 |
++--------------------------+-------+
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/process_list.txt b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/process_list.txt
new file mode 100644
index 000000000..a44ce5e70
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/process_list.txt
@@ -0,0 +1,6 @@
++------+---------+
+| time | user |
++------+---------+
+| 1 | netdata |
+| 9 | root |
++------+---------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/user_statistics.txt b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/user_statistics.txt
new file mode 100644
index 000000000..d7c206e47
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/user_statistics.txt
@@ -0,0 +1,6 @@
++---------+-------------------+------------------------+--------------------+---------------------+---------------------+----------------+------------+----------------------+--------------+--------------+-----------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+
+| User | Total_connections | Concurrent_connections | Connected_time | Busy_time | Cpu_time | Bytes_received | Bytes_sent | Binlog_bytes_written | Rows_fetched | Rows_updated | Table_rows_read | Select_commands | Update_commands | Other_commands | Commit_transactions | Rollback_transactions | Denied_connections | Lost_connections | Access_denied | Empty_queries | Total_ssl_connections |
++---------+-------------------+------------------------+--------------------+---------------------+---------------------+----------------+------------+----------------------+--------------+--------------+-----------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+
+| netdata | 1 | 0 | 7.6873109 | 0.000136 | 0.000141228 | 71 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| root | 2 | 0 | 1843013485340.5564 | 0.15132199999999996 | 0.15179981700000006 | 14681 | 573440 | 0 | 1 | 0 | 114633 | 37 | 0 | 110 | 0 | 0 | 1 | 0 | 0 | 36 | 0 |
++---------+-------------------+------------------------+--------------------+---------------------+---------------------+----------------+------------+----------------------+--------------+--------------+-----------------+-----------------+-----------------+----------------+---------------------+-----------------------+--------------------+------------------+---------------+---------------+-----------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/version.txt b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/version.txt
new file mode 100644
index 000000000..dede361ef
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/percona/v8.0.29/version.txt
@@ -0,0 +1,6 @@
++-----------------+--------------------------------------------------------+
+| Variable_name | Value |
++-----------------+--------------------------------------------------------+
+| version | 8.0.29-21 |
+| version_comment | Percona Server (GPL), Release 21, Revision c59f87d2854 |
++-----------------+--------------------------------------------------------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/mysql/testdata/session_variables.txt b/src/go/plugin/go.d/modules/mysql/testdata/session_variables.txt
new file mode 100644
index 000000000..358750607
--- /dev/null
+++ b/src/go/plugin/go.d/modules/mysql/testdata/session_variables.txt
@@ -0,0 +1,6 @@
++----------------+-------+
+| Variable_name | Value |
++----------------+-------+
+| sql_log_off | OFF |
+| slow_query_log | ON |
++----------------+-------+ \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nginx/README.md b/src/go/plugin/go.d/modules/nginx/README.md
new file mode 120000
index 000000000..7b19fe44f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/README.md
@@ -0,0 +1 @@
+integrations/nginx.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nginx/apiclient.go b/src/go/plugin/go.d/modules/nginx/apiclient.go
new file mode 100644
index 000000000..53d9f2245
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/apiclient.go
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginx
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ connActive = "connActive"
+ connAccepts = "connAccepts"
+ connHandled = "connHandled"
+ requests = "requests"
+ requestTime = "requestTime"
+ connReading = "connReading"
+ connWriting = "connWriting"
+ connWaiting = "connWaiting"
+)
+
+var (
+ nginxSeq = []string{
+ connActive,
+ connAccepts,
+ connHandled,
+ requests,
+ connReading,
+ connWriting,
+ connWaiting,
+ }
+ tengineSeq = []string{
+ connActive,
+ connAccepts,
+ connHandled,
+ requests,
+ requestTime,
+ connReading,
+ connWriting,
+ connWaiting,
+ }
+
+ reStatus = regexp.MustCompile(`^Active connections: ([0-9]+)\n[^\d]+([0-9]+) ([0-9]+) ([0-9]+) ?([0-9]+)?\nReading: ([0-9]+) Writing: ([0-9]+) Waiting: ([0-9]+)`)
+)
+
+func newAPIClient(client *http.Client, request web.Request) *apiClient {
+ return &apiClient{httpClient: client, request: request}
+}
+
+type apiClient struct {
+ httpClient *http.Client
+ request web.Request
+}
+
+func (a apiClient) getStubStatus() (*stubStatus, error) {
+ req, err := web.NewHTTPRequest(a.request)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating request : %v", err)
+ }
+
+ resp, err := a.doRequestOK(req)
+ defer closeBody(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ status, err := parseStubStatus(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error on parsing response : %v", err)
+ }
+
+ return status, nil
+}
+
+func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
+ resp, err := a.httpClient.Do(req)
+ if err != nil {
+ return resp, fmt.Errorf("error on request : %v", err)
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+ }
+
+ return resp, err
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
+func parseStubStatus(r io.Reader) (*stubStatus, error) {
+ sc := bufio.NewScanner(r)
+ var lines []string
+
+ for sc.Scan() {
+ lines = append(lines, strings.Trim(sc.Text(), "\r\n "))
+ }
+
+ parsed := reStatus.FindStringSubmatch(strings.Join(lines, "\n"))
+
+ if len(parsed) == 0 {
+ return nil, fmt.Errorf("can't parse '%v'", lines)
+ }
+
+ parsed = parsed[1:]
+
+ var (
+ seq []string
+ status stubStatus
+ )
+
+ switch len(parsed) {
+ default:
+ return nil, fmt.Errorf("invalid number of fields, got %d, expect %d or %d", len(parsed), len(nginxSeq), len(tengineSeq))
+ case len(nginxSeq):
+ seq = nginxSeq
+ case len(tengineSeq):
+ seq = tengineSeq
+ }
+
+ for i, key := range seq {
+ strValue := parsed[i]
+ if strValue == "" {
+ continue
+ }
+ value := mustParseInt(strValue)
+ switch key {
+ default:
+ return nil, fmt.Errorf("unknown key in seq : %s", key)
+ case connActive:
+ status.Connections.Active = value
+ case connAccepts:
+ status.Connections.Accepts = value
+ case connHandled:
+ status.Connections.Handled = value
+ case requests:
+ status.Requests.Total = value
+ case connReading:
+ status.Connections.Reading = value
+ case connWriting:
+ status.Connections.Writing = value
+ case connWaiting:
+ status.Connections.Waiting = value
+ case requestTime:
+ status.Requests.Time = &value
+ }
+ }
+
+ return &status, nil
+}
+
+func mustParseInt(value string) int64 {
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
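
For context on the parser above: `reStatus` matches both the stock stub_status layout and Tengine's extended layout, which adds a total request-time counter after the requests field; `parseStubStatus` walks the captured fields in sequence and skips empty captures, so the optional request-time capture is simply absent for stock NGINX. Below is a minimal same-package sketch with illustrative payloads — these are assumptions about the input format for demonstration, not the testdata files shipped in this change, and `exampleParse` is a hypothetical helper, not part of the module.

```go
package nginx

import (
	"fmt"
	"strings"
)

// Illustrative stub_status payloads; the Tengine variant carries one extra
// counter (total request time) after the requests field.
const (
	exampleStubStatus = "Active connections: 1 \n" +
		"server accepts handled requests\n" +
		" 36 36 126 \n" +
		"Reading: 0 Writing: 1 Waiting: 0 \n"

	exampleTengineStatus = "Active connections: 1 \n" +
		"server accepts handled requests request_time\n" +
		" 1140 1140 1140 75806\n" +
		"Reading: 0 Writing: 1 Waiting: 0 \n"
)

// exampleParse feeds both payloads through parseStubStatus and prints the
// fields the collector exports. Hypothetical helper for illustration only.
func exampleParse() error {
	for _, payload := range []string{exampleStubStatus, exampleTengineStatus} {
		status, err := parseStubStatus(strings.NewReader(payload))
		if err != nil {
			return err
		}
		fmt.Println("active:", status.Connections.Active, "requests:", status.Requests.Total)
		// Requests.Time is set only for the Tengine variant.
		if status.Requests.Time != nil {
			fmt.Println("request_time:", *status.Requests.Time)
		}
	}
	return nil
}
```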
diff --git a/src/go/plugin/go.d/modules/nginx/charts.go b/src/go/plugin/go.d/modules/nginx/charts.go
new file mode 100644
index 000000000..3415fbae8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/charts.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginx
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+)
+
+var charts = Charts{
+ {
+ ID: "connections",
+ Title: "Active Client Connections Including Waiting Connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "nginx.connections",
+ Dims: Dims{
+ {ID: "active"},
+ },
+ },
+ {
+ ID: "connections_statuses",
+ Title: "Active Connections Per Status",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "nginx.connections_status",
+ Dims: Dims{
+ {ID: "reading"},
+ {ID: "writing"},
+ {ID: "waiting", Name: "idle"},
+ },
+ },
+ {
+ ID: "connections_accepted_handled",
+ Title: "Accepted And Handled Connections",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "nginx.connections_accepted_handled",
+ Dims: Dims{
+ {ID: "accepts", Name: "accepted", Algo: module.Incremental},
+ {ID: "handled", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "requests",
+ Title: "Client Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "nginx.requests",
+ Dims: Dims{
+ {ID: "requests", Algo: module.Incremental},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/nginx/collect.go b/src/go/plugin/go.d/modules/nginx/collect.go
new file mode 100644
index 000000000..459570ae5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/collect.go
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginx
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+func (n *Nginx) collect() (map[string]int64, error) {
+ status, err := n.apiClient.getStubStatus()
+
+ if err != nil {
+ return nil, err
+ }
+
+ return stm.ToMap(status), nil
+}
diff --git a/src/go/plugin/go.d/modules/nginx/config_schema.json b/src/go/plugin/go.d/modules/nginx/config_schema.json
new file mode 100644
index 000000000..25fead781
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NGINX collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the NGINX [status page](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).",
+ "type": "string",
+ "default": "http://127.0.0.1/stub_status",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginx/integrations/nginx.md b/src/go/plugin/go.d/modules/nginx/integrations/nginx.md
new file mode 100644
index 000000000..6d8338a10
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/integrations/nginx.md
@@ -0,0 +1,267 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nginx/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nginx/metadata.yaml"
+sidebar_label: "NGINX"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NGINX
+
+
+<img src="https://netdata.cloud/img/nginx.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: nginx
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.
+
+
+It sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects NGINX instances running on localhost that are listening on port 80.
+On startup, it tries to collect metrics from:
+
+- http://127.0.0.1/basic_status
+- http://localhost/stub_status
+- http://127.0.0.1/stub_status
+- http://127.0.0.1/nginx_status
+- http://127.0.0.1/status
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NGINX instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginx.connections | active | connections |
+| nginx.connections_status | reading, writing, idle | connections |
+| nginx.connections_accepted_handled | accepted, handled | connections/s |
+| nginx.requests | requests | requests/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable status support
+
+Configure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/nginx.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/nginx.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/stub_status | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/stub_status
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/stub_status
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+NGINX with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/stub_status
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/stub_status
+
+ - name: remote
+ url: http://192.0.2.1/stub_status
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `nginx` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m nginx
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `nginx` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep nginx
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep nginx /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep nginx
+```
+
+
diff --git a/src/go/plugin/go.d/modules/nginx/metadata.yaml b/src/go/plugin/go.d/modules/nginx/metadata.yaml
new file mode 100644
index 000000000..49b12c4ec
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/metadata.yaml
@@ -0,0 +1,226 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-nginx
+ plugin_name: go.d.plugin
+ module_name: nginx
+ monitored_instance:
+ name: NGINX
+ link: https://www.nginx.com/
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: nginx.svg
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: go.d.plugin
+ module_name: httpcheck
+ - plugin_name: go.d.plugin
+ module_name: web_log
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ alternative_monitored_instances: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - nginx
+ - web
+ - webserver
+ - http
+ - proxy
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the activity and performance of NGINX servers, and collects metrics such as the number of connections, their status, and client requests.
+ method_description: |
+ It sends HTTP requests to the NGINX location [stub-status](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html), which is a built-in location that provides metrics about the NGINX server.
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects NGINX instances running on localhost that are listening on port 80.
+ On startup, it tries to collect metrics from:
+
+ - http://127.0.0.1/basic_status
+ - http://localhost/stub_status
+ - http://127.0.0.1/stub_status
+ - http://127.0.0.1/nginx_status
+ - http://127.0.0.1/status
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list:
+ - title: Enable status support
+ description: |
+ Configure [ngx_http_stub_status_module](https://nginx.org/en/docs/http/ngx_http_stub_status_module.html).
+ configuration:
+ file:
+ name: go.d/nginx.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1/stub_status
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/stub_status
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/stub_status
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: NGINX with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/stub_status
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/stub_status
+
+ - name: remote
+ url: http://192.0.2.1/stub_status
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: nginx.connections
+ description: Active Client Connections Including Waiting Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: nginx.connections_status
+ description: Active Connections Per Status
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: reading
+ - name: writing
+ - name: idle
+ - name: nginx.connections_accepted_handled
+ description: Accepted And Handled Connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: accepted
+ - name: handled
+ - name: nginx.requests
+ description: Client Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
diff --git a/src/go/plugin/go.d/modules/nginx/metrics.go b/src/go/plugin/go.d/modules/nginx/metrics.go
new file mode 100644
index 000000000..66e6a160e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/metrics.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginx
+
+type stubStatus struct {
+ Connections struct {
+ // The current number of active client connections including Waiting connections.
+ Active int64 `stm:"active"`
+
+ // The total number of accepted client connections.
+ Accepts int64 `stm:"accepts"`
+
+ // The total number of handled connections.
+ // Generally, the parameter value is the same as accepts unless some resource limits have been reached.
+ Handled int64 `stm:"handled"`
+
+ // The current number of connections where nginx is reading the request header.
+ Reading int64 `stm:"reading"`
+
+ // The current number of connections where nginx is writing the response back to the client.
+ Writing int64 `stm:"writing"`
+
+ // The current number of idle client connections waiting for a request.
+ Waiting int64 `stm:"waiting"`
+ } `stm:""`
+ Requests struct {
+ // The total number of client requests.
+ Total int64 `stm:"requests"`
+
+ // Note: tengine specific
+	// The total requests' response time, in milliseconds.
+ Time *int64 `stm:"request_time"`
+ } `stm:""`
+}
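
The `stm` tags on this struct are what turn it into the flat metric map: `stm.ToMap` in collect.go flattens each tagged field into a key such as `active` or `requests`, and those keys are the dimension IDs declared in charts.go. A hedged sketch of the resulting map for a stock NGINX response (values are illustrative; `exampleMetricMap` is not part of the module):

```go
package nginx

// exampleMetricMap illustrates what stm.ToMap(status) produces for a stock
// NGINX stub_status page; "request_time" is added only when the
// Tengine-specific Requests.Time field is set.
var exampleMetricMap = map[string]int64{
	"active":   1,
	"accepts":  36,
	"handled":  36,
	"requests": 126,
	"reading":  0,
	"writing":  1,
	"waiting":  0,
}
```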
diff --git a/src/go/plugin/go.d/modules/nginx/nginx.go b/src/go/plugin/go.d/modules/nginx/nginx.go
new file mode 100644
index 000000000..4a8e77439
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/nginx.go
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginx
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("nginx", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Nginx {
+ return &Nginx{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1/stub_status",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 1),
+ },
+ },
+ }}
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Nginx struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ apiClient *apiClient
+}
+
+func (n *Nginx) Configuration() any {
+ return n.Config
+}
+
+func (n *Nginx) Init() error {
+ if n.URL == "" {
+ n.Error("URL not set")
+ return errors.New("url not set")
+ }
+
+ client, err := web.NewHTTPClient(n.Client)
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+
+ n.apiClient = newAPIClient(client, n.Request)
+
+ n.Debugf("using URL %s", n.URL)
+ n.Debugf("using timeout: %s", n.Timeout)
+
+ return nil
+}
+
+func (n *Nginx) Check() error {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (n *Nginx) Charts() *Charts {
+ return charts.Copy()
+}
+
+func (n *Nginx) Collect() map[string]int64 {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (n *Nginx) Cleanup() {
+ if n.apiClient != nil && n.apiClient.httpClient != nil {
+ n.apiClient.httpClient.CloseIdleConnections()
+ }
+}
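
A minimal sketch of the lifecycle the go.d framework drives for this module; the standalone main, the import path and the URL below are illustrative assumptions — in production the agent calls these methods itself.

package main

import (
    "fmt"
    "log"

    "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/nginx"
)

func main() {
    collector := nginx.New() // defaults to http://127.0.0.1/stub_status with a 1s timeout
    collector.URL = "http://127.0.0.1/stub_status"

    // The agent invokes Init once, Check once, then Collect on every update interval.
    if err := collector.Init(); err != nil {
        log.Fatal(err)
    }
    defer collector.Cleanup()

    if err := collector.Check(); err != nil {
        log.Fatal(err)
    }

    fmt.Println(collector.Collect()) // e.g. map[accepts:36 active:1 handled:36 ...]
}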
diff --git a/src/go/plugin/go.d/modules/nginx/nginx_test.go b/src/go/plugin/go.d/modules/nginx/nginx_test.go
new file mode 100644
index 000000000..255ea384c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/nginx_test.go
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginx
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatusMetrics, _ = os.ReadFile("testdata/status.txt")
+ dataTengineStatusMetrics, _ = os.ReadFile("testdata/tengine-status.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStatusMetrics": dataStatusMetrics,
+ "dataTengineStatusMetrics": dataTengineStatusMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestNginx_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Nginx{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNginx_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestNginx_Init(t *testing.T) {
+ job := New()
+
+ require.NoError(t, job.Init())
+ assert.NotNil(t, job.apiClient)
+}
+
+func TestNginx_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+}
+
+func TestNginx_CheckNG(t *testing.T) {
+ job := New()
+
+ job.URL = "http://127.0.0.1:38001/us"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestNginx_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestNginx_Collect(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "accepts": 36,
+ "active": 1,
+ "handled": 36,
+ "reading": 0,
+ "requests": 126,
+ "waiting": 0,
+ "writing": 1,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestNginx_CollectTengine(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataTengineStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "accepts": 1140,
+ "active": 1,
+ "handled": 1140,
+ "reading": 0,
+ "request_time": 75806,
+ "requests": 1140,
+ "waiting": 0,
+ "writing": 1,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestNginx_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestNginx_404(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
diff --git a/src/go/plugin/go.d/modules/nginx/testdata/config.json b/src/go/plugin/go.d/modules/nginx/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/nginx/testdata/config.yaml b/src/go/plugin/go.d/modules/nginx/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/nginx/testdata/status.txt b/src/go/plugin/go.d/modules/nginx/testdata/status.txt
new file mode 100644
index 000000000..f4835bef4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/testdata/status.txt
@@ -0,0 +1,4 @@
+Active connections: 1
+server accepts handled requests
+36 36 126
+Reading: 0 Writing: 1 Waiting: 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nginx/testdata/tengine-status.txt b/src/go/plugin/go.d/modules/nginx/testdata/tengine-status.txt
new file mode 100644
index 000000000..1e6a62c21
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginx/testdata/tengine-status.txt
@@ -0,0 +1,4 @@
+Active connections: 1
+server accepts handled requests request_time
+1140 1140 1140 75806
+Reading: 0 Writing: 1 Waiting: 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nginxplus/README.md b/src/go/plugin/go.d/modules/nginxplus/README.md
new file mode 120000
index 000000000..16cb6c1b7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/README.md
@@ -0,0 +1 @@
+integrations/nginx_plus.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nginxplus/cache.go b/src/go/plugin/go.d/modules/nginxplus/cache.go
new file mode 100644
index 000000000..af58f3a55
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/cache.go
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxplus
+
+func newCache() *cache {
+ return &cache{
+ httpCaches: make(map[string]*cacheHTTPCacheEntry),
+ httpServerZones: make(map[string]*cacheZoneEntry),
+ httpLocationZones: make(map[string]*cacheZoneEntry),
+ httpUpstreams: make(map[string]*cacheUpstreamEntry),
+ httpUpstreamServers: make(map[string]*cacheUpstreamServerEntry),
+ streamServerZones: make(map[string]*cacheZoneEntry),
+ streamUpstreams: make(map[string]*cacheUpstreamEntry),
+ streamUpstreamServers: make(map[string]*cacheUpstreamServerEntry),
+ resolvers: make(map[string]*cacheResolverEntry),
+ }
+}
+
+type (
+ cache struct {
+ httpCaches map[string]*cacheHTTPCacheEntry
+ httpServerZones map[string]*cacheZoneEntry
+ httpLocationZones map[string]*cacheZoneEntry
+ httpUpstreams map[string]*cacheUpstreamEntry
+ httpUpstreamServers map[string]*cacheUpstreamServerEntry
+ streamServerZones map[string]*cacheZoneEntry
+ streamUpstreams map[string]*cacheUpstreamEntry
+ streamUpstreamServers map[string]*cacheUpstreamServerEntry
+ resolvers map[string]*cacheResolverEntry
+ }
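+ // cacheEntry holds the chart lifecycle state embedded in every cached object: whether its charts exist, whether the object was seen in the current collection, and for how many consecutive collections it has been missing.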
+ cacheEntry struct {
+ hasCharts bool
+ updated bool
+ notSeenTimes int
+ }
+ cacheHTTPCacheEntry struct {
+ name string
+ cacheEntry
+ }
+ cacheResolverEntry struct {
+ zone string
+ cacheEntry
+ }
+ cacheZoneEntry struct {
+ zone string
+ cacheEntry
+ }
+ cacheUpstreamEntry struct {
+ name string
+ zone string
+ cacheEntry
+ }
+ cacheUpstreamServerEntry struct {
+ name string
+ zone string
+ serverAddr string
+ serverName string
+ cacheEntry
+ }
+)
+
+func (c *cache) resetUpdated() {
+ for _, v := range c.httpCaches {
+ v.updated = false
+ }
+ for _, v := range c.httpServerZones {
+ v.updated = false
+ }
+ for _, v := range c.httpLocationZones {
+ v.updated = false
+ }
+ for _, v := range c.httpUpstreams {
+ v.updated = false
+ }
+ for _, v := range c.httpUpstreamServers {
+ v.updated = false
+ }
+ for _, v := range c.streamServerZones {
+ v.updated = false
+ }
+ for _, v := range c.streamUpstreams {
+ v.updated = false
+ }
+ for _, v := range c.streamUpstreamServers {
+ v.updated = false
+ }
+ for _, v := range c.resolvers {
+ v.updated = false
+ }
+}
+
+func (c *cache) putHTTPCache(cache string) {
+ v, ok := c.httpCaches[cache]
+ if !ok {
+ v = &cacheHTTPCacheEntry{name: cache}
+ c.httpCaches[cache] = v
+ }
+ v.updated, v.notSeenTimes = true, 0
+}
+
+func (c *cache) putHTTPServerZone(zone string) {
+ v, ok := c.httpServerZones[zone]
+ if !ok {
+ v = &cacheZoneEntry{zone: zone}
+ c.httpServerZones[zone] = v
+ }
+ v.updated, v.notSeenTimes = true, 0
+}
+
+func (c *cache) putHTTPLocationZone(zone string) {
+ v, ok := c.httpLocationZones[zone]
+ if !ok {
+ v = &cacheZoneEntry{zone: zone}
+ c.httpLocationZones[zone] = v
+ }
+ v.updated, v.notSeenTimes = true, 0
+}
+
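+ // Upstreams and their servers are keyed by a composite of name, zone and (for servers) server address, so the same upstream name in different zones is tracked separately.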
+func (c *cache) putHTTPUpstream(name, zone string) {
+ v, ok := c.httpUpstreams[name+"_"+zone]
+ if !ok {
+ v = &cacheUpstreamEntry{name: name, zone: zone}
+ c.httpUpstreams[name+"_"+zone] = v
+ }
+ v.updated, v.notSeenTimes = true, 0
+}
+
+func (c *cache) putHTTPUpstreamServer(name, serverAddr, serverName, zone string) {
+ v, ok := c.httpUpstreamServers[name+"_"+serverAddr+"_"+zone]
+ if !ok {
+ v = &cacheUpstreamServerEntry{name: name, zone: zone, serverAddr: serverAddr, serverName: serverName}
+ c.httpUpstreamServers[name+"_"+serverAddr+"_"+zone] = v
+ }
+ v.updated, v.notSeenTimes = true, 0
+}
+
+func (c *cache) putStreamServerZone(zone string) {
+ v, ok := c.streamServerZones[zone]
+ if !ok {
+ v = &cacheZoneEntry{zone: zone}
+ c.streamServerZones[zone] = v
+ }
+ v.updated, v.notSeenTimes = true, 0
+}
+
+func (c *cache) putStreamUpstream(name, zone string) {
+ v, ok := c.streamUpstreams[name+"_"+zone]
+ if !ok {
+ v = &cacheUpstreamEntry{name: name, zone: zone}
+ c.streamUpstreams[name+"_"+zone] = v
+ }
+ v.updated, v.notSeenTimes = true, 0
+}
+
+func (c *cache) putStreamUpstreamServer(name, serverAddr, serverName, zone string) {
+ v, ok := c.streamUpstreamServers[name+"_"+serverAddr+"_"+zone]
+ if !ok {
+ v = &cacheUpstreamServerEntry{name: name, zone: zone, serverAddr: serverAddr, serverName: serverName}
+ c.streamUpstreamServers[name+"_"+serverAddr+"_"+zone] = v
+ }
+ v.updated, v.notSeenTimes = true, 0
+}
+
+func (c *cache) putResolver(zone string) {
+ v, ok := c.resolvers[zone]
+ if !ok {
+ v = &cacheResolverEntry{zone: zone}
+ c.resolvers[zone] = v
+ }
+ v.updated, v.notSeenTimes = true, 0
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/charts.go b/src/go/plugin/go.d/modules/nginxplus/charts.go
new file mode 100644
index 000000000..6070ee03b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/charts.go
@@ -0,0 +1,981 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxplus
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioClientConnectionsRate = module.Priority + iota
+ prioClientConnectionsCount
+
+ prioSSLHandshakesRate
+ prioSSLHandshakesFailuresRate
+ prioSSLVerificationErrorsRate
+ prioSSLSessionReusesRate
+
+ prioHTTPRequestsRate
+ prioHTTPRequestsCount
+ prioHTTPServerZoneRequestsRate
+ prioHTTPLocationZoneRequestsRate
+ prioHTTPServerZoneRequestsProcessingCount
+ prioHTTPServerZoneRequestsDiscardedRate
+ prioHTTPLocationZoneRequestsDiscardedRate
+
+ prioHTTPServerZoneResponsesPerCodeClassRate
+ prioHTTPLocationZoneResponsesPerCodeClassRate
+
+ prioHTTPServerZoneTrafficRate
+ prioHTTPLocationZoneTrafficRate
+
+ prioHTTPUpstreamPeersCount
+ prioHTTPUpstreamZombiesCount
+ prioHTTPUpstreamKeepaliveCount
+
+ prioHTTPUpstreamServerState
+ prioHTTPUpstreamServerDowntime
+
+ prioHTTPUpstreamServerConnectionsCount
+
+ prioHTTPUpstreamServerRequestsRate
+
+ prioHTTPUpstreamServerResponsesPerCodeClassRate
+
+ prioHTTPUpstreamServerResponseTime
+ prioHTTPUpstreamServerResponseHeaderTime
+
+ prioHTTPUpstreamServerTrafficRate
+
+ prioHTTPCacheState
+ prioHTTPCacheIOPS
+ prioHTTPCacheIO
+ prioHTTPCacheSize
+
+ prioStreamServerZoneConnectionsRate
+ prioStreamServerZoneConnectionsProcessingCount
+ prioStreamServerZoneConnectionsDiscardedRate
+
+ prioStreamServerZoneSessionsPerCodeClassRate
+
+ prioStreamServerZoneTrafficRate
+
+ prioStreamUpstreamPeersCount
+ prioStreamUpstreamZombiesCount
+
+ prioStreamUpstreamServerState
+ prioStreamUpstreamServerDowntime
+
+ prioStreamUpstreamServerConnectionsRate
+ prioStreamUpstreamServerConnectionsCount
+
+ prioStreamUpstreamServerTrafficRate
+
+ prioResolverZoneRequestsRate
+ prioResolverZoneResponsesRate
+
+ prioUptime
+)
+
+var (
+ baseCharts = module.Charts{
+ clientConnectionsRateChart.Copy(),
+ clientConnectionsCountChart.Copy(),
+ sslHandshakesRateChart.Copy(),
+ sslHandshakesFailuresRateChart.Copy(),
+ sslVerificationErrorsRateChart.Copy(),
+ sslSessionReusesRateChart.Copy(),
+ httpRequestsRateChart.Copy(),
+ httpRequestsCountChart.Copy(),
+ uptimeChart.Copy(),
+ }
+
+ clientConnectionsRateChart = module.Chart{
+ ID: "client_connections_rate",
+ Title: "Client connections rate",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "nginxplus.client_connections_rate",
+ Priority: prioClientConnectionsRate,
+ Dims: module.Dims{
+ {ID: "connections_accepted", Name: "accepted", Algo: module.Incremental},
+ {ID: "connections_dropped", Name: "dropped", Algo: module.Incremental},
+ },
+ }
+ clientConnectionsCountChart = module.Chart{
+ ID: "client_connections_count",
+ Title: "Client connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "nginxplus.client_connections_count",
+ Priority: prioClientConnectionsCount,
+ Dims: module.Dims{
+ {ID: "connections_active", Name: "active"},
+ {ID: "connections_idle", Name: "idle"},
+ },
+ }
+ sslHandshakesRateChart = module.Chart{
+ ID: "ssl_handshakes_rate",
+ Title: "SSL handshakes rate",
+ Units: "handshakes/s",
+ Fam: "ssl",
+ Ctx: "nginxplus.ssl_handshakes_rate",
+ Priority: prioSSLHandshakesRate,
+ Dims: module.Dims{
+ {ID: "ssl_handshakes", Name: "successful", Algo: module.Incremental},
+ {ID: "ssl_handshakes_failed", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ sslHandshakesFailuresRateChart = module.Chart{
+ ID: "ssl_handshakes_failures_rate",
+ Title: "SSL handshakes failures rate",
+ Units: "failures/s",
+ Fam: "ssl",
+ Ctx: "nginxplus.ssl_handshakes_failures_rate",
+ Priority: prioSSLHandshakesFailuresRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "ssl_no_common_protocol", Name: "no_common_protocol", Algo: module.Incremental},
+ {ID: "ssl_no_common_cipher", Name: "no_common_cipher", Algo: module.Incremental},
+ {ID: "ssl_handshake_timeout", Name: "timeout", Algo: module.Incremental},
+ {ID: "ssl_peer_rejected_cert", Name: "peer_rejected_cert", Algo: module.Incremental},
+ },
+ }
+ sslVerificationErrorsRateChart = module.Chart{
+ ID: "ssl_verification_errors_rate",
+ Title: "SSL verification errors rate",
+ Units: "errors/s",
+ Fam: "ssl",
+ Ctx: "nginxplus.ssl_verification_errors_rate",
+ Priority: prioSSLVerificationErrorsRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "ssl_verify_failures_no_cert", Name: "no_cert", Algo: module.Incremental},
+ {ID: "ssl_verify_failures_expired_cert", Name: "expired_cert", Algo: module.Incremental},
+ {ID: "ssl_verify_failures_revoked_cert", Name: "revoked_cert", Algo: module.Incremental},
+ {ID: "ssl_verify_failures_hostname_mismatch", Name: "hostname_mismatch", Algo: module.Incremental},
+ {ID: "ssl_verify_failures_other", Name: "other", Algo: module.Incremental},
+ },
+ }
+ sslSessionReusesRateChart = module.Chart{
+ ID: "ssl_session_reuses_rate",
+ Title: "Session reuses during SSL handshake",
+ Units: "reuses/s",
+ Fam: "ssl",
+ Ctx: "nginxplus.ssl_session_reuses_rate",
+ Priority: prioSSLSessionReusesRate,
+ Dims: module.Dims{
+ {ID: "ssl_session_reuses", Name: "ssl_session", Algo: module.Incremental},
+ },
+ }
+ httpRequestsRateChart = module.Chart{
+ ID: "http_requests_rate",
+ Title: "HTTP requests rate",
+ Units: "requests/s",
+ Fam: "http requests",
+ Ctx: "nginxplus.http_requests_rate",
+ Priority: prioHTTPRequestsRate,
+ Dims: module.Dims{
+ {ID: "http_requests_total", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ httpRequestsCountChart = module.Chart{
+ ID: "http_requests_count",
+ Title: "HTTP requests",
+ Units: "requests",
+ Fam: "http requests",
+ Ctx: "nginxplus.http_requests_count",
+ Priority: prioHTTPRequestsCount,
+ Dims: module.Dims{
+ {ID: "http_requests_current", Name: "requests"},
+ },
+ }
+ uptimeChart = module.Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "nginxplus.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "uptime", Name: "uptime"},
+ },
+ }
+)
+
+var (
+ httpServerZoneChartsTmpl = module.Charts{
+ httpServerZoneRequestsRateChartTmpl.Copy(),
+ httpServerZoneResponsesPerCodeClassRateChartTmpl.Copy(),
+ httpServerZoneTrafficRateChartTmpl.Copy(),
+ httpServerZoneRequestsProcessingCountChartTmpl.Copy(),
+ httpServerZoneRequestsDiscardedRateChartTmpl.Copy(),
+ }
+ httpServerZoneRequestsRateChartTmpl = module.Chart{
+ ID: "http_server_zone_%s_requests_rate",
+ Title: "HTTP Server Zone requests rate",
+ Units: "requests/s",
+ Fam: "http requests",
+ Ctx: "nginxplus.http_server_zone_requests_rate",
+ Priority: prioHTTPServerZoneRequestsRate,
+ Dims: module.Dims{
+ {ID: "http_server_zone_%s_requests", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ httpServerZoneResponsesPerCodeClassRateChartTmpl = module.Chart{
+ ID: "http_server_zone_%s_responses_per_code_class_rate",
+ Title: "HTTP Server Zone responses rate",
+ Units: "responses/s",
+ Fam: "http responses",
+ Ctx: "nginxplus.http_server_zone_responses_per_code_class_rate",
+ Priority: prioHTTPServerZoneResponsesPerCodeClassRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "http_server_zone_%s_responses_1xx", Name: "1xx", Algo: module.Incremental},
+ {ID: "http_server_zone_%s_responses_2xx", Name: "2xx", Algo: module.Incremental},
+ {ID: "http_server_zone_%s_responses_3xx", Name: "3xx", Algo: module.Incremental},
+ {ID: "http_server_zone_%s_responses_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: "http_server_zone_%s_responses_5xx", Name: "5xx", Algo: module.Incremental},
+ },
+ }
+ httpServerZoneTrafficRateChartTmpl = module.Chart{
+ ID: "http_server_zone_%s_traffic_rate",
+ Title: "HTTP Server Zone traffic",
+ Units: "bytes/s",
+ Fam: "http traffic",
+ Ctx: "nginxplus.http_server_zone_traffic_rate",
+ Priority: prioHTTPServerZoneTrafficRate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "http_server_zone_%s_bytes_received", Name: "received", Algo: module.Incremental},
+ {ID: "http_server_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ httpServerZoneRequestsProcessingCountChartTmpl = module.Chart{
+ ID: "http_server_zone_%s_requests_processing_count",
+ Title: "HTTP Server Zone currently processed requests",
+ Units: "requests",
+ Fam: "http requests",
+ Ctx: "nginxplus.http_server_zone_requests_processing_count",
+ Priority: prioHTTPServerZoneRequestsProcessingCount,
+ Dims: module.Dims{
+ {ID: "http_server_zone_%s_requests_processing", Name: "processing"},
+ },
+ }
+ httpServerZoneRequestsDiscardedRateChartTmpl = module.Chart{
+ ID: "http_server_zone_%s_requests_discarded_rate",
+ Title: "HTTP Server Zone requests discarded rate",
+ Units: "requests/s",
+ Fam: "http requests",
+ Ctx: "nginxplus.http_server_zone_requests_discarded_rate",
+ Priority: prioHTTPServerZoneRequestsDiscardedRate,
+ Dims: module.Dims{
+ {ID: "http_server_zone_%s_requests_discarded", Name: "discarded", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ httpLocationZoneChartsTmpl = module.Charts{
+ httpLocationZoneRequestsRateChartTmpl.Copy(),
+ httpLocationZoneRequestsDiscardedRateChartTmpl.Copy(),
+ httpLocationZoneTrafficRateChartTmpl.Copy(),
+ httpLocationZoneResponsesPerCodeClassRateChartTmpl.Copy(),
+ }
+ httpLocationZoneRequestsRateChartTmpl = module.Chart{
+ ID: "http_location_zone_%s_requests_rate",
+ Title: "HTTP Location Zone requests rate",
+ Units: "requests/s",
+ Fam: "http requests",
+ Ctx: "nginxplus.http_location_zone_requests_rate",
+ Priority: prioHTTPLocationZoneRequestsRate,
+ Dims: module.Dims{
+ {ID: "http_location_zone_%s_requests", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ httpLocationZoneResponsesPerCodeClassRateChartTmpl = module.Chart{
+ ID: "http_location_zone_%s_responses_per_code_class_rate",
+ Title: "HTTP Location Zone responses rate",
+ Units: "responses/s",
+ Fam: "http responses",
+ Ctx: "nginxplus.http_location_zone_responses_per_code_class_rate",
+ Priority: prioHTTPLocationZoneResponsesPerCodeClassRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "http_location_zone_%s_responses_1xx", Name: "1xx", Algo: module.Incremental},
+ {ID: "http_location_zone_%s_responses_2xx", Name: "2xx", Algo: module.Incremental},
+ {ID: "http_location_zone_%s_responses_3xx", Name: "3xx", Algo: module.Incremental},
+ {ID: "http_location_zone_%s_responses_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: "http_location_zone_%s_responses_5xx", Name: "5xx", Algo: module.Incremental},
+ },
+ }
+ httpLocationZoneTrafficRateChartTmpl = module.Chart{
+ ID: "http_location_zone_%s_traffic_rate",
+ Title: "HTTP Location Zone traffic rate",
+ Units: "bytes/s",
+ Fam: "http traffic",
+ Ctx: "nginxplus.http_location_zone_traffic_rate",
+ Priority: prioHTTPLocationZoneTrafficRate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "http_location_zone_%s_bytes_received", Name: "received", Algo: module.Incremental},
+ {ID: "http_location_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ httpLocationZoneRequestsDiscardedRateChartTmpl = module.Chart{
+ ID: "http_location_zone_%s_requests_discarded_rate",
+ Title: "HTTP Location Zone requests discarded rate",
+ Units: "requests/s",
+ Fam: "http requests",
+ Ctx: "nginxplus.http_location_zone_requests_discarded_rate",
+ Priority: prioHTTPLocationZoneRequestsDiscardedRate,
+ Dims: module.Dims{
+ {ID: "http_location_zone_%s_requests_discarded", Name: "discarded", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ httpUpstreamChartsTmpl = module.Charts{
+ httpUpstreamPeersCountChartTmpl.Copy(),
+ httpUpstreamZombiesCountChartTmpl.Copy(),
+ httpUpstreamKeepaliveCountChartTmpl.Copy(),
+ }
+ httpUpstreamPeersCountChartTmpl = module.Chart{
+ ID: "http_upstream_%s_zone_%s_peers_count",
+ Title: "HTTP Upstream peers",
+ Units: "peers",
+ Fam: "http upstream",
+ Ctx: "nginxplus.http_upstream_peers_count",
+ Priority: prioHTTPUpstreamPeersCount,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_zone_%s_peers", Name: "peers"},
+ },
+ }
+ httpUpstreamZombiesCountChartTmpl = module.Chart{
+ ID: "http_upstream_%s_zone_%s_zombies_count",
+ Title: "HTTP Upstream zombies",
+ Units: "servers",
+ Fam: "http upstream",
+ Ctx: "nginxplus.http_upstream_zombies_count",
+ Priority: prioHTTPUpstreamZombiesCount,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_zone_%s_zombies", Name: "zombie"},
+ },
+ }
+ httpUpstreamKeepaliveCountChartTmpl = module.Chart{
+ ID: "http_upstream_%s_zone_%s_keepalive_count",
+ Title: "HTTP Upstream keepalive",
+ Units: "connections",
+ Fam: "http upstream",
+ Ctx: "nginxplus.http_upstream_keepalive_count",
+ Priority: prioHTTPUpstreamKeepaliveCount,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_zone_%s_keepalive", Name: "keepalive"},
+ },
+ }
+
+ httpUpstreamServerChartsTmpl = module.Charts{
+ httpUpstreamServerRequestsRateChartTmpl.Copy(),
+ httpUpstreamServerResponsesPerCodeClassRateChartTmpl.Copy(),
+ httpUpstreamServerResponseTimeChartTmpl.Copy(),
+ httpUpstreamServerResponseHeaderTimeChartTmpl.Copy(),
+ httpUpstreamServerTrafficRateChartTmpl.Copy(),
+ httpUpstreamServerStateChartTmpl.Copy(),
+ httpUpstreamServerDowntimeChartTmpl.Copy(),
+ httpUpstreamServerConnectionsCountChartTmpl.Copy(),
+ }
+ httpUpstreamServerRequestsRateChartTmpl = module.Chart{
+ ID: "http_upstream_%s_server_%s_zone_%s_requests_rate",
+ Title: "HTTP Upstream Server requests",
+ Units: "requests/s",
+ Fam: "http upstream requests",
+ Ctx: "nginxplus.http_upstream_server_requests_rate",
+ Priority: prioHTTPUpstreamServerRequestsRate,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_server_%s_zone_%s_requests", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ httpUpstreamServerResponsesPerCodeClassRateChartTmpl = module.Chart{
+ ID: "http_upstream_%s_server_%s_zone_%s_responses_per_code_class_rate",
+ Title: "HTTP Upstream Server responses",
+ Units: "responses/s",
+ Fam: "http upstream responses",
+ Ctx: "nginxplus.http_upstream_server_responses_per_code_class_rate",
+ Priority: prioHTTPUpstreamServerResponsesPerCodeClassRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_server_%s_zone_%s_responses_1xx", Name: "1xx", Algo: module.Incremental},
+ {ID: "http_upstream_%s_server_%s_zone_%s_responses_2xx", Name: "2xx", Algo: module.Incremental},
+ {ID: "http_upstream_%s_server_%s_zone_%s_responses_3xx", Name: "3xx", Algo: module.Incremental},
+ {ID: "http_upstream_%s_server_%s_zone_%s_responses_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: "http_upstream_%s_server_%s_zone_%s_responses_5xx", Name: "5xx", Algo: module.Incremental},
+ },
+ }
+ httpUpstreamServerResponseTimeChartTmpl = module.Chart{
+ ID: "http_upstream_%s_server_%s_zone_%s_response_time",
+ Title: "HTTP Upstream Server average response time",
+ Units: "milliseconds",
+ Fam: "http upstream response time",
+ Ctx: "nginxplus.http_upstream_server_response_time",
+ Priority: prioHTTPUpstreamServerResponseTime,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_server_%s_zone_%s_response_time", Name: "response"},
+ },
+ }
+ httpUpstreamServerResponseHeaderTimeChartTmpl = module.Chart{
+ ID: "http_upstream_%s_server_%s_zone_%s_response_header_time",
+ Title: "HTTP Upstream Server average response header time",
+ Units: "milliseconds",
+ Fam: "http upstream response time",
+ Ctx: "nginxplus.http_upstream_server_response_header_time",
+ Priority: prioHTTPUpstreamServerResponseHeaderTime,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_server_%s_zone_%s_header_time", Name: "header"},
+ },
+ }
+ httpUpstreamServerTrafficRateChartTmpl = module.Chart{
+ ID: "http_upstream_%s_server_%s_zone_%s_traffic_rate",
+ Title: "HTTP Upstream Server traffic rate",
+ Units: "bytes/s",
+ Fam: "http upstream traffic",
+ Ctx: "nginxplus.http_upstream_server_traffic_rate",
+ Priority: prioHTTPUpstreamServerTrafficRate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_server_%s_zone_%s_bytes_received", Name: "received", Algo: module.Incremental},
+ {ID: "http_upstream_%s_server_%s_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ httpUpstreamServerStateChartTmpl = module.Chart{
+ ID: "http_upstream_%s_server_%s_zone_%s_state",
+ Title: "HTTP Upstream Server state",
+ Units: "state",
+ Fam: "http upstream state",
+ Ctx: "nginxplus.http_upstream_server_state",
+ Priority: prioHTTPUpstreamServerState,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_server_%s_zone_%s_state_up", Name: "up"},
+ {ID: "http_upstream_%s_server_%s_zone_%s_state_down", Name: "down"},
+ {ID: "http_upstream_%s_server_%s_zone_%s_state_draining", Name: "draining"},
+ {ID: "http_upstream_%s_server_%s_zone_%s_state_unavail", Name: "unavail"},
+ {ID: "http_upstream_%s_server_%s_zone_%s_state_checking", Name: "checking"},
+ {ID: "http_upstream_%s_server_%s_zone_%s_state_unhealthy", Name: "unhealthy"},
+ },
+ }
+ httpUpstreamServerConnectionsCountChartTmpl = module.Chart{
+ ID: "http_upstream_%s_server_%s_zone_%s_connection_count",
+ Title: "HTTP Upstream Server connections",
+ Units: "connections",
+ Fam: "http upstream connections",
+ Ctx: "nginxplus.http_upstream_server_connections_count",
+ Priority: prioHTTPUpstreamServerConnectionsCount,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_server_%s_zone_%s_active", Name: "active"},
+ },
+ }
+ httpUpstreamServerDowntimeChartTmpl = module.Chart{
+ ID: "http_upstream_%s_server_%s_zone_%s_downtime",
+ Title: "HTTP Upstream Server downtime",
+ Units: "seconds",
+ Fam: "http upstream state",
+ Ctx: "nginxplus.http_upstream_server_downtime",
+ Priority: prioHTTPUpstreamServerDowntime,
+ Dims: module.Dims{
+ {ID: "http_upstream_%s_server_%s_zone_%s_downtime", Name: "downtime"},
+ },
+ }
+)
+
+var (
+ httpCacheChartsTmpl = module.Charts{
+ httpCacheStateChartTmpl.Copy(),
+ httpCacheIOPSChartTmpl.Copy(),
+ httpCacheIOChartTmpl.Copy(),
+ httpCacheSizeChartTmpl.Copy(),
+ }
+ httpCacheStateChartTmpl = module.Chart{
+ ID: "http_cache_%s_state",
+ Title: "HTTP Cache state",
+ Units: "state",
+ Fam: "http cache",
+ Ctx: "nginxplus.http_cache_state",
+ Priority: prioHTTPCacheState,
+ Dims: module.Dims{
+ {ID: "http_cache_%s_state_warm", Name: "warm"},
+ {ID: "http_cache_%s_state_cold", Name: "cold"},
+ },
+ }
+ httpCacheSizeChartTmpl = module.Chart{
+ ID: "http_cache_%s_size",
+ Title: "HTTP Cache size",
+ Units: "bytes",
+ Fam: "http cache",
+ Ctx: "nginxplus.http_cache_size",
+ Priority: prioHTTPCacheSize,
+ Dims: module.Dims{
+ {ID: "http_cache_%s_size", Name: "size"},
+ },
+ }
+ httpCacheIOPSChartTmpl = module.Chart{
+ ID: "http_cache_%s_iops",
+ Title: "HTTP Cache IOPS",
+ Units: "responses/s",
+ Fam: "http cache",
+ Ctx: "nginxplus.http_cache_iops",
+ Priority: prioHTTPCacheIOPS,
+ Dims: module.Dims{
+ {ID: "http_cache_%s_served_responses", Name: "served", Algo: module.Incremental},
+ {ID: "http_cache_%s_written_responses", Name: "written", Algo: module.Incremental},
+ {ID: "http_cache_%s_bypassed_responses", Name: "bypassed", Algo: module.Incremental},
+ },
+ }
+ httpCacheIOChartTmpl = module.Chart{
+ ID: "http_cache_%s_io",
+ Title: "HTTP Cache IO",
+ Units: "bytes/s",
+ Fam: "http cache",
+ Ctx: "nginxplus.http_cache_io",
+ Priority: prioHTTPCacheIO,
+ Dims: module.Dims{
+ {ID: "http_cache_%s_served_bytes", Name: "served", Algo: module.Incremental},
+ {ID: "http_cache_%s_written_bytes", Name: "written", Algo: module.Incremental},
+ {ID: "http_cache_%s_bypassed_bytes", Name: "bypassed", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ streamServerZoneChartsTmpl = module.Charts{
+ streamServerZoneConnectionsRateChartTmpl.Copy(),
+ streamServerZoneTrafficRateChartTmpl.Copy(),
+ streamServerZoneSessionsPerCodeClassRateChartTmpl.Copy(),
+ streamServerZoneConnectionsProcessingCountRateChartTmpl.Copy(),
+ streamServerZoneConnectionsDiscardedRateChartTmpl.Copy(),
+ }
+ streamServerZoneConnectionsRateChartTmpl = module.Chart{
+ ID: "stream_server_zone_%s_connections_rate",
+ Title: "Stream Server Zone connections rate",
+ Units: "connections/s",
+ Fam: "stream connections",
+ Ctx: "nginxplus.stream_server_zone_connections_rate",
+ Priority: prioStreamServerZoneConnectionsRate,
+ Dims: module.Dims{
+ {ID: "stream_server_zone_%s_connections", Name: "accepted", Algo: module.Incremental},
+ },
+ }
+ streamServerZoneSessionsPerCodeClassRateChartTmpl = module.Chart{
+ ID: "stream_server_zone_%s_sessions_per_code_class_rate",
+ Title: "Stream Server Zone sessions rate",
+ Units: "sessions/s",
+ Fam: "stream sessions",
+ Ctx: "nginxplus.stream_server_zone_sessions_per_code_class_rate",
+ Priority: prioStreamServerZoneSessionsPerCodeClassRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "stream_server_zone_%s_sessions_2xx", Name: "2xx", Algo: module.Incremental},
+ {ID: "stream_server_zone_%s_sessions_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: "stream_server_zone_%s_sessions_5xx", Name: "5xx", Algo: module.Incremental},
+ },
+ }
+ streamServerZoneTrafficRateChartTmpl = module.Chart{
+ ID: "stream_server_zone_%s_traffic_rate",
+ Title: "Stream Server Zone traffic rate",
+ Units: "bytes/s",
+ Fam: "stream traffic",
+ Ctx: "nginxplus.stream_server_zone_traffic_rate",
+ Priority: prioStreamServerZoneTrafficRate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "stream_server_zone_%s_bytes_received", Name: "received", Algo: module.Incremental},
+ {ID: "stream_server_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ streamServerZoneConnectionsProcessingCountRateChartTmpl = module.Chart{
+ ID: "stream_server_zone_%s_connections_processing_count",
+ Title: "Stream Server Zone connections processed",
+ Units: "connections",
+ Fam: "stream connections",
+ Ctx: "nginxplus.stream_server_zone_connections_processing_count",
+ Priority: prioStreamServerZoneConnectionsProcessingCount,
+ Dims: module.Dims{
+ {ID: "stream_server_zone_%s_connections_processing", Name: "processing"},
+ },
+ }
+ streamServerZoneConnectionsDiscardedRateChartTmpl = module.Chart{
+ ID: "stream_server_zone_%s_connections_discarded_rate",
+ Title: "Stream Server Zone connections discarded",
+ Units: "connections/s",
+ Fam: "stream connections",
+ Ctx: "nginxplus.stream_server_zone_connections_discarded_rate",
+ Priority: prioStreamServerZoneConnectionsDiscardedRate,
+ Dims: module.Dims{
+ {ID: "stream_server_zone_%s_connections_discarded", Name: "discarded", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ streamUpstreamChartsTmpl = module.Charts{
+ streamUpstreamPeersCountChartTmpl.Copy(),
+ streamUpstreamZombiesCountChartTmpl.Copy(),
+ }
+ streamUpstreamPeersCountChartTmpl = module.Chart{
+ ID: "stream_upstream_%s_zone_%s_peers_count",
+ Title: "Stream Upstream peers",
+ Units: "peers",
+ Fam: "stream upstream",
+ Ctx: "nginxplus.stream_upstream_peers_count",
+ Priority: prioStreamUpstreamPeersCount,
+ Dims: module.Dims{
+ {ID: "stream_upstream_%s_zone_%s_peers", Name: "peers"},
+ },
+ }
+ streamUpstreamZombiesCountChartTmpl = module.Chart{
+ ID: "stream_upstream_%s_zone_%s_zombies_count",
+ Title: "Stream Upstream zombies",
+ Units: "servers",
+ Fam: "stream upstream",
+ Ctx: "nginxplus.stream_upstream_zombies_count",
+ Priority: prioStreamUpstreamZombiesCount,
+ Dims: module.Dims{
+ {ID: "stream_upstream_%s_zone_%s_zombies", Name: "zombie"},
+ },
+ }
+
+ streamUpstreamServerChartsTmpl = module.Charts{
+ streamUpstreamServerConnectionsRateChartTmpl.Copy(),
+ streamUpstreamServerTrafficRateChartTmpl.Copy(),
+ streamUpstreamServerConnectionsCountChartTmpl.Copy(),
+ streamUpstreamServerStateChartTmpl.Copy(),
+ streamUpstreamServerDowntimeChartTmpl.Copy(),
+ }
+ streamUpstreamServerConnectionsRateChartTmpl = module.Chart{
+ ID: "stream_upstream_%s_server_%s_zone_%s_connection_rate",
+ Title: "Stream Upstream Server connections",
+ Units: "connections/s",
+ Fam: "stream upstream connections",
+ Ctx: "nginxplus.stream_upstream_server_connections_rate",
+ Priority: prioStreamUpstreamServerConnectionsRate,
+ Dims: module.Dims{
+ {ID: "stream_upstream_%s_server_%s_zone_%s_connections", Name: "forwarded", Algo: module.Incremental},
+ },
+ }
+ streamUpstreamServerTrafficRateChartTmpl = module.Chart{
+ ID: "stream_upstream_%s_server_%s_zone_%s_traffic_rate",
+ Title: "Stream Upstream Server traffic rate",
+ Units: "bytes/s",
+ Fam: "stream upstream traffic",
+ Ctx: "nginxplus.stream_upstream_server_traffic_rate",
+ Priority: prioStreamUpstreamServerTrafficRate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "stream_upstream_%s_server_%s_zone_%s_bytes_received", Name: "received", Algo: module.Incremental},
+ {ID: "stream_upstream_%s_server_%s_zone_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ streamUpstreamServerStateChartTmpl = module.Chart{
+ ID: "stream_upstream_%s_server_%s_zone_%s_state",
+ Title: "Stream Upstream Server state",
+ Units: "state",
+ Fam: "stream upstream state",
+ Ctx: "nginxplus.stream_upstream_server_state",
+ Priority: prioStreamUpstreamServerState,
+ Dims: module.Dims{
+ {ID: "stream_upstream_%s_server_%s_zone_%s_state_up", Name: "up"},
+ {ID: "stream_upstream_%s_server_%s_zone_%s_state_down", Name: "down"},
+ {ID: "stream_upstream_%s_server_%s_zone_%s_state_unavail", Name: "unavail"},
+ {ID: "stream_upstream_%s_server_%s_zone_%s_state_checking", Name: "checking"},
+ {ID: "stream_upstream_%s_server_%s_zone_%s_state_unhealthy", Name: "unhealthy"},
+ },
+ }
+ streamUpstreamServerDowntimeChartTmpl = module.Chart{
+ ID: "stream_upstream_%s_server_%s_zone_%s_downtime",
+ Title: "Stream Upstream Server downtime",
+ Units: "seconds",
+ Fam: "stream upstream state",
+ Ctx: "nginxplus.stream_upstream_server_downtime",
+ Priority: prioStreamUpstreamServerDowntime,
+ Dims: module.Dims{
+ {ID: "stream_upstream_%s_server_%s_zone_%s_downtime", Name: "downtime"},
+ },
+ }
+ streamUpstreamServerConnectionsCountChartTmpl = module.Chart{
+ ID: "stream_upstream_%s_server_%s_zone_%s_connection_count",
+ Title: "Stream Upstream Server connections",
+ Units: "connections",
+ Fam: "stream upstream connections",
+ Ctx: "nginxplus.stream_upstream_server_connections_count",
+ Priority: prioStreamUpstreamServerConnectionsCount,
+ Dims: module.Dims{
+ {ID: "stream_upstream_%s_server_%s_zone_%s_active", Name: "active"},
+ },
+ }
+)
+
+var (
+ resolverZoneChartsTmpl = module.Charts{
+ resolverZoneRequestsRateChartTmpl.Copy(),
+ resolverZoneResponsesRateChartTmpl.Copy(),
+ }
+ resolverZoneRequestsRateChartTmpl = module.Chart{
+ ID: "resolver_zone_%s_requests_rate",
+ Title: "Resolver requests rate",
+ Units: "requests/s",
+ Fam: "resolver requests",
+ Ctx: "nginxplus.resolver_zone_requests_rate",
+ Priority: prioResolverZoneRequestsRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "resolver_zone_%s_requests_name", Name: "name", Algo: module.Incremental},
+ {ID: "resolver_zone_%s_requests_srv", Name: "srv", Algo: module.Incremental},
+ {ID: "resolver_zone_%s_requests_addr", Name: "addr", Algo: module.Incremental},
+ },
+ }
+ resolverZoneResponsesRateChartTmpl = module.Chart{
+ ID: "resolver_zone_%s_responses_rate",
+ Title: "Resolver responses rate",
+ Units: "responses/s",
+ Fam: "resolver responses",
+ Ctx: "nginxplus.resolver_zone_responses_rate",
+ Priority: prioResolverZoneResponsesRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "resolver_zone_%s_responses_noerror", Name: "noerror", Algo: module.Incremental},
+ {ID: "resolver_zone_%s_responses_formerr", Name: "formerr", Algo: module.Incremental},
+ {ID: "resolver_zone_%s_responses_servfail", Name: "servfail", Algo: module.Incremental},
+ {ID: "resolver_zone_%s_responses_nxdomain", Name: "nxdomain", Algo: module.Incremental},
+ {ID: "resolver_zone_%s_responses_notimp", Name: "notimp", Algo: module.Incremental},
+ {ID: "resolver_zone_%s_responses_refused", Name: "refused", Algo: module.Incremental},
+ {ID: "resolver_zone_%s_responses_timedout", Name: "timedout", Algo: module.Incremental},
+ {ID: "resolver_zone_%s_responses_unknown", Name: "unknown", Algo: module.Incremental},
+ },
+ }
+)
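+
+// The "%s" placeholders in the template IDs, labels and dimension IDs above are filled per discovered object by the add*Charts helpers below.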
+
+func (n *NginxPlus) addHTTPCacheCharts(name string) {
+ charts := httpCacheChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Labels = []module.Label{
+ {Key: "http_cache", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NginxPlus) removeHTTPCacheCharts(name string) {
+ px := fmt.Sprintf("http_cache_%s_", name)
+ n.removeCharts(px)
+}
+
+func (n *NginxPlus) addHTTPServerZoneCharts(zone string) {
+ charts := httpServerZoneChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, zone)
+ chart.Labels = []module.Label{
+ {Key: "http_server_zone", Value: zone},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, zone)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NginxPlus) removeHTTPServerZoneCharts(zone string) {
+ px := fmt.Sprintf("http_server_zone_%s_", zone)
+ n.removeCharts(px)
+}
+
+func (n *NginxPlus) addHTTPLocationZoneCharts(zone string) {
+ charts := httpLocationZoneChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, zone)
+ chart.Labels = []module.Label{
+ {Key: "http_location_zone", Value: zone},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, zone)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NginxPlus) removeHTTPLocationZoneCharts(zone string) {
+ px := fmt.Sprintf("http_location_zone_%s_", zone)
+ n.removeCharts(px)
+}
+
+func (n *NginxPlus) addHTTPUpstreamCharts(name, zone string) {
+ charts := httpUpstreamChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name, zone)
+ chart.Labels = []module.Label{
+ {Key: "http_upstream_name", Value: name},
+ {Key: "http_upstream_zone", Value: zone},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name, zone)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NginxPlus) removeHTTPUpstreamCharts(name, zone string) {
+ px := fmt.Sprintf("http_upstream_%s_zone_%s", name, zone)
+ n.removeCharts(px)
+}
+
+func (n *NginxPlus) addHTTPUpstreamServerCharts(name, serverAddr, serverName, zone string) {
+ charts := httpUpstreamServerChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name, serverAddr, zone)
+ chart.Labels = []module.Label{
+ {Key: "http_upstream_name", Value: name},
+ {Key: "http_upstream_zone", Value: zone},
+ {Key: "http_upstream_server_address", Value: serverAddr},
+ {Key: "http_upstream_server_name", Value: serverName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name, serverAddr, zone)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NginxPlus) removeHTTPUpstreamServerCharts(name, serverAddr, zone string) {
+ px := fmt.Sprintf("http_upstream_%s_server_%s_zone_%s_", name, zone, serverAddr)
+ n.removeCharts(px)
+}
+
+func (n *NginxPlus) addStreamServerZoneCharts(zone string) {
+ charts := streamServerZoneChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, zone)
+ chart.Labels = []module.Label{
+ {Key: "stream_server_zone", Value: zone},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, zone)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NginxPlus) removeStreamServerZoneCharts(zone string) {
+ px := fmt.Sprintf("stream_server_zone_%s_", zone)
+ n.removeCharts(px)
+}
+
+func (n *NginxPlus) addStreamUpstreamCharts(name, zone string) {
+ charts := streamUpstreamChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name, zone)
+ chart.Labels = []module.Label{
+ {Key: "stream_upstream_name", Value: name},
+ {Key: "stream_upstream_zone", Value: zone},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name, zone)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NginxPlus) removeStreamUpstreamCharts(name, zone string) {
+ px := fmt.Sprintf("stream_upstream_%s_zone_%s_", name, zone)
+ n.removeCharts(px)
+}
+
+func (n *NginxPlus) addStreamUpstreamServerCharts(name, serverAddr, serverName, zone string) {
+ charts := streamUpstreamServerChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name, serverAddr, zone)
+ chart.Labels = []module.Label{
+ {Key: "stream_upstream_name", Value: name},
+ {Key: "stream_upstream_zone", Value: zone},
+ {Key: "stream_upstream_server_address", Value: serverAddr},
+ {Key: "stream_upstream_server_name", Value: serverName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name, serverAddr, zone)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NginxPlus) removeStreamUpstreamServerCharts(name, serverAddr, zone string) {
+ px := fmt.Sprintf("stream_upstream_%s_server_%s_zone_%s", name, serverAddr, zone)
+ n.removeCharts(px)
+}
+
+func (n *NginxPlus) addResolverZoneCharts(zone string) {
+ charts := resolverZoneChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, zone)
+ chart.Labels = []module.Label{
+ {Key: "resolver_zone", Value: zone},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, zone)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NginxPlus) removeResolverZoneCharts(zone string) {
+ px := fmt.Sprintf("resolver_zone_%s_", zone)
+ n.removeCharts(px)
+}
+
+func (n *NginxPlus) removeCharts(prefix string) {
+ for _, chart := range *n.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/collect.go b/src/go/plugin/go.d/modules/nginxplus/collect.go
new file mode 100644
index 000000000..f986778ba
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/collect.go
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxplus
+
+import (
+ "errors"
+ "fmt"
+ "time"
+)
+
+func (n *NginxPlus) collect() (map[string]int64, error) {
+ if n.apiVersion == 0 {
+ v, err := n.queryAPIVersion()
+ if err != nil {
+ return nil, err
+ }
+ n.apiVersion = v
+ }
+
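+ // The set of available API endpoints is re-discovered only every queryEndpointsEvery, not on every collection.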
+ now := time.Now()
+ if now.Sub(n.queryEndpointsTime) > n.queryEndpointsEvery {
+ n.queryEndpointsTime = now
+ if err := n.queryAvailableEndpoints(); err != nil {
+ return nil, err
+ }
+ }
+
+ ms := n.queryMetrics()
+ if ms.empty() {
+ return nil, errors.New("no metrics collected")
+ }
+
+ mx := make(map[string]int64)
+ n.cache.resetUpdated()
+ n.collectInfo(mx, ms)
+ n.collectConnections(mx, ms)
+ n.collectSSL(mx, ms)
+ n.collectHTTPRequests(mx, ms)
+ n.collectHTTPCache(mx, ms)
+ n.collectHTTPServerZones(mx, ms)
+ n.collectHTTPLocationZones(mx, ms)
+ n.collectHTTPUpstreams(mx, ms)
+ n.collectStreamServerZones(mx, ms)
+ n.collectStreamUpstreams(mx, ms)
+ n.collectResolvers(mx, ms)
+ n.updateCharts()
+
+ return mx, nil
+}
+
+func (n *NginxPlus) collectInfo(mx map[string]int64, ms *nginxMetrics) {
+ if ms.info == nil {
+ return
+ }
+ mx["uptime"] = int64(ms.info.Timestamp.Sub(ms.info.LoadTimestamp).Seconds())
+}
+
+func (n *NginxPlus) collectConnections(mx map[string]int64, ms *nginxMetrics) {
+ if ms.connections == nil {
+ return
+ }
+ mx["connections_accepted"] = ms.connections.Accepted
+ mx["connections_dropped"] = ms.connections.Dropped
+ mx["connections_active"] = ms.connections.Active
+ mx["connections_idle"] = ms.connections.Idle
+}
+
+func (n *NginxPlus) collectSSL(mx map[string]int64, ms *nginxMetrics) {
+ if ms.ssl == nil {
+ return
+ }
+ mx["ssl_handshakes"] = ms.ssl.Handshakes
+ mx["ssl_handshakes_failed"] = ms.ssl.HandshakesFailed
+ mx["ssl_session_reuses"] = ms.ssl.SessionReuses
+ mx["ssl_no_common_protocol"] = ms.ssl.NoCommonProtocol
+ mx["ssl_no_common_cipher"] = ms.ssl.NoCommonCipher
+ mx["ssl_handshake_timeout"] = ms.ssl.HandshakeTimeout
+ mx["ssl_peer_rejected_cert"] = ms.ssl.PeerRejectedCert
+ mx["ssl_verify_failures_no_cert"] = ms.ssl.VerifyFailures.NoCert
+ mx["ssl_verify_failures_expired_cert"] = ms.ssl.VerifyFailures.ExpiredCert
+ mx["ssl_verify_failures_revoked_cert"] = ms.ssl.VerifyFailures.RevokedCert
+ mx["ssl_verify_failures_hostname_mismatch"] = ms.ssl.VerifyFailures.HostnameMismatch
+ mx["ssl_verify_failures_other"] = ms.ssl.VerifyFailures.Other
+}
+
+func (n *NginxPlus) collectHTTPRequests(mx map[string]int64, ms *nginxMetrics) {
+ if ms.httpRequests == nil {
+ return
+ }
+ mx["http_requests_total"] = ms.httpRequests.Total
+ mx["http_requests_current"] = ms.httpRequests.Current
+}
+
+func (n *NginxPlus) collectHTTPCache(mx map[string]int64, ms *nginxMetrics) {
+ if ms.httpCaches == nil {
+ return
+ }
+ for name, cache := range *ms.httpCaches {
+ n.cache.putHTTPCache(name)
+ px := fmt.Sprintf("http_cache_%s_", name)
+ mx[px+"state_cold"] = boolToInt(cache.Cold)
+ mx[px+"state_warm"] = boolToInt(!cache.Cold)
+ mx[px+"size"] = cache.Size
+ mx[px+"served_responses"] = cache.Hit.Responses + cache.Stale.Responses + cache.Updating.Responses + cache.Revalidated.Responses
+ mx[px+"written_responses"] = cache.Miss.ResponsesWritten + cache.Expired.ResponsesWritten + cache.Bypass.ResponsesWritten
+ mx[px+"bypassed_responses"] = cache.Miss.Responses + cache.Expired.Responses + cache.Bypass.Responses
+ mx[px+"served_bytes"] = cache.Hit.Bytes + cache.Stale.Bytes + cache.Updating.Bytes + cache.Revalidated.Bytes
+ mx[px+"written_bytes"] = cache.Miss.BytesWritten + cache.Expired.BytesWritten + cache.Bypass.BytesWritten
+ mx[px+"bypassed_bytes"] = cache.Miss.Bytes + cache.Expired.Bytes + cache.Bypass.Bytes
+ }
+}
+
+func (n *NginxPlus) collectHTTPServerZones(mx map[string]int64, ms *nginxMetrics) {
+ if ms.httpServerZones == nil {
+ return
+ }
+ for name, zone := range *ms.httpServerZones {
+ n.cache.putHTTPServerZone(name)
+
+ px := fmt.Sprintf("http_server_zone_%s_", name)
+ mx[px+"requests_processing"] = zone.Processing
+ mx[px+"requests"] = zone.Requests
+ mx[px+"requests_discarded"] = zone.Discarded
+ mx[px+"bytes_received"] = zone.Received
+ mx[px+"bytes_sent"] = zone.Sent
+ mx[px+"responses"] = zone.Responses.Total
+ mx[px+"responses_1xx"] = zone.Responses.Class1xx
+ mx[px+"responses_2xx"] = zone.Responses.Class2xx
+ mx[px+"responses_3xx"] = zone.Responses.Class3xx
+ mx[px+"responses_4xx"] = zone.Responses.Class4xx
+ mx[px+"responses_5xx"] = zone.Responses.Class5xx
+ }
+}
+
+func (n *NginxPlus) collectHTTPLocationZones(mx map[string]int64, ms *nginxMetrics) {
+ if ms.httpLocationZones == nil {
+ return
+ }
+ for name, zone := range *ms.httpLocationZones {
+ n.cache.putHTTPLocationZone(name)
+
+ px := fmt.Sprintf("http_location_zone_%s_", name)
+ mx[px+"requests"] = zone.Requests
+ mx[px+"requests_discarded"] = zone.Discarded
+ mx[px+"bytes_received"] = zone.Received
+ mx[px+"bytes_sent"] = zone.Sent
+ mx[px+"responses"] = zone.Responses.Total
+ mx[px+"responses_1xx"] = zone.Responses.Class1xx
+ mx[px+"responses_2xx"] = zone.Responses.Class2xx
+ mx[px+"responses_3xx"] = zone.Responses.Class3xx
+ mx[px+"responses_4xx"] = zone.Responses.Class4xx
+ mx[px+"responses_5xx"] = zone.Responses.Class5xx
+ }
+}
+
+func (n *NginxPlus) collectHTTPUpstreams(mx map[string]int64, ms *nginxMetrics) {
+ if ms.httpUpstreams == nil {
+ return
+ }
+ for name, upstream := range *ms.httpUpstreams {
+ n.cache.putHTTPUpstream(name, upstream.Zone)
+
+ px := fmt.Sprintf("http_upstream_%s_zone_%s_", name, upstream.Zone)
+ mx[px+"zombies"] = upstream.Zombies
+ mx[px+"keepalive"] = upstream.Keepalive
+ mx[px+"peers"] = int64(len(upstream.Peers))
+
+ for _, peer := range upstream.Peers {
+ n.cache.putHTTPUpstreamServer(name, peer.Server, peer.Name, upstream.Zone)
+
+ px = fmt.Sprintf("http_upstream_%s_server_%s_zone_%s_", name, peer.Server, upstream.Zone)
+ mx[px+"active"] = peer.Active
+ mx[px+"state_up"] = boolToInt(peer.State == "up")
+ mx[px+"state_down"] = boolToInt(peer.State == "down")
+ mx[px+"state_draining"] = boolToInt(peer.State == "draining")
+ mx[px+"state_unavail"] = boolToInt(peer.State == "unavail")
+ mx[px+"state_checking"] = boolToInt(peer.State == "checking")
+ mx[px+"state_unhealthy"] = boolToInt(peer.State == "unhealthy")
+ mx[px+"bytes_received"] = peer.Received
+ mx[px+"bytes_sent"] = peer.Sent
+ mx[px+"requests"] = peer.Requests
+ mx[px+"responses"] = peer.Responses.Total
+ mx[px+"responses_1xx"] = peer.Responses.Class1xx
+ mx[px+"responses_2xx"] = peer.Responses.Class2xx
+ mx[px+"responses_3xx"] = peer.Responses.Class3xx
+ mx[px+"responses_4xx"] = peer.Responses.Class4xx
+ mx[px+"responses_5xx"] = peer.Responses.Class5xx
+ mx[px+"response_time"] = peer.ResponseTime
+ mx[px+"header_time"] = peer.HeaderTime
+ mx[px+"downtime"] = peer.Downtime / 1000
+ }
+ }
+}
+
+func (n *NginxPlus) collectStreamServerZones(mx map[string]int64, ms *nginxMetrics) {
+ if ms.streamServerZones == nil {
+ return
+ }
+ for name, zone := range *ms.streamServerZones {
+ n.cache.putStreamServerZone(name)
+
+ px := fmt.Sprintf("stream_server_zone_%s_", name)
+ mx[px+"connections"] = zone.Connections
+ mx[px+"connections_processing"] = zone.Processing
+ mx[px+"connections_discarded"] = zone.Discarded
+ mx[px+"bytes_received"] = zone.Received
+ mx[px+"bytes_sent"] = zone.Sent
+ mx[px+"sessions"] = zone.Sessions.Total
+ mx[px+"sessions_2xx"] = zone.Sessions.Class2xx
+ mx[px+"sessions_4xx"] = zone.Sessions.Class4xx
+ mx[px+"sessions_5xx"] = zone.Sessions.Class5xx
+ }
+}
+
+func (n *NginxPlus) collectStreamUpstreams(mx map[string]int64, ms *nginxMetrics) {
+ if ms.streamUpstreams == nil {
+ return
+ }
+ for name, upstream := range *ms.streamUpstreams {
+ n.cache.putStreamUpstream(name, upstream.Zone)
+
+ px := fmt.Sprintf("stream_upstream_%s_zone_%s_", name, upstream.Zone)
+ mx[px+"zombies"] = upstream.Zombies
+ mx[px+"peers"] = int64(len(upstream.Peers))
+
+ for _, peer := range upstream.Peers {
+ n.cache.putStreamUpstreamServer(name, peer.Server, peer.Name, upstream.Zone)
+
+ px = fmt.Sprintf("stream_upstream_%s_server_%s_zone_%s_", name, peer.Server, upstream.Zone)
+ mx[px+"active"] = peer.Active
+ mx[px+"connections"] = peer.Connections
+ mx[px+"state_up"] = boolToInt(peer.State == "up")
+ mx[px+"state_down"] = boolToInt(peer.State == "down")
+ mx[px+"state_unavail"] = boolToInt(peer.State == "unavail")
+ mx[px+"state_checking"] = boolToInt(peer.State == "checking")
+ mx[px+"state_unhealthy"] = boolToInt(peer.State == "unhealthy")
+ mx[px+"bytes_received"] = peer.Received
+ mx[px+"bytes_sent"] = peer.Sent
+ mx[px+"downtime"] = peer.Downtime / 1000
+ }
+ }
+}
+
+func (n *NginxPlus) collectResolvers(mx map[string]int64, ms *nginxMetrics) {
+ if ms.resolvers == nil {
+ return
+ }
+ for name, zone := range *ms.resolvers {
+ n.cache.putResolver(name)
+
+ px := fmt.Sprintf("resolver_zone_%s_", name)
+ mx[px+"requests_name"] = zone.Requests.Name
+ mx[px+"requests_srv"] = zone.Requests.Srv
+ mx[px+"requests_addr"] = zone.Requests.Addr
+ mx[px+"responses_noerror"] = zone.Responses.NoError
+ mx[px+"responses_formerr"] = zone.Responses.Formerr
+ mx[px+"responses_servfail"] = zone.Responses.Servfail
+ mx[px+"responses_nxdomain"] = zone.Responses.Nxdomain
+ mx[px+"responses_notimp"] = zone.Responses.Notimp
+ mx[px+"responses_refused"] = zone.Responses.Refused
+ mx[px+"responses_timedout"] = zone.Responses.TimedOut
+ mx[px+"responses_unknown"] = zone.Responses.Unknown
+ }
+}
+
+func (n *NginxPlus) updateCharts() {
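+ // An object missing from notSeenLimit consecutive collections is evicted from the cache and its charts are removed.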
+ const notSeenLimit = 3
+
+ for key, v := range n.cache.httpCaches {
+ if v.updated && !v.hasCharts {
+ v.hasCharts = true
+ n.addHTTPCacheCharts(v.name)
+ continue
+ }
+ if !v.updated {
+ if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit {
+ delete(n.cache.httpCaches, key)
+ n.removeHTTPCacheCharts(v.name)
+ }
+ }
+ }
+ for key, v := range n.cache.httpServerZones {
+ if v.updated && !v.hasCharts {
+ v.hasCharts = true
+ n.addHTTPServerZoneCharts(v.zone)
+ continue
+ }
+ if !v.updated {
+ if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit {
+ delete(n.cache.httpServerZones, key)
+ n.removeHTTPServerZoneCharts(v.zone)
+ }
+ }
+ }
+ for key, v := range n.cache.httpLocationZones {
+ if v.updated && !v.hasCharts {
+ v.hasCharts = true
+ n.addHTTPLocationZoneCharts(v.zone)
+ continue
+ }
+ if !v.updated {
+ if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit {
+ delete(n.cache.httpLocationZones, key)
+ n.removeHTTPLocationZoneCharts(v.zone)
+ }
+ }
+ }
+ for key, v := range n.cache.httpUpstreams {
+ if v.updated && !v.hasCharts {
+ v.hasCharts = true
+ n.addHTTPUpstreamCharts(v.name, v.zone)
+ continue
+ }
+ if !v.updated {
+ if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit {
+ delete(n.cache.httpUpstreams, key)
+ n.removeHTTPUpstreamCharts(v.name, v.zone)
+ }
+ }
+ }
+ for key, v := range n.cache.httpUpstreamServers {
+ if v.updated && !v.hasCharts {
+ v.hasCharts = true
+ n.addHTTPUpstreamServerCharts(v.name, v.serverAddr, v.serverName, v.zone)
+ continue
+ }
+ if !v.updated {
+ if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit {
+ delete(n.cache.httpUpstreamServers, key)
+ n.removeHTTPUpstreamServerCharts(v.name, v.serverAddr, v.zone)
+ }
+ }
+ }
+ for key, v := range n.cache.streamServerZones {
+ if v.updated && !v.hasCharts {
+ v.hasCharts = true
+ n.addStreamServerZoneCharts(v.zone)
+ continue
+ }
+ if !v.updated {
+ if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit {
+ delete(n.cache.streamServerZones, key)
+ n.removeStreamServerZoneCharts(v.zone)
+ }
+ }
+ }
+ for key, v := range n.cache.streamUpstreams {
+ if v.updated && !v.hasCharts {
+ v.hasCharts = true
+ n.addStreamUpstreamCharts(v.name, v.zone)
+ continue
+ }
+ if !v.updated {
+ if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit {
+ delete(n.cache.streamUpstreams, key)
+ n.removeStreamUpstreamCharts(v.name, v.zone)
+ }
+ }
+ }
+ for key, v := range n.cache.streamUpstreamServers {
+ if v.updated && !v.hasCharts {
+ v.hasCharts = true
+ n.addStreamUpstreamServerCharts(v.name, v.serverAddr, v.serverName, v.zone)
+ continue
+ }
+ if !v.updated {
+ if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit {
+ delete(n.cache.streamUpstreamServers, key)
+ n.removeStreamUpstreamServerCharts(v.name, v.serverAddr, v.zone)
+ }
+ }
+ }
+ for key, v := range n.cache.resolvers {
+ if v.updated && !v.hasCharts {
+ v.hasCharts = true
+ n.addResolverZoneCharts(v.zone)
+ continue
+ }
+ if !v.updated {
+ if v.notSeenTimes++; v.notSeenTimes >= notSeenLimit {
+ delete(n.cache.resolvers, key)
+ n.removeResolverZoneCharts(v.zone)
+ }
+ }
+ }
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/config_schema.json b/src/go/plugin/go.d/modules/nginxplus/config_schema.json
new file mode 100644
index 000000000..fd4c38ef1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NGINX Plus collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the NGINX Plus webserver.",
+ "type": "string",
+        "default": "http://127.0.0.1",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md b/src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md
new file mode 100644
index 000000000..9ebb4b195
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/integrations/nginx_plus.md
@@ -0,0 +1,448 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nginxplus/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nginxplus/metadata.yaml"
+sidebar_label: "NGINX Plus"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NGINX Plus
+
+
+<img src="https://netdata.cloud/img/nginxplus.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: nginxplus
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors NGINX Plus servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NGINX Plus instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.client_connections_rate | accepted, dropped | connections/s |
+| nginxplus.client_connections_count | active, idle | connections |
+| nginxplus.ssl_handshakes_rate | successful, failed | handshakes/s |
+| nginxplus.ssl_handshakes_failures_rate | no_common_protocol, no_common_cipher, timeout, peer_rejected_cert | failures/s |
+| nginxplus.ssl_verification_errors_rate | no_cert, expired_cert, revoked_cert, hostname_mismatch, other | errors/s |
+| nginxplus.ssl_session_reuses_rate | ssl_session | reuses/s |
+| nginxplus.http_requests_rate | requests | requests/s |
+| nginxplus.http_requests_count | requests | requests |
+| nginxplus.uptime | uptime | seconds |
+
+### Per http server zone
+
+These metrics refer to the HTTP server zone.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| http_server_zone | HTTP server zone name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.http_server_zone_requests_rate | requests | requests/s |
+| nginxplus.http_server_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |
+| nginxplus.http_server_zone_traffic_rate | received, sent | bytes/s |
+| nginxplus.http_server_zone_requests_processing_count | processing | requests |
+| nginxplus.http_server_zone_requests_discarded_rate | discarded | requests/s |
+
+### Per http location zone
+
+These metrics refer to the HTTP location zone.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| http_location_zone | HTTP location zone name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.http_location_zone_requests_rate | requests | requests/s |
+| nginxplus.http_location_zone_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |
+| nginxplus.http_location_zone_traffic_rate | received, sent | bytes/s |
+| nginxplus.http_location_zone_requests_discarded_rate | discarded | requests/s |
+
+### Per http upstream
+
+These metrics refer to the HTTP upstream.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| http_upstream_name | HTTP upstream name |
+| http_upstream_zone | HTTP upstream zone name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.http_upstream_peers_count | peers | peers |
+| nginxplus.http_upstream_zombies_count | zombie | servers |
+| nginxplus.http_upstream_keepalive_count | keepalive | connections |
+
+### Per http upstream server
+
+These metrics refer to the HTTP upstream server.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| http_upstream_name | HTTP upstream name |
+| http_upstream_zone | HTTP upstream zone name |
+| http_upstream_server_address | HTTP upstream server address (e.g. 127.0.0.1:81) |
+| http_upstream_server_name | HTTP upstream server name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.http_upstream_server_requests_rate | requests | requests/s |
+| nginxplus.http_upstream_server_responses_per_code_class_rate | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |
+| nginxplus.http_upstream_server_response_time | response | milliseconds |
+| nginxplus.http_upstream_server_response_header_time | header | milliseconds |
+| nginxplus.http_upstream_server_traffic_rate | received, sent | bytes/s |
+| nginxplus.http_upstream_server_state | up, down, draining, unavail, checking, unhealthy | state |
+| nginxplus.http_upstream_server_connections_count | active | connections |
+| nginxplus.http_upstream_server_downtime | downtime | seconds |
+
+### Per http cache
+
+These metrics refer to the HTTP cache.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| http_cache | HTTP cache name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.http_cache_state | warm, cold | state |
+| nginxplus.http_cache_iops | served, written, bypass | responses/s |
+| nginxplus.http_cache_io | served, written, bypass | bytes/s |
+| nginxplus.http_cache_size | size | bytes |
+
+### Per stream server zone
+
+These metrics refer to the Stream server zone.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| stream_server_zone | Stream server zone name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.stream_server_zone_connections_rate | accepted | connections/s |
+| nginxplus.stream_server_zone_sessions_per_code_class_rate | 2xx, 4xx, 5xx | sessions/s |
+| nginxplus.stream_server_zone_traffic_rate | received, sent | bytes/s |
+| nginxplus.stream_server_zone_connections_processing_count | processing | connections |
+| nginxplus.stream_server_zone_connections_discarded_rate | discarded | connections/s |
+
+### Per stream upstream
+
+These metrics refer to the Stream upstream.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| stream_upstream_name | Stream upstream name |
+| stream_upstream_zone | Stream upstream zone name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.stream_upstream_peers_count | peers | peers |
+| nginxplus.stream_upstream_zombies_count | zombie | servers |
+
+### Per stream upstream server
+
+These metrics refer to the Stream upstream server.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| stream_upstream_name | Stream upstream name |
+| stream_upstream_zone | Stream upstream zone name |
+| stream_upstream_server_address | Stream upstream server address (e.g. 127.0.0.1:12346) |
+| stream_upstream_server_name | Stream upstream server name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.stream_upstream_server_connections_rate | forwarded | connections/s |
+| nginxplus.stream_upstream_server_traffic_rate | received, sent | bytes/s |
+| nginxplus.stream_upstream_server_state | up, down, unavail, checking, unhealthy | state |
+| nginxplus.stream_upstream_server_downtime | downtime | seconds |
+| nginxplus.stream_upstream_server_connections_count | active | connections |
+
+### Per resolver zone
+
+These metrics refer to the resolver zone.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| resolver_zone | resolver zone name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxplus.resolver_zone_requests_rate | name, srv, addr | requests/s |
+| nginxplus.resolver_zone_responses_rate | noerror, formerr, servfail, nxdomain, notimp, refused, timedout, unknown | responses/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Config API
+
+To configure the API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/nginxplus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/nginxplus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+NGINX Plus with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1
+
+ - name: remote
+ url: http://192.0.2.1
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `nginxplus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m nginxplus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `nginxplus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep nginxplus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep nginxplus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep nginxplus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/nginxplus/metadata.yaml b/src/go/plugin/go.d/modules/nginxplus/metadata.yaml
new file mode 100644
index 000000000..6bc3a29bd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/metadata.yaml
@@ -0,0 +1,584 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-nginxplus
+ plugin_name: go.d.plugin
+ module_name: nginxplus
+ monitored_instance:
+ name: NGINX Plus
+ link: https://www.nginx.com/products/nginx/
+ icon_filename: nginxplus.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - nginxplus
+ - nginx
+ - web
+ - webserver
+ - http
+ - proxy
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors NGINX Plus servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Config API
+ description: |
+              To configure the API, see the [official documentation](https://docs.nginx.com/nginx/admin-guide/monitoring/live-activity-monitoring/#configuring-the-api).
+ configuration:
+ file:
+ name: go.d/nginxplus.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: NGINX Plus with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1
+
+ - name: remote
+ url: http://192.0.2.1
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: nginxplus.client_connections_rate
+ description: Client connections rate
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: accepted
+ - name: dropped
+ - name: nginxplus.client_connections_count
+ description: Client connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: idle
+ - name: nginxplus.ssl_handshakes_rate
+ description: SSL handshakes rate
+ unit: handshakes/s
+ chart_type: line
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: nginxplus.ssl_handshakes_failures_rate
+ description: SSL handshakes failures rate
+ unit: failures/s
+ chart_type: stacked
+ dimensions:
+ - name: no_common_protocol
+ - name: no_common_cipher
+ - name: timeout
+ - name: peer_rejected_cert
+ - name: nginxplus.ssl_verification_errors_rate
+ description: SSL verification errors rate
+ unit: errors/s
+ chart_type: stacked
+ dimensions:
+ - name: no_cert
+ - name: expired_cert
+ - name: revoked_cert
+ - name: hostname_mismatch
+ - name: other
+ - name: nginxplus.ssl_session_reuses_rate
+              description: Session reuses during SSL handshake
+ unit: reuses/s
+ chart_type: line
+ dimensions:
+ - name: ssl_session
+ - name: nginxplus.http_requests_rate
+ description: HTTP requests rate
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: nginxplus.http_requests_count
+ description: HTTP requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: nginxplus.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: http server zone
+ description: These metrics refer to the HTTP server zone.
+ labels:
+ - name: http_server_zone
+ description: HTTP server zone name
+ metrics:
+ - name: nginxplus.http_server_zone_requests_rate
+ description: HTTP Server Zone requests rate
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: nginxplus.http_server_zone_responses_per_code_class_rate
+ description: HTTP Server Zone responses rate
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: 1xx
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: nginxplus.http_server_zone_traffic_rate
+ description: HTTP Server Zone traffic
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: nginxplus.http_server_zone_requests_processing_count
+ description: HTTP Server Zone currently processed requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: processing
+ - name: nginxplus.http_server_zone_requests_discarded_rate
+ description: HTTP Server Zone requests discarded rate
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: discarded
+ - name: http location zone
+ description: These metrics refer to the HTTP location zone.
+ labels:
+ - name: http_location_zone
+ description: HTTP location zone name
+ metrics:
+ - name: nginxplus.http_location_zone_requests_rate
+ description: HTTP Location Zone requests rate
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: nginxplus.http_location_zone_responses_per_code_class_rate
+ description: HTTP Location Zone responses rate
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: 1xx
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: nginxplus.http_location_zone_traffic_rate
+ description: HTTP Location Zone traffic rate
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: nginxplus.http_location_zone_requests_discarded_rate
+ description: HTTP Location Zone requests discarded rate
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: discarded
+ - name: http upstream
+ description: These metrics refer to the HTTP upstream.
+ labels:
+ - name: http_upstream_name
+ description: HTTP upstream name
+ - name: http_upstream_zone
+ description: HTTP upstream zone name
+ metrics:
+ - name: nginxplus.http_upstream_peers_count
+ description: HTTP Upstream peers
+ unit: peers
+ chart_type: line
+ dimensions:
+ - name: peers
+ - name: nginxplus.http_upstream_zombies_count
+ description: HTTP Upstream zombies
+ unit: servers
+ chart_type: line
+ dimensions:
+ - name: zombie
+ - name: nginxplus.http_upstream_keepalive_count
+ description: HTTP Upstream keepalive
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: keepalive
+ - name: http upstream server
+ description: These metrics refer to the HTTP upstream server.
+ labels:
+ - name: http_upstream_name
+ description: HTTP upstream name
+ - name: http_upstream_zone
+ description: HTTP upstream zone name
+ - name: http_upstream_server_address
+ description: HTTP upstream server address (e.g. 127.0.0.1:81)
+ - name: http_upstream_server_name
+ description: HTTP upstream server name
+ metrics:
+ - name: nginxplus.http_upstream_server_requests_rate
+ description: HTTP Upstream Server requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: nginxplus.http_upstream_server_responses_per_code_class_rate
+ description: HTTP Upstream Server responses
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: 1xx
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: nginxplus.http_upstream_server_response_time
+ description: HTTP Upstream Server average response time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: response
+ - name: nginxplus.http_upstream_server_response_header_time
+ description: HTTP Upstream Server average response header time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: header
+ - name: nginxplus.http_upstream_server_traffic_rate
+ description: HTTP Upstream Server traffic rate
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: nginxplus.http_upstream_server_state
+ description: HTTP Upstream Server state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: up
+ - name: down
+ - name: draining
+ - name: unavail
+ - name: checking
+ - name: unhealthy
+ - name: nginxplus.http_upstream_server_connections_count
+ description: HTTP Upstream Server connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: nginxplus.http_upstream_server_downtime
+ description: HTTP Upstream Server downtime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: downtime
+ - name: http cache
+ description: These metrics refer to the HTTP cache.
+ labels:
+ - name: http_cache
+ description: HTTP cache name
+ metrics:
+ - name: nginxplus.http_cache_state
+ description: HTTP Cache state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: warm
+ - name: cold
+ - name: nginxplus.http_cache_iops
+              description: HTTP Cache IOPS
+ unit: responses/s
+ chart_type: line
+ dimensions:
+ - name: served
+ - name: written
+ - name: bypass
+ - name: nginxplus.http_cache_io
+              description: HTTP Cache IO
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: served
+ - name: written
+ - name: bypass
+ - name: nginxplus.http_cache_size
+              description: HTTP Cache size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: stream server zone
+ description: These metrics refer to the Stream server zone.
+ labels:
+ - name: stream_server_zone
+ description: Stream server zone name
+ metrics:
+ - name: nginxplus.stream_server_zone_connections_rate
+ description: Stream Server Zone connections rate
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: accepted
+ - name: nginxplus.stream_server_zone_sessions_per_code_class_rate
+ description: Stream Server Zone sessions rate
+ unit: sessions/s
+ chart_type: stacked
+ dimensions:
+ - name: 2xx
+ - name: 4xx
+ - name: 5xx
+ - name: nginxplus.stream_server_zone_traffic_rate
+ description: Stream Server Zone traffic rate
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: nginxplus.stream_server_zone_connections_processing_count
+ description: Stream Server Zone connections processed
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: processing
+ - name: nginxplus.stream_server_zone_connections_discarded_rate
+ description: Stream Server Zone connections discarded
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: discarded
+ - name: stream upstream
+ description: These metrics refer to the Stream upstream.
+ labels:
+ - name: stream_upstream_name
+ description: Stream upstream name
+ - name: stream_upstream_zone
+ description: Stream upstream zone name
+ metrics:
+ - name: nginxplus.stream_upstream_peers_count
+ description: Stream Upstream peers
+ unit: peers
+ chart_type: line
+ dimensions:
+ - name: peers
+ - name: nginxplus.stream_upstream_zombies_count
+ description: Stream Upstream zombies
+ unit: servers
+ chart_type: line
+ dimensions:
+ - name: zombie
+ - name: stream upstream server
+ description: These metrics refer to the Stream upstream server.
+ labels:
+ - name: stream_upstream_name
+ description: Stream upstream name
+ - name: stream_upstream_zone
+ description: Stream upstream zone name
+ - name: stream_upstream_server_address
+ description: Stream upstream server address (e.g. 127.0.0.1:12346)
+ - name: stream_upstream_server_name
+ description: Stream upstream server name
+ metrics:
+ - name: nginxplus.stream_upstream_server_connections_rate
+ description: Stream Upstream Server connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: forwarded
+ - name: nginxplus.stream_upstream_server_traffic_rate
+ description: Stream Upstream Server traffic rate
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: nginxplus.stream_upstream_server_state
+ description: Stream Upstream Server state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: up
+ - name: down
+ - name: unavail
+ - name: checking
+ - name: unhealthy
+ - name: nginxplus.stream_upstream_server_downtime
+ description: Stream Upstream Server downtime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: downtime
+ - name: nginxplus.stream_upstream_server_connections_count
+ description: Stream Upstream Server connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: resolver zone
+ description: These metrics refer to the resolver zone.
+ labels:
+ - name: resolver_zone
+ description: resolver zone name
+ metrics:
+ - name: nginxplus.resolver_zone_requests_rate
+ description: Resolver requests rate
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: name
+ - name: srv
+ - name: addr
+ - name: nginxplus.resolver_zone_responses_rate
+ description: Resolver responses rate
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: noerror
+ - name: formerr
+ - name: servfail
+ - name: nxdomain
+ - name: notimp
+ - name: refused
+ - name: timedout
+ - name: unknown
diff --git a/src/go/plugin/go.d/modules/nginxplus/nginx_http_api.go b/src/go/plugin/go.d/modules/nginxplus/nginx_http_api.go
new file mode 100644
index 000000000..0f7999ac5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/nginx_http_api.go
@@ -0,0 +1,212 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxplus
+
+import "time"
+
+// https://demo.nginx.com/dashboard.html
+// https://demo.nginx.com/swagger-ui/
+// http://nginx.org/en/docs/http/ngx_http_api_module.html
+
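+// nginxAPIVersions is the list of API versions advertised by the /api/ endpoint.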
+type nginxAPIVersions []int64
+
+type (
+ nginxInfo struct {
+ Version string `json:"version"`
+ Build string `json:"build"`
+ Address string `json:"address"`
+ Generation int `json:"generation"`
+ LoadTimestamp time.Time `json:"load_timestamp"`
+ Timestamp time.Time `json:"timestamp"`
+ }
+ nginxConnections struct {
+ Accepted int64 `json:"accepted"`
+ Dropped int64 `json:"dropped"`
+ Active int64 `json:"active"`
+ Idle int64 `json:"idle"`
+ }
+ nginxSSL struct {
+ Handshakes int64 `json:"handshakes"`
+ HandshakesFailed int64 `json:"handshakes_failed"`
+ SessionReuses int64 `json:"session_reuses"`
+ NoCommonProtocol int64 `json:"no_common_protocol"`
+ NoCommonCipher int64 `json:"no_common_cipher"`
+ HandshakeTimeout int64 `json:"handshake_timeout"`
+ PeerRejectedCert int64 `json:"peer_rejected_cert"`
+ VerifyFailures struct {
+ NoCert int64 `json:"no_cert"`
+ ExpiredCert int64 `json:"expired_cert"`
+ RevokedCert int64 `json:"revoked_cert"`
+ HostnameMismatch int64 `json:"hostname_mismatch"`
+ Other int64 `json:"other"`
+ } `json:"verify_failures"`
+ }
+)
+
+type (
+ nginxHTTPRequests struct {
+ Total int64 `json:"total"`
+ Current int64 `json:"current"`
+ }
+ nginxHTTPServerZones map[string]struct {
+ Processing int64 `json:"processing"`
+ Requests int64 `json:"requests"`
+ Responses struct {
+ Class1xx int64 `json:"1xx"`
+ Class2xx int64 `json:"2xx"`
+ Class3xx int64 `json:"3xx"`
+ Class4xx int64 `json:"4xx"`
+ Class5xx int64 `json:"5xx"`
+ Total int64
+ } `json:"responses"`
+ Discarded int64 `json:"discarded"`
+ Received int64 `json:"received"`
+ Sent int64 `json:"sent"`
+ }
+ nginxHTTPLocationZones map[string]struct {
+ Requests int64 `json:"requests"`
+ Responses struct {
+ Class1xx int64 `json:"1xx"`
+ Class2xx int64 `json:"2xx"`
+ Class3xx int64 `json:"3xx"`
+ Class4xx int64 `json:"4xx"`
+ Class5xx int64 `json:"5xx"`
+ Total int64
+ } `json:"responses"`
+ Discarded int64 `json:"discarded"`
+ Received int64 `json:"received"`
+ Sent int64 `json:"sent"`
+ }
+ nginxHTTPUpstreams map[string]struct {
+ Peers []struct {
+ Id int64 `json:"id"`
+ Server string `json:"server"`
+ Name string `json:"name"`
+ Backup bool `json:"backup"`
+ Weight int64 `json:"weight"`
+ State string `json:"state"`
+ Active int64 `json:"active"`
+ Requests int64 `json:"requests"`
+ HeaderTime int64 `json:"header_time"`
+ ResponseTime int64 `json:"response_time"`
+ Responses struct {
+ Class1xx int64 `json:"1xx"`
+ Class2xx int64 `json:"2xx"`
+ Class3xx int64 `json:"3xx"`
+ Class4xx int64 `json:"4xx"`
+ Class5xx int64 `json:"5xx"`
+ Total int64
+ } `json:"responses"`
+ Sent int64 `json:"sent"`
+ Received int64 `json:"received"`
+ Fails int64 `json:"fails"`
+ Unavail int64 `json:"unavail"`
+ HealthChecks struct {
+ Checks int64 `json:"checks"`
+ Fails int64 `json:"fails"`
+ Unhealthy int64 `json:"unhealthy"`
+ } `json:"health_checks"`
+ Downtime int64 `json:"downtime"`
+ Selected time.Time `json:"selected"`
+ } `json:"peers"`
+ Keepalive int64 `json:"keepalive"`
+ Zombies int64 `json:"zombies"`
+ Zone string `json:"zone"`
+ }
+ nginxHTTPCaches map[string]struct {
+ Size int64 `json:"size"`
+ Cold bool `json:"cold"`
+ Hit struct {
+ Responses int64 `json:"responses"`
+ Bytes int64 `json:"bytes"`
+ } `json:"hit"`
+ Stale struct {
+ Responses int64 `json:"responses"`
+ Bytes int64 `json:"bytes"`
+ } `json:"stale"`
+ Updating struct {
+ Responses int64 `json:"responses"`
+ Bytes int64 `json:"bytes"`
+ } `json:"updating"`
+ Revalidated struct {
+ Responses int64 `json:"responses"`
+ Bytes int64 `json:"bytes"`
+ } `json:"revalidated"`
+ Miss struct {
+ Responses int64 `json:"responses"`
+ Bytes int64 `json:"bytes"`
+ ResponsesWritten int64 `json:"responses_written"`
+ BytesWritten int64 `json:"bytes_written"`
+ } `json:"miss"`
+ Expired struct {
+ Responses int64 `json:"responses"`
+ Bytes int64 `json:"bytes"`
+ ResponsesWritten int64 `json:"responses_written"`
+ BytesWritten int64 `json:"bytes_written"`
+ } `json:"expired"`
+ Bypass struct {
+ Responses int64 `json:"responses"`
+ Bytes int64 `json:"bytes"`
+ ResponsesWritten int64 `json:"responses_written"`
+ BytesWritten int64 `json:"bytes_written"`
+ } `json:"bypass"`
+ }
+)
+
+type (
+ nginxStreamServerZones map[string]struct {
+ Processing int64 `json:"processing"`
+ Connections int64 `json:"connections"`
+ Sessions struct {
+ Class2xx int64 `json:"2xx"`
+ Class4xx int64 `json:"4xx"`
+ Class5xx int64 `json:"5xx"`
+ Total int64 `json:"total"`
+ } `json:"sessions"`
+ Discarded int64 `json:"discarded"`
+ Received int64 `json:"received"`
+ Sent int64 `json:"sent"`
+ }
+ nginxStreamUpstreams map[string]struct {
+ Peers []struct {
+ Id int64 `json:"id"`
+ Server string `json:"server"`
+ Name string `json:"name"`
+ Backup bool `json:"backup"`
+ Weight int64 `json:"weight"`
+ State string `json:"state"`
+ Active int64 `json:"active"`
+ Connections int64 `json:"connections"`
+ Sent int64 `json:"sent"`
+ Received int64 `json:"received"`
+ Fails int64 `json:"fails"`
+ Unavail int64 `json:"unavail"`
+ HealthChecks struct {
+ Checks int64 `json:"checks"`
+ Fails int64 `json:"fails"`
+ Unhealthy int64 `json:"unhealthy"`
+ } `json:"health_checks"`
+ Downtime int64 `json:"downtime"`
+ } `json:"peers"`
+ Zombies int64 `json:"zombies"`
+ Zone string `json:"zone"`
+ }
+)
+
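+// nginxResolvers maps resolver zone names to their DNS request and response counters.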
+type nginxResolvers map[string]struct {
+ Requests struct {
+ Name int64 `json:"name"`
+ Srv int64 `json:"srv"`
+ Addr int64 `json:"addr"`
+ } `json:"requests"`
+ Responses struct {
+ NoError int64 `json:"noerror"`
+ Formerr int64 `json:"formerr"`
+ Servfail int64 `json:"servfail"`
+ Nxdomain int64 `json:"nxdomain"`
+ Notimp int64 `json:"notimp"`
+ Refused int64 `json:"refused"`
+ TimedOut int64 `json:"timedout"`
+ Unknown int64 `json:"unknown"`
+ } `json:"responses"`
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/nginx_http_api_query.go b/src/go/plugin/go.d/modules/nginxplus/nginx_http_api_query.go
new file mode 100644
index 000000000..b54cd142a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/nginx_http_api_query.go
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxplus
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ urlPathAPIVersions = "/api/"
+ urlPathAPIEndpointsRoot = "/api/%d"
+ urlPathAPINginx = "/api/%d/nginx"
+ urlPathAPIEndpointsHTTP = "/api/%d/http"
+ urlPathAPIEndpointsStream = "/api/%d/stream"
+ urlPathAPIConnections = "/api/%d/connections"
+ urlPathAPISSL = "/api/%d/ssl"
+ urlPathAPIResolvers = "/api/%d/resolvers"
+ urlPathAPIHTTPRequests = "/api/%d/http/requests"
+ urlPathAPIHTTPServerZones = "/api/%d/http/server_zones"
+ urlPathAPIHTTPLocationZones = "/api/%d/http/location_zones"
+ urlPathAPIHTTPUpstreams = "/api/%d/http/upstreams"
+ urlPathAPIHTTPCaches = "/api/%d/http/caches"
+ urlPathAPIStreamServerZones = "/api/%d/stream/server_zones"
+ urlPathAPIStreamUpstreams = "/api/%d/stream/upstreams"
+)
+
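+// nginxMetrics holds the data gathered during a single collection cycle;
+// a nil field means the corresponding endpoint was not queried or the query failed.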
+type nginxMetrics struct {
+ info *nginxInfo
+ connections *nginxConnections
+ ssl *nginxSSL
+ httpRequests *nginxHTTPRequests
+ httpServerZones *nginxHTTPServerZones
+ httpLocationZones *nginxHTTPLocationZones
+ httpUpstreams *nginxHTTPUpstreams
+ httpCaches *nginxHTTPCaches
+ streamServerZones *nginxStreamServerZones
+ streamUpstreams *nginxStreamUpstreams
+ resolvers *nginxResolvers
+}
+
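+// queryAPIVersion returns the most recent (last listed) API version reported by the /api/ endpoint.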
+func (n *NginxPlus) queryAPIVersion() (int64, error) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, urlPathAPIVersions)
+
+ var versions nginxAPIVersions
+ if err := n.doWithDecode(&versions, req); err != nil {
+ return 0, err
+ }
+
+ if len(versions) == 0 {
+ return 0, fmt.Errorf("'%s' returned no data", req.URL)
+ }
+
+ return versions[len(versions)-1], nil
+}
+
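+// queryAvailableEndpoints probes the root, /http and /stream endpoint listings
+// to learn which sub-endpoints this NGINX Plus instance exposes.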
+func (n *NginxPlus) queryAvailableEndpoints() error {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIEndpointsRoot, n.apiVersion))
+
+ var endpoints []string
+ if err := n.doWithDecode(&endpoints, req); err != nil {
+ return err
+ }
+
+ n.Debugf("discovered root endpoints: %v", endpoints)
+ var hasHTTP, hasStream bool
+ for _, v := range endpoints {
+ switch v {
+ case "nginx":
+ n.endpoints.nginx = true
+ case "connections":
+ n.endpoints.connections = true
+ case "ssl":
+ n.endpoints.ssl = true
+ case "resolvers":
+ n.endpoints.resolvers = true
+ case "http":
+ hasHTTP = true
+ case "stream":
+ hasStream = true
+ }
+ }
+
+ if hasHTTP {
+ endpoints = endpoints[:0]
+ req, _ = web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIEndpointsHTTP, n.apiVersion))
+
+ if err := n.doWithDecode(&endpoints, req); err != nil {
+ return err
+ }
+
+ n.Debugf("discovered http endpoints: %v", endpoints)
+ for _, v := range endpoints {
+ switch v {
+ case "requests":
+ n.endpoints.httpRequest = true
+ case "server_zones":
+ n.endpoints.httpServerZones = true
+ case "location_zones":
+ n.endpoints.httpLocationZones = true
+ case "caches":
+ n.endpoints.httpCaches = true
+ case "upstreams":
+ n.endpoints.httpUpstreams = true
+ }
+ }
+ }
+
+ if hasStream {
+ endpoints = endpoints[:0]
+ req, _ = web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIEndpointsStream, n.apiVersion))
+
+ if err := n.doWithDecode(&endpoints, req); err != nil {
+ return err
+ }
+
+ n.Debugf("discovered stream endpoints: %v", endpoints)
+ for _, v := range endpoints {
+ switch v {
+ case "server_zones":
+ n.endpoints.streamServerZones = true
+ case "upstreams":
+ n.endpoints.streamUpstreams = true
+ }
+ }
+ }
+
+ return nil
+}
+
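+// queryMetrics queries all available endpoints concurrently and returns the combined results.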
+func (n *NginxPlus) queryMetrics() *nginxMetrics {
+ ms := &nginxMetrics{}
+ wg := &sync.WaitGroup{}
+
+ for _, task := range []struct {
+ do bool
+ fn func(*nginxMetrics)
+ }{
+ {do: n.endpoints.nginx, fn: n.queryNginxInfo},
+ {do: n.endpoints.connections, fn: n.queryConnections},
+ {do: n.endpoints.ssl, fn: n.querySSL},
+ {do: n.endpoints.httpRequest, fn: n.queryHTTPRequests},
+ {do: n.endpoints.httpServerZones, fn: n.queryHTTPServerZones},
+ {do: n.endpoints.httpLocationZones, fn: n.queryHTTPLocationZones},
+ {do: n.endpoints.httpUpstreams, fn: n.queryHTTPUpstreams},
+ {do: n.endpoints.httpCaches, fn: n.queryHTTPCaches},
+ {do: n.endpoints.streamServerZones, fn: n.queryStreamServerZones},
+ {do: n.endpoints.streamUpstreams, fn: n.queryStreamUpstreams},
+ {do: n.endpoints.resolvers, fn: n.queryResolvers},
+ } {
+ task := task
+ if task.do {
+ wg.Add(1)
+ go func() { task.fn(ms); wg.Done() }()
+ }
+ }
+
+ wg.Wait()
+
+ return ms
+}
+
+func (n *NginxPlus) queryNginxInfo(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPINginx, n.apiVersion))
+
+ var v nginxInfo
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.nginx = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.info = &v
+}
+
+func (n *NginxPlus) queryConnections(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIConnections, n.apiVersion))
+
+ var v nginxConnections
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.connections = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.connections = &v
+}
+
+func (n *NginxPlus) querySSL(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPISSL, n.apiVersion))
+
+ var v nginxSSL
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.ssl = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.ssl = &v
+}
+
+func (n *NginxPlus) queryHTTPRequests(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPRequests, n.apiVersion))
+
+ var v nginxHTTPRequests
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.httpRequest = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.httpRequests = &v
+}
+
+func (n *NginxPlus) queryHTTPServerZones(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPServerZones, n.apiVersion))
+
+ var v nginxHTTPServerZones
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.httpServerZones = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.httpServerZones = &v
+}
+
+func (n *NginxPlus) queryHTTPLocationZones(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPLocationZones, n.apiVersion))
+
+ var v nginxHTTPLocationZones
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.httpLocationZones = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.httpLocationZones = &v
+}
+
+func (n *NginxPlus) queryHTTPUpstreams(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPUpstreams, n.apiVersion))
+
+ var v nginxHTTPUpstreams
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.httpUpstreams = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.httpUpstreams = &v
+}
+
+func (n *NginxPlus) queryHTTPCaches(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIHTTPCaches, n.apiVersion))
+
+ var v nginxHTTPCaches
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.httpCaches = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.httpCaches = &v
+}
+
+func (n *NginxPlus) queryStreamServerZones(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIStreamServerZones, n.apiVersion))
+
+ var v nginxStreamServerZones
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.streamServerZones = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.streamServerZones = &v
+}
+
+func (n *NginxPlus) queryStreamUpstreams(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIStreamUpstreams, n.apiVersion))
+
+ var v nginxStreamUpstreams
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.streamUpstreams = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.streamUpstreams = &v
+}
+
+func (n *NginxPlus) queryResolvers(ms *nginxMetrics) {
+ req, _ := web.NewHTTPRequestWithPath(n.Request, fmt.Sprintf(urlPathAPIResolvers, n.apiVersion))
+
+ var v nginxResolvers
+
+ if err := n.doWithDecode(&v, req); err != nil {
+ n.endpoints.resolvers = !errors.Is(err, errPathNotFound)
+ n.Warning(err)
+ return
+ }
+
+ ms.resolvers = &v
+}
+
+var (
+ errPathNotFound = errors.New("path not found")
+)
+
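+// doWithDecode executes the request and decodes the JSON response body into dst.
+// A 404 response is wrapped with errPathNotFound so callers can mark the endpoint as unavailable.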
+func (n *NginxPlus) doWithDecode(dst interface{}, req *http.Request) error {
+ n.Debugf("executing %s '%s'", req.Method, req.URL)
+ resp, err := n.httpClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode == http.StatusNotFound {
+ return fmt.Errorf("%s returned %d status code (%w)", req.URL, resp.StatusCode, errPathNotFound)
+ }
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("%s returned %d status code (%s)", req.URL, resp.StatusCode, resp.Status)
+ }
+
+ content, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("error on reading response from %s : %v", req.URL, err)
+ }
+
+ if err := json.Unmarshal(content, dst); err != nil {
+ return fmt.Errorf("error on parsing response from %s : %v", req.URL, err)
+ }
+
+ return nil
+}
+
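+// closeBody drains and closes the response body so the underlying connection can be reused.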
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
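+// empty reports whether a collection cycle returned no data besides the static nginx info.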
+func (n *nginxMetrics) empty() bool {
+ return n.info != nil &&
+ n.connections == nil &&
+ n.ssl == nil &&
+ n.httpRequests == nil &&
+ n.httpServerZones == nil &&
+ n.httpLocationZones == nil &&
+ n.httpUpstreams == nil &&
+ n.httpCaches == nil &&
+ n.streamServerZones == nil &&
+ n.streamUpstreams == nil &&
+		n.resolvers == nil
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/nginxplus.go b/src/go/plugin/go.d/modules/nginxplus/nginxplus.go
new file mode 100644
index 000000000..f737e6819
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/nginxplus.go
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxplus
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("nginxplus", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
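+// New creates a NginxPlus collector with default configuration (URL http://127.0.0.1, 1s timeout).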
+func New() *NginxPlus {
+ return &NginxPlus{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 1),
+ },
+ },
+ },
+ charts: baseCharts.Copy(),
+ queryEndpointsEvery: time.Minute,
+ cache: newCache(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type NginxPlus struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ apiVersion int64
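+	// endpoints tracks which API endpoints are available; availability is re-discovered
+	// periodically (see queryEndpointsEvery).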
+ endpoints struct {
+ nginx bool
+ connections bool
+ ssl bool
+ httpCaches bool
+ httpRequest bool
+ httpServerZones bool
+ httpLocationZones bool
+ httpUpstreams bool
+ streamServerZones bool
+ streamUpstreams bool
+ resolvers bool
+ }
+ queryEndpointsTime time.Time
+ queryEndpointsEvery time.Duration
+ cache *cache
+}
+
+func (n *NginxPlus) Configuration() any {
+ return n.Config
+}
+
+func (n *NginxPlus) Init() error {
+ if n.URL == "" {
+		n.Error("config validation: 'url' cannot be empty")
+ return errors.New("url not set")
+ }
+
+ client, err := web.NewHTTPClient(n.Client)
+ if err != nil {
+ n.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ n.httpClient = client
+
+ return nil
+}
+
+func (n *NginxPlus) Check() error {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (n *NginxPlus) Charts() *module.Charts {
+ return n.charts
+}
+
+func (n *NginxPlus) Collect() map[string]int64 {
+ mx, err := n.collect()
+
+ if err != nil {
+ n.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (n *NginxPlus) Cleanup() {
+ if n.httpClient != nil {
+ n.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/nginxplus_test.go b/src/go/plugin/go.d/modules/nginxplus/nginxplus_test.go
new file mode 100644
index 000000000..2628cc688
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/nginxplus_test.go
@@ -0,0 +1,596 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxplus
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataAPI8APIVersions, _ = os.ReadFile("testdata/api-8/api_versions.json")
+ dataAPI8Connections, _ = os.ReadFile("testdata/api-8/connections.json")
+ dataAPI8EndpointsHTTP, _ = os.ReadFile("testdata/api-8/endpoints_http.json")
+ dataAPI8EndpointsRoot, _ = os.ReadFile("testdata/api-8/endpoints_root.json")
+ dataAPI8EndpointsStream, _ = os.ReadFile("testdata/api-8/endpoints_stream.json")
+ dataAPI8HTTPCaches, _ = os.ReadFile("testdata/api-8/http_caches.json")
+ dataAPI8HTTPLocationZones, _ = os.ReadFile("testdata/api-8/http_location_zones.json")
+ dataAPI8HTTPRequests, _ = os.ReadFile("testdata/api-8/http_requests.json")
+ dataAPI8HTTPServerZones, _ = os.ReadFile("testdata/api-8/http_server_zones.json")
+ dataAPI8HTTPUpstreams, _ = os.ReadFile("testdata/api-8/http_upstreams.json")
+ dataAPI8SSL, _ = os.ReadFile("testdata/api-8/ssl.json")
+ dataAPI8StreamServerZones, _ = os.ReadFile("testdata/api-8/stream_server_zones.json")
+ dataAPI8StreamUpstreams, _ = os.ReadFile("testdata/api-8/stream_upstreams.json")
+ dataAPI8Resolvers, _ = os.ReadFile("testdata/api-8/resolvers.json")
+ data404, _ = os.ReadFile("testdata/404.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataAPI8APIVersions": dataAPI8APIVersions,
+ "dataAPI8Connections": dataAPI8Connections,
+ "dataAPI8EndpointsHTTP": dataAPI8EndpointsHTTP,
+ "dataAPI8EndpointsRoot": dataAPI8EndpointsRoot,
+ "dataAPI8EndpointsStream": dataAPI8EndpointsStream,
+ "dataAPI8HTTPCaches": dataAPI8HTTPCaches,
+ "dataAPI8HTTPLocationZones": dataAPI8HTTPLocationZones,
+ "dataAPI8HTTPRequests": dataAPI8HTTPRequests,
+ "dataAPI8HTTPServerZones": dataAPI8HTTPServerZones,
+ "dataAPI8HTTPUpstreams": dataAPI8HTTPUpstreams,
+ "dataAPI8SSL": dataAPI8SSL,
+ "dataAPI8StreamServerZones": dataAPI8StreamServerZones,
+ "dataAPI8StreamUpstreams": dataAPI8StreamUpstreams,
+ "dataAPI8Resolvers": dataAPI8Resolvers,
+ "data404": data404,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestNginxPlus_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NginxPlus{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNginxPlus_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nginx := New()
+ nginx.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, nginx.Init())
+ } else {
+ assert.NoError(t, nginx.Init())
+ }
+ })
+ }
+}
+
+func TestNginxPlus_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (nginx *NginxPlus, cleanup func())
+ }{
+ "success when all requests OK": {
+ wantFail: false,
+ prepare: caseAPI8AllRequestsOK,
+ },
+ "success when all requests except stream OK": {
+ wantFail: false,
+ prepare: caseAPI8AllRequestsExceptStreamOK,
+ },
+ "fail on invalid data response": {
+ wantFail: true,
+ prepare: caseInvalidDataResponse,
+ },
+ "fail on connection refused": {
+ wantFail: true,
+ prepare: caseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nginx, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, nginx.Check())
+ } else {
+ assert.NoError(t, nginx.Check())
+ }
+ })
+ }
+}
+
+func TestNginxPlus_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (nginx *NginxPlus, cleanup func())
+ wantNumOfCharts int
+ wantMetrics map[string]int64
+ }{
+ "success when all requests OK": {
+ prepare: caseAPI8AllRequestsOK,
+ wantNumOfCharts: len(baseCharts) +
+ len(httpCacheChartsTmpl) +
+ len(httpServerZoneChartsTmpl) +
+ len(httpLocationZoneChartsTmpl)*2 +
+ len(httpUpstreamChartsTmpl) +
+ len(httpUpstreamServerChartsTmpl)*2 +
+ len(streamServerZoneChartsTmpl) +
+ len(streamUpstreamChartsTmpl) +
+ len(streamUpstreamServerChartsTmpl)*2 +
+ len(resolverZoneChartsTmpl)*2,
+ wantMetrics: map[string]int64{
+ "connections_accepted": 6079,
+ "connections_active": 1,
+ "connections_dropped": 0,
+ "connections_idle": 8,
+ "http_cache_cache_backend_bypassed_bytes": 67035,
+ "http_cache_cache_backend_bypassed_responses": 109,
+ "http_cache_cache_backend_served_bytes": 0,
+ "http_cache_cache_backend_served_responses": 0,
+ "http_cache_cache_backend_size": 0,
+ "http_cache_cache_backend_state_cold": 0,
+ "http_cache_cache_backend_state_warm": 1,
+ "http_cache_cache_backend_written_bytes": 0,
+ "http_cache_cache_backend_written_responses": 0,
+ "http_location_zone_server_api_bytes_received": 1854427,
+ "http_location_zone_server_api_bytes_sent": 4668778,
+ "http_location_zone_server_api_requests": 9188,
+ "http_location_zone_server_api_requests_discarded": 0,
+ "http_location_zone_server_api_responses": 9188,
+ "http_location_zone_server_api_responses_1xx": 0,
+ "http_location_zone_server_api_responses_2xx": 9187,
+ "http_location_zone_server_api_responses_3xx": 0,
+ "http_location_zone_server_api_responses_4xx": 1,
+ "http_location_zone_server_api_responses_5xx": 0,
+ "http_location_zone_server_dashboard_bytes_received": 0,
+ "http_location_zone_server_dashboard_bytes_sent": 0,
+ "http_location_zone_server_dashboard_requests": 0,
+ "http_location_zone_server_dashboard_requests_discarded": 0,
+ "http_location_zone_server_dashboard_responses": 0,
+ "http_location_zone_server_dashboard_responses_1xx": 0,
+ "http_location_zone_server_dashboard_responses_2xx": 0,
+ "http_location_zone_server_dashboard_responses_3xx": 0,
+ "http_location_zone_server_dashboard_responses_4xx": 0,
+ "http_location_zone_server_dashboard_responses_5xx": 0,
+ "http_requests_current": 1,
+ "http_requests_total": 8363,
+ "http_server_zone_server_backend_bytes_received": 1773834,
+ "http_server_zone_server_backend_bytes_sent": 4585734,
+ "http_server_zone_server_backend_requests": 8962,
+ "http_server_zone_server_backend_requests_discarded": 0,
+ "http_server_zone_server_backend_requests_processing": 1,
+ "http_server_zone_server_backend_responses": 8961,
+ "http_server_zone_server_backend_responses_1xx": 0,
+ "http_server_zone_server_backend_responses_2xx": 8960,
+ "http_server_zone_server_backend_responses_3xx": 0,
+ "http_server_zone_server_backend_responses_4xx": 1,
+ "http_server_zone_server_backend_responses_5xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_active": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_bytes_received": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_bytes_sent": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_downtime": 1020,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_header_time": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_requests": 26,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_response_time": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_1xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_2xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_3xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_4xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_5xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_checking": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_down": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_draining": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_unavail": 1,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_unhealthy": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_up": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_active": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_bytes_received": 86496,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_bytes_sent": 9180,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_downtime": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_header_time": 1,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_requests": 102,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_response_time": 1,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses": 102,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_1xx": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_2xx": 102,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_3xx": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_4xx": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_5xx": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_checking": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_down": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_draining": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_unavail": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_unhealthy": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_up": 1,
+ "http_upstream_backend_zone_http_backend_keepalive": 0,
+ "http_upstream_backend_zone_http_backend_peers": 2,
+ "http_upstream_backend_zone_http_backend_zombies": 0,
+ "resolver_zone_resolver-http_requests_addr": 0,
+ "resolver_zone_resolver-http_requests_name": 0,
+ "resolver_zone_resolver-http_requests_srv": 2939408,
+ "resolver_zone_resolver-http_responses_formerr": 0,
+ "resolver_zone_resolver-http_responses_noerror": 0,
+ "resolver_zone_resolver-http_responses_notimp": 0,
+ "resolver_zone_resolver-http_responses_nxdomain": 2939404,
+ "resolver_zone_resolver-http_responses_refused": 0,
+ "resolver_zone_resolver-http_responses_servfail": 0,
+ "resolver_zone_resolver-http_responses_timedout": 4,
+ "resolver_zone_resolver-http_responses_unknown": 0,
+ "resolver_zone_resolver-stream_requests_addr": 0,
+ "resolver_zone_resolver-stream_requests_name": 638797,
+ "resolver_zone_resolver-stream_requests_srv": 0,
+ "resolver_zone_resolver-stream_responses_formerr": 0,
+ "resolver_zone_resolver-stream_responses_noerror": 433136,
+ "resolver_zone_resolver-stream_responses_notimp": 0,
+ "resolver_zone_resolver-stream_responses_nxdomain": 40022,
+ "resolver_zone_resolver-stream_responses_refused": 165639,
+ "resolver_zone_resolver-stream_responses_servfail": 0,
+ "resolver_zone_resolver-stream_responses_timedout": 0,
+ "resolver_zone_resolver-stream_responses_unknown": 0,
+ "ssl_handshake_timeout": 4,
+ "ssl_handshakes": 15804607,
+ "ssl_handshakes_failed": 37862,
+ "ssl_no_common_cipher": 24,
+ "ssl_no_common_protocol": 16648,
+ "ssl_peer_rejected_cert": 0,
+ "ssl_session_reuses": 13096060,
+ "ssl_verify_failures_expired_cert": 0,
+ "ssl_verify_failures_hostname_mismatch": 0,
+ "ssl_verify_failures_other": 0,
+ "ssl_verify_failures_no_cert": 0,
+ "ssl_verify_failures_revoked_cert": 0,
+ "stream_server_zone_tcp_server_bytes_received": 0,
+ "stream_server_zone_tcp_server_bytes_sent": 0,
+ "stream_server_zone_tcp_server_connections": 0,
+ "stream_server_zone_tcp_server_connections_discarded": 0,
+ "stream_server_zone_tcp_server_connections_processing": 0,
+ "stream_server_zone_tcp_server_sessions": 0,
+ "stream_server_zone_tcp_server_sessions_2xx": 0,
+ "stream_server_zone_tcp_server_sessions_4xx": 0,
+ "stream_server_zone_tcp_server_sessions_5xx": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_active": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_bytes_received": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_bytes_sent": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_connections": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_downtime": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_checking": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_down": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_unavail": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_unhealthy": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12346_zone_tcp_servers_state_up": 1,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_active": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_bytes_received": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_bytes_sent": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_connections": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_downtime": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_checking": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_down": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_unavail": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_unhealthy": 0,
+ "stream_upstream_stream_backend_server_127.0.0.1:12347_zone_tcp_servers_state_up": 1,
+ "stream_upstream_stream_backend_zone_tcp_servers_peers": 2,
+ "stream_upstream_stream_backend_zone_tcp_servers_zombies": 0,
+ },
+ },
+ "success when all requests except stream OK": {
+ prepare: caseAPI8AllRequestsExceptStreamOK,
+ wantNumOfCharts: len(baseCharts) +
+ len(httpCacheChartsTmpl) +
+ len(httpServerZoneChartsTmpl) +
+ len(httpLocationZoneChartsTmpl)*2 +
+ len(httpUpstreamChartsTmpl) +
+ len(httpUpstreamServerChartsTmpl)*2 +
+ len(resolverZoneChartsTmpl)*2,
+ wantMetrics: map[string]int64{
+ "connections_accepted": 6079,
+ "connections_active": 1,
+ "connections_dropped": 0,
+ "connections_idle": 8,
+ "http_cache_cache_backend_bypassed_bytes": 67035,
+ "http_cache_cache_backend_bypassed_responses": 109,
+ "http_cache_cache_backend_served_bytes": 0,
+ "http_cache_cache_backend_served_responses": 0,
+ "http_cache_cache_backend_size": 0,
+ "http_cache_cache_backend_state_cold": 0,
+ "http_cache_cache_backend_state_warm": 1,
+ "http_cache_cache_backend_written_bytes": 0,
+ "http_cache_cache_backend_written_responses": 0,
+ "http_location_zone_server_api_bytes_received": 1854427,
+ "http_location_zone_server_api_bytes_sent": 4668778,
+ "http_location_zone_server_api_requests": 9188,
+ "http_location_zone_server_api_requests_discarded": 0,
+ "http_location_zone_server_api_responses": 9188,
+ "http_location_zone_server_api_responses_1xx": 0,
+ "http_location_zone_server_api_responses_2xx": 9187,
+ "http_location_zone_server_api_responses_3xx": 0,
+ "http_location_zone_server_api_responses_4xx": 1,
+ "http_location_zone_server_api_responses_5xx": 0,
+ "http_location_zone_server_dashboard_bytes_received": 0,
+ "http_location_zone_server_dashboard_bytes_sent": 0,
+ "http_location_zone_server_dashboard_requests": 0,
+ "http_location_zone_server_dashboard_requests_discarded": 0,
+ "http_location_zone_server_dashboard_responses": 0,
+ "http_location_zone_server_dashboard_responses_1xx": 0,
+ "http_location_zone_server_dashboard_responses_2xx": 0,
+ "http_location_zone_server_dashboard_responses_3xx": 0,
+ "http_location_zone_server_dashboard_responses_4xx": 0,
+ "http_location_zone_server_dashboard_responses_5xx": 0,
+ "http_requests_current": 1,
+ "http_requests_total": 8363,
+ "http_server_zone_server_backend_bytes_received": 1773834,
+ "http_server_zone_server_backend_bytes_sent": 4585734,
+ "http_server_zone_server_backend_requests": 8962,
+ "http_server_zone_server_backend_requests_discarded": 0,
+ "http_server_zone_server_backend_requests_processing": 1,
+ "http_server_zone_server_backend_responses": 8961,
+ "http_server_zone_server_backend_responses_1xx": 0,
+ "http_server_zone_server_backend_responses_2xx": 8960,
+ "http_server_zone_server_backend_responses_3xx": 0,
+ "http_server_zone_server_backend_responses_4xx": 1,
+ "http_server_zone_server_backend_responses_5xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_active": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_bytes_received": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_bytes_sent": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_downtime": 1020,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_header_time": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_requests": 26,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_response_time": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_1xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_2xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_3xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_4xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_responses_5xx": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_checking": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_down": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_draining": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_unavail": 1,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_unhealthy": 0,
+ "http_upstream_backend_server_127.0.0.1:81_zone_http_backend_state_up": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_active": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_bytes_received": 86496,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_bytes_sent": 9180,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_downtime": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_header_time": 1,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_requests": 102,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_response_time": 1,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses": 102,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_1xx": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_2xx": 102,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_3xx": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_4xx": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_responses_5xx": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_checking": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_down": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_draining": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_unavail": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_unhealthy": 0,
+ "http_upstream_backend_server_127.0.0.1:82_zone_http_backend_state_up": 1,
+ "http_upstream_backend_zone_http_backend_keepalive": 0,
+ "http_upstream_backend_zone_http_backend_peers": 2,
+ "http_upstream_backend_zone_http_backend_zombies": 0,
+ "resolver_zone_resolver-http_requests_addr": 0,
+ "resolver_zone_resolver-http_requests_name": 0,
+ "resolver_zone_resolver-http_requests_srv": 2939408,
+ "resolver_zone_resolver-http_responses_formerr": 0,
+ "resolver_zone_resolver-http_responses_noerror": 0,
+ "resolver_zone_resolver-http_responses_notimp": 0,
+ "resolver_zone_resolver-http_responses_nxdomain": 2939404,
+ "resolver_zone_resolver-http_responses_refused": 0,
+ "resolver_zone_resolver-http_responses_servfail": 0,
+ "resolver_zone_resolver-http_responses_timedout": 4,
+ "resolver_zone_resolver-http_responses_unknown": 0,
+ "resolver_zone_resolver-stream_requests_addr": 0,
+ "resolver_zone_resolver-stream_requests_name": 638797,
+ "resolver_zone_resolver-stream_requests_srv": 0,
+ "resolver_zone_resolver-stream_responses_formerr": 0,
+ "resolver_zone_resolver-stream_responses_noerror": 433136,
+ "resolver_zone_resolver-stream_responses_notimp": 0,
+ "resolver_zone_resolver-stream_responses_nxdomain": 40022,
+ "resolver_zone_resolver-stream_responses_refused": 165639,
+ "resolver_zone_resolver-stream_responses_servfail": 0,
+ "resolver_zone_resolver-stream_responses_timedout": 0,
+ "resolver_zone_resolver-stream_responses_unknown": 0,
+ "ssl_handshake_timeout": 4,
+ "ssl_handshakes": 15804607,
+ "ssl_handshakes_failed": 37862,
+ "ssl_no_common_cipher": 24,
+ "ssl_no_common_protocol": 16648,
+ "ssl_peer_rejected_cert": 0,
+ "ssl_session_reuses": 13096060,
+ "ssl_verify_failures_expired_cert": 0,
+ "ssl_verify_failures_hostname_mismatch": 0,
+ "ssl_verify_failures_other": 0,
+ "ssl_verify_failures_no_cert": 0,
+ "ssl_verify_failures_revoked_cert": 0,
+ },
+ },
+ "fail on invalid data response": {
+ prepare: caseInvalidDataResponse,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ "fail on connection refused": {
+ prepare: caseConnectionRefused,
+ wantNumOfCharts: 0,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nginx, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := nginx.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ assert.Equalf(t, test.wantNumOfCharts, len(*nginx.Charts()), "number of charts")
+ ensureCollectedHasAllChartsDimsVarsIDs(t, nginx, mx)
+ }
+ })
+ }
+}
+
+func caseAPI8AllRequestsOK(t *testing.T) (*NginxPlus, func()) {
+ t.Helper()
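+	// serve the recorded API v8 JSON fixtures for every endpoint the collector queries;
+	// any other path falls through to the 404 handler below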
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathAPIVersions:
+ _, _ = w.Write(dataAPI8APIVersions)
+ case fmt.Sprintf(urlPathAPIEndpointsRoot, 8):
+ _, _ = w.Write(dataAPI8EndpointsRoot)
+ case fmt.Sprintf(urlPathAPIEndpointsHTTP, 8):
+ _, _ = w.Write(dataAPI8EndpointsHTTP)
+ case fmt.Sprintf(urlPathAPIEndpointsStream, 8):
+ _, _ = w.Write(dataAPI8EndpointsStream)
+ case fmt.Sprintf(urlPathAPIConnections, 8):
+ _, _ = w.Write(dataAPI8Connections)
+ case fmt.Sprintf(urlPathAPISSL, 8):
+ _, _ = w.Write(dataAPI8SSL)
+ case fmt.Sprintf(urlPathAPIHTTPRequests, 8):
+ _, _ = w.Write(dataAPI8HTTPRequests)
+ case fmt.Sprintf(urlPathAPIHTTPServerZones, 8):
+ _, _ = w.Write(dataAPI8HTTPServerZones)
+ case fmt.Sprintf(urlPathAPIHTTPLocationZones, 8):
+ _, _ = w.Write(dataAPI8HTTPLocationZones)
+ case fmt.Sprintf(urlPathAPIHTTPUpstreams, 8):
+ _, _ = w.Write(dataAPI8HTTPUpstreams)
+ case fmt.Sprintf(urlPathAPIHTTPCaches, 8):
+ _, _ = w.Write(dataAPI8HTTPCaches)
+ case fmt.Sprintf(urlPathAPIStreamServerZones, 8):
+ _, _ = w.Write(dataAPI8StreamServerZones)
+ case fmt.Sprintf(urlPathAPIStreamUpstreams, 8):
+ _, _ = w.Write(dataAPI8StreamUpstreams)
+ case fmt.Sprintf(urlPathAPIResolvers, 8):
+ _, _ = w.Write(dataAPI8Resolvers)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ _, _ = w.Write(data404)
+
+ }
+ }))
+ nginx := New()
+ nginx.URL = srv.URL
+ require.NoError(t, nginx.Init())
+
+ return nginx, srv.Close
+}
+
+func caseAPI8AllRequestsExceptStreamOK(t *testing.T) (*NginxPlus, func()) {
+ t.Helper()
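+	// same as caseAPI8AllRequestsOK, except the stream server zones and stream upstreams
+	// endpoints are not handled and therefore return 404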
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathAPIVersions:
+ _, _ = w.Write(dataAPI8APIVersions)
+ case fmt.Sprintf(urlPathAPIEndpointsRoot, 8):
+ _, _ = w.Write(dataAPI8EndpointsRoot)
+ case fmt.Sprintf(urlPathAPIEndpointsHTTP, 8):
+ _, _ = w.Write(dataAPI8EndpointsHTTP)
+ case fmt.Sprintf(urlPathAPIEndpointsStream, 8):
+ _, _ = w.Write(dataAPI8EndpointsStream)
+ case fmt.Sprintf(urlPathAPIConnections, 8):
+ _, _ = w.Write(dataAPI8Connections)
+ case fmt.Sprintf(urlPathAPISSL, 8):
+ _, _ = w.Write(dataAPI8SSL)
+ case fmt.Sprintf(urlPathAPIHTTPRequests, 8):
+ _, _ = w.Write(dataAPI8HTTPRequests)
+ case fmt.Sprintf(urlPathAPIHTTPServerZones, 8):
+ _, _ = w.Write(dataAPI8HTTPServerZones)
+ case fmt.Sprintf(urlPathAPIHTTPLocationZones, 8):
+ _, _ = w.Write(dataAPI8HTTPLocationZones)
+ case fmt.Sprintf(urlPathAPIHTTPUpstreams, 8):
+ _, _ = w.Write(dataAPI8HTTPUpstreams)
+ case fmt.Sprintf(urlPathAPIHTTPCaches, 8):
+ _, _ = w.Write(dataAPI8HTTPCaches)
+ case fmt.Sprintf(urlPathAPIResolvers, 8):
+ _, _ = w.Write(dataAPI8Resolvers)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ _, _ = w.Write(data404)
+
+ }
+ }))
+ nginx := New()
+ nginx.URL = srv.URL
+ require.NoError(t, nginx.Init())
+
+ return nginx, srv.Close
+}
+
+func caseInvalidDataResponse(t *testing.T) (*NginxPlus, func()) {
+ t.Helper()
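+	// respond with a payload that is not valid JSON so decoding fails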
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ nginx := New()
+ nginx.URL = srv.URL
+ require.NoError(t, nginx.Init())
+
+ return nginx, srv.Close
+}
+
+func caseConnectionRefused(t *testing.T) (*NginxPlus, func()) {
+ t.Helper()
+ nginx := New()
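+	// 127.0.0.1:65001 is expected to have no listener, so the connection is refused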
+ nginx.URL = "http://127.0.0.1:65001"
+ require.NoError(t, nginx.Init())
+
+ return nginx, func() {}
+}
+
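+// ensureCollectedHasAllChartsDimsVarsIDs asserts that every chart dimension and variable
+// (except those of the uptime chart) has a corresponding value in the collected metrics.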
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, n *NginxPlus, mx map[string]int64) {
+ for _, chart := range *n.Charts() {
+ if chart.ID == uptimeChart.ID {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/404.json b/src/go/plugin/go.d/modules/nginxplus/testdata/404.json
new file mode 100644
index 000000000..d2ed8c9a8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/404.json
@@ -0,0 +1,9 @@
+{
+ "error": {
+ "status": 404,
+ "text": "path not found",
+ "code": "PathNotFound"
+ },
+ "request_id": "f0d20aca461d043e787ebaa52f018cb2",
+ "href": "https://nginx.org/en/docs/http/ngx_http_api_module.html"
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/api_versions.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/api_versions.json
new file mode 100644
index 000000000..9ffc33973
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/api_versions.json
@@ -0,0 +1,10 @@
+[
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8
+]
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/connections.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/connections.json
new file mode 100644
index 000000000..490ca13fc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/connections.json
@@ -0,0 +1,6 @@
+{
+ "accepted": 6079,
+ "dropped": 0,
+ "active": 1,
+ "idle": 8
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_http.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_http.json
new file mode 100644
index 000000000..57c4e4aa2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_http.json
@@ -0,0 +1,10 @@
+[
+ "requests",
+ "server_zones",
+ "location_zones",
+ "caches",
+ "limit_conns",
+ "limit_reqs",
+ "keyvals",
+ "upstreams"
+]
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_root.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_root.json
new file mode 100644
index 000000000..b185c55f2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_root.json
@@ -0,0 +1,10 @@
+[
+ "nginx",
+ "processes",
+ "connections",
+ "slabs",
+ "http",
+ "stream",
+ "resolvers",
+ "ssl"
+]
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_stream.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_stream.json
new file mode 100644
index 000000000..0da092376
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/endpoints_stream.json
@@ -0,0 +1,6 @@
+[
+ "server_zones",
+ "limit_conns",
+ "keyvals",
+ "upstreams"
+]
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_caches.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_caches.json
new file mode 100644
index 000000000..dd2d03adf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_caches.json
@@ -0,0 +1,40 @@
+{
+ "cache_backend": {
+ "size": 0,
+ "cold": false,
+ "hit": {
+ "responses": 0,
+ "bytes": 0
+ },
+ "stale": {
+ "responses": 0,
+ "bytes": 0
+ },
+ "updating": {
+ "responses": 0,
+ "bytes": 0
+ },
+ "revalidated": {
+ "responses": 0,
+ "bytes": 0
+ },
+ "miss": {
+ "responses": 109,
+ "bytes": 67035,
+ "responses_written": 0,
+ "bytes_written": 0
+ },
+ "expired": {
+ "responses": 0,
+ "bytes": 0,
+ "responses_written": 0,
+ "bytes_written": 0
+ },
+ "bypass": {
+ "responses": 0,
+ "bytes": 0,
+ "responses_written": 0,
+ "bytes_written": 0
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_location_zones.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_location_zones.json
new file mode 100644
index 000000000..8812e6dff
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_location_zones.json
@@ -0,0 +1,35 @@
+{
+ "server_api": {
+ "requests": 9188,
+ "responses": {
+ "1xx": 0,
+ "2xx": 9187,
+ "3xx": 0,
+ "4xx": 1,
+ "5xx": 0,
+ "codes": {
+ "200": 9187,
+ "404": 1
+ },
+ "total": 9188
+ },
+ "discarded": 0,
+ "received": 1854427,
+ "sent": 4668778
+ },
+ "server_dashboard": {
+ "requests": 0,
+ "responses": {
+ "1xx": 0,
+ "2xx": 0,
+ "3xx": 0,
+ "4xx": 0,
+ "5xx": 0,
+ "codes": {},
+ "total": 0
+ },
+ "discarded": 0,
+ "received": 0,
+ "sent": 0
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_requests.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_requests.json
new file mode 100644
index 000000000..0c2a17503
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_requests.json
@@ -0,0 +1,4 @@
+{
+ "total": 8363,
+ "current": 1
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_server_zones.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_server_zones.json
new file mode 100644
index 000000000..c25389210
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_server_zones.json
@@ -0,0 +1,21 @@
+{
+ "server_backend": {
+ "processing": 1,
+ "requests": 8962,
+ "responses": {
+ "1xx": 0,
+ "2xx": 8960,
+ "3xx": 0,
+ "4xx": 1,
+ "5xx": 0,
+ "codes": {
+ "200": 8960,
+ "404": 1
+ },
+ "total": 8961
+ },
+ "discarded": 0,
+ "received": 1773834,
+ "sent": 4585734
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_upstreams.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_upstreams.json
new file mode 100644
index 000000000..0f7ba7135
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/http_upstreams.json
@@ -0,0 +1,76 @@
+{
+ "backend": {
+ "peers": [
+ {
+ "id": 0,
+ "server": "127.0.0.1:81",
+ "name": "127.0.0.1:81",
+ "backup": false,
+ "weight": 5,
+ "state": "unavail",
+ "active": 0,
+ "requests": 26,
+ "header_time": 0,
+ "response_time": 0,
+ "responses": {
+ "1xx": 0,
+ "2xx": 0,
+ "3xx": 0,
+ "4xx": 0,
+ "5xx": 0,
+ "codes": {},
+ "total": 0
+ },
+ "sent": 0,
+ "received": 0,
+ "fails": 26,
+ "unavail": 1,
+ "health_checks": {
+ "checks": 0,
+ "fails": 0,
+ "unhealthy": 0
+ },
+ "downtime": 1020702,
+ "downstart": "2022-11-18T19:17:09.258Z",
+ "selected": "2022-11-18T19:33:50Z"
+ },
+ {
+ "id": 1,
+ "server": "127.0.0.1:82",
+ "name": "127.0.0.1:82",
+ "backup": false,
+ "weight": 1,
+ "state": "up",
+ "active": 0,
+ "requests": 102,
+ "header_time": 1,
+ "response_time": 1,
+ "responses": {
+ "1xx": 0,
+ "2xx": 102,
+ "3xx": 0,
+ "4xx": 0,
+ "5xx": 0,
+ "codes": {
+ "200": 102
+ },
+ "total": 102
+ },
+ "sent": 9180,
+ "received": 86496,
+ "fails": 0,
+ "unavail": 0,
+ "health_checks": {
+ "checks": 0,
+ "fails": 0,
+ "unhealthy": 0
+ },
+ "downtime": 0,
+ "selected": "2022-11-18T19:34:00Z"
+ }
+ ],
+ "keepalive": 0,
+ "zombies": 0,
+ "zone": "http_backend"
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/nginx.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/nginx.json
new file mode 100644
index 000000000..4480c2bcc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/nginx.json
@@ -0,0 +1,10 @@
+{
+ "version": "1.21.6",
+ "build": "nginx-plus-r27-p1",
+ "address": "127.0.0.1",
+ "generation": 1,
+ "load_timestamp": "2022-11-19T14:38:38.676Z",
+ "timestamp": "2022-11-19T14:38:57.031Z",
+ "pid": 2254633,
+ "ppid": 2254629
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/resolvers.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/resolvers.json
new file mode 100644
index 000000000..ad66f5584
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/resolvers.json
@@ -0,0 +1,36 @@
+{
+ "resolver-http": {
+ "requests": {
+ "name": 0,
+ "srv": 2939408,
+ "addr": 0
+ },
+ "responses": {
+ "noerror": 0,
+ "formerr": 0,
+ "servfail": 0,
+ "nxdomain": 2939404,
+ "notimp": 0,
+ "refused": 0,
+ "timedout": 4,
+ "unknown": 0
+ }
+ },
+ "resolver-stream": {
+ "requests": {
+ "name": 638797,
+ "srv": 0,
+ "addr": 0
+ },
+ "responses": {
+ "noerror": 433136,
+ "formerr": 0,
+ "servfail": 0,
+ "nxdomain": 40022,
+ "notimp": 0,
+ "refused": 165639,
+ "timedout": 0,
+ "unknown": 0
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/ssl.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/ssl.json
new file mode 100644
index 000000000..2ca8a6a3e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/ssl.json
@@ -0,0 +1,16 @@
+{
+ "handshakes": 15804607,
+ "session_reuses": 13096060,
+ "handshakes_failed": 37862,
+ "no_common_protocol": 16648,
+ "no_common_cipher": 24,
+ "handshake_timeout": 4,
+ "peer_rejected_cert": 0,
+ "verify_failures": {
+ "no_cert": 0,
+ "expired_cert": 0,
+ "revoked_cert": 0,
+ "hostname_mismatch": 0,
+ "other": 0
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/stream_server_zones.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/stream_server_zones.json
new file mode 100644
index 000000000..0c7df7873
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/stream_server_zones.json
@@ -0,0 +1,15 @@
+{
+ "tcp_server": {
+ "processing": 0,
+ "connections": 0,
+ "sessions": {
+ "2xx": 0,
+ "4xx": 0,
+ "5xx": 0,
+ "total": 0
+ },
+ "discarded": 0,
+ "received": 0,
+ "sent": 0
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/stream_upstreams.json b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/stream_upstreams.json
new file mode 100644
index 000000000..707ad4db7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/api-8/stream_upstreams.json
@@ -0,0 +1,48 @@
+{
+ "stream_backend": {
+ "peers": [
+ {
+ "id": 0,
+ "server": "127.0.0.1:12346",
+ "name": "127.0.0.1:12346",
+ "backup": false,
+ "weight": 1,
+ "state": "up",
+ "active": 0,
+ "connections": 0,
+ "sent": 0,
+ "received": 0,
+ "fails": 0,
+ "unavail": 0,
+ "health_checks": {
+ "checks": 0,
+ "fails": 0,
+ "unhealthy": 0
+ },
+ "downtime": 0
+ },
+ {
+ "id": 1,
+ "server": "127.0.0.1:12347",
+ "name": "127.0.0.1:12347",
+ "backup": false,
+ "weight": 1,
+ "state": "up",
+ "active": 0,
+ "connections": 0,
+ "sent": 0,
+ "received": 0,
+ "fails": 0,
+ "unavail": 0,
+ "health_checks": {
+ "checks": 0,
+ "fails": 0,
+ "unhealthy": 0
+ },
+ "downtime": 0
+ }
+ ],
+ "zombies": 0,
+ "zone": "tcp_servers"
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/config.json b/src/go/plugin/go.d/modules/nginxplus/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/nginxplus/testdata/config.yaml b/src/go/plugin/go.d/modules/nginxplus/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxplus/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/nginxvts/README.md b/src/go/plugin/go.d/modules/nginxvts/README.md
new file mode 120000
index 000000000..e185fa81b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/README.md
@@ -0,0 +1 @@
+integrations/nginx_vts.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nginxvts/charts.go b/src/go/plugin/go.d/modules/nginxvts/charts.go
new file mode 100644
index 000000000..8dad7910f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/charts.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+var mainCharts = module.Charts{
+ {
+ ID: "requests",
+ Title: "Total requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "nginxvts.requests_total",
+ Dims: module.Dims{
+ {ID: "connections_requests", Name: "requests", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "active_connections",
+ Title: "Active connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "nginxvts.active_connections",
+ Dims: module.Dims{
+ {ID: "connections_active", Name: "active"},
+ },
+ },
+ {
+ ID: "connections",
+ Title: "Total connections",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "nginxvts.connections_total",
+ Dims: module.Dims{
+ {ID: "connections_reading", Name: "reading", Algo: module.Incremental},
+ {ID: "connections_writing", Name: "writing", Algo: module.Incremental},
+ {ID: "connections_waiting", Name: "waiting", Algo: module.Incremental},
+ {ID: "connections_accepted", Name: "accepted", Algo: module.Incremental},
+ {ID: "connections_handled", Name: "handled", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "nginxvts.uptime",
+ Dims: module.Dims{
+ {ID: "uptime", Name: "uptime"},
+ },
+ },
+}
+
+var sharedZonesCharts = module.Charts{
+ {
+ ID: "shared_memory_size",
+ Title: "Shared memory size",
+ Units: "bytes",
+ Fam: "shared memory",
+ Ctx: "nginxvts.shm_usage",
+ Dims: module.Dims{
+ {ID: "sharedzones_maxsize", Name: "max"},
+ {ID: "sharedzones_usedsize", Name: "used"},
+ },
+ },
+ {
+ ID: "shared_memory_used_node",
+		Title: "Number of nodes using shared memory",
+ Units: "nodes",
+ Fam: "shared memory",
+ Ctx: "nginxvts.shm_used_node",
+ Dims: module.Dims{
+ {ID: "sharedzones_usednode", Name: "used"},
+ },
+ },
+}
+
+var serverZonesCharts = module.Charts{
+ {
+ ID: "server_requests_total",
+ Title: "Total number of client requests",
+ Units: "requests/s",
+ Fam: "serverzones",
+ Ctx: "nginxvts.server_requests_total",
+ Dims: module.Dims{
+ {ID: "total_requestcounter", Name: "requests", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "server_responses_total",
+ Title: "Total number of responses by code class",
+ Units: "responses/s",
+ Fam: "serverzones",
+ Ctx: "nginxvts.server_responses_total",
+ Dims: module.Dims{
+ {ID: "total_responses_1xx", Name: "1xx", Algo: module.Incremental},
+ {ID: "total_responses_2xx", Name: "2xx", Algo: module.Incremental},
+ {ID: "total_responses_3xx", Name: "3xx", Algo: module.Incremental},
+ {ID: "total_responses_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: "total_responses_5xx", Name: "5xx", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "server_traffic_total",
+ Title: "Total amount of data transferred to and from the server",
+ Units: "bytes/s",
+ Fam: "serverzones",
+ Ctx: "nginxvts.server_traffic_total",
+ Dims: module.Dims{
+ {ID: "total_inbytes", Name: "in", Algo: module.Incremental},
+ {ID: "total_outbytes", Name: "out", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "server_cache_total",
+ Title: "Total server cache",
+ Units: "events/s",
+ Fam: "serverzones",
+ Ctx: "nginxvts.server_cache_total",
+ Dims: module.Dims{
+ {ID: "total_cache_miss", Name: "miss", Algo: module.Incremental},
+ {ID: "total_cache_bypass", Name: "bypass", Algo: module.Incremental},
+ {ID: "total_cache_expired", Name: "expired", Algo: module.Incremental},
+ {ID: "total_cache_stale", Name: "stale", Algo: module.Incremental},
+ {ID: "total_cache_updating", Name: "updating", Algo: module.Incremental},
+ {ID: "total_cache_revalidated", Name: "revalidated", Algo: module.Incremental},
+ {ID: "total_cache_hit", Name: "hit", Algo: module.Incremental},
+ {ID: "total_cache_scarce", Name: "scarce", Algo: module.Incremental},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/nginxvts/collect.go b/src/go/plugin/go.d/modules/nginxvts/collect.go
new file mode 100644
index 000000000..02fe7cb65
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/collect.go
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (vts *NginxVTS) collect() (map[string]int64, error) {
+	ms, err := vts.scrapeVTS()
+ if err != nil {
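+		// scrapeVTS has already logged the error as a warning; return no data without reporting a collection error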
+ return nil, nil
+ }
+
+ collected := make(map[string]interface{})
+ vts.collectMain(collected, ms)
+ vts.collectSharedZones(collected, ms)
+ vts.collectServerZones(collected, ms)
+
+ return stm.ToMap(collected), nil
+}
+
+func (vts *NginxVTS) collectMain(collected map[string]interface{}, ms *vtsMetrics) {
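+	// NowMsec and LoadMsec are millisecond timestamps reported by VTS; their difference divided by 1000 gives uptime in seconds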
+ collected["uptime"] = (ms.NowMsec - ms.LoadMsec) / 1000
+ collected["connections"] = ms.Connections
+}
+
+func (vts *NginxVTS) collectSharedZones(collected map[string]interface{}, ms *vtsMetrics) {
+ collected["sharedzones"] = ms.SharedZones
+}
+
+func (vts *NginxVTS) collectServerZones(collected map[string]interface{}, ms *vtsMetrics) {
+ if !ms.hasServerZones() {
+ return
+ }
+
+ // "*" means all servers
+ collected["total"] = ms.ServerZones["*"]
+}
+
+func (vts *NginxVTS) scrapeVTS() (*vtsMetrics, error) {
+ req, _ := web.NewHTTPRequest(vts.Request)
+
+ var total vtsMetrics
+
+ if err := vts.doOKDecode(req, &total); err != nil {
+ vts.Warning(err)
+ return nil, err
+ }
+ return &total, nil
+}
+
+func (vts *NginxVTS) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := vts.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
+func closeBody(resp *http.Response) {
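+	// drain the body before closing so the underlying TCP connection can be reused by the HTTP client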
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxvts/config_schema.json b/src/go/plugin/go.d/modules/nginxvts/config_schema.json
new file mode 100644
index 000000000..ef6a1d237
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/config_schema.json
@@ -0,0 +1,182 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NGINX VTS module collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the NGINX VTS [module status page](https://github.com/vozlt/nginx-module-vts#readme).",
+ "type": "string",
+ "default": "http://localhost/status/format/json"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nginxvts/init.go b/src/go/plugin/go.d/modules/nginxvts/init.go
new file mode 100644
index 000000000..2e738e4d1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/init.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (vts *NginxVTS) validateConfig() error {
+ if vts.URL == "" {
+ return errors.New("URL not set")
+ }
+
+ if _, err := web.NewHTTPRequest(vts.Request); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (vts *NginxVTS) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(vts.Client)
+}
+
+func (vts *NginxVTS) initCharts() (*module.Charts, error) {
+ charts := module.Charts{}
+
+ if err := charts.Add(*mainCharts.Copy()...); err != nil {
+ return nil, err
+ }
+
+ if err := charts.Add(*sharedZonesCharts.Copy()...); err != nil {
+ return nil, err
+ }
+
+ if err := charts.Add(*serverZonesCharts.Copy()...); err != nil {
+ return nil, err
+ }
+
+ if len(charts) == 0 {
+ return nil, errors.New("zero charts")
+ }
+ return &charts, nil
+}
diff --git a/src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md b/src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md
new file mode 100644
index 000000000..59918b39e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/integrations/nginx_vts.md
@@ -0,0 +1,268 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nginxvts/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nginxvts/metadata.yaml"
+sidebar_label: "NGINX VTS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NGINX VTS
+
+
+<img src="https://netdata.cloud/img/nginx.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: nginxvts
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors NGINX servers with the [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).
+
+
+It sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis),
+which is a built-in location that provides metrics about the NGINX VTS server.
+
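+You can check that the status endpoint responds before pointing the collector at it (the path below is the collector's default and may differ in your setup):
+
+```bash
+curl http://127.0.0.1/status/format/json
+```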
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects NGINX instances running on localhost.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NGINX VTS instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nginxvts.requests_total | requests | requests/s |
+| nginxvts.active_connections | active | connections |
+| nginxvts.connections_total | reading, writing, waiting, accepted, handled | connections/s |
+| nginxvts.uptime | uptime | seconds |
+| nginxvts.shm_usage | max, used | bytes |
+| nginxvts.shm_used_node | used | nodes |
+| nginxvts.server_requests_total | requests | requests/s |
+| nginxvts.server_responses_total | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |
+| nginxvts.server_traffic_total | in, out | bytes/s |
+| nginxvts.server_cache_total | miss, bypass, expired, stale, updating, revalidated, hit, scarce | events/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure nginx-vts module
+
+To configure nginx-vts, see the [module installation instructions](https://github.com/vozlt/nginx-module-vts#installation).
+
+
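+A minimal sketch of the required NGINX configuration, assuming the module is already installed (directive names come from the nginx-module-vts documentation; adjust the `listen` port and location path to your deployment):
+
+```text
+http {
+    vhost_traffic_status_zone;
+
+    server {
+        listen 80;
+
+        location /status {
+            vhost_traffic_status_display;
+            vhost_traffic_status_display_format html;
+        }
+    }
+}
+```
+
+With this in place, the JSON output the collector consumes should be available at `/status/format/json`.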
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/nginxvts.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/nginxvts.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/status/format/json | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/status/format/json
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+    url: http://127.0.0.1/status/format/json
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1/status/format/json
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/status/format/json
+
+ - name: remote
+ url: http://192.0.2.1/status/format/json
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `nginxvts` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m nginxvts
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `nginxvts` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep nginxvts
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
+
+```bash
+grep nginxvts /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep nginxvts
+```
+
+
diff --git a/src/go/plugin/go.d/modules/nginxvts/metadata.yaml b/src/go/plugin/go.d/modules/nginxvts/metadata.yaml
new file mode 100644
index 000000000..bb602863b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/metadata.yaml
@@ -0,0 +1,264 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-nginxvts
+ plugin_name: go.d.plugin
+ module_name: nginxvts
+ monitored_instance:
+ name: NGINX VTS
+ link: https://www.nginx.com/
+ icon_filename: nginx.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - webserver
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: go.d.plugin
+ module_name: weblog
+ - plugin_name: go.d.plugin
+ module_name: httpcheck
+ - plugin_name: apps.plugin
+ module_name: apps
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+        This collector monitors NGINX servers with the [virtual host traffic status module](https://github.com/vozlt/nginx-module-vts).
+ method_description: |
+ It sends HTTP requests to the NGINX VTS location [status](https://github.com/vozlt/nginx-module-vts#synopsis),
+ which is a built-in location that provides metrics about the NGINX VTS server.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects NGINX instances running on localhost.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Configure nginx-vts module
+ description: |
+          To configure nginx-vts, see the [module installation instructions](https://github.com/vozlt/nginx-module-vts#installation).
+ configuration:
+ file:
+ name: go.d/nginxvts.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1/status/format/json
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/status/format/json
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/status/format/json
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: |
+ Do not validate server certificate chain and hostname.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1/status/format/json
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/status/format/json
+
+ - name: remote
+ url: http://192.0.2.1/status/format/json
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: nginxvts.requests_total
+ description: Total requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: nginxvts.active_connections
+ description: Active connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: nginxvts.connections_total
+ description: Total connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: reading
+ - name: writing
+ - name: waiting
+ - name: accepted
+ - name: handled
+ - name: nginxvts.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: nginxvts.shm_usage
+ description: Shared memory size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: max
+ - name: used
+ - name: nginxvts.shm_used_node
+ description: Number of nodes using shared memory
+ unit: nodes
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: nginxvts.server_requests_total
+ description: Total number of client requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: nginxvts.server_responses_total
+ description: Total number of responses by code class
+ unit: responses/s
+ chart_type: line
+ dimensions:
+ - name: 1xx
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: nginxvts.server_traffic_total
+ description: Total amount of data transferred to and from the server
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: out
+ - name: nginxvts.server_cache_total
+ description: Total server cache
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: bypass
+ - name: expired
+ - name: stale
+ - name: updating
+ - name: revalidated
+ - name: hit
+ - name: scarce
diff --git a/src/go/plugin/go.d/modules/nginxvts/metrics.go b/src/go/plugin/go.d/modules/nginxvts/metrics.go
new file mode 100644
index 000000000..2674d4bbe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/metrics.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+// NginxVTS metrics: https://github.com/vozlt/nginx-module-vts#json
+
+type vtsMetrics struct {
+ // HostName string
+ // NginxVersion string
+ LoadMsec int64
+ NowMsec int64
+ Uptime int64
+ Connections struct {
+ Active int64 `stm:"active"`
+ Reading int64 `stm:"reading"`
+ Writing int64 `stm:"writing"`
+ Waiting int64 `stm:"waiting"`
+ Accepted int64 `stm:"accepted"`
+ Handled int64 `stm:"handled"`
+ Requests int64 `stm:"requests"`
+ } `stm:"connections"`
+ SharedZones struct {
+ // Name string
+ MaxSize int64 `stm:"maxsize"`
+ UsedSize int64 `stm:"usedsize"`
+ UsedNode int64 `stm:"usednode"`
+ }
+ ServerZones map[string]Server
+}
+
+func (m vtsMetrics) hasServerZones() bool { return m.ServerZones != nil }
+
+// Server is for total Nginx server
+type Server struct {
+ RequestCounter int64 `stm:"requestcounter"`
+ InBytes int64 `stm:"inbytes"`
+ OutBytes int64 `stm:"outbytes"`
+ Responses struct {
+ Resp1xx int64 `stm:"responses_1xx" json:"1xx"`
+ Resp2xx int64 `stm:"responses_2xx" json:"2xx"`
+ Resp3xx int64 `stm:"responses_3xx" json:"3xx"`
+ Resp4xx int64 `stm:"responses_4xx" json:"4xx"`
+ Resp5xx int64 `stm:"responses_5xx" json:"5xx"`
+ Miss int64 `stm:"cache_miss"`
+ Bypass int64 `stm:"cache_bypass"`
+ Expired int64 `stm:"cache_expired"`
+ Stale int64 `stm:"cache_stale"`
+ Updating int64 `stm:"cache_updating"`
+ Revalidated int64 `stm:"cache_revalidated"`
+ Hit int64 `stm:"cache_hit"`
+ Scarce int64 `stm:"cache_scarce"`
+ } `stm:""`
+}
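For orientation, the following standalone sketch shows how a VTS status payload maps onto the structs above. It is illustrative only and not part of this change: the trimmed `vtsStatus` type and the assumption that it runs from the module directory (so `testdata/vts-v0.1.18.json` resolves) are additions for this example, but the uptime arithmetic is consistent with the 319-second value expected by the unit tests below.

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// vtsStatus is a trimmed, illustrative copy of the vtsMetrics type above.
// encoding/json matches "connections" and "serverZones" case-insensitively,
// so no extra json tags are needed for these fields.
type vtsStatus struct {
	LoadMsec    int64
	NowMsec     int64
	Connections struct {
		Active   int64
		Requests int64
	}
	ServerZones map[string]struct {
		RequestCounter int64
		InBytes        int64
		OutBytes       int64
	}
}

func main() {
	// sample payload shipped with this change as testdata/vts-v0.1.18.json
	raw, err := os.ReadFile("testdata/vts-v0.1.18.json")
	if err != nil {
		panic(err)
	}

	var st vtsStatus
	if err := json.Unmarshal(raw, &st); err != nil {
		panic(err)
	}

	// uptime is derived from the two millisecond timestamps:
	// (1606490116734 - 1606489796895) / 1000 = 319 seconds
	fmt.Println("uptime seconds:", (st.NowMsec-st.LoadMsec)/1000)
	fmt.Println("active connections:", st.Connections.Active)
	for zone, s := range st.ServerZones {
		fmt.Printf("zone %q: requests=%d in=%d out=%d\n", zone, s.RequestCounter, s.InBytes, s.OutBytes)
	}
}
```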
diff --git a/src/go/plugin/go.d/modules/nginxvts/nginxvts.go b/src/go/plugin/go.d/modules/nginxvts/nginxvts.go
new file mode 100644
index 000000000..56868ff0a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/nginxvts.go
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("nginxvts", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 1,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *NginxVTS {
+ return &NginxVTS{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://localhost/status/format/json",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type NginxVTS struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (vts *NginxVTS) Configuration() any {
+ return vts.Config
+}
+
+func (vts *NginxVTS) Cleanup() {
+ if vts.httpClient == nil {
+ return
+ }
+ vts.httpClient.CloseIdleConnections()
+}
+
+func (vts *NginxVTS) Init() error {
+ err := vts.validateConfig()
+ if err != nil {
+ vts.Errorf("check configuration: %v", err)
+ return err
+ }
+
+ httpClient, err := vts.initHTTPClient()
+ if err != nil {
+ vts.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ vts.httpClient = httpClient
+
+ charts, err := vts.initCharts()
+ if err != nil {
+ vts.Errorf("init charts: %v", err)
+ return err
+ }
+ vts.charts = charts
+
+ return nil
+}
+
+func (vts *NginxVTS) Check() error {
+ mx, err := vts.collect()
+ if err != nil {
+ vts.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (vts *NginxVTS) Charts() *module.Charts {
+ return vts.charts
+}
+
+func (vts *NginxVTS) Collect() map[string]int64 {
+ mx, err := vts.collect()
+ if err != nil {
+ vts.Error(err)
+ return nil
+ }
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
diff --git a/src/go/plugin/go.d/modules/nginxvts/nginxvts_test.go b/src/go/plugin/go.d/modules/nginxvts/nginxvts_test.go
new file mode 100644
index 000000000..f4c110372
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/nginxvts_test.go
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nginxvts
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer0118Response, _ = os.ReadFile("testdata/vts-v0.1.18.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer0118Response": dataVer0118Response,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestNginxVTS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NginxVTS{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNginxVTS_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantNumOfCharts int
+ wantFail bool
+ }{
+ "default": {
+ wantNumOfCharts: numOfCharts(
+ mainCharts,
+ sharedZonesCharts,
+ serverZonesCharts,
+ ),
+ config: New().Config,
+ },
+ "URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ }},
+ },
+ "invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ }},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ es := New()
+ es.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, es.Init())
+ } else {
+ assert.NoError(t, es.Init())
+ assert.Equal(t, test.wantNumOfCharts, len(*es.Charts()))
+ }
+ })
+ }
+}
+
+func TestNginxVTS_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (vts *NginxVTS, cleanup func())
+ wantFail bool
+ }{
+ "valid data": {prepare: prepareNginxVTSValidData},
+ "invalid data": {prepare: prepareNginxVTSInvalidData, wantFail: true},
+ "404": {prepare: prepareNginxVTS404, wantFail: true},
+ "connection refused": {prepare: prepareNginxVTSConnectionRefused, wantFail: true},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ vts, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, vts.Check())
+ } else {
+ assert.NoError(t, vts.Check())
+ }
+ })
+ }
+}
+
+func TestNginxVTS_Charts(t *testing.T) {
+ assert.Nil(t, New().Charts())
+}
+
+func TestNginxVTS_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestNginxVTS_Collect(t *testing.T) {
+ tests := map[string]struct {
+ // prepare func() *NginxVTS
+ prepare func(t *testing.T) (vts *NginxVTS, cleanup func())
+ wantCollected map[string]int64
+ checkCharts bool
+ }{
+ "right metrics": {
+ prepare: prepareNginxVTSValidData,
+ wantCollected: map[string]int64{
+ // Nginx running time
+ "uptime": 319,
+ // Nginx connections
+ "connections_active": 2,
+ "connections_reading": 0,
+ "connections_writing": 1,
+ "connections_waiting": 1,
+ "connections_accepted": 12,
+ "connections_handled": 12,
+ "connections_requests": 17,
+ // Nginx shared memory
+ "sharedzones_maxsize": 1048575,
+ "sharedzones_usedsize": 45799,
+ "sharedzones_usednode": 13,
+ // Nginx traffic
+ "total_requestcounter": 2,
+ "total_inbytes": 156,
+ "total_outbytes": 692,
+ // Nginx response code
+ "total_responses_1xx": 1,
+ "total_responses_2xx": 2,
+ "total_responses_3xx": 3,
+ "total_responses_4xx": 4,
+ "total_responses_5xx": 5,
+ // Nginx cache
+ "total_cache_miss": 2,
+ "total_cache_bypass": 4,
+ "total_cache_expired": 6,
+ "total_cache_stale": 8,
+ "total_cache_updating": 10,
+ "total_cache_revalidated": 12,
+ "total_cache_hit": 14,
+ "total_cache_scarce": 16,
+ },
+ checkCharts: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ vts, cleanup := test.prepare(t)
+ defer cleanup()
+
+ collected := vts.Collect()
+
+ assert.Equal(t, test.wantCollected, collected)
+ if test.checkCharts {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, vts, collected)
+ }
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, vts *NginxVTS, collected map[string]int64) {
+ for _, chart := range *vts.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareNginxVTS(t *testing.T, createNginxVTS func() *NginxVTS) (vts *NginxVTS, cleanup func()) {
+ t.Helper()
+ vts = createNginxVTS()
+ srv := prepareNginxVTSEndpoint()
+ vts.URL = srv.URL
+
+ require.NoError(t, vts.Init())
+
+ return vts, srv.Close
+}
+
+func prepareNginxVTSValidData(t *testing.T) (vts *NginxVTS, cleanup func()) {
+ return prepareNginxVTS(t, New)
+}
+
+func prepareNginxVTSInvalidData(t *testing.T) (*NginxVTS, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ vts := New()
+ vts.URL = srv.URL
+ require.NoError(t, vts.Init())
+
+ return vts, srv.Close
+}
+
+func prepareNginxVTS404(t *testing.T) (*NginxVTS, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ vts := New()
+ vts.URL = srv.URL
+ require.NoError(t, vts.Init())
+
+ return vts, srv.Close
+}
+
+func prepareNginxVTSConnectionRefused(t *testing.T) (*NginxVTS, func()) {
+ t.Helper()
+ vts := New()
+ vts.URL = "http://127.0.0.1:18080"
+ require.NoError(t, vts.Init())
+
+ return vts, func() {}
+}
+
+func prepareNginxVTSEndpoint() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/":
+ _, _ = w.Write(dataVer0118Response)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
+
+func numOfCharts(charts ...module.Charts) (num int) {
+ for _, v := range charts {
+ num += len(v)
+ }
+ return num
+}
diff --git a/src/go/plugin/go.d/modules/nginxvts/testdata/config.json b/src/go/plugin/go.d/modules/nginxvts/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/nginxvts/testdata/config.yaml b/src/go/plugin/go.d/modules/nginxvts/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/nginxvts/testdata/vts-v0.1.18.json b/src/go/plugin/go.d/modules/nginxvts/testdata/vts-v0.1.18.json
new file mode 100644
index 000000000..cdc331d5f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nginxvts/testdata/vts-v0.1.18.json
@@ -0,0 +1,44 @@
+{
+ "hostName": "Web",
+ "nginxVersion": "1.18.0",
+ "loadMsec": 1606489796895,
+ "nowMsec": 1606490116734,
+ "connections": {
+ "active": 2,
+ "reading": 0,
+ "writing": 1,
+ "waiting": 1,
+ "accepted": 12,
+ "handled": 12,
+ "requests": 17
+ },
+ "sharedZones": {
+ "name": "ngx_http_vhost_traffic_status",
+ "maxSize": 1048575,
+ "usedSize": 45799,
+ "usedNode": 13
+ },
+ "serverZones": {
+ "*": {
+ "requestCounter": 2,
+ "inBytes": 156,
+ "outBytes": 692,
+ "responses": {
+ "1xx": 1,
+ "2xx": 2,
+ "3xx": 3,
+ "4xx": 4,
+ "5xx": 5,
+ "miss": 2,
+ "bypass": 4,
+ "expired": 6,
+ "stale": 8,
+ "updating": 10,
+ "revalidated": 12,
+ "hit": 14,
+ "scarce": 16
+ }
+ }
+ }
+}
+
diff --git a/src/go/plugin/go.d/modules/nsd/README.md b/src/go/plugin/go.d/modules/nsd/README.md
new file mode 120000
index 000000000..a5cb8c98b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/README.md
@@ -0,0 +1 @@
+integrations/nsd.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nsd/charts.go b/src/go/plugin/go.d/modules/nsd/charts.go
new file mode 100644
index 000000000..aed4f3098
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/charts.go
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nsd
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioQueries = module.Priority + iota
+ prioQueriesByType
+ prioQueriesByOpcode
+ prioQueriesByClass
+ prioQueriesByProtocol
+
+ prioAnswersByRcode
+
+ prioErrors
+
+ prioDrops
+
+ prioZones
+ prioZoneTransfersRequests
+ prioZoneTransferMemory
+
+ prioDatabaseSize
+
+ prioUptime
+)
+
+var charts = module.Charts{
+ queriesChart.Copy(),
+ queriesByTypeChart.Copy(),
+ queriesByOpcodeChart.Copy(),
+ queriesByClassChart.Copy(),
+ queriesByProtocolChart.Copy(),
+
+ answersByRcodeChart.Copy(),
+
+ zonesChart.Copy(),
+ zoneTransfersRequestsChart.Copy(),
+ zoneTransferMemoryChart.Copy(),
+
+ databaseSizeChart.Copy(),
+
+ errorsChart.Copy(),
+
+ dropsChart.Copy(),
+
+ uptimeChart.Copy(),
+}
+
+var (
+ queriesChart = module.Chart{
+ ID: "queries",
+ Title: "Queries",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "nsd.queries",
+ Priority: prioQueries,
+ Dims: module.Dims{
+ {ID: "num.queries", Name: "queries", Algo: module.Incremental},
+ },
+ }
+ queriesByTypeChart = func() module.Chart {
+ chart := module.Chart{
+ ID: "queries_by_type",
+ Title: "Queries Type",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "nsd.queries_by_type",
+ Priority: prioQueriesByType,
+ Type: module.Stacked,
+ }
+ for _, v := range queryTypes {
+ name := v
+ if s, ok := queryTypeNumberMap[v]; ok {
+ name = s
+ }
+ chart.Dims = append(chart.Dims, &module.Dim{
+ ID: "num.type." + v,
+ Name: name,
+ Algo: module.Incremental,
+ })
+ }
+ return chart
+ }()
+ queriesByOpcodeChart = func() module.Chart {
+ chart := module.Chart{
+ ID: "queries_by_opcode",
+ Title: "Queries Opcode",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "nsd.queries_by_opcode",
+ Priority: prioQueriesByOpcode,
+ Type: module.Stacked,
+ }
+ for _, v := range queryOpcodes {
+ chart.Dims = append(chart.Dims, &module.Dim{
+ ID: "num.opcode." + v,
+ Name: v,
+ Algo: module.Incremental,
+ })
+ }
+ return chart
+ }()
+ queriesByClassChart = func() module.Chart {
+ chart := module.Chart{
+ ID: "queries_by_class",
+ Title: "Queries Class",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "nsd.queries_by_class",
+ Priority: prioQueriesByClass,
+ Type: module.Stacked,
+ }
+ for _, v := range queryClasses {
+ chart.Dims = append(chart.Dims, &module.Dim{
+ ID: "num.class." + v,
+ Name: v,
+ Algo: module.Incremental,
+ })
+ }
+ return chart
+ }()
+ queriesByProtocolChart = module.Chart{
+ ID: "queries_by_protocol",
+ Title: "Queries Protocol",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "nsd.queries_by_protocol",
+ Priority: prioQueriesByProtocol,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "num.udp", Name: "udp", Algo: module.Incremental},
+ {ID: "num.udp6", Name: "udp6", Algo: module.Incremental},
+ {ID: "num.tcp", Name: "tcp", Algo: module.Incremental},
+ {ID: "num.tcp6", Name: "tcp6", Algo: module.Incremental},
+ {ID: "num.tls", Name: "tls", Algo: module.Incremental},
+ {ID: "num.tls6", Name: "tls6", Algo: module.Incremental},
+ },
+ }
+
+ answersByRcodeChart = func() module.Chart {
+ chart := module.Chart{
+ ID: "answers_by_rcode",
+ Title: "Answers Rcode",
+ Units: "answers/s",
+ Fam: "answers",
+ Ctx: "nsd.answers_by_rcode",
+ Priority: prioAnswersByRcode,
+ Type: module.Stacked,
+ }
+ for _, v := range answerRcodes {
+ chart.Dims = append(chart.Dims, &module.Dim{
+ ID: "num.rcode." + v,
+ Name: v,
+ Algo: module.Incremental,
+ })
+ }
+ return chart
+ }()
+
+ errorsChart = module.Chart{
+ ID: "errors",
+ Title: "Errors",
+ Units: "errors/s",
+ Fam: "errors",
+ Ctx: "nsd.errors",
+ Priority: prioErrors,
+ Dims: module.Dims{
+ {ID: "num.rxerr", Name: "query", Algo: module.Incremental},
+ {ID: "num.txerr", Name: "answer", Mul: -1, Algo: module.Incremental},
+ },
+ }
+
+ dropsChart = module.Chart{
+ ID: "drops",
+ Title: "Drops",
+ Units: "drops/s",
+ Fam: "drops",
+ Ctx: "nsd.drops",
+ Priority: prioDrops,
+ Dims: module.Dims{
+ {ID: "num.dropped", Name: "query", Algo: module.Incremental},
+ },
+ }
+
+ zonesChart = module.Chart{
+ ID: "zones",
+ Title: "Zones",
+ Units: "zones",
+ Fam: "zones",
+ Ctx: "nsd.zones",
+ Priority: prioZones,
+ Dims: module.Dims{
+ {ID: "zone.master", Name: "master"},
+ {ID: "zone.slave", Name: "slave"},
+ },
+ }
+ zoneTransfersRequestsChart = module.Chart{
+ ID: "zone_transfers_requests",
+ Title: "Zone Transfers",
+ Units: "requests/s",
+ Fam: "zones",
+ Ctx: "nsd.zone_transfers_requests",
+ Priority: prioZoneTransfersRequests,
+ Dims: module.Dims{
+ {ID: "num.raxfr", Name: "AXFR", Algo: module.Incremental},
+ {ID: "num.rixfr", Name: "IXFR", Algo: module.Incremental},
+ },
+ }
+ zoneTransferMemoryChart = module.Chart{
+ ID: "zone_transfer_memory",
+ Title: "Zone Transfer Memory",
+ Units: "bytes",
+ Fam: "zones",
+ Ctx: "nsd.zone_transfer_memory",
+ Priority: prioZoneTransferMemory,
+ Dims: module.Dims{
+ {ID: "size.xfrd.mem", Name: "used"},
+ },
+ }
+
+ databaseSizeChart = module.Chart{
+ ID: "database_size",
+ Title: "Database Size",
+ Units: "bytes",
+ Fam: "database",
+ Ctx: "nsd.database_size",
+ Priority: prioDatabaseSize,
+ Dims: module.Dims{
+ {ID: "size.db.disk", Name: "disk"},
+ {ID: "size.db.mem", Name: "mem"},
+ },
+ }
+
+ uptimeChart = module.Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "nsd.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "time.boot", Name: "uptime"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/nsd/collect.go b/src/go/plugin/go.d/modules/nsd/collect.go
new file mode 100644
index 000000000..d07341df3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/collect.go
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nsd
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "strconv"
+ "strings"
+)
+
+func (n *Nsd) collect() (map[string]int64, error) {
+ stats, err := n.exec.stats()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(stats) == 0 {
+ return nil, errors.New("empty stats response")
+ }
+
+ mx := make(map[string]int64)
+
+ sc := bufio.NewScanner(bytes.NewReader(stats))
+
+ for sc.Scan() {
+ n.collectStatsLine(mx, sc.Text())
+ }
+
+ if len(mx) == 0 {
+ return nil, errors.New("unexpected stats response: no metrics found")
+ }
+
+ addMissingMetrics(mx, "num.rcode.", answerRcodes)
+ addMissingMetrics(mx, "num.opcode.", queryOpcodes)
+ addMissingMetrics(mx, "num.class.", queryClasses)
+ addMissingMetrics(mx, "num.type.", queryTypes)
+
+ return mx, nil
+}
+
+func (n *Nsd) collectStatsLine(mx map[string]int64, line string) {
+ if line = strings.TrimSpace(line); line == "" {
+ return
+ }
+
+ key, value, ok := strings.Cut(line, "=")
+ if !ok {
+ n.Debugf("invalid line in stats: '%s'", line)
+ return
+ }
+
+ var v int64
+ var f float64
+ var err error
+
+ switch key {
+ case "time.boot":
+ f, err = strconv.ParseFloat(value, 64)
+ v = int64(f)
+ default:
+ v, err = strconv.ParseInt(value, 10, 64)
+ }
+
+ if err != nil {
+ n.Debugf("invalid value in stats line '%s': '%s'", line, value)
+ return
+ }
+
+ mx[key] = v
+}
+
+func addMissingMetrics(mx map[string]int64, prefix string, values []string) {
+ for _, v := range values {
+ k := prefix + v
+ if _, ok := mx[k]; !ok {
+ mx[k] = 0
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nsd/config_schema.json b/src/go/plugin/go.d/modules/nsd/config_schema.json
new file mode 100644
index 000000000..d49107c71
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NSD collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nsd/exec.go b/src/go/plugin/go.d/modules/nsd/exec.go
new file mode 100644
index 000000000..b05082f3c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/exec.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nsd
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+type nsdControlBinary interface {
+ stats() ([]byte, error)
+}
+
+func newNsdControlExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *nsdControlExec {
+ return &nsdControlExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ }
+}
+
+type nsdControlExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+}
+
+func (e *nsdControlExec) stats() ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.ndsudoPath, "nsd-control-stats")
+
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/nsd/init.go b/src/go/plugin/go.d/modules/nsd/init.go
new file mode 100644
index 000000000..63843caba
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/init.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nsd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (n *Nsd) initNsdControlExec() (nsdControlBinary, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ nsdControl := newNsdControlExec(ndsudoPath, n.Timeout.Duration(), n.Logger)
+
+ return nsdControl, nil
+}
diff --git a/src/go/plugin/go.d/modules/nsd/integrations/nsd.md b/src/go/plugin/go.d/modules/nsd/integrations/nsd.md
new file mode 100644
index 000000000..745b872d7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/integrations/nsd.md
@@ -0,0 +1,203 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nsd/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nsd/metadata.yaml"
+sidebar_label: "NSD"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NSD
+
+
+<img src="https://netdata.cloud/img/nsd.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: nsd
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors NSD statistics like queries, zones, protocols, query types and more. It relies on the [`nsd-control`](https://nsd.docs.nlnetlabs.nl/en/latest/manpages/nsd-control.html) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+Executed commands:
+- `nsd-control stats_noreset`
+
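+The command prints plain `key=value` counters (a full sample ships with this module as `testdata/stats.txt`). As a rough, hypothetical illustration of how the collector turns such lines into metrics (mirroring the parsing in `collect.go`, where `time.boot` is the only float value):
+
+```go
+package main
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+func main() {
+	// sample lines copied from the module's testdata/stats.txt
+	lines := []string{"num.queries=1", "num.udp=1", "time.boot=556.488415"}
+
+	mx := make(map[string]int64)
+	for _, line := range lines {
+		key, value, ok := strings.Cut(line, "=")
+		if !ok {
+			continue
+		}
+		// time.boot is reported as a float, everything else as an integer
+		if key == "time.boot" {
+			if f, err := strconv.ParseFloat(value, 64); err == nil {
+				mx[key] = int64(f)
+			}
+			continue
+		}
+		if v, err := strconv.ParseInt(value, 10, 64); err == nil {
+			mx[key] = v
+		}
+	}
+
+	fmt.Println(mx) // map[num.queries:1 num.udp:1 time.boot:556]
+}
+```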
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NSD instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nsd.queries | queries | queries/s |
+| nsd.queries_by_type | A, NS, MD, MF, CNAME, SOA, MB, MG, MR, NULL, WKS, PTR, HINFO, MINFO, MX, TXT, RP, AFSDB, X25, ISDN, RT, NSAP, SIG, KEY, PX, AAAA, LOC, NXT, SRV, NAPTR, KX, CERT, DNAME, OPT, APL, DS, SSHFP, IPSECKEY, RRSIG, NSEC, DNSKEY, DHCID, NSEC3, NSEC3PARAM, TLSA, SMIMEA, CDS, CDNSKEY, OPENPGPKEY, CSYNC, ZONEMD, SVCB, HTTPS, SPF, NID, L32, L64, LP, EUI48, EUI64, URI, CAA, AVC, DLV, IXFR, AXFR, MAILB, MAILA, ANY | queries/s |
+| nsd.queries_by_opcode | QUERY, IQUERY, STATUS, NOTIFY, UPDATE, OTHER | queries/s |
+| nsd.queries_by_class | IN, CS, CH, HS | queries/s |
+| nsd.queries_by_protocol | udp, udp6, tcp, tcp6, tls, tls6 | queries/s |
+| nsd.answers_by_rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN, YXRRSET, NXRRSET, NOTAUTH, NOTZONE, RCODE11, RCODE12, RCODE13, RCODE14, RCODE15, BADVERS | answers/s |
+| nsd.errors | query, answer | errors/s |
+| nsd.drops | query | drops/s |
+| nsd.zones | master, slave | zones |
+| nsd.zone_transfers_requests | AXFR, IXFR | requests/s |
+| nsd.zone_transfer_memory | used | bytes |
+| nsd.database_size | disk, mem | bytes |
+| nsd.uptime | uptime | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/nsd.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/nsd.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | nsd-control binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: nsd
+ update_every: 5 # Collect NSD statistics every 5 seconds
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `nsd` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m nsd
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `nsd` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep nsd
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
+
+```bash
+grep nsd /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep nsd
+```
+
+
diff --git a/src/go/plugin/go.d/modules/nsd/metadata.yaml b/src/go/plugin/go.d/modules/nsd/metadata.yaml
new file mode 100644
index 000000000..a31aa38af
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/metadata.yaml
@@ -0,0 +1,272 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-nsd
+ plugin_name: go.d.plugin
+ module_name: nsd
+ monitored_instance:
+ name: NSD
+ link: "https://nsd.docs.nlnetlabs.nl/en/latest"
+ icon_filename: 'nsd.svg'
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - nsd
+ - dns
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ This collector monitors NSD statistics like queries, zones, protocols, query types and more.
+ It relies on the [`nsd-control`](https://nsd.docs.nlnetlabs.nl/en/latest/manpages/nsd-control.html) CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+ Executed commands:
+
+ - `nsd-control stats_noreset`
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/nsd.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: nsd-control binary execution timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: nsd
+ update_every: 5 # Collect NSD statistics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: nsd.queries
+ description: Queries
+ unit: 'queries/s'
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: nsd.queries_by_type
+ description: Queries Type
+ unit: 'queries/s'
+ chart_type: stacked
+ dimensions:
+ - name: "A"
+ - name: "NS"
+ - name: "MD"
+ - name: "MF"
+ - name: "CNAME"
+ - name: "SOA"
+ - name: "MB"
+ - name: "MG"
+ - name: "MR"
+ - name: "NULL"
+ - name: "WKS"
+ - name: "PTR"
+ - name: "HINFO"
+ - name: "MINFO"
+ - name: "MX"
+ - name: "TXT"
+ - name: "RP"
+ - name: "AFSDB"
+ - name: "X25"
+ - name: "ISDN"
+ - name: "RT"
+ - name: "NSAP"
+ - name: "SIG"
+ - name: "KEY"
+ - name: "PX"
+ - name: "AAAA"
+ - name: "LOC"
+ - name: "NXT"
+ - name: "SRV"
+ - name: "NAPTR"
+ - name: "KX"
+ - name: "CERT"
+ - name: "DNAME"
+ - name: "OPT"
+ - name: "APL"
+ - name: "DS"
+ - name: "SSHFP"
+ - name: "IPSECKEY"
+ - name: "RRSIG"
+ - name: "NSEC"
+ - name: "DNSKEY"
+ - name: "DHCID"
+ - name: "NSEC3"
+ - name: "NSEC3PARAM"
+ - name: "TLSA"
+ - name: "SMIMEA"
+ - name: "CDS"
+ - name: "CDNSKEY"
+ - name: "OPENPGPKEY"
+ - name: "CSYNC"
+ - name: "ZONEMD"
+ - name: "SVCB"
+ - name: "HTTPS"
+ - name: "SPF"
+ - name: "NID"
+ - name: "L32"
+ - name: "L64"
+ - name: "LP"
+ - name: "EUI48"
+ - name: "EUI64"
+ - name: "URI"
+ - name: "CAA"
+ - name: "AVC"
+ - name: "DLV"
+ - name: "IXFR"
+ - name: "AXFR"
+ - name: "MAILB"
+ - name: "MAILA"
+ - name: "ANY"
+ - name: nsd.queries_by_opcode
+ description: Queries Opcode
+ unit: 'queries/s'
+ chart_type: stacked
+ dimensions:
+ - name: "QUERY"
+ - name: "IQUERY"
+ - name: "STATUS"
+ - name: "NOTIFY"
+ - name: "UPDATE"
+ - name: "OTHER"
+ - name: nsd.queries_by_class
+ description: Queries Class
+ unit: 'queries/s'
+ chart_type: stacked
+ dimensions:
+ - name: "IN"
+ - name: "CS"
+ - name: "CH"
+ - name: "HS"
+ - name: nsd.queries_by_protocol
+ description: Queries Protocol
+ unit: 'queries/s'
+ chart_type: stacked
+ dimensions:
+ - name: "udp"
+ - name: "udp6"
+ - name: "tcp"
+ - name: "tcp6"
+ - name: "tls"
+ - name: "tls6"
+ - name: nsd.answers_by_rcode
+ description: Answers Rcode
+ unit: 'answers/s'
+ chart_type: stacked
+ dimensions:
+ - name: "NOERROR"
+ - name: "FORMERR"
+ - name: "SERVFAIL"
+ - name: "NXDOMAIN"
+ - name: "NOTIMP"
+ - name: "REFUSED"
+ - name: "YXDOMAIN"
+ - name: "YXRRSET"
+ - name: "NXRRSET"
+ - name: "NOTAUTH"
+ - name: "NOTZONE"
+ - name: "RCODE11"
+ - name: "RCODE12"
+ - name: "RCODE13"
+ - name: "RCODE14"
+ - name: "RCODE15"
+ - name: "BADVERS"
+ - name: nsd.errors
+ description: Errors
+ unit: 'errors/s'
+ chart_type: line
+ dimensions:
+ - name: "query"
+ - name: "answer"
+ - name: nsd.drops
+ description: Drops
+ unit: 'drops/s'
+ chart_type: line
+ dimensions:
+ - name: "query"
+ - name: nsd.zones
+ description: Zones
+ unit: 'zones'
+ chart_type: line
+ dimensions:
+ - name: "master"
+ - name: "slave"
+ - name: nsd.zone_transfers_requests
+ description: Zone Transfers
+ unit: 'requests/s'
+ chart_type: line
+ dimensions:
+ - name: "AXFR"
+ - name: "IXFR"
+ - name: nsd.zone_transfer_memory
+ description: Zone Transfer Memory
+ unit: 'bytes'
+ chart_type: line
+ dimensions:
+ - name: "used"
+ - name: nsd.database_size
+ description: Database Size
+ unit: 'bytes'
+ chart_type: line
+ dimensions:
+ - name: "disk"
+ - name: "mem"
+ - name: nsd.uptime
+ description: Uptime
+ unit: 'seconds'
+ chart_type: line
+ dimensions:
+ - name: "uptime"
diff --git a/src/go/plugin/go.d/modules/nsd/nsd.go b/src/go/plugin/go.d/modules/nsd/nsd.go
new file mode 100644
index 000000000..fae0f67f3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/nsd.go
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nsd
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("nsd", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Nsd {
+ return &Nsd{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type Nsd struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec nsdControlBinary
+}
+
+func (n *Nsd) Configuration() any {
+ return n.Config
+}
+
+func (n *Nsd) Init() error {
+ nsdControl, err := n.initNsdControlExec()
+ if err != nil {
+ n.Errorf("nsd-control exec initialization: %v", err)
+ return err
+ }
+ n.exec = nsdControl
+
+ return nil
+}
+
+func (n *Nsd) Check() error {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (n *Nsd) Charts() *module.Charts {
+ return n.charts
+}
+
+func (n *Nsd) Collect() map[string]int64 {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (n *Nsd) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/nsd/nsd_test.go b/src/go/plugin/go.d/modules/nsd/nsd_test.go
new file mode 100644
index 000000000..24f38b512
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/nsd_test.go
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nsd
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStats, _ = os.ReadFile("testdata/stats.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStats": dataStats,
+ } {
+ require.NotNil(t, data, name)
+
+ }
+}
+
+func TestNsd_Configuration(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Nsd{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNsd_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if failed to locate ndsudo": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nsd := New()
+ nsd.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, nsd.Init())
+ } else {
+ assert.NoError(t, nsd.Init())
+ }
+ })
+ }
+}
+
+func TestNsd_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Nsd
+ }{
+ "not initialized exec": {
+ prepare: func() *Nsd {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Nsd {
+ nsd := New()
+ nsd.exec = prepareMockOK()
+ _ = nsd.Check()
+ return nsd
+ },
+ },
+ "after collect": {
+ prepare: func() *Nsd {
+ nsd := New()
+ nsd.exec = prepareMockOK()
+ _ = nsd.Collect()
+ return nsd
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nsd := test.prepare()
+
+ assert.NotPanics(t, nsd.Cleanup)
+ })
+ }
+}
+
+func TestNsd_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestNsd_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockNsdControl
+ wantFail bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantFail: false,
+ },
+ "error on stats call": {
+ prepareMock: prepareMockErrOnStats,
+ wantFail: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantFail: true,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nsd := New()
+ mock := test.prepareMock()
+ nsd.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, nsd.Check())
+ } else {
+ assert.NoError(t, nsd.Check())
+ }
+ })
+ }
+}
+
+func TestNsd_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockNsdControl
+ wantMetrics map[string]int64
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantMetrics: map[string]int64{
+ "num.answer_wo_aa": 1,
+ "num.class.CH": 0,
+ "num.class.CS": 0,
+ "num.class.HS": 0,
+ "num.class.IN": 1,
+ "num.dropped": 1,
+ "num.edns": 1,
+ "num.ednserr": 1,
+ "num.opcode.IQUERY": 0,
+ "num.opcode.NOTIFY": 0,
+ "num.opcode.OTHER": 0,
+ "num.opcode.QUERY": 1,
+ "num.opcode.STATUS": 0,
+ "num.opcode.UPDATE": 0,
+ "num.queries": 1,
+ "num.raxfr": 1,
+ "num.rcode.BADVERS": 0,
+ "num.rcode.FORMERR": 1,
+ "num.rcode.NOERROR": 1,
+ "num.rcode.NOTAUTH": 0,
+ "num.rcode.NOTIMP": 1,
+ "num.rcode.NOTZONE": 0,
+ "num.rcode.NXDOMAIN": 1,
+ "num.rcode.NXRRSET": 0,
+ "num.rcode.RCODE11": 0,
+ "num.rcode.RCODE12": 0,
+ "num.rcode.RCODE13": 0,
+ "num.rcode.RCODE14": 0,
+ "num.rcode.RCODE15": 0,
+ "num.rcode.REFUSED": 1,
+ "num.rcode.SERVFAIL": 1,
+ "num.rcode.YXDOMAIN": 1,
+ "num.rcode.YXRRSET": 0,
+ "num.rixfr": 1,
+ "num.rxerr": 1,
+ "num.tcp": 1,
+ "num.tcp6": 1,
+ "num.tls": 1,
+ "num.tls6": 1,
+ "num.truncated": 1,
+ "num.txerr": 1,
+ "num.type.A": 1,
+ "num.type.AAAA": 1,
+ "num.type.AFSDB": 1,
+ "num.type.APL": 1,
+ "num.type.AVC": 0,
+ "num.type.CAA": 0,
+ "num.type.CDNSKEY": 1,
+ "num.type.CDS": 1,
+ "num.type.CERT": 1,
+ "num.type.CNAME": 1,
+ "num.type.CSYNC": 1,
+ "num.type.DHCID": 1,
+ "num.type.DLV": 0,
+ "num.type.DNAME": 1,
+ "num.type.DNSKEY": 1,
+ "num.type.DS": 1,
+ "num.type.EUI48": 1,
+ "num.type.EUI64": 1,
+ "num.type.HINFO": 1,
+ "num.type.HTTPS": 1,
+ "num.type.IPSECKEY": 1,
+ "num.type.ISDN": 1,
+ "num.type.KEY": 1,
+ "num.type.KX": 1,
+ "num.type.L32": 1,
+ "num.type.L64": 1,
+ "num.type.LOC": 1,
+ "num.type.LP": 1,
+ "num.type.MB": 1,
+ "num.type.MD": 1,
+ "num.type.MF": 1,
+ "num.type.MG": 1,
+ "num.type.MINFO": 1,
+ "num.type.MR": 1,
+ "num.type.MX": 1,
+ "num.type.NAPTR": 1,
+ "num.type.NID": 1,
+ "num.type.NS": 1,
+ "num.type.NSAP": 1,
+ "num.type.NSEC": 1,
+ "num.type.NSEC3": 1,
+ "num.type.NSEC3PARAM": 1,
+ "num.type.NULL": 1,
+ "num.type.NXT": 1,
+ "num.type.OPENPGPKEY": 1,
+ "num.type.OPT": 1,
+ "num.type.PTR": 1,
+ "num.type.PX": 1,
+ "num.type.RP": 1,
+ "num.type.RRSIG": 1,
+ "num.type.RT": 1,
+ "num.type.SIG": 1,
+ "num.type.SMIMEA": 1,
+ "num.type.SOA": 1,
+ "num.type.SPF": 1,
+ "num.type.SRV": 1,
+ "num.type.SSHFP": 1,
+ "num.type.SVCB": 1,
+ "num.type.TLSA": 1,
+ "num.type.TXT": 1,
+ "num.type.TYPE252": 0,
+ "num.type.TYPE255": 0,
+ "num.type.URI": 0,
+ "num.type.WKS": 1,
+ "num.type.X25": 1,
+ "num.type.ZONEMD": 1,
+ "num.udp": 1,
+ "num.udp6": 1,
+ "server0.queries": 1,
+ "size.config.disk": 1,
+ "size.config.mem": 1064,
+ "size.db.disk": 576,
+ "size.db.mem": 920,
+ "size.xfrd.mem": 1160464,
+ "time.boot": 556,
+ "zone.master": 1,
+ "zone.slave": 1,
+ },
+ },
+ "error on lvs report call": {
+ prepareMock: prepareMockErrOnStats,
+ wantMetrics: nil,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nsd := New()
+ mock := test.prepareMock()
+ nsd.exec = mock
+
+ mx := nsd.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ assert.Len(t, *nsd.Charts(), len(charts))
+ module.TestMetricsHasAllChartsDims(t, nsd.Charts(), mx)
+ }
+ })
+ }
+}
+
+func prepareMockOK() *mockNsdControl {
+ return &mockNsdControl{
+ dataStats: dataStats,
+ }
+}
+
+func prepareMockErrOnStats() *mockNsdControl {
+ return &mockNsdControl{
+ errOnStatus: true,
+ }
+}
+
+func prepareMockEmptyResponse() *mockNsdControl {
+ return &mockNsdControl{}
+}
+
+func prepareMockUnexpectedResponse() *mockNsdControl {
+ return &mockNsdControl{
+ dataStats: []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`),
+ }
+}
+
+type mockNsdControl struct {
+ errOnStatus bool
+ dataStats []byte
+}
+
+func (m *mockNsdControl) stats() ([]byte, error) {
+ if m.errOnStatus {
+ return nil, errors.New("mock.status() error")
+ }
+ return m.dataStats, nil
+}
diff --git a/src/go/plugin/go.d/modules/nsd/stats_counters.go b/src/go/plugin/go.d/modules/nsd/stats_counters.go
new file mode 100644
index 000000000..8ebe706a5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/stats_counters.go
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nsd
+
+// Docs: https://nsd.docs.nlnetlabs.nl/en/latest/manpages/nsd-control.html?highlight=elapsed#statistics-counters
+// Source: https://github.com/NLnetLabs/nsd/blob/b4a5ccd2235a1f8f71f7c640390e409bf123c963/remote.c#L2735
+
+// https://github.com/NLnetLabs/nsd/blob/b4a5ccd2235a1f8f71f7c640390e409bf123c963/remote.c#L2737
+var answerRcodes = []string{
+ "NOERROR",
+ "FORMERR",
+ "SERVFAIL",
+ "NXDOMAIN",
+ "NOTIMP",
+ "REFUSED",
+ "YXDOMAIN",
+ "YXRRSET",
+ "NXRRSET",
+ "NOTAUTH",
+ "NOTZONE",
+ "RCODE11",
+ "RCODE12",
+ "RCODE13",
+ "RCODE14",
+ "RCODE15",
+ "BADVERS",
+}
+
+// https://github.com/NLnetLabs/nsd/blob/b4a5ccd2235a1f8f71f7c640390e409bf123c963/remote.c#L2706
+var queryOpcodes = []string{
+ "QUERY",
+ "IQUERY",
+ "STATUS",
+ "NOTIFY",
+ "UPDATE",
+ "OTHER",
+}
+
+// https://github.com/NLnetLabs/nsd/blob/b4a5ccd2235a1f8f71f7c640390e409bf123c963/dns.c#L27
+var queryClasses = []string{
+ "IN",
+ "CS",
+ "CH",
+ "HS",
+}
+
+// https://github.com/NLnetLabs/nsd/blob/b4a5ccd2235a1f8f71f7c640390e409bf123c963/dns.c#L35
+var queryTypes = []string{
+ "A",
+ "NS",
+ "MD",
+ "MF",
+ "CNAME",
+ "SOA",
+ "MB",
+ "MG",
+ "MR",
+ "NULL",
+ "WKS",
+ "PTR",
+ "HINFO",
+ "MINFO",
+ "MX",
+ "TXT",
+ "RP",
+ "AFSDB",
+ "X25",
+ "ISDN",
+ "RT",
+ "NSAP",
+ "SIG",
+ "KEY",
+ "PX",
+ "AAAA",
+ "LOC",
+ "NXT",
+ "SRV",
+ "NAPTR",
+ "KX",
+ "CERT",
+ "DNAME",
+ "OPT",
+ "APL",
+ "DS",
+ "SSHFP",
+ "IPSECKEY",
+ "RRSIG",
+ "NSEC",
+ "DNSKEY",
+ "DHCID",
+ "NSEC3",
+ "NSEC3PARAM",
+ "TLSA",
+ "SMIMEA",
+ "CDS",
+ "CDNSKEY",
+ "OPENPGPKEY",
+ "CSYNC",
+ "ZONEMD",
+ "SVCB",
+ "HTTPS",
+ "SPF",
+ "NID",
+ "L32",
+ "L64",
+ "LP",
+ "EUI48",
+ "EUI64",
+ "URI",
+ "CAA",
+ "AVC",
+ "DLV",
+ "TYPE252",
+ "TYPE255",
+}
+
+var queryTypeNumberMap = map[string]string{
+ "TYPE251": "IXFR",
+ "TYPE252": "AXFR",
+ "TYPE253": "MAILB",
+ "TYPE254": "MAILA",
+ "TYPE255": "ANY",
+}
diff --git a/src/go/plugin/go.d/modules/nsd/testdata/config.json b/src/go/plugin/go.d/modules/nsd/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/nsd/testdata/config.yaml b/src/go/plugin/go.d/modules/nsd/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/nsd/testdata/stats.txt b/src/go/plugin/go.d/modules/nsd/testdata/stats.txt
new file mode 100644
index 000000000..cb6d8b829
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nsd/testdata/stats.txt
@@ -0,0 +1,95 @@
+server0.queries=1
+num.queries=1
+time.boot=556.488415
+time.elapsed=556.488415
+size.db.disk=576
+size.db.mem=920
+size.xfrd.mem=1160464
+size.config.disk=1
+size.config.mem=1064
+num.type.A=1
+num.type.NS=1
+num.type.MD=1
+num.type.MF=1
+num.type.CNAME=1
+num.type.SOA=1
+num.type.MB=1
+num.type.MG=1
+num.type.MR=1
+num.type.NULL=1
+num.type.WKS=1
+num.type.PTR=1
+num.type.HINFO=1
+num.type.MINFO=1
+num.type.MX=1
+num.type.TXT=1
+num.type.RP=1
+num.type.AFSDB=1
+num.type.X25=1
+num.type.ISDN=1
+num.type.RT=1
+num.type.NSAP=1
+num.type.SIG=1
+num.type.KEY=1
+num.type.PX=1
+num.type.AAAA=1
+num.type.LOC=1
+num.type.NXT=1
+num.type.SRV=1
+num.type.NAPTR=1
+num.type.KX=1
+num.type.CERT=1
+num.type.DNAME=1
+num.type.OPT=1
+num.type.APL=1
+num.type.DS=1
+num.type.SSHFP=1
+num.type.IPSECKEY=1
+num.type.RRSIG=1
+num.type.NSEC=1
+num.type.DNSKEY=1
+num.type.DHCID=1
+num.type.NSEC3=1
+num.type.NSEC3PARAM=1
+num.type.TLSA=1
+num.type.SMIMEA=1
+num.type.CDS=1
+num.type.CDNSKEY=1
+num.type.OPENPGPKEY=1
+num.type.CSYNC=1
+num.type.ZONEMD=1
+num.type.SVCB=1
+num.type.HTTPS=1
+num.type.SPF=1
+num.type.NID=1
+num.type.L32=1
+num.type.L64=1
+num.type.LP=1
+num.type.EUI48=1
+num.type.EUI64=1
+num.opcode.QUERY=1
+num.class.IN=1
+num.rcode.NOERROR=1
+num.rcode.FORMERR=1
+num.rcode.SERVFAIL=1
+num.rcode.NXDOMAIN=1
+num.rcode.NOTIMP=1
+num.rcode.REFUSED=1
+num.rcode.YXDOMAIN=1
+num.edns=1
+num.ednserr=1
+num.udp=1
+num.udp6=1
+num.tcp=1
+num.tcp6=1
+num.tls=1
+num.tls6=1
+num.answer_wo_aa=1
+num.rxerr=1
+num.txerr=1
+num.raxfr=1
+num.rixfr=1
+num.truncated=1
+num.dropped=1
+zone.master=1
+zone.slave=1
diff --git a/src/go/plugin/go.d/modules/ntpd/README.md b/src/go/plugin/go.d/modules/ntpd/README.md
new file mode 120000
index 000000000..bad92b03a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/README.md
@@ -0,0 +1 @@
+integrations/ntpd.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/ntpd/charts.go b/src/go/plugin/go.d/modules/ntpd/charts.go
new file mode 100644
index 000000000..95baea471
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/charts.go
@@ -0,0 +1,346 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ntpd
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioSystemOffset = module.Priority + iota
+ prioSystemJitter
+ prioSystemFrequency
+ prioSystemWander
+ prioSystemRootDelay
+ prioSystemRootDispersion
+ prioSystemStratum
+ prioSystemTimeConstant
+ prioSystemPrecision
+
+ prioPeerOffset
+ prioPeerDelay
+ prioPeerDispersion
+ prioPeerJitter
+ prioPeerXleave
+ prioPeerRootDelay
+ prioPeerRootDispersion
+ prioPeerStratum
+ prioPeerHostMode
+ prioPeerPeerMode
+ prioPeerHostPoll
+ prioPeerPeerPoll
+ prioPeerPrecision
+)
+
+var (
+ systemCharts = module.Charts{
+ systemOffsetChart.Copy(),
+ systemJitterChart.Copy(),
+ systemFrequencyChart.Copy(),
+ systemWanderChart.Copy(),
+ systemRootDelayChart.Copy(),
+ systemRootDispersionChart.Copy(),
+ systemStratumChart.Copy(),
+ systemTimeConstantChart.Copy(),
+ systemPrecisionChart.Copy(),
+ }
+ systemOffsetChart = module.Chart{
+ ID: "sys_offset",
+ Title: "Combined offset of server relative to this host",
+ Units: "milliseconds",
+ Fam: "system",
+ Ctx: "ntpd.sys_offset",
+ Type: module.Area,
+ Priority: prioSystemOffset,
+ Dims: module.Dims{
+ {ID: "offset", Name: "offset", Div: precision},
+ },
+ }
+ systemJitterChart = module.Chart{
+ ID: "sys_jitter",
+ Title: "Combined system jitter and clock jitter",
+ Units: "milliseconds",
+ Fam: "system",
+ Ctx: "ntpd.sys_jitter",
+ Priority: prioSystemJitter,
+ Dims: module.Dims{
+ {ID: "sys_jitter", Name: "system", Div: precision},
+ {ID: "clk_jitter", Name: "clock", Div: precision},
+ },
+ }
+ systemFrequencyChart = module.Chart{
+ ID: "sys_frequency",
+ Title: "Frequency offset relative to hardware clock",
+ Units: "ppm",
+ Fam: "system",
+ Ctx: "ntpd.sys_frequency",
+ Type: module.Area,
+ Priority: prioSystemFrequency,
+ Dims: module.Dims{
+ {ID: "frequency", Name: "frequency", Div: precision},
+ },
+ }
+ systemWanderChart = module.Chart{
+ ID: "sys_wander",
+ Title: "Clock frequency wander",
+ Units: "ppm",
+ Fam: "system",
+ Ctx: "ntpd.sys_wander",
+ Type: module.Area,
+ Priority: prioSystemWander,
+ Dims: module.Dims{
+ {ID: "clk_wander", Name: "clock", Div: precision},
+ },
+ }
+ systemRootDelayChart = module.Chart{
+ ID: "sys_rootdelay",
+ Title: "Total roundtrip delay to the primary reference clock",
+ Units: "milliseconds",
+ Fam: "system",
+ Ctx: "ntpd.sys_rootdelay",
+ Type: module.Area,
+ Priority: prioSystemRootDelay,
+ Dims: module.Dims{
+ {ID: "rootdelay", Name: "delay", Div: precision},
+ },
+ }
+ systemRootDispersionChart = module.Chart{
+ ID: "sys_rootdisp",
+ Title: "Total root dispersion to the primary reference clock",
+ Units: "milliseconds",
+ Fam: "system",
+ Ctx: "ntpd.sys_rootdisp",
+ Type: module.Area,
+ Priority: prioSystemRootDispersion,
+ Dims: module.Dims{
+ {ID: "rootdisp", Name: "dispersion", Div: precision},
+ },
+ }
+ systemStratumChart = module.Chart{
+ ID: "sys_stratum",
+ Title: "Stratum",
+ Units: "stratum",
+ Fam: "system",
+ Ctx: "ntpd.sys_stratum",
+ Priority: prioSystemStratum,
+ Dims: module.Dims{
+ {ID: "stratum", Name: "stratum", Div: precision},
+ },
+ }
+ systemTimeConstantChart = module.Chart{
+ ID: "sys_tc",
+ Title: "Time constant and poll exponent",
+ Units: "log2",
+ Fam: "system",
+ Ctx: "ntpd.sys_tc",
+ Priority: prioSystemTimeConstant,
+ Dims: module.Dims{
+ {ID: "tc", Name: "current", Div: precision},
+ {ID: "mintc", Name: "minimum", Div: precision},
+ },
+ }
+ systemPrecisionChart = module.Chart{
+ ID: "sys_precision",
+ Title: "Precision",
+ Units: "log2",
+ Fam: "system",
+ Ctx: "ntpd.sys_precision",
+ Priority: prioSystemPrecision,
+ Dims: module.Dims{
+ {ID: "precision", Name: "precision", Div: precision},
+ },
+ }
+)
+
+var (
+ peerChartsTmpl = module.Charts{
+ peerOffsetChartTmpl.Copy(),
+ peerDelayChartTmpl.Copy(),
+ peerDispersionChartTmpl.Copy(),
+ peerJitterChartTmpl.Copy(),
+ peerXleaveChartTmpl.Copy(),
+ peerRootDelayChartTmpl.Copy(),
+ peerRootDispersionChartTmpl.Copy(),
+ peerStratumChartTmpl.Copy(),
+ peerHostModeChartTmpl.Copy(),
+ peerPeerModeChartTmpl.Copy(),
+ peerHostPollChartTmpl.Copy(),
+ peerPeerPollChartTmpl.Copy(),
+ peerPrecisionChartTmpl.Copy(),
+ }
+ peerOffsetChartTmpl = module.Chart{
+ ID: "peer_%s_offset",
+ Title: "Peer offset",
+ Units: "milliseconds",
+ Fam: "peers",
+ Ctx: "ntpd.peer_offset",
+ Priority: prioPeerOffset,
+ Dims: module.Dims{
+ {ID: "peer_%s_offset", Name: "offset", Div: precision},
+ },
+ }
+ peerDelayChartTmpl = module.Chart{
+ ID: "peer_%s_delay",
+ Title: "Peer delay",
+ Units: "milliseconds",
+ Fam: "peers",
+ Ctx: "ntpd.peer_delay",
+ Priority: prioPeerDelay,
+ Dims: module.Dims{
+ {ID: "peer_%s_delay", Name: "delay", Div: precision},
+ },
+ }
+ peerDispersionChartTmpl = module.Chart{
+ ID: "peer_%s_dispersion",
+ Title: "Peer dispersion",
+ Units: "milliseconds",
+ Fam: "peers",
+ Ctx: "ntpd.peer_dispersion",
+ Priority: prioPeerDispersion,
+ Dims: module.Dims{
+ {ID: "peer_%s_dispersion", Name: "dispersion", Div: precision},
+ },
+ }
+ peerJitterChartTmpl = module.Chart{
+ ID: "peer_%s_jitter",
+ Title: "Peer jitter",
+ Units: "milliseconds",
+ Fam: "peers",
+ Ctx: "ntpd.peer_jitter",
+ Priority: prioPeerJitter,
+ Dims: module.Dims{
+ {ID: "peer_%s_jitter", Name: "jitter", Div: precision},
+ },
+ }
+ peerXleaveChartTmpl = module.Chart{
+ ID: "peer_%s_xleave",
+ Title: "Peer interleave delay",
+ Units: "milliseconds",
+ Fam: "peers",
+ Ctx: "ntpd.peer_xleave",
+ Priority: prioPeerXleave,
+ Dims: module.Dims{
+ {ID: "peer_%s_xleave", Name: "xleave", Div: precision},
+ },
+ }
+ peerRootDelayChartTmpl = module.Chart{
+ ID: "peer_%s_rootdelay",
+ Title: "Peer roundtrip delay to the primary reference clock",
+ Units: "milliseconds",
+ Fam: "peers",
+ Ctx: "ntpd.peer_rootdelay",
+ Priority: prioPeerRootDelay,
+ Dims: module.Dims{
+ {ID: "peer_%s_rootdelay", Name: "rootdelay", Div: precision},
+ },
+ }
+ peerRootDispersionChartTmpl = module.Chart{
+ ID: "peer_%s_rootdisp",
+ Title: "Peer root dispersion to the primary reference clock",
+ Units: "milliseconds",
+ Fam: "peers",
+ Ctx: "ntpd.peer_rootdisp",
+ Priority: prioPeerRootDispersion,
+ Dims: module.Dims{
+ {ID: "peer_%s_rootdisp", Name: "dispersion", Div: precision},
+ },
+ }
+ peerStratumChartTmpl = module.Chart{
+ ID: "peer_%s_stratum",
+ Title: "Peer stratum",
+ Units: "stratum",
+ Fam: "peers",
+ Ctx: "ntpd.peer_stratum",
+ Priority: prioPeerStratum,
+ Dims: module.Dims{
+ {ID: "peer_%s_stratum", Name: "stratum", Div: precision},
+ },
+ }
+ peerHostModeChartTmpl = module.Chart{
+ ID: "peer_%s_hmode",
+ Title: "Peer host mode",
+ Units: "hmode",
+ Fam: "peers",
+ Ctx: "ntpd.peer_hmode",
+ Priority: prioPeerHostMode,
+ Dims: module.Dims{
+ {ID: "peer_%s_hmode", Name: "hmode", Div: precision},
+ },
+ }
+ peerPeerModeChartTmpl = module.Chart{
+ ID: "peer_%s_pmode",
+ Title: "Peer mode",
+ Units: "pmode",
+ Fam: "peers",
+ Ctx: "ntpd.peer_pmode",
+ Priority: prioPeerPeerMode,
+ Dims: module.Dims{
+ {ID: "peer_%s_pmode", Name: "pmode", Div: precision},
+ },
+ }
+ peerHostPollChartTmpl = module.Chart{
+ ID: "peer_%s_hpoll",
+ Title: "Peer host poll exponent",
+ Units: "log2",
+ Fam: "peers",
+ Ctx: "ntpd.peer_hpoll",
+ Priority: prioPeerHostPoll,
+ Dims: module.Dims{
+ {ID: "peer_%s_hpoll", Name: "hpoll", Div: precision},
+ },
+ }
+ peerPeerPollChartTmpl = module.Chart{
+ ID: "peer_%s_ppoll",
+ Title: "Peer poll exponent",
+ Units: "log2",
+ Fam: "peers",
+ Ctx: "ntpd.peer_ppoll",
+ Priority: prioPeerPeerPoll,
+ Dims: module.Dims{
+ {ID: "peer_%s_ppoll", Name: "hpoll", Div: precision},
+ },
+ }
+ peerPrecisionChartTmpl = module.Chart{
+ ID: "peer_%s_precision",
+ Title: "Peer precision",
+ Units: "log2",
+ Fam: "peers",
+ Ctx: "ntpd.peer_precision",
+ Priority: prioPeerPrecision,
+ Dims: module.Dims{
+ {ID: "peer_%s_precision", Name: "precision", Div: precision},
+ },
+ }
+)
+
+func (n *NTPd) addPeerCharts(addr string) {
+ charts := peerChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ReplaceAll(addr, ".", "_"))
+ chart.Labels = []module.Label{
+ {Key: "peer_address", Value: addr},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, addr)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NTPd) removePeerCharts(addr string) {
+ px := fmt.Sprintf("peer_%s", strings.ReplaceAll(addr, ".", "_"))
+
+ for _, chart := range *n.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ntpd/client.go b/src/go/plugin/go.d/modules/ntpd/client.go
new file mode 100644
index 000000000..8e111cd76
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/client.go
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ntpd
+
+import (
+ "net"
+ "time"
+
+ "github.com/facebook/time/ntp/control"
+)
+
+func newNTPClient(c Config) (ntpConn, error) {
+ conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration())
+ if err != nil {
+ return nil, err
+ }
+
+ client := &ntpClient{
+ conn: conn,
+ timeout: c.Timeout.Duration(),
+ client: &control.NTPClient{Connection: conn},
+ }
+
+ return client, nil
+}
+
+type ntpClient struct {
+ conn net.Conn
+ timeout time.Duration
+ client *control.NTPClient
+}
+
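+// systemInfo reads the NTP system variables: in the control protocol,
+// association ID 0 refers to the system itself rather than a peer.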
+func (c *ntpClient) systemInfo() (map[string]string, error) {
+ return c.peerInfo(0)
+}
+
+func (c *ntpClient) peerInfo(id uint16) (map[string]string, error) {
+ msg := &control.NTPControlMsgHead{
+ VnMode: control.MakeVnMode(2, control.Mode),
+ REMOp: control.OpReadVariables,
+ AssociationID: id,
+ }
+
+ if err := c.conn.SetDeadline(time.Now().Add(c.timeout)); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.client.Communicate(msg)
+ if err != nil {
+ return nil, err
+ }
+
+ return resp.GetAssociationInfo()
+}
+
+func (c *ntpClient) peerIDs() ([]uint16, error) {
+ msg := &control.NTPControlMsgHead{
+ VnMode: control.MakeVnMode(2, control.Mode),
+ REMOp: control.OpReadStatus,
+ }
+
+ if err := c.conn.SetDeadline(time.Now().Add(c.timeout)); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.client.Communicate(msg)
+ if err != nil {
+ return nil, err
+ }
+
+ peers, err := resp.GetAssociations()
+ if err != nil {
+ return nil, err
+ }
+
+ var ids []uint16
+ for id := range peers {
+ ids = append(ids, id)
+ }
+
+ return ids, nil
+}
+
+func (c *ntpClient) close() {
+ if c.conn != nil {
+ _ = c.conn.Close()
+ c.conn = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ntpd/collect.go b/src/go/plugin/go.d/modules/ntpd/collect.go
new file mode 100644
index 000000000..09553a65c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/collect.go
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ntpd
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "time"
+)
+
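+// Collected float values are scaled by this factor and stored as int64;
+// chart dimensions use Div: precision to restore the original value.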
+const (
+ precision = 1000000
+)
+
+func (n *NTPd) collect() (map[string]int64, error) {
+ if n.client == nil {
+ client, err := n.newClient(n.Config)
+ if err != nil {
+ return nil, fmt.Errorf("creating NTP client: %v", err)
+ }
+ n.client = client
+ }
+
+ mx := make(map[string]int64)
+
+ if err := n.collectInfo(mx); err != nil {
+ return nil, err
+ }
+
+ if n.CollectPeers {
+ if now := time.Now(); now.Sub(n.findPeersTime) > n.findPeersEvery {
+ n.findPeersTime = now
+ if err := n.findPeers(); err != nil {
+ n.Warning(err)
+ }
+ }
+ n.collectPeersInfo(mx)
+ }
+
+ return mx, nil
+}
+
+func (n *NTPd) collectInfo(mx map[string]int64) error {
+ info, err := n.client.systemInfo()
+ if err != nil {
+ return fmt.Errorf("error on querying system info: %v", err)
+ }
+
+ for k, v := range info {
+ switch k {
+ case
+ "offset",
+ "sys_jitter",
+ "clk_jitter",
+ "frequency",
+ "clk_wander",
+ "rootdelay",
+ "rootdisp",
+ "stratum",
+ "tc",
+ "mintc",
+ "precision":
+ if val, err := strconv.ParseFloat(v, 64); err == nil {
+ mx[k] = int64(val * precision)
+ }
+ }
+ }
+ return nil
+}
+
+func (n *NTPd) collectPeersInfo(mx map[string]int64) {
+ for _, id := range n.peerIDs {
+ info, err := n.client.peerInfo(id)
+ if err != nil {
+ n.Warningf("error on querying NTP peer info id='%d': %v", id, err)
+ continue
+ }
+
+ addr, ok := info["srcadr"]
+ if !ok {
+ continue
+ }
+
+ for k, v := range info {
+ switch k {
+ case
+ "offset",
+ "delay",
+ "dispersion",
+ "jitter",
+ "xleave",
+ "rootdelay",
+ "rootdisp",
+ "stratum",
+ "hmode",
+ "pmode",
+ "hpoll",
+ "ppoll",
+ "precision":
+ if val, err := strconv.ParseFloat(v, 64); err == nil {
+ mx["peer_"+addr+"_"+k] = int64(val * precision)
+ }
+ }
+ }
+ }
+}
+
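+// findPeers refreshes the list of peer association IDs, adds charts for newly
+// seen peer addresses, and removes charts for peers that are no longer reported.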
+func (n *NTPd) findPeers() error {
+ n.peerIDs = n.peerIDs[:0]
+
+ n.Debug("querying NTP peers")
+ peers, err := n.client.peerIDs()
+ if err != nil {
+ return fmt.Errorf("querying NTP peers: %v", err)
+ }
+
+ n.Debugf("found %d NTP peers (ids: %v)", len(peers), peers)
+ seen := make(map[string]bool)
+
+ for _, id := range peers {
+ info, err := n.client.peerInfo(id)
+ if err != nil {
+ n.Debugf("error on querying NTP peer info id='%d': %v", id, err)
+ continue
+ }
+
+ addr, ok := info["srcadr"]
+ if ip := net.ParseIP(addr); !ok || ip == nil || n.peerIPAddrFilter.Contains(ip) {
+ n.Debugf("skipping NTP peer id='%d', srcadr='%s'", id, addr)
+ continue
+ }
+
+ seen[addr] = true
+
+ if !n.peerAddr[addr] {
+ n.peerAddr[addr] = true
+ n.Debugf("new NTP peer id='%d', srcadr='%s': creating charts", id, addr)
+ n.addPeerCharts(addr)
+ }
+
+ n.peerIDs = append(n.peerIDs, id)
+ }
+
+ for addr := range n.peerAddr {
+ if !seen[addr] {
+ delete(n.peerAddr, addr)
+ n.Debugf("stale NTP peer srcadr='%s': removing charts", addr)
+ n.removePeerCharts(addr)
+ }
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/ntpd/config_schema.json b/src/go/plugin/go.d/modules/ntpd/config_schema.json
new file mode 100644
index 000000000..f4d763b82
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/config_schema.json
@@ -0,0 +1,49 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NTPd collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the NTPd daemon listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:123"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "collect_peers": {
+ "title": "Collect peers",
+ "description": "Collect metrics of NTP peers.",
+ "type": "boolean"
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ntpd/integrations/ntpd.md b/src/go/plugin/go.d/modules/ntpd/integrations/ntpd.md
new file mode 100644
index 000000000..c0094c524
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/integrations/ntpd.md
@@ -0,0 +1,263 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/ntpd/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/ntpd/metadata.yaml"
+sidebar_label: "NTPd"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/System Clock and NTP"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NTPd
+
+
+<img src="https://netdata.cloud/img/ntp.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: ntpd
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the system variables of the local `ntpd` daemon (and, optionally, the variables of its polled peers) using the NTP Control Message Protocol over a UDP socket, similar to `ntpq`, the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).
+
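+For example, assuming the `ntpq` utility is available on the host, you can manually inspect the same system and peer variables that this collector reads:
+
+```bash
+ntpq -c rv -c peers 127.0.0.1
+```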
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per NTPd instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ntpd.sys_offset | offset | milliseconds |
+| ntpd.sys_jitter | system, clock | milliseconds |
+| ntpd.sys_frequency | frequency | ppm |
+| ntpd.sys_wander | clock | ppm |
+| ntpd.sys_rootdelay | delay | milliseconds |
+| ntpd.sys_rootdisp | dispersion | milliseconds |
+| ntpd.sys_stratum | stratum | stratum |
+| ntpd.sys_tc | current, minimum | log2 |
+| ntpd.sys_precision | precision | log2 |
+
+### Per peer
+
+These metrics refer to the NTPd peer.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| peer_address | peer's source IP address |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ntpd.peer_offset | offset | milliseconds |
+| ntpd.peer_delay | delay | milliseconds |
+| ntpd.peer_dispersion | dispersion | milliseconds |
+| ntpd.peer_jitter | jitter | milliseconds |
+| ntpd.peer_xleave | xleave | milliseconds |
+| ntpd.peer_rootdelay | rootdelay | milliseconds |
+| ntpd.peer_rootdisp | dispersion | milliseconds |
+| ntpd.peer_stratum | stratum | stratum |
+| ntpd.peer_hmode | hmode | hmode |
+| ntpd.peer_pmode | pmode | pmode |
+| ntpd.peer_hpoll | hpoll | log2 |
+| ntpd.peer_ppoll | ppoll | log2 |
+| ntpd.peer_precision | precision | log2 |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/ntpd.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/ntpd.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Server address in IP:PORT format. | 127.0.0.1:123 | yes |
+| timeout | Connection/read/write timeout. | 1 | no |
+| collect_peers | Determines whether peer metrics will be collected. | no | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:123
+
+```
+</details>
+
+##### With peers metrics
+
+Collect peers metrics.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:123
+ collect_peers: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:123
+
+ - name: remote
+ address: 203.0.113.0:123
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `ntpd` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m ntpd
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `ntpd` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep ntpd
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep ntpd /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep ntpd
+```
+
+
diff --git a/src/go/plugin/go.d/modules/ntpd/metadata.yaml b/src/go/plugin/go.d/modules/ntpd/metadata.yaml
new file mode 100644
index 000000000..46178b031
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/metadata.yaml
@@ -0,0 +1,260 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-ntpd
+ plugin_name: go.d.plugin
+ module_name: ntpd
+ monitored_instance:
+ name: NTPd
+ link: https://www.ntp.org/documentation/4.2.8-series/ntpd
+ icon_filename: ntp.png
+ categories:
+ - data-collection.system-clock-and-ntp
+ keywords:
+ - ntpd
+ - ntp
+ - time
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ This collector monitors the system variables of the local `ntpd` daemon (and, optionally, the variables of its polled peers)
+ using the NTP Control Message Protocol over a UDP socket, similar to `ntpq`,
+ the [standard NTP query program](https://doc.ntp.org/current-stable/ntpq.html).
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/ntpd.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: Server address in IP:PORT format.
+ default_value: 127.0.0.1:123
+ required: true
+ - name: timeout
+ description: Connection/read/write timeout.
+ default_value: 1
+ required: false
+ - name: collect_peers
+ description: Determines whether peer metrics will be collected.
+ default_value: false
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:123
+ - name: With peers metrics
+ description: Collect peers metrics.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:123
+ collect_peers: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:123
+
+ - name: remote
+ address: 203.0.113.0:123
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: ntpd.sys_offset
+ description: Combined offset of server relative to this host
+ unit: milliseconds
+ chart_type: area
+ dimensions:
+ - name: offset
+ - name: ntpd.sys_jitter
+ description: Combined system jitter and clock jitter
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: system
+ - name: clock
+ - name: ntpd.sys_frequency
+ description: Frequency offset relative to hardware clock
+ unit: ppm
+ chart_type: area
+ dimensions:
+ - name: frequency
+ - name: ntpd.sys_wander
+ description: Clock frequency wander
+ unit: ppm
+ chart_type: area
+ dimensions:
+ - name: clock
+ - name: ntpd.sys_rootdelay
+ description: Total roundtrip delay to the primary reference clock
+ unit: milliseconds
+ chart_type: area
+ dimensions:
+ - name: delay
+ - name: ntpd.sys_rootdisp
+ description: Total root dispersion to the primary reference clock
+ unit: milliseconds
+ chart_type: area
+ dimensions:
+ - name: dispersion
+ - name: ntpd.sys_stratum
+ description: Stratum
+ unit: stratum
+ chart_type: line
+ dimensions:
+ - name: stratum
+ - name: ntpd.sys_tc
+ description: Time constant and poll exponent
+ unit: log2
+ chart_type: line
+ dimensions:
+ - name: current
+ - name: minimum
+ - name: ntpd.sys_precision
+ description: Precision
+ unit: log2
+ chart_type: line
+ dimensions:
+ - name: precision
+ - name: peer
+ description: These metrics refer to the NTPd peer.
+ labels:
+ - name: peer_address
+ description: peer's source IP address
+ metrics:
+ - name: ntpd.peer_offset
+ description: Peer offset
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: offset
+ - name: ntpd.peer_delay
+ description: Peer delay
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: delay
+ - name: ntpd.peer_dispersion
+ description: Peer dispersion
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: dispersion
+ - name: ntpd.peer_jitter
+ description: Peer jitter
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: jitter
+ - name: ntpd.peer_xleave
+ description: Peer interleave delay
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: xleave
+ - name: ntpd.peer_rootdelay
+ description: Peer roundtrip delay to the primary reference clock
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: rootdelay
+ - name: ntpd.peer_rootdisp
+ description: Peer root dispersion to the primary reference clock
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: dispersion
+ - name: ntpd.peer_stratum
+ description: Peer stratum
+ unit: stratum
+ chart_type: line
+ dimensions:
+ - name: stratum
+ - name: ntpd.peer_hmode
+ description: Peer host mode
+ unit: hmode
+ chart_type: line
+ dimensions:
+ - name: hmode
+ - name: ntpd.peer_pmode
+ description: Peer mode
+ unit: pmode
+ chart_type: line
+ dimensions:
+ - name: pmode
+ - name: ntpd.peer_hpoll
+ description: Peer host poll exponent
+ unit: log2
+ chart_type: line
+ dimensions:
+ - name: hpoll
+ - name: ntpd.peer_ppoll
+ description: Peer poll exponent
+ unit: log2
+ chart_type: line
+ dimensions:
+ - name: ppoll
+ - name: ntpd.peer_precision
+ description: Peer precision
+ unit: log2
+ chart_type: line
+ dimensions:
+ - name: precision
diff --git a/src/go/plugin/go.d/modules/ntpd/ntpd.go b/src/go/plugin/go.d/modules/ntpd/ntpd.go
new file mode 100644
index 000000000..011624681
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/ntpd.go
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ntpd
+
+import (
+ _ "embed"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/iprange"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("ntpd", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *NTPd {
+ return &NTPd{
+ Config: Config{
+ Address: "127.0.0.1:123",
+ Timeout: web.Duration(time.Second),
+ CollectPeers: false,
+ },
+ charts: systemCharts.Copy(),
+ newClient: newNTPClient,
+ findPeersEvery: time.Minute * 3,
+ peerAddr: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ CollectPeers bool `yaml:"collect_peers" json:"collect_peers"`
+}
+
+type (
+ NTPd struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ client ntpConn
+ newClient func(c Config) (ntpConn, error)
+
+ findPeersTime time.Time
+ findPeersEvery time.Duration
+ peerAddr map[string]bool
+ peerIDs []uint16
+ peerIPAddrFilter iprange.Pool
+ }
+ ntpConn interface {
+ systemInfo() (map[string]string, error)
+ peerInfo(id uint16) (map[string]string, error)
+ peerIDs() ([]uint16, error)
+ close()
+ }
+)
+
+func (n *NTPd) Configuration() any {
+ return n.Config
+}
+
+func (n *NTPd) Init() error {
+ if n.Address == "" {
+ n.Error("config validation: 'address' can not be empty")
+ return errors.New("address not set")
+ }
+
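+ // peers whose source address is unspecified (0.0.0.0) or loopback
+ // (for example, 127.127.x.x reference clock drivers) are excluded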
+ txt := "0.0.0.0 127.0.0.0/8"
+ r, err := iprange.ParseRanges(txt)
+ if err != nil {
+ n.Errorf("error on parsing ip range '%s': %v", txt, err)
+ return fmt.Errorf("error on parsing ip range '%s': %v", txt, err)
+ }
+
+ n.peerIPAddrFilter = r
+
+ return nil
+}
+
+func (n *NTPd) Check() error {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (n *NTPd) Charts() *module.Charts {
+ return n.charts
+}
+
+func (n *NTPd) Collect() map[string]int64 {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (n *NTPd) Cleanup() {
+ if n.client != nil {
+ n.client.close()
+ n.client = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ntpd/ntpd_test.go b/src/go/plugin/go.d/modules/ntpd/ntpd_test.go
new file mode 100644
index 000000000..99c0519c8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/ntpd_test.go
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ntpd
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestNTPd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NTPd{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNTPd_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default config": {
+ config: New().Config,
+ },
+ "unset 'address'": {
+ wantFail: true,
+ config: Config{
+ Address: "",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ n := New()
+ n.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, n.Init())
+ } else {
+ assert.NoError(t, n.Init())
+ }
+ })
+ }
+}
+
+func TestNTPd_Charts(t *testing.T) {
+ assert.Equal(t, len(systemCharts), len(*New().Charts()))
+}
+
+func TestNTPd_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*NTPd)
+ wantClose bool
+ }{
+ "after New": {
+ wantClose: false,
+ prepare: func(*NTPd) {},
+ },
+ "after Init": {
+ wantClose: false,
+ prepare: func(n *NTPd) { _ = n.Init() },
+ },
+ "after Check": {
+ wantClose: true,
+ prepare: func(n *NTPd) { _ = n.Init(); _ = n.Check() },
+ },
+ "after Collect": {
+ wantClose: true,
+ prepare: func(n *NTPd) { _ = n.Init(); n.Collect() },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ m := &mockClient{}
+ n := prepareNTPdWithMock(m, true)
+ test.prepare(n)
+
+ require.NotPanics(t, n.Cleanup)
+
+ if test.wantClose {
+ assert.True(t, m.closeCalled)
+ } else {
+ assert.False(t, m.closeCalled)
+ }
+ })
+ }
+}
+
+func TestNTPd_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *NTPd
+ wantFail bool
+ }{
+ "system: success, peers: success": {
+ wantFail: false,
+ prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{}, true) },
+ },
+ "system: success, list peers: fails": {
+ wantFail: false,
+ prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnPeerIDs: true}, true) },
+ },
+ "system: success, peers info: fails": {
+ wantFail: false,
+ prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnPeerInfo: true}, true) },
+ },
+ "system: fails": {
+ wantFail: true,
+ prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnSystemInfo: true}, true) },
+ },
+ "fail on creating client": {
+ wantFail: true,
+ prepare: func() *NTPd { return prepareNTPdWithMock(nil, true) },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ n := test.prepare()
+
+ require.NoError(t, n.Init())
+
+ if test.wantFail {
+ assert.Error(t, n.Check())
+ } else {
+ assert.NoError(t, n.Check())
+ }
+ })
+ }
+
+}
+
+func TestNTPd_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *NTPd
+ expected map[string]int64
+ expectedCharts int
+ }{
+ "system: success, peers: success": {
+ prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{}, true) },
+ expected: map[string]int64{
+ "clk_jitter": 626000,
+ "clk_wander": 81000,
+ "mintc": 3000000,
+ "offset": -149638,
+ "peer_203.0.113.1_delay": 10464000,
+ "peer_203.0.113.1_dispersion": 5376000,
+ "peer_203.0.113.1_hmode": 3000000,
+ "peer_203.0.113.1_hpoll": 7000000,
+ "peer_203.0.113.1_jitter": 5204000,
+ "peer_203.0.113.1_offset": 312000,
+ "peer_203.0.113.1_pmode": 4000000,
+ "peer_203.0.113.1_ppoll": 7000000,
+ "peer_203.0.113.1_precision": -21000000,
+ "peer_203.0.113.1_rootdelay": 198000,
+ "peer_203.0.113.1_rootdisp": 14465000,
+ "peer_203.0.113.1_stratum": 2000000,
+ "peer_203.0.113.1_xleave": 95000,
+ "peer_203.0.113.2_delay": 10464000,
+ "peer_203.0.113.2_dispersion": 5376000,
+ "peer_203.0.113.2_hmode": 3000000,
+ "peer_203.0.113.2_hpoll": 7000000,
+ "peer_203.0.113.2_jitter": 5204000,
+ "peer_203.0.113.2_offset": 312000,
+ "peer_203.0.113.2_pmode": 4000000,
+ "peer_203.0.113.2_ppoll": 7000000,
+ "peer_203.0.113.2_precision": -21000000,
+ "peer_203.0.113.2_rootdelay": 198000,
+ "peer_203.0.113.2_rootdisp": 14465000,
+ "peer_203.0.113.2_stratum": 2000000,
+ "peer_203.0.113.2_xleave": 95000,
+ "peer_203.0.113.3_delay": 10464000,
+ "peer_203.0.113.3_dispersion": 5376000,
+ "peer_203.0.113.3_hmode": 3000000,
+ "peer_203.0.113.3_hpoll": 7000000,
+ "peer_203.0.113.3_jitter": 5204000,
+ "peer_203.0.113.3_offset": 312000,
+ "peer_203.0.113.3_pmode": 4000000,
+ "peer_203.0.113.3_ppoll": 7000000,
+ "peer_203.0.113.3_precision": -21000000,
+ "peer_203.0.113.3_rootdelay": 198000,
+ "peer_203.0.113.3_rootdisp": 14465000,
+ "peer_203.0.113.3_stratum": 2000000,
+ "peer_203.0.113.3_xleave": 95000,
+ "precision": -24000000,
+ "rootdelay": 10385000,
+ "rootdisp": 23404000,
+ "stratum": 2000000,
+ "sys_jitter": 1648010,
+ "tc": 7000000,
+ },
+ expectedCharts: len(systemCharts) + len(peerChartsTmpl)*3,
+ },
+ "system: success, list peers: fails": {
+ prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnPeerIDs: true}, true) },
+ expected: map[string]int64{
+ "clk_jitter": 626000,
+ "clk_wander": 81000,
+ "mintc": 3000000,
+ "offset": -149638,
+ "precision": -24000000,
+ "rootdelay": 10385000,
+ "rootdisp": 23404000,
+ "stratum": 2000000,
+ "sys_jitter": 1648010,
+ "tc": 7000000,
+ },
+ expectedCharts: len(systemCharts),
+ },
+ "system: success, peers info: fails": {
+ prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnPeerInfo: true}, true) },
+ expected: map[string]int64{
+ "clk_jitter": 626000,
+ "clk_wander": 81000,
+ "mintc": 3000000,
+ "offset": -149638,
+ "precision": -24000000,
+ "rootdelay": 10385000,
+ "rootdisp": 23404000,
+ "stratum": 2000000,
+ "sys_jitter": 1648010,
+ "tc": 7000000,
+ },
+ expectedCharts: len(systemCharts),
+ },
+ "system: fails": {
+ prepare: func() *NTPd { return prepareNTPdWithMock(&mockClient{errOnSystemInfo: true}, true) },
+ expected: nil,
+ expectedCharts: len(systemCharts),
+ },
+ "fail on creating client": {
+ prepare: func() *NTPd { return prepareNTPdWithMock(nil, true) },
+ expected: nil,
+ expectedCharts: len(systemCharts),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ n := test.prepare()
+
+ require.NoError(t, n.Init())
+ _ = n.Check()
+
+ mx := n.Collect()
+
+ assert.Equal(t, test.expected, mx)
+ assert.Equal(t, test.expectedCharts, len(*n.Charts()))
+ })
+ }
+}
+
+func prepareNTPdWithMock(m *mockClient, collectPeers bool) *NTPd {
+ n := New()
+ n.CollectPeers = collectPeers
+ if m == nil {
+ n.newClient = func(_ Config) (ntpConn, error) { return nil, errors.New("mock.newClient error") }
+ } else {
+ n.newClient = func(_ Config) (ntpConn, error) { return m, nil }
+ }
+ return n
+}
+
+type mockClient struct {
+ errOnSystemInfo bool
+ errOnPeerInfo bool
+ errOnPeerIDs bool
+ closeCalled bool
+}
+
+func (m *mockClient) systemInfo() (map[string]string, error) {
+ if m.errOnSystemInfo {
+ return nil, errors.New("mockClient.info() error")
+ }
+
+ info := map[string]string{
+ "rootdelay": "10.385",
+ "tc": "7",
+ "mintc": "3",
+ "processor": "x86_64",
+ "refid": "194.177.210.54",
+ "reftime": "0xe7504a10.74414244",
+ "clock": "0xe7504e80.8c46aa3f",
+ "peer": "14835",
+ "sys_jitter": "1.648010",
+ "leapsec": "201701010000",
+ "expire": "202306280000",
+ "leap": "0",
+ "stratum": "2",
+ "precision": "-24",
+ "offset": "-0.149638",
+ "frequency": "- 7.734",
+ "clk_wander": "0.081",
+ "tai": "37",
+ "version": "ntpd 4.2.8p15@1.3728-o Wed Sep 23 11:46:38 UTC 2020 (1)",
+ "rootdisp": "23.404",
+ "clk_jitter": "0.626",
+ "system": "Linux/5.10.0-19-amd64",
+ }
+
+ return info, nil
+}
+
+func (m *mockClient) peerInfo(id uint16) (map[string]string, error) {
+ if m.errOnPeerInfo {
+ return nil, errors.New("mockClient.peerInfo() error")
+ }
+
+ info := map[string]string{
+ "delay": "10.464",
+ "dispersion": "5.376",
+ "dstadr": "10.10.10.20",
+ "dstport": "123",
+ "filtdelay": "11.34 10.53 10.49 10.46 10.92 10.56 10.69 37.99",
+ "filtdisp": "0.00 2.01 4.01 5.93 7.89 9.84 11.81 13.73",
+ "filtoffset": "0.66 0.32 0.18 0.31 0.33 0.10 0.34 14.07",
+ "flash": "0x0",
+ "headway": "0",
+ "hmode": "3",
+ "hpoll": "7",
+ "jitter": "5.204",
+ "keyid": "0",
+ "leap": "0",
+ "offset": "0.312",
+ "pmode": "4",
+ "ppoll": "7",
+ "precision": "-21",
+ "reach": "0xff",
+ "rec": "0xe7504df8.74802284",
+ "refid": "193.93.164.193",
+ "reftime": "0xe7504b8b.0c98a518",
+ "rootdelay": "0.198",
+ "rootdisp": "14.465",
+ "srcadr": fmt.Sprintf("203.0.113.%d", id),
+ "srcport": "123",
+ "stratum": "2",
+ "unreach": "0",
+ "xleave": "0.095",
+ }
+
+ return info, nil
+}
+
+func (m *mockClient) peerIDs() ([]uint16, error) {
+ if m.errOnPeerIDs {
+ return nil, errors.New("mockClient.peerIDs() error")
+ }
+ return []uint16{1, 2, 3}, nil
+}
+
+func (m *mockClient) close() {
+ m.closeCalled = true
+}
diff --git a/src/go/plugin/go.d/modules/ntpd/testdata/config.json b/src/go/plugin/go.d/modules/ntpd/testdata/config.json
new file mode 100644
index 000000000..fc8d6844f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "collect_peers": true
+}
diff --git a/src/go/plugin/go.d/modules/ntpd/testdata/config.yaml b/src/go/plugin/go.d/modules/ntpd/testdata/config.yaml
new file mode 100644
index 000000000..94cee8526
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ntpd/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+collect_peers: yes
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/README.md b/src/go/plugin/go.d/modules/nvidia_smi/README.md
new file mode 120000
index 000000000..3527bdb4b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/README.md
@@ -0,0 +1 @@
+integrations/nvidia_gpu.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/charts.go b/src/go/plugin/go.d/modules/nvidia_smi/charts.go
new file mode 100644
index 000000000..746c8eed3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/charts.go
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioGPUPCIBandwidthUsage = module.Priority + iota
+ prioGPUPCIBandwidthUtilization
+ prioGPUFanSpeed
+ prioGPUUtilization
+ prioGPUMemUtilization
+ prioGPUDecoderUtilization
+ prioGPUEncoderUtilization
+ prioGPUMIGModeStatus
+ prioGPUMIGDevicesCount
+ prioGPUFBMemoryUsage
+ prioGPUMIGFBMemoryUsage
+ prioGPUBAR1MemoryUsage
+ prioGPUMIGBAR1MemoryUsage
+ prioGPUTemperatureChart
+ prioGPUVoltageChart
+ prioGPUClockFreq
+ prioGPUPowerDraw
+ prioGPUPerformanceState
+)
+
+var (
+ gpuXMLCharts = module.Charts{
+ gpuPCIBandwidthUsageChartTmpl.Copy(),
+ gpuPCIBandwidthUtilizationChartTmpl.Copy(),
+ gpuFanSpeedPercChartTmpl.Copy(),
+ gpuUtilizationChartTmpl.Copy(),
+ gpuMemUtilizationChartTmpl.Copy(),
+ gpuDecoderUtilizationChartTmpl.Copy(),
+ gpuEncoderUtilizationChartTmpl.Copy(),
+ gpuMIGModeCurrentStatusChartTmpl.Copy(),
+ gpuMIGDevicesCountChartTmpl.Copy(),
+ gpuFrameBufferMemoryUsageChartTmpl.Copy(),
+ gpuBAR1MemoryUsageChartTmpl.Copy(),
+ gpuVoltageChartTmpl.Copy(),
+ gpuTemperatureChartTmpl.Copy(),
+ gpuClockFreqChartTmpl.Copy(),
+ gpuPowerDrawChartTmpl.Copy(),
+ gpuPerformanceStateChartTmpl.Copy(),
+ }
+ migDeviceXMLCharts = module.Charts{
+ migDeviceFrameBufferMemoryUsageChartTmpl.Copy(),
+ migDeviceBAR1MemoryUsageChartTmpl.Copy(),
+ }
+)
+
+var (
+ gpuPCIBandwidthUsageChartTmpl = module.Chart{
+ ID: "gpu_%s_pcie_bandwidth_usage",
+ Title: "PCI Express Bandwidth Usage",
+ Units: "B/s",
+ Fam: "pcie bandwidth",
+ Ctx: "nvidia_smi.gpu_pcie_bandwidth_usage",
+ Type: module.Area,
+ Priority: prioGPUPCIBandwidthUsage,
+ Dims: module.Dims{
+ {ID: "gpu_%s_pcie_bandwidth_usage_rx", Name: "rx"},
+ {ID: "gpu_%s_pcie_bandwidth_usage_tx", Name: "tx", Mul: -1},
+ },
+ }
+ gpuPCIBandwidthUtilizationChartTmpl = module.Chart{
+ ID: "gpu_%s_pcie_bandwidth_utilization",
+ Title: "PCI Express Bandwidth Utilization",
+ Units: "percentage",
+ Fam: "pcie bandwidth",
+ Ctx: "nvidia_smi.gpu_pcie_bandwidth_utilization",
+ Priority: prioGPUPCIBandwidthUtilization,
+ Dims: module.Dims{
+ {ID: "gpu_%s_pcie_bandwidth_utilization_rx", Name: "rx", Div: 100},
+ {ID: "gpu_%s_pcie_bandwidth_utilization_tx", Name: "tx", Div: 100},
+ },
+ }
+ gpuFanSpeedPercChartTmpl = module.Chart{
+ ID: "gpu_%s_fan_speed_perc",
+ Title: "Fan speed",
+ Units: "%",
+ Fam: "fan speed",
+ Ctx: "nvidia_smi.gpu_fan_speed_perc",
+ Priority: prioGPUFanSpeed,
+ Dims: module.Dims{
+ {ID: "gpu_%s_fan_speed_perc", Name: "fan_speed"},
+ },
+ }
+ gpuUtilizationChartTmpl = module.Chart{
+ ID: "gpu_%s_gpu_utilization",
+ Title: "GPU utilization",
+ Units: "%",
+ Fam: "gpu utilization",
+ Ctx: "nvidia_smi.gpu_utilization",
+ Priority: prioGPUUtilization,
+ Dims: module.Dims{
+ {ID: "gpu_%s_gpu_utilization", Name: "gpu"},
+ },
+ }
+ gpuMemUtilizationChartTmpl = module.Chart{
+ ID: "gpu_%s_memory_utilization",
+ Title: "Memory utilization",
+ Units: "%",
+ Fam: "mem utilization",
+ Ctx: "nvidia_smi.gpu_memory_utilization",
+ Priority: prioGPUMemUtilization,
+ Dims: module.Dims{
+ {ID: "gpu_%s_mem_utilization", Name: "memory"},
+ },
+ }
+ gpuDecoderUtilizationChartTmpl = module.Chart{
+ ID: "gpu_%s_decoder_utilization",
+ Title: "Decoder utilization",
+ Units: "%",
+ Fam: "dec utilization",
+ Ctx: "nvidia_smi.gpu_decoder_utilization",
+ Priority: prioGPUDecoderUtilization,
+ Dims: module.Dims{
+ {ID: "gpu_%s_decoder_utilization", Name: "decoder"},
+ },
+ }
+ gpuEncoderUtilizationChartTmpl = module.Chart{
+ ID: "gpu_%s_encoder_utilization",
+ Title: "Encoder utilization",
+ Units: "%",
+ Fam: "enc utilization",
+ Ctx: "nvidia_smi.gpu_encoder_utilization",
+ Priority: prioGPUEncoderUtilization,
+ Dims: module.Dims{
+ {ID: "gpu_%s_encoder_utilization", Name: "encoder"},
+ },
+ }
+ gpuMIGModeCurrentStatusChartTmpl = module.Chart{
+ ID: "gpu_%s_mig_mode_current_status",
+ Title: "MIG current mode",
+ Units: "status",
+ Fam: "mig",
+ Ctx: "nvidia_smi.gpu_mig_mode_current_status",
+ Priority: prioGPUMIGModeStatus,
+ Dims: module.Dims{
+ {ID: "gpu_%s_mig_current_mode_enabled", Name: "enabled"},
+ {ID: "gpu_%s_mig_current_mode_disabled", Name: "disabled"},
+ },
+ }
+ gpuMIGDevicesCountChartTmpl = module.Chart{
+ ID: "gpu_%s_mig_devices_count",
+ Title: "MIG devices",
+ Units: "devices",
+ Fam: "mig",
+ Ctx: "nvidia_smi.gpu_mig_devices_count",
+ Priority: prioGPUMIGDevicesCount,
+ Dims: module.Dims{
+ {ID: "gpu_%s_mig_devices_count", Name: "mig"},
+ },
+ }
+ gpuFrameBufferMemoryUsageChartTmpl = module.Chart{
+ ID: "gpu_%s_frame_buffer_memory_usage",
+ Title: "Frame buffer memory usage",
+ Units: "B",
+ Fam: "fb mem usage",
+ Ctx: "nvidia_smi.gpu_frame_buffer_memory_usage",
+ Type: module.Stacked,
+ Priority: prioGPUFBMemoryUsage,
+ Dims: module.Dims{
+ {ID: "gpu_%s_frame_buffer_memory_usage_free", Name: "free"},
+ {ID: "gpu_%s_frame_buffer_memory_usage_used", Name: "used"},
+ {ID: "gpu_%s_frame_buffer_memory_usage_reserved", Name: "reserved"},
+ },
+ }
+ gpuBAR1MemoryUsageChartTmpl = module.Chart{
+ ID: "gpu_%s_bar1_memory_usage",
+ Title: "BAR1 memory usage",
+ Units: "B",
+ Fam: "bar1 mem usage",
+ Ctx: "nvidia_smi.gpu_bar1_memory_usage",
+ Type: module.Stacked,
+ Priority: prioGPUBAR1MemoryUsage,
+ Dims: module.Dims{
+ {ID: "gpu_%s_bar1_memory_usage_free", Name: "free"},
+ {ID: "gpu_%s_bar1_memory_usage_used", Name: "used"},
+ },
+ }
+ gpuTemperatureChartTmpl = module.Chart{
+ ID: "gpu_%s_temperature",
+ Title: "Temperature",
+ Units: "Celsius",
+ Fam: "temperature",
+ Ctx: "nvidia_smi.gpu_temperature",
+ Priority: prioGPUTemperatureChart,
+ Dims: module.Dims{
+ {ID: "gpu_%s_temperature", Name: "temperature"},
+ },
+ }
+ gpuVoltageChartTmpl = module.Chart{
+ ID: "gpu_%s_voltage",
+ Title: "Voltage",
+ Units: "V",
+ Fam: "voltage",
+ Ctx: "nvidia_smi.gpu_voltage",
+ Priority: prioGPUVoltageChart,
+ Dims: module.Dims{
+ {ID: "gpu_%s_voltage", Name: "voltage", Div: 1000}, // mV => V
+ },
+ }
+ gpuClockFreqChartTmpl = module.Chart{
+ ID: "gpu_%s_clock_freq",
+ Title: "Clock current frequency",
+ Units: "MHz",
+ Fam: "clocks",
+ Ctx: "nvidia_smi.gpu_clock_freq",
+ Priority: prioGPUClockFreq,
+ Dims: module.Dims{
+ {ID: "gpu_%s_graphics_clock", Name: "graphics"},
+ {ID: "gpu_%s_video_clock", Name: "video"},
+ {ID: "gpu_%s_sm_clock", Name: "sm"},
+ {ID: "gpu_%s_mem_clock", Name: "mem"},
+ },
+ }
+ gpuPowerDrawChartTmpl = module.Chart{
+ ID: "gpu_%s_power_draw",
+ Title: "Power draw",
+ Units: "Watts",
+ Fam: "power draw",
+ Ctx: "nvidia_smi.gpu_power_draw",
+ Priority: prioGPUPowerDraw,
+ Dims: module.Dims{
+ {ID: "gpu_%s_power_draw", Name: "power_draw"},
+ },
+ }
+ gpuPerformanceStateChartTmpl = module.Chart{
+ ID: "gpu_%s_performance_state",
+ Title: "Performance state",
+ Units: "state",
+ Fam: "performance state",
+ Ctx: "nvidia_smi.gpu_performance_state",
+ Priority: prioGPUPerformanceState,
+ Dims: module.Dims{
+ {ID: "gpu_%s_performance_state_P0", Name: "P0"},
+ {ID: "gpu_%s_performance_state_P1", Name: "P1"},
+ {ID: "gpu_%s_performance_state_P2", Name: "P2"},
+ {ID: "gpu_%s_performance_state_P3", Name: "P3"},
+ {ID: "gpu_%s_performance_state_P4", Name: "P4"},
+ {ID: "gpu_%s_performance_state_P5", Name: "P5"},
+ {ID: "gpu_%s_performance_state_P6", Name: "P6"},
+ {ID: "gpu_%s_performance_state_P7", Name: "P7"},
+ {ID: "gpu_%s_performance_state_P8", Name: "P8"},
+ {ID: "gpu_%s_performance_state_P9", Name: "P9"},
+ {ID: "gpu_%s_performance_state_P10", Name: "P10"},
+ {ID: "gpu_%s_performance_state_P11", Name: "P11"},
+ {ID: "gpu_%s_performance_state_P12", Name: "P12"},
+ {ID: "gpu_%s_performance_state_P13", Name: "P13"},
+ {ID: "gpu_%s_performance_state_P14", Name: "P14"},
+ {ID: "gpu_%s_performance_state_P15", Name: "P15"},
+ },
+ }
+)
+
+func (nv *NvidiaSmi) addGPUXMLCharts(gpu gpuInfo) {
+ charts := gpuXMLCharts.Copy()
+
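+ // drop charts for values that the driver reports as "N/A" for this GPU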
+ if !isValidValue(gpu.Utilization.GpuUtil) {
+ _ = charts.Remove(gpuUtilizationChartTmpl.ID)
+ }
+ if !isValidValue(gpu.Utilization.MemoryUtil) {
+ _ = charts.Remove(gpuMemUtilizationChartTmpl.ID)
+ }
+ if !isValidValue(gpu.Utilization.DecoderUtil) {
+ _ = charts.Remove(gpuDecoderUtilizationChartTmpl.ID)
+ }
+ if !isValidValue(gpu.Utilization.EncoderUtil) {
+ _ = charts.Remove(gpuEncoderUtilizationChartTmpl.ID)
+ }
+ if !isValidValue(gpu.MIGMode.CurrentMIG) {
+ _ = charts.Remove(gpuMIGModeCurrentStatusChartTmpl.ID)
+ _ = charts.Remove(gpuMIGDevicesCountChartTmpl.ID)
+ }
+ if !isValidValue(gpu.FanSpeed) {
+ _ = charts.Remove(gpuFanSpeedPercChartTmpl.ID)
+ }
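+ // depending on the nvidia-smi version, power draw is reported under either
+ // <power_readings> or <gpu_power_readings>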
+ if (gpu.PowerReadings == nil || !isValidValue(gpu.PowerReadings.PowerDraw)) &&
+ (gpu.GPUPowerReadings == nil || !isValidValue(gpu.GPUPowerReadings.PowerDraw)) {
+ _ = charts.Remove(gpuPowerDrawChartTmpl.ID)
+ }
+ if !isValidValue(gpu.Voltage.GraphicsVolt) {
+ _ = charts.Remove(gpuVoltageChartTmpl.ID)
+ }
+
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, strings.ToLower(gpu.UUID))
+ c.Labels = []module.Label{
+ // csv output has no 'product_brand'
+ {Key: "uuid", Value: gpu.UUID},
+ {Key: "product_name", Value: gpu.ProductName},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, gpu.UUID)
+ }
+ }
+
+ if err := nv.Charts().Add(*charts...); err != nil {
+ nv.Warning(err)
+ }
+}
+
+var (
+ migDeviceFrameBufferMemoryUsageChartTmpl = module.Chart{
+ ID: "mig_instance_%s_gpu_%s_frame_buffer_memory_usage",
+ Title: "MIG Frame buffer memory usage",
+ Units: "B",
+ Fam: "fb mem usage",
+ Ctx: "nvidia_smi.gpu_mig_frame_buffer_memory_usage",
+ Type: module.Stacked,
+ Priority: prioGPUMIGFBMemoryUsage,
+ Dims: module.Dims{
+ {ID: "mig_instance_%s_gpu_%s_frame_buffer_memory_usage_free", Name: "free"},
+ {ID: "mig_instance_%s_gpu_%s_frame_buffer_memory_usage_used", Name: "used"},
+ {ID: "mig_instance_%s_gpu_%s_frame_buffer_memory_usage_reserved", Name: "reserved"},
+ },
+ }
+ migDeviceBAR1MemoryUsageChartTmpl = module.Chart{
+ ID: "mig_instance_%s_gpu_%s_bar1_memory_usage",
+ Title: "MIG BAR1 memory usage",
+ Units: "B",
+ Fam: "bar1 mem usage",
+ Ctx: "nvidia_smi.gpu_mig_bar1_memory_usage",
+ Type: module.Stacked,
+ Priority: prioGPUMIGBAR1MemoryUsage,
+ Dims: module.Dims{
+ {ID: "mig_instance_%s_gpu_%s_bar1_memory_usage_free", Name: "free"},
+ {ID: "mig_instance_%s_gpu_%s_bar1_memory_usage_used", Name: "used"},
+ },
+ }
+)
+
+func (nv *NvidiaSmi) addMIGDeviceCharts(gpu gpuInfo, mig gpuMIGDeviceInfo) {
+ charts := migDeviceXMLCharts.Copy()
+
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, strings.ToLower(mig.GPUInstanceID), strings.ToLower(gpu.UUID))
+ c.Labels = []module.Label{
+ {Key: "gpu_uuid", Value: gpu.UUID},
+ {Key: "gpu_product_name", Value: gpu.ProductName},
+ {Key: "gpu_instance_id", Value: mig.GPUInstanceID},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, mig.GPUInstanceID, gpu.UUID)
+ }
+ }
+
+ if err := nv.Charts().Add(*charts...); err != nil {
+ nv.Warning(err)
+ }
+}
+
+func (nv *NvidiaSmi) removeCharts(prefix string) {
+ prefix = strings.ToLower(prefix)
+
+ for _, c := range *nv.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/collect.go b/src/go/plugin/go.d/modules/nvidia_smi/collect.go
new file mode 100644
index 000000000..f621d191b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/collect.go
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func (nv *NvidiaSmi) collect() (map[string]int64, error) {
+ if nv.exec == nil {
+ return nil, errors.New("nvidia-smi exec is not initialized")
+ }
+
+ mx := make(map[string]int64)
+
+ if err := nv.collectGPUInfo(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (nv *NvidiaSmi) collectGPUInfo(mx map[string]int64) error {
+ bs, err := nv.exec.queryGPUInfo()
+ if err != nil {
+ return fmt.Errorf("error on quering XML GPU info: %v", err)
+ }
+
+ info := &gpusInfo{}
+ if err := xml.Unmarshal(bs, info); err != nil {
+ return fmt.Errorf("error on unmarshaling XML GPU info response: %v", err)
+ }
+
+ seenGPU := make(map[string]bool)
+ seenMIG := make(map[string]bool)
+
+ for _, gpu := range info.GPUs {
+ if !isValidValue(gpu.UUID) {
+ continue
+ }
+
+ px := "gpu_" + gpu.UUID + "_"
+
+ seenGPU[px] = true
+
+ if !nv.gpus[px] {
+ nv.gpus[px] = true
+ nv.addGPUXMLCharts(gpu)
+ }
+
+ addMetric(mx, px+"pcie_bandwidth_usage_rx", gpu.PCI.RxUtil, 1024) // KB => bytes
+ addMetric(mx, px+"pcie_bandwidth_usage_tx", gpu.PCI.TxUtil, 1024) // KB => bytes
+ if maxBw := calcMaxPCIEBandwidth(gpu); maxBw > 0 {
+ rx := parseFloat(gpu.PCI.RxUtil) * 1024 // KB => bytes
+ tx := parseFloat(gpu.PCI.TxUtil) * 1024 // KB => bytes
+ mx[px+"pcie_bandwidth_utilization_rx"] = int64((rx * 100 / maxBw) * 100)
+ mx[px+"pcie_bandwidth_utilization_tx"] = int64((tx * 100 / maxBw) * 100)
+ }
+ addMetric(mx, px+"fan_speed_perc", gpu.FanSpeed, 0)
+ addMetric(mx, px+"gpu_utilization", gpu.Utilization.GpuUtil, 0)
+ addMetric(mx, px+"mem_utilization", gpu.Utilization.MemoryUtil, 0)
+ addMetric(mx, px+"decoder_utilization", gpu.Utilization.DecoderUtil, 0)
+ addMetric(mx, px+"encoder_utilization", gpu.Utilization.EncoderUtil, 0)
+ addMetric(mx, px+"frame_buffer_memory_usage_free", gpu.FBMemoryUsage.Free, 1024*1024) // MiB => bytes
+ addMetric(mx, px+"frame_buffer_memory_usage_used", gpu.FBMemoryUsage.Used, 1024*1024) // MiB => bytes
+ addMetric(mx, px+"frame_buffer_memory_usage_reserved", gpu.FBMemoryUsage.Reserved, 1024*1024) // MiB => bytes
+ addMetric(mx, px+"bar1_memory_usage_free", gpu.Bar1MemoryUsage.Free, 1024*1024) // MiB => bytes
+ addMetric(mx, px+"bar1_memory_usage_used", gpu.Bar1MemoryUsage.Used, 1024*1024) // MiB => bytes
+ addMetric(mx, px+"temperature", gpu.Temperature.GpuTemp, 0)
+ addMetric(mx, px+"graphics_clock", gpu.Clocks.GraphicsClock, 0)
+ addMetric(mx, px+"video_clock", gpu.Clocks.VideoClock, 0)
+ addMetric(mx, px+"sm_clock", gpu.Clocks.SmClock, 0)
+ addMetric(mx, px+"mem_clock", gpu.Clocks.MemClock, 0)
+ if gpu.PowerReadings != nil {
+ addMetric(mx, px+"power_draw", gpu.PowerReadings.PowerDraw, 0)
+ } else if gpu.GPUPowerReadings != nil {
+ addMetric(mx, px+"power_draw", gpu.GPUPowerReadings.PowerDraw, 0)
+ }
+ addMetric(mx, px+"voltage", gpu.Voltage.GraphicsVolt, 0)
+ for i := 0; i < 16; i++ {
+ s := "P" + strconv.Itoa(i)
+ mx[px+"performance_state_"+s] = boolToInt(gpu.PerformanceState == s)
+ }
+ if isValidValue(gpu.MIGMode.CurrentMIG) {
+ mode := strings.ToLower(gpu.MIGMode.CurrentMIG)
+ mx[px+"mig_current_mode_enabled"] = boolToInt(mode == "enabled")
+ mx[px+"mig_current_mode_disabled"] = boolToInt(mode == "disabled")
+ mx[px+"mig_devices_count"] = int64(len(gpu.MIGDevices.MIGDevice))
+ }
+
+ for _, mig := range gpu.MIGDevices.MIGDevice {
+ if !isValidValue(mig.GPUInstanceID) {
+ continue
+ }
+
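+ // shadow px with a per-MIG-device prefix that still embeds the parent GPU prefix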
+ px := "mig_instance_" + mig.GPUInstanceID + "_" + px
+
+ seenMIG[px] = true
+
+ if !nv.migs[px] {
+ nv.migs[px] = true
+ nv.addMIGDeviceCharts(gpu, mig)
+ }
+
+ addMetric(mx, px+"ecc_error_sram_uncorrectable", mig.ECCErrorCount.VolatileCount.SRAMUncorrectable, 0)
+ addMetric(mx, px+"frame_buffer_memory_usage_free", mig.FBMemoryUsage.Free, 1024*1024) // MiB => bytes
+ addMetric(mx, px+"frame_buffer_memory_usage_used", mig.FBMemoryUsage.Used, 1024*1024) // MiB => bytes
+ addMetric(mx, px+"frame_buffer_memory_usage_reserved", mig.FBMemoryUsage.Reserved, 1024*1024) // MiB => bytes
+ addMetric(mx, px+"bar1_memory_usage_free", mig.BAR1MemoryUsage.Free, 1024*1024) // MiB => bytes
+ addMetric(mx, px+"bar1_memory_usage_used", mig.BAR1MemoryUsage.Used, 1024*1024) // MiB => bytes
+ }
+ }
+
+ for px := range nv.gpus {
+ if !seenGPU[px] {
+ delete(nv.gpus, px)
+ nv.removeCharts(px)
+ }
+ }
+
+ for px := range nv.migs {
+ if !seenMIG[px] {
+ delete(nv.migs, px)
+ nv.removeCharts(px)
+ }
+ }
+
+ return nil
+}
+
+func calcMaxPCIEBandwidth(gpu gpuInfo) float64 {
+ gen := gpu.PCI.PCIGPULinkInfo.PCIEGen.MaxLinkGen
+ width := strings.TrimSuffix(gpu.PCI.PCIGPULinkInfo.LinkWidths.MaxLinkWidth, "x")
+
+ if !isValidValue(gen) || !isValidValue(width) {
+ return 0
+ }
+
+ // https://enterprise-support.nvidia.com/s/article/understanding-pcie-configuration-for-maximum-performance
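+ // PCIe gen1/2 use 8b/10b encoding (1/5 overhead); gen3 and later use 128b/130b (2/130 overhead)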
+ var speed, enc float64
+ switch gen {
+ case "1":
+ speed, enc = 2.5, 1.0/5.0
+ case "2":
+ speed, enc = 5, 1.0/5.0
+ case "3":
+ speed, enc = 8, 2.0/130.0
+ case "4":
+ speed, enc = 16, 2.0/130.0
+ case "5":
+ speed, enc = 32, 2.0/130.0
+ default:
+ return 0
+ }
+
+ // Maximum PCIe Bandwidth = SPEED * WIDTH * (1 - ENCODING) - 1Gb/s
+ return (speed*parseFloat(width)*(1-enc) - 1) * 1e9 / 8 // Gb/s => bytes
+}
+
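+// addMetric parses a numeric value (dropping a trailing unit such as "MiB" or "W"),
+// optionally multiplies it by mul, and stores the result as int64.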
+func addMetric(mx map[string]int64, key, value string, mul int) {
+ if !isValidValue(value) {
+ return
+ }
+
+ value = removeUnits(value)
+
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return
+ }
+
+ if mul > 0 {
+ v *= float64(mul)
+ }
+
+ mx[key] = int64(v)
+}
+
+func isValidValue(v string) bool {
+ return v != "" && v != "N/A" && v != "[N/A]"
+}
+
+func parseFloat(s string) float64 {
+ v, _ := strconv.ParseFloat(removeUnits(s), 64)
+ return v
+}
+
+func removeUnits(s string) string {
+ if i := strings.IndexByte(s, ' '); i != -1 {
+ s = s[:i]
+ }
+ return s
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/config_schema.json b/src/go/plugin/go.d/modules/nvidia_smi/config_schema.json
new file mode 100644
index 000000000..3f93badc2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/config_schema.json
@@ -0,0 +1,56 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NVIDIA SMI collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "binary_path": {
+ "title": "Binary path",
+ "description": "Path to the `nvidia-smi` binary.",
+ "type": "string",
+ "default": "nvidia-smi"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 10
+ },
+ "loop_mode": {
+ "title": "Loop Mode",
+ "description": "When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option.",
+ "type": "boolean",
+ "default": true
+ }
+ },
+ "required": [
+ "binary_path"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "binary_path": {
+ "ui:help": "If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "loop_mode": {
+      "ui:help": "In loop mode, `nvidia-smi` repeatedly queries GPU data at the interval set by the `-l SEC` (`--loop=SEC`) option, rather than running the query once, and sleeps between queries. This enables ongoing performance tracking without starting a new process for every data collection."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/exec.go b/src/go/plugin/go.d/modules/nvidia_smi/exec.go
new file mode 100644
index 000000000..11a26131f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/exec.go
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "os/exec"
+ "strconv"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+type nvidiaSmiBinary interface {
+ queryGPUInfo() ([]byte, error)
+ stop() error
+}
+
+func newNvidiaSmiBinary(path string, cfg Config, log *logger.Logger) (nvidiaSmiBinary, error) {
+ if !cfg.LoopMode {
+ return &nvidiaSmiExec{
+ Logger: log,
+ binPath: path,
+ timeout: cfg.Timeout.Duration(),
+ }, nil
+ }
+
+ smi := &nvidiaSmiLoopExec{
+ Logger: log,
+ binPath: path,
+ updateEvery: cfg.UpdateEvery,
+ firstSampleTimeout: time.Second * 3,
+ }
+
+ if err := smi.run(); err != nil {
+ return nil, err
+ }
+
+ return smi, nil
+}
+
+type nvidiaSmiExec struct {
+ *logger.Logger
+
+ binPath string
+ timeout time.Duration
+}
+
+func (e *nvidiaSmiExec) queryGPUInfo() ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.binPath, "-q", "-x")
+
+ e.Debugf("executing '%s'", cmd)
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
+
+func (e *nvidiaSmiExec) stop() error { return nil }
+
+type nvidiaSmiLoopExec struct {
+ *logger.Logger
+
+ binPath string
+
+ updateEvery int
+ firstSampleTimeout time.Duration
+
+ cmd *exec.Cmd
+ done chan struct{}
+
+ mux sync.Mutex
+ lastSample string
+}
+
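+// queryGPUInfo returns the most recent complete XML sample captured by the background nvidia-smi loop.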
+func (e *nvidiaSmiLoopExec) queryGPUInfo() ([]byte, error) {
+ select {
+ case <-e.done:
+ return nil, errors.New("process has already exited")
+ default:
+ }
+
+ e.mux.Lock()
+ defer e.mux.Unlock()
+
+ return []byte(e.lastSample), nil
+}
+
+func (e *nvidiaSmiLoopExec) run() error {
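+	// Loop interval: update_every seconds, capped at 5 seconds, so samples stay reasonably fresh between collections.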
+ secs := 5
+ if e.updateEvery < secs {
+ secs = e.updateEvery
+ }
+
+ cmd := exec.Command(e.binPath, "-q", "-x", "-l", strconv.Itoa(secs))
+
+ e.Debugf("executing '%s'", cmd)
+
+ r, err := cmd.StdoutPipe()
+ if err != nil {
+ return err
+ }
+
+ if err := cmd.Start(); err != nil {
+ return err
+ }
+
+ firstSample := make(chan struct{}, 1)
+ done := make(chan struct{})
+ e.cmd = cmd
+ e.done = done
+
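+	// Reader goroutine: accumulate stdout between <nvidia_smi_log> and </nvidia_smi_log>
+	// and publish each complete XML document to lastSample.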
+ go func() {
+ defer close(done)
+
+ var buf bytes.Buffer
+ var insideLog bool
+ var emptyRows int64
+ var outsideLogRows int64
+
+ const unexpectedRowsLimit = 500
+
+ sc := bufio.NewScanner(r)
+
+ for sc.Scan() {
+ line := sc.Text()
+
+ if !insideLog {
+ outsideLogRows++
+ } else {
+ outsideLogRows = 0
+ }
+
+ if line == "" {
+ emptyRows++
+ } else {
+ emptyRows = 0
+ }
+
+ if outsideLogRows >= unexpectedRowsLimit || emptyRows >= unexpectedRowsLimit {
+ e.Errorf("unexpected output from nvidia-smi loop: outside log rows %d, empty rows %d", outsideLogRows, emptyRows)
+ break
+ }
+
+ switch {
+ case line == "<nvidia_smi_log>":
+ insideLog = true
+ buf.Reset()
+
+ buf.WriteString(line)
+ buf.WriteByte('\n')
+ case line == "</nvidia_smi_log>":
+ insideLog = false
+
+ buf.WriteString(line)
+
+ e.mux.Lock()
+ e.lastSample = buf.String()
+ e.mux.Unlock()
+
+ buf.Reset()
+
+ select {
+ case firstSample <- struct{}{}:
+ default:
+ }
+ case insideLog:
+ buf.WriteString(line)
+ buf.WriteByte('\n')
+ default:
+ continue
+ }
+ }
+ }()
+
+ select {
+ case <-e.done:
+ _ = e.stop()
+ return errors.New("process exited before the first sample was collected")
+ case <-time.After(e.firstSampleTimeout):
+ _ = e.stop()
+ return errors.New("timed out waiting for first sample")
+ case <-firstSample:
+ return nil
+ }
+}
+
+func (e *nvidiaSmiLoopExec) stop() error {
+ if e.cmd == nil || e.cmd.Process == nil {
+ return nil
+ }
+
+ _ = e.cmd.Process.Kill()
+ _ = e.cmd.Wait()
+ e.cmd = nil
+
+ select {
+ case <-e.done:
+ return nil
+ case <-time.After(time.Second * 2):
+ return errors.New("timed out waiting for process to exit")
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/gpu_info.go b/src/go/plugin/go.d/modules/nvidia_smi/gpu_info.go
new file mode 100644
index 000000000..506d36f6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/gpu_info.go
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+type gpusInfo struct {
+ GPUs []gpuInfo `xml:"gpu"`
+}
+
+type (
+ gpuInfo struct {
+ ID string `xml:"id,attr"`
+ ProductName string `xml:"product_name"`
+ ProductBrand string `xml:"product_brand"`
+ ProductArchitecture string `xml:"product_architecture"`
+ UUID string `xml:"uuid"`
+ FanSpeed string `xml:"fan_speed"`
+ PerformanceState string `xml:"performance_state"`
+ MIGMode struct {
+ CurrentMIG string `xml:"current_mig"`
+ } `xml:"mig_mode"`
+ MIGDevices struct {
+ MIGDevice []gpuMIGDeviceInfo `xml:"mig_device"`
+ } `xml:"mig_devices"`
+ PCI struct {
+ TxUtil string `xml:"tx_util"`
+ RxUtil string `xml:"rx_util"`
+ PCIGPULinkInfo struct {
+ PCIEGen struct {
+ MaxLinkGen string `xml:"max_link_gen"`
+ } `xml:"pcie_gen"`
+ LinkWidths struct {
+ MaxLinkWidth string `xml:"max_link_width"`
+ } `xml:"link_widths"`
+ } `xml:"pci_gpu_link_info"`
+ } `xml:"pci"`
+ Utilization struct {
+ GpuUtil string `xml:"gpu_util"`
+ MemoryUtil string `xml:"memory_util"`
+ EncoderUtil string `xml:"encoder_util"`
+ DecoderUtil string `xml:"decoder_util"`
+ } `xml:"utilization"`
+ FBMemoryUsage struct {
+ Total string `xml:"total"`
+ Reserved string `xml:"reserved"`
+ Used string `xml:"used"`
+ Free string `xml:"free"`
+ } `xml:"fb_memory_usage"`
+ Bar1MemoryUsage struct {
+ Total string `xml:"total"`
+ Used string `xml:"used"`
+ Free string `xml:"free"`
+ } `xml:"bar1_memory_usage"`
+ Temperature struct {
+ GpuTemp string `xml:"gpu_temp"`
+ GpuTempMaxThreshold string `xml:"gpu_temp_max_threshold"`
+ GpuTempSlowThreshold string `xml:"gpu_temp_slow_threshold"`
+ GpuTempMaxGpuThreshold string `xml:"gpu_temp_max_gpu_threshold"`
+ GpuTargetTemperature string `xml:"gpu_target_temperature"`
+ MemoryTemp string `xml:"memory_temp"`
+ GpuTempMaxMemThreshold string `xml:"gpu_temp_max_mem_threshold"`
+ } `xml:"temperature"`
+ Clocks struct {
+ GraphicsClock string `xml:"graphics_clock"`
+ SmClock string `xml:"sm_clock"`
+ MemClock string `xml:"mem_clock"`
+ VideoClock string `xml:"video_clock"`
+ } `xml:"clocks"`
+ PowerReadings *gpuPowerReadings `xml:"power_readings"`
+ GPUPowerReadings *gpuPowerReadings `xml:"gpu_power_readings"`
+ Voltage struct {
+ GraphicsVolt string `xml:"graphics_volt"`
+ } `xml:"voltage"`
+ Processes struct {
+ ProcessInfo []struct {
+ PID string `xml:"pid"`
+ ProcessName string `xml:"process_name"`
+ UsedMemory string `xml:"used_memory"`
+			} `xml:"process_info"`
+ } `xml:"processes"`
+ }
+ gpuPowerReadings struct {
+ //PowerState string `xml:"power_state"`
+ //PowerManagement string `xml:"power_management"`
+ PowerDraw string `xml:"power_draw"`
+ //PowerLimit string `xml:"power_limit"`
+ //DefaultPowerLimit string `xml:"default_power_limit"`
+ //EnforcedPowerLimit string `xml:"enforced_power_limit"`
+ //MinPowerLimit string `xml:"min_power_limit"`
+ //MaxPowerLimit string `xml:"max_power_limit"`
+ }
+
+ gpuMIGDeviceInfo struct {
+ Index string `xml:"index"`
+ GPUInstanceID string `xml:"gpu_instance_id"`
+ ComputeInstanceID string `xml:"compute_instance_id"`
+ DeviceAttributes struct {
+ Shared struct {
+ MultiprocessorCount string `xml:"multiprocessor_count"`
+ CopyEngineCount string `xml:"copy_engine_count"`
+ EncoderCount string `xml:"encoder_count"`
+ DecoderCount string `xml:"decoder_count"`
+ OFACount string `xml:"ofa_count"`
+ JPGCount string `xml:"jpg_count"`
+ } `xml:"shared"`
+ } `xml:"device_attributes"`
+ ECCErrorCount struct {
+ VolatileCount struct {
+ SRAMUncorrectable string `xml:"sram_uncorrectable"`
+ } `xml:"volatile_count"`
+ } `xml:"ecc_error_count"`
+ FBMemoryUsage struct {
+ Free string `xml:"free"`
+ Used string `xml:"used"`
+ Reserved string `xml:"reserved"`
+ } `xml:"fb_memory_usage"`
+ BAR1MemoryUsage struct {
+ Free string `xml:"free"`
+ Used string `xml:"used"`
+ } `xml:"bar1_memory_usage"`
+ }
+)
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/init.go b/src/go/plugin/go.d/modules/nvidia_smi/init.go
new file mode 100644
index 000000000..c13b2fffd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/init.go
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+)
+
+func (nv *NvidiaSmi) initNvidiaSmiExec() (nvidiaSmiBinary, error) {
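+	// Use the configured path if the file exists; otherwise look up the default binary name in PATH.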
+ binPath := nv.BinaryPath
+ if _, err := os.Stat(binPath); os.IsNotExist(err) {
+ path, err := exec.LookPath(nv.binName)
+ if err != nil {
+ return nil, fmt.Errorf("error on lookup '%s': %v", nv.binName, err)
+ }
+ binPath = path
+ }
+
+ return newNvidiaSmiBinary(binPath, nv.Config, nv.Logger)
+}
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md b/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md
new file mode 100644
index 000000000..620c09639
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/integrations/nvidia_gpu.md
@@ -0,0 +1,232 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nvidia_smi/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nvidia_smi/metadata.yaml"
+sidebar_label: "Nvidia GPU"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Nvidia GPU
+
+
+<img src="https://netdata.cloud/img/nvidia.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: nvidia_smi
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors GPU performance metrics using
+the [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.
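+
+Under the hood, the collector runs `nvidia-smi -q -x` (or `nvidia-smi -q -x -l SEC` in loop mode) and parses the XML output. As a quick sanity check, you can run the same query manually on the monitored host (this assumes `nvidia-smi` is available in your `PATH`):
+
+```bash
+# Print the full GPU report as XML, the same format the collector parses.
+nvidia-smi -q -x
+```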
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per gpu
+
+These metrics refer to the GPU.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| uuid | GPU id (e.g. 00000000:00:04.0) |
+| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nvidia_smi.gpu_pcie_bandwidth_usage | rx, tx | B/s |
+| nvidia_smi.gpu_pcie_bandwidth_utilization | rx, tx | % |
+| nvidia_smi.gpu_fan_speed_perc | fan_speed | % |
+| nvidia_smi.gpu_utilization | gpu | % |
+| nvidia_smi.gpu_memory_utilization | memory | % |
+| nvidia_smi.gpu_decoder_utilization | decoder | % |
+| nvidia_smi.gpu_encoder_utilization | encoder | % |
+| nvidia_smi.gpu_frame_buffer_memory_usage | free, used, reserved | B |
+| nvidia_smi.gpu_bar1_memory_usage | free, used | B |
+| nvidia_smi.gpu_temperature | temperature | Celsius |
+| nvidia_smi.gpu_voltage | voltage | V |
+| nvidia_smi.gpu_clock_freq | graphics, video, sm, mem | MHz |
+| nvidia_smi.gpu_power_draw | power_draw | Watts |
+| nvidia_smi.gpu_performance_state | P0-P15 | state |
+| nvidia_smi.gpu_mig_mode_current_status | enabled, disabled | status |
+| nvidia_smi.gpu_mig_devices_count | mig | devices |
+
+### Per mig
+
+These metrics refer to the Multi-Instance GPU (MIG).
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| uuid | GPU id (e.g. 00000000:00:04.0) |
+| product_name | GPU product name (e.g. NVIDIA A100-SXM4-40GB) |
+| gpu_instance_id | GPU instance id (e.g. 1) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nvidia_smi.gpu_mig_frame_buffer_memory_usage | free, used, reserved | B |
+| nvidia_smi.gpu_mig_bar1_memory_usage | free, used | B |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/nvidia_smi.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/nvidia_smi.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| binary_path | Path to the `nvidia-smi` binary. The default is "nvidia-smi" and the executable is looked for in the directories specified in the PATH environment variable. | nvidia-smi | no |
+| timeout | `nvidia-smi` binary execution timeout, in seconds. | 10 | no |
+| loop_mode | When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option. | yes | no |
+
+</details>
+
+#### Examples
+
+##### Custom binary path
+
+The executable is not in the directories specified in the PATH environment variable.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: nvidia_smi
+ binary_path: /usr/local/sbin/nvidia_smi
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `nvidia_smi` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m nvidia_smi
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `nvidia_smi` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep nvidia_smi
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep nvidia_smi /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep nvidia_smi
+```
+
+
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/metadata.yaml b/src/go/plugin/go.d/modules/nvidia_smi/metadata.yaml
new file mode 100644
index 000000000..2a79b5ac1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/metadata.yaml
@@ -0,0 +1,234 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-nvidia_smi
+ plugin_name: go.d.plugin
+ module_name: nvidia_smi
+ monitored_instance:
+ name: Nvidia GPU
+ link: https://www.nvidia.com/en-us/
+ icon_filename: nvidia.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords:
+ - nvidia
+ - gpu
+ - hardware
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+          This collector monitors GPU performance metrics using
+ the [nvidia-smi](https://developer.nvidia.com/nvidia-system-management-interface) CLI tool.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/nvidia_smi.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: binary_path
+              description: Path to the nvidia-smi binary. The default is "nvidia-smi" and the executable is looked for in the directories specified in the PATH environment variable.
+              default_value: nvidia-smi
+ required: false
+ - name: timeout
+              description: nvidia-smi binary execution timeout, in seconds.
+              default_value: 10
+ required: false
+ - name: loop_mode
+ description: "When enabled, `nvidia-smi` is executed continuously in a separate thread using the `-l` option."
+ default_value: true
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom binary path
+ description: The executable is not in the directories specified in the PATH environment variable.
+ config: |
+ jobs:
+ - name: nvidia_smi
+ binary_path: /usr/local/sbin/nvidia_smi
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: gpu
+ description: These metrics refer to the GPU.
+ labels:
+ - name: uuid
+ description: GPU id (e.g. 00000000:00:04.0)
+ - name: product_name
+ description: GPU product name (e.g. NVIDIA A100-SXM4-40GB)
+ metrics:
+ - name: nvidia_smi.gpu_pcie_bandwidth_usage
+ description: PCI Express Bandwidth Usage
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: rx
+ - name: tx
+ - name: nvidia_smi.gpu_pcie_bandwidth_utilization
+ description: PCI Express Bandwidth Utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: rx
+ - name: tx
+ - name: nvidia_smi.gpu_fan_speed_perc
+ description: Fan speed
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: fan_speed
+ - name: nvidia_smi.gpu_utilization
+ description: GPU utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: gpu
+ - name: nvidia_smi.gpu_memory_utilization
+ description: Memory utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: nvidia_smi.gpu_decoder_utilization
+ description: Decoder utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: decoder
+ - name: nvidia_smi.gpu_encoder_utilization
+ description: Encoder utilization
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: encoder
+ - name: nvidia_smi.gpu_frame_buffer_memory_usage
+ description: Frame buffer memory usage
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: reserved
+ - name: nvidia_smi.gpu_bar1_memory_usage
+ description: BAR1 memory usage
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: nvidia_smi.gpu_temperature
+ description: Temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: nvidia_smi.gpu_voltage
+ description: Voltage
+ unit: V
+ chart_type: line
+ dimensions:
+ - name: voltage
+ - name: nvidia_smi.gpu_clock_freq
+ description: Clock current frequency
+ unit: MHz
+ chart_type: line
+ dimensions:
+ - name: graphics
+ - name: video
+ - name: sm
+ - name: mem
+ - name: nvidia_smi.gpu_power_draw
+ description: Power draw
+ unit: Watts
+ chart_type: line
+ dimensions:
+ - name: power_draw
+ - name: nvidia_smi.gpu_performance_state
+ description: Performance state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: P0-P15
+ - name: nvidia_smi.gpu_mig_mode_current_status
+ description: MIG current mode
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: enabled
+ - name: disabled
+ - name: nvidia_smi.gpu_mig_devices_count
+ description: MIG devices
+ unit: devices
+ chart_type: line
+ dimensions:
+ - name: mig
+ - name: mig
+ description: These metrics refer to the Multi-Instance GPU (MIG).
+ labels:
+ - name: uuid
+ description: GPU id (e.g. 00000000:00:04.0)
+ - name: product_name
+ description: GPU product name (e.g. NVIDIA A100-SXM4-40GB)
+ - name: gpu_instance_id
+ description: GPU instance id (e.g. 1)
+ metrics:
+ - name: nvidia_smi.gpu_mig_frame_buffer_memory_usage
+ description: Frame buffer memory usage
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: reserved
+ - name: nvidia_smi.gpu_mig_bar1_memory_usage
+ description: BAR1 memory usage
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi.go b/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi.go
new file mode 100644
index 000000000..3f89df05a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("nvidia_smi", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *NvidiaSmi {
+ return &NvidiaSmi{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 10),
+ LoopMode: true,
+ },
+ binName: "nvidia-smi",
+ charts: &module.Charts{},
+ gpus: make(map[string]bool),
+ migs: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ BinaryPath string `yaml:"binary_path" json:"binary_path"`
+ LoopMode bool `yaml:"loop_mode,omitempty" json:"loop_mode"`
+}
+
+type NvidiaSmi struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec nvidiaSmiBinary
+ binName string
+
+ gpus map[string]bool
+ migs map[string]bool
+}
+
+func (nv *NvidiaSmi) Configuration() any {
+ return nv.Config
+}
+
+func (nv *NvidiaSmi) Init() error {
+ if nv.exec == nil {
+ smi, err := nv.initNvidiaSmiExec()
+ if err != nil {
+ nv.Error(err)
+ return err
+ }
+ nv.exec = smi
+ }
+
+ return nil
+}
+
+func (nv *NvidiaSmi) Check() error {
+ mx, err := nv.collect()
+ if err != nil {
+ nv.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (nv *NvidiaSmi) Charts() *module.Charts {
+ return nv.charts
+}
+
+func (nv *NvidiaSmi) Collect() map[string]int64 {
+ mx, err := nv.collect()
+ if err != nil {
+ nv.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (nv *NvidiaSmi) Cleanup() {
+ if nv.exec != nil {
+ if err := nv.exec.stop(); err != nil {
+ nv.Errorf("cleanup: %v", err)
+ }
+ nv.exec = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi_test.go b/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi_test.go
new file mode 100644
index 000000000..d2070b069
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/nvidia_smi_test.go
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvidia_smi
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataXMLRTX2080Win, _ = os.ReadFile("testdata/rtx-2080-win.xml")
+ dataXMLRTX4090Driver535, _ = os.ReadFile("testdata/rtx-4090-driver-535.xml")
+ dataXMLRTX3060, _ = os.ReadFile("testdata/rtx-3060.xml")
+ dataXMLTeslaP100, _ = os.ReadFile("testdata/tesla-p100.xml")
+
+ dataXMLA100SXM4MIG, _ = os.ReadFile("testdata/a100-sxm4-mig.xml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataXMLRTX2080Win": dataXMLRTX2080Win,
+ "dataXMLRTX4090Driver535": dataXMLRTX4090Driver535,
+ "dataXMLRTX3060": dataXMLRTX3060,
+ "dataXMLTeslaP100": dataXMLTeslaP100,
+ "dataXMLA100SXM4MIG": dataXMLA100SXM4MIG,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestNvidiaSmi_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NvidiaSmi{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNvidiaSmi_Init(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(nv *NvidiaSmi)
+ wantFail bool
+ }{
+		"fails if can't locate nvidia-smi": {
+ wantFail: true,
+ prepare: func(nv *NvidiaSmi) {
+ nv.binName += "!!!"
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nv := New()
+
+ test.prepare(nv)
+
+ if test.wantFail {
+ assert.Error(t, nv.Init())
+ } else {
+ assert.NoError(t, nv.Init())
+ }
+ })
+ }
+}
+
+func TestNvidiaSmi_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestNvidiaSmi_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(nv *NvidiaSmi)
+ wantFail bool
+ }{
+ "success A100-SXM4 MIG": {
+ wantFail: false,
+ prepare: prepareCaseMIGA100,
+ },
+ "success RTX 3060": {
+ wantFail: false,
+ prepare: prepareCaseRTX3060,
+ },
+ "success Tesla P100": {
+ wantFail: false,
+ prepare: prepareCaseTeslaP100,
+ },
+ "success RTX 2080 Win": {
+ wantFail: false,
+ prepare: prepareCaseRTX2080Win,
+ },
+ "fail on queryGPUInfo error": {
+ wantFail: true,
+ prepare: prepareCaseErrOnQueryGPUInfo,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nv := New()
+
+ test.prepare(nv)
+
+ if test.wantFail {
+ assert.Error(t, nv.Check())
+ } else {
+ assert.NoError(t, nv.Check())
+ }
+ })
+ }
+}
+
+func TestNvidiaSmi_Collect(t *testing.T) {
+ type testCaseStep struct {
+ prepare func(nv *NvidiaSmi)
+ check func(t *testing.T, nv *NvidiaSmi)
+ }
+ tests := map[string][]testCaseStep{
+ "success A100-SXM4 MIG": {
+ {
+ prepare: prepareCaseMIGA100,
+ check: func(t *testing.T, nv *NvidiaSmi) {
+ mx := nv.Collect()
+
+ expected := map[string]int64{
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_free": 68718428160,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_used": 1048576,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_free": 42273341440,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_reserved": 634388480,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_used": 39845888,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_graphics_clock": 1410,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_mem_clock": 1215,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_mig_current_mode_disabled": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_mig_current_mode_enabled": 1,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_mig_devices_count": 2,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_pcie_bandwidth_usage_rx": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_pcie_bandwidth_usage_tx": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_pcie_bandwidth_utilization_rx": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_pcie_bandwidth_utilization_tx": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P0": 1,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P1": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P10": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P11": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P12": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P13": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P14": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P15": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P2": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P3": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P4": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P5": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P6": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P7": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P8": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_performance_state_P9": 0,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_power_draw": 66,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_sm_clock": 1410,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_temperature": 36,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_video_clock": 1275,
+ "gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_voltage": 881,
+ "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_free": 34358689792,
+ "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_used": 0,
+ "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_ecc_error_sram_uncorrectable": 0,
+ "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_free": 20916994048,
+ "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_reserved": 0,
+ "mig_instance_1_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_used": 19922944,
+ "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_free": 34358689792,
+ "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_bar1_memory_usage_used": 0,
+ "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_ecc_error_sram_uncorrectable": 0,
+ "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_free": 20916994048,
+ "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_reserved": 0,
+ "mig_instance_2_gpu_GPU-27b94a00-ed54-5c24-b1fd-1054085de32a_frame_buffer_memory_usage_used": 19922944,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "success RTX 4090 Driver 535": {
+ {
+ prepare: prepareCaseRTX4090Driver535,
+ check: func(t *testing.T, nv *NvidiaSmi) {
+ mx := nv.Collect()
+
+ expected := map[string]int64{
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_bar1_memory_usage_free": 267386880,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_bar1_memory_usage_used": 1048576,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_decoder_utilization": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_encoder_utilization": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_fan_speed_perc": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_frame_buffer_memory_usage_free": 25390219264,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_frame_buffer_memory_usage_reserved": 362807296,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_frame_buffer_memory_usage_used": 2097152,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_gpu_utilization": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_graphics_clock": 210,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_mem_clock": 405,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_mem_utilization": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_pcie_bandwidth_usage_rx": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_pcie_bandwidth_usage_tx": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_pcie_bandwidth_utilization_rx": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_pcie_bandwidth_utilization_tx": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P0": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P1": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P10": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P11": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P12": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P13": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P14": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P15": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P2": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P3": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P4": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P5": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P6": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P7": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P8": 1,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_performance_state_P9": 0,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_power_draw": 26,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_sm_clock": 210,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_temperature": 40,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_video_clock": 1185,
+ "gpu_GPU-71d1acc2-662d-2166-bf9f-65272d2fc437_voltage": 880,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "success RTX 3060": {
+ {
+ prepare: prepareCaseRTX3060,
+ check: func(t *testing.T, nv *NvidiaSmi) {
+ mx := nv.Collect()
+
+ expected := map[string]int64{
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_bar1_memory_usage_free": 8586788864,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_bar1_memory_usage_used": 3145728,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_decoder_utilization": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_encoder_utilization": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_frame_buffer_memory_usage_free": 6228541440,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_frame_buffer_memory_usage_reserved": 206569472,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_frame_buffer_memory_usage_used": 5242880,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_gpu_utilization": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_graphics_clock": 210,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_mem_clock": 405,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_mem_utilization": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_pcie_bandwidth_usage_rx": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_pcie_bandwidth_usage_tx": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_pcie_bandwidth_utilization_rx": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_pcie_bandwidth_utilization_tx": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P0": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P1": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P10": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P11": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P12": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P13": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P14": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P15": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P2": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P3": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P4": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P5": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P6": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P7": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P8": 1,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_performance_state_P9": 0,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_power_draw": 8,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_sm_clock": 210,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_temperature": 45,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_video_clock": 555,
+ "gpu_GPU-473d8d0f-d462-185c-6b36-6fc23e23e571_voltage": 631,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "success Tesla P100": {
+ {
+ prepare: prepareCaseTeslaP100,
+ check: func(t *testing.T, nv *NvidiaSmi) {
+ mx := nv.Collect()
+
+ expected := map[string]int64{
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_bar1_memory_usage_free": 17177772032,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_bar1_memory_usage_used": 2097152,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_decoder_utilization": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_encoder_utilization": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_frame_buffer_memory_usage_free": 17070817280,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_frame_buffer_memory_usage_reserved": 108003328,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_frame_buffer_memory_usage_used": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_gpu_utilization": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_graphics_clock": 405,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_mem_clock": 715,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_mem_utilization": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_pcie_bandwidth_usage_rx": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_pcie_bandwidth_usage_tx": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_pcie_bandwidth_utilization_rx": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_pcie_bandwidth_utilization_tx": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P0": 1,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P1": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P10": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P11": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P12": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P13": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P14": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P15": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P2": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P3": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P4": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P5": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P6": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P7": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P8": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_performance_state_P9": 0,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_power_draw": 26,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_sm_clock": 405,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_temperature": 38,
+ "gpu_GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e_video_clock": 835,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "success RTX 2080 Win": {
+ {
+ prepare: prepareCaseRTX2080Win,
+ check: func(t *testing.T, nv *NvidiaSmi) {
+ mx := nv.Collect()
+
+ expected := map[string]int64{
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_bar1_memory_usage_free": 266338304,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_bar1_memory_usage_used": 2097152,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_decoder_utilization": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_encoder_utilization": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_fan_speed_perc": 37,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_frame_buffer_memory_usage_free": 7494172672,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_frame_buffer_memory_usage_reserved": 190840832,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_frame_buffer_memory_usage_used": 903872512,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_gpu_utilization": 2,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_graphics_clock": 193,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_mem_clock": 403,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_mem_utilization": 7,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_pcie_bandwidth_usage_rx": 93184000,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_pcie_bandwidth_usage_tx": 13312000,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_pcie_bandwidth_utilization_rx": 59,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_pcie_bandwidth_utilization_tx": 8,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P0": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P1": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P10": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P11": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P12": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P13": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P14": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P15": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P2": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P3": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P4": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P5": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P6": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P7": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P8": 1,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_performance_state_P9": 0,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_power_draw": 14,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_sm_clock": 193,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_temperature": 29,
+ "gpu_GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3_video_clock": 539,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "fails on queryGPUInfo error": {
+ {
+ prepare: prepareCaseErrOnQueryGPUInfo,
+ check: func(t *testing.T, nv *NvidiaSmi) {
+ mx := nv.Collect()
+
+ assert.Equal(t, map[string]int64(nil), mx)
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nv := New()
+
+ for i, step := range test {
+ t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
+ step.prepare(nv)
+ step.check(t, nv)
+ })
+ }
+ })
+ }
+}
+
+type mockNvidiaSmi struct {
+ gpuInfo []byte
+ errOnQueryGPUInfo bool
+}
+
+func (m *mockNvidiaSmi) queryGPUInfo() ([]byte, error) {
+ if m.errOnQueryGPUInfo {
+ return nil, errors.New("error on mock.queryGPUInfo()")
+ }
+ return m.gpuInfo, nil
+}
+
+func (m *mockNvidiaSmi) stop() error {
+ return nil
+}
+
+func prepareCaseMIGA100(nv *NvidiaSmi) {
+ nv.exec = &mockNvidiaSmi{gpuInfo: dataXMLA100SXM4MIG}
+}
+
+func prepareCaseRTX3060(nv *NvidiaSmi) {
+ nv.exec = &mockNvidiaSmi{gpuInfo: dataXMLRTX3060}
+}
+
+func prepareCaseRTX4090Driver535(nv *NvidiaSmi) {
+ nv.exec = &mockNvidiaSmi{gpuInfo: dataXMLRTX4090Driver535}
+}
+
+func prepareCaseTeslaP100(nv *NvidiaSmi) {
+ nv.exec = &mockNvidiaSmi{gpuInfo: dataXMLTeslaP100}
+}
+
+func prepareCaseRTX2080Win(nv *NvidiaSmi) {
+ nv.exec = &mockNvidiaSmi{gpuInfo: dataXMLRTX2080Win}
+}
+
+func prepareCaseErrOnQueryGPUInfo(nv *NvidiaSmi) {
+ nv.exec = &mockNvidiaSmi{errOnQueryGPUInfo: true}
+}
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/testdata/a100-sxm4-mig.xml b/src/go/plugin/go.d/modules/nvidia_smi/testdata/a100-sxm4-mig.xml
new file mode 100644
index 000000000..74146ac78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/testdata/a100-sxm4-mig.xml
@@ -0,0 +1,359 @@
+<?xml version="1.0" ?>
+<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v11.dtd">
+<nvidia_smi_log>
+ <timestamp>Fri Jan 27 11:32:31 2023</timestamp>
+ <driver_version>510.47.03</driver_version>
+ <cuda_version>11.6</cuda_version>
+ <attached_gpus>1</attached_gpus>
+ <gpu id="00000000:00:04.0">
+ <product_name>NVIDIA A100-SXM4-40GB</product_name>
+ <product_brand>NVIDIA</product_brand>
+ <product_architecture>Ampere</product_architecture>
+ <display_mode>Enabled</display_mode>
+ <display_active>Disabled</display_active>
+ <persistence_mode>Disabled</persistence_mode>
+ <mig_mode>
+ <current_mig>Enabled</current_mig>
+ <pending_mig>Enabled</pending_mig>
+ </mig_mode>
+ <mig_devices>
+ <mig_device>
+ <index>0</index>
+ <gpu_instance_id>1</gpu_instance_id>
+ <compute_instance_id>0</compute_instance_id>
+ <device_attributes>
+ <shared>
+ <multiprocessor_count>42</multiprocessor_count>
+ <copy_engine_count>3</copy_engine_count>
+ <encoder_count>0</encoder_count>
+ <decoder_count>2</decoder_count>
+ <ofa_count>0</ofa_count>
+ <jpg_count>0</jpg_count>
+ </shared>
+ </device_attributes>
+ <ecc_error_count>
+ <volatile_count>
+ <sram_uncorrectable>0</sram_uncorrectable>
+ </volatile_count>
+ </ecc_error_count>
+ <fb_memory_usage>
+ <total>19968 MiB</total>
+ <reserved>0 MiB</reserved>
+ <used>19 MiB</used>
+ <free>19948 MiB</free>
+ </fb_memory_usage>
+ <bar1_memory_usage>
+ <total>32767 MiB</total>
+ <used>0 MiB</used>
+ <free>32767 MiB</free>
+ </bar1_memory_usage>
+ </mig_device>
+ <mig_device>
+ <index>1</index>
+ <gpu_instance_id>2</gpu_instance_id>
+ <compute_instance_id>0</compute_instance_id>
+ <device_attributes>
+ <shared>
+ <multiprocessor_count>42</multiprocessor_count>
+ <copy_engine_count>3</copy_engine_count>
+ <encoder_count>0</encoder_count>
+ <decoder_count>2</decoder_count>
+ <ofa_count>0</ofa_count>
+ <jpg_count>0</jpg_count>
+ </shared>
+ </device_attributes>
+ <ecc_error_count>
+ <volatile_count>
+ <sram_uncorrectable>0</sram_uncorrectable>
+ </volatile_count>
+ </ecc_error_count>
+ <fb_memory_usage>
+ <total>19968 MiB</total>
+ <reserved>0 MiB</reserved>
+ <used>19 MiB</used>
+ <free>19948 MiB</free>
+ </fb_memory_usage>
+ <bar1_memory_usage>
+ <total>32767 MiB</total>
+ <used>0 MiB</used>
+ <free>32767 MiB</free>
+ </bar1_memory_usage>
+ </mig_device>
+ </mig_devices>
+ <accounting_mode>Disabled</accounting_mode>
+ <accounting_mode_buffer_size>4000</accounting_mode_buffer_size>
+ <driver_model>
+ <current_dm>N/A</current_dm>
+ <pending_dm>N/A</pending_dm>
+ </driver_model>
+ <serial>1324321002473</serial>
+ <uuid>GPU-27b94a00-ed54-5c24-b1fd-1054085de32a</uuid>
+ <minor_number>0</minor_number>
+ <vbios_version>92.00.45.00.03</vbios_version>
+ <multigpu_board>No</multigpu_board>
+ <board_id>0x4</board_id>
+ <gpu_part_number>692-2G506-0200-003</gpu_part_number>
+ <gpu_module_id>3</gpu_module_id>
+ <inforom_version>
+ <img_version>G506.0200.00.04</img_version>
+ <oem_object>2.0</oem_object>
+ <ecc_object>6.16</ecc_object>
+ <pwr_object>N/A</pwr_object>
+ </inforom_version>
+ <gpu_operation_mode>
+ <current_gom>N/A</current_gom>
+ <pending_gom>N/A</pending_gom>
+ </gpu_operation_mode>
+ <gsp_firmware_version>510.47.03</gsp_firmware_version>
+ <gpu_virtualization_mode>
+ <virtualization_mode>Pass-Through</virtualization_mode>
+ <host_vgpu_mode>N/A</host_vgpu_mode>
+ </gpu_virtualization_mode>
+ <ibmnpu>
+ <relaxed_ordering_mode>N/A</relaxed_ordering_mode>
+ </ibmnpu>
+ <pci>
+ <pci_bus>00</pci_bus>
+ <pci_device>04</pci_device>
+ <pci_domain>0000</pci_domain>
+ <pci_device_id>20B010DE</pci_device_id>
+ <pci_bus_id>00000000:00:04.0</pci_bus_id>
+ <pci_sub_system_id>134F10DE</pci_sub_system_id>
+ <pci_gpu_link_info>
+ <pcie_gen>
+ <max_link_gen>4</max_link_gen>
+ <current_link_gen>4</current_link_gen>
+ </pcie_gen>
+ <link_widths>
+ <max_link_width>16x</max_link_width>
+ <current_link_width>16x</current_link_width>
+ </link_widths>
+ </pci_gpu_link_info>
+ <pci_bridge_chip>
+ <bridge_chip_type>N/A</bridge_chip_type>
+ <bridge_chip_fw>N/A</bridge_chip_fw>
+ </pci_bridge_chip>
+ <replay_counter>0</replay_counter>
+ <replay_rollover_counter>0</replay_rollover_counter>
+ <tx_util>0 KB/s</tx_util>
+ <rx_util>0 KB/s</rx_util>
+ </pci>
+ <fan_speed>N/A</fan_speed>
+ <performance_state>P0</performance_state>
+ <clocks_throttle_reasons>
+ <clocks_throttle_reason_gpu_idle>Not Active</clocks_throttle_reason_gpu_idle>
+ <clocks_throttle_reason_applications_clocks_setting>Not Active
+ </clocks_throttle_reason_applications_clocks_setting>
+ <clocks_throttle_reason_sw_power_cap>Not Active</clocks_throttle_reason_sw_power_cap>
+ <clocks_throttle_reason_hw_slowdown>Not Active</clocks_throttle_reason_hw_slowdown>
+ <clocks_throttle_reason_hw_thermal_slowdown>Not Active</clocks_throttle_reason_hw_thermal_slowdown>
+ <clocks_throttle_reason_hw_power_brake_slowdown>Not Active</clocks_throttle_reason_hw_power_brake_slowdown>
+ <clocks_throttle_reason_sync_boost>Not Active</clocks_throttle_reason_sync_boost>
+ <clocks_throttle_reason_sw_thermal_slowdown>Not Active</clocks_throttle_reason_sw_thermal_slowdown>
+ <clocks_throttle_reason_display_clocks_setting>Not Active</clocks_throttle_reason_display_clocks_setting>
+ </clocks_throttle_reasons>
+ <fb_memory_usage>
+ <total>40960 MiB</total>
+ <reserved>605 MiB</reserved>
+ <used>38 MiB</used>
+ <free>40315 MiB</free>
+ </fb_memory_usage>
+ <bar1_memory_usage>
+ <total>65536 MiB</total>
+ <used>1 MiB</used>
+ <free>65535 MiB</free>
+ </bar1_memory_usage>
+ <compute_mode>Default</compute_mode>
+ <utilization>
+ <gpu_util>N/A</gpu_util>
+ <memory_util>N/A</memory_util>
+ <encoder_util>N/A</encoder_util>
+ <decoder_util>N/A</decoder_util>
+ </utilization>
+ <encoder_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </encoder_stats>
+ <fbc_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </fbc_stats>
+ <ecc_mode>
+ <current_ecc>Enabled</current_ecc>
+ <pending_ecc>Enabled</pending_ecc>
+ </ecc_mode>
+ <ecc_errors>
+ <volatile>
+ <sram_correctable>N/A</sram_correctable>
+ <sram_uncorrectable>N/A</sram_uncorrectable>
+ <dram_correctable>N/A</dram_correctable>
+ <dram_uncorrectable>N/A</dram_uncorrectable>
+ </volatile>
+ <aggregate>
+ <sram_correctable>0</sram_correctable>
+ <sram_uncorrectable>0</sram_uncorrectable>
+ <dram_correctable>0</dram_correctable>
+ <dram_uncorrectable>0</dram_uncorrectable>
+ </aggregate>
+ </ecc_errors>
+ <retired_pages>
+ <multiple_single_bit_retirement>
+ <retired_count>N/A</retired_count>
+ <retired_pagelist>N/A</retired_pagelist>
+ </multiple_single_bit_retirement>
+ <double_bit_retirement>
+ <retired_count>N/A</retired_count>
+ <retired_pagelist>N/A</retired_pagelist>
+ </double_bit_retirement>
+ <pending_blacklist>N/A</pending_blacklist>
+ <pending_retirement>N/A</pending_retirement>
+ </retired_pages>
+ <remapped_rows>N/A</remapped_rows>
+ <temperature>
+ <gpu_temp>36 C</gpu_temp>
+ <gpu_temp_max_threshold>92 C</gpu_temp_max_threshold>
+ <gpu_temp_slow_threshold>89 C</gpu_temp_slow_threshold>
+ <gpu_temp_max_gpu_threshold>85 C</gpu_temp_max_gpu_threshold>
+ <gpu_target_temperature>N/A</gpu_target_temperature>
+ <memory_temp>44 C</memory_temp>
+ <gpu_temp_max_mem_threshold>95 C</gpu_temp_max_mem_threshold>
+ </temperature>
+ <supported_gpu_target_temp>
+ <gpu_target_temp_min>N/A</gpu_target_temp_min>
+ <gpu_target_temp_max>N/A</gpu_target_temp_max>
+ </supported_gpu_target_temp>
+ <power_readings>
+ <power_state>P0</power_state>
+ <power_management>Supported</power_management>
+ <power_draw>66.92 W</power_draw>
+ <power_limit>400.00 W</power_limit>
+ <default_power_limit>400.00 W</default_power_limit>
+ <enforced_power_limit>400.00 W</enforced_power_limit>
+ <min_power_limit>100.00 W</min_power_limit>
+ <max_power_limit>400.00 W</max_power_limit>
+ </power_readings>
+ <clocks>
+ <graphics_clock>1410 MHz</graphics_clock>
+ <sm_clock>1410 MHz</sm_clock>
+ <mem_clock>1215 MHz</mem_clock>
+ <video_clock>1275 MHz</video_clock>
+ </clocks>
+ <applications_clocks>
+ <graphics_clock>1095 MHz</graphics_clock>
+ <mem_clock>1215 MHz</mem_clock>
+ </applications_clocks>
+ <default_applications_clocks>
+ <graphics_clock>1095 MHz</graphics_clock>
+ <mem_clock>1215 MHz</mem_clock>
+ </default_applications_clocks>
+ <max_clocks>
+ <graphics_clock>1410 MHz</graphics_clock>
+ <sm_clock>1410 MHz</sm_clock>
+ <mem_clock>1215 MHz</mem_clock>
+ <video_clock>1290 MHz</video_clock>
+ </max_clocks>
+ <max_customer_boost_clocks>
+ <graphics_clock>1410 MHz</graphics_clock>
+ </max_customer_boost_clocks>
+ <clock_policy>
+ <auto_boost>N/A</auto_boost>
+ <auto_boost_default>N/A</auto_boost_default>
+ </clock_policy>
+ <voltage>
+ <graphics_volt>881.250 mV</graphics_volt>
+ </voltage>
+ <supported_clocks>
+ <supported_mem_clock>
+ <value>1215 MHz</value>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+ <supported_graphics_clock>990 MHz</supported_graphics_clock>
+ <supported_graphics_clock>975 MHz</supported_graphics_clock>
+ <supported_graphics_clock>960 MHz</supported_graphics_clock>
+ <supported_graphics_clock>945 MHz</supported_graphics_clock>
+ <supported_graphics_clock>930 MHz</supported_graphics_clock>
+ <supported_graphics_clock>915 MHz</supported_graphics_clock>
+ <supported_graphics_clock>900 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>870 MHz</supported_graphics_clock>
+ <supported_graphics_clock>855 MHz</supported_graphics_clock>
+ <supported_graphics_clock>840 MHz</supported_graphics_clock>
+ <supported_graphics_clock>825 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>795 MHz</supported_graphics_clock>
+ <supported_graphics_clock>780 MHz</supported_graphics_clock>
+ <supported_graphics_clock>765 MHz</supported_graphics_clock>
+ <supported_graphics_clock>750 MHz</supported_graphics_clock>
+ <supported_graphics_clock>735 MHz</supported_graphics_clock>
+ <supported_graphics_clock>720 MHz</supported_graphics_clock>
+ <supported_graphics_clock>705 MHz</supported_graphics_clock>
+ <supported_graphics_clock>690 MHz</supported_graphics_clock>
+ <supported_graphics_clock>675 MHz</supported_graphics_clock>
+ <supported_graphics_clock>660 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ <supported_graphics_clock>390 MHz</supported_graphics_clock>
+ <supported_graphics_clock>375 MHz</supported_graphics_clock>
+ <supported_graphics_clock>360 MHz</supported_graphics_clock>
+ <supported_graphics_clock>345 MHz</supported_graphics_clock>
+ <supported_graphics_clock>330 MHz</supported_graphics_clock>
+ <supported_graphics_clock>315 MHz</supported_graphics_clock>
+ <supported_graphics_clock>300 MHz</supported_graphics_clock>
+ <supported_graphics_clock>285 MHz</supported_graphics_clock>
+ <supported_graphics_clock>270 MHz</supported_graphics_clock>
+ <supported_graphics_clock>255 MHz</supported_graphics_clock>
+ <supported_graphics_clock>240 MHz</supported_graphics_clock>
+ <supported_graphics_clock>225 MHz</supported_graphics_clock>
+ <supported_graphics_clock>210 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ </supported_clocks>
+ <processes>
+ </processes>
+ <accounted_processes>
+ </accounted_processes>
+ </gpu>
+
+</nvidia_smi_log>
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/testdata/config.json b/src/go/plugin/go.d/modules/nvidia_smi/testdata/config.json
new file mode 100644
index 000000000..6ff795390
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "binary_path": "ok",
+ "loop_mode": true
+}
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/testdata/config.yaml b/src/go/plugin/go.d/modules/nvidia_smi/testdata/config.yaml
new file mode 100644
index 000000000..1f2fedef5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+timeout: 123.123
+binary_path: "ok"
+loop_mode: true
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-2080-win.xml b/src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-2080-win.xml
new file mode 100644
index 000000000..9bc0d2220
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-2080-win.xml
@@ -0,0 +1,776 @@
+<?xml version="1.0" ?>
+<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v11.dtd">
+<nvidia_smi_log>
+ <timestamp>Tue Sep 20 14:07:39 2022</timestamp>
+ <driver_version>516.59</driver_version>
+ <cuda_version>11.7</cuda_version>
+ <attached_gpus>1</attached_gpus>
+ <gpu id="00000000:0A:00.0">
+ <product_name>NVIDIA GeForce RTX 2080</product_name>
+ <product_brand>GeForce</product_brand>
+ <product_architecture>Turing</product_architecture>
+ <display_mode>Enabled</display_mode>
+ <display_active>Enabled</display_active>
+ <persistence_mode>N/A</persistence_mode>
+ <mig_mode>
+ <current_mig>N/A</current_mig>
+ <pending_mig>N/A</pending_mig>
+ </mig_mode>
+ <mig_devices>
+ None
+ </mig_devices>
+ <accounting_mode>Disabled</accounting_mode>
+ <accounting_mode_buffer_size>4000</accounting_mode_buffer_size>
+ <driver_model>
+ <current_dm>WDDM</current_dm>
+ <pending_dm>WDDM</pending_dm>
+ </driver_model>
+ <serial>N/A</serial>
+ <uuid>GPU-fbd55ed4-1eec-4423-0a47-ad594b4333e3</uuid>
+ <minor_number>N/A</minor_number>
+ <vbios_version>90.04.23.00.db</vbios_version>
+ <multigpu_board>No</multigpu_board>
+ <board_id>0xa00</board_id>
+ <gpu_part_number>N/A</gpu_part_number>
+ <gpu_module_id>0</gpu_module_id>
+ <inforom_version>
+ <img_version>G001.0000.02.04</img_version>
+ <oem_object>1.1</oem_object>
+ <ecc_object>N/A</ecc_object>
+ <pwr_object>N/A</pwr_object>
+ </inforom_version>
+ <gpu_operation_mode>
+ <current_gom>N/A</current_gom>
+ <pending_gom>N/A</pending_gom>
+ </gpu_operation_mode>
+ <gsp_firmware_version>N/A</gsp_firmware_version>
+ <gpu_virtualization_mode>
+ <virtualization_mode>None</virtualization_mode>
+ <host_vgpu_mode>N/A</host_vgpu_mode>
+ </gpu_virtualization_mode>
+ <ibmnpu>
+ <relaxed_ordering_mode>N/A</relaxed_ordering_mode>
+ </ibmnpu>
+ <pci>
+ <pci_bus>0A</pci_bus>
+ <pci_device>00</pci_device>
+ <pci_domain>0000</pci_domain>
+ <pci_device_id>1E8710DE</pci_device_id>
+ <pci_bus_id>00000000:0A:00.0</pci_bus_id>
+ <pci_sub_system_id>37AF1458</pci_sub_system_id>
+ <pci_gpu_link_info>
+ <pcie_gen>
+ <max_link_gen>3</max_link_gen>
+ <current_link_gen>3</current_link_gen>
+ </pcie_gen>
+ <link_widths>
+ <max_link_width>16x</max_link_width>
+ <current_link_width>8x</current_link_width>
+ </link_widths>
+ </pci_gpu_link_info>
+ <pci_bridge_chip>
+ <bridge_chip_type>N/A</bridge_chip_type>
+ <bridge_chip_fw>N/A</bridge_chip_fw>
+ </pci_bridge_chip>
+ <replay_counter>0</replay_counter>
+ <replay_rollover_counter>0</replay_rollover_counter>
+ <tx_util>13000 KB/s</tx_util>
+ <rx_util>91000 KB/s</rx_util>
+ </pci>
+ <fan_speed>37 %</fan_speed>
+ <performance_state>P8</performance_state>
+ <clocks_throttle_reasons>
+ <clocks_throttle_reason_gpu_idle>Active</clocks_throttle_reason_gpu_idle>
+ <clocks_throttle_reason_applications_clocks_setting>Not Active</clocks_throttle_reason_applications_clocks_setting>
+ <clocks_throttle_reason_sw_power_cap>Not Active</clocks_throttle_reason_sw_power_cap>
+ <clocks_throttle_reason_hw_slowdown>Not Active</clocks_throttle_reason_hw_slowdown>
+ <clocks_throttle_reason_hw_thermal_slowdown>Not Active</clocks_throttle_reason_hw_thermal_slowdown>
+ <clocks_throttle_reason_hw_power_brake_slowdown>Not Active</clocks_throttle_reason_hw_power_brake_slowdown>
+ <clocks_throttle_reason_sync_boost>Not Active</clocks_throttle_reason_sync_boost>
+ <clocks_throttle_reason_sw_thermal_slowdown>Not Active</clocks_throttle_reason_sw_thermal_slowdown>
+ <clocks_throttle_reason_display_clocks_setting>Not Active</clocks_throttle_reason_display_clocks_setting>
+ </clocks_throttle_reasons>
+ <fb_memory_usage>
+ <total>8192 MiB</total>
+ <reserved>182 MiB</reserved>
+ <used>862 MiB</used>
+ <free>7147 MiB</free>
+ </fb_memory_usage>
+ <bar1_memory_usage>
+ <total>256 MiB</total>
+ <used>2 MiB</used>
+ <free>254 MiB</free>
+ </bar1_memory_usage>
+ <compute_mode>Default</compute_mode>
+ <utilization>
+ <gpu_util>2 %</gpu_util>
+ <memory_util>7 %</memory_util>
+ <encoder_util>0 %</encoder_util>
+ <decoder_util>0 %</decoder_util>
+ </utilization>
+ <encoder_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </encoder_stats>
+ <fbc_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </fbc_stats>
+ <ecc_mode>
+ <current_ecc>N/A</current_ecc>
+ <pending_ecc>N/A</pending_ecc>
+ </ecc_mode>
+ <ecc_errors>
+ <volatile>
+ <sram_correctable>N/A</sram_correctable>
+ <sram_uncorrectable>N/A</sram_uncorrectable>
+ <dram_correctable>N/A</dram_correctable>
+ <dram_uncorrectable>N/A</dram_uncorrectable>
+ </volatile>
+ <aggregate>
+ <sram_correctable>N/A</sram_correctable>
+ <sram_uncorrectable>N/A</sram_uncorrectable>
+ <dram_correctable>N/A</dram_correctable>
+ <dram_uncorrectable>N/A</dram_uncorrectable>
+ </aggregate>
+ </ecc_errors>
+ <retired_pages>
+ <multiple_single_bit_retirement>
+ <retired_count>N/A</retired_count>
+ <retired_pagelist>N/A</retired_pagelist>
+ </multiple_single_bit_retirement>
+ <double_bit_retirement>
+ <retired_count>N/A</retired_count>
+ <retired_pagelist>N/A</retired_pagelist>
+ </double_bit_retirement>
+ <pending_blacklist>N/A</pending_blacklist>
+ <pending_retirement>N/A</pending_retirement>
+ </retired_pages>
+ <remapped_rows>N/A</remapped_rows>
+ <temperature>
+ <gpu_temp>29 C</gpu_temp>
+ <gpu_temp_max_threshold>100 C</gpu_temp_max_threshold>
+ <gpu_temp_slow_threshold>97 C</gpu_temp_slow_threshold>
+ <gpu_temp_max_gpu_threshold>88 C</gpu_temp_max_gpu_threshold>
+ <gpu_target_temperature>83 C</gpu_target_temperature>
+ <memory_temp>N/A</memory_temp>
+ <gpu_temp_max_mem_threshold>N/A</gpu_temp_max_mem_threshold>
+ </temperature>
+ <supported_gpu_target_temp>
+ <gpu_target_temp_min>65 C</gpu_target_temp_min>
+ <gpu_target_temp_max>88 C</gpu_target_temp_max>
+ </supported_gpu_target_temp>
+ <power_readings>
+ <power_state>P8</power_state>
+ <power_management>Supported</power_management>
+ <power_draw>14.50 W</power_draw>
+ <power_limit>275.00 W</power_limit>
+ <default_power_limit>275.00 W</default_power_limit>
+ <enforced_power_limit>275.00 W</enforced_power_limit>
+ <min_power_limit>105.00 W</min_power_limit>
+ <max_power_limit>350.00 W</max_power_limit>
+ </power_readings>
+ <clocks>
+ <graphics_clock>193 MHz</graphics_clock>
+ <sm_clock>193 MHz</sm_clock>
+ <mem_clock>403 MHz</mem_clock>
+ <video_clock>539 MHz</video_clock>
+ </clocks>
+ <applications_clocks>
+ <graphics_clock>N/A</graphics_clock>
+ <mem_clock>N/A</mem_clock>
+ </applications_clocks>
+ <default_applications_clocks>
+ <graphics_clock>N/A</graphics_clock>
+ <mem_clock>N/A</mem_clock>
+ </default_applications_clocks>
+ <max_clocks>
+ <graphics_clock>3060 MHz</graphics_clock>
+ <sm_clock>3060 MHz</sm_clock>
+ <mem_clock>7560 MHz</mem_clock>
+ <video_clock>1950 MHz</video_clock>
+ </max_clocks>
+ <max_customer_boost_clocks>
+ <graphics_clock>N/A</graphics_clock>
+ </max_customer_boost_clocks>
+ <clock_policy>
+ <auto_boost>N/A</auto_boost>
+ <auto_boost_default>N/A</auto_boost_default>
+ </clock_policy>
+ <voltage>
+ <graphics_volt>N/A</graphics_volt>
+ </voltage>
+ <supported_clocks>
+ <supported_mem_clock>
+ <value>7560 MHz</value>
+ <supported_graphics_clock>2220 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2205 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2190 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2175 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2160 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2145 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2130 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2115 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>7360 MHz</value>
+ <supported_graphics_clock>2220 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2205 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2190 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2175 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2160 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2145 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2130 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2115 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>5000 MHz</value>
+ <supported_graphics_clock>2220 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2205 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2190 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2175 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2160 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2145 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2130 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2115 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>810 MHz</value>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+ <supported_graphics_clock>990 MHz</supported_graphics_clock>
+ <supported_graphics_clock>975 MHz</supported_graphics_clock>
+ <supported_graphics_clock>960 MHz</supported_graphics_clock>
+ <supported_graphics_clock>945 MHz</supported_graphics_clock>
+ <supported_graphics_clock>930 MHz</supported_graphics_clock>
+ <supported_graphics_clock>915 MHz</supported_graphics_clock>
+ <supported_graphics_clock>900 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>870 MHz</supported_graphics_clock>
+ <supported_graphics_clock>855 MHz</supported_graphics_clock>
+ <supported_graphics_clock>840 MHz</supported_graphics_clock>
+ <supported_graphics_clock>825 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>795 MHz</supported_graphics_clock>
+ <supported_graphics_clock>780 MHz</supported_graphics_clock>
+ <supported_graphics_clock>765 MHz</supported_graphics_clock>
+ <supported_graphics_clock>750 MHz</supported_graphics_clock>
+ <supported_graphics_clock>735 MHz</supported_graphics_clock>
+ <supported_graphics_clock>720 MHz</supported_graphics_clock>
+ <supported_graphics_clock>705 MHz</supported_graphics_clock>
+ <supported_graphics_clock>690 MHz</supported_graphics_clock>
+ <supported_graphics_clock>675 MHz</supported_graphics_clock>
+ <supported_graphics_clock>660 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ <supported_graphics_clock>390 MHz</supported_graphics_clock>
+ <supported_graphics_clock>375 MHz</supported_graphics_clock>
+ <supported_graphics_clock>360 MHz</supported_graphics_clock>
+ <supported_graphics_clock>345 MHz</supported_graphics_clock>
+ <supported_graphics_clock>330 MHz</supported_graphics_clock>
+ <supported_graphics_clock>315 MHz</supported_graphics_clock>
+ <supported_graphics_clock>300 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>405 MHz</value>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ <supported_graphics_clock>390 MHz</supported_graphics_clock>
+ <supported_graphics_clock>375 MHz</supported_graphics_clock>
+ <supported_graphics_clock>360 MHz</supported_graphics_clock>
+ <supported_graphics_clock>345 MHz</supported_graphics_clock>
+ <supported_graphics_clock>330 MHz</supported_graphics_clock>
+ <supported_graphics_clock>315 MHz</supported_graphics_clock>
+ <supported_graphics_clock>300 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ </supported_clocks>
+ <processes>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>7724</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files\WindowsApps\Microsoft.YourPhone_1.22062.543.0_x64__8wekyb3d8bbwe\PhoneExperienceHost.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>10808</pid>
+ <type>C+G</type>
+ <process_name></process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>11556</pid>
+ <type>C+G</type>
+ <process_name>C:\Windows\SystemApps\ShellExperienceHost_cw5n1h2txyewy\ShellExperienceHost.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>12452</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files\WindowsApps\Microsoft.SkypeApp_15.88.3401.0_x86__kzf8qxf38zg5c\Skype\Skype.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>12636</pid>
+ <type>C+G</type>
+ <process_name></process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>13120</pid>
+ <type>C+G</type>
+ <process_name></process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>14296</pid>
+ <type>C+G</type>
+ <process_name>C:\Windows\explorer.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>16508</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files\WindowsApps\Microsoft.549981C3F5F10_4.2204.13303.0_x64__8wekyb3d8bbwe\Cortana.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>16592</pid>
+ <type>C+G</type>
+ <process_name>C:\ProgramData\Logishrd\LogiOptions\Software\Current\LogiOptionsMgr.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>20920</pid>
+ <type>C+G</type>
+ <process_name>C:\Windows\SystemApps\Microsoft.LockApp_cw5n1h2txyewy\LockApp.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>21004</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files\WindowsApps\Microsoft.Windows.Photos_2022.31070.26005.0_x64__8wekyb3d8bbwe\Microsoft.Photos.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>21036</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files (x86)\Garmin\Express\CefSharp.BrowserSubprocess.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>21048</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files\WindowsApps\91750D7E.Slack_4.28.171.0_x64__8she8kybcnzg4\app\Slack.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>21104</pid>
+ <type>C+G</type>
+ <process_name>C:\Windows\SystemApps\MicrosoftWindows.Client.CBS_cw5n1h2txyewy\TextInputHost.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>21292</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files\WindowsApps\Microsoft.ZuneVideo_10.22041.10091.0_x64__8wekyb3d8bbwe\Video.UI.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>21472</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files (x86)\Google\Chrome\Application\chrome.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>21852</pid>
+ <type>C+G</type>
+ <process_name>C:\ProgramData\Logishrd\LogiOptions\Software\Current\LogiOverlay.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>22600</pid>
+ <type>C+G</type>
+ <process_name></process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>23652</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files\WindowsApps\microsoft.windowscommunicationsapps_16005.14326.20970.0_x64__8wekyb3d8bbwe\HxOutlook.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>25436</pid>
+ <type>C+G</type>
+ <process_name>C:\Windows\SystemApps\MicrosoftWindows.Client.CBS_cw5n1h2txyewy\SearchHost.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>25520</pid>
+ <type>C+G</type>
+ <process_name></process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>25696</pid>
+ <type>C+G</type>
+ <process_name>C:\Users\Vlad\AppData\Local\Viber\Viber.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>26972</pid>
+ <type>C+G</type>
+ <process_name>C:\Windows\SystemApps\Microsoft.Windows.StartMenuExperienceHost_cw5n1h2txyewy\StartMenuExperienceHost.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>27148</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files\WindowsApps\Microsoft.Office.OneNote_16001.14326.21090.0_x64__8wekyb3d8bbwe\onenoteim.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>27628</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files\WindowsApps\49297T.Partl.ClockOut_2.9.9.0_x64__jr9bq2af9farr\WorkingHours.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>30212</pid>
+ <type>C+G</type>
+ <process_name>C:\Program Files (x86)\Microsoft\EdgeWebView\Application\105.0.1343.42\msedgewebview2.exe</process_name>
+ <used_memory>N/A</used_memory>
+ </process_info>
+ </processes>
+ <accounted_processes>
+ </accounted_processes>
+ </gpu>
+
+</nvidia_smi_log> \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-3060.xml b/src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-3060.xml
new file mode 100644
index 000000000..ad63fd51b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-3060.xml
@@ -0,0 +1,917 @@
+<?xml version="1.0" ?>
+<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v11.dtd">
+<nvidia_smi_log>
+ <timestamp>Tue Sep 20 15:21:01 2022</timestamp>
+ <driver_version>515.65.01</driver_version>
+ <cuda_version>11.7</cuda_version>
+ <attached_gpus>1</attached_gpus>
+ <gpu id="00000000:01:00.0">
+ <product_name>NVIDIA GeForce RTX 3060 Laptop GPU</product_name>
+ <product_brand>GeForce</product_brand>
+ <product_architecture>Ampere</product_architecture>
+ <display_mode>Disabled</display_mode>
+ <display_active>Disabled</display_active>
+ <persistence_mode>Disabled</persistence_mode>
+ <mig_mode>
+ <current_mig>N/A</current_mig>
+ <pending_mig>N/A</pending_mig>
+ </mig_mode>
+ <mig_devices>
+ None
+ </mig_devices>
+ <accounting_mode>Disabled</accounting_mode>
+ <accounting_mode_buffer_size>4000</accounting_mode_buffer_size>
+ <driver_model>
+ <current_dm>N/A</current_dm>
+ <pending_dm>N/A</pending_dm>
+ </driver_model>
+ <serial>N/A</serial>
+ <uuid>GPU-473d8d0f-d462-185c-6b36-6fc23e23e571</uuid>
+ <minor_number>0</minor_number>
+ <vbios_version>94.06.19.00.51</vbios_version>
+ <multigpu_board>No</multigpu_board>
+ <board_id>0x100</board_id>
+ <gpu_part_number>N/A</gpu_part_number>
+ <gpu_module_id>0</gpu_module_id>
+ <inforom_version>
+ <img_version>G001.0000.03.03</img_version>
+ <oem_object>2.0</oem_object>
+ <ecc_object>N/A</ecc_object>
+ <pwr_object>N/A</pwr_object>
+ </inforom_version>
+ <gpu_operation_mode>
+ <current_gom>N/A</current_gom>
+ <pending_gom>N/A</pending_gom>
+ </gpu_operation_mode>
+ <gsp_firmware_version>N/A</gsp_firmware_version>
+ <gpu_virtualization_mode>
+ <virtualization_mode>None</virtualization_mode>
+ <host_vgpu_mode>N/A</host_vgpu_mode>
+ </gpu_virtualization_mode>
+ <ibmnpu>
+ <relaxed_ordering_mode>N/A</relaxed_ordering_mode>
+ </ibmnpu>
+ <pci>
+ <pci_bus>01</pci_bus>
+ <pci_device>00</pci_device>
+ <pci_domain>0000</pci_domain>
+ <pci_device_id>252010DE</pci_device_id>
+ <pci_bus_id>00000000:01:00.0</pci_bus_id>
+ <pci_sub_system_id>0A831028</pci_sub_system_id>
+ <pci_gpu_link_info>
+ <pcie_gen>
+ <max_link_gen>4</max_link_gen>
+ <current_link_gen>1</current_link_gen>
+ </pcie_gen>
+ <link_widths>
+ <max_link_width>16x</max_link_width>
+ <current_link_width>8x</current_link_width>
+ </link_widths>
+ </pci_gpu_link_info>
+ <pci_bridge_chip>
+ <bridge_chip_type>N/A</bridge_chip_type>
+ <bridge_chip_fw>N/A</bridge_chip_fw>
+ </pci_bridge_chip>
+ <replay_counter>0</replay_counter>
+ <replay_rollover_counter>0</replay_rollover_counter>
+ <tx_util>0 KB/s</tx_util>
+ <rx_util>0 KB/s</rx_util>
+ </pci>
+ <fan_speed>N/A</fan_speed>
+ <performance_state>P8</performance_state>
+ <clocks_throttle_reasons>
+ <clocks_throttle_reason_gpu_idle>Active</clocks_throttle_reason_gpu_idle>
+ <clocks_throttle_reason_applications_clocks_setting>Not Active</clocks_throttle_reason_applications_clocks_setting>
+ <clocks_throttle_reason_sw_power_cap>Not Active</clocks_throttle_reason_sw_power_cap>
+ <clocks_throttle_reason_hw_slowdown>Not Active</clocks_throttle_reason_hw_slowdown>
+ <clocks_throttle_reason_hw_thermal_slowdown>Not Active</clocks_throttle_reason_hw_thermal_slowdown>
+ <clocks_throttle_reason_hw_power_brake_slowdown>Not Active</clocks_throttle_reason_hw_power_brake_slowdown>
+ <clocks_throttle_reason_sync_boost>Not Active</clocks_throttle_reason_sync_boost>
+ <clocks_throttle_reason_sw_thermal_slowdown>Not Active</clocks_throttle_reason_sw_thermal_slowdown>
+ <clocks_throttle_reason_display_clocks_setting>Not Active</clocks_throttle_reason_display_clocks_setting>
+ </clocks_throttle_reasons>
+ <fb_memory_usage>
+ <total>6144 MiB</total>
+ <reserved>197 MiB</reserved>
+ <used>5 MiB</used>
+ <free>5940 MiB</free>
+ </fb_memory_usage>
+ <bar1_memory_usage>
+ <total>8192 MiB</total>
+ <used>3 MiB</used>
+ <free>8189 MiB</free>
+ </bar1_memory_usage>
+ <compute_mode>Default</compute_mode>
+ <utilization>
+ <gpu_util>0 %</gpu_util>
+ <memory_util>0 %</memory_util>
+ <encoder_util>0 %</encoder_util>
+ <decoder_util>0 %</decoder_util>
+ </utilization>
+ <encoder_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </encoder_stats>
+ <fbc_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </fbc_stats>
+ <ecc_mode>
+ <current_ecc>N/A</current_ecc>
+ <pending_ecc>N/A</pending_ecc>
+ </ecc_mode>
+ <ecc_errors>
+ <volatile>
+ <sram_correctable>N/A</sram_correctable>
+ <sram_uncorrectable>N/A</sram_uncorrectable>
+ <dram_correctable>N/A</dram_correctable>
+ <dram_uncorrectable>N/A</dram_uncorrectable>
+ </volatile>
+ <aggregate>
+ <sram_correctable>N/A</sram_correctable>
+ <sram_uncorrectable>N/A</sram_uncorrectable>
+ <dram_correctable>N/A</dram_correctable>
+ <dram_uncorrectable>N/A</dram_uncorrectable>
+ </aggregate>
+ </ecc_errors>
+ <retired_pages>
+ <multiple_single_bit_retirement>
+ <retired_count>N/A</retired_count>
+ <retired_pagelist>N/A</retired_pagelist>
+ </multiple_single_bit_retirement>
+ <double_bit_retirement>
+ <retired_count>N/A</retired_count>
+ <retired_pagelist>N/A</retired_pagelist>
+ </double_bit_retirement>
+ <pending_blacklist>N/A</pending_blacklist>
+ <pending_retirement>N/A</pending_retirement>
+ </retired_pages>
+ <remapped_rows>N/A</remapped_rows>
+ <temperature>
+ <gpu_temp>45 C</gpu_temp>
+ <gpu_temp_max_threshold>105 C</gpu_temp_max_threshold>
+ <gpu_temp_slow_threshold>102 C</gpu_temp_slow_threshold>
+ <gpu_temp_max_gpu_threshold>75 C</gpu_temp_max_gpu_threshold>
+ <gpu_target_temperature>N/A</gpu_target_temperature>
+ <memory_temp>N/A</memory_temp>
+ <gpu_temp_max_mem_threshold>N/A</gpu_temp_max_mem_threshold>
+ </temperature>
+ <supported_gpu_target_temp>
+ <gpu_target_temp_min>N/A</gpu_target_temp_min>
+ <gpu_target_temp_max>N/A</gpu_target_temp_max>
+ </supported_gpu_target_temp>
+ <power_readings>
+ <power_state>P8</power_state>
+ <power_management>N/A</power_management>
+ <power_draw>8.70 W</power_draw>
+ <power_limit>N/A</power_limit>
+ <default_power_limit>N/A</default_power_limit>
+ <enforced_power_limit>N/A</enforced_power_limit>
+ <min_power_limit>N/A</min_power_limit>
+ <max_power_limit>N/A</max_power_limit>
+ </power_readings>
+ <clocks>
+ <graphics_clock>210 MHz</graphics_clock>
+ <sm_clock>210 MHz</sm_clock>
+ <mem_clock>405 MHz</mem_clock>
+ <video_clock>555 MHz</video_clock>
+ </clocks>
+ <applications_clocks>
+ <graphics_clock>N/A</graphics_clock>
+ <mem_clock>N/A</mem_clock>
+ </applications_clocks>
+ <default_applications_clocks>
+ <graphics_clock>N/A</graphics_clock>
+ <mem_clock>N/A</mem_clock>
+ </default_applications_clocks>
+ <max_clocks>
+ <graphics_clock>2100 MHz</graphics_clock>
+ <sm_clock>2100 MHz</sm_clock>
+ <mem_clock>6001 MHz</mem_clock>
+ <video_clock>1950 MHz</video_clock>
+ </max_clocks>
+ <max_customer_boost_clocks>
+ <graphics_clock>N/A</graphics_clock>
+ </max_customer_boost_clocks>
+ <clock_policy>
+ <auto_boost>N/A</auto_boost>
+ <auto_boost_default>N/A</auto_boost_default>
+ </clock_policy>
+ <voltage>
+ <graphics_volt>631.250 mV</graphics_volt>
+ </voltage>
+ <supported_clocks>
+ <supported_mem_clock>
+ <value>6001 MHz</value>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2092 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2077 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2062 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2047 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2032 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2017 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2002 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1987 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1972 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1957 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1942 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1927 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1912 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1897 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1882 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1867 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1852 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1837 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1822 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1807 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1792 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1777 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1762 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1747 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1732 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1717 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1702 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1687 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1672 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1657 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1642 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1627 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1612 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1597 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1582 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1567 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1552 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1537 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1522 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1507 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1492 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1477 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1462 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1447 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1432 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1417 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1402 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1387 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1372 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1357 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1342 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1327 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1312 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1297 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1282 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1267 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1252 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1237 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1222 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1207 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1192 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1177 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1162 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1147 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1132 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1117 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1102 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1087 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1072 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1057 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1042 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1027 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1012 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+ <supported_graphics_clock>997 MHz</supported_graphics_clock>
+ <supported_graphics_clock>990 MHz</supported_graphics_clock>
+ <supported_graphics_clock>982 MHz</supported_graphics_clock>
+ <supported_graphics_clock>975 MHz</supported_graphics_clock>
+ <supported_graphics_clock>967 MHz</supported_graphics_clock>
+ <supported_graphics_clock>960 MHz</supported_graphics_clock>
+ <supported_graphics_clock>952 MHz</supported_graphics_clock>
+ <supported_graphics_clock>945 MHz</supported_graphics_clock>
+ <supported_graphics_clock>937 MHz</supported_graphics_clock>
+ <supported_graphics_clock>930 MHz</supported_graphics_clock>
+ <supported_graphics_clock>922 MHz</supported_graphics_clock>
+ <supported_graphics_clock>915 MHz</supported_graphics_clock>
+ <supported_graphics_clock>907 MHz</supported_graphics_clock>
+ <supported_graphics_clock>900 MHz</supported_graphics_clock>
+ <supported_graphics_clock>892 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>877 MHz</supported_graphics_clock>
+ <supported_graphics_clock>870 MHz</supported_graphics_clock>
+ <supported_graphics_clock>862 MHz</supported_graphics_clock>
+ <supported_graphics_clock>855 MHz</supported_graphics_clock>
+ <supported_graphics_clock>847 MHz</supported_graphics_clock>
+ <supported_graphics_clock>840 MHz</supported_graphics_clock>
+ <supported_graphics_clock>832 MHz</supported_graphics_clock>
+ <supported_graphics_clock>825 MHz</supported_graphics_clock>
+ <supported_graphics_clock>817 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>802 MHz</supported_graphics_clock>
+ <supported_graphics_clock>795 MHz</supported_graphics_clock>
+ <supported_graphics_clock>787 MHz</supported_graphics_clock>
+ <supported_graphics_clock>780 MHz</supported_graphics_clock>
+ <supported_graphics_clock>772 MHz</supported_graphics_clock>
+ <supported_graphics_clock>765 MHz</supported_graphics_clock>
+ <supported_graphics_clock>757 MHz</supported_graphics_clock>
+ <supported_graphics_clock>750 MHz</supported_graphics_clock>
+ <supported_graphics_clock>742 MHz</supported_graphics_clock>
+ <supported_graphics_clock>735 MHz</supported_graphics_clock>
+ <supported_graphics_clock>727 MHz</supported_graphics_clock>
+ <supported_graphics_clock>720 MHz</supported_graphics_clock>
+ <supported_graphics_clock>712 MHz</supported_graphics_clock>
+ <supported_graphics_clock>705 MHz</supported_graphics_clock>
+ <supported_graphics_clock>697 MHz</supported_graphics_clock>
+ <supported_graphics_clock>690 MHz</supported_graphics_clock>
+ <supported_graphics_clock>682 MHz</supported_graphics_clock>
+ <supported_graphics_clock>675 MHz</supported_graphics_clock>
+ <supported_graphics_clock>667 MHz</supported_graphics_clock>
+ <supported_graphics_clock>660 MHz</supported_graphics_clock>
+ <supported_graphics_clock>652 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>637 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>622 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>607 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>592 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>577 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>562 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>547 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>532 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>517 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>502 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>487 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>472 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>457 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>442 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>427 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>412 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>5501 MHz</value>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2092 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2077 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2062 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2047 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2032 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2017 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2002 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1987 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1972 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1957 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1942 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1927 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1912 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1897 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1882 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1867 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1852 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1837 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1822 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1807 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1792 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1777 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1762 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1747 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1732 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1717 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1702 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1687 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1672 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1657 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1642 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1627 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1612 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1597 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1582 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1567 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1552 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1537 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1522 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1507 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1492 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1477 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1462 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1447 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1432 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1417 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1402 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1387 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1372 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1357 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1342 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1327 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1312 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1297 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1282 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1267 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1252 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1237 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1222 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1207 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1192 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1177 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1162 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1147 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1132 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1117 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1102 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1087 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1072 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1057 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1042 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1027 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1012 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+ <supported_graphics_clock>997 MHz</supported_graphics_clock>
+ <supported_graphics_clock>990 MHz</supported_graphics_clock>
+ <supported_graphics_clock>982 MHz</supported_graphics_clock>
+ <supported_graphics_clock>975 MHz</supported_graphics_clock>
+ <supported_graphics_clock>967 MHz</supported_graphics_clock>
+ <supported_graphics_clock>960 MHz</supported_graphics_clock>
+ <supported_graphics_clock>952 MHz</supported_graphics_clock>
+ <supported_graphics_clock>945 MHz</supported_graphics_clock>
+ <supported_graphics_clock>937 MHz</supported_graphics_clock>
+ <supported_graphics_clock>930 MHz</supported_graphics_clock>
+ <supported_graphics_clock>922 MHz</supported_graphics_clock>
+ <supported_graphics_clock>915 MHz</supported_graphics_clock>
+ <supported_graphics_clock>907 MHz</supported_graphics_clock>
+ <supported_graphics_clock>900 MHz</supported_graphics_clock>
+ <supported_graphics_clock>892 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>877 MHz</supported_graphics_clock>
+ <supported_graphics_clock>870 MHz</supported_graphics_clock>
+ <supported_graphics_clock>862 MHz</supported_graphics_clock>
+ <supported_graphics_clock>855 MHz</supported_graphics_clock>
+ <supported_graphics_clock>847 MHz</supported_graphics_clock>
+ <supported_graphics_clock>840 MHz</supported_graphics_clock>
+ <supported_graphics_clock>832 MHz</supported_graphics_clock>
+ <supported_graphics_clock>825 MHz</supported_graphics_clock>
+ <supported_graphics_clock>817 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>802 MHz</supported_graphics_clock>
+ <supported_graphics_clock>795 MHz</supported_graphics_clock>
+ <supported_graphics_clock>787 MHz</supported_graphics_clock>
+ <supported_graphics_clock>780 MHz</supported_graphics_clock>
+ <supported_graphics_clock>772 MHz</supported_graphics_clock>
+ <supported_graphics_clock>765 MHz</supported_graphics_clock>
+ <supported_graphics_clock>757 MHz</supported_graphics_clock>
+ <supported_graphics_clock>750 MHz</supported_graphics_clock>
+ <supported_graphics_clock>742 MHz</supported_graphics_clock>
+ <supported_graphics_clock>735 MHz</supported_graphics_clock>
+ <supported_graphics_clock>727 MHz</supported_graphics_clock>
+ <supported_graphics_clock>720 MHz</supported_graphics_clock>
+ <supported_graphics_clock>712 MHz</supported_graphics_clock>
+ <supported_graphics_clock>705 MHz</supported_graphics_clock>
+ <supported_graphics_clock>697 MHz</supported_graphics_clock>
+ <supported_graphics_clock>690 MHz</supported_graphics_clock>
+ <supported_graphics_clock>682 MHz</supported_graphics_clock>
+ <supported_graphics_clock>675 MHz</supported_graphics_clock>
+ <supported_graphics_clock>667 MHz</supported_graphics_clock>
+ <supported_graphics_clock>660 MHz</supported_graphics_clock>
+ <supported_graphics_clock>652 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>637 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>622 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>607 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>592 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>577 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>562 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>547 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>532 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>517 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>502 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>487 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>472 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>457 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>442 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>427 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>412 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>810 MHz</value>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2092 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2077 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2062 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2047 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2032 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2017 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2002 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1987 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1972 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1957 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1942 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1927 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1912 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1897 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1882 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1867 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1852 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1837 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1822 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1807 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1792 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1777 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1762 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1747 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1732 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1717 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1702 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1687 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1672 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1657 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1642 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1627 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1612 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1597 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1582 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1567 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1552 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1537 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1522 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1507 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1492 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1477 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1462 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1447 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1432 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1417 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1402 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1387 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1372 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1357 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1342 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1327 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1312 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1297 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1282 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1267 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1252 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1237 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1222 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1207 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1192 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1177 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1162 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1147 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1132 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1117 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1102 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1087 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1072 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1057 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1042 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1027 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1012 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+ <supported_graphics_clock>997 MHz</supported_graphics_clock>
+ <supported_graphics_clock>990 MHz</supported_graphics_clock>
+ <supported_graphics_clock>982 MHz</supported_graphics_clock>
+ <supported_graphics_clock>975 MHz</supported_graphics_clock>
+ <supported_graphics_clock>967 MHz</supported_graphics_clock>
+ <supported_graphics_clock>960 MHz</supported_graphics_clock>
+ <supported_graphics_clock>952 MHz</supported_graphics_clock>
+ <supported_graphics_clock>945 MHz</supported_graphics_clock>
+ <supported_graphics_clock>937 MHz</supported_graphics_clock>
+ <supported_graphics_clock>930 MHz</supported_graphics_clock>
+ <supported_graphics_clock>922 MHz</supported_graphics_clock>
+ <supported_graphics_clock>915 MHz</supported_graphics_clock>
+ <supported_graphics_clock>907 MHz</supported_graphics_clock>
+ <supported_graphics_clock>900 MHz</supported_graphics_clock>
+ <supported_graphics_clock>892 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>877 MHz</supported_graphics_clock>
+ <supported_graphics_clock>870 MHz</supported_graphics_clock>
+ <supported_graphics_clock>862 MHz</supported_graphics_clock>
+ <supported_graphics_clock>855 MHz</supported_graphics_clock>
+ <supported_graphics_clock>847 MHz</supported_graphics_clock>
+ <supported_graphics_clock>840 MHz</supported_graphics_clock>
+ <supported_graphics_clock>832 MHz</supported_graphics_clock>
+ <supported_graphics_clock>825 MHz</supported_graphics_clock>
+ <supported_graphics_clock>817 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>802 MHz</supported_graphics_clock>
+ <supported_graphics_clock>795 MHz</supported_graphics_clock>
+ <supported_graphics_clock>787 MHz</supported_graphics_clock>
+ <supported_graphics_clock>780 MHz</supported_graphics_clock>
+ <supported_graphics_clock>772 MHz</supported_graphics_clock>
+ <supported_graphics_clock>765 MHz</supported_graphics_clock>
+ <supported_graphics_clock>757 MHz</supported_graphics_clock>
+ <supported_graphics_clock>750 MHz</supported_graphics_clock>
+ <supported_graphics_clock>742 MHz</supported_graphics_clock>
+ <supported_graphics_clock>735 MHz</supported_graphics_clock>
+ <supported_graphics_clock>727 MHz</supported_graphics_clock>
+ <supported_graphics_clock>720 MHz</supported_graphics_clock>
+ <supported_graphics_clock>712 MHz</supported_graphics_clock>
+ <supported_graphics_clock>705 MHz</supported_graphics_clock>
+ <supported_graphics_clock>697 MHz</supported_graphics_clock>
+ <supported_graphics_clock>690 MHz</supported_graphics_clock>
+ <supported_graphics_clock>682 MHz</supported_graphics_clock>
+ <supported_graphics_clock>675 MHz</supported_graphics_clock>
+ <supported_graphics_clock>667 MHz</supported_graphics_clock>
+ <supported_graphics_clock>660 MHz</supported_graphics_clock>
+ <supported_graphics_clock>652 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>637 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>622 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>607 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>592 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>577 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>562 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>547 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>532 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>517 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>502 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>487 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>472 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>457 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>442 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>427 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>412 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>405 MHz</value>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>412 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ </supported_clocks>
+ <processes>
+ <process_info>
+ <gpu_instance_id>N/A</gpu_instance_id>
+ <compute_instance_id>N/A</compute_instance_id>
+ <pid>28543</pid>
+ <type>G</type>
+ <process_name>/usr/libexec/Xorg</process_name>
+ <used_memory>4 MiB</used_memory>
+ </process_info>
+ </processes>
+ <accounted_processes>
+ </accounted_processes>
+ </gpu>
+
+</nvidia_smi_log>
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-4090-driver-535.xml b/src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-4090-driver-535.xml
new file mode 100644
index 000000000..c3c253ffa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/testdata/rtx-4090-driver-535.xml
@@ -0,0 +1,1082 @@
+<?xml version="1.0" ?>
+<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v12.dtd">
+<nvidia_smi_log>
+ <timestamp>Mon Aug 7 11:10:06 2023</timestamp>
+ <driver_version>535.86.05</driver_version>
+ <cuda_version>12.2</cuda_version>
+ <attached_gpus>1</attached_gpus>
+ <gpu id="00000000:84:00.0">
+ <product_name>NVIDIA GeForce RTX 4090</product_name>
+ <product_brand>GeForce</product_brand>
+ <product_architecture>Ada Lovelace</product_architecture>
+ <display_mode>Disabled</display_mode>
+ <display_active>Disabled</display_active>
+ <persistence_mode>Enabled</persistence_mode>
+ <addressing_mode>None</addressing_mode>
+ <mig_mode>
+ <current_mig>N/A</current_mig>
+ <pending_mig>N/A</pending_mig>
+ </mig_mode>
+ <mig_devices>
+ None
+ </mig_devices>
+ <accounting_mode>Disabled</accounting_mode>
+ <accounting_mode_buffer_size>4000</accounting_mode_buffer_size>
+ <driver_model>
+ <current_dm>N/A</current_dm>
+ <pending_dm>N/A</pending_dm>
+ </driver_model>
+ <serial>N/A</serial>
+ <uuid>GPU-71d1acc2-662d-2166-bf9f-65272d2fc437</uuid>
+ <minor_number>0</minor_number>
+ <vbios_version>95.02.18.80.5F</vbios_version>
+ <multigpu_board>No</multigpu_board>
+ <board_id>0x8400</board_id>
+ <board_part_number>N/A</board_part_number>
+ <gpu_part_number>2684-300-A1</gpu_part_number>
+ <gpu_fru_part_number>N/A</gpu_fru_part_number>
+ <gpu_module_id>1</gpu_module_id>
+ <inforom_version>
+ <img_version>G002.0000.00.03</img_version>
+ <oem_object>2.0</oem_object>
+ <ecc_object>6.16</ecc_object>
+ <pwr_object>N/A</pwr_object>
+ </inforom_version>
+ <gpu_operation_mode>
+ <current_gom>N/A</current_gom>
+ <pending_gom>N/A</pending_gom>
+ </gpu_operation_mode>
+ <gsp_firmware_version>N/A</gsp_firmware_version>
+ <gpu_virtualization_mode>
+ <virtualization_mode>None</virtualization_mode>
+ <host_vgpu_mode>N/A</host_vgpu_mode>
+ </gpu_virtualization_mode>
+ <gpu_reset_status>
+ <reset_required>No</reset_required>
+ <drain_and_reset_recommended>N/A</drain_and_reset_recommended>
+ </gpu_reset_status>
+ <ibmnpu>
+ <relaxed_ordering_mode>N/A</relaxed_ordering_mode>
+ </ibmnpu>
+ <pci>
+ <pci_bus>84</pci_bus>
+ <pci_device>00</pci_device>
+ <pci_domain>0000</pci_domain>
+ <pci_device_id>268410DE</pci_device_id>
+ <pci_bus_id>00000000:84:00.0</pci_bus_id>
+ <pci_sub_system_id>40BF1458</pci_sub_system_id>
+ <pci_gpu_link_info>
+ <pcie_gen>
+ <max_link_gen>3</max_link_gen>
+ <current_link_gen>1</current_link_gen>
+ <device_current_link_gen>1</device_current_link_gen>
+ <max_device_link_gen>4</max_device_link_gen>
+ <max_host_link_gen>3</max_host_link_gen>
+ </pcie_gen>
+ <link_widths>
+ <max_link_width>16x</max_link_width>
+ <current_link_width>16x</current_link_width>
+ </link_widths>
+ </pci_gpu_link_info>
+ <pci_bridge_chip>
+ <bridge_chip_type>N/A</bridge_chip_type>
+ <bridge_chip_fw>N/A</bridge_chip_fw>
+ </pci_bridge_chip>
+ <replay_counter>0</replay_counter>
+ <replay_rollover_counter>0</replay_rollover_counter>
+ <tx_util>0 KB/s</tx_util>
+ <rx_util>0 KB/s</rx_util>
+ <atomic_caps_inbound>N/A</atomic_caps_inbound>
+ <atomic_caps_outbound>N/A</atomic_caps_outbound>
+ </pci>
+ <fan_speed>0 %</fan_speed>
+ <performance_state>P8</performance_state>
+ <clocks_event_reasons>
+ <clocks_event_reason_gpu_idle>Active</clocks_event_reason_gpu_idle>
+ <clocks_event_reason_applications_clocks_setting>Not Active
+ </clocks_event_reason_applications_clocks_setting>
+ <clocks_event_reason_sw_power_cap>Not Active</clocks_event_reason_sw_power_cap>
+ <clocks_event_reason_hw_slowdown>Not Active</clocks_event_reason_hw_slowdown>
+ <clocks_event_reason_hw_thermal_slowdown>Not Active</clocks_event_reason_hw_thermal_slowdown>
+ <clocks_event_reason_hw_power_brake_slowdown>Not Active</clocks_event_reason_hw_power_brake_slowdown>
+ <clocks_event_reason_sync_boost>Not Active</clocks_event_reason_sync_boost>
+ <clocks_event_reason_sw_thermal_slowdown>Not Active</clocks_event_reason_sw_thermal_slowdown>
+ <clocks_event_reason_display_clocks_setting>Not Active</clocks_event_reason_display_clocks_setting>
+ </clocks_event_reasons>
+ <fb_memory_usage>
+ <total>24564 MiB</total>
+ <reserved>346 MiB</reserved>
+ <used>2 MiB</used>
+ <free>24214 MiB</free>
+ </fb_memory_usage>
+ <bar1_memory_usage>
+ <total>256 MiB</total>
+ <used>1 MiB</used>
+ <free>255 MiB</free>
+ </bar1_memory_usage>
+ <cc_protected_memory_usage>
+ <total>0 MiB</total>
+ <used>0 MiB</used>
+ <free>0 MiB</free>
+ </cc_protected_memory_usage>
+ <compute_mode>Default</compute_mode>
+ <utilization>
+ <gpu_util>0 %</gpu_util>
+ <memory_util>0 %</memory_util>
+ <encoder_util>0 %</encoder_util>
+ <decoder_util>0 %</decoder_util>
+ <jpeg_util>0 %</jpeg_util>
+ <ofa_util>0 %</ofa_util>
+ </utilization>
+ <encoder_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </encoder_stats>
+ <fbc_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </fbc_stats>
+ <ecc_mode>
+ <current_ecc>Disabled</current_ecc>
+ <pending_ecc>Disabled</pending_ecc>
+ </ecc_mode>
+ <ecc_errors>
+ <volatile>
+ <sram_correctable>N/A</sram_correctable>
+ <sram_uncorrectable>N/A</sram_uncorrectable>
+ <dram_correctable>N/A</dram_correctable>
+ <dram_uncorrectable>N/A</dram_uncorrectable>
+ </volatile>
+ <aggregate>
+ <sram_correctable>N/A</sram_correctable>
+ <sram_uncorrectable>N/A</sram_uncorrectable>
+ <dram_correctable>N/A</dram_correctable>
+ <dram_uncorrectable>N/A</dram_uncorrectable>
+ </aggregate>
+ </ecc_errors>
+ <retired_pages>
+ <multiple_single_bit_retirement>
+ <retired_count>N/A</retired_count>
+ <retired_pagelist>N/A</retired_pagelist>
+ </multiple_single_bit_retirement>
+ <double_bit_retirement>
+ <retired_count>N/A</retired_count>
+ <retired_pagelist>N/A</retired_pagelist>
+ </double_bit_retirement>
+ <pending_blacklist>N/A</pending_blacklist>
+ <pending_retirement>N/A</pending_retirement>
+ </retired_pages>
+ <remapped_rows>
+ <remapped_row_corr>0</remapped_row_corr>
+ <remapped_row_unc>0</remapped_row_unc>
+ <remapped_row_pending>No</remapped_row_pending>
+ <remapped_row_failure>No</remapped_row_failure>
+ <row_remapper_histogram>
+ <row_remapper_histogram_max>192 bank(s)</row_remapper_histogram_max>
+ <row_remapper_histogram_high>0 bank(s)</row_remapper_histogram_high>
+ <row_remapper_histogram_partial>0 bank(s)</row_remapper_histogram_partial>
+ <row_remapper_histogram_low>0 bank(s)</row_remapper_histogram_low>
+ <row_remapper_histogram_none>0 bank(s)</row_remapper_histogram_none>
+ </row_remapper_histogram>
+ </remapped_rows>
+ <temperature>
+ <gpu_temp>40 C</gpu_temp>
+ <gpu_temp_tlimit>43 C</gpu_temp_tlimit>
+ <gpu_temp_max_tlimit_threshold>-7 C</gpu_temp_max_tlimit_threshold>
+ <gpu_temp_slow_tlimit_threshold>-2 C</gpu_temp_slow_tlimit_threshold>
+ <gpu_temp_max_gpu_tlimit_threshold>0 C</gpu_temp_max_gpu_tlimit_threshold>
+ <gpu_target_temperature>84 C</gpu_target_temperature>
+ <memory_temp>N/A</memory_temp>
+ <gpu_temp_max_mem_tlimit_threshold>N/A</gpu_temp_max_mem_tlimit_threshold>
+ </temperature>
+ <supported_gpu_target_temp>
+ <gpu_target_temp_min>65 C</gpu_target_temp_min>
+ <gpu_target_temp_max>88 C</gpu_target_temp_max>
+ </supported_gpu_target_temp>
+ <gpu_power_readings>
+ <power_state>P8</power_state>
+ <power_draw>26.84 W</power_draw>
+ <current_power_limit>450.00 W</current_power_limit>
+ <requested_power_limit>450.00 W</requested_power_limit>
+ <default_power_limit>450.00 W</default_power_limit>
+ <min_power_limit>10.00 W</min_power_limit>
+ <max_power_limit>600.00 W</max_power_limit>
+ </gpu_power_readings>
+ <module_power_readings>
+ <power_state>P8</power_state>
+ <power_draw>N/A</power_draw>
+ <current_power_limit>N/A</current_power_limit>
+ <requested_power_limit>N/A</requested_power_limit>
+ <default_power_limit>N/A</default_power_limit>
+ <min_power_limit>N/A</min_power_limit>
+ <max_power_limit>N/A</max_power_limit>
+ </module_power_readings>
+ <clocks>
+ <graphics_clock>210 MHz</graphics_clock>
+ <sm_clock>210 MHz</sm_clock>
+ <mem_clock>405 MHz</mem_clock>
+ <video_clock>1185 MHz</video_clock>
+ </clocks>
+ <applications_clocks>
+ <graphics_clock>N/A</graphics_clock>
+ <mem_clock>N/A</mem_clock>
+ </applications_clocks>
+ <default_applications_clocks>
+ <graphics_clock>N/A</graphics_clock>
+ <mem_clock>N/A</mem_clock>
+ </default_applications_clocks>
+ <deferred_clocks>
+ <mem_clock>N/A</mem_clock>
+ </deferred_clocks>
+ <max_clocks>
+ <graphics_clock>3120 MHz</graphics_clock>
+ <sm_clock>3120 MHz</sm_clock>
+ <mem_clock>10501 MHz</mem_clock>
+ <video_clock>2415 MHz</video_clock>
+ </max_clocks>
+ <max_customer_boost_clocks>
+ <graphics_clock>N/A</graphics_clock>
+ </max_customer_boost_clocks>
+ <clock_policy>
+ <auto_boost>N/A</auto_boost>
+ <auto_boost_default>N/A</auto_boost_default>
+ </clock_policy>
+ <voltage>
+ <graphics_volt>880.000 mV</graphics_volt>
+ </voltage>
+ <fabric>
+ <state>N/A</state>
+ <status>N/A</status>
+ </fabric>
+ <supported_clocks>
+ <supported_mem_clock>
+ <value>10501 MHz</value>
+ <supported_graphics_clock>3120 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3105 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3090 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3075 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3060 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3045 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3030 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3015 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3000 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2985 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2970 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2955 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2940 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2925 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2910 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2895 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2880 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2865 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2850 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2835 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2820 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2805 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2790 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2775 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2760 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2745 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2730 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2715 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2700 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2685 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2670 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2655 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2640 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2625 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2610 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2595 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2580 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2565 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2550 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2535 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2520 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2505 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2490 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2475 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2460 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2445 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2430 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2415 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2400 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2385 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2370 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2355 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2340 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2325 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2310 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2295 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2280 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2265 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2250 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2235 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2220 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2205 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2190 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2175 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2160 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2145 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2130 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2115 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+ <supported_graphics_clock>990 MHz</supported_graphics_clock>
+ <supported_graphics_clock>975 MHz</supported_graphics_clock>
+ <supported_graphics_clock>960 MHz</supported_graphics_clock>
+ <supported_graphics_clock>945 MHz</supported_graphics_clock>
+ <supported_graphics_clock>930 MHz</supported_graphics_clock>
+ <supported_graphics_clock>915 MHz</supported_graphics_clock>
+ <supported_graphics_clock>900 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>870 MHz</supported_graphics_clock>
+ <supported_graphics_clock>855 MHz</supported_graphics_clock>
+ <supported_graphics_clock>840 MHz</supported_graphics_clock>
+ <supported_graphics_clock>825 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>795 MHz</supported_graphics_clock>
+ <supported_graphics_clock>780 MHz</supported_graphics_clock>
+ <supported_graphics_clock>765 MHz</supported_graphics_clock>
+ <supported_graphics_clock>750 MHz</supported_graphics_clock>
+ <supported_graphics_clock>735 MHz</supported_graphics_clock>
+ <supported_graphics_clock>720 MHz</supported_graphics_clock>
+ <supported_graphics_clock>705 MHz</supported_graphics_clock>
+ <supported_graphics_clock>690 MHz</supported_graphics_clock>
+ <supported_graphics_clock>675 MHz</supported_graphics_clock>
+ <supported_graphics_clock>660 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ <supported_graphics_clock>390 MHz</supported_graphics_clock>
+ <supported_graphics_clock>375 MHz</supported_graphics_clock>
+ <supported_graphics_clock>360 MHz</supported_graphics_clock>
+ <supported_graphics_clock>345 MHz</supported_graphics_clock>
+ <supported_graphics_clock>330 MHz</supported_graphics_clock>
+ <supported_graphics_clock>315 MHz</supported_graphics_clock>
+ <supported_graphics_clock>300 MHz</supported_graphics_clock>
+ <supported_graphics_clock>285 MHz</supported_graphics_clock>
+ <supported_graphics_clock>270 MHz</supported_graphics_clock>
+ <supported_graphics_clock>255 MHz</supported_graphics_clock>
+ <supported_graphics_clock>240 MHz</supported_graphics_clock>
+ <supported_graphics_clock>225 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>10251 MHz</value>
+ <supported_graphics_clock>3120 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3105 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3090 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3075 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3060 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3045 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3030 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3015 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3000 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2985 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2970 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2955 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2940 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2925 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2910 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2895 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2880 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2865 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2850 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2835 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2820 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2805 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2790 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2775 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2760 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2745 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2730 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2715 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2700 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2685 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2670 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2655 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2640 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2625 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2610 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2595 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2580 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2565 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2550 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2535 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2520 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2505 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2490 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2475 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2460 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2445 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2430 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2415 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2400 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2385 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2370 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2355 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2340 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2325 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2310 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2295 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2280 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2265 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2250 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2235 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2220 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2205 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2190 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2175 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2160 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2145 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2130 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2115 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+ <supported_graphics_clock>990 MHz</supported_graphics_clock>
+ <supported_graphics_clock>975 MHz</supported_graphics_clock>
+ <supported_graphics_clock>960 MHz</supported_graphics_clock>
+ <supported_graphics_clock>945 MHz</supported_graphics_clock>
+ <supported_graphics_clock>930 MHz</supported_graphics_clock>
+ <supported_graphics_clock>915 MHz</supported_graphics_clock>
+ <supported_graphics_clock>900 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>870 MHz</supported_graphics_clock>
+ <supported_graphics_clock>855 MHz</supported_graphics_clock>
+ <supported_graphics_clock>840 MHz</supported_graphics_clock>
+ <supported_graphics_clock>825 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>795 MHz</supported_graphics_clock>
+ <supported_graphics_clock>780 MHz</supported_graphics_clock>
+ <supported_graphics_clock>765 MHz</supported_graphics_clock>
+ <supported_graphics_clock>750 MHz</supported_graphics_clock>
+ <supported_graphics_clock>735 MHz</supported_graphics_clock>
+ <supported_graphics_clock>720 MHz</supported_graphics_clock>
+ <supported_graphics_clock>705 MHz</supported_graphics_clock>
+ <supported_graphics_clock>690 MHz</supported_graphics_clock>
+ <supported_graphics_clock>675 MHz</supported_graphics_clock>
+ <supported_graphics_clock>660 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ <supported_graphics_clock>390 MHz</supported_graphics_clock>
+ <supported_graphics_clock>375 MHz</supported_graphics_clock>
+ <supported_graphics_clock>360 MHz</supported_graphics_clock>
+ <supported_graphics_clock>345 MHz</supported_graphics_clock>
+ <supported_graphics_clock>330 MHz</supported_graphics_clock>
+ <supported_graphics_clock>315 MHz</supported_graphics_clock>
+ <supported_graphics_clock>300 MHz</supported_graphics_clock>
+ <supported_graphics_clock>285 MHz</supported_graphics_clock>
+ <supported_graphics_clock>270 MHz</supported_graphics_clock>
+ <supported_graphics_clock>255 MHz</supported_graphics_clock>
+ <supported_graphics_clock>240 MHz</supported_graphics_clock>
+ <supported_graphics_clock>225 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>5001 MHz</value>
+ <supported_graphics_clock>3120 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3105 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3090 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3075 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3060 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3045 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3030 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3015 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3000 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2985 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2970 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2955 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2940 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2925 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2910 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2895 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2880 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2865 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2850 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2835 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2820 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2805 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2790 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2775 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2760 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2745 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2730 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2715 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2700 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2685 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2670 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2655 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2640 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2625 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2610 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2595 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2580 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2565 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2550 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2535 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2520 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2505 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2490 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2475 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2460 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2445 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2430 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2415 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2400 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2385 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2370 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2355 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2340 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2325 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2310 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2295 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2280 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2265 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2250 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2235 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2220 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2205 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2190 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2175 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2160 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2145 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2130 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2115 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+ <supported_graphics_clock>990 MHz</supported_graphics_clock>
+ <supported_graphics_clock>975 MHz</supported_graphics_clock>
+ <supported_graphics_clock>960 MHz</supported_graphics_clock>
+ <supported_graphics_clock>945 MHz</supported_graphics_clock>
+ <supported_graphics_clock>930 MHz</supported_graphics_clock>
+ <supported_graphics_clock>915 MHz</supported_graphics_clock>
+ <supported_graphics_clock>900 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>870 MHz</supported_graphics_clock>
+ <supported_graphics_clock>855 MHz</supported_graphics_clock>
+ <supported_graphics_clock>840 MHz</supported_graphics_clock>
+ <supported_graphics_clock>825 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>795 MHz</supported_graphics_clock>
+ <supported_graphics_clock>780 MHz</supported_graphics_clock>
+ <supported_graphics_clock>765 MHz</supported_graphics_clock>
+ <supported_graphics_clock>750 MHz</supported_graphics_clock>
+ <supported_graphics_clock>735 MHz</supported_graphics_clock>
+ <supported_graphics_clock>720 MHz</supported_graphics_clock>
+ <supported_graphics_clock>705 MHz</supported_graphics_clock>
+ <supported_graphics_clock>690 MHz</supported_graphics_clock>
+ <supported_graphics_clock>675 MHz</supported_graphics_clock>
+ <supported_graphics_clock>660 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ <supported_graphics_clock>390 MHz</supported_graphics_clock>
+ <supported_graphics_clock>375 MHz</supported_graphics_clock>
+ <supported_graphics_clock>360 MHz</supported_graphics_clock>
+ <supported_graphics_clock>345 MHz</supported_graphics_clock>
+ <supported_graphics_clock>330 MHz</supported_graphics_clock>
+ <supported_graphics_clock>315 MHz</supported_graphics_clock>
+ <supported_graphics_clock>300 MHz</supported_graphics_clock>
+ <supported_graphics_clock>285 MHz</supported_graphics_clock>
+ <supported_graphics_clock>270 MHz</supported_graphics_clock>
+ <supported_graphics_clock>255 MHz</supported_graphics_clock>
+ <supported_graphics_clock>240 MHz</supported_graphics_clock>
+ <supported_graphics_clock>225 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>810 MHz</value>
+ <supported_graphics_clock>3105 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3090 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3075 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3060 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3045 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3030 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3015 MHz</supported_graphics_clock>
+ <supported_graphics_clock>3000 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2985 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2970 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2955 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2940 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2925 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2910 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2895 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2880 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2865 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2850 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2835 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2820 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2805 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2790 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2775 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2760 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2745 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2730 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2715 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2700 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2685 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2670 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2655 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2640 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2625 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2610 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2595 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2580 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2565 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2550 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2535 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2520 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2505 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2490 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2475 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2460 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2445 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2430 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2415 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2400 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2385 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2370 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2355 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2340 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2325 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2310 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2295 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2280 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2265 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2250 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2235 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2220 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2205 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2190 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2175 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2160 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2145 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2130 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2115 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2100 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2085 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2070 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2055 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2040 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>2010 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1995 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1980 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1965 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1950 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1935 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1920 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1905 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1890 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1875 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1845 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1830 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1815 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1800 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1785 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1770 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1755 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1740 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1725 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1710 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1695 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1680 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1665 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1650 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1635 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1605 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1590 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1575 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1560 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1545 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1530 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1515 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1500 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1485 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1470 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1455 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1440 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1425 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1410 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1395 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1380 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1365 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1350 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1335 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1320 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1305 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1275 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1260 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1245 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1230 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1200 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1185 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1170 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1155 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1140 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1125 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1110 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1095 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1080 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1065 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1035 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1020 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1005 MHz</supported_graphics_clock>
+ <supported_graphics_clock>990 MHz</supported_graphics_clock>
+ <supported_graphics_clock>975 MHz</supported_graphics_clock>
+ <supported_graphics_clock>960 MHz</supported_graphics_clock>
+ <supported_graphics_clock>945 MHz</supported_graphics_clock>
+ <supported_graphics_clock>930 MHz</supported_graphics_clock>
+ <supported_graphics_clock>915 MHz</supported_graphics_clock>
+ <supported_graphics_clock>900 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>870 MHz</supported_graphics_clock>
+ <supported_graphics_clock>855 MHz</supported_graphics_clock>
+ <supported_graphics_clock>840 MHz</supported_graphics_clock>
+ <supported_graphics_clock>825 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>795 MHz</supported_graphics_clock>
+ <supported_graphics_clock>780 MHz</supported_graphics_clock>
+ <supported_graphics_clock>765 MHz</supported_graphics_clock>
+ <supported_graphics_clock>750 MHz</supported_graphics_clock>
+ <supported_graphics_clock>735 MHz</supported_graphics_clock>
+ <supported_graphics_clock>720 MHz</supported_graphics_clock>
+ <supported_graphics_clock>705 MHz</supported_graphics_clock>
+ <supported_graphics_clock>690 MHz</supported_graphics_clock>
+ <supported_graphics_clock>675 MHz</supported_graphics_clock>
+ <supported_graphics_clock>660 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ <supported_graphics_clock>390 MHz</supported_graphics_clock>
+ <supported_graphics_clock>375 MHz</supported_graphics_clock>
+ <supported_graphics_clock>360 MHz</supported_graphics_clock>
+ <supported_graphics_clock>345 MHz</supported_graphics_clock>
+ <supported_graphics_clock>330 MHz</supported_graphics_clock>
+ <supported_graphics_clock>315 MHz</supported_graphics_clock>
+ <supported_graphics_clock>300 MHz</supported_graphics_clock>
+ <supported_graphics_clock>285 MHz</supported_graphics_clock>
+ <supported_graphics_clock>270 MHz</supported_graphics_clock>
+ <supported_graphics_clock>255 MHz</supported_graphics_clock>
+ <supported_graphics_clock>240 MHz</supported_graphics_clock>
+ <supported_graphics_clock>225 MHz</supported_graphics_clock>
+ <supported_graphics_clock>210 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ <supported_mem_clock>
+ <value>405 MHz</value>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>630 MHz</supported_graphics_clock>
+ <supported_graphics_clock>615 MHz</supported_graphics_clock>
+ <supported_graphics_clock>600 MHz</supported_graphics_clock>
+ <supported_graphics_clock>585 MHz</supported_graphics_clock>
+ <supported_graphics_clock>570 MHz</supported_graphics_clock>
+ <supported_graphics_clock>555 MHz</supported_graphics_clock>
+ <supported_graphics_clock>540 MHz</supported_graphics_clock>
+ <supported_graphics_clock>525 MHz</supported_graphics_clock>
+ <supported_graphics_clock>510 MHz</supported_graphics_clock>
+ <supported_graphics_clock>495 MHz</supported_graphics_clock>
+ <supported_graphics_clock>480 MHz</supported_graphics_clock>
+ <supported_graphics_clock>465 MHz</supported_graphics_clock>
+ <supported_graphics_clock>450 MHz</supported_graphics_clock>
+ <supported_graphics_clock>435 MHz</supported_graphics_clock>
+ <supported_graphics_clock>420 MHz</supported_graphics_clock>
+ <supported_graphics_clock>405 MHz</supported_graphics_clock>
+ <supported_graphics_clock>390 MHz</supported_graphics_clock>
+ <supported_graphics_clock>375 MHz</supported_graphics_clock>
+ <supported_graphics_clock>360 MHz</supported_graphics_clock>
+ <supported_graphics_clock>345 MHz</supported_graphics_clock>
+ <supported_graphics_clock>330 MHz</supported_graphics_clock>
+ <supported_graphics_clock>315 MHz</supported_graphics_clock>
+ <supported_graphics_clock>300 MHz</supported_graphics_clock>
+ <supported_graphics_clock>285 MHz</supported_graphics_clock>
+ <supported_graphics_clock>270 MHz</supported_graphics_clock>
+ <supported_graphics_clock>255 MHz</supported_graphics_clock>
+ <supported_graphics_clock>240 MHz</supported_graphics_clock>
+ <supported_graphics_clock>225 MHz</supported_graphics_clock>
+ <supported_graphics_clock>210 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ </supported_clocks>
+ <processes>
+ </processes>
+ <accounted_processes>
+ </accounted_processes>
+ </gpu>
+
+</nvidia_smi_log>
diff --git a/src/go/plugin/go.d/modules/nvidia_smi/testdata/tesla-p100.xml b/src/go/plugin/go.d/modules/nvidia_smi/testdata/tesla-p100.xml
new file mode 100644
index 000000000..4c43125f9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvidia_smi/testdata/tesla-p100.xml
@@ -0,0 +1,313 @@
+<?xml version="1.0" ?>
+<!DOCTYPE nvidia_smi_log SYSTEM "nvsmi_device_v11.dtd">
+<nvidia_smi_log>
+ <timestamp>Sat Sep 17 17:06:50 2022</timestamp>
+ <driver_version>510.47.03</driver_version>
+ <cuda_version>11.6</cuda_version>
+ <attached_gpus>1</attached_gpus>
+ <gpu id="00000000:00:04.0">
+ <product_name>Tesla P100-PCIE-16GB</product_name>
+ <product_brand>Tesla</product_brand>
+ <product_architecture>Pascal</product_architecture>
+ <display_mode>Enabled</display_mode>
+ <display_active>Disabled</display_active>
+ <persistence_mode>Disabled</persistence_mode>
+ <mig_mode>
+ <current_mig>N/A</current_mig>
+ <pending_mig>N/A</pending_mig>
+ </mig_mode>
+ <mig_devices>
+ None
+ </mig_devices>
+ <accounting_mode>Disabled</accounting_mode>
+ <accounting_mode_buffer_size>4000</accounting_mode_buffer_size>
+ <driver_model>
+ <current_dm>N/A</current_dm>
+ <pending_dm>N/A</pending_dm>
+ </driver_model>
+ <serial>0324217145110</serial>
+ <uuid>GPU-d3da8716-eaab-75db-efc1-60e88e1cd55e</uuid>
+ <minor_number>0</minor_number>
+ <vbios_version>86.00.52.00.02</vbios_version>
+ <multigpu_board>No</multigpu_board>
+ <board_id>0x4</board_id>
+ <gpu_part_number>900-2H400-6300-031</gpu_part_number>
+ <gpu_module_id>0</gpu_module_id>
+ <inforom_version>
+ <img_version>H400.0201.00.08</img_version>
+ <oem_object>1.1</oem_object>
+ <ecc_object>4.1</ecc_object>
+ <pwr_object>N/A</pwr_object>
+ </inforom_version>
+ <gpu_operation_mode>
+ <current_gom>N/A</current_gom>
+ <pending_gom>N/A</pending_gom>
+ </gpu_operation_mode>
+ <gsp_firmware_version>N/A</gsp_firmware_version>
+ <gpu_virtualization_mode>
+ <virtualization_mode>Pass-Through</virtualization_mode>
+ <host_vgpu_mode>N/A</host_vgpu_mode>
+ </gpu_virtualization_mode>
+ <ibmnpu>
+ <relaxed_ordering_mode>N/A</relaxed_ordering_mode>
+ </ibmnpu>
+ <pci>
+ <pci_bus>00</pci_bus>
+ <pci_device>04</pci_device>
+ <pci_domain>0000</pci_domain>
+ <pci_device_id>15F810DE</pci_device_id>
+ <pci_bus_id>00000000:00:04.0</pci_bus_id>
+ <pci_sub_system_id>118F10DE</pci_sub_system_id>
+ <pci_gpu_link_info>
+ <pcie_gen>
+ <max_link_gen>3</max_link_gen>
+ <current_link_gen>3</current_link_gen>
+ </pcie_gen>
+ <link_widths>
+ <max_link_width>16x</max_link_width>
+ <current_link_width>16x</current_link_width>
+ </link_widths>
+ </pci_gpu_link_info>
+ <pci_bridge_chip>
+ <bridge_chip_type>N/A</bridge_chip_type>
+ <bridge_chip_fw>N/A</bridge_chip_fw>
+ </pci_bridge_chip>
+ <replay_counter>0</replay_counter>
+ <replay_rollover_counter>0</replay_rollover_counter>
+ <tx_util>0 KB/s</tx_util>
+ <rx_util>0 KB/s</rx_util>
+ </pci>
+ <fan_speed>N/A</fan_speed>
+ <performance_state>P0</performance_state>
+ <clocks_throttle_reasons>
+ <clocks_throttle_reason_gpu_idle>Active</clocks_throttle_reason_gpu_idle>
+ <clocks_throttle_reason_applications_clocks_setting>Not Active</clocks_throttle_reason_applications_clocks_setting>
+ <clocks_throttle_reason_sw_power_cap>Not Active</clocks_throttle_reason_sw_power_cap>
+ <clocks_throttle_reason_hw_slowdown>Not Active</clocks_throttle_reason_hw_slowdown>
+ <clocks_throttle_reason_hw_thermal_slowdown>Not Active</clocks_throttle_reason_hw_thermal_slowdown>
+ <clocks_throttle_reason_hw_power_brake_slowdown>Not Active</clocks_throttle_reason_hw_power_brake_slowdown>
+ <clocks_throttle_reason_sync_boost>Not Active</clocks_throttle_reason_sync_boost>
+ <clocks_throttle_reason_sw_thermal_slowdown>Not Active</clocks_throttle_reason_sw_thermal_slowdown>
+ <clocks_throttle_reason_display_clocks_setting>Not Active</clocks_throttle_reason_display_clocks_setting>
+ </clocks_throttle_reasons>
+ <fb_memory_usage>
+ <total>16384 MiB</total>
+ <reserved>103 MiB</reserved>
+ <used>0 MiB</used>
+ <free>16280 MiB</free>
+ </fb_memory_usage>
+ <bar1_memory_usage>
+ <total>16384 MiB</total>
+ <used>2 MiB</used>
+ <free>16382 MiB</free>
+ </bar1_memory_usage>
+ <compute_mode>Default</compute_mode>
+ <utilization>
+ <gpu_util>0 %</gpu_util>
+ <memory_util>0 %</memory_util>
+ <encoder_util>0 %</encoder_util>
+ <decoder_util>0 %</decoder_util>
+ </utilization>
+ <encoder_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </encoder_stats>
+ <fbc_stats>
+ <session_count>0</session_count>
+ <average_fps>0</average_fps>
+ <average_latency>0</average_latency>
+ </fbc_stats>
+ <ecc_mode>
+ <current_ecc>Enabled</current_ecc>
+ <pending_ecc>Enabled</pending_ecc>
+ </ecc_mode>
+ <ecc_errors>
+ <volatile>
+ <single_bit>
+ <device_memory>0</device_memory>
+ <register_file>0</register_file>
+ <l1_cache>N/A</l1_cache>
+ <l2_cache>0</l2_cache>
+ <texture_memory>0</texture_memory>
+ <texture_shm>0</texture_shm>
+ <cbu>N/A</cbu>
+ <total>0</total>
+ </single_bit>
+ <double_bit>
+ <device_memory>0</device_memory>
+ <register_file>0</register_file>
+ <l1_cache>N/A</l1_cache>
+ <l2_cache>0</l2_cache>
+ <texture_memory>0</texture_memory>
+ <texture_shm>0</texture_shm>
+ <cbu>N/A</cbu>
+ <total>0</total>
+ </double_bit>
+ </volatile>
+ <aggregate>
+ <single_bit>
+ <device_memory>3</device_memory>
+ <register_file>0</register_file>
+ <l1_cache>N/A</l1_cache>
+ <l2_cache>0</l2_cache>
+ <texture_memory>0</texture_memory>
+ <texture_shm>0</texture_shm>
+ <cbu>N/A</cbu>
+ <total>3</total>
+ </single_bit>
+ <double_bit>
+ <device_memory>0</device_memory>
+ <register_file>0</register_file>
+ <l1_cache>N/A</l1_cache>
+ <l2_cache>0</l2_cache>
+ <texture_memory>0</texture_memory>
+ <texture_shm>0</texture_shm>
+ <cbu>N/A</cbu>
+ <total>0</total>
+ </double_bit>
+ </aggregate>
+ </ecc_errors>
+ <retired_pages>
+ <multiple_single_bit_retirement>
+ <retired_count>0</retired_count>
+ <retired_pagelist>
+ </retired_pagelist>
+ </multiple_single_bit_retirement>
+ <double_bit_retirement>
+ <retired_count>0</retired_count>
+ <retired_pagelist>
+ </retired_pagelist>
+ </double_bit_retirement>
+ <pending_blacklist>No</pending_blacklist>
+ <pending_retirement>No</pending_retirement>
+ </retired_pages>
+ <remapped_rows>N/A</remapped_rows>
+ <temperature>
+ <gpu_temp>38 C</gpu_temp>
+ <gpu_temp_max_threshold>85 C</gpu_temp_max_threshold>
+ <gpu_temp_slow_threshold>82 C</gpu_temp_slow_threshold>
+ <gpu_temp_max_gpu_threshold>N/A</gpu_temp_max_gpu_threshold>
+ <gpu_target_temperature>N/A</gpu_target_temperature>
+ <memory_temp>N/A</memory_temp>
+ <gpu_temp_max_mem_threshold>N/A</gpu_temp_max_mem_threshold>
+ </temperature>
+ <supported_gpu_target_temp>
+ <gpu_target_temp_min>N/A</gpu_target_temp_min>
+ <gpu_target_temp_max>N/A</gpu_target_temp_max>
+ </supported_gpu_target_temp>
+ <power_readings>
+ <power_state>P0</power_state>
+ <power_management>Supported</power_management>
+ <power_draw>26.16 W</power_draw>
+ <power_limit>250.00 W</power_limit>
+ <default_power_limit>250.00 W</default_power_limit>
+ <enforced_power_limit>250.00 W</enforced_power_limit>
+ <min_power_limit>125.00 W</min_power_limit>
+ <max_power_limit>250.00 W</max_power_limit>
+ </power_readings>
+ <clocks>
+ <graphics_clock>405 MHz</graphics_clock>
+ <sm_clock>405 MHz</sm_clock>
+ <mem_clock>715 MHz</mem_clock>
+ <video_clock>835 MHz</video_clock>
+ </clocks>
+ <applications_clocks>
+ <graphics_clock>1189 MHz</graphics_clock>
+ <mem_clock>715 MHz</mem_clock>
+ </applications_clocks>
+ <default_applications_clocks>
+ <graphics_clock>1189 MHz</graphics_clock>
+ <mem_clock>715 MHz</mem_clock>
+ </default_applications_clocks>
+ <max_clocks>
+ <graphics_clock>1328 MHz</graphics_clock>
+ <sm_clock>1328 MHz</sm_clock>
+ <mem_clock>715 MHz</mem_clock>
+ <video_clock>1328 MHz</video_clock>
+ </max_clocks>
+ <max_customer_boost_clocks>
+ <graphics_clock>1328 MHz</graphics_clock>
+ </max_customer_boost_clocks>
+ <clock_policy>
+ <auto_boost>N/A</auto_boost>
+ <auto_boost_default>N/A</auto_boost_default>
+ </clock_policy>
+ <voltage>
+ <graphics_volt>N/A</graphics_volt>
+ </voltage>
+ <supported_clocks>
+ <supported_mem_clock>
+ <value>715 MHz</value>
+ <supported_graphics_clock>1328 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1316 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1303 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1290 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1278 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1265 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1252 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1240 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1227 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1215 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1202 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1189 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1177 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1164 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1151 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1139 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1126 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1113 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1101 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1088 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1075 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1063 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1050 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1037 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1025 MHz</supported_graphics_clock>
+ <supported_graphics_clock>1012 MHz</supported_graphics_clock>
+ <supported_graphics_clock>999 MHz</supported_graphics_clock>
+ <supported_graphics_clock>987 MHz</supported_graphics_clock>
+ <supported_graphics_clock>974 MHz</supported_graphics_clock>
+ <supported_graphics_clock>961 MHz</supported_graphics_clock>
+ <supported_graphics_clock>949 MHz</supported_graphics_clock>
+ <supported_graphics_clock>936 MHz</supported_graphics_clock>
+ <supported_graphics_clock>923 MHz</supported_graphics_clock>
+ <supported_graphics_clock>911 MHz</supported_graphics_clock>
+ <supported_graphics_clock>898 MHz</supported_graphics_clock>
+ <supported_graphics_clock>885 MHz</supported_graphics_clock>
+ <supported_graphics_clock>873 MHz</supported_graphics_clock>
+ <supported_graphics_clock>860 MHz</supported_graphics_clock>
+ <supported_graphics_clock>847 MHz</supported_graphics_clock>
+ <supported_graphics_clock>835 MHz</supported_graphics_clock>
+ <supported_graphics_clock>822 MHz</supported_graphics_clock>
+ <supported_graphics_clock>810 MHz</supported_graphics_clock>
+ <supported_graphics_clock>797 MHz</supported_graphics_clock>
+ <supported_graphics_clock>784 MHz</supported_graphics_clock>
+ <supported_graphics_clock>772 MHz</supported_graphics_clock>
+ <supported_graphics_clock>759 MHz</supported_graphics_clock>
+ <supported_graphics_clock>746 MHz</supported_graphics_clock>
+ <supported_graphics_clock>734 MHz</supported_graphics_clock>
+ <supported_graphics_clock>721 MHz</supported_graphics_clock>
+ <supported_graphics_clock>708 MHz</supported_graphics_clock>
+ <supported_graphics_clock>696 MHz</supported_graphics_clock>
+ <supported_graphics_clock>683 MHz</supported_graphics_clock>
+ <supported_graphics_clock>670 MHz</supported_graphics_clock>
+ <supported_graphics_clock>658 MHz</supported_graphics_clock>
+ <supported_graphics_clock>645 MHz</supported_graphics_clock>
+ <supported_graphics_clock>632 MHz</supported_graphics_clock>
+ <supported_graphics_clock>620 MHz</supported_graphics_clock>
+ <supported_graphics_clock>607 MHz</supported_graphics_clock>
+ <supported_graphics_clock>594 MHz</supported_graphics_clock>
+ <supported_graphics_clock>582 MHz</supported_graphics_clock>
+ <supported_graphics_clock>569 MHz</supported_graphics_clock>
+ <supported_graphics_clock>556 MHz</supported_graphics_clock>
+ <supported_graphics_clock>544 MHz</supported_graphics_clock>
+ </supported_mem_clock>
+ </supported_clocks>
+ <processes>
+ </processes>
+ <accounted_processes>
+ </accounted_processes>
+ </gpu>
+
+</nvidia_smi_log> \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nvme/README.md b/src/go/plugin/go.d/modules/nvme/README.md
new file mode 120000
index 000000000..ca657b905
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/README.md
@@ -0,0 +1 @@
+integrations/nvme_devices.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nvme/charts.go b/src/go/plugin/go.d/modules/nvme/charts.go
new file mode 100644
index 000000000..08e215ec8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/charts.go
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvme
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ _ = 2050 + iota // right after Disks section
+ prioDeviceEstimatedEndurancePerc
+ prioDeviceAvailableSparePerc
+ prioDeviceCompositeTemperature
+ prioDeviceIOTransferredCount
+ prioDevicePowerCyclesCount
+ prioDevicePowerOnTime
+ prioDeviceUnsafeShutdownsCount
+ prioDeviceCriticalWarningsState
+ prioDeviceMediaErrorsRate
+ prioDeviceErrorLogEntriesRate
+ prioDeviceWarningCompositeTemperatureTime
+ prioDeviceCriticalCompositeTemperatureTime
+ prioDeviceThmTemp1TransitionsCount
+ prioDeviceThmTemp2TransitionsRate
+ prioDeviceThmTemp1Time
+ prioDeviceThmTemp2Time
+)
+
+var deviceChartsTmpl = module.Charts{
+ deviceEstimatedEndurancePercChartTmpl.Copy(),
+ deviceAvailableSparePercChartTmpl.Copy(),
+ deviceCompositeTemperatureChartTmpl.Copy(),
+ deviceIOTransferredCountChartTmpl.Copy(),
+ devicePowerCyclesCountChartTmpl.Copy(),
+ devicePowerOnTimeChartTmpl.Copy(),
+ deviceUnsafeShutdownsCountChartTmpl.Copy(),
+ deviceCriticalWarningsStateChartTmpl.Copy(),
+ deviceMediaErrorsRateChartTmpl.Copy(),
+ deviceErrorLogEntriesRateChartTmpl.Copy(),
+ deviceWarnCompositeTemperatureTimeChartTmpl.Copy(),
+ deviceCritCompositeTemperatureTimeChartTmpl.Copy(),
+ deviceThmTemp1TransitionsRateChartTmpl.Copy(),
+ deviceThmTemp2TransitionsRateChartTmpl.Copy(),
+ deviceThmTemp1TimeChartTmpl.Copy(),
+ deviceThmTemp2TimeChartTmpl.Copy(),
+}
+
+var deviceEstimatedEndurancePercChartTmpl = module.Chart{
+ ID: "device_%s_estimated_endurance_perc",
+ Title: "Estimated endurance",
+ Units: "percentage",
+ Fam: "endurance",
+ Ctx: "nvme.device_estimated_endurance_perc",
+ Priority: prioDeviceEstimatedEndurancePerc,
+ Dims: module.Dims{
+ {ID: "device_%s_percentage_used", Name: "used"},
+ },
+}
+var deviceAvailableSparePercChartTmpl = module.Chart{
+ ID: "device_%s_available_spare_perc",
+ Title: "Remaining spare capacity",
+ Units: "percentage",
+ Fam: "spare",
+ Ctx: "nvme.device_available_spare_perc",
+ Priority: prioDeviceAvailableSparePerc,
+ Dims: module.Dims{
+ {ID: "device_%s_available_spare", Name: "spare"},
+ },
+}
+var deviceCompositeTemperatureChartTmpl = module.Chart{
+ ID: "device_%s_temperature",
+ Title: "Composite temperature",
+ Units: "celsius",
+ Fam: "temperature",
+ Ctx: "nvme.device_composite_temperature",
+ Priority: prioDeviceCompositeTemperature,
+ Dims: module.Dims{
+ {ID: "device_%s_temperature", Name: "temperature"},
+ },
+}
+var deviceIOTransferredCountChartTmpl = module.Chart{
+ ID: "device_%s_io_transferred_count",
+ Title: "Amount of data transferred to and from device",
+ Units: "bytes",
+ Fam: "transferred data",
+ Ctx: "nvme.device_io_transferred_count",
+ Priority: prioDeviceIOTransferredCount,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "device_%s_data_units_read", Name: "read"},
+ {ID: "device_%s_data_units_written", Name: "written", Mul: -1},
+ },
+}
+
+var devicePowerCyclesCountChartTmpl = module.Chart{
+ ID: "device_%s_power_cycles_count",
+ Title: "Power cycles",
+ Units: "cycles",
+ Fam: "power cycles",
+ Ctx: "nvme.device_power_cycles_count",
+ Priority: prioDevicePowerCyclesCount,
+ Dims: module.Dims{
+ {ID: "device_%s_power_cycles", Name: "power"},
+ },
+}
+var devicePowerOnTimeChartTmpl = module.Chart{
+ ID: "device_%s_power_on_time",
+ Title: "Power-on time",
+ Units: "seconds",
+ Fam: "power-on time",
+ Ctx: "nvme.device_power_on_time",
+ Priority: prioDevicePowerOnTime,
+ Dims: module.Dims{
+ {ID: "device_%s_power_on_time", Name: "power-on"},
+ },
+}
+var deviceCriticalWarningsStateChartTmpl = module.Chart{
+ ID: "device_%s_critical_warnings_state",
+ Title: "Critical warnings state",
+ Units: "state",
+ Fam: "critical warnings",
+ Ctx: "nvme.device_critical_warnings_state",
+ Priority: prioDeviceCriticalWarningsState,
+ Dims: module.Dims{
+ {ID: "device_%s_critical_warning_available_spare", Name: "available_spare"},
+ {ID: "device_%s_critical_warning_temp_threshold", Name: "temp_threshold"},
+ {ID: "device_%s_critical_warning_nvm_subsystem_reliability", Name: "nvm_subsystem_reliability"},
+ {ID: "device_%s_critical_warning_read_only", Name: "read_only"},
+ {ID: "device_%s_critical_warning_volatile_mem_backup_failed", Name: "volatile_mem_backup_failed"},
+ {ID: "device_%s_critical_warning_persistent_memory_read_only", Name: "persistent_memory_read_only"},
+ },
+}
+var deviceUnsafeShutdownsCountChartTmpl = module.Chart{
+ ID: "device_%s_unsafe_shutdowns_count",
+ Title: "Unsafe shutdowns",
+ Units: "shutdowns",
+ Fam: "shutdowns",
+ Ctx: "nvme.device_unsafe_shutdowns_count",
+ Priority: prioDeviceUnsafeShutdownsCount,
+ Dims: module.Dims{
+ {ID: "device_%s_unsafe_shutdowns", Name: "unsafe"},
+ },
+}
+var deviceMediaErrorsRateChartTmpl = module.Chart{
+ ID: "device_%s_media_errors_rate",
+ Title: "Media and data integrity errors",
+ Units: "errors/s",
+ Fam: "media errors",
+ Ctx: "nvme.device_media_errors_rate",
+ Priority: prioDeviceMediaErrorsRate,
+ Dims: module.Dims{
+ {ID: "device_%s_media_errors", Name: "media", Algo: module.Incremental},
+ },
+}
+var deviceErrorLogEntriesRateChartTmpl = module.Chart{
+ ID: "device_%s_error_log_entries_rate",
+ Title: "Error log entries",
+ Units: "entries/s",
+ Fam: "error log",
+ Ctx: "nvme.device_error_log_entries_rate",
+ Priority: prioDeviceErrorLogEntriesRate,
+ Dims: module.Dims{
+ {ID: "device_%s_num_err_log_entries", Name: "error_log", Algo: module.Incremental},
+ },
+}
+var deviceWarnCompositeTemperatureTimeChartTmpl = module.Chart{
+ ID: "device_%s_warning_composite_temperature_time",
+ Title: "Warning composite temperature time",
+ Units: "seconds",
+ Fam: "warn temp time",
+ Ctx: "nvme.device_warning_composite_temperature_time",
+ Priority: prioDeviceWarningCompositeTemperatureTime,
+ Dims: module.Dims{
+ {ID: "device_%s_warning_temp_time", Name: "wctemp"},
+ },
+}
+var deviceCritCompositeTemperatureTimeChartTmpl = module.Chart{
+ ID: "device_%s_critical_composite_temperature_time",
+ Title: "Critical composite temperature time",
+ Units: "seconds",
+ Fam: "crit temp time",
+ Ctx: "nvme.device_critical_composite_temperature_time",
+ Priority: prioDeviceCriticalCompositeTemperatureTime,
+ Dims: module.Dims{
+ {ID: "device_%s_critical_comp_time", Name: "cctemp"},
+ },
+}
+var (
+ deviceThmTemp1TransitionsRateChartTmpl = module.Chart{
+ ID: "device_%s_thm_temp1_transitions_rate",
+ Title: "Thermal management temp1 transitions",
+ Units: "transitions/s",
+ Fam: "thermal mgmt transitions",
+ Ctx: "nvme.device_thermal_mgmt_temp1_transitions_rate",
+ Priority: prioDeviceThmTemp1TransitionsRate,
+ Dims: module.Dims{
+ {ID: "device_%s_thm_temp1_trans_count", Name: "temp1", Algo: module.Incremental},
+ },
+ }
+ deviceThmTemp2TransitionsRateChartTmpl = module.Chart{
+ ID: "device_%s_thm_temp2_transitions_rate",
+ Title: "Thermal management temp2 transitions",
+ Units: "transitions/s",
+ Fam: "thermal mgmt transitions",
+ Ctx: "nvme.device_thermal_mgmt_temp2_transitions_rate",
+ Priority: prioDeviceThmTemp2TransitionsRate,
+ Dims: module.Dims{
+ {ID: "device_%s_thm_temp2_trans_count", Name: "temp2", Algo: module.Incremental},
+ },
+ }
+)
+var (
+ deviceThmTemp1TimeChartTmpl = module.Chart{
+ ID: "device_%s_thm_temp1_time",
+ Title: "Thermal management temp1 time",
+ Units: "seconds",
+ Fam: "thermal mgmt time",
+ Ctx: "nvme.device_thermal_mgmt_temp1_time",
+ Priority: prioDeviceThmTemp1Time,
+ Dims: module.Dims{
+ {ID: "device_%s_thm_temp1_total_time", Name: "temp1"},
+ },
+ }
+ deviceThmTemp2TimeChartTmpl = module.Chart{
+ ID: "device_%s_thm_temp2_time",
+ Title: "Thermal management temp2 time",
+ Units: "seconds",
+ Fam: "thermal mgmt time",
+ Ctx: "nvme.device_thermal_mgmt_temp2_time",
+ Priority: prioDeviceThmTemp2Time,
+ Dims: module.Dims{
+ {ID: "device_%s_thm_temp2_total_time", Name: "temp2"},
+ },
+ }
+)
+
+func (n *NVMe) addDeviceCharts(device string) {
+ charts := deviceChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, device)
+ chart.Labels = []module.Label{
+ {Key: "device", Value: device},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, device)
+ }
+ }
+
+ if err := n.Charts().Add(*charts...); err != nil {
+ n.Warning(err)
+ }
+}
+
+func (n *NVMe) removeDeviceCharts(device string) {
+ px := fmt.Sprintf("device_%s", device)
+
+ for _, chart := range *n.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nvme/collect.go b/src/go/plugin/go.d/modules/nvme/collect.go
new file mode 100644
index 000000000..1cc942395
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/collect.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvme
+
+import (
+ "errors"
+ "fmt"
+ "path/filepath"
+ "strconv"
+ "time"
+)
+
+func (n *NVMe) collect() (map[string]int64, error) {
+ if n.exec == nil {
+ return nil, errors.New("nvme-cli is not initialized (nil)")
+ }
+
+ now := time.Now()
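+ // refresh the device list periodically (every listDevicesEvery) or right away if a per-device collection error occurred on the previous run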
+ if n.forceListDevices || now.Sub(n.listDevicesTime) > n.listDevicesEvery {
+ n.forceListDevices = false
+ n.listDevicesTime = now
+ if err := n.listNVMeDevices(); err != nil {
+ return nil, err
+ }
+ }
+
+ mx := make(map[string]int64)
+
+ for path := range n.devicePaths {
+ if err := n.collectNVMeDevice(mx, path); err != nil {
+ n.Error(err)
+ n.forceListDevices = true
+ continue
+ }
+ }
+
+ return mx, nil
+}
+
+func (n *NVMe) collectNVMeDevice(mx map[string]int64, devicePath string) error {
+ stats, err := n.exec.smartLog(devicePath)
+ if err != nil {
+ return fmt.Errorf("exec nvme smart-log for '%s': %v", devicePath, err)
+ }
+
+ device := extractDeviceFromPath(devicePath)
+
+ mx["device_"+device+"_temperature"] = int64(float64(parseValue(stats.Temperature)) - 273.15) // Kelvin => Celsius
+ mx["device_"+device+"_percentage_used"] = parseValue(stats.PercentUsed)
+ mx["device_"+device+"_available_spare"] = parseValue(stats.AvailSpare)
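+ // an NVMe "data unit" is 1,000 units of 512 bytes (per the NVMe spec), hence the *1000*512 conversion below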
+ mx["device_"+device+"_data_units_read"] = parseValue(stats.DataUnitsRead) * 1000 * 512 // units => bytes
+ mx["device_"+device+"_data_units_written"] = parseValue(stats.DataUnitsWritten) * 1000 * 512 // units => bytes
+ mx["device_"+device+"_host_read_commands"] = parseValue(stats.HostReadCommands)
+ mx["device_"+device+"_host_write_commands"] = parseValue(stats.HostWriteCommands)
+ mx["device_"+device+"_power_cycles"] = parseValue(stats.PowerCycles)
+ mx["device_"+device+"_power_on_time"] = parseValue(stats.PowerOnHours) * 3600 // hours => seconds
+ mx["device_"+device+"_unsafe_shutdowns"] = parseValue(stats.UnsafeShutdowns)
+ mx["device_"+device+"_media_errors"] = parseValue(stats.MediaErrors)
+ mx["device_"+device+"_num_err_log_entries"] = parseValue(stats.NumErrLogEntries)
+ mx["device_"+device+"_controller_busy_time"] = parseValue(stats.ControllerBusyTime) * 60 // minutes => seconds
+ mx["device_"+device+"_warning_temp_time"] = parseValue(stats.WarningTempTime) * 60 // minutes => seconds
+ mx["device_"+device+"_critical_comp_time"] = parseValue(stats.CriticalCompTime) * 60 // minutes => seconds
+ mx["device_"+device+"_thm_temp1_trans_count"] = parseValue(stats.ThmTemp1TransCount)
+ mx["device_"+device+"_thm_temp2_trans_count"] = parseValue(stats.ThmTemp2TransCount)
+ mx["device_"+device+"_thm_temp1_total_time"] = parseValue(stats.ThmTemp1TotalTime) // seconds
+ mx["device_"+device+"_thm_temp2_total_time"] = parseValue(stats.ThmTemp2TotalTime) // seconds
+
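+ // critical_warning is a bit field (NVMe SMART / Health Information log); each bit below is exposed as a separate boolean dimension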
+ mx["device_"+device+"_critical_warning_available_spare"] = boolToInt(parseValue(stats.CriticalWarning)&1 != 0)
+ mx["device_"+device+"_critical_warning_temp_threshold"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<1) != 0)
+ mx["device_"+device+"_critical_warning_nvm_subsystem_reliability"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<2) != 0)
+ mx["device_"+device+"_critical_warning_read_only"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<3) != 0)
+ mx["device_"+device+"_critical_warning_volatile_mem_backup_failed"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<4) != 0)
+ mx["device_"+device+"_critical_warning_persistent_memory_read_only"] = boolToInt(parseValue(stats.CriticalWarning)&(1<<5) != 0)
+
+ return nil
+}
+
+func (n *NVMe) listNVMeDevices() error {
+ devices, err := n.exec.list()
+ if err != nil {
+ return fmt.Errorf("exec nvme list: %v", err)
+ }
+
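+ // reconcile the tracked device set: add charts for newly discovered devices, drop charts for devices that are gone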
+ seen := make(map[string]bool)
+ for _, v := range devices.Devices {
+ device := extractDeviceFromPath(v.DevicePath)
+ seen[device] = true
+
+ if !n.devicePaths[v.DevicePath] {
+ n.devicePaths[v.DevicePath] = true
+ n.addDeviceCharts(device)
+ }
+ }
+ for path := range n.devicePaths {
+ device := extractDeviceFromPath(path)
+ if !seen[device] {
+ delete(n.devicePaths, path)
+ n.removeDeviceCharts(device)
+ }
+ }
+
+ return nil
+}
+
+func extractDeviceFromPath(devicePath string) string {
+ _, name := filepath.Split(devicePath)
+ return name
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
+
+func parseValue(s nvmeNumber) int64 {
+ v, _ := strconv.ParseFloat(string(s), 64)
+ return int64(v)
+}
diff --git a/src/go/plugin/go.d/modules/nvme/config_schema.json b/src/go/plugin/go.d/modules/nvme/config_schema.json
new file mode 100644
index 000000000..179a24ab1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/config_schema.json
@@ -0,0 +1,36 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NVMe collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the `nvme` binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "required": [],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/nvme/exec.go b/src/go/plugin/go.d/modules/nvme/exec.go
new file mode 100644
index 000000000..8c1281a2f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/exec.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvme
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "os/exec"
+ "time"
+)
+
+type nvmeDeviceList struct {
+ Devices []struct {
+ DevicePath string `json:"DevicePath"`
+ UsedBytes nvmeNumber `json:"UsedBytes"`
+ PhysicalSize nvmeNumber `json:"PhysicalSize"`
+ SectorSize nvmeNumber `json:"SectorSize"`
+ }
+}
+
+// See "Health Information Log Page" in the Current Specification Version
+// https://nvmexpress.org/developers/nvme-specification/
+type nvmeDeviceSmartLog struct {
+ CriticalWarning nvmeNumber `json:"critical_warning"`
+ Temperature nvmeNumber `json:"temperature"`
+ AvailSpare nvmeNumber `json:"avail_spare"`
+ SpareThresh nvmeNumber `json:"spare_thresh"`
+ PercentUsed nvmeNumber `json:"percent_used"`
+ DataUnitsRead nvmeNumber `json:"data_units_read"`
+ DataUnitsWritten nvmeNumber `json:"data_units_written"`
+ HostReadCommands nvmeNumber `json:"host_read_commands"`
+ HostWriteCommands nvmeNumber `json:"host_write_commands"`
+ ControllerBusyTime nvmeNumber `json:"controller_busy_time"`
+ PowerCycles nvmeNumber `json:"power_cycles"`
+ PowerOnHours nvmeNumber `json:"power_on_hours"`
+ UnsafeShutdowns nvmeNumber `json:"unsafe_shutdowns"`
+ MediaErrors nvmeNumber `json:"media_errors"`
+ NumErrLogEntries nvmeNumber `json:"num_err_log_entries"`
+ WarningTempTime nvmeNumber `json:"warning_temp_time"`
+ CriticalCompTime nvmeNumber `json:"critical_comp_time"`
+ ThmTemp1TransCount nvmeNumber `json:"thm_temp1_trans_count"`
+ ThmTemp2TransCount nvmeNumber `json:"thm_temp2_trans_count"`
+ ThmTemp1TotalTime nvmeNumber `json:"thm_temp1_total_time"`
+ ThmTemp2TotalTime nvmeNumber `json:"thm_temp2_total_time"`
+}
+
+// nvme-cli 2.1.1 exposes some values as strings
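+// Values may also arrive as plain or floating-point numbers (see the smart-log testdata variants),
+// so the raw JSON token is kept as a string here and normalized later by parseValue via strconv.ParseFloat.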
+type nvmeNumber string
+
+func (n *nvmeNumber) UnmarshalJSON(b []byte) error {
+ *n = nvmeNumber(bytes.Trim(b, "\""))
+ return nil
+}
+
+type nvmeCLIExec struct {
+ ndsudoPath string
+ timeout time.Duration
+}
+
+func (n *nvmeCLIExec) list() (*nvmeDeviceList, error) {
+ bs, err := n.execute("nvme-list")
+ if err != nil {
+ return nil, err
+ }
+
+ var v nvmeDeviceList
+ if err := json.Unmarshal(bs, &v); err != nil {
+ return nil, err
+ }
+
+ return &v, nil
+}
+
+func (n *nvmeCLIExec) smartLog(devicePath string) (*nvmeDeviceSmartLog, error) {
+ bs, err := n.execute("nvme-smart-log", "--device", devicePath)
+ if err != nil {
+ return nil, err
+ }
+
+ var v nvmeDeviceSmartLog
+ if err := json.Unmarshal(bs, &v); err != nil {
+ return nil, err
+ }
+
+ return &v, nil
+}
+
+func (n *nvmeCLIExec) execute(arg ...string) ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), n.timeout)
+ defer cancel()
+
+ return exec.CommandContext(ctx, n.ndsudoPath, arg...).Output()
+}
diff --git a/src/go/plugin/go.d/modules/nvme/init.go b/src/go/plugin/go.d/modules/nvme/init.go
new file mode 100644
index 000000000..7196208e8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/init.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvme
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) {
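+ // ndsudo is the Netdata helper installed next to the plugin binary (executable.Directory);
+ // it is used to run the privileged nvme commands (nvme-list, nvme-smart-log) without sudo.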
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ nvmeExec := &nvmeCLIExec{
+ ndsudoPath: ndsudoPath,
+ timeout: n.Timeout.Duration(),
+ }
+
+ return nvmeExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/nvme/integrations/nvme_devices.md b/src/go/plugin/go.d/modules/nvme/integrations/nvme_devices.md
new file mode 100644
index 000000000..9a93c11d0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/integrations/nvme_devices.md
@@ -0,0 +1,242 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nvme/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/nvme/metadata.yaml"
+sidebar_label: "NVMe devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NVMe devices
+
+
+<img src="https://netdata.cloud/img/nvme.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: nvme
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the health of NVMe devices. It relies on the [`nvme`](https://github.com/linux-nvme/nvme-cli#nvme-cli) CLI tool but avoids directly executing the binary. Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment. This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per device
+
+These metrics refer to the NVMe device.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | NVMe device name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| nvme.device_estimated_endurance_perc | used | % |
+| nvme.device_available_spare_perc | spare | % |
+| nvme.device_composite_temperature | temperature | celsius |
+| nvme.device_io_transferred_count | read, written | bytes |
+| nvme.device_power_cycles_count | power | cycles |
+| nvme.device_power_on_time | power-on | seconds |
+| nvme.device_critical_warnings_state | available_spare, temp_threshold, nvm_subsystem_reliability, read_only, volatile_mem_backup_failed, persistent_memory_read_only | state |
+| nvme.device_unsafe_shutdowns_count | unsafe | shutdowns |
+| nvme.device_media_errors_rate | media | errors/s |
+| nvme.device_error_log_entries_rate | error_log | entries/s |
+| nvme.device_warning_composite_temperature_time | wctemp | seconds |
+| nvme.device_critical_composite_temperature_time | cctemp | seconds |
+| nvme.device_thermal_mgmt_temp1_transitions_rate | temp1 | transitions/s |
+| nvme.device_thermal_mgmt_temp2_transitions_rate | temp2 | transitions/s |
+| nvme.device_thermal_mgmt_temp1_time | temp1 | seconds |
+| nvme.device_thermal_mgmt_temp2_time | temp2 | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ nvme_device_critical_warnings_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf) | nvme.device_critical_warnings_state | NVMe device ${label:device} has critical warnings |
+
+
+## Setup
+
+### Prerequisites
+
+#### Install nvme-cli
+
+See [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager.
+
+
+#### For Netdata running in a Docker container: grant NVMe device access
+
+Your NVMe devices need to be accessible within the Docker container for Netdata to monitor them.
+
+Include the following option in your `docker run` command or add the device mapping in your `docker-compose.yml` file:
+
+- `docker run`
+
+ ```bash
+ --device '/dev/nvme0n1:/dev/nvme0n1'
+ ```
+
+- `docker-compose.yml`
+
+ ```yaml
+ services:
+ netdata:
+ devices:
+ - "/dev/nvme0n1:/dev/nvme0n1"
+ ```
+
+**Note**: Replace `/dev/nvme0n1` with your actual NVMe device name.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/nvme.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/nvme.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| timeout | nvme binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: nvme
+ update_every: 5 # Collect NVMe metrics every 5 seconds
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `nvme` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m nvme
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `nvme` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep nvme
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep nvme /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep nvme
+```
+
+
diff --git a/src/go/plugin/go.d/modules/nvme/metadata.yaml b/src/go/plugin/go.d/modules/nvme/metadata.yaml
new file mode 100644
index 000000000..98f35af65
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/metadata.yaml
@@ -0,0 +1,225 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-nvme
+ plugin_name: go.d.plugin
+ module_name: nvme
+ monitored_instance:
+ name: NVMe devices
+ link: ""
+ icon_filename: nvme.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - nvme
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ This collector monitors the health of NVMe devices.
+ It relies on the [`nvme`](https://github.com/linux-nvme/nvme-cli#nvme-cli) CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Install nvme-cli
+ description: |
+ See [Distro Support](https://github.com/linux-nvme/nvme-cli#distro-support). Install `nvme-cli` using your distribution's package manager.
+ - title: "For Netdata running in a Docker container: grant NVMe device access"
+ description: |
+ Your NVMe devices need to be accessible within the Docker container for Netdata to monitor them.
+
+ Include the following option in your `docker run` command or add the device mapping in your `docker-compose.yml` file:
+
+ - `docker run`
+
+ ```bash
+ --device '/dev/nvme0n1:/dev/nvme0n1'
+ ```
+
+ - `docker-compose.yml`
+
+ ```yaml
+ services:
+ netdata:
+ devices:
+ - "/dev/nvme0n1:/dev/nvme0n1"
+ ```
+
+ **Note**: Replace `/dev/nvme0n1` with your actual NVMe device name.
+ configuration:
+ file:
+ name: go.d/nvme.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: timeout
+ description: nvme binary execution timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: nvme
+ update_every: 5 # Collect NVMe metrics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: nvme_device_critical_warnings_state
+ metric: nvme.device_critical_warnings_state
+ info: "NVMe device ${label:device} has critical warnings"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/nvme.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: device
+ description: These metrics refer to the NVMe device.
+ labels:
+ - name: device
+ description: NVMe device name
+ metrics:
+ - name: nvme.device_estimated_endurance_perc
+ description: Estimated endurance
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: nvme.device_available_spare_perc
+ description: Remaining spare capacity
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: spare
+ - name: nvme.device_composite_temperature
+ description: Composite temperature
+ unit: celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: nvme.device_io_transferred_count
+ description: Amount of data transferred to and from device
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: written
+ - name: nvme.device_power_cycles_count
+ description: Power cycles
+ unit: cycles
+ chart_type: line
+ dimensions:
+ - name: power
+ - name: nvme.device_power_on_time
+ description: Power-on time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: power-on
+ - name: nvme.device_critical_warnings_state
+ description: Critical warnings state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: available_spare
+ - name: temp_threshold
+ - name: nvm_subsystem_reliability
+ - name: read_only
+ - name: volatile_mem_backup_failed
+ - name: persistent_memory_read_only
+ - name: nvme.device_unsafe_shutdowns_count
+ description: Unsafe shutdowns
+ unit: shutdowns
+ chart_type: line
+ dimensions:
+ - name: unsafe
+ - name: nvme.device_media_errors_rate
+ description: Media and data integrity errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: media
+ - name: nvme.device_error_log_entries_rate
+ description: Error log entries
+ unit: entries/s
+ chart_type: line
+ dimensions:
+ - name: error_log
+ - name: nvme.device_warning_composite_temperature_time
+ description: Warning composite temperature time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: wctemp
+ - name: nvme.device_critical_composite_temperature_time
+ description: Critical composite temperature time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: cctemp
+ - name: nvme.device_thermal_mgmt_temp1_transitions_rate
+ description: Thermal management temp1 transitions
+ unit: transitions/s
+ chart_type: line
+ dimensions:
+ - name: temp1
+ - name: nvme.device_thermal_mgmt_temp2_transitions_rate
+ description: Thermal management temp2 transitions
+ unit: transitions/s
+ chart_type: line
+ dimensions:
+ - name: temp2
+ - name: nvme.device_thermal_mgmt_temp1_time
+ description: Thermal management temp1 time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: temp1
+ - name: nvme.device_thermal_mgmt_temp2_time
+ description: Thermal management temp2 time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: temp2
diff --git a/src/go/plugin/go.d/modules/nvme/nvme.go b/src/go/plugin/go.d/modules/nvme/nvme.go
new file mode 100644
index 000000000..b1b22f594
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/nvme.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvme
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("nvme", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *NVMe {
+ return &NVMe{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+
+ charts: &module.Charts{},
+ devicePaths: make(map[string]bool),
+ listDevicesEvery: time.Minute * 10,
+ }
+
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ NVMe struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec nvmeCLI
+
+ devicePaths map[string]bool
+ listDevicesTime time.Time
+ listDevicesEvery time.Duration
+ forceListDevices bool
+ }
+ nvmeCLI interface {
+ list() (*nvmeDeviceList, error)
+ smartLog(devicePath string) (*nvmeDeviceSmartLog, error)
+ }
+)
+
+func (n *NVMe) Configuration() any {
+ return n.Config
+}
+
+func (n *NVMe) Init() error {
+ nvmeExec, err := n.initNVMeCLIExec()
+ if err != nil {
+ n.Errorf("init nvme-cli exec: %v", err)
+ return err
+ }
+ n.exec = nvmeExec
+
+ return nil
+}
+
+func (n *NVMe) Check() error {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (n *NVMe) Charts() *module.Charts {
+ return n.charts
+}
+
+func (n *NVMe) Collect() map[string]int64 {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (n *NVMe) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/nvme/nvme_test.go b/src/go/plugin/go.d/modules/nvme/nvme_test.go
new file mode 100644
index 000000000..2009f789c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/nvme_test.go
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package nvme
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNVMeListJSON, _ = os.ReadFile("testdata/nvme-list.json")
+ dataNVMeListEmptyJSON, _ = os.ReadFile("testdata/nvme-list-empty.json")
+ dataNVMeSmartLogJSON, _ = os.ReadFile("testdata/nvme-smart-log.json")
+ dataNVMeSmartLogStringJSON, _ = os.ReadFile("testdata/nvme-smart-log-string.json")
+ dataNVMeSmartLogFloatJSON, _ = os.ReadFile("testdata/nvme-smart-log-float.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNVMeListJSON": dataNVMeListJSON,
+ "dataNVMeListEmptyJSON": dataNVMeListEmptyJSON,
+ "dataNVMeSmartLogJSON": dataNVMeSmartLogJSON,
+ "dataNVMeSmartLogStringJSON": dataNVMeSmartLogStringJSON,
+ "dataNVMeSmartLogFloatJSON": dataNVMeSmartLogFloatJSON,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestNVMe_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NVMe{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNVMe_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if 'ndsudo' not found": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ nv := New()
+
+ if test.wantFail {
+ assert.Error(t, nv.Init())
+ } else {
+ assert.NoError(t, nv.Init())
+ }
+ })
+ }
+}
+
+func TestNVMe_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestNVMe_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestNVMe_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(n *NVMe)
+ }{
+ "success if all calls successful": {
+ wantFail: false,
+ prepare: prepareCaseOK,
+ },
+ "fails if 'nvme list' returns an empty list": {
+ wantFail: true,
+ prepare: prepareCaseEmptyList,
+ },
+ "fails if 'nvme list' returns an error": {
+ wantFail: true,
+ prepare: prepareCaseErrOnList,
+ },
+ "fails if 'nvme smart-log' returns an error": {
+ wantFail: true,
+ prepare: prepareCaseErrOnSmartLog,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ n := New()
+
+ test.prepare(n)
+
+ if test.wantFail {
+ assert.Error(t, n.Check())
+ } else {
+ assert.NoError(t, n.Check())
+ }
+ })
+ }
+}
+
+func TestNVMe_Collect(t *testing.T) {
+ type testCaseStep struct {
+ prepare func(n *NVMe)
+ check func(t *testing.T, n *NVMe)
+ }
+
+ tests := map[string][]testCaseStep{
+ "success if all calls successful": {
+ {
+ prepare: prepareCaseOK,
+ check: func(t *testing.T, n *NVMe) {
+ mx := n.Collect()
+
+ expected := map[string]int64{
+ "device_nvme0n1_available_spare": 100,
+ "device_nvme0n1_controller_busy_time": 497040,
+ "device_nvme0n1_critical_comp_time": 0,
+ "device_nvme0n1_critical_warning_available_spare": 0,
+ "device_nvme0n1_critical_warning_nvm_subsystem_reliability": 0,
+ "device_nvme0n1_critical_warning_persistent_memory_read_only": 0,
+ "device_nvme0n1_critical_warning_read_only": 0,
+ "device_nvme0n1_critical_warning_temp_threshold": 0,
+ "device_nvme0n1_critical_warning_volatile_mem_backup_failed": 0,
+ "device_nvme0n1_data_units_read": 5068041216000,
+ "device_nvme0n1_data_units_written": 69712734208000,
+ "device_nvme0n1_host_read_commands": 313528805,
+ "device_nvme0n1_host_write_commands": 1928062610,
+ "device_nvme0n1_media_errors": 0,
+ "device_nvme0n1_num_err_log_entries": 110,
+ "device_nvme0n1_percentage_used": 2,
+ "device_nvme0n1_power_cycles": 64,
+ "device_nvme0n1_power_on_time": 17906400,
+ "device_nvme0n1_temperature": 36,
+ "device_nvme0n1_thm_temp1_total_time": 0,
+ "device_nvme0n1_thm_temp1_trans_count": 0,
+ "device_nvme0n1_thm_temp2_total_time": 0,
+ "device_nvme0n1_thm_temp2_trans_count": 0,
+ "device_nvme0n1_unsafe_shutdowns": 39,
+ "device_nvme0n1_warning_temp_time": 0,
+ "device_nvme1n1_available_spare": 100,
+ "device_nvme1n1_controller_busy_time": 497040,
+ "device_nvme1n1_critical_comp_time": 0,
+ "device_nvme1n1_critical_warning_available_spare": 0,
+ "device_nvme1n1_critical_warning_nvm_subsystem_reliability": 0,
+ "device_nvme1n1_critical_warning_persistent_memory_read_only": 0,
+ "device_nvme1n1_critical_warning_read_only": 0,
+ "device_nvme1n1_critical_warning_temp_threshold": 0,
+ "device_nvme1n1_critical_warning_volatile_mem_backup_failed": 0,
+ "device_nvme1n1_data_units_read": 5068041216000,
+ "device_nvme1n1_data_units_written": 69712734208000,
+ "device_nvme1n1_host_read_commands": 313528805,
+ "device_nvme1n1_host_write_commands": 1928062610,
+ "device_nvme1n1_media_errors": 0,
+ "device_nvme1n1_num_err_log_entries": 110,
+ "device_nvme1n1_percentage_used": 2,
+ "device_nvme1n1_power_cycles": 64,
+ "device_nvme1n1_power_on_time": 17906400,
+ "device_nvme1n1_temperature": 36,
+ "device_nvme1n1_thm_temp1_total_time": 0,
+ "device_nvme1n1_thm_temp1_trans_count": 0,
+ "device_nvme1n1_thm_temp2_total_time": 0,
+ "device_nvme1n1_thm_temp2_trans_count": 0,
+ "device_nvme1n1_unsafe_shutdowns": 39,
+ "device_nvme1n1_warning_temp_time": 0,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "success if all calls successful with string values": {
+ {
+ prepare: prepareCaseStringValuesOK,
+ check: func(t *testing.T, n *NVMe) {
+ mx := n.Collect()
+
+ expected := map[string]int64{
+ "device_nvme0n1_available_spare": 100,
+ "device_nvme0n1_controller_busy_time": 497040,
+ "device_nvme0n1_critical_comp_time": 0,
+ "device_nvme0n1_critical_warning_available_spare": 0,
+ "device_nvme0n1_critical_warning_nvm_subsystem_reliability": 0,
+ "device_nvme0n1_critical_warning_persistent_memory_read_only": 0,
+ "device_nvme0n1_critical_warning_read_only": 0,
+ "device_nvme0n1_critical_warning_temp_threshold": 0,
+ "device_nvme0n1_critical_warning_volatile_mem_backup_failed": 0,
+ "device_nvme0n1_data_units_read": 5068041216000,
+ "device_nvme0n1_data_units_written": 69712734208000,
+ "device_nvme0n1_host_read_commands": 313528805,
+ "device_nvme0n1_host_write_commands": 1928062610,
+ "device_nvme0n1_media_errors": 0,
+ "device_nvme0n1_num_err_log_entries": 110,
+ "device_nvme0n1_percentage_used": 2,
+ "device_nvme0n1_power_cycles": 64,
+ "device_nvme0n1_power_on_time": 17906400,
+ "device_nvme0n1_temperature": 36,
+ "device_nvme0n1_thm_temp1_total_time": 0,
+ "device_nvme0n1_thm_temp1_trans_count": 0,
+ "device_nvme0n1_thm_temp2_total_time": 0,
+ "device_nvme0n1_thm_temp2_trans_count": 0,
+ "device_nvme0n1_unsafe_shutdowns": 39,
+ "device_nvme0n1_warning_temp_time": 0,
+ "device_nvme1n1_available_spare": 100,
+ "device_nvme1n1_controller_busy_time": 497040,
+ "device_nvme1n1_critical_comp_time": 0,
+ "device_nvme1n1_critical_warning_available_spare": 0,
+ "device_nvme1n1_critical_warning_nvm_subsystem_reliability": 0,
+ "device_nvme1n1_critical_warning_persistent_memory_read_only": 0,
+ "device_nvme1n1_critical_warning_read_only": 0,
+ "device_nvme1n1_critical_warning_temp_threshold": 0,
+ "device_nvme1n1_critical_warning_volatile_mem_backup_failed": 0,
+ "device_nvme1n1_data_units_read": 5068041216000,
+ "device_nvme1n1_data_units_written": 69712734208000,
+ "device_nvme1n1_host_read_commands": 313528805,
+ "device_nvme1n1_host_write_commands": 1928062610,
+ "device_nvme1n1_media_errors": 0,
+ "device_nvme1n1_num_err_log_entries": 110,
+ "device_nvme1n1_percentage_used": 2,
+ "device_nvme1n1_power_cycles": 64,
+ "device_nvme1n1_power_on_time": 17906400,
+ "device_nvme1n1_temperature": 36,
+ "device_nvme1n1_thm_temp1_total_time": 0,
+ "device_nvme1n1_thm_temp1_trans_count": 0,
+ "device_nvme1n1_thm_temp2_total_time": 0,
+ "device_nvme1n1_thm_temp2_trans_count": 0,
+ "device_nvme1n1_unsafe_shutdowns": 39,
+ "device_nvme1n1_warning_temp_time": 0,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "success if all calls successful with float values": {
+ {
+ prepare: prepareCaseFloatValuesOK,
+ check: func(t *testing.T, n *NVMe) {
+ mx := n.Collect()
+
+ expected := map[string]int64{
+ "device_nvme0n1_available_spare": 100,
+ "device_nvme0n1_controller_busy_time": 497040,
+ "device_nvme0n1_critical_comp_time": 0,
+ "device_nvme0n1_critical_warning_available_spare": 0,
+ "device_nvme0n1_critical_warning_nvm_subsystem_reliability": 0,
+ "device_nvme0n1_critical_warning_persistent_memory_read_only": 0,
+ "device_nvme0n1_critical_warning_read_only": 0,
+ "device_nvme0n1_critical_warning_temp_threshold": 0,
+ "device_nvme0n1_critical_warning_volatile_mem_backup_failed": 0,
+ "device_nvme0n1_data_units_read": 5068041216000,
+ "device_nvme0n1_data_units_written": 69712734208000,
+ "device_nvme0n1_host_read_commands": 313528805,
+ "device_nvme0n1_host_write_commands": 1928062610,
+ "device_nvme0n1_media_errors": 0,
+ "device_nvme0n1_num_err_log_entries": 110,
+ "device_nvme0n1_percentage_used": 2,
+ "device_nvme0n1_power_cycles": 64,
+ "device_nvme0n1_power_on_time": 17906400,
+ "device_nvme0n1_temperature": 36,
+ "device_nvme0n1_thm_temp1_total_time": 0,
+ "device_nvme0n1_thm_temp1_trans_count": 0,
+ "device_nvme0n1_thm_temp2_total_time": 0,
+ "device_nvme0n1_thm_temp2_trans_count": 0,
+ "device_nvme0n1_unsafe_shutdowns": 39,
+ "device_nvme0n1_warning_temp_time": 0,
+ "device_nvme1n1_available_spare": 100,
+ "device_nvme1n1_controller_busy_time": 497040,
+ "device_nvme1n1_critical_comp_time": 0,
+ "device_nvme1n1_critical_warning_available_spare": 0,
+ "device_nvme1n1_critical_warning_nvm_subsystem_reliability": 0,
+ "device_nvme1n1_critical_warning_persistent_memory_read_only": 0,
+ "device_nvme1n1_critical_warning_read_only": 0,
+ "device_nvme1n1_critical_warning_temp_threshold": 0,
+ "device_nvme1n1_critical_warning_volatile_mem_backup_failed": 0,
+ "device_nvme1n1_data_units_read": 5068041216000,
+ "device_nvme1n1_data_units_written": 69712734208000,
+ "device_nvme1n1_host_read_commands": 313528805,
+ "device_nvme1n1_host_write_commands": 1928062610,
+ "device_nvme1n1_media_errors": 0,
+ "device_nvme1n1_num_err_log_entries": 110,
+ "device_nvme1n1_percentage_used": 2,
+ "device_nvme1n1_power_cycles": 64,
+ "device_nvme1n1_power_on_time": 17906400,
+ "device_nvme1n1_temperature": 36,
+ "device_nvme1n1_thm_temp1_total_time": 0,
+ "device_nvme1n1_thm_temp1_trans_count": 0,
+ "device_nvme1n1_thm_temp2_total_time": 0,
+ "device_nvme1n1_thm_temp2_trans_count": 0,
+ "device_nvme1n1_unsafe_shutdowns": 39,
+ "device_nvme1n1_warning_temp_time": 0,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "fail if 'nvme list' returns an empty list": {
+ {
+ prepare: prepareCaseEmptyList,
+ check: func(t *testing.T, n *NVMe) {
+ mx := n.Collect()
+
+ assert.Equal(t, (map[string]int64)(nil), mx)
+ },
+ },
+ },
+ "fail if 'nvme list' returns an error": {
+ {
+ prepare: prepareCaseErrOnList,
+ check: func(t *testing.T, n *NVMe) {
+ mx := n.Collect()
+
+ assert.Equal(t, (map[string]int64)(nil), mx)
+ },
+ },
+ },
+ "fail if 'nvme smart-log' returns an error": {
+ {
+ prepare: prepareCaseErrOnSmartLog,
+ check: func(t *testing.T, n *NVMe) {
+ mx := n.Collect()
+
+ assert.Equal(t, (map[string]int64)(nil), mx)
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ n := New()
+
+ for i, step := range test {
+ t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
+ step.prepare(n)
+ step.check(t, n)
+ })
+ }
+ })
+ }
+}
+
+func prepareCaseOK(n *NVMe) {
+ n.exec = &mockNVMeCLIExec{}
+}
+
+func prepareCaseStringValuesOK(n *NVMe) {
+ n.exec = &mockNVMeCLIExec{smartLogString: true}
+}
+
+func prepareCaseFloatValuesOK(n *NVMe) {
+ n.exec = &mockNVMeCLIExec{smartLogFloat: true}
+}
+
+func prepareCaseEmptyList(n *NVMe) {
+ n.exec = &mockNVMeCLIExec{emptyList: true}
+}
+
+func prepareCaseErrOnList(n *NVMe) {
+ n.exec = &mockNVMeCLIExec{errOnList: true}
+}
+
+func prepareCaseErrOnSmartLog(n *NVMe) {
+ n.exec = &mockNVMeCLIExec{errOnSmartLog: true}
+}
+
+type mockNVMeCLIExec struct {
+ errOnList bool
+ errOnSmartLog bool
+ emptyList bool
+ smartLogString bool
+ smartLogFloat bool
+}
+
+func (m *mockNVMeCLIExec) list() (*nvmeDeviceList, error) {
+ if m.errOnList {
+ return nil, errors.New("mock.list() error")
+ }
+
+ data := dataNVMeListJSON
+ if m.emptyList {
+ data = dataNVMeListEmptyJSON
+ }
+
+ var v nvmeDeviceList
+ if err := json.Unmarshal(data, &v); err != nil {
+ return nil, err
+ }
+
+ return &v, nil
+}
+
+func (m *mockNVMeCLIExec) smartLog(_ string) (*nvmeDeviceSmartLog, error) {
+ if m.errOnSmartLog {
+ return nil, errors.New("mock.smartLog() error")
+ }
+ if m.emptyList {
+ return nil, errors.New("mock.smartLog() no devices error")
+ }
+
+ data := dataNVMeSmartLogJSON
+ if m.smartLogString {
+ data = dataNVMeSmartLogStringJSON
+ }
+ if m.smartLogFloat {
+ data = dataNVMeSmartLogFloatJSON
+ }
+
+ var v nvmeDeviceSmartLog
+ if err := json.Unmarshal(data, &v); err != nil {
+ return nil, err
+ }
+
+ return &v, nil
+}
diff --git a/src/go/plugin/go.d/modules/nvme/testdata/config.json b/src/go/plugin/go.d/modules/nvme/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/nvme/testdata/config.yaml b/src/go/plugin/go.d/modules/nvme/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/nvme/testdata/nvme-list-empty.json b/src/go/plugin/go.d/modules/nvme/testdata/nvme-list-empty.json
new file mode 100644
index 000000000..e8da2407f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/testdata/nvme-list-empty.json
@@ -0,0 +1,4 @@
+{
+ "Devices": [
+ ]
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nvme/testdata/nvme-list.json b/src/go/plugin/go.d/modules/nvme/testdata/nvme-list.json
new file mode 100644
index 000000000..6bf159c4f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/testdata/nvme-list.json
@@ -0,0 +1,30 @@
+{
+ "Devices": [
+ {
+ "NameSpace": 1,
+ "DevicePath": "/dev/nvme0n1",
+ "Firmware": "SU6SM001",
+ "Index": 0,
+ "ModelNumber": "Seagate FireCuda 530 ZP4000GM30023",
+ "ProductName": "Non-Volatile memory controller: Seagate Technology PLC Device 0x5018",
+ "SerialNumber": "7VS00KNX",
+ "UsedBytes": 4000787030016,
+ "MaximumLBA": 7814037168,
+ "PhysicalSize": 4000787030016,
+ "SectorSize": 512
+ },
+ {
+ "NameSpace": 1,
+ "DevicePath": "/dev/nvme1n1",
+ "Firmware": "SU6SM001",
+ "Index": 1,
+ "ModelNumber": "Seagate FireCuda 530 ZP4000GM30023",
+ "ProductName": "Non-Volatile memory controller: Seagate Technology PLC Device 0x5018",
+ "SerialNumber": "7VS00J76",
+ "UsedBytes": 4000787030016,
+ "MaximumLBA": 7814037168,
+ "PhysicalSize": 4000787030016,
+ "SectorSize": 512
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log-float.json b/src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log-float.json
new file mode 100644
index 000000000..f63dd9772
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log-float.json
@@ -0,0 +1,24 @@
+{
+ "critical_warning": 0,
+ "temperature": 310.0,
+ "avail_spare": 100.0,
+ "spare_thresh": 5.0,
+ "percent_used": 2.0,
+ "endurance_grp_critical_warning_summary": 0,
+ "data_units_read": 9898518.0,
+ "data_units_written": 136157684.0,
+ "host_read_commands": 313528805.0,
+ "host_write_commands": 1928062610.0,
+ "controller_busy_time": 8284.0,
+ "power_cycles": 64.0,
+ "power_on_hours": 4974.0,
+ "unsafe_shutdowns": 39.0,
+ "media_errors": 0,
+ "num_err_log_entries": 110.0,
+ "warning_temp_time": 0,
+ "critical_comp_time": 0,
+ "thm_temp1_trans_count": 0,
+ "thm_temp2_trans_count": 0,
+ "thm_temp1_total_time": 0,
+ "thm_temp2_total_time": 0
+}
diff --git a/src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log-string.json b/src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log-string.json
new file mode 100644
index 000000000..f582e7485
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log-string.json
@@ -0,0 +1,24 @@
+{
+ "critical_warning": "0",
+ "temperature": "310",
+ "avail_spare": "100",
+ "spare_thresh": "5",
+ "percent_used": "2",
+ "endurance_grp_critical_warning_summary": "0",
+ "data_units_read": "9898518",
+ "data_units_written": "136157684",
+ "host_read_commands": "313528805",
+ "host_write_commands": "1928062610",
+ "controller_busy_time": "8284",
+ "power_cycles": "64",
+ "power_on_hours": "4974",
+ "unsafe_shutdowns": "39",
+ "media_errors": "0",
+ "num_err_log_entries": "110",
+ "warning_temp_time": "0",
+ "critical_comp_time": "0",
+ "thm_temp1_trans_count": "0",
+ "thm_temp2_trans_count": "0",
+ "thm_temp1_total_time": "0",
+ "thm_temp2_total_time": "0"
+}
diff --git a/src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log.json b/src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log.json
new file mode 100644
index 000000000..cbd0e4c7d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/nvme/testdata/nvme-smart-log.json
@@ -0,0 +1,24 @@
+{
+ "critical_warning": 0,
+ "temperature": 310,
+ "avail_spare": 100,
+ "spare_thresh": 5,
+ "percent_used": 2,
+ "endurance_grp_critical_warning_summary": 0,
+ "data_units_read": 9898518,
+ "data_units_written": 136157684,
+ "host_read_commands": 313528805,
+ "host_write_commands": 1928062610,
+ "controller_busy_time": 8284,
+ "power_cycles": 64,
+ "power_on_hours": 4974,
+ "unsafe_shutdowns": 39,
+ "media_errors": 0,
+ "num_err_log_entries": 110,
+ "warning_temp_time": 0,
+ "critical_comp_time": 0,
+ "thm_temp1_trans_count": 0,
+ "thm_temp2_trans_count": 0,
+ "thm_temp1_total_time": 0,
+ "thm_temp2_total_time": 0
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/README.md b/src/go/plugin/go.d/modules/openvpn/README.md
new file mode 120000
index 000000000..020da3ac6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/README.md
@@ -0,0 +1 @@
+integrations/openvpn.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/openvpn/charts.go b/src/go/plugin/go.d/modules/openvpn/charts.go
new file mode 100644
index 000000000..5874eced8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/charts.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+)
+
+var charts = Charts{
+ {
+ ID: "active_clients",
+ Title: "Total Number Of Active Clients",
+ Units: "clients",
+ Fam: "clients",
+ Ctx: "openvpn.active_clients",
+ Dims: Dims{
+ {ID: "clients"},
+ },
+ },
+ {
+ ID: "total_traffic",
+ Title: "Total Traffic",
+ Units: "kilobits/s",
+ Fam: "traffic",
+ Ctx: "openvpn.total_traffic",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "bytes_in", Name: "in", Algo: module.Incremental, Mul: 8, Div: 1000},
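+ // Mul: 8, Div: 1000 converts bytes/s to kilobits/s; the negative divisor for "out" draws it below the zero line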
+ {ID: "bytes_out", Name: "out", Algo: module.Incremental, Mul: 8, Div: -1000},
+ },
+ },
+}
+
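+// userCharts are per-user chart templates; the %s placeholders in IDs, families and dimension IDs
+// are expected to be filled in with the user name when the charts are instantiated (not shown in this file).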
+var userCharts = Charts{
+ {
+ ID: "%s_user_traffic",
+ Title: "User Traffic",
+ Units: "kilobits/s",
+ Fam: "user %s",
+ Ctx: "openvpn.user_traffic",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "%s_bytes_received", Name: "received", Algo: module.Incremental, Mul: 8, Div: 1000},
+ {ID: "%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1000},
+ },
+ },
+ {
+ ID: "%s_user_connection_time",
+ Title: "User Connection Time",
+ Units: "seconds",
+ Fam: "user %s",
+ Ctx: "openvpn.user_connection_time",
+ Dims: Dims{
+ {ID: "%s_connection_time", Name: "time"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/client/client.go b/src/go/plugin/go.d/modules/openvpn/client/client.go
new file mode 100644
index 000000000..23ceb18d8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/client/client.go
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
+var (
+ reLoadStats = regexp.MustCompile(`^SUCCESS: nclients=([0-9]+),bytesin=([0-9]+),bytesout=([0-9]+)`)
+ reVersion = regexp.MustCompile(`^OpenVPN Version: OpenVPN ([0-9]+)\.([0-9]+)\.([0-9]+) .+Management Version: ([0-9])`)
+)
+
+const maxLinesToRead = 500
+
+// New creates new OpenVPN client.
+func New(config socket.Config) *Client {
+ return &Client{Client: socket.New(config)}
+}
+
+// Client represents OpenVPN client.
+type Client struct {
+ socket.Client
+}
+
+// Users returns the list of currently connected clients, parsed from the 'status 3' output.
+func (c *Client) Users() (Users, error) {
+ lines, err := c.get(commandStatus3, readUntilEND)
+ if err != nil {
+ return nil, err
+ }
+ return decodeUsers(lines)
+}
+
+// LoadStats returns the number of connected clients and the total in/out traffic counters, parsed from the 'load-stats' output.
+func (c *Client) LoadStats() (*LoadStats, error) {
+ lines, err := c.get(commandLoadStats, readOneLine)
+ if err != nil {
+ return nil, err
+ }
+ return decodeLoadStats(lines)
+}
+
+// Version returns the OpenVPN and Management Interface versions, parsed from the 'version' output.
+func (c *Client) Version() (*Version, error) {
+ lines, err := c.get(commandVersion, readUntilEND)
+ if err != nil {
+ return nil, err
+ }
+ return decodeVersion(lines)
+}
+
+func (c *Client) get(command string, stopRead stopReadFunc) (output []string, err error) {
+ var num int
+ var maxLinesErr error
+ err = c.Command(command, func(bytes []byte) bool {
+ line := string(bytes)
+ num++
+ if num > maxLinesToRead {
+ maxLinesErr = fmt.Errorf("read line limit exceeded (%d)", maxLinesToRead)
+ return false
+ }
+
+ // skip real-time messages
+ if strings.HasPrefix(line, ">") {
+ return true
+ }
+
+ line = strings.Trim(line, "\r\n ")
+ output = append(output, line)
+ if stopRead != nil && stopRead(line) {
+ return false
+ }
+ return true
+ })
+ if maxLinesErr != nil {
+ return nil, maxLinesErr
+ }
+ return output, err
+}
+
+type stopReadFunc func(string) bool
+
+func readOneLine(_ string) bool { return true }
+
+func readUntilEND(s string) bool { return strings.HasSuffix(s, "END") }
+
+func decodeLoadStats(src []string) (*LoadStats, error) {
+ m := reLoadStats.FindStringSubmatch(strings.Join(src, " "))
+ if len(m) == 0 {
+ return nil, fmt.Errorf("parse failed: %v", src)
+ }
+ return &LoadStats{
+ NumOfClients: mustParseInt(m[1]),
+ BytesIn: mustParseInt(m[2]),
+ BytesOut: mustParseInt(m[3]),
+ }, nil
+}
+
+func decodeVersion(src []string) (*Version, error) {
+ m := reVersion.FindStringSubmatch(strings.Join(src, " "))
+ if len(m) == 0 {
+ return nil, fmt.Errorf("parse failed: %v", src)
+ }
+ return &Version{
+ Major: mustParseInt(m[1]),
+ Minor: mustParseInt(m[2]),
+ Patch: mustParseInt(m[3]),
+ Management: mustParseInt(m[4]),
+ }, nil
+}
+
+// works only for `status 3\n`
+func decodeUsers(src []string) (Users, error) {
+ var users Users
+
+ // [CLIENT_LIST common_name 178.66.34.194:54200 10.9.0.5 9319 8978 Thu May 9 05:01:44 2019 1557345704 username]
+ for _, v := range src {
+ if !strings.HasPrefix(v, "CLIENT_LIST") {
+ continue
+ }
+ parts := strings.Fields(v)
+ // Right after the connection there is no virtual IP, and both the common name and username are UNDEF
+ // CLIENT_LIST UNDEF 178.70.95.93:39324 1411 3474 Fri May 10 07:41:54 2019 1557441714 UNDEF
+ if len(parts) != 13 {
+ continue
+ }
+ u := User{
+ CommonName: parts[1],
+ RealAddress: parts[2],
+ VirtualAddress: parts[3],
+ BytesReceived: mustParseInt(parts[4]),
+ BytesSent: mustParseInt(parts[5]),
+ ConnectedSince: mustParseInt(parts[11]),
+ Username: parts[12],
+ }
+ users = append(users, u)
+ }
+ return users, nil
+}
+
+func mustParseInt(str string) int64 {
+ v, err := strconv.ParseInt(str, 10, 64)
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/client/client_test.go b/src/go/plugin/go.d/modules/openvpn/client/client_test.go
new file mode 100644
index 000000000..d40f6ea1b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/client/client_test.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ testLoadStatsData, _ = os.ReadFile("testdata/load-stats.txt")
+ testVersionData, _ = os.ReadFile("testdata/version.txt")
+ testStatus3Data, _ = os.ReadFile("testdata/status3.txt")
+ testMaxLinesExceededData = strings.Repeat(">CLIENT:ESTABLISHED,0\n", 501)
+)
+
+func TestNew(t *testing.T) { assert.IsType(t, (*Client)(nil), New(socket.Config{})) }
+
+func TestClient_GetVersion(t *testing.T) {
+ client := Client{Client: &mockSocketClient{}}
+ ver, err := client.Version()
+ assert.NoError(t, err)
+ expected := &Version{Major: 2, Minor: 3, Patch: 4, Management: 1}
+ assert.Equal(t, expected, ver)
+}
+
+func TestClient_GetLoadStats(t *testing.T) {
+ client := Client{Client: &mockSocketClient{}}
+ stats, err := client.LoadStats()
+ assert.NoError(t, err)
+ expected := &LoadStats{NumOfClients: 1, BytesIn: 7811, BytesOut: 7667}
+ assert.Equal(t, expected, stats)
+}
+
+func TestClient_GetUsers(t *testing.T) {
+ client := Client{
+ Client: &mockSocketClient{},
+ }
+ users, err := client.Users()
+ assert.NoError(t, err)
+ expected := Users{{
+ CommonName: "pepehome",
+ RealAddress: "1.2.3.4:44347",
+ VirtualAddress: "10.9.0.5",
+ BytesReceived: 6043,
+ BytesSent: 5661,
+ ConnectedSince: 1555439465,
+ Username: "pepe",
+ }}
+ assert.Equal(t, expected, users)
+}
+
+func TestClient_MaxLineExceeded(t *testing.T) {
+ client := Client{
+ Client: &mockSocketClient{maxLineExceeded: true},
+ }
+ _, err := client.Users()
+ assert.Error(t, err)
+}
+
+type mockSocketClient struct {
+ maxLineExceeded bool
+}
+
+func (m *mockSocketClient) Connect() error { return nil }
+
+func (m *mockSocketClient) Disconnect() error { return nil }
+
+func (m *mockSocketClient) Command(command string, process socket.Processor) error {
+ var s *bufio.Scanner
+
+ switch command {
+ default:
+ return fmt.Errorf("unknown command : %s", command)
+ case commandExit:
+ case commandVersion:
+ s = bufio.NewScanner(bytes.NewReader(testVersionData))
+ case commandStatus3:
+ if m.maxLineExceeded {
+ s = bufio.NewScanner(strings.NewReader(testMaxLinesExceededData))
+ break
+ }
+ s = bufio.NewScanner(bytes.NewReader(testStatus3Data))
+ case commandLoadStats:
+ s = bufio.NewScanner(bytes.NewReader(testLoadStatsData))
+ }
+
+ if s == nil {
+ return nil
+ }
+
+ for s.Scan() {
+ process(s.Bytes())
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/client/commands.go b/src/go/plugin/go.d/modules/openvpn/client/commands.go
new file mode 100644
index 000000000..f06b05c90
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/client/commands.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+/*
+https://openvpn.net/community-resources/management-interface/
+
+OUTPUT FORMAT
+-------------
+
+(1) Command success/failure indicated by "SUCCESS: [text]" or
+ "ERROR: [text]".
+
+(2) For commands which print multiple lines of output,
+ the last line will be "END".
+
+(3) Real-time messages will be in the form ">[source]:[text]",
+ where source is "CLIENT", "ECHO", "FATAL", "HOLD", "INFO", "LOG",
+ "NEED-OK", "PASSWORD", or "STATE".
+*/
+
+var (
+	// Close the management session, and resume listening on the
+	// management port for connections from other clients. Currently,
+	// the OpenVPN daemon can support at most a single management client
+	// at any one time.
+ commandExit = "exit\n"
+
+ // Show current daemon status information, in the same format as
+ // that produced by the OpenVPN --status directive.
+ commandStatus3 = "status 3\n"
+
+	// Show global server load stats (number of connected clients, total bytes in/out); barely documented upstream.
+ commandLoadStats = "load-stats\n"
+
+ // Show the current OpenVPN and Management Interface versions.
+ commandVersion = "version\n"
+)
diff --git a/src/go/plugin/go.d/modules/openvpn/client/testdata/load-stats.txt b/src/go/plugin/go.d/modules/openvpn/client/testdata/load-stats.txt
new file mode 100644
index 000000000..39c19ac5b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/client/testdata/load-stats.txt
@@ -0,0 +1 @@
+SUCCESS: nclients=1,bytesin=7811,bytesout=7667
diff --git a/src/go/plugin/go.d/modules/openvpn/client/testdata/status3.txt b/src/go/plugin/go.d/modules/openvpn/client/testdata/status3.txt
new file mode 100644
index 000000000..1986703d2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/client/testdata/status3.txt
@@ -0,0 +1,77 @@
+>CLIENT:ESTABLISHED,0
+>CLIENT:ENV,n_clients=1
+>CLIENT:ENV,ifconfig_pool_local_ip=10.9.0.6
+>CLIENT:ENV,ifconfig_pool_remote_ip=10.9.0.5
+>CLIENT:ENV,time_unix=1555439465
+>CLIENT:ENV,time_ascii=Wed Apr 17 03:31:05 2019
+>CLIENT:ENV,trusted_port=44347
+>CLIENT:ENV,trusted_ip=1.2.3.4
+>CLIENT:ENV,common_name=pepehome
+>CLIENT:ENV,auth_control_file=/tmp/openvpn_acf_ae7f48d495d3d4cfb3065763b916d9ab.tmp
+>CLIENT:ENV,untrusted_port=44347
+>CLIENT:ENV,untrusted_ip=1.2.3.4
+>CLIENT:ENV,username=pepe
+>CLIENT:ENV,tls_serial_hex_0=04
+>CLIENT:ENV,tls_serial_0=4
+>CLIENT:ENV,tls_digest_0=be:83:8c:95:21:bf:f3:87:1a:35:86:d9:2e:f3:f5:d7:08:a9:db:7e
+>CLIENT:ENV,tls_id_0=C=RU, ST=AM, L=Blagoveshchensk, O=L2ISBAD, OU=MyOrganizationalUnit, CN=pepehome, name=EasyRSA, emailAddress=me@myhost.mydomain
+>CLIENT:ENV,X509_0_emailAddress=me@myhost.mydomain
+>CLIENT:ENV,X509_0_name=EasyRSA
+>CLIENT:ENV,X509_0_CN=pepehome
+>CLIENT:ENV,X509_0_OU=MyOrganizationalUnit
+>CLIENT:ENV,X509_0_O=L2ISBAD
+>CLIENT:ENV,X509_0_L=Blagoveshchensk
+>CLIENT:ENV,X509_0_ST=AM
+>CLIENT:ENV,X509_0_C=RU
+>CLIENT:ENV,tls_serial_hex_1=ad:4c:1e:65:e8:3c:ec:6f
+>CLIENT:ENV,tls_serial_1=12487389289828379759
+>CLIENT:ENV,tls_digest_1=52:e2:1d:41:3f:34:09:70:4c:2d:71:8c:a7:28:fa:6b:66:2b:28:6e
+>CLIENT:ENV,tls_id_1=C=RU, ST=AM, L=Blagoveshchensk, O=L2ISBAD, OU=MyOrganizationalUnit, CN=L2ISBAD CA, name=EasyRSA, emailAddress=me@myhost.mydomain
+>CLIENT:ENV,X509_1_emailAddress=me@myhost.mydomain
+>CLIENT:ENV,X509_1_name=EasyRSA
+>CLIENT:ENV,X509_1_CN=L2ISBAD CA
+>CLIENT:ENV,X509_1_OU=MyOrganizationalUnit
+>CLIENT:ENV,X509_1_O=L2ISBAD
+>CLIENT:ENV,X509_1_L=Blagoveshchensk
+>CLIENT:ENV,X509_1_ST=AM
+>CLIENT:ENV,X509_1_C=RU
+>CLIENT:ENV,remote_port_1=1194
+>CLIENT:ENV,local_port_1=1194
+>CLIENT:ENV,proto_1=udp
+>CLIENT:ENV,daemon_pid=4237
+>CLIENT:ENV,daemon_start_time=1555439449
+>CLIENT:ENV,daemon_log_redirect=0
+>CLIENT:ENV,daemon=1
+>CLIENT:ENV,verb=3
+>CLIENT:ENV,config=/etc/openvpn/server.conf
+>CLIENT:ENV,ifconfig_local=10.8.0.1
+>CLIENT:ENV,ifconfig_remote=10.8.0.2
+>CLIENT:ENV,route_net_gateway=188.168.142.252
+>CLIENT:ENV,route_vpn_gateway=10.8.0.2
+>CLIENT:ENV,route_network_1=10.9.0.1
+>CLIENT:ENV,route_netmask_1=255.255.255.255
+>CLIENT:ENV,route_gateway_1=10.8.0.2
+>CLIENT:ENV,route_network_2=10.9.0.5
+>CLIENT:ENV,route_netmask_2=255.255.255.255
+>CLIENT:ENV,route_gateway_2=10.8.0.2
+>CLIENT:ENV,route_network_3=10.9.0.9
+>CLIENT:ENV,route_netmask_3=255.255.255.255
+>CLIENT:ENV,route_gateway_3=10.8.0.2
+>CLIENT:ENV,route_network_4=10.8.0.0
+>CLIENT:ENV,route_netmask_4=255.255.255.0
+>CLIENT:ENV,route_gateway_4=10.8.0.2
+>CLIENT:ENV,script_context=init
+>CLIENT:ENV,tun_mtu=1500
+>CLIENT:ENV,link_mtu=1558
+>CLIENT:ENV,dev=tun99
+>CLIENT:ENV,dev_type=tun
+>CLIENT:ENV,redirect_gateway=0
+>CLIENT:ENV,END
+TITLE OpenVPN 2.3.4 i586-pc-linux-gnu [SSL (OpenSSL)] [LZO] [EPOLL] [PKCS11] [MH] [IPv6] built on Jun 26 2017
+TIME Wed Apr 17 03:31:06 2019 1555439466
+HEADER CLIENT_LIST Common Name Real Address Virtual Address Bytes Received Bytes Sent Connected Since Connected Since (time_t) Username
+CLIENT_LIST pepehome 1.2.3.4:44347 10.9.0.5 6043 5661 Wed Apr 17 03:31:05 2019 1555439465 pepe
+HEADER ROUTING_TABLE Virtual Address Common Name Real Address Last Ref Last Ref (time_t)
+ROUTING_TABLE 10.9.0.5 pepehome 1.2.3.4:44347 Wed Apr 17 03:31:06 2019 1555439466
+GLOBAL_STATS Max bcast/mcast queue length 0
+END
diff --git a/src/go/plugin/go.d/modules/openvpn/client/testdata/version.txt b/src/go/plugin/go.d/modules/openvpn/client/testdata/version.txt
new file mode 100644
index 000000000..e525876d8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/client/testdata/version.txt
@@ -0,0 +1,3 @@
+OpenVPN Version: OpenVPN 2.3.4 i586-pc-linux-gnu [SSL (OpenSSL)] [LZO] [EPOLL] [PKCS11] [MH] [IPv6] built on Jun 26 2017
+Management Version: 1
+END
diff --git a/src/go/plugin/go.d/modules/openvpn/client/types.go b/src/go/plugin/go.d/modules/openvpn/client/types.go
new file mode 100644
index 000000000..a0a283028
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/client/types.go
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+type LoadStats struct {
+ NumOfClients int64
+ BytesIn int64
+ BytesOut int64
+}
+
+type Version struct {
+ Major int64
+ Minor int64
+ Patch int64
+ Management int64
+}
+
+type Users []User
+
+type User struct {
+ CommonName string
+ RealAddress string
+ VirtualAddress string
+ BytesReceived int64
+ BytesSent int64
+ ConnectedSince int64
+ Username string
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/collect.go b/src/go/plugin/go.d/modules/openvpn/collect.go
new file mode 100644
index 000000000..180fae3bd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/collect.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn
+
+import (
+ "fmt"
+ "time"
+)
+
+func (o *OpenVPN) collect() (map[string]int64, error) {
+ var err error
+
+ if err := o.client.Connect(); err != nil {
+ return nil, err
+ }
+ defer func() { _ = o.client.Disconnect() }()
+
+ mx := make(map[string]int64)
+
+ if err = o.collectLoadStats(mx); err != nil {
+ return nil, err
+ }
+
+ if o.perUserMatcher != nil {
+ if err = o.collectUsers(mx); err != nil {
+ return nil, err
+ }
+ }
+
+ return mx, nil
+}
+
+func (o *OpenVPN) collectLoadStats(mx map[string]int64) error {
+ stats, err := o.client.LoadStats()
+ if err != nil {
+ return err
+ }
+
+ mx["clients"] = stats.NumOfClients
+ mx["bytes_in"] = stats.BytesIn
+ mx["bytes_out"] = stats.BytesOut
+ return nil
+}
+
+func (o *OpenVPN) collectUsers(mx map[string]int64) error {
+ users, err := o.client.Users()
+ if err != nil {
+ return err
+ }
+
+ now := time.Now().Unix()
+ var name string
+
+ for _, user := range users {
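+		// OpenVPN reports the username as UNDEF when username/password authentication is not used,
+		// so fall back to the certificate common name in that case.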
+ if user.Username == "UNDEF" {
+ name = user.CommonName
+ } else {
+ name = user.Username
+ }
+
+ if !o.perUserMatcher.MatchString(name) {
+ continue
+ }
+ if !o.collectedUsers[name] {
+ o.collectedUsers[name] = true
+ if err := o.addUserCharts(name); err != nil {
+ o.Warning(err)
+ }
+ }
+ mx[name+"_bytes_received"] = user.BytesReceived
+ mx[name+"_bytes_sent"] = user.BytesSent
+ mx[name+"_connection_time"] = now - user.ConnectedSince
+ }
+ return nil
+}
+
+func (o *OpenVPN) addUserCharts(userName string) error {
+ cs := userCharts.Copy()
+
+ for _, chart := range *cs {
+ chart.ID = fmt.Sprintf(chart.ID, userName)
+ chart.Fam = fmt.Sprintf(chart.Fam, userName)
+
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, userName)
+ }
+ chart.MarkNotCreated()
+ }
+ return o.charts.Add(*cs...)
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/config_schema.json b/src/go/plugin/go.d/modules/openvpn/config_schema.json
new file mode 100644
index 000000000..8bbda1fd4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/config_schema.json
@@ -0,0 +1,102 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "OpenVPN collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:7505"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "per_user_stats": {
+ "title": "User selector",
+ "description": "Configuration for monitoring specific users. If left empty, no user stats will be collected.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "includes": {
+ "title": "Include",
+ "description": "Include users whose usernames match any of the specified inclusion [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Username pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "excludes": {
+ "title": "Exclude",
+ "description": "Exclude users whose usernames match any of the specified exclusion [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Username pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "per_user_stats": {
+ "ui:help": "The logic for inclusion and exclusion is as follows: `(include1 OR include2) AND !(exclude1 OR exclude2)`."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout"
+ ]
+ },
+ {
+ "title": "User stats",
+ "fields": [
+ "per_user_stats"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/init.go b/src/go/plugin/go.d/modules/openvpn/init.go
new file mode 100644
index 000000000..563edbaa6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/init.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn/client"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
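+// validateConfig is currently a no-op; connection problems (e.g. a bad address) surface at Check/Collect time.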
+func (o *OpenVPN) validateConfig() error {
+ return nil
+}
+
+func (o *OpenVPN) initPerUserMatcher() (matcher.Matcher, error) {
+ if o.PerUserStats.Empty() {
+ return nil, nil
+ }
+ return o.PerUserStats.Parse()
+}
+
+func (o *OpenVPN) initClient() *client.Client {
+ config := socket.Config{
+ Address: o.Address,
+ ConnectTimeout: o.Timeout.Duration(),
+ ReadTimeout: o.Timeout.Duration(),
+ WriteTimeout: o.Timeout.Duration(),
+ }
+ return &client.Client{Client: socket.New(config)}
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/integrations/openvpn.md b/src/go/plugin/go.d/modules/openvpn/integrations/openvpn.md
new file mode 100644
index 000000000..612d5eaab
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/integrations/openvpn.md
@@ -0,0 +1,258 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/openvpn/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/openvpn/metadata.yaml"
+sidebar_label: "OpenVPN"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/VPNs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenVPN
+
+
+<img src="https://netdata.cloud/img/openvpn.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: openvpn
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors OpenVPN servers.
+
+It uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per OpenVPN instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| openvpn.active_clients | clients | clients |
+| openvpn.total_traffic | in, out | kilobits/s |
+
+### Per user
+
+These metrics refer to the VPN user.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| username | VPN username |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| openvpn.user_traffic | in, out | kilobits/s |
+| openvpn.user_connection_time | time | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable in go.d.conf.
+
+This collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf).
+
+From the documentation for the OpenVPN Management Interface:
+> Currently, the OpenVPN daemon can at most support a single management client any one time.
+
+It is disabled by default so that it does not interfere with other tools that use the `Management Interface`.
+
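+For example, a minimal `go.d.conf` entry that enables the module might look like this (a sketch; the rest of the file is omitted):
+
+```yaml
+modules:
+  openvpn: yes
+```
+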
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/openvpn.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/openvpn.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Server address in IP:PORT format. | 127.0.0.1:7505 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
+| per_user_stats | User selector. Determines which user metrics will be collected. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:7505
+
+```
+</details>
+
+##### With user metrics
+
+Collect metrics of all users.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:7505
+ per_user_stats:
+ includes:
+ - "* *"
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:7505
+
+ - name: remote
+ address: 203.0.113.0:7505
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `openvpn` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m openvpn
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `openvpn` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep openvpn
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep openvpn /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep openvpn
+```
+
+
diff --git a/src/go/plugin/go.d/modules/openvpn/metadata.yaml b/src/go/plugin/go.d/modules/openvpn/metadata.yaml
new file mode 100644
index 000000000..49360b2fd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/metadata.yaml
@@ -0,0 +1,177 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-openvpn
+ plugin_name: go.d.plugin
+ module_name: openvpn
+ monitored_instance:
+ name: OpenVPN
+ link: https://openvpn.net/
+ icon_filename: openvpn.svg
+ categories:
+ - data-collection.vpns
+ keywords:
+ - openvpn
+ - vpn
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors OpenVPN servers.
+
+ It uses OpenVPN [Management Interface](https://openvpn.net/community-resources/management-interface/) to collect metrics.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable in go.d.conf.
+ description: |
+ This collector is disabled by default. You need to explicitly enable it in [go.d.conf](https://github.com/netdata/netdata/blob/master/src/go/plugin/go.d/config/go.d.conf).
+
+ From the documentation for the OpenVPN Management Interface:
+ > Currently, the OpenVPN daemon can at most support a single management client any one time.
+
+ It is disabled to not break other tools which use `Management Interface`.
+ configuration:
+ file:
+ name: go.d/openvpn.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: Server address in IP:PORT format.
+ default_value: 127.0.0.1:7505
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
+ - name: per_user_stats
+ description: User selector. Determines which user metrics will be collected.
+ default_value: ""
+ required: false
+ details: |
+ Metrics of users matching the selector will be collected.
+
+ - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+ - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).
+ - Syntax:
+
+ ```yaml
+ per_user_stats:
+ includes:
+ - pattern1
+ - pattern2
+ excludes:
+ - pattern3
+ - pattern4
+ ```
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:7505
+ - name: With user metrics
+ description: Collect metrics of all users.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:7505
+ per_user_stats:
+ includes:
+ - "* *"
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:7505
+
+ - name: remote
+ address: 203.0.113.0:7505
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: openvpn.active_clients
+ description: Total Number Of Active Clients
+ unit: clients
+ chart_type: line
+ dimensions:
+ - name: clients
+ - name: openvpn.total_traffic
+ description: Total Traffic
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: user
+ description: These metrics refer to the VPN user.
+ labels:
+ - name: username
+ description: VPN username
+ metrics:
+ - name: openvpn.user_traffic
+ description: User Traffic
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: openvpn.user_connection_time
+ description: User Connection Time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
diff --git a/src/go/plugin/go.d/modules/openvpn/openvpn.go b/src/go/plugin/go.d/modules/openvpn/openvpn.go
new file mode 100644
index 000000000..52bada3ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/openvpn.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn
+
+import (
+ _ "embed"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn/client"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("openvpn", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *OpenVPN {
+ return &OpenVPN{
+ Config: Config{
+ Address: "127.0.0.1:7505",
+ Timeout: web.Duration(time.Second),
+ },
+
+ charts: charts.Copy(),
+ collectedUsers: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ PerUserStats matcher.SimpleExpr `yaml:"per_user_stats,omitempty" json:"per_user_stats"`
+}
+
+type (
+ OpenVPN struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ client openVPNClient
+
+ collectedUsers map[string]bool
+ perUserMatcher matcher.Matcher
+ }
+ openVPNClient interface {
+ socket.Client
+ Version() (*client.Version, error)
+ LoadStats() (*client.LoadStats, error)
+ Users() (client.Users, error)
+ }
+)
+
+func (o *OpenVPN) Configuration() any {
+ return o.Config
+}
+
+func (o *OpenVPN) Init() error {
+ if err := o.validateConfig(); err != nil {
+ o.Error(err)
+ return err
+ }
+
+ m, err := o.initPerUserMatcher()
+ if err != nil {
+ o.Error(err)
+ return err
+ }
+ o.perUserMatcher = m
+
+ o.client = o.initClient()
+
+ o.Infof("using address: %s, timeout: %s", o.Address, o.Timeout)
+
+ return nil
+}
+
+func (o *OpenVPN) Check() error {
+ if err := o.client.Connect(); err != nil {
+ o.Error(err)
+ return err
+ }
+ defer func() { _ = o.client.Disconnect() }()
+
+ ver, err := o.client.Version()
+ if err != nil {
+ o.Error(err)
+ o.Cleanup()
+ return err
+ }
+
+ o.Infof("connected to OpenVPN v%d.%d.%d, Management v%d", ver.Major, ver.Minor, ver.Patch, ver.Management)
+
+ return nil
+}
+
+func (o *OpenVPN) Charts() *Charts { return o.charts }
+
+func (o *OpenVPN) Collect() map[string]int64 {
+ mx, err := o.collect()
+ if err != nil {
+ o.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (o *OpenVPN) Cleanup() {
+ if o.client == nil {
+ return
+ }
+ _ = o.client.Disconnect()
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/openvpn_test.go b/src/go/plugin/go.d/modules/openvpn/openvpn_test.go
new file mode 100644
index 000000000..d81747ceb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/openvpn_test.go
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn
+
+import (
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/openvpn/client"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestOpenVPN_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &OpenVPN{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestOpenVPN_Init(t *testing.T) {
+ assert.NoError(t, New().Init())
+}
+
+func TestOpenVPN_Check(t *testing.T) {
+ job := New()
+
+ require.NoError(t, job.Init())
+ job.client = prepareMockOpenVPNClient()
+ require.NoError(t, job.Check())
+}
+
+func TestOpenVPN_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestOpenVPN_Cleanup(t *testing.T) {
+ job := New()
+
+ assert.NotPanics(t, job.Cleanup)
+ require.NoError(t, job.Init())
+ job.client = prepareMockOpenVPNClient()
+ require.NoError(t, job.Check())
+ job.Cleanup()
+}
+
+func TestOpenVPN_Collect(t *testing.T) {
+ job := New()
+
+ require.NoError(t, job.Init())
+ job.perUserMatcher = matcher.TRUE()
+ job.client = prepareMockOpenVPNClient()
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "bytes_in": 1,
+ "bytes_out": 1,
+ "clients": 1,
+ "name_bytes_received": 1,
+ "name_bytes_sent": 2,
+ }
+
+ mx := job.Collect()
+ require.NotNil(t, mx)
+ delete(mx, "name_connection_time")
+ assert.Equal(t, expected, mx)
+}
+
+func TestOpenVPN_Collect_UNDEFUsername(t *testing.T) {
+ job := New()
+
+ require.NoError(t, job.Init())
+ job.perUserMatcher = matcher.TRUE()
+ cl := prepareMockOpenVPNClient()
+ cl.users = testUsersUNDEF
+ job.client = cl
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "bytes_in": 1,
+ "bytes_out": 1,
+ "clients": 1,
+ "common_name_bytes_received": 1,
+ "common_name_bytes_sent": 2,
+ }
+
+ mx := job.Collect()
+ require.NotNil(t, mx)
+ delete(mx, "common_name_connection_time")
+ assert.Equal(t, expected, mx)
+}
+
+func prepareMockOpenVPNClient() *mockOpenVPNClient {
+ return &mockOpenVPNClient{
+ version: testVersion,
+ loadStats: testLoadStats,
+ users: testUsers,
+ }
+}
+
+type mockOpenVPNClient struct {
+ version client.Version
+ loadStats client.LoadStats
+ users client.Users
+}
+
+func (m *mockOpenVPNClient) Connect() error { return nil }
+func (m *mockOpenVPNClient) Disconnect() error { return nil }
+func (m *mockOpenVPNClient) Version() (*client.Version, error) { return &m.version, nil }
+func (m *mockOpenVPNClient) LoadStats() (*client.LoadStats, error) { return &m.loadStats, nil }
+func (m *mockOpenVPNClient) Users() (client.Users, error) { return m.users, nil }
+func (m *mockOpenVPNClient) Command(_ string, _ socket.Processor) error {
+	// the mock implements Version, LoadStats and Users directly, so Command must never be reached
+	panic("Command should not be called on the mock")
+}
+
+var (
+ testVersion = client.Version{Major: 1, Minor: 1, Patch: 1, Management: 1}
+ testLoadStats = client.LoadStats{NumOfClients: 1, BytesIn: 1, BytesOut: 1}
+ testUsers = client.Users{{
+ CommonName: "common_name",
+ RealAddress: "1.2.3.4:4321",
+ VirtualAddress: "1.2.3.4",
+ BytesReceived: 1,
+ BytesSent: 2,
+ ConnectedSince: 3,
+ Username: "name",
+ }}
+ testUsersUNDEF = client.Users{{
+ CommonName: "common_name",
+ RealAddress: "1.2.3.4:4321",
+ VirtualAddress: "1.2.3.4",
+ BytesReceived: 1,
+ BytesSent: 2,
+ ConnectedSince: 3,
+ Username: "UNDEF",
+ }}
+)
diff --git a/src/go/plugin/go.d/modules/openvpn/testdata/config.json b/src/go/plugin/go.d/modules/openvpn/testdata/config.json
new file mode 100644
index 000000000..30411ebf3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/testdata/config.json
@@ -0,0 +1,13 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "per_user_stats": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/src/go/plugin/go.d/modules/openvpn/testdata/config.yaml b/src/go/plugin/go.d/modules/openvpn/testdata/config.yaml
new file mode 100644
index 000000000..22296ce56
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn/testdata/config.yaml
@@ -0,0 +1,8 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+per_user_stats:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/README.md b/src/go/plugin/go.d/modules/openvpn_status_log/README.md
new file mode 120000
index 000000000..603c8249b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/README.md
@@ -0,0 +1 @@
+integrations/openvpn_status_log.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/charts.go b/src/go/plugin/go.d/modules/openvpn_status_log/charts.go
new file mode 100644
index 000000000..56716d294
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/charts.go
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn_status_log
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+var charts = module.Charts{
+ {
+ ID: "active_clients",
+ Title: "Active Clients",
+ Units: "active clients",
+ Fam: "active_clients",
+ Ctx: "openvpn.active_clients",
+ Dims: module.Dims{
+ {ID: "clients"},
+ },
+ },
+ {
+ ID: "traffic",
+ Title: "Traffic",
+ Units: "kilobits/s",
+ Fam: "traffic",
+ Ctx: "openvpn.total_traffic",
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "bytes_in", Name: "in", Algo: module.Incremental, Mul: 8, Div: 1000},
+ {ID: "bytes_out", Name: "out", Algo: module.Incremental, Mul: -8, Div: 1000},
+ },
+ },
+}
+
+var userCharts = module.Charts{
+ {
+ ID: "%s_user_traffic",
+ Title: "User Traffic",
+ Units: "kilobits/s",
+ Fam: "user stats",
+ Ctx: "openvpn.user_traffic",
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "%s_bytes_in", Name: "in", Algo: module.Incremental, Mul: 8, Div: 1000},
+ {ID: "%s_bytes_out", Name: "out", Algo: module.Incremental, Mul: -8, Div: 1000},
+ },
+ },
+ {
+ ID: "%s_user_connection_time",
+ Title: "User Connection Time",
+ Units: "seconds",
+ Fam: "user stats",
+ Ctx: "openvpn.user_connection_time",
+ Dims: module.Dims{
+ {ID: "%s_connection_time", Name: "time"},
+ },
+ },
+}
+
+func (o *OpenVPNStatusLog) addUserCharts(userName string) error {
+ cs := userCharts.Copy()
+
+ for _, chart := range *cs {
+ chart.ID = fmt.Sprintf(chart.ID, userName)
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, userName)
+ }
+ chart.MarkNotCreated()
+ }
+ return o.charts.Add(*cs...)
+}
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/collect.go b/src/go/plugin/go.d/modules/openvpn_status_log/collect.go
new file mode 100644
index 000000000..f6a442fd5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/collect.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn_status_log
+
+import (
+ "time"
+)
+
+func (o *OpenVPNStatusLog) collect() (map[string]int64, error) {
+ clients, err := parse(o.LogPath)
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ collectTotalStats(mx, clients)
+
+ if o.perUserMatcher != nil && numOfClients(clients) > 0 {
+ o.collectUsers(mx, clients)
+ }
+
+ return mx, nil
+}
+
+func collectTotalStats(mx map[string]int64, clients []clientInfo) {
+ var in, out int64
+ for _, c := range clients {
+ in += c.bytesReceived
+ out += c.bytesSent
+ }
+ mx["clients"] = numOfClients(clients)
+ mx["bytes_in"] = in
+ mx["bytes_out"] = out
+}
+
+func (o *OpenVPNStatusLog) collectUsers(mx map[string]int64, clients []clientInfo) {
+ now := time.Now().Unix()
+
+ for _, user := range clients {
+ name := user.commonName
+ if !o.perUserMatcher.MatchString(name) {
+ continue
+ }
+ if !o.collectedUsers[name] {
+ o.collectedUsers[name] = true
+ if err := o.addUserCharts(name); err != nil {
+ o.Warning(err)
+ }
+ }
+ mx[name+"_bytes_in"] = user.bytesReceived
+ mx[name+"_bytes_out"] = user.bytesSent
+ mx[name+"_connection_time"] = now - user.connectedSince
+ }
+}
+
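+// numOfClients counts connected clients, skipping entries with an empty or UNDEF common name
+// (clients that have not yet completed authentication).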
+func numOfClients(clients []clientInfo) int64 {
+ var num int64
+ for _, v := range clients {
+ if v.commonName != "" && v.commonName != "UNDEF" {
+ num++
+ }
+ }
+ return num
+}
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/config_schema.json b/src/go/plugin/go.d/modules/openvpn_status_log/config_schema.json
new file mode 100644
index 000000000..db3af2cc8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/config_schema.json
@@ -0,0 +1,92 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "OpenVPN status log collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "log_path": {
+ "title": "Log file",
+ "description": "Path to the status log file.",
+ "type": "string",
+ "default": "/var/log/openvpn/status.log",
+ "pattern": "^$|^/"
+ },
+ "per_user_stats": {
+ "title": "User selector",
+ "description": "Configuration for monitoring specific users. If left empty, no user stats will be collected.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "includes": {
+ "title": "Include",
+ "description": "Include users whose usernames match any of the specified inclusion [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Username pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "excludes": {
+ "title": "Exclude",
+ "description": "Exclude users whose usernames match any of the specified exclusion [patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Username pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ }
+ },
+ "required": [
+    "log_path"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "log_path"
+ ]
+ },
+ {
+ "title": "User stats",
+ "fields": [
+ "per_user_stats"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/init.go b/src/go/plugin/go.d/modules/openvpn_status_log/init.go
new file mode 100644
index 000000000..f2e6bee37
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/init.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn_status_log
+
+import (
+ "errors"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+func (o *OpenVPNStatusLog) validateConfig() error {
+ if o.LogPath == "" {
+ return errors.New("empty 'log_path'")
+ }
+ return nil
+}
+
+func (o *OpenVPNStatusLog) initPerUserStatsMatcher() (matcher.Matcher, error) {
+ if o.PerUserStats.Empty() {
+ return nil, nil
+ }
+ m, err := o.PerUserStats.Parse()
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/integrations/openvpn_status_log.md b/src/go/plugin/go.d/modules/openvpn_status_log/integrations/openvpn_status_log.md
new file mode 100644
index 000000000..9a5b56663
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/integrations/openvpn_status_log.md
@@ -0,0 +1,213 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/openvpn_status_log/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/openvpn_status_log/metadata.yaml"
+sidebar_label: "OpenVPN status log"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/VPNs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenVPN status log
+
+
+<img src="https://netdata.cloud/img/openvpn.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: openvpn_status_log
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors OpenVPN servers.
+
+It parses the server status log file and provides summary and per-user metrics.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per OpenVPN status log instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| openvpn.active_clients | clients | clients |
+| openvpn.total_traffic | in, out | kilobits/s |
+
+### Per user
+
+These metrics refer to the VPN user.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| username | VPN username |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| openvpn.user_traffic | in, out | kilobits/s |
+| openvpn.user_connection_time | time | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/openvpn_status_log.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/openvpn_status_log.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| log_path | Path to status log. | /var/log/openvpn/status.log | yes |
+| per_user_stats | User selector. Determines which user metrics will be collected. | | no |
+
+</details>
+
+#### Examples
+
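+##### Basic
+
+A minimal example configuration (a sketch that assumes the default status log location).
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    log_path: /var/log/openvpn/status.log
+
+```
+</details>
+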
+##### With user metrics
+
+Collect metrics of all users.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ per_user_stats:
+ includes:
+ - "* *"
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `openvpn_status_log` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m openvpn_status_log
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `openvpn_status_log` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep openvpn_status_log
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep openvpn_status_log /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep openvpn_status_log
+```
+
+
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/metadata.yaml b/src/go/plugin/go.d/modules/openvpn_status_log/metadata.yaml
new file mode 100644
index 000000000..8636de63b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/metadata.yaml
@@ -0,0 +1,144 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-openvpn_status_log
+ plugin_name: go.d.plugin
+ module_name: openvpn_status_log
+ monitored_instance:
+ name: OpenVPN status log
+ link: https://openvpn.net/
+ icon_filename: openvpn.svg
+ categories:
+ - data-collection.vpns
+ keywords:
+ - openvpn
+ - vpn
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors OpenVPN server.
+
+ It parses server log files and provides summary and per user metrics.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/openvpn_status_log.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: log_path
+ description: Path to status log.
+ default_value: /var/log/openvpn/status.log
+ required: true
+ - name: per_user_stats
+ description: User selector. Determines which user metrics will be collected.
+ default_value: ""
+ required: false
+ details: |
+ Metrics of users matching the selector will be collected.
+ - Logic: (pattern1 OR pattern2) AND !(pattern3 or pattern4)
+ - Pattern syntax: [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).
+ - Syntax:
+ ```yaml
+ per_user_stats:
+ includes:
+ - pattern1
+ - pattern2
+ excludes:
+ - pattern3
+ - pattern4
+ ```
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: With user metrics
+ description: Collect metrics of all users.
+ config: |
+ jobs:
+ - name: local
+ per_user_stats:
+ includes:
+ - "* *"
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: openvpn.active_clients
+ description: Total Number Of Active Clients
+ unit: clients
+ chart_type: line
+ dimensions:
+ - name: clients
+ - name: openvpn.total_traffic
+ description: Total Traffic
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: user
+ description: These metrics refer to the VPN user.
+ labels:
+ - name: username
+ description: VPN username
+ metrics:
+ - name: openvpn.user_traffic
+ description: User Traffic
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: openvpn.user_connection_time
+ description: User Connection Time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/openvpn.go b/src/go/plugin/go.d/modules/openvpn_status_log/openvpn.go
new file mode 100644
index 000000000..7b2914df9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/openvpn.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn_status_log
+
+import (
+ _ "embed"
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("openvpn_status_log", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *OpenVPNStatusLog {
+ return &OpenVPNStatusLog{
+ Config: Config{
+ LogPath: "/var/log/openvpn/status.log",
+ },
+ charts: charts.Copy(),
+ collectedUsers: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ LogPath string `yaml:"log_path" json:"log_path"`
+ PerUserStats matcher.SimpleExpr `yaml:"per_user_stats,omitempty" json:"per_user_stats"`
+}
+
+type OpenVPNStatusLog struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ perUserMatcher matcher.Matcher
+ collectedUsers map[string]bool
+}
+
+func (o *OpenVPNStatusLog) Configuration() any {
+ return o.Config
+}
+
+func (o *OpenVPNStatusLog) Init() error {
+ if err := o.validateConfig(); err != nil {
+ o.Errorf("error on validating config: %v", err)
+ return err
+ }
+
+ m, err := o.initPerUserStatsMatcher()
+ if err != nil {
+ o.Errorf("error on creating 'per_user_stats' matcher: %v", err)
+ return err
+ }
+ if m != nil {
+ o.perUserMatcher = m
+ }
+
+ return nil
+}
+
+func (o *OpenVPNStatusLog) Check() error {
+ mx, err := o.collect()
+ if err != nil {
+ o.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (o *OpenVPNStatusLog) Charts() *module.Charts {
+ return o.charts
+}
+
+func (o *OpenVPNStatusLog) Collect() map[string]int64 {
+ mx, err := o.collect()
+ if err != nil {
+ o.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (o *OpenVPNStatusLog) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/openvpn_test.go b/src/go/plugin/go.d/modules/openvpn_status_log/openvpn_test.go
new file mode 100644
index 000000000..f3d852d5a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/openvpn_test.go
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn_status_log
+
+import (
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ pathNonExistentFile = "testdata/v2.5.1/non-existent.txt"
+ pathEmptyFile = "testdata/v2.5.1/empty.txt"
+ pathStaticKey = "testdata/v2.5.1/static-key.txt"
+ pathStatusVersion1 = "testdata/v2.5.1/version1.txt"
+ pathStatusVersion1NoClients = "testdata/v2.5.1/version1-no-clients.txt"
+ pathStatusVersion2 = "testdata/v2.5.1/version2.txt"
+ pathStatusVersion2NoClients = "testdata/v2.5.1/version2-no-clients.txt"
+ pathStatusVersion3 = "testdata/v2.5.1/version3.txt"
+ pathStatusVersion3NoClients = "testdata/v2.5.1/version3-no-clients.txt"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestOpenVPNStatusLog_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &OpenVPNStatusLog{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestOpenVPNStatusLog_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default config": {
+ config: New().Config,
+ },
+ "unset 'log_path'": {
+ wantFail: true,
+ config: Config{
+ LogPath: "",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ovpn := New()
+ ovpn.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, ovpn.Init())
+ } else {
+ assert.NoError(t, ovpn.Init())
+ }
+ })
+ }
+}
+
+func TestOpenVPNStatusLog_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *OpenVPNStatusLog
+ wantFail bool
+ }{
+ "status version 1": {prepare: prepareCaseStatusVersion1},
+ "status version 1 with no clients": {prepare: prepareCaseStatusVersion1NoClients},
+ "status version 2": {prepare: prepareCaseStatusVersion2},
+ "status version 2 with no clients": {prepare: prepareCaseStatusVersion2NoClients},
+ "status version 3": {prepare: prepareCaseStatusVersion3},
+ "status version 3 with no clients": {prepare: prepareCaseStatusVersion3NoClients},
+ "empty file": {prepare: prepareCaseEmptyFile, wantFail: true},
+ "non-existent file": {prepare: prepareCaseNonExistentFile, wantFail: true},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ovpn := test.prepare()
+
+ require.NoError(t, ovpn.Init())
+
+ if test.wantFail {
+ assert.Error(t, ovpn.Check())
+ } else {
+ assert.NoError(t, ovpn.Check())
+ }
+ })
+ }
+}
+
+func TestOpenVPNStatusLog_Charts(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *OpenVPNStatusLog
+ wantNumCharts int
+ }{
+ "status version 1 with user stats": {
+ prepare: prepareCaseStatusVersion1WithUserStats,
+ wantNumCharts: len(charts) + len(userCharts)*2,
+ },
+ "status version 2 with user stats": {
+ prepare: prepareCaseStatusVersion2WithUserStats,
+ wantNumCharts: len(charts) + len(userCharts)*2,
+ },
+ "status version 3 with user stats": {
+			prepare:       prepareCaseStatusVersion3WithUserStats,
+ wantNumCharts: len(charts) + len(userCharts)*2,
+ },
+ "status version with static key": {
+ prepare: prepareCaseStatusStaticKey,
+ wantNumCharts: len(charts),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ovpn := test.prepare()
+
+ require.NoError(t, ovpn.Init())
+ _ = ovpn.Check()
+ _ = ovpn.Collect()
+
+ assert.Equal(t, test.wantNumCharts, len(*ovpn.Charts()))
+ })
+ }
+}
+
+func TestOpenVPNStatusLog_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *OpenVPNStatusLog
+ expected map[string]int64
+ }{
+ "status version 1": {
+ prepare: prepareCaseStatusVersion1,
+ expected: map[string]int64{
+ "bytes_in": 6168,
+ "bytes_out": 6369,
+ "clients": 2,
+ },
+ },
+ "status version 1 with user stats": {
+ prepare: prepareCaseStatusVersion1WithUserStats,
+ expected: map[string]int64{
+ "bytes_in": 6168,
+ "bytes_out": 6369,
+ "clients": 2,
+ "vpnclient2_bytes_in": 3084,
+ "vpnclient2_bytes_out": 3184,
+ "vpnclient2_connection_time": 63793143069,
+ "vpnclient_bytes_in": 3084,
+ "vpnclient_bytes_out": 3185,
+ "vpnclient_connection_time": 63793143069,
+ },
+ },
+ "status version 1 with no clients": {
+ prepare: prepareCaseStatusVersion1NoClients,
+ expected: map[string]int64{
+ "bytes_in": 0,
+ "bytes_out": 0,
+ "clients": 0,
+ },
+ },
+ "status version 2": {
+ prepare: prepareCaseStatusVersion2,
+ expected: map[string]int64{
+ "bytes_in": 6241,
+ "bytes_out": 6369,
+ "clients": 2,
+ },
+ },
+ "status version 2 with user stats": {
+ prepare: prepareCaseStatusVersion2WithUserStats,
+ expected: map[string]int64{
+ "bytes_in": 6241,
+ "bytes_out": 6369,
+ "clients": 2,
+ "vpnclient2_bytes_in": 3157,
+ "vpnclient2_bytes_out": 3184,
+ "vpnclient2_connection_time": 264610,
+ "vpnclient_bytes_in": 3084,
+ "vpnclient_bytes_out": 3185,
+ "vpnclient_connection_time": 264609,
+ },
+ },
+ "status version 2 with no clients": {
+ prepare: prepareCaseStatusVersion2NoClients,
+ expected: map[string]int64{
+ "bytes_in": 0,
+ "bytes_out": 0,
+ "clients": 0,
+ },
+ },
+ "status version 3": {
+ prepare: prepareCaseStatusVersion3,
+ expected: map[string]int64{
+ "bytes_in": 7308,
+ "bytes_out": 7235,
+ "clients": 2,
+ },
+ },
+ "status version 3 with user stats": {
+ prepare: prepareCaseStatusVersion3WithUserStats,
+ expected: map[string]int64{
+ "bytes_in": 7308,
+ "bytes_out": 7235,
+ "clients": 2,
+ "vpnclient2_bytes_in": 3654,
+ "vpnclient2_bytes_out": 3617,
+ "vpnclient2_connection_time": 265498,
+ "vpnclient_bytes_in": 3654,
+ "vpnclient_bytes_out": 3618,
+ "vpnclient_connection_time": 265496,
+ },
+ },
+ "status version 3 with no clients": {
+ prepare: prepareCaseStatusVersion3NoClients,
+ expected: map[string]int64{
+ "bytes_in": 0,
+ "bytes_out": 0,
+ "clients": 0,
+ },
+ },
+ "status with static key": {
+ prepare: prepareCaseStatusStaticKey,
+ expected: map[string]int64{
+ "bytes_in": 19265,
+ "bytes_out": 261631,
+ "clients": 0,
+ },
+ },
+ "empty file": {
+ prepare: prepareCaseEmptyFile,
+ expected: nil,
+ },
+ "non-existent file": {
+ prepare: prepareCaseNonExistentFile,
+ expected: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ovpn := test.prepare()
+
+ require.NoError(t, ovpn.Init())
+ _ = ovpn.Check()
+
+ collected := ovpn.Collect()
+
+ copyConnTime(collected, test.expected)
+ assert.Equal(t, test.expected, collected)
+ })
+ }
+}
+
+func prepareCaseStatusVersion1() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStatusVersion1
+ return ovpn
+}
+
+func prepareCaseStatusVersion1WithUserStats() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStatusVersion1
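+	// collect per-user stats for all clients ("* *" matches any common name)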
+ ovpn.PerUserStats = matcher.SimpleExpr{
+ Includes: []string{"* *"},
+ }
+ return ovpn
+}
+
+func prepareCaseStatusVersion1NoClients() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStatusVersion1NoClients
+ return ovpn
+}
+
+func prepareCaseStatusVersion2() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStatusVersion2
+ return ovpn
+}
+
+func prepareCaseStatusVersion2WithUserStats() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStatusVersion2
+ ovpn.PerUserStats = matcher.SimpleExpr{
+ Includes: []string{"* *"},
+ }
+ return ovpn
+}
+
+func prepareCaseStatusVersion2NoClients() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStatusVersion2NoClients
+ return ovpn
+}
+
+func prepareCaseStatusVersion3() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStatusVersion3
+ return ovpn
+}
+
+func prepareCaseStatusVersion3WithUserStats() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStatusVersion3
+ ovpn.PerUserStats = matcher.SimpleExpr{
+ Includes: []string{"* *"},
+ }
+ return ovpn
+}
+
+func prepareCaseStatusVersion3NoClients() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStatusVersion3NoClients
+ return ovpn
+}
+
+func prepareCaseStatusStaticKey() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathStaticKey
+ return ovpn
+}
+
+func prepareCaseEmptyFile() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathEmptyFile
+ return ovpn
+}
+
+func prepareCaseNonExistentFile() *OpenVPNStatusLog {
+ ovpn := New()
+ ovpn.LogPath = pathNonExistentFile
+ return ovpn
+}
+
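+// copyConnTime overwrites the "connection_time" values in dst with the values
+// from src, so the comparison ignores them: these metrics depend on the current
+// time and cannot be asserted against fixed expectations.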
+func copyConnTime(dst, src map[string]int64) {
+ for k, v := range src {
+ if !strings.HasSuffix(k, "connection_time") {
+ continue
+ }
+ if _, ok := dst[k]; !ok {
+ continue
+ }
+ dst[k] = v
+ }
+}
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/parser.go b/src/go/plugin/go.d/modules/openvpn_status_log/parser.go
new file mode 100644
index 000000000..c734fd5fb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/parser.go
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn_status_log
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type clientInfo struct {
+ commonName string
+ bytesReceived int64
+ bytesSent int64
+ connectedSince int64
+}
+
+func parse(path string) ([]clientInfo, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = f.Close() }()
+
+ sc := bufio.NewScanner(f)
+ _ = sc.Scan()
+ line := sc.Text()
+
+ if line == "OpenVPN CLIENT LIST" {
+ return parseV1(sc), nil
+ }
+ if strings.HasPrefix(line, "TITLE,OpenVPN") || strings.HasPrefix(line, "TITLE\tOpenVPN") {
+ return parseV2V3(sc), nil
+ }
+ if line == "OpenVPN STATISTICS" {
+ return parseStaticKey(sc), nil
+ }
+ return nil, fmt.Errorf("the status log file is invalid (%s)", path)
+}
+
+func parseV1(sc *bufio.Scanner) []clientInfo {
+ // https://github.com/OpenVPN/openvpn/blob/d5315a5d7400a26f1113bbc44766d49dd0c3688f/src/openvpn/multi.c#L836
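+	// Example of the client section (see testdata/v2.5.1/version1.txt):
+	//   Common Name,Real Address,Bytes Received,Bytes Sent,Connected Since
+	//   vpnclient,10.10.10.107:46195,3084,3185,2022-07-08 15:14:42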
+ var clients []clientInfo
+
+ for sc.Scan() {
+ if !strings.HasPrefix(sc.Text(), "Common Name") {
+ continue
+ }
+ for sc.Scan() && !strings.HasPrefix(sc.Text(), "ROUTING TABLE") {
+ parts := strings.Split(sc.Text(), ",")
+ if len(parts) != 5 {
+ continue
+ }
+
+ name := parts[0]
+ bytesRx, _ := strconv.ParseInt(parts[2], 10, 64)
+ bytesTx, _ := strconv.ParseInt(parts[3], 10, 64)
+ connSince, _ := time.Parse("Mon Jan 2 15:04:05 2006", parts[4])
+
+ clients = append(clients, clientInfo{
+ commonName: name,
+ bytesReceived: bytesRx,
+ bytesSent: bytesTx,
+ connectedSince: connSince.Unix(),
+ })
+ }
+ break
+ }
+ return clients
+}
+
+func parseV2V3(sc *bufio.Scanner) []clientInfo {
+ // https://github.com/OpenVPN/openvpn/blob/d5315a5d7400a26f1113bbc44766d49dd0c3688f/src/openvpn/multi.c#L901
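+	// Example CLIENT_LIST row, comma-separated in version 2 and tab-separated in
+	// version 3 (see testdata/v2.5.1/version2.txt and version3.txt):
+	//   CLIENT_LIST,vpnclient,10.10.10.107:50026,10.8.0.10,,3084,3185,2022-07-08 15:05:10,1657281910,UNDEF,1,1,AES-256-GCM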
+ var clients []clientInfo
+ var sep string
+ if strings.IndexByte(sc.Text(), '\t') != -1 {
+ sep = "\t"
+ } else {
+ sep = ","
+ }
+
+ for sc.Scan() {
+ line := sc.Text()
+ if !strings.HasPrefix(line, "CLIENT_LIST") {
+ continue
+ }
+ parts := strings.Split(line, sep)
+ if len(parts) != 13 {
+ continue
+ }
+
+ name := parts[1]
+ bytesRx, _ := strconv.ParseInt(parts[5], 10, 64)
+ bytesTx, _ := strconv.ParseInt(parts[6], 10, 64)
+ connSince, _ := strconv.ParseInt(parts[8], 10, 64)
+
+ clients = append(clients, clientInfo{
+ commonName: name,
+ bytesReceived: bytesRx,
+ bytesSent: bytesTx,
+ connectedSince: connSince,
+ })
+ }
+ return clients
+}
+
+func parseStaticKey(sc *bufio.Scanner) []clientInfo {
+ // https://github.com/OpenVPN/openvpn/blob/d5315a5d7400a26f1113bbc44766d49dd0c3688f/src/openvpn/sig.c#L283
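+	// With a static key setup there is no client list; only aggregate counters
+	// are reported (see testdata/v2.5.1/static-key.txt):
+	//   TCP/UDP read bytes,19265
+	//   TCP/UDP write bytes,261631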
+ var info clientInfo
+ for sc.Scan() {
+ line := sc.Text()
+ if !strings.HasPrefix(line, "TCP/UDP") {
+ continue
+ }
+ i := strings.IndexByte(line, ',')
+		if i == -1 || i+1 == len(line) {
+ continue
+ }
+ bytes, _ := strconv.ParseInt(line[i+1:], 10, 64)
+ switch line[:i] {
+ case "TCP/UDP read bytes":
+ info.bytesReceived += bytes
+ case "TCP/UDP write bytes":
+ info.bytesSent += bytes
+ }
+ }
+ return []clientInfo{info}
+}
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/config.json b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/config.json
new file mode 100644
index 000000000..078a1ae56
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/config.json
@@ -0,0 +1,12 @@
+{
+ "update_every": 123,
+ "log_path": "ok",
+ "per_user_stats": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/config.yaml b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/config.yaml
new file mode 100644
index 000000000..1a27ab974
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/config.yaml
@@ -0,0 +1,7 @@
+update_every: 123
+log_path: "ok"
+per_user_stats:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/empty.txt b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/empty.txt
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/empty.txt
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/static-key.txt b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/static-key.txt
new file mode 100644
index 000000000..64b691fcd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/static-key.txt
@@ -0,0 +1,8 @@
+OpenVPN STATISTICS
+Updated,2022-05-05 12:35:47
+TUN/TAP read bytes,123
+TUN/TAP write bytes,1155
+TCP/UDP read bytes,19265
+TCP/UDP write bytes,261631
+Auth read bytes,0
+END
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version1-no-clients.txt b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version1-no-clients.txt
new file mode 100644
index 000000000..34d7a748f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version1-no-clients.txt
@@ -0,0 +1,8 @@
+OpenVPN CLIENT LIST
+Updated,2022-07-08 15:05:57
+Common Name,Real Address,Bytes Received,Bytes Sent,Connected Since
+ROUTING TABLE
+Virtual Address,Common Name,Real Address,Last Ref
+GLOBAL STATS
+Max bcast/mcast queue length,0
+END \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version1.txt b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version1.txt
new file mode 100644
index 000000000..0d2f33ba5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version1.txt
@@ -0,0 +1,12 @@
+OpenVPN CLIENT LIST
+Updated,2022-07-08 15:14:45
+Common Name,Real Address,Bytes Received,Bytes Sent,Connected Since
+vpnclient,10.10.10.107:46195,3084,3185,2022-07-08 15:14:42
+vpnclient2,10.10.10.50:51275,3084,3184,2022-07-08 15:14:41
+ROUTING TABLE
+Virtual Address,Common Name,Real Address,Last Ref
+10.8.0.10,vpnclient,10.10.10.107:46195,2022-07-08 15:14:42
+10.8.0.6,vpnclient2,10.10.10.50:51275,2022-07-08 15:14:41
+GLOBAL STATS
+Max bcast/mcast queue length,0
+END \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version2-no-clients.txt b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version2-no-clients.txt
new file mode 100644
index 000000000..6d1ea1e32
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version2-no-clients.txt
@@ -0,0 +1,6 @@
+TITLE,OpenVPN 2.5.1 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on May 14 2021
+TIME,2022-07-08 15:04:54,1657281894
+HEADER,CLIENT_LIST,Common Name,Real Address,Virtual Address,Virtual IPv6 Address,Bytes Received,Bytes Sent,Connected Since,Connected Since (time_t),Username,Client ID,Peer ID,Data Channel Cipher
+HEADER,ROUTING_TABLE,Virtual Address,Common Name,Real Address,Last Ref,Last Ref (time_t)
+GLOBAL_STATS,Max bcast/mcast queue length,0
+END \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version2.txt b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version2.txt
new file mode 100644
index 000000000..d0f4ac8e3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version2.txt
@@ -0,0 +1,10 @@
+TITLE,OpenVPN 2.5.1 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on May 14 2021
+TIME,2022-07-08 15:05:14,1657281914
+HEADER,CLIENT_LIST,Common Name,Real Address,Virtual Address,Virtual IPv6 Address,Bytes Received,Bytes Sent,Connected Since,Connected Since (time_t),Username,Client ID,Peer ID,Data Channel Cipher
+CLIENT_LIST,vpnclient2,10.10.10.50:38535,10.8.0.6,,3157,3184,2022-07-08 15:05:09,1657281909,UNDEF,0,0,AES-256-GCM
+CLIENT_LIST,vpnclient,10.10.10.107:50026,10.8.0.10,,3084,3185,2022-07-08 15:05:10,1657281910,UNDEF,1,1,AES-256-GCM
+HEADER,ROUTING_TABLE,Virtual Address,Common Name,Real Address,Last Ref,Last Ref (time_t)
+ROUTING_TABLE,10.8.0.6,vpnclient2,10.10.10.50:38535,2022-07-08 15:05:09,1657281909
+ROUTING_TABLE,10.8.0.10,vpnclient,10.10.10.107:50026,2022-07-08 15:05:10,1657281910
+GLOBAL_STATS,Max bcast/mcast queue length,0
+END \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version3-no-clients.txt b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version3-no-clients.txt
new file mode 100644
index 000000000..6ab671f20
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version3-no-clients.txt
@@ -0,0 +1,6 @@
+TITLE OpenVPN 2.5.1 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on May 14 2021
+TIME 2022-07-08 15:02:27 1657281747
+HEADER CLIENT_LIST Common Name Real Address Virtual Address Virtual IPv6 Address Bytes Received Bytes Sent Connected Since Connected Since (time_t) Username Client ID Peer ID Data Channel Cipher
+HEADER ROUTING_TABLE Virtual Address Common Name Real Address Last Ref Last Ref (time_t)
+GLOBAL_STATS Max bcast/mcast queue length 2
+END \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version3.txt b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version3.txt
new file mode 100644
index 000000000..7d732042e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/openvpn_status_log/testdata/v2.5.1/version3.txt
@@ -0,0 +1,10 @@
+TITLE OpenVPN 2.5.1 x86_64-pc-linux-gnu [SSL (OpenSSL)] [LZO] [LZ4] [EPOLL] [PKCS11] [MH/PKTINFO] [AEAD] built on May 14 2021
+TIME 2022-07-08 14:53:40 1657281220
+HEADER CLIENT_LIST Common Name Real Address Virtual Address Virtual IPv6 Address Bytes Received Bytes Sent Connected Since Connected Since (time_t) Username Client ID Peer ID Data Channel Cipher
+CLIENT_LIST vpnclient2 10.10.10.50:53856 10.8.0.6 3654 3617 2022-07-08 14:50:56 1657281056 UNDEF 0 0 AES-256-GCM
+CLIENT_LIST vpnclient 10.10.10.107:42132 10.8.0.10 3654 3618 2022-07-08 14:50:58 1657281058 UNDEF 1 1 AES-256-GCM
+HEADER ROUTING_TABLE Virtual Address Common Name Real Address Last Ref Last Ref (time_t)
+ROUTING_TABLE 10.8.0.6 vpnclient2 10.10.10.50:53856 2022-07-08 14:50:56 1657281056
+ROUTING_TABLE 10.8.0.10 vpnclient 10.10.10.107:42132 2022-07-08 14:50:58 1657281058
+GLOBAL_STATS Max bcast/mcast queue length 2
+END \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pgbouncer/README.md b/src/go/plugin/go.d/modules/pgbouncer/README.md
new file mode 120000
index 000000000..3bfcaba0b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/README.md
@@ -0,0 +1 @@
+integrations/pgbouncer.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pgbouncer/charts.go b/src/go/plugin/go.d/modules/pgbouncer/charts.go
new file mode 100644
index 000000000..4ee7b2bc5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/charts.go
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pgbouncer
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioClientConnectionsUtilization = module.Priority + iota
+ prioDBClientConnections
+ prioDBServerConnections
+ prioDBServerConnectionsUtilization
+ prioDBClientsWaitTime
+ prioDBClientsWaitMaxTime
+ prioDBTransactions
+ prioDBTransactionsTime
+ prioDBTransactionsAvgTime
+ prioDBQueries
+ prioDBQueriesTime
+ prioDBQueryAvgTime
+ prioDBNetworkIO
+)
+
+var (
+ globalCharts = module.Charts{
+ clientConnectionsUtilization.Copy(),
+ }
+
+ clientConnectionsUtilization = module.Chart{
+ ID: "client_connections_utilization",
+ Title: "Client connections utilization",
+ Units: "percentage",
+ Fam: "client connections",
+ Ctx: "pgbouncer.client_connections_utilization",
+ Priority: prioClientConnectionsUtilization,
+ Dims: module.Dims{
+ {ID: "cl_conns_utilization", Name: "used"},
+ },
+ }
+)
+
+var (
+ dbChartsTmpl = module.Charts{
+ dbClientConnectionsTmpl.Copy(),
+
+ dbServerConnectionsUtilizationTmpl.Copy(),
+ dbServerConnectionsTmpl.Copy(),
+
+ dbClientsWaitTimeChartTmpl.Copy(),
+ dbClientMaxWaitTimeChartTmpl.Copy(),
+
+ dbTransactionsChartTmpl.Copy(),
+ dbTransactionsTimeChartTmpl.Copy(),
+ dbTransactionAvgTimeChartTmpl.Copy(),
+
+ dbQueriesChartTmpl.Copy(),
+ dbQueriesTimeChartTmpl.Copy(),
+ dbQueryAvgTimeChartTmpl.Copy(),
+
+ dbNetworkIOChartTmpl.Copy(),
+ }
+
+ dbClientConnectionsTmpl = module.Chart{
+ ID: "db_%s_client_connections",
+ Title: "Database client connections",
+ Units: "connections",
+ Fam: "client connections",
+ Ctx: "pgbouncer.db_client_connections",
+ Priority: prioDBClientConnections,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "db_%s_cl_active", Name: "active"},
+ {ID: "db_%s_cl_waiting", Name: "waiting"},
+ {ID: "db_%s_cl_cancel_req", Name: "cancel_req"},
+ },
+ }
+
+ dbServerConnectionsTmpl = module.Chart{
+ ID: "db_%s_server_connections",
+ Title: "Database server connections",
+ Units: "connections",
+ Fam: "server connections",
+ Ctx: "pgbouncer.db_server_connections",
+ Priority: prioDBServerConnections,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "db_%s_sv_active", Name: "active"},
+ {ID: "db_%s_sv_idle", Name: "idle"},
+ {ID: "db_%s_sv_used", Name: "used"},
+ {ID: "db_%s_sv_tested", Name: "tested"},
+ {ID: "db_%s_sv_login", Name: "login"},
+ },
+ }
+
+ dbServerConnectionsUtilizationTmpl = module.Chart{
+ ID: "db_%s_server_connections_utilization",
+ Title: "Database server connections utilization",
+ Units: "percentage",
+ Fam: "server connections limit",
+ Ctx: "pgbouncer.db_server_connections_utilization",
+ Priority: prioDBServerConnectionsUtilization,
+ Dims: module.Dims{
+ {ID: "db_%s_sv_conns_utilization", Name: "used"},
+ },
+ }
+
+ dbClientsWaitTimeChartTmpl = module.Chart{
+ ID: "db_%s_clients_wait_time",
+ Title: "Database clients wait time",
+ Units: "seconds",
+ Fam: "clients wait time",
+ Ctx: "pgbouncer.db_clients_wait_time",
+ Priority: prioDBClientsWaitTime,
+ Dims: module.Dims{
+ {ID: "db_%s_total_wait_time", Name: "time", Algo: module.Incremental, Div: 1e6},
+ },
+ }
+ dbClientMaxWaitTimeChartTmpl = module.Chart{
+ ID: "db_%s_client_max_wait_time",
+ Title: "Database client max wait time",
+ Units: "seconds",
+ Fam: "client max wait time",
+ Ctx: "pgbouncer.db_client_max_wait_time",
+ Priority: prioDBClientsWaitMaxTime,
+ Dims: module.Dims{
+ {ID: "db_%s_maxwait", Name: "time", Div: 1e6},
+ },
+ }
+
+ dbTransactionsChartTmpl = module.Chart{
+ ID: "db_%s_transactions",
+ Title: "Database pooled SQL transactions",
+ Units: "transactions/s",
+ Fam: "transactions",
+ Ctx: "pgbouncer.db_transactions",
+ Priority: prioDBTransactions,
+ Dims: module.Dims{
+ {ID: "db_%s_total_xact_count", Name: "transactions", Algo: module.Incremental},
+ },
+ }
+ dbTransactionsTimeChartTmpl = module.Chart{
+ ID: "db_%s_transactions_time",
+ Title: "Database transactions time",
+ Units: "seconds",
+ Fam: "transactions time",
+ Ctx: "pgbouncer.db_transactions_time",
+ Priority: prioDBTransactionsTime,
+ Dims: module.Dims{
+ {ID: "db_%s_total_xact_time", Name: "time", Algo: module.Incremental, Div: 1e6},
+ },
+ }
+ dbTransactionAvgTimeChartTmpl = module.Chart{
+ ID: "db_%s_transactions_average_time",
+ Title: "Database transaction average time",
+ Units: "seconds",
+ Fam: "transaction avg time",
+ Ctx: "pgbouncer.db_transaction_avg_time",
+ Priority: prioDBTransactionsAvgTime,
+ Dims: module.Dims{
+ {ID: "db_%s_avg_xact_time", Name: "time", Algo: module.Incremental, Div: 1e6},
+ },
+ }
+
+ dbQueriesChartTmpl = module.Chart{
+ ID: "db_%s_queries",
+ Title: "Database pooled SQL queries",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "pgbouncer.db_queries",
+ Priority: prioDBQueries,
+ Dims: module.Dims{
+ {ID: "db_%s_total_query_count", Name: "queries", Algo: module.Incremental},
+ },
+ }
+ dbQueriesTimeChartTmpl = module.Chart{
+ ID: "db_%s_queries_time",
+ Title: "Database queries time",
+ Units: "seconds",
+ Fam: "queries time",
+ Ctx: "pgbouncer.db_queries_time",
+ Priority: prioDBQueriesTime,
+ Dims: module.Dims{
+ {ID: "db_%s_total_query_time", Name: "time", Algo: module.Incremental, Div: 1e6},
+ },
+ }
+ dbQueryAvgTimeChartTmpl = module.Chart{
+ ID: "db_%s_query_average_time",
+ Title: "Database query average time",
+ Units: "seconds",
+ Fam: "query avg time",
+ Ctx: "pgbouncer.db_query_avg_time",
+ Priority: prioDBQueryAvgTime,
+ Dims: module.Dims{
+ {ID: "db_%s_avg_query_time", Name: "time", Algo: module.Incremental, Div: 1e6},
+ },
+ }
+
+ dbNetworkIOChartTmpl = module.Chart{
+ ID: "db_%s_network_io",
+ Title: "Database traffic",
+ Units: "B/s",
+ Fam: "traffic",
+ Ctx: "pgbouncer.db_network_io",
+ Priority: prioDBNetworkIO,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "db_%s_total_received", Name: "received", Algo: module.Incremental},
+ {ID: "db_%s_total_sent", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+)
+
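+// newDatabaseCharts instantiates the per-database chart templates: it fills the
+// "%s" placeholders in chart and dimension IDs with the database name and
+// attaches the database/postgres_database labels.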
+func newDatabaseCharts(dbname, pgDBName string) *module.Charts {
+ charts := dbChartsTmpl.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, dbname)
+ c.Labels = []module.Label{
+ {Key: "database", Value: dbname},
+ {Key: "postgres_database", Value: pgDBName},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, dbname)
+ }
+ }
+ return charts
+}
+
+func (p *PgBouncer) addNewDatabaseCharts(dbname, pgDBName string) {
+ charts := newDatabaseCharts(dbname, pgDBName)
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *PgBouncer) removeDatabaseCharts(dbname string) {
+ prefix := fmt.Sprintf("db_%s_", dbname)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pgbouncer/collect.go b/src/go/plugin/go.d/modules/pgbouncer/collect.go
new file mode 100644
index 000000000..c0e4bf2da
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/collect.go
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pgbouncer
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/blang/semver/v4"
+ "github.com/jackc/pgx/v4"
+ "github.com/jackc/pgx/v4/stdlib"
+)
+
+// The 'SHOW STATS;' response changed significantly in v1.8.0.
+// v1.8.0 was released in 2015, so there is no need to complicate the code to support older versions.
+var minSupportedVersion = semver.Version{Major: 1, Minor: 8, Patch: 0}
+
+const (
+ queryShowVersion = "SHOW VERSION;"
+ queryShowConfig = "SHOW CONFIG;"
+ queryShowDatabases = "SHOW DATABASES;"
+ queryShowStats = "SHOW STATS;"
+ queryShowPools = "SHOW POOLS;"
+)
+
+func (p *PgBouncer) collect() (map[string]int64, error) {
+ if p.db == nil {
+ if err := p.openConnection(); err != nil {
+ return nil, err
+ }
+ }
+ if p.version == nil {
+ ver, err := p.queryVersion()
+ if err != nil {
+ return nil, err
+ }
+ p.Debugf("connected to PgBouncer v%s", ver)
+ if ver.LE(minSupportedVersion) {
+ return nil, fmt.Errorf("unsupported version: v%s, required v%s+", ver, minSupportedVersion)
+ }
+ p.version = ver
+ }
+
+ now := time.Now()
+ if now.Sub(p.recheckSettingsTime) > p.recheckSettingsEvery {
+ v, err := p.queryMaxClientConn()
+ if err != nil {
+ return nil, err
+ }
+		p.maxClientConn = v
+		p.recheckSettingsTime = now
+	}
+
+ // http://www.pgbouncer.org/usage.html
+
+ p.resetMetrics()
+
+ if err := p.collectDatabases(); err != nil {
+ return nil, err
+ }
+ if err := p.collectStats(); err != nil {
+ return nil, err
+ }
+ if err := p.collectPools(); err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+ p.collectMetrics(mx)
+
+ return mx, nil
+}
+
+func (p *PgBouncer) collectMetrics(mx map[string]int64) {
+ var clientConns int64
+ for name, db := range p.metrics.dbs {
+ if !db.updated {
+ delete(p.metrics.dbs, name)
+ p.removeDatabaseCharts(name)
+ continue
+ }
+ if !db.hasCharts {
+ db.hasCharts = true
+ p.addNewDatabaseCharts(name, db.pgDBName)
+ }
+
+ mx["db_"+name+"_total_xact_count"] = db.totalXactCount
+ mx["db_"+name+"_total_xact_time"] = db.totalXactTime
+ mx["db_"+name+"_avg_xact_time"] = db.avgXactTime
+
+ mx["db_"+name+"_total_query_count"] = db.totalQueryCount
+ mx["db_"+name+"_total_query_time"] = db.totalQueryTime
+ mx["db_"+name+"_avg_query_time"] = db.avgQueryTime
+
+ mx["db_"+name+"_total_wait_time"] = db.totalWaitTime
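+		// 'SHOW POOLS' reports maxwait in whole seconds plus a microseconds remainder;
+		// combine them into microseconds (the chart divides by 1e6 to display seconds)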
+ mx["db_"+name+"_maxwait"] = db.maxWait*1e6 + db.maxWaitUS
+
+ mx["db_"+name+"_cl_active"] = db.clActive
+ mx["db_"+name+"_cl_waiting"] = db.clWaiting
+ mx["db_"+name+"_cl_cancel_req"] = db.clCancelReq
+ clientConns += db.clActive + db.clWaiting + db.clCancelReq
+
+ mx["db_"+name+"_sv_active"] = db.svActive
+ mx["db_"+name+"_sv_idle"] = db.svIdle
+ mx["db_"+name+"_sv_used"] = db.svUsed
+ mx["db_"+name+"_sv_tested"] = db.svTested
+ mx["db_"+name+"_sv_login"] = db.svLogin
+
+ mx["db_"+name+"_total_received"] = db.totalReceived
+ mx["db_"+name+"_total_sent"] = db.totalSent
+
+ mx["db_"+name+"_sv_conns_utilization"] = calcPercentage(db.currentConnections, db.maxConnections)
+ }
+
+ mx["cl_conns_utilization"] = calcPercentage(clientConns, p.maxClientConn)
+}
+
+func (p *PgBouncer) collectDatabases() error {
+ q := queryShowDatabases
+ p.Debugf("executing query: %v", q)
+
+ var db string
+ return p.collectQuery(q, func(column, value string) {
+ switch column {
+ case "name":
+ db = value
+ p.getDBMetrics(db).updated = true
+ case "database":
+ p.getDBMetrics(db).pgDBName = value
+ case "max_connections":
+ p.getDBMetrics(db).maxConnections = parseInt(value)
+ case "current_connections":
+ p.getDBMetrics(db).currentConnections = parseInt(value)
+ case "paused":
+ p.getDBMetrics(db).paused = parseInt(value)
+ case "disabled":
+ p.getDBMetrics(db).disabled = parseInt(value)
+ }
+ })
+}
+
+func (p *PgBouncer) collectStats() error {
+ q := queryShowStats
+ p.Debugf("executing query: %v", q)
+
+ var db string
+ return p.collectQuery(q, func(column, value string) {
+ switch column {
+ case "database":
+ db = value
+ p.getDBMetrics(db).updated = true
+ case "total_xact_count":
+ p.getDBMetrics(db).totalXactCount = parseInt(value)
+ case "total_query_count":
+ p.getDBMetrics(db).totalQueryCount = parseInt(value)
+ case "total_received":
+ p.getDBMetrics(db).totalReceived = parseInt(value)
+ case "total_sent":
+ p.getDBMetrics(db).totalSent = parseInt(value)
+ case "total_xact_time":
+ p.getDBMetrics(db).totalXactTime = parseInt(value)
+ case "total_query_time":
+ p.getDBMetrics(db).totalQueryTime = parseInt(value)
+ case "total_wait_time":
+ p.getDBMetrics(db).totalWaitTime = parseInt(value)
+ case "avg_xact_time":
+ p.getDBMetrics(db).avgXactTime = parseInt(value)
+ case "avg_query_time":
+ p.getDBMetrics(db).avgQueryTime = parseInt(value)
+ }
+ })
+}
+
+func (p *PgBouncer) collectPools() error {
+ q := queryShowPools
+ p.Debugf("executing query: %v", q)
+
+	// 'SHOW POOLS' makes an entry for each (database, user) pair, so the values are summed per database.
+ var db string
+ return p.collectQuery(q, func(column, value string) {
+ switch column {
+ case "database":
+ db = value
+ p.getDBMetrics(db).updated = true
+ case "cl_active":
+ p.getDBMetrics(db).clActive += parseInt(value)
+ case "cl_waiting":
+ p.getDBMetrics(db).clWaiting += parseInt(value)
+ case "cl_cancel_req":
+ p.getDBMetrics(db).clCancelReq += parseInt(value)
+ case "sv_active":
+ p.getDBMetrics(db).svActive += parseInt(value)
+ case "sv_idle":
+ p.getDBMetrics(db).svIdle += parseInt(value)
+ case "sv_used":
+ p.getDBMetrics(db).svUsed += parseInt(value)
+ case "sv_tested":
+ p.getDBMetrics(db).svTested += parseInt(value)
+ case "sv_login":
+ p.getDBMetrics(db).svLogin += parseInt(value)
+ case "maxwait":
+ p.getDBMetrics(db).maxWait += parseInt(value)
+ case "maxwait_us":
+ p.getDBMetrics(db).maxWaitUS += parseInt(value)
+ }
+ })
+}
+
+func (p *PgBouncer) queryMaxClientConn() (int64, error) {
+ q := queryShowConfig
+ p.Debugf("executing query: %v", q)
+
+ var v int64
+ var key string
+ err := p.collectQuery(q, func(column, value string) {
+ switch column {
+ case "key":
+ key = value
+ case "value":
+ if key == "max_client_conn" {
+ v = parseInt(value)
+ }
+ }
+ })
+ return v, err
+}
+
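+// reVersion extracts the numeric version from a 'SHOW VERSION;' response such as "PgBouncer 1.17.0".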
+var reVersion = regexp.MustCompile(`\d+\.\d+\.\d+`)
+
+func (p *PgBouncer) queryVersion() (*semver.Version, error) {
+ q := queryShowVersion
+ p.Debugf("executing query: %v", q)
+
+ var resp string
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+ if err := p.db.QueryRowContext(ctx, q).Scan(&resp); err != nil {
+ return nil, err
+ }
+
+ if !strings.Contains(resp, "PgBouncer") {
+		return nil, fmt.Errorf("not a PgBouncer instance: version response: %s", resp)
+ }
+
+ ver := reVersion.FindString(resp)
+ if ver == "" {
+ return nil, fmt.Errorf("couldn't parse version string '%s' (expected pattern '%s')", resp, reVersion)
+ }
+
+ v, err := semver.New(ver)
+ if err != nil {
+ return nil, fmt.Errorf("couldn't parse version string '%s': %v", ver, err)
+ }
+
+ return v, nil
+}
+
+func (p *PgBouncer) openConnection() error {
+ cfg, err := pgx.ParseConfig(p.DSN)
+ if err != nil {
+ return err
+ }
+ cfg.PreferSimpleProtocol = true
+
+ db, err := sql.Open("pgx", stdlib.RegisterConnConfig(cfg))
+ if err != nil {
+		return fmt.Errorf("error opening a connection to the PgBouncer database [%s]: %v", p.DSN, err)
+ }
+
+ db.SetMaxOpenConns(1)
+ db.SetMaxIdleConns(1)
+ db.SetConnMaxLifetime(10 * time.Minute)
+
+ p.db = db
+
+ return nil
+}
+
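+// collectQuery executes an admin console query and calls assign for every
+// (column, value) cell; all values are scanned into sql.NullString and passed
+// to assign as plain strings.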
+func (p *PgBouncer) collectQuery(query string, assign func(column, value string)) error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+ rows, err := p.db.QueryContext(ctx, query)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = rows.Close() }()
+
+ columns, err := rows.Columns()
+ if err != nil {
+ return err
+ }
+
+ values := makeNullStrings(len(columns))
+ for rows.Next() {
+ if err := rows.Scan(values...); err != nil {
+ return err
+ }
+ for i, v := range values {
+ assign(columns[i], valueToString(v))
+ }
+ }
+ return rows.Err()
+}
+
+func (p *PgBouncer) getDBMetrics(dbname string) *dbMetrics {
+ db, ok := p.metrics.dbs[dbname]
+ if !ok {
+ db = &dbMetrics{name: dbname}
+ p.metrics.dbs[dbname] = db
+ }
+ return db
+}
+
+func (p *PgBouncer) resetMetrics() {
+ for name, db := range p.metrics.dbs {
+ p.metrics.dbs[name] = &dbMetrics{
+ name: db.name,
+ pgDBName: db.pgDBName,
+ hasCharts: db.hasCharts,
+ }
+ }
+}
+
+func valueToString(value any) string {
+ v, ok := value.(*sql.NullString)
+ if !ok || !v.Valid {
+ return ""
+ }
+ return v.String
+}
+
+func makeNullStrings(size int) []any {
+ vs := make([]any, size)
+ for i := range vs {
+ vs[i] = &sql.NullString{}
+ }
+ return vs
+}
+
+func parseInt(s string) int64 {
+ v, _ := strconv.ParseInt(s, 10, 64)
+ return v
+}
+
+func calcPercentage(value, total int64) int64 {
+ if total == 0 {
+ return 0
+ }
+ return value * 100 / total
+}
diff --git a/src/go/plugin/go.d/modules/pgbouncer/config_schema.json b/src/go/plugin/go.d/modules/pgbouncer/config_schema.json
new file mode 100644
index 000000000..d8d08bc51
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/config_schema.json
@@ -0,0 +1,47 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "PgBouncer collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "dsn": {
+ "title": "DSN",
+ "description": "PgBouncer server Data Source Name in [key/value string](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-KEYWORD-VALUE) or [URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS) format.",
+ "type": "string",
+ "default": "postgres://netdata:password@127.0.0.1:6432/pgbouncer"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for queries, in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ },
+ "required": [
+ "dsn"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "dsn": {
+ "ui:placeholder": "postgres://username:password@host:port/dbname"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pgbouncer/init.go b/src/go/plugin/go.d/modules/pgbouncer/init.go
new file mode 100644
index 000000000..146335085
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/init.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pgbouncer
+
+import "errors"
+
+func (p *PgBouncer) validateConfig() error {
+ if p.DSN == "" {
+ return errors.New("DSN not set")
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/pgbouncer/integrations/pgbouncer.md b/src/go/plugin/go.d/modules/pgbouncer/integrations/pgbouncer.md
new file mode 100644
index 000000000..1b5e6e719
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/integrations/pgbouncer.md
@@ -0,0 +1,289 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/pgbouncer/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/pgbouncer/metadata.yaml"
+sidebar_label: "PgBouncer"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# PgBouncer
+
+
+<img src="https://netdata.cloud/img/postgres.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: pgbouncer
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors PgBouncer servers.
+
+Executed queries:
+
+- `SHOW VERSION;`
+- `SHOW CONFIG;`
+- `SHOW DATABASES;`
+- `SHOW STATS;`
+- `SHOW POOLS;`
+
+Information about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).
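+
+For example, to preview the output of one of these queries by hand (this assumes the `netdata` user described in the Setup section below; adjust host, port, and user as needed):
+
+```bash
+psql -h 127.0.0.1 -p 6432 -U netdata pgbouncer -c "SHOW STATS;"
+```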
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per PgBouncer instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| pgbouncer.client_connections_utilization | used | percentage |
+
+### Per database
+
+These metrics refer to the database.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | database name |
+| postgres_database | Postgres database name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| pgbouncer.db_client_connections | active, waiting, cancel_req | connections |
+| pgbouncer.db_server_connections | active, idle, used, tested, login | connections |
+| pgbouncer.db_server_connections_utilization | used | percentage |
+| pgbouncer.db_clients_wait_time | time | seconds |
+| pgbouncer.db_client_max_wait_time | time | seconds |
+| pgbouncer.db_transactions | transactions | transactions/s |
+| pgbouncer.db_transactions_time | time | seconds |
+| pgbouncer.db_transaction_avg_time | time | seconds |
+| pgbouncer.db_queries | queries | queries/s |
+| pgbouncer.db_queries_time | time | seconds |
+| pgbouncer.db_query_avg_time | time | seconds |
+| pgbouncer.db_network_io | received, sent | B/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Create netdata user
+
+Create a user with `stats_users` permissions to query your PgBouncer instance.
+
+To create the `netdata` user:
+
+- Add `netdata` user to the `pgbouncer.ini` file:
+
+ ```text
+ stats_users = netdata
+ ```
+
+- Add a password for the `netdata` user to the `userlist.txt` file:
+
+ ```text
+ "netdata" "<PASSWORD>"
+ ```
+
+- To verify the credentials, run the following command:
+
+ ```bash
+ psql -h localhost -U netdata -p 6432 pgbouncer -c "SHOW VERSION;" >/dev/null 2>&1 && echo OK || echo FAIL
+ ```
+
+ When it prompts for a password, enter the password you added to `userlist.txt`.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/pgbouncer.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/pgbouncer.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| dsn | PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:6432/pgbouncer | yes |
+| timeout | Query timeout in seconds. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'
+
+```
+</details>
+
+##### Unix socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'
+
+ - name: remote
+ dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `pgbouncer` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m pgbouncer
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `pgbouncer` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep pgbouncer
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep pgbouncer /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep pgbouncer
+```
+
+
diff --git a/src/go/plugin/go.d/modules/pgbouncer/metadata.yaml b/src/go/plugin/go.d/modules/pgbouncer/metadata.yaml
new file mode 100644
index 000000000..e4a098bc2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/metadata.yaml
@@ -0,0 +1,239 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-pgbouncer
+ plugin_name: go.d.plugin
+ module_name: pgbouncer
+ monitored_instance:
+ name: PgBouncer
+ link: https://www.pgbouncer.org/
+ icon_filename: postgres.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - pgbouncer
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors PgBouncer servers.
+
+ Executed queries:
+
+ - `SHOW VERSION;`
+ - `SHOW CONFIG;`
+ - `SHOW DATABASES;`
+ - `SHOW STATS;`
+ - `SHOW POOLS;`
+
+ Information about the queries can be found in the [PgBouncer Documentation](https://www.pgbouncer.org/usage.html).
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Create netdata user
+ description: |
+ Create a user with `stats_users` permissions to query your PgBouncer instance.
+
+ To create the `netdata` user:
+
+ - Add `netdata` user to the `pgbouncer.ini` file:
+
+ ```text
+ stats_users = netdata
+ ```
+
+ - Add a password for the `netdata` user to the `userlist.txt` file:
+
+ ```text
+ "netdata" "<PASSWORD>"
+ ```
+
+ - To verify the credentials, run the following command
+
+ ```bash
+ psql -h localhost -U netdata -p 6432 pgbouncer -c "SHOW VERSION;" >/dev/null 2>&1 && echo OK || echo FAIL
+ ```
+
+ When it prompts for a password, enter the password you added to `userlist.txt`.
+ configuration:
+ file:
+ name: go.d/pgbouncer.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: dsn
+ description: PgBouncer server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+ default_value: postgres://postgres:postgres@127.0.0.1:6432/pgbouncer
+ required: true
+ - name: timeout
+ description: Query timeout in seconds.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: TCP socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'
+ - name: Unix socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'
+
+ - name: remote
+ dsn: 'postgres://postgres:postgres@203.0.113.10:6432/pgbouncer'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: pgbouncer.client_connections_utilization
+ description: Client connections utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: database
+ description: These metrics refer to the database.
+ labels:
+ - name: database
+ description: database name
+ - name: postgres_database
+ description: Postgres database name
+ metrics:
+ - name: pgbouncer.db_client_connections
+ description: Database client connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: waiting
+ - name: cancel_req
+ - name: pgbouncer.db_server_connections
+ description: Database server connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: idle
+ - name: used
+ - name: tested
+ - name: login
+ - name: pgbouncer.db_server_connections_utilization
+ description: Database server connections utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: pgbouncer.db_clients_wait_time
+ description: Database clients wait time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: pgbouncer.db_client_max_wait_time
+ description: Database client max wait time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: pgbouncer.db_transactions
+ description: Database pooled SQL transactions
+ unit: transactions/s
+ chart_type: line
+ dimensions:
+ - name: transactions
+ - name: pgbouncer.db_transactions_time
+ description: Database transactions time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: pgbouncer.db_transaction_avg_time
+ description: Database transaction average time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: pgbouncer.db_queries
+ description: Database pooled SQL queries
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: pgbouncer.db_queries_time
+ description: Database queries time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: pgbouncer.db_query_avg_time
+ description: Database query average time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: pgbouncer.db_network_io
+ description: Database traffic
+ unit: B/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
diff --git a/src/go/plugin/go.d/modules/pgbouncer/metrics.go b/src/go/plugin/go.d/modules/pgbouncer/metrics.go
new file mode 100644
index 000000000..eaac52771
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/metrics.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pgbouncer
+
+type metrics struct {
+ dbs map[string]*dbMetrics
+}
+
+// dbMetrics represents a PgBouncer database (not the PostgreSQL database of the outgoing connection).
+type dbMetrics struct {
+ name string
+ pgDBName string
+
+ updated bool
+ hasCharts bool
+
+ // command 'SHOW DATABASES;'
+ maxConnections int64
+ currentConnections int64
+ paused int64
+ disabled int64
+
+ // command 'SHOW STATS;'
+ // https://github.com/pgbouncer/pgbouncer/blob/9a346b0e451d842d7202abc3eccf0ff5a66b2dd6/src/stats.c#L76
+ totalXactCount int64 // v1.8+
+ totalQueryCount int64 // v1.8+
+ totalReceived int64
+ totalSent int64
+ totalXactTime int64 // v1.8+
+ totalQueryTime int64
+ totalWaitTime int64 // v1.8+
+ avgXactTime int64 // v1.8+
+ avgQueryTime int64
+
+ // command 'SHOW POOLS;'
+ // https://github.com/pgbouncer/pgbouncer/blob/9a346b0e451d842d7202abc3eccf0ff5a66b2dd6/src/admin.c#L804
+ clActive int64
+ clWaiting int64
+ clCancelReq int64
+ svActive int64
+ svIdle int64
+ svUsed int64
+ svTested int64
+ svLogin int64
+ maxWait int64
+ maxWaitUS int64 // v1.8+
+}
diff --git a/src/go/plugin/go.d/modules/pgbouncer/pgbouncer.go b/src/go/plugin/go.d/modules/pgbouncer/pgbouncer.go
new file mode 100644
index 000000000..fbe554dc3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/pgbouncer.go
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pgbouncer
+
+import (
+ "database/sql"
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/blang/semver/v4"
+ _ "github.com/jackc/pgx/v4/stdlib"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("pgbouncer", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *PgBouncer {
+ return &PgBouncer{
+ Config: Config{
+ Timeout: web.Duration(time.Second),
+ DSN: "postgres://postgres:postgres@127.0.0.1:6432/pgbouncer",
+ },
+ charts: globalCharts.Copy(),
+ recheckSettingsEvery: time.Minute * 5,
+ metrics: &metrics{
+ dbs: make(map[string]*dbMetrics),
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ DSN string `yaml:"dsn" json:"dsn"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type PgBouncer struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ db *sql.DB
+
+ version *semver.Version
+ recheckSettingsTime time.Time
+ recheckSettingsEvery time.Duration
+ maxClientConn int64
+
+ metrics *metrics
+}
+
+func (p *PgBouncer) Configuration() any {
+ return p.Config
+}
+
+func (p *PgBouncer) Init() error {
+ err := p.validateConfig()
+ if err != nil {
+ p.Errorf("config validation: %v", err)
+ return err
+ }
+
+ return nil
+}
+
+func (p *PgBouncer) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *PgBouncer) Charts() *module.Charts {
+ return p.charts
+}
+
+func (p *PgBouncer) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (p *PgBouncer) Cleanup() {
+ if p.db == nil {
+ return
+ }
+ if err := p.db.Close(); err != nil {
+		p.Warningf("cleanup: error closing the PgBouncer database [%s]: %v", p.DSN, err)
+ }
+ p.db = nil
+}
diff --git a/src/go/plugin/go.d/modules/pgbouncer/pgbouncer_test.go b/src/go/plugin/go.d/modules/pgbouncer/pgbouncer_test.go
new file mode 100644
index 000000000..51c838aca
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/pgbouncer_test.go
@@ -0,0 +1,364 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pgbouncer
+
+import (
+ "bufio"
+ "bytes"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer170Version, _ = os.ReadFile("testdata/v1.7.0/version.txt")
+ dataVer1170Version, _ = os.ReadFile("testdata/v1.17.0/version.txt")
+ dataVer1170Config, _ = os.ReadFile("testdata/v1.17.0/config.txt")
+ dataVer1170Databases, _ = os.ReadFile("testdata/v1.17.0/databases.txt")
+ dataVer1170Pools, _ = os.ReadFile("testdata/v1.17.0/pools.txt")
+ dataVer1170Stats, _ = os.ReadFile("testdata/v1.17.0/stats.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer170Version": dataVer170Version,
+ "dataVer1170Version": dataVer1170Version,
+ "dataVer1170Config": dataVer1170Config,
+ "dataVer1170Databases": dataVer1170Databases,
+ "dataVer1170Pools": dataVer1170Pools,
+ "dataVer1170Stats": dataVer1170Stats,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPgBouncer_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &PgBouncer{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPgBouncer_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "Success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "Fail when DSN not set": {
+ wantFail: true,
+ config: Config{DSN: ""},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ p := New()
+ p.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, p.Init())
+ } else {
+ assert.NoError(t, p.Init())
+ }
+ })
+ }
+}
+
+func TestPgBouncer_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestPgBouncer_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func(t *testing.T, m sqlmock.Sqlmock)
+ wantFail bool
+ }{
+ "Success when all queries are successful (v1.17.0)": {
+ wantFail: false,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataVer1170Version)
+ mockExpect(t, m, queryShowConfig, dataVer1170Config)
+ mockExpect(t, m, queryShowDatabases, dataVer1170Databases)
+ mockExpect(t, m, queryShowStats, dataVer1170Stats)
+ mockExpect(t, m, queryShowPools, dataVer1170Pools)
+ },
+ },
+ "Fail when querying version returns an error": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpectErr(m, queryShowVersion)
+ },
+ },
+ "Fail when querying version returns unsupported version": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataVer170Version)
+ },
+ },
+ "Fail when querying config returns an error": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataVer1170Version)
+ mockExpectErr(m, queryShowConfig)
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ p := New()
+ p.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, p.Init())
+
+ test.prepareMock(t, mock)
+
+ if test.wantFail {
+ assert.Error(t, p.Check())
+ } else {
+ assert.NoError(t, p.Check())
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
+func TestPgBouncer_Collect(t *testing.T) {
+ type testCaseStep struct {
+ prepareMock func(t *testing.T, m sqlmock.Sqlmock)
+ check func(t *testing.T, p *PgBouncer)
+ }
+ tests := map[string][]testCaseStep{
+ "Success on all queries (v1.17.0)": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataVer1170Version)
+ mockExpect(t, m, queryShowConfig, dataVer1170Config)
+ mockExpect(t, m, queryShowDatabases, dataVer1170Databases)
+ mockExpect(t, m, queryShowStats, dataVer1170Stats)
+ mockExpect(t, m, queryShowPools, dataVer1170Pools)
+ },
+ check: func(t *testing.T, p *PgBouncer) {
+ mx := p.Collect()
+
+ expected := map[string]int64{
+ "cl_conns_utilization": 47,
+ "db_myprod1_avg_query_time": 575,
+ "db_myprod1_avg_xact_time": 575,
+ "db_myprod1_cl_active": 15,
+ "db_myprod1_cl_cancel_req": 0,
+ "db_myprod1_cl_waiting": 0,
+ "db_myprod1_maxwait": 0,
+ "db_myprod1_sv_active": 15,
+ "db_myprod1_sv_conns_utilization": 0,
+ "db_myprod1_sv_idle": 5,
+ "db_myprod1_sv_login": 0,
+ "db_myprod1_sv_tested": 0,
+ "db_myprod1_sv_used": 0,
+ "db_myprod1_total_query_count": 12683170,
+ "db_myprod1_total_query_time": 7223566620,
+ "db_myprod1_total_received": 809093651,
+ "db_myprod1_total_sent": 1990971542,
+ "db_myprod1_total_wait_time": 1029555,
+ "db_myprod1_total_xact_count": 12683170,
+ "db_myprod1_total_xact_time": 7223566620,
+ "db_myprod2_avg_query_time": 581,
+ "db_myprod2_avg_xact_time": 581,
+ "db_myprod2_cl_active": 12,
+ "db_myprod2_cl_cancel_req": 0,
+ "db_myprod2_cl_waiting": 0,
+ "db_myprod2_maxwait": 0,
+ "db_myprod2_sv_active": 11,
+ "db_myprod2_sv_conns_utilization": 0,
+ "db_myprod2_sv_idle": 9,
+ "db_myprod2_sv_login": 0,
+ "db_myprod2_sv_tested": 0,
+ "db_myprod2_sv_used": 0,
+ "db_myprod2_total_query_count": 12538544,
+ "db_myprod2_total_query_time": 7144226450,
+ "db_myprod2_total_received": 799867464,
+ "db_myprod2_total_sent": 1968267687,
+ "db_myprod2_total_wait_time": 993313,
+ "db_myprod2_total_xact_count": 12538544,
+ "db_myprod2_total_xact_time": 7144226450,
+ "db_pgbouncer_avg_query_time": 0,
+ "db_pgbouncer_avg_xact_time": 0,
+ "db_pgbouncer_cl_active": 2,
+ "db_pgbouncer_cl_cancel_req": 0,
+ "db_pgbouncer_cl_waiting": 0,
+ "db_pgbouncer_maxwait": 0,
+ "db_pgbouncer_sv_active": 0,
+ "db_pgbouncer_sv_conns_utilization": 0,
+ "db_pgbouncer_sv_idle": 0,
+ "db_pgbouncer_sv_login": 0,
+ "db_pgbouncer_sv_tested": 0,
+ "db_pgbouncer_sv_used": 0,
+ "db_pgbouncer_total_query_count": 45,
+ "db_pgbouncer_total_query_time": 0,
+ "db_pgbouncer_total_received": 0,
+ "db_pgbouncer_total_sent": 0,
+ "db_pgbouncer_total_wait_time": 0,
+ "db_pgbouncer_total_xact_count": 45,
+ "db_pgbouncer_total_xact_time": 0,
+ "db_postgres_avg_query_time": 2790,
+ "db_postgres_avg_xact_time": 2790,
+ "db_postgres_cl_active": 18,
+ "db_postgres_cl_cancel_req": 0,
+ "db_postgres_cl_waiting": 0,
+ "db_postgres_maxwait": 0,
+ "db_postgres_sv_active": 18,
+ "db_postgres_sv_conns_utilization": 0,
+ "db_postgres_sv_idle": 2,
+ "db_postgres_sv_login": 0,
+ "db_postgres_sv_tested": 0,
+ "db_postgres_sv_used": 0,
+ "db_postgres_total_query_count": 25328823,
+ "db_postgres_total_query_time": 72471882827,
+ "db_postgres_total_received": 1615791619,
+ "db_postgres_total_sent": 3976053858,
+ "db_postgres_total_wait_time": 50439622253,
+ "db_postgres_total_xact_count": 25328823,
+ "db_postgres_total_xact_time": 72471882827,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "Fail when querying version returns an error": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpectErr(m, queryShowVersion)
+ },
+ check: func(t *testing.T, p *PgBouncer) {
+ mx := p.Collect()
+ var expected map[string]int64
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "Fail when querying version returns unsupported version": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataVer170Version)
+ },
+ check: func(t *testing.T, p *PgBouncer) {
+ mx := p.Collect()
+ var expected map[string]int64
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "Fail when querying config returns an error": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryShowVersion, dataVer1170Version)
+ mockExpectErr(m, queryShowConfig)
+ },
+ check: func(t *testing.T, p *PgBouncer) {
+ mx := p.Collect()
+ var expected map[string]int64
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ p := New()
+ p.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, p.Init())
+
+ for i, step := range test {
+ t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
+ step.prepareMock(t, mock)
+ step.check(t, p)
+ })
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
+func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) {
+ mock.ExpectQuery(query).WillReturnRows(mustMockRows(t, rows)).RowsWillBeClosed()
+}
+
+func mockExpectErr(mock sqlmock.Sqlmock, query string) {
+ mock.ExpectQuery(query).WillReturnError(fmt.Errorf("mock error (%s)", query))
+}
+
+func mustMockRows(t *testing.T, data []byte) *sqlmock.Rows {
+ rows, err := prepareMockRows(data)
+ require.NoError(t, err)
+ return rows
+}
+
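+// prepareMockRows converts psql-style table output (pipe-separated columns with a
+// dashed separator line, as in the testdata/*.txt files) into sqlmock rows: the first
+// data line provides the column names, the remaining lines become rows.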
+func prepareMockRows(data []byte) (*sqlmock.Rows, error) {
+ r := bytes.NewReader(data)
+ sc := bufio.NewScanner(r)
+
+ var numColumns int
+ var rows *sqlmock.Rows
+
+ for sc.Scan() {
+ s := strings.TrimSpace(sc.Text())
+ if s == "" || strings.HasPrefix(s, "---") {
+ continue
+ }
+
+ parts := strings.Split(s, "|")
+ for i, v := range parts {
+ parts[i] = strings.TrimSpace(v)
+ }
+
+ if rows == nil {
+ numColumns = len(parts)
+ rows = sqlmock.NewRows(parts)
+ continue
+ }
+
+ if len(parts) != numColumns {
+ return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts))
+ }
+
+ values := make([]driver.Value, len(parts))
+ for i, v := range parts {
+ values[i] = v
+ }
+ rows.AddRow(values...)
+ }
+
+ if rows == nil {
+ return nil, errors.New("prepareMockRows(): nil rows result")
+ }
+
+ return rows, nil
+}
diff --git a/src/go/plugin/go.d/modules/pgbouncer/testdata/config.json b/src/go/plugin/go.d/modules/pgbouncer/testdata/config.json
new file mode 100644
index 000000000..ed8b72dcb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "dsn": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/pgbouncer/testdata/config.yaml b/src/go/plugin/go.d/modules/pgbouncer/testdata/config.yaml
new file mode 100644
index 000000000..caff49039
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+dsn: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/config.txt b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/config.txt
new file mode 100644
index 000000000..da1aba609
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/config.txt
@@ -0,0 +1,86 @@
+ key | value | default | changeable
+---------------------------+--------------------------------------------------------+--------------------------------------------------------+------------
+ admin_users | postgres | | yes
+ application_name_add_host | 0 | 0 | yes
+ auth_file | /etc/pgbouncer/userlist.txt | | yes
+ auth_hba_file | | | yes
+ auth_query | SELECT usename, passwd FROM pg_shadow WHERE usename=$1 | SELECT usename, passwd FROM pg_shadow WHERE usename=$1 | yes
+ auth_type | md5 | md5 | yes
+ auth_user | | | yes
+ autodb_idle_timeout | 3600 | 3600 | yes
+ client_idle_timeout | 0 | 0 | yes
+ client_login_timeout | 60 | 60 | yes
+ client_tls_ca_file | | | yes
+ client_tls_cert_file | | | yes
+ client_tls_ciphers | fast | fast | yes
+ client_tls_dheparams | auto | auto | yes
+ client_tls_ecdhcurve | auto | auto | yes
+ client_tls_key_file | | | yes
+ client_tls_protocols | secure | secure | yes
+ client_tls_sslmode | disable | disable | yes
+ conffile | /etc/pgbouncer/pgbouncer.ini | | yes
+ default_pool_size | 20 | 20 | yes
+ disable_pqexec | 0 | 0 | no
+ dns_max_ttl | 15 | 15 | yes
+ dns_nxdomain_ttl | 15 | 15 | yes
+ dns_zone_check_period | 0 | 0 | yes
+ idle_transaction_timeout | 0 | 0 | yes
+ ignore_startup_parameters | extra_float_digits | | yes
+ job_name | pgbouncer | pgbouncer | no
+ listen_addr | 0.0.0.0 | | no
+ listen_backlog | 128 | 128 | no
+ listen_port | 6432 | 6432 | no
+ log_connections | 1 | 1 | yes
+ log_disconnections | 1 | 1 | yes
+ log_pooler_errors | 1 | 1 | yes
+ log_stats | 1 | 1 | yes
+ logfile | | | yes
+ max_client_conn | 100 | 100 | yes
+ max_db_connections | 0 | 0 | yes
+ max_packet_size | 2147483647 | 2147483647 | yes
+ max_user_connections | 0 | 0 | yes
+ min_pool_size | 0 | 0 | yes
+ pidfile | | | no
+ pkt_buf | 4096 | 4096 | no
+ pool_mode | session | session | yes
+ query_timeout | 0 | 0 | yes
+ query_wait_timeout | 120 | 120 | yes
+ reserve_pool_size | 0 | 0 | yes
+ reserve_pool_timeout | 5 | 5 | yes
+ resolv_conf | | | no
+ sbuf_loopcnt | 5 | 5 | yes
+ server_check_delay | 30 | 30 | yes
+ server_check_query | select 1 | select 1 | yes
+ server_connect_timeout | 15 | 15 | yes
+ server_fast_close | 0 | 0 | yes
+ server_idle_timeout | 600 | 600 | yes
+ server_lifetime | 3600 | 3600 | yes
+ server_login_retry | 15 | 15 | yes
+ server_reset_query | DISCARD ALL | DISCARD ALL | yes
+ server_reset_query_always | 0 | 0 | yes
+ server_round_robin | 0 | 0 | yes
+ server_tls_ca_file | | | yes
+ server_tls_cert_file | | | yes
+ server_tls_ciphers | fast | fast | yes
+ server_tls_key_file | | | yes
+ server_tls_protocols | secure | secure | yes
+ server_tls_sslmode | disable | disable | yes
+ so_reuseport | 0 | 0 | no
+ stats_period | 60 | 60 | yes
+ stats_users | | | yes
+ suspend_timeout | 10 | 10 | yes
+ syslog | 0 | 0 | yes
+ syslog_facility | daemon | daemon | yes
+ syslog_ident | pgbouncer | pgbouncer | yes
+ tcp_defer_accept | 1 | 1 | yes
+ tcp_keepalive | 1 | 1 | yes
+ tcp_keepcnt | 0 | 0 | yes
+ tcp_keepidle | 0 | 0 | yes
+ tcp_keepintvl | 0 | 0 | yes
+ tcp_socket_buffer | 0 | 0 | yes
+ tcp_user_timeout | 0 | 0 | yes
+ unix_socket_dir | | /tmp | no
+ unix_socket_group | | | no
+ unix_socket_mode | 511 | 0777 | no
+ user | postgres | | no
+ verbose | 0 | | yes \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/databases.txt b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/databases.txt
new file mode 100644
index 000000000..9e8f14695
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/databases.txt
@@ -0,0 +1,6 @@
+ name | host | port | database | force_user | pool_size | min_pool_size | reserve_pool | pool_mode | max_connections | current_connections | paused | disabled
+-----------+-----------+------+-----------+------------+-----------+---------------+--------------+-----------+-----------------+---------------------+--------+----------
+ myprod1 | 127.0.0.1 | 5432 | myprod1 | postgres | 20 | 0 | 0 | | 0 | 20 | 0 | 0
+ myprod2 | 127.0.0.1 | 5432 | myprod2 | postgres | 20 | 0 | 0 | | 0 | 20 | 0 | 0
+ pgbouncer | | 6432 | pgbouncer | pgbouncer | 2 | 0 | 0 | statement | 0 | 0 | 0 | 0
+ postgres | 127.0.0.1 | 5432 | postgres | postgres | 20 | 0 | 0 | | 0 | 20 | 0 | 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/pools.txt b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/pools.txt
new file mode 100644
index 000000000..dec3326ad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/pools.txt
@@ -0,0 +1,6 @@
+ database | user | cl_active | cl_waiting | cl_cancel_req | sv_active | sv_idle | sv_used | sv_tested | sv_login | maxwait | maxwait_us | pool_mode
+-----------+-----------+-----------+------------+---------------+-----------+---------+---------+-----------+----------+---------+------------+-----------
+ myprod1 | postgres | 15 | 0 | 0 | 15 | 5 | 0 | 0 | 0 | 0 | 0 | session
+ myprod2 | postgres | 12 | 0 | 0 | 11 | 9 | 0 | 0 | 0 | 0 | 0 | session
+ pgbouncer | pgbouncer | 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | statement
+ postgres | postgres | 18 | 0 | 0 | 18 | 2 | 0 | 0 | 0 | 0 | 0 | session \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/stats.txt b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/stats.txt
new file mode 100644
index 000000000..3b66fc323
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/stats.txt
@@ -0,0 +1,6 @@
+ database | total_xact_count | total_query_count | total_received | total_sent | total_xact_time | total_query_time | total_wait_time | avg_xact_count | avg_query_count | avg_recv | avg_sent | avg_xact_time | avg_query_time | avg_wait_time
+-----------+------------------+-------------------+----------------+------------+-----------------+------------------+-----------------+----------------+-----------------+----------+----------+---------------+----------------+---------------
+ myprod1 | 12683170 | 12683170 | 809093651 | 1990971542 | 7223566620 | 7223566620 | 1029555 | 900 | 900 | 57434 | 141358 | 575 | 575 | 3
+ myprod2 | 12538544 | 12538544 | 799867464 | 1968267687 | 7144226450 | 7144226450 | 993313 | 885 | 885 | 56511 | 139050 | 581 | 581 | 14
+ pgbouncer | 45 | 45 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
+ postgres | 25328823 | 25328823 | 1615791619 | 3976053858 | 72471882827 | 72471882827 | 50439622253 | 1901 | 1901 | 121329 | 298556 | 2790 | 2790 | 3641761 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/version.txt b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/version.txt
new file mode 100644
index 000000000..fa2c806a2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.17.0/version.txt
@@ -0,0 +1,3 @@
+ version
+------------------
+ PgBouncer 1.17.0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.7.0/version.txt b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.7.0/version.txt
new file mode 100644
index 000000000..ff0fd70a8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pgbouncer/testdata/v1.7.0/version.txt
@@ -0,0 +1,3 @@
+ version
+------------------
+ PgBouncer 1.7.0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/phpdaemon/README.md b/src/go/plugin/go.d/modules/phpdaemon/README.md
new file mode 120000
index 000000000..2f2fca9f1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/README.md
@@ -0,0 +1 @@
+integrations/phpdaemon.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/phpdaemon/charts.go b/src/go/plugin/go.d/modules/phpdaemon/charts.go
new file mode 100644
index 000000000..e96a209bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/charts.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpdaemon
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Chart is an alias for module.Chart
+ Chart = module.Chart
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+)
+
+var charts = Charts{
+ {
+ ID: "workers",
+ Title: "Workers",
+ Units: "workers",
+ Fam: "workers",
+ Ctx: "phpdaemon.workers",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "alive"},
+ {ID: "shutdown"},
+ },
+ },
+ {
+ ID: "alive_workers",
+ Title: "Alive Workers State",
+ Units: "workers",
+ Fam: "workers",
+ Ctx: "phpdaemon.alive_workers",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "idle"},
+ {ID: "busy"},
+ {ID: "reloading"},
+ },
+ },
+ {
+ ID: "idle_workers",
+ Title: "Idle Workers State",
+ Units: "workers",
+ Fam: "workers",
+ Ctx: "phpdaemon.idle_workers",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "preinit"},
+ {ID: "init"},
+ {ID: "initialized"},
+ },
+ },
+}
+
+var uptimeChart = Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "phpdaemon.uptime",
+ Dims: Dims{
+ {ID: "uptime", Name: "time"},
+ },
+}
diff --git a/src/go/plugin/go.d/modules/phpdaemon/client.go b/src/go/plugin/go.d/modules/phpdaemon/client.go
new file mode 100644
index 000000000..bc54265d3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/client.go
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpdaemon
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+type decodeFunc func(dst interface{}, reader io.Reader) error
+
+func decodeJson(dst interface{}, reader io.Reader) error { return json.NewDecoder(reader).Decode(dst) }
+
+func newAPIClient(httpClient *http.Client, request web.Request) *client {
+ return &client{
+ httpClient: httpClient,
+ request: request,
+ }
+}
+
+type client struct {
+ httpClient *http.Client
+ request web.Request
+}
+
+func (c *client) queryFullStatus() (*FullStatus, error) {
+ var status FullStatus
+ err := c.doWithDecode(&status, decodeJson, c.request)
+ if err != nil {
+ return nil, err
+ }
+
+ return &status, nil
+}
+
+func (c *client) doWithDecode(dst interface{}, decode decodeFunc, request web.Request) error {
+ req, err := web.NewHTTPRequest(request)
+ if err != nil {
+ return fmt.Errorf("error on creating http request to %s : %v", request.URL, err)
+ }
+
+ resp, err := c.doOK(req)
+ defer closeBody(resp)
+ if err != nil {
+ return err
+ }
+
+ if err = decode(dst, resp.Body); err != nil {
+ return fmt.Errorf("error on parsing response from %s : %v", req.URL, err)
+ }
+
+ return nil
+}
+
+func (c *client) doOK(req *http.Request) (*http.Response, error) {
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return resp, fmt.Errorf("error on request : %v", err)
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return resp, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+ }
+
+ return resp, err
+}
+
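+// closeBody drains and closes the response body so the underlying connection can be reused.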
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/phpdaemon/collect.go b/src/go/plugin/go.d/modules/phpdaemon/collect.go
new file mode 100644
index 000000000..9be718ea9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/collect.go
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpdaemon
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+
+func (p *PHPDaemon) collect() (map[string]int64, error) {
+ s, err := p.client.queryFullStatus()
+
+ if err != nil {
+ return nil, err
+ }
+
+ // https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Core/Daemon.php
+ // see getStateOfWorkers()
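+	// the status payload does not include the initialized count; derive it from the idle breakdown (idle = preinit + init + initialized)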
+ s.Initialized = s.Idle - (s.Init + s.Preinit)
+
+ return stm.ToMap(s), nil
+}
diff --git a/src/go/plugin/go.d/modules/phpdaemon/config_schema.json b/src/go/plugin/go.d/modules/phpdaemon/config_schema.json
new file mode 100644
index 000000000..a154aaa59
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "phpDaemon collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the phpDaemon status page.",
+ "type": "string",
+ "default": "http://127.0.0.1:8509/FullStatus",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/phpdaemon/init.go b/src/go/plugin/go.d/modules/phpdaemon/init.go
new file mode 100644
index 000000000..ec9925b7a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/init.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpdaemon
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (p *PHPDaemon) validateConfig() error {
+ if p.URL == "" {
+ return errors.New("url not set")
+ }
+ if _, err := web.NewHTTPRequest(p.Request); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (p *PHPDaemon) initClient() (*client, error) {
+ httpClient, err := web.NewHTTPClient(p.Client)
+ if err != nil {
+ return nil, err
+ }
+ return newAPIClient(httpClient, p.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/phpdaemon/integrations/phpdaemon.md b/src/go/plugin/go.d/modules/phpdaemon/integrations/phpdaemon.md
new file mode 100644
index 000000000..11445455f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/integrations/phpdaemon.md
@@ -0,0 +1,333 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/phpdaemon/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/phpdaemon/metadata.yaml"
+sidebar_label: "phpDaemon"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# phpDaemon
+
+
+<img src="https://netdata.cloud/img/php.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: phpdaemon
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors phpDaemon instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per phpDaemon instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| phpdaemon.workers | alive, shutdown | workers |
+| phpdaemon.alive_workers | idle, busy, reloading | workers |
+| phpdaemon.idle_workers | preinit, init, initialized | workers |
+| phpdaemon.uptime | time | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable phpDaemon's HTTP server
+
+Statistics are expected to be in JSON format.
+
+<details>
+<summary>phpDaemon configuration</summary>
+
+Instructions from [@METAJIJI](https://github.com/METAJIJI).
+
+To enable `phpd` statistics over HTTP, you must enable the HTTP server and write an application.
+The application is important because the standalone [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) application provides statistics in HTML format, which is unusable for `netdata`.
+
+```php
+// /opt/phpdaemon/conf/phpd.conf
+
+path /opt/phpdaemon/conf/AppResolver.php;
+Pool:HTTPServer {
+ privileged;
+ listen '127.0.0.1';
+ port 8509;
+}
+```
+
+```php
+// /opt/phpdaemon/conf/AppResolver.php
+
+<?php
+
+class MyAppResolver extends \PHPDaemon\Core\AppResolver {
+ public function getRequestRoute($req, $upstream) {
+ if (preg_match('~^/(ServerStatus|FullStatus)/~', $req->attrs->server['DOCUMENT_URI'], $m)) {
+ return $m[1];
+ }
+ }
+}
+
+return new MyAppResolver;
+```
+
+```php
+// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php
+
+<?php
+namespace PHPDaemon\Applications;
+
+class FullStatus extends \PHPDaemon\Core\AppInstance {
+ public function beginRequest($req, $upstream) {
+ return new FullStatusRequest($this, $upstream, $req);
+ }
+}
+```
+
+```php
+// /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatusRequest.php
+
+<?php
+namespace PHPDaemon\Applications;
+
+use PHPDaemon\Core\Daemon;
+use PHPDaemon\HTTPRequest\Generic;
+
+class FullStatusRequest extends Generic {
+ public function run() {
+ $stime = microtime(true);
+ $this->header('Content-Type: application/javascript; charset=utf-8');
+
+ $stat = Daemon::getStateOfWorkers();
+ $stat['uptime'] = time() - Daemon::$startTime;
+ echo json_encode($stat);
+ }
+}
+```
+
+</details>
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/phpdaemon.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/phpdaemon.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8509/FullStatus | yes |
+| timeout | HTTP request timeout. | 2 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8509/FullStatus
+
+```
+</details>
+
+##### HTTP authentication
+
+HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8509/FullStatus
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+HTTPS with self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8509/FullStatus
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8509/FullStatus
+
+ - name: remote
+ url: http://192.0.2.1:8509/FullStatus
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `phpdaemon` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m phpdaemon
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `phpdaemon` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep phpdaemon
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep phpdaemon /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep phpdaemon
+```
+
+
diff --git a/src/go/plugin/go.d/modules/phpdaemon/metadata.yaml b/src/go/plugin/go.d/modules/phpdaemon/metadata.yaml
new file mode 100644
index 000000000..bd3ae8e57
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/metadata.yaml
@@ -0,0 +1,276 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-phpdaemon
+ plugin_name: go.d.plugin
+ module_name: phpdaemon
+ monitored_instance:
+ name: phpDaemon
+ link: https://github.com/kakserpom/phpdaemon
+ icon_filename: php.svg
+ categories:
+ - data-collection.apm
+ keywords:
+ - phpdaemon
+ - php
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors phpDaemon instances.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable phpDaemon's HTTP server
+ description: |
+            Statistics are expected to be in JSON format.
+
+ <details>
+ <summary>phpDaemon configuration</summary>
+
+            Instructions from [@METAJIJI](https://github.com/METAJIJI).
+
+            To enable `phpd` statistics over HTTP, you must enable the HTTP server and write an application.
+            The application is important because the standalone [ServerStatus.php](https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Applications/ServerStatus.php) application provides statistics in HTML format, which is unusable for `netdata`.
+
+ ```php
+ // /opt/phpdaemon/conf/phpd.conf
+
+ path /opt/phpdaemon/conf/AppResolver.php;
+ Pool:HTTPServer {
+ privileged;
+ listen '127.0.0.1';
+ port 8509;
+ }
+ ```
+
+ ```php
+ // /opt/phpdaemon/conf/AppResolver.php
+
+ <?php
+
+ class MyAppResolver extends \PHPDaemon\Core\AppResolver {
+ public function getRequestRoute($req, $upstream) {
+ if (preg_match('~^/(ServerStatus|FullStatus)/~', $req->attrs->server['DOCUMENT_URI'], $m)) {
+ return $m[1];
+ }
+ }
+ }
+
+ return new MyAppResolver;
+ ```
+
+ ```php
+              // /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatus.php
+
+ <?php
+ namespace PHPDaemon\Applications;
+
+ class FullStatus extends \PHPDaemon\Core\AppInstance {
+ public function beginRequest($req, $upstream) {
+ return new FullStatusRequest($this, $upstream, $req);
+ }
+ }
+ ```
+
+ ```php
+ // /opt/phpdaemon/conf/PHPDaemon/Applications/FullStatusRequest.php
+
+ <?php
+ namespace PHPDaemon\Applications;
+
+ use PHPDaemon\Core\Daemon;
+ use PHPDaemon\HTTPRequest\Generic;
+
+ class FullStatusRequest extends Generic {
+ public function run() {
+ $stime = microtime(true);
+ $this->header('Content-Type: application/javascript; charset=utf-8');
+
+ $stat = Daemon::getStateOfWorkers();
+ $stat['uptime'] = time() - Daemon::$startTime;
+ echo json_encode($stat);
+ }
+ }
+ ```
+
+ </details>
+ configuration:
+ file:
+ name: go.d/phpdaemon.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8509/FullStatus
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 2
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8509/FullStatus
+ - name: HTTP authentication
+ description: HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8509/FullStatus
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: HTTPS with self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8509/FullStatus
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8509/FullStatus
+
+ - name: remote
+ url: http://192.0.2.1:8509/FullStatus
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: phpdaemon.workers
+ description: Workers
+ unit: workers
+ chart_type: line
+ dimensions:
+ - name: alive
+ - name: shutdown
+ - name: phpdaemon.alive_workers
+ description: Alive Workers State
+ unit: workers
+ chart_type: line
+ dimensions:
+ - name: idle
+ - name: busy
+ - name: reloading
+ - name: phpdaemon.idle_workers
+ description: Idle Workers State
+ unit: workers
+ chart_type: line
+ dimensions:
+ - name: preinit
+ - name: init
+ - name: initialized
+ - name: phpdaemon.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
diff --git a/src/go/plugin/go.d/modules/phpdaemon/metrics.go b/src/go/plugin/go.d/modules/phpdaemon/metrics.go
new file mode 100644
index 000000000..1be3c0be3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/metrics.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpdaemon
+
+// https://github.com/kakserpom/phpdaemon/blob/master/PHPDaemon/Core/Daemon.php
+// see getStateOfWorkers()
+
+// WorkerState represents phpdaemon worker state.
+type WorkerState struct {
+	// Alive is the sum of Idle, Busy and Reloading.
+ Alive int64 `stm:"alive"`
+ Shutdown int64 `stm:"shutdown"`
+
+	// Idle means that the worker is not in the middle of executing a valuable callback (e.g. a request) at this moment.
+	// It does not mean that the worker has no pending operations.
+	// Idle is the sum of Preinit, Init and Initialized.
+ Idle int64 `stm:"idle"`
+	// Busy means that the worker is in the middle of executing a valuable callback.
+ Busy int64 `stm:"busy"`
+ Reloading int64 `stm:"reloading"`
+
+ Preinit int64 `stm:"preinit"`
+	// Init means that the worker is starting right now.
+ Init int64 `stm:"init"`
+ // Initialized means that the worker is in Idle state.
+ Initialized int64 `stm:"initialized"`
+}
+
+// FullStatus represents the full phpdaemon status: worker state plus uptime.
+type FullStatus struct {
+ WorkerState `stm:""`
+ Uptime *int64 `stm:"uptime"`
+}
diff --git a/src/go/plugin/go.d/modules/phpdaemon/phpdaemon.go b/src/go/plugin/go.d/modules/phpdaemon/phpdaemon.go
new file mode 100644
index 000000000..d9af10591
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/phpdaemon.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpdaemon
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("phpdaemon", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *PHPDaemon {
+ return &PHPDaemon{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8509/FullStatus",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type PHPDaemon struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ client *client
+}
+
+func (p *PHPDaemon) Configuration() any {
+ return p.Config
+}
+
+func (p *PHPDaemon) Init() error {
+ if err := p.validateConfig(); err != nil {
+ p.Error(err)
+ return err
+ }
+
+ c, err := p.initClient()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ p.client = c
+
+ p.Debugf("using URL %s", p.URL)
+ p.Debugf("using timeout: %s", p.Timeout)
+
+ return nil
+}
+
+func (p *PHPDaemon) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
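+	// uptime is optional in the status payload; add its chart only when it is reported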
+ if _, ok := mx["uptime"]; ok {
+ _ = p.charts.Add(uptimeChart.Copy())
+ }
+
+ return nil
+}
+
+func (p *PHPDaemon) Charts() *Charts {
+ return p.charts
+}
+
+func (p *PHPDaemon) Collect() map[string]int64 {
+ mx, err := p.collect()
+
+ if err != nil {
+ p.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (p *PHPDaemon) Cleanup() {
+ if p.client != nil && p.client.httpClient != nil {
+ p.client.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/phpdaemon/phpdaemon_test.go b/src/go/plugin/go.d/modules/phpdaemon/phpdaemon_test.go
new file mode 100644
index 000000000..e9e35af6d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/phpdaemon_test.go
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpdaemon
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataFullStatusMetrics, _ = os.ReadFile("testdata/fullstatus.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataFullStatusMetrics": dataFullStatusMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPHPDaemon_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &PHPDaemon{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPHPDaemon_Init(t *testing.T) {
+ job := New()
+
+ require.NoError(t, job.Init())
+ assert.NotNil(t, job.client)
+}
+
+func TestPHPDaemon_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataFullStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+}
+
+func TestPHPDaemon_CheckNG(t *testing.T) {
+ job := New()
+ job.URL = "http://127.0.0.1:38001"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestPHPDaemon_Charts(t *testing.T) {
+ job := New()
+
+ assert.NotNil(t, job.Charts())
+ assert.False(t, job.charts.Has(uptimeChart.ID))
+
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataFullStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+ assert.True(t, job.charts.Has(uptimeChart.ID))
+}
+
+func TestPHPDaemon_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestPHPDaemon_Collect(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataFullStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "alive": 350,
+ "busy": 200,
+ "idle": 50,
+ "init": 20,
+ "initialized": 10,
+ "preinit": 20,
+ "reloading": 100,
+ "shutdown": 500,
+ "uptime": 15765,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+
+}
+
+func TestPHPDaemon_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestPHPDaemon_404(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
diff --git a/src/go/plugin/go.d/modules/phpdaemon/testdata/config.json b/src/go/plugin/go.d/modules/phpdaemon/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/phpdaemon/testdata/config.yaml b/src/go/plugin/go.d/modules/phpdaemon/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/phpdaemon/testdata/fullstatus.json b/src/go/plugin/go.d/modules/phpdaemon/testdata/fullstatus.json
new file mode 100644
index 000000000..b7d2a5e77
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpdaemon/testdata/fullstatus.json
@@ -0,0 +1,10 @@
+{
+ "idle": 50,
+ "busy": 200,
+ "alive": 350,
+ "shutdown": 500,
+ "preinit": 20,
+ "init": 20,
+ "reloading": 100,
+ "uptime": 15765
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/phpfpm/README.md b/src/go/plugin/go.d/modules/phpfpm/README.md
new file mode 120000
index 000000000..2953ff4df
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/README.md
@@ -0,0 +1 @@
+integrations/php-fpm.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/phpfpm/charts.go b/src/go/plugin/go.d/modules/phpfpm/charts.go
new file mode 100644
index 000000000..2e1e35cf3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/charts.go
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpfpm
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+)
+
+var charts = Charts{
+ {
+ ID: "connections",
+ Title: "Active Connections",
+ Units: "connections",
+ Fam: "active connections",
+ Ctx: "phpfpm.connections",
+ Dims: Dims{
+ {ID: "active"},
+ {ID: "maxActive", Name: "max active"},
+ {ID: "idle"},
+ },
+ },
+ {
+ ID: "requests",
+ Title: "Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "phpfpm.requests",
+ Dims: Dims{
+ {ID: "requests", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "performance",
+ Title: "Performance",
+ Units: "status",
+ Fam: "performance",
+ Ctx: "phpfpm.performance",
+ Dims: Dims{
+ {ID: "reached", Name: "max children reached"},
+ {ID: "slow", Name: "slow requests"},
+ },
+ },
+ {
+ ID: "request_duration",
+ Title: "Requests Duration Among All Idle Processes",
+ Units: "milliseconds",
+ Fam: "request duration",
+ Ctx: "phpfpm.request_duration",
+ Dims: Dims{
+ {ID: "minReqDur", Name: "min", Div: 1000},
+ {ID: "maxReqDur", Name: "max", Div: 1000},
+ {ID: "avgReqDur", Name: "avg", Div: 1000},
+ },
+ },
+ {
+ ID: "request_cpu",
+ Title: "Last Request CPU Usage Among All Idle Processes",
+ Units: "percentage",
+ Fam: "request CPU",
+ Ctx: "phpfpm.request_cpu",
+ Dims: Dims{
+ {ID: "minReqCpu", Name: "min"},
+ {ID: "maxReqCpu", Name: "max"},
+ {ID: "avgReqCpu", Name: "avg"},
+ },
+ },
+ {
+ ID: "request_mem",
+ Title: "Last Request Memory Usage Among All Idle Processes",
+ Units: "KB",
+ Fam: "request memory",
+ Ctx: "phpfpm.request_mem",
+ Dims: Dims{
+ {ID: "minReqMem", Name: "min", Div: 1024},
+ {ID: "maxReqMem", Name: "max", Div: 1024},
+ {ID: "avgReqMem", Name: "avg", Div: 1024},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/phpfpm/client.go b/src/go/plugin/go.d/modules/phpfpm/client.go
new file mode 100644
index 000000000..4e8e8cec8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/client.go
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpfpm
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ fcgiclient "github.com/kanocz/fcgi_client"
+)
+
+type (
+ status struct {
+ Active int64 `json:"active processes" stm:"active"`
+ MaxActive int64 `json:"max active processes" stm:"maxActive"`
+ Idle int64 `json:"idle processes" stm:"idle"`
+ Requests int64 `json:"accepted conn" stm:"requests"`
+ Reached int64 `json:"max children reached" stm:"reached"`
+ Slow int64 `json:"slow requests" stm:"slow"`
+ Processes []proc `json:"processes"`
+ }
+ proc struct {
+ PID int64 `json:"pid"`
+ State string `json:"state"`
+ Duration requestDuration `json:"request duration"`
+ CPU float64 `json:"last request cpu"`
+ Memory int64 `json:"last request memory"`
+ }
+ requestDuration int64
+)
+
+// UnmarshalJSON decodes request duration values, falling back to 0 when the value is not an integer.
+func (rd *requestDuration) UnmarshalJSON(b []byte) error {
+ if rdc, err := strconv.Atoi(string(b)); err != nil {
+ *rd = 0
+ } else {
+ *rd = requestDuration(rdc)
+ }
+ return nil
+}
+
+type client interface {
+ getStatus() (*status, error)
+}
+
+type httpClient struct {
+ client *http.Client
+ req web.Request
+ dec decoder
+}
+
+func newHTTPClient(c *http.Client, r web.Request) (*httpClient, error) {
+ u, err := url.Parse(r.URL)
+ if err != nil {
+ return nil, err
+ }
+
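+	// Pick the decoder from the URL: a 'json' query parameter selects the JSON status format, otherwise the plain-text one.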
+ dec := decodeText
+ if _, ok := u.Query()["json"]; ok {
+ dec = decodeJSON
+ }
+ return &httpClient{
+ client: c,
+ req: r,
+ dec: dec,
+ }, nil
+}
+
+func (c *httpClient) getStatus() (*status, error) {
+ req, err := web.NewHTTPRequest(c.req)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating HTTP request: %v", err)
+ }
+
+ resp, err := c.client.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on HTTP request to '%s': %v", req.URL, err)
+ }
+ defer func() {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }()
+
+ if resp.StatusCode != http.StatusOK {
+ return nil, fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
+ }
+
+ st := &status{}
+ if err := c.dec(resp.Body, st); err != nil {
+ return nil, fmt.Errorf("error parsing HTTP response from '%s': %v", req.URL, err)
+ }
+
+ return st, nil
+}
+
+type socketClient struct {
+ *logger.Logger
+
+ socket string
+ timeout time.Duration
+ env map[string]string
+}
+
+func newSocketClient(log *logger.Logger, socket string, timeout time.Duration, fcgiPath string) *socketClient {
+ return &socketClient{
+ Logger: log,
+ socket: socket,
+ timeout: timeout,
+ env: map[string]string{
+ "SCRIPT_NAME": fcgiPath,
+ "SCRIPT_FILENAME": fcgiPath,
+ "SERVER_SOFTWARE": "go / fcgiclient ",
+ "REMOTE_ADDR": "127.0.0.1",
+ "QUERY_STRING": "json&full",
+ "REQUEST_METHOD": "GET",
+ "CONTENT_TYPE": "application/json",
+ },
+ }
+}
+
+func (c *socketClient) getStatus() (*status, error) {
+ socket, err := fcgiclient.DialTimeout("unix", c.socket, c.timeout)
+ if err != nil {
+ return nil, fmt.Errorf("error on connecting to socket '%s': %v", c.socket, err)
+ }
+ defer socket.Close()
+
+ if err := socket.SetTimeout(c.timeout); err != nil {
+ return nil, fmt.Errorf("error on setting socket timeout: %v", err)
+ }
+
+ resp, err := socket.Get(c.env)
+ if err != nil {
+ return nil, fmt.Errorf("error on getting data from socket '%s': %v", c.socket, err)
+ }
+
+ content, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error on reading response from socket '%s': %v", c.socket, err)
+ }
+
+ if len(content) == 0 {
+ return nil, fmt.Errorf("no data returned from socket '%s'", c.socket)
+ }
+
+ st := &status{}
+ if err := json.Unmarshal(content, st); err != nil {
+ c.Debugf("failed to JSON decode data: %s", string(content))
+ return nil, fmt.Errorf("error on decoding response from socket '%s': %v", c.socket, err)
+ }
+
+ return st, nil
+}
+
+type tcpClient struct {
+ *logger.Logger
+
+ address string
+ timeout time.Duration
+ env map[string]string
+}
+
+func newTcpClient(log *logger.Logger, address string, timeout time.Duration, fcgiPath string) *tcpClient {
+ return &tcpClient{
+ Logger: log,
+ address: address,
+ timeout: timeout,
+ env: map[string]string{
+ "SCRIPT_NAME": fcgiPath,
+ "SCRIPT_FILENAME": fcgiPath,
+ "SERVER_SOFTWARE": "go / fcgiclient ",
+ "REMOTE_ADDR": "127.0.0.1",
+ "QUERY_STRING": "json&full",
+ "REQUEST_METHOD": "GET",
+ "CONTENT_TYPE": "application/json",
+ },
+ }
+}
+
+func (c *tcpClient) getStatus() (*status, error) {
+ client, err := fcgiclient.DialTimeout("tcp", c.address, c.timeout)
+ if err != nil {
+ return nil, fmt.Errorf("error on connecting to address '%s': %v", c.address, err)
+ }
+ defer client.Close()
+
+ resp, err := client.Get(c.env)
+ if err != nil {
+ return nil, fmt.Errorf("error on getting data from address '%s': %v", c.address, err)
+ }
+
+ content, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error on reading response from address '%s': %v", c.address, err)
+ }
+
+ if len(content) == 0 {
+ return nil, fmt.Errorf("no data returned from address '%s'", c.address)
+ }
+
+ st := &status{}
+ if err := json.Unmarshal(content, st); err != nil {
+ c.Debugf("failed to JSON decode data: %s", string(content))
+ return nil, fmt.Errorf("error on decoding response from address '%s': %v", c.address, err)
+ }
+
+ return st, nil
+}
diff --git a/src/go/plugin/go.d/modules/phpfpm/collect.go b/src/go/plugin/go.d/modules/phpfpm/collect.go
new file mode 100644
index 000000000..08a3b9f61
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/collect.go
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpfpm
+
+import (
+ "math"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+func (p *Phpfpm) collect() (map[string]int64, error) {
+ st, err := p.client.getStatus()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := stm.ToMap(st)
+ if !hasIdleProcesses(st.Processes) {
+ return mx, nil
+ }
+
+ calcIdleProcessesRequestsDuration(mx, st.Processes)
+ calcIdleProcessesLastRequestCPU(mx, st.Processes)
+ calcIdleProcessesLastRequestMemory(mx, st.Processes)
+ return mx, nil
+}
+
+func calcIdleProcessesRequestsDuration(mx map[string]int64, processes []proc) {
+ statProcesses(mx, processes, "ReqDur", func(p proc) int64 { return int64(p.Duration) })
+}
+
+func calcIdleProcessesLastRequestCPU(mx map[string]int64, processes []proc) {
+ statProcesses(mx, processes, "ReqCpu", func(p proc) int64 { return int64(p.CPU) })
+}
+
+func calcIdleProcessesLastRequestMemory(mx map[string]int64, processes []proc) {
+ statProcesses(mx, processes, "ReqMem", func(p proc) int64 { return p.Memory })
+}
+
+func hasIdleProcesses(processes []proc) bool {
+ for _, p := range processes {
+ if p.State == "Idle" {
+ return true
+ }
+ }
+ return false
+}
+
+type accessor func(p proc) int64
+
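+// statProcesses aggregates a per-process value across processes in the Idle state,
+// storing the minimum, maximum and average under the min<met>, max<met> and avg<met> keys
+// (e.g. minReqDur, maxReqDur, avgReqDur).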
+func statProcesses(m map[string]int64, processes []proc, met string, acc accessor) {
+ var sum, count, min, max int64
+ for _, proc := range processes {
+ if proc.State != "Idle" {
+ continue
+ }
+
+ val := acc(proc)
+ sum += val
+ count += 1
+ if count == 1 {
+ min, max = val, val
+ continue
+ }
+ min = int64(math.Min(float64(min), float64(val)))
+ max = int64(math.Max(float64(max), float64(val)))
+ }
+
+ m["min"+met] = min
+ m["max"+met] = max
+ m["avg"+met] = sum / count
+}
diff --git a/src/go/plugin/go.d/modules/phpfpm/config_schema.json b/src/go/plugin/go.d/modules/phpfpm/config_schema.json
new file mode 100644
index 000000000..81b4005af
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/config_schema.json
@@ -0,0 +1,211 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "PHP-FPM collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for requests.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the PHP-FPM [status page](https://www.php.net/manual/en/fpm.status.php).",
+ "type": "string",
+ "default": "http://127.0.0.1/status?full&json",
+ "format": "uri"
+ },
+ "address": {
+ "title": "Address",
+ "description": "The PHP-FPM daemon's TCP listening address. This will be preferred over the **URL** if set.",
+ "type": "string",
+ "default": ""
+ },
+ "socket": {
+ "title": "Socket",
+ "description": "The PHP-FPM daemon's Unix socket. This will be preferred over both the **URL** and **Address** if set.",
+ "type": "string",
+ "default": "",
+ "pattern": "^$|^/"
+ },
+ "fcgi_path": {
+ "title": "FCGI status path",
+ "description": "The URI to view the [FPM status page](https://www.php.net/manual/en/fpm.status.php).",
+ "type": "string",
+ "default": "/status",
+ "pattern": "^$|^/"
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "address": {
+ "ui:placeholder": "127.0.0.1:9000"
+ },
+ "socket": {
+ "ui:placeholder": "/tmp/php-fpm.sock"
+ },
+ "fcgi_path": {
+ "ui:widget": "hidden"
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "timeout",
+ "url",
+ "address",
+ "socket",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/phpfpm/decode.go b/src/go/plugin/go.d/modules/phpfpm/decode.go
new file mode 100644
index 000000000..021e1fb4c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/decode.go
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpfpm
+
+import (
+ "bufio"
+ "encoding/json"
+ "errors"
+ "io"
+ "strconv"
+ "strings"
+)
+
+type decoder func(r io.Reader, s *status) error
+
+func decodeJSON(r io.Reader, s *status) error {
+ return json.NewDecoder(r).Decode(s)
+}
+
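+// decodeText parses the plain-text status page: a pool status block followed by optional
+// per-process blocks, with blocks separated by lines of asterisks (see readParts).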
+func decodeText(r io.Reader, s *status) error {
+ parts := readParts(r)
+ if len(parts) == 0 {
+ return errors.New("invalid text format")
+ }
+
+ part, parts := parts[0], parts[1:]
+ if err := readStatus(part, s); err != nil {
+ return err
+ }
+
+ return readProcesses(parts, s)
+}
+
+func readParts(r io.Reader) [][]string {
+ sc := bufio.NewScanner(r)
+
+ var parts [][]string
+ var lines []string
+ for sc.Scan() {
+ line := strings.Trim(sc.Text(), "\r\n ")
+ // Split parts by star border
+ if strings.HasPrefix(line, "***") {
+ parts = append(parts, lines)
+ lines = []string{}
+ continue
+ }
+ // Skip empty lines
+ if line == "" {
+ continue
+ }
+ lines = append(lines, line)
+ }
+
+ if len(lines) > 0 {
+ parts = append(parts, lines)
+ }
+ return parts
+}
+
+func readStatus(data []string, s *status) error {
+ for _, line := range data {
+ key, val, err := parseLine(line)
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "active processes":
+ s.Active = parseInt(val)
+ case "max active processes":
+ s.MaxActive = parseInt(val)
+ case "idle processes":
+ s.Idle = parseInt(val)
+ case "accepted conn":
+ s.Requests = parseInt(val)
+ case "max children reached":
+ s.Reached = parseInt(val)
+ case "slow requests":
+ s.Slow = parseInt(val)
+ }
+ }
+ return nil
+}
+
+func readProcesses(procs [][]string, s *status) error {
+ for _, part := range procs {
+ var proc proc
+ for _, line := range part {
+ key, val, err := parseLine(line)
+ if err != nil {
+ return err
+ }
+
+ switch key {
+ case "state":
+ proc.State = val
+ case "request duration":
+ proc.Duration = requestDuration(parseInt(val))
+ case "last request cpu":
+ proc.CPU = parseFloat(val)
+ case "last request memory":
+ proc.Memory = parseInt(val)
+ }
+ }
+ s.Processes = append(s.Processes, proc)
+ }
+ return nil
+}
+
+func parseLine(s string) (string, string, error) {
+ kv := strings.SplitN(s, ":", 2)
+ if len(kv) != 2 {
+ return "", "", errors.New("invalid text format line")
+ }
+ return strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]), nil
+}
+
+func parseInt(s string) int64 {
+ val, err := strconv.ParseInt(strings.TrimSpace(s), 10, 64)
+ if err != nil {
+ return 0
+ }
+ return val
+}
+
+func parseFloat(s string) float64 {
+ val, err := strconv.ParseFloat(strings.TrimSpace(s), 64)
+ if err != nil {
+ return 0
+ }
+ return val
+}
diff --git a/src/go/plugin/go.d/modules/phpfpm/init.go b/src/go/plugin/go.d/modules/phpfpm/init.go
new file mode 100644
index 000000000..5615012f0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/init.go
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpfpm
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (p *Phpfpm) initClient() (client, error) {
+ if p.Socket != "" {
+ return p.initSocketClient()
+ }
+ if p.Address != "" {
+ return p.initTcpClient()
+ }
+ if p.URL != "" {
+ return p.initHTTPClient()
+ }
+
+ return nil, errors.New("neither 'socket', 'address', nor 'url' is set")
+}
+
+func (p *Phpfpm) initHTTPClient() (*httpClient, error) {
+ c, err := web.NewHTTPClient(p.Client)
+ if err != nil {
+ return nil, fmt.Errorf("create HTTP client: %v", err)
+ }
+
+ p.Debugf("using HTTP client: url='%s', timeout='%s'", p.URL, p.Timeout)
+
+ return newHTTPClient(c, p.Request)
+}
+
+func (p *Phpfpm) initSocketClient() (*socketClient, error) {
+ if _, err := os.Stat(p.Socket); err != nil {
+ return nil, fmt.Errorf("the socket '%s' does not exist: %v", p.Socket, err)
+ }
+
+ p.Debugf("using socket client: socket='%s', timeout='%s', fcgi_path='%s'", p.Socket, p.Timeout, p.FcgiPath)
+
+ return newSocketClient(p.Logger, p.Socket, p.Timeout.Duration(), p.FcgiPath), nil
+}
+
+func (p *Phpfpm) initTcpClient() (*tcpClient, error) {
+ p.Debugf("using tcp client: address='%s', timeout='%s', fcgi_path='%s'", p.Address, p.Timeout, p.FcgiPath)
+
+ return newTcpClient(p.Logger, p.Address, p.Timeout.Duration(), p.FcgiPath), nil
+}
diff --git a/src/go/plugin/go.d/modules/phpfpm/integrations/php-fpm.md b/src/go/plugin/go.d/modules/phpfpm/integrations/php-fpm.md
new file mode 100644
index 000000000..1839d00d6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/integrations/php-fpm.md
@@ -0,0 +1,264 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/phpfpm/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/phpfpm/metadata.yaml"
+sidebar_label: "PHP-FPM"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# PHP-FPM
+
+
+<img src="https://netdata.cloud/img/php.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: phpfpm
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors PHP-FPM instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per PHP-FPM instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| phpfpm.connections | active, max_active, idle | connections |
+| phpfpm.requests | requests | requests/s |
+| phpfpm.performance | max_children_reached, slow_requests | status |
+| phpfpm.request_duration | min, max, avg | milliseconds |
+| phpfpm.request_cpu | min, max, avg | percentage |
+| phpfpm.request_mem | min, max, avg | KB |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable status page
+
+Uncomment the `pm.status_path = /status` variable in the `php-fpm` config file.
+
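+For example, in the pool configuration file (commonly `/etc/php-fpm.d/www.conf` or `/etc/php/<version>/fpm/pool.d/www.conf`; exact paths vary by distribution and are shown here only as an illustration), the directive should look like this after uncommenting:
+
+```ini
+; expose the FPM status page for this pool
+pm.status_path = /status
+```
+
+Reload or restart the PHP-FPM service afterwards so the change takes effect.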
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/phpfpm.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/phpfpm.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/status?full&json | yes |
+| socket | Server Unix socket. | | no |
+| address | Server address in IP:PORT format. | | no |
+| fcgi_path | Status path. | /status | no |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### HTTP
+
+Collecting data from a local instance over HTTP.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://localhost/status?full&json
+
+```
+</details>
+
+##### Unix socket
+
+Collecting data from a local instance over Unix socket.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ socket: '/tmp/php-fpm.sock'
+
+```
+</details>
+
+##### TCP socket
+
+Collecting data from a local instance over TCP socket.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:9000
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://localhost/status?full&json
+
+ - name: remote
+ url: http://203.0.113.10/status?full&json
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `phpfpm` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m phpfpm
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `phpfpm` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep phpfpm
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep phpfpm /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep phpfpm
+```
+
+
diff --git a/src/go/plugin/go.d/modules/phpfpm/metadata.yaml b/src/go/plugin/go.d/modules/phpfpm/metadata.yaml
new file mode 100644
index 000000000..739e7b7b8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/metadata.yaml
@@ -0,0 +1,230 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-phpfpm
+ plugin_name: go.d.plugin
+ module_name: phpfpm
+ monitored_instance:
+ name: PHP-FPM
+ link: https://php-fpm.org/
+ icon_filename: php.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - phpfpm
+ - php
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors PHP-FPM instances.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable status page
+ description: |
+ Uncomment the `pm.status_path = /status` variable in the `php-fpm` config file.
+ configuration:
+ file:
+ name: go.d/phpfpm.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1/status?full&json
+ required: true
+ - name: socket
+ description: Server Unix socket.
+ default_value: ""
+ required: false
+ - name: address
+ description: Server address in IP:PORT format.
+ default_value: ""
+ required: false
+ - name: fcgi_path
+ description: Status path.
+ default_value: /status
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: HTTP
+ description: Collecting data from a local instance over HTTP.
+ config: |
+ jobs:
+ - name: local
+ url: http://localhost/status?full&json
+ - name: Unix socket
+ description: Collecting data from a local instance over Unix socket.
+ config: |
+ jobs:
+ - name: local
+ socket: '/tmp/php-fpm.sock'
+ - name: TCP socket
+ description: Collecting data from a local instance over TCP socket.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:9000
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://localhost/status?full&json
+
+ - name: remote
+ url: http://203.0.113.10/status?full&json
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: phpfpm.connections
+ description: Active Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: max_active
+ - name: idle
+ - name: phpfpm.requests
+ description: Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: phpfpm.performance
+ description: Performance
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: max_children_reached
+ - name: slow_requests
+ - name: phpfpm.request_duration
+ description: Requests Duration Among All Idle Processes
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: phpfpm.request_cpu
+ description: Last Request CPU Usage Among All Idle Processes
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: phpfpm.request_mem
+ description: Last Request Memory Usage Among All Idle Processes
+ unit: KB
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
diff --git a/src/go/plugin/go.d/modules/phpfpm/phpfpm.go b/src/go/plugin/go.d/modules/phpfpm/phpfpm.go
new file mode 100644
index 000000000..76057c8f9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/phpfpm.go
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpfpm
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("phpfpm", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Phpfpm {
+ return &Phpfpm{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1/status?full&json",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ FcgiPath: "/status",
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ Socket string `yaml:"socket,omitempty" json:"socket"`
+ Address string `yaml:"address,omitempty" json:"address"`
+ FcgiPath string `yaml:"fcgi_path,omitempty" json:"fcgi_path"`
+}
+
+type Phpfpm struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ client client
+}
+
+func (p *Phpfpm) Configuration() any {
+ return p.Config
+}
+
+func (p *Phpfpm) Init() error {
+ c, err := p.initClient()
+ if err != nil {
+ p.Errorf("init client: %v", err)
+ return err
+ }
+ p.client = c
+
+ return nil
+}
+
+func (p *Phpfpm) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *Phpfpm) Charts() *Charts {
+ return charts.Copy()
+}
+
+func (p *Phpfpm) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (p *Phpfpm) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/phpfpm/phpfpm_test.go b/src/go/plugin/go.d/modules/phpfpm/phpfpm_test.go
new file mode 100644
index 000000000..b089c1ef8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/phpfpm_test.go
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpfpm
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatusJSON, _ = os.ReadFile("testdata/status.json")
+ dataStatusFullJSON, _ = os.ReadFile("testdata/status-full.json")
+ dataStatusFullNoIdleJSON, _ = os.ReadFile("testdata/status-full-no-idle.json")
+ dataStatusText, _ = os.ReadFile("testdata/status.txt")
+ dataStatusFullText, _ = os.ReadFile("testdata/status-full.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStatusJSON": dataStatusJSON,
+ "dataStatusFullJSON": dataStatusFullJSON,
+ "dataStatusFullNoIdleJSON": dataStatusFullNoIdleJSON,
+ "dataStatusText": dataStatusText,
+ "dataStatusFullText": dataStatusFullText,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPhpfpm_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Phpfpm{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPhpfpm_Init(t *testing.T) {
+ job := New()
+
+ require.NoError(t, job.Init())
+ assert.NotNil(t, job.client)
+}
+
+func TestPhpfpm_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusText)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.NoError(t, job.Check())
+}
+
+func TestPhpfpm_CheckReturnsFalseOnFailure(t *testing.T) {
+ job := New()
+ job.URL = "http://127.0.0.1:38001/us"
+ require.NoError(t, job.Init())
+
+ assert.Error(t, job.Check())
+}
+
+func TestPhpfpm_Charts(t *testing.T) {
+ job := New()
+
+ assert.NotNil(t, job.Charts())
+}
+
+func TestPhpfpm_CollectJSON(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusJSON)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/?json"
+ require.NoError(t, job.Init())
+
+ got := job.Collect()
+
+ want := map[string]int64{
+ "active": 1,
+ "idle": 1,
+ "maxActive": 1,
+ "reached": 0,
+ "requests": 21,
+ "slow": 0,
+ }
+ assert.Equal(t, want, got)
+}
+
+func TestPhpfpm_CollectJSONFull(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusFullJSON)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/?json"
+ require.NoError(t, job.Init())
+
+ got := job.Collect()
+
+ want := map[string]int64{
+ "active": 1,
+ "idle": 1,
+ "maxActive": 1,
+ "reached": 0,
+ "requests": 22,
+ "slow": 0,
+ "minReqCpu": 0,
+ "maxReqCpu": 10,
+ "avgReqCpu": 5,
+ "minReqDur": 0,
+ "maxReqDur": 919,
+ "avgReqDur": 459,
+ "minReqMem": 2093045,
+ "maxReqMem": 2097152,
+ "avgReqMem": 2095098,
+ }
+ assert.Equal(t, want, got)
+}
+
+func TestPhpfpm_CollectNoIdleProcessesJSONFull(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusFullNoIdleJSON)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL + "/?json"
+ require.NoError(t, job.Init())
+
+ got := job.Collect()
+
+ want := map[string]int64{
+ "active": 1,
+ "idle": 1,
+ "maxActive": 1,
+ "reached": 0,
+ "requests": 22,
+ "slow": 0,
+ }
+ assert.Equal(t, want, got)
+}
+
+func TestPhpfpm_CollectText(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusText)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ got := job.Collect()
+
+ want := map[string]int64{
+ "active": 1,
+ "idle": 1,
+ "maxActive": 1,
+ "reached": 0,
+ "requests": 19,
+ "slow": 0,
+ }
+ assert.Equal(t, want, got)
+}
+
+func TestPhpfpm_CollectTextFull(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusFullText)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ got := job.Collect()
+
+ want := map[string]int64{
+ "active": 1,
+ "idle": 1,
+ "maxActive": 1,
+ "reached": 0,
+ "requests": 20,
+ "slow": 0,
+ "minReqCpu": 0,
+ "maxReqCpu": 10,
+ "avgReqCpu": 5,
+ "minReqDur": 0,
+ "maxReqDur": 536,
+ "avgReqDur": 268,
+ "minReqMem": 2093045,
+ "maxReqMem": 2097152,
+ "avgReqMem": 2095098,
+ }
+ assert.Equal(t, want, got)
+}
+
+func TestPhpfpm_CollectReturnsNothingWhenInvalidData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye\nfrom someone\nfoobar"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.Len(t, job.Collect(), 0)
+}
+
+func TestPhpfpm_CollectReturnsNothingWhenEmptyData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte{})
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.Len(t, job.Collect(), 0)
+}
+
+func TestPhpfpm_CollectReturnsNothingWhenBadStatusCode(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+
+ assert.Len(t, job.Collect(), 0)
+}
+
+func TestPhpfpm_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
diff --git a/src/go/plugin/go.d/modules/phpfpm/testdata/config.json b/src/go/plugin/go.d/modules/phpfpm/testdata/config.json
new file mode 100644
index 000000000..458343f74
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/testdata/config.json
@@ -0,0 +1,23 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "socket": "ok",
+ "address": "ok",
+ "fcgi_path": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/phpfpm/testdata/config.yaml b/src/go/plugin/go.d/modules/phpfpm/testdata/config.yaml
new file mode 100644
index 000000000..6c7bea094
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/testdata/config.yaml
@@ -0,0 +1,20 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+socket: "ok"
+address: "ok"
+fcgi_path: "ok"
diff --git a/src/go/plugin/go.d/modules/phpfpm/testdata/status-full-no-idle.json b/src/go/plugin/go.d/modules/phpfpm/testdata/status-full-no-idle.json
new file mode 100644
index 000000000..e5b63accd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/testdata/status-full-no-idle.json
@@ -0,0 +1,63 @@
+{
+ "pool": "www",
+ "process manager": "dynamic",
+ "start time": 1566371090,
+ "start since": 1119,
+ "accepted conn": 22,
+ "listen queue": 0,
+ "max listen queue": 0,
+ "listen queue len": 0,
+ "idle processes": 1,
+ "active processes": 1,
+ "total processes": 2,
+ "max active processes": 1,
+ "max children reached": 0,
+ "slow requests": 0,
+ "processes": [
+ {
+ "pid": 67858,
+ "state": "Running",
+ "start time": 1566371090,
+ "start since": 1119,
+ "requests": 11,
+ "request duration": 834,
+ "request method": "GET",
+ "request uri": "/status?json&full",
+ "content length": 0,
+ "user": "-",
+ "script": "-",
+ "last request cpu": 0,
+ "last request memory": 0
+ },
+ {
+ "pid": 67859,
+ "state": "Running",
+ "start time": 1566371090,
+ "start since": 1119,
+ "requests": 11,
+ "request duration": 919,
+ "request method": "GET",
+ "request uri": "/status?json",
+ "content length": 0,
+ "user": "-",
+ "script": "-",
+ "last request cpu": 0,
+ "last request memory": 2097152
+ },
+ {
+ "pid": 67860,
+ "state": "Running",
+ "start time": 1566371090,
+ "start since": 1119,
+ "requests": 11,
+ "request duration": 18446744073709551227,
+ "request method": "GET",
+ "request uri": "/status?json&full",
+ "content length": 0,
+ "user": "-",
+ "script": "-",
+ "last request cpu": 10.0,
+ "last request memory": 2093045
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/phpfpm/testdata/status-full.json b/src/go/plugin/go.d/modules/phpfpm/testdata/status-full.json
new file mode 100644
index 000000000..456f6253e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/testdata/status-full.json
@@ -0,0 +1,63 @@
+{
+ "pool": "www",
+ "process manager": "dynamic",
+ "start time": 1566371090,
+ "start since": 1119,
+ "accepted conn": 22,
+ "listen queue": 0,
+ "max listen queue": 0,
+ "listen queue len": 0,
+ "idle processes": 1,
+ "active processes": 1,
+ "total processes": 2,
+ "max active processes": 1,
+ "max children reached": 0,
+ "slow requests": 0,
+ "processes": [
+ {
+ "pid": 67858,
+ "state": "Running",
+ "start time": 1566371090,
+ "start since": 1119,
+ "requests": 11,
+ "request duration": 834,
+ "request method": "GET",
+ "request uri": "/status?json&full",
+ "content length": 0,
+ "user": "-",
+ "script": "-",
+ "last request cpu": 0,
+ "last request memory": 0
+ },
+ {
+ "pid": 67859,
+ "state": "Idle",
+ "start time": 1566371090,
+ "start since": 1119,
+ "requests": 11,
+ "request duration": 919,
+ "request method": "GET",
+ "request uri": "/status?json",
+ "content length": 0,
+ "user": "-",
+ "script": "-",
+ "last request cpu": 0,
+ "last request memory": 2097152
+ },
+ {
+ "pid": 67860,
+ "state": "Idle",
+ "start time": 1566371090,
+ "start since": 1119,
+ "requests": 11,
+ "request duration": 18446744073709551227,
+ "request method": "GET",
+ "request uri": "/status?json&full",
+ "content length": 0,
+ "user": "-",
+ "script": "-",
+ "last request cpu": 10.0,
+ "last request memory": 2093045
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/phpfpm/testdata/status-full.txt b/src/go/plugin/go.d/modules/phpfpm/testdata/status-full.txt
new file mode 100644
index 000000000..a5e90987c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/testdata/status-full.txt
@@ -0,0 +1,59 @@
+pool: www
+process manager: dynamic
+start time: 21/Aug/2019:09:04:50 +0200
+start since: 1079
+accepted conn: 20
+listen queue: 0
+max listen queue: 0
+listen queue len: 0
+idle processes: 1
+active processes: 1
+total processes: 2
+max active processes: 1
+max children reached: 0
+slow requests: 0
+
+************************
+pid: 67858
+state: Running
+start time: 21/Aug/2019:09:04:50 +0200
+start since: 1079
+requests: 10
+request duration: 697
+request method: GET
+request URI: /status?full
+content length: 0
+user: -
+script: -
+last request cpu: 0.00
+last request memory: 0
+
+************************
+pid: 67859
+state: Idle
+start time: 21/Aug/2019:09:04:50 +0200
+start since: 1079
+requests: 10
+request duration: 536
+request method: GET
+request URI: /status
+content length: 0
+user: -
+script: -
+last request cpu: 0.00
+last request memory: 2097152
+
+************************
+pid: 67860
+state: Idle
+start time: 21/Aug/2019:09:04:50 +0200
+start since: 1079
+requests: 10
+request duration: 18446744073709551227
+request method: GET
+request URI: /status?full
+content length: 0
+user: -
+script: -
+last request cpu: 10.00
+last request memory: 2093045 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/phpfpm/testdata/status.json b/src/go/plugin/go.d/modules/phpfpm/testdata/status.json
new file mode 100644
index 000000000..80af3e0bc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/testdata/status.json
@@ -0,0 +1,16 @@
+{
+ "pool": "www",
+ "process manager": "dynamic",
+ "start time": 1566371090,
+ "start since": 1088,
+ "accepted conn": 21,
+ "listen queue": 0,
+ "max listen queue": 0,
+ "listen queue len": 0,
+ "idle processes": 1,
+ "active processes": 1,
+ "total processes": 2,
+ "max active processes": 1,
+ "max children reached": 0,
+ "slow requests": 0
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/phpfpm/testdata/status.txt b/src/go/plugin/go.d/modules/phpfpm/testdata/status.txt
new file mode 100644
index 000000000..08dc158fb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/phpfpm/testdata/status.txt
@@ -0,0 +1,14 @@
+pool: www
+process manager: dynamic
+start time: 21/Aug/2019:09:04:50 +0200
+start since: 1066
+accepted conn: 19
+listen queue: 0
+max listen queue: 0
+listen queue len: 0
+idle processes: 1
+active processes: 1
+total processes: 2
+max active processes: 1
+max children reached: 0
+slow requests: 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pihole/README.md b/src/go/plugin/go.d/modules/pihole/README.md
new file mode 120000
index 000000000..b8d3a7b40
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/README.md
@@ -0,0 +1 @@
+integrations/pi-hole.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pihole/charts.go b/src/go/plugin/go.d/modules/pihole/charts.go
new file mode 100644
index 000000000..862a2544f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/charts.go
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pihole
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioDNSQueriesTotal = module.Priority + iota
+ prioDNSQueries
+ prioDNSQueriesPerc
+ prioUniqueClients
+ prioDomainsOnBlocklist
+ prioBlocklistLastUpdate
+ prioUnwantedDomainsBlockingStatus
+
+ prioDNSQueriesTypes
+ prioDNSQueriesForwardedDestination
+)
+
+var baseCharts = module.Charts{
+ chartDNSQueriesTotal.Copy(),
+ chartDNSQueries.Copy(),
+ chartDNSQueriesPerc.Copy(),
+ chartUniqueClients.Copy(),
+ chartDomainsOnBlocklist.Copy(),
+ chartBlocklistLastUpdate.Copy(),
+ chartUnwantedDomainsBlockingStatus.Copy(),
+}
+
+var (
+ chartDNSQueriesTotal = module.Chart{
+ ID: "dns_queries_total",
+ Title: "DNS Queries Total (Cached, Blocked and Forwarded)",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "pihole.dns_queries_total",
+ Priority: prioDNSQueriesTotal,
+ Dims: module.Dims{
+ {ID: "dns_queries_today", Name: "queries"},
+ },
+ }
+ chartDNSQueries = module.Chart{
+ ID: "dns_queries",
+ Title: "DNS Queries",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "pihole.dns_queries",
+ Type: module.Stacked,
+ Priority: prioDNSQueries,
+ Dims: module.Dims{
+ {ID: "queries_cached", Name: "cached"},
+ {ID: "ads_blocked_today", Name: "blocked"},
+ {ID: "queries_forwarded", Name: "forwarded"},
+ },
+ }
+ chartDNSQueriesPerc = module.Chart{
+ ID: "dns_queries_percentage",
+ Title: "DNS Queries Percentage",
+ Units: "percentage",
+ Fam: "queries",
+ Ctx: "pihole.dns_queries_percentage",
+ Type: module.Stacked,
+ Priority: prioDNSQueriesPerc,
+ Dims: module.Dims{
+ {ID: "queries_cached_perc", Name: "cached", Div: precision},
+ {ID: "ads_blocked_today_perc", Name: "blocked", Div: precision},
+ {ID: "queries_forwarded_perc", Name: "forwarded", Div: precision},
+ },
+ }
+ chartUniqueClients = module.Chart{
+ ID: "unique_clients",
+ Title: "Unique Clients",
+ Units: "clients",
+ Fam: "clients",
+ Ctx: "pihole.unique_clients",
+ Priority: prioUniqueClients,
+ Dims: module.Dims{
+ {ID: "unique_clients", Name: "unique"},
+ },
+ }
+ chartDomainsOnBlocklist = module.Chart{
+ ID: "domains_on_blocklist",
+ Title: "Domains On Blocklist",
+ Units: "domains",
+ Fam: "blocklist",
+ Ctx: "pihole.domains_on_blocklist",
+ Priority: prioDomainsOnBlocklist,
+ Dims: module.Dims{
+ {ID: "domains_being_blocked", Name: "blocklist"},
+ },
+ }
+ chartBlocklistLastUpdate = module.Chart{
+ ID: "blocklist_last_update",
+ Title: "Blocklist Last Update",
+ Units: "seconds",
+ Fam: "blocklist",
+ Ctx: "pihole.blocklist_last_update",
+ Priority: prioBlocklistLastUpdate,
+ Dims: module.Dims{
+ {ID: "blocklist_last_update", Name: "ago"},
+ },
+ }
+ chartUnwantedDomainsBlockingStatus = module.Chart{
+ ID: "unwanted_domains_blocking_status",
+ Title: "Unwanted Domains Blocking Status",
+ Units: "status",
+ Fam: "status",
+ Ctx: "pihole.unwanted_domains_blocking_status",
+ Priority: prioUnwantedDomainsBlockingStatus,
+ Dims: module.Dims{
+ {ID: "blocking_status_enabled", Name: "enabled"},
+ {ID: "blocking_status_disabled", Name: "disabled"},
+ },
+ }
+)
+
+var (
+ chartDNSQueriesTypes = module.Chart{
+ ID: "dns_queries_types",
+ Title: "DNS Queries Per Type",
+ Units: "percentage",
+ Fam: "query types",
+ Ctx: "pihole.dns_queries_types",
+ Type: module.Stacked,
+ Priority: prioDNSQueriesTypes,
+ Dims: module.Dims{
+ {ID: "A", Div: 100},
+ {ID: "AAAA", Div: 100},
+ {ID: "ANY", Div: 100},
+ {ID: "PTR", Div: 100},
+ {ID: "SOA", Div: 100},
+ {ID: "SRV", Div: 100},
+ {ID: "TXT", Div: 100},
+ },
+ }
+ chartDNSQueriesForwardedDestination = module.Chart{
+ ID: "dns_queries_forwarded_destination",
+ Title: "DNS Queries Per Destination",
+ Units: "percentage",
+ Fam: "queries answered by",
+ Ctx: "pihole.dns_queries_forwarded_destination",
+ Type: module.Stacked,
+ Priority: prioDNSQueriesForwardedDestination,
+ Dims: module.Dims{
+ {ID: "destination_cached", Name: "cached", Div: 100},
+ {ID: "destination_blocked", Name: "blocked", Div: 100},
+ {ID: "destination_other", Name: "other", Div: 100},
+ },
+ }
+)
+
+func (p *Pihole) addChartDNSQueriesType() {
+ chart := chartDNSQueriesTypes.Copy()
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Pihole) addChartDNSQueriesForwardedDestinations() {
+ chart := chartDNSQueriesForwardedDestination.Copy()
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pihole/collect.go b/src/go/plugin/go.d/modules/pihole/collect.go
new file mode 100644
index 000000000..c9e6d8451
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/collect.go
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pihole
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const wantAPIVersion = 3
+
+const (
+ urlPathAPI = "/admin/api.php"
+ urlQueryKeyAuth = "auth"
+ urlQueryKeyAPIVersion = "version"
+ urlQueryKeySummaryRaw = "summaryRaw"
+ urlQueryKeyGetQueryTypes = "getQueryTypes" // needs auth
+ urlQueryKeyGetForwardDestinations = "getForwardDestinations" // needs auth
+)
+
+const (
+ precision = 1000
+)
+
+func (p *Pihole) collect() (map[string]int64, error) {
+ if p.checkVersion {
+ ver, err := p.queryAPIVersion()
+ if err != nil {
+ return nil, err
+ }
+ if ver != wantAPIVersion {
+ return nil, fmt.Errorf("API version: %d, supported version: %d", ver, wantAPIVersion)
+ }
+ p.checkVersion = false
+ }
+
+ pmx := new(piholeMetrics)
+ p.queryMetrics(pmx, true)
+
+ if pmx.hasQueryTypes() {
+ p.addQueriesTypesOnce.Do(p.addChartDNSQueriesType)
+ }
+ if pmx.hasForwarders() {
+ p.addFwsDestinationsOnce.Do(p.addChartDNSQueriesForwardedDestinations)
+ }
+
+ mx := make(map[string]int64)
+ p.collectMetrics(mx, pmx)
+
+ return mx, nil
+}
+
+func (p *Pihole) collectMetrics(mx map[string]int64, pmx *piholeMetrics) {
+ if pmx.hasSummary() {
+ mx["ads_blocked_today"] = pmx.summary.AdsBlockedToday
+ mx["ads_percentage_today"] = int64(pmx.summary.AdsPercentageToday * 100)
+ mx["domains_being_blocked"] = pmx.summary.DomainsBeingBlocked
+ // GravityLastUpdated.Absolute is <nil> if the file does not exist (deleted/moved)
+ if pmx.summary.GravityLastUpdated.Absolute != nil {
+ mx["blocklist_last_update"] = time.Now().Unix() - *pmx.summary.GravityLastUpdated.Absolute
+ }
+ mx["dns_queries_today"] = pmx.summary.DNSQueriesToday
+ mx["queries_forwarded"] = pmx.summary.QueriesForwarded
+ mx["queries_cached"] = pmx.summary.QueriesCached
+ mx["unique_clients"] = pmx.summary.UniqueClients
+ mx["blocking_status_enabled"] = boolToInt(pmx.summary.Status == "enabled")
+ mx["blocking_status_disabled"] = boolToInt(pmx.summary.Status != "enabled")
+
+ tot := pmx.summary.QueriesCached + pmx.summary.AdsBlockedToday + pmx.summary.QueriesForwarded
+ mx["queries_cached_perc"] = calcPercentage(pmx.summary.QueriesCached, tot)
+ mx["ads_blocked_today_perc"] = calcPercentage(pmx.summary.AdsBlockedToday, tot)
+ mx["queries_forwarded_perc"] = calcPercentage(pmx.summary.QueriesForwarded, tot)
+ }
+
+ if pmx.hasQueryTypes() {
+ mx["A"] = int64(pmx.queryTypes.Types.A * 100)
+ mx["AAAA"] = int64(pmx.queryTypes.Types.AAAA * 100)
+ mx["ANY"] = int64(pmx.queryTypes.Types.ANY * 100)
+ mx["PTR"] = int64(pmx.queryTypes.Types.PTR * 100)
+ mx["SOA"] = int64(pmx.queryTypes.Types.SOA * 100)
+ mx["SRV"] = int64(pmx.queryTypes.Types.SRV * 100)
+ mx["TXT"] = int64(pmx.queryTypes.Types.TXT * 100)
+ }
+
+ if pmx.hasForwarders() {
+ for k, v := range pmx.forwarders.Destinations {
+ name := strings.Split(k, "|")[0]
+ mx["destination_"+name] = int64(v * 100)
+ }
+ }
+}
+
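+// queryMetrics fetches the summary and, when a web password is configured, also the query
+// types and forwarded destinations; with doConcurrently set, the requests run in parallel.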
+func (p *Pihole) queryMetrics(pmx *piholeMetrics, doConcurrently bool) {
+ type task func(*piholeMetrics)
+
+ var tasks = []task{p.querySummary}
+
+ if p.Password != "" {
+ tasks = []task{
+ p.querySummary,
+ p.queryQueryTypes,
+ p.queryForwardedDestinations,
+ }
+ }
+
+ wg := &sync.WaitGroup{}
+
+ wrap := func(call task) task {
+ return func(metrics *piholeMetrics) { call(metrics); wg.Done() }
+ }
+
+ for _, task := range tasks {
+ if doConcurrently {
+ wg.Add(1)
+ task = wrap(task)
+ go task(pmx)
+ } else {
+ task(pmx)
+ }
+ }
+
+ wg.Wait()
+}
+
+func (p *Pihole) querySummary(pmx *piholeMetrics) {
+ req, err := web.NewHTTPRequestWithPath(p.Request, urlPathAPI)
+ if err != nil {
+ p.Error(err)
+ return
+ }
+
+ req.URL.RawQuery = url.Values{
+ urlQueryKeyAuth: []string{p.Password},
+ urlQueryKeySummaryRaw: []string{"true"},
+ }.Encode()
+
+ var v summaryRawMetrics
+ if err = p.doWithDecode(&v, req); err != nil {
+ p.Error(err)
+ return
+ }
+
+ pmx.summary = &v
+}
+
+func (p *Pihole) queryQueryTypes(pmx *piholeMetrics) {
+ req, err := web.NewHTTPRequestWithPath(p.Request, urlPathAPI)
+ if err != nil {
+ p.Error(err)
+ return
+ }
+
+ req.URL.RawQuery = url.Values{
+ urlQueryKeyAuth: []string{p.Password},
+ urlQueryKeyGetQueryTypes: []string{"true"},
+ }.Encode()
+
+ var v queryTypesMetrics
+ err = p.doWithDecode(&v, req)
+ if err != nil {
+ p.Error(err)
+ return
+ }
+
+ pmx.queryTypes = &v
+}
+
+func (p *Pihole) queryForwardedDestinations(pmx *piholeMetrics) {
+ req, err := web.NewHTTPRequestWithPath(p.Request, urlPathAPI)
+ if err != nil {
+ p.Error(err)
+ return
+ }
+
+ req.URL.RawQuery = url.Values{
+ urlQueryKeyAuth: []string{p.Password},
+ urlQueryKeyGetForwardDestinations: []string{"true"},
+ }.Encode()
+
+ var v forwardDestinations
+ err = p.doWithDecode(&v, req)
+ if err != nil {
+ p.Error(err)
+ return
+ }
+
+ pmx.forwarders = &v
+}
+
+func (p *Pihole) queryAPIVersion() (int, error) {
+ req, err := web.NewHTTPRequestWithPath(p.Request, urlPathAPI)
+ if err != nil {
+ return 0, err
+ }
+
+ req.URL.RawQuery = url.Values{
+ urlQueryKeyAuth: []string{p.Password},
+ urlQueryKeyAPIVersion: []string{"true"},
+ }.Encode()
+
+ var v piholeAPIVersion
+ err = p.doWithDecode(&v, req)
+ if err != nil {
+ return 0, err
+ }
+
+ return v.Version, nil
+}
+
+func (p *Pihole) doWithDecode(dst interface{}, req *http.Request) error {
+ resp, err := p.httpClient.Do(req)
+ if err != nil {
+ return err
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("%s returned HTTP status code %d", req.URL, resp.StatusCode)
+ }
+
+ content, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return fmt.Errorf("error reading response from %s: %v", req.URL, err)
+ }
+
+ // the API returns an empty array for an unauthorized or invalid query
+ if isEmptyArray(content) {
+ return fmt.Errorf("unauthorized access to %s", req.URL)
+ }
+
+ if err := json.Unmarshal(content, dst); err != nil {
+ return fmt.Errorf("error parsing response from %s: %v", req.URL, err)
+ }
+
+ return nil
+}
+
+func isEmptyArray(data []byte) bool {
+ empty := "[]"
+ return len(data) == len(empty) && string(data) == empty
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
+func boolToInt(b bool) int64 {
+ if !b {
+ return 0
+ }
+ return 1
+}
+
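+// calcPercentage returns value as a percentage of total, scaled by precision so the
+// charts can show fractional digits (the percentage dimensions divide by precision).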
+func calcPercentage(value, total int64) (v int64) {
+ if total == 0 {
+ return 0
+ }
+ return int64(float64(value) * 100 / float64(total) * precision)
+}
diff --git a/src/go/plugin/go.d/modules/pihole/config_schema.json b/src/go/plugin/go.d/modules/pihole/config_schema.json
new file mode 100644
index 000000000..14523a2e8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/config_schema.json
@@ -0,0 +1,190 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Pi-hole collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the Pi-hole instance.",
+ "type": "string",
+ "default": "http://127.0.0.1:80",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "setup_vars_path": {
+ "title": "Path to setupVars.conf",
+ "description": "This file is used to get the web password.",
+ "type": "string",
+ "default": "/etc/pihole/setupVars.conf"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "setup_vars_path"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pihole/init.go b/src/go/plugin/go.d/modules/pihole/init.go
new file mode 100644
index 000000000..bd5d952cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/init.go
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pihole
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (p *Pihole) validateConfig() error {
+ if p.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (p *Pihole) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(p.Client)
+}
+
+func (p *Pihole) getWebPassword() string {
+ // do not read setupVarsPath if the password is set in the configuration file
+ if p.Password != "" {
+ return p.Password
+ }
+ if !isLocalHost(p.URL) {
+ p.Info("abort web password auto detection, host is not localhost")
+ return ""
+ }
+
+ p.Infof("starting web password auto detection, reading: %s", p.SetupVarsPath)
+ pass, err := getWebPassword(p.SetupVarsPath)
+ if err != nil {
+ p.Warningf("error reading '%s': %v", p.SetupVarsPath, err)
+ }
+
+ return pass
+}
+
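+// getWebPassword extracts the value of the WEBPASSWORD=<hash> line from the given
+// setupVars.conf file; it returns an empty string if no such line is found.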
+func getWebPassword(path string) (string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return "", err
+ }
+ defer func() { _ = f.Close() }()
+
+ s := bufio.NewScanner(f)
+ var password string
+
+ for s.Scan() && password == "" {
+ if strings.HasPrefix(s.Text(), "WEBPASSWORD") {
+ parts := strings.Split(s.Text(), "=")
+ if len(parts) != 2 {
+ return "", fmt.Errorf("unparsable line : %s", s.Text())
+ }
+ password = parts[1]
+ }
+ }
+
+ return password, nil
+}
+
+func isLocalHost(u string) bool {
+ if strings.Contains(u, "127.0.0.1") {
+ return true
+ }
+ if strings.Contains(u, "localhost") {
+ return true
+ }
+
+ return false
+}
diff --git a/src/go/plugin/go.d/modules/pihole/integrations/pi-hole.md b/src/go/plugin/go.d/modules/pihole/integrations/pi-hole.md
new file mode 100644
index 000000000..290dfcb03
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/integrations/pi-hole.md
@@ -0,0 +1,259 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/pihole/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/pihole/metadata.yaml"
+sidebar_label: "Pi-hole"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Pi-hole
+
+
+<img src="https://netdata.cloud/img/pihole.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: pihole
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Pi-hole instances using the [PHP API](https://github.com/pi-hole/AdminLTE).
+
+The data provided by the API is for the last 24 hours. All collected values refer to this time period and not to the
+module's collection interval.
+
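+As a reference, the collector reads the same endpoints you can query manually. A minimal sketch of such a request (the `/admin/api.php` path and the query parameters come from the module source; the host and the `<WEBPASSWORD>` placeholder are examples only):
+
+```bash
+# unauthenticated summary used for most charts
+curl "http://127.0.0.1/admin/api.php?summaryRaw=true"
+
+# query types and forwarded destinations require the web password as the auth token
+curl "http://127.0.0.1/admin/api.php?getQueryTypes=true&auth=<WEBPASSWORD>"
+```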
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Pi-hole instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| pihole.dns_queries_total | queries | queries |
+| pihole.dns_queries | cached, blocked, forwarded | queries |
+| pihole.dns_queries_percentage | cached, blocked, forwarded | percentage |
+| pihole.unique_clients | unique | clients |
+| pihole.domains_on_blocklist | blocklist | domains |
+| pihole.blocklist_last_update | ago | seconds |
+| pihole.unwanted_domains_blocking_status | enabled, disabled | status |
+| pihole.dns_queries_types | a, aaaa, any, ptr, soa, srv, txt | percentage |
+| pihole.dns_queries_forwarded_destination | cached, blocked, other | percentage |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ pihole_blocklist_last_update ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.blocklist_last_update | gravity.list (blocklist) file last update time |
+| [ pihole_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf) | pihole.unwanted_domains_blocking_status | unwanted domains blocking is disabled |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/pihole.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/pihole.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1 | yes |
+| setup_vars_path | Path to setupVars.conf. This file is used to get the web password. | /etc/pihole/setupVars.conf | no |
+| timeout | HTTP request timeout. | 5 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Remote instance with HTTPS enabled and a self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://203.0.113.11
+ tls_skip_verify: yes
+ password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1
+
+ - name: remote
+ url: http://203.0.113.10
+ password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `pihole` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m pihole
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `pihole` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep pihole
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep pihole /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep pihole
+```
+
+
diff --git a/src/go/plugin/go.d/modules/pihole/metadata.yaml b/src/go/plugin/go.d/modules/pihole/metadata.yaml
new file mode 100644
index 000000000..b6ef9656f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/metadata.yaml
@@ -0,0 +1,248 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-pihole
+ plugin_name: go.d.plugin
+ module_name: pihole
+ monitored_instance:
+ name: Pi-hole
+ link: https://pi-hole.net
+ icon_filename: pihole.png
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - pihole
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+          This collector monitors Pi-hole instances using the [PHP API](https://github.com/pi-hole/AdminLTE).
+
+ The data provided by the API is for the last 24 hours. All collected values refer to this time period and not to the
+ module's collection interval.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/pihole.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1
+ required: true
+ - name: setup_vars_path
+ description: Path to setupVars.conf. This file is used to get the web password.
+ default_value: /etc/pihole/setupVars.conf
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 5
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1
+ - name: HTTPS with self-signed certificate
+              description: Remote instance with HTTPS enabled and a self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://203.0.113.11
+ tls_skip_verify: yes
+ password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1
+
+ - name: remote
+ url: http://203.0.113.10
+ password: 1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: pihole_blocklist_last_update
+ metric: pihole.blocklist_last_update
+ info: "gravity.list (blocklist) file last update time"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf
+ - name: pihole_status
+ metric: pihole.unwanted_domains_blocking_status
+ info: unwanted domains blocking is disabled
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/pihole.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: pihole.dns_queries_total
+ description: DNS Queries Total (Cached, Blocked and Forwarded)
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: pihole.dns_queries
+ description: DNS Queries
+ unit: queries
+ chart_type: stacked
+ dimensions:
+ - name: cached
+ - name: blocked
+ - name: forwarded
+ - name: pihole.dns_queries_percentage
+ description: DNS Queries Percentage
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: cached
+ - name: blocked
+ - name: forwarded
+ - name: pihole.unique_clients
+ description: Unique Clients
+ unit: clients
+ chart_type: line
+ dimensions:
+ - name: unique
+ - name: pihole.domains_on_blocklist
+ description: Domains On Blocklist
+ unit: domains
+ chart_type: line
+ dimensions:
+ - name: blocklist
+ - name: pihole.blocklist_last_update
+ description: Blocklist Last Update
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: ago
+ - name: pihole.unwanted_domains_blocking_status
+ description: Unwanted Domains Blocking Status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: enabled
+ - name: disabled
+ - name: pihole.dns_queries_types
+ description: DNS Queries Per Type
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: a
+ - name: aaaa
+ - name: any
+ - name: ptr
+ - name: soa
+ - name: srv
+ - name: txt
+ - name: pihole.dns_queries_forwarded_destination
+ description: DNS Queries Per Destination
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: cached
+ - name: blocked
+ - name: other
diff --git a/src/go/plugin/go.d/modules/pihole/metrics.go b/src/go/plugin/go.d/modules/pihole/metrics.go
new file mode 100644
index 000000000..dd4b3b644
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/metrics.go
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pihole
+
+type piholeMetrics struct {
+ summary *summaryRawMetrics // ?summary
+ queryTypes *queryTypesMetrics // ?getQueryTypes
+ forwarders *forwardDestinations // ?getForwardedDestinations
+}
+
+func (p piholeMetrics) hasSummary() bool {
+ return p.summary != nil
+}
+func (p piholeMetrics) hasQueryTypes() bool {
+ return p.queryTypes != nil
+}
+func (p piholeMetrics) hasForwarders() bool {
+ return p.forwarders != nil && len(p.forwarders.Destinations) > 0
+}
+
+type piholeAPIVersion struct {
+ Version int
+}
+
+type summaryRawMetrics struct {
+ DomainsBeingBlocked int64 `json:"domains_being_blocked"`
+ DNSQueriesToday int64 `json:"dns_queries_today"`
+ AdsBlockedToday int64 `json:"ads_blocked_today"`
+ AdsPercentageToday float64 `json:"ads_percentage_today"`
+ UniqueDomains int64 `json:"unique_domains"`
+ QueriesForwarded int64 `json:"queries_forwarded"`
+ QueriesCached int64 `json:"queries_cached"`
+ ClientsEverSeen int64 `json:"clients_ever_seen"`
+ UniqueClients int64 `json:"unique_clients"`
+ DNSQueriesAllTypes int64 `json:"dns_queries_all_types"`
+ ReplyNODATA int64 `json:"reply_NODATA"`
+ ReplyNXDOMAIN int64 `json:"reply_NXDOMAIN"`
+ ReplyCNAME int64 `json:"reply_CNAME"`
+ ReplyIP int64 `json:"reply_IP"`
+ PrivacyLevel int64 `json:"privacy_level"`
+ Status string `json:"status"`
+ GravityLastUpdated struct {
+ // gravity.list has been removed (https://github.com/pi-hole/pi-hole/pull/2871#issuecomment-520251509)
+ FileExists bool `json:"file_exists"`
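+		// Absolute has no json tag; encoding/json matches the "absolute" key case-insensitively.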
+ Absolute *int64
+ } `json:"gravity_last_updated"`
+}
+
+type queryTypesMetrics struct {
+ Types struct {
+ A float64 `json:"A (IPv4)"`
+ AAAA float64 `json:"AAAA (IPv6)"`
+ ANY float64
+ SRV float64
+ SOA float64
+ PTR float64
+ TXT float64
+ } `json:"querytypes"`
+}
+
+// https://github.com/pi-hole/FTL/blob/6f69dd5b4ca60f925d68bfff3869350e934a7240/src/api/api.c#L474
+type forwardDestinations struct {
+ Destinations map[string]float64 `json:"forward_destinations"`
+}
+
+//type (
+// item map[string]int64
+//
+// topClients struct {
+// Sources item `json:"top_sources"`
+// }
+// topItems struct {
+// TopQueries item `json:"top_queries"`
+// TopAds item `json:"top_ads"`
+// }
+//)
+//
+//func (i *item) UnmarshalJSON(data []byte) error {
+// if isEmptyArray(data) {
+// return nil
+// }
+// type plain *item
+// return json.Unmarshal(data, (plain)(i))
+//}
diff --git a/src/go/plugin/go.d/modules/pihole/pihole.go b/src/go/plugin/go.d/modules/pihole/pihole.go
new file mode 100644
index 000000000..9c93d0512
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/pihole.go
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pihole
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("pihole", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Pihole {
+ return &Pihole{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 5),
+ },
+ },
+ SetupVarsPath: "/etc/pihole/setupVars.conf",
+ },
+ checkVersion: true,
+ charts: baseCharts.Copy(),
+ addQueriesTypesOnce: &sync.Once{},
+ addFwsDestinationsOnce: &sync.Once{},
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ SetupVarsPath string `yaml:"setup_vars_path" json:"setup_vars_path"`
+}
+
+type Pihole struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+ addQueriesTypesOnce *sync.Once
+ addFwsDestinationsOnce *sync.Once
+
+ httpClient *http.Client
+
+ checkVersion bool
+}
+
+func (p *Pihole) Configuration() any {
+ return p.Config
+}
+
+func (p *Pihole) Init() error {
+ if err := p.validateConfig(); err != nil {
+ p.Errorf("config validation: %v", err)
+ return err
+ }
+
+ httpClient, err := p.initHTTPClient()
+ if err != nil {
+ p.Errorf("init http client: %v", err)
+ return err
+ }
+ p.httpClient = httpClient
+
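+	// Read the web password (WEBPASSWORD from setupVars.conf); without it not all metrics are available.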
+ p.Password = p.getWebPassword()
+ if p.Password == "" {
+ p.Warning("no web password, not all metrics available")
+ } else {
+ p.Debugf("web password: %s", p.Password)
+ }
+
+ return nil
+}
+
+func (p *Pihole) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *Pihole) Charts() *module.Charts {
+ return p.charts
+}
+
+func (p *Pihole) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (p *Pihole) Cleanup() {
+ if p.httpClient != nil {
+ p.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pihole/pihole_test.go b/src/go/plugin/go.d/modules/pihole/pihole_test.go
new file mode 100644
index 000000000..86b17b623
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/pihole_test.go
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pihole
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ pathSetupVarsOK = "testdata/setupVars.conf"
+ pathSetupVarsWrong = "testdata/wrong.conf"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataEmptyResp = []byte("[]")
+ dataSummaryRawResp, _ = os.ReadFile("testdata/summaryRaw.json")
+ dataGetQueryTypesResp, _ = os.ReadFile("testdata/getQueryTypes.json")
+ dataGetForwardDestinationsResp, _ = os.ReadFile("testdata/getForwardDestinations.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataEmptyResp": dataEmptyResp,
+ "dataSummaryRawResp": dataSummaryRawResp,
+ "dataGetQueryTypesResp": dataGetQueryTypesResp,
+ "dataGetForwardDestinationsResp": dataGetForwardDestinationsResp,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPihole_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Pihole{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPihole_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ p := New()
+ p.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, p.Init())
+ } else {
+ assert.NoError(t, p.Init())
+ }
+ })
+ }
+}
+
+func TestPihole_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (p *Pihole, cleanup func())
+ }{
+ "success with web password": {
+ wantFail: false,
+ prepare: caseSuccessWithWebPassword,
+ },
+ "fail without web password": {
+ wantFail: true,
+ prepare: caseFailNoWebPassword,
+ },
+ "fail on unsupported version": {
+ wantFail: true,
+ prepare: caseFailUnsupportedVersion,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ p, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, p.Check())
+ } else {
+ assert.NoError(t, p.Check())
+ }
+ })
+ }
+}
+
+func TestPihole_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestPihole_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (p *Pihole, cleanup func())
+ wantMetrics map[string]int64
+ wantNumCharts int
+ }{
+ "success with web password": {
+ prepare: caseSuccessWithWebPassword,
+ wantNumCharts: len(baseCharts) + 2,
+ wantMetrics: map[string]int64{
+ "A": 1229,
+ "AAAA": 1229,
+ "ANY": 100,
+ "PTR": 7143,
+ "SOA": 100,
+ "SRV": 100,
+ "TXT": 100,
+ "ads_blocked_today": 1,
+ "ads_blocked_today_perc": 33333,
+ "ads_percentage_today": 100,
+ "blocking_status_disabled": 0,
+ "blocking_status_enabled": 1,
+ "blocklist_last_update": 106273651,
+ "destination_blocked": 220,
+ "destination_cached": 8840,
+ "destination_other": 940,
+ "dns_queries_today": 1,
+ "domains_being_blocked": 1,
+ "queries_cached": 1,
+ "queries_cached_perc": 33333,
+ "queries_forwarded": 1,
+ "queries_forwarded_perc": 33333,
+ "unique_clients": 1,
+ },
+ },
+ "fail without web password": {
+ prepare: caseFailNoWebPassword,
+ wantMetrics: nil,
+ },
+ "fail on unsupported version": {
+ prepare: caseFailUnsupportedVersion,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ p, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := p.Collect()
+
+ copyBlockListLastUpdate(mx, test.wantMetrics)
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ assert.Len(t, *p.Charts(), test.wantNumCharts)
+ }
+ })
+ }
+}
+
+func caseSuccessWithWebPassword(t *testing.T) (*Pihole, func()) {
+ p, srv := New(), mockPiholeServer{}.newPiholeHTTPServer()
+
+ p.SetupVarsPath = pathSetupVarsOK
+ p.URL = srv.URL
+
+ require.NoError(t, p.Init())
+
+ return p, srv.Close
+}
+
+func caseFailNoWebPassword(t *testing.T) (*Pihole, func()) {
+ p, srv := New(), mockPiholeServer{}.newPiholeHTTPServer()
+
+ p.SetupVarsPath = pathSetupVarsWrong
+ p.URL = srv.URL
+
+ require.NoError(t, p.Init())
+
+ return p, srv.Close
+}
+
+func caseFailUnsupportedVersion(t *testing.T) (*Pihole, func()) {
+ p, srv := New(), mockPiholeServer{unsupportedVersion: true}.newPiholeHTTPServer()
+
+ p.SetupVarsPath = pathSetupVarsOK
+ p.URL = srv.URL
+
+ require.NoError(t, p.Init())
+
+ return p, srv.Close
+}
+
+type mockPiholeServer struct {
+ unsupportedVersion bool
+ errOnAPIVersion bool
+ errOnSummary bool
+ errOnQueryTypes bool
+ errOnGetForwardDst bool
+}
+
+func (m mockPiholeServer) newPiholeHTTPServer() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != urlPathAPI || len(r.URL.Query()) == 0 {
+ w.WriteHeader(http.StatusBadRequest)
+ }
+
+ if r.URL.Query().Get(urlQueryKeyAuth) == "" {
+ _, _ = w.Write(dataEmptyResp)
+ return
+ }
+
+ if r.URL.Query().Has(urlQueryKeyAPIVersion) {
+ if m.errOnAPIVersion {
+ w.WriteHeader(http.StatusNotFound)
+ } else if m.unsupportedVersion {
+ _, _ = w.Write([]byte(fmt.Sprintf(`{"version": %d}`, wantAPIVersion+1)))
+ } else {
+ _, _ = w.Write([]byte(fmt.Sprintf(`{"version": %d}`, wantAPIVersion)))
+ }
+ return
+ }
+
+ if r.URL.Query().Has(urlQueryKeySummaryRaw) {
+ if m.errOnSummary {
+ w.WriteHeader(http.StatusNotFound)
+ } else {
+ _, _ = w.Write(dataSummaryRawResp)
+ }
+ return
+ }
+
+ data := dataEmptyResp
+ isErr := false
+ switch {
+ case r.URL.Query().Has(urlQueryKeyGetQueryTypes):
+ data, isErr = dataGetQueryTypesResp, m.errOnQueryTypes
+ case r.URL.Query().Has(urlQueryKeyGetForwardDestinations):
+ data, isErr = dataGetForwardDestinationsResp, m.errOnGetForwardDst
+ }
+
+ if isErr {
+ w.WriteHeader(http.StatusNotFound)
+ } else {
+ _, _ = w.Write(data)
+ }
+ }))
+}
+
+func copyBlockListLastUpdate(dst, src map[string]int64) {
+ k := "blocklist_last_update"
+ if v, ok := src[k]; ok {
+ if _, ok := dst[k]; ok {
+ dst[k] = v
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pihole/testdata/config.json b/src/go/plugin/go.d/modules/pihole/testdata/config.json
new file mode 100644
index 000000000..2d82443b0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "setup_vars_path": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/pihole/testdata/config.yaml b/src/go/plugin/go.d/modules/pihole/testdata/config.yaml
new file mode 100644
index 000000000..a9361246a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+setup_vars_path: "ok"
diff --git a/src/go/plugin/go.d/modules/pihole/testdata/getForwardDestinations.json b/src/go/plugin/go.d/modules/pihole/testdata/getForwardDestinations.json
new file mode 100644
index 000000000..3bfc646d0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/testdata/getForwardDestinations.json
@@ -0,0 +1,7 @@
+{
+ "forward_destinations": {
+ "blocked|blocked": 2.2,
+ "cached|cached": 88.4,
+ "other|other": 9.4
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pihole/testdata/getQueryTypes.json b/src/go/plugin/go.d/modules/pihole/testdata/getQueryTypes.json
new file mode 100644
index 000000000..cf7f19f95
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/testdata/getQueryTypes.json
@@ -0,0 +1,11 @@
+{
+ "querytypes": {
+ "A (IPv4)": 12.29,
+ "AAAA (IPv6)": 12.29,
+ "ANY": 1,
+ "SRV": 1,
+ "SOA": 1,
+ "PTR": 71.43,
+ "TXT": 1
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pihole/testdata/setupVars.conf b/src/go/plugin/go.d/modules/pihole/testdata/setupVars.conf
new file mode 100644
index 000000000..97f260297
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/testdata/setupVars.conf
@@ -0,0 +1,11 @@
+WEBPASSWORD=1ebd33f882f9aa5fac26a7cb74704742f91100228eb322e41b7bd6e6aeb8f74b
+BLOCKING_ENABLED=true
+PIHOLE_INTERFACE=enp0s9
+IPV4_ADDRESS=192.168.88.228/24
+IPV6_ADDRESS=
+PIHOLE_DNS_1=208.67.222.222
+PIHOLE_DNS_2=208.67.220.220
+QUERY_LOGGING=true
+INSTALL_WEB_SERVER=true
+INSTALL_WEB_INTERFACE=true
+LIGHTTPD_ENABLED=true \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pihole/testdata/summaryRaw.json b/src/go/plugin/go.d/modules/pihole/testdata/summaryRaw.json
new file mode 100644
index 000000000..8a4e59c16
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pihole/testdata/summaryRaw.json
@@ -0,0 +1,27 @@
+{
+ "domains_being_blocked": 1,
+ "dns_queries_today": 1,
+ "ads_blocked_today": 1,
+ "ads_percentage_today": 1,
+ "unique_domains": 1,
+ "queries_forwarded": 1,
+ "queries_cached": 1,
+ "clients_ever_seen": 1,
+ "unique_clients": 1,
+ "dns_queries_all_types": 1,
+ "reply_NODATA": 1,
+ "reply_NXDOMAIN": 1,
+ "reply_CNAME": 1,
+ "reply_IP": 1,
+ "privacy_level": 1,
+ "status": "enabled",
+ "gravity_last_updated": {
+ "file_exists": true,
+ "absolute": 1560443834,
+ "relative": {
+ "days": "3",
+ "hours": "06",
+ "minutes": "05"
+ }
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pika/README.md b/src/go/plugin/go.d/modules/pika/README.md
new file mode 120000
index 000000000..5e3a8da77
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/README.md
@@ -0,0 +1 @@
+integrations/pika.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pika/charts.go b/src/go/plugin/go.d/modules/pika/charts.go
new file mode 100644
index 000000000..6ba0e5d4d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/charts.go
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pika
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+var pikaCharts = module.Charts{
+ chartConnections.Copy(),
+ chartClients.Copy(),
+
+ chartMemory.Copy(),
+
+ chartConnectedReplicas.Copy(),
+
+ chartCommands.Copy(),
+ chartCommandsCalls.Copy(),
+
+ chartDbStringsKeys.Copy(),
+ chartDbStringsExpiresKeys.Copy(),
+ chartDbStringsInvalidKeys.Copy(),
+ chartDbHashesKeys.Copy(),
+ chartDbHashesExpiresKeys.Copy(),
+ chartDbHashesInvalidKeys.Copy(),
+ chartDbListsKeys.Copy(),
+ chartDbListsExpiresKeys.Copy(),
+ chartDbListsInvalidKeys.Copy(),
+ chartDbZsetsKeys.Copy(),
+ chartDbZsetsExpiresKeys.Copy(),
+ chartDbZsetsInvalidKeys.Copy(),
+ chartDbSetsKeys.Copy(),
+ chartDbSetsExpiresKeys.Copy(),
+ chartDbSetsInvalidKeys.Copy(),
+
+ chartUptime.Copy(),
+}
+
+var (
+ chartConnections = module.Chart{
+ ID: "connections",
+ Title: "Connections",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "pika.connections",
+ Dims: module.Dims{
+ {ID: "total_connections_received", Name: "accepted", Algo: module.Incremental},
+ },
+ }
+ chartClients = module.Chart{
+ ID: "clients",
+ Title: "Clients",
+ Units: "clients",
+ Fam: "connections",
+ Ctx: "pika.clients",
+ Dims: module.Dims{
+ {ID: "connected_clients", Name: "connected"},
+ },
+ }
+)
+
+var (
+ chartMemory = module.Chart{
+ ID: "memory",
+ Title: "Memory usage",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "pika.memory",
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "used_memory", Name: "used"},
+ },
+ }
+)
+
+var (
+ chartConnectedReplicas = module.Chart{
+ ID: "connected_replicas",
+ Title: "Connected replicas",
+ Units: "replicas",
+ Fam: "replication",
+ Ctx: "pika.connected_replicas",
+ Dims: module.Dims{
+ {ID: "connected_slaves", Name: "connected"},
+ },
+ }
+)
+
+var (
+ chartCommands = module.Chart{
+ ID: "commands",
+ Title: "Processed commands",
+ Units: "commands/s",
+ Fam: "commands",
+ Ctx: "pika.commands",
+ Dims: module.Dims{
+ {ID: "total_commands_processed", Name: "processed", Algo: module.Incremental},
+ },
+ }
+ chartCommandsCalls = module.Chart{
+ ID: "commands_calls",
+ Title: "Calls per command",
+ Units: "calls/s",
+ Fam: "commands",
+ Ctx: "pika.commands_calls",
+ Type: module.Stacked,
+ }
+)
+
+var (
+ chartDbStringsKeys = module.Chart{
+ ID: "database_strings_keys",
+ Title: "Strings type keys per database",
+ Units: "keys",
+ Fam: "keyspace strings",
+ Ctx: "pika.database_strings_keys",
+ Type: module.Stacked,
+ }
+ chartDbStringsExpiresKeys = module.Chart{
+ ID: "database_strings_expires_keys",
+ Title: "Strings type expires keys per database",
+ Units: "keys",
+ Fam: "keyspace strings",
+ Ctx: "pika.database_strings_expires_keys",
+ Type: module.Stacked,
+ }
+ chartDbStringsInvalidKeys = module.Chart{
+ ID: "database_strings_invalid_keys",
+ Title: "Strings type invalid keys per database",
+ Units: "keys",
+ Fam: "keyspace strings",
+ Ctx: "pika.database_strings_invalid_keys",
+ Type: module.Stacked,
+ }
+
+ chartDbHashesKeys = module.Chart{
+ ID: "database_hashes_keys",
+ Title: "Hashes type keys per database",
+ Units: "keys",
+ Fam: "keyspace hashes",
+ Ctx: "pika.database_hashes_keys",
+ Type: module.Stacked,
+ }
+ chartDbHashesExpiresKeys = module.Chart{
+ ID: "database_hashes_expires_keys",
+ Title: "Hashes type expires keys per database",
+ Units: "keys",
+ Fam: "keyspace hashes",
+ Ctx: "pika.database_hashes_expires_keys",
+ Type: module.Stacked,
+ }
+ chartDbHashesInvalidKeys = module.Chart{
+ ID: "database_hashes_invalid_keys",
+ Title: "Hashes type invalid keys per database",
+ Units: "keys",
+ Fam: "keyspace hashes",
+ Ctx: "pika.database_hashes_invalid_keys",
+ Type: module.Stacked,
+ }
+
+ chartDbListsKeys = module.Chart{
+ ID: "database_lists_keys",
+ Title: "Lists type keys per database",
+ Units: "keys",
+ Fam: "keyspace lists",
+ Ctx: "pika.database_lists_keys",
+ Type: module.Stacked,
+ }
+ chartDbListsExpiresKeys = module.Chart{
+ ID: "database_lists_expires_keys",
+ Title: "Lists type expires keys per database",
+ Units: "keys",
+ Fam: "keyspace lists",
+ Ctx: "pika.database_lists_expires_keys",
+ Type: module.Stacked,
+ }
+ chartDbListsInvalidKeys = module.Chart{
+ ID: "database_lists_invalid_keys",
+ Title: "Lists type invalid keys per database",
+ Units: "keys",
+ Fam: "keyspace lists",
+ Ctx: "pika.database_lists_invalid_keys",
+ Type: module.Stacked,
+ }
+
+ chartDbZsetsKeys = module.Chart{
+ ID: "database_zsets_keys",
+ Title: "Zsets type keys per database",
+ Units: "keys",
+ Fam: "keyspace zsets",
+ Ctx: "pika.database_zsets_keys",
+ Type: module.Stacked,
+ }
+ chartDbZsetsExpiresKeys = module.Chart{
+ ID: "database_zsets_expires_keys",
+ Title: "Zsets type expires keys per database",
+ Units: "keys",
+ Fam: "keyspace zsets",
+ Ctx: "pika.database_zsets_expires_keys",
+ Type: module.Stacked,
+ }
+ chartDbZsetsInvalidKeys = module.Chart{
+ ID: "database_zsets_invalid_keys",
+ Title: "Zsets type invalid keys per database",
+ Units: "keys",
+ Fam: "keyspace zsets",
+ Ctx: "pika.database_zsets_invalid_keys",
+ Type: module.Stacked,
+ }
+
+ chartDbSetsKeys = module.Chart{
+ ID: "database_sets_keys",
+ Title: "Sets type keys per database",
+ Units: "keys",
+ Fam: "keyspace sets",
+ Ctx: "pika.database_sets_keys",
+ Type: module.Stacked,
+ }
+ chartDbSetsExpiresKeys = module.Chart{
+ ID: "database_sets_expires_keys",
+ Title: "Sets type expires keys per database",
+ Units: "keys",
+ Fam: "keyspace sets",
+ Ctx: "pika.database_sets_expires_keys",
+ Type: module.Stacked,
+ }
+ chartDbSetsInvalidKeys = module.Chart{
+ ID: "database_sets_invalid_keys",
+ Title: "Sets invalid keys per database",
+ Units: "keys",
+ Fam: "keyspace sets",
+ Ctx: "pika.database_sets_invalid_keys",
+ Type: module.Stacked,
+ }
+)
+
+var (
+ chartUptime = module.Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "pika.uptime",
+ Dims: module.Dims{
+ {ID: "uptime_in_seconds", Name: "uptime"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/pika/collect.go b/src/go/plugin/go.d/modules/pika/collect.go
new file mode 100644
index 000000000..72a4961dd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/collect.go
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pika
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/blang/semver/v4"
+)
+
+const precision = 1000 // float values multiplier and dimensions divisor
+
+func (p *Pika) collect() (map[string]int64, error) {
+ info, err := p.pdb.Info(context.Background(), "all").Result()
+ if err != nil {
+ return nil, err
+ }
+
+ if p.server == "" {
+ s, v, err := extractServerVersion(info)
+ if err != nil {
+ return nil, fmt.Errorf("can not extract server app and version: %v", err)
+ }
+ p.server, p.version = s, v
+ p.Debugf(`server="%s",version="%s"`, s, v)
+ }
+
+ if p.server != "pika" {
+ return nil, fmt.Errorf("unsupported server app, want=pika, got=%s", p.server)
+ }
+
+ ms := make(map[string]int64)
+ p.collectInfo(ms, info)
+
+ return ms, nil
+}
+
+// pika_version:3.4.0
+var reVersion = regexp.MustCompile(`([a-z]+)_version:(\d+\.\d+\.\d+)`)
+
+func extractServerVersion(info string) (string, *semver.Version, error) {
+ var versionLine string
+ for sc := bufio.NewScanner(strings.NewReader(info)); sc.Scan(); {
+ line := sc.Text()
+ if strings.Contains(line, "_version") {
+ versionLine = strings.TrimSpace(line)
+ break
+ }
+ }
+ if versionLine == "" {
+ return "", nil, errors.New("no version property")
+ }
+
+ match := reVersion.FindStringSubmatch(versionLine)
+ if match == nil {
+ return "", nil, fmt.Errorf("can not parse version property '%s'", versionLine)
+ }
+
+ server, version := match[1], match[2]
+ ver, err := semver.New(version)
+ if err != nil {
+ return "", nil, err
+ }
+
+ return server, ver, nil
+}
diff --git a/src/go/plugin/go.d/modules/pika/collect_info.go b/src/go/plugin/go.d/modules/pika/collect_info.go
new file mode 100644
index 000000000..0494ae576
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/collect_info.go
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pika
+
+import (
+ "bufio"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+// https://github.com/Qihoo360/pika/blob/master/src/pika_admin.cc
+// https://github.com/Qihoo360/pika/blob/a0dbdcf5897dd7800ba8a4d1eafce1595619ddc8/src/pika_admin.cc#L694-L710
+
+const (
+ infoSectionServer = "# Server"
+ infoSectionData = "# Data"
+ infoSectionClients = "# Clients"
+ infoSectionStats = "# Stats"
+ infoSectionCommandExecCount = "# Command_Exec_Count"
+ infoSectionCPU = "# CPU"
+ infoSectionReplMaster = "# Replication(MASTER)"
+ infoSectionReplSlave = "# Replication(SLAVE)"
+ infoSectionReplMasterSlave = "# Replication(Master && SLAVE)"
+ infoSectionKeyspace = "# Keyspace"
+)
+
+var infoSections = map[string]struct{}{
+ infoSectionServer: {},
+ infoSectionData: {},
+ infoSectionClients: {},
+ infoSectionStats: {},
+ infoSectionCommandExecCount: {},
+ infoSectionCPU: {},
+ infoSectionReplMaster: {},
+ infoSectionReplSlave: {},
+ infoSectionReplMasterSlave: {},
+ infoSectionKeyspace: {},
+}
+
+func isInfoSection(line string) bool { _, ok := infoSections[line]; return ok }
+
+func (p *Pika) collectInfo(ms map[string]int64, info string) {
+ var curSection string
+
+ sc := bufio.NewScanner(strings.NewReader(info))
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+ if len(line) == 0 {
+ curSection = ""
+ continue
+ }
+ if strings.HasPrefix(line, "#") {
+ if isInfoSection(line) {
+ curSection = line
+ }
+ continue
+ }
+
+ field, value, ok := parseProperty(line)
+ if !ok {
+ continue
+ }
+
+ switch curSection {
+ case infoSectionCommandExecCount:
+ p.collectInfoCommandExecCountProperty(ms, field, value)
+ case infoSectionKeyspace:
+ p.collectInfoKeyspaceProperty(ms, field, value)
+ default:
+ collectNumericValue(ms, field, value)
+ }
+ }
+}
+
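+// reKeyspaceValue extracts the counters from a Keyspace property value,
+// e.g. "Strings_keys=5, expires=0, invalid_keys=0" (illustrative values).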
+var reKeyspaceValue = regexp.MustCompile(`^(.+)_keys=(\d+), expires=(\d+), invalid_keys=(\d+)`)
+
+func (p *Pika) collectInfoKeyspaceProperty(ms map[string]int64, field, value string) {
+ match := reKeyspaceValue.FindStringSubmatch(value)
+ if match == nil {
+ return
+ }
+
+ dataType, keys, expires, invalid := strings.ToLower(match[1]), match[2], match[3], match[4]
+ collectNumericValue(ms, field+"_"+dataType+"_keys", keys)
+ collectNumericValue(ms, field+"_"+dataType+"_expires_keys", expires)
+ collectNumericValue(ms, field+"_"+dataType+"_invalid_keys", invalid)
+
+ if !p.collectedDbs[field] {
+ p.collectedDbs[field] = true
+ p.addDbToKeyspaceCharts(field)
+ }
+}
+
+func (p *Pika) collectInfoCommandExecCountProperty(ms map[string]int64, field, value string) {
+ collectNumericValue(ms, "cmd_"+field+"_calls", value)
+
+ if !p.collectedCommands[field] {
+ p.collectedCommands[field] = true
+ p.addCmdToCommandsCharts(field)
+ }
+}
+
+func (p *Pika) addCmdToCommandsCharts(cmd string) {
+ p.addDimToChart(chartCommandsCalls.ID, &module.Dim{
+ ID: "cmd_" + cmd + "_calls",
+ Name: cmd,
+ Algo: module.Incremental,
+ })
+}
+
+func (p *Pika) addDbToKeyspaceCharts(db string) {
+ p.addDimToChart(chartDbStringsKeys.ID, &module.Dim{
+ ID: db + "_strings_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbStringsExpiresKeys.ID, &module.Dim{
+ ID: db + "_strings_expires_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbStringsInvalidKeys.ID, &module.Dim{
+ ID: db + "_strings_invalid_keys",
+ Name: db,
+ })
+
+ p.addDimToChart(chartDbHashesKeys.ID, &module.Dim{
+ ID: db + "_hashes_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbHashesExpiresKeys.ID, &module.Dim{
+ ID: db + "_hashes_expires_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbHashesInvalidKeys.ID, &module.Dim{
+ ID: db + "_hashes_invalid_keys",
+ Name: db,
+ })
+
+ p.addDimToChart(chartDbListsKeys.ID, &module.Dim{
+ ID: db + "_lists_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbListsExpiresKeys.ID, &module.Dim{
+ ID: db + "_lists_expires_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbListsInvalidKeys.ID, &module.Dim{
+ ID: db + "_lists_invalid_keys",
+ Name: db,
+ })
+
+ p.addDimToChart(chartDbZsetsKeys.ID, &module.Dim{
+ ID: db + "_zsets_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbZsetsExpiresKeys.ID, &module.Dim{
+ ID: db + "_zsets_expires_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbZsetsInvalidKeys.ID, &module.Dim{
+ ID: db + "_zsets_invalid_keys",
+ Name: db,
+ })
+
+ p.addDimToChart(chartDbSetsKeys.ID, &module.Dim{
+ ID: db + "_sets_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbSetsExpiresKeys.ID, &module.Dim{
+ ID: db + "_sets_expires_keys",
+ Name: db,
+ })
+ p.addDimToChart(chartDbSetsInvalidKeys.ID, &module.Dim{
+ ID: db + "_sets_invalid_keys",
+ Name: db,
+ })
+}
+
+func (p *Pika) addDimToChart(chartID string, dim *module.Dim) {
+ chart := p.Charts().Get(chartID)
+ if chart == nil {
+ p.Warningf("error on adding '%s' dimension: can not find '%s' chart", dim.ID, chartID)
+ return
+ }
+ if err := chart.AddDim(dim); err != nil {
+ p.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
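+// parseProperty splits an INFO line into field and value: most properties use the "field:value" form,
+// while Keyspace lines (e.g. "db0 Strings_keys=5, expires=0, invalid_keys=0", an illustrative example)
+// separate the db name from the counters with a space.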
+func parseProperty(prop string) (field, value string, ok bool) {
+ var sep byte
+ if strings.HasPrefix(prop, "db") {
+ sep = ' '
+ } else {
+ sep = ':'
+ }
+ i := strings.IndexByte(prop, sep)
+ if i == -1 {
+ return "", "", false
+ }
+ field, value = prop[:i], prop[i+1:]
+ return field, value, field != "" && value != ""
+}
+
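+// collectNumericValue stores integer values as-is and multiplies fractional values by precision, e.g. "2.5" -> 2500.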
+func collectNumericValue(ms map[string]int64, field, value string) {
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return
+ }
+ if strings.IndexByte(value, '.') == -1 {
+ ms[field] = int64(v)
+ } else {
+ ms[field] = int64(v * precision)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pika/config_schema.json b/src/go/plugin/go.d/modules/pika/config_schema.json
new file mode 100644
index 000000000..885cbed0f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/config_schema.json
@@ -0,0 +1,93 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "title": "Pika collector configuration.",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "URI",
+ "description": "The URI specifying the connection details for the Pika server.",
+ "type": "string",
+ "default": "redis://@localhost:9221"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "address": {
+ "ui:placeholder": "redis://user:password@host:port",
+      "ui:help": "TCP connection: `redis://user:password@host:port`. Unix connection: `unix://user:password@/path/to/redis.sock`."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pika/init.go b/src/go/plugin/go.d/modules/pika/init.go
new file mode 100644
index 000000000..b51152952
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/init.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pika
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/go-redis/redis/v8"
+)
+
+func (p *Pika) validateConfig() error {
+ if p.Address == "" {
+ return errors.New("'address' not set")
+ }
+ return nil
+}
+
+func (p *Pika) initRedisClient() (*redis.Client, error) {
+ opts, err := redis.ParseURL(p.Address)
+ if err != nil {
+ return nil, err
+ }
+
+ tlsConfig, err := tlscfg.NewTLSConfig(p.TLSConfig)
+ if err != nil {
+ return nil, err
+ }
+
+ if opts.TLSConfig != nil && tlsConfig != nil {
+ tlsConfig.ServerName = opts.TLSConfig.ServerName
+ }
+
+ opts.PoolSize = 1
+ opts.TLSConfig = tlsConfig
+ opts.DialTimeout = p.Timeout.Duration()
+ opts.ReadTimeout = p.Timeout.Duration()
+ opts.WriteTimeout = p.Timeout.Duration()
+
+ return redis.NewClient(opts), nil
+}
+
+func (p *Pika) initCharts() (*module.Charts, error) {
+ return pikaCharts.Copy(), nil
+}
diff --git a/src/go/plugin/go.d/modules/pika/integrations/pika.md b/src/go/plugin/go.d/modules/pika/integrations/pika.md
new file mode 100644
index 000000000..04a2b329c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/integrations/pika.md
@@ -0,0 +1,256 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/pika/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/pika/metadata.yaml"
+sidebar_label: "Pika"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Pika
+
+
+<img src="https://netdata.cloud/img/pika.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: pika
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Pika servers.
+
+It collects information and statistics about the server by executing the following commands:
+
+- [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)
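+
+Because Pika speaks the Redis protocol, you can inspect the same data the collector reads with any Redis client. A minimal sketch, assuming the default port `9221` and that `redis-cli` is installed:
+
+```bash
+# dump all INFO sections (Server, Data, Clients, Stats, Keyspace, ...)
+redis-cli -h 127.0.0.1 -p 9221 INFO ALL
+```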
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Pika instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| pika.connections | accepted | connections |
+| pika.clients | connected | clients |
+| pika.memory | used | bytes |
+| pika.connected_replicas | connected | replicas |
+| pika.commands | processed | commands/s |
+| pika.commands_calls | a dimension per command | calls/s |
+| pika.database_strings_keys | a dimension per database | keys |
+| pika.database_strings_expires_keys | a dimension per database | keys |
+| pika.database_strings_invalid_keys | a dimension per database | keys |
+| pika.database_hashes_keys | a dimension per database | keys |
+| pika.database_hashes_expires_keys | a dimension per database | keys |
+| pika.database_hashes_invalid_keys | a dimension per database | keys |
+| pika.database_lists_keys | a dimension per database | keys |
+| pika.database_lists_expires_keys | a dimension per database | keys |
+| pika.database_lists_invalid_keys | a dimension per database | keys |
+| pika.database_zsets_keys | a dimension per database | keys |
+| pika.database_zsets_expires_keys | a dimension per database | keys |
+| pika.database_zsets_invalid_keys | a dimension per database | keys |
+| pika.database_sets_keys | a dimension per database | keys |
+| pika.database_sets_expires_keys | a dimension per database | keys |
+| pika.database_sets_invalid_keys | a dimension per database | keys |
+| pika.uptime | uptime | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/pika.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/pika.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Pika server address. | redis://@localhost:9221 | yes |
+| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |
+| username | Username used for authentication. | | no |
+| password | Password used for authentication. | | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 'redis://@localhost:9221'
+
+```
+</details>
+
+##### TCP socket with password
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 'redis://:password@127.0.0.1:9221'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 'redis://:password@127.0.0.1:9221'
+
+ - name: remote
+ address: 'redis://user:password@203.0.113.0:9221'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `pika` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m pika
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `pika` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep pika
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep pika /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep pika
+```
+
+
diff --git a/src/go/plugin/go.d/modules/pika/metadata.yaml b/src/go/plugin/go.d/modules/pika/metadata.yaml
new file mode 100644
index 000000000..c87cd9b27
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/metadata.yaml
@@ -0,0 +1,277 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-pika
+ plugin_name: go.d.plugin
+ module_name: pika
+ monitored_instance:
+ name: Pika
+ link: https://github.com/OpenAtomFoundation/pika
+ icon_filename: pika.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - pika
+ - databases
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Pika servers.
+
+          It collects information and statistics about the server by executing the following commands:
+
+ - [`INFO ALL`](https://github.com/OpenAtomFoundation/pika/wiki/pika-info%E4%BF%A1%E6%81%AF%E8%AF%B4%E6%98%8E)
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/pika.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: Pika server address.
+ default_value: redis://@localhost:9221
+ required: true
+ details: |
+                There are two connection types: TCP socket and Unix socket.
+
+                - TCP connection: `redis://<user>:<password>@<host>:<port>/<db_number>`
+ - Unix connection: `unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>`
+ - name: timeout
+ description: Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username used for authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password used for authentication.
+ default_value: ""
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+              description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+              description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+              description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: TCP socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 'redis://@localhost:9221'
+ - name: TCP socket with password
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 'redis://:password@127.0.0.1:9221'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 'redis://:password@127.0.0.1:9221'
+
+ - name: remote
+ address: 'redis://user:password@203.0.113.0:9221'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: pika.connections
+ description: Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: accepted
+ - name: pika.clients
+ description: Clients
+ unit: clients
+ chart_type: line
+ dimensions:
+ - name: connected
+ - name: pika.memory
+ description: Memory usage
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: pika.connected_replicas
+ description: Connected replicas
+ unit: replicas
+ chart_type: line
+ dimensions:
+ - name: connected
+ - name: pika.commands
+ description: Processed commands
+ unit: commands/s
+ chart_type: line
+ dimensions:
+ - name: processed
+ - name: pika.commands_calls
+ description: Calls per command
+ unit: calls/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per command
+ - name: pika.database_strings_keys
+ description: Strings type keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_strings_expires_keys
+ description: Strings type expires keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_strings_invalid_keys
+ description: Strings type invalid keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_hashes_keys
+ description: Hashes type keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_hashes_expires_keys
+ description: Hashes type expires keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_hashes_invalid_keys
+ description: Hashes type invalid keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_lists_keys
+ description: Lists type keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_lists_expires_keys
+ description: Lists type expires keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_lists_invalid_keys
+ description: Lists type invalid keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_zsets_keys
+ description: Zsets type keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_zsets_expires_keys
+ description: Zsets type expires keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_zsets_invalid_keys
+ description: Zsets type invalid keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_sets_keys
+ description: Sets type keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_sets_expires_keys
+ description: Sets type expires keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.database_sets_invalid_keys
+ description: Sets type invalid keys per database
+ unit: keys
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per database
+ - name: pika.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
diff --git a/src/go/plugin/go.d/modules/pika/pika.go b/src/go/plugin/go.d/modules/pika/pika.go
new file mode 100644
index 000000000..705c3db49
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/pika.go
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pika
+
+import (
+ "context"
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/blang/semver/v4"
+ "github.com/go-redis/redis/v8"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("pika", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Pika {
+ return &Pika{
+ Config: Config{
+ Address: "redis://@localhost:9221",
+ Timeout: web.Duration(time.Second),
+ },
+
+ collectedCommands: make(map[string]bool),
+ collectedDbs: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+}
+
+type (
+ Pika struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ pdb redisClient
+
+ server string
+ version *semver.Version
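+ // collectedCommands and collectedDbs track the per-command and per-database chart dimensions that have already been added.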
+ collectedCommands map[string]bool
+ collectedDbs map[string]bool
+ }
+ redisClient interface {
+ Info(ctx context.Context, section ...string) *redis.StringCmd
+ Close() error
+ }
+)
+
+func (p *Pika) Configuration() any {
+ return p.Config
+}
+
+func (p *Pika) Init() error {
+ err := p.validateConfig()
+ if err != nil {
+ p.Errorf("config validation: %v", err)
+ return err
+ }
+
+ pdb, err := p.initRedisClient()
+ if err != nil {
+ p.Errorf("init redis client: %v", err)
+ return err
+ }
+ p.pdb = pdb
+
+ charts, err := p.initCharts()
+ if err != nil {
+ p.Errorf("init charts: %v", err)
+ return err
+ }
+ p.charts = charts
+
+ return nil
+}
+
+func (p *Pika) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *Pika) Charts() *module.Charts {
+ return p.charts
+}
+
+func (p *Pika) Collect() map[string]int64 {
+ ms, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+
+func (p *Pika) Cleanup() {
+ if p.pdb == nil {
+ return
+ }
+ err := p.pdb.Close()
+ if err != nil {
+ p.Warningf("cleanup: error on closing redis client [%s]: %v", p.Address, err)
+ }
+ p.pdb = nil
+}
diff --git a/src/go/plugin/go.d/modules/pika/pika_test.go b/src/go/plugin/go.d/modules/pika/pika_test.go
new file mode 100644
index 000000000..940619255
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/pika_test.go
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pika
+
+import (
+ "context"
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/go-redis/redis/v8"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataRedisInfoAll, _ = os.ReadFile("testdata/redis/info_all.txt")
+ dataVer340InfoAll, _ = os.ReadFile("testdata/v3.4.0/info_all.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataRedisInfoAll": dataRedisInfoAll,
+ "dataVer340InfoAll": dataVer340InfoAll,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPika_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Pika{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPika_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset 'address'": {
+ wantFail: true,
+ config: Config{Address: ""},
+ },
+ "fails on invalid 'address' format": {
+ wantFail: true,
+ config: Config{Address: "127.0.0.1:9221"},
+ },
+ "fails on invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ Address: "redis://@127.0.0.1:9221",
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pika := New()
+ pika.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, pika.Init())
+ } else {
+ assert.NoError(t, pika.Init())
+ }
+ })
+ }
+}
+
+func TestPika_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) *Pika
+ wantFail bool
+ }{
+ "success on valid response v3.4.0": {
+ prepare: preparePikaV340,
+ },
+ "fails on error on Info": {
+ wantFail: true,
+ prepare: preparePikaErrorOnInfo,
+ },
+ "fails on response from not Pika instance": {
+ wantFail: true,
+ prepare: preparePikaWithRedisMetrics,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pika := test.prepare(t)
+
+ if test.wantFail {
+ assert.Error(t, pika.Check())
+ } else {
+ assert.NoError(t, pika.Check())
+ }
+ })
+ }
+}
+
+func TestPika_Charts(t *testing.T) {
+ pika := New()
+ require.NoError(t, pika.Init())
+
+ assert.NotNil(t, pika.Charts())
+}
+
+func TestPika_Cleanup(t *testing.T) {
+ pika := New()
+ assert.NotPanics(t, pika.Cleanup)
+
+ require.NoError(t, pika.Init())
+ m := &mockRedisClient{}
+ pika.pdb = m
+
+ pika.Cleanup()
+
+ assert.True(t, m.calledClose)
+}
+
+func TestPika_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) *Pika
+ wantCollected map[string]int64
+ }{
+ "success on valid response v3.4.0": {
+ prepare: preparePikaV340,
+ wantCollected: map[string]int64{
+ "cmd_INFO_calls": 1,
+ "cmd_SET_calls": 2,
+ "arch_bits": 64,
+ "connected_clients": 1,
+ "connected_slaves": 0,
+ "db0_hashes_expires_keys": 0,
+ "db0_hashes_invalid_keys": 0,
+ "db0_hashes_keys": 0,
+ "db0_lists_expires_keys": 0,
+ "db0_lists_invalid_keys": 0,
+ "db0_lists_keys": 0,
+ "db0_sets_expires_keys": 0,
+ "db0_sets_invalid_keys": 0,
+ "db0_sets_keys": 0,
+ "db0_strings_expires_keys": 0,
+ "db0_strings_invalid_keys": 0,
+ "db0_strings_keys": 0,
+ "db0_zsets_expires_keys": 0,
+ "db0_zsets_invalid_keys": 0,
+ "db0_zsets_keys": 0,
+ "instantaneous_ops_per_sec": 0,
+ "log_size": 4272814,
+ "process_id": 1,
+ "server_id": 1,
+ "sync_thread_num": 6,
+ "tcp_port": 9221,
+ "thread_num": 1,
+ "total_commands_processed": 3,
+ "total_connections_received": 3,
+ "uptime_in_days": 1,
+ "uptime_in_seconds": 1884,
+ "used_cpu_sys": 158200,
+ "used_cpu_sys_children": 30,
+ "used_cpu_user": 22050,
+ "used_cpu_user_children": 20,
+ "used_memory": 8198,
+ },
+ },
+ "fails on error on Info": {
+ prepare: preparePikaErrorOnInfo,
+ },
+ "fails on response from not Pika instance": {
+ prepare: preparePikaWithRedisMetrics,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pika := test.prepare(t)
+
+ ms := pika.Collect()
+
+ assert.Equal(t, test.wantCollected, ms)
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, pika, ms)
+ ensureCollectedCommandsAddedToCharts(t, pika)
+ ensureCollectedDbsAddedToCharts(t, pika)
+ }
+ })
+ }
+}
+
+func preparePikaV340(t *testing.T) *Pika {
+ pika := New()
+ require.NoError(t, pika.Init())
+ pika.pdb = &mockRedisClient{
+ result: dataVer340InfoAll,
+ }
+ return pika
+}
+
+func preparePikaErrorOnInfo(t *testing.T) *Pika {
+ pika := New()
+ require.NoError(t, pika.Init())
+ pika.pdb = &mockRedisClient{
+ errOnInfo: true,
+ }
+ return pika
+}
+
+func preparePikaWithRedisMetrics(t *testing.T) *Pika {
+ pika := New()
+ require.NoError(t, pika.Init())
+ pika.pdb = &mockRedisClient{
+ result: dataRedisInfoAll,
+ }
+ return pika
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, pika *Pika, ms map[string]int64) {
+ for _, chart := range *pika.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := ms[dim.ID]
+ assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := ms[v.ID]
+ assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+ }
+ }
+}
+
+func ensureCollectedCommandsAddedToCharts(t *testing.T, pika *Pika) {
+ for _, id := range []string{
+ chartCommandsCalls.ID,
+ } {
+ chart := pika.Charts().Get(id)
+ require.NotNilf(t, chart, "'%s' chart is not in charts", id)
+ assert.Lenf(t, chart.Dims, len(pika.collectedCommands),
+ "'%s' chart unexpected number of dimensions", id)
+ }
+}
+
+func ensureCollectedDbsAddedToCharts(t *testing.T, pika *Pika) {
+ for _, id := range []string{
+ chartDbStringsKeys.ID,
+ chartDbStringsExpiresKeys.ID,
+ chartDbStringsInvalidKeys.ID,
+ chartDbHashesKeys.ID,
+ chartDbHashesExpiresKeys.ID,
+ chartDbHashesInvalidKeys.ID,
+ chartDbListsKeys.ID,
+ chartDbListsExpiresKeys.ID,
+ chartDbListsInvalidKeys.ID,
+ chartDbZsetsKeys.ID,
+ chartDbZsetsExpiresKeys.ID,
+ chartDbZsetsInvalidKeys.ID,
+ chartDbSetsKeys.ID,
+ chartDbSetsExpiresKeys.ID,
+ chartDbSetsInvalidKeys.ID,
+ } {
+ chart := pika.Charts().Get(id)
+ require.NotNilf(t, chart, "'%s' chart is not in charts", id)
+ assert.Lenf(t, chart.Dims, len(pika.collectedDbs),
+ "'%s' chart unexpected number of dimensions", id)
+ }
+}
+
+type mockRedisClient struct {
+ errOnInfo bool
+ result []byte
+ calledClose bool
+}
+
+func (m *mockRedisClient) Info(_ context.Context, _ ...string) (cmd *redis.StringCmd) {
+ if m.errOnInfo {
+ cmd = redis.NewStringResult("", errors.New("error on Info"))
+ } else {
+ cmd = redis.NewStringResult(string(m.result), nil)
+ }
+ return cmd
+}
+
+func (m *mockRedisClient) Close() error {
+ m.calledClose = true
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/pika/testdata/config.json b/src/go/plugin/go.d/modules/pika/testdata/config.json
new file mode 100644
index 000000000..d8ba812ab
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/testdata/config.json
@@ -0,0 +1,9 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/pika/testdata/config.yaml b/src/go/plugin/go.d/modules/pika/testdata/config.yaml
new file mode 100644
index 000000000..6a6f6ae69
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/testdata/config.yaml
@@ -0,0 +1,7 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/pika/testdata/redis/info_all.txt b/src/go/plugin/go.d/modules/pika/testdata/redis/info_all.txt
new file mode 100644
index 000000000..8ab381620
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/testdata/redis/info_all.txt
@@ -0,0 +1,165 @@
+$4050
+# Server
+redis_version:6.0.9
+redis_git_sha1:00000000
+redis_git_dirty:0
+redis_build_id:12c354e6793cb936
+redis_mode:standalone
+os:Linux 5.4.39-linuxkit x86_64
+arch_bits:64
+multiplexing_api:epoll
+atomicvar_api:atomic-builtin
+gcc_version:8.3.0
+process_id:1
+run_id:5d97fd948bbf6cb68458685fc747f9f9019c3fc4
+tcp_port:6379
+uptime_in_seconds:252812
+uptime_in_days:2
+hz:10
+configured_hz:10
+lru_clock:13181377
+executable:/data/redis-server
+config_file:
+io_threads_active:0
+
+# Clients
+connected_clients:1
+client_recent_max_input_buffer:8
+client_recent_max_output_buffer:0
+blocked_clients:0
+tracking_clients:0
+clients_in_timeout_table:0
+
+# Memory
+used_memory:867160
+used_memory_human:846.84K
+used_memory_rss:3989504
+used_memory_rss_human:3.80M
+used_memory_peak:923360
+used_memory_peak_human:901.72K
+used_memory_peak_perc:93.91%
+used_memory_overhead:803344
+used_memory_startup:803152
+used_memory_dataset:63816
+used_memory_dataset_perc:99.70%
+allocator_allocated:903408
+allocator_active:1208320
+allocator_resident:3723264
+total_system_memory:2084032512
+total_system_memory_human:1.94G
+used_memory_lua:37888
+used_memory_lua_human:37.00K
+used_memory_scripts:0
+used_memory_scripts_human:0B
+number_of_cached_scripts:0
+maxmemory:0
+maxmemory_human:0B
+maxmemory_policy:noeviction
+allocator_frag_ratio:1.34
+allocator_frag_bytes:304912
+allocator_rss_ratio:3.08
+allocator_rss_bytes:2514944
+rss_overhead_ratio:1.07
+rss_overhead_bytes:266240
+mem_fragmentation_ratio:4.96
+mem_fragmentation_bytes:3185848
+mem_not_counted_for_evict:0
+mem_replication_backlog:0
+mem_clients_slaves:0
+mem_clients_normal:0
+mem_aof_buffer:0
+mem_allocator:jemalloc-5.1.0
+active_defrag_running:0
+lazyfree_pending_objects:0
+
+# Persistence
+loading:0
+rdb_changes_since_last_save:0
+rdb_bgsave_in_progress:0
+rdb_last_save_time:1606951667
+rdb_last_bgsave_status:ok
+rdb_last_bgsave_time_sec:0
+rdb_current_bgsave_time_sec:-1
+rdb_last_cow_size:290816
+aof_enabled:0
+aof_rewrite_in_progress:0
+aof_rewrite_scheduled:0
+aof_last_rewrite_time_sec:-1
+aof_current_rewrite_time_sec:-1
+aof_last_bgrewrite_status:ok
+aof_last_write_status:ok
+aof_last_cow_size:0
+module_fork_in_progress:0
+module_fork_last_cow_size:0
+
+# Stats
+total_connections_received:87
+total_commands_processed:161
+instantaneous_ops_per_sec:0
+total_net_input_bytes:2301
+total_net_output_bytes:507187
+instantaneous_input_kbps:0.00
+instantaneous_output_kbps:0.00
+rejected_connections:0
+sync_full:0
+sync_partial_ok:0
+sync_partial_err:0
+expired_keys:0
+expired_stale_perc:0.00
+expired_time_cap_reached_count:0
+expire_cycle_cpu_milliseconds:28362
+evicted_keys:0
+keyspace_hits:2
+keyspace_misses:0
+pubsub_channels:0
+pubsub_patterns:0
+latest_fork_usec:810
+migrate_cached_sockets:0
+slave_expires_tracked_keys:0
+active_defrag_hits:0
+active_defrag_misses:0
+active_defrag_key_hits:0
+active_defrag_key_misses:0
+tracking_total_keys:0
+tracking_total_items:0
+tracking_total_prefixes:0
+unexpected_error_replies:0
+total_reads_processed:250
+total_writes_processed:163
+io_threaded_reads_processed:0
+io_threaded_writes_processed:0
+
+# Replication
+role:master
+connected_slaves:0
+master_replid:3f0ad529c9c59a17834bde8ae85f09f77609ecb1
+master_replid2:0000000000000000000000000000000000000000
+master_repl_offset:0
+second_repl_offset:-1
+repl_backlog_active:0
+repl_backlog_size:1048576
+repl_backlog_first_byte_offset:0
+repl_backlog_histlen:0
+
+# CPU
+used_cpu_sys:630.829091
+used_cpu_user:188.394908
+used_cpu_sys_children:0.020626
+used_cpu_user_children:0.002731
+
+# Modules
+
+# Commandstats
+cmdstat_set:calls=3,usec=140,usec_per_call=46.67
+cmdstat_command:calls=2,usec=2182,usec_per_call=1091.00
+cmdstat_get:calls=2,usec=29,usec_per_call=14.50
+cmdstat_hmset:calls=2,usec=408,usec_per_call=204.00
+cmdstat_hello:calls=1,usec=15,usec_per_call=15.00
+cmdstat_ping:calls=19,usec=286,usec_per_call=15.05
+cmdstat_info:calls=132,usec=37296,usec_per_call=282.55
+
+# Cluster
+cluster_enabled:0
+
+# Keyspace
+db0:keys=4,expires=0,avg_ttl=0
diff --git a/src/go/plugin/go.d/modules/pika/testdata/v3.4.0/info_all.txt b/src/go/plugin/go.d/modules/pika/testdata/v3.4.0/info_all.txt
new file mode 100644
index 000000000..ec58524ce
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pika/testdata/v3.4.0/info_all.txt
@@ -0,0 +1,64 @@
+$1283
+# Server
+pika_version:3.4.0
+pika_git_sha:bd30511bf82038c2c6531b3d84872c9825fe836a
+pika_build_compile_date: Dec 1 2020
+os:Linux 5.4.39-linuxkit x86_64
+arch_bits:64
+process_id:1
+tcp_port:9221
+thread_num:1
+sync_thread_num:6
+uptime_in_seconds:1884
+uptime_in_days:1
+config_file:/pika/conf/pika.conf
+server_id:1
+
+# Data
+db_size:645807
+db_size_human:0M
+log_size:4272814
+log_size_human:4M
+compression:snappy
+used_memory:8198
+used_memory_human:0M
+db_memtable_usage:8072
+db_tablereader_usage:126
+db_fatal:0
+db_fatal_msg:NULL
+
+# Clients
+connected_clients:1
+
+# Stats
+total_connections_received:3
+instantaneous_ops_per_sec:0
+total_commands_processed:3
+is_bgsaving:No
+is_scaning_keyspace:No
+is_compact:No
+compact_cron:
+compact_interval:
+
+# Command_Exec_Count
+INFO:1
+SET:2
+
+# CPU
+used_cpu_sys:158.20
+used_cpu_user:22.05
+used_cpu_sys_children:0.03
+used_cpu_user_children:0.02
+
+# Replication(MASTER)
+role:master
+connected_slaves:0
+db0 binlog_offset=0 589,safety_purge=none
+
+# Keyspace
+# Time:1970-01-01 08:00:00
+db0 Strings_keys=0, expires=0, invalid_keys=0
+db0 Hashes_keys=0, expires=0, invalid_keys=0
+db0 Lists_keys=0, expires=0, invalid_keys=0
+db0 Zsets_keys=0, expires=0, invalid_keys=0
+db0 Sets_keys=0, expires=0, invalid_keys=0
diff --git a/src/go/plugin/go.d/modules/ping/README.md b/src/go/plugin/go.d/modules/ping/README.md
new file mode 120000
index 000000000..a1381e57b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/README.md
@@ -0,0 +1 @@
+integrations/ping.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/ping/charts.go b/src/go/plugin/go.d/modules/ping/charts.go
new file mode 100644
index 000000000..04dfc17d5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/charts.go
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ping
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioHostRTT = module.Priority + iota
+ prioHostStdDevRTT
+ prioHostPingPacketLoss
+ prioHostPingPackets
+)
+
+var hostChartsTmpl = module.Charts{
+ hostRTTChartTmpl.Copy(),
+ hostStdDevRTTChartTmpl.Copy(),
+ hostPacketLossChartTmpl.Copy(),
+ hostPacketsChartTmpl.Copy(),
+}
+
+var (
+ hostRTTChartTmpl = module.Chart{
+ ID: "host_%s_rtt",
+ Title: "Ping round-trip time",
+ Units: "milliseconds",
+ Fam: "latency",
+ Ctx: "ping.host_rtt",
+ Priority: prioHostRTT,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "host_%s_min_rtt", Name: "min", Div: 1e3},
+ {ID: "host_%s_max_rtt", Name: "max", Div: 1e3},
+ {ID: "host_%s_avg_rtt", Name: "avg", Div: 1e3},
+ },
+ }
+ hostStdDevRTTChartTmpl = module.Chart{
+ ID: "host_%s_std_dev_rtt",
+ Title: "Ping round-trip time standard deviation",
+ Units: "milliseconds",
+ Fam: "latency",
+ Ctx: "ping.host_std_dev_rtt",
+ Priority: prioHostStdDevRTT,
+ Dims: module.Dims{
+ {ID: "host_%s_std_dev_rtt", Name: "std_dev", Div: 1e3},
+ },
+ }
+)
+
+var hostPacketLossChartTmpl = module.Chart{
+ ID: "host_%s_packet_loss",
+ Title: "Ping packet loss",
+ Units: "percentage",
+ Fam: "packet loss",
+ Ctx: "ping.host_packet_loss",
+ Priority: prioHostPingPacketLoss,
+ Dims: module.Dims{
+ {ID: "host_%s_packet_loss", Name: "loss", Div: 1000},
+ },
+}
+
+var hostPacketsChartTmpl = module.Chart{
+ ID: "host_%s_packets",
+ Title: "Ping packets transferred",
+ Units: "packets",
+ Fam: "packets",
+ Ctx: "ping.host_packets",
+ Priority: prioHostPingPackets,
+ Dims: module.Dims{
+ {ID: "host_%s_packets_recv", Name: "received"},
+ {ID: "host_%s_packets_sent", Name: "sent"},
+ },
+}
+
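+// newHostCharts instantiates the per-host chart templates, substituting the host into chart IDs (with dots replaced by underscores), dimension IDs, and labels.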
+func newHostCharts(host string) *module.Charts {
+ charts := hostChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ReplaceAll(host, ".", "_"))
+ chart.Labels = []module.Label{
+ {Key: "host", Value: host},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, host)
+ }
+ }
+
+ return charts
+}
+
+func (p *Ping) addHostCharts(host string) {
+ charts := newHostCharts(host)
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ping/collect.go b/src/go/plugin/go.d/modules/ping/collect.go
new file mode 100644
index 000000000..c162a2b15
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/collect.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ping
+
+import (
+ "fmt"
+ "sync"
+)
+
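+// collect pings every configured host concurrently and merges the per-host statistics into a single metrics map.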
+func (p *Ping) collect() (map[string]int64, error) {
+ mu := &sync.Mutex{}
+ mx := make(map[string]int64)
+ var wg sync.WaitGroup
+
+ for _, v := range p.Hosts {
+ wg.Add(1)
+ go func(v string) { defer wg.Done(); p.pingHost(v, mx, mu) }(v)
+ }
+ wg.Wait()
+
+ return mx, nil
+}
+
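+// pingHost runs in its own goroutine for each host; the mutex guards the shared metrics map and one-time chart registration.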
+func (p *Ping) pingHost(host string, mx map[string]int64, mu *sync.Mutex) {
+ stats, err := p.prober.ping(host)
+ if err != nil {
+ p.Error(err)
+ return
+ }
+
+ mu.Lock()
+ defer mu.Unlock()
+
+ if !p.hosts[host] {
+ p.hosts[host] = true
+ p.addHostCharts(host)
+ }
+
+ px := fmt.Sprintf("host_%s_", host)
+ if stats.PacketsRecv != 0 {
+ mx[px+"min_rtt"] = stats.MinRtt.Microseconds()
+ mx[px+"max_rtt"] = stats.MaxRtt.Microseconds()
+ mx[px+"avg_rtt"] = stats.AvgRtt.Microseconds()
+ mx[px+"std_dev_rtt"] = stats.StdDevRtt.Microseconds()
+ }
+ mx[px+"packets_recv"] = int64(stats.PacketsRecv)
+ mx[px+"packets_sent"] = int64(stats.PacketsSent)
+ mx[px+"packet_loss"] = int64(stats.PacketLoss * 1000)
+}
diff --git a/src/go/plugin/go.d/modules/ping/config_schema.json b/src/go/plugin/go.d/modules/ping/config_schema.json
new file mode 100644
index 000000000..1168e3388
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/config_schema.json
@@ -0,0 +1,95 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "title": "Ping collector configuration.",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "privileged": {
+ "title": "Privileged mode",
+ "description": "If unset, sends unprivileged UDP ping packets (requires [additional configuration](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/ping#overview)); otherwise, sends raw ICMP ping packets ([not recommended](https://github.com/netdata/netdata/issues/15410)).",
+ "type": "boolean",
+ "default": false
+ },
+ "hosts": {
+ "title": "Network hosts",
+ "description": "List of network hosts (IP addresses or domain names) to send ping packets.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Host",
+ "type": "string"
+ },
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "network": {
+ "title": "Network",
+ "description": "The protocol version used for resolving the specified hosts IP addresses.",
+ "type": "string",
+ "default": "ip",
+ "enum": [
+ "ip",
+ "ip4",
+ "ip6"
+ ]
+ },
+ "packets": {
+ "title": "Packets",
+ "description": "Number of ping packets to send for each host.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "interval": {
+ "title": "Interval",
+ "description": "Timeout between sending ping packets, in seconds.",
+ "type": "number",
+ "minimum": 0.1,
+ "default": 0.1
+ },
+ "interface": {
+ "title": "Interface",
+ "description": "The name of the network interface whose IP address will be used as the source for sending ping packets.",
+ "type": "string",
+ "default": ""
+ }
+ },
+ "required": [
+ "hosts"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "update_every": {
+ "ui:help": "Sets the frequency at which a specified number of ping packets (determined by 'packets') are sent to designated hosts."
+ },
+ "network": {
+ "ui:help": "`ip` selects IPv4 or IPv6 based on system configuration, `ip4` forces resolution to IPv4 addresses, and `ip6` forces resolution to IPv6 addresses.",
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "interval": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "hosts": {
+ "ui:listFlavour": "list"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/ping/init.go b/src/go/plugin/go.d/modules/ping/init.go
new file mode 100644
index 000000000..62d78c8e6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/init.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ping
+
+import (
+ "errors"
+ "time"
+)
+
+func (p *Ping) validateConfig() error {
+ if len(p.Hosts) == 0 {
+ return errors.New("'hosts' can't be empty")
+ }
+ if p.SendPackets <= 0 {
+ return errors.New("'send_packets' can't be <= 0")
+ }
+ return nil
+}
+
+func (p *Ping) initProber() (prober, error) {
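+ // Cap the ping deadline at 90-95% of update_every so a probe round finishes before the next data collection cycle.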
+ mul := 0.9
+ if p.UpdateEvery > 1 {
+ mul = 0.95
+ }
+ deadline := time.Millisecond * time.Duration(float64(p.UpdateEvery)*mul*1000)
+ if deadline.Milliseconds() == 0 {
+ return nil, errors.New("zero ping deadline")
+ }
+
+ conf := pingProberConfig{
+ privileged: p.Privileged,
+ packets: p.SendPackets,
+ iface: p.Interface,
+ interval: p.Interval.Duration(),
+ deadline: deadline,
+ }
+
+ return p.newProber(conf, p.Logger), nil
+}
diff --git a/src/go/plugin/go.d/modules/ping/integrations/ping.md b/src/go/plugin/go.d/modules/ping/integrations/ping.md
new file mode 100644
index 000000000..db97288b0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/integrations/ping.md
@@ -0,0 +1,271 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/ping/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/ping/metadata.yaml"
+sidebar_label: "Ping"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Ping
+
+
+<img src="https://netdata.cloud/img/globe.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: ping
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This module measures round-trip time and packet loss by sending ping messages to network hosts.
+
+There are two operational modes:
+
+- privileged (send raw ICMP ping, default). Requires
+ CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:
+ > **Note**: set automatically during Netdata installation.
+
+ ```bash
+ sudo setcap CAP_NET_RAW=eip <INSTALL_PREFIX>/usr/libexec/netdata/plugins.d/go.d.plugin
+ ```
+
+- unprivileged (send UDP ping, Linux only).
+ Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):
+
+ ```bash
+ sudo sysctl -w net.ipv4.ping_group_range="0 2147483647"
+ ```
+ To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and
+ execute `sudo sysctl -p`.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per host
+
+These metrics refer to the remote host.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| host | remote host |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| ping.host_rtt | min, max, avg | milliseconds |
+| ping.host_std_dev_rtt | std_dev | milliseconds |
+| ping.host_packet_loss | loss | percentage |
+| ping.host_packets | received, sent | packets |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ ping_host_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | network host ${label:host} reachability status |
+| [ ping_packet_loss ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_packet_loss | packet loss percentage to the network host ${label:host} over the last 10 minutes |
+| [ ping_host_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf) | ping.host_rtt | average latency to the network host ${label:host} over the last 10 seconds |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/ping.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/ping.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| hosts | Network hosts. | | yes |
+| network | Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6). | ip | no |
+| privileged | Ping packet type. "no" sends an unprivileged UDP ping, "yes" sends a raw ICMP ping. | yes | no |
+| packets | Number of ping packets to send. | 5 | no |
+| interval | Timeout between sending ping packets. | 100ms | no |
+
+</details>
+
+#### Examples
+
+##### IPv4 hosts
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: example
+ hosts:
+ - 192.0.2.0
+ - 192.0.2.1
+
+```
+</details>
+
+##### Unprivileged mode
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: example
+ privileged: no
+ hosts:
+ - 192.0.2.0
+ - 192.0.2.1
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Multiple instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: example1
+ hosts:
+ - 192.0.2.0
+ - 192.0.2.1
+
+ - name: example2
+ packets: 10
+ hosts:
+ - 192.0.2.3
+ - 192.0.2.4
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `ping` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m ping
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `ping` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep ping
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep ping /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep ping
+```
+
+
diff --git a/src/go/plugin/go.d/modules/ping/metadata.yaml b/src/go/plugin/go.d/modules/ping/metadata.yaml
new file mode 100644
index 000000000..8686d103b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/metadata.yaml
@@ -0,0 +1,193 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-ping
+ plugin_name: go.d.plugin
+ module_name: ping
+ monitored_instance:
+ name: Ping
+ link: ""
+ icon_filename: globe.svg
+ categories:
+ - data-collection.synthetic-checks
+ keywords:
+ - ping
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This module measures round-trip time and packet loss by sending ping messages to network hosts.
+
+ There are two operational modes:
+
+ - privileged (send raw ICMP ping, default). Requires
+ CAP_NET_RAW [capability](https://man7.org/linux/man-pages/man7/capabilities.7.html) or root privileges:
+ > **Note**: set automatically during Netdata installation.
+
+ ```bash
+ sudo setcap CAP_NET_RAW=eip <INSTALL_PREFIX>/usr/libexec/netdata/plugins.d/go.d.plugin
+ ```
+
+ - unprivileged (send UDP ping, Linux only).
+ Requires configuring [ping_group_range](https://www.man7.org/linux/man-pages/man7/icmp.7.html):
+
+ ```bash
+ sudo sysctl -w net.ipv4.ping_group_range="0 2147483647"
+ ```
+ To persist the change add `net.ipv4.ping_group_range=0 2147483647` to `/etc/sysctl.conf` and
+ execute `sudo sysctl -p`.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/ping.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: hosts
+ description: Network hosts.
+ default_value: ""
+ required: true
+ - name: network
+ description: "Allows configuration of DNS resolution. Supported options: ip (select IPv4 or IPv6), ip4 (select IPv4), ip6 (select IPv6)."
+ default_value: "ip"
+ required: false
+ - name: privileged
+ description: Ping packet type. "no" sends an unprivileged UDP ping, "yes" sends a raw ICMP ping.
+ default_value: true
+ required: false
+ - name: packets
+ description: Number of ping packets to send.
+ default_value: 5
+ required: false
+ - name: interval
+ description: Timeout between sending ping packets.
+ default_value: 100ms
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: IPv4 hosts
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: example
+ hosts:
+ - 192.0.2.0
+ - 192.0.2.1
+ - name: Unprivileged mode
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: example
+ privileged: no
+ hosts:
+ - 192.0.2.0
+ - 192.0.2.1
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Multiple instances.
+ config: |
+ jobs:
+ - name: example1
+ hosts:
+ - 192.0.2.0
+ - 192.0.2.1
+
+ - name: example2
+ packets: 10
+ hosts:
+ - 192.0.2.3
+ - 192.0.2.4
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: ping_host_reachable
+ metric: ping.host_packet_loss
+ info: "network host ${label:host} reachability status"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf
+ - name: ping_packet_loss
+ metric: ping.host_packet_loss
+ info: "packet loss percentage to the network host ${label:host} over the last 10 minutes"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf
+ - name: ping_host_latency
+ metric: ping.host_rtt
+ info: "average latency to the network host ${label:host} over the last 10 seconds"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ping.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: host
+ description: These metrics refer to the remote host.
+ labels:
+ - name: host
+ description: remote host
+ metrics:
+ - name: ping.host_rtt
+ description: Ping round-trip time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: ping.host_std_dev_rtt
+ description: Ping round-trip time standard deviation
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: std_dev
+ - name: ping.host_packet_loss
+ description: Ping packet loss
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: loss
+ - name: ping.host_packets
+ description: Ping packets transferred
+ unit: packets
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
diff --git a/src/go/plugin/go.d/modules/ping/ping.go b/src/go/plugin/go.d/modules/ping/ping.go
new file mode 100644
index 000000000..9d1ef929f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/ping.go
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ping
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ probing "github.com/prometheus-community/pro-bing"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("ping", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Ping {
+ return &Ping{
+ Config: Config{
+ Network: "ip",
+ Privileged: true,
+ SendPackets: 5,
+ Interval: web.Duration(time.Millisecond * 100),
+ },
+
+ charts: &module.Charts{},
+ hosts: make(map[string]bool),
+ newProber: newPingProber,
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Hosts []string `yaml:"hosts" json:"hosts"`
+ Network string `yaml:"network,omitempty" json:"network"`
+ Privileged bool `yaml:"privileged" json:"privileged"`
+ SendPackets int `yaml:"packets,omitempty" json:"packets"`
+ Interval web.Duration `yaml:"interval,omitempty" json:"interval"`
+ Interface string `yaml:"interface,omitempty" json:"interface"`
+}
+
+type (
+ Ping struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prober prober
+ newProber func(pingProberConfig, *logger.Logger) prober
+
+ hosts map[string]bool
+ }
+ prober interface {
+ ping(host string) (*probing.Statistics, error)
+ }
+)
+
+func (p *Ping) Configuration() any {
+ return p.Config
+}
+
+func (p *Ping) Init() error {
+ err := p.validateConfig()
+ if err != nil {
+ p.Errorf("config validation: %v", err)
+ return err
+ }
+
+ pr, err := p.initProber()
+ if err != nil {
+ p.Errorf("init prober: %v", err)
+ return err
+ }
+ p.prober = pr
+
+ return nil
+}
+
+func (p *Ping) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *Ping) Charts() *module.Charts {
+ return p.charts
+}
+
+func (p *Ping) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (p *Ping) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/ping/ping_test.go b/src/go/plugin/go.d/modules/ping/ping_test.go
new file mode 100644
index 000000000..52d16dd3e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/ping_test.go
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ping
+
+import (
+ "errors"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ probing "github.com/prometheus-community/pro-bing"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPing_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Ping{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPing_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "fail with default": {
+ wantFail: true,
+ config: New().Config,
+ },
+ "success when 'hosts' set": {
+ wantFail: false,
+ config: Config{
+ SendPackets: 1,
+ Hosts: []string{"192.0.2.0"},
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ping := New()
+ ping.Config = test.config
+ ping.UpdateEvery = 1
+
+ if test.wantFail {
+ assert.Error(t, ping.Init())
+ } else {
+ assert.NoError(t, ping.Init())
+ }
+ })
+ }
+}
+
+func TestPing_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestPing_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestPing_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) *Ping
+ }{
+ "success when ping does not return an error": {
+ wantFail: false,
+ prepare: casePingSuccess,
+ },
+ "fail when ping returns an error": {
+ wantFail: true,
+ prepare: casePingError,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ping := test.prepare(t)
+
+ if test.wantFail {
+ assert.Error(t, ping.Check())
+ } else {
+ assert.NoError(t, ping.Check())
+ }
+ })
+ }
+}
+
+func TestPing_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) *Ping
+ wantMetrics map[string]int64
+ wantNumCharts int
+ }{
+ "success when ping does not return an error": {
+ prepare: casePingSuccess,
+ wantMetrics: map[string]int64{
+ "host_192.0.2.1_avg_rtt": 15000,
+ "host_192.0.2.1_max_rtt": 20000,
+ "host_192.0.2.1_min_rtt": 10000,
+ "host_192.0.2.1_packet_loss": 0,
+ "host_192.0.2.1_packets_recv": 5,
+ "host_192.0.2.1_packets_sent": 5,
+ "host_192.0.2.1_std_dev_rtt": 5000,
+ "host_192.0.2.2_avg_rtt": 15000,
+ "host_192.0.2.2_max_rtt": 20000,
+ "host_192.0.2.2_min_rtt": 10000,
+ "host_192.0.2.2_packet_loss": 0,
+ "host_192.0.2.2_packets_recv": 5,
+ "host_192.0.2.2_packets_sent": 5,
+ "host_192.0.2.2_std_dev_rtt": 5000,
+ "host_example.com_avg_rtt": 15000,
+ "host_example.com_max_rtt": 20000,
+ "host_example.com_min_rtt": 10000,
+ "host_example.com_packet_loss": 0,
+ "host_example.com_packets_recv": 5,
+ "host_example.com_packets_sent": 5,
+ "host_example.com_std_dev_rtt": 5000,
+ },
+ wantNumCharts: 3 * len(hostChartsTmpl),
+ },
+ "fail when ping returns an error": {
+ prepare: casePingError,
+ wantMetrics: nil,
+ wantNumCharts: 0,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ping := test.prepare(t)
+
+ mx := ping.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ assert.Len(t, *ping.Charts(), test.wantNumCharts)
+ }
+ })
+ }
+}
+
+func casePingSuccess(t *testing.T) *Ping {
+ ping := New()
+ ping.UpdateEvery = 1
+ ping.Hosts = []string{"192.0.2.1", "192.0.2.2", "example.com"}
+ ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober {
+ return &mockProber{}
+ }
+ require.NoError(t, ping.Init())
+ return ping
+}
+
+func casePingError(t *testing.T) *Ping {
+ ping := New()
+ ping.UpdateEvery = 1
+ ping.Hosts = []string{"192.0.2.1", "192.0.2.2", "example.com"}
+ ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober {
+ return &mockProber{errOnPing: true}
+ }
+ require.NoError(t, ping.Init())
+ return ping
+}
+
+type mockProber struct {
+ errOnPing bool
+}
+
+func (m *mockProber) ping(host string) (*probing.Statistics, error) {
+ if m.errOnPing {
+ return nil, errors.New("mock.ping() error")
+ }
+
+ stats := probing.Statistics{
+ PacketsRecv: 5,
+ PacketsSent: 5,
+ PacketsRecvDuplicates: 0,
+ PacketLoss: 0,
+ Addr: host,
+ Rtts: nil,
+ MinRtt: time.Millisecond * 10,
+ MaxRtt: time.Millisecond * 20,
+ AvgRtt: time.Millisecond * 15,
+ StdDevRtt: time.Millisecond * 5,
+ }
+
+ return &stats, nil
+}
diff --git a/src/go/plugin/go.d/modules/ping/prober.go b/src/go/plugin/go.d/modules/ping/prober.go
new file mode 100644
index 000000000..70c31dcde
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/prober.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package ping
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+
+ probing "github.com/prometheus-community/pro-bing"
+)
+
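+// newPingProber builds a prober; when a network interface is configured, its first IPv4 address is used as the ping source address.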
+func newPingProber(conf pingProberConfig, log *logger.Logger) prober {
+ var source string
+ if conf.iface != "" {
+ if addr, err := getInterfaceIPAddress(conf.iface); err != nil {
+ log.Warningf("error getting interface '%s' IP address: %v", conf.iface, err)
+ } else {
+ log.Infof("interface '%s' IP address '%s', will use it as the source", conf.iface, addr)
+ source = addr
+ }
+ }
+
+ return &pingProber{
+ network: conf.network,
+ privileged: conf.privileged,
+ packets: conf.packets,
+ source: source,
+ interval: conf.interval,
+ deadline: conf.deadline,
+ Logger: log,
+ }
+}
+
+type pingProberConfig struct {
+ network string
+ privileged bool
+ packets int
+ iface string
+ interval time.Duration
+ deadline time.Duration
+}
+
+type pingProber struct {
+ *logger.Logger
+
+ network string
+ privileged bool
+ packets int
+ source string
+ interval time.Duration
+ deadline time.Duration
+}
+
+func (p *pingProber) ping(host string) (*probing.Statistics, error) {
+ pr := probing.New(host)
+
+ pr.SetNetwork(p.network)
+
+ if err := pr.Resolve(); err != nil {
+ return nil, fmt.Errorf("DNS lookup '%s': %v", host, err)
+ }
+
+ pr.Source = p.source
+ pr.RecordRtts = false
+ pr.Interval = p.interval
+ pr.Count = p.packets
+ pr.Timeout = p.deadline
+ pr.SetPrivileged(p.privileged)
+ pr.SetLogger(nil)
+
+ if err := pr.Run(); err != nil {
+ return nil, fmt.Errorf("pinging host '%s' (ip %s): %v", pr.Addr(), pr.IPAddr(), err)
+ }
+
+ stats := pr.Statistics()
+
+ p.Debugf("ping stats for host '%s' (ip '%s'): %+v", pr.Addr(), pr.IPAddr(), stats)
+
+ return stats, nil
+}
+
+func getInterfaceIPAddress(ifaceName string) (ipaddr string, err error) {
+ iface, err := net.InterfaceByName(ifaceName)
+ if err != nil {
+ return "", err
+ }
+
+ addresses, err := iface.Addrs()
+ if err != nil {
+ return "", err
+ }
+
+ // FIXME: add IPv6 support
+ var v4Addr string
+ for _, addr := range addresses {
+ if ipnet, ok := addr.(*net.IPNet); ok && ipnet.IP.To4() != nil {
+ v4Addr = ipnet.IP.To4().String()
+ break
+ }
+ }
+
+ if v4Addr == "" {
+ return "", errors.New("ipv4 addresses not found")
+ }
+
+ return v4Addr, nil
+}
diff --git a/src/go/plugin/go.d/modules/ping/testdata/config.json b/src/go/plugin/go.d/modules/ping/testdata/config.json
new file mode 100644
index 000000000..18df64529
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/testdata/config.json
@@ -0,0 +1,11 @@
+{
+ "update_every": 123,
+ "hosts": [
+ "ok"
+ ],
+ "network": "ok",
+ "privileged": true,
+ "packets": 123,
+ "interval": 123.123,
+ "interface": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/ping/testdata/config.yaml b/src/go/plugin/go.d/modules/ping/testdata/config.yaml
new file mode 100644
index 000000000..5eacb9413
--- /dev/null
+++ b/src/go/plugin/go.d/modules/ping/testdata/config.yaml
@@ -0,0 +1,8 @@
+update_every: 123
+hosts:
+ - "ok"
+network: "ok"
+privileged: yes
+packets: 123
+interval: 123.123
+interface: "ok"
diff --git a/src/go/plugin/go.d/modules/portcheck/README.md b/src/go/plugin/go.d/modules/portcheck/README.md
new file mode 120000
index 000000000..4bee556ef
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/README.md
@@ -0,0 +1 @@
+integrations/tcp_endpoints.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/portcheck/charts.go b/src/go/plugin/go.d/modules/portcheck/charts.go
new file mode 100644
index 000000000..6797f00a6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/charts.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package portcheck
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioCheckStatus = module.Priority + iota
+ prioCheckInStatusDuration
+ prioCheckLatency
+)
+
+var chartsTmpl = module.Charts{
+ checkStatusChartTmpl.Copy(),
+ checkInStateDurationChartTmpl.Copy(),
+ checkConnectionLatencyChartTmpl.Copy(),
+}
+
+var checkStatusChartTmpl = module.Chart{
+ ID: "port_%d_status",
+ Title: "TCP Check Status",
+ Units: "boolean",
+ Fam: "status",
+ Ctx: "portcheck.status",
+ Priority: prioCheckStatus,
+ Dims: module.Dims{
+ {ID: "port_%d_success", Name: "success"},
+ {ID: "port_%d_failed", Name: "failed"},
+ {ID: "port_%d_timeout", Name: "timeout"},
+ },
+}
+
+var checkInStateDurationChartTmpl = module.Chart{
+ ID: "port_%d_current_state_duration",
+ Title: "Current State Duration",
+ Units: "seconds",
+ Fam: "status duration",
+ Ctx: "portcheck.state_duration",
+ Priority: prioCheckInStatusDuration,
+ Dims: module.Dims{
+ {ID: "port_%d_current_state_duration", Name: "time"},
+ },
+}
+
+var checkConnectionLatencyChartTmpl = module.Chart{
+ ID: "port_%d_connection_latency",
+ Title: "TCP Connection Latency",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "portcheck.latency",
+ Priority: prioCheckLatency,
+ Dims: module.Dims{
+ {ID: "port_%d_latency", Name: "time"},
+ },
+}
+
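+// newPortCharts instantiates the per-port chart templates, substituting the port number into chart and dimension IDs and attaching host/port labels.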
+func newPortCharts(host string, port int) *module.Charts {
+ charts := chartsTmpl.Copy()
+ for _, chart := range *charts {
+ chart.Labels = []module.Label{
+ {Key: "host", Value: host},
+ {Key: "port", Value: strconv.Itoa(port)},
+ }
+ chart.ID = fmt.Sprintf(chart.ID, port)
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, port)
+ }
+ }
+ return charts
+}
diff --git a/src/go/plugin/go.d/modules/portcheck/collect.go b/src/go/plugin/go.d/modules/portcheck/collect.go
new file mode 100644
index 000000000..dab45ec41
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/collect.go
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package portcheck
+
+import (
+ "fmt"
+ "sync"
+ "time"
+)
+
+type checkState string
+
+const (
+ checkStateSuccess checkState = "success"
+ checkStateTimeout checkState = "timeout"
+ checkStateFailed checkState = "failed"
+)
+
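+// collect probes all configured ports concurrently, then flattens each port's state, time-in-state, and latency into the metrics map.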
+func (pc *PortCheck) collect() (map[string]int64, error) {
+ wg := &sync.WaitGroup{}
+
+ for _, p := range pc.ports {
+ wg.Add(1)
+ go func(p *port) { pc.checkPort(p); wg.Done() }(p)
+ }
+ wg.Wait()
+
+ mx := make(map[string]int64)
+
+ for _, p := range pc.ports {
+ mx[fmt.Sprintf("port_%d_current_state_duration", p.number)] = int64(p.inState)
+ mx[fmt.Sprintf("port_%d_latency", p.number)] = int64(p.latency)
+ mx[fmt.Sprintf("port_%d_%s", p.number, checkStateSuccess)] = 0
+ mx[fmt.Sprintf("port_%d_%s", p.number, checkStateTimeout)] = 0
+ mx[fmt.Sprintf("port_%d_%s", p.number, checkStateFailed)] = 0
+ mx[fmt.Sprintf("port_%d_%s", p.number, p.state)] = 1
+ }
+
+ return mx, nil
+}
+
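+// checkPort dials the TCP endpoint once, classifies the result as success, timeout, or failed, and records the connection latency on success.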
+func (pc *PortCheck) checkPort(p *port) {
+ start := time.Now()
+ conn, err := pc.dial("tcp", fmt.Sprintf("%s:%d", pc.Host, p.number), pc.Timeout.Duration())
+ dur := time.Since(start)
+
+ defer func() {
+ if conn != nil {
+ _ = conn.Close()
+ }
+ }()
+
+ if err != nil {
+ v, ok := err.(interface{ Timeout() bool })
+ if ok && v.Timeout() {
+ pc.setPortState(p, checkStateTimeout)
+ } else {
+ pc.setPortState(p, checkStateFailed)
+ }
+ return
+ }
+ pc.setPortState(p, checkStateSuccess)
+ p.latency = durationToMs(dur)
+}
+
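+// setPortState resets the time-in-state counter when the state changes and otherwise accumulates update_every seconds.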
+func (pc *PortCheck) setPortState(p *port, s checkState) {
+ if p.state != s {
+ p.inState = pc.UpdateEvery
+ p.state = s
+ } else {
+ p.inState += pc.UpdateEvery
+ }
+}
+
+func durationToMs(duration time.Duration) int {
+ return int(duration) / (int(time.Millisecond) / int(time.Nanosecond))
+}
diff --git a/src/go/plugin/go.d/modules/portcheck/config_schema.json b/src/go/plugin/go.d/modules/portcheck/config_schema.json
new file mode 100644
index 000000000..025b78f85
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/config_schema.json
@@ -0,0 +1,66 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Portcheck collector configuration.",
+ "description": "Collector for monitoring TCP service availability and response time.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection, including domain name resolution, in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "host": {
+ "title": "Network host",
+ "description": "The IP address or domain name of the network host.",
+ "type": "string"
+ },
+ "ports": {
+ "title": "Ports",
+ "description": "A list of ports to monitor for TCP service availability and response time.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Port",
+ "type": "integer",
+ "minimum": 1
+ },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ },
+ "required": [
+ "host",
+ "ports"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "host": {
+ "ui:placeholder": "127.0.0.1"
+ },
+ "ports": {
+ "ui:listFlavour": "list"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/portcheck/init.go b/src/go/plugin/go.d/modules/portcheck/init.go
new file mode 100644
index 000000000..17b402340
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/init.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package portcheck
+
+import (
+ "errors"
+ "net"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+type dialFunc func(network, address string, timeout time.Duration) (net.Conn, error)
+
+type port struct {
+ number int
+ state checkState
+ inState int
+ latency int
+}
+
+func (pc *PortCheck) validateConfig() error {
+ if pc.Host == "" {
+ return errors.New("'host' parameter not set")
+ }
+ if len(pc.Ports) == 0 {
+ return errors.New("'ports' parameter not set")
+ }
+ return nil
+}
+
+func (pc *PortCheck) initCharts() (*module.Charts, error) {
+ charts := module.Charts{}
+
+ for _, port := range pc.Ports {
+ if err := charts.Add(*newPortCharts(pc.Host, port)...); err != nil {
+ return nil, err
+ }
+ }
+
+ return &charts, nil
+}
+
+func (pc *PortCheck) initPorts() (ports []*port) {
+ for _, p := range pc.Ports {
+ ports = append(ports, &port{number: p})
+ }
+ return ports
+}
diff --git a/src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md b/src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md
new file mode 100644
index 000000000..9259afd3b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/integrations/tcp_endpoints.md
@@ -0,0 +1,252 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/portcheck/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/portcheck/metadata.yaml"
+sidebar_label: "TCP Endpoints"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# TCP Endpoints
+
+
+<img src="https://netdata.cloud/img/globe.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: portcheck
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors TCP service availability and response time.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per tcp endpoint
+
+These metrics refer to the TCP endpoint.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| host | host |
+| port | port |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| portcheck.status | success, failed, timeout | boolean |
+| portcheck.state_duration | time | seconds |
+| portcheck.latency | time | ms |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ portcheck_service_reachable ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | TCP host ${label:host} port ${label:port} liveness status |
+| [ portcheck_connection_timeouts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |
+| [ portcheck_connection_fails ](https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf) | portcheck.status | percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/portcheck.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/portcheck.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| host | Remote host address in IPv4 or IPv6 format, or a DNS name. | | yes |
+| ports | Remote host ports. Must be specified in numeric format. | | yes |
+| timeout | Timeout for establishing a TCP connection, in seconds. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Check SSH and telnet
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: server1
+ host: 127.0.0.1
+ ports:
+ - 22
+ - 23
+
+```
+</details>
+
+##### Check webserver with IPv6 address
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: server2
+ host: "[2001:DB8::1]"
+ ports:
+ - 80
+ - 8080
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Multiple instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: server1
+ host: 127.0.0.1
+ ports:
+ - 22
+ - 23
+
+ - name: server2
+ host: 203.0.113.10
+ ports:
+ - 22
+ - 23
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `portcheck` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m portcheck
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `portcheck` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep portcheck
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep portcheck /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep portcheck
+```
+
+
diff --git a/src/go/plugin/go.d/modules/portcheck/metadata.yaml b/src/go/plugin/go.d/modules/portcheck/metadata.yaml
new file mode 100644
index 000000000..c0ccfde1d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/metadata.yaml
@@ -0,0 +1,162 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-portcheck
+ plugin_name: go.d.plugin
+ module_name: portcheck
+ monitored_instance:
+ name: TCP Endpoints
+ link: ""
+ icon_filename: globe.svg
+ categories:
+ - data-collection.synthetic-checks
+ keywords: []
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors TCP service availability and response time.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/portcheck.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: host
+ description: Remote host address in IPv4 or IPv6 format, or a DNS name.
+ default_value: ""
+ required: true
+ - name: ports
+ description: Remote host ports. Must be specified in numeric format.
+ default_value: ""
+ required: true
+ - name: timeout
+ description: Timeout for establishing a TCP connection, in seconds.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Check SSH and telnet
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: server1
+ host: 127.0.0.1
+ ports:
+ - 22
+ - 23
+ - name: Check webserver with IPv6 address
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: server2
+ host: "[2001:DB8::1]"
+ ports:
+ - 80
+ - 8080
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Multiple instances.
+ config: |
+ jobs:
+ - name: server1
+ host: 127.0.0.1
+ ports:
+ - 22
+ - 23
+
+ - name: server2
+ host: 203.0.113.10
+ ports:
+ - 22
+ - 23
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: portcheck_service_reachable
+ metric: portcheck.status
+ info: "TCP host ${label:host} port ${label:port} liveness status"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf
+ - name: portcheck_connection_timeouts
+ metric: portcheck.status
+ info: "percentage of timed-out TCP connections to host ${label:host} port ${label:port} in the last 5 minutes"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf
+ - name: portcheck_connection_fails
+ metric: portcheck.status
+ info: "percentage of failed TCP connections to host ${label:host} port ${label:port} in the last 5 minutes"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/portcheck.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: tcp endpoint
+ description: These metrics refer to the TCP endpoint.
+ labels:
+ - name: host
+ description: host
+ - name: port
+ description: port
+ metrics:
+ - name: portcheck.status
+ description: TCP Check Status
+ unit: boolean
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failed
+ - name: timeout
+ - name: portcheck.state_duration
+ description: Current State Duration
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: portcheck.latency
+ description: TCP Connection Latency
+ unit: ms
+ chart_type: line
+ dimensions:
+ - name: time
diff --git a/src/go/plugin/go.d/modules/portcheck/portcheck.go b/src/go/plugin/go.d/modules/portcheck/portcheck.go
new file mode 100644
index 000000000..3a6da78ac
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/portcheck.go
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package portcheck
+
+import (
+ _ "embed"
+ "net"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("portcheck", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *PortCheck {
+ return &PortCheck{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ dial: net.DialTimeout,
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Host string `yaml:"host" json:"host"`
+ Ports []int `yaml:"ports" json:"ports"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type PortCheck struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ dial dialFunc
+
+ ports []*port
+}
+
+func (pc *PortCheck) Configuration() any {
+ return pc.Config
+}
+
+func (pc *PortCheck) Init() error {
+ if err := pc.validateConfig(); err != nil {
+ pc.Errorf("config validation: %v", err)
+ return err
+ }
+
+ charts, err := pc.initCharts()
+ if err != nil {
+ pc.Errorf("init charts: %v", err)
+ return err
+ }
+ pc.charts = charts
+
+ pc.ports = pc.initPorts()
+
+ pc.Debugf("using host: %s", pc.Host)
+ pc.Debugf("using ports: %v", pc.Ports)
+ pc.Debugf("using TCP connection timeout: %s", pc.Timeout)
+
+ return nil
+}
+
+func (pc *PortCheck) Check() error {
+ return nil
+}
+
+func (pc *PortCheck) Charts() *module.Charts {
+ return pc.charts
+}
+
+func (pc *PortCheck) Collect() map[string]int64 {
+ mx, err := pc.collect()
+ if err != nil {
+ pc.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (pc *PortCheck) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/portcheck/portcheck_test.go b/src/go/plugin/go.d/modules/portcheck/portcheck_test.go
new file mode 100644
index 000000000..86a2c9679
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/portcheck_test.go
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package portcheck
+
+import (
+ "errors"
+ "net"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPortCheck_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &PortCheck{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPortCheck_Init(t *testing.T) {
+ job := New()
+
+ job.Host = "127.0.0.1"
+ job.Ports = []int{39001, 39002}
+ assert.NoError(t, job.Init())
+ assert.Len(t, job.ports, 2)
+}
+func TestPortCheck_InitNG(t *testing.T) {
+ job := New()
+
+ assert.Error(t, job.Init())
+ job.Host = "127.0.0.1"
+ assert.Error(t, job.Init())
+ job.Ports = []int{39001, 39002}
+ assert.NoError(t, job.Init())
+}
+
+func TestPortCheck_Check(t *testing.T) {
+ assert.NoError(t, New().Check())
+}
+
+func TestPortCheck_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestPortCheck_Charts(t *testing.T) {
+ job := New()
+ job.Ports = []int{1, 2}
+ job.Host = "localhost"
+ require.NoError(t, job.Init())
+ assert.Len(t, *job.Charts(), len(chartsTmpl)*len(job.Ports))
+}
+
+func TestPortCheck_Collect(t *testing.T) {
+ job := New()
+
+ job.Host = "127.0.0.1"
+ job.Ports = []int{39001, 39002}
+ job.UpdateEvery = 5
+ job.dial = testDial(nil)
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ copyLatency := func(dst, src map[string]int64) {
+ for k := range dst {
+ if strings.HasSuffix(k, "latency") {
+ dst[k] = src[k]
+ }
+ }
+ }
+
+ expected := map[string]int64{
+ "port_39001_current_state_duration": int64(job.UpdateEvery),
+ "port_39001_failed": 0,
+ "port_39001_latency": 0,
+ "port_39001_success": 1,
+ "port_39001_timeout": 0,
+ "port_39002_current_state_duration": int64(job.UpdateEvery),
+ "port_39002_failed": 0,
+ "port_39002_latency": 0,
+ "port_39002_success": 1,
+ "port_39002_timeout": 0,
+ }
+ collected := job.Collect()
+ copyLatency(expected, collected)
+
+ assert.Equal(t, expected, collected)
+
+ expected = map[string]int64{
+ "port_39001_current_state_duration": int64(job.UpdateEvery) * 2,
+ "port_39001_failed": 0,
+ "port_39001_latency": 0,
+ "port_39001_success": 1,
+ "port_39001_timeout": 0,
+ "port_39002_current_state_duration": int64(job.UpdateEvery) * 2,
+ "port_39002_failed": 0,
+ "port_39002_latency": 0,
+ "port_39002_success": 1,
+ "port_39002_timeout": 0,
+ }
+ collected = job.Collect()
+ copyLatency(expected, collected)
+
+ assert.Equal(t, expected, collected)
+
+ job.dial = testDial(errors.New("checkStateFailed"))
+
+ expected = map[string]int64{
+ "port_39001_current_state_duration": int64(job.UpdateEvery),
+ "port_39001_failed": 1,
+ "port_39001_latency": 0,
+ "port_39001_success": 0,
+ "port_39001_timeout": 0,
+ "port_39002_current_state_duration": int64(job.UpdateEvery),
+ "port_39002_failed": 1,
+ "port_39002_latency": 0,
+ "port_39002_success": 0,
+ "port_39002_timeout": 0,
+ }
+ collected = job.Collect()
+ copyLatency(expected, collected)
+
+ assert.Equal(t, expected, collected)
+
+ job.dial = testDial(timeoutError{})
+
+ expected = map[string]int64{
+ "port_39001_current_state_duration": int64(job.UpdateEvery),
+ "port_39001_failed": 0,
+ "port_39001_latency": 0,
+ "port_39001_success": 0,
+ "port_39001_timeout": 1,
+ "port_39002_current_state_duration": int64(job.UpdateEvery),
+ "port_39002_failed": 0,
+ "port_39002_latency": 0,
+ "port_39002_success": 0,
+ "port_39002_timeout": 1,
+ }
+ collected = job.Collect()
+ copyLatency(expected, collected)
+
+ assert.Equal(t, expected, collected)
+}
+
+func testDial(err error) dialFunc {
+ return func(_, _ string, _ time.Duration) (net.Conn, error) { return &net.TCPConn{}, err }
+}
+
+type timeoutError struct{}
+
+func (timeoutError) Error() string { return "timeout" }
+func (timeoutError) Timeout() bool { return true }
diff --git a/src/go/plugin/go.d/modules/portcheck/testdata/config.json b/src/go/plugin/go.d/modules/portcheck/testdata/config.json
new file mode 100644
index 000000000..a69a6ac38
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/testdata/config.json
@@ -0,0 +1,8 @@
+{
+ "update_every": 123,
+ "host": "ok",
+ "ports": [
+ 123
+ ],
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/portcheck/testdata/config.yaml b/src/go/plugin/go.d/modules/portcheck/testdata/config.yaml
new file mode 100644
index 000000000..72bdfd549
--- /dev/null
+++ b/src/go/plugin/go.d/modules/portcheck/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+host: "ok"
+ports:
+ - 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/postfix/README.md b/src/go/plugin/go.d/modules/postfix/README.md
new file mode 120000
index 000000000..c62eb5c24
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/README.md
@@ -0,0 +1 @@
+integrations/postfix.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postfix/charts.go b/src/go/plugin/go.d/modules/postfix/charts.go
new file mode 100644
index 000000000..69c672460
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/charts.go
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postfix
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioPostfixQueueEmailsCount = module.Priority + iota
+ prioPostfixQueueSize
+)
+
+var charts = module.Charts{
+ queueEmailsCountChart.Copy(),
+ queueSizeChart.Copy(),
+}
+
+var (
+ queueEmailsCountChart = module.Chart{
+ ID: "postfix_queue_emails",
+ Title: "Postfix Queue Emails",
+ Units: "emails",
+ Fam: "queue",
+ Ctx: "postfix.qemails",
+ Type: module.Line,
+ Priority: prioPostfixQueueEmailsCount,
+ Dims: module.Dims{
+ {ID: "emails"},
+ },
+ }
+ queueSizeChart = module.Chart{
+ ID: "postfix_queue_size",
+ Title: "Postfix Queue Size",
+ Units: "KiB",
+ Fam: "queue",
+ Ctx: "postfix.qsize",
+ Type: module.Area,
+ Priority: prioPostfixQueueSize,
+ Dims: module.Dims{
+ {ID: "size"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/postfix/collect.go b/src/go/plugin/go.d/modules/postfix/collect.go
new file mode 100644
index 000000000..7afcd769d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/collect.go
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postfix
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type postqueueStats struct {
+ sizeKbyte int64
+ requests int64
+}
+
+func (p *Postfix) collect() (map[string]int64, error) {
+ bs, err := p.exec.list()
+ if err != nil {
+ return nil, err
+ }
+
+ stats, err := parsePostqueueOutput(bs)
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ mx["emails"] = stats.requests
+ mx["size"] = stats.sizeKbyte
+
+ return mx, nil
+}
+
+func parsePostqueueOutput(bs []byte) (*postqueueStats, error) {
+ if len(bs) == 0 {
+ return nil, errors.New("empty postqueue output")
+ }
+
+ var lastLine string
+ sc := bufio.NewScanner(bytes.NewReader(bs))
+ for sc.Scan() {
+ if line := strings.TrimSpace(sc.Text()); line != "" {
+ lastLine = line
+ }
+ }
+
+ if lastLine == "Mail queue is empty" {
+ return &postqueueStats{}, nil
+ }
+
+ // -- 3 Kbytes in 3 Requests.
+ parts := strings.Fields(lastLine)
+ if len(parts) < 5 {
+ return nil, fmt.Errorf("unexpected postqueue output ('%s')", lastLine)
+ }
+
+ size, err := strconv.ParseInt(parts[1], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected postqueue output ('%s')", lastLine)
+ }
+ requests, err := strconv.ParseInt(parts[4], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("unexpected postqueue output ('%s')", lastLine)
+ }
+
+ return &postqueueStats{sizeKbyte: size, requests: requests}, nil
+}
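`parsePostqueueOutput` only cares about the trailing summary line that `postqueue -p` prints, e.g. `-- 132422 Kbytes in 12991 Requests.` (the same line closes `testdata/postqueue.txt` below). A quick sketch of that field-position parsing:

```go
// Sketch only: parse the postqueue summary line the same way as above,
// taking the queue size from field 1 and the request count from field 4.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	last := "-- 132422 Kbytes in 12991 Requests."

	parts := strings.Fields(last) // ["--" "132422" "Kbytes" "in" "12991" "Requests."]
	sizeKbyte, _ := strconv.ParseInt(parts[1], 10, 64)
	requests, _ := strconv.ParseInt(parts[4], 10, 64)

	fmt.Println(sizeKbyte, requests) // 132422 12991
}
```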
diff --git a/src/go/plugin/go.d/modules/postfix/config_schema.json b/src/go/plugin/go.d/modules/postfix/config_schema.json
new file mode 100644
index 000000000..da416f14b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/config_schema.json
@@ -0,0 +1,47 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Postfix collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "binary_path": {
+ "title": "Binary path",
+ "description": "Path to the `postqueue` binary.",
+ "type": "string",
+ "default": "/usr/sbin/postqueue"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "required": [
+ "binary_path"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "binary_path": {
+ "ui:help": "If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postfix/exec.go b/src/go/plugin/go.d/modules/postfix/exec.go
new file mode 100644
index 000000000..1ca29331a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/exec.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postfix
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newPostqueueExec(binPath string, timeout time.Duration) *postqueueExec {
+ return &postqueueExec{
+ binPath: binPath,
+ timeout: timeout,
+ }
+}
+
+type postqueueExec struct {
+ *logger.Logger
+
+ binPath string
+ timeout time.Duration
+}
+
+func (p *postqueueExec) list() ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), p.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, p.binPath, "-p")
+ p.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/postfix/init.go b/src/go/plugin/go.d/modules/postfix/init.go
new file mode 100644
index 000000000..ffa50af8d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/init.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postfix
+
+import (
+ "errors"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func (p *Postfix) validateConfig() error {
+ if p.BinaryPath == "" {
+ return errors.New("no postqueue binary path specified")
+ }
+ return nil
+}
+
+func (p *Postfix) initPostqueueExec() (postqueueBinary, error) {
+ binPath := p.BinaryPath
+
+ if !strings.HasPrefix(binPath, "/") {
+ path, err := exec.LookPath(binPath)
+ if err != nil {
+ return nil, err
+ }
+ binPath = path
+ }
+
+ if _, err := os.Stat(binPath); err != nil {
+ return nil, err
+ }
+
+ pq := newPostqueueExec(binPath, p.Timeout.Duration())
+ pq.Logger = p.Logger
+
+ return pq, nil
+}
diff --git a/src/go/plugin/go.d/modules/postfix/integrations/postfix.md b/src/go/plugin/go.d/modules/postfix/integrations/postfix.md
new file mode 100644
index 000000000..503a8c66d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/integrations/postfix.md
@@ -0,0 +1,195 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/postfix/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/postfix/metadata.yaml"
+sidebar_label: "Postfix"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Mail Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Postfix
+
+
+<img src="https://netdata.cloud/img/postfix.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: postfix
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector retrieves statistics about the Postfix mail queue using the [postqueue](https://www.postfix.org/postqueue.1.html) command-line tool.
+
+
+It periodically executes the `postqueue -p` command. The collection interval is set to 10 seconds by default, but it is configurable.
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+Postfix has internal access controls for the mail queue. By default, all users can view the queue. If your system has stricter controls, grant the `netdata` user access by adding it to `authorized_mailq_users` in the `/etc/postfix/main.cf` file. For more details, refer to the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html).
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The collector executes `postqueue -p` to get Postfix queue statistics.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Postfix instance
+
+These metrics refer to the entire monitored application.
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postfix.qemails | emails | emails |
+| postfix.qsize | size | KiB |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/postfix.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/postfix.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| binary_path | Path to the `postqueue` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/sbin/postqueue | yes |
+| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom binary path
+
+The executable is not in the directories specified in the PATH environment variable.
+
+<details open><summary></summary>
+
+```yaml
+jobs:
+ - name: custom_path
+ binary_path: /usr/local/sbin/postqueue
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `postfix` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m postfix
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `postfix` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep postfix
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep postfix /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep postfix
+```
+
+
diff --git a/src/go/plugin/go.d/modules/postfix/metadata.yaml b/src/go/plugin/go.d/modules/postfix/metadata.yaml
new file mode 100644
index 000000000..3407ebb32
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/metadata.yaml
@@ -0,0 +1,106 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ plugin_name: go.d.plugin
+ module_name: postfix
+ monitored_instance:
+ name: Postfix
+ link: https://www.postfix.org/
+ categories:
+ - data-collection.mail-servers
+ icon_filename: "postfix.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - postfix
+ - mail
+ - mail server
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ This collector retrieves statistics about the Postfix mail queue using the [postqueue](https://www.postfix.org/postqueue.1.html) command-line tool.
+ method_description: >
+ It periodically executes the `postqueue -p` command. The collection interval is set to 10 seconds by default, but it is configurable.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: >
+ Postfix has internal access controls for the mail queue. By default, all users can view the queue. If your system has stricter controls, grant the `netdata` user access by adding it to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.
+ For more details, refer to the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html).
+ default_behavior:
+ auto_detection:
+ description: "The collector executes `postqueue -p` to get Postfix queue statistics."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "go.d/postfix.conf"
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: binary_path
+ description: Path to the `postqueue` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable.
+ default_value: /usr/sbin/postqueue
+ required: true
+ - name: timeout
+ description: Timeout for executing the binary, specified in seconds.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list:
+ - name: Custom binary path
+ description: The executable is not in the directories specified in the PATH environment variable.
+ config: |
+ jobs:
+ - name: custom_path
+ binary_path: /usr/local/sbin/postqueue
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: |
+ These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: postfix.qemails
+ description: Postfix Queue Emails
+ unit: emails
+ chart_type: line
+ dimensions:
+ - name: emails
+ - name: postfix.qsize
+ description: Postfix Queue Emails Size
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: size
diff --git a/src/go/plugin/go.d/modules/postfix/postfix.go b/src/go/plugin/go.d/modules/postfix/postfix.go
new file mode 100644
index 000000000..3622811ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/postfix.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postfix
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("postfix", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Postfix {
+ return &Postfix{
+ Config: Config{
+ BinaryPath: "/usr/sbin/postqueue",
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ BinaryPath string `yaml:"binary_path,omitempty" json:"binary_path"`
+}
+
+type (
+ Postfix struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec postqueueBinary
+ }
+ postqueueBinary interface {
+ list() ([]byte, error)
+ }
+)
+
+func (p *Postfix) Configuration() any {
+ return p.Config
+}
+
+func (p *Postfix) Init() error {
+ if err := p.validateConfig(); err != nil {
+ p.Errorf("config validation: %s", err)
+ return err
+ }
+
+ pq, err := p.initPostqueueExec()
+ if err != nil {
+ p.Errorf("postqueue exec initialization: %v", err)
+ return err
+ }
+ p.exec = pq
+
+ return nil
+}
+
+func (p *Postfix) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (p *Postfix) Charts() *module.Charts {
+ return p.charts
+}
+
+func (p *Postfix) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (p *Postfix) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/postfix/postfix_test.go b/src/go/plugin/go.d/modules/postfix/postfix_test.go
new file mode 100644
index 000000000..daccaaa6f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/postfix_test.go
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postfix
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataPostqueue, _ = os.ReadFile("testdata/postqueue.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataPostqueue": dataPostqueue,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPostfix_Configuration(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Postfix{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPostfix_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if 'binary_path' is not set": {
+ wantFail: true,
+ config: Config{
+ BinaryPath: "",
+ },
+ },
+ "fails if failed to find binary": {
+ wantFail: true,
+ config: Config{
+ BinaryPath: "postqueue!!!",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pf := New()
+ pf.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, pf.Init())
+ } else {
+ assert.NoError(t, pf.Init())
+ }
+ })
+ }
+}
+
+func TestPostfix_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Postfix
+ }{
+ "not initialized exec": {
+ prepare: func() *Postfix {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Postfix {
+ pf := New()
+ pf.exec = prepareMockOK()
+ _ = pf.Check()
+ return pf
+ },
+ },
+ "after collect": {
+ prepare: func() *Postfix {
+ pf := New()
+ pf.exec = prepareMockOK()
+ _ = pf.Collect()
+ return pf
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pf := test.prepare()
+
+ assert.NotPanics(t, pf.Cleanup)
+ })
+ }
+}
+
+func TestPostfix_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestPostfix_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockPostqueueExec
+ wantFail bool
+ }{
+ "success case": {
+ wantFail: false,
+ prepareMock: prepareMockOK,
+ },
+ "mail queue is empty": {
+ wantFail: false,
+ prepareMock: prepareMockEmptyMailQueue,
+ },
+ "error on list call": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnList,
+ },
+ "empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pf := New()
+ mock := test.prepareMock()
+ pf.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, pf.Check())
+ } else {
+ assert.NoError(t, pf.Check())
+ }
+ })
+ }
+}
+
+func TestPostfix_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockPostqueueExec
+ wantMetrics map[string]int64
+ }{
+ "success case": {
+ prepareMock: prepareMockOK,
+ wantMetrics: map[string]int64{
+ "emails": 12991,
+ "size": 132422,
+ },
+ },
+ "mail queue is empty": {
+ prepareMock: prepareMockEmptyMailQueue,
+ wantMetrics: map[string]int64{
+ "emails": 0,
+ "size": 0,
+ },
+ },
+ "error on list call": {
+ prepareMock: prepareMockErrOnList,
+ wantMetrics: nil,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pf := New()
+ mock := test.prepareMock()
+ pf.exec = mock
+
+ mx := pf.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ })
+ }
+}
+
+func prepareMockOK() *mockPostqueueExec {
+ return &mockPostqueueExec{
+ listData: dataPostqueue,
+ }
+}
+
+func prepareMockEmptyMailQueue() *mockPostqueueExec {
+ return &mockPostqueueExec{
+ listData: []byte("Mail queue is empty"),
+ }
+}
+
+func prepareMockErrOnList() *mockPostqueueExec {
+ return &mockPostqueueExec{
+ errOnList: true,
+ }
+}
+
+func prepareMockEmptyResponse() *mockPostqueueExec {
+ return &mockPostqueueExec{}
+}
+
+func prepareMockUnexpectedResponse() *mockPostqueueExec {
+ return &mockPostqueueExec{
+ listData: []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`),
+ }
+}
+
+type mockPostqueueExec struct {
+ errOnList bool
+ listData []byte
+}
+
+func (m *mockPostqueueExec) list() ([]byte, error) {
+ if m.errOnList {
+ return nil, errors.New("mock.list() error")
+ }
+
+ return m.listData, nil
+}
diff --git a/src/go/plugin/go.d/modules/postfix/testdata/config.json b/src/go/plugin/go.d/modules/postfix/testdata/config.json
new file mode 100644
index 000000000..d13d2cc1d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "binary_path": "/usr/sbin/postqueue"
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postfix/testdata/config.yaml b/src/go/plugin/go.d/modules/postfix/testdata/config.yaml
new file mode 100644
index 000000000..0ea793d30
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+timeout: 123.123
+binary_path: "/usr/sbin/postqueue"
diff --git a/src/go/plugin/go.d/modules/postfix/testdata/postqueue.txt b/src/go/plugin/go.d/modules/postfix/testdata/postqueue.txt
new file mode 100644
index 000000000..5a4b822cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postfix/testdata/postqueue.txt
@@ -0,0 +1,34 @@
+1FC3A100A1FF* 10438 Wed Jun 26 13:39:27 root@localhost.test
+ fotis@localhost.test
+
+D4BBA10097DF* 10438 Wed Jun 26 13:39:25 root@localhost.test
+ fotis@localhost.test
+
+078D8100A90D* 10438 Wed Jun 26 13:39:28 root@localhost.test
+ fotis@localhost.test
+
+A23BB100961F* 10438 Wed Jun 26 13:39:25 root@localhost.test
+ fotis@localhost.test
+
+CCF1D1009798* 10438 Wed Jun 26 13:39:25 root@localhost.test
+ fotis@localhost.test
+
+58897100885C* 10438 Wed Jun 26 13:39:24 root@localhost.test
+ fotis@localhost.test
+
+F1A951003C07* 10438 Wed Jun 26 13:39:23 root@localhost.test
+ fotis@localhost.test
+
+3A24A1003239* 10438 Wed Jun 26 13:39:23 root@localhost.test
+ fotis@localhost.test
+
+CAF5E1009FCC* 10438 Wed Jun 26 13:39:26 root@localhost.test
+ fotis@localhost.test
+
+752741009D2A* 10438 Wed Jun 26 13:39:26 root@localhost.test
+ fotis@localhost.test
+
+6B5FA10033D4* 10438 Wed Jun 26 13:39:23 root@localhost.test
+ fotis@localhost.test
+
+-- 132422 Kbytes in 12991 Requests.
diff --git a/src/go/plugin/go.d/modules/postgres/README.md b/src/go/plugin/go.d/modules/postgres/README.md
new file mode 120000
index 000000000..73b67b984
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/README.md
@@ -0,0 +1 @@
+integrations/postgresql.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/charts.go b/src/go/plugin/go.d/modules/postgres/charts.go
new file mode 100644
index 000000000..da9b04af0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/charts.go
@@ -0,0 +1,1400 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioConnectionsUtilization = module.Priority + iota
+ prioConnectionsUsage
+ prioConnectionsStateCount
+ prioDBConnectionsUtilization
+ prioDBConnectionsCount
+
+ prioTransactionsDuration
+ prioDBTransactionsRatio
+ prioDBTransactionsRate
+
+ prioQueriesDuration
+
+ prioDBOpsFetchedRowsRatio
+ prioDBOpsReadRowsRate
+ prioDBOpsWriteRowsRate
+ prioDBTempFilesCreatedRate
+ prioDBTempFilesIORate
+ prioTableOpsRowsRate
+ prioTableOpsRowsHOTRatio
+ prioTableOpsRowsHOTRate
+ prioTableScansRate
+ prioTableScansRowsRate
+
+ prioDBCacheIORatio
+ prioDBIORate
+ prioTableCacheIORatio
+ prioTableIORate
+ prioTableIndexCacheIORatio
+ prioTableIndexIORate
+ prioTableToastCacheIORatio
+ prioTableToastIORate
+ prioTableToastIndexCacheIORatio
+ prioTableToastIndexIORate
+
+ prioDBSize
+ prioTableTotalSize
+ prioIndexSize
+
+ prioTableBloatSizePerc
+ prioTableBloatSize
+ prioIndexBloatSizePerc
+ prioIndexBloatSize
+
+ prioLocksUtilization
+ prioDBLocksHeldCount
+ prioDBLocksAwaitedCount
+ prioDBDeadlocksRate
+
+ prioAutovacuumWorkersCount
+ prioTableAutovacuumSinceTime
+ prioTableVacuumSinceTime
+ prioTableAutoAnalyzeSinceTime
+ prioTableLastAnalyzeAgo
+
+ prioCheckpointsRate
+ prioCheckpointsTime
+ prioBGWriterHaltsRate
+ prioBuffersIORate
+ prioBuffersBackendFsyncRate
+ prioBuffersAllocRate
+ prioTXIDExhaustionTowardsAutovacuumPerc
+ prioTXIDExhaustionPerc
+ prioTXIDExhaustionOldestTXIDNum
+ prioTableRowsDeadRatio
+ prioTableRowsCount
+ prioTableNullColumns
+ prioIndexUsageStatus
+
+ prioReplicationAppWALLagSize
+ prioReplicationAppWALLagTime
+ prioReplicationSlotFilesCount
+ prioDBConflictsRate
+ prioDBConflictsReasonRate
+
+ prioWALIORate
+ prioWALFilesCount
+ prioWALArchivingFilesCount
+
+ prioDatabasesCount
+ prioCatalogRelationsCount
+ prioCatalogRelationsSize
+
+ prioUptime
+)
+
+var baseCharts = module.Charts{
+ serverConnectionsUtilizationChart.Copy(),
+ serverConnectionsUsageChart.Copy(),
+ serverConnectionsStateCount.Copy(),
+ locksUtilization.Copy(),
+ checkpointsChart.Copy(),
+ checkpointWriteChart.Copy(),
+ buffersIORateChart.Copy(),
+ buffersAllocRateChart.Copy(),
+ bgWriterHaltsRateChart.Copy(),
+ buffersBackendFsyncRateChart.Copy(),
+ walIORateChart.Copy(),
+ autovacuumWorkersCountChart.Copy(),
+ txidExhaustionTowardsAutovacuumPercChart.Copy(),
+ txidExhaustionPercChart.Copy(),
+ txidExhaustionOldestTXIDNumChart.Copy(),
+
+ catalogRelationSCountChart.Copy(),
+ catalogRelationsSizeChart.Copy(),
+ serverUptimeChart.Copy(),
+ databasesCountChart.Copy(),
+}
+
+var walFilesCharts = module.Charts{
+ walFilesCountChart.Copy(),
+ walArchivingFilesCountChart.Copy(),
+}
+
+func (p *Postgres) addWALFilesCharts() {
+ charts := walFilesCharts.Copy()
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+var (
+ serverConnectionsUtilizationChart = module.Chart{
+ ID: "connections_utilization",
+ Title: "Connections utilization",
+ Units: "percentage",
+ Fam: "connections",
+ Ctx: "postgres.connections_utilization",
+ Priority: prioConnectionsUtilization,
+ Dims: module.Dims{
+ {ID: "server_connections_utilization", Name: "used"},
+ },
+ }
+ serverConnectionsUsageChart = module.Chart{
+ ID: "connections_usage",
+ Title: "Connections usage",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "postgres.connections_usage",
+ Priority: prioConnectionsUsage,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "server_connections_available", Name: "available"},
+ {ID: "server_connections_used", Name: "used"},
+ },
+ }
+ serverConnectionsStateCount = module.Chart{
+ ID: "connections_state",
+ Title: "Connections in each state",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "postgres.connections_state_count",
+ Priority: prioConnectionsStateCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "server_connections_state_active", Name: "active"},
+ {ID: "server_connections_state_idle", Name: "idle"},
+ {ID: "server_connections_state_idle_in_transaction", Name: "idle_in_transaction"},
+ {ID: "server_connections_state_idle_in_transaction_aborted", Name: "idle_in_transaction_aborted"},
+ {ID: "server_connections_state_fastpath_function_call", Name: "fastpath_function_call"},
+ {ID: "server_connections_state_disabled", Name: "disabled"},
+ },
+ }
+
+ locksUtilization = module.Chart{
+ ID: "locks_utilization",
+ Title: "Acquired locks utilization",
+ Units: "percentage",
+ Fam: "locks",
+ Ctx: "postgres.locks_utilization",
+ Priority: prioLocksUtilization,
+ Dims: module.Dims{
+ {ID: "locks_utilization", Name: "used"},
+ },
+ }
+
+ checkpointsChart = module.Chart{
+ ID: "checkpoints_rate",
+ Title: "Checkpoints",
+ Units: "checkpoints/s",
+ Fam: "maintenance",
+ Ctx: "postgres.checkpoints_rate",
+ Priority: prioCheckpointsRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "checkpoints_timed", Name: "scheduled", Algo: module.Incremental},
+ {ID: "checkpoints_req", Name: "requested", Algo: module.Incremental},
+ },
+ }
+ // TODO: should be seconds, also it is units/s when using incremental...
+ checkpointWriteChart = module.Chart{
+ ID: "checkpoints_time",
+ Title: "Checkpoint time",
+ Units: "milliseconds",
+ Fam: "maintenance",
+ Ctx: "postgres.checkpoints_time",
+ Priority: prioCheckpointsTime,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "checkpoint_write_time", Name: "write", Algo: module.Incremental},
+ {ID: "checkpoint_sync_time", Name: "sync", Algo: module.Incremental},
+ },
+ }
+ bgWriterHaltsRateChart = module.Chart{
+ ID: "bgwriter_halts_rate",
+ Title: "Background writer scan halts",
+ Units: "halts/s",
+ Fam: "maintenance",
+ Ctx: "postgres.bgwriter_halts_rate",
+ Priority: prioBGWriterHaltsRate,
+ Dims: module.Dims{
+ {ID: "maxwritten_clean", Name: "maxwritten", Algo: module.Incremental},
+ },
+ }
+
+ buffersIORateChart = module.Chart{
+ ID: "buffers_io_rate",
+ Title: "Buffers written rate",
+ Units: "B/s",
+ Fam: "maintenance",
+ Ctx: "postgres.buffers_io_rate",
+ Priority: prioBuffersIORate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "buffers_checkpoint", Name: "checkpoint", Algo: module.Incremental},
+ {ID: "buffers_backend", Name: "backend", Algo: module.Incremental},
+ {ID: "buffers_clean", Name: "bgwriter", Algo: module.Incremental},
+ },
+ }
+ buffersBackendFsyncRateChart = module.Chart{
+ ID: "buffers_backend_fsync_rate",
+ Title: "Backend fsync calls",
+ Units: "calls/s",
+ Fam: "maintenance",
+ Ctx: "postgres.buffers_backend_fsync_rate",
+ Priority: prioBuffersBackendFsyncRate,
+ Dims: module.Dims{
+ {ID: "buffers_backend_fsync", Name: "fsync", Algo: module.Incremental},
+ },
+ }
+ buffersAllocRateChart = module.Chart{
+ ID: "buffers_alloc_rate",
+ Title: "Buffers allocated",
+ Units: "B/s",
+ Fam: "maintenance",
+ Ctx: "postgres.buffers_allocated_rate",
+ Priority: prioBuffersAllocRate,
+ Dims: module.Dims{
+ {ID: "buffers_alloc", Name: "allocated", Algo: module.Incremental},
+ },
+ }
+
+ walIORateChart = module.Chart{
+ ID: "wal_io_rate",
+ Title: "Write-Ahead Log writes",
+ Units: "B/s",
+ Fam: "wal",
+ Ctx: "postgres.wal_io_rate",
+ Priority: prioWALIORate,
+ Dims: module.Dims{
+ {ID: "wal_writes", Name: "written", Algo: module.Incremental},
+ },
+ }
+ walFilesCountChart = module.Chart{
+ ID: "wal_files_count",
+ Title: "Write-Ahead Log files",
+ Units: "files",
+ Fam: "wal",
+ Ctx: "postgres.wal_files_count",
+ Priority: prioWALFilesCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "wal_written_files", Name: "written"},
+ {ID: "wal_recycled_files", Name: "recycled"},
+ },
+ }
+
+ walArchivingFilesCountChart = module.Chart{
+ ID: "wal_archiving_files_count",
+ Title: "Write-Ahead Log archived files",
+ Units: "files/s",
+ Fam: "wal",
+ Ctx: "postgres.wal_archiving_files_count",
+ Priority: prioWALArchivingFilesCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "wal_archive_files_ready_count", Name: "ready"},
+ {ID: "wal_archive_files_done_count", Name: "done"},
+ },
+ }
+
+ autovacuumWorkersCountChart = module.Chart{
+ ID: "autovacuum_workers_count",
+ Title: "Autovacuum workers",
+ Units: "workers",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.autovacuum_workers_count",
+ Priority: prioAutovacuumWorkersCount,
+ Dims: module.Dims{
+ {ID: "autovacuum_analyze", Name: "analyze"},
+ {ID: "autovacuum_vacuum_analyze", Name: "vacuum_analyze"},
+ {ID: "autovacuum_vacuum", Name: "vacuum"},
+ {ID: "autovacuum_vacuum_freeze", Name: "vacuum_freeze"},
+ {ID: "autovacuum_brin_summarize", Name: "brin_summarize"},
+ },
+ }
+
+ txidExhaustionTowardsAutovacuumPercChart = module.Chart{
+ ID: "txid_exhaustion_towards_autovacuum_perc",
+ Title: "Percent towards emergency autovacuum",
+ Units: "percentage",
+ Fam: "maintenance",
+ Ctx: "postgres.txid_exhaustion_towards_autovacuum_perc",
+ Priority: prioTXIDExhaustionTowardsAutovacuumPerc,
+ Dims: module.Dims{
+ {ID: "percent_towards_emergency_autovacuum", Name: "emergency_autovacuum"},
+ },
+ }
+ txidExhaustionPercChart = module.Chart{
+ ID: "txid_exhaustion_perc",
+ Title: "Percent towards transaction ID wraparound",
+ Units: "percentage",
+ Fam: "maintenance",
+ Ctx: "postgres.txid_exhaustion_perc",
+ Priority: prioTXIDExhaustionPerc,
+ Dims: module.Dims{
+ {ID: "percent_towards_wraparound", Name: "txid_exhaustion"},
+ },
+ }
+ txidExhaustionOldestTXIDNumChart = module.Chart{
+ ID: "txid_exhaustion_oldest_txid_num",
+ Title: "Oldest transaction XID",
+ Units: "xid",
+ Fam: "maintenance",
+ Ctx: "postgres.txid_exhaustion_oldest_txid_num",
+ Priority: prioTXIDExhaustionOldestTXIDNum,
+ Dims: module.Dims{
+ {ID: "oldest_current_xid", Name: "xid"},
+ },
+ }
+
+ catalogRelationSCountChart = module.Chart{
+ ID: "catalog_relations_count",
+ Title: "Relation count",
+ Units: "relations",
+ Fam: "catalog",
+ Ctx: "postgres.catalog_relations_count",
+ Priority: prioCatalogRelationsCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "catalog_relkind_r_count", Name: "ordinary_table"},
+ {ID: "catalog_relkind_i_count", Name: "index"},
+ {ID: "catalog_relkind_S_count", Name: "sequence"},
+ {ID: "catalog_relkind_t_count", Name: "toast_table"},
+ {ID: "catalog_relkind_v_count", Name: "view"},
+ {ID: "catalog_relkind_m_count", Name: "materialized_view"},
+ {ID: "catalog_relkind_c_count", Name: "composite_type"},
+ {ID: "catalog_relkind_f_count", Name: "foreign_table"},
+ {ID: "catalog_relkind_p_count", Name: "partitioned_table"},
+ {ID: "catalog_relkind_I_count", Name: "partitioned_index"},
+ },
+ }
+ catalogRelationsSizeChart = module.Chart{
+ ID: "catalog_relations_size",
+ Title: "Relation size",
+ Units: "B",
+ Fam: "catalog",
+ Ctx: "postgres.catalog_relations_size",
+ Priority: prioCatalogRelationsSize,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "catalog_relkind_r_size", Name: "ordinary_table"},
+ {ID: "catalog_relkind_i_size", Name: "index"},
+ {ID: "catalog_relkind_S_size", Name: "sequence"},
+ {ID: "catalog_relkind_t_size", Name: "toast_table"},
+ {ID: "catalog_relkind_v_size", Name: "view"},
+ {ID: "catalog_relkind_m_size", Name: "materialized_view"},
+ {ID: "catalog_relkind_c_size", Name: "composite_type"},
+ {ID: "catalog_relkind_f_size", Name: "foreign_table"},
+ {ID: "catalog_relkind_p_size", Name: "partitioned_table"},
+ {ID: "catalog_relkind_I_size", Name: "partitioned_index"},
+ },
+ }
+
+ serverUptimeChart = module.Chart{
+ ID: "server_uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "postgres.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "server_uptime", Name: "uptime"},
+ },
+ }
+
+ databasesCountChart = module.Chart{
+ ID: "databases_count",
+ Title: "Number of databases",
+ Units: "databases",
+ Fam: "catalog",
+ Ctx: "postgres.databases_count",
+ Priority: prioDatabasesCount,
+ Dims: module.Dims{
+ {ID: "databases_count", Name: "databases"},
+ },
+ }
+
+ transactionsDurationChartTmpl = module.Chart{
+ ID: "transactions_duration",
+ Title: "Observed transactions time",
+ Units: "transactions/s",
+ Fam: "transactions",
+ Ctx: "postgres.transactions_duration",
+ Priority: prioTransactionsDuration,
+ Type: module.Stacked,
+ }
+ queriesDurationChartTmpl = module.Chart{
+ ID: "queries_duration",
+ Title: "Observed active queries time",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "postgres.queries_duration",
+ Priority: prioQueriesDuration,
+ Type: module.Stacked,
+ }
+)
+
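+// newRunningTimeHistogramChart builds a histogram chart from the given template: one
+// incremental dimension per configured bucket, named after the bucket's duration
+// (e.g. 100ms for a 0.1s bucket), plus a final "+Inf" bucket. For example, prefix
+// "query_running_time" with buckets [0.1, 0.5] produces the dims
+// "query_running_time_hist_bucket_1", "query_running_time_hist_bucket_2" and
+// "query_running_time_hist_bucket_inf".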
+func newRunningTimeHistogramChart(tmpl module.Chart, prefix string, buckets []float64) (*module.Chart, error) {
+ chart := tmpl.Copy()
+
+ for i, v := range buckets {
+ dim := &module.Dim{
+ ID: fmt.Sprintf("%s_hist_bucket_%d", prefix, i+1),
+ Name: time.Duration(v * float64(time.Second)).String(),
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+ }
+
+ dim := &module.Dim{
+ ID: fmt.Sprintf("%s_hist_bucket_inf", prefix),
+ Name: "+Inf",
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+
+ return chart, nil
+}
+
+func (p *Postgres) addTransactionsRunTimeHistogramChart() {
+ chart, err := newRunningTimeHistogramChart(
+ transactionsDurationChartTmpl,
+ "transaction_running_time",
+ p.XactTimeHistogram,
+ )
+ if err != nil {
+ p.Warning(err)
+ return
+ }
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addQueriesRunTimeHistogramChart() {
+ chart, err := newRunningTimeHistogramChart(
+ queriesDurationChartTmpl,
+ "query_running_time",
+ p.QueryTimeHistogram,
+ )
+ if err != nil {
+ p.Warning(err)
+ return
+ }
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+var (
+ replicationStandbyAppCharts = module.Charts{
+ replicationAppWALLagSizeChartTmpl.Copy(),
+ replicationAppWALLagTimeChartTmpl.Copy(),
+ }
+ replicationAppWALLagSizeChartTmpl = module.Chart{
+ ID: "replication_app_%s_wal_lag_size",
+ Title: "Standby application WAL lag size",
+ Units: "B",
+ Fam: "replication",
+ Ctx: "postgres.replication_app_wal_lag_size",
+ Priority: prioReplicationAppWALLagSize,
+ Dims: module.Dims{
+ {ID: "repl_standby_app_%s_wal_sent_lag_size", Name: "sent_lag"},
+ {ID: "repl_standby_app_%s_wal_write_lag_size", Name: "write_lag"},
+ {ID: "repl_standby_app_%s_wal_flush_lag_size", Name: "flush_lag"},
+ {ID: "repl_standby_app_%s_wal_replay_lag_size", Name: "replay_lag"},
+ },
+ }
+ replicationAppWALLagTimeChartTmpl = module.Chart{
+ ID: "replication_app_%s_wal_lag_time",
+ Title: "Standby application WAL lag time",
+ Units: "seconds",
+ Fam: "replication",
+ Ctx: "postgres.replication_app_wal_lag_time",
+ Priority: prioReplicationAppWALLagTime,
+ Dims: module.Dims{
+ {ID: "repl_standby_app_%s_wal_write_lag_time", Name: "write_lag"},
+ {ID: "repl_standby_app_%s_wal_flush_lag_time", Name: "flush_lag"},
+ {ID: "repl_standby_app_%s_wal_replay_lag_time", Name: "replay_lag"},
+ },
+ }
+)
+
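+// newReplicationStandbyAppCharts instantiates the standby application chart templates
+// for a single application: the "%s" placeholders in the chart and dim IDs are filled
+// with the application name and an "application" label is attached.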
+func newReplicationStandbyAppCharts(app string) *module.Charts {
+ charts := replicationStandbyAppCharts.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, app)
+ c.Labels = []module.Label{
+ {Key: "application", Value: app},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, app)
+ }
+ }
+ return charts
+}
+
+func (p *Postgres) addNewReplicationStandbyAppCharts(app string) {
+ charts := newReplicationStandbyAppCharts(app)
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeReplicationStandbyAppCharts(app string) {
+ prefix := fmt.Sprintf("replication_standby_app_%s_", app)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ replicationSlotCharts = module.Charts{
+ replicationSlotFilesCountChartTmpl.Copy(),
+ }
+ replicationSlotFilesCountChartTmpl = module.Chart{
+ ID: "replication_slot_%s_files_count",
+ Title: "Replication slot files",
+ Units: "files",
+ Fam: "replication",
+ Ctx: "postgres.replication_slot_files_count",
+ Priority: prioReplicationSlotFilesCount,
+ Dims: module.Dims{
+ {ID: "repl_slot_%s_replslot_wal_keep", Name: "wal_keep"},
+ {ID: "repl_slot_%s_replslot_files", Name: "pg_replslot_files"},
+ },
+ }
+)
+
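+// newReplicationSlotCharts instantiates the replication slot chart templates for a
+// single slot, filling the "%s" placeholders with the slot name and attaching a
+// "slot" label.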
+func newReplicationSlotCharts(slot string) *module.Charts {
+ charts := replicationSlotCharts.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, slot)
+ c.Labels = []module.Label{
+ {Key: "slot", Value: slot},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, slot)
+ }
+ }
+ return charts
+}
+
+func (p *Postgres) addNewReplicationSlotCharts(slot string) {
+ charts := newReplicationSlotCharts(slot)
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeReplicationSlotCharts(slot string) {
+ prefix := fmt.Sprintf("replication_slot_%s_", slot)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ dbChartsTmpl = module.Charts{
+ dbTransactionsRatioChartTmpl.Copy(),
+ dbTransactionsRateChartTmpl.Copy(),
+ dbConnectionsUtilizationChartTmpl.Copy(),
+ dbConnectionsCountChartTmpl.Copy(),
+ dbCacheIORatioChartTmpl.Copy(),
+ dbIORateChartTmpl.Copy(),
+ dbOpsFetchedRowsRatioChartTmpl.Copy(),
+ dbOpsReadRowsRateChartTmpl.Copy(),
+ dbOpsWriteRowsRateChartTmpl.Copy(),
+ dbDeadlocksRateChartTmpl.Copy(),
+ dbLocksHeldCountChartTmpl.Copy(),
+ dbLocksAwaitedCountChartTmpl.Copy(),
+ dbTempFilesCreatedRateChartTmpl.Copy(),
+ dbTempFilesIORateChartTmpl.Copy(),
+ dbSizeChartTmpl.Copy(),
+ }
+ dbTransactionsRatioChartTmpl = module.Chart{
+ ID: "db_%s_transactions_ratio",
+ Title: "Database transactions ratio",
+ Units: "percentage",
+ Fam: "transactions",
+ Ctx: "postgres.db_transactions_ratio",
+ Priority: prioDBTransactionsRatio,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "db_%s_xact_commit", Name: "committed", Algo: module.PercentOfIncremental},
+ {ID: "db_%s_xact_rollback", Name: "rollback", Algo: module.PercentOfIncremental},
+ },
+ }
+ dbTransactionsRateChartTmpl = module.Chart{
+ ID: "db_%s_transactions_rate",
+ Title: "Database transactions",
+ Units: "transactions/s",
+ Fam: "transactions",
+ Ctx: "postgres.db_transactions_rate",
+ Priority: prioDBTransactionsRate,
+ Dims: module.Dims{
+ {ID: "db_%s_xact_commit", Name: "committed", Algo: module.Incremental},
+ {ID: "db_%s_xact_rollback", Name: "rollback", Algo: module.Incremental},
+ },
+ }
+ dbConnectionsUtilizationChartTmpl = module.Chart{
+ ID: "db_%s_connections_utilization",
+ Title: "Database connections utilization",
+ Units: "percentage",
+ Fam: "connections",
+ Ctx: "postgres.db_connections_utilization",
+ Priority: prioDBConnectionsUtilization,
+ Dims: module.Dims{
+ {ID: "db_%s_numbackends_utilization", Name: "used"},
+ },
+ }
+ dbConnectionsCountChartTmpl = module.Chart{
+ ID: "db_%s_connections",
+ Title: "Database connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "postgres.db_connections_count",
+ Priority: prioDBConnectionsCount,
+ Dims: module.Dims{
+ {ID: "db_%s_numbackends", Name: "connections"},
+ },
+ }
+ dbCacheIORatioChartTmpl = module.Chart{
+ ID: "db_%s_cache_io_ratio",
+ Title: "Database buffer cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.db_cache_io_ratio",
+ Priority: prioDBCacheIORatio,
+ Dims: module.Dims{
+ {ID: "db_%s_blks_read_perc", Name: "miss"},
+ },
+ }
+ dbIORateChartTmpl = module.Chart{
+ ID: "db_%s_io_rate",
+ Title: "Database reads",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.db_io_rate",
+ Priority: prioDBIORate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "db_%s_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "db_%s_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ dbOpsFetchedRowsRatioChartTmpl = module.Chart{
+ ID: "db_%s_db_ops_fetched_rows_ratio",
+ Title: "Database rows fetched ratio",
+ Units: "percentage",
+ Fam: "throughput",
+ Ctx: "postgres.db_ops_fetched_rows_ratio",
+ Priority: prioDBOpsFetchedRowsRatio,
+ Dims: module.Dims{
+ {ID: "db_%s_tup_fetched_perc", Name: "fetched"},
+ },
+ }
+ dbOpsReadRowsRateChartTmpl = module.Chart{
+ ID: "db_%s_ops_read_rows_rate",
+ Title: "Database rows read",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.db_ops_read_rows_rate",
+ Priority: prioDBOpsReadRowsRate,
+ Dims: module.Dims{
+ {ID: "db_%s_tup_returned", Name: "returned", Algo: module.Incremental},
+ {ID: "db_%s_tup_fetched", Name: "fetched", Algo: module.Incremental},
+ },
+ }
+ dbOpsWriteRowsRateChartTmpl = module.Chart{
+ ID: "db_%s_ops_write_rows_rate",
+ Title: "Database rows written",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.db_ops_write_rows_rate",
+ Priority: prioDBOpsWriteRowsRate,
+ Dims: module.Dims{
+ {ID: "db_%s_tup_inserted", Name: "inserted", Algo: module.Incremental},
+ {ID: "db_%s_tup_deleted", Name: "deleted", Algo: module.Incremental},
+ {ID: "db_%s_tup_updated", Name: "updated", Algo: module.Incremental},
+ },
+ }
+ dbConflictsRateChartTmpl = module.Chart{
+ ID: "db_%s_conflicts_rate",
+ Title: "Database canceled queries",
+ Units: "queries/s",
+ Fam: "replication",
+ Ctx: "postgres.db_conflicts_rate",
+ Priority: prioDBConflictsRate,
+ Dims: module.Dims{
+ {ID: "db_%s_conflicts", Name: "conflicts", Algo: module.Incremental},
+ },
+ }
+ dbConflictsReasonRateChartTmpl = module.Chart{
+ ID: "db_%s_conflicts_reason_rate",
+ Title: "Database canceled queries by reason",
+ Units: "queries/s",
+ Fam: "replication",
+ Ctx: "postgres.db_conflicts_reason_rate",
+ Priority: prioDBConflictsReasonRate,
+ Dims: module.Dims{
+ {ID: "db_%s_confl_tablespace", Name: "tablespace", Algo: module.Incremental},
+ {ID: "db_%s_confl_lock", Name: "lock", Algo: module.Incremental},
+ {ID: "db_%s_confl_snapshot", Name: "snapshot", Algo: module.Incremental},
+ {ID: "db_%s_confl_bufferpin", Name: "bufferpin", Algo: module.Incremental},
+ {ID: "db_%s_confl_deadlock", Name: "deadlock", Algo: module.Incremental},
+ },
+ }
+ dbDeadlocksRateChartTmpl = module.Chart{
+ ID: "db_%s_deadlocks_rate",
+ Title: "Database deadlocks",
+ Units: "deadlocks/s",
+ Fam: "locks",
+ Ctx: "postgres.db_deadlocks_rate",
+ Priority: prioDBDeadlocksRate,
+ Dims: module.Dims{
+ {ID: "db_%s_deadlocks", Name: "deadlocks", Algo: module.Incremental},
+ },
+ }
+ dbLocksHeldCountChartTmpl = module.Chart{
+ ID: "db_%s_locks_held",
+ Title: "Database locks held",
+ Units: "locks",
+ Fam: "locks",
+ Ctx: "postgres.db_locks_held_count",
+ Priority: prioDBLocksHeldCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "db_%s_lock_mode_AccessShareLock_held", Name: "access_share"},
+ {ID: "db_%s_lock_mode_RowShareLock_held", Name: "row_share"},
+ {ID: "db_%s_lock_mode_RowExclusiveLock_held", Name: "row_exclusive"},
+ {ID: "db_%s_lock_mode_ShareUpdateExclusiveLock_held", Name: "share_update"},
+ {ID: "db_%s_lock_mode_ShareLock_held", Name: "share"},
+ {ID: "db_%s_lock_mode_ShareRowExclusiveLock_held", Name: "share_row_exclusive"},
+ {ID: "db_%s_lock_mode_ExclusiveLock_held", Name: "exclusive"},
+ {ID: "db_%s_lock_mode_AccessExclusiveLock_held", Name: "access_exclusive"},
+ },
+ }
+ dbLocksAwaitedCountChartTmpl = module.Chart{
+ ID: "db_%s_locks_awaited_count",
+ Title: "Database locks awaited",
+ Units: "locks",
+ Fam: "locks",
+ Ctx: "postgres.db_locks_awaited_count",
+ Priority: prioDBLocksAwaitedCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "db_%s_lock_mode_AccessShareLock_awaited", Name: "access_share"},
+ {ID: "db_%s_lock_mode_RowShareLock_awaited", Name: "row_share"},
+ {ID: "db_%s_lock_mode_RowExclusiveLock_awaited", Name: "row_exclusive"},
+ {ID: "db_%s_lock_mode_ShareUpdateExclusiveLock_awaited", Name: "share_update"},
+ {ID: "db_%s_lock_mode_ShareLock_awaited", Name: "share"},
+ {ID: "db_%s_lock_mode_ShareRowExclusiveLock_awaited", Name: "share_row_exclusive"},
+ {ID: "db_%s_lock_mode_ExclusiveLock_awaited", Name: "exclusive"},
+ {ID: "db_%s_lock_mode_AccessExclusiveLock_awaited", Name: "access_exclusive"},
+ },
+ }
+ dbTempFilesCreatedRateChartTmpl = module.Chart{
+ ID: "db_%s_temp_files_files_created_rate",
+ Title: "Database created temporary files",
+ Units: "files/s",
+ Fam: "throughput",
+ Ctx: "postgres.db_temp_files_created_rate",
+ Priority: prioDBTempFilesCreatedRate,
+ Dims: module.Dims{
+ {ID: "db_%s_temp_files", Name: "created", Algo: module.Incremental},
+ },
+ }
+ dbTempFilesIORateChartTmpl = module.Chart{
+ ID: "db_%s_temp_files_io_rate",
+ Title: "Database temporary files data written to disk",
+ Units: "B/s",
+ Fam: "throughput",
+ Ctx: "postgres.db_temp_files_io_rate",
+ Priority: prioDBTempFilesIORate,
+ Dims: module.Dims{
+ {ID: "db_%s_temp_bytes", Name: "written", Algo: module.Incremental},
+ },
+ }
+ dbSizeChartTmpl = module.Chart{
+ ID: "db_%s_size",
+ Title: "Database size",
+ Units: "B",
+ Fam: "size",
+ Ctx: "postgres.db_size",
+ Priority: prioDBSize,
+ Dims: module.Dims{
+ {ID: "db_%s_size", Name: "size"},
+ },
+ }
+)
+
+func (p *Postgres) addDBConflictsCharts(db *dbMetrics) {
+ tmpl := module.Charts{
+ dbConflictsRateChartTmpl.Copy(),
+ dbConflictsReasonRateChartTmpl.Copy(),
+ }
+ charts := newDatabaseCharts(tmpl.Copy(), db)
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
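+// newDatabaseCharts instantiates the given chart templates for a single database,
+// filling the "%s" placeholders in chart and dim IDs with the database name and
+// attaching a "database" label.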
+func newDatabaseCharts(tmpl *module.Charts, db *dbMetrics) *module.Charts {
+ charts := tmpl.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, db.name)
+ c.Labels = []module.Label{
+ {Key: "database", Value: db.name},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, db.name)
+ }
+ }
+ return charts
+}
+
+func (p *Postgres) addNewDatabaseCharts(db *dbMetrics) {
+ charts := newDatabaseCharts(dbChartsTmpl.Copy(), db)
+
+ if db.size == nil {
+ _ = charts.Remove(fmt.Sprintf(dbSizeChartTmpl.ID, db.name))
+ }
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeDatabaseCharts(db *dbMetrics) {
+ prefix := fmt.Sprintf("db_%s_", db.name)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ tableChartsTmpl = module.Charts{
+ tableRowsCountChartTmpl.Copy(),
+ tableDeadRowsDeadRatioChartTmpl.Copy(),
+ tableOpsRowsRateChartTmpl.Copy(),
+ tableOpsRowsHOTRatioChartTmpl.Copy(),
+ tableOpsRowsHOTRateChartTmpl.Copy(),
+ tableScansRateChartTmpl.Copy(),
+ tableScansRowsRateChartTmpl.Copy(),
+ tableNullColumnsCountChartTmpl.Copy(),
+ tableTotalSizeChartTmpl.Copy(),
+ tableBloatSizePercChartTmpl.Copy(),
+ tableBloatSizeChartTmpl.Copy(),
+ }
+
+ tableDeadRowsDeadRatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_rows_dead_ratio",
+ Title: "Table dead rows",
+ Units: "%",
+ Fam: "maintenance",
+ Ctx: "postgres.table_rows_dead_ratio",
+ Priority: prioTableRowsDeadRatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_dead_tup_perc", Name: "dead"},
+ },
+ }
+ tableRowsCountChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_rows_count",
+ Title: "Table total rows",
+ Units: "rows",
+ Fam: "maintenance",
+ Ctx: "postgres.table_rows_count",
+ Priority: prioTableRowsCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_live_tup", Name: "live"},
+ {ID: "table_%s_db_%s_schema_%s_n_dead_tup", Name: "dead"},
+ },
+ }
+ tableOpsRowsRateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_ops_rows_rate",
+ Title: "Table throughput",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.table_ops_rows_rate",
+ Priority: prioTableOpsRowsRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_tup_ins", Name: "inserted", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_n_tup_del", Name: "deleted", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_n_tup_upd", Name: "updated", Algo: module.Incremental},
+ },
+ }
+ tableOpsRowsHOTRatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_ops_rows_hot_ratio",
+ Title: "Table HOT updates ratio",
+ Units: "percentage",
+ Fam: "throughput",
+ Ctx: "postgres.table_ops_rows_hot_ratio",
+ Priority: prioTableOpsRowsHOTRatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_tup_hot_upd_perc", Name: "hot"},
+ },
+ }
+ tableOpsRowsHOTRateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_ops_rows_hot_rate",
+ Title: "Table HOT updates",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.table_ops_rows_hot_rate",
+ Priority: prioTableOpsRowsHOTRate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_tup_hot_upd", Name: "hot", Algo: module.Incremental},
+ },
+ }
+ tableCacheIORatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_cache_io_ratio",
+ Title: "Table I/O cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.table_cache_io_ratio",
+ Priority: prioTableCacheIORatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_heap_blks_read_perc", Name: "miss"},
+ },
+ }
+ tableIORateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_io_rate",
+ Title: "Table I/O",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.table_io_rate",
+ Priority: prioTableIORate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_heap_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_heap_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ tableIndexCacheIORatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_index_cache_io_ratio",
+ Title: "Table index I/O cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.table_index_cache_io_ratio",
+ Priority: prioTableIndexCacheIORatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_idx_blks_read_perc", Name: "miss", Algo: module.Incremental},
+ },
+ }
+ tableIndexIORateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_index_io_rate",
+ Title: "Table index I/O",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.table_index_io_rate",
+ Priority: prioTableIndexIORate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_idx_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_idx_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ tableTOASTCacheIORatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_toast_cache_io_ratio",
+ Title: "Table TOAST I/O cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.table_toast_cache_io_ratio",
+ Priority: prioTableToastCacheIORatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_toast_blks_read_perc", Name: "miss", Algo: module.Incremental},
+ },
+ }
+ tableTOASTIORateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_toast_io_rate",
+ Title: "Table TOAST I/O",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.table_toast_io_rate",
+ Priority: prioTableToastIORate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_toast_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_toast_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ tableTOASTIndexCacheIORatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_toast_index_cache_io_ratio",
+ Title: "Table TOAST index I/O cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.table_toast_index_cache_io_ratio",
+ Priority: prioTableToastIndexCacheIORatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_tidx_blks_read_perc", Name: "miss", Algo: module.Incremental},
+ },
+ }
+ tableTOASTIndexIORateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_toast_index_io_rate",
+ Title: "Table TOAST index I/O",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.table_toast_index_io_rate",
+ Priority: prioTableToastIndexIORate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_tidx_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_tidx_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ tableScansRateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_scans_rate",
+ Title: "Table scans",
+ Units: "scans/s",
+ Fam: "throughput",
+ Ctx: "postgres.table_scans_rate",
+ Priority: prioTableScansRate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_idx_scan", Name: "index", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_seq_scan", Name: "sequential", Algo: module.Incremental},
+ },
+ }
+ tableScansRowsRateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_scans_rows_rate",
+ Title: "Table live rows fetched by scans",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.table_scans_rows_rate",
+ Priority: prioTableScansRowsRate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_idx_tup_fetch", Name: "index", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_seq_tup_read", Name: "sequential", Algo: module.Incremental},
+ },
+ }
+ tableAutoVacuumSinceTimeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_autovacuum_since_time",
+ Title: "Table time since last auto VACUUM",
+ Units: "seconds",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.table_autovacuum_since_time",
+ Priority: prioTableAutovacuumSinceTime,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_last_autovacuum_ago", Name: "time"},
+ },
+ }
+ tableVacuumSinceTimeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_vacuum_since_time",
+ Title: "Table time since last manual VACUUM",
+ Units: "seconds",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.table_vacuum_since_time",
+ Priority: prioTableVacuumSinceTime,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_last_vacuum_ago", Name: "time"},
+ },
+ }
+ tableAutoAnalyzeSinceTimeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_autoanalyze_since_time",
+ Title: "Table time since last auto ANALYZE",
+ Units: "seconds",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.table_autoanalyze_since_time",
+ Priority: prioTableAutoAnalyzeSinceTime,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_last_autoanalyze_ago", Name: "time"},
+ },
+ }
+ tableAnalyzeSinceTimeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_analyze_since_time",
+ Title: "Table time since last manual ANALYZE",
+ Units: "seconds",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.table_analyze_since_time",
+ Priority: prioTableLastAnalyzeAgo,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_last_analyze_ago", Name: "time"},
+ },
+ }
+ tableNullColumnsCountChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_null_columns_count",
+ Title: "Table null columns",
+ Units: "columns",
+ Fam: "maintenance",
+ Ctx: "postgres.table_null_columns_count",
+ Priority: prioTableNullColumns,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_null_columns", Name: "null"},
+ },
+ }
+ tableTotalSizeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_total_size",
+ Title: "Table total size",
+ Units: "B",
+ Fam: "size",
+ Ctx: "postgres.table_total_size",
+ Priority: prioTableTotalSize,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_total_size", Name: "size"},
+ },
+ }
+ tableBloatSizePercChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_bloat_size_perc",
+ Title: "Table bloat size percentage",
+ Units: "percentage",
+ Fam: "bloat",
+ Ctx: "postgres.table_bloat_size_perc",
+ Priority: prioTableBloatSizePerc,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_bloat_size_perc", Name: "bloat"},
+ },
+ Vars: module.Vars{
+ {ID: "table_%s_db_%s_schema_%s_total_size", Name: "table_size"},
+ },
+ }
+ tableBloatSizeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_bloat_size",
+ Title: "Table bloat size",
+ Units: "B",
+ Fam: "bloat",
+ Ctx: "postgres.table_bloat_size",
+ Priority: prioTableBloatSize,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_bloat_size", Name: "bloat"},
+ },
+ }
+)
+
+func newTableCharts(tbl *tableMetrics) *module.Charts {
+ charts := tableChartsTmpl.Copy()
+
+ if tbl.bloatSize == nil {
+ _ = charts.Remove(tableBloatSizeChartTmpl.ID)
+ _ = charts.Remove(tableBloatSizePercChartTmpl.ID)
+ }
+
+ for i, chart := range *charts {
+ (*charts)[i] = newTableChart(chart, tbl)
+ }
+
+ return charts
+}
+
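+// newTableChart instantiates a single table chart template: the chart, dim and var IDs
+// are formatted with the table, database and schema names, and database/schema/table
+// labels (plus parent_table for partitions) are attached.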
+func newTableChart(chart *module.Chart, tbl *tableMetrics) *module.Chart {
+ chart = chart.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, tbl.name, tbl.db, tbl.schema)
+ chart.Labels = []module.Label{
+ {Key: "database", Value: tbl.db},
+ {Key: "schema", Value: tbl.schema},
+ {Key: "table", Value: tbl.name},
+ {Key: "parent_table", Value: tbl.parentName},
+ }
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, tbl.name, tbl.db, tbl.schema)
+ }
+ for _, v := range chart.Vars {
+ v.ID = fmt.Sprintf(v.ID, tbl.name, tbl.db, tbl.schema)
+ }
+ return chart
+}
+
+func (p *Postgres) addNewTableCharts(tbl *tableMetrics) {
+ charts := newTableCharts(tbl)
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableLastAutoVacuumAgoChart(tbl *tableMetrics) {
+ chart := newTableChart(tableAutoVacuumSinceTimeChartTmpl.Copy(), tbl)
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableLastVacuumAgoChart(tbl *tableMetrics) {
+ chart := newTableChart(tableVacuumSinceTimeChartTmpl.Copy(), tbl)
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableLastAutoAnalyzeAgoChart(tbl *tableMetrics) {
+ chart := newTableChart(tableAutoAnalyzeSinceTimeChartTmpl.Copy(), tbl)
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableLastAnalyzeAgoChart(tbl *tableMetrics) {
+ chart := newTableChart(tableAnalyzeSinceTimeChartTmpl.Copy(), tbl)
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableIOCharts(tbl *tableMetrics) {
+ charts := module.Charts{
+ newTableChart(tableCacheIORatioChartTmpl.Copy(), tbl),
+ newTableChart(tableIORateChartTmpl.Copy(), tbl),
+ }
+
+ if err := p.Charts().Add(charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableIndexIOCharts(tbl *tableMetrics) {
+ charts := module.Charts{
+ newTableChart(tableIndexCacheIORatioChartTmpl.Copy(), tbl),
+ newTableChart(tableIndexIORateChartTmpl.Copy(), tbl),
+ }
+
+ if err := p.Charts().Add(charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableTOASTIOCharts(tbl *tableMetrics) {
+ charts := module.Charts{
+ newTableChart(tableTOASTCacheIORatioChartTmpl.Copy(), tbl),
+ newTableChart(tableTOASTIORateChartTmpl.Copy(), tbl),
+ }
+
+ if err := p.Charts().Add(charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableTOASTIndexIOCharts(tbl *tableMetrics) {
+ charts := module.Charts{
+ newTableChart(tableTOASTIndexCacheIORatioChartTmpl.Copy(), tbl),
+ newTableChart(tableTOASTIndexIORateChartTmpl.Copy(), tbl),
+ }
+
+ if err := p.Charts().Add(charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeTableCharts(tbl *tableMetrics) {
+ prefix := fmt.Sprintf("table_%s_db_%s_schema_%s", tbl.name, tbl.db, tbl.schema)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ indexChartsTmpl = module.Charts{
+ indexSizeChartTmpl.Copy(),
+ indexBloatSizePercChartTmpl.Copy(),
+ indexBloatSizeChartTmpl.Copy(),
+ indexUsageStatusChartTmpl.Copy(),
+ }
+ indexSizeChartTmpl = module.Chart{
+ ID: "index_%s_table_%s_db_%s_schema_%s_size",
+ Title: "Index size",
+ Units: "B",
+ Fam: "size",
+ Ctx: "postgres.index_size",
+ Priority: prioIndexSize,
+ Dims: module.Dims{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_size", Name: "size"},
+ },
+ }
+ indexBloatSizePercChartTmpl = module.Chart{
+ ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size_perc",
+ Title: "Index bloat size percentage",
+ Units: "percentage",
+ Fam: "bloat",
+ Ctx: "postgres.index_bloat_size_perc",
+ Priority: prioIndexBloatSizePerc,
+ Dims: module.Dims{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size_perc", Name: "bloat"},
+ },
+ Vars: module.Vars{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_size", Name: "index_size"},
+ },
+ }
+ indexBloatSizeChartTmpl = module.Chart{
+ ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size",
+ Title: "Index bloat size",
+ Units: "B",
+ Fam: "bloat",
+ Ctx: "postgres.index_bloat_size",
+ Priority: prioIndexBloatSize,
+ Dims: module.Dims{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size", Name: "bloat"},
+ },
+ }
+ indexUsageStatusChartTmpl = module.Chart{
+ ID: "index_%s_table_%s_db_%s_schema_%s_usage_status",
+ Title: "Index usage status",
+ Units: "status",
+ Fam: "maintenance",
+ Ctx: "postgres.index_usage_status",
+ Priority: prioIndexUsageStatus,
+ Dims: module.Dims{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_usage_status_used", Name: "used"},
+ {ID: "index_%s_table_%s_db_%s_schema_%s_usage_status_unused", Name: "unused"},
+ },
+ }
+)
+
+func (p *Postgres) addNewIndexCharts(idx *indexMetrics) {
+ charts := indexChartsTmpl.Copy()
+
+ if idx.bloatSize == nil {
+ _ = charts.Remove(indexBloatSizeChartTmpl.ID)
+ _ = charts.Remove(indexBloatSizePercChartTmpl.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, idx.name, idx.table, idx.db, idx.schema)
+ chart.Labels = []module.Label{
+ {Key: "database", Value: idx.db},
+ {Key: "schema", Value: idx.schema},
+ {Key: "table", Value: idx.table},
+ {Key: "parent_table", Value: idx.parentTable},
+ {Key: "index", Value: idx.name},
+ }
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, idx.name, idx.table, idx.db, idx.schema)
+ }
+ for _, v := range chart.Vars {
+ v.ID = fmt.Sprintf(v.ID, idx.name, idx.table, idx.db, idx.schema)
+ }
+ }
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeIndexCharts(idx *indexMetrics) {
+ prefix := fmt.Sprintf("index_%s_table_%s_db_%s_schema_%s", idx.name, idx.table, idx.db, idx.schema)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/postgres/collect.go b/src/go/plugin/go.d/modules/postgres/collect.go
new file mode 100644
index 000000000..6186932c0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/collect.go
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "regexp"
+ "strconv"
+ "time"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/jackc/pgx/v5/stdlib"
+)
+
+const (
+ pgVersion94 = 9_04_00
+ pgVersion10 = 10_00_00
+ pgVersion11 = 11_00_00
+)
+
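+// collect opens the primary connection on first use and caches server facts (version,
+// superuser flag, recovery status), periodically re-reads max_connections and the lock
+// limit, then runs the global, replication, database, table and index queries. The
+// expensive bloat and null-column queries run only once per doSlowEvery interval.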
+func (p *Postgres) collect() (map[string]int64, error) {
+ if p.db == nil {
+ db, err := p.openPrimaryConnection()
+ if err != nil {
+ return nil, err
+ }
+ p.db = db
+ }
+
+ if p.pgVersion == 0 {
+ ver, err := p.doQueryServerVersion()
+ if err != nil {
+ return nil, fmt.Errorf("querying server version error: %v", err)
+ }
+ p.pgVersion = ver
+ p.Debugf("connected to PostgreSQL v%d", p.pgVersion)
+ }
+
+ if p.superUser == nil {
+ v, err := p.doQueryIsSuperUser()
+ if err != nil {
+ return nil, fmt.Errorf("querying is super user error: %v", err)
+ }
+ p.superUser = &v
+ p.Debugf("connected as super user: %v", *p.superUser)
+ }
+
+ if p.pgIsInRecovery == nil {
+ v, err := p.doQueryPGIsInRecovery()
+ if err != nil {
+ return nil, fmt.Errorf("querying recovery status error: %v", err)
+ }
+ p.pgIsInRecovery = &v
+ p.Debugf("the instance is in recovery mode: %v", *p.pgIsInRecovery)
+ }
+
+ now := time.Now()
+
+ if now.Sub(p.recheckSettingsTime) > p.recheckSettingsEvery {
+ p.recheckSettingsTime = now
+ maxConn, err := p.doQuerySettingsMaxConnections()
+ if err != nil {
+ return nil, fmt.Errorf("querying settings max connections error: %v", err)
+ }
+ p.mx.maxConnections = maxConn
+
+ maxLocks, err := p.doQuerySettingsMaxLocksHeld()
+ if err != nil {
+ return nil, fmt.Errorf("querying settings max locks held error: %v", err)
+ }
+ p.mx.maxLocksHeld = maxLocks
+ }
+
+ p.resetMetrics()
+
+ if p.pgVersion >= pgVersion10 {
+ // need 'backend_type' in pg_stat_activity
+ p.addXactQueryRunningTimeChartsOnce.Do(func() {
+ p.addTransactionsRunTimeHistogramChart()
+ p.addQueriesRunTimeHistogramChart()
+ })
+ }
+ if p.isSuperUser() {
+ p.addWALFilesChartsOnce.Do(p.addWALFilesCharts)
+ }
+
+ if err := p.doQueryGlobalMetrics(); err != nil {
+ return nil, err
+ }
+ if err := p.doQueryReplicationMetrics(); err != nil {
+ return nil, err
+ }
+ if err := p.doQueryDatabasesMetrics(); err != nil {
+ return nil, err
+ }
+ if p.dbSr != nil {
+ if err := p.doQueryQueryableDatabases(); err != nil {
+ return nil, err
+ }
+ }
+ if err := p.doQueryTablesMetrics(); err != nil {
+ return nil, err
+ }
+ if err := p.doQueryIndexesMetrics(); err != nil {
+ return nil, err
+ }
+
+ if now.Sub(p.doSlowTime) > p.doSlowEvery {
+ p.doSlowTime = now
+ if err := p.doQueryBloat(); err != nil {
+ return nil, err
+ }
+ if err := p.doQueryColumns(); err != nil {
+ return nil, err
+ }
+ }
+
+ mx := make(map[string]int64)
+ p.collectMetrics(mx)
+
+ return mx, nil
+}
+
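+// openPrimaryConnection opens a single-connection pool for the configured DSN and
+// verifies it with a ping bounded by the configured timeout.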
+func (p *Postgres) openPrimaryConnection() (*sql.DB, error) {
+ db, err := sql.Open("pgx", p.DSN)
+ if err != nil {
+ return nil, fmt.Errorf("error on opening a connection with the Postgres database [%s]: %v", p.DSN, err)
+ }
+
+ db.SetMaxOpenConns(1)
+ db.SetMaxIdleConns(1)
+ db.SetConnMaxLifetime(10 * time.Minute)
+
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ if err := db.PingContext(ctx); err != nil {
+ _ = db.Close()
+ return nil, fmt.Errorf("error on pinging the Postgres database [%s]: %v", p.DSN, err)
+ }
+
+ return db, nil
+}
+
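+// openSecondaryConnection opens an additional single-connection pool to the given
+// database, reusing the configured DSN with only the database name overridden. It
+// returns the pgx stdlib connection string so the caller can unregister it once the
+// connection is closed.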
+func (p *Postgres) openSecondaryConnection(dbname string) (*sql.DB, string, error) {
+ cfg, err := pgx.ParseConfig(p.DSN)
+ if err != nil {
+ return nil, "", fmt.Errorf("error on parsing DSN [%s]: %v", p.DSN, err)
+ }
+
+ cfg.Database = dbname
+ connStr := stdlib.RegisterConnConfig(cfg)
+
+ db, err := sql.Open("pgx", connStr)
+ if err != nil {
+ stdlib.UnregisterConnConfig(connStr)
+ return nil, "", fmt.Errorf("error on opening a secondary connection with the Postgres database [%s]: %v", dbname, err)
+ }
+
+ db.SetMaxOpenConns(1)
+ db.SetMaxIdleConns(1)
+ db.SetConnMaxLifetime(10 * time.Minute)
+
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ if err := db.PingContext(ctx); err != nil {
+ stdlib.UnregisterConnConfig(connStr)
+ _ = db.Close()
+ return nil, "", fmt.Errorf("error on pinging the secondary Postgres database [%s]: %v", dbname, err)
+ }
+
+ return db, connStr, nil
+}
+
+func (p *Postgres) isSuperUser() bool { return p.superUser != nil && *p.superUser }
+
+func (p *Postgres) isPGInRecovery() bool { return p.pgIsInRecovery != nil && *p.pgIsInRecovery }
+
+func (p *Postgres) getDBMetrics(name string) *dbMetrics {
+ db, ok := p.mx.dbs[name]
+ if !ok {
+ db = &dbMetrics{name: name}
+ p.mx.dbs[name] = db
+ }
+ return db
+}
+
+func (p *Postgres) getTableMetrics(name, db, schema string) *tableMetrics {
+ key := name + "_" + db + "_" + schema
+ m, ok := p.mx.tables[key]
+ if !ok {
+ m = &tableMetrics{db: db, schema: schema, name: name}
+ p.mx.tables[key] = m
+ }
+ return m
+}
+
+func (p *Postgres) hasTableMetrics(name, db, schema string) bool {
+ key := name + "_" + db + "_" + schema
+ _, ok := p.mx.tables[key]
+ return ok
+}
+
+func (p *Postgres) getIndexMetrics(name, table, db, schema string) *indexMetrics {
+ key := name + "_" + table + "_" + db + "_" + schema
+ m, ok := p.mx.indexes[key]
+ if !ok {
+ m = &indexMetrics{name: name, db: db, schema: schema, table: table}
+ p.mx.indexes[key] = m
+ }
+ return m
+}
+
+func (p *Postgres) hasIndexMetrics(name, table, db, schema string) bool {
+ key := name + "_" + table + "_" + db + "_" + schema
+ _, ok := p.mx.indexes[key]
+ return ok
+}
+
+func (p *Postgres) getReplAppMetrics(name string) *replStandbyAppMetrics {
+ app, ok := p.mx.replApps[name]
+ if !ok {
+ app = &replStandbyAppMetrics{name: name}
+ p.mx.replApps[name] = app
+ }
+ return app
+}
+
+func (p *Postgres) getReplSlotMetrics(name string) *replSlotMetrics {
+ slot, ok := p.mx.replSlots[name]
+ if !ok {
+ slot = &replSlotMetrics{name: name}
+ p.mx.replSlots[name] = slot
+ }
+ return slot
+}
+
+func parseInt(s string) int64 {
+ v, _ := strconv.ParseInt(s, 10, 64)
+ return v
+}
+
+func parseFloat(s string) int64 {
+ v, _ := strconv.ParseFloat(s, 64)
+ return int64(v)
+}
+
+func newInt(v int64) *int64 {
+ return &v
+}
+
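+// calcPercentage returns value as an integer percentage of total (0 when total is 0),
+// always non-negative; e.g. calcPercentage(25, 200) == 12.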
+func calcPercentage(value, total int64) (v int64) {
+ if total == 0 {
+ return 0
+ }
+ if v = value * 100 / total; v < 0 {
+ v = -v
+ }
+ return v
+}
+
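+// calcDeltaPercentage returns a's delta as a percentage of the combined a+b delta;
+// it is used for ratios such as cache miss percentages of two incremental counters.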
+func calcDeltaPercentage(a, b incDelta) int64 {
+ return calcPercentage(a.delta(), a.delta()+b.delta())
+}
+
+func removeSpaces(s string) string {
+ return reSpace.ReplaceAllString(s, "_")
+}
+
+var reSpace = regexp.MustCompile(`\s+`)
diff --git a/src/go/plugin/go.d/modules/postgres/collect_metrics.go b/src/go/plugin/go.d/modules/postgres/collect_metrics.go
new file mode 100644
index 000000000..84f9abbc7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/collect_metrics.go
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import "fmt"
+
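+// collectMetrics flattens the server, database, table, index and replication metrics
+// gathered during this cycle into the mx map, adding charts for newly seen objects and
+// removing charts for objects that are no longer reported.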
+func (p *Postgres) collectMetrics(mx map[string]int64) {
+ mx["server_connections_used"] = p.mx.connUsed
+ if p.mx.maxConnections > 0 {
+ mx["server_connections_available"] = p.mx.maxConnections - p.mx.connUsed
+ mx["server_connections_utilization"] = calcPercentage(p.mx.connUsed, p.mx.maxConnections)
+ }
+ p.mx.xactTimeHist.WriteTo(mx, "transaction_running_time_hist", 1, 1)
+ p.mx.queryTimeHist.WriteTo(mx, "query_running_time_hist", 1, 1)
+ mx["server_uptime"] = p.mx.uptime
+ mx["server_connections_state_active"] = p.mx.connStateActive
+ mx["server_connections_state_idle"] = p.mx.connStateIdle
+ mx["server_connections_state_idle_in_transaction"] = p.mx.connStateIdleInTrans
+ mx["server_connections_state_idle_in_transaction_aborted"] = p.mx.connStateIdleInTransAborted
+ mx["server_connections_state_fastpath_function_call"] = p.mx.connStateFastpathFunctionCall
+ mx["server_connections_state_disabled"] = p.mx.connStateDisabled
+ mx["checkpoints_timed"] = p.mx.checkpointsTimed
+ mx["checkpoints_req"] = p.mx.checkpointsReq
+ mx["checkpoint_write_time"] = p.mx.checkpointWriteTime
+ mx["checkpoint_sync_time"] = p.mx.checkpointSyncTime
+ mx["buffers_checkpoint"] = p.mx.buffersCheckpoint
+ mx["buffers_clean"] = p.mx.buffersClean
+ mx["maxwritten_clean"] = p.mx.maxwrittenClean
+ mx["buffers_backend"] = p.mx.buffersBackend
+ mx["buffers_backend_fsync"] = p.mx.buffersBackendFsync
+ mx["buffers_alloc"] = p.mx.buffersAlloc
+ mx["oldest_current_xid"] = p.mx.oldestXID
+ mx["percent_towards_wraparound"] = p.mx.percentTowardsWraparound
+ mx["percent_towards_emergency_autovacuum"] = p.mx.percentTowardsEmergencyAutovacuum
+ mx["wal_writes"] = p.mx.walWrites
+ mx["wal_recycled_files"] = p.mx.walRecycledFiles
+ mx["wal_written_files"] = p.mx.walWrittenFiles
+ mx["wal_archive_files_ready_count"] = p.mx.walArchiveFilesReady
+ mx["wal_archive_files_done_count"] = p.mx.walArchiveFilesDone
+ mx["catalog_relkind_r_count"] = p.mx.relkindOrdinaryTable
+ mx["catalog_relkind_i_count"] = p.mx.relkindIndex
+ mx["catalog_relkind_S_count"] = p.mx.relkindSequence
+ mx["catalog_relkind_t_count"] = p.mx.relkindTOASTTable
+ mx["catalog_relkind_v_count"] = p.mx.relkindView
+ mx["catalog_relkind_m_count"] = p.mx.relkindMatView
+ mx["catalog_relkind_c_count"] = p.mx.relkindCompositeType
+ mx["catalog_relkind_f_count"] = p.mx.relkindForeignTable
+ mx["catalog_relkind_p_count"] = p.mx.relkindPartitionedTable
+ mx["catalog_relkind_I_count"] = p.mx.relkindPartitionedIndex
+ mx["catalog_relkind_r_size"] = p.mx.relkindOrdinaryTableSize
+ mx["catalog_relkind_i_size"] = p.mx.relkindIndexSize
+ mx["catalog_relkind_S_size"] = p.mx.relkindSequenceSize
+ mx["catalog_relkind_t_size"] = p.mx.relkindTOASTTableSize
+ mx["catalog_relkind_v_size"] = p.mx.relkindViewSize
+ mx["catalog_relkind_m_size"] = p.mx.relkindMatViewSize
+ mx["catalog_relkind_c_size"] = p.mx.relkindCompositeTypeSize
+ mx["catalog_relkind_f_size"] = p.mx.relkindForeignTableSize
+ mx["catalog_relkind_p_size"] = p.mx.relkindPartitionedTableSize
+ mx["catalog_relkind_I_size"] = p.mx.relkindPartitionedIndexSize
+ mx["autovacuum_analyze"] = p.mx.autovacuumWorkersAnalyze
+ mx["autovacuum_vacuum_analyze"] = p.mx.autovacuumWorkersVacuumAnalyze
+ mx["autovacuum_vacuum"] = p.mx.autovacuumWorkersVacuum
+ mx["autovacuum_vacuum_freeze"] = p.mx.autovacuumWorkersVacuumFreeze
+ mx["autovacuum_brin_summarize"] = p.mx.autovacuumWorkersBrinSummarize
+
+ var locksHeld int64
+ for name, m := range p.mx.dbs {
+ if !m.updated {
+ delete(p.mx.dbs, name)
+ p.removeDatabaseCharts(m)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewDatabaseCharts(m)
+ if p.isPGInRecovery() {
+ p.addDBConflictsCharts(m)
+ }
+ }
+ px := "db_" + m.name + "_"
+ mx[px+"numbackends"] = m.numBackends
+ if m.datConnLimit <= 0 {
+ mx[px+"numbackends_utilization"] = calcPercentage(m.numBackends, p.mx.maxConnections)
+ } else {
+ mx[px+"numbackends_utilization"] = calcPercentage(m.numBackends, m.datConnLimit)
+ }
+ mx[px+"xact_commit"] = m.xactCommit
+ mx[px+"xact_rollback"] = m.xactRollback
+ mx[px+"blks_read"] = m.blksRead.last
+ mx[px+"blks_hit"] = m.blksHit.last
+ mx[px+"blks_read_perc"] = calcDeltaPercentage(m.blksRead, m.blksHit)
+ m.blksRead.prev, m.blksHit.prev = m.blksRead.last, m.blksHit.last
+ mx[px+"tup_returned"] = m.tupReturned.last
+ mx[px+"tup_fetched"] = m.tupFetched.last
+ mx[px+"tup_fetched_perc"] = calcPercentage(m.tupFetched.delta(), m.tupReturned.delta())
+ m.tupReturned.prev, m.tupFetched.prev = m.tupReturned.last, m.tupFetched.last
+ mx[px+"tup_inserted"] = m.tupInserted
+ mx[px+"tup_updated"] = m.tupUpdated
+ mx[px+"tup_deleted"] = m.tupDeleted
+ mx[px+"conflicts"] = m.conflicts
+ if m.size != nil {
+ mx[px+"size"] = *m.size
+ }
+ mx[px+"temp_files"] = m.tempFiles
+ mx[px+"temp_bytes"] = m.tempBytes
+ mx[px+"deadlocks"] = m.deadlocks
+ mx[px+"confl_tablespace"] = m.conflTablespace
+ mx[px+"confl_lock"] = m.conflLock
+ mx[px+"confl_snapshot"] = m.conflSnapshot
+ mx[px+"confl_bufferpin"] = m.conflBufferpin
+ mx[px+"confl_deadlock"] = m.conflDeadlock
+ mx[px+"lock_mode_AccessShareLock_held"] = m.accessShareLockHeld
+ mx[px+"lock_mode_RowShareLock_held"] = m.rowShareLockHeld
+ mx[px+"lock_mode_RowExclusiveLock_held"] = m.rowExclusiveLockHeld
+ mx[px+"lock_mode_ShareUpdateExclusiveLock_held"] = m.shareUpdateExclusiveLockHeld
+ mx[px+"lock_mode_ShareLock_held"] = m.shareLockHeld
+ mx[px+"lock_mode_ShareRowExclusiveLock_held"] = m.shareRowExclusiveLockHeld
+ mx[px+"lock_mode_ExclusiveLock_held"] = m.exclusiveLockHeld
+ mx[px+"lock_mode_AccessExclusiveLock_held"] = m.accessExclusiveLockHeld
+ mx[px+"lock_mode_AccessShareLock_awaited"] = m.accessShareLockAwaited
+ mx[px+"lock_mode_RowShareLock_awaited"] = m.rowShareLockAwaited
+ mx[px+"lock_mode_RowExclusiveLock_awaited"] = m.rowExclusiveLockAwaited
+ mx[px+"lock_mode_ShareUpdateExclusiveLock_awaited"] = m.shareUpdateExclusiveLockAwaited
+ mx[px+"lock_mode_ShareLock_awaited"] = m.shareLockAwaited
+ mx[px+"lock_mode_ShareRowExclusiveLock_awaited"] = m.shareRowExclusiveLockAwaited
+ mx[px+"lock_mode_ExclusiveLock_awaited"] = m.exclusiveLockAwaited
+ mx[px+"lock_mode_AccessExclusiveLock_awaited"] = m.accessExclusiveLockAwaited
+ locksHeld += m.accessShareLockHeld + m.rowShareLockHeld +
+ m.rowExclusiveLockHeld + m.shareUpdateExclusiveLockHeld +
+ m.shareLockHeld + m.shareRowExclusiveLockHeld +
+ m.exclusiveLockHeld + m.accessExclusiveLockHeld
+ }
+ mx["databases_count"] = int64(len(p.mx.dbs))
+ mx["locks_utilization"] = calcPercentage(locksHeld, p.mx.maxLocksHeld)
+
+ for name, m := range p.mx.tables {
+ if !m.updated {
+ delete(p.mx.tables, name)
+ p.removeTableCharts(m)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewTableCharts(m)
+ }
+ if !m.hasLastAutoVacuumChart && m.lastAutoVacuumAgo > 0 {
+ m.hasLastAutoVacuumChart = true
+ p.addTableLastAutoVacuumAgoChart(m)
+ }
+ if !m.hasLastVacuumChart && m.lastVacuumAgo > 0 {
+ m.hasLastVacuumChart = true
+ p.addTableLastVacuumAgoChart(m)
+ }
+ if !m.hasLastAutoAnalyzeChart && m.lastAutoAnalyzeAgo > 0 {
+ m.hasLastAutoAnalyzeChart = true
+ p.addTableLastAutoAnalyzeAgoChart(m)
+ }
+ if !m.hasLastAnalyzeChart && m.lastAnalyzeAgo > 0 {
+ m.hasLastAnalyzeChart = true
+ p.addTableLastAnalyzeAgoChart(m)
+ }
+ if !m.hasTableIOCharts && m.heapBlksRead.last != -1 {
+ m.hasTableIOCharts = true
+ p.addTableIOCharts(m)
+ }
+ if !m.hasTableIdxIOCharts && m.idxBlksRead.last != -1 {
+ m.hasTableIdxIOCharts = true
+ p.addTableIndexIOCharts(m)
+ }
+ if !m.hasTableTOASTIOCharts && m.toastBlksRead.last != -1 {
+ m.hasTableTOASTIOCharts = true
+ p.addTableTOASTIOCharts(m)
+ }
+ if !m.hasTableTOASTIdxIOCharts && m.tidxBlksRead.last != -1 {
+ m.hasTableTOASTIdxIOCharts = true
+ p.addTableTOASTIndexIOCharts(m)
+ }
+
+ px := fmt.Sprintf("table_%s_db_%s_schema_%s_", m.name, m.db, m.schema)
+
+ mx[px+"seq_scan"] = m.seqScan
+ mx[px+"seq_tup_read"] = m.seqTupRead
+ mx[px+"idx_scan"] = m.idxScan
+ mx[px+"idx_tup_fetch"] = m.idxTupFetch
+ mx[px+"n_live_tup"] = m.nLiveTup
+ mx[px+"n_dead_tup"] = m.nDeadTup
+ mx[px+"n_dead_tup_perc"] = calcPercentage(m.nDeadTup, m.nDeadTup+m.nLiveTup)
+ mx[px+"n_tup_ins"] = m.nTupIns
+ mx[px+"n_tup_upd"] = m.nTupUpd.last
+ mx[px+"n_tup_del"] = m.nTupDel
+ mx[px+"n_tup_hot_upd"] = m.nTupHotUpd.last
+ if m.lastAutoVacuumAgo != -1 {
+ mx[px+"last_autovacuum_ago"] = m.lastAutoVacuumAgo
+ }
+ if m.lastVacuumAgo != -1 {
+ mx[px+"last_vacuum_ago"] = m.lastVacuumAgo
+ }
+ if m.lastAutoAnalyzeAgo != -1 {
+ mx[px+"last_autoanalyze_ago"] = m.lastAutoAnalyzeAgo
+ }
+ if m.lastAnalyzeAgo != -1 {
+ mx[px+"last_analyze_ago"] = m.lastAnalyzeAgo
+ }
+ mx[px+"total_size"] = m.totalSize
+ if m.bloatSize != nil && m.bloatSizePerc != nil {
+ mx[px+"bloat_size"] = *m.bloatSize
+ mx[px+"bloat_size_perc"] = *m.bloatSizePerc
+ }
+ if m.nullColumns != nil {
+ mx[px+"null_columns"] = *m.nullColumns
+ }
+
+ mx[px+"n_tup_hot_upd_perc"] = calcPercentage(m.nTupHotUpd.delta(), m.nTupUpd.delta())
+ m.nTupHotUpd.prev, m.nTupUpd.prev = m.nTupHotUpd.last, m.nTupUpd.last
+
+ mx[px+"heap_blks_read"] = m.heapBlksRead.last
+ mx[px+"heap_blks_hit"] = m.heapBlksHit.last
+ mx[px+"heap_blks_read_perc"] = calcDeltaPercentage(m.heapBlksRead, m.heapBlksHit)
+ m.heapBlksHit.prev, m.heapBlksRead.prev = m.heapBlksHit.last, m.heapBlksRead.last
+
+ mx[px+"idx_blks_read"] = m.idxBlksRead.last
+ mx[px+"idx_blks_hit"] = m.idxBlksHit.last
+ mx[px+"idx_blks_read_perc"] = calcDeltaPercentage(m.idxBlksRead, m.idxBlksHit)
+ m.idxBlksHit.prev, m.idxBlksRead.prev = m.idxBlksHit.last, m.idxBlksRead.last
+
+ mx[px+"toast_blks_read"] = m.toastBlksRead.last
+ mx[px+"toast_blks_hit"] = m.toastBlksHit.last
+ mx[px+"toast_blks_read_perc"] = calcDeltaPercentage(m.toastBlksRead, m.toastBlksHit)
+ m.toastBlksHit.prev, m.toastBlksRead.prev = m.toastBlksHit.last, m.toastBlksRead.last
+
+ mx[px+"tidx_blks_read"] = m.tidxBlksRead.last
+ mx[px+"tidx_blks_hit"] = m.tidxBlksHit.last
+ mx[px+"tidx_blks_read_perc"] = calcDeltaPercentage(m.tidxBlksRead, m.tidxBlksHit)
+ m.tidxBlksHit.prev, m.tidxBlksRead.prev = m.tidxBlksHit.last, m.tidxBlksRead.last
+ }
+
+ for name, m := range p.mx.indexes {
+ if !m.updated {
+ delete(p.mx.indexes, name)
+ p.removeIndexCharts(m)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewIndexCharts(m)
+ }
+
+ px := fmt.Sprintf("index_%s_table_%s_db_%s_schema_%s_", m.name, m.table, m.db, m.schema)
+ mx[px+"size"] = m.size
+ if m.bloatSize != nil && m.bloatSizePerc != nil {
+ mx[px+"bloat_size"] = *m.bloatSize
+ mx[px+"bloat_size_perc"] = *m.bloatSizePerc
+ }
+ if m.idxScan+m.idxTupRead+m.idxTupFetch > 0 {
+ mx[px+"usage_status_used"], mx[px+"usage_status_unused"] = 1, 0
+ } else {
+ mx[px+"usage_status_used"], mx[px+"usage_status_unused"] = 0, 1
+ }
+ }
+
+ for name, m := range p.mx.replApps {
+ if !m.updated {
+ delete(p.mx.replApps, name)
+ p.removeReplicationStandbyAppCharts(name)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewReplicationStandbyAppCharts(name)
+ }
+ px := "repl_standby_app_" + m.name + "_wal_"
+ mx[px+"sent_lag_size"] = m.walSentDelta
+ mx[px+"write_lag_size"] = m.walWriteDelta
+ mx[px+"flush_lag_size"] = m.walFlushDelta
+ mx[px+"replay_lag_size"] = m.walReplayDelta
+ mx[px+"write_time"] = m.walWriteLag
+ mx[px+"flush_lag_time"] = m.walFlushLag
+ mx[px+"replay_lag_time"] = m.walReplayLag
+ }
+
+ for name, m := range p.mx.replSlots {
+ if !m.updated {
+ delete(p.mx.replSlots, name)
+ p.removeReplicationSlotCharts(name)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewReplicationSlotCharts(name)
+ }
+ px := "repl_slot_" + m.name + "_"
+ mx[px+"replslot_wal_keep"] = m.walKeep
+ mx[px+"replslot_files"] = m.files
+ }
+}
+
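+// resetMetrics recreates the per-object metric structs for the next cycle, keeping only
+// the state that must survive between cycles: chart bookkeeping flags, previous values
+// of incremental counters and the results of the slow queries (bloat, null columns).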
+func (p *Postgres) resetMetrics() {
+ p.mx.srvMetrics = srvMetrics{
+ xactTimeHist: p.mx.xactTimeHist,
+ queryTimeHist: p.mx.queryTimeHist,
+ maxConnections: p.mx.maxConnections,
+ maxLocksHeld: p.mx.maxLocksHeld,
+ }
+ for name, m := range p.mx.dbs {
+ p.mx.dbs[name] = &dbMetrics{
+ name: m.name,
+ hasCharts: m.hasCharts,
+ blksRead: incDelta{prev: m.blksRead.prev},
+ blksHit: incDelta{prev: m.blksHit.prev},
+ tupReturned: incDelta{prev: m.tupReturned.prev},
+ tupFetched: incDelta{prev: m.tupFetched.prev},
+ }
+ }
+ for name, m := range p.mx.tables {
+ p.mx.tables[name] = &tableMetrics{
+ db: m.db,
+ schema: m.schema,
+ name: m.name,
+ hasCharts: m.hasCharts,
+ hasLastAutoVacuumChart: m.hasLastAutoVacuumChart,
+ hasLastVacuumChart: m.hasLastVacuumChart,
+ hasLastAutoAnalyzeChart: m.hasLastAutoAnalyzeChart,
+ hasLastAnalyzeChart: m.hasLastAnalyzeChart,
+ hasTableIOCharts: m.hasTableIOCharts,
+ hasTableIdxIOCharts: m.hasTableIdxIOCharts,
+ hasTableTOASTIOCharts: m.hasTableTOASTIOCharts,
+ hasTableTOASTIdxIOCharts: m.hasTableTOASTIdxIOCharts,
+ nTupUpd: incDelta{prev: m.nTupUpd.prev},
+ nTupHotUpd: incDelta{prev: m.nTupHotUpd.prev},
+ heapBlksRead: incDelta{prev: m.heapBlksRead.prev},
+ heapBlksHit: incDelta{prev: m.heapBlksHit.prev},
+ idxBlksRead: incDelta{prev: m.idxBlksRead.prev},
+ idxBlksHit: incDelta{prev: m.idxBlksHit.prev},
+ toastBlksRead: incDelta{prev: m.toastBlksRead.prev},
+ toastBlksHit: incDelta{prev: m.toastBlksHit.prev},
+ tidxBlksRead: incDelta{prev: m.tidxBlksRead.prev},
+ tidxBlksHit: incDelta{prev: m.tidxBlksHit.prev},
+ bloatSize: m.bloatSize,
+ bloatSizePerc: m.bloatSizePerc,
+ nullColumns: m.nullColumns,
+ }
+ }
+ for name, m := range p.mx.indexes {
+ p.mx.indexes[name] = &indexMetrics{
+ name: m.name,
+ db: m.db,
+ schema: m.schema,
+ table: m.table,
+ updated: m.updated,
+ hasCharts: m.hasCharts,
+ bloatSize: m.bloatSize,
+ bloatSizePerc: m.bloatSizePerc,
+ }
+ }
+ for name, m := range p.mx.replApps {
+ p.mx.replApps[name] = &replStandbyAppMetrics{
+ name: m.name,
+ hasCharts: m.hasCharts,
+ }
+ }
+ for name, m := range p.mx.replSlots {
+ p.mx.replSlots[name] = &replSlotMetrics{
+ name: m.name,
+ hasCharts: m.hasCharts,
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/postgres/config_schema.json b/src/go/plugin/go.d/modules/postgres/config_schema.json
new file mode 100644
index 000000000..42bff329b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/config_schema.json
@@ -0,0 +1,141 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Postgres collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "dsn": {
+ "title": "DSN",
+ "description": "Postgres server Data Source Name in [key/value string](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-KEYWORD-VALUE) or [URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS) format.",
+ "type": "string",
+ "default": "postgres://netdata:password@127.0.0.1:5432/postgres"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for queries, in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "collect_databases_matching": {
+ "title": "Database selector",
+ "description": "Configuration for monitoring specific databases using [Netdata simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme). If left empty, no database metrics will be collected.",
+ "type": "string"
+ },
+ "max_db_tables": {
+ "title": "Table limit",
+ "description": "Table metrics will not be collected for databases that have more tables than the limit. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 50
+ },
+ "max_db_indexes": {
+ "title": "Index limit",
+ "description": "Index metrics will not be collected for databases that have more indexes than the limit. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 250
+ },
+ "transaction_time_histogram": {
+ "title": "Transaction time histogram",
+ "description": "Buckets for transaction time histogram in milliseconds.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "uniqueItems": true,
+ "default": [
+ 0.1,
+ 0.5,
+ 1,
+ 2.5,
+ 5,
+ 10
+ ]
+ },
+ "query_time_histogram": {
+ "title": "Query time histogram",
+ "description": "Buckets for query time histogram in milliseconds.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "uniqueItems": true,
+ "default": [
+ 0.1,
+ 0.5,
+ 1,
+ 2.5,
+ 5,
+ 10
+ ]
+ }
+ },
+ "required": [
+ "dsn"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "dsn",
+ "timeout"
+ ]
+ },
+ {
+ "title": "Database stats",
+ "fields": [
+ "max_db_tables",
+ "max_db_indexes",
+ "collect_databases_matching"
+ ]
+ },
+ {
+ "title": "Histograms",
+ "fields": [
+ "transaction_time_histogram",
+ "query_time_histogram"
+ ]
+ }
+ ]
+ },
+ "dsn": {
+ "ui:placeholder": "postgres://username:password@host:port/dbname"
+ },
+ "transaction_time_histogram": {
+ "ui:listFlavour": "list"
+ },
+ "query_time_histogram": {
+ "ui:listFlavour": "list"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/postgres/do_query.go b/src/go/plugin/go.d/modules/postgres/do_query.go
new file mode 100644
index 000000000..3b90be0d7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/do_query.go
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "context"
+ "database/sql"
+)
+
+func (p *Postgres) doQueryRow(query string, v any) error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ return p.db.QueryRowContext(ctx, query).Scan(v)
+}
+
+func (p *Postgres) doDBQueryRow(db *sql.DB, query string, v any) error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ return db.QueryRowContext(ctx, query).Scan(v)
+}
+
+func (p *Postgres) doQuery(query string, assign func(column, value string, rowEnd bool)) error {
+ return p.doDBQuery(p.db, query, assign)
+}
+
+func (p *Postgres) doDBQuery(db *sql.DB, query string, assign func(column, value string, rowEnd bool)) error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ rows, err := db.QueryContext(ctx, query)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = rows.Close() }()
+
+ return readRows(rows, assign)
+}
+
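+// readRows scans every row of the result set as nullable strings and calls assign once
+// per column, signalling the end of each row via rowEnd so callers can commit one row's
+// worth of values at a time.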
+func readRows(rows *sql.Rows, assign func(column, value string, rowEnd bool)) error {
+ if assign == nil {
+ return nil
+ }
+
+ columns, err := rows.Columns()
+ if err != nil {
+ return err
+ }
+
+ values := makeValues(len(columns))
+
+ for rows.Next() {
+ if err := rows.Scan(values...); err != nil {
+ return err
+ }
+ for i, l := 0, len(values); i < l; i++ {
+ assign(columns[i], valueToString(values[i]), i == l-1)
+ }
+ }
+ return rows.Err()
+}
+
+func valueToString(value any) string {
+ v, ok := value.(*sql.NullString)
+ if !ok || !v.Valid {
+ return ""
+ }
+ return v.String
+}
+
+func makeValues(size int) []any {
+ vs := make([]any, size)
+ for i := range vs {
+ vs[i] = &sql.NullString{}
+ }
+ return vs
+}
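The helpers above funnel every result set through a single `assign(column, value, rowEnd)` callback: it fires once per column of every row, with `rowEnd` set on the row's last column, which is where per-row aggregation typically happens. A self-contained sketch of consuming that contract with synthetic rows (no database involved):

```go
// Standalone sketch of the assign callback contract used by doQuery/readRows above.
// The column names and rows here are synthetic.
package main

import "fmt"

func main() {
	columns := []string{"datname", "size"}
	rows := [][]string{{"postgres", "8192"}, {"netdata", "16384"}}

	assign := func(column, value string, rowEnd bool) {
		fmt.Printf("%s=%s", column, value)
		if rowEnd {
			fmt.Println(" <- row complete")
		} else {
			fmt.Print(", ")
		}
	}

	for _, row := range rows {
		for i, v := range row {
			assign(columns[i], v, i == len(columns)-1)
		}
	}
}
```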
diff --git a/src/go/plugin/go.d/modules/postgres/do_query_bloat.go b/src/go/plugin/go.d/modules/postgres/do_query_bloat.go
new file mode 100644
index 000000000..484bfdd96
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/do_query_bloat.go
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import "database/sql"
+
+func (p *Postgres) doQueryBloat() error {
+ if err := p.doDBQueryBloat(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryBloat(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doDBQueryBloat(db *sql.DB) error {
+ q := queryBloat()
+
+ for _, m := range p.mx.tables {
+ if m.bloatSize != nil {
+ m.bloatSize = newInt(0)
+ }
+ if m.bloatSizePerc != nil {
+ m.bloatSizePerc = newInt(0)
+ }
+ }
+ for _, m := range p.mx.indexes {
+ if m.bloatSize != nil {
+ m.bloatSize = newInt(0)
+ }
+ if m.bloatSizePerc != nil {
+ m.bloatSizePerc = newInt(0)
+ }
+ }
+
+ var dbname, schema, table, iname string
+ var tableWasted, idxWasted int64
+ return p.doDBQuery(db, q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "db":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "tablename":
+ table = value
+ case "wastedbytes":
+ tableWasted = parseFloat(value)
+ case "iname":
+ iname = removeSpaces(value)
+ case "wastedibytes":
+ idxWasted = parseFloat(value)
+ }
+ if !rowEnd {
+ return
+ }
+ if p.hasTableMetrics(table, dbname, schema) {
+ v := p.getTableMetrics(table, dbname, schema)
+ v.bloatSize = newInt(tableWasted)
+ v.bloatSizePerc = newInt(calcPercentage(tableWasted, v.totalSize))
+ }
+ if iname != "?" && p.hasIndexMetrics(iname, table, dbname, schema) {
+ v := p.getIndexMetrics(iname, table, dbname, schema)
+ v.bloatSize = newInt(idxWasted)
+ v.bloatSizePerc = newInt(calcPercentage(idxWasted, v.size))
+ }
+ })
+}
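`calcPercentage` and `newInt` are defined elsewhere in the module and are not part of this diff. A plausible reading, sketched below under that assumption, is wasted bytes expressed as a share of the relation's total size, guarded against empty relations.

```go
// Hypothetical sketch of the percentage math implied above: wasted bytes as a
// share of the relation's total size. calcPercentage itself lives elsewhere in
// the module; this is only an assumed equivalent for illustration.
package main

import "fmt"

func percentage(part, total int64) int64 {
	if total == 0 {
		return 0 // avoid division by zero for empty relations
	}
	return part * 100 / total
}

func main() {
	fmt.Println(percentage(1_048_576, 10_485_760)) // 10 (% of a 10 MiB table wasted)
}
```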
diff --git a/src/go/plugin/go.d/modules/postgres/do_query_columns.go b/src/go/plugin/go.d/modules/postgres/do_query_columns.go
new file mode 100644
index 000000000..1da655aaf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/do_query_columns.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import "database/sql"
+
+func (p *Postgres) doQueryColumns() error {
+ if err := p.doDBQueryColumns(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryColumns(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doDBQueryColumns(db *sql.DB) error {
+ q := queryColumnsStats()
+
+ for _, m := range p.mx.tables {
+ if m.nullColumns != nil {
+ m.nullColumns = newInt(0)
+ }
+ }
+
+ var dbname, schema, table string
+ var nullPerc int64
+ return p.doDBQuery(db, q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "datname":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "relname":
+ table = value
+ case "null_percent":
+ nullPerc = parseInt(value)
+ }
+ if !rowEnd {
+ return
+ }
+ if nullPerc == 100 && p.hasTableMetrics(table, dbname, schema) {
+ v := p.getTableMetrics(table, dbname, schema)
+ if v.nullColumns == nil {
+ v.nullColumns = newInt(0)
+ }
+ *v.nullColumns++
+ }
+ })
+}
diff --git a/src/go/plugin/go.d/modules/postgres/do_query_databases.go b/src/go/plugin/go.d/modules/postgres/do_query_databases.go
new file mode 100644
index 000000000..0cee7a0cd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/do_query_databases.go
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "fmt"
+)
+
+func (p *Postgres) doQueryDatabasesMetrics() error {
+ if err := p.doQueryDatabaseStats(); err != nil {
+ return fmt.Errorf("querying database stats error: %v", err)
+ }
+ if err := p.doQueryDatabaseSize(); err != nil {
+ return fmt.Errorf("querying database size error: %v", err)
+ }
+ if p.isPGInRecovery() {
+ if err := p.doQueryDatabaseConflicts(); err != nil {
+ return fmt.Errorf("querying database conflicts error: %v", err)
+ }
+ }
+ if err := p.doQueryDatabaseLocks(); err != nil {
+ return fmt.Errorf("querying database locks error: %v", err)
+ }
+ return nil
+}
+
+func (p *Postgres) doQueryDatabaseStats() error {
+ q := queryDatabaseStats()
+
+ var db string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ db = value
+ p.getDBMetrics(db).updated = true
+ case "numbackends":
+ p.getDBMetrics(db).numBackends = parseInt(value)
+ case "datconnlimit":
+ p.getDBMetrics(db).datConnLimit = parseInt(value)
+ case "xact_commit":
+ p.getDBMetrics(db).xactCommit = parseInt(value)
+ case "xact_rollback":
+ p.getDBMetrics(db).xactRollback = parseInt(value)
+ case "blks_read_bytes":
+ p.getDBMetrics(db).blksRead.last = parseInt(value)
+ case "blks_hit_bytes":
+ p.getDBMetrics(db).blksHit.last = parseInt(value)
+ case "tup_returned":
+ p.getDBMetrics(db).tupReturned.last = parseInt(value)
+ case "tup_fetched":
+ p.getDBMetrics(db).tupFetched.last = parseInt(value)
+ case "tup_inserted":
+ p.getDBMetrics(db).tupInserted = parseInt(value)
+ case "tup_updated":
+ p.getDBMetrics(db).tupUpdated = parseInt(value)
+ case "tup_deleted":
+ p.getDBMetrics(db).tupDeleted = parseInt(value)
+ case "conflicts":
+ p.getDBMetrics(db).conflicts = parseInt(value)
+ case "temp_files":
+ p.getDBMetrics(db).tempFiles = parseInt(value)
+ case "temp_bytes":
+ p.getDBMetrics(db).tempBytes = parseInt(value)
+ case "deadlocks":
+ p.getDBMetrics(db).deadlocks = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryDatabaseSize() error {
+ q := queryDatabaseSize(p.pgVersion)
+
+ var db string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ db = value
+ case "size":
+ p.getDBMetrics(db).size = newInt(parseInt(value))
+ }
+ })
+}
+
+func (p *Postgres) doQueryDatabaseConflicts() error {
+ q := queryDatabaseConflicts()
+
+ var db string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ db = value
+ p.getDBMetrics(db).updated = true
+ case "confl_tablespace":
+ p.getDBMetrics(db).conflTablespace = parseInt(value)
+ case "confl_lock":
+ p.getDBMetrics(db).conflLock = parseInt(value)
+ case "confl_snapshot":
+ p.getDBMetrics(db).conflSnapshot = parseInt(value)
+ case "confl_bufferpin":
+ p.getDBMetrics(db).conflBufferpin = parseInt(value)
+ case "confl_deadlock":
+ p.getDBMetrics(db).conflDeadlock = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryDatabaseLocks() error {
+ q := queryDatabaseLocks()
+
+ var db, mode string
+ var granted bool
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ db = value
+ p.getDBMetrics(db).updated = true
+ case "mode":
+ mode = value
+ case "granted":
+ granted = value == "true" || value == "t"
+ case "locks_count":
+ // https://github.com/postgres/postgres/blob/7c34555f8c39eeefcc45b3c3f027d7a063d738fc/src/include/storage/lockdefs.h#L36-L45
+ // https://www.postgresql.org/docs/7.2/locking-tables.html
+ switch {
+ case mode == "AccessShareLock" && granted:
+ p.getDBMetrics(db).accessShareLockHeld = parseInt(value)
+ case mode == "AccessShareLock":
+ p.getDBMetrics(db).accessShareLockAwaited = parseInt(value)
+ case mode == "RowShareLock" && granted:
+ p.getDBMetrics(db).rowShareLockHeld = parseInt(value)
+ case mode == "RowShareLock":
+ p.getDBMetrics(db).rowShareLockAwaited = parseInt(value)
+ case mode == "RowExclusiveLock" && granted:
+ p.getDBMetrics(db).rowExclusiveLockHeld = parseInt(value)
+ case mode == "RowExclusiveLock":
+ p.getDBMetrics(db).rowExclusiveLockAwaited = parseInt(value)
+ case mode == "ShareUpdateExclusiveLock" && granted:
+ p.getDBMetrics(db).shareUpdateExclusiveLockHeld = parseInt(value)
+ case mode == "ShareUpdateExclusiveLock":
+ p.getDBMetrics(db).shareUpdateExclusiveLockAwaited = parseInt(value)
+ case mode == "ShareLock" && granted:
+ p.getDBMetrics(db).shareLockHeld = parseInt(value)
+ case mode == "ShareLock":
+ p.getDBMetrics(db).shareLockAwaited = parseInt(value)
+ case mode == "ShareRowExclusiveLock" && granted:
+ p.getDBMetrics(db).shareRowExclusiveLockHeld = parseInt(value)
+ case mode == "ShareRowExclusiveLock":
+ p.getDBMetrics(db).shareRowExclusiveLockAwaited = parseInt(value)
+ case mode == "ExclusiveLock" && granted:
+ p.getDBMetrics(db).exclusiveLockHeld = parseInt(value)
+ case mode == "ExclusiveLock":
+ p.getDBMetrics(db).exclusiveLockAwaited = parseInt(value)
+ case mode == "AccessExclusiveLock" && granted:
+ p.getDBMetrics(db).accessExclusiveLockHeld = parseInt(value)
+ case mode == "AccessExclusiveLock":
+ p.getDBMetrics(db).accessExclusiveLockAwaited = parseInt(value)
+ }
+ }
+ })
+}
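The `locks_count` switch above fans each (mode, granted) pair out to a dedicated held/awaited field. The same mapping, expressed with maps over synthetic rows purely for illustration (not the collector's implementation):

```go
// Illustration only: the held/awaited split keyed by lock mode, fed with
// synthetic rows shaped like the locks query output (mode, granted, count).
package main

import "fmt"

func main() {
	held := map[string]int64{}
	awaited := map[string]int64{}

	rows := []struct {
		mode    string
		granted bool
		count   int64
	}{
		{"AccessShareLock", true, 12},
		{"RowExclusiveLock", false, 3},
		{"AccessExclusiveLock", true, 1},
	}

	for _, r := range rows {
		if r.granted {
			held[r.mode] += r.count
		} else {
			awaited[r.mode] += r.count
		}
	}

	fmt.Println("held:", held)       // held: map[AccessExclusiveLock:1 AccessShareLock:12]
	fmt.Println("awaited:", awaited) // awaited: map[RowExclusiveLock:3]
}
```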
diff --git a/src/go/plugin/go.d/modules/postgres/do_query_global.go b/src/go/plugin/go.d/modules/postgres/do_query_global.go
new file mode 100644
index 000000000..c70772a23
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/do_query_global.go
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "fmt"
+ "strconv"
+)
+
+func (p *Postgres) doQueryGlobalMetrics() error {
+ if err := p.doQueryConnectionsUsed(); err != nil {
+ return fmt.Errorf("querying server connections used error: %v", err)
+ }
+ if err := p.doQueryConnectionsState(); err != nil {
+ return fmt.Errorf("querying server connections state error: %v", err)
+ }
+ if err := p.doQueryCheckpoints(); err != nil {
+ return fmt.Errorf("querying database conflicts error: %v", err)
+ }
+ if err := p.doQueryUptime(); err != nil {
+ return fmt.Errorf("querying server uptime error: %v", err)
+ }
+ if err := p.doQueryTXIDWraparound(); err != nil {
+ return fmt.Errorf("querying txid wraparound error: %v", err)
+ }
+ if err := p.doQueryWALWrites(); err != nil {
+ return fmt.Errorf("querying wal writes error: %v", err)
+ }
+ if err := p.doQueryCatalogRelations(); err != nil {
+ return fmt.Errorf("querying catalog relations error: %v", err)
+ }
+ if p.pgVersion >= pgVersion94 {
+ if err := p.doQueryAutovacuumWorkers(); err != nil {
+ return fmt.Errorf("querying autovacuum workers error: %v", err)
+ }
+ }
+ if p.pgVersion >= pgVersion10 {
+ if err := p.doQueryXactQueryRunningTime(); err != nil {
+ return fmt.Errorf("querying xact/query running time: %v", err)
+ }
+ }
+
+ if !p.isSuperUser() {
+ return nil
+ }
+
+ if p.pgVersion >= pgVersion94 {
+ if err := p.doQueryWALFiles(); err != nil {
+ return fmt.Errorf("querying wal files error: %v", err)
+ }
+ }
+ if err := p.doQueryWALArchiveFiles(); err != nil {
+ return fmt.Errorf("querying wal archive files error: %v", err)
+ }
+
+ return nil
+}
+
+func (p *Postgres) doQueryConnectionsUsed() error {
+ q := queryServerCurrentConnectionsUsed()
+
+ var v string
+ if err := p.doQueryRow(q, &v); err != nil {
+ return err
+ }
+
+ p.mx.connUsed = parseInt(v)
+
+ return nil
+}
+
+func (p *Postgres) doQueryConnectionsState() error {
+ q := queryServerConnectionsState()
+
+ var state string
+ return p.doQuery(q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "state":
+ state = value
+ case "count":
+ switch state {
+ case "active":
+ p.mx.connStateActive = parseInt(value)
+ case "idle":
+ p.mx.connStateIdle = parseInt(value)
+ case "idle in transaction":
+ p.mx.connStateIdleInTrans = parseInt(value)
+ case "idle in transaction (aborted)":
+ p.mx.connStateIdleInTransAborted = parseInt(value)
+ case "fastpath function call":
+ p.mx.connStateFastpathFunctionCall = parseInt(value)
+ case "disabled":
+ p.mx.connStateDisabled = parseInt(value)
+ }
+ }
+ })
+}
+
+func (p *Postgres) doQueryCheckpoints() error {
+ q := queryCheckpoints()
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "checkpoints_timed":
+ p.mx.checkpointsTimed = parseInt(value)
+ case "checkpoints_req":
+ p.mx.checkpointsReq = parseInt(value)
+ case "checkpoint_write_time":
+ p.mx.checkpointWriteTime = parseInt(value)
+ case "checkpoint_sync_time":
+ p.mx.checkpointSyncTime = parseInt(value)
+ case "buffers_checkpoint_bytes":
+ p.mx.buffersCheckpoint = parseInt(value)
+ case "buffers_clean_bytes":
+ p.mx.buffersClean = parseInt(value)
+ case "maxwritten_clean":
+ p.mx.maxwrittenClean = parseInt(value)
+ case "buffers_backend_bytes":
+ p.mx.buffersBackend = parseInt(value)
+ case "buffers_backend_fsync":
+ p.mx.buffersBackendFsync = parseInt(value)
+ case "buffers_alloc_bytes":
+ p.mx.buffersAlloc = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryUptime() error {
+ q := queryServerUptime()
+
+ var s string
+ if err := p.doQueryRow(q, &s); err != nil {
+ return err
+ }
+
+ p.mx.uptime = parseFloat(s)
+
+ return nil
+}
+
+func (p *Postgres) doQueryTXIDWraparound() error {
+ q := queryTXIDWraparound()
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "oldest_current_xid":
+ p.mx.oldestXID = parseInt(value)
+ case "percent_towards_wraparound":
+ p.mx.percentTowardsWraparound = parseInt(value)
+ case "percent_towards_emergency_autovacuum":
+ p.mx.percentTowardsEmergencyAutovacuum = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryWALWrites() error {
+ q := queryWALWrites(p.pgVersion)
+
+ var v int64
+ if err := p.doQueryRow(q, &v); err != nil {
+ return err
+ }
+
+ p.mx.walWrites = v
+
+ return nil
+}
+
+func (p *Postgres) doQueryWALFiles() error {
+ q := queryWALFiles(p.pgVersion)
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "wal_recycled_files":
+ p.mx.walRecycledFiles = parseInt(value)
+ case "wal_written_files":
+ p.mx.walWrittenFiles = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryWALArchiveFiles() error {
+ q := queryWALArchiveFiles(p.pgVersion)
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "wal_archive_files_ready_count":
+ p.mx.walArchiveFilesReady = parseInt(value)
+ case "wal_archive_files_done_count":
+ p.mx.walArchiveFilesDone = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryCatalogRelations() error {
+ q := queryCatalogRelations()
+
+ var kind string
+ var count, size int64
+ return p.doQuery(q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "relkind":
+ kind = value
+ case "count":
+ count = parseInt(value)
+ case "size":
+ size = parseInt(value)
+ }
+ if !rowEnd {
+ return
+ }
+ // https://www.postgresql.org/docs/current/catalog-pg-class.html
+ switch kind {
+ case "r":
+ p.mx.relkindOrdinaryTable = count
+ p.mx.relkindOrdinaryTableSize = size
+ case "i":
+ p.mx.relkindIndex = count
+ p.mx.relkindIndexSize = size
+ case "S":
+ p.mx.relkindSequence = count
+ p.mx.relkindSequenceSize = size
+ case "t":
+ p.mx.relkindTOASTTable = count
+ p.mx.relkindTOASTTableSize = size
+ case "v":
+ p.mx.relkindView = count
+ p.mx.relkindViewSize = size
+ case "m":
+ p.mx.relkindMatView = count
+ p.mx.relkindMatViewSize = size
+ case "c":
+ p.mx.relkindCompositeType = count
+ p.mx.relkindCompositeTypeSize = size
+ case "f":
+ p.mx.relkindForeignTable = count
+ p.mx.relkindForeignTableSize = size
+ case "p":
+ p.mx.relkindPartitionedTable = count
+ p.mx.relkindPartitionedTableSize = size
+ case "I":
+ p.mx.relkindPartitionedIndex = count
+ p.mx.relkindPartitionedIndexSize = size
+ }
+ })
+}
+
+func (p *Postgres) doQueryAutovacuumWorkers() error {
+ q := queryAutovacuumWorkers()
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "autovacuum_analyze":
+ p.mx.autovacuumWorkersAnalyze = parseInt(value)
+ case "autovacuum_vacuum_analyze":
+ p.mx.autovacuumWorkersVacuumAnalyze = parseInt(value)
+ case "autovacuum_vacuum":
+ p.mx.autovacuumWorkersVacuum = parseInt(value)
+ case "autovacuum_vacuum_freeze":
+ p.mx.autovacuumWorkersVacuumFreeze = parseInt(value)
+ case "autovacuum_brin_summarize":
+ p.mx.autovacuumWorkersBrinSummarize = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryXactQueryRunningTime() error {
+ q := queryXactQueryRunningTime()
+
+ var state string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "state":
+ state = value
+ case "xact_running_time":
+ v, _ := strconv.ParseFloat(value, 64)
+ p.mx.xactTimeHist.Observe(v)
+ case "query_running_time":
+ if state == "active" {
+ v, _ := strconv.ParseFloat(value, 64)
+ p.mx.queryTimeHist.Observe(v)
+ }
+ }
+ })
+}
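The `pgVersion94`/`pgVersion10` gates above compare against the integer returned by the server-version query. Assuming those constants follow PostgreSQL's `server_version_num` encoding (e.g. 90400 for 9.4, 100000 for 10), a minimal sketch of that comparison:

```go
// Assumed values for illustration: the constants themselves are defined elsewhere
// in the module and are not part of this diff.
package main

import "fmt"

const (
	pgVersion94 = 9_04_00  // 9.4  -> 90400
	pgVersion10 = 10_00_00 // 10   -> 100000
)

func main() {
	serverVersionNum := 150006 // e.g. what `SHOW server_version_num` reports for 15.6
	fmt.Println(serverVersionNum >= pgVersion94, serverVersionNum >= pgVersion10) // true true
}
```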
diff --git a/src/go/plugin/go.d/modules/postgres/do_query_indexes.go b/src/go/plugin/go.d/modules/postgres/do_query_indexes.go
new file mode 100644
index 000000000..309b4d104
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/do_query_indexes.go
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "database/sql"
+)
+
+func (p *Postgres) doQueryIndexesMetrics() error {
+ if err := p.doQueryStatUserIndexes(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *Postgres) doQueryStatUserIndexes() error {
+ if err := p.doDBQueryStatUserIndexes(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryStatUserIndexes(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doDBQueryStatUserIndexes(db *sql.DB) error {
+ q := queryStatUserIndexes()
+
+ var dbname, schema, table, name string
+ return p.doDBQuery(db, q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "relname":
+ table = value
+ case "indexrelname":
+ name = removeSpaces(value)
+ p.getIndexMetrics(name, table, dbname, schema).updated = true
+ case "parent_relname":
+ p.getIndexMetrics(name, table, dbname, schema).parentTable = value
+ case "idx_scan":
+ p.getIndexMetrics(name, table, dbname, schema).idxScan = parseInt(value)
+ case "idx_tup_read":
+ p.getIndexMetrics(name, table, dbname, schema).idxTupRead = parseInt(value)
+ case "idx_tup_fetch":
+ p.getIndexMetrics(name, table, dbname, schema).idxTupFetch = parseInt(value)
+ case "size":
+ p.getIndexMetrics(name, table, dbname, schema).size = parseInt(value)
+ }
+ })
+}
diff --git a/src/go/plugin/go.d/modules/postgres/do_query_misc.go b/src/go/plugin/go.d/modules/postgres/do_query_misc.go
new file mode 100644
index 000000000..2877650cd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/do_query_misc.go
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "database/sql"
+ "strconv"
+
+ "github.com/jackc/pgx/v5/stdlib"
+)
+
+func (p *Postgres) doQueryServerVersion() (int, error) {
+ q := queryServerVersion()
+
+ var s string
+ if err := p.doQueryRow(q, &s); err != nil {
+ return 0, err
+ }
+
+ return strconv.Atoi(s)
+}
+
+func (p *Postgres) doQueryIsSuperUser() (bool, error) {
+ q := queryIsSuperUser()
+
+ var v bool
+ if err := p.doQueryRow(q, &v); err != nil {
+ return false, err
+ }
+
+ return v, nil
+}
+
+func (p *Postgres) doQueryPGIsInRecovery() (bool, error) {
+ q := queryPGIsInRecovery()
+
+ var v bool
+ if err := p.doQueryRow(q, &v); err != nil {
+ return false, err
+ }
+
+ return v, nil
+}
+
+func (p *Postgres) doQuerySettingsMaxConnections() (int64, error) {
+ q := querySettingsMaxConnections()
+
+ var s string
+ if err := p.doQueryRow(q, &s); err != nil {
+ return 0, err
+ }
+
+ return strconv.ParseInt(s, 10, 64)
+}
+
+func (p *Postgres) doQuerySettingsMaxLocksHeld() (int64, error) {
+ q := querySettingsMaxLocksHeld()
+
+ var s string
+ if err := p.doQueryRow(q, &s); err != nil {
+ return 0, err
+ }
+
+ return strconv.ParseInt(s, 10, 64)
+}
+
+const connErrMax = 3
+
+func (p *Postgres) doQueryQueryableDatabases() error {
+ q := queryQueryableDatabaseList()
+
+ var dbs []string
+ err := p.doQuery(q, func(_, value string, _ bool) {
+ if p.dbSr != nil && p.dbSr.MatchString(value) {
+ dbs = append(dbs, value)
+ }
+ })
+ if err != nil {
+ return err
+ }
+
+ seen := make(map[string]bool, len(dbs))
+
+ for _, dbname := range dbs {
+ seen[dbname] = true
+
+ conn, ok := p.dbConns[dbname]
+ if !ok {
+ conn = &dbConn{}
+ p.dbConns[dbname] = conn
+ }
+
+ if conn.db != nil || conn.connErrors >= connErrMax {
+ continue
+ }
+
+ db, connStr, err := p.openSecondaryConnection(dbname)
+ if err != nil {
+ p.Warning(err)
+ conn.connErrors++
+ continue
+ }
+
+ tables, err := p.doDBQueryUserTablesCount(db)
+ if err != nil {
+ p.Warning(err)
+ conn.connErrors++
+ _ = db.Close()
+ stdlib.UnregisterConnConfig(connStr)
+ continue
+ }
+
+ indexes, err := p.doDBQueryUserIndexesCount(db)
+ if err != nil {
+ p.Warning(err)
+ conn.connErrors++
+ _ = db.Close()
+ stdlib.UnregisterConnConfig(connStr)
+ continue
+ }
+
+ if (p.MaxDBTables != 0 && tables > p.MaxDBTables) || (p.MaxDBIndexes != 0 && indexes > p.MaxDBIndexes) {
+ p.Warningf("database '%s' has too many user tables(%d/%d)/indexes(%d/%d), skipping it",
+ dbname, tables, p.MaxDBTables, indexes, p.MaxDBIndexes)
+ conn.connErrors = connErrMax
+ _ = db.Close()
+ stdlib.UnregisterConnConfig(connStr)
+ continue
+ }
+
+ conn.db, conn.connStr = db, connStr
+ }
+
+ for dbname, conn := range p.dbConns {
+ if seen[dbname] {
+ continue
+ }
+ delete(p.dbConns, dbname)
+ if conn.connStr != "" {
+ stdlib.UnregisterConnConfig(conn.connStr)
+ }
+ if conn.db != nil {
+ _ = conn.db.Close()
+ }
+ }
+
+ return nil
+}
+
+func (p *Postgres) doDBQueryUserTablesCount(db *sql.DB) (int64, error) {
+ q := queryUserTablesCount()
+
+ var v string
+ if err := p.doDBQueryRow(db, q, &v); err != nil {
+ return 0, err
+ }
+
+ return parseInt(v), nil
+}
+
+func (p *Postgres) doDBQueryUserIndexesCount(db *sql.DB) (int64, error) {
+ q := queryUserIndexesCount()
+
+ var v string
+ if err := p.doDBQueryRow(db, q, &v); err != nil {
+ return 0, err
+ }
+
+ return parseInt(v), nil
+}
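`openSecondaryConnection` is not part of this diff. Given the `stdlib.UnregisterConnConfig` cleanup calls above, a plausible sketch is that each per-database connection is registered through pgx's `database/sql` shim and unregistered when dropped; the DSN handling below (re-pointing a copy of the base config at the discovered database) is an assumption for illustration, not the collector's actual code.

```go
// Hypothetical sketch of a per-database connection opener paired with
// stdlib.UnregisterConnConfig on cleanup, as suggested by the calls above.
package main

import (
	"database/sql"
	"fmt"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/stdlib"
)

func openSecondaryConnection(baseDSN, dbname string) (*sql.DB, string, error) {
	cfg, err := pgx.ParseConfig(baseDSN)
	if err != nil {
		return nil, "", fmt.Errorf("parsing DSN: %v", err)
	}
	cfg.Database = dbname // point the parsed base config at the discovered database

	connStr := stdlib.RegisterConnConfig(cfg) // name usable with sql.Open("pgx", ...)
	db, err := sql.Open("pgx", connStr)
	if err != nil {
		stdlib.UnregisterConnConfig(connStr)
		return nil, "", err
	}
	return db, connStr, nil
}

func main() {
	db, connStr, err := openSecondaryConnection("postgres://netdata@127.0.0.1:5432/postgres", "mydb")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer func() { _ = db.Close(); stdlib.UnregisterConnConfig(connStr) }()
	fmt.Println("registered secondary connection:", connStr)
}
```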
diff --git a/src/go/plugin/go.d/modules/postgres/do_query_replication.go b/src/go/plugin/go.d/modules/postgres/do_query_replication.go
new file mode 100644
index 000000000..e60287e61
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/do_query_replication.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "fmt"
+)
+
+func (p *Postgres) doQueryReplicationMetrics() error {
+ if err := p.doQueryReplStandbyAppWALDelta(); err != nil {
+ return fmt.Errorf("querying replication standby app wal delta error: %v", err)
+ }
+
+ if p.pgVersion >= pgVersion10 {
+ if err := p.doQueryReplStandbyAppWALLag(); err != nil {
+ return fmt.Errorf("querying replication standby app wal lag error: %v", err)
+ }
+ }
+
+ if p.pgVersion >= pgVersion10 && p.isSuperUser() {
+ if err := p.doQueryReplSlotFiles(); err != nil {
+ return fmt.Errorf("querying replication slot files error: %v", err)
+ }
+ }
+
+ return nil
+}
+
+func (p *Postgres) doQueryReplStandbyAppWALDelta() error {
+ q := queryReplicationStandbyAppDelta(p.pgVersion)
+
+ var app string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "application_name":
+ app = value
+ p.getReplAppMetrics(app).updated = true
+ default:
+ // TODO: delta calculation was changed in https://github.com/netdata/netdata/go/plugins/plugin/go.d/pull/1039
+ // - 'replay_delta' (probably other deltas too?) can be negative
+ // - Also, WAL delta != WAL lag after that PR
+ v := parseInt(value)
+ if v < 0 {
+ v = 0
+ }
+ switch column {
+ case "sent_delta":
+ p.getReplAppMetrics(app).walSentDelta += v
+ case "write_delta":
+ p.getReplAppMetrics(app).walWriteDelta += v
+ case "flush_delta":
+ p.getReplAppMetrics(app).walFlushDelta += v
+ case "replay_delta":
+ p.getReplAppMetrics(app).walReplayDelta += v
+ }
+ }
+ })
+}
+
+func (p *Postgres) doQueryReplStandbyAppWALLag() error {
+ q := queryReplicationStandbyAppLag()
+
+ var app string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "application_name":
+ app = value
+ p.getReplAppMetrics(app).updated = true
+ case "write_lag":
+ p.getReplAppMetrics(app).walWriteLag += parseInt(value)
+ case "flush_lag":
+ p.getReplAppMetrics(app).walFlushLag += parseInt(value)
+ case "replay_lag":
+ p.getReplAppMetrics(app).walReplayLag += parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryReplSlotFiles() error {
+ q := queryReplicationSlotFiles(p.pgVersion)
+
+ var slot string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "slot_name":
+ slot = value
+ p.getReplSlotMetrics(slot).updated = true
+ case "replslot_wal_keep":
+ p.getReplSlotMetrics(slot).walKeep += parseInt(value)
+ case "replslot_files":
+ p.getReplSlotMetrics(slot).files += parseInt(value)
+ }
+ })
+}
diff --git a/src/go/plugin/go.d/modules/postgres/do_query_tables.go b/src/go/plugin/go.d/modules/postgres/do_query_tables.go
new file mode 100644
index 000000000..5b3e2c71d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/do_query_tables.go
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "database/sql"
+ "strings"
+)
+
+func (p *Postgres) doQueryTablesMetrics() error {
+ if err := p.doQueryStatUserTable(); err != nil {
+ return err
+ }
+ if err := p.doQueryStatIOUserTables(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *Postgres) doQueryStatUserTable() error {
+ if err := p.doDBQueryStatUserTables(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryStatUserTables(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doQueryStatIOUserTables() error {
+ if err := p.doDBQueryStatIOUserTables(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryStatIOUserTables(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doDBQueryStatUserTables(db *sql.DB) error {
+ q := queryStatUserTables()
+
+ var dbname, schema, name string
+ return p.doDBQuery(db, q, func(column, value string, _ bool) {
+ if value == "" && strings.HasPrefix(column, "last_") {
+ value = "-1"
+ }
+ switch column {
+ case "datname":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "relname":
+ name = value
+ p.getTableMetrics(name, dbname, schema).updated = true
+ case "parent_relname":
+ p.getTableMetrics(name, dbname, schema).parentName = value
+ case "seq_scan":
+ p.getTableMetrics(name, dbname, schema).seqScan = parseInt(value)
+ case "seq_tup_read":
+ p.getTableMetrics(name, dbname, schema).seqTupRead = parseInt(value)
+ case "idx_scan":
+ p.getTableMetrics(name, dbname, schema).idxScan = parseInt(value)
+ case "idx_tup_fetch":
+ p.getTableMetrics(name, dbname, schema).idxTupFetch = parseInt(value)
+ case "n_tup_ins":
+ p.getTableMetrics(name, dbname, schema).nTupIns = parseInt(value)
+ case "n_tup_upd":
+ p.getTableMetrics(name, dbname, schema).nTupUpd.last = parseInt(value)
+ case "n_tup_del":
+ p.getTableMetrics(name, dbname, schema).nTupDel = parseInt(value)
+ case "n_tup_hot_upd":
+ p.getTableMetrics(name, dbname, schema).nTupHotUpd.last = parseInt(value)
+ case "n_live_tup":
+ p.getTableMetrics(name, dbname, schema).nLiveTup = parseInt(value)
+ case "n_dead_tup":
+ p.getTableMetrics(name, dbname, schema).nDeadTup = parseInt(value)
+ case "last_vacuum":
+ p.getTableMetrics(name, dbname, schema).lastVacuumAgo = parseFloat(value)
+ case "last_autovacuum":
+ p.getTableMetrics(name, dbname, schema).lastAutoVacuumAgo = parseFloat(value)
+ case "last_analyze":
+ p.getTableMetrics(name, dbname, schema).lastAnalyzeAgo = parseFloat(value)
+ case "last_autoanalyze":
+ p.getTableMetrics(name, dbname, schema).lastAutoAnalyzeAgo = parseFloat(value)
+ case "vacuum_count":
+ p.getTableMetrics(name, dbname, schema).vacuumCount = parseInt(value)
+ case "autovacuum_count":
+ p.getTableMetrics(name, dbname, schema).autovacuumCount = parseInt(value)
+ case "analyze_count":
+ p.getTableMetrics(name, dbname, schema).analyzeCount = parseInt(value)
+ case "autoanalyze_count":
+ p.getTableMetrics(name, dbname, schema).autoAnalyzeCount = parseInt(value)
+ case "total_relation_size":
+ p.getTableMetrics(name, dbname, schema).totalSize = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doDBQueryStatIOUserTables(db *sql.DB) error {
+ q := queryStatIOUserTables()
+
+ var dbname, schema, name string
+ return p.doDBQuery(db, q, func(column, value string, rowEnd bool) {
+ if value == "" && column != "parent_relname" {
+ value = "-1"
+ }
+ switch column {
+ case "datname":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "relname":
+ name = value
+ p.getTableMetrics(name, dbname, schema).updated = true
+ case "parent_relname":
+ p.getTableMetrics(name, dbname, schema).parentName = value
+ case "heap_blks_read_bytes":
+ p.getTableMetrics(name, dbname, schema).heapBlksRead.last = parseInt(value)
+ case "heap_blks_hit_bytes":
+ p.getTableMetrics(name, dbname, schema).heapBlksHit.last = parseInt(value)
+ case "idx_blks_read_bytes":
+ p.getTableMetrics(name, dbname, schema).idxBlksRead.last = parseInt(value)
+ case "idx_blks_hit_bytes":
+ p.getTableMetrics(name, dbname, schema).idxBlksHit.last = parseInt(value)
+ case "toast_blks_read_bytes":
+ p.getTableMetrics(name, dbname, schema).toastBlksRead.last = parseInt(value)
+ case "toast_blks_hit_bytes":
+ p.getTableMetrics(name, dbname, schema).toastBlksHit.last = parseInt(value)
+ case "tidx_blks_read_bytes":
+ p.getTableMetrics(name, dbname, schema).tidxBlksRead.last = parseInt(value)
+ case "tidx_blks_hit_bytes":
+ p.getTableMetrics(name, dbname, schema).tidxBlksHit.last = parseInt(value)
+ }
+ })
+}
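Several fields above are assigned via a `.last` member (`nTupUpd.last`, `heapBlksRead.last`, and so on), which suggests counters whose per-interval delta is charted rather than the raw value. An assumed shape for such a field, shown only to illustrate the idea:

```go
// Assumed shape of an incremental counter field, for illustration only; the real
// type is defined elsewhere in the module and is not part of this diff.
package main

import "fmt"

type incremental struct {
	prev, last int64
}

// delta returns the growth since the previous collection and rotates the window.
func (i *incremental) delta() int64 {
	d := i.last - i.prev
	i.prev = i.last
	return d
}

func main() {
	var nTupUpd incremental
	nTupUpd.last = 100
	fmt.Println(nTupUpd.delta()) // 100 on the first collection
	nTupUpd.last = 130
	fmt.Println(nTupUpd.delta()) // 30 updated rows since the previous collection
}
```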
diff --git a/src/go/plugin/go.d/modules/postgres/init.go b/src/go/plugin/go.d/modules/postgres/init.go
new file mode 100644
index 000000000..e2bbecc16
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/init.go
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+func (p *Postgres) validateConfig() error {
+ if p.DSN == "" {
+ return errors.New("DSN not set")
+ }
+ return nil
+}
+
+func (p *Postgres) initDBSelector() (matcher.Matcher, error) {
+ if p.DBSelector == "" {
+ return nil, nil
+ }
+
+ return matcher.NewSimplePatternsMatcher(p.DBSelector)
+}
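The matcher returned by `initDBSelector` is what `doQueryQueryableDatabases` consults via `MatchString` (see do_query_misc.go above). Netdata simple patterns are space-separated globs where a leading `!` negates and the first matching pattern wins, so a selector that skips template databases might look like this sketch (the pattern string is an example, not a default):

```go
// Sketch of using the same simple-patterns matcher to select databases.
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
)

func main() {
	m, err := matcher.NewSimplePatternsMatcher("!template* *")
	if err != nil {
		panic(err)
	}
	for _, db := range []string{"postgres", "template0", "template1", "netdata"} {
		fmt.Println(db, m.MatchString(db))
	}
	// postgres true, template0 false, template1 false, netdata true
}
```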
diff --git a/src/go/plugin/go.d/modules/postgres/integrations/postgresql.md b/src/go/plugin/go.d/modules/postgres/integrations/postgresql.md
new file mode 100644
index 000000000..4f2a91101
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/integrations/postgresql.md
@@ -0,0 +1,417 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/postgres/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/postgres/metadata.yaml"
+sidebar_label: "PostgreSQL"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# PostgreSQL
+
+
+<img src="https://netdata.cloud/img/postgres.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: postgres
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.
+
+
+It establishes a connection to the Postgres instance via a TCP or UNIX socket.
+To collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users, using known PostgreSQL TCP and UNIX sockets:
+
+- 127.0.0.1:5432
+- /var/run/postgresql/
+
+
+#### Limits
+
+Table and index metrics are not collected for databases with more than 50 tables or 250 indexes.
+These limits can be changed in the configuration file.
+
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per PostgreSQL instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.connections_utilization | used | percentage |
+| postgres.connections_usage | available, used | connections |
+| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |
+| postgres.transactions_duration | a dimension per bucket | transactions/s |
+| postgres.queries_duration | a dimension per bucket | queries/s |
+| postgres.locks_utilization | used | percentage |
+| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |
+| postgres.checkpoints_time | write, sync | milliseconds |
+| postgres.bgwriter_halts_rate | maxwritten | events/s |
+| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |
+| postgres.buffers_backend_fsync_rate | fsync | calls/s |
+| postgres.buffers_allocated_rate | allocated | B/s |
+| postgres.wal_io_rate | write | B/s |
+| postgres.wal_files_count | written, recycled | files |
+| postgres.wal_archiving_files_count | ready, done | files/s |
+| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |
+| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |
+| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |
+| postgres.txid_exhaustion_oldest_txid_num | xid | xid |
+| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |
+| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |
+| postgres.uptime | uptime | seconds |
+| postgres.databases_count | databases | databases |
+
+### Per repl application
+
+These metrics refer to the replication application.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| application | application name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |
+| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |
+
+### Per repl slot
+
+These metrics refer to the replication slot.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| slot | replication slot name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |
+
+### Per database
+
+These metrics refer to the database.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | database name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.db_transactions_ratio | committed, rollback | percentage |
+| postgres.db_transactions_rate | committed, rollback | transactions/s |
+| postgres.db_connections_utilization | used | percentage |
+| postgres.db_connections_count | connections | connections |
+| postgres.db_cache_io_ratio | miss | percentage |
+| postgres.db_io_rate | memory, disk | B/s |
+| postgres.db_ops_fetched_rows_ratio | fetched | percentage |
+| postgres.db_ops_read_rows_rate | returned, fetched | rows/s |
+| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |
+| postgres.db_conflicts_rate | conflicts | queries/s |
+| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |
+| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |
+| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |
+| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |
+| postgres.db_temp_files_created_rate | created | files/s |
+| postgres.db_temp_files_io_rate | written | B/s |
+| postgres.db_size | size | B |
+
+### Per table
+
+These metrics refer to the database table.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | database name |
+| schema | schema name |
+| table | table name |
+| parent_table | parent table name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.table_rows_dead_ratio | dead | percentage |
+| postgres.table_rows_count | live, dead | rows |
+| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |
+| postgres.table_ops_rows_hot_ratio | hot | percentage |
+| postgres.table_ops_rows_hot_rate | hot | rows/s |
+| postgres.table_cache_io_ratio | miss | percentage |
+| postgres.table_io_rate | memory, disk | B/s |
+| postgres.table_index_cache_io_ratio | miss | percentage |
+| postgres.table_index_io_rate | memory, disk | B/s |
+| postgres.table_toast_cache_io_ratio | miss | percentage |
+| postgres.table_toast_io_rate | memory, disk | B/s |
+| postgres.table_toast_index_cache_io_ratio | miss | percentage |
+| postgres.table_toast_index_io_rate | memory, disk | B/s |
+| postgres.table_scans_rate | index, sequential | scans/s |
+| postgres.table_scans_rows_rate | index, sequential | rows/s |
+| postgres.table_autovacuum_since_time | time | seconds |
+| postgres.table_vacuum_since_time | time | seconds |
+| postgres.table_autoanalyze_since_time | time | seconds |
+| postgres.table_analyze_since_time | time | seconds |
+| postgres.table_null_columns | null | columns |
+| postgres.table_size | size | B |
+| postgres.table_bloat_size_perc | bloat | percentage |
+| postgres.table_bloat_size | bloat | B |
+
+### Per index
+
+These metrics refer to the table index.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | database name |
+| schema | schema name |
+| table | table name |
+| parent_table | parent table name |
+| index | index name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.index_size | size | B |
+| postgres.index_bloat_size_perc | bloat | percentage |
+| postgres.index_bloat_size | bloat | B |
+| postgres.index_usage_status | used, unused | status |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |
+| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |
+| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |
+| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |
+| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |
+| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |
+| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |
+| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |
+| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |
+| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |
+| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} |
+| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |
+| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon |
+| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |
+
+
+## Setup
+
+### Prerequisites
+
+#### Create netdata user
+
+Create a user with granted `pg_monitor`
+or `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).
+
+To create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:
+
+```postgresql
+CREATE USER netdata;
+GRANT pg_monitor TO netdata;
+```
+
+After creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or
+the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your
+system.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/postgres.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/postgres.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |
+| timeout | Query timeout in seconds. | 2 | no |
+| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#simple-patterns-matcher). | | no |
+| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |
+| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+```yaml
+jobs:
+ - name: local
+ dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
+
+```
+##### Unix socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
+
+ - name: remote
+ dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m postgres
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `postgres` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep postgres
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep postgres /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep postgres
+```
+
+
diff --git a/src/go/plugin/go.d/modules/postgres/metadata.yaml b/src/go/plugin/go.d/modules/postgres/metadata.yaml
new file mode 100644
index 000000000..aacd19adb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/metadata.yaml
@@ -0,0 +1,750 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-postgres
+ plugin_name: go.d.plugin
+ module_name: postgres
+ monitored_instance:
+ name: PostgreSQL
+ link: https://www.postgresql.org/
+ categories:
+ - data-collection.database-servers
+ icon_filename: postgres.svg
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ alternative_monitored_instances: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - db
+ - database
+ - postgres
+ - postgresql
+ - sql
+ most_popular: true
+ overview:
+ multi_instance: true
+ data_collection:
+ metrics_description: |
+ This collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.
+ method_description: |
+ It establishes a connection to the Postgres instance via a TCP or UNIX socket.
+ To collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users, using known PostgreSQL TCP and UNIX sockets:
+
+ - 127.0.0.1:5432
+ - /var/run/postgresql/
+ limits:
+ description: |
+ Table and index metrics are not collected for databases with more than 50 tables or 250 indexes.
+ These limits can be changed in the configuration file.
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list:
+ - title: Create netdata user
+ description: |
+ Create a user with granted `pg_monitor`
+ or `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).
+
+ To create the `netdata` user with these permissions, execute the following in the psql session, as a user with CREATEROLE privileges:
+
+ ```postgresql
+ CREATE USER netdata;
+ GRANT pg_monitor TO netdata;
+ ```
+
+ After creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or
+ the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your
+ system.
+ configuration:
+ file:
+ name: go.d/postgres.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: dsn
+ description: Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+ default_value: postgres://postgres:postgres@127.0.0.1:5432/postgres
+ required: true
+ - name: timeout
+ description: Query timeout in seconds.
+ default_value: 2
+ required: false
+ - name: collect_databases_matching
+ description: Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#simple-patterns-matcher).
+ default_value: ""
+ required: false
+ - name: max_db_tables
+ description: Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit.
+ default_value: 50
+ required: false
+ - name: max_db_indexes
+ description: Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit.
+ default_value: 250
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: TCP socket
+ description: An example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
+ - name: Unix socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
+
+ - name: remote
+ dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: postgres_total_connection_utilization
+ metric: postgres.connections_utilization
+ info: average total connection utilization over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_acquired_locks_utilization
+ metric: postgres.locks_utilization
+ info: average acquired locks utilization over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_txid_exhaustion_perc
+ metric: postgres.txid_exhaustion_perc
+ info: percent towards TXID wraparound
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_db_cache_io_ratio
+ metric: postgres.db_cache_io_ratio
+ info: average cache hit ratio in db ${label:database} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_db_transactions_rollback_ratio
+ metric: postgres.db_cache_io_ratio
+ info: average aborted transactions percentage in db ${label:database} over the last five minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_db_deadlocks_rate
+ metric: postgres.db_deadlocks_rate
+ info: number of deadlocks detected in db ${label:database} in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_cache_io_ratio
+ metric: postgres.table_cache_io_ratio
+ info: average cache hit ratio in db ${label:database} table ${label:table} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_index_cache_io_ratio
+ metric: postgres.table_index_cache_io_ratio
+ info: average index cache hit ratio in db ${label:database} table ${label:table} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_toast_cache_io_ratio
+ metric: postgres.table_toast_cache_io_ratio
+ info: average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_toast_index_cache_io_ratio
+ metric: postgres.table_toast_index_cache_io_ratio
+ info: average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_bloat_size_perc
+ metric: postgres.table_bloat_size_perc
+ info: bloat size percentage in db ${label:database} table ${label:table}
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_last_autovacuum_time
+ metric: postgres.table_autovacuum_since_time
+ info: time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_last_autoanalyze_time
+ metric: postgres.table_autoanalyze_since_time
+ info: time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_index_bloat_size_perc
+ metric: postgres.index_bloat_size_perc
+ info: bloat size percentage in db ${label:database} table ${label:table} index ${label:index}
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: postgres.connections_utilization
+ description: Connections utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: postgres.connections_usage
+ description: Connections usage
+ unit: connections
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: postgres.connections_state_count
+ description: Connections in each state
+ unit: connections
+ chart_type: stacked
+ dimensions:
+ - name: active
+ - name: idle
+ - name: idle_in_transaction
+ - name: idle_in_transaction_aborted
+ - name: disabled
+ - name: postgres.transactions_duration
+ description: Observed transactions time
+ unit: transactions/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: postgres.queries_duration
+ description: Observed active queries time
+ unit: queries/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: postgres.locks_utilization
+ description: Acquired locks utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: postgres.checkpoints_rate
+ description: Checkpoints
+ unit: checkpoints/s
+ chart_type: stacked
+ dimensions:
+ - name: scheduled
+ - name: requested
+ - name: postgres.checkpoints_time
+ description: Checkpoint time
+ unit: milliseconds
+ chart_type: stacked
+ dimensions:
+ - name: write
+ - name: sync
+ - name: postgres.bgwriter_halts_rate
+ description: Background writer scan halts
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: maxwritten
+ - name: postgres.buffers_io_rate
+ description: Buffers written rate
+ unit: B/s
+ chart_type: area
+ dimensions:
+ - name: checkpoint
+ - name: backend
+ - name: bgwriter
+ - name: postgres.buffers_backend_fsync_rate
+ description: Backend fsync calls
+ unit: calls/s
+ chart_type: line
+ dimensions:
+ - name: fsync
+ - name: postgres.buffers_allocated_rate
+ description: Buffers allocated
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: postgres.wal_io_rate
+ description: Write-Ahead Log writes
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: postgres.wal_files_count
+ description: Write-Ahead Log files
+ unit: files
+ chart_type: stacked
+ dimensions:
+ - name: written
+ - name: recycled
+ - name: postgres.wal_archiving_files_count
+ description: Write-Ahead Log archived files
+ unit: files/s
+ chart_type: stacked
+ dimensions:
+ - name: ready
+ - name: done
+ - name: postgres.autovacuum_workers_count
+ description: Autovacuum workers
+ unit: workers
+ chart_type: line
+ dimensions:
+ - name: analyze
+ - name: vacuum_analyze
+ - name: vacuum
+ - name: vacuum_freeze
+ - name: brin_summarize
+ - name: postgres.txid_exhaustion_towards_autovacuum_perc
+ description: Percent towards emergency autovacuum
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: emergency_autovacuum
+ - name: postgres.txid_exhaustion_perc
+ description: Percent towards transaction ID wraparound
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: txid_exhaustion
+ - name: postgres.txid_exhaustion_oldest_txid_num
+ description: Oldest transaction XID
+ unit: xid
+ chart_type: line
+ dimensions:
+ - name: xid
+ - name: postgres.catalog_relations_count
+ description: Relation count
+ unit: relations
+ chart_type: stacked
+ dimensions:
+ - name: ordinary_table
+ - name: index
+ - name: sequence
+ - name: toast_table
+ - name: view
+ - name: materialized_view
+ - name: composite_type
+ - name: foreign_table
+ - name: partitioned_table
+ - name: partitioned_index
+ - name: postgres.catalog_relations_size
+ description: Relation size
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: ordinary_table
+ - name: index
+ - name: sequence
+ - name: toast_table
+ - name: view
+ - name: materialized_view
+ - name: composite_type
+ - name: foreign_table
+ - name: partitioned_table
+ - name: partitioned_index
+ - name: postgres.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: postgres.databases_count
+ description: Number of databases
+ unit: databases
+ chart_type: line
+ dimensions:
+ - name: databases
+ - name: repl application
+ description: These metrics refer to the replication application.
+ labels:
+ - name: application
+ description: application name
+ metrics:
+ - name: postgres.replication_app_wal_lag_size
+ description: Standby application WAL lag size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: sent_lag
+ - name: write_lag
+ - name: flush_lag
+ - name: replay_lag
+ - name: postgres.replication_app_wal_lag_time
+ description: Standby application WAL lag time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: write_lag
+ - name: flush_lag
+ - name: replay_lag
+ - name: repl slot
+ description: These metrics refer to the replication slot.
+ labels:
+ - name: slot
+ description: replication slot name
+ metrics:
+ - name: postgres.replication_slot_files_count
+ description: Replication slot files
+ unit: files
+ chart_type: line
+ dimensions:
+ - name: wal_keep
+ - name: pg_replslot_files
+ - name: database
+ description: These metrics refer to the database.
+ labels:
+ - name: database
+ description: database name
+ metrics:
+ - name: postgres.db_transactions_ratio
+ description: Database transactions ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: committed
+ - name: rollback
+ - name: postgres.db_transactions_rate
+ description: Database transactions
+ unit: transactions/s
+ chart_type: line
+ dimensions:
+ - name: committed
+ - name: rollback
+ - name: postgres.db_connections_utilization
+ description: Database connections utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: postgres.db_connections_count
+ description: Database connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: postgres.db_cache_io_ratio
+ description: Database buffer cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.db_io_rate
+ description: Database reads
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.db_ops_fetched_rows_ratio
+ description: Database rows fetched ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: fetched
+ - name: postgres.db_ops_read_rows_rate
+ description: Database rows read
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: returned
+ - name: fetched
+ - name: postgres.db_ops_write_rows_rate
+ description: Database rows written
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: inserted
+ - name: deleted
+ - name: updated
+ - name: postgres.db_conflicts_rate
+ description: Database canceled queries
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: conflicts
+ - name: postgres.db_conflicts_reason_rate
+ description: Database canceled queries by reason
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: tablespace
+ - name: lock
+ - name: snapshot
+ - name: bufferpin
+ - name: deadlock
+ - name: postgres.db_deadlocks_rate
+ description: Database deadlocks
+ unit: deadlocks/s
+ chart_type: line
+ dimensions:
+ - name: deadlocks
+ - name: postgres.db_locks_held_count
+ description: Database locks held
+ unit: locks
+ chart_type: stacked
+ dimensions:
+ - name: access_share
+ - name: row_share
+ - name: row_exclusive
+ - name: share_update
+ - name: share
+ - name: share_row_exclusive
+ - name: exclusive
+ - name: access_exclusive
+ - name: postgres.db_locks_awaited_count
+ description: Database locks awaited
+ unit: locks
+ chart_type: stacked
+ dimensions:
+ - name: access_share
+ - name: row_share
+ - name: row_exclusive
+ - name: share_update
+ - name: share
+ - name: share_row_exclusive
+ - name: exclusive
+ - name: access_exclusive
+ - name: postgres.db_temp_files_created_rate
+ description: Database created temporary files
+ unit: files/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: postgres.db_temp_files_io_rate
+ description: Database temporary files data written to disk
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: written
+ - name: postgres.db_size
+ description: Database size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: table
+ description: These metrics refer to the database table.
+ labels:
+ - name: database
+ description: database name
+ - name: schema
+ description: schema name
+ - name: table
+ description: table name
+ - name: parent_table
+ description: parent table name
+ metrics:
+ - name: postgres.table_rows_dead_ratio
+ description: Table dead rows
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: dead
+ - name: postgres.table_rows_count
+ description: Table total rows
+ unit: rows
+ chart_type: line
+ dimensions:
+ - name: live
+ - name: dead
+ - name: postgres.table_ops_rows_rate
+ description: Table throughput
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: inserted
+ - name: deleted
+ - name: updated
+ - name: postgres.table_ops_rows_hot_ratio
+ description: Table HOT updates ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: hot
+ - name: postgres.table_ops_rows_hot_rate
+ description: Table HOT updates
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: hot
+ - name: postgres.table_cache_io_ratio
+ description: Table I/O cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.table_io_rate
+ description: Table I/O
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.table_index_cache_io_ratio
+ description: Table index I/O cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.table_index_io_rate
+ description: Table index I/O
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.table_toast_cache_io_ratio
+ description: Table TOAST I/O cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.table_toast_io_rate
+ description: Table TOAST I/O
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.table_toast_index_cache_io_ratio
+ description: Table TOAST index I/O cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.table_toast_index_io_rate
+ description: Table TOAST index I/O
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.table_scans_rate
+ description: Table scans
+ unit: scans/s
+ chart_type: line
+ dimensions:
+ - name: index
+ - name: sequential
+ - name: postgres.table_scans_rows_rate
+ description: Table live rows fetched by scans
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: index
+ - name: sequential
+ - name: postgres.table_autovacuum_since_time
+ description: Table time since last auto VACUUM
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: postgres.table_vacuum_since_time
+ description: Table time since last manual VACUUM
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: postgres.table_autoanalyze_since_time
+ description: Table time since last auto ANALYZE
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: postgres.table_analyze_since_time
+ description: Table time since last manual ANALYZE
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: postgres.table_null_columns
+ description: Table null columns
+ unit: columns
+ chart_type: line
+ dimensions:
+ - name: "null"
+ - name: postgres.table_size
+ description: Table total size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: postgres.table_bloat_size_perc
+ description: Table bloat size percentage
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: bloat
+ - name: postgres.table_bloat_size
+ description: Table bloat size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: bloat
+ - name: index
+ description: These metrics refer to the table index.
+ labels:
+ - name: database
+ description: database name
+ - name: schema
+ description: schema name
+ - name: table
+ description: table name
+ - name: parent_table
+ description: parent table name
+ - name: index
+ description: index name
+ metrics:
+ - name: postgres.index_size
+ description: Index size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: postgres.index_bloat_size_perc
+ description: Index bloat size percentage
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: bloat
+ - name: postgres.index_bloat_size
+ description: Index bloat size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: bloat
+ - name: postgres.index_usage_status
+ description: Index usage status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: unused
diff --git a/src/go/plugin/go.d/modules/postgres/metrics.go b/src/go/plugin/go.d/modules/postgres/metrics.go
new file mode 100644
index 000000000..a42ccba13
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/metrics.go
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+
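+// pgMetrics holds all values gathered during one collection cycle, grouped per database, table, index, replication standby application and replication slot.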
+type pgMetrics struct {
+ srvMetrics
+ dbs map[string]*dbMetrics
+ tables map[string]*tableMetrics
+ indexes map[string]*indexMetrics
+ replApps map[string]*replStandbyAppMetrics
+ replSlots map[string]*replSlotMetrics
+}
+
+type srvMetrics struct {
+ xactTimeHist metrics.Histogram
+ queryTimeHist metrics.Histogram
+
+ maxConnections int64
+ maxLocksHeld int64
+
+ uptime int64
+
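+ // object counts and total sizes per pg_class.relkind (r = ordinary table, i = index, S = sequence, t = TOAST table, ...)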
+ relkindOrdinaryTable int64
+ relkindIndex int64
+ relkindSequence int64
+ relkindTOASTTable int64
+ relkindView int64
+ relkindMatView int64
+ relkindCompositeType int64
+ relkindForeignTable int64
+ relkindPartitionedTable int64
+ relkindPartitionedIndex int64
+ relkindOrdinaryTableSize int64
+ relkindIndexSize int64
+ relkindSequenceSize int64
+ relkindTOASTTableSize int64
+ relkindViewSize int64
+ relkindMatViewSize int64
+ relkindCompositeTypeSize int64
+ relkindForeignTableSize int64
+ relkindPartitionedTableSize int64
+ relkindPartitionedIndexSize int64
+
+ connUsed int64
+ connStateActive int64
+ connStateIdle int64
+ connStateIdleInTrans int64
+ connStateIdleInTransAborted int64
+ connStateFastpathFunctionCall int64
+ connStateDisabled int64
+
+ checkpointsTimed int64
+ checkpointsReq int64
+ checkpointWriteTime int64
+ checkpointSyncTime int64
+ buffersCheckpoint int64
+ buffersClean int64
+ maxwrittenClean int64
+ buffersBackend int64
+ buffersBackendFsync int64
+ buffersAlloc int64
+
+ oldestXID int64
+ percentTowardsWraparound int64
+ percentTowardsEmergencyAutovacuum int64
+
+ walWrites int64
+ walRecycledFiles int64
+ walWrittenFiles int64
+ walArchiveFilesReady int64
+ walArchiveFilesDone int64
+
+ autovacuumWorkersAnalyze int64
+ autovacuumWorkersVacuumAnalyze int64
+ autovacuumWorkersVacuum int64
+ autovacuumWorkersVacuumFreeze int64
+ autovacuumWorkersBrinSummarize int64
+}
+
+type dbMetrics struct {
+ name string
+
+ updated bool
+ hasCharts bool
+
+ numBackends int64
+ datConnLimit int64
+ xactCommit int64
+ xactRollback int64
+ blksRead incDelta
+ blksHit incDelta
+ tupReturned incDelta
+ tupFetched incDelta
+ tupInserted int64
+ tupUpdated int64
+ tupDeleted int64
+ conflicts int64
+ tempFiles int64
+ tempBytes int64
+ deadlocks int64
+
+ size *int64 // need 'connect' privilege for pg_database_size()
+
+ conflTablespace int64
+ conflLock int64
+ conflSnapshot int64
+ conflBufferpin int64
+ conflDeadlock int64
+
+ accessShareLockHeld int64
+ rowShareLockHeld int64
+ rowExclusiveLockHeld int64
+ shareUpdateExclusiveLockHeld int64
+ shareLockHeld int64
+ shareRowExclusiveLockHeld int64
+ exclusiveLockHeld int64
+ accessExclusiveLockHeld int64
+ accessShareLockAwaited int64
+ rowShareLockAwaited int64
+ rowExclusiveLockAwaited int64
+ shareUpdateExclusiveLockAwaited int64
+ shareLockAwaited int64
+ shareRowExclusiveLockAwaited int64
+ exclusiveLockAwaited int64
+ accessExclusiveLockAwaited int64
+}
+
+type replStandbyAppMetrics struct {
+ name string
+
+ updated bool
+ hasCharts bool
+
+ walSentDelta int64
+ walWriteDelta int64
+ walFlushDelta int64
+ walReplayDelta int64
+
+ walWriteLag int64
+ walFlushLag int64
+ walReplayLag int64
+}
+
+type replSlotMetrics struct {
+ name string
+
+ updated bool
+ hasCharts bool
+
+ walKeep int64
+ files int64
+}
+
+type tableMetrics struct {
+ name string
+ parentName string
+ db string
+ schema string
+
+ updated bool
+ hasCharts bool
+ hasLastAutoVacuumChart bool
+ hasLastVacuumChart bool
+ hasLastAutoAnalyzeChart bool
+ hasLastAnalyzeChart bool
+ hasTableIOCharts bool
+ hasTableIdxIOCharts bool
+ hasTableTOASTIOCharts bool
+ hasTableTOASTIdxIOCharts bool
+
+ // pg_stat_user_tables
+ seqScan int64
+ seqTupRead int64
+ idxScan int64
+ idxTupFetch int64
+ nTupIns int64
+ nTupUpd incDelta
+ nTupDel int64
+ nTupHotUpd incDelta
+ nLiveTup int64
+ nDeadTup int64
+ lastVacuumAgo int64
+ lastAutoVacuumAgo int64
+ lastAnalyzeAgo int64
+ lastAutoAnalyzeAgo int64
+ vacuumCount int64
+ autovacuumCount int64
+ analyzeCount int64
+ autoAnalyzeCount int64
+
+ // pg_statio_user_tables
+ heapBlksRead incDelta
+ heapBlksHit incDelta
+ idxBlksRead incDelta
+ idxBlksHit incDelta
+ toastBlksRead incDelta
+ toastBlksHit incDelta
+ tidxBlksRead incDelta
+ tidxBlksHit incDelta
+
+ totalSize int64
+
+ bloatSize *int64 // need 'SELECT' access to the table
+ bloatSizePerc *int64 // need 'SELECT' access to the table
+ nullColumns *int64 // need 'SELECT' access to the table
+}
+
+type indexMetrics struct {
+ name string
+ db string
+ schema string
+ table string
+ parentTable string
+
+ updated bool
+ hasCharts bool
+
+ idxScan int64
+ idxTupRead int64
+ idxTupFetch int64
+
+ size int64
+
+ bloatSize *int64 // need 'SELECT' access to the table
+ bloatSizePerc *int64 // need 'SELECT' access to the table
+}
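+// incDelta tracks a monotonically increasing counter: prev holds the previous reading, last the current one, and delta() returns the increase between collections.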
+type incDelta struct{ prev, last int64 }
+
+func (pc *incDelta) delta() int64 { return pc.last - pc.prev }
diff --git a/src/go/plugin/go.d/modules/postgres/postgres.go b/src/go/plugin/go.d/modules/postgres/postgres.go
new file mode 100644
index 000000000..57491039a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/postgres.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "database/sql"
+ _ "embed"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/jackc/pgx/v5/stdlib"
+ _ "github.com/jackc/pgx/v5/stdlib"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("postgres", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Postgres {
+ return &Postgres{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ DSN: "postgres://postgres:postgres@127.0.0.1:5432/postgres",
+ XactTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10},
+ QueryTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10},
+ // charts: 20 x table, 4 x index.
+ // https://discord.com/channels/847502280503590932/1022693928874549368
+ MaxDBTables: 50,
+ MaxDBIndexes: 250,
+ },
+ charts: baseCharts.Copy(),
+ dbConns: make(map[string]*dbConn),
+ mx: &pgMetrics{
+ dbs: make(map[string]*dbMetrics),
+ indexes: make(map[string]*indexMetrics),
+ tables: make(map[string]*tableMetrics),
+ replApps: make(map[string]*replStandbyAppMetrics),
+ replSlots: make(map[string]*replSlotMetrics),
+ },
+ recheckSettingsEvery: time.Minute * 30,
+ doSlowEvery: time.Minute * 5,
+ addXactQueryRunningTimeChartsOnce: &sync.Once{},
+ addWALFilesChartsOnce: &sync.Once{},
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ DSN string `yaml:"dsn" json:"dsn"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ DBSelector string `yaml:"collect_databases_matching,omitempty" json:"collect_databases_matching"`
+ XactTimeHistogram []float64 `yaml:"transaction_time_histogram,omitempty" json:"transaction_time_histogram"`
+ QueryTimeHistogram []float64 `yaml:"query_time_histogram,omitempty" json:"query_time_histogram"`
+ MaxDBTables int64 `yaml:"max_db_tables" json:"max_db_tables"`
+ MaxDBIndexes int64 `yaml:"max_db_indexes" json:"max_db_indexes"`
+}
+
+type (
+ Postgres struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+ addXactQueryRunningTimeChartsOnce *sync.Once
+ addWALFilesChartsOnce *sync.Once
+
+ db *sql.DB
+ dbConns map[string]*dbConn
+
+ superUser *bool
+ pgIsInRecovery *bool
+ pgVersion int
+ dbSr matcher.Matcher
+ recheckSettingsTime time.Time
+ recheckSettingsEvery time.Duration
+ doSlowTime time.Time
+ doSlowEvery time.Duration
+
+ mx *pgMetrics
+ }
+ dbConn struct {
+ db *sql.DB
+ connStr string
+ connErrors int
+ }
+)
+
+func (p *Postgres) Configuration() any {
+ return p.Config
+}
+
+func (p *Postgres) Init() error {
+ err := p.validateConfig()
+ if err != nil {
+ p.Errorf("config validation: %v", err)
+ return err
+ }
+
+ sr, err := p.initDBSelector()
+ if err != nil {
+ p.Errorf("config validation: %v", err)
+ return err
+ }
+ p.dbSr = sr
+
+ p.mx.xactTimeHist = metrics.NewHistogramWithRangeBuckets(p.XactTimeHistogram)
+ p.mx.queryTimeHist = metrics.NewHistogramWithRangeBuckets(p.QueryTimeHistogram)
+
+ return nil
+}
+
+func (p *Postgres) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *Postgres) Charts() *module.Charts {
+ return p.charts
+}
+
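+// Collect runs one collection cycle; errors are logged, and nil is returned when no metrics were collected.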
+func (p *Postgres) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (p *Postgres) Cleanup() {
+ if p.db == nil {
+ return
+ }
+ if err := p.db.Close(); err != nil {
+ p.Warningf("cleanup: error on closing the Postgres database [%s]: %v", p.DSN, err)
+ }
+ p.db = nil
+
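+ // also close any per-database connections and unregister their pgx connection strings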
+ for dbname, conn := range p.dbConns {
+ delete(p.dbConns, dbname)
+ if conn.connStr != "" {
+ stdlib.UnregisterConnConfig(conn.connStr)
+ }
+ if conn.db != nil {
+ _ = conn.db.Close()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/postgres/postgres_test.go b/src/go/plugin/go.d/modules/postgres/postgres_test.go
new file mode 100644
index 000000000..7e91b288f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/postgres_test.go
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "bufio"
+ "bytes"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer140004ServerVersionNum, _ = os.ReadFile("testdata/v14.4/server_version_num.txt")
+ dataVer140004IsSuperUserFalse, _ = os.ReadFile("testdata/v14.4/is_super_user-false.txt")
+ dataVer140004IsSuperUserTrue, _ = os.ReadFile("testdata/v14.4/is_super_user-true.txt")
+ dataVer140004PGIsInRecoveryTrue, _ = os.ReadFile("testdata/v14.4/pg_is_in_recovery-true.txt")
+ dataVer140004SettingsMaxConnections, _ = os.ReadFile("testdata/v14.4/settings_max_connections.txt")
+ dataVer140004SettingsMaxLocksHeld, _ = os.ReadFile("testdata/v14.4/settings_max_locks_held.txt")
+ dataVer140004ServerCurrentConnections, _ = os.ReadFile("testdata/v14.4/server_current_connections.txt")
+ dataVer140004ServerConnectionsState, _ = os.ReadFile("testdata/v14.4/server_connections_state.txt")
+ dataVer140004Checkpoints, _ = os.ReadFile("testdata/v14.4/checkpoints.txt")
+ dataVer140004ServerUptime, _ = os.ReadFile("testdata/v14.4/uptime.txt")
+ dataVer140004TXIDWraparound, _ = os.ReadFile("testdata/v14.4/txid_wraparound.txt")
+ dataVer140004WALWrites, _ = os.ReadFile("testdata/v14.4/wal_writes.txt")
+ dataVer140004WALFiles, _ = os.ReadFile("testdata/v14.4/wal_files.txt")
+ dataVer140004WALArchiveFiles, _ = os.ReadFile("testdata/v14.4/wal_archive_files.txt")
+ dataVer140004CatalogRelations, _ = os.ReadFile("testdata/v14.4/catalog_relations.txt")
+ dataVer140004AutovacuumWorkers, _ = os.ReadFile("testdata/v14.4/autovacuum_workers.txt")
+ dataVer140004XactQueryRunningTime, _ = os.ReadFile("testdata/v14.4/xact_query_running_time.txt")
+ dataVer140004ReplStandbyAppDelta, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_delta.txt")
+ dataVer140004ReplStandbyAppLag, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_lag.txt")
+ dataVer140004ReplSlotFiles, _ = os.ReadFile("testdata/v14.4/replication_slot_files.txt")
+ dataVer140004DatabaseStats, _ = os.ReadFile("testdata/v14.4/database_stats.txt")
+ dataVer140004DatabaseSize, _ = os.ReadFile("testdata/v14.4/database_size.txt")
+ dataVer140004DatabaseConflicts, _ = os.ReadFile("testdata/v14.4/database_conflicts.txt")
+ dataVer140004DatabaseLocks, _ = os.ReadFile("testdata/v14.4/database_locks.txt")
+ dataVer140004QueryableDatabaseList, _ = os.ReadFile("testdata/v14.4/queryable_database_list.txt")
+ dataVer140004StatUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_tables_db_postgres.txt")
+ dataVer140004StatIOUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/statio_user_tables_db_postgres.txt")
+ dataVer140004StatUserIndexesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_indexes_db_postgres.txt")
+ dataVer140004Bloat, _ = os.ReadFile("testdata/v14.4/bloat_tables.txt")
+ dataVer140004ColumnsStats, _ = os.ReadFile("testdata/v14.4/table_columns_stats.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer140004ServerVersionNum": dataVer140004ServerVersionNum,
+ "dataVer140004IsSuperUserFalse": dataVer140004IsSuperUserFalse,
+ "dataVer140004IsSuperUserTrue": dataVer140004IsSuperUserTrue,
+ "dataVer140004PGIsInRecoveryTrue": dataVer140004PGIsInRecoveryTrue,
+ "dataVer140004SettingsMaxConnections": dataVer140004SettingsMaxConnections,
+ "dataVer140004SettingsMaxLocksHeld": dataVer140004SettingsMaxLocksHeld,
+ "dataVer140004ServerCurrentConnections": dataVer140004ServerCurrentConnections,
+ "dataVer140004ServerConnectionsState": dataVer140004ServerConnectionsState,
+ "dataVer140004Checkpoints": dataVer140004Checkpoints,
+ "dataVer140004ServerUptime": dataVer140004ServerUptime,
+ "dataVer140004TXIDWraparound": dataVer140004TXIDWraparound,
+ "dataVer140004WALWrites": dataVer140004WALWrites,
+ "dataVer140004WALFiles": dataVer140004WALFiles,
+ "dataVer140004WALArchiveFiles": dataVer140004WALArchiveFiles,
+ "dataVer140004CatalogRelations": dataVer140004CatalogRelations,
+ "dataVer140004AutovacuumWorkers": dataVer140004AutovacuumWorkers,
+ "dataVer140004XactQueryRunningTime": dataVer140004XactQueryRunningTime,
+ "dataV14004ReplStandbyAppDelta": dataVer140004ReplStandbyAppDelta,
+ "dataV14004ReplStandbyAppLag": dataVer140004ReplStandbyAppLag,
+ "dataVer140004ReplSlotFiles": dataVer140004ReplSlotFiles,
+ "dataVer140004DatabaseStats": dataVer140004DatabaseStats,
+ "dataVer140004DatabaseSize": dataVer140004DatabaseSize,
+ "dataVer140004DatabaseConflicts": dataVer140004DatabaseConflicts,
+ "dataVer140004DatabaseLocks": dataVer140004DatabaseLocks,
+ "dataVer140004QueryableDatabaseList": dataVer140004QueryableDatabaseList,
+ "dataVer140004StatUserTablesDBPostgres": dataVer140004StatUserTablesDBPostgres,
+ "dataVer140004StatIOUserTablesDBPostgres": dataVer140004StatIOUserTablesDBPostgres,
+ "dataVer140004StatUserIndexesDBPostgres": dataVer140004StatUserIndexesDBPostgres,
+ "dataVer140004Bloat": dataVer140004Bloat,
+ "dataVer140004ColumnsStats": dataVer140004ColumnsStats,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPostgres_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Postgres{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPostgres_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "Success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "Fail when DSN not set": {
+ wantFail: true,
+ config: Config{DSN: ""},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pg := New()
+ pg.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, pg.Init())
+ } else {
+ assert.NoError(t, pg.Init())
+ }
+ })
+ }
+}
+
+func TestPostgres_Cleanup(t *testing.T) {
+
+}
+
+func TestPostgres_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestPostgres_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func(t *testing.T, pg *Postgres, mock sqlmock.Sqlmock)
+ wantFail bool
+ }{
+ "Success when all queries are successful (v14.4)": {
+ wantFail: false,
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ pg.dbSr = matcher.TRUE()
+
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004SettingsMaxConnections)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataVer140004ServerCurrentConnections)
+ mockExpect(t, m, queryServerConnectionsState(), dataVer140004ServerConnectionsState)
+ mockExpect(t, m, queryCheckpoints(), dataVer140004Checkpoints)
+ mockExpect(t, m, queryServerUptime(), dataVer140004ServerUptime)
+ mockExpect(t, m, queryTXIDWraparound(), dataVer140004TXIDWraparound)
+ mockExpect(t, m, queryWALWrites(140004), dataVer140004WALWrites)
+ mockExpect(t, m, queryCatalogRelations(), dataVer140004CatalogRelations)
+ mockExpect(t, m, queryAutovacuumWorkers(), dataVer140004AutovacuumWorkers)
+ mockExpect(t, m, queryXactQueryRunningTime(), dataVer140004XactQueryRunningTime)
+
+ mockExpect(t, m, queryWALFiles(140004), dataVer140004WALFiles)
+ mockExpect(t, m, queryWALArchiveFiles(140004), dataVer140004WALArchiveFiles)
+
+ mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataVer140004ReplStandbyAppDelta)
+ mockExpect(t, m, queryReplicationStandbyAppLag(), dataVer140004ReplStandbyAppLag)
+ mockExpect(t, m, queryReplicationSlotFiles(140004), dataVer140004ReplSlotFiles)
+
+ mockExpect(t, m, queryDatabaseStats(), dataVer140004DatabaseStats)
+ mockExpect(t, m, queryDatabaseSize(140004), dataVer140004DatabaseSize)
+ mockExpect(t, m, queryDatabaseConflicts(), dataVer140004DatabaseConflicts)
+ mockExpect(t, m, queryDatabaseLocks(), dataVer140004DatabaseLocks)
+
+ mockExpect(t, m, queryQueryableDatabaseList(), dataVer140004QueryableDatabaseList)
+ mockExpect(t, m, queryStatUserTables(), dataVer140004StatUserTablesDBPostgres)
+ mockExpect(t, m, queryStatIOUserTables(), dataVer140004StatIOUserTablesDBPostgres)
+ mockExpect(t, m, queryStatUserIndexes(), dataVer140004StatUserIndexesDBPostgres)
+ mockExpect(t, m, queryBloat(), dataVer140004Bloat)
+ mockExpect(t, m, queryColumnsStats(), dataVer140004ColumnsStats)
+ },
+ },
+ "Fail when the second query unsuccessful (v14.4)": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataVer140004ServerCurrentConnections)
+ mockExpectErr(m, queryServerConnectionsState())
+ },
+ },
+ "Fail when querying the database version returns an error": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpectErr(m, queryServerVersion())
+ },
+ },
+ "Fail when querying settings max connection returns an error": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpectErr(m, querySettingsMaxConnections())
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ pg := New()
+ pg.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, pg.Init())
+
+ test.prepareMock(t, pg, mock)
+
+ if test.wantFail {
+ assert.Error(t, pg.Check())
+ } else {
+ assert.NoError(t, pg.Check())
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
+func TestPostgres_Collect(t *testing.T) {
+ type testCaseStep struct {
+ prepareMock func(t *testing.T, pg *Postgres, mock sqlmock.Sqlmock)
+ check func(t *testing.T, pg *Postgres)
+ }
+ tests := map[string][]testCaseStep{
+ "Success on all queries, collect all dbs (v14.4)": {
+ {
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ pg.dbSr = matcher.TRUE()
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004SettingsMaxConnections)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataVer140004ServerCurrentConnections)
+ mockExpect(t, m, queryServerConnectionsState(), dataVer140004ServerConnectionsState)
+ mockExpect(t, m, queryCheckpoints(), dataVer140004Checkpoints)
+ mockExpect(t, m, queryServerUptime(), dataVer140004ServerUptime)
+ mockExpect(t, m, queryTXIDWraparound(), dataVer140004TXIDWraparound)
+ mockExpect(t, m, queryWALWrites(140004), dataVer140004WALWrites)
+ mockExpect(t, m, queryCatalogRelations(), dataVer140004CatalogRelations)
+ mockExpect(t, m, queryAutovacuumWorkers(), dataVer140004AutovacuumWorkers)
+ mockExpect(t, m, queryXactQueryRunningTime(), dataVer140004XactQueryRunningTime)
+
+ mockExpect(t, m, queryWALFiles(140004), dataVer140004WALFiles)
+ mockExpect(t, m, queryWALArchiveFiles(140004), dataVer140004WALArchiveFiles)
+
+ mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataVer140004ReplStandbyAppDelta)
+ mockExpect(t, m, queryReplicationStandbyAppLag(), dataVer140004ReplStandbyAppLag)
+ mockExpect(t, m, queryReplicationSlotFiles(140004), dataVer140004ReplSlotFiles)
+
+ mockExpect(t, m, queryDatabaseStats(), dataVer140004DatabaseStats)
+ mockExpect(t, m, queryDatabaseSize(140004), dataVer140004DatabaseSize)
+ mockExpect(t, m, queryDatabaseConflicts(), dataVer140004DatabaseConflicts)
+ mockExpect(t, m, queryDatabaseLocks(), dataVer140004DatabaseLocks)
+
+ mockExpect(t, m, queryQueryableDatabaseList(), dataVer140004QueryableDatabaseList)
+ mockExpect(t, m, queryStatUserTables(), dataVer140004StatUserTablesDBPostgres)
+ mockExpect(t, m, queryStatIOUserTables(), dataVer140004StatIOUserTablesDBPostgres)
+ mockExpect(t, m, queryStatUserIndexes(), dataVer140004StatUserIndexesDBPostgres)
+ mockExpect(t, m, queryBloat(), dataVer140004Bloat)
+ mockExpect(t, m, queryColumnsStats(), dataVer140004ColumnsStats)
+ },
+ check: func(t *testing.T, pg *Postgres) {
+ mx := pg.Collect()
+
+ expected := map[string]int64{
+ "autovacuum_analyze": 0,
+ "autovacuum_brin_summarize": 0,
+ "autovacuum_vacuum": 0,
+ "autovacuum_vacuum_analyze": 0,
+ "autovacuum_vacuum_freeze": 0,
+ "buffers_alloc": 27295744,
+ "buffers_backend": 0,
+ "buffers_backend_fsync": 0,
+ "buffers_checkpoint": 32768,
+ "buffers_clean": 0,
+ "catalog_relkind_I_count": 0,
+ "catalog_relkind_I_size": 0,
+ "catalog_relkind_S_count": 0,
+ "catalog_relkind_S_size": 0,
+ "catalog_relkind_c_count": 0,
+ "catalog_relkind_c_size": 0,
+ "catalog_relkind_f_count": 0,
+ "catalog_relkind_f_size": 0,
+ "catalog_relkind_i_count": 155,
+ "catalog_relkind_i_size": 3678208,
+ "catalog_relkind_m_count": 0,
+ "catalog_relkind_m_size": 0,
+ "catalog_relkind_p_count": 0,
+ "catalog_relkind_p_size": 0,
+ "catalog_relkind_r_count": 66,
+ "catalog_relkind_r_size": 3424256,
+ "catalog_relkind_t_count": 38,
+ "catalog_relkind_t_size": 548864,
+ "catalog_relkind_v_count": 137,
+ "catalog_relkind_v_size": 0,
+ "checkpoint_sync_time": 47,
+ "checkpoint_write_time": 167,
+ "checkpoints_req": 16,
+ "checkpoints_timed": 1814,
+ "databases_count": 2,
+ "db_postgres_blks_hit": 1221125,
+ "db_postgres_blks_read": 3252,
+ "db_postgres_blks_read_perc": 0,
+ "db_postgres_confl_bufferpin": 0,
+ "db_postgres_confl_deadlock": 0,
+ "db_postgres_confl_lock": 0,
+ "db_postgres_confl_snapshot": 0,
+ "db_postgres_confl_tablespace": 0,
+ "db_postgres_conflicts": 0,
+ "db_postgres_deadlocks": 0,
+ "db_postgres_lock_mode_AccessExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_AccessExclusiveLock_held": 0,
+ "db_postgres_lock_mode_AccessShareLock_awaited": 0,
+ "db_postgres_lock_mode_AccessShareLock_held": 99,
+ "db_postgres_lock_mode_ExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_ExclusiveLock_held": 0,
+ "db_postgres_lock_mode_RowExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_RowExclusiveLock_held": 99,
+ "db_postgres_lock_mode_RowShareLock_awaited": 0,
+ "db_postgres_lock_mode_RowShareLock_held": 99,
+ "db_postgres_lock_mode_ShareLock_awaited": 0,
+ "db_postgres_lock_mode_ShareLock_held": 0,
+ "db_postgres_lock_mode_ShareRowExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_ShareRowExclusiveLock_held": 0,
+ "db_postgres_lock_mode_ShareUpdateExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_ShareUpdateExclusiveLock_held": 0,
+ "db_postgres_numbackends": 3,
+ "db_postgres_numbackends_utilization": 10,
+ "db_postgres_size": 8758051,
+ "db_postgres_temp_bytes": 0,
+ "db_postgres_temp_files": 0,
+ "db_postgres_tup_deleted": 0,
+ "db_postgres_tup_fetched": 359833,
+ "db_postgres_tup_fetched_perc": 2,
+ "db_postgres_tup_inserted": 0,
+ "db_postgres_tup_returned": 13207245,
+ "db_postgres_tup_updated": 0,
+ "db_postgres_xact_commit": 1438660,
+ "db_postgres_xact_rollback": 70,
+ "db_production_blks_hit": 0,
+ "db_production_blks_read": 0,
+ "db_production_blks_read_perc": 0,
+ "db_production_confl_bufferpin": 0,
+ "db_production_confl_deadlock": 0,
+ "db_production_confl_lock": 0,
+ "db_production_confl_snapshot": 0,
+ "db_production_confl_tablespace": 0,
+ "db_production_conflicts": 0,
+ "db_production_deadlocks": 0,
+ "db_production_lock_mode_AccessExclusiveLock_awaited": 0,
+ "db_production_lock_mode_AccessExclusiveLock_held": 0,
+ "db_production_lock_mode_AccessShareLock_awaited": 0,
+ "db_production_lock_mode_AccessShareLock_held": 0,
+ "db_production_lock_mode_ExclusiveLock_awaited": 0,
+ "db_production_lock_mode_ExclusiveLock_held": 0,
+ "db_production_lock_mode_RowExclusiveLock_awaited": 0,
+ "db_production_lock_mode_RowExclusiveLock_held": 0,
+ "db_production_lock_mode_RowShareLock_awaited": 0,
+ "db_production_lock_mode_RowShareLock_held": 0,
+ "db_production_lock_mode_ShareLock_awaited": 99,
+ "db_production_lock_mode_ShareLock_held": 0,
+ "db_production_lock_mode_ShareRowExclusiveLock_awaited": 0,
+ "db_production_lock_mode_ShareRowExclusiveLock_held": 0,
+ "db_production_lock_mode_ShareUpdateExclusiveLock_awaited": 0,
+ "db_production_lock_mode_ShareUpdateExclusiveLock_held": 99,
+ "db_production_numbackends": 1,
+ "db_production_numbackends_utilization": 1,
+ "db_production_size": 8602115,
+ "db_production_temp_bytes": 0,
+ "db_production_temp_files": 0,
+ "db_production_tup_deleted": 0,
+ "db_production_tup_fetched": 0,
+ "db_production_tup_fetched_perc": 0,
+ "db_production_tup_inserted": 0,
+ "db_production_tup_returned": 0,
+ "db_production_tup_updated": 0,
+ "db_production_xact_commit": 0,
+ "db_production_xact_rollback": 0,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_myschema_size": 8192,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_myschema_usage_status_unused": 1,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_myschema_usage_status_used": 0,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_public_size": 8192,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_public_usage_status_unused": 1,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_public_usage_status_used": 0,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_myschema_size": 8192,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_myschema_usage_status_unused": 1,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_myschema_usage_status_used": 0,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_public_size": 8192,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_public_usage_status_unused": 1,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_public_usage_status_used": 0,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_myschema_size": 8192,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_myschema_usage_status_unused": 1,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_myschema_usage_status_used": 0,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_public_size": 8192,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_public_usage_status_unused": 1,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_public_usage_status_used": 0,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_bloat_size": 0,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_bloat_size_perc": 0,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_size": 112336896,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_usage_status_unused": 0,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_usage_status_used": 1,
+ "index_pgbench_branches_pkey_table_pgbench_branches_db_postgres_schema_public_size": 16384,
+ "index_pgbench_branches_pkey_table_pgbench_branches_db_postgres_schema_public_usage_status_unused": 1,
+ "index_pgbench_branches_pkey_table_pgbench_branches_db_postgres_schema_public_usage_status_used": 0,
+ "index_pgbench_tellers_pkey_table_pgbench_tellers_db_postgres_schema_public_size": 32768,
+ "index_pgbench_tellers_pkey_table_pgbench_tellers_db_postgres_schema_public_usage_status_unused": 1,
+ "index_pgbench_tellers_pkey_table_pgbench_tellers_db_postgres_schema_public_usage_status_used": 0,
+ "locks_utilization": 6,
+ "maxwritten_clean": 0,
+ "oldest_current_xid": 9,
+ "percent_towards_emergency_autovacuum": 0,
+ "percent_towards_wraparound": 0,
+ "query_running_time_hist_bucket_1": 1,
+ "query_running_time_hist_bucket_2": 0,
+ "query_running_time_hist_bucket_3": 0,
+ "query_running_time_hist_bucket_4": 0,
+ "query_running_time_hist_bucket_5": 0,
+ "query_running_time_hist_bucket_6": 0,
+ "query_running_time_hist_bucket_inf": 0,
+ "query_running_time_hist_count": 1,
+ "query_running_time_hist_sum": 0,
+ "repl_slot_ocean_replslot_files": 0,
+ "repl_slot_ocean_replslot_wal_keep": 0,
+ "repl_standby_app_phys-standby2_wal_flush_lag_size": 0,
+ "repl_standby_app_phys-standby2_wal_flush_lag_time": 0,
+ "repl_standby_app_phys-standby2_wal_replay_lag_size": 0,
+ "repl_standby_app_phys-standby2_wal_replay_lag_time": 0,
+ "repl_standby_app_phys-standby2_wal_sent_lag_size": 0,
+ "repl_standby_app_phys-standby2_wal_write_lag_size": 0,
+ "repl_standby_app_phys-standby2_wal_write_time": 0,
+ "repl_standby_app_walreceiver_wal_flush_lag_size": 2,
+ "repl_standby_app_walreceiver_wal_flush_lag_time": 2,
+ "repl_standby_app_walreceiver_wal_replay_lag_size": 2,
+ "repl_standby_app_walreceiver_wal_replay_lag_time": 2,
+ "repl_standby_app_walreceiver_wal_sent_lag_size": 2,
+ "repl_standby_app_walreceiver_wal_write_lag_size": 2,
+ "repl_standby_app_walreceiver_wal_write_time": 2,
+ "server_connections_available": 97,
+ "server_connections_state_active": 1,
+ "server_connections_state_disabled": 1,
+ "server_connections_state_fastpath_function_call": 1,
+ "server_connections_state_idle": 14,
+ "server_connections_state_idle_in_transaction": 7,
+ "server_connections_state_idle_in_transaction_aborted": 1,
+ "server_connections_used": 3,
+ "server_connections_utilization": 3,
+ "server_uptime": 499906,
+ "table_pgbench_accounts_db_postgres_schema_public_bloat_size": 9863168,
+ "table_pgbench_accounts_db_postgres_schema_public_bloat_size_perc": 1,
+ "table_pgbench_accounts_db_postgres_schema_public_heap_blks_hit": 224484753408,
+ "table_pgbench_accounts_db_postgres_schema_public_heap_blks_read": 1803882668032,
+ "table_pgbench_accounts_db_postgres_schema_public_heap_blks_read_perc": 88,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_blks_hit": 7138635948032,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_blks_read": 973310976000,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_blks_read_perc": 11,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_scan": 99955,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_tup_fetch": 99955,
+ "table_pgbench_accounts_db_postgres_schema_public_last_analyze_ago": 377149,
+ "table_pgbench_accounts_db_postgres_schema_public_last_vacuum_ago": 377149,
+ "table_pgbench_accounts_db_postgres_schema_public_n_dead_tup": 1000048,
+ "table_pgbench_accounts_db_postgres_schema_public_n_dead_tup_perc": 16,
+ "table_pgbench_accounts_db_postgres_schema_public_n_live_tup": 5000048,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_del": 0,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_hot_upd": 0,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_hot_upd_perc": 0,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_ins": 5000000,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_upd": 0,
+ "table_pgbench_accounts_db_postgres_schema_public_seq_scan": 2,
+ "table_pgbench_accounts_db_postgres_schema_public_seq_tup_read": 5000000,
+ "table_pgbench_accounts_db_postgres_schema_public_tidx_blks_hit": -1,
+ "table_pgbench_accounts_db_postgres_schema_public_tidx_blks_read": -1,
+ "table_pgbench_accounts_db_postgres_schema_public_tidx_blks_read_perc": 50,
+ "table_pgbench_accounts_db_postgres_schema_public_toast_blks_hit": -1,
+ "table_pgbench_accounts_db_postgres_schema_public_toast_blks_read": -1,
+ "table_pgbench_accounts_db_postgres_schema_public_toast_blks_read_perc": 50,
+ "table_pgbench_accounts_db_postgres_schema_public_total_size": 784031744,
+ "table_pgbench_branches_db_postgres_schema_public_heap_blks_hit": 304316416,
+ "table_pgbench_branches_db_postgres_schema_public_heap_blks_read": 507150336,
+ "table_pgbench_branches_db_postgres_schema_public_heap_blks_read_perc": 62,
+ "table_pgbench_branches_db_postgres_schema_public_idx_blks_hit": 101441536,
+ "table_pgbench_branches_db_postgres_schema_public_idx_blks_read": 101425152,
+ "table_pgbench_branches_db_postgres_schema_public_idx_blks_read_perc": 49,
+ "table_pgbench_branches_db_postgres_schema_public_idx_scan": 0,
+ "table_pgbench_branches_db_postgres_schema_public_idx_tup_fetch": 0,
+ "table_pgbench_branches_db_postgres_schema_public_last_analyze_ago": 377149,
+ "table_pgbench_branches_db_postgres_schema_public_last_vacuum_ago": 371719,
+ "table_pgbench_branches_db_postgres_schema_public_n_dead_tup": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_dead_tup_perc": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_live_tup": 50,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_del": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_hot_upd": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_hot_upd_perc": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_ins": 50,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_upd": 0,
+ "table_pgbench_branches_db_postgres_schema_public_seq_scan": 6,
+ "table_pgbench_branches_db_postgres_schema_public_seq_tup_read": 300,
+ "table_pgbench_branches_db_postgres_schema_public_tidx_blks_hit": -1,
+ "table_pgbench_branches_db_postgres_schema_public_tidx_blks_read": -1,
+ "table_pgbench_branches_db_postgres_schema_public_tidx_blks_read_perc": 50,
+ "table_pgbench_branches_db_postgres_schema_public_toast_blks_hit": -1,
+ "table_pgbench_branches_db_postgres_schema_public_toast_blks_read": -1,
+ "table_pgbench_branches_db_postgres_schema_public_toast_blks_read_perc": 50,
+ "table_pgbench_branches_db_postgres_schema_public_total_size": 57344,
+ "table_pgbench_history_db_postgres_schema_public_heap_blks_hit": 0,
+ "table_pgbench_history_db_postgres_schema_public_heap_blks_read": 0,
+ "table_pgbench_history_db_postgres_schema_public_heap_blks_read_perc": 0,
+ "table_pgbench_history_db_postgres_schema_public_idx_blks_hit": -1,
+ "table_pgbench_history_db_postgres_schema_public_idx_blks_read": -1,
+ "table_pgbench_history_db_postgres_schema_public_idx_blks_read_perc": 50,
+ "table_pgbench_history_db_postgres_schema_public_idx_scan": 0,
+ "table_pgbench_history_db_postgres_schema_public_idx_tup_fetch": 0,
+ "table_pgbench_history_db_postgres_schema_public_last_analyze_ago": 377149,
+ "table_pgbench_history_db_postgres_schema_public_last_vacuum_ago": 377149,
+ "table_pgbench_history_db_postgres_schema_public_n_dead_tup": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_dead_tup_perc": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_live_tup": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_del": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_hot_upd": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_hot_upd_perc": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_ins": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_upd": 0,
+ "table_pgbench_history_db_postgres_schema_public_seq_scan": 0,
+ "table_pgbench_history_db_postgres_schema_public_seq_tup_read": 0,
+ "table_pgbench_history_db_postgres_schema_public_tidx_blks_hit": -1,
+ "table_pgbench_history_db_postgres_schema_public_tidx_blks_read": -1,
+ "table_pgbench_history_db_postgres_schema_public_tidx_blks_read_perc": 50,
+ "table_pgbench_history_db_postgres_schema_public_toast_blks_hit": -1,
+ "table_pgbench_history_db_postgres_schema_public_toast_blks_read": -1,
+ "table_pgbench_history_db_postgres_schema_public_toast_blks_read_perc": 50,
+ "table_pgbench_history_db_postgres_schema_public_total_size": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_heap_blks_hit": 491937792,
+ "table_pgbench_tellers_db_postgres_schema_public_heap_blks_read": 623828992,
+ "table_pgbench_tellers_db_postgres_schema_public_heap_blks_read_perc": 55,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_blks_hit": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_blks_read": 101433344,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_blks_read_perc": 100,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_scan": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_tup_fetch": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_last_analyze_ago": 377149,
+ "table_pgbench_tellers_db_postgres_schema_public_last_vacuum_ago": 371719,
+ "table_pgbench_tellers_db_postgres_schema_public_n_dead_tup": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_dead_tup_perc": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_live_tup": 500,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_del": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_hot_upd": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_hot_upd_perc": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_ins": 500,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_upd": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_null_columns": 1,
+ "table_pgbench_tellers_db_postgres_schema_public_seq_scan": 1,
+ "table_pgbench_tellers_db_postgres_schema_public_seq_tup_read": 500,
+ "table_pgbench_tellers_db_postgres_schema_public_tidx_blks_hit": -1,
+ "table_pgbench_tellers_db_postgres_schema_public_tidx_blks_read": -1,
+ "table_pgbench_tellers_db_postgres_schema_public_tidx_blks_read_perc": 50,
+ "table_pgbench_tellers_db_postgres_schema_public_toast_blks_hit": -1,
+ "table_pgbench_tellers_db_postgres_schema_public_toast_blks_read": -1,
+ "table_pgbench_tellers_db_postgres_schema_public_toast_blks_read_perc": 50,
+ "table_pgbench_tellers_db_postgres_schema_public_total_size": 90112,
+ "transaction_running_time_hist_bucket_1": 1,
+ "transaction_running_time_hist_bucket_2": 0,
+ "transaction_running_time_hist_bucket_3": 0,
+ "transaction_running_time_hist_bucket_4": 0,
+ "transaction_running_time_hist_bucket_5": 0,
+ "transaction_running_time_hist_bucket_6": 0,
+ "transaction_running_time_hist_bucket_inf": 7,
+ "transaction_running_time_hist_count": 8,
+ "transaction_running_time_hist_sum": 4022,
+ "wal_archive_files_done_count": 1,
+ "wal_archive_files_ready_count": 1,
+ "wal_recycled_files": 0,
+ "wal_writes": 24103144,
+ "wal_written_files": 1,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "Fail when querying the database version returns an error": {
+ {
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpectErr(m, queryServerVersion())
+ },
+ check: func(t *testing.T, pg *Postgres) {
+ mx := pg.Collect()
+ var expected map[string]int64
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "Fail when querying settings max connections returns an error": {
+ {
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpectErr(m, querySettingsMaxConnections())
+ },
+ check: func(t *testing.T, pg *Postgres) {
+ mx := pg.Collect()
+ var expected map[string]int64
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "Fail when querying the server connections returns an error": {
+ {
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004SettingsMaxConnections)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpectErr(m, queryServerCurrentConnectionsUsed())
+ },
+ check: func(t *testing.T, pg *Postgres) {
+ mx := pg.Collect()
+ var expected map[string]int64
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ pg := New()
+ pg.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, pg.Init())
+
+ for i, step := range test {
+ t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
+ step.prepareMock(t, pg, mock)
+ step.check(t, pg)
+ })
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
+func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) {
+ mock.ExpectQuery(query).WillReturnRows(mustMockRows(t, rows)).RowsWillBeClosed()
+}
+
+func mockExpectErr(mock sqlmock.Sqlmock, query string) {
+ mock.ExpectQuery(query).WillReturnError(errors.New("mock error"))
+}
+
+func mustMockRows(t *testing.T, data []byte) *sqlmock.Rows {
+ rows, err := prepareMockRows(data)
+ require.NoError(t, err)
+ return rows
+}
+
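+// prepareMockRows builds sqlmock rows from testdata files that store query results as pipe-separated text tables: blank lines and '---' separator lines are skipped, and the first remaining line is used as the column header.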
+func prepareMockRows(data []byte) (*sqlmock.Rows, error) {
+ r := bytes.NewReader(data)
+ sc := bufio.NewScanner(r)
+
+ var numColumns int
+ var rows *sqlmock.Rows
+
+ for sc.Scan() {
+ s := strings.TrimSpace(sc.Text())
+ if s == "" || strings.HasPrefix(s, "---") {
+ continue
+ }
+
+ parts := strings.Split(s, "|")
+ for i, v := range parts {
+ parts[i] = strings.TrimSpace(v)
+ }
+
+ if rows == nil {
+ numColumns = len(parts)
+ rows = sqlmock.NewRows(parts)
+ continue
+ }
+
+ if len(parts) != numColumns {
+ return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts))
+ }
+
+ values := make([]driver.Value, len(parts))
+ for i, v := range parts {
+ values[i] = v
+ }
+ rows.AddRow(values...)
+ }
+
+ if rows == nil {
+ return nil, errors.New("prepareMockRows(): nil rows result")
+ }
+
+ return rows, nil
+}
diff --git a/src/go/plugin/go.d/modules/postgres/queries.go b/src/go/plugin/go.d/modules/postgres/queries.go
new file mode 100644
index 000000000..f6afc9342
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/queries.go
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+func queryServerVersion() string {
+ return "SHOW server_version_num;"
+}
+
+func queryIsSuperUser() string {
+ return "SELECT current_setting('is_superuser') = 'on' AS is_superuser;"
+}
+
+func queryPGIsInRecovery() string {
+ return "SELECT pg_is_in_recovery();"
+}
+
+func querySettingsMaxConnections() string {
+ return "SELECT current_setting('max_connections')::INT - current_setting('superuser_reserved_connections')::INT;"
+}
+
+func querySettingsMaxLocksHeld() string {
+ return `
+SELECT current_setting('max_locks_per_transaction')::INT *
+ (current_setting('max_connections')::INT + current_setting('max_prepared_transactions')::INT);
+`
+}
+
+// TODO: this is not correct; we should use pg_stat_activity instead.
+// But first we need to check which connections (backend_type) count towards 'max_connections'.
+// The Python version's query likely doesn't count them correctly.
+// https://github.com/netdata/netdata/blob/1782e2d002bc5203128e5a5d2b801010e2822d2d/collectors/python.d.plugin/postgres/postgres.chart.py#L266
+func queryServerCurrentConnectionsUsed() string {
+ return "SELECT sum(numbackends) FROM pg_stat_database;"
+}
+
+func queryServerConnectionsState() string {
+ return `
+SELECT state,
+ COUNT(*)
+FROM pg_stat_activity
+WHERE state IN
+ (
+ 'active',
+ 'idle',
+ 'idle in transaction',
+ 'idle in transaction (aborted)',
+ 'fastpath function call',
+ 'disabled'
+ )
+GROUP BY state;
+`
+}
+
+func queryCheckpoints() string {
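+	// Buffer counters are multiplied by current_setting('block_size') so the
+	// collector reports bytes rather than pages.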
+ // definition by version: https://pgpedia.info/p/pg_stat_bgwriter.html
+ // docs: https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-BGWRITER-VIEW
+ // code: https://github.com/postgres/postgres/blob/366283961ac0ed6d89014444c6090f3fd02fce0a/src/backend/catalog/system_views.sql#L1104
+
+ return `
+SELECT checkpoints_timed,
+ checkpoints_req,
+ checkpoint_write_time,
+ checkpoint_sync_time,
+ buffers_checkpoint * current_setting('block_size')::numeric AS buffers_checkpoint_bytes,
+ buffers_clean * current_setting('block_size')::numeric AS buffers_clean_bytes,
+ maxwritten_clean,
+ buffers_backend * current_setting('block_size')::numeric AS buffers_backend_bytes,
+ buffers_backend_fsync,
+ buffers_alloc * current_setting('block_size')::numeric AS buffers_alloc_bytes
+FROM pg_stat_bgwriter;
+`
+}
+
+func queryServerUptime() string {
+ return `SELECT EXTRACT(epoch FROM CURRENT_TIMESTAMP - pg_postmaster_start_time());`
+}
+
+func queryTXIDWraparound() string {
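+	// Transaction IDs wrap around at ~2^31 (~2.1 billion); the 2,000,000,000 below
+	// is a conservative ceiling used to compute percent_towards_wraparound.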
+ // https://www.crunchydata.com/blog/managing-transaction-id-wraparound-in-postgresql
+ return `
+WITH max_age AS (
+    SELECT 2000000000 AS max_old_xid,
+           setting AS autovacuum_freeze_max_age
+    FROM pg_catalog.pg_settings
+    WHERE name = 'autovacuum_freeze_max_age'
+),
+per_database_stats AS (
+    SELECT datname,
+           m.max_old_xid::int,
+           m.autovacuum_freeze_max_age::int,
+           age(d.datfrozenxid) AS oldest_current_xid
+    FROM pg_catalog.pg_database d
+    JOIN max_age m ON (true)
+    WHERE d.datallowconn
+)
+SELECT max(oldest_current_xid) AS oldest_current_xid,
+       max(ROUND(100 * (oldest_current_xid / max_old_xid::float))) AS percent_towards_wraparound,
+       max(ROUND(100 * (oldest_current_xid / autovacuum_freeze_max_age::float))) AS percent_towards_emergency_autovacuum
+FROM per_database_stats;
+`
+}
+
+func queryWALWrites(version int) string {
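+	// pg_wal_lsn_diff(<current LSN>, '0/0') yields the total number of WAL bytes
+	// generated (or received while in recovery). The pg_xlog_* functions were
+	// renamed to pg_wal_* in PostgreSQL 10, hence the two variants.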
+ if version < pgVersion10 {
+ return `
+SELECT
+ pg_xlog_location_diff(
+ CASE
+ pg_is_in_recovery()
+ WHEN
+ TRUE
+ THEN
+ pg_last_xlog_receive_location()
+ ELSE
+ pg_current_xlog_location()
+ END
+, '0/0') AS wal_writes ;
+`
+ }
+ return `
+SELECT
+ pg_wal_lsn_diff(
+ CASE
+ pg_is_in_recovery()
+ WHEN
+ TRUE
+ THEN
+ pg_last_wal_receive_lsn()
+ ELSE
+ pg_current_wal_lsn()
+ END
+, '0/0') AS wal_writes ;
+`
+}
+
+func queryWALFiles(version int) string {
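+	// Segments in the WAL directory whose names sort after the current WAL file are
+	// counted as recycled (pre-allocated for reuse); the rest as already written.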
+ if version < pgVersion10 {
+ return `
+SELECT count(*) FILTER (WHERE type = 'recycled') AS wal_recycled_files,
+ count(*) FILTER (WHERE type = 'written') AS wal_written_files
+FROM (SELECT wal.name,
+ pg_xlogfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_xlog_location()
+ END),
+ CASE
+ WHEN wal.name > pg_xlogfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_xlog_location()
+ END) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_xlog') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY (pg_stat_file('pg_xlog/' || name, true)).modification,
+ wal.name DESC) sub;
+`
+ }
+ return `
+SELECT count(*) FILTER (WHERE type = 'recycled') AS wal_recycled_files,
+ count(*) FILTER (WHERE type = 'written') AS wal_written_files
+FROM (SELECT wal.name,
+ pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END),
+ CASE
+ WHEN wal.name > pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_wal') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY (pg_stat_file('pg_wal/' || name, true)).modification,
+ wal.name DESC) sub;
+`
+}
+
+func queryWALArchiveFiles(version int) string {
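+	// Counts *.ready (awaiting archiving) and *.done (already archived) status files
+	// in the archive_status directory. $r$...$r$ is dollar-quoting, so the regex
+	// backslashes need no extra escaping.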
+ if version < pgVersion10 {
+ return `
+ SELECT
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),
+ 0) AS INT) AS wal_archive_files_ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),
+ 0) AS INT) AS wal_archive_files_done_count
+ FROM
+ pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
+`
+ }
+ return `
+ SELECT
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),
+ 0) AS INT) AS wal_archive_files_ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),
+ 0) AS INT) AS wal_archive_files_done_count
+ FROM
+ pg_catalog.pg_ls_dir('pg_wal/archive_status') AS archive_files (archive_file);
+`
+}
+
+func queryCatalogRelations() string {
+	// roughly the same as
+	// https://github.com/netdata/netdata/blob/750810e1798e09cc6210e83594eb9ed4905f8f12/collectors/python.d.plugin/postgres/postgres.chart.py#L336-L354
+	// TODO: do we need this? It is optional and disabled by default in the Python version.
+ return `
+SELECT relkind,
+ COUNT(1),
+ SUM(relpages) * current_setting('block_size')::NUMERIC AS size
+FROM pg_class
+GROUP BY relkind;
+`
+}
+
+func queryAutovacuumWorkers() string {
+ // https://github.com/postgres/postgres/blob/9e4f914b5eba3f49ab99bdecdc4f96fac099571f/src/backend/postmaster/autovacuum.c#L3168-L3183
+ return `
+SELECT count(*) FILTER (
+ WHERE
+ query LIKE 'autovacuum: ANALYZE%%'
+ AND query NOT LIKE '%%to prevent wraparound%%'
+ ) AS autovacuum_analyze,
+ count(*) FILTER (
+ WHERE
+ query LIKE 'autovacuum: VACUUM ANALYZE%%'
+ AND query NOT LIKE '%%to prevent wraparound%%'
+ ) AS autovacuum_vacuum_analyze,
+ count(*) FILTER (
+ WHERE
+ query LIKE 'autovacuum: VACUUM %.%%'
+ AND query NOT LIKE '%%to prevent wraparound%%'
+ ) AS autovacuum_vacuum,
+ count(*) FILTER (
+ WHERE
+ query LIKE '%%to prevent wraparound%%'
+ ) AS autovacuum_vacuum_freeze,
+ count(*) FILTER (
+ WHERE
+ query LIKE 'autovacuum: BRIN summarize%%'
+ ) AS autovacuum_brin_summarize
+FROM pg_stat_activity
+WHERE query NOT LIKE '%%pg_stat_activity%%';
+`
+}
+
+func queryXactQueryRunningTime() string {
+ return `
+SELECT datname,
+ state,
+ EXTRACT(epoch from now() - xact_start) as xact_running_time,
+ EXTRACT(epoch from now() - query_start) as query_running_time
+FROM pg_stat_activity
+WHERE datname IS NOT NULL
+ AND state IN
+ (
+ 'active',
+ 'idle in transaction',
+ 'idle in transaction (aborted)'
+ )
+ AND backend_type = 'client backend';
+`
+}
+
+func queryReplicationStandbyAppDelta(version int) string {
+ if version < pgVersion10 {
+ return `
+SELECT application_name,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ sent_location) AS sent_delta,
+ pg_xlog_location_diff(
+ sent_location, write_location) AS write_delta,
+ pg_xlog_location_diff(
+ write_location, flush_location) AS flush_delta,
+ pg_xlog_location_diff(
+ flush_location, replay_location) AS replay_delta
+FROM pg_stat_replication psr
+WHERE application_name IS NOT NULL;
+`
+ }
+ return `
+SELECT application_name,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ sent_lsn) AS sent_delta,
+ pg_wal_lsn_diff(
+ sent_lsn, write_lsn) AS write_delta,
+ pg_wal_lsn_diff(
+ write_lsn, flush_lsn) AS flush_delta,
+ pg_wal_lsn_diff(
+ flush_lsn, replay_lsn) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
+`
+}
+
+func queryReplicationStandbyAppLag() string {
+ return `
+SELECT application_name,
+ COALESCE(EXTRACT(EPOCH FROM write_lag)::bigint, 0) AS write_lag,
+ COALESCE(EXTRACT(EPOCH FROM flush_lag)::bigint, 0) AS flush_lag,
+ COALESCE(EXTRACT(EPOCH FROM replay_lag)::bigint, 0) AS replay_lag
+FROM pg_stat_replication psr
+WHERE application_name IS NOT NULL;
+`
+}
+
+func queryReplicationSlotFiles(version int) string {
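+	// Counts the files each replication slot keeps under pg_replslot and estimates
+	// how many WAL segments it retains. The pre-v11 variant derives the segment size
+	// as wal_block_size * wal_segment_size, while from v11 the setting is already in bytes.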
+ if version < pgVersion11 {
+ return `
+WITH wal_size AS (
+ SELECT
+ current_setting('wal_block_size')::INT * setting::INT AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ CASE WHEN pg_is_in_recovery()
+ THEN (
+ pg_wal_lsn_diff(pg_last_wal_receive_lsn(), slot.restart_lsn)
+ -- this is needed to account for whole WAL retention and
+ -- not only size retention
+ + (pg_wal_lsn_diff(restart_lsn, '0/0') % s.val)
+ ) / s.val
+ ELSE (
+ pg_wal_lsn_diff(pg_current_wal_lsn(), slot.restart_lsn)
+ -- this is needed to account for whole WAL retention and
+ -- not only size retention
+ + (pg_walfile_name_offset(restart_lsn)).file_offset
+ ) / s.val
+ END
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+`
+ }
+
+ return `
+WITH wal_size AS (
+ SELECT
+ setting::int AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ CASE WHEN pg_is_in_recovery()
+ THEN (
+ pg_wal_lsn_diff(pg_last_wal_receive_lsn(), slot.restart_lsn)
+ -- this is needed to account for whole WAL retention and
+ -- not only size retention
+ + (pg_wal_lsn_diff(restart_lsn, '0/0') % s.val)
+ ) / s.val
+ ELSE (
+ pg_wal_lsn_diff(pg_current_wal_lsn(), slot.restart_lsn)
+ -- this is needed to account for whole WAL retention and
+ -- not only size retention
+ + (pg_walfile_name_offset(restart_lsn)).file_offset
+ ) / s.val
+ END
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+`
+}
+
+func queryQueryableDatabaseList() string {
+ return `
+SELECT datname
+FROM pg_database
+WHERE datallowconn = true
+ AND datistemplate = false
+ AND datname != current_database()
+ AND has_database_privilege((SELECT CURRENT_USER), datname, 'connect');
+`
+}
+
+func queryDatabaseStats() string {
+ // definition by version: https://pgpedia.info/p/pg_stat_database.html
+ // docs: https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-DATABASE-VIEW
+ // code: https://github.com/postgres/postgres/blob/366283961ac0ed6d89014444c6090f3fd02fce0a/src/backend/catalog/system_views.sql#L1018
+
+ return `
+SELECT stat.datname,
+ numbackends,
+ pg_database.datconnlimit,
+ xact_commit,
+ xact_rollback,
+ blks_read * current_setting('block_size')::numeric AS blks_read_bytes,
+ blks_hit * current_setting('block_size')::numeric AS blks_hit_bytes,
+ tup_returned,
+ tup_fetched,
+ tup_inserted,
+ tup_updated,
+ tup_deleted,
+ conflicts,
+ temp_files,
+ temp_bytes,
+ deadlocks
+FROM pg_stat_database stat
+ INNER JOIN
+ pg_database
+ ON pg_database.datname = stat.datname
+WHERE pg_database.datistemplate = false;
+`
+}
+
+func queryDatabaseSize(version int) string {
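+	// The v10+ variant also includes databases readable through membership in the
+	// pg_read_all_stats role (introduced in PostgreSQL 10), not only those the
+	// current user can CONNECT to.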
+ if version < pgVersion10 {
+ return `
+SELECT datname,
+ pg_database_size(datname) AS size
+FROM pg_database
+WHERE pg_database.datistemplate = false
+ AND has_database_privilege((SELECT CURRENT_USER), pg_database.datname, 'connect');
+`
+ }
+ return `
+SELECT datname,
+ pg_database_size(datname) AS size
+FROM pg_database
+WHERE pg_database.datistemplate = false
+ AND (has_database_privilege((SELECT CURRENT_USER), datname, 'connect')
+ OR pg_has_role((SELECT CURRENT_USER), 'pg_read_all_stats', 'MEMBER'));
+`
+}
+
+func queryDatabaseConflicts() string {
+ // definition by version: https://pgpedia.info/p/pg_stat_database_conflicts.html
+ // docs: https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-DATABASE-CONFLICTS-VIEW
+ // code: https://github.com/postgres/postgres/blob/366283961ac0ed6d89014444c6090f3fd02fce0a/src/backend/catalog/system_views.sql#L1058
+
+ return `
+SELECT stat.datname,
+ confl_tablespace,
+ confl_lock,
+ confl_snapshot,
+ confl_bufferpin,
+ confl_deadlock
+FROM pg_stat_database_conflicts stat
+ INNER JOIN
+ pg_database
+ ON pg_database.datname = stat.datname
+WHERE pg_database.datistemplate = false;
+`
+}
+
+func queryDatabaseLocks() string {
+ // definition by version: https://pgpedia.info/p/pg_locks.html
+ // docs: https://www.postgresql.org/docs/current/view-pg-locks.html
+
+ return `
+SELECT pg_database.datname,
+ mode,
+ granted,
+ count(mode) AS locks_count
+FROM pg_locks
+ INNER JOIN
+ pg_database
+ ON pg_database.oid = pg_locks.database
+WHERE pg_database.datistemplate = false
+GROUP BY datname,
+ mode,
+ granted
+ORDER BY datname,
+ mode;
+`
+}
+
+func queryUserTablesCount() string {
+ return "SELECT count(*) from pg_stat_user_tables;"
+}
+
+func queryStatUserTables() string {
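+	// The LEFT JOIN on pg_inherits resolves each table's parent relation (for
+	// inherited/partitioned tables); pg_total_relation_size includes indexes and TOAST.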
+ return `
+SELECT current_database() as datname,
+ schemaname,
+ relname,
+ inh.parent_relname,
+ seq_scan,
+ seq_tup_read,
+ idx_scan,
+ idx_tup_fetch,
+ n_tup_ins,
+ n_tup_upd,
+ n_tup_del,
+ n_tup_hot_upd,
+ n_live_tup,
+ n_dead_tup,
+ EXTRACT(epoch from now() - last_vacuum) as last_vacuum,
+ EXTRACT(epoch from now() - last_autovacuum) as last_autovacuum,
+ EXTRACT(epoch from now() - last_analyze) as last_analyze,
+ EXTRACT(epoch from now() - last_autoanalyze) as last_autoanalyze,
+ vacuum_count,
+ autovacuum_count,
+ analyze_count,
+ autoanalyze_count,
+ pg_total_relation_size(quote_ident(schemaname) || '.' || quote_ident(relname)) as total_relation_size
+FROM pg_stat_user_tables
+LEFT JOIN(
+ SELECT
+ c.oid AS child_oid,
+ p.relname AS parent_relname
+ FROM
+ pg_inherits
+ JOIN pg_class AS c ON (inhrelid = c.oid)
+ JOIN pg_class AS p ON (inhparent = p.oid)
+ ) AS inh ON inh.child_oid = relid
+WHERE has_schema_privilege(schemaname, 'USAGE');
+`
+}
+
+func queryStatIOUserTables() string {
+ return `
+SELECT current_database() AS datname,
+ schemaname,
+ relname,
+ inh.parent_relname,
+ heap_blks_read * current_setting('block_size')::numeric AS heap_blks_read_bytes,
+ heap_blks_hit * current_setting('block_size')::numeric AS heap_blks_hit_bytes,
+ idx_blks_read * current_setting('block_size')::numeric AS idx_blks_read_bytes,
+ idx_blks_hit * current_setting('block_size')::numeric AS idx_blks_hit_bytes,
+ toast_blks_read * current_setting('block_size')::numeric AS toast_blks_read_bytes,
+ toast_blks_hit * current_setting('block_size')::numeric AS toast_blks_hit_bytes,
+ tidx_blks_read * current_setting('block_size')::numeric AS tidx_blks_read_bytes,
+ tidx_blks_hit * current_setting('block_size')::numeric AS tidx_blks_hit_bytes
+FROM pg_statio_user_tables
+LEFT JOIN(
+ SELECT
+ c.oid AS child_oid,
+ p.relname AS parent_relname
+ FROM
+ pg_inherits
+ JOIN pg_class AS c ON (inhrelid = c.oid)
+ JOIN pg_class AS p ON (inhparent = p.oid)
+ ) AS inh ON inh.child_oid = relid
+WHERE has_schema_privilege(schemaname, 'USAGE');
+`
+}
+
+func queryUserIndexesCount() string {
+ return "SELECT count(*) from pg_stat_user_indexes;"
+}
+
+func queryStatUserIndexes() string {
+ return `
+SELECT current_database() as datname,
+ schemaname,
+ relname,
+ indexrelname,
+ inh.parent_relname,
+ idx_scan,
+ idx_tup_read,
+ idx_tup_fetch,
+ pg_relation_size(quote_ident(schemaname) || '.' || quote_ident(indexrelname)::text) as size
+FROM pg_stat_user_indexes
+LEFT JOIN(
+ SELECT
+ c.oid AS child_oid,
+ p.relname AS parent_relname
+ FROM
+ pg_inherits
+ JOIN pg_class AS c ON (inhrelid = c.oid)
+ JOIN pg_class AS p ON (inhparent = p.oid)
+ ) AS inh ON inh.child_oid = relid
+WHERE has_schema_privilege(schemaname, 'USAGE');
+`
+}
+
+// The following query for bloat was taken from the venerable check_postgres
+// script (https://bucardo.org/check_postgres/), which is:
+//
+// Copyright (c) 2007-2017 Greg Sabino Mullane
+//------------------------------------------------------------------------------
+
+func queryBloat() string {
+ return `
+SELECT
+ current_database() AS db, schemaname, tablename, reltuples::bigint AS tups, relpages::bigint AS pages, otta,
+ ROUND(CASE WHEN otta=0 OR sml.relpages=0 OR sml.relpages=otta THEN 0.0 ELSE sml.relpages/otta::numeric END,1) AS tbloat,
+ CASE WHEN relpages < otta THEN 0 ELSE relpages::bigint - otta END AS wastedpages,
+ CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::bigint END AS wastedbytes,
+ CASE WHEN relpages < otta THEN '0 bytes'::text ELSE (bs*(relpages-otta))::bigint::text || ' bytes' END AS wastedsize,
+ iname, ituples::bigint AS itups, ipages::bigint AS ipages, iotta,
+ ROUND(CASE WHEN iotta=0 OR ipages=0 OR ipages=iotta THEN 0.0 ELSE ipages/iotta::numeric END,1) AS ibloat,
+ CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wastedipages,
+ CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes,
+ CASE WHEN ipages < iotta THEN '0 bytes' ELSE (bs*(ipages-iotta))::bigint::text || ' bytes' END AS wastedisize,
+ CASE WHEN relpages < otta THEN
+ CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta::bigint) END
+ ELSE CASE WHEN ipages < iotta THEN bs*(relpages-otta::bigint)
+ ELSE bs*(relpages-otta::bigint + ipages-iotta::bigint) END
+ END AS totalwastedbytes
+FROM (
+ SELECT
+ nn.nspname AS schemaname,
+ cc.relname AS tablename,
+ COALESCE(cc.reltuples,0) AS reltuples,
+ COALESCE(cc.relpages,0) AS relpages,
+ COALESCE(bs,0) AS bs,
+ COALESCE(CEIL((cc.reltuples*((datahdr+ma-
+ (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)),0) AS otta,
+ COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages,
+ COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols
+ FROM
+ pg_class cc
+ JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname <> 'information_schema'
+ LEFT JOIN
+ (
+ SELECT
+ ma,bs,foo.nspname,foo.relname,
+ (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr,
+ (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
+ FROM (
+ SELECT
+ ns.nspname, tbl.relname, hdr, ma, bs,
+ SUM((1-coalesce(null_frac,0))*coalesce(avg_width, 2048)) AS datawidth,
+ MAX(coalesce(null_frac,0)) AS maxfracsum,
+ hdr+(
+ SELECT 1+count(*)/8
+ FROM pg_stats s2
+ WHERE null_frac<>0 AND s2.schemaname = ns.nspname AND s2.tablename = tbl.relname
+ ) AS nullhdr
+ FROM pg_attribute att
+ JOIN pg_class tbl ON att.attrelid = tbl.oid
+ JOIN pg_namespace ns ON ns.oid = tbl.relnamespace
+ LEFT JOIN pg_stats s ON s.schemaname=ns.nspname
+ AND s.tablename = tbl.relname
+ AND s.inherited=false
+ AND s.attname=att.attname,
+ (
+ SELECT
+ (SELECT current_setting('block_size')::numeric) AS bs,
+ CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#"[0-9]+.[0-9]+#"%' for '#')
+ IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr,
+ CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS ma
+ FROM (SELECT version() AS v) AS foo
+ ) AS constants
+ WHERE att.attnum > 0 AND tbl.relkind='r'
+ GROUP BY 1,2,3,4,5
+ ) AS foo
+ ) AS rs
+ ON cc.relname = rs.relname AND nn.nspname = rs.nspname
+ LEFT JOIN pg_index i ON indrelid = cc.oid
+ LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid
+) AS sml
+WHERE sml.relpages - otta > 10 OR ipages - iotta > 10;
+`
+}
+
+func queryColumnsStats() string {
+ return `
+SELECT current_database() AS datname,
+ nspname AS schemaname,
+ relname,
+ st.attname,
+ typname,
+ (st.null_frac * 100)::int AS null_percent,
+ case
+ when st.n_distinct >= 0
+ then st.n_distinct
+ else
+ abs(st.n_distinct) * reltuples
+ end AS "distinct"
+FROM pg_class c
+ JOIN
+ pg_namespace ns
+ ON
+ (ns.oid = relnamespace)
+ JOIN
+ pg_attribute at
+ ON
+ (c.oid = attrelid)
+ JOIN
+ pg_type t
+ ON
+ (t.oid = atttypid)
+ JOIN
+ pg_stats st
+ ON
+ (st.tablename = relname AND st.attname = at.attname)
+WHERE relkind = 'r'
+ AND nspname NOT LIKE E'pg\\_%'
+ AND nspname != 'information_schema'
+ AND NOT attisdropped
+ AND attstattarget != 0
+ AND reltuples >= 100
+ORDER BY nspname,
+ relname,
+ st.attname;
+`
+}
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/config.json b/src/go/plugin/go.d/modules/postgres/testdata/config.json
new file mode 100644
index 000000000..6b39278c5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/config.json
@@ -0,0 +1,14 @@
+{
+ "update_every": 123,
+ "dsn": "ok",
+ "timeout": 123.123,
+ "collect_databases_matching": "ok",
+ "transaction_time_histogram": [
+ 123.123
+ ],
+ "query_time_histogram": [
+ 123.123
+ ],
+ "max_db_tables": 123,
+ "max_db_indexes": 123
+}
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/config.yaml b/src/go/plugin/go.d/modules/postgres/testdata/config.yaml
new file mode 100644
index 000000000..36ff5f0b1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/config.yaml
@@ -0,0 +1,10 @@
+update_every: 123
+dsn: "ok"
+timeout: 123.123
+collect_databases_matching: "ok"
+transaction_time_histogram:
+ - 123.123
+query_time_histogram:
+ - 123.123
+max_db_tables: 123
+max_db_indexes: 123
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/autovacuum_workers.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/autovacuum_workers.txt
new file mode 100644
index 000000000..7adc787bc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/autovacuum_workers.txt
@@ -0,0 +1,3 @@
+ autovacuum_analyze | autovacuum_vacuum_analyze | autovacuum_vacuum | autovacuum_vacuum_freeze | autovacuum_brin_summarize
+--------------------+---------------------------+-------------------+--------------------------+---------------------------
+ 0 | 0 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/bloat_tables.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/bloat_tables.txt
new file mode 100644
index 000000000..307695363
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/bloat_tables.txt
@@ -0,0 +1,12 @@
+ db | schemaname | tablename | tups | pages | otta | tbloat | wastedpages | wastedbytes | wastedsize | iname | itups | ipages | iotta | ibloat | wastedipages | wastedibytes | wastedisize | totalwastedbytes
+----------+------------+---------------------------------+---------+-------+-------+--------+-------------+-------------+---------------+---------------------------+---------+--------+-------+--------+--------------+--------------+--------------+------------------
+ postgres | pg_catalog | pg_proc_oid_index | 3202 | 11 | 0 | 0.0 | 11 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_proc_proname_args_nsp_index | 3202 | 32 | 0 | 0.0 | 32 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_attribute_relid_attnam_index | 2971 | 15 | 0 | 0.0 | 15 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_description_o_c_o_index | 5078 | 27 | 0 | 0.0 | 27 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_depend_depender_index | 8814 | 43 | 0 | 0.0 | 43 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_depend_reference_index | 8814 | 53 | 0 | 0.0 | 53 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_depend | 8814 | 65 | 65 | 0.0 | 0 | 0 | 0 bytes | pg_depend_reference_index | 8814 | 53 | 40 | 1.3 | 13 | 106496 | 106496 bytes | 106496
+ postgres | pg_toast | pg_toast_2618 | 283 | 63 | 0 | 0.0 | 63 | 0 | 0 bytes | pg_toast_2618_index | 0 | 1 | 0 | 0.0 | 1 | 0 | 0 bytes | 0
+ postgres | public | pgbench_accounts | 5000000 | 81968 | 80764 | 1.0 | 1204 | 9863168 | 9863168 bytes | pgbench_accounts_pkey | 5000000 | 13713 | 66692 | 0.2 | 0 | 0 | 0 bytes | 9863168
+ postgres | public | pgbench_accounts_pkey | 5000000 | 13713 | 0 | 0.0 | 13713 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/catalog_relations.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/catalog_relations.txt
new file mode 100644
index 000000000..cd05e89af
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/catalog_relations.txt
@@ -0,0 +1,6 @@
+ relkind | count | size
+---------+-------+---------
+ r | 66 | 3424256
+ v | 137 | 0
+ i | 155 | 3678208
+ t | 38 | 548864 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/checkpoints.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/checkpoints.txt
new file mode 100644
index 000000000..851ff1320
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/checkpoints.txt
@@ -0,0 +1,3 @@
+ checkpoints_timed | checkpoints_req | checkpoint_write_time | checkpoint_sync_time | buffers_checkpoint_bytes | buffers_clean_bytes | maxwritten_clean | buffers_backend_bytes | buffers_backend_fsync | buffers_alloc_bytes
+-------------------+-----------------+-----------------------+----------------------+--------------------------+---------------------+------------------+-----------------------+-----------------------+--------------------
+ 1814 | 16 | 167 | 47 | 32768 | 0 | 0 | 0 | 0 | 27295744 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_conflicts.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_conflicts.txt
new file mode 100644
index 000000000..34229182a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_conflicts.txt
@@ -0,0 +1,4 @@
+ datname | confl_tablespace | confl_lock | confl_snapshot | confl_bufferpin | confl_deadlock
+------------+------------------+------------+----------------+-----------------+----------------
+ postgres | 0 | 0 | 0 | 0 | 0
+ production | 0 | 0 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_locks.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_locks.txt
new file mode 100644
index 000000000..8d92f314d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_locks.txt
@@ -0,0 +1,7 @@
+ datname | mode | granted | locks_count
+------------+--------------------------+---------+-------------
+ postgres | AccessShareLock | t | 99
+ postgres | RowShareLock | t | 99
+ postgres | RowExclusiveLock | t | 99
+ production | ShareUpdateExclusiveLock | t | 99
+ production | ShareLock | f | 99 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_size.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_size.txt
new file mode 100644
index 000000000..367cb6f20
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_size.txt
@@ -0,0 +1,4 @@
+ datname | size
+------------+--------
+ postgres | 8758051
+ production | 8602115 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_stats.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_stats.txt
new file mode 100644
index 000000000..d3ce24c6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/database_stats.txt
@@ -0,0 +1,4 @@
+datname | numbackends | datconnlimit | xact_commit | xact_rollback | blks_read_bytes | blks_hit_bytes | tup_returned | tup_fetched | tup_inserted | tup_updated | tup_deleted | conflicts | temp_files | temp_bytes | deadlocks
+------------+-------------+--------------+-------------+---------------+-----------------+----------------+--------------+-------------+--------------+-------------+-------------+-----------+------------+------------+-----------
+postgres | 3 | 30 | 1438660 | 70 | 3252 | 1221125 | 13207245 | 359833 | 0 | 0 | 0 | 0 | 0 | 0 | 0
+production | 1 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/is_super_user-false.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/is_super_user-false.txt
new file mode 100644
index 000000000..6cb2222d3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/is_super_user-false.txt
@@ -0,0 +1,3 @@
+ is_superuser
+--------------
+ f \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/is_super_user-true.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/is_super_user-true.txt
new file mode 100644
index 000000000..84cd8088e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/is_super_user-true.txt
@@ -0,0 +1,3 @@
+ is_superuser
+--------------
+ t \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt
new file mode 100644
index 000000000..b684948e3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt
@@ -0,0 +1,3 @@
+ pg_is_in_recovery
+-------------------
+ t \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/queryable_database_list.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/queryable_database_list.txt
new file mode 100644
index 000000000..b3f2af4f1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/queryable_database_list.txt
@@ -0,0 +1,2 @@
+ datname
+--------- \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_slot_files.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_slot_files.txt
new file mode 100644
index 000000000..59fcd8fe4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_slot_files.txt
@@ -0,0 +1,3 @@
+ slot_name | slot_type | replslot_wal_keep | replslot_files
+-----------+-----------+-------------------+----------------
+ ocean | physical | 0 | 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt
new file mode 100644
index 000000000..98c3cd99e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt
@@ -0,0 +1,5 @@
+ application_name | sent_delta | write_delta | flush_delta | replay_delta
+------------------+------------+-------------+-------------+--------------
+ walreceiver | 1 | 1 | 1 | 1
+ walreceiver | 1 | 1 | 1 | 1
+ phys-standby2 | 0 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt
new file mode 100644
index 000000000..c2e253790
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt
@@ -0,0 +1,5 @@
+ application_name | write_lag | flush_lag | replay_lag
+------------------+-----------+-----------+------------
+ walreceiver | 1 | 1 | 1
+ walreceiver | 1 | 1 | 1
+ phys-standby2 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_connections_state.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_connections_state.txt
new file mode 100644
index 000000000..7387f4dfb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_connections_state.txt
@@ -0,0 +1,8 @@
+ state | count
+-------------------------------+-------
+ active | 1
+ idle | 14
+ idle in transaction | 7
+ idle in transaction (aborted) | 1
+ fastpath function call | 1
+ disabled | 1 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_current_connections.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_current_connections.txt
new file mode 100644
index 000000000..065188d97
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_current_connections.txt
@@ -0,0 +1,3 @@
+ sum
+-----
+ 3 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_version_num.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_version_num.txt
new file mode 100644
index 000000000..18d769b32
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/server_version_num.txt
@@ -0,0 +1,3 @@
+ server_version_num
+--------------------
+ 140004 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/settings_max_connections.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/settings_max_connections.txt
new file mode 100644
index 000000000..4d59df214
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/settings_max_connections.txt
@@ -0,0 +1,3 @@
+ current_setting
+-----------------
+ 100 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/settings_max_locks_held.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/settings_max_locks_held.txt
new file mode 100644
index 000000000..e72bd71aa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/settings_max_locks_held.txt
@@ -0,0 +1,3 @@
+ ?column?
+----------
+ 6400 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt
new file mode 100644
index 000000000..db73fa4e6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt
@@ -0,0 +1,11 @@
+ datname | schemaname | relname | indexrelname | idx_scan | idx_tup_read | idx_tup_fetch | size
+----------+------------+------------------+-------------------------+----------+--------------+---------------+-----------
+ postgres | public | pgbench_branches | pgbench_branches_pkey | 0 | 0 | 0 | 16384
+ postgres | public | pgbench_tellers | pgbench_tellers_pkey | 0 | 0 | 0 | 32768
+ postgres | public | pgbench_accounts | pgbench_accounts_pkey | 3 | 5000000 | 0 | 112336896
+ postgres | public | myaccounts | myaccounts_pkey | 0 | 0 | 0 | 8192
+ postgres | public | myaccounts | myaccounts_username_key | 0 | 0 | 0 | 8192
+ postgres | public | myaccounts | myaccounts_email_key | 0 | 0 | 0 | 8192
+ postgres | myschema | myaccounts | myaccounts_pkey | 0 | 0 | 0 | 8192
+ postgres | myschema | myaccounts | myaccounts_username_key | 0 | 0 | 0 | 8192
+ postgres | myschema | myaccounts | myaccounts_email_key | 0 | 0 | 0 | 8192 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt
new file mode 100644
index 000000000..f6f9edb04
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt
@@ -0,0 +1,6 @@
+ datname | schemaname | relname | seq_scan | seq_tup_read | idx_scan | idx_tup_fetch | n_tup_ins | n_tup_upd | n_tup_del | n_tup_hot_upd | n_live_tup | n_dead_tup | last_vacuum | last_autovacuum | last_analyze | last_autoanalyze | vacuum_count | autovacuum_count | analyze_count | autoanalyze_count | total_relation_size
+----------+------------+------------------+----------+--------------+----------+---------------+-----------+-----------+-----------+---------------+------------+------------+---------------+-----------------+---------------+------------------+--------------+------------------+---------------+-------------------+---------------------
+ postgres | public | pgbench_history | 0 | 0 | | | 0 | 0 | 0 | 0 | 0 | 0 | 377149.085671 | | 377149.085536 | | 1 | 0 | 1 | 0 | 0
+ postgres | public | pgbench_accounts | 2 | 5000000 | 99955 | 99955 | 5000000 | 0 | 0 | 0 | 5000048 | 1000048 | 377149.232856 | | 377149.097205 | | 1 | 0 | 1 | 0 | 784031744
+ postgres | public | pgbench_tellers | 1 | 500 | 0 | 0 | 500 | 0 | 0 | 0 | 500 | 0 | 371719.262166 | | 377149.824095 | | 6 | 0 | 1 | 0 | 90112
+ postgres | public | pgbench_branches | 6 | 300 | 0 | 0 | 50 | 0 | 0 | 0 | 50 | 0 | 371719.262495 | | 377149.826260 | | 6 | 0 | 1 | 0 | 57344 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt
new file mode 100644
index 000000000..f52b1806b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt
@@ -0,0 +1,6 @@
+ datname | schemaname | relname | heap_blks_read_bytes | heap_blks_hit_bytes | idx_blks_read_bytes | idx_blks_hit_bytes | toast_blks_read_bytes | toast_blks_hit_bytes | tidx_blks_read_bytes | tidx_blks_hit_bytes
+----------+------------+------------------+----------------------+---------------------+---------------------+--------------------+-----------------------+----------------------+----------------------+---------------------
+ postgres | public | pgbench_tellers | 623828992 | 491937792 | 101433344 | 0 | | | |
+ postgres | public | pgbench_history | 0 | 0 | | | | | |
+ postgres | public | pgbench_accounts | 1803882668032 | 224484753408 | 973310976000 | 7138635948032 | | | |
+ postgres | public | pgbench_branches | 507150336 | 304316416 | 101425152 | 101441536 | | | | \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/table_columns_stats.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/table_columns_stats.txt
new file mode 100644
index 000000000..645d847d0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/table_columns_stats.txt
@@ -0,0 +1,10 @@
+ datname | schemaname | relname | attname | typname | null_percent | distinct
+----------+------------+------------------+----------+---------+--------------+----------
+ postgres | public | pgbench_accounts | abalance | int4 | 0 | 1
+ postgres | public | pgbench_accounts | aid | int4 | 0 | 5e+06
+ postgres | public | pgbench_accounts | bid | int4 | 0 | 50
+ postgres | public | pgbench_accounts | filler | bpchar | 0 | 1
+ postgres | public | pgbench_tellers | bid | int4 | 0 | 50
+ postgres | public | pgbench_tellers | filler | bpchar | 100 | 0
+ postgres | public | pgbench_tellers | tbalance | int4 | 0 | 1
+ postgres | public | pgbench_tellers | tid | int4 | 0 | 500 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/txid_wraparound.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/txid_wraparound.txt
new file mode 100644
index 000000000..9e05f12ab
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/txid_wraparound.txt
@@ -0,0 +1,3 @@
+ oldest_current_xid | percent_towards_wraparound | percent_towards_emergency_autovacuum
+--------------------+----------------------------+-----------------------------------
+ 9 | 0 | 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/uptime.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/uptime.txt
new file mode 100644
index 000000000..95464bc3c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/uptime.txt
@@ -0,0 +1,3 @@
+ extract
+---------------
+ 499906.075943 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_archive_files.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_archive_files.txt
new file mode 100644
index 000000000..8b7a86261
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_archive_files.txt
@@ -0,0 +1,3 @@
+ wal_archive_files_ready_count | wal_archive_files_done_count
+-------------------------------+------------------------------
+ 1 | 1 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_files.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_files.txt
new file mode 100644
index 000000000..f18aefdcd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_files.txt
@@ -0,0 +1,3 @@
+ wal_recycled_files | wal_written_files
+--------------------+-------------------
+ 0 | 1 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_writes.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_writes.txt
new file mode 100644
index 000000000..3bb8f9e95
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/wal_writes.txt
@@ -0,0 +1,3 @@
+ wal_writes
+------------
+ 24103144 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/postgres/testdata/v14.4/xact_query_running_time.txt b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/xact_query_running_time.txt
new file mode 100644
index 000000000..52617f748
--- /dev/null
+++ b/src/go/plugin/go.d/modules/postgres/testdata/v14.4/xact_query_running_time.txt
@@ -0,0 +1,10 @@
+ datname | state | xact_running_time | query_running_time
+----------+---------------------+-------------------+--------------------
+ some_db | idle in transaction | 574.530219 | 574.315061
+ some_db | idle in transaction | 574.867167 | 574.330322
+ postgres | active | 0.000000 | 0.000000
+ some_db | idle in transaction | 574.807256 | 574.377105
+ some_db | idle in transaction | 574.680244 | 574.357246
+ some_db | idle in transaction | 574.800283 | 574.330328
+ some_db | idle in transaction | 574.396730 | 574.290165
+ some_db | idle in transaction | 574.665428 | 574.337164 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/powerdns/README.md b/src/go/plugin/go.d/modules/powerdns/README.md
new file mode 120000
index 000000000..3e5989715
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/README.md
@@ -0,0 +1 @@
+integrations/powerdns_authoritative_server.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/powerdns/authoritativens.go b/src/go/plugin/go.d/modules/powerdns/authoritativens.go
new file mode 100644
index 000000000..b9c02b86f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/authoritativens.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("powerdns", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *AuthoritativeNS {
+ return &AuthoritativeNS{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8081",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type AuthoritativeNS struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (ns *AuthoritativeNS) Configuration() any {
+ return ns.Config
+}
+
+func (ns *AuthoritativeNS) Init() error {
+ err := ns.validateConfig()
+ if err != nil {
+ ns.Errorf("config validation: %v", err)
+ return err
+ }
+
+ client, err := ns.initHTTPClient()
+ if err != nil {
+ ns.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ ns.httpClient = client
+
+ cs, err := ns.initCharts()
+ if err != nil {
+ ns.Errorf("init charts: %v", err)
+ return err
+ }
+ ns.charts = cs
+
+ return nil
+}
+
+func (ns *AuthoritativeNS) Check() error {
+ mx, err := ns.collect()
+ if err != nil {
+ ns.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (ns *AuthoritativeNS) Charts() *module.Charts {
+ return ns.charts
+}
+
+func (ns *AuthoritativeNS) Collect() map[string]int64 {
+ ms, err := ns.collect()
+ if err != nil {
+ ns.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+
+func (ns *AuthoritativeNS) Cleanup() {
+ if ns.httpClient == nil {
+ return
+ }
+ ns.httpClient.CloseIdleConnections()
+}
diff --git a/src/go/plugin/go.d/modules/powerdns/authoritativens_test.go b/src/go/plugin/go.d/modules/powerdns/authoritativens_test.go
new file mode 100644
index 000000000..d506c9778
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/authoritativens_test.go
@@ -0,0 +1,340 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer430statistics, _ = os.ReadFile("testdata/v4.3.0/statistics.json")
+ dataRecursorStatistics, _ = os.ReadFile("testdata/recursor/statistics.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer430statistics": dataVer430statistics,
+ "dataRecursorStatistics": dataRecursorStatistics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestAuthoritativeNS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &AuthoritativeNS{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestAuthoritativeNS_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset URL": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ "fails on invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:38001",
+ },
+ Client: web.Client{
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ns := New()
+ ns.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, ns.Init())
+ } else {
+ assert.NoError(t, ns.Init())
+ }
+ })
+ }
+}
+
+func TestAuthoritativeNS_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (ns *AuthoritativeNS, cleanup func())
+ wantFail bool
+ }{
+ "success on valid response v4.3.0": {
+ prepare: preparePowerDNSAuthoritativeNSV430,
+ },
+ "fails on response from PowerDNS Recursor": {
+ wantFail: true,
+ prepare: preparePowerDNSAuthoritativeNSRecursorData,
+ },
+ "fails on 404 response": {
+ wantFail: true,
+ prepare: preparePowerDNSAuthoritativeNS404,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: preparePowerDNSAuthoritativeNSConnectionRefused,
+ },
+ "fails on response with invalid data": {
+ wantFail: true,
+ prepare: preparePowerDNSAuthoritativeNSInvalidData,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ns, cleanup := test.prepare()
+ defer cleanup()
+ require.NoError(t, ns.Init())
+
+ if test.wantFail {
+ assert.Error(t, ns.Check())
+ } else {
+ assert.NoError(t, ns.Check())
+ }
+ })
+ }
+}
+
+func TestAuthoritativeNS_Charts(t *testing.T) {
+ ns := New()
+ require.NoError(t, ns.Init())
+ assert.NotNil(t, ns.Charts())
+}
+
+func TestAuthoritativeNS_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestAuthoritativeNS_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (p *AuthoritativeNS, cleanup func())
+ wantCollected map[string]int64
+ }{
+ "success on valid response v4.3.0": {
+ prepare: preparePowerDNSAuthoritativeNSV430,
+ wantCollected: map[string]int64{
+ "corrupt-packets": 1,
+ "cpu-iowait": 513,
+ "cpu-steal": 1,
+ "deferred-cache-inserts": 1,
+ "deferred-cache-lookup": 1,
+ "deferred-packetcache-inserts": 1,
+ "deferred-packetcache-lookup": 1,
+ "dnsupdate-answers": 1,
+ "dnsupdate-changes": 1,
+ "dnsupdate-queries": 1,
+ "dnsupdate-refused": 1,
+ "fd-usage": 23,
+ "incoming-notifications": 1,
+ "key-cache-size": 1,
+ "latency": 1,
+ "meta-cache-size": 1,
+ "open-tcp-connections": 1,
+ "overload-drops": 1,
+ "packetcache-hit": 1,
+ "packetcache-miss": 1,
+ "packetcache-size": 1,
+ "qsize-q": 1,
+ "query-cache-hit": 1,
+ "query-cache-miss": 1,
+ "query-cache-size": 1,
+ "rd-queries": 1,
+ "real-memory-usage": 164507648,
+ "recursing-answers": 1,
+ "recursing-questions": 1,
+ "recursion-unanswered": 1,
+ "ring-logmessages-capacity": 10000,
+ "ring-logmessages-size": 10,
+ "ring-noerror-queries-capacity": 10000,
+ "ring-noerror-queries-size": 1,
+ "ring-nxdomain-queries-capacity": 10000,
+ "ring-nxdomain-queries-size": 1,
+ "ring-queries-capacity": 10000,
+ "ring-queries-size": 1,
+ "ring-remotes-capacity": 10000,
+ "ring-remotes-corrupt-capacity": 10000,
+ "ring-remotes-corrupt-size": 1,
+ "ring-remotes-size": 1,
+ "ring-remotes-unauth-capacity": 10000,
+ "ring-remotes-unauth-size": 1,
+ "ring-servfail-queries-capacity": 10000,
+ "ring-servfail-queries-size": 1,
+ "ring-unauth-queries-capacity": 10000,
+ "ring-unauth-queries-size": 1,
+ "security-status": 1,
+ "servfail-packets": 1,
+ "signature-cache-size": 1,
+ "signatures": 1,
+ "sys-msec": 128,
+ "tcp-answers": 1,
+ "tcp-answers-bytes": 1,
+ "tcp-queries": 1,
+ "tcp4-answers": 1,
+ "tcp4-answers-bytes": 1,
+ "tcp4-queries": 1,
+ "tcp6-answers": 1,
+ "tcp6-answers-bytes": 1,
+ "tcp6-queries": 1,
+ "timedout-packets": 1,
+ "udp-answers": 1,
+ "udp-answers-bytes": 1,
+ "udp-do-queries": 1,
+ "udp-in-errors": 1,
+ "udp-noport-errors": 1,
+ "udp-queries": 1,
+ "udp-recvbuf-errors": 1,
+ "udp-sndbuf-errors": 1,
+ "udp4-answers": 1,
+ "udp4-answers-bytes": 1,
+ "udp4-queries": 1,
+ "udp6-answers": 1,
+ "udp6-answers-bytes": 1,
+ "udp6-queries": 1,
+ "uptime": 207,
+ "user-msec": 56,
+ },
+ },
+ "fails on response from PowerDNS Recursor": {
+ prepare: preparePowerDNSAuthoritativeNSRecursorData,
+ },
+ "fails on 404 response": {
+ prepare: preparePowerDNSAuthoritativeNS404,
+ },
+ "fails on connection refused": {
+ prepare: preparePowerDNSAuthoritativeNSConnectionRefused,
+ },
+ "fails on response with invalid data": {
+ prepare: preparePowerDNSAuthoritativeNSInvalidData,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ ns, cleanup := test.prepare()
+ defer cleanup()
+ require.NoError(t, ns.Init())
+
+ collected := ns.Collect()
+
+ assert.Equal(t, test.wantCollected, collected)
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, ns, collected)
+ }
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, ns *AuthoritativeNS, collected map[string]int64) {
+ for _, chart := range *ns.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+			assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+		}
+		for _, v := range chart.Vars {
+			_, ok := collected[v.ID]
+			assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+ }
+ }
+}
+
+func preparePowerDNSAuthoritativeNSV430() (*AuthoritativeNS, func()) {
+ srv := preparePowerDNSAuthoritativeNSEndpoint()
+ ns := New()
+ ns.URL = srv.URL
+
+ return ns, srv.Close
+}
+
+func preparePowerDNSAuthoritativeNSRecursorData() (*AuthoritativeNS, func()) {
+ srv := preparePowerDNSRecursorEndpoint()
+ ns := New()
+ ns.URL = srv.URL
+
+ return ns, srv.Close
+}
+
+func preparePowerDNSAuthoritativeNSInvalidData() (*AuthoritativeNS, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ ns := New()
+ ns.URL = srv.URL
+
+ return ns, srv.Close
+}
+
+func preparePowerDNSAuthoritativeNS404() (*AuthoritativeNS, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ ns := New()
+ ns.URL = srv.URL
+
+ return ns, srv.Close
+}
+
+func preparePowerDNSAuthoritativeNSConnectionRefused() (*AuthoritativeNS, func()) {
+ ns := New()
+ ns.URL = "http://127.0.0.1:38001"
+
+ return ns, func() {}
+}
+
+func preparePowerDNSAuthoritativeNSEndpoint() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathLocalStatistics:
+ _, _ = w.Write(dataVer430statistics)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
+
+func preparePowerDNSRecursorEndpoint() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathLocalStatistics:
+ _, _ = w.Write(dataRecursorStatistics)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
diff --git a/src/go/plugin/go.d/modules/powerdns/charts.go b/src/go/plugin/go.d/modules/powerdns/charts.go
new file mode 100644
index 000000000..331a94a21
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/charts.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+var charts = module.Charts{
+ {
+ ID: "questions_in",
+ Title: "Incoming questions",
+ Units: "questions/s",
+ Fam: "questions",
+ Ctx: "powerdns.questions_in",
+ Dims: module.Dims{
+ {ID: "udp-queries", Name: "udp", Algo: module.Incremental},
+ {ID: "tcp-queries", Name: "tcp", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "questions_out",
+ Title: "Outgoing questions",
+ Units: "questions/s",
+ Fam: "questions",
+ Ctx: "powerdns.questions_out",
+ Dims: module.Dims{
+ {ID: "udp-answers", Name: "udp", Algo: module.Incremental},
+ {ID: "tcp-answers", Name: "tcp", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "cache_usage",
+ Title: "Cache Usage",
+ Units: "events/s",
+ Fam: "cache",
+ Ctx: "powerdns.cache_usage",
+ Dims: module.Dims{
+ {ID: "query-cache-hit", Algo: module.Incremental},
+ {ID: "query-cache-miss", Algo: module.Incremental},
+ {ID: "packetcache-hit", Name: "packet-cache-hit", Algo: module.Incremental},
+ {ID: "packetcache-miss", Name: "packet-cache-miss", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "cache_size",
+ Title: "Cache Size",
+ Units: "entries",
+ Fam: "cache",
+ Ctx: "powerdns.cache_size",
+ Dims: module.Dims{
+ {ID: "query-cache-size", Name: "query-cache"},
+ {ID: "packetcache-size", Name: "packet-cache"},
+ {ID: "key-cache-size", Name: "key-cache"},
+ {ID: "meta-cache-size", Name: "meta-cache"},
+ },
+ },
+ {
+ ID: "latency",
+ Title: "Answer latency",
+ Units: "microseconds",
+ Fam: "latency",
+ Ctx: "powerdns.latency",
+ Dims: module.Dims{
+ {ID: "latency"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/powerdns/collect.go b/src/go/plugin/go.d/modules/powerdns/collect.go
new file mode 100644
index 000000000..c2831e0f2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/collect.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ urlPathLocalStatistics = "/api/v1/servers/localhost/statistics"
+)
+
+func (ns *AuthoritativeNS) collect() (map[string]int64, error) {
+ statistics, err := ns.scrapeStatistics()
+ if err != nil {
+ return nil, err
+ }
+
+ collected := make(map[string]int64)
+
+ ns.collectStatistics(collected, statistics)
+
+ if !isPowerDNSAuthoritativeNSMetrics(collected) {
+ return nil, errors.New("returned metrics aren't PowerDNS Authoritative Server metrics")
+ }
+
+ return collected, nil
+}
+
+func isPowerDNSAuthoritativeNSMetrics(collected map[string]int64) bool {
+ // PowerDNS Recursor exposes the same endpoint and returns data in the same format.
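+ // "over-capacity-drops" and "tcp-questions" exist only in Recursor statistics, so treat the data as Authoritative Server metrics only when both are absent.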
+ _, ok1 := collected["over-capacity-drops"]
+ _, ok2 := collected["tcp-questions"]
+ return !ok1 && !ok2
+}
+
+func (ns *AuthoritativeNS) collectStatistics(collected map[string]int64, statistics statisticMetrics) {
+ for _, s := range statistics {
+ // https://doc.powerdns.com/authoritative/http-api/statistics.html#statisticitem
+ if s.Type != "StatisticItem" {
+ continue
+ }
+
+ value, ok := s.Value.(string)
+ if !ok {
+ ns.Debugf("%s value (%v) unexpected type: want=string, got=%T.", s.Name, s.Value, s.Value)
+ continue
+ }
+
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ ns.Debugf("%s value (%v) parse error: %v", s.Name, s.Value, err)
+ continue
+ }
+
+ collected[s.Name] = v
+ }
+}
+
+func (ns *AuthoritativeNS) scrapeStatistics() ([]statisticMetric, error) {
+ req, _ := web.NewHTTPRequestWithPath(ns.Request, urlPathLocalStatistics)
+
+ var statistics statisticMetrics
+ if err := ns.doOKDecode(req, &statistics); err != nil {
+ return nil, err
+ }
+
+ return statistics, nil
+}
+
+func (ns *AuthoritativeNS) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := ns.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
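+ // drain the body before closing so the underlying keep-alive connection can be reused by the HTTP client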
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/powerdns/config_schema.json b/src/go/plugin/go.d/modules/powerdns/config_schema.json
new file mode 100644
index 000000000..2ec6565c1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "PowerDNS collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the PowerDNS [built-in webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).",
+ "type": "string",
+ "default": "http://127.0.0.1:8081",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/powerdns/init.go b/src/go/plugin/go.d/modules/powerdns/init.go
new file mode 100644
index 000000000..0819459fe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/init.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (ns *AuthoritativeNS) validateConfig() error {
+ if ns.URL == "" {
+ return errors.New("URL not set")
+ }
+ if _, err := web.NewHTTPRequest(ns.Request); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (ns *AuthoritativeNS) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(ns.Client)
+}
+
+func (ns *AuthoritativeNS) initCharts() (*module.Charts, error) {
+ return charts.Copy(), nil
+}
diff --git a/src/go/plugin/go.d/modules/powerdns/integrations/powerdns_authoritative_server.md b/src/go/plugin/go.d/modules/powerdns/integrations/powerdns_authoritative_server.md
new file mode 100644
index 000000000..b4060a613
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/integrations/powerdns_authoritative_server.md
@@ -0,0 +1,258 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/powerdns/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/powerdns/metadata.yaml"
+sidebar_label: "PowerDNS Authoritative Server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# PowerDNS Authoritative Server
+
+
+<img src="https://netdata.cloud/img/powerdns.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: powerdns
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors PowerDNS Authoritative Server instances.
+It collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).
+
+Used endpoints:
+
+- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per PowerDNS Authoritative Server instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| powerdns.questions_in | udp, tcp | questions/s |
+| powerdns.questions_out | udp, tcp | questions/s |
+| powerdns.cache_usage | query-cache-hit, query-cache-miss, packet-cache-hit, packet-cache-miss | events/s |
+| powerdns.cache_size | query-cache, packet-cache, key-cache, meta-cache | entries |
+| powerdns.latency | latency | microseconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable webserver
+
+Follow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.
+
+
+#### Enable HTTP API
+
+Follow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.
+
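+To verify that the API is reachable, you can query the statistics endpoint directly. This is a minimal check, assuming the webserver listens on `127.0.0.1:8081` and `secret` is the configured API key:
+
+```bash
+curl -H 'X-API-Key: secret' http://127.0.0.1:8081/api/v1/servers/localhost/statistics
+```
+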
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/powerdns.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/powerdns.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8081 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8081
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8081
+ username: admin
+ password: password
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8081
+
+ - name: remote
+ url: http://203.0.113.0:8081
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `powerdns` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m powerdns
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `powerdns` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep powerdns
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep powerdns /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep powerdns
+```
+
+
diff --git a/src/go/plugin/go.d/modules/powerdns/metadata.yaml b/src/go/plugin/go.d/modules/powerdns/metadata.yaml
new file mode 100644
index 000000000..ea4dec0b5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/metadata.yaml
@@ -0,0 +1,215 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-powerdns
+ plugin_name: go.d.plugin
+ module_name: powerdns
+ monitored_instance:
+ name: PowerDNS Authoritative Server
+ link: https://doc.powerdns.com/authoritative/
+ icon_filename: powerdns.svg
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - powerdns
+ - dns
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors PowerDNS Authoritative Server instances.
+ It collects metrics from [the internal webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver).
+
+ Used endpoints:
+
+ - [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/authoritative/http-api/statistics.html)
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable webserver
+ description: |
+ Follow [webserver](https://doc.powerdns.com/authoritative/http-api/index.html#webserver) documentation.
+ - title: Enable HTTP API
+ description: |
+ Follow [HTTP API](https://doc.powerdns.com/authoritative/http-api/index.html#enabling-the-api) documentation.
+ configuration:
+ file:
+ name: go.d/powerdns.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8081
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8081
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8081
+ username: admin
+ password: password
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8081
+
+ - name: remote
+ url: http://203.0.113.0:8081
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: powerdns.questions_in
+ description: Incoming questions
+ unit: questions/s
+ chart_type: line
+ dimensions:
+ - name: udp
+ - name: tcp
+ - name: powerdns.questions_out
+ description: Outgoing questions
+ unit: questions/s
+ chart_type: line
+ dimensions:
+ - name: udp
+ - name: tcp
+ - name: powerdns.cache_usage
+ description: Cache Usage
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: query-cache-hit
+ - name: query-cache-miss
+ - name: packet-cache-hit
+ - name: packet-cache-miss
+ - name: powerdns.cache_size
+ description: Cache Size
+ unit: entries
+ chart_type: line
+ dimensions:
+ - name: query-cache
+ - name: packet-cache
+ - name: key-cache
+ - name: meta-cache
+ - name: powerdns.latency
+ description: Answer latency
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: latency
diff --git a/src/go/plugin/go.d/modules/powerdns/metrics.go b/src/go/plugin/go.d/modules/powerdns/metrics.go
new file mode 100644
index 000000000..3efa2c980
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/metrics.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns
+
+// https://doc.powerdns.com/authoritative/http-api/statistics.html#objects
+type (
+ statisticMetrics []statisticMetric
+ statisticMetric struct {
+ Name string
+ Type string
+ Value interface{}
+ }
+)
diff --git a/src/go/plugin/go.d/modules/powerdns/testdata/config.json b/src/go/plugin/go.d/modules/powerdns/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/powerdns/testdata/config.yaml b/src/go/plugin/go.d/modules/powerdns/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/powerdns/testdata/recursor/statistics.json b/src/go/plugin/go.d/modules/powerdns/testdata/recursor/statistics.json
new file mode 100644
index 000000000..a31477959
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/testdata/recursor/statistics.json
@@ -0,0 +1,587 @@
+[
+ {
+ "name": "all-outqueries",
+ "type": "StatisticItem",
+ "value": "41"
+ },
+ {
+ "name": "answers-slow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "answers0-1",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "answers1-10",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "answers10-100",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "answers100-1000",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth-zone-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth4-answers-slow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth4-answers0-1",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth4-answers1-10",
+ "type": "StatisticItem",
+ "value": "5"
+ },
+ {
+ "name": "auth4-answers10-100",
+ "type": "StatisticItem",
+ "value": "35"
+ },
+ {
+ "name": "auth4-answers100-1000",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers-slow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers0-1",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers1-10",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers10-100",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers100-1000",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "cache-entries",
+ "type": "StatisticItem",
+ "value": "171"
+ },
+ {
+ "name": "cache-hits",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "cache-misses",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "case-mismatches",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "chain-resends",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "client-parse-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "concurrent-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "cpu-msec-thread-0",
+ "type": "StatisticItem",
+ "value": "439"
+ },
+ {
+ "name": "cpu-msec-thread-1",
+ "type": "StatisticItem",
+ "value": "445"
+ },
+ {
+ "name": "cpu-msec-thread-2",
+ "type": "StatisticItem",
+ "value": "466"
+ },
+ {
+ "name": "dlg-only-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-authentic-data-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-check-disabled-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-bogus",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-indeterminate",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-insecure",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-nta",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-secure",
+ "type": "StatisticItem",
+ "value": "5"
+ },
+ {
+ "name": "dnssec-validations",
+ "type": "StatisticItem",
+ "value": "5"
+ },
+ {
+ "name": "dont-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ecs-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ecs-responses",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "edns-ping-matches",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "edns-ping-mismatches",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "empty-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "failed-host-entries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "fd-usage",
+ "type": "StatisticItem",
+ "value": "32"
+ },
+ {
+ "name": "ignored-packets",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ipv6-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ipv6-questions",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "malloc-bytes",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "max-cache-entries",
+ "type": "StatisticItem",
+ "value": "1000000"
+ },
+ {
+ "name": "max-mthread-stack",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "max-packetcache-entries",
+ "type": "StatisticItem",
+ "value": "500000"
+ },
+ {
+ "name": "negcache-entries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "no-packet-error",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "noedns-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "noerror-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "noping-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "nsset-invalidations",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "nsspeeds-entries",
+ "type": "StatisticItem",
+ "value": "78"
+ },
+ {
+ "name": "nxdomain-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "outgoing-timeouts",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "outgoing4-timeouts",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "outgoing6-timeouts",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "over-capacity-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "packetcache-entries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "packetcache-hits",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "packetcache-misses",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-custom",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-drop",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-noaction",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-nodata",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-nxdomain",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-truncate",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "qa-latency",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "qname-min-fallback-success",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "query-pipe-full-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "questions",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "real-memory-usage",
+ "type": "StatisticItem",
+ "value": "44773376"
+ },
+ {
+ "name": "rebalanced-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "resource-limits",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "security-status",
+ "type": "StatisticItem",
+ "value": "3"
+ },
+ {
+ "name": "server-parse-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "servfail-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "spoof-prevents",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "sys-msec",
+ "type": "StatisticItem",
+ "value": "1520"
+ },
+ {
+ "name": "tcp-client-overflow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp-clients",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp-questions",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "throttle-entries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "throttled-out",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "throttled-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "too-old-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "truncated-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-in-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-noport-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-recvbuf-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-sndbuf-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "unauthorized-tcp",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "unauthorized-udp",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "unexpected-packets",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "unreachables",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "uptime",
+ "type": "StatisticItem",
+ "value": "1624"
+ },
+ {
+ "name": "user-msec",
+ "type": "StatisticItem",
+ "value": "465"
+ },
+ {
+ "name": "variable-responses",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-our-latency",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime-slow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime0-1",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime1-2",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime16-32",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime2-4",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime4-8",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime8-16",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "response-by-qtype",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "response-sizes",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "response-by-rcode",
+ "type": "MapStatisticItem",
+ "value": []
+ }
+]
diff --git a/src/go/plugin/go.d/modules/powerdns/testdata/v4.3.0/statistics.json b/src/go/plugin/go.d/modules/powerdns/testdata/v4.3.0/statistics.json
new file mode 100644
index 000000000..30813d3d8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns/testdata/v4.3.0/statistics.json
@@ -0,0 +1,507 @@
+[
+ {
+ "name": "corrupt-packets",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "cpu-iowait",
+ "type": "StatisticItem",
+ "value": "513"
+ },
+ {
+ "name": "cpu-steal",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "deferred-cache-inserts",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "deferred-cache-lookup",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "deferred-packetcache-inserts",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "deferred-packetcache-lookup",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnsupdate-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnsupdate-changes",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnsupdate-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnsupdate-refused",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "fd-usage",
+ "type": "StatisticItem",
+ "value": "23"
+ },
+ {
+ "name": "incoming-notifications",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "key-cache-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "latency",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "meta-cache-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "open-tcp-connections",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "overload-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "packetcache-hit",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "packetcache-miss",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "packetcache-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "qsize-q",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "query-cache-hit",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "query-cache-miss",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "query-cache-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "rd-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "real-memory-usage",
+ "type": "StatisticItem",
+ "value": "164507648"
+ },
+ {
+ "name": "recursing-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "recursing-questions",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "recursion-unanswered",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ring-logmessages-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-logmessages-size",
+ "type": "StatisticItem",
+ "value": "10"
+ },
+ {
+ "name": "ring-noerror-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-noerror-queries-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ring-nxdomain-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-nxdomain-queries-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ring-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-queries-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ring-remotes-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-remotes-corrupt-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-remotes-corrupt-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ring-remotes-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ring-remotes-unauth-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-remotes-unauth-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ring-servfail-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-servfail-queries-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ring-unauth-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-unauth-queries-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "security-status",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "servfail-packets",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "signature-cache-size",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "signatures",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "sys-msec",
+ "type": "StatisticItem",
+ "value": "128"
+ },
+ {
+ "name": "tcp-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp-answers-bytes",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp4-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp4-answers-bytes",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp4-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp6-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp6-answers-bytes",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp6-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "timedout-packets",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-answers-bytes",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-do-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-in-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-noport-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-recvbuf-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-sndbuf-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp4-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp4-answers-bytes",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp4-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp6-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp6-answers-bytes",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp6-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "uptime",
+ "type": "StatisticItem",
+ "value": "207"
+ },
+ {
+ "name": "user-msec",
+ "type": "StatisticItem",
+ "value": "56"
+ },
+ {
+ "name": "response-by-qtype",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "response-sizes",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "response-by-rcode",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "logmessages",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": [
+ {
+ "name": "[webserver] 088688d6-9976-4e4d-a6aa-2272f8c6f173 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "[webserver] 662e4249-4e9a-42e7-b780-b81929875b8f HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "[webserver] 8c79870a-9a47-4952-9166-02710d146ab3 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "[webserver] dc029119-209f-4101-9e8f-82ab02d857d9 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "[webserver] fa61f546-8607-4771-bc9a-48ddc5a85dc0 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "About to create 3 backend threads for UDP",
+ "value": "1"
+ },
+ {
+ "name": "Creating backend connection for TCP",
+ "value": "1"
+ },
+ {
+ "name": "Done launching threads, ready to distribute questions",
+ "value": "1"
+ },
+ {
+ "name": "Master/slave communicator launching",
+ "value": "1"
+ },
+ {
+ "name": "No master domains need notifications",
+ "value": "1"
+ }
+ ]
+ },
+ {
+ "name": "remotes",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "remotes-corrupt",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "remotes-unauth",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "noerror-queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "nxdomain-queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "servfail-queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "unauth-queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ }
+]
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/README.md b/src/go/plugin/go.d/modules/powerdns_recursor/README.md
new file mode 120000
index 000000000..810e63308
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/README.md
@@ -0,0 +1 @@
+integrations/powerdns_recursor.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/charts.go b/src/go/plugin/go.d/modules/powerdns_recursor/charts.go
new file mode 100644
index 000000000..ea63fd1c3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/charts.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns_recursor
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+var charts = module.Charts{
+ {
+ ID: "questions_in",
+ Title: "Incoming questions",
+ Units: "questions/s",
+ Fam: "questions",
+ Ctx: "powerdns_recursor.questions_in",
+ Dims: module.Dims{
+ {ID: "questions", Name: "total", Algo: module.Incremental},
+ {ID: "tcp-questions", Name: "tcp", Algo: module.Incremental},
+ {ID: "ipv6-questions", Name: "ipv6", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "questions_out",
+ Title: "Outgoing questions",
+ Units: "questions/s",
+ Fam: "questions",
+ Ctx: "powerdns_recursor.questions_out",
+ Dims: module.Dims{
+ {ID: "all-outqueries", Name: "udp", Algo: module.Incremental},
+ {ID: "tcp-outqueries", Name: "tcp", Algo: module.Incremental},
+ {ID: "ipv6-outqueries", Name: "ipv6", Algo: module.Incremental},
+ {ID: "throttled-outqueries", Name: "throttled", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "answer_time",
+ Title: "Queries answered within a time range",
+ Units: "queries/s",
+ Fam: "performance",
+ Ctx: "powerdns_recursor.answer_time",
+ Dims: module.Dims{
+ {ID: "answers0-1", Name: "0-1ms", Algo: module.Incremental},
+ {ID: "answers1-10", Name: "1-10ms", Algo: module.Incremental},
+ {ID: "answers10-100", Name: "10-100ms", Algo: module.Incremental},
+ {ID: "answers100-1000", Name: "100-1000ms", Algo: module.Incremental},
+ {ID: "answers-slow", Name: "slow", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "timeouts",
+ Title: "Timeouts on outgoing UDP queries",
+ Units: "timeouts/s",
+ Fam: "performance",
+ Ctx: "powerdns_recursor.timeouts",
+ Dims: module.Dims{
+ {ID: "outgoing-timeouts", Name: "total", Algo: module.Incremental},
+ {ID: "outgoing4-timeouts", Name: "ipv4", Algo: module.Incremental},
+ {ID: "outgoing6-timeouts", Name: "ipv6", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "drops",
+ Title: "Drops",
+ Units: "drops/s",
+ Fam: "performance",
+ Ctx: "powerdns_recursor.drops",
+ Dims: module.Dims{
+ {ID: "over-capacity-drops", Algo: module.Incremental},
+ {ID: "query-pipe-full-drops", Algo: module.Incremental},
+ {ID: "too-old-drops", Algo: module.Incremental},
+ {ID: "truncated-drops", Algo: module.Incremental},
+ {ID: "empty-queries", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "cache_usage",
+ Title: "Cache Usage",
+ Units: "events/s",
+ Fam: "cache",
+ Ctx: "powerdns_recursor.cache_usage",
+ Dims: module.Dims{
+ {ID: "cache-hits", Algo: module.Incremental},
+ {ID: "cache-misses", Algo: module.Incremental},
+ {ID: "packetcache-hits", Name: "packet-cache-hits", Algo: module.Incremental},
+ {ID: "packetcache-misses", Name: "packet-cache-misses", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "cache_size",
+ Title: "Cache Size",
+ Units: "entries",
+ Fam: "cache",
+ Ctx: "powerdns_recursor.cache_size",
+ Dims: module.Dims{
+ {ID: "cache-entries", Name: "cache"},
+ {ID: "packetcache-entries", Name: "packet-cache"},
+ {ID: "negcache-entries", Name: "negative-cache"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/collect.go b/src/go/plugin/go.d/modules/powerdns_recursor/collect.go
new file mode 100644
index 000000000..784093ccf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/collect.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns_recursor
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ urlPathLocalStatistics = "/api/v1/servers/localhost/statistics"
+)
+
+func (r *Recursor) collect() (map[string]int64, error) {
+ statistics, err := r.scrapeStatistics()
+ if err != nil {
+ return nil, err
+ }
+
+ collected := make(map[string]int64)
+
+ r.collectStatistics(collected, statistics)
+
+ if !isPowerDNSRecursorMetrics(collected) {
+ return nil, errors.New("returned metrics aren't PowerDNS Recursor metrics")
+ }
+
+ return collected, nil
+}
+
+func isPowerDNSRecursorMetrics(collected map[string]int64) bool {
+ // PowerDNS Authoritative Server exposes the same endpoint and returns data in the same format.
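+ // "over-capacity-drops" and "tcp-questions" exist only in Recursor statistics, so treat the data as Recursor metrics only when both are present.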
+ _, ok1 := collected["over-capacity-drops"]
+ _, ok2 := collected["tcp-questions"]
+ return ok1 && ok2
+}
+
+func (r *Recursor) collectStatistics(collected map[string]int64, statistics statisticMetrics) {
+ for _, s := range statistics {
+ // https://doc.powerdns.com/authoritative/http-api/statistics.html#statisticitem
+ if s.Type != "StatisticItem" {
+ continue
+ }
+
+ value, ok := s.Value.(string)
+ if !ok {
+ r.Debugf("%s value (%v) unexpected type: want=string, got=%T.", s.Name, s.Value, s.Value)
+ continue
+ }
+
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ r.Debugf("%s value (%v) parse error: %v", s.Name, s.Value, err)
+ continue
+ }
+
+ collected[s.Name] = v
+ }
+}
+
+func (r *Recursor) scrapeStatistics() ([]statisticMetric, error) {
+ req, _ := web.NewHTTPRequestWithPath(r.Request, urlPathLocalStatistics)
+
+ var statistics statisticMetrics
+ if err := r.doOKDecode(req, &statistics); err != nil {
+ return nil, err
+ }
+
+ return statistics, nil
+}
+
+func (r *Recursor) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := r.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
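+ // drain the body before closing so the underlying keep-alive connection can be reused by the HTTP client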
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/config_schema.json b/src/go/plugin/go.d/modules/powerdns_recursor/config_schema.json
new file mode 100644
index 000000000..1b76938ce
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "PowerDNS Recursor collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the PowerDNS Recursor [built-in webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver).",
+ "type": "string",
+ "default": "http://127.0.0.1:8081",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/init.go b/src/go/plugin/go.d/modules/powerdns_recursor/init.go
new file mode 100644
index 000000000..cadc6d2c2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/init.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns_recursor
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (r *Recursor) validateConfig() error {
+ if r.URL == "" {
+ return errors.New("URL not set")
+ }
+ if _, err := web.NewHTTPRequest(r.Request); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (r *Recursor) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(r.Client)
+}
+
+func (r *Recursor) initCharts() (*module.Charts, error) {
+ return charts.Copy(), nil
+}
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/integrations/powerdns_recursor.md b/src/go/plugin/go.d/modules/powerdns_recursor/integrations/powerdns_recursor.md
new file mode 100644
index 000000000..68a3da0a9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/integrations/powerdns_recursor.md
@@ -0,0 +1,261 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/powerdns_recursor/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/powerdns_recursor/metadata.yaml"
+sidebar_label: "PowerDNS Recursor"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# PowerDNS Recursor
+
+
+<img src="https://netdata.cloud/img/powerdns.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: powerdns_recursor
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors PowerDNS Recursor instances.
+
+It collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).
+
+Used endpoints:
+
+- [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per PowerDNS Recursor instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| powerdns_recursor.questions_in | total, tcp, ipv6 | questions/s |
+| powerdns_recursor.questions_out | udp, tcp, ipv6, throttled | questions/s |
+| powerdns_recursor.answer_time | 0-1ms, 1-10ms, 10-100ms, 100-1000ms, slow | queries/s |
+| powerdns_recursor.timeouts | total, ipv4, ipv6 | timeouts/s |
+| powerdns_recursor.drops | over-capacity-drops, query-pipe-full-drops, too-old-drops, truncated-drops, empty-queries | drops/s |
+| powerdns_recursor.cache_usage | cache-hits, cache-misses, packet-cache-hits, packet-cache-misses | events/s |
+| powerdns_recursor.cache_size | cache, packet-cache, negative-cache | entries |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable webserver
+
+Follow [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.
+
+
+#### Enable HTTP API
+
+Follow [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.
+
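+To verify that the API is reachable, you can query the statistics endpoint directly. This is a minimal check, assuming the Recursor webserver listens on `127.0.0.1:8081` (the collector's default URL) and `secret` is the configured API key:
+
+```bash
+curl -H 'X-API-Key: secret' http://127.0.0.1:8081/api/v1/servers/localhost/statistics
+```
+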
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/powerdns_recursor.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/powerdns_recursor.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8081 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8081
+
+```
+</details>
+
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8081
+    username: admin
+    password: password
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:8081
+
+  - name: remote
+    url: http://203.0.113.0:8081
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `powerdns_recursor` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m powerdns_recursor
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `powerdns_recursor` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep powerdns_recursor
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep powerdns_recursor /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep powerdns_recursor
+```
+
+
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/metadata.yaml b/src/go/plugin/go.d/modules/powerdns_recursor/metadata.yaml
new file mode 100644
index 000000000..82cb99127
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/metadata.yaml
@@ -0,0 +1,240 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-powerdns_recursor
+ plugin_name: go.d.plugin
+ module_name: powerdns_recursor
+ monitored_instance:
+ name: PowerDNS Recursor
+ link: https://doc.powerdns.com/recursor/
+ icon_filename: powerdns.svg
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - powerdns
+ - dns
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors PowerDNS Recursor instances.
+
+ It collects metrics from [the internal webserver](https://doc.powerdns.com/recursor/http-api/index.html#built-in-webserver-and-http-api).
+
+ Used endpoints:
+
+ - [`/api/v1/servers/localhost/statistics`](https://doc.powerdns.com/recursor/common/api/endpoint-statistics.html)
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable webserver
+ description: |
+ Follow the [webserver](https://doc.powerdns.com/recursor/http-api/index.html#webserver) documentation.
+ - title: Enable HTTP API
+ description: |
+ Follow the [HTTP API](https://doc.powerdns.com/recursor/http-api/index.html#enabling-the-api) documentation.
+ configuration:
+ file:
+ name: go.d/powerdns_recursor.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8081
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8081
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8081
+ username: admin
+ password: password
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8081
+
+ - name: remote
+ url: http://203.0.113.0:8081
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: powerdns_recursor.questions_in
+ description: Incoming questions
+ unit: questions/s
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: tcp
+ - name: ipv6
+ - name: powerdns_recursor.questions_out
+ description: Outgoing questions
+ unit: questions/s
+ chart_type: line
+ dimensions:
+ - name: udp
+ - name: tcp
+ - name: ipv6
+ - name: throttled
+ - name: powerdns_recursor.answer_time
+ description: Queries answered within a time range
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: 0-1ms
+ - name: 1-10ms
+ - name: 10-100ms
+ - name: 100-1000ms
+ - name: slow
+ - name: powerdns_recursor.timeouts
+ description: Timeouts on outgoing UDP queries
+ unit: timeouts/s
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: ipv4
+ - name: ipv6
+ - name: powerdns_recursor.drops
+ description: Drops
+ unit: drops/s
+ chart_type: line
+ dimensions:
+ - name: over-capacity-drops
+ - name: query-pipe-full-drops
+ - name: too-old-drops
+ - name: truncated-drops
+ - name: empty-queries
+ - name: powerdns_recursor.cache_usage
+ description: Cache Usage
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: cache-hits
+ - name: cache-misses
+ - name: packet-cache-hits
+ - name: packet-cache-misses
+ - name: powerdns_recursor.cache_size
+ description: Cache Size
+ unit: entries
+ chart_type: line
+ dimensions:
+ - name: cache
+ - name: packet-cache
+ - name: negative-cache
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/metrics.go b/src/go/plugin/go.d/modules/powerdns_recursor/metrics.go
new file mode 100644
index 000000000..a7fbd63c1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/metrics.go
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns_recursor
+
+// https://doc.powerdns.com/recursor/metrics.html
+// https://docs.powerdns.com/recursor/performance.html#recursor-caches
+
+// The PowerDNS Recursor documentation has no section describing statistics objects;
+// fortunately, the Authoritative Server documentation does:
+// https://doc.powerdns.com/authoritative/http-api/statistics.html#objects
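+//
+// Example element (from testdata/v4.3.1/statistics.json): {"name": "cache-hits", "type": "StatisticItem", "value": "1"}.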
+type (
+ statisticMetrics []statisticMetric
+ statisticMetric struct {
+ Name string
+ Type string
+ Value interface{}
+ }
+)
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/recursor.go b/src/go/plugin/go.d/modules/powerdns_recursor/recursor.go
new file mode 100644
index 000000000..4b9c3e72f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/recursor.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns_recursor
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("powerdns_recursor", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Recursor {
+ return &Recursor{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8081",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Recursor struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (r *Recursor) Configuration() any {
+ return r.Config
+}
+
+func (r *Recursor) Init() error {
+ err := r.validateConfig()
+ if err != nil {
+ r.Errorf("config validation: %v", err)
+ return err
+ }
+
+ client, err := r.initHTTPClient()
+ if err != nil {
+ r.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ r.httpClient = client
+
+ cs, err := r.initCharts()
+ if err != nil {
+ r.Errorf("init charts: %v", err)
+ return err
+ }
+ r.charts = cs
+
+ return nil
+}
+
+func (r *Recursor) Check() error {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (r *Recursor) Charts() *module.Charts {
+ return r.charts
+}
+
+func (r *Recursor) Collect() map[string]int64 {
+ ms, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+
+func (r *Recursor) Cleanup() {
+ if r.httpClient == nil {
+ return
+ }
+ r.httpClient.CloseIdleConnections()
+}
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/recursor_test.go b/src/go/plugin/go.d/modules/powerdns_recursor/recursor_test.go
new file mode 100644
index 000000000..09475e223
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/recursor_test.go
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package powerdns_recursor
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer431statistics, _ = os.ReadFile("testdata/v4.3.1/statistics.json")
+ dataAuthoritativeStatistics, _ = os.ReadFile("testdata/authoritative/statistics.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer431statistics": dataVer431statistics,
+ "dataAuthoritativeStatistics": dataAuthoritativeStatistics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestRecursor_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Recursor{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestRecursor_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset URL": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ "fails on invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:38001",
+ },
+ Client: web.Client{
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ recursor := New()
+ recursor.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, recursor.Init())
+ } else {
+ assert.NoError(t, recursor.Init())
+ }
+ })
+ }
+}
+
+func TestRecursor_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (r *Recursor, cleanup func())
+ wantFail bool
+ }{
+ "success on valid response v4.3.1": {
+ prepare: preparePowerDNSRecursorV431,
+ },
+ "fails on response from PowerDNS Authoritative Server": {
+ wantFail: true,
+ prepare: preparePowerDNSRecursorAuthoritativeData,
+ },
+ "fails on 404 response": {
+ wantFail: true,
+ prepare: preparePowerDNSRecursor404,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: preparePowerDNSRecursorConnectionRefused,
+ },
+ "fails on response with invalid data": {
+ wantFail: true,
+ prepare: preparePowerDNSRecursorInvalidData,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ recursor, cleanup := test.prepare()
+ defer cleanup()
+ require.NoError(t, recursor.Init())
+
+ if test.wantFail {
+ assert.Error(t, recursor.Check())
+ } else {
+ assert.NoError(t, recursor.Check())
+ }
+ })
+ }
+}
+
+func TestRecursor_Charts(t *testing.T) {
+ recursor := New()
+ require.NoError(t, recursor.Init())
+ assert.NotNil(t, recursor.Charts())
+}
+
+func TestRecursor_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestRecursor_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (r *Recursor, cleanup func())
+ wantCollected map[string]int64
+ }{
+ "success on valid response v4.3.1": {
+ prepare: preparePowerDNSRecursorV431,
+ wantCollected: map[string]int64{
+ "all-outqueries": 41,
+ "answers-slow": 1,
+ "answers0-1": 1,
+ "answers1-10": 1,
+ "answers10-100": 1,
+ "answers100-1000": 1,
+ "auth-zone-queries": 1,
+ "auth4-answers-slow": 1,
+ "auth4-answers0-1": 1,
+ "auth4-answers1-10": 5,
+ "auth4-answers10-100": 35,
+ "auth4-answers100-1000": 1,
+ "auth6-answers-slow": 1,
+ "auth6-answers0-1": 1,
+ "auth6-answers1-10": 1,
+ "auth6-answers10-100": 1,
+ "auth6-answers100-1000": 1,
+ "cache-entries": 171,
+ "cache-hits": 1,
+ "cache-misses": 1,
+ "case-mismatches": 1,
+ "chain-resends": 1,
+ "client-parse-errors": 1,
+ "concurrent-queries": 1,
+ "cpu-msec-thread-0": 439,
+ "cpu-msec-thread-1": 445,
+ "cpu-msec-thread-2": 466,
+ "dlg-only-drops": 1,
+ "dnssec-authentic-data-queries": 1,
+ "dnssec-check-disabled-queries": 1,
+ "dnssec-queries": 1,
+ "dnssec-result-bogus": 1,
+ "dnssec-result-indeterminate": 1,
+ "dnssec-result-insecure": 1,
+ "dnssec-result-nta": 1,
+ "dnssec-result-secure": 5,
+ "dnssec-validations": 5,
+ "dont-outqueries": 1,
+ "ecs-queries": 1,
+ "ecs-responses": 1,
+ "edns-ping-matches": 1,
+ "edns-ping-mismatches": 1,
+ "empty-queries": 1,
+ "failed-host-entries": 1,
+ "fd-usage": 32,
+ "ignored-packets": 1,
+ "ipv6-outqueries": 1,
+ "ipv6-questions": 1,
+ "malloc-bytes": 1,
+ "max-cache-entries": 1000000,
+ "max-mthread-stack": 1,
+ "max-packetcache-entries": 500000,
+ "negcache-entries": 1,
+ "no-packet-error": 1,
+ "noedns-outqueries": 1,
+ "noerror-answers": 1,
+ "noping-outqueries": 1,
+ "nsset-invalidations": 1,
+ "nsspeeds-entries": 78,
+ "nxdomain-answers": 1,
+ "outgoing-timeouts": 1,
+ "outgoing4-timeouts": 1,
+ "outgoing6-timeouts": 1,
+ "over-capacity-drops": 1,
+ "packetcache-entries": 1,
+ "packetcache-hits": 1,
+ "packetcache-misses": 1,
+ "policy-drops": 1,
+ "policy-result-custom": 1,
+ "policy-result-drop": 1,
+ "policy-result-noaction": 1,
+ "policy-result-nodata": 1,
+ "policy-result-nxdomain": 1,
+ "policy-result-truncate": 1,
+ "qa-latency": 1,
+ "qname-min-fallback-success": 1,
+ "query-pipe-full-drops": 1,
+ "questions": 1,
+ "real-memory-usage": 44773376,
+ "rebalanced-queries": 1,
+ "resource-limits": 1,
+ "security-status": 3,
+ "server-parse-errors": 1,
+ "servfail-answers": 1,
+ "spoof-prevents": 1,
+ "sys-msec": 1520,
+ "tcp-client-overflow": 1,
+ "tcp-clients": 1,
+ "tcp-outqueries": 1,
+ "tcp-questions": 1,
+ "throttle-entries": 1,
+ "throttled-out": 1,
+ "throttled-outqueries": 1,
+ "too-old-drops": 1,
+ "truncated-drops": 1,
+ "udp-in-errors": 1,
+ "udp-noport-errors": 1,
+ "udp-recvbuf-errors": 1,
+ "udp-sndbuf-errors": 1,
+ "unauthorized-tcp": 1,
+ "unauthorized-udp": 1,
+ "unexpected-packets": 1,
+ "unreachables": 1,
+ "uptime": 1624,
+ "user-msec": 465,
+ "variable-responses": 1,
+ "x-our-latency": 1,
+ "x-ourtime-slow": 1,
+ "x-ourtime0-1": 1,
+ "x-ourtime1-2": 1,
+ "x-ourtime16-32": 1,
+ "x-ourtime2-4": 1,
+ "x-ourtime4-8": 1,
+ "x-ourtime8-16": 1,
+ },
+ },
+ "fails on response from PowerDNS Authoritative Server": {
+ prepare: preparePowerDNSRecursorAuthoritativeData,
+ },
+ "fails on 404 response": {
+ prepare: preparePowerDNSRecursor404,
+ },
+ "fails on connection refused": {
+ prepare: preparePowerDNSRecursorConnectionRefused,
+ },
+ "fails on response with invalid data": {
+ prepare: preparePowerDNSRecursorInvalidData,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ recursor, cleanup := test.prepare()
+ defer cleanup()
+ require.NoError(t, recursor.Init())
+
+ collected := recursor.Collect()
+
+ assert.Equal(t, test.wantCollected, collected)
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, recursor, collected)
+ }
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, rec *Recursor, collected map[string]int64) {
+ for _, chart := range *rec.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+ }
+ }
+}
+
+func preparePowerDNSRecursorV431() (*Recursor, func()) {
+ srv := preparePowerDNSRecursorEndpoint()
+ recursor := New()
+ recursor.URL = srv.URL
+
+ return recursor, srv.Close
+}
+
+func preparePowerDNSRecursorAuthoritativeData() (*Recursor, func()) {
+ srv := preparePowerDNSAuthoritativeEndpoint()
+ recursor := New()
+ recursor.URL = srv.URL
+
+ return recursor, srv.Close
+}
+
+func preparePowerDNSRecursorInvalidData() (*Recursor, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ recursor := New()
+ recursor.URL = srv.URL
+
+ return recursor, srv.Close
+}
+
+func preparePowerDNSRecursor404() (*Recursor, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ recursor := New()
+ recursor.URL = srv.URL
+
+ return recursor, srv.Close
+}
+
+func preparePowerDNSRecursorConnectionRefused() (*Recursor, func()) {
+ recursor := New()
+ recursor.URL = "http://127.0.0.1:38001"
+
+ return recursor, func() {}
+}
+
+func preparePowerDNSRecursorEndpoint() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathLocalStatistics:
+ _, _ = w.Write(dataVer431statistics)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
+
+func preparePowerDNSAuthoritativeEndpoint() *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathLocalStatistics:
+ _, _ = w.Write(dataAuthoritativeStatistics)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+}
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/testdata/authoritative/statistics.json b/src/go/plugin/go.d/modules/powerdns_recursor/testdata/authoritative/statistics.json
new file mode 100644
index 000000000..72bb2f0a2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/testdata/authoritative/statistics.json
@@ -0,0 +1,507 @@
+[
+ {
+ "name": "corrupt-packets",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "cpu-iowait",
+ "type": "StatisticItem",
+ "value": "513"
+ },
+ {
+ "name": "cpu-steal",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "deferred-cache-inserts",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "deferred-cache-lookup",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "deferred-packetcache-inserts",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "deferred-packetcache-lookup",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "dnsupdate-answers",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "dnsupdate-changes",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "dnsupdate-queries",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "dnsupdate-refused",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "fd-usage",
+ "type": "StatisticItem",
+ "value": "23"
+ },
+ {
+ "name": "incoming-notifications",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "key-cache-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "latency",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "meta-cache-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "open-tcp-connections",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "overload-drops",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "packetcache-hit",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "packetcache-miss",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "packetcache-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "qsize-q",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "query-cache-hit",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "query-cache-miss",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "query-cache-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "rd-queries",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "real-memory-usage",
+ "type": "StatisticItem",
+ "value": "164507648"
+ },
+ {
+ "name": "recursing-answers",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "recursing-questions",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "recursion-unanswered",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "ring-logmessages-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-logmessages-size",
+ "type": "StatisticItem",
+ "value": "10"
+ },
+ {
+ "name": "ring-noerror-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-noerror-queries-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "ring-nxdomain-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-nxdomain-queries-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "ring-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-queries-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "ring-remotes-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-remotes-corrupt-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-remotes-corrupt-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "ring-remotes-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "ring-remotes-unauth-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-remotes-unauth-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "ring-servfail-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-servfail-queries-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "ring-unauth-queries-capacity",
+ "type": "StatisticItem",
+ "value": "10000"
+ },
+ {
+ "name": "ring-unauth-queries-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "security-status",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "servfail-packets",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "signature-cache-size",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "signatures",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "sys-msec",
+ "type": "StatisticItem",
+ "value": "128"
+ },
+ {
+ "name": "tcp-answers",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "tcp-answers-bytes",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "tcp-queries",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "tcp4-answers",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "tcp4-answers-bytes",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "tcp4-queries",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "tcp6-answers",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "tcp6-answers-bytes",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "tcp6-queries",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "timedout-packets",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp-answers",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp-answers-bytes",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp-do-queries",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp-in-errors",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp-noport-errors",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp-queries",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp-recvbuf-errors",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp-sndbuf-errors",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp4-answers",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp4-answers-bytes",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp4-queries",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp6-answers",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp6-answers-bytes",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "udp6-queries",
+ "type": "StatisticItem",
+ "value": "0"
+ },
+ {
+ "name": "uptime",
+ "type": "StatisticItem",
+ "value": "207"
+ },
+ {
+ "name": "user-msec",
+ "type": "StatisticItem",
+ "value": "56"
+ },
+ {
+ "name": "response-by-qtype",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "response-sizes",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "response-by-rcode",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "logmessages",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": [
+ {
+ "name": "[webserver] 088688d6-9976-4e4d-a6aa-2272f8c6f173 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "[webserver] 662e4249-4e9a-42e7-b780-b81929875b8f HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "[webserver] 8c79870a-9a47-4952-9166-02710d146ab3 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "[webserver] dc029119-209f-4101-9e8f-82ab02d857d9 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "[webserver] fa61f546-8607-4771-bc9a-48ddc5a85dc0 HTTP Request \"/api/v1/servers/localhost/statistics\": Authentication by API Key failed",
+ "value": "1"
+ },
+ {
+ "name": "About to create 3 backend threads for UDP",
+ "value": "1"
+ },
+ {
+ "name": "Creating backend connection for TCP",
+ "value": "1"
+ },
+ {
+ "name": "Done launching threads, ready to distribute questions",
+ "value": "1"
+ },
+ {
+ "name": "Master/slave communicator launching",
+ "value": "1"
+ },
+ {
+ "name": "No master domains need notifications",
+ "value": "1"
+ }
+ ]
+ },
+ {
+ "name": "remotes",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "remotes-corrupt",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "remotes-unauth",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "noerror-queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "nxdomain-queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "servfail-queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ },
+ {
+ "name": "unauth-queries",
+ "size": "10000",
+ "type": "RingStatisticItem",
+ "value": []
+ }
+]
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/testdata/config.json b/src/go/plugin/go.d/modules/powerdns_recursor/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/testdata/config.yaml b/src/go/plugin/go.d/modules/powerdns_recursor/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/powerdns_recursor/testdata/v4.3.1/statistics.json b/src/go/plugin/go.d/modules/powerdns_recursor/testdata/v4.3.1/statistics.json
new file mode 100644
index 000000000..a31477959
--- /dev/null
+++ b/src/go/plugin/go.d/modules/powerdns_recursor/testdata/v4.3.1/statistics.json
@@ -0,0 +1,587 @@
+[
+ {
+ "name": "all-outqueries",
+ "type": "StatisticItem",
+ "value": "41"
+ },
+ {
+ "name": "answers-slow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "answers0-1",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "answers1-10",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "answers10-100",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "answers100-1000",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth-zone-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth4-answers-slow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth4-answers0-1",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth4-answers1-10",
+ "type": "StatisticItem",
+ "value": "5"
+ },
+ {
+ "name": "auth4-answers10-100",
+ "type": "StatisticItem",
+ "value": "35"
+ },
+ {
+ "name": "auth4-answers100-1000",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers-slow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers0-1",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers1-10",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers10-100",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "auth6-answers100-1000",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "cache-entries",
+ "type": "StatisticItem",
+ "value": "171"
+ },
+ {
+ "name": "cache-hits",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "cache-misses",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "case-mismatches",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "chain-resends",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "client-parse-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "concurrent-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "cpu-msec-thread-0",
+ "type": "StatisticItem",
+ "value": "439"
+ },
+ {
+ "name": "cpu-msec-thread-1",
+ "type": "StatisticItem",
+ "value": "445"
+ },
+ {
+ "name": "cpu-msec-thread-2",
+ "type": "StatisticItem",
+ "value": "466"
+ },
+ {
+ "name": "dlg-only-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-authentic-data-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-check-disabled-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-bogus",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-indeterminate",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-insecure",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-nta",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "dnssec-result-secure",
+ "type": "StatisticItem",
+ "value": "5"
+ },
+ {
+ "name": "dnssec-validations",
+ "type": "StatisticItem",
+ "value": "5"
+ },
+ {
+ "name": "dont-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ecs-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ecs-responses",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "edns-ping-matches",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "edns-ping-mismatches",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "empty-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "failed-host-entries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "fd-usage",
+ "type": "StatisticItem",
+ "value": "32"
+ },
+ {
+ "name": "ignored-packets",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ipv6-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "ipv6-questions",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "malloc-bytes",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "max-cache-entries",
+ "type": "StatisticItem",
+ "value": "1000000"
+ },
+ {
+ "name": "max-mthread-stack",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "max-packetcache-entries",
+ "type": "StatisticItem",
+ "value": "500000"
+ },
+ {
+ "name": "negcache-entries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "no-packet-error",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "noedns-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "noerror-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "noping-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "nsset-invalidations",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "nsspeeds-entries",
+ "type": "StatisticItem",
+ "value": "78"
+ },
+ {
+ "name": "nxdomain-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "outgoing-timeouts",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "outgoing4-timeouts",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "outgoing6-timeouts",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "over-capacity-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "packetcache-entries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "packetcache-hits",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "packetcache-misses",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-custom",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-drop",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-noaction",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-nodata",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-nxdomain",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "policy-result-truncate",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "qa-latency",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "qname-min-fallback-success",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "query-pipe-full-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "questions",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "real-memory-usage",
+ "type": "StatisticItem",
+ "value": "44773376"
+ },
+ {
+ "name": "rebalanced-queries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "resource-limits",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "security-status",
+ "type": "StatisticItem",
+ "value": "3"
+ },
+ {
+ "name": "server-parse-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "servfail-answers",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "spoof-prevents",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "sys-msec",
+ "type": "StatisticItem",
+ "value": "1520"
+ },
+ {
+ "name": "tcp-client-overflow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp-clients",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "tcp-questions",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "throttle-entries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "throttled-out",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "throttled-outqueries",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "too-old-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "truncated-drops",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-in-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-noport-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-recvbuf-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "udp-sndbuf-errors",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "unauthorized-tcp",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "unauthorized-udp",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "unexpected-packets",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "unreachables",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "uptime",
+ "type": "StatisticItem",
+ "value": "1624"
+ },
+ {
+ "name": "user-msec",
+ "type": "StatisticItem",
+ "value": "465"
+ },
+ {
+ "name": "variable-responses",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-our-latency",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime-slow",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime0-1",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime1-2",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime16-32",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime2-4",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime4-8",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "x-ourtime8-16",
+ "type": "StatisticItem",
+ "value": "1"
+ },
+ {
+ "name": "response-by-qtype",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "response-sizes",
+ "type": "MapStatisticItem",
+ "value": []
+ },
+ {
+ "name": "response-by-rcode",
+ "type": "MapStatisticItem",
+ "value": []
+ }
+]
diff --git a/src/go/plugin/go.d/modules/prometheus/README.md b/src/go/plugin/go.d/modules/prometheus/README.md
new file mode 120000
index 000000000..13e59d14d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/README.md
@@ -0,0 +1 @@
+integrations/prometheus_endpoint.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/prometheus/cache.go b/src/go/plugin/go.d/modules/prometheus/cache.go
new file mode 100644
index 000000000..12a4d24f9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/cache.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func newCache() *cache {
+ return &cache{entries: make(map[string]*cacheEntry)}
+}
+
+type (
+ cache struct {
+ entries map[string]*cacheEntry
+ }
+
+ cacheEntry struct {
+ seen bool
+ notSeenTimes int
+ charts []*module.Chart
+ }
+)
+
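+// hasP reports whether key was already present in the cache. In either case the entry
+// is created if missing and marked as seen for the current scrape, with its not-seen
+// counter reset (see removeStaleCharts in collect.go).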
+func (c *cache) hasP(key string) bool {
+ v, ok := c.entries[key]
+ if !ok {
+ v = &cacheEntry{}
+ c.entries[key] = v
+ }
+ v.seen = true
+ v.notSeenTimes = 0
+
+ return ok
+}
+
+func (c *cache) addChart(key string, chart *module.Chart) {
+ if v, ok := c.entries[key]; ok {
+ v.charts = append(v.charts, chart)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/prometheus/charts.go b/src/go/plugin/go.d/modules/prometheus/charts.go
new file mode 100644
index 000000000..c78f9b1b0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/charts.go
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+const (
+ prioDefault = module.Priority
+ prioGORuntime = prioDefault + 10
+)
+
+func (p *Prometheus) addGaugeChart(id, name, help string, labels labels.Labels) {
+ units := getChartUnits(name)
+
+ cType := module.Line
+ if strings.HasSuffix(units, "bytes") {
+ cType = module.Area
+ }
+
+ chart := &module.Chart{
+ ID: id,
+ Title: getChartTitle(name, help),
+ Units: units,
+ Fam: getChartFamily(name),
+ Ctx: getChartContext(p.application(), name),
+ Type: cType,
+ Priority: getChartPriority(name),
+ Dims: module.Dims{
+ {ID: id, Name: name, Div: precision},
+ },
+ }
+
+ for _, lbl := range labels {
+ chart.Labels = append(chart.Labels,
+ module.Label{
+ Key: lbl.Name,
+ Value: apostropheReplacer.Replace(lbl.Value),
+ },
+ )
+ }
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ return
+ }
+
+ p.cache.addChart(id, chart)
+}
+
+func (p *Prometheus) addCounterChart(id, name, help string, labels labels.Labels) {
+ units := getChartUnits(name)
+
+ switch units {
+ case "seconds", "time":
+ default:
+ units += "/s"
+ }
+
+ cType := module.Line
+ if strings.HasSuffix(units, "bytes/s") {
+ cType = module.Area
+ }
+
+ chart := &module.Chart{
+ ID: id,
+ Title: getChartTitle(name, help),
+ Units: units,
+ Fam: getChartFamily(name),
+ Ctx: getChartContext(p.application(), name),
+ Type: cType,
+ Priority: getChartPriority(name),
+ Dims: module.Dims{
+ {ID: id, Name: name, Algo: module.Incremental, Div: precision},
+ },
+ }
+ for _, lbl := range labels {
+ chart.Labels = append(chart.Labels,
+ module.Label{
+ Key: lbl.Name,
+ Value: apostropheReplacer.Replace(lbl.Value),
+ },
+ )
+ }
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ return
+ }
+
+ p.cache.addChart(id, chart)
+}
+
+func (p *Prometheus) addSummaryCharts(id, name, help string, labels labels.Labels, quantiles []prometheus.Quantile) {
+ units := getChartUnits(name)
+
+ switch units {
+ case "seconds", "time":
+ default:
+ units += "/s"
+ }
+
+ charts := module.Charts{
+ {
+ ID: id,
+ Title: getChartTitle(name, help),
+ Units: units,
+ Fam: getChartFamily(name),
+ Ctx: getChartContext(p.application(), name),
+ Priority: getChartPriority(name),
+ Dims: func() (dims module.Dims) {
+ for _, v := range quantiles {
+ s := formatFloat(v.Quantile())
+ dims = append(dims, &module.Dim{
+ ID: fmt.Sprintf("%s_quantile=%s", id, s),
+ Name: fmt.Sprintf("quantile_%s", s),
+ Div: precision * precision,
+ })
+ }
+ return dims
+ }(),
+ },
+ {
+ ID: id + "_sum",
+ Title: getChartTitle(name, help),
+ Units: units,
+ Fam: getChartFamily(name),
+ Ctx: getChartContext(p.application(), name) + "_sum",
+ Priority: getChartPriority(name),
+ Dims: module.Dims{
+ {ID: id + "_sum", Name: name + "_sum", Algo: module.Incremental, Div: precision},
+ },
+ },
+ {
+ ID: id + "_count",
+ Title: getChartTitle(name, help),
+ Units: "events/s",
+ Fam: getChartFamily(name),
+ Ctx: getChartContext(p.application(), name) + "_count",
+ Priority: getChartPriority(name),
+ Dims: module.Dims{
+ {ID: id + "_count", Name: name + "_count", Algo: module.Incremental},
+ },
+ },
+ }
+
+ for _, chart := range charts {
+ for _, lbl := range labels {
+ chart.Labels = append(chart.Labels, module.Label{
+ Key: lbl.Name,
+ Value: apostropheReplacer.Replace(lbl.Value),
+ })
+ }
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ continue
+ }
+ p.cache.addChart(id, chart)
+ }
+}
+
+func (p *Prometheus) addHistogramCharts(id, name, help string, labels labels.Labels, buckets []prometheus.Bucket) {
+ units := getChartUnits(name)
+
+ switch units {
+ case "seconds", "time":
+ default:
+ units += "/s"
+ }
+
+ charts := module.Charts{
+ {
+ ID: id,
+ Title: getChartTitle(name, help),
+ Units: "observations/s",
+ Fam: getChartFamily(name),
+ Ctx: getChartContext(p.application(), name),
+ Priority: getChartPriority(name),
+ Dims: func() (dims module.Dims) {
+ for _, v := range buckets {
+ s := formatFloat(v.UpperBound())
+ dims = append(dims, &module.Dim{
+ ID: fmt.Sprintf("%s_bucket=%s", id, s),
+ Name: fmt.Sprintf("bucket_%s", s),
+ Algo: module.Incremental,
+ })
+ }
+ return dims
+ }(),
+ },
+ {
+ ID: id + "_sum",
+ Title: getChartTitle(name, help),
+ Units: units,
+ Fam: getChartFamily(name),
+ Ctx: getChartContext(p.application(), name) + "_sum",
+ Priority: getChartPriority(name),
+ Dims: module.Dims{
+ {ID: id + "_sum", Name: name + "_sum", Algo: module.Incremental, Div: precision},
+ },
+ },
+ {
+ ID: id + "_count",
+ Title: getChartTitle(name, help),
+ Units: "events/s",
+ Fam: getChartFamily(name),
+ Ctx: getChartContext(p.application(), name) + "_count",
+ Priority: getChartPriority(name),
+ Dims: module.Dims{
+ {ID: id + "_count", Name: name + "_count", Algo: module.Incremental},
+ },
+ },
+ }
+
+ for _, chart := range charts {
+ for _, lbl := range labels {
+ chart.Labels = append(chart.Labels, module.Label{
+ Key: lbl.Name,
+ Value: apostropheReplacer.Replace(lbl.Value),
+ })
+ }
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ continue
+ }
+ p.cache.addChart(id, chart)
+ }
+}
+
+func (p *Prometheus) application() string {
+ if p.Application != "" {
+ return p.Application
+ }
+ return p.Name
+}
+
+func getChartTitle(name, help string) string {
+ if help == "" {
+ return fmt.Sprintf("Metric \"%s\"", name)
+ }
+
+ help = strings.Replace(help, "'", "", -1)
+ help = strings.TrimSuffix(help, ".")
+
+ return help
+}
+
+func getChartContext(app, name string) string {
+ if app == "" {
+ return fmt.Sprintf("prometheus.%s", name)
+ }
+ return fmt.Sprintf("prometheus.%s.%s", app, name)
+}
+
+func getChartFamily(metric string) (fam string) {
+ if strings.HasPrefix(metric, "go_") {
+ return "go"
+ }
+ if strings.HasPrefix(metric, "process_") {
+ return "process"
+ }
+ if parts := strings.SplitN(metric, "_", 3); len(parts) < 3 {
+ fam = metric
+ } else {
+ fam = parts[0] + "_" + parts[1]
+ }
+
+ // remove number suffix if any
+ // load1, load5, load15 => load
+ i := len(fam) - 1
+ for i >= 0 && fam[i] >= '0' && fam[i] <= '9' {
+ i--
+ }
+ if i > 0 {
+ return fam[:i+1]
+ }
+ return fam
+}
+
+func getChartUnits(metric string) string {
+ // https://prometheus.io/docs/practices/naming/#metric-names
+ // ...must have a single unit (i.e. do not mix seconds with milliseconds, or seconds with bytes).
+ // ...should have a suffix describing the unit, in plural form.
+ // Note that an accumulating count has total as a suffix, in addition to the unit if applicable
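+ //
+ // Examples (illustrative): "node_network_receive_bytes_total" => "bytes" (the "_total"
+ // suffix is stripped first), "ifOutOctets" => "bytes", "go_goroutines" => "goroutines".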
+
+ idx := strings.LastIndexByte(metric, '_')
+ if idx == -1 {
+ // snmp_exporter: e.g. ifOutUcastPkts, ifOutOctets.
+ if idx = strings.LastIndexFunc(metric, func(r rune) bool { return r >= 'A' && r <= 'Z' }); idx != -1 {
+ v := strings.ToLower(metric[idx:])
+ switch v {
+ case "pkts":
+ return "packets"
+ case "octets":
+ return "bytes"
+ case "mtu":
+ return "octets"
+ case "speed":
+ return "bits"
+ }
+ return v
+ }
+ return "events"
+ }
+ switch suffix := metric[idx:]; suffix {
+ case "_total", "_sum", "_count":
+ return getChartUnits(metric[:idx])
+ }
+ switch units := metric[idx+1:]; units {
+ case "hertz":
+ return "Hz"
+ default:
+ return units
+ }
+}
+
+func getChartPriority(name string) int {
+ if strings.HasPrefix(name, "go_") || strings.HasPrefix(name, "process_") {
+ return prioGORuntime
+ }
+ return prioDefault
+}
diff --git a/src/go/plugin/go.d/modules/prometheus/collect.go b/src/go/plugin/go.d/modules/prometheus/collect.go
new file mode 100644
index 000000000..8711745c9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/collect.go
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+const (
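+ // precision scales float samples so they can be stored as int64 while keeping three
+ // decimal places; the corresponding chart dimensions divide by it again (Div: precision).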
+ precision = 1000
+)
+
+func (p *Prometheus) collect() (map[string]int64, error) {
+ mfs, err := p.prom.Scrape()
+ if err != nil {
+ return nil, err
+ }
+
+ if mfs.Len() == 0 {
+ p.Warningf("endpoint '%s' returned 0 metric families", p.URL)
+ return nil, nil
+ }
+
+ // TODO: shouldn't modify the value from Config
+ if p.ExpectedPrefix != "" {
+ if !hasPrefix(mfs, p.ExpectedPrefix) {
+ return nil, fmt.Errorf("'%s' metrics have no expected prefix (%s)", p.URL, p.ExpectedPrefix)
+ }
+ p.ExpectedPrefix = ""
+ }
+
+ // TODO: shouldn't modify the value from Config
+ if p.MaxTS > 0 {
+ if n := calcMetrics(mfs); n > p.MaxTS {
+ return nil, fmt.Errorf("'%s' num of time series (%d) > limit (%d)", p.URL, n, p.MaxTS)
+ }
+ p.MaxTS = 0
+ }
+
+ mx := make(map[string]int64)
+
+ p.resetCache()
+ defer p.removeStaleCharts()
+
+ for _, mf := range mfs {
+ if strings.HasSuffix(mf.Name(), "_info") {
+ continue
+ }
+ if p.MaxTSPerMetric > 0 && len(mf.Metrics()) > p.MaxTSPerMetric {
+ p.Debugf("metric '%s' num of time series (%d) > limit (%d), skipping it",
+ mf.Name(), len(mf.Metrics()), p.MaxTSPerMetric)
+ continue
+ }
+
+ switch mf.Type() {
+ case model.MetricTypeGauge:
+ p.collectGauge(mx, mf)
+ case model.MetricTypeCounter:
+ p.collectCounter(mx, mf)
+ case model.MetricTypeSummary:
+ p.collectSummary(mx, mf)
+ case model.MetricTypeHistogram:
+ p.collectHistogram(mx, mf)
+ case model.MetricTypeUnknown:
+ p.collectUntyped(mx, mf)
+ }
+ }
+
+ return mx, nil
+}
+
+func (p *Prometheus) collectGauge(mx map[string]int64, mf *prometheus.MetricFamily) {
+ for _, m := range mf.Metrics() {
+ if m.Gauge() == nil || math.IsNaN(m.Gauge().Value()) {
+ continue
+ }
+
+ id := mf.Name() + p.joinLabels(m.Labels())
+
+ if !p.cache.hasP(id) {
+ p.addGaugeChart(id, mf.Name(), mf.Help(), m.Labels())
+ }
+
+ mx[id] = int64(m.Gauge().Value() * precision)
+ }
+}
+
+func (p *Prometheus) collectCounter(mx map[string]int64, mf *prometheus.MetricFamily) {
+ for _, m := range mf.Metrics() {
+ if m.Counter() == nil || math.IsNaN(m.Counter().Value()) {
+ continue
+ }
+
+ id := mf.Name() + p.joinLabels(m.Labels())
+
+ if !p.cache.hasP(id) {
+ p.addCounterChart(id, mf.Name(), mf.Help(), m.Labels())
+ }
+
+ mx[id] = int64(m.Counter().Value() * precision)
+ }
+}
+
+func (p *Prometheus) collectSummary(mx map[string]int64, mf *prometheus.MetricFamily) {
+ for _, m := range mf.Metrics() {
+ if m.Summary() == nil || len(m.Summary().Quantiles()) == 0 {
+ continue
+ }
+
+ id := mf.Name() + p.joinLabels(m.Labels())
+
+ if !p.cache.hasP(id) {
+ p.addSummaryCharts(id, mf.Name(), mf.Help(), m.Labels(), m.Summary().Quantiles())
+ }
+
+ for _, v := range m.Summary().Quantiles() {
+ if !math.IsNaN(v.Value()) {
+ dimID := fmt.Sprintf("%s_quantile=%s", id, formatFloat(v.Quantile()))
+ mx[dimID] = int64(v.Value() * precision * precision)
+ }
+ }
+
+ mx[id+"_sum"] = int64(m.Summary().Sum() * precision)
+ mx[id+"_count"] = int64(m.Summary().Count())
+ }
+}
+
+func (p *Prometheus) collectHistogram(mx map[string]int64, mf *prometheus.MetricFamily) {
+ for _, m := range mf.Metrics() {
+ if m.Histogram() == nil || len(m.Histogram().Buckets()) == 0 {
+ continue
+ }
+
+ id := mf.Name() + p.joinLabels(m.Labels())
+
+ if !p.cache.hasP(id) {
+ p.addHistogramCharts(id, mf.Name(), mf.Help(), m.Labels(), m.Histogram().Buckets())
+ }
+
+ for _, v := range m.Histogram().Buckets() {
+ if !math.IsNaN(v.CumulativeCount()) {
+ dimID := fmt.Sprintf("%s_bucket=%s", id, formatFloat(v.UpperBound()))
+ mx[dimID] = int64(v.CumulativeCount())
+ }
+ }
+
+ mx[id+"_sum"] = int64(m.Histogram().Sum() * precision)
+ mx[id+"_count"] = int64(m.Histogram().Count())
+ }
+}
+
+func (p *Prometheus) collectUntyped(mx map[string]int64, mf *prometheus.MetricFamily) {
+ for _, m := range mf.Metrics() {
+ if m.Untyped() == nil || math.IsNaN(m.Untyped().Value()) {
+ continue
+ }
+
+ if p.isFallbackTypeGauge(mf.Name()) {
+ id := mf.Name() + p.joinLabels(m.Labels())
+
+ if !p.cache.hasP(id) {
+ p.addGaugeChart(id, mf.Name(), mf.Help(), m.Labels())
+ }
+
+ mx[id] = int64(m.Untyped().Value() * precision)
+ }
+
+ if p.isFallbackTypeCounter(mf.Name()) || strings.HasSuffix(mf.Name(), "_total") {
+ id := mf.Name() + p.joinLabels(m.Labels())
+
+ if !p.cache.hasP(id) {
+ p.addCounterChart(id, mf.Name(), mf.Help(), m.Labels())
+ }
+
+ mx[id] = int64(m.Untyped().Value() * precision)
+ }
+ }
+}
+
+func (p *Prometheus) isFallbackTypeGauge(name string) bool {
+ return p.fallbackType.gauge != nil && p.fallbackType.gauge.MatchString(name)
+}
+
+func (p *Prometheus) isFallbackTypeCounter(name string) bool {
+ return p.fallbackType.counter != nil && p.fallbackType.counter.MatchString(name)
+}
+
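+// joinLabels builds the "-name=value" suffix that is appended to the metric name to
+// form chart/dimension IDs, sanitizing values: spaces become '_', escape sequences are
+// decoded (any remaining backslashes become '_'), and apostrophes are dropped.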
+func (p *Prometheus) joinLabels(labels labels.Labels) string {
+ var sb strings.Builder
+ for _, lbl := range labels {
+ name, val := lbl.Name, lbl.Value
+ if name == "" || val == "" {
+ continue
+ }
+
+ if strings.IndexByte(val, ' ') != -1 {
+ val = spaceReplacer.Replace(val)
+ }
+ if strings.IndexByte(val, '\\') != -1 {
+ if val = decodeLabelValue(val); strings.IndexByte(val, '\\') != -1 {
+ val = backslashReplacer.Replace(val)
+ }
+ }
+ if strings.IndexByte(val, '\'') != -1 {
+ val = apostropheReplacer.Replace(val)
+ }
+
+ sb.WriteString("-" + name + "=" + val)
+ }
+ return sb.String()
+}
+
+func (p *Prometheus) resetCache() {
+ for _, v := range p.cache.entries {
+ v.seen = false
+ }
+}
+
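+// maxNotSeenTimes is the number of consecutive collections a cached series may be
+// missing from the scrape before its charts are marked for removal.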
+const maxNotSeenTimes = 10
+
+func (p *Prometheus) removeStaleCharts() {
+ for k, v := range p.cache.entries {
+ if v.seen {
+ continue
+ }
+ if v.notSeenTimes++; v.notSeenTimes >= maxNotSeenTimes {
+ for _, chart := range v.charts {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ delete(p.cache.entries, k)
+ }
+ }
+}
+
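+// decodeLabelValue undoes Prometheus label value escaping via strconv.Unquote;
+// if unquoting fails, the original value is returned unchanged.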
+func decodeLabelValue(value string) string {
+ v, err := strconv.Unquote("\"" + value + "\"")
+ if err != nil {
+ return value
+ }
+ return v
+}
+
+var (
+ spaceReplacer = strings.NewReplacer(" ", "_")
+ backslashReplacer = strings.NewReplacer(`\`, "_")
+ apostropheReplacer = strings.NewReplacer("'", "")
+)
+
+func hasPrefix(mf map[string]*prometheus.MetricFamily, prefix string) bool {
+ for name := range mf {
+ if strings.HasPrefix(name, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+func calcMetrics(mfs prometheus.MetricFamilies) int {
+ var n int
+ for _, mf := range mfs {
+ n += len(mf.Metrics())
+ }
+ return n
+}
+
+func formatFloat(v float64) string {
+ return strconv.FormatFloat(v, 'f', -1, 64)
+}
diff --git a/src/go/plugin/go.d/modules/prometheus/config_schema.json b/src/go/plugin/go.d/modules/prometheus/config_schema.json
new file mode 100644
index 000000000..2df96b049
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/config_schema.json
@@ -0,0 +1,311 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Prometheus collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Prometheus metrics endpoint.",
+ "type": "string",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 10
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "expected_prefix": {
+ "title": "Expected prefix",
+ "description": "If an endpoint does not return at least one metric with the specified prefix, the data is not processed.",
+ "type": "string"
+ },
+ "app": {
+ "title": "Application",
+ "description": "If set, this value will be used in the chart context as 'prometheus.{app}.{metric_name}'.",
+ "type": "string"
+ },
+ "selector": {
+ "title": "Selectors",
+ "description": "Configuration for selecting and filtering a set of time series using Prometheus selectors. If left empty, no filtering is applied.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "allow": {
+ "title": "Allow",
+ "description": "Allow time series that match any of the specified [selectors](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/prometheus/selector#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Selector",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "deny": {
+ "title": "Deny",
+ "description": "Deny time series that match any of the specified [selectors](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/prometheus/selector#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Selector",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ },
+ "max_time_series": {
+ "title": "Time series limit",
+ "description": "If an endpoint returns more time series than this limit, the data is not processed. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 2000
+ },
+ "max_time_series_per_metric": {
+ "title": "Time series per metric limit",
+ "description": "Metrics with more time series than this limit are skipped. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 200
+ },
+ "fallback_type": {
+ "title": "Untyped metrics fallback",
+ "description": "Process Untyped metrics as Counter or Gauge instead of ignoring them.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "gauge": {
+ "title": "As Gauge",
+ "description": "Untyped metrics matching any [pattern](https://golang.org/pkg/path/filepath/#Match) will be processed as Gauge.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "Counter": {
+ "title": "As Counter",
+ "description": "Untyped metrics matching any [pattern](https://golang.org/pkg/path/filepath/#Match) will be processed as Counter.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "bearer_token_file": {
+ "title": "Bearer token file",
+ "description": "The path to the file with Bearer token.",
+ "type": "string"
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "expected_prefix",
+ "app"
+ ]
+ },
+ {
+ "title": "Selectors",
+ "fields": [
+ "selector"
+ ]
+ },
+ {
+ "title": "Limits",
+ "fields": [
+ "max_time_series",
+ "max_time_series_per_metric"
+ ]
+ },
+ {
+ "title": "Untyped fallback",
+ "fields": [
+ "fallback_type"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password",
+ "bearer_token_file"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "url": {
+ "ui:placeholder": "http://203.0.113.0"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "selector": {
+ "ui:help": "The logic is as follows: `(allow1 OR allow2) AND !(deny1 OR deny2)`."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/prometheus/init.go b/src/go/plugin/go.d/modules/prometheus/init.go
new file mode 100644
index 000000000..afb92af32
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/init.go
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (p *Prometheus) validateConfig() error {
+ if p.URL == "" {
+ return errors.New("'url' can not be empty")
+ }
+ return nil
+}
+
+func (p *Prometheus) initPrometheusClient() (prometheus.Prometheus, error) {
+ httpClient, err := web.NewHTTPClient(p.Client)
+ if err != nil {
+ return nil, fmt.Errorf("init HTTP client: %v", err)
+ }
+
+ req := p.Request.Copy()
+ if p.BearerTokenFile != "" {
+ token, err := os.ReadFile(p.BearerTokenFile)
+ if err != nil {
+ return nil, fmt.Errorf("bearer token file: %v", err)
+ }
+ req.Headers["Authorization"] = "Bearer " + string(token)
+ }
+
+ sr, err := p.Selector.Parse()
+ if err != nil {
+ return nil, fmt.Errorf("parsing selector: %v", err)
+ }
+
+ if sr != nil {
+ return prometheus.NewWithSelector(httpClient, req, sr), nil
+ }
+ return prometheus.New(httpClient, req), nil
+}
+
+func (p *Prometheus) initFallbackTypeMatcher(expr []string) (matcher.Matcher, error) {
+ if len(expr) == 0 {
+ return nil, nil
+ }
+
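+ // start from a matcher that never matches and OR each glob pattern into it,
+ // so the result matches a name if at least one pattern does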
+ m := matcher.FALSE()
+
+ for _, pattern := range expr {
+ v, err := matcher.NewGlobMatcher(pattern)
+ if err != nil {
+ return nil, fmt.Errorf("error on parsing pattern '%s': %v", pattern, err)
+ }
+ m = matcher.Or(m, v)
+ }
+
+ return m, nil
+}
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/4d_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/4d_server.md
new file mode 100644
index 000000000..479fbe132
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/4d_server.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/4d_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "4D Server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# 4D Server
+
+
+<img src="https://netdata.cloud/img/4d_server.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor 4D Server performance metrics for efficient application management and optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
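+
+For illustration, a hypothetical scrape fragment (metric names are made up) and how the rules above would classify it:
+
+```text
+# TYPE myapp_connections gauge
+myapp_connections{state="active"} 12
+myapp_requests_total 34567
+myapp_queue_depth 7
+```
+
+`myapp_connections` is a typed Gauge; `myapp_requests_total` is untyped but ends in `_total`, so it is processed as a Counter; `myapp_queue_depth` is untyped and is ignored unless a `fallback_type` pattern matches it.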
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
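+A minimal sketch of setting these globally at the top of `go.d/prometheus.conf` (values are illustrative):
+
+```yaml
+# applies to every job unless overridden per job
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```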
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/8430ft_modem.md b/src/go/plugin/go.d/modules/prometheus/integrations/8430ft_modem.md
new file mode 100644
index 000000000..d5087d8c1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/8430ft_modem.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/8430ft_modem.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "8430FT modem"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# 8430FT modem
+
+
+<img src="https://netdata.cloud/img/mtc.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/a10_acos_network_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/a10_acos_network_devices.md
new file mode 100644
index 000000000..886572d83
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/a10_acos_network_devices.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/a10_acos_network_devices.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "A10 ACOS network devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# A10 ACOS network devices
+
+
+<img src="https://netdata.cloud/img/a10-networks.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor A10 Networks device metrics for comprehensive management and analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/airthings_waveplus_air_sensor.md b/src/go/plugin/go.d/modules/prometheus/integrations/airthings_waveplus_air_sensor.md
new file mode 100644
index 000000000..d6353d5c4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/airthings_waveplus_air_sensor.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/airthings_waveplus_air_sensor.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Airthings Waveplus air sensor"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Airthings Waveplus air sensor
+
+
+<img src="https://netdata.cloud/img/airthings.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Waveplus radon sensor metrics for efficient indoor air quality monitoring and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/akamai_edge_dns_traffic.md b/src/go/plugin/go.d/modules/prometheus/integrations/akamai_edge_dns_traffic.md
new file mode 100644
index 000000000..d61275eb6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/akamai_edge_dns_traffic.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/akamai_edge_dns_traffic.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Akamai Edge DNS Traffic"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Akamai Edge DNS Traffic
+
+
+<img src="https://netdata.cloud/img/akamai.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track and analyze Akamai Edge DNS traffic for enhanced performance and security.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
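+
+As a purely illustrative sketch (the patterns below are placeholders, not metrics this exporter is known to expose), a selector that keeps HTTP-related series and drops Go runtime internals could look like:
+
+```yaml
+selector:
+  allow:
+    - http_*
+  deny:
+    - go_*
+    - process_*
+```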
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/akamai_global_traffic_management.md b/src/go/plugin/go.d/modules/prometheus/integrations/akamai_global_traffic_management.md
new file mode 100644
index 000000000..6c1dbbf3a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/akamai_global_traffic_management.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/akamai_global_traffic_management.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Akamai Global Traffic Management"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Akamai Global Traffic Management
+
+
+<img src="https://netdata.cloud/img/akamai.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As a Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As a Counter if the metric name has the '_total' suffix.
+- As a Summary if it has a 'quantile' label.
+- As a Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics exceeding this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
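+
+A hedged, concrete illustration (the name patterns are placeholders only): untyped series whose names end in `_requests` or `_errors` are read as Counters, while `*_temperature` series are read as Gauges:
+
+```yaml
+fallback_type:
+  counter:
+    - '*_requests'
+    - '*_errors'
+  gauge:
+    - '*_temperature'
+```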
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
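+
+If the endpoint exposes a very large number of time series, the default limits or timeout may need raising. A sketch with illustrative values only:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    timeout: 30
+    max_time_series: 5000
+    max_time_series_per_metric: 500
+```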
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/akami_cloudmonitor.md b/src/go/plugin/go.d/modules/prometheus/integrations/akami_cloudmonitor.md
new file mode 100644
index 000000000..480892401
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/akami_cloudmonitor.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/akami_cloudmonitor.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Akami Cloudmonitor"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Akami Cloudmonitor
+
+
+<img src="https://netdata.cloud/img/akamai.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Akamai Cloudmonitor provider metrics for comprehensive cloud performance management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As a Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As a Counter if the metric name has the '_total' suffix.
+- As a Summary if it has a 'quantile' label.
+- As a Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics exceeding this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
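+
+One common use, sketched here under the assumption that an empty `allow` list permits everything (see the selector README linked above), is cutting cardinality by dropping histogram bucket series:
+
+```yaml
+selector:
+  deny:
+    - '*_bucket'
+```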
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
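+
+If the exporter expects a token in a request header rather than basic auth, the `headers` option can carry it. The header name and value below are placeholders:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      Authorization: Bearer my-secret-token
+```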
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/alamos_fe2_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/alamos_fe2_server.md
new file mode 100644
index 000000000..1f5552ac6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/alamos_fe2_server.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/alamos_fe2_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Alamos FE2 server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Alamos FE2 server
+
+
+<img src="https://netdata.cloud/img/alamos_fe2.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Alamos FE2 systems for improved performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As a Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As a Counter if the metric name has the '_total' suffix.
+- As a Summary if it has a 'quantile' label.
+- As a Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics exceeding this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
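+
+When the exporter is reachable only through an HTTP proxy, the proxy options can be added to the same job (the proxy URL and credentials below are placeholders):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    proxy_url: http://proxy.local:3128
+    proxy_username: proxyuser
+    proxy_password: proxypass
+```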
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/alibaba_cloud.md b/src/go/plugin/go.d/modules/prometheus/integrations/alibaba_cloud.md
new file mode 100644
index 000000000..51a5203fe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/alibaba_cloud.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/alibaba_cloud.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Alibaba Cloud"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Alibaba Cloud
+
+
+<img src="https://netdata.cloud/img/alibaba-cloud.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Alibaba Cloud services and resources for efficient management and cost optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As a Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As a Counter if the metric name has the '_total' suffix.
+- As a Summary if it has a 'quantile' label.
+- As a Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics exceeding this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
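+
+If the server certificate is issued by a private CA, pointing `tls_ca` at that CA bundle is usually preferable to disabling verification. The file path below is a placeholder:
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_ca: /etc/ssl/private-ca.pem
+```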
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/altaro_backup.md b/src/go/plugin/go.d/modules/prometheus/integrations/altaro_backup.md
new file mode 100644
index 000000000..c5200c889
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/altaro_backup.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/altaro_backup.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Altaro Backup"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Altaro Backup
+
+
+<img src="https://netdata.cloud/img/altaro.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Altaro Backup performance metrics to ensure smooth data protection and recovery operations.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As a Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As a Counter if the metric name has the '_total' suffix.
+- As a Summary if it has a 'quantile' label.
+- As a Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, its data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics exceeding this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
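+
+Collection frequency can also be tuned per job. A sketch that polls every 30 seconds instead of the default 10 (the value is illustrative):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 30
+```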
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/amd_cpu_&_gpu.md b/src/go/plugin/go.d/modules/prometheus/integrations/amd_cpu_&_gpu.md
new file mode 100644
index 000000000..0eb582743
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/amd_cpu_&_gpu.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/amd_cpu_&_gpu.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AMD CPU & GPU"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AMD CPU & GPU
+
+
+<img src="https://netdata.cloud/img/amd.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor AMD System Management Interface performance for optimized hardware management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
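+
+For instance, a hypothetical filter that keeps only metrics whose names start with `amd_` (the pattern is purely illustrative; check the exporter's actual metric names) could look like this:
+
+```yaml
+selector:
+  allow:
+    - amd_*
+```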
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
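+
+If you want to watch new messages as they arrive instead of running a one-off query, the same journal can be followed live (note that this drops the invocation-ID filter, so it is not limited to the current service run):
+
+```bash
+journalctl --namespace=netdata --follow --grep prometheus
+```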
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/andrews_&_arnold_line_status.md b/src/go/plugin/go.d/modules/prometheus/integrations/andrews_&_arnold_line_status.md
new file mode 100644
index 000000000..52d282bab
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/andrews_&_arnold_line_status.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/andrews_&_arnold_line_status.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Andrews & Arnold line status"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Andrews & Arnold line status
+
+
+<img src="https://netdata.cloud/img/andrewsarnold.jpg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
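+
+On a long-running container the full log can be large; if it helps, you can narrow the output to recent entries first (the `10m` window is just an example):
+
+```bash
+docker logs --since 10m netdata 2>&1 | grep prometheus
+```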
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/apache_airflow.md b/src/go/plugin/go.d/modules/prometheus/integrations/apache_airflow.md
new file mode 100644
index 000000000..5a5d15074
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/apache_airflow.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/apache_airflow.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Apache Airflow"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Apache Airflow
+
+
+<img src="https://netdata.cloud/img/airflow.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Apache Airflow metrics to optimize task scheduling and workflow management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
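+
+  The debug output can be lengthy; redirecting it to a file makes it easier to search afterwards (the path below is just an example):
+
+  ```bash
+  ./go.d.plugin -d -m prometheus > /tmp/godplugin-prometheus-debug.log 2>&1
+  ```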
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/apache_flink.md b/src/go/plugin/go.d/modules/prometheus/integrations/apache_flink.md
new file mode 100644
index 000000000..325b15d67
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/apache_flink.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/apache_flink.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Apache Flink"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Apache Flink
+
+
+<img src="https://netdata.cloud/img/apache_flink.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Apache Flink metrics for efficient stream processing and application management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
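+
+If Netdata runs inside Docker, the same script can be invoked inside the container. This is only a sketch, assuming the stock image layout and a container named `netdata`:
+
+```bash
+docker exec -it netdata bash -c 'cd /etc/netdata && ./edit-config go.d/prometheus.conf'
+```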
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/apicast.md b/src/go/plugin/go.d/modules/prometheus/integrations/apicast.md
new file mode 100644
index 000000000..7c36df053
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/apicast.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/apicast.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "APIcast"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# APIcast
+
+
+<img src="https://netdata.cloud/img/apicast.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor APIcast performance metrics to optimize API gateway operations and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
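+
+As a hypothetical example (the metric name pattern is illustrative, not taken from the exporter), untyped metrics whose names start with `apicast_` could be read as gauges:
+
+```yaml
+fallback_type:
+  gauge:
+    - apicast_*
+```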
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/apple_time_machine.md b/src/go/plugin/go.d/modules/prometheus/integrations/apple_time_machine.md
new file mode 100644
index 000000000..e3a916ebc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/apple_time_machine.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/apple_time_machine.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Apple Time Machine"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/macOS Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Apple Time Machine
+
+
+<img src="https://netdata.cloud/img/apple.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Apple Time Machine backup metrics for efficient data protection and recovery.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
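+
+Before adding the job, it can be worth confirming that the endpoint actually serves Prometheus metrics; a quick check against the URL used in the example above (adjust host and port to your exporter) might be:
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```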
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/arm_hwcpipe.md b/src/go/plugin/go.d/modules/prometheus/integrations/arm_hwcpipe.md
new file mode 100644
index 000000000..14a4386f4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/arm_hwcpipe.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/arm_hwcpipe.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "ARM HWCPipe"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ARM HWCPipe
+
+
+<img src="https://netdata.cloud/img/arm.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep track of ARM-based Android devices and collect metrics for efficient performance optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
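+
+For example, the sketch below (using hypothetical `myapp_*` metric names, not metrics exposed by this exporter) keeps all `myapp_*` time series except the `myapp_debug_*` ones. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # hypothetical metric name pattern to keep
+      deny:
+        - myapp_debug_*  # hypothetical pattern for series to drop
+```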
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
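+
+As a concrete sketch, the job below (using hypothetical metric names not exposed by this exporter) tells the collector to treat the untyped `myapp_events_processed` series as a Counter and `myapp_queue_size` as a Gauge. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_events_processed   # hypothetical untyped metric treated as Counter
+      gauge:
+        - myapp_queue_size         # hypothetical untyped metric treated as Gauge
+```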
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aruba_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/aruba_devices.md
new file mode 100644
index 000000000..c848873b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aruba_devices.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aruba_devices.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Aruba devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Aruba devices
+
+
+<img src="https://netdata.cloud/img/aruba.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Aruba Networks device performance metrics for comprehensive network management and analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
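+
+For example, the sketch below (using hypothetical `myapp_*` metric names, not metrics exposed by this exporter) keeps all `myapp_*` time series except the `myapp_debug_*` ones. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # hypothetical metric name pattern to keep
+      deny:
+        - myapp_debug_*  # hypothetical pattern for series to drop
+```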
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
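+
+As a concrete sketch, the job below (using hypothetical metric names not exposed by this exporter) tells the collector to treat the untyped `myapp_events_processed` series as a Counter and `myapp_queue_size` as a Gauge. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_events_processed   # hypothetical untyped metric treated as Counter
+      gauge:
+        - myapp_queue_size         # hypothetical untyped metric treated as Gauge
+```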
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/arvancloud_cdn.md b/src/go/plugin/go.d/modules/prometheus/integrations/arvancloud_cdn.md
new file mode 100644
index 000000000..81bcbd70a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/arvancloud_cdn.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/arvancloud_cdn.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "ArvanCloud CDN"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ArvanCloud CDN
+
+
+<img src="https://netdata.cloud/img/arvancloud.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track and analyze ArvanCloud CDN and cloud services performance metrics for optimized delivery and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
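+
+For example, the sketch below (using hypothetical `myapp_*` metric names, not metrics exposed by this exporter) keeps all `myapp_*` time series except the `myapp_debug_*` ones. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # hypothetical metric name pattern to keep
+      deny:
+        - myapp_debug_*  # hypothetical pattern for series to drop
+```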
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
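+
+As a concrete sketch, the job below (using hypothetical metric names not exposed by this exporter) tells the collector to treat the untyped `myapp_events_processed` series as a Counter and `myapp_queue_size` as a Gauge. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_events_processed   # hypothetical untyped metric treated as Counter
+      gauge:
+        - myapp_queue_size         # hypothetical untyped metric treated as Gauge
+```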
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/audisto.md b/src/go/plugin/go.d/modules/prometheus/integrations/audisto.md
new file mode 100644
index 000000000..81c450889
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/audisto.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/audisto.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Audisto"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Audisto
+
+
+<img src="https://netdata.cloud/img/audisto.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Audisto SEO and website metrics for improved search performance and optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
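+
+For example, the sketch below (using hypothetical `myapp_*` metric names, not metrics exposed by this exporter) keeps all `myapp_*` time series except the `myapp_debug_*` ones. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # hypothetical metric name pattern to keep
+      deny:
+        - myapp_debug_*  # hypothetical pattern for series to drop
+```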
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
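+
+As a concrete sketch, the job below (using hypothetical metric names not exposed by this exporter) tells the collector to treat the untyped `myapp_events_processed` series as a Counter and `myapp_queue_size` as a Gauge. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_events_processed   # hypothetical untyped metric treated as Counter
+      gauge:
+        - myapp_queue_size         # hypothetical untyped metric treated as Gauge
+```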
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/authlog.md b/src/go/plugin/go.d/modules/prometheus/integrations/authlog.md
new file mode 100644
index 000000000..86f20e30b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/authlog.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/authlog.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AuthLog"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Logs Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AuthLog
+
+
+<img src="https://netdata.cloud/img/linux.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor authentication logs for security insights and efficient access management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
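+
+For example, the sketch below (using hypothetical `myapp_*` metric names, not metrics exposed by this exporter) keeps all `myapp_*` time series except the `myapp_debug_*` ones. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # hypothetical metric name pattern to keep
+      deny:
+        - myapp_debug_*  # hypothetical pattern for series to drop
+```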
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
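+
+As a concrete sketch, the job below (using hypothetical metric names not exposed by this exporter) tells the collector to treat the untyped `myapp_events_processed` series as a Counter and `myapp_queue_size` as a Gauge. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_events_processed   # hypothetical untyped metric treated as Counter
+      gauge:
+        - myapp_queue_size         # hypothetical untyped metric treated as Gauge
+```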
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_compute_instances.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_compute_instances.md
new file mode 100644
index 000000000..c31b72dc1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_compute_instances.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_compute_instances.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS EC2 Compute instances"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS EC2 Compute instances
+
+
+<img src="https://netdata.cloud/img/aws-ec2.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track key AWS EC2 instance metrics for optimized performance and cost management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
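+
+For example, the sketch below (using hypothetical `myapp_*` metric names, not metrics exposed by this exporter) keeps all `myapp_*` time series except the `myapp_debug_*` ones. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # hypothetical metric name pattern to keep
+      deny:
+        - myapp_debug_*  # hypothetical pattern for series to drop
+```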
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
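+
+As a concrete sketch, the job below (using hypothetical metric names not exposed by this exporter) tells the collector to treat the untyped `myapp_events_processed` series as a Counter and `myapp_queue_size` as a Gauge. It assumes the option is set per job, like the other options listed above:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_events_processed   # hypothetical untyped metric treated as Counter
+      gauge:
+        - myapp_queue_size         # hypothetical untyped metric treated as Gauge
+```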
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_spot_instance.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_spot_instance.md
new file mode 100644
index 000000000..908624b4c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_spot_instance.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_ec2_spot_instance.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS EC2 Spot Instance"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS EC2 Spot Instance
+
+
+<img src="https://netdata.cloud/img/aws-ec2.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
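+
+As an illustration, the two options above can be combined in a single job. The URL and metric names below are placeholders for this sketch rather than metrics exposed by a specific exporter; the patterns assume the glob-style wildcards supported by the selector and shell file name pattern syntaxes linked above.
+
+```yaml
+jobs:
+  - name: example
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*            # collect only series whose names start with "myapp_"
+    fallback_type:
+      gauge:
+        - myapp_*_in_flight  # process these untyped metrics as gauges
+      counter:
+        - myapp_*_events     # process these untyped metrics as counters
+```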
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_ecs.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ecs.md
new file mode 100644
index 000000000..aed1877b8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_ecs.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_ecs.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS ECS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS ECS
+
+
+<img src="https://netdata.cloud/img/amazon-ecs.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on AWS ECS services and resources for optimized container management and orchestration.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
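+
+As an illustration, the two options above can be combined in a single job. The URL and metric names below are placeholders for this sketch rather than metrics exposed by a specific exporter; the patterns assume the glob-style wildcards supported by the selector and shell file name pattern syntaxes linked above.
+
+```yaml
+jobs:
+  - name: example
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*            # collect only series whose names start with "myapp_"
+    fallback_type:
+      gauge:
+        - myapp_*_in_flight  # process these untyped metrics as gauges
+      counter:
+        - myapp_*_events     # process these untyped metrics as counters
+```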
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_health_events.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_health_events.md
new file mode 100644
index 000000000..dd1d4bc6a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_health_events.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_health_events.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS Health events"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS Health events
+
+
+<img src="https://netdata.cloud/img/aws.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track AWS service health metrics for proactive incident management and resolution.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
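+
+As an illustration, the two options above can be combined in a single job. The URL and metric names below are placeholders for this sketch rather than metrics exposed by a specific exporter; the patterns assume the glob-style wildcards supported by the selector and shell file name pattern syntaxes linked above.
+
+```yaml
+jobs:
+  - name: example
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*            # collect only series whose names start with "myapp_"
+    fallback_type:
+      gauge:
+        - myapp_*_in_flight  # process these untyped metrics as gauges
+      counter:
+        - myapp_*_events     # process these untyped metrics as counters
+```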
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_instance_health.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_instance_health.md
new file mode 100644
index 000000000..82da72d23
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_instance_health.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_instance_health.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS instance health"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS instance health
+
+
+<img src="https://netdata.cloud/img/aws.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor the health of AWS instances for improved performance and availability.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
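+
+As an illustration, the two options above can be combined in a single job. The URL and metric names below are placeholders for this sketch rather than metrics exposed by a specific exporter; the patterns assume the glob-style wildcards supported by the selector and shell file name pattern syntaxes linked above.
+
+```yaml
+jobs:
+  - name: example
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*            # collect only series whose names start with "myapp_"
+    fallback_type:
+      gauge:
+        - myapp_*_in_flight  # process these untyped metrics as gauges
+      counter:
+        - myapp_*_events     # process these untyped metrics as counters
+```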
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_quota.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_quota.md
new file mode 100644
index 000000000..67970fdf8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_quota.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_quota.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS Quota"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS Quota
+
+
+<img src="https://netdata.cloud/img/aws.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor AWS service quotas for effective resource usage and cost management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
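+
+As an illustration, the two options above can be combined in a single job. The URL and metric names below are placeholders for this sketch rather than metrics exposed by a specific exporter; the patterns assume the glob-style wildcards supported by the selector and shell file name pattern syntaxes linked above.
+
+```yaml
+jobs:
+  - name: example
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*            # collect only series whose names start with "myapp_"
+    fallback_type:
+      gauge:
+        - myapp_*_in_flight  # process these untyped metrics as gauges
+      counter:
+        - myapp_*_events     # process these untyped metrics as counters
+```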
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md
new file mode 100644
index 000000000..acd1e7101
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_rds.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS RDS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS RDS
+
+
+<img src="https://netdata.cloud/img/aws-rds.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
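+
+As a sketch, assuming this exporter exposes series whose names start with `aws_rds_` and `rdsosmetrics_` (the exact names depend on the exporter version), a filter that keeps only CPU-related series could look like:
+
+```yaml
+# illustrative patterns - adjust to the series your exporter actually exposes
+selector:
+  allow:
+    - aws_rds_cpu*
+    - rdsosmetrics_cpu*
+```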
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
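+
+For example, assuming the exporter exposes untyped series named `aws_rds_status` and `aws_rds_restarts` (hypothetical names), you could chart them explicitly instead of losing them:
+
+```yaml
+# hypothetical untyped series mapped to chart types
+fallback_type:
+  gauge:
+    - aws_rds_status
+  counter:
+    - aws_rds_restarts
+```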
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_s3_buckets.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_s3_buckets.md
new file mode 100644
index 000000000..e4628d718
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_s3_buckets.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_s3_buckets.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS S3 buckets"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS S3 buckets
+
+
+<img src="https://netdata.cloud/img/aws-s3.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
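+
+As a sketch, assuming the exporter exposes per-bucket series whose names start with `s3_` (the names below are illustrative), you could keep only object count and size series:
+
+```yaml
+# illustrative: collect object count and size series, drop everything else
+selector:
+  allow:
+    - s3_objects*
+```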
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/aws_sqs.md b/src/go/plugin/go.d/modules/prometheus/integrations/aws_sqs.md
new file mode 100644
index 000000000..b2760e205
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/aws_sqs.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/aws_sqs.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "AWS SQS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# AWS SQS
+
+
+<img src="https://netdata.cloud/img/aws-sqs.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track AWS SQS messaging metrics for efficient message processing and queue management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
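+
+For instance, if the exporter happened to publish untyped queue-depth series (the names below are hypothetical), you could chart them as gauges:
+
+```yaml
+# hypothetical untyped series charted as gauges
+fallback_type:
+  gauge:
+    - sqs_messages_visible
+    - sqs_messages_delayed
+```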
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_ad_app_passwords.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_ad_app_passwords.md
new file mode 100644
index 000000000..1f1ce0a85
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_ad_app_passwords.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/azure_ad_app_passwords.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Azure AD App passwords"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Azure AD App passwords
+
+
+<img src="https://netdata.cloud/img/azure.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Safeguard and track Azure App secrets for enhanced security and access management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
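+
+The same filter can also be set per job. A minimal sketch, assuming the exposed series names start with `azure_` (hypothetical):
+
+```yaml
+jobs:
+  - name: azure_app_secrets
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - azure_*
+```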
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_application.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_application.md
new file mode 100644
index 000000000..55f124658
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_application.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/azure_application.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Azure application"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Azure application
+
+
+<img src="https://netdata.cloud/img/azure.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Azure Monitor metrics for comprehensive resource management and performance optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
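+
+##### Raise the time series limits
+
+For high-cardinality endpoints you can raise the `max_time_series` and `max_time_series_per_metric` options described above. A minimal sketch (the values are illustrative, not recommendations):
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    max_time_series: 5000
+    max_time_series_per_metric: 500
+
+```
+</details>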
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_elastic_pool_sql.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_elastic_pool_sql.md
new file mode 100644
index 000000000..0fa89bff2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_elastic_pool_sql.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/azure_elastic_pool_sql.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Azure Elastic Pool SQL"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Azure Elastic Pool SQL
+
+
+<img src="https://netdata.cloud/img/azure-elastic-sql.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Azure Elastic SQL performance metrics for efficient database management and query optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
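+
+Alternatively, instead of skipping verification you can point the collector at the CA that signed the server certificate, using the `tls_ca` option from the table above. A sketch (the path is a placeholder):
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_ca: /path/to/ca.pem
+```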
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_resources.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_resources.md
new file mode 100644
index 000000000..c63e0ad1d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_resources.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/azure_resources.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Azure Resources"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Azure Resources
+
+
+<img src="https://netdata.cloud/img/azure.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Azure resources' vital metrics for efficient cloud management and cost optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (series without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
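+
+For instance, a purely illustrative sketch (the metric name patterns are hypothetical, not taken from this exporter) that keeps every series starting with `azure_` except scrape metadata:
+
+```yaml
+selector:
+  allow:
+    - azure_*
+  deny:
+    - azure_scrape_*
+```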
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
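+
+As a concrete sketch (again with hypothetical metric name patterns), the following treats untyped series ending in `_bytes` as gauges and untyped `*_requests` series as counters; note the quotes, since YAML would otherwise read a leading `*` as an alias:
+
+```yaml
+fallback_type:
+  counter:
+    - '*_requests'
+  gauge:
+    - '*_bytes'
+```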
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_service_bus.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_service_bus.md
new file mode 100644
index 000000000..c1a641aaa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_service_bus.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/azure_service_bus.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Azure Service Bus"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Azure Service Bus
+
+
+<img src="https://netdata.cloud/img/azure-service-bus.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Azure Service Bus messaging metrics for optimized communication and integration.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (series without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
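+
+To illustrate with hypothetical series (not taken from this exporter), suppose an endpoint exposes the following without any '# TYPE' lines:
+
+```
+myapp_requests_total 42
+myapp_latency_seconds{quantile="0.99"} 0.12
+myapp_queue_depth 7
+```
+
+The first is treated as a Counter (`_total` suffix), the second as a Summary (`quantile` label), and the third is ignored unless a `fallback_type` pattern matches it.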
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
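+
+If you prefer a fixed time window over the last service invocation, a variant such as the following should also work (adjust the window to taste):
+
+```bash
+journalctl --namespace=netdata --since "1 hour ago" --grep prometheus
+```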
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/azure_sql.md b/src/go/plugin/go.d/modules/prometheus/integrations/azure_sql.md
new file mode 100644
index 000000000..98a933eb6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/azure_sql.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/azure_sql.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Azure SQL"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Azure SQL
+
+
+<img src="https://netdata.cloud/img/azure-sql.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Azure SQL performance metrics for efficient database management and query performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (series without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
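+
+Assuming the default plugin directory shown above, the three steps can also be collapsed into a single command:
+
+```bash
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```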
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bigquery.md b/src/go/plugin/go.d/modules/prometheus/integrations/bigquery.md
new file mode 100644
index 000000000..a76ff8fb3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bigquery.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/bigquery.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "BigQuery"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# BigQuery
+
+
+<img src="https://netdata.cloud/img/bigquery.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Google BigQuery metrics for optimized data processing and analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (series without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
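+
+##### Behind an HTTP proxy
+
+A hedged sketch using the `proxy_*` options from the table above; the proxy address and credentials are placeholders.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    proxy_url: http://127.0.0.1:3128
+    proxy_username: proxyuser
+    proxy_password: proxypass
+
+```
+</details>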
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bird_routing_daemon.md b/src/go/plugin/go.d/modules/prometheus/integrations/bird_routing_daemon.md
new file mode 100644
index 000000000..43318c4c5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bird_routing_daemon.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/bird_routing_daemon.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Bird Routing Daemon"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Bird Routing Daemon
+
+
+<img src="https://netdata.cloud/img/bird.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Bird Routing Daemon metrics for optimized network routing and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (series without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
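+
+For example, to look at only the most recent entries:
+
+```bash
+grep prometheus /var/log/netdata/collector.log | tail -n 50
+```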
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/blackbox.md b/src/go/plugin/go.d/modules/prometheus/integrations/blackbox.md
new file mode 100644
index 000000000..d37019b6d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/blackbox.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/blackbox.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Blackbox"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Blackbox
+
+
+<img src="https://netdata.cloud/img/prometheus.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track external service availability and response times with Blackbox monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (series without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name pattern match (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bobcat_miner_300.md b/src/go/plugin/go.d/modules/prometheus/integrations/bobcat_miner_300.md
new file mode 100644
index 000000000..c00ccaa7d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bobcat_miner_300.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/bobcat_miner_300.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Bobcat Miner 300"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Bobcat Miner 300
+
+
+<img src="https://netdata.cloud/img/bobcat.jpg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Bobcat equipment metrics for optimized performance and maintenance management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (without '# TYPE') as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
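+
+As a minimal sketch, both options can be combined in a single job definition. The `bobcat_*` metric name patterns below are assumptions used for illustration; check the exporter's `/metrics` output for the real names.
+
+```yaml
+jobs:
+  - name: bobcat
+    url: http://127.0.0.1:9090/metrics
+    # keep only the exporter's own series (assumed prefix)
+    selector:
+      allow:
+        - bobcat_*
+    # process matching untyped metrics as gauges
+    fallback_type:
+      gauge:
+        - bobcat_temperature_*
+```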
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/borg_backup.md b/src/go/plugin/go.d/modules/prometheus/integrations/borg_backup.md
new file mode 100644
index 000000000..67a175340
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/borg_backup.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/borg_backup.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Borg backup"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Borg backup
+
+
+<img src="https://netdata.cloud/img/borg.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Borg backup performance metrics for efficient data protection and recovery.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (without '# TYPE') as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
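+
+For reference, a hedged sketch of a job that uses both options together. The `go_*` and `borg_*` patterns are illustrative assumptions; substitute the metric names your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: borg
+    url: http://127.0.0.1:9090/metrics
+    # drop Go runtime housekeeping series, keep everything else
+    selector:
+      deny:
+        - go_*
+    # process matching untyped metrics as gauges
+    fallback_type:
+      gauge:
+        - borg_*
+```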
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bosh.md b/src/go/plugin/go.d/modules/prometheus/integrations/bosh.md
new file mode 100644
index 000000000..c8fc354f3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bosh.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/bosh.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "BOSH"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Provisioning Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# BOSH
+
+
+<img src="https://netdata.cloud/img/bosh.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (without '# TYPE') as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
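+
+A minimal sketch combining both options in one job. The `bosh_*` patterns are illustrative assumptions; adjust them to the metric names the exporter actually exposes.
+
+```yaml
+jobs:
+  - name: bosh
+    url: http://127.0.0.1:9090/metrics
+    # keep only the exporter's own series (assumed prefix)
+    selector:
+      allow:
+        - bosh_*
+    # process matching untyped metrics as gauges
+    fallback_type:
+      gauge:
+        - bosh_*
+```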
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bpftrace_variables.md b/src/go/plugin/go.d/modules/prometheus/integrations/bpftrace_variables.md
new file mode 100644
index 000000000..76ed9a2f0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bpftrace_variables.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/bpftrace_variables.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "bpftrace variables"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# bpftrace variables
+
+
+<img src="https://netdata.cloud/img/bpftrace.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track bpftrace metrics for advanced performance analysis and troubleshooting.
+
+
+Metrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (without '# TYPE') as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
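+
+A small sketch of a job that applies both options. If the exporter exposes some series without a '# TYPE' line, `fallback_type` lets you keep them instead of dropping them; the `bpftrace_*` patterns below are illustrative assumptions.
+
+```yaml
+jobs:
+  - name: bpftrace
+    url: http://127.0.0.1:9090/metrics
+    # keep only the exporter's own series (assumed prefix)
+    selector:
+      allow:
+        - bpftrace_*
+    # process matching untyped metrics as gauges
+    fallback_type:
+      gauge:
+        - bpftrace_*
+```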
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/bungeecord.md b/src/go/plugin/go.d/modules/prometheus/integrations/bungeecord.md
new file mode 100644
index 000000000..cebba3d2f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/bungeecord.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/bungeecord.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "BungeeCord"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Gaming"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# BungeeCord
+
+
+<img src="https://netdata.cloud/img/bungee.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track BungeeCord proxy server metrics for efficient load balancing and performance management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (without '# TYPE') as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
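+
+A minimal sketch of a job combining both options. The metric name patterns (`jvm_*`, `bungeecord_*`) are assumptions for illustration; check the exporter's output for the actual names.
+
+```yaml
+jobs:
+  - name: bungeecord
+    url: http://127.0.0.1:9090/metrics
+    # drop JVM housekeeping series, keep the rest
+    selector:
+      deny:
+        - jvm_*
+    # process matching untyped metrics as gauges
+    fallback_type:
+      gauge:
+        - bungeecord_*
+```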
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cadvisor.md b/src/go/plugin/go.d/modules/prometheus/integrations/cadvisor.md
new file mode 100644
index 000000000..a40221af5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cadvisor.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cadvisor.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "cAdvisor"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Containers and VMs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# cAdvisor
+
+
+<img src="https://netdata.cloud/img/cadvisor.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor container resource usage and performance metrics with cAdvisor for efficient container management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (without '# TYPE') as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
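+
+A minimal sketch of a job that narrows collection to container metrics and handles untyped leftovers. cAdvisor metric names generally start with `container_`; still, treat the patterns (and the port) as illustrative and adjust them to your deployment.
+
+```yaml
+jobs:
+  - name: cadvisor
+    url: http://127.0.0.1:9090/metrics
+    # keep only per-container series
+    selector:
+      allow:
+        - container_*
+    # process matching untyped metrics as gauges
+    fallback_type:
+      gauge:
+        - container_*
+```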
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/celery.md b/src/go/plugin/go.d/modules/prometheus/integrations/celery.md
new file mode 100644
index 000000000..2cb4e8219
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/celery.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/celery.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Celery"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Task Queues"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Celery
+
+
+<img src="https://netdata.cloud/img/celery.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Celery task queue metrics for optimized task processing and resource management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**Untyped metrics that don't match any of the rules above are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
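+
+A minimal sketch of how this looks in `go.d/prometheus.conf`, assuming the usual go.d layout where top-level values apply to every job and can be overridden per job (all values below are illustrative):
+
+```yaml
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: celery_exporter
+    url: http://127.0.0.1:9090/metrics
+    update_every: 30  # per-job override of the global value
+```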
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md b/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md
new file mode 100644
index 000000000..b741f95ff
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md
@@ -0,0 +1,326 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/certificate_transparency.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Certificate Transparency"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Certificate Transparency
+
+
+<img src="https://netdata.cloud/img/ct.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track certificate transparency log metrics for enhanced SSL/TLS certificate management and security.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**Untyped metrics that don't match any of the rules above are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
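+
+##### HTTPS with a custom certificate authority
+
+An illustrative configuration that verifies the exporter against a private CA using the `tls_ca`, `tls_cert`, and `tls_key` options from the table above. The file paths are placeholders.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_ca: /etc/ssl/private-ca.pem
+    tls_cert: /etc/ssl/client.pem
+    tls_key: /etc/ssl/client.key
+
+```
+</details>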
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/checkpoint_device.md b/src/go/plugin/go.d/modules/prometheus/integrations/checkpoint_device.md
new file mode 100644
index 000000000..4d63f806e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/checkpoint_device.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/checkpoint_device.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Checkpoint device"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Checkpoint device
+
+
+<img src="https://netdata.cloud/img/checkpoint.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Check Point firewall and security metrics for enhanced network protection and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**Untyped metrics that don't match any of the rules above are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
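+
+##### Collect through an HTTP proxy
+
+An illustrative configuration that reaches the exporter through an HTTP proxy using the `proxy_url`, `proxy_username`, and `proxy_password` options from the table above. The proxy address and credentials are placeholders.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    proxy_url: http://127.0.0.1:3128
+    proxy_username: proxyuser
+    proxy_password: proxypass
+
+```
+</details>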
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/chia.md b/src/go/plugin/go.d/modules/prometheus/integrations/chia.md
new file mode 100644
index 000000000..158b6990e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/chia.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/chia.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Chia"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Blockchain Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Chia
+
+
+<img src="https://netdata.cloud/img/chia.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Chia blockchain metrics for optimized farming and resource allocation.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**Untyped metrics that don't match any of the rules above are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
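+
+##### Tuning limits and timeouts
+
+An illustrative configuration that shortens the HTTP timeout and raises the time series limits using the `timeout`, `max_time_series`, and `max_time_series_per_metric` options described above. The values are examples only.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    timeout: 5
+    max_time_series: 5000
+    max_time_series_per_metric: 500
+
+```
+</details>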
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md b/src/go/plugin/go.d/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md
new file mode 100644
index 000000000..71f6460f3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/christ_elektronik_clm5ip_power_panel.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Christ Elektronik CLM5IP power panel"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Christ Elektronik CLM5IP power panel
+
+
+<img src="https://netdata.cloud/img/christelec.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**Untyped metrics that don't match any of the rules above are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
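+
+##### Custom request headers
+
+An illustrative configuration that sends an extra HTTP header with each scrape via the `headers` option, assuming it accepts a simple key-value mapping. The header name and value are placeholders.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      X-Api-Key: my-secret-key
+
+```
+</details>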
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cilium_agent.md b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_agent.md
new file mode 100644
index 000000000..77369adaa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_agent.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cilium_agent.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Cilium Agent"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Kubernetes"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Cilium Agent
+
+
+<img src="https://netdata.cloud/img/cilium.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Cilium Agent metrics for optimized network security and connectivity.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**Untyped metrics that don't match any of the rules above are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (`gauge` or `counter`) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
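+
+Both `selector` and `fallback_type` are set per job. For illustration only, a minimal sketch that keeps just Cilium time series and maps some untyped metrics to gauges; the port and the metric name patterns are placeholders, not taken from the actual Cilium Agent metric set:
+
+```yaml
+jobs:
+  - name: cilium_agent
+    url: http://127.0.0.1:9090/metrics   # placeholder port, adjust to your deployment
+    selector:
+      allow:
+        - cilium_*                       # illustrative pattern: keep only Cilium time series
+    fallback_type:
+      gauge:
+        - cilium_*_status                # illustrative pattern: treat matching untyped metrics as gauges
+```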
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cilium_operator.md b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_operator.md
new file mode 100644
index 000000000..4083f7b0b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_operator.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cilium_operator.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Cilium Operator"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Kubernetes"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Cilium Operator
+
+
+<img src="https://netdata.cloud/img/cilium.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Cilium Operator metrics for efficient Kubernetes network security management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (`gauge` or `counter`) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
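+
+The `max_time_series` and `max_time_series_per_metric` limits described in the options table are also set per job. A minimal sketch with illustrative values and a placeholder URL:
+
+```yaml
+jobs:
+  - name: cilium_operator
+    url: http://127.0.0.1:9090/metrics   # placeholder port, adjust to your deployment
+    max_time_series: 5000                # raise the global time series limit for this endpoint
+    max_time_series_per_metric: 500      # raise the per-metric limit
+```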
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cilium_proxy.md b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_proxy.md
new file mode 100644
index 000000000..cfffa6299
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cilium_proxy.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cilium_proxy.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Cilium Proxy"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Kubernetes"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Cilium Proxy
+
+
+<img src="https://netdata.cloud/img/cilium.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Cilium Proxy metrics for enhanced network security and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (`gauge` or `counter`) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
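+
+If the exporter endpoint is only reachable through an HTTP proxy, the `proxy_*` options from the table above are set per job. A minimal sketch with illustrative values:
+
+```yaml
+jobs:
+  - name: cilium_proxy
+    url: http://127.0.0.1:9090/metrics   # placeholder port, adjust to your deployment
+    proxy_url: http://127.0.0.1:3128     # illustrative proxy address
+    proxy_username: proxyuser            # illustrative credentials
+    proxy_password: proxypass
+```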
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cisco_aci.md b/src/go/plugin/go.d/modules/prometheus/integrations/cisco_aci.md
new file mode 100644
index 000000000..9766e88d1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cisco_aci.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cisco_aci.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Cisco ACI"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Cisco ACI
+
+
+<img src="https://netdata.cloud/img/cisco.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Cisco ACI infrastructure metrics for optimized network performance and resource management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (`gauge` or `counter`) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
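+
+The HTTP client behavior can also be tuned per job with the `timeout` and `not_follow_redirects` options from the table above. A minimal sketch with illustrative values:
+
+```yaml
+jobs:
+  - name: cisco_aci
+    url: http://127.0.0.1:9090/metrics   # placeholder port, adjust to your deployment
+    timeout: 5                           # seconds; illustrative value
+    not_follow_redirects: yes            # do not follow HTTP redirects
+```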
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/citrix_netscaler.md b/src/go/plugin/go.d/modules/prometheus/integrations/citrix_netscaler.md
new file mode 100644
index 000000000..e6b704031
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/citrix_netscaler.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/citrix_netscaler.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Citrix NetScaler"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Citrix NetScaler
+
+
+<img src="https://netdata.cloud/img/citrix.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on NetScaler performance metrics for efficient application delivery and load balancing.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (`gauge` or `counter`) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
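+
+When the exporter endpoint serves TLS with a private CA or requires a client certificate, the `tls_*` options from the table above apply per job. A minimal sketch with illustrative paths:
+
+```yaml
+jobs:
+  - name: netscaler
+    url: https://127.0.0.1:9090/metrics  # placeholder port, adjust to your deployment
+    tls_ca: /path/to/ca.pem              # illustrative paths
+    tls_cert: /path/to/client.pem
+    tls_key: /path/to/client.key
+```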
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/clamav_daemon.md b/src/go/plugin/go.d/modules/prometheus/integrations/clamav_daemon.md
new file mode 100644
index 000000000..ea0398be5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/clamav_daemon.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/clamav_daemon.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "ClamAV daemon"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ClamAV daemon
+
+
+<img src="https://netdata.cloud/img/clamav.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track ClamAV antivirus metrics for enhanced threat detection and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
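+
+As a sketch (values are illustrative), globally defined options sit at the top level of `go.d/prometheus.conf`, outside the `jobs` list, and apply to every job that does not override them:
+
+```yaml
+update_every: 5          # illustrative: collect every 5 seconds
+autodetection_retry: 60  # illustrative: recheck failed auto-detection every 60 seconds
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics   # placeholder port, adjust to your deployment
+```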
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (`gauge` or `counter`) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/clamscan_results.md b/src/go/plugin/go.d/modules/prometheus/integrations/clamscan_results.md
new file mode 100644
index 000000000..4cc488b1c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/clamscan_results.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/clamscan_results.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Clamscan results"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Clamscan results
+
+
+<img src="https://netdata.cloud/img/clamav.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor ClamAV scanning performance metrics for efficient malware detection and analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
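+
+As a minimal sketch, assuming this exporter prefixes its metric names with `clamav_` (check the actual names on the exporter's `/metrics` endpoint and adjust the patterns), a job that keeps only those series could look like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - clamav_*
+```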
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
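+
+As a hypothetical illustration (the metric name pattern below is made up), untyped series matching `clamav_*_status` could be processed as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - clamav_*_status  # hypothetical pattern, adjust to the exporter's real metric names
+```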
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URLs to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/clash.md b/src/go/plugin/go.d/modules/prometheus/integrations/clash.md
new file mode 100644
index 000000000..23b80bd30
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/clash.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/clash.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Clash"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Clash
+
+
+<img src="https://netdata.cloud/img/clash.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Clash proxy server metrics for optimized network performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
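+
+As a minimal sketch, assuming this exporter prefixes its metric names with `clash_` (check the actual names on the exporter's `/metrics` endpoint and adjust the patterns), a job that keeps only those series could look like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - clash_*
+```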
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
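+
+As a hypothetical illustration (the metric name pattern below is made up), untyped series matching `clash_*_connections` could be processed as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - clash_*_connections  # hypothetical pattern, adjust to the exporter's real metric names
+```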
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URLs to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry.md b/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry.md
new file mode 100644
index 000000000..2d1b36c25
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Cloud Foundry"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Provisioning Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Cloud Foundry
+
+
+<img src="https://netdata.cloud/img/cloud-foundry.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Cloud Foundry platform metrics for optimized application deployment and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
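+
+As a minimal sketch, assuming this exporter prefixes its metric names with `cf_` (check the actual names on the exporter's `/metrics` endpoint and adjust the patterns), a job that keeps only those series could look like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - cf_*
+```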
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
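+
+As a hypothetical illustration (the metric name pattern below is made up), untyped series matching `cf_*_info` could be processed as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - cf_*_info  # hypothetical pattern, adjust to the exporter's real metric names
+```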
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URLs to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry_firehose.md b/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry_firehose.md
new file mode 100644
index 000000000..d6405b416
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry_firehose.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cloud_foundry_firehose.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Cloud Foundry Firehose"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Provisioning Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Cloud Foundry Firehose
+
+
+<img src="https://netdata.cloud/img/cloud-foundry.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
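+
+As a minimal sketch, assuming this exporter prefixes its metric names with `firehose_` (check the actual names on the exporter's `/metrics` endpoint and adjust the patterns), a job that keeps only those series could look like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - firehose_*
+```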
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
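+
+As a hypothetical illustration (the metric name pattern below is made up), untyped series matching `firehose_*_value` could be processed as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - firehose_*_value  # hypothetical pattern, adjust to the exporter's real metric names
+```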
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URLs to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cloudflare_pcap.md b/src/go/plugin/go.d/modules/prometheus/integrations/cloudflare_pcap.md
new file mode 100644
index 000000000..2c1c479a4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cloudflare_pcap.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cloudflare_pcap.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Cloudflare PCAP"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Cloudflare PCAP
+
+
+<img src="https://netdata.cloud/img/cloudflare.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
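+
+As a minimal sketch, assuming this exporter prefixes its metric names with `cloudflare_` (check the actual names on the exporter's `/metrics` endpoint and adjust the patterns), a job that keeps only those series could look like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - cloudflare_*
+```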
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
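+
+As a hypothetical illustration (the metric name pattern below is made up), untyped series matching `cloudflare_*_requests` could be processed as counters:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - cloudflare_*_requests  # hypothetical pattern, adjust to the exporter's real metric names
+```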
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URLs to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cloudwatch.md b/src/go/plugin/go.d/modules/prometheus/integrations/cloudwatch.md
new file mode 100644
index 000000000..816c0450e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cloudwatch.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cloudwatch.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "CloudWatch"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# CloudWatch
+
+
+<img src="https://netdata.cloud/img/aws-cloudwatch.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
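+
+As a minimal sketch, assuming this exporter prefixes its metric names with `aws_` (check the actual names on the exporter's `/metrics` endpoint and adjust the patterns), a job that keeps only those series could look like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - aws_*
+```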
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
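+
+As a hypothetical illustration (the pattern below is an assumption), if this exporter exposes series without a '# TYPE' line, they could be processed as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - aws_*  # hypothetical pattern, adjust to the exporter's real metric names
+```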
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URLs to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/clustercontrol_cmon.md b/src/go/plugin/go.d/modules/prometheus/integrations/clustercontrol_cmon.md
new file mode 100644
index 000000000..c69cb434c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/clustercontrol_cmon.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/clustercontrol_cmon.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "ClusterControl CMON"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ClusterControl CMON
+
+
+<img src="https://netdata.cloud/img/cluster-control.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track CMON metrics for Severalnines Cluster Control for efficient monitoring and management of database operations.
+
+
+Metrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
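+
+For instance, a job-level selector might look like the sketch below. The metric name patterns are placeholders (the selector accepts simple glob-style patterns, as described in the syntax reference above), so substitute names that your endpoint actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # keep only metrics with this (placeholder) prefix
+      deny:
+        - myapp_debug_*  # drop the debug subset of those metrics
+```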
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/collectd.md b/src/go/plugin/go.d/modules/prometheus/integrations/collectd.md
new file mode 100644
index 000000000..972146881
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/collectd.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/collectd.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Collectd"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Observability"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Collectd
+
+
+<img src="https://netdata.cloud/img/collectd.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor system and application metrics with Collectd for comprehensive performance analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
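+
+For instance, a job-level selector might look like the sketch below. The metric name patterns are placeholders (the selector accepts simple glob-style patterns, as described in the syntax reference above), so substitute names that your endpoint actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # keep only metrics with this (placeholder) prefix
+      deny:
+        - myapp_debug_*  # drop the debug subset of those metrics
+```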
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/concourse.md b/src/go/plugin/go.d/modules/prometheus/integrations/concourse.md
new file mode 100644
index 000000000..ce7baff4b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/concourse.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/concourse.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Concourse"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/CICD Platforms"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Concourse
+
+
+<img src="https://netdata.cloud/img/concourse.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.
+
+
+Metrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure built-in Prometheus exporter
+
+To configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
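+
+For instance, a job-level selector might look like the sketch below. The metric name patterns are placeholders (the selector accepts simple glob-style patterns, as described in the syntax reference above), so substitute names that your endpoint actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # keep only metrics with this (placeholder) prefix
+      deny:
+        - myapp_debug_*  # drop the debug subset of those metrics
+```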
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/craftbeerpi.md b/src/go/plugin/go.d/modules/prometheus/integrations/craftbeerpi.md
new file mode 100644
index 000000000..f4dae54c5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/craftbeerpi.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/craftbeerpi.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "CraftBeerPi"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# CraftBeerPi
+
+
+<img src="https://netdata.cloud/img/craftbeer.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
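+
+For instance, a job-level selector might look like the sketch below. The metric name patterns are placeholders (the selector accepts simple glob-style patterns, as described in the syntax reference above), so substitute names that your endpoint actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # keep only metrics with this (placeholder) prefix
+      deny:
+        - myapp_debug_*  # drop the debug subset of those metrics
+```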
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/crowdsec.md b/src/go/plugin/go.d/modules/prometheus/integrations/crowdsec.md
new file mode 100644
index 000000000..a59069dd3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/crowdsec.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/crowdsec.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Crowdsec"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Crowdsec
+
+
+<img src="https://netdata.cloud/img/crowdsec.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Crowdsec security metrics for efficient threat detection and response.
+
+
+Metrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure built-in Prometheus exporter
+
+To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
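+
+For instance, a job-level selector might look like the sketch below. The metric name patterns are placeholders (the selector accepts simple glob-style patterns, as described in the syntax reference above), so substitute names that your endpoint actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - myapp_*        # keep only metrics with this (placeholder) prefix
+      deny:
+        - myapp_debug_*  # drop the debug subset of those metrics
+```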
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/crypto_exchanges.md b/src/go/plugin/go.d/modules/prometheus/integrations/crypto_exchanges.md
new file mode 100644
index 000000000..a56ed0db5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/crypto_exchanges.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/crypto_exchanges.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Crypto exchanges"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Blockchain Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Crypto exchanges
+
+
+<img src="https://netdata.cloud/img/crypto.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track cryptocurrency market metrics for informed investment and trading decisions.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
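+
+As a concrete sketch (the job name, URL, and metric names below are hypothetical), a job scraping an exporter that exposes untyped metrics `myapp_requests_handled` and `myapp_queue_depth` could classify them like this:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests_handled   # assumed ever-increasing count -> incremental
+      gauge:
+        - myapp_queue_depth        # assumed point-in-time value -> absolute
+```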
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cryptowatch.md b/src/go/plugin/go.d/modules/prometheus/integrations/cryptowatch.md
new file mode 100644
index 000000000..554910783
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cryptowatch.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cryptowatch.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Cryptowatch"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Blockchain Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Cryptowatch
+
+
+<img src="https://netdata.cloud/img/cryptowatch.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
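+
+For example, a minimal sketch of `go.d/prometheus.conf` with both global options set on top of a single job (the port and values are illustrative, not required defaults):
+
+```yaml
+# global options (illustrative values) inherited by every job below
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```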
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
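+
+As a concrete sketch (the job name, URL, and metric names below are hypothetical), a job scraping an exporter that exposes untyped metrics `myapp_requests_handled` and `myapp_queue_depth` could classify them like this:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests_handled   # assumed ever-increasing count -> incremental
+      gauge:
+        - myapp_queue_depth        # assumed point-in-time value -> absolute
+```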
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/custom_exporter.md b/src/go/plugin/go.d/modules/prometheus/integrations/custom_exporter.md
new file mode 100644
index 000000000..9d309e624
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/custom_exporter.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/custom_exporter.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Custom Exporter"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Custom Exporter
+
+
+<img src="https://netdata.cloud/img/customdata.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Create and monitor custom metrics tailored to your specific use case and requirements.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
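+
+For example, a minimal sketch of `go.d/prometheus.conf` with both global options set on top of a single job (the port and values are illustrative, not required defaults):
+
+```yaml
+# global options (illustrative values) inherited by every job below
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```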
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
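+
+As a concrete sketch (the job name, URL, and metric names below are hypothetical), a job scraping an exporter that exposes untyped metrics `myapp_requests_handled` and `myapp_queue_depth` could classify them like this:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests_handled   # assumed ever-increasing count -> incremental
+      gauge:
+        - myapp_queue_depth        # assumed point-in-time value -> absolute
+```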
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md b/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md
new file mode 100644
index 000000000..b283f220c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/cvmfs_clients.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "CVMFS clients"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# CVMFS clients
+
+
+<img src="https://netdata.cloud/img/cvmfs.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track CernVM File System metrics for optimized distributed file system performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
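+
+For example, a minimal sketch of `go.d/prometheus.conf` with both global options set on top of a single job (the port and values are illustrative, not required defaults):
+
+```yaml
+# global options (illustrative values) inherited by every job below
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```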
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
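+
+As a concrete sketch (the job name, URL, and metric names below are hypothetical), a job scraping an exporter that exposes untyped metrics `myapp_requests_handled` and `myapp_queue_depth` could classify them like this:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests_handled   # assumed ever-increasing count -> incremental
+      gauge:
+        - myapp_queue_depth        # assumed point-in-time value -> absolute
+```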
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ddwrt_routers.md b/src/go/plugin/go.d/modules/prometheus/integrations/ddwrt_routers.md
new file mode 100644
index 000000000..e0b898fbf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ddwrt_routers.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ddwrt_routers.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "DDWRT Routers"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# DDWRT Routers
+
+
+<img src="https://netdata.cloud/img/ddwrt.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on DD-WRT router metrics for efficient network management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
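+
+For example, a minimal sketch of `go.d/prometheus.conf` with both global options set on top of a single job (the port and values are illustrative, not required defaults):
+
+```yaml
+# global options (illustrative values) inherited by every job below
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```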
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
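+
+As a concrete sketch (the job name, URL, and metric names below are hypothetical), a job scraping an exporter that exposes untyped metrics `myapp_requests_handled` and `myapp_queue_depth` could classify them like this:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests_handled   # assumed ever-increasing count -> incremental
+      gauge:
+        - myapp_queue_depth        # assumed point-in-time value -> absolute
+```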
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_ecs_cluster.md b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_ecs_cluster.md
new file mode 100644
index 000000000..6d268ca64
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_ecs_cluster.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_ecs_cluster.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Dell EMC ECS cluster"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dell EMC ECS cluster
+
+
+<img src="https://netdata.cloud/img/dell.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Dell EMC ECS object storage metrics for optimized storage management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
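+
+For example, a minimal sketch of `go.d/prometheus.conf` with both global options set on top of a single job (the port and values are illustrative, not required defaults):
+
+```yaml
+# global options (illustrative values) inherited by every job below
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```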
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
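+
+As a concrete sketch (the job name, URL, and metric names below are hypothetical), a job scraping an exporter that exposes untyped metrics `myapp_requests_handled` and `myapp_queue_depth` could classify them like this:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests_handled   # assumed ever-increasing count -> incremental
+      gauge:
+        - myapp_queue_depth        # assumed point-in-time value -> absolute
+```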
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_isilon_cluster.md b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_isilon_cluster.md
new file mode 100644
index 000000000..5f29528ad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_isilon_cluster.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_isilon_cluster.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Dell EMC Isilon cluster"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dell EMC Isilon cluster
+
+
+<img src="https://netdata.cloud/img/dell.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
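+For illustration, here is a hedged sketch of a job that combines several of these options (the URL, port, and limit values are assumptions, not defaults of this exporter):
+
+```yaml
+jobs:
+  - name: isilon
+    url: http://127.0.0.1:9090/metrics  # replace with your exporter's address and port
+    timeout: 5                          # seconds
+    max_time_series: 5000
+    max_time_series_per_metric: 500
+```
+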
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_xtremio_cluster.md b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_xtremio_cluster.md
new file mode 100644
index 000000000..fe7285234
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_xtremio_cluster.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/dell_emc_xtremio_cluster.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Dell EMC XtremIO cluster"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dell EMC XtremIO cluster
+
+
+<img src="https://netdata.cloud/img/dell.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
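+For example, a hypothetical job-level selector that keeps only series whose names start with `xtremio_` and drops one noisy group (the metric name patterns are illustrative, not taken from this exporter):
+
+```yaml
+jobs:
+  - name: xtremio
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - xtremio_*
+      deny:
+        - xtremio_perf_*_bucket
+```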
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dell_powermax.md b/src/go/plugin/go.d/modules/prometheus/integrations/dell_powermax.md
new file mode 100644
index 000000000..200e2f049
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dell_powermax.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/dell_powermax.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Dell PowerMax"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dell PowerMax
+
+
+<img src="https://netdata.cloud/img/powermax.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Dell EMC PowerMax storage array metrics for efficient storage management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
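+A hedged sketch of the same option placed inside a job definition (the metric name patterns and URL are hypothetical, not taken from this exporter):
+
+```yaml
+jobs:
+  - name: powermax
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - powermax_*_utilization
+      counter:
+        - powermax_*_ops
+```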
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dependency-track.md b/src/go/plugin/go.d/modules/prometheus/integrations/dependency-track.md
new file mode 100644
index 000000000..22d41e643
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dependency-track.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/dependency-track.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Dependency-Track"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dependency-Track
+
+
+<img src="https://netdata.cloud/img/dependency-track.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
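+For instance, a hedged sketch of a job that reaches the exporter through an HTTP proxy (the proxy address and credentials are placeholders):
+
+```yaml
+jobs:
+  - name: dependency_track
+    url: http://127.0.0.1:9090/metrics
+    proxy_url: http://proxy.example.local:3128
+    proxy_username: proxyuser
+    proxy_password: proxypass
+```
+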
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/digitalocean.md b/src/go/plugin/go.d/modules/prometheus/integrations/digitalocean.md
new file mode 100644
index 000000000..8978434c2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/digitalocean.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/digitalocean.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "DigitalOcean"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# DigitalOcean
+
+
+<img src="https://netdata.cloud/img/digitalocean.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track DigitalOcean cloud provider metrics for optimized resource management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
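+For example, a sketch of a job that verifies the exporter's TLS certificate against a custom CA and presents a client certificate (all file paths are placeholders):
+
+```yaml
+jobs:
+  - name: digitalocean
+    url: https://127.0.0.1:9090/metrics
+    tls_ca: /etc/ssl/certs/my_ca.crt
+    tls_cert: /etc/ssl/certs/client.crt
+    tls_key: /etc/ssl/private/client.key
+```
+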
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/discourse.md b/src/go/plugin/go.d/modules/prometheus/integrations/discourse.md
new file mode 100644
index 000000000..adffe3fc3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/discourse.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/discourse.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Discourse"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Media Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Discourse
+
+
+<img src="https://netdata.cloud/img/discourse.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Discourse forum metrics for efficient community management and engagement.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
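+For example, a sketch of a job that sends a custom HTTP header with every scrape (the header name and value are placeholders):
+
+```yaml
+jobs:
+  - name: discourse
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      X-Auth-Token: my-secret-token
+```
+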
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dmarc.md b/src/go/plugin/go.d/modules/prometheus/integrations/dmarc.md
new file mode 100644
index 000000000..2d02e75a9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dmarc.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/dmarc.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "DMARC"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Mail Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# DMARC
+
+
+<img src="https://netdata.cloud/img/dmarc.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track DMARC email authentication metrics for improved email security and deliverability.
+
+
+Metrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
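+A minimal sketch (not taken from the collector's documentation; the values are illustrative) of how these global options might sit in `go.d/prometheus.conf`, assuming the usual go.d layout where top-level values apply to every job unless a job overrides them:
+
+```yaml
+# Illustrative sketch: top-level values act as defaults for all jobs,
+# and an individual job may override them.
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5   # per-job override
+```
+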
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Rules for processing Untyped metrics as Counter or Gauge (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
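+A minimal sketch of how these filtering options might be combined inside a single job definition (the job name, URL, and pattern values below are placeholders, not taken from a real configuration):
+
+```yaml
+jobs:
+ - name: myapp
+   url: http://127.0.0.1:9090/metrics
+   # keep only time series whose names match the allow patterns
+   selector:
+     allow:
+       - myapp_*
+   # treat matching untyped metrics as counters
+   fallback_type:
+     counter:
+       - myapp_events_processed
+```
+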
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+   url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   username: username
+   password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: https://127.0.0.1:9090/metrics
+   tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+   url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dnsbl.md b/src/go/plugin/go.d/modules/prometheus/integrations/dnsbl.md
new file mode 100644
index 000000000..e79517968
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dnsbl.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/dnsbl.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "DNSBL"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# DNSBL
+
+
+<img src="https://netdata.cloud/img/dnsbl.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor DNSBL metrics for efficient domain reputation and security management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
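+A minimal sketch (not taken from the collector's documentation; the values are illustrative) of how these global options might sit in `go.d/prometheus.conf`, assuming the usual go.d layout where top-level values apply to every job unless a job overrides them:
+
+```yaml
+# Illustrative sketch: top-level values act as defaults for all jobs,
+# and an individual job may override them.
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5   # per-job override
+```
+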
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Rules for processing Untyped metrics as Counter or Gauge (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
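+A minimal sketch of how these filtering options might be combined inside a single job definition (the job name, URL, and pattern values below are placeholders, not taken from a real configuration):
+
+```yaml
+jobs:
+ - name: myapp
+   url: http://127.0.0.1:9090/metrics
+   # keep only time series whose names match the allow patterns
+   selector:
+     allow:
+       - myapp_*
+   # treat matching untyped metrics as counters
+   fallback_type:
+     counter:
+       - myapp_events_processed
+```
+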
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+   url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   username: username
+   password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: https://127.0.0.1:9090/metrics
+   tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+   url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dutch_electricity_smart_meter.md b/src/go/plugin/go.d/modules/prometheus/integrations/dutch_electricity_smart_meter.md
new file mode 100644
index 000000000..cf2dabd7b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dutch_electricity_smart_meter.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/dutch_electricity_smart_meter.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Dutch Electricity Smart Meter"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dutch Electricity Smart Meter
+
+
+<img src="https://netdata.cloud/img/dutch-electricity.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
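+A minimal sketch (not taken from the collector's documentation; the values are illustrative) of how these global options might sit in `go.d/prometheus.conf`, assuming the usual go.d layout where top-level values apply to every job unless a job overrides them:
+
+```yaml
+# Illustrative sketch: top-level values act as defaults for all jobs,
+# and an individual job may override them.
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5   # per-job override
+```
+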
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Rules for processing Untyped metrics as Counter or Gauge (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
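+A minimal sketch of how these filtering options might be combined inside a single job definition (the job name, URL, and pattern values below are placeholders, not taken from a real configuration):
+
+```yaml
+jobs:
+ - name: myapp
+   url: http://127.0.0.1:9090/metrics
+   # keep only time series whose names match the allow patterns
+   selector:
+     allow:
+       - myapp_*
+   # treat matching untyped metrics as counters
+   fallback_type:
+     counter:
+       - myapp_events_processed
+```
+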
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+   url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   username: username
+   password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: https://127.0.0.1:9090/metrics
+   tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+   url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/dynatrace.md b/src/go/plugin/go.d/modules/prometheus/integrations/dynatrace.md
new file mode 100644
index 000000000..96e3969d6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/dynatrace.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/dynatrace.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Dynatrace"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Observability"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dynatrace
+
+
+<img src="https://netdata.cloud/img/dynatrace.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Dynatrace APM metrics for comprehensive application performance management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
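+A minimal sketch (not taken from the collector's documentation; the values are illustrative) of how these global options might sit in `go.d/prometheus.conf`, assuming the usual go.d layout where top-level values apply to every job unless a job overrides them:
+
+```yaml
+# Illustrative sketch: top-level values act as defaults for all jobs,
+# and an individual job may override them.
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5   # per-job override
+```
+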
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Rules for processing Untyped metrics as Counter or Gauge (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
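+A minimal sketch of how these filtering options might be combined inside a single job definition (the job name, URL, and pattern values below are placeholders, not taken from a real configuration):
+
+```yaml
+jobs:
+ - name: myapp
+   url: http://127.0.0.1:9090/metrics
+   # keep only time series whose names match the allow patterns
+   selector:
+     allow:
+       - myapp_*
+   # treat matching untyped metrics as counters
+   fallback_type:
+     counter:
+       - myapp_events_processed
+```
+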
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+   url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   username: username
+   password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: https://127.0.0.1:9090/metrics
+   tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+   url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md b/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md
new file mode 100644
index 000000000..c6c1823c8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/eaton_ups.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Eaton UPS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/UPS"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Eaton UPS
+
+
+<img src="https://netdata.cloud/img/eaton.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
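+A minimal sketch (not taken from the collector's documentation; the values are illustrative) of how these global options might sit in `go.d/prometheus.conf`, assuming the usual go.d layout where top-level values apply to every job unless a job overrides them:
+
+```yaml
+# Illustrative sketch: top-level values act as defaults for all jobs,
+# and an individual job may override them.
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5   # per-job override
+```
+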
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Rules for processing Untyped metrics as Counter or Gauge (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
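+A minimal sketch of how these filtering options might be combined inside a single job definition (the job name, URL, and pattern values below are placeholders, not taken from a real configuration):
+
+```yaml
+jobs:
+ - name: myapp
+   url: http://127.0.0.1:9090/metrics
+   # keep only time series whose names match the allow patterns
+   selector:
+     allow:
+       - myapp_*
+   # treat matching untyped metrics as counters
+   fallback_type:
+     counter:
+       - myapp_events_processed
+```
+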
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+   url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   username: username
+   password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: https://127.0.0.1:9090/metrics
+   tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+   url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/elgato_key_light_devices..md b/src/go/plugin/go.d/modules/prometheus/integrations/elgato_key_light_devices..md
new file mode 100644
index 000000000..b4bc8d5d6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/elgato_key_light_devices..md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/elgato_key_light_devices..md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Elgato Key Light devices."
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Elgato Key Light devices.
+
+
+<img src="https://netdata.cloud/img/elgato.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Elgato Key Light metrics for optimized lighting control and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
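+A minimal sketch (not taken from the collector's documentation; the values are illustrative) of how these global options might sit in `go.d/prometheus.conf`, assuming the usual go.d layout where top-level values apply to every job unless a job overrides them:
+
+```yaml
+# Illustrative sketch: top-level values act as defaults for all jobs,
+# and an individual job may override them.
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5   # per-job override
+```
+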
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Rules for processing Untyped metrics as Counter or Gauge (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
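+A minimal sketch of how these filtering options might be combined inside a single job definition (the job name, URL, and pattern values below are placeholders, not taken from a real configuration):
+
+```yaml
+jobs:
+ - name: myapp
+   url: http://127.0.0.1:9090/metrics
+   # keep only time series whose names match the allow patterns
+   selector:
+     allow:
+       - myapp_*
+   # treat matching untyped metrics as counters
+   fallback_type:
+     counter:
+       - myapp_events_processed
+```
+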
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+   url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   username: username
+   password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/energomera_smart_power_meters.md b/src/go/plugin/go.d/modules/prometheus/integrations/energomera_smart_power_meters.md
new file mode 100644
index 000000000..74764ae52
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/energomera_smart_power_meters.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/energomera_smart_power_meters.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Energomera smart power meters"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Energomera smart power meters
+
+
+<img src="https://netdata.cloud/img/energomera.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Energomera electricity meter metrics for efficient energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [energomera-exporter](https://github.com/peak-load/energomera_exporter), an Energomera electricity meter exporter, by following the instructions in the exporter's README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics whose names match the configured patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
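+
+For illustration only (the metric names below are hypothetical, and glob-style name patterns are assumed, as described in the selector syntax linked above), a job could keep all `node_cpu_*` series except the guest-mode ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_cpu_*        # keep every node_cpu_... series
+      deny:
+        - node_cpu_guest_*  # ...except the guest-mode ones
+```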
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
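+
+For illustration only (the metric name patterns below are hypothetical), a job could chart matching untyped series as gauges or counters like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - myapp_temperature_*  # untyped series to treat as Gauge
+      counter:
+        - myapp_requests_*     # untyped series to treat as Counter
+```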
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/eos.md b/src/go/plugin/go.d/modules/prometheus/integrations/eos.md
new file mode 100644
index 000000000..b2e3d590a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/eos.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/eos.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "EOS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# EOS
+
+
+<img src="https://netdata.cloud/img/eos.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor CERN EOS metrics for efficient storage management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics whose names match the configured patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
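+
+For illustration only (the metric names below are hypothetical, and glob-style name patterns are assumed, as described in the selector syntax linked above), a job could keep all `node_cpu_*` series except the guest-mode ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_cpu_*        # keep every node_cpu_... series
+      deny:
+        - node_cpu_guest_*  # ...except the guest-mode ones
+```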
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
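+
+For illustration only (the metric name patterns below are hypothetical), a job could chart matching untyped series as gauges or counters like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - myapp_temperature_*  # untyped series to treat as Gauge
+      counter:
+        - myapp_requests_*     # untyped series to treat as Counter
+```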
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/etcd.md b/src/go/plugin/go.d/modules/prometheus/integrations/etcd.md
new file mode 100644
index 000000000..b24d6b241
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/etcd.md
@@ -0,0 +1,321 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/etcd.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "etcd"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Service Discovery / Registry"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# etcd
+
+
+<img src="https://netdata.cloud/img/etcd.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track etcd database metrics for optimized distributed key-value store management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to etcd built-in Prometheus exporter.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics whose names match the configured patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
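+
+For illustration only (the metric names below are hypothetical, and glob-style name patterns are assumed, as described in the selector syntax linked above), a job could keep all `node_cpu_*` series except the guest-mode ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_cpu_*        # keep every node_cpu_... series
+      deny:
+        - node_cpu_guest_*  # ...except the guest-mode ones
+```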
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
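+
+For illustration only (the metric name patterns below are hypothetical), a job could chart matching untyped series as gauges or counters like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - myapp_temperature_*  # untyped series to treat as Gauge
+      counter:
+        - myapp_requests_*     # untyped series to treat as Counter
+```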
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/excel_spreadsheet.md b/src/go/plugin/go.d/modules/prometheus/integrations/excel_spreadsheet.md
new file mode 100644
index 000000000..6039ee832
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/excel_spreadsheet.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/excel_spreadsheet.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Excel spreadsheet"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Excel spreadsheet
+
+
+<img src="https://netdata.cloud/img/excel.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Export Prometheus metrics to Excel for versatile data analysis and reporting.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics whose names match the configured patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
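+
+For illustration only (the metric names below are hypothetical, and glob-style name patterns are assumed, as described in the selector syntax linked above), a job could keep all `node_cpu_*` series except the guest-mode ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_cpu_*        # keep every node_cpu_... series
+      deny:
+        - node_cpu_guest_*  # ...except the guest-mode ones
+```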
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
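+
+For illustration only (the metric name patterns below are hypothetical), a job could chart matching untyped series as gauges or counters like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - myapp_temperature_*  # untyped series to treat as Gauge
+      counter:
+        - myapp_requests_*     # untyped series to treat as Counter
+```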
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/fastd.md b/src/go/plugin/go.d/modules/prometheus/integrations/fastd.md
new file mode 100644
index 000000000..2442dff82
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/fastd.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/fastd.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Fastd"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/VPNs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Fastd
+
+
+<img src="https://netdata.cloud/img/fastd.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Fastd VPN metrics for efficient virtual private network management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics whose names match the configured patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
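+
+For illustration only (the metric names below are hypothetical, and glob-style name patterns are assumed, as described in the selector syntax linked above), a job could keep all `node_cpu_*` series except the guest-mode ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_cpu_*        # keep every node_cpu_... series
+      deny:
+        - node_cpu_guest_*  # ...except the guest-mode ones
+```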
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
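+
+For illustration only (the metric name patterns below are hypothetical), a job could chart matching untyped series as gauges or counters like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - myapp_temperature_*  # untyped series to treat as Gauge
+      counter:
+        - myapp_requests_*     # untyped series to treat as Counter
+```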
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/fortigate_firewall.md b/src/go/plugin/go.d/modules/prometheus/integrations/fortigate_firewall.md
new file mode 100644
index 000000000..b89853a99
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/fortigate_firewall.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/fortigate_firewall.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Fortigate firewall"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Fortigate firewall
+
+
+<img src="https://netdata.cloud/img/fortinet.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Fortigate firewall metrics for enhanced network protection and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) applied to Untyped metrics whose names match the configured patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
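+The option names above map directly to job-level keys in `go.d/prometheus.conf`. As an illustrative sketch only (the values below are arbitrary and not recommended settings), a job that tightens the request timeout and raises the series limits could look like this:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    timeout: 5                       # HTTP request timeout
+    max_time_series: 5000            # global time series limit for this endpoint
+    max_time_series_per_metric: 500  # per-metric time series limit
+```
+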
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
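+
+As a concrete, hypothetical illustration (the metric names below are made up for this example and are not taken from the exporter), a selector that keeps only series whose names start with `fortigate_` while dropping a single noisy series could be written as:
+
+```yaml
+selector:
+  allow:
+    - fortigate_*            # keep every series with this prefix (hypothetical name)
+  deny:
+    - fortigate_build_info   # drop one informational series (hypothetical name)
+```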
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
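+
+For example, assuming a hypothetical untyped series named `fortigate_cpu_usage` (an illustrative name only), you could force it to be handled as a Gauge like this:
+
+```yaml
+fallback_type:
+  gauge:
+    - fortigate_cpu_usage   # hypothetical untyped metric, treated as a Gauge
+```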
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_nfs.md b/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_nfs.md
new file mode 100644
index 000000000..cf60803ad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_nfs.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_nfs.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "FreeBSD NFS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/FreeBSD"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# FreeBSD NFS
+
+
+<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor FreeBSD Network File System metrics for efficient file sharing management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_rctl-racct.md b/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_rctl-racct.md
new file mode 100644
index 000000000..bfe6e9e93
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_rctl-racct.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/freebsd_rctl-racct.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "FreeBSD RCTL-RACCT"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/FreeBSD"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# FreeBSD RCTL-RACCT
+
+
+<img src="https://netdata.cloud/img/freebsd.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md b/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md
new file mode 100644
index 000000000..847e305d1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/freifunk_network.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Freifunk network"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Freifunk network
+
+
+<img src="https://netdata.cloud/img/freifunk.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Freifunk community network metrics for optimized network performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/fritzbox_network_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/fritzbox_network_devices.md
new file mode 100644
index 000000000..0158b0ba6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/fritzbox_network_devices.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/fritzbox_network_devices.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Fritzbox network devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Fritzbox network devices
+
+
+<img src="https://netdata.cloud/img/avm.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track AVM Fritzbox router metrics for efficient home network management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/frrouting.md b/src/go/plugin/go.d/modules/prometheus/integrations/frrouting.md
new file mode 100644
index 000000000..5f492a475
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/frrouting.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/frrouting.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "FRRouting"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# FRRouting
+
+
+<img src="https://netdata.cloud/img/frrouting.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Free Range Routing (FRR) metrics for optimized network routing and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gcp_gce.md b/src/go/plugin/go.d/modules/prometheus/integrations/gcp_gce.md
new file mode 100644
index 000000000..34c6d7673
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gcp_gce.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/gcp_gce.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "GCP GCE"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# GCP GCE
+
+
+<img src="https://netdata.cloud/img/gcp-gce.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Prometheus untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
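+
+For instance, to drop internal runtime series that many exporters expose alongside their own metrics (the `go_*` and `process_*` prefixes below are common for exporters built with the Prometheus Go client and are used here only as an illustration), a job could be sketched like this; the job name and URL follow the placeholder conventions used in the examples in this document:
+
+```yaml
+jobs:
+ - name: gcp_gce
+   url: http://127.0.0.1:9090/metrics
+   selector:
+     deny:
+       - go_*
+       - process_*
+```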
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
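+
+As an illustrative sketch, if this exporter exposed untyped series that you want charted as gauges, the option can be set per job as shown below; the `gcp_gce_*` pattern is an assumption for illustration, not a name taken from the exporter's documentation:
+
+```yaml
+jobs:
+ - name: gcp_gce
+   url: http://127.0.0.1:9090/metrics
+   fallback_type:
+     gauge:
+       - gcp_gce_*
+```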
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gcp_quota.md b/src/go/plugin/go.d/modules/prometheus/integrations/gcp_quota.md
new file mode 100644
index 000000000..85959b677
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gcp_quota.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/gcp_quota.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "GCP Quota"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# GCP Quota
+
+
+<img src="https://netdata.cloud/img/gcp.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Google Cloud Platform quota metrics for optimized resource usage and cost management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Prometheus untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
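+
+For instance, to drop internal runtime series that many exporters expose alongside their own metrics (the `go_*` and `process_*` prefixes below are common for exporters built with the Prometheus Go client and are used here only as an illustration), a job could be sketched like this; the job name and URL follow the placeholder conventions used in the examples in this document:
+
+```yaml
+jobs:
+ - name: gcp_quota
+   url: http://127.0.0.1:9090/metrics
+   selector:
+     deny:
+       - go_*
+       - process_*
+```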
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
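+
+As an illustrative sketch, if this exporter exposed untyped series that you want charted as gauges, the option can be set per job as shown below; the `gcp_quota_*` pattern is an assumption for illustration, not a name taken from the exporter's documentation:
+
+```yaml
+jobs:
+ - name: gcp_quota
+   url: http://127.0.0.1:9090/metrics
+   fallback_type:
+     gauge:
+       - gcp_quota_*
+```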
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/generic_command_line_output.md b/src/go/plugin/go.d/modules/prometheus/integrations/generic_command_line_output.md
new file mode 100644
index 000000000..27f1cb647
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/generic_command_line_output.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/generic_command_line_output.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Generic Command Line Output"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Generic Command Line Output
+
+
+<img src="https://netdata.cloud/img/cli.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track custom command line output metrics for tailored monitoring and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Prometheus untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
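+
+For instance, to drop internal runtime series that many exporters expose alongside their own metrics (the `go_*` and `process_*` prefixes below are common for exporters built with the Prometheus Go client and are used here only as an illustration), a job could be sketched like this; the job name and URL follow the placeholder conventions used in the examples in this document:
+
+```yaml
+jobs:
+ - name: cli_output
+   url: http://127.0.0.1:9090/metrics
+   selector:
+     deny:
+       - go_*
+       - process_*
+```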
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
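+
+As an illustrative sketch, if the command line output produced untyped series that you want charted as gauges, the option can be set per job as shown below; the `myapp_*` pattern is purely a placeholder for whatever prefix your command's metrics actually use:
+
+```yaml
+jobs:
+ - name: cli_output
+   url: http://127.0.0.1:9090/metrics
+   fallback_type:
+     gauge:
+       - myapp_*
+```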
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/generic_storage_enclosure_tool.md b/src/go/plugin/go.d/modules/prometheus/integrations/generic_storage_enclosure_tool.md
new file mode 100644
index 000000000..ac8f74a43
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/generic_storage_enclosure_tool.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/generic_storage_enclosure_tool.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Generic storage enclosure tool"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Generic storage enclosure tool
+
+
+<img src="https://netdata.cloud/img/storage-enclosure.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor storage enclosure metrics for efficient storage device management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Prometheus untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
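+
+For instance, to drop internal runtime series that many exporters expose alongside their own metrics (the `go_*` and `process_*` prefixes below are common for exporters built with the Prometheus Go client and are used here only as an illustration), a job could be sketched like this; the job name and URL follow the placeholder conventions used in the examples in this document:
+
+```yaml
+jobs:
+ - name: jbod
+   url: http://127.0.0.1:9090/metrics
+   selector:
+     deny:
+       - go_*
+       - process_*
+```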
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
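+
+As an illustrative sketch, if this exporter exposed untyped series that you want charted as gauges, the option can be set per job as shown below; the `jbod_*` pattern is an assumption for illustration, not a name taken from the exporter's documentation:
+
+```yaml
+jobs:
+ - name: jbod
+   url: http://127.0.0.1:9090/metrics
+   fallback_type:
+     gauge:
+       - jbod_*
+```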
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/github_api_rate_limit.md b/src/go/plugin/go.d/modules/prometheus/integrations/github_api_rate_limit.md
new file mode 100644
index 000000000..548430349
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/github_api_rate_limit.md
@@ -0,0 +1,326 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/github_api_rate_limit.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "GitHub API rate limit"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Other"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# GitHub API rate limit
+
+
+<img src="https://netdata.cloud/img/github.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor GitHub API rate limit metrics for efficient API usage and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Prometheus untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
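+
+For instance, to drop internal runtime series that many exporters expose alongside their own metrics (the `go_*` and `process_*` prefixes below are common for exporters built with the Prometheus Go client and are used here only as an illustration), a job could be sketched like this; the job name and URL follow the placeholder conventions used in the examples in this document:
+
+```yaml
+jobs:
+ - name: github_ratelimit
+   url: http://127.0.0.1:9090/metrics
+   selector:
+     deny:
+       - go_*
+       - process_*
+```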
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
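+
+As an illustrative sketch, if this exporter exposed untyped series that you want charted as gauges, the option can be set per job as shown below; the `github_rate_limit_*` pattern is an assumption for illustration, not a name taken from the exporter's documentation:
+
+```yaml
+jobs:
+ - name: github_ratelimit
+   url: http://127.0.0.1:9090/metrics
+   fallback_type:
+     gauge:
+       - github_rate_limit_*
+```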
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md b/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md
new file mode 100644
index 000000000..f96fc527a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/github_repository.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "GitHub repository"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Other"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# GitHub repository
+
+
+<img src="https://netdata.cloud/img/github.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track GitHub repository metrics for optimized project and user analytics monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' hint) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Prometheus untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
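+
+For instance, to drop internal runtime series that many exporters expose alongside their own metrics (the `go_*` and `process_*` prefixes below are common for exporters built with the Prometheus Go client and are used here only as an illustration), a job could be sketched like this; the job name and URL follow the placeholder conventions used in the examples in this document:
+
+```yaml
+jobs:
+ - name: github_repo
+   url: http://127.0.0.1:9090/metrics
+   selector:
+     deny:
+       - go_*
+       - process_*
+```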
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
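+
+As an illustrative sketch, if this exporter exposed untyped series that you want charted as gauges, the option can be set per job as shown below; the `github_repo_*` pattern is an assumption for illustration, not a name taken from the exporter's documentation:
+
+```yaml
+jobs:
+ - name: github_repo
+   url: http://127.0.0.1:9090/metrics
+   fallback_type:
+     gauge:
+       - github_repo_*
+```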
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gitlab_runner.md b/src/go/plugin/go.d/modules/prometheus/integrations/gitlab_runner.md
new file mode 100644
index 000000000..6982b7a59
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gitlab_runner.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/gitlab_runner.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "GitLab Runner"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/CICD Platforms"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# GitLab Runner
+
+
+<img src="https://netdata.cloud/img/gitlab.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on GitLab CI/CD job metrics for efficient development and deployment management.
+
+
+Metrics are gathered by periodically sending HTTP requests to the GitLab Runner's built-in Prometheus exporter.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure built-in Prometheus exporter
+
+To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
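+Most of the options above are standard HTTP client settings. As a rough sketch of how a few of them combine in a single job (the timeout value and header name are illustrative, not recommendations):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    timeout: 5
+    headers:
+      X-Scrape-Source: netdata  # illustrative custom header
+```
+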
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
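+As an illustration, assuming the runner's series share the `gitlab_runner_` prefix, a job could be limited to just those series (the prefix and pattern are an assumption, check your endpoint's output):
+
+```yaml
+jobs:
+  - name: gitlab_runner_local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - gitlab_runner_*  # assumed metric name prefix
+```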
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gobetween.md b/src/go/plugin/go.d/modules/prometheus/integrations/gobetween.md
new file mode 100644
index 000000000..7ea5ec62c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gobetween.md
@@ -0,0 +1,321 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/gobetween.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Gobetween"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Gobetween
+
+
+<img src="https://netdata.cloud/img/gobetween.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Gobetween load balancer metrics for optimized network traffic management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to Gobetween's built-in Prometheus exporter.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
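+As a sketch, untyped series that clearly behave like counters or gauges could be remapped per job; the metric name patterns below are illustrative and are not actual Gobetween metric names:
+
+```yaml
+jobs:
+  - name: gobetween_local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - '*_rx_bytes'            # illustrative untyped counter
+        - '*_tx_bytes'            # illustrative untyped counter
+      gauge:
+        - '*_active_connections'  # illustrative untyped gauge
+```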
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/google_cloud_platform.md b/src/go/plugin/go.d/modules/prometheus/integrations/google_cloud_platform.md
new file mode 100644
index 000000000..50fad9263
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/google_cloud_platform.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/google_cloud_platform.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Google Cloud Platform"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Google Cloud Platform
+
+
+<img src="https://netdata.cloud/img/gcp.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.
+
+
+Metrics are gathered by periodically sending HTTP requests to the [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
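+For example, assuming the exporter's series use a `gcp_` prefix, a job could collect only those series and skip the exporter's own Go runtime metrics (the prefix is an assumption, verify it against your endpoint):
+
+```yaml
+jobs:
+  - name: gcp_exporter_local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - gcp_*  # assumed metric name prefix
+      deny:
+        - go_*   # exporter's own Go runtime metrics
+```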
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/google_pagespeed.md b/src/go/plugin/go.d/modules/prometheus/integrations/google_pagespeed.md
new file mode 100644
index 000000000..a3a3ecefe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/google_pagespeed.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/google_pagespeed.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Google Pagespeed"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Google Pagespeed
+
+
+<img src="https://netdata.cloud/img/google.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to the [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
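+As a minimal sketch (the metric names below are hypothetical, not real pagespeed_exporter series), untyped series could be remapped per job like this:
+
+```yaml
+jobs:
+  - name: pagespeed_local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - 'pagespeed_*_score'     # hypothetical untyped gauge
+      counter:
+        - 'pagespeed_*_requests'  # hypothetical untyped counter
+```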
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/google_stackdriver.md b/src/go/plugin/go.d/modules/prometheus/integrations/google_stackdriver.md
new file mode 100644
index 000000000..ef8fc5734
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/google_stackdriver.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/google_stackdriver.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Google Stackdriver"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Google Stackdriver
+
+
+<img src="https://netdata.cloud/img/gcp-stackdriver.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.
+
+
+Metrics are gathered by periodically sending HTTP requests to the [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
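+For instance, assuming the exporter prefixes its series with `stackdriver_`, a job could be restricted to just that subset (the prefix and pattern are assumptions, adjust them to your endpoint's output):
+
+```yaml
+jobs:
+  - name: stackdriver_local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - stackdriver_*  # assumed metric name prefix
+```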
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gpsd.md b/src/go/plugin/go.d/modules/prometheus/integrations/gpsd.md
new file mode 100644
index 000000000..68a588515
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gpsd.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/gpsd.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "gpsd"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# gpsd
+
+
+<img src="https://netdata.cloud/img/gpsd.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to the [gpsd exporter](https://github.com/natesales/gpsd-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency, in seconds. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
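+
+For illustration, the snippet below reports untyped metrics ending in `_bytes` as gauges and those ending in `_requests` as counters. A minimal sketch, assuming hypothetical metric name patterns:
+
+```yaml
+jobs:
+  - name: local                          # placeholder job name
+    url: http://127.0.0.1:9090/metrics   # placeholder URL
+    fallback_type:
+      gauge:
+        - '*_bytes'      # hypothetical pattern: chart as gauge (absolute)
+      counter:
+        - '*_requests'   # hypothetical pattern: chart as counter (incremental)
+```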
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
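+
+If you prefer to keep certificate validation enabled, you can instead point the collector at the CA that signed the server's certificate. A minimal sketch, assuming a hypothetical CA bundle path:
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_ca: /etc/ssl/certs/my_ca.crt   # hypothetical path to the CA certificate
+```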
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/grafana.md b/src/go/plugin/go.d/modules/prometheus/integrations/grafana.md
new file mode 100644
index 000000000..2c0baa395
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/grafana.md
@@ -0,0 +1,321 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/grafana.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Grafana"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Observability"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Grafana
+
+
+<img src="https://netdata.cloud/img/grafana.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to Grafana's built-in Prometheus exporter.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the matched pattern, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
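+
+Grafana typically serves its Prometheus metrics on its own HTTP port (3000 by default) at `/metrics`, so a local job will usually point there. A minimal sketch, assuming a default Grafana installation:
+
+```yaml
+jobs:
+  - name: grafana
+    url: http://127.0.0.1:3000/metrics   # Grafana's default HTTP port is 3000
+```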
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/graylog_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/graylog_server.md
new file mode 100644
index 000000000..8888ae210
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/graylog_server.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/graylog_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Graylog Server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Logs Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Graylog Server
+
+
+<img src="https://netdata.cloud/img/graylog.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Graylog server metrics for efficient log management and analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to Graylog's built-in Prometheus exporter.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the matched pattern, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure built-in Prometheus exporter
+
+To configure the built-in Prometheus exporter, follow the [official documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/gtp.md b/src/go/plugin/go.d/modules/prometheus/integrations/gtp.md
new file mode 100644
index 000000000..edd3b3a56
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/gtp.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/gtp.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "GTP"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Telephony Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# GTP
+
+
+<img src="https://netdata.cloud/img/gtpu.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the matched pattern, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/halon.md b/src/go/plugin/go.d/modules/prometheus/integrations/halon.md
new file mode 100644
index 000000000..3a288e53b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/halon.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/halon.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Halon"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Mail Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Halon
+
+
+<img src="https://netdata.cloud/img/halon.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Halon email security and delivery metrics for optimized email management and protection.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the matched pattern, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hana.md b/src/go/plugin/go.d/modules/prometheus/integrations/hana.md
new file mode 100644
index 000000000..75d84fef6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hana.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/hana.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "HANA"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HANA
+
+
+<img src="https://netdata.cloud/img/sap.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track SAP HANA database metrics for efficient data storage and query performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the matched pattern, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hashicorp_vault_secrets.md b/src/go/plugin/go.d/modules/prometheus/integrations/hashicorp_vault_secrets.md
new file mode 100644
index 000000000..c619344d4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hashicorp_vault_secrets.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/hashicorp_vault_secrets.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "HashiCorp Vault secrets"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HashiCorp Vault secrets
+
+
+<img src="https://netdata.cloud/img/vault.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track HashiCorp Vault security assessment metrics for efficient secrets management and security.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the matched pattern, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
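+As a reference, here is a minimal sketch of how these global options sit at the top of `go.d/prometheus.conf` (the values below are illustrative, not recommendations):
+
+```yaml
+# Global options apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 30
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```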
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
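+Putting the two options together, here is a sketch of a single job that filters the scraped series and classifies some untyped ones as gauges. The URL and the metric name patterns are illustrative assumptions, not values taken from this exporter's documentation; adjust them to the names your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: vault_assessment
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - vault_*        # assumed metric name prefix
+      deny:
+        - '*_bucket'     # drop raw histogram bucket series
+    fallback_type:
+      gauge:
+        - vault_*_info   # hypothetical untyped metrics to report as gauges
+```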
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hasura_graphql_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/hasura_graphql_server.md
new file mode 100644
index 000000000..d95a9199b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hasura_graphql_server.md
@@ -0,0 +1,326 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/hasura_graphql_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Hasura GraphQL Server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Hasura GraphQL Server
+
+
+<img src="https://netdata.cloud/img/hasura.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Hasura GraphQL engine metrics for optimized API performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
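+As a reference, here is a minimal sketch of how these global options sit at the top of `go.d/prometheus.conf` (the values below are illustrative, not recommendations):
+
+```yaml
+# Global options apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 30
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```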
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
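+Putting the two options together, here is a sketch of a single job that filters the scraped series and classifies some untyped ones as gauges. The URL and the metric name patterns are illustrative assumptions, not values taken from this exporter's documentation; adjust them to the names your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: hasura
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - hasura_*        # assumed metric name prefix
+      deny:
+        - '*_bucket'      # drop raw histogram bucket series
+    fallback_type:
+      gauge:
+        - hasura_*_info   # hypothetical untyped metrics to report as gauges
+```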
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hdsentinel.md b/src/go/plugin/go.d/modules/prometheus/integrations/hdsentinel.md
new file mode 100644
index 000000000..1daad64a5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hdsentinel.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/hdsentinel.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "HDSentinel"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HDSentinel
+
+
+<img src="https://netdata.cloud/img/harddisk.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
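+As a reference, here is a minimal sketch of how these global options sit at the top of `go.d/prometheus.conf` (the values below are illustrative, not recommendations):
+
+```yaml
+# Global options apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 30
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```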
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
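+Putting the two options together, here is a sketch of a single job that filters the scraped series and classifies some untyped ones as gauges. The URL and the metric name patterns are illustrative assumptions, not values taken from this exporter's documentation; adjust them to the names your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: hdsentinel
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - hdsentinel_*        # assumed metric name prefix
+      deny:
+        - '*_bucket'          # drop raw histogram bucket series
+    fallback_type:
+      gauge:
+        - hdsentinel_*_info   # hypothetical untyped metrics to report as gauges
+```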
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md b/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md
new file mode 100644
index 000000000..6ce0d3348
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/helium_hotspot.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Helium hotspot"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Helium hotspot
+
+
+<img src="https://netdata.cloud/img/helium.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Helium hotspot metrics for optimized LoRaWAN network management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
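+As a reference, here is a minimal sketch of how these global options sit at the top of `go.d/prometheus.conf` (the values below are illustrative, not recommendations):
+
+```yaml
+# Global options apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 30
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```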
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
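+Putting the two options together, here is a sketch of a single job that filters the scraped series and classifies some untyped ones as gauges. The URL and the metric name patterns are illustrative assumptions, not values taken from this exporter's documentation; adjust them to the names your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: helium_hotspot
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - helium_*        # assumed metric name prefix
+      deny:
+        - '*_bucket'      # drop raw histogram bucket series
+    fallback_type:
+      gauge:
+        - helium_*_info   # hypothetical untyped metrics to report as gauges
+```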
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/helium_miner_validator.md b/src/go/plugin/go.d/modules/prometheus/integrations/helium_miner_validator.md
new file mode 100644
index 000000000..a8fdb2814
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/helium_miner_validator.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/helium_miner_validator.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Helium miner (validator)"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Blockchain Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Helium miner (validator)
+
+
+<img src="https://netdata.cloud/img/helium.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Helium miner and validator metrics for efficient blockchain performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
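+As a reference, here is a minimal sketch of how these global options sit at the top of `go.d/prometheus.conf` (the values below are illustrative, not recommendations):
+
+```yaml
+# Global options apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 30
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```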
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
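+Putting the two options together, here is a sketch of a single job that filters the scraped series and classifies some untyped ones as gauges. The URL and the metric name patterns are illustrative assumptions, not values taken from this exporter's documentation; adjust them to the names your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: helium_miner
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - miner_*        # assumed metric name prefix
+      deny:
+        - '*_bucket'     # drop raw histogram bucket series
+    fallback_type:
+      gauge:
+        - miner_*_info   # hypothetical untyped metrics to report as gauges
+```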
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hhvm.md b/src/go/plugin/go.d/modules/prometheus/integrations/hhvm.md
new file mode 100644
index 000000000..4201947be
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hhvm.md
@@ -0,0 +1,326 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/hhvm.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "HHVM"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HHVM
+
+
+<img src="https://netdata.cloud/img/hhvm.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor HipHop Virtual Machine metrics for efficient PHP execution and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
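+As a reference, here is a minimal sketch of how these global options sit at the top of `go.d/prometheus.conf` (the values below are illustrative, not recommendations):
+
+```yaml
+# Global options apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 30
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```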
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
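+Putting the two options together, here is a sketch of a single job that filters the scraped series and classifies some untyped ones as gauges. The URL and the metric name patterns are illustrative assumptions, not values taken from this exporter's documentation; adjust them to the names your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: hhvm
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - hhvm_*        # assumed metric name prefix
+      deny:
+        - '*_bucket'    # drop raw histogram bucket series
+    fallback_type:
+      gauge:
+        - hhvm_*_info   # hypothetical untyped metrics to report as gauges
+```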
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hitron_cgn_series_cpe.md b/src/go/plugin/go.d/modules/prometheus/integrations/hitron_cgn_series_cpe.md
new file mode 100644
index 000000000..069062f61
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hitron_cgn_series_cpe.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/hitron_cgn_series_cpe.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Hitron CGN series CPE"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Hitron CGN series CPE
+
+
+<img src="https://netdata.cloud/img/hitron.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Hitron CGNV4 gateway metrics for efficient network management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
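+As a minimal sketch of how global and per-job options fit together in `go.d/prometheus.conf` (the values are the defaults from the table below and are shown for illustration only; job-level settings typically override the global ones):
+
+```yaml
+# Global options: apply to every job unless overridden per job.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override (illustrative)
+```
+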
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
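+##### Custom HTTP headers
+
+The `headers` option from the table above can be set per job when the endpoint expects extra request headers. A minimal sketch; the header name and value are placeholders, not something this exporter is known to require.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      X-Api-Key: my-secret-key
+
+```
+</details>
+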
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hitron_coda_cable_modem.md b/src/go/plugin/go.d/modules/prometheus/integrations/hitron_coda_cable_modem.md
new file mode 100644
index 000000000..c62b7b24a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hitron_coda_cable_modem.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/hitron_coda_cable_modem.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Hitron CODA Cable Modem"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Hitron CODA Cable Modem
+
+
+<img src="https://netdata.cloud/img/hitron.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Hitron CODA cable modem metrics for optimized internet connectivity and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
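+As a minimal sketch of how global and per-job options fit together in `go.d/prometheus.conf` (the values are the defaults from the table below and are shown for illustration only; job-level settings typically override the global ones):
+
+```yaml
+# Global options: apply to every job unless overridden per job.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override (illustrative)
+```
+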
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
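+##### Custom HTTP headers
+
+The `headers` option from the table above can be set per job when the endpoint expects extra request headers. A minimal sketch; the header name and value are placeholders, not something this exporter is known to require.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      X-Api-Key: my-secret-key
+
+```
+</details>
+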
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/homebridge.md b/src/go/plugin/go.d/modules/prometheus/integrations/homebridge.md
new file mode 100644
index 000000000..ca56a7647
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/homebridge.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/homebridge.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Homebridge"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Homebridge
+
+
+<img src="https://netdata.cloud/img/homebridge.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Homebridge smart home metrics for efficient home automation management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
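+As a minimal sketch of how global and per-job options fit together in `go.d/prometheus.conf` (the values are the defaults from the table below and are shown for illustration only; job-level settings typically override the global ones):
+
+```yaml
+# Global options: apply to every job unless overridden per job.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override (illustrative)
+```
+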
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
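+##### Custom HTTP headers
+
+The `headers` option from the table above can be set per job when the endpoint expects extra request headers. A minimal sketch; the header name and value are placeholders, not something this exporter is known to require.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      X-Api-Key: my-secret-key
+
+```
+</details>
+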
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/homey.md b/src/go/plugin/go.d/modules/prometheus/integrations/homey.md
new file mode 100644
index 000000000..b17aae574
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/homey.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/homey.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Homey"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Homey
+
+
+<img src="https://netdata.cloud/img/homey.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Homey smart home controller metrics for efficient home automation and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
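+As a minimal sketch of how global and per-job options fit together in `go.d/prometheus.conf` (the values are the defaults from the table below and are shown for illustration only; job-level settings typically override the global ones):
+
+```yaml
+# Global options: apply to every job unless overridden per job.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override (illustrative)
+```
+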
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
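+##### Custom HTTP headers
+
+The `headers` option from the table above can be set per job when the endpoint expects extra request headers. A minimal sketch; the header name and value are placeholders, not something this exporter is known to require.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      X-Api-Key: my-secret-key
+
+```
+</details>
+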
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/honeypot.md b/src/go/plugin/go.d/modules/prometheus/integrations/honeypot.md
new file mode 100644
index 000000000..28fdf70b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/honeypot.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/honeypot.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Honeypot"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Honeypot
+
+
+<img src="https://netdata.cloud/img/intrinsec.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor honeypot metrics for efficient threat detection and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
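+As a minimal sketch of how global and per-job options fit together in `go.d/prometheus.conf` (the values are the defaults from the table below and are shown for illustration only; job-level settings typically override the global ones):
+
+```yaml
+# Global options: apply to every job unless overridden per job.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override (illustrative)
+```
+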
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
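+##### Custom HTTP headers
+
+The `headers` option from the table above can be set per job when the endpoint expects extra request headers. A minimal sketch; the header name and value are placeholders, not something this exporter is known to require.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      X-Api-Key: my-secret-key
+
+```
+</details>
+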
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hp_ilo.md b/src/go/plugin/go.d/modules/prometheus/integrations/hp_ilo.md
new file mode 100644
index 000000000..54de557cb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hp_ilo.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/hp_ilo.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "HP iLO"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HP iLO
+
+
+<img src="https://netdata.cloud/img/hp.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor HP Integrated Lights Out (iLO) metrics for efficient server management and diagnostics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
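+As a minimal sketch of how global and per-job options fit together in `go.d/prometheus.conf` (the values are the defaults from the table below and are shown for illustration only; job-level settings typically override the global ones):
+
+```yaml
+# Global options: apply to every job unless overridden per job.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override (illustrative)
+```
+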
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
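+##### Custom HTTP headers
+
+The `headers` option from the table above can be set per job when the endpoint expects extra request headers. A minimal sketch; the header name and value are placeholders, not something this exporter is known to require.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      X-Api-Key: my-secret-key
+
+```
+</details>
+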
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/huawei_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/huawei_devices.md
new file mode 100644
index 000000000..2f1e95733
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/huawei_devices.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/huawei_devices.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Huawei devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Huawei devices
+
+
+<img src="https://netdata.cloud/img/huawei.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Huawei HiLink device metrics for optimized connectivity and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
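+
+As a minimal sketch, assume a hypothetical application exposes an untyped metric named `myapp_temperature_celsius`. None of the rules above match it, so it would be ignored unless it is listed under the `fallback_type` option (see the Configuration section below):
+
+```yaml
+fallback_type:
+  gauge:
+    - myapp_temperature_celsius
+```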
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
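+
+For instance, a hypothetical selector that keeps every `app_*` series except `app_debug_*` (the metric names are purely illustrative) could look like:
+
+```yaml
+selector:
+  allow:
+    - app_*
+  deny:
+    - app_debug_*
+```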
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/hubble.md b/src/go/plugin/go.d/modules/prometheus/integrations/hubble.md
new file mode 100644
index 000000000..36bd86d69
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/hubble.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/hubble.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Hubble"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Observability"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Hubble
+
+
+<img src="https://netdata.cloud/img/hubble.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Hubble network observability metrics for efficient network visibility and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to Hubble's built-in Prometheus exporter.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
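+
+As a minimal sketch, assume a hypothetical application exposes an untyped metric named `myapp_temperature_celsius`. None of the rules above match it, so it would be ignored unless it is listed under the `fallback_type` option (see the Configuration section below):
+
+```yaml
+fallback_type:
+  gauge:
+    - myapp_temperature_celsius
+```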
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Configure built-in Prometheus exporter
+
+To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
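+
+For instance, a hypothetical selector that keeps every `app_*` series except `app_debug_*` (the metric names are purely illustrative) could look like:
+
+```yaml
+selector:
+  allow:
+    - app_*
+  deny:
+    - app_debug_*
+```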
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_aix_systems_njmon.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_aix_systems_njmon.md
new file mode 100644
index 000000000..5a4499e6a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_aix_systems_njmon.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ibm_aix_systems_njmon.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "IBM AIX systems Njmon"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IBM AIX systems Njmon
+
+
+<img src="https://netdata.cloud/img/ibm.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
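+
+As a minimal sketch, assume a hypothetical application exposes an untyped metric named `myapp_temperature_celsius`. None of the rules above match it, so it would be ignored unless it is listed under the `fallback_type` option (see the Configuration section below):
+
+```yaml
+fallback_type:
+  gauge:
+    - myapp_temperature_celsius
+```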
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
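+
+For instance, a hypothetical selector that keeps every `app_*` series except `app_debug_*` (the metric names are purely illustrative) could look like:
+
+```yaml
+selector:
+  allow:
+    - app_*
+  deny:
+    - app_debug_*
+```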
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md
new file mode 100644
index 000000000..f32cdd0c4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ibm_cryptoexpress_cex_cards.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "IBM CryptoExpress (CEX) cards"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IBM CryptoExpress (CEX) cards
+
+
+<img src="https://netdata.cloud/img/ibm.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track IBM Z Crypto Express device metrics for optimized cryptographic performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
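+
+As a minimal sketch, assume a hypothetical application exposes an untyped metric named `myapp_temperature_celsius`. None of the rules above match it, so it would be ignored unless it is listed under the `fallback_type` option (see the Configuration section below):
+
+```yaml
+fallback_type:
+  gauge:
+    - myapp_temperature_celsius
+```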
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
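+
+For instance, a hypothetical selector that keeps every `app_*` series except `app_debug_*` (the metric names are purely illustrative) could look like:
+
+```yaml
+selector:
+  allow:
+    - app_*
+  deny:
+    - app_debug_*
+```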
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_mq.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_mq.md
new file mode 100644
index 000000000..d41219bbb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_mq.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ibm_mq.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "IBM MQ"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IBM MQ
+
+
+<img src="https://netdata.cloud/img/ibm.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on IBM MQ message queue metrics for efficient message transport and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
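+
+As a minimal sketch, assume a hypothetical application exposes an untyped metric named `myapp_temperature_celsius`. None of the rules above match it, so it would be ignored unless it is listed under the `fallback_type` option (see the Configuration section below):
+
+```yaml
+fallback_type:
+  gauge:
+    - myapp_temperature_celsius
+```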
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
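+
+For instance, a hypothetical selector that keeps every `app_*` series except `app_debug_*` (the metric names are purely illustrative) could look like:
+
+```yaml
+selector:
+  allow:
+    - app_*
+  deny:
+    - app_debug_*
+```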
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum.md
new file mode 100644
index 000000000..edffab950
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "IBM Spectrum"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IBM Spectrum
+
+
+<img src="https://netdata.cloud/img/ibm.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor IBM Spectrum storage metrics for efficient data management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
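+
+As a minimal sketch, assume a hypothetical application exposes an untyped metric named `myapp_temperature_celsius`. None of the rules above match it, so it would be ignored unless it is listed under the `fallback_type` option (see the Configuration section below):
+
+```yaml
+fallback_type:
+  gauge:
+    - myapp_temperature_celsius
+```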
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
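+For illustration, a hypothetical job that collects only series whose names start with `node_`, except the exporter's own scrape bookkeeping series, could look like this (the glob patterns are placeholders, not names exposed by any particular exporter):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_*                    # keep all node_* series (placeholder pattern)
+      deny:
+        - node_scrape_collector_*   # drop the exporter's own bookkeeping series
+```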
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
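+As an illustration, suppose the endpoint exposes untyped series named `myapp_requests` and `myapp_temperature_celsius` (hypothetical names used only as an example). They could be charted as a counter and a gauge respectively:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests            # untyped series charted incrementally (hypothetical name)
+      gauge:
+        - myapp_*_celsius           # untyped series charted as absolute values (hypothetical pattern)
+```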
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum_virtualize.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum_virtualize.md
new file mode 100644
index 000000000..5d3dab9e7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum_virtualize.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ibm_spectrum_virtualize.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "IBM Spectrum Virtualize"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IBM Spectrum Virtualize
+
+
+<img src="https://netdata.cloud/img/ibm.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
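+For illustration, a hypothetical job that collects only series whose names start with `node_`, except the exporter's own scrape bookkeeping series, could look like this (the glob patterns are placeholders, not names exposed by any particular exporter):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_*                    # keep all node_* series (placeholder pattern)
+      deny:
+        - node_scrape_collector_*   # drop the exporter's own bookkeeping series
+```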
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
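+As an illustration, suppose the endpoint exposes untyped series named `myapp_requests` and `myapp_temperature_celsius` (hypothetical names used only as an example). They could be charted as a counter and a gauge respectively:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests            # untyped series charted incrementally (hypothetical name)
+      gauge:
+        - myapp_*_celsius           # untyped series charted as absolute values (hypothetical pattern)
+```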
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ibm_z_hardware_management_console.md b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_z_hardware_management_console.md
new file mode 100644
index 000000000..5cca9c2ae
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ibm_z_hardware_management_console.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ibm_z_hardware_management_console.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "IBM Z Hardware Management Console"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IBM Z Hardware Management Console
+
+
+<img src="https://netdata.cloud/img/ibm.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
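+For illustration, a hypothetical job that collects only series whose names start with `node_`, except the exporter's own scrape bookkeeping series, could look like this (the glob patterns are placeholders, not names exposed by any particular exporter):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_*                    # keep all node_* series (placeholder pattern)
+      deny:
+        - node_scrape_collector_*   # drop the exporter's own bookkeeping series
+```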
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
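+As an illustration, suppose the endpoint exposes untyped series named `myapp_requests` and `myapp_temperature_celsius` (hypothetical names used only as an example). They could be charted as a counter and a gauge respectively:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests            # untyped series charted incrementally (hypothetical name)
+      gauge:
+        - myapp_*_celsius           # untyped series charted as absolute values (hypothetical pattern)
+```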
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/influxdb.md b/src/go/plugin/go.d/modules/prometheus/integrations/influxdb.md
new file mode 100644
index 000000000..817144efb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/influxdb.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/influxdb.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "InfluxDB"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# InfluxDB
+
+
+<img src="https://netdata.cloud/img/influxdb.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor InfluxDB time-series database metrics for efficient data storage and query performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
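+For illustration, a hypothetical job that collects only series whose names start with `node_`, except the exporter's own scrape bookkeeping series, could look like this (the glob patterns are placeholders, not names exposed by any particular exporter):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_*                    # keep all node_* series (placeholder pattern)
+      deny:
+        - node_scrape_collector_*   # drop the exporter's own bookkeeping series
+```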
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
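+As an illustration, suppose the endpoint exposes untyped series named `myapp_requests` and `myapp_temperature_celsius` (hypothetical names used only as an example). They could be charted as a counter and a gauge respectively:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests            # untyped series charted incrementally (hypothetical name)
+      gauge:
+        - myapp_*_celsius           # untyped series charted as absolute values (hypothetical pattern)
+```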
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/iota_full_node.md b/src/go/plugin/go.d/modules/prometheus/integrations/iota_full_node.md
new file mode 100644
index 000000000..74ba5a3ef
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/iota_full_node.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/iota_full_node.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "IOTA full node"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Blockchain Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IOTA full node
+
+
+<img src="https://netdata.cloud/img/iota.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
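+For illustration, a hypothetical job that collects only series whose names start with `node_`, except the exporter's own scrape bookkeeping series, could look like this (the glob patterns are placeholders, not names exposed by any particular exporter):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_*                    # keep all node_* series (placeholder pattern)
+      deny:
+        - node_scrape_collector_*   # drop the exporter's own bookkeeping series
+```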
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
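+As an illustration, suppose the endpoint exposes untyped series named `myapp_requests` and `myapp_temperature_celsius` (hypothetical names used only as an example). They could be charted as a counter and a gauge respectively:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests            # untyped series charted incrementally (hypothetical name)
+      gauge:
+        - myapp_*_celsius           # untyped series charted as absolute values (hypothetical pattern)
+```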
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ipmi_by_soundcloud.md b/src/go/plugin/go.d/modules/prometheus/integrations/ipmi_by_soundcloud.md
new file mode 100644
index 000000000..52966c728
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ipmi_by_soundcloud.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ipmi_by_soundcloud.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "IPMI (By SoundCloud)"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# IPMI (By SoundCloud)
+
+
+<img src="https://netdata.cloud/img/soundcloud.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor IPMI metrics externally for efficient server hardware management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see `fallback_type` below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
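+For illustration, a hypothetical job that collects only series whose names start with `node_`, except the exporter's own scrape bookkeeping series, could look like this (the glob patterns are placeholders, not names exposed by any particular exporter):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - node_*                    # keep all node_* series (placeholder pattern)
+      deny:
+        - node_scrape_collector_*   # drop the exporter's own bookkeeping series
+```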
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
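+As an illustration, suppose the endpoint exposes untyped series named `myapp_requests` and `myapp_temperature_celsius` (hypothetical names used only as an example). They could be charted as a counter and a gauge respectively:
+
+```yaml
+jobs:
+  - name: myapp
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      counter:
+        - myapp_requests            # untyped series charted incrementally (hypothetical name)
+      gauge:
+        - myapp_*_celsius           # untyped series charted as absolute values (hypothetical pattern)
+```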
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md b/src/go/plugin/go.d/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md
new file mode 100644
index 000000000..9e2ed89a5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/iqair_airvisual_air_quality_monitors.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "iqAir AirVisual air quality monitors"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# iqAir AirVisual air quality monitors
+
+
+<img src="https://netdata.cloud/img/iqair.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor air quality data from IQAir devices for efficient environmental monitoring and analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
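+A minimal sketch of how these global options can sit in `go.d/prometheus.conf`; top-level values apply to every job unless a job overrides them, and the job shown here is only a placeholder:
+
+```yaml
+# go.d/prometheus.conf (sketch)
+update_every: 10          # global: applies to all jobs below
+autodetection_retry: 0    # global: zero disables rechecking
+
+jobs:
+  - name: local           # placeholder job
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5       # a job-level value overrides the global one
+```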
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
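+
+For example, a job-level selector could look like the sketch below; the metric name patterns are hypothetical placeholders, adjust them to the series your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - iqair_*          # hypothetical: keep only series with this prefix
+      deny:
+        - iqair_debug_*    # hypothetical: drop an unwanted subset of them
+```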
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jarvis_standing_desk.md b/src/go/plugin/go.d/modules/prometheus/integrations/jarvis_standing_desk.md
new file mode 100644
index 000000000..cd392a297
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/jarvis_standing_desk.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/jarvis_standing_desk.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Jarvis Standing Desk"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Jarvis Standing Desk
+
+
+<img src="https://netdata.cloud/img/jarvis.jpg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Jarvis standing desk usage metrics for efficient workspace ergonomics and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
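+A minimal sketch of how these global options can sit in `go.d/prometheus.conf`; top-level values apply to every job unless a job overrides them, and the job shown here is only a placeholder:
+
+```yaml
+# go.d/prometheus.conf (sketch)
+update_every: 10          # global: applies to all jobs below
+autodetection_retry: 0    # global: zero disables rechecking
+
+jobs:
+  - name: local           # placeholder job
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5       # a job-level value overrides the global one
+```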
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
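+
+For example, a job-level selector could look like the sketch below; the metric name patterns are hypothetical placeholders, adjust them to the series your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - jarvis_*          # hypothetical: keep only series with this prefix
+      deny:
+        - jarvis_debug_*    # hypothetical: drop an unwanted subset of them
+```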
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md b/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md
new file mode 100644
index 000000000..203ae3d69
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/jenkins.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Jenkins"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/CICD Platforms"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Jenkins
+
+
+<img src="https://netdata.cloud/img/jenkins.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Jenkins continuous integration server metrics for efficient development and build management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
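+A minimal sketch of how these global options can sit in `go.d/prometheus.conf`; top-level values apply to every job unless a job overrides them, and the job shown here is only a placeholder:
+
+```yaml
+# go.d/prometheus.conf (sketch)
+update_every: 10          # global: applies to all jobs below
+autodetection_retry: 0    # global: zero disables rechecking
+
+jobs:
+  - name: local           # placeholder job
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5       # a job-level value overrides the global one
+```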
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
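+
+For example, a job-level selector could look like the sketch below; the metric name patterns are hypothetical placeholders, adjust them to the series your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - jenkins_*          # hypothetical: keep only series with this prefix
+      deny:
+        - jenkins_debug_*    # hypothetical: drop an unwanted subset of them
+```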
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jetbrains_floating_license_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/jetbrains_floating_license_server.md
new file mode 100644
index 000000000..cde4e22a6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/jetbrains_floating_license_server.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/jetbrains_floating_license_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "JetBrains Floating License Server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# JetBrains Floating License Server
+
+
+<img src="https://netdata.cloud/img/jetbrains.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor JetBrains floating license server metrics for efficient software licensing management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [JetBrains Floating License Server Export](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
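+A minimal sketch of how these global options can sit in `go.d/prometheus.conf`; top-level values apply to every job unless a job overrides them, and the job shown here is only a placeholder:
+
+```yaml
+# go.d/prometheus.conf (sketch)
+update_every: 10          # global: applies to all jobs below
+autodetection_retry: 0    # global: zero disables rechecking
+
+jobs:
+  - name: local           # placeholder job
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5       # a job-level value overrides the global one
+```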
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
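+
+For example, a job-level selector could look like the sketch below; the metric name patterns are hypothetical placeholders, adjust them to the series your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - fls_*          # hypothetical: keep only series with this prefix
+      deny:
+        - fls_debug_*    # hypothetical: drop an unwanted subset of them
+```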
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jmx.md b/src/go/plugin/go.d/modules/prometheus/integrations/jmx.md
new file mode 100644
index 000000000..6813a8087
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/jmx.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/jmx.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "JMX"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# JMX
+
+
+<img src="https://netdata.cloud/img/java.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Java Management Extensions (JMX) metrics for efficient Java application management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
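+A minimal sketch of how these global options can sit in `go.d/prometheus.conf`; top-level values apply to every job unless a job overrides them, and the job shown here is only a placeholder:
+
+```yaml
+# go.d/prometheus.conf (sketch)
+update_every: 10          # global: applies to all jobs below
+autodetection_retry: 0    # global: zero disables rechecking
+
+jobs:
+  - name: local           # placeholder job
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5       # a job-level value overrides the global one
+```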
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
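+
+For example, a job-level selector could look like the sketch below; the metric name patterns are hypothetical placeholders, adjust them to the series your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - jvm_*          # hypothetical: keep only series with this prefix
+      deny:
+        - jvm_debug_*    # hypothetical: drop an unwanted subset of them
+```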
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/jolokia.md b/src/go/plugin/go.d/modules/prometheus/integrations/jolokia.md
new file mode 100644
index 000000000..187b40be1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/jolokia.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/jolokia.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "jolokia"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# jolokia
+
+
+<img src="https://netdata.cloud/img/jolokia.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Jolokia JVM metrics for optimized Java application performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
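+A minimal sketch of how these global options can sit in `go.d/prometheus.conf`; top-level values apply to every job unless a job overrides them, and the job shown here is only a placeholder:
+
+```yaml
+# go.d/prometheus.conf (sketch)
+update_every: 10          # global: applies to all jobs below
+autodetection_retry: 0    # global: zero disables rechecking
+
+jobs:
+  - name: local           # placeholder job
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5       # a job-level value overrides the global one
+```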
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout, in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
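+
+For example, a job-level selector could look like the sketch below; the metric name patterns are hypothetical placeholders, adjust them to the series your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - jolokia_*          # hypothetical: keep only series with this prefix
+      deny:
+        - jolokia_debug_*    # hypothetical: drop an unwanted subset of them
+```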
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application provides its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/journald.md b/src/go/plugin/go.d/modules/prometheus/integrations/journald.md
new file mode 100644
index 000000000..0d016ad21
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/journald.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/journald.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "journald"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Logs Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# journald
+
+
+<img src="https://netdata.cloud/img/linux.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on systemd-journald metrics for efficient log management and analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
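+
+As a sketch, the selector below keeps the exporter's own time series while dropping Go runtime internals; the patterns are illustrative, not a recommended filter:
+
+```yaml
+selector:
+  allow:
+    # keep only the exporter's own metrics (illustrative pattern)
+    - journald_*
+  deny:
+    # drop Go runtime internals often exposed alongside them (illustrative pattern)
+    - go_*
+```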
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kafka.md b/src/go/plugin/go.d/modules/prometheus/integrations/kafka.md
new file mode 100644
index 000000000..fb328f740
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kafka.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/kafka.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Kafka"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kafka
+
+
+<img src="https://netdata.cloud/img/kafka.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Kafka message queue metrics for optimized data streaming and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
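+
+For example, a global value can be set at the top level of `go.d/prometheus.conf` and overridden for an individual job (a minimal sketch; the values and port are illustrative):
+
+```yaml
+# applies to every job unless a job overrides it
+update_every: 10
+
+jobs:
+  - name: kafka_local
+    url: http://127.0.0.1:9308/metrics
+    # this job overrides the global value
+    update_every: 5
+```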
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kafka_connect.md b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_connect.md
new file mode 100644
index 000000000..c28c90f49
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_connect.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/kafka_connect.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Kafka Connect"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kafka Connect
+
+
+<img src="https://netdata.cloud/img/kafka.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Kafka Connect metrics for efficient data streaming and integration.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
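+
+##### Custom HTTP headers
+
+> **Note**: This is a sketch; the header name and value are illustrative, adjust them to whatever your exporter or reverse proxy expects.
+
+Sending additional HTTP headers with each scrape request.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      X-Scrape-Source: netdata
+```
+</details>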
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kafka_consumer_lag.md b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_consumer_lag.md
new file mode 100644
index 000000000..6003d3af9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_consumer_lag.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/kafka_consumer_lag.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Kafka Consumer Lag"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Service Discovery / Registry"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kafka Consumer Lag
+
+
+<img src="https://netdata.cloud/img/kafka.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Kafka consumer lag metrics for efficient message queue management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
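+
+##### Adjust time series limits
+
+> **Note**: This is a sketch; the limits and timeout below are illustrative, tune them to the size of your endpoint.
+
+Raising the global and per-metric time series limits, and the request timeout, for a large endpoint.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    max_time_series: 5000
+    max_time_series_per_metric: 500
+    timeout: 20
+```
+</details>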
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kafka_zookeeper.md b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_zookeeper.md
new file mode 100644
index 000000000..cbf799ca3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kafka_zookeeper.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/kafka_zookeeper.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Kafka ZooKeeper"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kafka ZooKeeper
+
+
+<img src="https://netdata.cloud/img/kafka.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Kafka ZooKeeper metrics for optimized distributed coordination and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
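+
+##### HTTPS with a custom certificate authority
+
+> **Note**: This is a sketch; the file path is illustrative, point it at your own CA bundle.
+
+Verifying the server certificate against a custom CA instead of disabling verification.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_ca: /etc/ssl/certs/my_ca.crt
+```
+</details>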
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kannel.md b/src/go/plugin/go.d/modules/prometheus/integrations/kannel.md
new file mode 100644
index 000000000..a2264e9d9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kannel.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/kannel.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Kannel"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Telephony Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kannel
+
+
+<img src="https://netdata.cloud/img/kannel.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
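+
+- If Netdata runs in a Docker container, the same check can be done inside the container. A minimal sketch, assuming the default image layout and a container named `netdata`:
+
+  ```bash
+  docker exec -it netdata bash
+  # inside the container:
+  /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+  ```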
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/keepalived.md b/src/go/plugin/go.d/modules/prometheus/integrations/keepalived.md
new file mode 100644
index 000000000..aeb0d99b0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/keepalived.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/keepalived.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Keepalived"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Keepalived
+
+
+<img src="https://netdata.cloud/img/keepalived.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Keepalived metrics for efficient high-availability and load balancing management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
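+
+For illustration, consider an endpoint that exposes the following samples without any '# TYPE' lines (the metric names are made up):
+
+```
+requests_total 100
+request_duration{quantile="0.9"} 0.35
+request_size_bucket{le="1024"} 7
+temperature 21.5
+```
+
+With the rules above, `requests_total` is handled as a Counter (suffix '_total'), `request_duration` as a Summary ('quantile' label), `request_size_bucket` as a Histogram ('le' label), and `temperature` is ignored unless it matches a `fallback_type` pattern.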
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md b/src/go/plugin/go.d/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md
new file mode 100644
index 000000000..759ce0cbe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/kubernetes_cluster_cloud_cost.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Kubernetes Cluster Cloud Cost"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Kubernetes"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Kubernetes Cluster Cloud Cost
+
+
+<img src="https://netdata.cloud/img/kubernetes.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
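+
+For example, a hypothetical job that treats all Untyped `cost_*` metrics as gauges could look like this (the pattern is illustrative, not taken from the exporter):
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - cost_*
+```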
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/lagerist_disk_latency.md b/src/go/plugin/go.d/modules/prometheus/integrations/lagerist_disk_latency.md
new file mode 100644
index 000000000..73019995c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/lagerist_disk_latency.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/lagerist_disk_latency.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Lagerist Disk latency"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Lagerist Disk latency
+
+
+<img src="https://netdata.cloud/img/linux.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track disk latency metrics for efficient storage performance and diagnostics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
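+
+##### Behind an HTTP proxy
+
+A minimal sketch combining the documented proxy options in a single job; the proxy address and credentials are placeholders.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    proxy_url: http://proxy.example.com:3128
+    proxy_username: proxyuser
+    proxy_password: proxypass
+
+```
+</details>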
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ldap.md b/src/go/plugin/go.d/modules/prometheus/integrations/ldap.md
new file mode 100644
index 000000000..705d1e198
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ldap.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ldap.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "LDAP"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# LDAP
+
+
+<img src="https://netdata.cloud/img/ldap.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
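+
+##### Custom HTTP headers
+
+A minimal sketch, assuming the endpoint expects an extra request header (for example a bearer token); the header name and value are placeholders.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    headers:
+      Authorization: Bearer my-secret-token
+
+```
+</details>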
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/linode.md b/src/go/plugin/go.d/modules/prometheus/integrations/linode.md
new file mode 100644
index 000000000..eff67ae75
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/linode.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/linode.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Linode"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Linode
+
+
+<img src="https://netdata.cloud/img/linode.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Linode cloud hosting metrics for efficient virtual server management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
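+
+##### Adjusting collection frequency
+
+A minimal sketch overriding the documented `update_every` and `timeout` defaults for a single job; the values are illustrative.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5
+    timeout: 2
+
+```
+</details>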
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/loki.md b/src/go/plugin/go.d/modules/prometheus/integrations/loki.md
new file mode 100644
index 000000000..002634a10
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/loki.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/loki.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "loki"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Logs Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# loki
+
+
+<img src="https://netdata.cloud/img/loki.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Loki metrics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Loki
+
+Install [loki](https://github.com/grafana/loki) according to its documentation.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type (Counter or Gauge) for Untyped metrics. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/lustre_metadata.md b/src/go/plugin/go.d/modules/prometheus/integrations/lustre_metadata.md
new file mode 100644
index 000000000..2fe27331e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/lustre_metadata.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/lustre_metadata.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Lustre metadata"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Lustre metadata
+
+
+<img src="https://netdata.cloud/img/lustre.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Lustre clustered file system for efficient management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the sample scrape after this list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
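+
+For illustration, here is a hypothetical scrape (made-up metric names) and how the rules above classify each sample:
+
+```
+# ends in '_total' -> collected as a Counter
+app_requests_total 42
+# has a 'quantile' label -> collected as a Summary
+app_latency_seconds{quantile="0.5"} 0.01
+# has an 'le' label -> collected as a Histogram
+app_latency_seconds_bucket{le="0.1"} 7
+# no rule matches -> ignored unless 'fallback_type' maps it to a Counter or Gauge
+app_queue_depth 3
+```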
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
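+
+As a minimal sketch (assuming the standard go.d layout, where top-level values apply to every job unless a job overrides them), the global options sit at the top of `go.d/prometheus.conf`:
+
+```yaml
+# Illustrative global defaults for all jobs in this file.
+# update_every: 10 matches the documented default; autodetection_retry: 60 schedules
+# a detection recheck every 60 seconds (the documented default is 0, no recheck).
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```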
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/lynis_audit_reports.md b/src/go/plugin/go.d/modules/prometheus/integrations/lynis_audit_reports.md
new file mode 100644
index 000000000..47b87c2d3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/lynis_audit_reports.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/lynis_audit_reports.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Lynis audit reports"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Lynis audit reports
+
+
+<img src="https://netdata.cloud/img/lynis.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Lynis security auditing tool metrics for efficient system security and compliance management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the sample scrape after this list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
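+
+For illustration, here is a hypothetical scrape (made-up metric names) and how the rules above classify each sample:
+
+```
+# ends in '_total' -> collected as a Counter
+app_requests_total 42
+# has a 'quantile' label -> collected as a Summary
+app_latency_seconds{quantile="0.5"} 0.01
+# has an 'le' label -> collected as a Histogram
+app_latency_seconds_bucket{le="0.1"} 7
+# no rule matches -> ignored unless 'fallback_type' maps it to a Counter or Gauge
+app_queue_depth 3
+```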
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
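+
+As a minimal sketch (assuming the standard go.d layout, where top-level values apply to every job unless a job overrides them), the global options sit at the top of `go.d/prometheus.conf`:
+
+```yaml
+# Illustrative global defaults for all jobs in this file.
+# update_every: 10 matches the documented default; autodetection_retry: 60 schedules
+# a detection recheck every 60 seconds (the documented default is 0, no recheck).
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```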
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/machbase.md b/src/go/plugin/go.d/modules/prometheus/integrations/machbase.md
new file mode 100644
index 000000000..23e928296
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/machbase.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/machbase.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Machbase"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Machbase
+
+
+<img src="https://netdata.cloud/img/machbase.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Machbase time-series database metrics for efficient data storage and query performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the sample scrape after this list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
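+
+For illustration, here is a hypothetical scrape (made-up metric names) and how the rules above classify each sample:
+
+```
+# ends in '_total' -> collected as a Counter
+app_requests_total 42
+# has a 'quantile' label -> collected as a Summary
+app_latency_seconds{quantile="0.5"} 0.01
+# has an 'le' label -> collected as a Histogram
+app_latency_seconds_bucket{le="0.1"} 7
+# no rule matches -> ignored unless 'fallback_type' maps it to a Counter or Gauge
+app_queue_depth 3
+```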
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
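+
+As a minimal sketch (assuming the standard go.d layout, where top-level values apply to every job unless a job overrides them), the global options sit at the top of `go.d/prometheus.conf`:
+
+```yaml
+# Illustrative global defaults for all jobs in this file.
+# update_every: 10 matches the documented default; autodetection_retry: 60 schedules
+# a detection recheck every 60 seconds (the documented default is 0, no recheck).
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```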
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/maildir.md b/src/go/plugin/go.d/modules/prometheus/integrations/maildir.md
new file mode 100644
index 000000000..a7c106e83
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/maildir.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/maildir.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Maildir"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Mail Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Maildir
+
+
+<img src="https://netdata.cloud/img/mailserver.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track mail server metrics for optimized email management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the sample scrape after this list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
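+
+For illustration, here is a hypothetical scrape (made-up metric names) and how the rules above classify each sample:
+
+```
+# ends in '_total' -> collected as a Counter
+app_requests_total 42
+# has a 'quantile' label -> collected as a Summary
+app_latency_seconds{quantile="0.5"} 0.01
+# has an 'le' label -> collected as a Histogram
+app_latency_seconds_bucket{le="0.1"} 7
+# no rule matches -> ignored unless 'fallback_type' maps it to a Counter or Gauge
+app_queue_depth 3
+```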
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
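+
+As a minimal sketch (assuming the standard go.d layout, where top-level values apply to every job unless a job overrides them), the global options sit at the top of `go.d/prometheus.conf`:
+
+```yaml
+# Illustrative global defaults for all jobs in this file.
+# update_every: 10 matches the documented default; autodetection_retry: 60 schedules
+# a detection recheck every 60 seconds (the documented default is 0, no recheck).
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```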
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/meilisearch.md b/src/go/plugin/go.d/modules/prometheus/integrations/meilisearch.md
new file mode 100644
index 000000000..60cad4a91
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/meilisearch.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/meilisearch.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Meilisearch"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Search Engines"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Meilisearch
+
+
+<img src="https://netdata.cloud/img/meilisearch.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Meilisearch search engine metrics for efficient search performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the sample scrape after this list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
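+
+For illustration, here is a hypothetical scrape (made-up metric names) and how the rules above classify each sample:
+
+```
+# ends in '_total' -> collected as a Counter
+app_requests_total 42
+# has a 'quantile' label -> collected as a Summary
+app_latency_seconds{quantile="0.5"} 0.01
+# has an 'le' label -> collected as a Histogram
+app_latency_seconds_bucket{le="0.1"} 7
+# no rule matches -> ignored unless 'fallback_type' maps it to a Counter or Gauge
+app_queue_depth 3
+```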
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
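+
+As a minimal sketch (assuming the standard go.d layout, where top-level values apply to every job unless a job overrides them), the global options sit at the top of `go.d/prometheus.conf`:
+
+```yaml
+# Illustrative global defaults for all jobs in this file.
+# update_every: 10 matches the documented default; autodetection_retry: 60 schedules
+# a detection recheck every 60 seconds (the documented default is 0, no recheck).
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```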
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md b/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md
new file mode 100644
index 000000000..45acae167
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/memcached_community.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Memcached (community)"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Memcached (community)
+
+
+<img src="https://netdata.cloud/img/memcached.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Memcached in-memory key-value store metrics for efficient caching performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has a 'le' label.
+
+**All other untyped metrics are ignored**.
+
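+For illustration, here is how the rules above would apply to two hypothetical untyped samples in an exporter's text output (the metric names below are made up and not part of any real exporter):
+
+```bash
+# Fetch the exposition text and look at two hypothetical untyped metrics:
+#   myapp_requests_total 1027   <- no '# TYPE', name ends in '_total' -> collected as Counter
+#   myapp_queue_depth 42        <- no '# TYPE', no matching rule      -> ignored unless 'fallback_type' matches it
+curl -s http://127.0.0.1:9090/metrics | grep -E '^myapp_'
+```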
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Memcached exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/meraki_dashboard.md b/src/go/plugin/go.d/modules/prometheus/integrations/meraki_dashboard.md
new file mode 100644
index 000000000..28626195a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/meraki_dashboard.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/meraki_dashboard.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Meraki dashboard"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Meraki dashboard
+
+
+<img src="https://netdata.cloud/img/meraki.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to the [Meraki dashboard data exporter](https://github.com/TheHolm/meraki-dashboard-promethus-exporter), which collects data from the Meraki dashboard API.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has a 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install the [Meraki dashboard data exporter](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mesos.md b/src/go/plugin/go.d/modules/prometheus/integrations/mesos.md
new file mode 100644
index 000000000..c1f7cd0ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mesos.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/mesos.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Mesos"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Task Queues"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Mesos
+
+
+<img src="https://netdata.cloud/img/mesos.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Apache Mesos cluster manager metrics for efficient resource management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has a 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
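+Applied to a job, a selector might look like the sketch below; the `mesos_*` patterns are purely illustrative and should be replaced with the metric names your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - mesos_*
+      deny:
+        - mesos_http_*
+```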
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_devices.md
new file mode 100644
index 000000000..8d846fd26
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_devices.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_devices.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "MikroTik devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MikroTik devices
+
+
+<img src="https://netdata.cloud/img/mikrotik.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on MikroTik RouterOS metrics for efficient network device management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has a 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) (available as nshttpd/mikrotik-exporter or swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
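+If the debug output is long, you can capture it for later inspection; for example, still as the `netdata` user (the output path is arbitrary):
+
+```bash
+./go.d.plugin -d -m prometheus 2>&1 | tee /tmp/go.d-prometheus-debug.log
+```
+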
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_routeros_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_routeros_devices.md
new file mode 100644
index 000000000..e988add25
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_routeros_devices.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/mikrotik_routeros_devices.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Mikrotik RouterOS devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Mikrotik RouterOS devices
+
+
+<img src="https://netdata.cloud/img/routeros.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track MikroTik RouterOS metrics for efficient network device management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has a 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/minecraft.md b/src/go/plugin/go.d/modules/prometheus/integrations/minecraft.md
new file mode 100644
index 000000000..f8649bbcb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/minecraft.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/minecraft.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Minecraft"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Gaming"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Minecraft
+
+
+<img src="https://netdata.cloud/img/minecraft.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Minecraft server metrics for efficient game server management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has a 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
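+If an endpoint exposes more series than the defaults allow, the limits can be raised per job; the values below are only an illustration:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    max_time_series: 5000
+    max_time_series_per_metric: 500
+```
+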
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/modbus_protocol.md b/src/go/plugin/go.d/modules/prometheus/integrations/modbus_protocol.md
new file mode 100644
index 000000000..f6266cd43
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/modbus_protocol.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/modbus_protocol.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Modbus protocol"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Modbus protocol
+
+
+<img src="https://netdata.cloud/img/modbus.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Modbus RTU protocol metrics for efficient industrial automation and control performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
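+
+As a minimal sketch (values are illustrative), a global `update_every` can be set at the top level of `go.d/prometheus.conf` and overridden per job:
+
+```yaml
+# applies to every job unless a job sets its own value
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5  # this job overrides the global value
+```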
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
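+
+As a usage sketch, assuming the exporter's metric names share a `modbus_` prefix (a hypothetical prefix; check the names your exporter actually exposes), a job-level selector could keep only those series:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - modbus_*  # hypothetical prefix, adjust to the real metric names
+```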
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mogilefs.md b/src/go/plugin/go.d/modules/prometheus/integrations/mogilefs.md
new file mode 100644
index 000000000..becc6c194
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mogilefs.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/mogilefs.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "MogileFS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MogileFS
+
+
+<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor MogileFS distributed file system metrics for efficient storage management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
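+
+As a minimal sketch (values are illustrative), a global `update_every` can be set at the top level of `go.d/prometheus.conf` and overridden per job:
+
+```yaml
+# applies to every job unless a job sets its own value
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5  # this job overrides the global value
+```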
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
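+
+As a usage sketch, assuming the exporter's metric names share a `mogilefs_` prefix (a hypothetical prefix; check the names your exporter actually exposes), a job-level selector could keep only those series:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - mogilefs_*  # hypothetical prefix, adjust to the real metric names
+```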
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/monnit_sensors_mqtt.md b/src/go/plugin/go.d/modules/prometheus/integrations/monnit_sensors_mqtt.md
new file mode 100644
index 000000000..05517f39f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/monnit_sensors_mqtt.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/monnit_sensors_mqtt.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Monnit Sensors MQTT"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Monnit Sensors MQTT
+
+
+<img src="https://netdata.cloud/img/monnit.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Monnit sensor data via MQTT for efficient IoT device monitoring and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Monnit Sensors MQTT Exporter WIP](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
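+
+As a minimal sketch (values are illustrative), a global `update_every` can be set at the top level of `go.d/prometheus.conf` and overridden per job:
+
+```yaml
+# applies to every job unless a job sets its own value
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5  # this job overrides the global value
+```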
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
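+
+As a usage sketch, assuming the exporter's metric names share a `monnit_` prefix (a hypothetical prefix; check the names your exporter actually exposes), a job-level selector could keep only those series:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - monnit_*  # hypothetical prefix, adjust to the real metric names
+```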
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mosquitto.md b/src/go/plugin/go.d/modules/prometheus/integrations/mosquitto.md
new file mode 100644
index 000000000..115dde093
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mosquitto.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/mosquitto.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "mosquitto"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# mosquitto
+
+
+<img src="https://netdata.cloud/img/mosquitto.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
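+
+As a minimal sketch (values are illustrative), a global `update_every` can be set at the top level of `go.d/prometheus.conf` and overridden per job:
+
+```yaml
+# applies to every job unless a job sets its own value
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5  # this job overrides the global value
+```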
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
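+
+As a usage sketch, assuming the exporter exposes broker statistics under a `broker_` prefix (an assumption; check the names your exporter actually exposes), a job-level selector could keep only those series:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - broker_*  # assumed prefix, adjust to the real metric names
+```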
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mp707_usb_thermometer.md b/src/go/plugin/go.d/modules/prometheus/integrations/mp707_usb_thermometer.md
new file mode 100644
index 000000000..f032dcfb6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mp707_usb_thermometer.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/mp707_usb_thermometer.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "MP707 USB thermometer"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MP707 USB thermometer
+
+
+<img src="https://netdata.cloud/img/thermometer.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track MP707 USB thermometer metrics for efficient temperature monitoring and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
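+
+As a minimal sketch (values are illustrative), a global `update_every` can be set at the top level of `go.d/prometheus.conf` and overridden per job:
+
+```yaml
+# applies to every job unless a job sets its own value
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5  # this job overrides the global value
+```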
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
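+
+As a usage sketch, assuming the exporter's metric names share a `mp707_` prefix (a hypothetical prefix; check the names your exporter actually exposes), a job-level selector could keep only those series:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - mp707_*  # hypothetical prefix, adjust to the real metric names
+```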
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mqtt_blackbox.md b/src/go/plugin/go.d/modules/prometheus/integrations/mqtt_blackbox.md
new file mode 100644
index 000000000..2f6e6ca57
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mqtt_blackbox.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/mqtt_blackbox.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "MQTT Blackbox"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MQTT Blackbox
+
+
+<img src="https://netdata.cloud/img/mqtt.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track MQTT message transport performance using blackbox testing methods.
+
+
+Metrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
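+
+As a minimal sketch (values are illustrative), a global `update_every` can be set at the top level of `go.d/prometheus.conf` and overridden per job:
+
+```yaml
+# applies to every job unless a job sets its own value
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5  # this job overrides the global value
+```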
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the fallback_type section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
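+
+As a usage sketch, assuming the exporter exposes its probe results under a `probe_` prefix (an assumption; check the names your exporter actually exposes), a job-level selector could keep only those series:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - probe_*  # assumed prefix, adjust to the real metric names
+```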
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
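+
+As an illustrative example, untyped metrics ending in `_bytes` could be treated as gauges and those ending in `_errors` as counters (the metric name patterns below are hypothetical):
+
+```yaml
+fallback_type:
+  counter:
+    - '*_errors'
+  gauge:
+    - '*_bytes'
+```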
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/mtail.md b/src/go/plugin/go.d/modules/prometheus/integrations/mtail.md
new file mode 100644
index 000000000..e44f88d4c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/mtail.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/mtail.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "mtail"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Logs Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# mtail
+
+
+<img src="https://netdata.cloud/img/mtail.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor log data metrics using the mtail log data extractor and parser.
+
+
+Metrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
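+
+For example, a selector that keeps only HTTP-related series while dropping their debug variants might look like this (the `myapp_http_*` patterns are purely illustrative and not metrics exposed by this exporter):
+
+```yaml
+selector:
+  allow:
+    - myapp_http_*
+  deny:
+    - myapp_http_debug_*
+```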
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
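+
+As an illustrative example, untyped metrics ending in `_bytes` could be treated as gauges and those ending in `_errors` as counters (the metric name patterns below are hypothetical):
+
+```yaml
+fallback_type:
+  counter:
+    - '*_errors'
+  gauge:
+    - '*_bytes'
+```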
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/naemon.md b/src/go/plugin/go.d/modules/prometheus/integrations/naemon.md
new file mode 100644
index 000000000..208777b95
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/naemon.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/naemon.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Naemon"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Observability"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Naemon
+
+
+<img src="https://netdata.cloud/img/naemon.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
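+
+For example, a selector that keeps only HTTP-related series while dropping their debug variants might look like this (the `myapp_http_*` patterns are purely illustrative and not metrics exposed by this exporter):
+
+```yaml
+selector:
+  allow:
+    - myapp_http_*
+  deny:
+    - myapp_http_debug_*
+```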
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
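+
+As an illustrative example, untyped metrics ending in `_bytes` could be treated as gauges and those ending in `_errors` as counters (the metric name patterns below are hypothetical):
+
+```yaml
+fallback_type:
+  counter:
+    - '*_errors'
+  gauge:
+    - '*_bytes'
+```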
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nagios.md b/src/go/plugin/go.d/modules/prometheus/integrations/nagios.md
new file mode 100644
index 000000000..bdd669c76
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nagios.md
@@ -0,0 +1,326 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/nagios.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Nagios"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Observability"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Nagios
+
+
+<img src="https://netdata.cloud/img/nagios.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Nagios network monitoring metrics for efficient IT infrastructure management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
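+
+For example, a selector that keeps only HTTP-related series while dropping their debug variants might look like this (the `myapp_http_*` patterns are purely illustrative and not metrics exposed by this exporter):
+
+```yaml
+selector:
+  allow:
+    - myapp_http_*
+  deny:
+    - myapp_http_debug_*
+```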
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
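+
+As an illustrative example, untyped metrics ending in `_bytes` could be treated as gauges and those ending in `_errors` as counters (the metric name patterns below are hypothetical):
+
+```yaml
+fallback_type:
+  counter:
+    - '*_errors'
+  gauge:
+    - '*_bytes'
+```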
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nature_remo_e_lite_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/nature_remo_e_lite_devices.md
new file mode 100644
index 000000000..c102e4a7c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nature_remo_e_lite_devices.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/nature_remo_e_lite_devices.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Nature Remo E lite devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Nature Remo E lite devices
+
+
+<img src="https://netdata.cloud/img/nature-remo.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Nature Remo E series smart home device metrics for efficient home automation and energy management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
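+
+For example, a selector that keeps only HTTP-related series while dropping their debug variants might look like this (the `myapp_http_*` patterns are purely illustrative and not metrics exposed by this exporter):
+
+```yaml
+selector:
+  allow:
+    - myapp_http_*
+  deny:
+    - myapp_http_debug_*
+```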
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
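+
+As an illustrative example, untyped metrics ending in `_bytes` could be treated as gauges and those ending in `_errors` as counters (the metric name patterns below are hypothetical):
+
+```yaml
+fallback_type:
+  counter:
+    - '*_errors'
+  gauge:
+    - '*_bytes'
+```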
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md b/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md
new file mode 100644
index 000000000..80e4dce3b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/netapp_ontap_api.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Netapp ONTAP API"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Netapp ONTAP API
+
+
+<img src="https://netdata.cloud/img/netapp.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
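+
+For example, a selector that keeps only HTTP-related series while dropping their debug variants might look like this (the `myapp_http_*` patterns are purely illustrative and not metrics exposed by this exporter):
+
+```yaml
+selector:
+  allow:
+    - myapp_http_*
+  deny:
+    - myapp_http_debug_*
+```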
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
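+
+As an illustrative example, untyped metrics ending in `_bytes` could be treated as gauges and those ending in `_errors` as counters (the metric name patterns below are hypothetical):
+
+```yaml
+fallback_type:
+  counter:
+    - '*_errors'
+  gauge:
+    - '*_bytes'
+```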
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netapp_solidfire.md b/src/go/plugin/go.d/modules/prometheus/integrations/netapp_solidfire.md
new file mode 100644
index 000000000..a15aef5fb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netapp_solidfire.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/netapp_solidfire.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "NetApp Solidfire"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NetApp Solidfire
+
+
+<img src="https://netdata.cloud/img/netapp.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track NetApp Solidfire storage system metrics for efficient data storage management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
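+
+As an illustration only (the `solidfire_volume_*` pattern is an assumption about the exporter's metric names, not taken from its documentation), a selector that collects only volume-related series could look like:
+
+```yaml
+selector:
+  allow:
+    - solidfire_volume_*
+```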
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netatmo_sensors.md b/src/go/plugin/go.d/modules/prometheus/integrations/netatmo_sensors.md
new file mode 100644
index 000000000..8420a5fe0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netatmo_sensors.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/netatmo_sensors.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Netatmo sensors"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Netatmo sensors
+
+
+<img src="https://netdata.cloud/img/netatmo.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Netatmo smart home device metrics for efficient home automation and energy management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
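+
+As a concrete sketch (the metric name pattern below is an assumption, not taken from the exporter's documentation), untyped percentage readings could be forced to Gauge like this:
+
+```yaml
+fallback_type:
+  gauge:
+    - netatmo_*_percent
+```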
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netflow.md b/src/go/plugin/go.d/modules/prometheus/integrations/netflow.md
new file mode 100644
index 000000000..0b23e39b0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netflow.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/netflow.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "NetFlow"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NetFlow
+
+
+<img src="https://netdata.cloud/img/netflow.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track NetFlow network traffic metrics for efficient network monitoring and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
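+
+##### Limit time series and tighten the timeout
+
+> **Note**: The values below are illustrative; tune them to your environment.
+
+A sketch combining the `timeout` and `max_time_series` options from the table above in a single job.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    timeout: 5
+    max_time_series: 5000
+
+```
+</details>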
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/netmeter.md b/src/go/plugin/go.d/modules/prometheus/integrations/netmeter.md
new file mode 100644
index 000000000..97c9893d3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/netmeter.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/netmeter.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "NetMeter"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NetMeter
+
+
+<img src="https://netdata.cloud/img/netmeter.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor NetMeter network traffic metrics for efficient network management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README.
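+
+Optionally, confirm that the exporter serves metrics before configuring Netdata (the URL below is an assumption; replace the host and port with the ones your exporter actually listens on):
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```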
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/new_relic.md b/src/go/plugin/go.d/modules/prometheus/integrations/new_relic.md
new file mode 100644
index 000000000..9ca6b4c8a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/new_relic.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/new_relic.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "New Relic"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Observability"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# New Relic
+
+
+<img src="https://netdata.cloud/img/newrelic.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor New Relic application performance management metrics for efficient application monitoring and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
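+
+##### Behind an HTTP proxy
+
+> **Note**: The proxy address and credentials below are placeholders; substitute your own.
+
+A sketch of the `proxy_*` options from the table above, for reaching an exporter through an HTTP proxy.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    proxy_url: http://127.0.0.1:3128
+    proxy_username: username
+    proxy_password: password
+
+```
+</details>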
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nextcloud_servers.md b/src/go/plugin/go.d/modules/prometheus/integrations/nextcloud_servers.md
new file mode 100644
index 000000000..9e61c6be8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nextcloud_servers.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/nextcloud_servers.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Nextcloud servers"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Nextcloud servers
+
+
+<img src="https://netdata.cloud/img/nextcloud.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for untyped metrics: process them as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
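+
+##### HTTPS with a custom certificate authority
+
+> **Note**: The file paths below are placeholders; point them at your own certificates.
+
+A sketch of the `tls_ca`, `tls_cert`, and `tls_key` options from the table above, for an exporter served over TLS with an internal CA.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_ca: /etc/ssl/internal-ca.crt
+    tls_cert: /etc/ssl/client.crt
+    tls_key: /etc/ssl/client.key
+
+```
+</details>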
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nextdns.md b/src/go/plugin/go.d/modules/prometheus/integrations/nextdns.md
new file mode 100644
index 000000000..3d5bc0a6d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nextdns.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/nextdns.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "NextDNS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NextDNS
+
+
+<img src="https://netdata.cloud/img/nextdns.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track NextDNS DNS resolver and security platform metrics for efficient DNS management and security.
+
+
+Metrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
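+
+As a job-level sketch, the selector can be set alongside the usual `url` option. The `nextdns_*` prefix and the glob patterns below are illustrative assumptions; check the metric names your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - nextdns_*
+      deny:
+        - '*_build_info'
+```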
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nftables.md b/src/go/plugin/go.d/modules/prometheus/integrations/nftables.md
new file mode 100644
index 000000000..acce8b8af
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nftables.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/nftables.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "nftables"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Linux Systems/Firewall"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# nftables
+
+
+<img src="https://netdata.cloud/img/nftables.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor nftables firewall metrics for efficient network security and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
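+
+As a job-level sketch, the selector can be set alongside the usual `url` option. The `nftables_*` prefix and the glob patterns below are illustrative assumptions; check the metric names your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - nftables_*
+      deny:
+        - '*_build_info'
+```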
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nrpe_daemon.md b/src/go/plugin/go.d/modules/prometheus/integrations/nrpe_daemon.md
new file mode 100644
index 000000000..e3a03e356
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nrpe_daemon.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/nrpe_daemon.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "NRPE daemon"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NRPE daemon
+
+
+<img src="https://netdata.cloud/img/nrpelinux.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
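+
+As a job-level sketch, the selector can be set alongside the usual `url` option. The `nrpe_*` prefix and the glob patterns below are illustrative assumptions; check the metric names your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - nrpe_*
+      deny:
+        - '*_build_info'
+```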
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nsx-t.md b/src/go/plugin/go.d/modules/prometheus/integrations/nsx-t.md
new file mode 100644
index 000000000..4e670ba56
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nsx-t.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/nsx-t.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "NSX-T"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Containers and VMs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NSX-T
+
+
+<img src="https://netdata.cloud/img/vmware-nsx.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
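+
+As a job-level sketch, the selector can be set alongside the usual `url` option. The `nsxt_*` prefix and the glob patterns below are illustrative assumptions; check the metric names your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - nsxt_*
+      deny:
+        - '*_build_info'
+```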
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md b/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md
new file mode 100644
index 000000000..54bb3f1fb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/nvml.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "NVML"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NVML
+
+
+<img src="https://netdata.cloud/img/nvidia.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
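+
+As a job-level sketch, the selector can be set alongside the usual `url` option. The `nvml_*` prefix and the glob patterns below are illustrative assumptions; check the metric names your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - nvml_*
+      deny:
+        - '*_build_info'
+```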
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/obs_studio.md b/src/go/plugin/go.d/modules/prometheus/integrations/obs_studio.md
new file mode 100644
index 000000000..254833af5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/obs_studio.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/obs_studio.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OBS Studio"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Media Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OBS Studio
+
+
+<img src="https://netdata.cloud/img/obs-studio.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track OBS Studio live streaming and recording software metrics for efficient video production and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback type (counter or gauge) for Untyped metrics, matched by metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
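+
+As a job-level sketch, the selector can be set alongside the usual `url` option. The `obs_*` prefix and the glob patterns below are illustrative assumptions; check the metric names your exporter actually exposes:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - obs_*
+      deny:
+        - '*_build_info'
+```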
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/odbc.md b/src/go/plugin/go.d/modules/prometheus/integrations/odbc.md
new file mode 100644
index 000000000..d128b647b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/odbc.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/odbc.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "ODBC"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ODBC
+
+
+<img src="https://netdata.cloud/img/odbc.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
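+For example, a minimal sketch (hypothetical metric names, assuming `selector` is placed at the job level alongside `url`) that collects only `myapp_*` series while dropping the debug ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    selector:
+      allow:
+        - myapp_*
+      deny:
+        - myapp_debug_*
+```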
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
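+For example, a minimal sketch (hypothetical metric names, assuming `fallback_type` is placed at the job level alongside `url`) that treats untyped `myapp_requests*` series as counters and `myapp_temperature*` series as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    fallback_type:
+      counter:
+        - myapp_requests*
+      gauge:
+        - myapp_temperature*
+```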
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use the "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+```
+
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+```
+
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+```
+
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+```
+
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/open_vswitch.md b/src/go/plugin/go.d/modules/prometheus/integrations/open_vswitch.md
new file mode 100644
index 000000000..c8d24a876
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/open_vswitch.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/open_vswitch.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Open vSwitch"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Open vSwitch
+
+
+<img src="https://netdata.cloud/img/ovs.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
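+For example, a minimal sketch (hypothetical metric names, assuming `selector` is placed at the job level alongside `url`) that collects only `myapp_*` series while dropping the debug ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    selector:
+      allow:
+        - myapp_*
+      deny:
+        - myapp_debug_*
+```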
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
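+For example, a minimal sketch (hypothetical metric names, assuming `fallback_type` is placed at the job level alongside `url`) that treats untyped `myapp_requests*` series as counters and `myapp_temperature*` series as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    fallback_type:
+      counter:
+        - myapp_requests*
+      gauge:
+        - myapp_temperature*
+```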
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use the "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+```
+
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+```
+
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+```
+
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+```
+
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openhab.md b/src/go/plugin/go.d/modules/prometheus/integrations/openhab.md
new file mode 100644
index 000000000..52a2ac94d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openhab.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/openhab.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OpenHAB"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenHAB
+
+
+<img src="https://netdata.cloud/img/openhab.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track openHAB smart home automation system metrics for efficient home automation and energy management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
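+For example, a minimal sketch (hypothetical metric names, assuming `selector` is placed at the job level alongside `url`) that collects only `myapp_*` series while dropping the debug ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    selector:
+      allow:
+        - myapp_*
+      deny:
+        - myapp_debug_*
+```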
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
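+For example, a minimal sketch (hypothetical metric names, assuming `fallback_type` is placed at the job level alongside `url`) that treats untyped `myapp_requests*` series as counters and `myapp_temperature*` series as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    fallback_type:
+      counter:
+        - myapp_requests*
+      gauge:
+        - myapp_temperature*
+```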
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use the "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+```
+
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+```
+
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+```
+
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+```
+
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md b/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md
new file mode 100644
index 000000000..c1a547211
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/openldap_community.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OpenLDAP (community)"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenLDAP (community)
+
+
+<img src="https://netdata.cloud/img/openldap.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor OpenLDAP directory service metrics for efficient directory management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
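+For example, a minimal sketch (hypothetical metric names, assuming `selector` is placed at the job level alongside `url`) that collects only `myapp_*` series while dropping the debug ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    selector:
+      allow:
+        - myapp_*
+      deny:
+        - myapp_debug_*
+```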
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
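+For example, a minimal sketch (hypothetical metric names, assuming `fallback_type` is placed at the job level alongside `url`) that treats untyped `myapp_requests*` series as counters and `myapp_temperature*` series as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    fallback_type:
+      counter:
+        - myapp_requests*
+      gauge:
+        - myapp_temperature*
+```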
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use the "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+```
+
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+```
+
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+```
+
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+```
+
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openrc.md b/src/go/plugin/go.d/modules/prometheus/integrations/openrc.md
new file mode 100644
index 000000000..bc5dfa902
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openrc.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/openrc.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OpenRC"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Linux Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenRC
+
+
+<img src="https://netdata.cloud/img/linux.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on OpenRC init system metrics for efficient system startup and service management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+  allow:
+    - pattern1
+    - pattern2
+  deny:
+    - pattern3
+    - pattern4
+```
+
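+For example, a minimal sketch (hypothetical metric names, assuming `selector` is placed at the job level alongside `url`) that collects only `myapp_*` series while dropping the debug ones:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    selector:
+      allow:
+        - myapp_*
+      deny:
+        - myapp_debug_*
+```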
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+  counter:
+    - metric_name_pattern1
+    - metric_name_pattern2
+  gauge:
+    - metric_name_pattern3
+    - metric_name_pattern4
+```
+
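+For example, a minimal sketch (hypothetical metric names, assuming `fallback_type` is placed at the job level alongside `url`) that treats untyped `myapp_requests*` series as counters and `myapp_temperature*` series as gauges:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    # hypothetical metric name patterns, for illustration only
+    fallback_type:
+      counter:
+        - myapp_requests*
+      gauge:
+        - myapp_temperature*
+```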
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use the "file://" scheme
+jobs:
+  - name: myapp
+    url: file:///opt/metrics/myapp/metrics.txt
+```
+
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    username: username
+    password: password
+```
+
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: https://127.0.0.1:9090/metrics
+    tls_skip_verify: yes
+```
+
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+
+  - name: remote
+    url: http://192.0.2.1:9090/metrics
+```
+
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+  your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+  ```bash
+  cd /usr/libexec/netdata/plugins.d/
+  ```
+
+- Switch to the `netdata` user.
+
+  ```bash
+  sudo -u netdata -s
+  ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+  ```bash
+  ./go.d.plugin -d -m prometheus
+  ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md b/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md
new file mode 100644
index 000000000..7995839b1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/openrct2.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OpenRCT2"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Gaming"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenRCT2
+
+
+<img src="https://netdata.cloud/img/openRCT2.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track OpenRCT2 game metrics for efficient game server management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openroadm_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/openroadm_devices.md
new file mode 100644
index 000000000..d1e23dc3c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openroadm_devices.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/openroadm_devices.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OpenROADM devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenROADM devices
+
+
+<img src="https://netdata.cloud/img/openroadm.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the example after the list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
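+
+For illustration, here is a hypothetical, annotated scrape fragment with no '# TYPE' lines, and how each series would be classified under the rules above:
+
+```text
+myapp_requests_total 1027                     # collected as Counter (name ends in '_total')
+myapp_latency_seconds{quantile="0.99"} 0.12   # collected as Summary ('quantile' label)
+myapp_temperature_celsius 23                  # ignored, unless a 'fallback_type' pattern matches it
+```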
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
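+
+For example, a minimal sketch of combining global values with a per-job override in `go.d/prometheus.conf` (the job shown is hypothetical):
+
+```yaml
+# Top-level values act as defaults for every job defined below.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5  # per-job override
+```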
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If the endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openstack.md b/src/go/plugin/go.d/modules/prometheus/integrations/openstack.md
new file mode 100644
index 000000000..874cf5ce7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openstack.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/openstack.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OpenStack"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenStack
+
+
+<img src="https://netdata.cloud/img/openstack.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track OpenStack cloud computing platform metrics for efficient infrastructure management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the example after the list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
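+
+For illustration, here is a hypothetical, annotated scrape fragment with no '# TYPE' lines, and how each series would be classified under the rules above:
+
+```text
+myapp_requests_total 1027                     # collected as Counter (name ends in '_total')
+myapp_latency_seconds{quantile="0.99"} 0.12   # collected as Summary ('quantile' label)
+myapp_temperature_celsius 23                  # ignored, unless a 'fallback_type' pattern matches it
+```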
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
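+
+For example, a minimal sketch of combining global values with a per-job override in `go.d/prometheus.conf` (the job shown is hypothetical):
+
+```yaml
+# Top-level values act as defaults for every job defined below.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5  # per-job override
+```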
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If the endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openvas.md b/src/go/plugin/go.d/modules/prometheus/integrations/openvas.md
new file mode 100644
index 000000000..09681ae7e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openvas.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/openvas.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OpenVAS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenVAS
+
+
+<img src="https://netdata.cloud/img/openVAS.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor OpenVAS vulnerability scanner metrics for efficient security assessment and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the example after the list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
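+
+For illustration, here is a hypothetical, annotated scrape fragment with no '# TYPE' lines, and how each series would be classified under the rules above:
+
+```text
+myapp_requests_total 1027                     # collected as Counter (name ends in '_total')
+myapp_latency_seconds{quantile="0.99"} 0.12   # collected as Summary ('quantile' label)
+myapp_temperature_celsius 23                  # ignored, unless a 'fallback_type' pattern matches it
+```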
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
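+
+For example, a minimal sketch of combining global values with a per-job override in `go.d/prometheus.conf` (the job shown is hypothetical):
+
+```yaml
+# Top-level values act as defaults for every job defined below.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5  # per-job override
+```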
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If the endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/openweathermap.md b/src/go/plugin/go.d/modules/prometheus/integrations/openweathermap.md
new file mode 100644
index 000000000..624478e2b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/openweathermap.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/openweathermap.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OpenWeatherMap"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OpenWeatherMap
+
+
+<img src="https://netdata.cloud/img/openweather.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the example after the list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
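+
+For illustration, here is a hypothetical, annotated scrape fragment with no '# TYPE' lines, and how each series would be classified under the rules above:
+
+```text
+myapp_requests_total 1027                     # collected as Counter (name ends in '_total')
+myapp_latency_seconds{quantile="0.99"} 0.12   # collected as Summary ('quantile' label)
+myapp_temperature_celsius 23                  # ignored, unless a 'fallback_type' pattern matches it
+```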
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
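+
+For example, a minimal sketch of combining global values with a per-job override in `go.d/prometheus.conf` (the job shown is hypothetical):
+
+```yaml
+# Top-level values act as defaults for every job defined below.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5  # per-job override
+```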
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If the endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md b/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md
new file mode 100644
index 000000000..ab59c3181
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/oracle_db_community.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Oracle DB (community)"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Oracle DB (community)
+
+
+<img src="https://netdata.cloud/img/oracle.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Oracle Database metrics for efficient database management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the example after the list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
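+
+For illustration, here is a hypothetical, annotated scrape fragment with no '# TYPE' lines, and how each series would be classified under the rules above:
+
+```text
+myapp_requests_total 1027                     # collected as Counter (name ends in '_total')
+myapp_latency_seconds{quantile="0.99"} 0.12   # collected as Summary ('quantile' label)
+myapp_temperature_celsius 23                  # ignored, unless a 'fallback_type' pattern matches it
+```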
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
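+
+For example, a minimal sketch of combining global values with a per-job override in `go.d/prometheus.conf` (the job shown is hypothetical):
+
+```yaml
+# Top-level values act as defaults for every job defined below.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   update_every: 5  # per-job override
+```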
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge based on metric name patterns. | | no |
+| max_time_series | Global time series limit. If the endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/otrs.md b/src/go/plugin/go.d/modules/prometheus/integrations/otrs.md
new file mode 100644
index 000000000..8eadb3410
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/otrs.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/otrs.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "OTRS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Incident Management"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# OTRS
+
+
+<img src="https://netdata.cloud/img/otrs.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (see the example after the list):
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions mentioned in the exporter README.
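+
+Once the exporter is running, you can verify that its metrics endpoint responds before configuring Netdata (a quick sanity check; adjust the host and port to match the address your exporter actually listens on):
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```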
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
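+
+For example, a minimal sketch of `go.d/prometheus.conf` with both options set at the top level so they apply to every job (the values shown are illustrative):
+
+```yaml
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```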
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md b/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md
new file mode 100644
index 000000000..e4fe20123
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/patroni.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Patroni"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Patroni
+
+
+<img src="https://netdata.cloud/img/patroni.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README.
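+
+Once the exporter is running, you can verify that its metrics endpoint responds before configuring Netdata (a quick sanity check; adjust the host and port to match the address your exporter actually listens on):
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```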
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
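+
+For example, a minimal sketch of `go.d/prometheus.conf` with both options set at the top level so they apply to every job (the values shown are illustrative):
+
+```yaml
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```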
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/personal_weather_station.md b/src/go/plugin/go.d/modules/prometheus/integrations/personal_weather_station.md
new file mode 100644
index 000000000..af1482067
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/personal_weather_station.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/personal_weather_station.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Personal Weather Station"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Personal Weather Station
+
+
+<img src="https://netdata.cloud/img/wunderground.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track personal weather station metrics for efficient weather monitoring and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README.
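+
+Once the exporter is running, you can verify that its metrics endpoint responds before configuring Netdata (a quick sanity check; adjust the host and port to match the address your exporter actually listens on):
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```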
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
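+
+For example, a minimal sketch of `go.d/prometheus.conf` with both options set at the top level so they apply to every job (the values shown are illustrative):
+
+```yaml
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```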
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/pgbackrest.md b/src/go/plugin/go.d/modules/prometheus/integrations/pgbackrest.md
new file mode 100644
index 000000000..19c60d95a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/pgbackrest.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/pgbackrest.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "pgBackRest"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# pgBackRest
+
+
+<img src="https://netdata.cloud/img/pgbackrest.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the exporter README.
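+
+Once the exporter is running, you can verify that its metrics endpoint responds before configuring Netdata (a quick sanity check; adjust the host and port to match the address your exporter actually listens on):
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```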
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
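+
+For example, a minimal sketch of `go.d/prometheus.conf` with both options set at the top level so they apply to every job (the values shown are illustrative):
+
+```yaml
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```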
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/pgpool-ii.md b/src/go/plugin/go.d/modules/prometheus/integrations/pgpool-ii.md
new file mode 100644
index 000000000..a7cfd941f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/pgpool-ii.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/pgpool-ii.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Pgpool-II"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Pgpool-II
+
+
+<img src="https://netdata.cloud/img/pgpool2.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README.
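+
+Once the exporter is running, you can verify that its metrics endpoint responds before configuring Netdata (a quick sanity check; adjust the host and port to match the address your exporter actually listens on):
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```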
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
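+
+For example, a minimal sketch of `go.d/prometheus.conf` with both options set at the top level so they apply to every job (the values shown are illustrative):
+
+```yaml
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```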
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/philips_hue.md b/src/go/plugin/go.d/modules/prometheus/integrations/philips_hue.md
new file mode 100644
index 000000000..47dd77b0e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/philips_hue.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/philips_hue.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Philips Hue"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Philips Hue
+
+
+<img src="https://netdata.cloud/img/hue.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions mentioned in the exporter README.
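+
+Once the exporter is running, you can verify that its metrics endpoint responds before configuring Netdata (a quick sanity check; adjust the host and port to match the address your exporter actually listens on):
+
+```bash
+curl -s http://127.0.0.1:9090/metrics | head
+```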
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
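+
+For example, a minimal sketch of `go.d/prometheus.conf` with both options set at the top level so they apply to every job (the values shown are illustrative):
+
+```yaml
+update_every: 10
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```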
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics whose number of time series exceeds the limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/pimoroni_enviro+.md b/src/go/plugin/go.d/modules/prometheus/integrations/pimoroni_enviro+.md
new file mode 100644
index 000000000..12b5719c5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/pimoroni_enviro+.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/pimoroni_enviro+.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Pimoroni Enviro+"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Pimoroni Enviro+
+
+
+<img src="https://netdata.cloud/img/pimorino.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (those without a '# TYPE' line) as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
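+
+If you prefer a single command, the same debug run can be started directly (this assumes the default `plugins.d` path shown above; adjust it if yours differs):
+
+```bash
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```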
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
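+
+To watch only new messages as they are written (using the same default log path), you can follow the file instead:
+
+```bash
+tail -f /var/log/netdata/collector.log | grep prometheus
+```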
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/pingdom.md b/src/go/plugin/go.d/modules/prometheus/integrations/pingdom.md
new file mode 100644
index 000000000..758b80eff
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/pingdom.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/pingdom.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Pingdom"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Pingdom
+
+
+<img src="https://netdata.cloud/img/solarwinds.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (those without a '# TYPE' line) as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
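+
+If you prefer a single command, the same debug run can be started directly (this assumes the default `plugins.d` path shown above; adjust it if yours differs):
+
+```bash
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```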
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
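+
+To watch only new messages as they are written (using the same default log path), you can follow the file instead:
+
+```bash
+tail -f /var/log/netdata/collector.log | grep prometheus
+```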
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/podman.md b/src/go/plugin/go.d/modules/prometheus/integrations/podman.md
new file mode 100644
index 000000000..346e765cf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/podman.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/podman.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Podman"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Containers and VMs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Podman
+
+
+<img src="https://netdata.cloud/img/podman.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Podman container runtime metrics for efficient container management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (those without a '# TYPE' line) as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
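+
+If you prefer a single command, the same debug run can be started directly (this assumes the default `plugins.d` path shown above; adjust it if yours differs):
+
+```bash
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```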
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
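+
+To watch only new messages as they are written (using the same default log path), you can follow the file instead:
+
+```bash
+tail -f /var/log/netdata/collector.log | grep prometheus
+```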
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/powerpal_devices.md b/src/go/plugin/go.d/modules/prometheus/integrations/powerpal_devices.md
new file mode 100644
index 000000000..cc7b681ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/powerpal_devices.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/powerpal_devices.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Powerpal devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Powerpal devices
+
+
+<img src="https://netdata.cloud/img/powerpal.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (those without a '# TYPE' line) as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
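+
+If you prefer a single command, the same debug run can be started directly (this assumes the default `plugins.d` path shown above; adjust it if yours differs):
+
+```bash
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```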
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
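+
+To watch only new messages as they are written (using the same default log path), you can follow the file instead:
+
+```bash
+tail -f /var/log/netdata/collector.log | grep prometheus
+```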
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/proftpd.md b/src/go/plugin/go.d/modules/prometheus/integrations/proftpd.md
new file mode 100644
index 000000000..f92612383
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/proftpd.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/proftpd.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "ProFTPD"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/FTP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ProFTPD
+
+
+<img src="https://netdata.cloud/img/proftpd.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor ProFTPD FTP server metrics for efficient file transfer and server performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (those without a '# TYPE' line) as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
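+
+If you prefer a single command, the same debug run can be started directly (this assumes the default `plugins.d` path shown above; adjust it if yours differs):
+
+```bash
+sudo -u netdata /usr/libexec/netdata/plugins.d/go.d.plugin -d -m prometheus
+```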
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
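+
+To watch only new messages as they are written (using the same default log path), you can follow the file instead:
+
+```bash
+tail -f /var/log/netdata/collector.log | grep prometheus
+```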
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/prometheus_endpoint.md b/src/go/plugin/go.d/modules/prometheus/integrations/prometheus_endpoint.md
new file mode 100644
index 000000000..18bbd9d0a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/prometheus_endpoint.md
@@ -0,0 +1,321 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/prometheus_endpoint.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Prometheus endpoint"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Prometheus endpoint
+
+
+<img src="https://netdata.cloud/img/prometheus.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoint.
+
+
+It collects metrics by periodically sending HTTP requests to the target instance.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics (those without a '# TYPE' line) as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/proxmox_ve.md b/src/go/plugin/go.d/modules/prometheus/integrations/proxmox_ve.md
new file mode 100644
index 000000000..ad4bdfe63
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/proxmox_ve.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/proxmox_ve.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Proxmox VE"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Containers and VMs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Proxmox VE
+
+
+<img src="https://netdata.cloud/img/proxmox.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
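+
+For instance, to keep only node- and guest-related series from Proxmox VE Exporter and drop everything else, a selector along these lines could be used. The `pve_*` patterns are illustrative; check the exporter's actual metric names before relying on them:
+
+```yaml
+# illustrative only - verify the metric names against your exporter's /metrics output
+selector:
+  allow:
+    - pve_node_*
+    - pve_guest_*
+```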
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/radio_thermostat.md b/src/go/plugin/go.d/modules/prometheus/integrations/radio_thermostat.md
new file mode 100644
index 000000000..8004e7ff1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/radio_thermostat.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/radio_thermostat.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Radio Thermostat"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Radio Thermostat
+
+
+<img src="https://netdata.cloud/img/radiots.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
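+
+As an illustration, if the exporter published untyped temperature or state series (for example, a hypothetical `thermostat_temperature_fahrenheit`), they could be forced to be read as gauges:
+
+```yaml
+# illustrative only - the pattern below is a placeholder, not a documented metric name
+fallback_type:
+  gauge:
+    - thermostat_*
+```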
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/radius.md b/src/go/plugin/go.d/modules/prometheus/integrations/radius.md
new file mode 100644
index 000000000..22e2567e6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/radius.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/radius.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "RADIUS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# RADIUS
+
+
+<img src="https://netdata.cloud/img/radius.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
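+
+For instance, to collect only authentication-related series and skip everything else, a selector similar to the following could be used. The `radius_*` names are hypothetical placeholders and are not guaranteed to match this exporter's output:
+
+```yaml
+# illustrative only - substitute patterns that match the exporter's real metric names
+selector:
+  allow:
+    - radius_auth_*
+```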
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/rancher.md b/src/go/plugin/go.d/modules/prometheus/integrations/rancher.md
new file mode 100644
index 000000000..945813b1d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/rancher.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/rancher.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Rancher"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Kubernetes"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Rancher
+
+
+<img src="https://netdata.cloud/img/rancher.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Rancher container orchestration platform metrics for efficient container management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
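+
+For example, to limit collection to cluster- and node-level series, a selector like the one below could be used. The `rancher_*` patterns are illustrative only; confirm the exporter's actual metric names first:
+
+```yaml
+# illustrative only - verify these patterns against the exporter's /metrics output
+selector:
+  allow:
+    - rancher_cluster_*
+    - rancher_node_*
+```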
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/raritan_pdu.md b/src/go/plugin/go.d/modules/prometheus/integrations/raritan_pdu.md
new file mode 100644
index 000000000..2781c3af8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/raritan_pdu.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/raritan_pdu.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Raritan PDU"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Raritan PDU
+
+
+<img src="https://netdata.cloud/img/raritan.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
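+
+As an illustration, if the PDU exporter exposed untyped sensor readings (a hypothetical `sensors_power_watts`, for example), they could be processed as gauges:
+
+```yaml
+# illustrative only - the pattern below is a placeholder, not a documented metric name
+fallback_type:
+  gauge:
+    - sensors_*
+```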
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/redis_queue.md b/src/go/plugin/go.d/modules/prometheus/integrations/redis_queue.md
new file mode 100644
index 000000000..d3fb16d4d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/redis_queue.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/redis_queue.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Redis Queue"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Redis Queue
+
+
+<img src="https://netdata.cloud/img/rq.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series per metric (metric name) limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
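+
+As a sketch, a job that keeps only the exporter's own series while dropping Go runtime internals could look like the following; the `rq_*`, `go_*`, and `process_*` patterns are assumptions for illustration, not a recommendation:
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - rq_*
+      deny:
+        - go_*
+        - process_*
+```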
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
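+
+A minimal sketch applying this to a job: any untyped series matching the pattern below is charted as a Gauge instead of being dropped. The `rq_jobs_*` pattern is an assumption used only for illustration; replace it with the untyped series your endpoint actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - rq_jobs_*
+```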
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ripe_atlas.md b/src/go/plugin/go.d/modules/prometheus/integrations/ripe_atlas.md
new file mode 100644
index 000000000..7aa35e8d5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ripe_atlas.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ripe_atlas.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "RIPE Atlas"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# RIPE Atlas
+
+
+<img src="https://netdata.cloud/img/ripe.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` option below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sabnzbd.md b/src/go/plugin/go.d/modules/prometheus/integrations/sabnzbd.md
new file mode 100644
index 000000000..3c98fa9e1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sabnzbd.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sabnzbd.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "SABnzbd"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Media Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SABnzbd
+
+
+<img src="https://netdata.cloud/img/sabnzbd.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor SABnzbd Usenet client metrics for efficient file downloads and resource management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` option below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/salicru_eqx_inverter.md b/src/go/plugin/go.d/modules/prometheus/integrations/salicru_eqx_inverter.md
new file mode 100644
index 000000000..b7c5b46c3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/salicru_eqx_inverter.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/salicru_eqx_inverter.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Salicru EQX inverter"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Salicru EQX inverter
+
+
+<img src="https://netdata.cloud/img/salicru.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` option below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sense_energy.md b/src/go/plugin/go.d/modules/prometheus/integrations/sense_energy.md
new file mode 100644
index 000000000..837d30ceb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sense_energy.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sense_energy.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Sense Energy"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Sense Energy
+
+
+<img src="https://netdata.cloud/img/sense.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` option below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sentry.md b/src/go/plugin/go.d/modules/prometheus/integrations/sentry.md
new file mode 100644
index 000000000..ae878cedf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sentry.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sentry.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Sentry"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Sentry
+
+
+<img src="https://netdata.cloud/img/sentry.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Sentry error tracking and monitoring platform metrics for efficient application performance and error management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` option below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the example URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/servertech.md b/src/go/plugin/go.d/modules/prometheus/integrations/servertech.md
new file mode 100644
index 000000000..d287fb65b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/servertech.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/servertech.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "ServerTech"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ServerTech
+
+
+<img src="https://netdata.cloud/img/servertech.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows (a short illustration follows this list):
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
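+
+For example, given a scrape without '# TYPE' lines containing the following series (hypothetical metric names, with annotations added for illustration only), they would be handled like this:
+
+```text
+pdu_reboots_total 3                          -> '_total' suffix, treated as Counter
+pdu_request_seconds{quantile="0.99"} 0.12    -> 'quantile' label, treated as Summary
+pdu_request_bytes_bucket{le="1024"} 7        -> 'le' label, treated as Histogram
+pdu_temperature_celsius 41                   -> ignored unless matched by 'fallback_type'
+```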
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge based on metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/shell_command.md b/src/go/plugin/go.d/modules/prometheus/integrations/shell_command.md
new file mode 100644
index 000000000..dec29a66c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/shell_command.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/shell_command.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Shell command"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Shell command
+
+
+<img src="https://netdata.cloud/img/crunner.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track custom command output metrics for tailored monitoring and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge based on metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
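+
+As a purely illustrative sketch (the metric name patterns below are placeholders, not series guaranteed to be exposed by this exporter), the following selector would collect only series whose names start with `process_` or `go_`, except `go_gc_*` series:
+
+```yaml
+selector:
+  allow:
+    - process_*
+    - go_*
+  deny:
+    - go_gc_*
+```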
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/shelly_humidity_sensor.md b/src/go/plugin/go.d/modules/prometheus/integrations/shelly_humidity_sensor.md
new file mode 100644
index 000000000..baf6fa58f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/shelly_humidity_sensor.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/shelly_humidity_sensor.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Shelly humidity sensor"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Shelly humidity sensor
+
+
+<img src="https://netdata.cloud/img/shelly.jpg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Shelly smart home device metrics for efficient home automation and energy management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge based on metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
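+
+As an illustrative sketch (the patterns below are placeholders), untyped series whose names end in `_bytes` could be processed as gauges and those ending in `_count` as counters:
+
+```yaml
+fallback_type:
+  gauge:
+    - '*_bytes'
+  counter:
+    - '*_count'
+```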
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sia.md b/src/go/plugin/go.d/modules/prometheus/integrations/sia.md
new file mode 100644
index 000000000..6fe4a3684
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sia.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sia.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Sia"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Blockchain Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Sia
+
+
+<img src="https://netdata.cloud/img/sia.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Sia decentralized storage platform metrics for efficient storage management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
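+
+As a minimal sketch (illustrative values), setting both options at the top level of `go.d/prometheus.conf` applies them to every job unless a job overrides them:
+
+```yaml
+update_every: 5
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```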
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge based on metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/siemens_s7_plc.md b/src/go/plugin/go.d/modules/prometheus/integrations/siemens_s7_plc.md
new file mode 100644
index 000000000..c6aec71e2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/siemens_s7_plc.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/siemens_s7_plc.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Siemens S7 PLC"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Siemens S7 PLC
+
+
+<img src="https://netdata.cloud/img/siemens.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge based on metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
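+
+  If you are unsure where your `netdata.conf` lives, one way to check (assuming the default `/etc/netdata/netdata.conf` path) is:
+
+  ```bash
+  grep -F -A 5 '[directories]' /etc/netdata/netdata.conf
+  ```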
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md b/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md
new file mode 100644
index 000000000..8faefa53e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/site_24x7.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Site 24x7"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Site 24x7
+
+
+<img src="https://netdata.cloud/img/site24x7.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions in its README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Fallback metric type for Untyped metrics: process them as Counter or Gauge based on metric name pattern. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (by metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Do not validate the server's certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Adjust the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/slurm.md b/src/go/plugin/go.d/modules/prometheus/integrations/slurm.md
new file mode 100644
index 000000000..00d27ca19
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/slurm.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/slurm.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Slurm"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Task Queues"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Slurm
+
+
+<img src="https://netdata.cloud/img/slurm.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
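+
+For instance, a job that keeps only this exporter's own time series could allow a single name pattern, as in the sketch below. The `slurm_*` prefix and the port are assumptions for illustration; adjust them to the metric names and port your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - slurm_*
+```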
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
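+
+As a concrete sketch, the job below maps hypothetical untyped metrics ending in `_celsius` to gauges and ones ending in `_errors` to counters. Both patterns are placeholders rather than metrics known to be exposed by this exporter; patterns starting with `*` must be quoted so the YAML parses.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - '*_celsius'
+      counter:
+        - '*_errors'
+```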
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the ports in the URLs with the ones on which the monitored applications expose their metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sma_inverters.md b/src/go/plugin/go.d/modules/prometheus/integrations/sma_inverters.md
new file mode 100644
index 000000000..f739362eb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sma_inverters.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sma_inverters.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "SMA Inverters"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SMA Inverters
+
+
+<img src="https://netdata.cloud/img/sma.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor SMA solar inverter metrics for efficient solar energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
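+
+For instance, a job that keeps only this exporter's own time series could allow a single name pattern, as in the sketch below. The `sma_*` prefix and the port are assumptions for illustration; adjust them to the metric names and port your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - sma_*
+```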
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
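+
+As a concrete sketch, the job below maps hypothetical untyped metrics ending in `_celsius` to gauges and ones ending in `_errors` to counters. Both patterns are placeholders rather than metrics known to be exposed by this exporter; patterns starting with `*` must be quoted so the YAML parses.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - '*_celsius'
+      counter:
+        - '*_errors'
+```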
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the ports in the URLs with the ones on which the monitored applications expose their metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/smart_meters_sml.md b/src/go/plugin/go.d/modules/prometheus/integrations/smart_meters_sml.md
new file mode 100644
index 000000000..1201475a5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/smart_meters_sml.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/smart_meters_sml.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Smart meters SML"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Smart meters SML
+
+
+<img src="https://netdata.cloud/img/sml.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Smart Message Language (SML) metrics for efficient smart metering and energy management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
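+
+For instance, a job that keeps only this exporter's own time series could allow a single name pattern, as in the sketch below. The `sml_*` prefix and the port are assumptions for illustration; adjust them to the metric names and port your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - sml_*
+```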
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
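+
+As a concrete sketch, the job below maps hypothetical untyped metrics ending in `_celsius` to gauges and ones ending in `_errors` to counters. Both patterns are placeholders rather than metrics known to be exposed by this exporter; patterns starting with `*` must be quoted so the YAML parses.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - '*_celsius'
+      counter:
+        - '*_errors'
+```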
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the ports in the URLs with the ones on which the monitored applications expose their metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/smartrg_808ac_cable_modem.md b/src/go/plugin/go.d/modules/prometheus/integrations/smartrg_808ac_cable_modem.md
new file mode 100644
index 000000000..1dadc3d85
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/smartrg_808ac_cable_modem.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/smartrg_808ac_cable_modem.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "SmartRG 808AC Cable Modem"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SmartRG 808AC Cable Modem
+
+
+<img src="https://netdata.cloud/img/smartr.jpeg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor SmartRG SR808ac router metrics for efficient network device management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
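+
+For instance, a job that keeps only this exporter's own time series could allow a single name pattern, as in the sketch below. The `smartrg_*` prefix and the port are assumptions for illustration; adjust them to the metric names and port your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - smartrg_*
+```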
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
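+
+As a concrete sketch, the job below maps hypothetical untyped metrics ending in `_celsius` to gauges and ones ending in `_errors` to counters. Both patterns are placeholders rather than metrics known to be exposed by this exporter; patterns starting with `*` must be quoted so the YAML parses.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - '*_celsius'
+      counter:
+        - '*_errors'
+```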
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the ports in the URLs with the ones on which the monitored applications expose their metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/softether_vpn_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/softether_vpn_server.md
new file mode 100644
index 000000000..30fd7cb64
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/softether_vpn_server.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/softether_vpn_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "SoftEther VPN Server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/VPNs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SoftEther VPN Server
+
+
+<img src="https://netdata.cloud/img/softether.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
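+
+For instance, a job that keeps only this exporter's own time series could allow a single name pattern, as in the sketch below. The `softether_*` prefix and the port are assumptions for illustration; adjust them to the metric names and port your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - softether_*
+```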
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
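+
+As a concrete sketch, the job below maps hypothetical untyped metrics ending in `_celsius` to gauges and ones ending in `_errors` to counters. Both patterns are placeholders rather than metrics known to be exposed by this exporter; patterns starting with `*` must be quoted so the YAML parses.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - '*_celsius'
+      counter:
+        - '*_errors'
+```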
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Replace the ports in the URLs with the ones on which the monitored applications expose their metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/solar_logging_stick.md b/src/go/plugin/go.d/modules/prometheus/integrations/solar_logging_stick.md
new file mode 100644
index 000000000..35c78085e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/solar_logging_stick.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/solar_logging_stick.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Solar logging stick"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Solar logging stick
+
+
+<img src="https://netdata.cloud/img/solar.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
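+
+For instance, a job that keeps only this exporter's own time series could allow a single name pattern, as in the sketch below. The `solar_*` prefix and the port are assumptions for illustration; adjust them to the metric names and port your exporter actually exposes.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - solar_*
+```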
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
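+
+As a concrete sketch, the job below maps hypothetical untyped metrics ending in `_celsius` to gauges and ones ending in `_errors` to counters. Both patterns are placeholders rather than metrics known to be exposed by this exporter; patterns starting with `*` must be quoted so the YAML parses.
+
+```yaml
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - '*_celsius'
+      counter:
+        - '*_errors'
+```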
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Replace the port in the URL with the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/solaredge_inverters.md b/src/go/plugin/go.d/modules/prometheus/integrations/solaredge_inverters.md
new file mode 100644
index 000000000..266f2d05c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/solaredge_inverters.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/solaredge_inverters.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "SolarEdge inverters"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SolarEdge inverters
+
+
+<img src="https://netdata.cloud/img/solaredge.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track SolarEdge solar inverter metrics for efficient solar energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
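+
+A minimal sketch (illustrative values, not taken from the shipped `go.d/prometheus.conf`) of how these global options can sit at the top level and be overridden per job:
+
+```yaml
+# Globals apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override
+```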
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/solis_ginlong_5g_inverters.md b/src/go/plugin/go.d/modules/prometheus/integrations/solis_ginlong_5g_inverters.md
new file mode 100644
index 000000000..d0d0658f5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/solis_ginlong_5g_inverters.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/solis_ginlong_5g_inverters.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Solis Ginlong 5G inverters"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Solis Ginlong 5G inverters
+
+
+<img src="https://netdata.cloud/img/solis.jpg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Solis solar inverter metrics for efficient solar energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
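+
+A minimal sketch (illustrative values, not taken from the shipped `go.d/prometheus.conf`) of how these global options can sit at the top level and be overridden per job:
+
+```yaml
+# Globals apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override
+```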
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sonic_nos.md b/src/go/plugin/go.d/modules/prometheus/integrations/sonic_nos.md
new file mode 100644
index 000000000..455f14fbf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sonic_nos.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sonic_nos.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "SONiC NOS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SONiC NOS
+
+
+<img src="https://netdata.cloud/img/sonic.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
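+
+A minimal sketch (illustrative values, not taken from the shipped `go.d/prometheus.conf`) of how these global options can sit at the top level and be overridden per job:
+
+```yaml
+# Globals apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override
+```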
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/spacelift.md b/src/go/plugin/go.d/modules/prometheus/integrations/spacelift.md
new file mode 100644
index 000000000..ab83110bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/spacelift.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/spacelift.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Spacelift"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Provisioning Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Spacelift
+
+
+<img src="https://netdata.cloud/img/spacelift.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
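+
+A minimal sketch (illustrative values, not taken from the shipped `go.d/prometheus.conf`) of how these global options can sit at the top level and be overridden per job:
+
+```yaml
+# Globals apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override
+```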
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/speedify_cli.md b/src/go/plugin/go.d/modules/prometheus/integrations/speedify_cli.md
new file mode 100644
index 000000000..beed0bd1a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/speedify_cli.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/speedify_cli.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Speedify CLI"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/VPNs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Speedify CLI
+
+
+<img src="https://netdata.cloud/img/speedify.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Speedify VPN metrics for efficient virtual private network (VPN) management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
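+
+A minimal sketch (illustrative values, not taken from the shipped `go.d/prometheus.conf`) of how these global options can sit at the top level and be overridden per job:
+
+```yaml
+# Globals apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override
+```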
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md b/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md
new file mode 100644
index 000000000..1116f91e0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sphinx.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Sphinx"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Search Engines"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Sphinx
+
+
+<img src="https://netdata.cloud/img/sphinx.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Sphinx search engine metrics for efficient search and indexing performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
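+
+A minimal sketch (illustrative values, not taken from the shipped `go.d/prometheus.conf`) of how these global options can sit at the top level and be overridden per job:
+
+```yaml
+# Globals apply to every job unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+    update_every: 5 # per-job override
+```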
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sql_database_agnostic.md b/src/go/plugin/go.d/modules/prometheus/integrations/sql_database_agnostic.md
new file mode 100644
index 000000000..6a0a523c6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sql_database_agnostic.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sql_database_agnostic.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "SQL Database agnostic"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SQL Database agnostic
+
+
+<img src="https://netdata.cloud/img/sql.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Query SQL databases for efficient database performance monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SQL Exporter](https://github.com/free/sql_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
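+A minimal sketch of how these globals might be set, assuming the standard go.d.plugin config layout where top-level values act as defaults for every job (the values and job below are illustrative):
+
+```yaml
+# Global defaults, overridden by the same keys inside an individual job.
+update_every: 10        # collect metrics every 10 seconds
+autodetection_retry: 60 # retry failed auto-detection every 60 seconds
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+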
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ssh.md b/src/go/plugin/go.d/modules/prometheus/integrations/ssh.md
new file mode 100644
index 000000000..7ffe9b203
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ssh.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ssh.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "SSH"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SSH
+
+
+<img src="https://netdata.cloud/img/ssh.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor SSH server metrics for efficient secure shell server management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
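+A minimal sketch of how these globals might be set, assuming the standard go.d.plugin config layout where top-level values act as defaults for every job (the values and job below are illustrative):
+
+```yaml
+# Global defaults, overridden by the same keys inside an individual job.
+update_every: 10        # collect metrics every 10 seconds
+autodetection_retry: 60 # retry failed auto-detection every 60 seconds
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+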
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ssl_certificate.md b/src/go/plugin/go.d/modules/prometheus/integrations/ssl_certificate.md
new file mode 100644
index 000000000..2c1d519a8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ssl_certificate.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ssl_certificate.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "SSL Certificate"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SSL Certificate
+
+
+<img src="https://netdata.cloud/img/ssl.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track SSL/TLS certificate metrics for efficient web security and certificate management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
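+A minimal sketch of how these globals might be set, assuming the standard go.d.plugin config layout where top-level values act as defaults for every job (the values and job below are illustrative):
+
+```yaml
+# Global defaults, overridden by the same keys inside an individual job.
+update_every: 10        # collect metrics every 10 seconds
+autodetection_retry: 60 # retry failed auto-detection every 60 seconds
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+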
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/starlink_spacex.md b/src/go/plugin/go.d/modules/prometheus/integrations/starlink_spacex.md
new file mode 100644
index 000000000..b48f32c9a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/starlink_spacex.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/starlink_spacex.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Starlink (SpaceX)"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Starlink (SpaceX)
+
+
+<img src="https://netdata.cloud/img/starlink.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
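+A minimal sketch of how these globals might be set, assuming the standard go.d.plugin config layout where top-level values act as defaults for every job (the values and job below are illustrative):
+
+```yaml
+# Global defaults, overridden by the same keys inside an individual job.
+update_every: 10        # collect metrics every 10 seconds
+autodetection_retry: 60 # retry failed auto-detection every 60 seconds
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+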
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md b/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md
new file mode 100644
index 000000000..9b7409b83
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/starwind_vsan_vsphere_edition.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Starwind VSAN VSphere Edition"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Starwind VSAN VSphere Edition
+
+
+<img src="https://netdata.cloud/img/starwind.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
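+A minimal sketch of how these globals might be set, assuming the standard go.d.plugin config layout where top-level values act as defaults for every job (the values and job below are illustrative):
+
+```yaml
+# Global defaults, overridden by the same keys inside an individual job.
+update_every: 10        # collect metrics every 10 seconds
+autodetection_retry: 60 # retry failed auto-detection every 60 seconds
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+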
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/statuspage.md b/src/go/plugin/go.d/modules/prometheus/integrations/statuspage.md
new file mode 100644
index 000000000..6038729dc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/statuspage.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/statuspage.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "StatusPage"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Incident Management"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# StatusPage
+
+
+<img src="https://netdata.cloud/img/statuspage.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor StatusPage.io incident and status metrics for efficient incident management and communication.
+
+
+Metrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
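+For reference, here's a minimal sketch of how these global options could be set at the top of `go.d/prometheus.conf` (values shown are illustrative; per-job settings, as in the Examples below, take precedence):
+
+```yaml
+# Global options apply to every job defined in this file
+# unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+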
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/steam.md b/src/go/plugin/go.d/modules/prometheus/integrations/steam.md
new file mode 100644
index 000000000..44b346593
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/steam.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/steam.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Steam"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Gaming"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Steam
+
+
+<img src="https://netdata.cloud/img/a2s.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Gain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
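+For reference, here's a minimal sketch of how these global options could be set at the top of `go.d/prometheus.conf` (values shown are illustrative; per-job settings, as in the Examples below, take precedence):
+
+```yaml
+# Global options apply to every job defined in this file
+# unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+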
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/storidge.md b/src/go/plugin/go.d/modules/prometheus/integrations/storidge.md
new file mode 100644
index 000000000..48a320ce6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/storidge.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/storidge.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Storidge"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Storidge
+
+
+<img src="https://netdata.cloud/img/storidge.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Storidge storage metrics for efficient storage management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
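+For reference, here's a minimal sketch of how these global options could be set at the top of `go.d/prometheus.conf` (values shown are illustrative; per-job settings, as in the Examples below, take precedence):
+
+```yaml
+# Global options apply to every job defined in this file
+# unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+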
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/stream.md b/src/go/plugin/go.d/modules/prometheus/integrations/stream.md
new file mode 100644
index 000000000..fb21cb4da
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/stream.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/stream.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Stream"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Media Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Stream
+
+
+<img src="https://netdata.cloud/img/stream.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor streaming metrics for efficient media streaming and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
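+For reference, here's a minimal sketch of how these global options could be set at the top of `go.d/prometheus.conf` (values shown are illustrative; per-job settings, as in the Examples below, take precedence):
+
+```yaml
+# Global options apply to every job defined in this file
+# unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+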
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/strongswan.md b/src/go/plugin/go.d/modules/prometheus/integrations/strongswan.md
new file mode 100644
index 000000000..ffddfb022
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/strongswan.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/strongswan.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "strongSwan"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/VPNs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# strongSwan
+
+
+<img src="https://netdata.cloud/img/strongswan.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
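+For reference, here's a minimal sketch of how these global options could be set at the top of `go.d/prometheus.conf` (values shown are illustrative; per-job settings, as in the Examples below, take precedence):
+
+```yaml
+# Global options apply to every job defined in this file
+# unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+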
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sunspec_solar_energy.md b/src/go/plugin/go.d/modules/prometheus/integrations/sunspec_solar_energy.md
new file mode 100644
index 000000000..552c5583b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sunspec_solar_energy.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sunspec_solar_energy.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Sunspec Solar Energy"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Sunspec Solar Energy
+
+
+<img src="https://netdata.cloud/img/sunspec.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
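+For reference, here's a minimal sketch of how these global options could be set at the top of `go.d/prometheus.conf` (values shown are illustrative; per-job settings, as in the Examples below, take precedence):
+
+```yaml
+# Global options apply to every job defined in this file
+# unless a job overrides them.
+update_every: 10
+autodetection_retry: 0
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:9090/metrics
+```
+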
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them (see the `fallback_type` section below). | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/suricata.md b/src/go/plugin/go.d/modules/prometheus/integrations/suricata.md
new file mode 100644
index 000000000..d5bdd01b5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/suricata.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/suricata.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Suricata"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Suricata
+
+
+<img src="https://netdata.cloud/img/suricata.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Suricata Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
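+
+##### Metric filtering
+
+> **Note**: This example is an editorial sketch, not generated from metadata.yaml. It assumes the exporter prefixes its metric names with `suricata_`; adjust the pattern to the names your endpoint actually exposes.
+
+Using the `selector` option from the table above to collect only a subset of the exposed time series.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   selector:
+     allow:
+       - suricata_*
+
+```
+</details>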
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/synology_activebackup.md b/src/go/plugin/go.d/modules/prometheus/integrations/synology_activebackup.md
new file mode 100644
index 000000000..b558bbf92
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/synology_activebackup.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/synology_activebackup.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Synology ActiveBackup"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Synology ActiveBackup
+
+
+<img src="https://netdata.cloud/img/synology.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Synology Active Backup metrics for efficient backup and data protection management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
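+
+##### Collecting through a proxy
+
+> **Note**: This example is an editorial sketch, not generated from metadata.yaml; the proxy URL and credentials are placeholders.
+
+Reaching the exporter through an HTTP proxy using the `proxy_url`, `proxy_username`, and `proxy_password` options from the table above.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: remote
+   url: http://192.0.2.1:9090/metrics
+   proxy_url: http://192.0.2.10:3128
+   proxy_username: proxyuser
+   proxy_password: proxypassword
+
+```
+</details>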
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/sysload.md b/src/go/plugin/go.d/modules/prometheus/integrations/sysload.md
new file mode 100644
index 000000000..369a43020
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/sysload.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/sysload.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Sysload"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Sysload
+
+
+<img src="https://netdata.cloud/img/sysload.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor system load metrics for efficient system performance and resource management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
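+
+##### Custom HTTP headers
+
+> **Note**: This example is an editorial sketch, not generated from metadata.yaml; the header name and value are placeholders.
+
+Sending an additional HTTP header with every scrape using the `headers` option from the table above.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   headers:
+     X-Api-Key: my-secret-key
+
+```
+</details>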
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md b/src/go/plugin/go.d/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md
new file mode 100644
index 000000000..55b26bf9c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/t-rex_nvidia_gpu_miner.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "T-Rex NVIDIA GPU Miner"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# T-Rex NVIDIA GPU Miner
+
+
+<img src="https://netdata.cloud/img/trex.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
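+
+##### Untyped metrics as gauges
+
+> **Note**: This example is an editorial sketch, not generated from metadata.yaml; the metric name pattern is a placeholder.
+
+Placing the `fallback_type` option documented above inside a job, so Untyped metrics whose names match the pattern are processed as Gauges instead of being ignored.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   fallback_type:
+     gauge:
+       - some_metric_name_pattern*
+
+```
+</details>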
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tacacs.md b/src/go/plugin/go.d/modules/prometheus/integrations/tacacs.md
new file mode 100644
index 000000000..5d3534393
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/tacacs.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/tacacs.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "TACACS"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Authentication and Authorization"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# TACACS
+
+
+<img src="https://netdata.cloud/img/tacacs.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
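+
+##### Dropping noisy time series
+
+> **Note**: This example is an editorial sketch, not generated from metadata.yaml; the deny pattern is a placeholder.
+
+Using the `deny` part of the `selector` option documented above to skip time series you do not want to store, for example the exporter's own runtime metrics.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+   url: http://127.0.0.1:9090/metrics
+   selector:
+     deny:
+       - go_*
+
+```
+</details>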
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tado_smart_heating_solution.md b/src/go/plugin/go.d/modules/prometheus/integrations/tado_smart_heating_solution.md
new file mode 100644
index 000000000..ece7fb677
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/tado_smart_heating_solution.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/tado_smart_heating_solution.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Tado smart heating solution"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tado smart heating solution
+
+
+<img src="https://netdata.cloud/img/tado.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Tado smart thermostat metrics for efficient home heating and cooling management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Tado° Exporter](https://github.com/eko/tado-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
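+
+As a concrete sketch (the metric name patterns are hypothetical, not taken from this exporter), keeping only HTTP series while dropping their histogram buckets could look like this:
+
+```yaml
+selector:
+ allow:
+  - myapp_http_*
+ deny:
+  - myapp_http_*_bucket
+```
+
+Series are collected only if they match at least one `allow` pattern and no `deny` pattern.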
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tankerkoenig_api.md b/src/go/plugin/go.d/modules/prometheus/integrations/tankerkoenig_api.md
new file mode 100644
index 000000000..01eb6557a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/tankerkoenig_api.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/tankerkoenig_api.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Tankerkoenig API"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tankerkoenig API
+
+
+<img src="https://netdata.cloud/img/tanker.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Tankerkoenig API fuel price metrics for efficient monitoring and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if the metric has a 'quantile' label.
+- As Histogram if the metric has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tankerkoenig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tesla_powerwall.md b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_powerwall.md
new file mode 100644
index 000000000..c24163111
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_powerwall.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/tesla_powerwall.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Tesla Powerwall"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tesla Powerwall
+
+
+<img src="https://netdata.cloud/img/tesla.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Tesla Powerwall metrics for efficient home energy storage and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if the metric has a 'quantile' label.
+- As Histogram if the metric has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tesla_vehicle.md b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_vehicle.md
new file mode 100644
index 000000000..56617affd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_vehicle.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/tesla_vehicle.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Tesla vehicle"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tesla vehicle
+
+
+<img src="https://netdata.cloud/img/tesla.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Tesla vehicle metrics for efficient electric vehicle management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if the metric has a 'quantile' label.
+- As Histogram if the metric has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tesla_wall_connector.md b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_wall_connector.md
new file mode 100644
index 000000000..8e3c0e901
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/tesla_wall_connector.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/tesla_wall_connector.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Tesla Wall Connector"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tesla Wall Connector
+
+
+<img src="https://netdata.cloud/img/tesla.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if the metric has a 'quantile' label.
+- As Histogram if the metric has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/tp-link_p110.md b/src/go/plugin/go.d/modules/prometheus/integrations/tp-link_p110.md
new file mode 100644
index 000000000..5dd150413
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/tp-link_p110.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/tp-link_p110.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "TP-Link P110"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# TP-Link P110
+
+
+<img src="https://netdata.cloud/img/tplink.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track TP-Link P110 smart plug metrics for efficient energy management and monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if the metric has a 'quantile' label.
+- As Histogram if the metric has an 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+>
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/traceroute.md b/src/go/plugin/go.d/modules/prometheus/integrations/traceroute.md
new file mode 100644
index 000000000..0896fd9ca
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/traceroute.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/traceroute.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Traceroute"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Traceroute
+
+
+<img src="https://netdata.cloud/img/traceroute.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Export traceroute metrics for efficient network path analysis and performance monitoring.
+
+
+Metrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on the pattern match, when 'fallback_type' is configured.
+- As Counter if the metric name has the suffix '_total'.
+- As Summary if the metric has a 'quantile' label.
+- As Histogram if the metric has an 'le' label.
+
+**The rest are ignored**.
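+
+As a minimal, hypothetical illustration (the metric names below are made up and not tied to this exporter), consider a scrape response containing only untyped samples:
+
+```text
+# No '# TYPE' lines precede these samples, so all three are untyped.
+myapp_requests_total 1027
+myapp_request_duration_seconds{quantile="0.99"} 0.12
+myapp_temperature_celsius 41
+```
+
+Under the rules above, `myapp_requests_total` is collected as a Counter ('_total' suffix), `myapp_request_duration_seconds` as a Summary ('quantile' label), and `myapp_temperature_celsius` is ignored unless a `fallback_type` rule (see the Setup section below) maps it to a Gauge or Counter.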
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/twincat_ads_web_service.md b/src/go/plugin/go.d/modules/prometheus/integrations/twincat_ads_web_service.md
new file mode 100644
index 000000000..e276e598d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/twincat_ads_web_service.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/twincat_ads_web_service.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "TwinCAT ADS Web Service"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# TwinCAT ADS Web Service
+
+
+<img src="https://netdata.cloud/img/twincat.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control.
+
+
+Metrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
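+
+As a minimal, hypothetical illustration (the metric names below are made up and not tied to this exporter), consider a scrape response containing only untyped samples:
+
+```text
+# No '# TYPE' lines precede these samples, so all three are untyped.
+myapp_requests_total 1027
+myapp_request_duration_seconds{quantile="0.99"} 0.12
+myapp_temperature_celsius 41
+```
+
+Under the rules above, `myapp_requests_total` is collected as a Counter ('_total' suffix), `myapp_request_duration_seconds` as a Summary ('quantile' label), and `myapp_temperature_celsius` is ignored unless a `fallback_type` rule (see the Setup section below) maps it to a Gauge or Counter.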
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/twitch.md b/src/go/plugin/go.d/modules/prometheus/integrations/twitch.md
new file mode 100644
index 000000000..f08f81bd9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/twitch.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/twitch.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Twitch"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Media Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Twitch
+
+
+<img src="https://netdata.cloud/img/twitch.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Twitch streaming platform metrics for efficient live streaming management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
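+
+As a minimal, hypothetical illustration (the metric names below are made up and not tied to this exporter), consider a scrape response containing only untyped samples:
+
+```text
+# No '# TYPE' lines precede these samples, so all three are untyped.
+myapp_requests_total 1027
+myapp_request_duration_seconds{quantile="0.99"} 0.12
+myapp_temperature_celsius 41
+```
+
+Under the rules above, `myapp_requests_total` is collected as a Counter ('_total' suffix), `myapp_request_duration_seconds` as a Summary ('quantile' label), and `myapp_temperature_celsius` is ignored unless a `fallback_type` rule (see the Setup section below) maps it to a Gauge or Counter.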
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/ubiquiti_ufiber_olt.md b/src/go/plugin/go.d/modules/prometheus/integrations/ubiquiti_ufiber_olt.md
new file mode 100644
index 000000000..810ebbea3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/ubiquiti_ufiber_olt.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/ubiquiti_ufiber_olt.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Ubiquiti UFiber OLT"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Ubiquiti UFiber OLT
+
+
+<img src="https://netdata.cloud/img/ubiquiti.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
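+
+As a minimal, hypothetical illustration (the metric names below are made up and not tied to this exporter), consider a scrape response containing only untyped samples:
+
+```text
+# No '# TYPE' lines precede these samples, so all three are untyped.
+myapp_requests_total 1027
+myapp_request_duration_seconds{quantile="0.99"} 0.12
+myapp_temperature_celsius 41
+```
+
+Under the rules above, `myapp_requests_total` is collected as a Counter ('_total' suffix), `myapp_request_duration_seconds` as a Summary ('quantile' label), and `myapp_temperature_celsius` is ignored unless a `fallback_type` rule (see the Setup section below) maps it to a Gauge or Counter.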
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md b/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md
new file mode 100644
index 000000000..9c6b5395a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/uptimerobot.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Uptimerobot"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Uptimerobot
+
+
+<img src="https://netdata.cloud/img/uptimerobot.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
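+
+As a minimal, hypothetical illustration (the metric names below are made up and not tied to this exporter), consider a scrape response containing only untyped samples:
+
+```text
+# No '# TYPE' lines precede these samples, so all three are untyped.
+myapp_requests_total 1027
+myapp_request_duration_seconds{quantile="0.99"} 0.12
+myapp_temperature_celsius 41
+```
+
+Under the rules above, `myapp_requests_total` is collected as a Counter ('_total' suffix), `myapp_request_duration_seconds` as a Summary ('quantile' label), and `myapp_temperature_celsius` is ignored unless a `fallback_type` rule (see the Setup section below) maps it to a Gauge or Counter.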
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/vault_pki.md b/src/go/plugin/go.d/modules/prometheus/integrations/vault_pki.md
new file mode 100644
index 000000000..a7d11cd16
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/vault_pki.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/vault_pki.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Vault PKI"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Vault PKI
+
+
+<img src="https://netdata.cloud/img/vault.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge, depending on pattern match, when 'fallback_type' is used.
+- As Counter if the metric name has the '_total' suffix.
+- As Summary if it has a 'quantile' label.
+- As Histogram if it has an 'le' label.
+
+**All other untyped metrics are ignored**.
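+
+As a minimal, hypothetical illustration (the metric names below are made up and not tied to this exporter), consider a scrape response containing only untyped samples:
+
+```text
+# No '# TYPE' lines precede these samples, so all three are untyped.
+myapp_requests_total 1027
+myapp_request_duration_seconds{quantile="0.99"} 0.12
+myapp_temperature_celsius 41
+```
+
+Under the rules above, `myapp_requests_total` is collected as a Counter ('_total' suffix), `myapp_request_duration_seconds` as a Summary ('quantile' label), and `myapp_temperature_celsius` is ignored unless a `fallback_type` rule (see the Setup section below) maps it to a Gauge or Counter.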
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics that exceed this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout in seconds. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port in the URL to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/vertica.md b/src/go/plugin/go.d/modules/prometheus/integrations/vertica.md
new file mode 100644
index 000000000..8463d713f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/vertica.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/vertica.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Vertica"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Vertica
+
+
+<img src="https://netdata.cloud/img/vertica.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Vertica analytics database platform metrics for efficient database performance and management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
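+
+For instance, a job that keeps only time series whose names start with `vertica_` could be configured as in the sketch below (the URL and the metric name pattern are illustrative assumptions, not taken from the exporter's actual output):
+
+```yaml
+jobs:
+  - name: vertica
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - vertica_*
+```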
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
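+
+As a sketch, a job that treats matching untyped metrics as gauges might look like this (the URL and pattern are illustrative assumptions):
+
+```yaml
+jobs:
+  - name: vertica
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - vertica_*
+```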
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/vscode.md b/src/go/plugin/go.d/modules/prometheus/integrations/vscode.md
new file mode 100644
index 000000000..5fcffca01
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/vscode.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/vscode.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "VSCode"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# VSCode
+
+
+<img src="https://netdata.cloud/img/vscode.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Visual Studio Code editor metrics for efficient development environment management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
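+
+For instance, a job that keeps only time series whose names start with `vscode_` could be configured as in the sketch below (the URL and the metric name pattern are illustrative assumptions, not taken from the exporter's actual output):
+
+```yaml
+jobs:
+  - name: vscode
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - vscode_*
+```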
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
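+
+As a sketch, a job that treats matching untyped metrics as gauges might look like this (the URL and pattern are illustrative assumptions):
+
+```yaml
+jobs:
+  - name: vscode
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - vscode_*
+```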
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/warp10.md b/src/go/plugin/go.d/modules/prometheus/integrations/warp10.md
new file mode 100644
index 000000000..e9e60dea6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/warp10.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/warp10.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Warp10"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Warp10
+
+
+<img src="https://netdata.cloud/img/warp10.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Warp 10 time-series database metrics for efficient time-series data management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
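+
+For instance, a job that keeps only time series whose names start with `warp10_` could be configured as in the sketch below (the URL and the metric name pattern are illustrative assumptions, not taken from the exporter's actual output):
+
+```yaml
+jobs:
+  - name: warp10
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - warp10_*
+```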
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
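+
+As a sketch, a job that treats matching untyped metrics as gauges might look like this (the URL and pattern are illustrative assumptions):
+
+```yaml
+jobs:
+  - name: warp10
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - warp10_*
+```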
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/xiaomi_mi_flora.md b/src/go/plugin/go.d/modules/prometheus/integrations/xiaomi_mi_flora.md
new file mode 100644
index 000000000..51314b8b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/xiaomi_mi_flora.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/xiaomi_mi_flora.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Xiaomi Mi Flora"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/IoT Devices"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Xiaomi Mi Flora
+
+
+<img src="https://netdata.cloud/img/xiaomi.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Keep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
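+
+For instance, a job that keeps only time series whose names start with `flowercare_` could be configured as in the sketch below (the URL and the metric name pattern are illustrative assumptions, not taken from the exporter's actual output):
+
+```yaml
+jobs:
+  - name: mi_flora
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - flowercare_*
+```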
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
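+
+As a sketch, a job that treats matching untyped metrics as gauges might look like this (the URL and pattern are illustrative assumptions):
+
+```yaml
+jobs:
+  - name: mi_flora
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - flowercare_*
+```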
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/xmpp_server.md b/src/go/plugin/go.d/modules/prometheus/integrations/xmpp_server.md
new file mode 100644
index 000000000..eacae8393
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/xmpp_server.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/xmpp_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "XMPP Server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# XMPP Server
+
+
+<img src="https://netdata.cloud/img/xmpp.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
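+
+For instance, a job that keeps only time series whose names start with `xmpp_` could be configured as in the sketch below (the URL and the metric name pattern are illustrative assumptions, not taken from the exporter's actual output):
+
+```yaml
+jobs:
+  - name: xmpp_server
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - xmpp_*
+```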
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
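+
+As a sketch, a job that treats matching untyped metrics as gauges might look like this (the URL and pattern are illustrative assumptions):
+
+```yaml
+jobs:
+  - name: xmpp_server
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - xmpp_*
+```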
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/yourls_url_shortener.md b/src/go/plugin/go.d/modules/prometheus/integrations/yourls_url_shortener.md
new file mode 100644
index 000000000..6b84c5ee6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/yourls_url_shortener.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/yourls_url_shortener.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "YOURLS URL Shortener"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/APM"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# YOURLS URL Shortener
+
+
+<img src="https://netdata.cloud/img/yourls.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (those without a '# TYPE' line) are processed as follows:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns more time series than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Per-metric (metric name) time series limit. Metrics with more time series than this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
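+
+For instance, a job that keeps only time series whose names start with `yourls_` could be configured as in the sketch below (the URL and the metric name pattern are illustrative assumptions, not taken from the exporter's actual output):
+
+```yaml
+jobs:
+  - name: yourls
+    url: http://127.0.0.1:9090/metrics
+    selector:
+      allow:
+        - yourls_*
+```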
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
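+
+As a sketch, a job that treats matching untyped metrics as gauges might look like this (the URL and pattern are illustrative assumptions):
+
+```yaml
+jobs:
+  - name: yourls
+    url: http://127.0.0.1:9090/metrics
+    fallback_type:
+      gauge:
+        - yourls_*
+```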
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port to the one on which the monitored application exposes its metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/zerto.md b/src/go/plugin/go.d/modules/prometheus/integrations/zerto.md
new file mode 100644
index 000000000..3d316461f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/zerto.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/zerto.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Zerto"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Cloud Provider Managed"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Zerto
+
+
+<img src="https://netdata.cloud/img/zerto.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
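+
+As a sketch of how these rules apply (the metric names below are purely illustrative), an endpoint exposing the following lines without any `# TYPE` metadata would be handled as noted in the comments:
+
+```
+# name ends in '_total' -> processed as a Counter
+myapp_requests_total 1027
+
+# has a 'quantile' label -> processed as a Summary
+myapp_latency_seconds{quantile="0.99"} 0.12
+
+# no suffix/label match -> ignored, unless a 'fallback_type' pattern classifies it
+myapp_temperature_celsius 42
+```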
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/zulip.md b/src/go/plugin/go.d/modules/prometheus/integrations/zulip.md
new file mode 100644
index 000000000..91e652c47
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/zulip.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/zulip.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Zulip"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Media Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Zulip
+
+
+<img src="https://netdata.cloud/img/zulip.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Monitor Zulip open-source group chat application metrics for efficient team communication management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/integrations/zyxel_gs1200-8.md b/src/go/plugin/go.d/modules/prometheus/integrations/zyxel_gs1200-8.md
new file mode 100644
index 000000000..4f0b43431
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/integrations/zyxel_gs1200-8.md
@@ -0,0 +1,325 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/integrations/zyxel_gs1200-8.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/prometheus/metadata.yaml"
+sidebar_label: "Zyxel GS1200-8"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Networking Stack and Network Interfaces"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Zyxel GS1200-8
+
+
+<img src="https://netdata.cloud/img/zyxel.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: prometheus
+
+<img src="https://img.shields.io/badge/maintained%20by-Community-blue" />
+
+## Overview
+
+Track Zyxel GS1200 network switch metrics for efficient network device management and performance.
+
+
+Metrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+| Metric | Chart | Dimension(s) | Algorithm |
+|---------------------------|-------------------------------------------|----------------------|-------------|
+| Gauge | for each label set | one, the metric name | absolute |
+| Counter | for each label set | one, the metric name | incremental |
+| Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+| Summary (sum and count) | for each label set | the metric name | incremental |
+| Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+| Histogram (sum and count) | for each label set | the metric name | incremental |
+
+Untyped metrics (have no '# TYPE') processing:
+
+- As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+- As Counter if it has suffix '_total'.
+- As Summary if it has 'quantile' label.
+- As Histogram if it has 'le' label.
+
+**The rest are ignored**.
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Exporter
+
+Install [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/prometheus.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/prometheus.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| selector | Time series selector (filter). | | no |
+| fallback_type | Process Untyped metrics as Counter or Gauge instead of ignoring them. | | no |
+| max_time_series | Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed. | 2000 | no |
+| max_time_series_per_metric | Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped. | 200 | no |
+| timeout | HTTP request timeout. | 10 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### selector
+
+This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+- Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+- Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+- Option syntax:
+
+```yaml
+selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+```
+
+
+##### fallback_type
+
+This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+- Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+- Option syntax:
+
+```yaml
+fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+A basic example configuration.
+
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+```
+##### Read metrics from a file
+
+An example configuration to read metrics from a file.
+
+<details open><summary>Config</summary>
+
+```yaml
+# use "file://" scheme
+jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+
+```
+</details>
+
+##### HTTP authentication
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Basic HTTP authentication.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+> **Note**: Change the port of the monitored application on which it provides metrics.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `prometheus` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m prometheus
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `prometheus` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep prometheus
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep prometheus /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep prometheus
+```
+
+
diff --git a/src/go/plugin/go.d/modules/prometheus/metadata.yaml b/src/go/plugin/go.d/modules/prometheus/metadata.yaml
new file mode 100644
index 000000000..fee2b820b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/metadata.yaml
@@ -0,0 +1,7866 @@
+plugin_name: go.d.plugin
+modules:
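+  # Note: the first entry below defines YAML anchors (&module, &meta, &overview, &setup)
+  # carrying the generic Prometheus collector defaults. Every subsequent entry starts with
+  # "<<: *module" to merge those defaults and then overrides only the meta, overview, and
+  # setup fields specific to the exporter it documents.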
+ - &module
+ meta: &meta
+ id: collector-go.d.plugin-prometheus-generic
+ module_name: prometheus
+ plugin_name: go.d.plugin
+ monitored_instance:
+ name: Prometheus endpoint
+ link: https://prometheus.io/
+ icon_filename: prometheus.svg
+ categories:
+ - data-collection.generic-data-collection
+ # - data-collection.apm
+ keywords:
+ - prometheus
+ - openmetrics
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview: &overview
+ data_collection:
+ metrics_description: |
+ This generic Prometheus collector gathers metrics from any [`Prometheus`](https://prometheus.io/) endpoints.
+ method_description: |
+ It collects metrics by periodically sending HTTP requests to the target instance.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects instances running on the local host by trying to connect to known ports that are [allocated to exporters](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup: &setup
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/prometheus.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: ""
+ required: true
+ - name: selector
+ description: Time series selector (filter).
+ default_value: ""
+ required: false
+ detailed_description: |
+ This option allows you to filter out unwanted time series. Only metrics matching the selector will be collected.
+
+                - Logic: (pattern1 OR pattern2) AND !(pattern3 OR pattern4)
+ - Pattern syntax: [selector](/src/go/plugin/go.d/pkg/prometheus/selector/README.md).
+ - Option syntax:
+
+ ```yaml
+ selector:
+ allow:
+ - pattern1
+ - pattern2
+ deny:
+ - pattern3
+ - pattern4
+ ```
+ - name: fallback_type
+              description: Process Untyped metrics as Counter or Gauge instead of ignoring them.
+ default_value: ""
+ required: false
+ detailed_description: |
+ This option allows you to process Untyped metrics as Counter or Gauge instead of ignoring them.
+
+ - Metric name pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match).
+ - Option syntax:
+
+ ```yaml
+ fallback_type:
+ counter:
+ - metric_name_pattern1
+ - metric_name_pattern2
+ gauge:
+ - metric_name_pattern3
+ - metric_name_pattern4
+ ```
+ - name: max_time_series
+              description: Global time series limit. If an endpoint returns a number of time series greater than this limit, the data is not processed.
+ default_value: 2000
+ required: false
+ - name: max_time_series_per_metric
+              description: Time series limit per metric (metric name). Metrics whose number of time series exceeds this limit are skipped.
+ default_value: 200
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 10
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: |
+ > **Note**: Change the port of the monitored application on which it provides metrics.
+
+ A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ - name: Read metrics from a file
+ description: An example configuration to read metrics from a file.
+ config: |
+ # use "file://" scheme
+ jobs:
+ - name: myapp
+ url: file:///opt/metrics/myapp/metrics.txt
+ - name: HTTP authentication
+ description: |
+ > **Note**: Change the port of the monitored application on which it provides metrics.
+
+ Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: |
+ > **Note**: Change the port of the monitored application on which it provides metrics.
+
+ Do not validate server certificate chain and hostname.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:9090/metrics
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+ > **Note**: Change the port of the monitored application on which it provides metrics.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:9090/metrics
+
+ - name: remote
+ url: http://192.0.2.1:9090/metrics
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: |
+ This collector has built-in grouping logic based on the [type of metrics](https://prometheus.io/docs/concepts/metric_types/).
+
+ | Metric | Chart | Dimension(s) | Algorithm |
+ |---------------------------|-------------------------------------------|----------------------|-------------|
+ | Gauge | for each label set | one, the metric name | absolute |
+ | Counter | for each label set | one, the metric name | incremental |
+ | Summary (quantiles) | for each label set (excluding 'quantile') | for each quantile | absolute |
+ | Summary (sum and count) | for each label set | the metric name | incremental |
+ | Histogram (buckets) | for each label set (excluding 'le') | for each bucket | incremental |
+ | Histogram (sum and count) | for each label set | the metric name | incremental |
+
+ Untyped metrics (have no '# TYPE') processing:
+
+ - As Counter or Gauge depending on pattern match when 'fallback_type' is used.
+ - As Counter if it has suffix '_total'.
+ - As Summary if it has 'quantile' label.
+ - As Histogram if it has 'le' label.
+
+ **The rest are ignored**.
+ availability: []
+ scopes: []
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-a10-acos
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: A10 ACOS network devices
+ link: https://github.com/a10networks/PrometheusExporter
+ icon_filename: a10-networks.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords:
+ - network monitoring
+ - network performance
+ - traffic analysis
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor A10 Networks device metrics for comprehensive management and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [A10-Networks Prometheus Exporter](https://github.com/a10networks/PrometheusExporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-airflow
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Apache Airflow
+ link: https://github.com/shalb/airflow-exporter
+ icon_filename: airflow.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Apache Airflow metrics to optimize task scheduling and workflow management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Airflow exporter](https://github.com/shalb/airflow-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Airflow exporter](https://github.com/shalb/airflow-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ most_popular: false
+ community: true
+ id: collector-go.d.plugin-prometheus-alibaba-cloud
+ monitored_instance:
+ name: Alibaba Cloud
+ link: https://github.com/aylei/aliyun-exporter # FIXME: This repository has been archived by the owner on Oct 28, 2019
+ icon_filename: alibaba-cloud.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Alibaba Cloud services and resources for efficient management and cost optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Alibaba Cloud Exporter](https://github.com/aylei/aliyun-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-flink
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Apache Flink
+ link: https://github.com/matsumana/flink_exporter
+ icon_filename: apache_flink.png
+ categories:
+ - data-collection.apm
+ keywords:
+ - web server
+ - http
+ - https
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Apache Flink metrics for efficient stream processing and application management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Apache Flink Metrics Reporter](https://github.com/matsumana/flink_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ most_popular: false
+ community: true
+ id: collector-go.d.plugin-prometheus-aruba
+ monitored_instance:
+ name: Aruba devices
+ link: https://github.com/slashdoom/aruba_exporter
+ icon_filename: aruba.svg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords:
+ - network monitoring
+ - network performance
+ - aruba devices
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Aruba Networks devices performance metrics for comprehensive network management and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Aruba Exporter](https://github.com/slashdoom/aruba_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Aruba Exporter](https://github.com/slashdoom/aruba_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_ec2
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AWS EC2 Compute instances
+ link: https://github.com/O1ahmad/aws_ec2_exporter
+ icon_filename: aws-ec2.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - aws services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track AWS EC2 instances key metrics for optimized performance and cost management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [AWS EC2 Exporter](https://github.com/O1ahmad/aws_ec2_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_ecs
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AWS ECS
+ link: https://github.com/bevers222/ecs-exporter
+ icon_filename: amazon-ecs.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - aws services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on AWS ECS services and resources for optimized container management and orchestration.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [AWS ECS exporter](https://github.com/bevers222/ecs-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [AWS ECS exporter](https://github.com/bevers222/ecs-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_health
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AWS Health events
+ link: https://github.com/vladvasiliu/aws-health-exporter-rs
+ icon_filename: aws.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - aws services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track AWS service health metrics for proactive incident management and resolution.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [AWS Health Exporter](https://github.com/vladvasiliu/aws-health-exporter-rs) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_instance_health
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AWS instance health
+ link: https://github.com/bobtfish/aws-instance-health-exporter
+ icon_filename: aws.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - aws services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor the health of AWS instances for improved performance and availability.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [AWS instance health exporter](https://github.com/bobtfish/aws-instance-health-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_s3
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AWS S3 buckets
+ link: https://github.com/ribbybibby/s3_exporter
+ icon_filename: aws-s3.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - aws services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor AWS S3 storage metrics for optimized performance, data management, and cost efficiency.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [AWS S3 Exporter](https://github.com/ribbybibby/s3_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_sqs
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AWS SQS
+ link: https://github.com/jmal98/sqs-exporter
+ icon_filename: aws-sqs.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - aws services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track AWS SQS messaging metrics for efficient message processing and queue management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [AWS SQS Exporter](https://github.com/jmal98/sqs-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-azure_ad_app_passwords
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Azure AD App passwords
+ link: https://github.com/vladvasiliu/azure-app-secrets-monitor
+ icon_filename: azure.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - azure services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Safeguard and track Azure App secrets for enhanced security and access management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Azure App Secrets monitor](https://github.com/vladvasiliu/azure-app-secrets-monitor) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-azure_elastic_sql
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Azure Elastic Pool SQL
+ link: https://github.com/benclapp/azure_elastic_sql_exporter
+ icon_filename: azure-elastic-sql.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - database
+ - relational db
+ - data querying
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Azure Elastic SQL performance metrics for efficient database management and query optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Azure Elastic SQL Exporter](https://github.com/benclapp/azure_elastic_sql_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-azure_app
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Azure application
+ link: https://github.com/RobustPerception/azure_metrics_exporter
+ icon_filename: azure.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - azure services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Azure Monitor metrics for comprehensive resource management and performance optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Azure Monitor exporter](https://github.com/RobustPerception/azure_metrics_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-azure_res
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Azure Resources
+ link: https://github.com/FXinnovation/azure_metrics_exporter
+ icon_filename: azure.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - azure services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Azure resources vital metrics for efficient cloud management and cost optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Azure Resources Exporter](https://github.com/FXinnovation/azure_metrics_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-azure_service_bus
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Azure Service Bus
+ link: https://github.com/marcinbudny/servicebus_exporter
+ icon_filename: azure-service-bus.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - azure services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Azure Service Bus messaging metrics for optimized communication and integration.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Azure Service Bus Exporter](https://github.com/marcinbudny/servicebus_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-azure_sql
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Azure SQL
+ link: https://github.com/iamseth/azure_sql_exporter
+ icon_filename: azure-sql.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - database
+ - relational db
+ - data querying
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Azure SQL performance metrics for efficient database management and query performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Azure SQL exporter](https://github.com/iamseth/azure_sql_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-bigquery
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: BigQuery
+ link: https://github.com/m-lab/prometheus-bigquery-exporter
+ icon_filename: bigquery.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Google BigQuery metrics for optimized data processing and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [BigQuery Exporter](https://github.com/m-lab/prometheus-bigquery-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-blackbox
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Blackbox
+ link: https://github.com/prometheus/blackbox_exporter
+ icon_filename: prometheus.svg
+ categories:
+ - data-collection.synthetic-checks
+ keywords:
+ - blackbox
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track external service availability and response times with Blackbox monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Blackbox exporter](https://github.com/prometheus/blackbox_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Blackbox exporter](https://github.com/prometheus/blackbox_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-borg
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Borg backup
+ link: https://github.com/k0ral/borg-exporter
+ icon_filename: borg.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Borg backup performance metrics for efficient data protection and recovery.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Borg backup exporter](https://github.com/k0ral/borg-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Borg backup exporter](https://github.com/k0ral/borg-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cadvisor
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: cAdvisor
+ link: https://github.com/google/cadvisor
+ icon_filename: cadvisor.png
+ categories:
+ - data-collection.containers-and-vms
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor container resource usage and performance metrics with cAdvisor for efficient container management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [cAdvisor](https://github.com/google/cadvisor).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [cAdvisor](https://github.com/google/cadvisor) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cilium_agent
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Cilium Agent
+ link: https://github.com/cilium/cilium
+ icon_filename: cilium.png
+ categories:
+ - data-collection.kubernetes
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Cilium Agent metrics for optimized network security and connectivity.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cilium Agent](https://github.com/cilium/cilium).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cilium Agent](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cilium_operator
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Cilium Operator
+ link: https://github.com/cilium/cilium
+ icon_filename: cilium.png
+ categories:
+ - data-collection.kubernetes
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Cilium Operator metrics for efficient Kubernetes network security management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cilium Operator](https://github.com/cilium/cilium).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cilium Operator](https://github.com/cilium/cilium) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cilium_proxy
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Cilium Proxy
+ link: https://github.com/cilium/proxy
+ icon_filename: cilium.png
+ categories:
+ - data-collection.kubernetes
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Cilium Proxy metrics for enhanced network security and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cilium Proxy](https://github.com/cilium/proxy).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cilium Proxy](https://github.com/cilium/proxy) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cisco_aci
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Cisco ACI
+ link: https://github.com/RavuAlHemio/prometheus_aci_exporter
+ icon_filename: cisco.svg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords:
+ - network monitoring
+ - network performance
+ - cisco devices
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Cisco ACI infrastructure metrics for optimized network performance and resource management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cisco ACI Exporter](https://github.com/RavuAlHemio/prometheus_aci_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-citrix_netscaler
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Citrix NetScaler
+ link: https://github.com/rokett/Citrix-NetScaler-Exporter
+ icon_filename: citrix.svg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords:
+ - network monitoring
+ - network performance
+ - traffic analysis
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on NetScaler performance metrics for efficient application delivery and load balancing.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Citrix NetScaler Exporter](https://github.com/rokett/Citrix-NetScaler-Exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cloudflare_pcap
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Cloudflare PCAP
+ link: https://github.com/wehkamp/docker-prometheus-cloudflare-exporter
+ icon_filename: cloudflare.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Cloudflare CDN and security metrics for optimized content delivery and protection.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cloudflare exporter](https://github.com/wehkamp/docker-prometheus-cloudflare-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_cloudwatch
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: CloudWatch
+ link: https://github.com/prometheus/cloudwatch_exporter
+ icon_filename: aws-cloudwatch.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor AWS CloudWatch metrics for comprehensive AWS resource management and performance optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [CloudWatch exporter](https://github.com/prometheus/cloudwatch_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-concourse
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Concourse
+ link: https://concourse-ci.org
+ icon_filename: concourse.png
+ categories:
+ - data-collection.ci-cd-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Concourse CI/CD pipeline metrics for optimized workflow management and deployment.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to the Concourse built-in Prometheus exporter.
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Configure built-in Prometheus exporter
+ description: |
+ To configure the built-in Prometheus exporter, follow the [official documentation](https://concourse-ci.org/metrics.html#configuring-metrics).
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-crowdsec
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Crowdsec
+ link: https://docs.crowdsec.net/docs/observability/prometheus
+ icon_filename: crowdsec.png
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Crowdsec security metrics for efficient threat detection and response.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to the Crowdsec built-in Prometheus exporter.
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Configure built-in Prometheus exporter
+ description: |
+ To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.crowdsec.net/docs/observability/prometheus/).
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-dell_emc_ecs
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Dell EMC ECS cluster
+ link: https://github.com/paychex/prometheus-emcecs-exporter
+ icon_filename: dell.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Dell EMC ECS object storage metrics for optimized storage management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Dell EMC ECS Exporter](https://github.com/paychex/prometheus-emcecs-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-dell_emc_isilon
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Dell EMC Isilon cluster
+ link: https://github.com/paychex/prometheus-isilon-exporter
+ icon_filename: dell.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Dell EMC Isilon scale-out NAS metrics for efficient storage management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Dell EMC Isilon Exporter](https://github.com/paychex/prometheus-isilon-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-digitalocean
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: DigitalOcean
+ link: https://github.com/metalmatze/digitalocean_exporter
+ icon_filename: digitalocean.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track DigitalOcean cloud provider metrics for optimized resource management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [DigitalOcean Exporter](https://github.com/metalmatze/digitalocean_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-discourse
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Discourse
+ link: https://github.com/discourse/discourse-prometheus
+ icon_filename: discourse.svg
+ categories:
+ - data-collection.media-streaming-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Discourse forum metrics for efficient community management and engagement.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Discourse Exporter](https://github.com/discourse/discourse-prometheus).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Discourse Exporter](https://github.com/discourse/discourse-prometheus) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-dynatrace
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Dynatrace
+ link: https://github.com/Apside-TOP/dynatrace_exporter
+ icon_filename: dynatrace.svg
+ categories:
+ - data-collection.observability
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Dynatrace APM metrics for comprehensive application performance management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Dynatrace Exporter](https://github.com/Apside-TOP/dynatrace_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-eos_web
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: EOS
+ link: https://eos-web.web.cern.ch/eos-web/
+ icon_filename: eos.png
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor CERN EOS metrics for efficient storage management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [EOS exporter](https://github.com/cern-eos/eos_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [EOS exporter](https://github.com/cern-eos/eos_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-etcd
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: etcd
+ link: https://etcd.io/
+ icon_filename: etcd.svg
+ categories:
+ - data-collection.service-discovery-registry
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track etcd database metrics for optimized distributed key-value store management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to the etcd built-in Prometheus exporter.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-fortigate
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Fortigate firewall
+ link: https://github.com/bluecmd/fortigate_exporter
+ icon_filename: fortinet.svg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Fortigate firewall metrics for enhanced network protection and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [fortigate_exporter](https://github.com/bluecmd/fortigate_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-freebsd_nfs
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: FreeBSD NFS
+ link: https://github.com/Axcient/freebsd-nfs-exporter
+ icon_filename: freebsd.svg
+ categories:
+ - data-collection.freebsd
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor FreeBSD Network File System metrics for efficient file sharing management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [FreeBSD NFS Exporter](https://github.com/Axcient/freebsd-nfs-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-freebsd_rctl
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: FreeBSD RCTL-RACCT
+ link: https://github.com/yo000/rctl_exporter
+ icon_filename: freebsd.svg
+ categories:
+ - data-collection.freebsd
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on FreeBSD Resource Container metrics for optimized resource management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [FreeBSD RCTL Exporter](https://github.com/yo000/rctl_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-gcp_gce
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: GCP GCE
+ link: https://github.com/O1ahmad/gcp-gce-exporter
+ icon_filename: gcp-gce.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Google Cloud Platform Compute Engine metrics for efficient cloud resource management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [GCP GCE Exporter](https://github.com/O1ahmad/gcp-gce-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-gcp_quota
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: GCP Quota
+ link: https://github.com/mintel/gcp-quota-exporter
+ icon_filename: gcp.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Google Cloud Platform quota metrics for optimized resource usage and cost management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [GCP Quota Exporter](https://github.com/mintel/gcp-quota-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-github_repo
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: GitHub repository
+ link: https://github.com/githubexporter/github-exporter
+ icon_filename: github.svg
+ categories:
+ - data-collection.other
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track GitHub repository metrics for optimized project and user analytics monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [GitHub Exporter](https://github.com/githubexporter/github-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [GitHub Exporter](https://github.com/githubexporter/github-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-gitlab_runner
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: GitLab Runner
+ link: https://gitlab.com/gitlab-org/gitlab-runner
+ icon_filename: gitlab.png
+ categories:
+ - data-collection.ci-cd-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on GitLab CI/CD job metrics for efficient development and deployment management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to the GitLab Runner built-in Prometheus exporter.
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Configure built-in Prometheus exporter
+ description: |
+ To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server).
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-gobetween
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Gobetween
+ link: https://github.com/yyyar/gobetween
+ icon_filename: gobetween.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Gobetween load balancer metrics for optimized network traffic management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to the Gobetween built-in Prometheus exporter.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-gcp
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Google Cloud Platform
+ link: https://github.com/DazWilkin/gcp-exporter
+ icon_filename: gcp.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Google Cloud Platform metrics for comprehensive cloud resource management and performance optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Google Cloud Platform Exporter](https://github.com/DazWilkin/gcp-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-gcp_stackdriver
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Google Stackdriver
+ link: https://github.com/prometheus-community/stackdriver_exporter
+ icon_filename: gcp-stackdriver.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - google cloud services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Google Stackdriver monitoring metrics for optimized cloud performance and diagnostics.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Google Stackdriver exporter](https://github.com/prometheus-community/stackdriver_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-grafana
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Grafana
+ link: https://grafana.com/
+ icon_filename: grafana.png
+ categories:
+ - data-collection.observability
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Grafana dashboard and visualization metrics for optimized monitoring and data analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to the Grafana built-in Prometheus exporter.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-graylog
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Graylog Server
+ link: https://github.com/Graylog2/graylog2-server/
+ icon_filename: graylog.svg
+ categories:
+ - data-collection.logs-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Graylog server metrics for efficient log management and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to the Graylog built-in Prometheus exporter.
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Configure built-in Prometheus exporter
+ description: |
+ To configure the built-in Prometheus exporter, follow the [official documentation](https://go2docs.graylog.org/5-0/interacting_with_your_log_data/metrics.html#PrometheusMetricExporting).
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hana
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: HANA
+ link: https://github.com/jenningsloy318/hana_exporter
+ icon_filename: sap.svg
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track SAP HANA database metrics for efficient data storage and query performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [HANA Exporter](https://github.com/jenningsloy318/hana_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [HANA Exporter](https://github.com/jenningsloy318/hana_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-honeypot
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Honeypot
+ link: https://github.com/Intrinsec/honeypot_exporter
+ icon_filename: intrinsec.svg
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor honeypot metrics for efficient threat detection and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Intrinsec honeypot_exporter](https://github.com/Intrinsec/honeypot_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hp_ilo
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: HP iLO
+ link: https://github.com/infinityworks/hpilo-exporter
+ icon_filename: hp.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor HP Integrated Lights-Out (iLO) metrics for efficient server management and diagnostics.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [HP iLO Metrics Exporter](https://github.com/infinityworks/hpilo-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hubble
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Hubble
+ link: https://github.com/cilium/hubble
+ icon_filename: hubble.png
+ categories:
+ - data-collection.observability
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Hubble network observability metrics for efficient network visibility and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to the Hubble built-in Prometheus exporter.
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Configure built-in Prometheus exporter
+ description: |
+ To configure the built-in Prometheus exporter, follow the [official documentation](https://docs.cilium.io/en/stable/observability/metrics/#hubble-metrics).
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ibm_spectrum
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: IBM Spectrum
+ link: https://github.com/topine/ibm-spectrum-exporter
+ icon_filename: ibm.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor IBM Spectrum storage metrics for efficient data management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [IBM Spectrum Exporter](https://github.com/topine/ibm-spectrum-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-influxdb
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: InfluxDB
+ link: https://github.com/prometheus/influxdb_exporter
+ icon_filename: influxdb.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - database
+ - dbms
+ - data storage
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor InfluxDB time-series database metrics for efficient data storage and query performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [InfluxDB exporter](https://github.com/prometheus/influxdb_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-jenkins
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Jenkins
+ link: https://www.jenkins.io/
+ icon_filename: jenkins.svg
+ categories:
+ - data-collection.ci-cd-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Jenkins continuous integration server metrics for efficient development and build management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Jenkins exporter](https://github.com/simplesurance/jenkins-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-jmx
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: JMX
+ link: https://github.com/prometheus/jmx_exporter
+ icon_filename: java.svg
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Java Management Extensions (JMX) metrics for efficient Java application management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [JMX Exporter](https://github.com/prometheus/jmx_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [JMX Exporter](https://github.com/prometheus/jmx_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-jolokia
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: jolokia
+ link: https://github.com/aklinkert/jolokia_exporter
+ icon_filename: jolokia.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Jolokia JVM metrics for optimized Java application performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [jolokia_exporter](https://github.com/aklinkert/jolokia_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-kafka_consumer_lag
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Kafka Consumer Lag
+ link: https://github.com/omarsmak/kafka-consumer-lag-monitoring
+ icon_filename: kafka.svg
+ categories:
+ - data-collection.service-discovery-registry
+ keywords:
+ - big data
+ - stream processing
+ - message broker
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Kafka consumer lag metrics for efficient message queue management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Kafka Consumer Lag Monitoring](https://github.com/omarsmak/kafka-consumer-lag-monitoring) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-kafka
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Kafka
+ link: https://github.com/danielqsj/kafka_exporter/
+ icon_filename: kafka.svg
+ categories:
+ - data-collection.message-brokers
+ keywords:
+ - big data
+ - stream processing
+ - message broker
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Kafka message queue metrics for optimized data streaming and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Kafka Exporter](https://github.com/danielqsj/kafka_exporter/) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-kafka_zookeeper
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Kafka ZooKeeper
+ link: https://github.com/cloudflare/kafka_zookeeper_exporter
+ icon_filename: kafka.svg
+ categories:
+ - data-collection.message-brokers
+ keywords:
+ - big data
+ - stream processing
+ - message broker
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Kafka ZooKeeper metrics for optimized distributed coordination and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Kafka ZooKeeper Exporter](https://github.com/cloudflare/kafka_zookeeper_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-linode
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Linode
+ link: https://github.com/DazWilkin/linode-exporter
+ icon_filename: linode.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Linode cloud hosting metrics for efficient virtual server management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Linode Exporter](https://github.com/DazWilkin/linode-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Linode Exporter](https://github.com/DazWilkin/linode-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-loki
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: loki
+ link: https://github.com/grafana/loki
+ icon_filename: loki.png
+ categories:
+ - data-collection.logs-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Loki log aggregation metrics for efficient log management and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [loki](https://github.com/grafana/loki).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Loki
+ description: |
+ Install [loki](https://github.com/grafana/loki) according to its documentation.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-minecraft
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Minecraft
+ link: https://github.com/sladkoff/minecraft-prometheus-exporter
+ icon_filename: minecraft.png
+ categories:
+ - data-collection.gaming
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Minecraft server metrics for efficient game server management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Minecraft Exporter](https://github.com/sladkoff/minecraft-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-mosquitto
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: mosquitto
+ link: https://github.com/sapcc/mosquitto-exporter
+ icon_filename: mosquitto.svg
+ categories:
+ - data-collection.message-brokers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Mosquitto MQTT broker metrics for efficient IoT message transport and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [mosquitto exporter](https://github.com/sapcc/mosquitto-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-mp707
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: MP707 USB thermometer
+ link: https://github.com/nradchenko/mp707_exporter
+ icon_filename: thermometer.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track MP707 USB thermometer metrics for efficient temperature monitoring and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [MP707 exporter](https://github.com/nradchenko/mp707_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [MP707 exporter](https://github.com/nradchenko/mp707_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ibm_mq
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: IBM MQ
+ link: https://github.com/agebhar1/mq_exporter
+ icon_filename: ibm.svg
+ categories:
+ - data-collection.message-brokers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on IBM MQ message queue metrics for efficient message transport and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [MQ Exporter](https://github.com/agebhar1/mq_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [MQ Exporter](https://github.com/agebhar1/mq_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-mqtt_blackbox
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: MQTT Blackbox
+ link: https://github.com/inovex/mqtt_blackbox_exporter
+ icon_filename: mqtt.svg
+ categories:
+ - data-collection.message-brokers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track MQTT message transport performance using blackbox testing methods.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [MQTT Blackbox Exporter](https://github.com/inovex/mqtt_blackbox_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-netapp_ontap
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Netapp ONTAP API
+ link: https://github.com/sapcc/netapp-api-exporter
+ icon_filename: netapp.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - network monitoring
+ - network performance
+ - traffic analysis
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on NetApp ONTAP storage system metrics for efficient data storage management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Netapp ONTAP API Exporter](https://github.com/sapcc/netapp-api-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-netapp_solidfire
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: NetApp Solidfire
+ link: https://github.com/mjavier2k/solidfire-exporter
+ icon_filename: netapp.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - network monitoring
+ - network performance
+ - traffic analysis
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track NetApp Solidfire storage system metrics for efficient data storage management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [NetApp Solidfire Exporter](https://github.com/mjavier2k/solidfire-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-netmeter
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: NetMeter
+ link: https://github.com/ssbostan/netmeter-exporter
+ icon_filename: netmeter.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords:
+ - network monitoring
+ - network performance
+ - traffic analysis
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor NetMeter network traffic metrics for efficient network management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [NetMeter Exporter](https://github.com/ssbostan/netmeter-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-newrelic
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: New Relic
+ link: https://github.com/jfindley/newrelic_exporter
+ icon_filename: newrelic.svg
+ categories:
+ - data-collection.observability
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor New Relic application performance management metrics for efficient application monitoring and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [New Relic exporter](https://github.com/jfindley/newrelic_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [New Relic exporter](https://github.com/jfindley/newrelic_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-openvswitch
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Open vSwitch
+ link: https://github.com/digitalocean/openvswitch_exporter
+ icon_filename: ovs.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Open vSwitch software-defined networking metrics for efficient network virtualization and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Open vSwitch Exporter](https://github.com/digitalocean/openvswitch_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-openldap
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OpenLDAP (community)
+ link: https://github.com/tomcz/openldap_exporter
+ icon_filename: openldap.svg
+ categories:
+ - data-collection.authentication-and-authorization
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor OpenLDAP directory service metrics for efficient directory management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [OpenLDAP Metrics Exporter](https://github.com/tomcz/openldap_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-openstack
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OpenStack
+ link: https://github.com/CanonicalLtd/prometheus-openstack-exporter
+ icon_filename: openstack.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track OpenStack cloud computing platform metrics for efficient infrastructure management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Openstack exporter](https://github.com/CanonicalLtd/prometheus-openstack-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-openvas
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OpenVAS
+ link: https://github.com/ModeClearCode/openvas_exporter
+ icon_filename: openVAS.png
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor OpenVAS vulnerability scanner metrics for efficient security assessment and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [OpenVAS exporter](https://github.com/ModeClearCode/openvas_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-google_pagespeed
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Google Pagespeed
+ link: https://github.com/foomo/pagespeed_exporter
+ icon_filename: google.svg
+ categories:
+ - data-collection.apm
+ keywords:
+ - cloud services
+ - cloud computing
+ - google cloud services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Google PageSpeed Insights performance metrics for efficient web page optimization and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Pagespeed exporter](https://github.com/foomo/pagespeed_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-philips_hue
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Philips Hue
+ link: https://github.com/aexel90/hue_exporter
+ icon_filename: hue.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Philips Hue smart lighting metrics for efficient home automation and energy management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Philips Hue Exporter](https://github.com/aexel90/hue_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Philips Hue Exporter](https://github.com/aexel90/hue_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-podman
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Podman
+ link: https://github.com/containers/prometheus-podman-exporter
+ icon_filename: podman.png
+ categories:
+ - data-collection.containers-and-vms
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Podman container runtime metrics for efficient container management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [PODMAN exporter](https://github.com/containers/prometheus-podman-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-proxmox
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Proxmox VE
+ link: https://github.com/prometheus-pve/prometheus-pve-exporter
+ icon_filename: proxmox.png
+ categories:
+ - data-collection.containers-and-vms
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Proxmox Virtual Environment metrics for efficient virtualization and container management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Proxmox VE Exporter](https://github.com/prometheus-pve/prometheus-pve-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-radius
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: RADIUS
+ link: https://github.com/devon-mar/radius-exporter
+ icon_filename: radius.png
+ categories:
+ - data-collection.authentication-and-authorization
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on RADIUS (Remote Authentication Dial-In User Service) protocol metrics for efficient authentication and access management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [RADIUS exporter](https://github.com/devon-mar/radius-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [RADIUS exporter](https://github.com/devon-mar/radius-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_rds
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AWS RDS
+ link: https://github.com/percona/rds_exporter
+ icon_filename: aws-rds.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - cloud services
+ - cloud computing
+ - aws services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Amazon RDS (Relational Database Service) metrics for efficient cloud database management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [rds_exporter](https://github.com/percona/rds_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [rds_exporter](https://github.com/percona/rds_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ripe_atlas
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: RIPE Atlas
+ link: https://github.com/czerwonk/atlas_exporter
+ icon_filename: ripe.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on RIPE Atlas Internet measurement platform metrics for efficient network monitoring and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [RIPE Atlas Exporter](https://github.com/czerwonk/atlas_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sentry
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Sentry
+ link: https://github.com/snakecharmer/sentry_exporter
+ icon_filename: sentry.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Sentry error tracking and monitoring platform metrics for efficient application performance and error management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Sentry Exporter](https://github.com/snakecharmer/sentry_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-slurm
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Slurm
+ link: https://github.com/vpenso/prometheus-slurm-exporter
+ icon_filename: slurm.png
+ categories:
+ - data-collection.task-queues
+ #- data-collection.provisioning-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Slurm workload manager metrics for efficient high-performance computing (HPC) and cluster management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [slurm exporter](https://github.com/vpenso/prometheus-slurm-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ipmi
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: IPMI (By SoundCloud)
+ link: https://github.com/prometheus-community/ipmi_exporter
+ icon_filename: soundcloud.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor IPMI metrics externally for efficient server hardware management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [SoundCloud IPMI Exporter (querying IPMI externally, blackbox-exporter style)](https://github.com/prometheus-community/ipmi_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-spacelift
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Spacelift
+ link: https://github.com/spacelift-io/prometheus-exporter
+ icon_filename: spacelift.png
+ categories:
+ - data-collection.provisioning-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Spacelift infrastructure-as-code (IaC) platform metrics for efficient infrastructure automation and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Spacelift Exporter](https://github.com/spacelift-io/prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ssh
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: SSH
+ link: https://github.com/Nordstrom/ssh_exporter
+ icon_filename: ssh.png
+ categories:
+ - data-collection.authentication-and-authorization
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor SSH server metrics for efficient secure shell server management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [SSH Exporter](https://github.com/Nordstrom/ssh_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [SSH Exporter](https://github.com/Nordstrom/ssh_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ssl
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: SSL Certificate
+ link: https://github.com/ribbybibby/ssl_exporter
+ icon_filename: ssl.svg
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track SSL/TLS certificate metrics for efficient web security and certificate management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [SSL Certificate exporter](https://github.com/ribbybibby/ssl_exporter) by following the instructions mentioned in the exporter README.
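+ # Illustrative sketch (YAML comment, not rendered into the generated docs): ssl_exporter
+ # is probe-style, so a go.d/prometheus.conf job would typically point at its /probe
+ # endpoint with a target query parameter. The port 9219 and the example target are
+ # assumptions; adjust them to your setup.
+ #
+ # jobs:
+ #   - name: ssl_certificate
+ #     url: http://127.0.0.1:9219/probe?target=example.com:443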
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-starlink
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Starlink (SpaceX)
+ link: https://github.com/danopstech/starlink_exporter
+ icon_filename: starlink.svg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor SpaceX Starlink satellite internet metrics for efficient internet service management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Starlink Exporter (SpaceX)](https://github.com/danopstech/starlink_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-statuspage
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: StatusPage
+ link: https://github.com/vladvasiliu/statuspage-exporter
+ icon_filename: statuspage.png
+ categories:
+ - data-collection.notifications
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor StatusPage.io incident and status metrics for efficient incident management and communication.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [StatusPage Exporter](https://github.com/vladvasiliu/statuspage-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-tacas
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: TACACS
+ link: https://github.com/devon-mar/tacacs-exporter
+ icon_filename: tacacs.png
+ categories:
+ - data-collection.authentication-and-authorization
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Terminal Access Controller Access-Control System (TACACS) protocol metrics for efficient network authentication and authorization management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [TACACS Exporter](https://github.com/devon-mar/tacacs-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-tesla_vehicle
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Tesla vehicle
+ link: https://github.com/wywywywy/tesla-prometheus-exporter
+ icon_filename: tesla.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Tesla vehicle metrics for efficient electric vehicle management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Tesla exporter](https://github.com/wywywywy/tesla-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-tesla_powerwall
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Tesla Powerwall
+ link: https://github.com/foogod/powerwall_exporter
+ icon_filename: tesla.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Tesla Powerwall metrics for efficient home energy storage and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Tesla Powerwall Exporter](https://github.com/foogod/powerwall_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-twitch
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Twitch
+ link: https://github.com/damoun/twitch_exporter
+ icon_filename: twitch.svg
+ categories:
+ - data-collection.media-streaming-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Twitch streaming platform metrics for efficient live streaming management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Twitch exporter](https://github.com/damoun/twitch_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Twitch exporter](https://github.com/damoun/twitch_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ubiquity_ufiber
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Ubiquiti UFiber OLT
+ link: https://github.com/swoga/ufiber-exporter
+ icon_filename: ubiquiti.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Ubiquiti UFiber GPON (Gigabit Passive Optical Network) device metrics for efficient fiber-optic network management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [ufiber-exporter](https://github.com/swoga/ufiber-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [ufiber-exporter](https://github.com/swoga/ufiber-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-uptimerobot
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: UptimeRobot
+ link: https://github.com/wosc/prometheus-uptimerobot
+ icon_filename: uptimerobot.svg
+ categories:
+ - data-collection.synthetic-checks
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor UptimeRobot website uptime monitoring metrics for efficient website availability tracking and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Uptimerobot Exporter](https://github.com/wosc/prometheus-uptimerobot) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hashicorp_vault
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: HashiCorp Vault secrets
+ link: https://github.com/tomtom-international/vault-assessment-prometheus-exporter
+ icon_filename: vault.svg
+ categories:
+ - data-collection.authentication-and-authorization
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track HashiCorp Vault security assessment metrics for efficient secrets management and security.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Vault Assessment Prometheus Exporter](https://github.com/tomtom-international/vault-assessment-prometheus-exporter) by following the instructions mentioned in the exporter README.
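+ # Illustrative sketch (YAML comment, not rendered into the generated docs): if an exporter
+ # endpoint sits behind HTTPS and basic auth in your deployment, the go.d prometheus job
+ # accepts the usual HTTP client options. The address, credentials, and use of
+ # tls_skip_verify below are assumptions for illustration only.
+ #
+ # jobs:
+ #   - name: vault_assessment
+ #     url: https://127.0.0.1:9876/metrics
+ #     username: netdata
+ #     password: secret
+ #     tls_skip_verify: yes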
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-vault_pki
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Vault PKI
+ link: https://github.com/aarnaud/vault-pki-exporter
+ icon_filename: vault.svg
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor HashiCorp Vault Public Key Infrastructure (PKI) metrics for efficient certificate management and security.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Vault PKI Exporter](https://github.com/aarnaud/vault-pki-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-vertica
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Vertica
+ link: https://github.com/vertica/vertica-prometheus-exporter
+ icon_filename: vertica.svg
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Vertica analytics database platform metrics for efficient database performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [vertica-prometheus-exporter](https://github.com/vertica/vertica-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-vscode
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: VSCode
+ link: https://github.com/guicaulada/vscode-exporter
+ icon_filename: vscode.svg
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Visual Studio Code editor metrics for efficient development environment management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [VSCode Exporter](https://github.com/guicaulada/vscode-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [VSCode Exporter](https://github.com/guicaulada/vscode-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-airthings_waveplus
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Airthings Waveplus air sensor
+ link: https://github.com/jeremybz/waveplus_exporter
+ icon_filename: airthings.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Waveplus radon sensor metrics for efficient indoor air quality monitoring and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Waveplus Radon Sensor Exporter](https://github.com/jeremybz/waveplus_exporter) by following the instructions mentioned in the exporter README.
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-xmpp_blackbox
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: XMPP Server
+ link: https://github.com/horazont/xmpp-blackbox-exporter
+ icon_filename: xmpp.svg
+ categories:
+ - data-collection.message-brokers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor XMPP (Extensible Messaging and Presence Protocol) server metrics for efficient messaging and communication management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [XMPP Server Exporter](https://github.com/horazont/xmpp-blackbox-exporter) by following the instructions mentioned in the exporter README.
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-4d_server
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: 4D Server
+ link: https://github.com/ThomasMaul/Prometheus_4D_Exporter
+ icon_filename: 4d_server.png
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor 4D Server performance metrics for efficient application management and optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [4D Server exporter](https://github.com/ThomasMaul/Prometheus_4D_Exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-8430ft-modem
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: 8430FT modem
+ link: https://github.com/dernasherbrezon/8430ft_exporter
+ icon_filename: mtc.svg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep track of vital metrics from the MTS 8430FT modem for streamlined network performance and diagnostics.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [8430FT Exporter](https://github.com/dernasherbrezon/8430ft_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-steam_a2s
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Steam
+ link: https://github.com/armsnyder/a2s-exporter
+ icon_filename: a2s.png
+ categories:
+ - data-collection.gaming
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Gain insights into Steam A2S-supported game servers for performance and availability through real-time metric monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [A2S Exporter](https://github.com/armsnyder/a2s-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [A2S Exporter](https://github.com/armsnyder/a2s-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-akami_edgedns
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Akamai Edge DNS Traffic
+ link: https://github.com/akamai/akamai-edgedns-traffic-exporter
+ icon_filename: akamai.svg
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track and analyze Akamai Edge DNS traffic for enhanced performance and security.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Akamai Edge DNS Traffic Exporter](https://github.com/akamai/akamai-edgedns-traffic-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-akami_gtm
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Akamai Global Traffic Management
+ link: https://github.com/akamai/akamai-gtm-metrics-exporter
+ icon_filename: akamai.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor vital metrics of Akamai Global Traffic Management (GTM) for optimized load balancing and failover.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Akamai Global Traffic Management Metrics Exporter](https://github.com/akamai/akamai-gtm-metrics-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-alamos_fe2
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Alamos FE2 server
+ link: https://github.com/codemonauts/prometheus-fe2-exporter
+ icon_filename: alamos_fe2.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Alamos FE2 systems for improved performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Alamos FE2 Exporter](https://github.com/codemonauts/prometheus-fe2-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-altaro_backup
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Altaro Backup
+ link: https://github.com/raph2i/altaro_backup_exporter
+ icon_filename: altaro.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Altaro Backup performance metrics to ensure smooth data protection and recovery operations.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Altaro Backup Exporter](https://github.com/raph2i/altaro_backup_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-amd_smi
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AMD CPU & GPU
+ link: https://github.com/amd/amd_smi_exporter
+ icon_filename: amd.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor AMD System Management Interface performance for optimized hardware management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [AMD SMI Exporter](https://github.com/amd/amd_smi_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aaisp
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Andrews & Arnold line status
+ link: https://github.com/daveio/aaisp-exporter
+ icon_filename: andrewsarnold.jpg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Andrews & Arnold Ltd (AAISP) metrics for improved network performance and diagnostics.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Andrews & Arnold line status exporter](https://github.com/daveio/aaisp-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-apicast
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: APIcast
+ link: https://github.com/3scale/apicast
+ icon_filename: apicast.png
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor APIcast performance metrics to optimize API gateway operations and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [APIcast](https://github.com/3scale/apicast).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [APIcast](https://github.com/3scale/apicast) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-apple_timemachine
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Apple Time Machine
+ link: https://github.com/znerol/prometheus-timemachine-exporter
+ icon_filename: apple.svg
+ categories:
+ - data-collection.macos-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Apple Time Machine backup metrics for efficient data protection and recovery.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Apple Time Machine Exporter](https://github.com/znerol/prometheus-timemachine-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-arm_hwcpipe
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: ARM HWCPipe
+ link: https://github.com/ylz-at/arm-hwcpipe-exporter
+ icon_filename: arm.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep track of metrics from ARM-based Android devices for efficient performance optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [ARM HWCPipe Exporter](https://github.com/ylz-at/arm-hwcpipe-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-arvancloud_cdn
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: ArvanCloud CDN
+ link: https://github.com/arvancloud/ar-prometheus-exporter
+ icon_filename: arvancloud.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track and analyze ArvanCloud CDN and cloud services performance metrics for optimized delivery and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [ArvanCloud exporter](https://github.com/arvancloud/ar-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-audisto
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Audisto
+ link: https://github.com/ZeitOnline/audisto_exporter
+ icon_filename: audisto.svg
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Audisto SEO and website metrics for improved search performance and optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Audisto exporter](https://github.com/ZeitOnline/audisto_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-authlog
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AuthLog
+ link: https://github.com/woblerr/authlog_exporter
+ icon_filename: linux.png
+ categories:
+ - data-collection.logs-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor authentication logs for security insights and efficient access management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [AuthLog Exporter](https://github.com/woblerr/authlog_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [AuthLog Exporter](https://github.com/woblerr/authlog_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_ec2_spot
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AWS EC2 Spot Instance
+ link: https://github.com/patcadelina/ec2-spot-exporter
+ icon_filename: aws-ec2.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - aws services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor AWS EC2 Spot instances' performance metrics for efficient resource allocation and cost optimization.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [AWS EC2 Spot Exporter](https://github.com/patcadelina/ec2-spot-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-aws_quota
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: AWS Quota
+ link: https://github.com/emylincon/aws_quota_exporter
+ icon_filename: aws.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - aws services
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor AWS service quotas for effective resource usage and cost management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [aws_quota_exporter](https://github.com/emylincon/aws_quota_exporter) by following the instructions mentioned in the exporter README.
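+ # Illustrative sketch (YAML comment, not rendered into the generated docs): exporters that
+ # query cloud APIs are often scraped less frequently, which a go.d/prometheus.conf job can
+ # express with update_every. The listen address below is a placeholder, not a known default.
+ #
+ # jobs:
+ #   - name: aws_quota
+ #     url: http://127.0.0.1:9999/metrics
+ #     update_every: 60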
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-bobcat
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Bobcat Miner 300
+ link: https://github.com/pperzyna/bobcat_exporter
+ icon_filename: bobcat.jpg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Bobcat Miner 300 Helium hotspot metrics for optimized performance and maintenance management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Bobcat Exporter](https://github.com/pperzyna/bobcat_exporter) by following the instructions mentioned in the exporter README.
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-bosh
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: BOSH
+ link: https://github.com/bosh-prometheus/bosh_exporter
+ icon_filename: bosh.png
+ categories:
+ - data-collection.provisioning-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on BOSH deployment metrics for improved cloud orchestration and resource management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [BOSH exporter](https://github.com/bosh-prometheus/bosh_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-bpftrace
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: bpftrace variables
+ link: https://github.com/andreasgerstmayr/bpftrace_exporter
+ icon_filename: bpftrace.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track bpftrace metrics for advanced performance analysis and troubleshooting.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [bpftrace exporter](https://github.com/andreasgerstmayr/bpftrace_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-bungeecord
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: BungeeCord
+ link: https://github.com/weihao/bungeecord-prometheus-exporter
+ icon_filename: bungee.png
+ categories:
+ - data-collection.gaming
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track BungeeCord proxy server metrics for efficient load balancing and performance management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [BungeeCord Prometheus Exporter](https://github.com/weihao/bungeecord-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-celery
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Celery
+ link: https://github.com/ZeitOnline/celery_redis_prometheus
+ icon_filename: celery.png
+ categories:
+ - data-collection.task-queues
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Celery task queue metrics for optimized task processing and resource management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Celery Exporter](https://github.com/ZeitOnline/celery_redis_prometheus) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-checkpoint
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Check Point device
+ link: https://github.com/RespiroConsulting/CheckPointExporter
+ icon_filename: checkpoint.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Check Point firewall and security metrics for enhanced network protection and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Checkpoint exporter](https://github.com/RespiroConsulting/CheckPointExporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-chia
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Chia
+ link: https://github.com/chia-network/chia-exporter
+ icon_filename: chia.png
+ categories:
+ - data-collection.blockchain-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Chia blockchain metrics for optimized farming and resource allocation.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Chia Exporter](https://github.com/chia-network/chia-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Chia Exporter](https://github.com/chia-network/chia-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-clm5ip
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Christ Elektronik CLM5IP power panel
+ link: https://github.com/christmann/clm5ip_exporter/
+ icon_filename: christelec.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Christ Elektronik CLM5IP device metrics for efficient performance and diagnostics.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Christ Elektronik CLM5IP Exporter](https://github.com/christmann/clm5ip_exporter/) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-clamd
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: ClamAV daemon
+ link: https://github.com/sergeymakinen/clamav_exporter
+ icon_filename: clamav.png
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track ClamAV antivirus metrics for enhanced threat detection and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [ClamAV daemon stats exporter](https://github.com/sergeymakinen/clamav_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-clamscan
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Clamscan results
+ link: https://github.com/FortnoxAB/clamscan-exporter
+ icon_filename: clamav.png
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor ClamAV scanning performance metrics for efficient malware detection and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [clamscan-exporter](https://github.com/FortnoxAB/clamscan-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-clash
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Clash
+ link: https://github.com/elonzh/clash_exporter
+ icon_filename: clash.png
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Clash proxy server metrics for optimized network performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Clash exporter](https://github.com/elonzh/clash_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Clash exporter](https://github.com/elonzh/clash_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cloud_foundry
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Cloud Foundry
+ link: https://github.com/bosh-prometheus/cf_exporter
+ icon_filename: cloud-foundry.svg
+ categories:
+ - data-collection.provisioning-systems
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Cloud Foundry platform metrics for optimized application deployment and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cloud Foundry exporter](https://github.com/bosh-prometheus/cf_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cloud_foundry_firebase
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Cloud Foundry Firehose
+ link: https://github.com/bosh-prometheus/firehose_exporter
+ icon_filename: cloud-foundry.svg
+ categories:
+ - data-collection.provisioning-systems
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Cloud Foundry Firehose metrics for comprehensive platform diagnostics and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cloud Foundry Firehose exporter](https://github.com/bosh-prometheus/firehose_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-akami_cloudmonitor
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Akamai Cloudmonitor
+ link: https://github.com/ExpressenAB/cloudmonitor_exporter
+ icon_filename: akamai.svg
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Akamai Cloudmonitor metrics for comprehensive cloud performance management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cloudmonitor exporter](https://github.com/ExpressenAB/cloudmonitor_exporter) by following the instructions mentioned in the exporter README.
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-lustre
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Lustre metadata
+ link: https://github.com/GSI-HPC/prometheus-cluster-exporter
+ icon_filename: lustre.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on the Lustre clustered file system for efficient management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cluster Exporter](https://github.com/GSI-HPC/prometheus-cluster-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cmon
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: ClusterControl CMON
+ link: https://github.com/severalnines/cmon_exporter
+ icon_filename: cluster-control.svg
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Severalnines ClusterControl (CMON) metrics for efficient monitoring and management of database operations.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [CMON Exporter](https://github.com/severalnines/cmon_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [CMON Exporter](https://github.com/severalnines/cmon_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-collectd
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Collectd
+ link: https://github.com/prometheus/collectd_exporter
+ icon_filename: collectd.png
+ categories:
+ - data-collection.observability
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor system and application metrics with Collectd for comprehensive performance analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Collectd exporter](https://github.com/prometheus/collectd_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Collectd exporter](https://github.com/prometheus/collectd_exporter) by following the instructions mentioned in the exporter README.
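+ # Illustrative sketch (YAML comment, not rendered into the generated docs): collectd_exporter
+ # can expose a large number of time series, and the go.d prometheus collector's selector
+ # option (if available in your version) can narrow what gets charted. The address
+ # 127.0.0.1:9103 reflects the exporter's commonly used default and is an assumption, as are
+ # the example metric name patterns.
+ #
+ # jobs:
+ #   - name: collectd
+ #     url: http://127.0.0.1:9103/metrics
+ #     selector:
+ #       allow:
+ #         - collectd_cpu_total
+ #         - collectd_memory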
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-shell_cmd
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Shell command
+ link: https://github.com/tomwilkie/prom-run
+ icon_filename: crunner.svg
+ categories:
+ - data-collection.generic-data-collection
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track custom command output metrics for tailored monitoring and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Command runner exporter](https://github.com/tomwilkie/prom-run).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Command runner exporter](https://github.com/tomwilkie/prom-run) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ftbeerpi
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: CraftBeerPi
+ link: https://github.com/jo-hannes/craftbeerpi_exporter
+ icon_filename: craftbeer.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on CraftBeerPi homebrewing metrics for optimized brewing process management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [CraftBeerPi exporter](https://github.com/jo-hannes/craftbeerpi_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-crypto
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Crypto exchanges
+ link: https://github.com/ix-ai/crypto-exporter
+ icon_filename: crypto.png
+ categories:
+ - data-collection.blockchain-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track cryptocurrency market metrics for informed investment and trading decisions.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Crypto exporter](https://github.com/ix-ai/crypto-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Crypto exporter](https://github.com/ix-ai/crypto-exporter) by following the instructions mentioned in the exporter README.
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cryptowatch
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Cryptowatch
+ link: https://github.com/nbarrientos/cryptowat_exporter
+ icon_filename: cryptowatch.png
+ categories:
+ - data-collection.blockchain-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Cryptowatch market data metrics for comprehensive cryptocurrency market analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Cryptowat Exporter](https://github.com/nbarrientos/cryptowat_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-certificate_transparency
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Certificate Transparency
+ link: https://github.com/Hsn723/ct-exporter
+ icon_filename: ct.png
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track certificate transparency log metrics for enhanced SSL/TLS certificate management and security.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [ct-exporter](https://github.com/Hsn723/ct-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [ct-exporter](https://github.com/Hsn723/ct-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-custom
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Custom Exporter
+ link: https://github.com/orange-cloudfoundry/custom_exporter
+ icon_filename: customdata.png
+ categories:
+ - data-collection.generic-data-collection
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Create and monitor custom metrics tailored to your specific use case and requirements.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Custom Exporter](https://github.com/orange-cloudfoundry/custom_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-cvmfs
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: CVMFS clients
+ link: https://github.com/guilbaults/cvmfs-exporter
+ icon_filename: cvmfs.png
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track CernVM File System metrics for optimized distributed file system performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [CVMFS exporter](https://github.com/guilbaults/cvmfs-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ddwrt
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: DD-WRT Routers
+ link: https://github.com/camelusferus/ddwrt_collector
+ icon_filename: ddwrt.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on DD-WRT router metrics for efficient network management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [ddwrt-collector](https://github.com/camelusferus/ddwrt_collector) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-dell_emc_xtremio
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Dell EMC XtremIO cluster
+ link: https://github.com/cthiel42/prometheus-xtremio-exporter
+ icon_filename: dell.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Dell/EMC XtremIO storage metrics for optimized data management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Dell/EMC XtremIO Exporter](https://github.com/cthiel42/prometheus-xtremio-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-dependency_track
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Dependency-Track
+ link: https://github.com/jetstack/dependency-track-exporter
+ icon_filename: dependency-track.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Dependency-Track metrics for efficient vulnerability management and software supply chain analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Dependency-Track Exporter](https://github.com/jetstack/dependency-track-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-dmarc
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: DMARC
+ link: https://github.com/jgosmann/dmarc-metrics-exporter
+ icon_filename: dmarc.png
+ categories:
+ - data-collection.mail-servers
+ keywords:
+ - email authentication
+ - policy
+ - reporting
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track DMARC email authentication metrics for improved email security and deliverability.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [dmarc-metrics-exporter](https://github.com/jgosmann/dmarc-metrics-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-dnsbl
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: DNSBL
+ link: https://github.com/Luzilla/dnsbl_exporter/
+ icon_filename: dnsbl.png
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor DNSBL metrics for efficient domain reputation and security management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [dnsbl-exporter](https://github.com/Luzilla/dnsbl_exporter/) by following the instructions mentioned in the exporter README.
+
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-bird
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Bird Routing Daemon
+ link: https://github.com/czerwonk/bird_exporter
+ icon_filename: bird.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Bird Routing Daemon metrics for optimized network routing and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Bird Routing Daemon Exporter](https://github.com/czerwonk/bird_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-elgato_keylight
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Elgato Key Light devices
+ link: https://github.com/mdlayher/keylight_exporter
+ icon_filename: elgato.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Elgato Key Light metrics for optimized lighting control and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Elgato Key Light exporter](https://github.com/mdlayher/keylight_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-energomera
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Energomera smart power meters
+ link: https://github.com/peak-load/energomera_exporter
+ icon_filename: energomera.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Energomera electricity meter metrics for efficient energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Energomera electricity meter exporter](https://github.com/peak-load/energomera_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-excel
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Excel spreadsheet
+ link: https://github.com/MarcusCalidus/excel-exporter
+ icon_filename: excel.png
+ categories:
+ - data-collection.generic-data-collection
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Export Prometheus metrics to Excel for versatile data analysis and reporting.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Excel Exporter](https://github.com/MarcusCalidus/excel-exporter) by following the instructions mentioned in the exporter README.
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-fastd
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Fastd
+ link: https://github.com/freifunk-darmstadt/fastd-exporter
+ icon_filename: fastd.png
+ categories:
+ - data-collection.vpns
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Fastd VPN metrics for efficient virtual private network management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Fastd Exporter](https://github.com/freifunk-darmstadt/fastd-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-freifunk
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Freifunk network
+ link: https://github.com/xperimental/freifunk-exporter
+ icon_filename: freifunk.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Freifunk community network metrics for optimized network performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Freifunk Exporter](https://github.com/xperimental/freifunk-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-fritzbox
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Fritzbox network devices
+ link: https://github.com/pdreker/fritz_exporter
+ icon_filename: avm.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track AVM Fritzbox router metrics for efficient home network management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Fritzbox exporter](https://github.com/pdreker/fritz_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Fritzbox exporter](https://github.com/pdreker/fritz_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-frrouting
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: FRRouting
+ link: https://github.com/tynany/frr_exporter
+ icon_filename: frrouting.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Free Range Routing (FRR) metrics for optimized network routing and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [FRRouting Exporter](https://github.com/tynany/frr_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [FRRouting Exporter](https://github.com/tynany/frr_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-generic_cli
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Generic Command Line Output
+ link: https://github.com/MarioMartReq/generic-exporter
+ icon_filename: cli.svg
+ categories:
+ - data-collection.generic-data-collection
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track custom command line output metrics for tailored monitoring and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Generic Command Line Output Exporter](https://github.com/MarioMartReq/generic-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-github_ratelimit
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: GitHub API rate limit
+ link: https://github.com/lunarway/github-ratelimit-exporter
+ icon_filename: github.svg
+ categories:
+ - data-collection.other
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor GitHub API rate limit metrics for efficient API usage and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [GitHub API rate limit Exporter](https://github.com/lunarway/github-ratelimit-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-gpsd
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: gpsd
+ link: https://github.com/natesales/gpsd-exporter
+ icon_filename: gpsd.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor GPSD (GPS daemon) metrics for efficient GPS data management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [gpsd exporter](https://github.com/natesales/gpsd-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [gpsd exporter](https://github.com/natesales/gpsd-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-gtp
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: GTP
+ link: https://github.com/wmnsk/gtp_exporter
+ icon_filename: gtpu.png
+ categories:
+ - data-collection.telephony-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on GTP (GPRS Tunneling Protocol) metrics for optimized mobile data communication and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [GTP Exporter](https://github.com/wmnsk/gtp_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [GTP Exporter](https://github.com/wmnsk/gtp_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-halon
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Halon
+ link: https://github.com/tobiasbp/halon_exporter
+ icon_filename: halon.svg
+ categories:
+ - data-collection.mail-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Halon email security and delivery metrics for optimized email management and protection.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Halon exporter](https://github.com/tobiasbp/halon_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Halon exporter](https://github.com/tobiasbp/halon_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hasura_graphql
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Hasura GraphQL Server
+ link: https://github.com/zolamk/hasura-exporter
+ icon_filename: hasura.svg
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Hasura GraphQL engine metrics for optimized API performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Hasura Exporter](https://github.com/zolamk/hasura-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Hasura Exporter](https://github.com/zolamk/hasura-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hdsentinel
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: HDSentinel
+ link: https://github.com/qusielle/hdsentinel-exporter
+ icon_filename: harddisk.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Hard Disk Sentinel metrics for efficient storage device health management and diagnostics.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [HDSentinel Exporter](https://github.com/qusielle/hdsentinel-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-helium_hotspot
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Helium hotspot
+ link: https://github.com/tedder/helium_hotspot_exporter
+ icon_filename: helium.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Helium hotspot metrics for optimized LoRaWAN network management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Helium hotspot exporter](https://github.com/tedder/helium_hotspot_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-helium_miner
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Helium miner (validator)
+ link: https://github.com/tedder/miner_exporter
+ icon_filename: helium.svg
+ categories:
+ - data-collection.blockchain-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Helium miner and validator metrics for efficient blockchain performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Helium miner (validator) exporter](https://github.com/tedder/miner_exporter) by following the instructions mentioned in the exporter README.
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hhvm
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: HHVM
+ link: https://github.com/wikimedia/operations-software-hhvm_exporter
+ icon_filename: hhvm.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor HipHop Virtual Machine metrics for efficient PHP execution and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [HHVM Exporter](https://github.com/wikimedia/operations-software-hhvm_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hilink
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Huawei devices
+ link: https://github.com/eliecharra/hilink-exporter
+ icon_filename: huawei.svg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Huawei HiLink device metrics for optimized connectivity and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Huawei Hilink exporter](https://github.com/eliecharra/hilink-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hitron_cgm
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Hitron CGN series CPE
+ link: https://github.com/yrro/hitron-exporter
+ icon_filename: hitron.svg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Hitron CGNV4 gateway metrics for efficient network management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Hitron CGNV4 exporter](https://github.com/yrro/hitron-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-hitron_coda
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Hitron CODA Cable Modem
+ link: https://github.com/hairyhenderson/hitron_coda_exporter
+ icon_filename: hitron.svg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Hitron CODA cable modem metrics for optimized internet connectivity and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Hitron CODA Cable Modem Exporter](https://github.com/hairyhenderson/hitron_coda_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-homebridge
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Homebridge
+ link: https://github.com/lstrojny/homebridge-prometheus-exporter
+ icon_filename: homebridge.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Homebridge smart home metrics for efficient home automation management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Homebridge Prometheus Exporter](https://github.com/lstrojny/homebridge-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-homey
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Homey
+ link: https://github.com/rickardp/homey-prometheus-exporter
+ icon_filename: homey.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Homey smart home controller metrics for efficient home automation and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Homey Exporter](https://github.com/rickardp/homey-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ibm_cex
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: IBM CryptoExpress (CEX) cards
+ link: https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin
+ icon_filename: ibm.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track IBM Z Crypto Express device metrics for optimized cryptographic performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [IBM Z CEX Device Plugin Prometheus Exporter](https://github.com/ibm-s390-cloud/k8s-cex-dev-plugin) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ibm_zhmc
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: IBM Z Hardware Management Console
+ link: https://github.com/zhmcclient/zhmc-prometheus-exporter
+ icon_filename: ibm.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor IBM Z Hardware Management Console metrics for efficient mainframe management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [IBM Z HMC Exporter](https://github.com/zhmcclient/zhmc-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-iota
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: IOTA full node
+ link: https://github.com/crholliday/iota-prom-exporter
+ icon_filename: iota.svg
+ categories:
+ - data-collection.blockchain-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on IOTA cryptocurrency network metrics for efficient blockchain performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [IOTA Exporter](https://github.com/crholliday/iota-prom-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-iqair
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: IQAir AirVisual air quality monitors
+ link: https://github.com/Packetslave/iqair_exporter
+ icon_filename: iqair.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor air quality data from IQAir devices for efficient environmental monitoring and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [IQair Exporter](https://github.com/Packetslave/iqair_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [IQair Exporter](https://github.com/Packetslave/iqair_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-jarvis
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Jarvis Standing Desk
+ link: https://github.com/hairyhenderson/jarvis_exporter/
+ icon_filename: jarvis.jpg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Jarvis standing desk usage metrics for efficient workspace ergonomics and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Jarvis Standing Desk Exporter](https://github.com/hairyhenderson/jarvis_exporter/) by following the instructions mentioned in the exporter README.
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-enclosure
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Generic storage enclosure tool
+ link: https://github.com/Gandi/jbod-rs
+ icon_filename: storage-enclosure.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor storage enclosure metrics for efficient storage device management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [jbod - Generic storage enclosure tool](https://github.com/Gandi/jbod-rs) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-jetbrains_fls
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: JetBrains Floating License Server
+ link: https://github.com/mkreu/jetbrains-fls-exporter
+ icon_filename: jetbrains.png
+ categories:
+ - data-collection.generic-data-collection
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor JetBrains floating license server metrics for efficient software licensing management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [JetBrains Floating License Server Exporter](https://github.com/mkreu/jetbrains-fls-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-journald
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: journald
+ link: https://github.com/dead-claudia/journald-exporter
+ icon_filename: linux.png
+ categories:
+ - data-collection.logs-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on systemd-journald metrics for efficient log management and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [journald-exporter](https://github.com/dead-claudia/journald-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [journald-exporter](https://github.com/dead-claudia/journald-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-kafka_connect
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Kafka Connect
+ link: https://github.com/findelabs/kafka-connect-exporter-rs
+ icon_filename: kafka.svg
+ categories:
+ - data-collection.message-brokers
+ keywords:
+ - big data
+ - stream processing
+ - message broker
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Kafka Connect metrics for efficient data streaming and integration.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Kafka Connect exporter](https://github.com/findelabs/kafka-connect-exporter-rs) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-kannel
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Kannel
+ link: https://github.com/apostvav/kannel_exporter
+ icon_filename: kannel.png
+ categories:
+ - data-collection.telephony-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Kannel SMS gateway and WAP gateway metrics for efficient mobile communication and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Kannel Exporter](https://github.com/apostvav/kannel_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Kannel Exporter](https://github.com/apostvav/kannel_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-keepalived
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Keepalived
+ link: https://github.com/gen2brain/keepalived_exporter
+ icon_filename: keepalived.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Keepalived metrics for efficient high-availability and load balancing management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Keepalived Exporter](https://github.com/gen2brain/keepalived_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-korral
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Kubernetes Cluster Cloud Cost
+ link: https://github.com/agilestacks/korral
+ icon_filename: kubernetes.svg
+ categories:
+ - data-collection.kubernetes
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Kubernetes cloud cost metrics for efficient cloud resource management and budgeting.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Kubernetes Cloud Cost Exporter](https://github.com/agilestacks/korral) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-lagerist
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Lagerist Disk latency
+ link: https://github.com/Svedrin/lagerist
+ icon_filename: linux.png
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track disk latency metrics for efficient storage performance and diagnostics.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Lagerist Disk latency exporter](https://github.com/Svedrin/lagerist) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ldap
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: LDAP
+ link: https://github.com/titisan/ldap_exporter
+ icon_filename: ldap.png
+ categories:
+ - data-collection.authentication-and-authorization
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Lightweight Directory Access Protocol (LDAP) metrics for efficient directory service management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [LDAP Exporter](https://github.com/titisan/ldap_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [LDAP Exporter](https://github.com/titisan/ldap_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-lynis
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Lynis audit reports
+ link: https://github.com/MauveSoftware/lynis_exporter
+ icon_filename: lynis.png
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Lynis security auditing tool metrics for efficient system security and compliance management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [lynis_exporter](https://github.com/MauveSoftware/lynis_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-machbase
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Machbase
+ link: https://github.com/MACHBASE/prometheus-machbase-exporter
+ icon_filename: machbase.png
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Machbase time-series database metrics for efficient data storage and query performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Machbase Exporter](https://github.com/MACHBASE/prometheus-machbase-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-maildir
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Maildir
+ link: https://github.com/cherti/mailexporter
+ icon_filename: mailserver.svg
+ categories:
+ - data-collection.mail-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track mail server metrics for optimized email management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [mailexporter](https://github.com/cherti/mailexporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [mailexporter](https://github.com/cherti/mailexporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-meilisearch
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Meilisearch
+ link: https://github.com/scottaglia/meilisearch_exporter
+ icon_filename: meilisearch.svg
+ categories:
+ - data-collection.search-engines
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Meilisearch search engine metrics for efficient search performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Meilisearch Exporter](https://github.com/scottaglia/meilisearch_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-memcached
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Memcached (community)
+ link: https://github.com/prometheus/memcached_exporter
+ icon_filename: memcached.svg
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Memcached in-memory key-value store metrics for efficient caching performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Memcached exporter](https://github.com/prometheus/memcached_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Memcached exporter](https://github.com/prometheus/memcached_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-meraki
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Meraki dashboard
+ link: https://github.com/TheHolm/meraki-dashboard-promethus-exporter
+ icon_filename: meraki.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Cisco Meraki cloud-managed networking device metrics for efficient network management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Meraki dashboard data exporter using API](https://github.com/TheHolm/meraki-dashboard-promethus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-mesos
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Mesos
+ link: http://github.com/mesosphere/mesos_exporter
+ icon_filename: mesos.svg
+ categories:
+ #- data-collection.provisioning-systems
+ - data-collection.task-queues
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Apache Mesos cluster manager metrics for efficient resource management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Mesos exporter](http://github.com/mesosphere/mesos_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Mesos exporter](http://github.com/mesosphere/mesos_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-xiaomi_mi_flora
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Xiaomi Mi Flora
+ link: https://github.com/xperimental/flowercare-exporter
+ icon_filename: xiaomi.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on MiFlora plant monitor metrics for efficient plant care and growth management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [MiFlora / Flower Care Exporter](https://github.com/xperimental/flowercare-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-modbus_rtu
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Modbus protocol
+ link: https://github.com/dernasherbrezon/modbusrtu_exporter
+ icon_filename: modbus.svg
+ categories:
+ - data-collection.iot-devices
+ keywords:
+ - modbus
+ - rtu
+ - industrial automation
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Modbus RTU protocol metrics for efficient industrial automation and control performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [modbusrtu_exporter](https://github.com/dernasherbrezon/modbusrtu_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-mogilefs
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: MogileFS
+ link: https://github.com/KKBOX/mogilefs-exporter
+ icon_filename: filesystem.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor MogileFS distributed file system metrics for efficient storage management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [MogileFS Exporter](https://github.com/KKBOX/mogilefs-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-monnit_mqtt
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Monnit Sensors MQTT
+ link: https://github.com/braxton9460/monnit-mqtt-exporter
+ icon_filename: monnit.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Monnit sensor data via MQTT for efficient IoT device monitoring and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Monnit Sensors MQTT Exporter](https://github.com/braxton9460/monnit-mqtt-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Monnit Sensors MQTT Exporter](https://github.com/braxton9460/monnit-mqtt-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-mtail
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: mtail
+ link: https://github.com/google/mtail
+ icon_filename: mtail.png
+ categories:
+ - data-collection.logs-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor log data metrics using mtail log data extractor and parser.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [mtail](https://github.com/google/mtail).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [mtail](https://github.com/google/mtail) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-naemon
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Naemon
+ link: https://github.com/Griesbacher/Iapetos
+ icon_filename: naemon.svg
+ categories:
+ - data-collection.observability
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Naemon or Nagios network monitoring metrics for efficient IT infrastructure management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Naemon / Nagios Exporter](https://github.com/Griesbacher/Iapetos) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-nagios
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Nagios
+ link: https://github.com/wbollock/nagios_exporter
+ icon_filename: nagios.png
+ categories:
+ - data-collection.observability
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Nagios network monitoring metrics for efficient IT infrastructure management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Nagios exporter](https://github.com/wbollock/nagios_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Nagios exporter](https://github.com/wbollock/nagios_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-nature_remo
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Nature Remo E lite devices
+ link: https://github.com/kenfdev/remo-exporter
+ icon_filename: nature-remo.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Nature Remo E series smart home device metrics for efficient home automation and energy management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Nature Remo E series Exporter](https://github.com/kenfdev/remo-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-netatmo
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Netatmo sensors
+ link: https://github.com/xperimental/netatmo-exporter
+ icon_filename: netatmo.svg
+ categories:
+ - data-collection.iot-devices
+ keywords:
+ - weather station
+ - sensors
+ - smart home
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Netatmo smart home device metrics for efficient home automation and energy management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Netatmo exporter](https://github.com/xperimental/netatmo-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Netatmo exporter](https://github.com/xperimental/netatmo-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-netflow
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: NetFlow
+ link: https://github.com/paihu/netflow_exporter
+ icon_filename: netflow.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords:
+ - network monitoring
+ - network performance
+ - traffic analysis
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track NetFlow network traffic metrics for efficient network monitoring and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [netflow exporter](https://github.com/paihu/netflow_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [netflow exporter](https://github.com/paihu/netflow_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-nextcloud
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Nextcloud servers
+ link: https://github.com/xperimental/nextcloud-exporter
+ icon_filename: nextcloud.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords:
+ - cloud services
+ - cloud computing
+ - scalability
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Nextcloud cloud storage metrics for efficient file hosting and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Nextcloud exporter](https://github.com/xperimental/nextcloud-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-nextdns
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: NextDNS
+ link: https://github.com/raylas/nextdns-exporter
+ icon_filename: nextdns.png
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track NextDNS DNS resolver and security platform metrics for efficient DNS management and security.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [nextdns-exporter](https://github.com/raylas/nextdns-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [nextdns-exporter](https://github.com/raylas/nextdns-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-nftables
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: nftables
+ link: https://github.com/Sheridan/nftables_exporter
+ icon_filename: nftables.png
+ categories:
+ - data-collection.linux-systems.firewall-metrics
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor nftables firewall metrics for efficient network security and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [nftables_exporter](https://github.com/Sheridan/nftables_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [nftables_exporter](https://github.com/Sheridan/nftables_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ibm_aix_njmon
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: IBM AIX systems Njmon
+ link: https://github.com/crooks/njmon_exporter
+ icon_filename: ibm.svg
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on NJmon system performance monitoring metrics for efficient IT infrastructure management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [NJmon](https://github.com/crooks/njmon_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [NJmon](https://github.com/crooks/njmon_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-nrpe
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: NRPE daemon
+ link: https://github.com/canonical/nrpe_exporter
+ icon_filename: nrpelinux.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Nagios Remote Plugin Executor (NRPE) metrics for efficient system and network monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [NRPE exporter](https://github.com/canonical/nrpe_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [NRPE exporter](https://github.com/canonical/nrpe_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-mikrotik
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: MikroTik devices
+ link: https://github.com/swoga/mikrotik-exporter
+ icon_filename: mikrotik.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on MikroTik RouterOS metrics for efficient network device management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [mikrotik-exporter](https://github.com/swoga/mikrotik-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-nsxt
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: NSX-T
+ link: https://github.com/jk8s/nsxt_exporter
+ icon_filename: vmware-nsx.svg
+ categories:
+ - data-collection.containers-and-vms
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track VMware NSX-T software-defined networking metrics for efficient network virtualization and security management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [NSX-T Exporter](https://github.com/jk8s/nsxt_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-nvml
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: NVML
+ link: https://github.com/oko/nvml-exporter-rs
+ icon_filename: nvidia.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on NVIDIA Management Library (NVML) GPU metrics for efficient GPU performance and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [NVML exporter](https://github.com/oko/nvml-exporter-rs).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [NVML exporter](https://github.com/oko/nvml-exporter-rs) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-obs_studio
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OBS Studio
+ link: https://github.com/lukegb/obs_studio_exporter
+ icon_filename: obs-studio.png
+ categories:
+ - data-collection.media-streaming-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track OBS Studio live streaming and recording software metrics for efficient video production and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [OBS Studio Exporter](https://github.com/lukegb/obs_studio_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-odbc
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: ODBC
+ link: https://github.com/MACHBASE/prometheus-odbc-exporter
+ icon_filename: odbc.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - database
+ - dbms
+ - data storage
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Open Database Connectivity (ODBC) metrics for efficient database connection and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [ODBC Exporter](https://github.com/MACHBASE/prometheus-odbc-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-openhab
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OpenHAB
+ link: https://github.com/pdreker/openhab_exporter
+ icon_filename: openhab.svg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track openHAB smart home automation system metrics for efficient home automation and energy management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [OpenHAB exporter](https://github.com/pdreker/openhab_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [OpenHAB exporter](https://github.com/pdreker/openhab_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-openrc
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OpenRC
+ link: https://git.sr.ht/~tomleb/openrc-exporter
+ icon_filename: linux.png
+ categories:
+ - data-collection.linux-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on OpenRC init system metrics for efficient system startup and service management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [openrc-exporter](https://git.sr.ht/~tomleb/openrc-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-openrct2
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OpenRCT2
+ link: https://github.com/terinjokes/openrct2-prometheus-exporter
+ icon_filename: openRCT2.png
+ categories:
+ - data-collection.gaming
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track OpenRCT2 game metrics for efficient game server management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [OpenRCT2 Prometheus Exporter](https://github.com/terinjokes/openrct2-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-openroadm
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OpenROADM devices
+ link: https://github.com/utdal/openroadm_exporter
+ icon_filename: openroadm.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords:
+ - network monitoring
+ - network performance
+ - traffic analysis
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor OpenROADM optical transport network metrics using the NETCONF protocol for efficient network management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [OpenROADM NETCONF Exporter WIP](https://github.com/utdal/openroadm_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-openweathermap
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OpenWeatherMap
+ link: https://github.com/Tenzer/openweathermap-exporter
+ icon_filename: openweather.png
+ categories:
+ - data-collection.generic-data-collection
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track OpenWeatherMap weather data and air pollution metrics for efficient environmental monitoring and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [OpenWeatherMap Exporter](https://github.com/Tenzer/openweathermap-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-oracledb
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Oracle DB (community)
+ link: https://github.com/iamseth/oracledb_exporter
+ icon_filename: oracle.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - oracle
+ - database
+ - dbms
+ - data storage
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Oracle Database metrics for efficient database management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Oracle DB Exporter](https://github.com/iamseth/oracledb_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-otrs
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: OTRS
+ link: https://github.com/JulianDroste/otrs_exporter
+ icon_filename: otrs.png
+ categories:
+ - data-collection.notifications
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor OTRS (Open-Source Ticket Request System) metrics for efficient helpdesk management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [OTRS Exporter](https://github.com/JulianDroste/otrs_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-dutch_electricity_smart_meter
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Dutch Electricity Smart Meter
+ link: https://github.com/TobiasDeBruijn/prometheus-p1-exporter
+ icon_filename: dutch-electricity.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Dutch smart meter P1 port metrics for efficient energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [P1Exporter - Dutch Electricity Smart Meter Exporter](https://github.com/TobiasDeBruijn/prometheus-p1-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-patroni
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Patroni
+ link: https://github.com/gopaytech/patroni_exporter
+ icon_filename: patroni.png
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Patroni PostgreSQL high-availability metrics for efficient database management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Patroni Exporter](https://github.com/gopaytech/patroni_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Patroni Exporter](https://github.com/gopaytech/patroni_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-pws
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Personal Weather Station
+ link: https://github.com/JohnOrthoefer/pws-exporter
+ icon_filename: wunderground.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track personal weather station metrics for efficient weather monitoring and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Personal Weather Station Exporter](https://github.com/JohnOrthoefer/pws-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-pgbackrest
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: pgBackRest
+ link: https://github.com/woblerr/pgbackrest_exporter
+ icon_filename: pgbackrest.png
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor pgBackRest PostgreSQL backup metrics for efficient database backup and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [pgBackRest Exporter](https://github.com/woblerr/pgbackrest_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-pgpool2
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Pgpool-II
+ link: https://github.com/pgpool/pgpool2_exporter
+ icon_filename: pgpool2.png
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Pgpool-II PostgreSQL middleware metrics for efficient database connection management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Pgpool-II Exporter](https://github.com/pgpool/pgpool2_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-pimoroni_enviro_plus
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Pimoroni Enviro+
+ link: https://github.com/terradolor/prometheus-enviro-exporter
+ icon_filename: pimorino.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Pimoroni Enviro+ air quality and environmental metrics for efficient environmental monitoring and analysis.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Pimoroni Enviro+ Exporter](https://github.com/terradolor/prometheus-enviro-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-pingdom
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Pingdom
+ link: https://github.com/veepee-oss/pingdom_exporter
+ icon_filename: solarwinds.svg
+ categories:
+ - data-collection.synthetic-checks
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Pingdom website monitoring service metrics for efficient website performance management and diagnostics.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Pingdom Exporter](https://github.com/veepee-oss/pingdom_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-dell_powermax
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Dell PowerMax
+ link: https://github.com/kckecheng/powermax_exporter
+ icon_filename: powermax.png
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Dell EMC PowerMax storage array metrics for efficient storage management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [PowerMax Exporter](https://github.com/kckecheng/powermax_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-powerpal
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Powerpal devices
+ link: https://github.com/aashley/powerpal_exporter
+ icon_filename: powerpal.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Powerpal smart meter metrics for efficient energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Powerpal Exporter](https://github.com/aashley/powerpal_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Powerpal Exporter](https://github.com/aashley/powerpal_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-proftpd
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: ProFTPD
+ link: https://github.com/transnano/proftpd_exporter
+ icon_filename: proftpd.png
+ categories:
+ - data-collection.ftp-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor ProFTPD FTP server metrics for efficient file transfer and server performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [ProFTPD Exporter](https://github.com/transnano/proftpd_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-eaton_ups
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Eaton UPS
+ link: https://github.com/psyinfra/prometheus-eaton-ups-exporter
+ icon_filename: eaton.svg
+ categories:
+ - data-collection.ups
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Eaton uninterruptible power supply (UPS) metrics for efficient power management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Prometheus Eaton UPS Exporter](https://github.com/psyinfra/prometheus-eaton-ups-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-redis_queue
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Redis Queue
+ link: https://github.com/mdawar/rq-exporter
+ icon_filename: rq.png
+ categories:
+ - data-collection.message-brokers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Python RQ (Redis Queue) job queue metrics for efficient task management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Python RQ Exporter](https://github.com/mdawar/rq-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Python RQ Exporter](https://github.com/mdawar/rq-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-radio_thermostat
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Radio Thermostat
+ link: https://github.com/andrewlow/radio-thermostat-exporter
+ icon_filename: radiots.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Radio Thermostat smart thermostat metrics for efficient home automation and energy management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Radio Thermostat Exporter](https://github.com/andrewlow/radio-thermostat-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-rancher
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Rancher
+ link: https://github.com/infinityworksltd/prometheus-rancher-exporter
+ icon_filename: rancher.svg
+ categories:
+ - data-collection.kubernetes
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Rancher container orchestration platform metrics for efficient container management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Rancher Exporter](https://github.com/infinityworksltd/prometheus-rancher-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-raritan_pdu
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Raritan PDU
+ link: https://github.com/psyinfra/prometheus-raritan-pdu-exporter
+ icon_filename: raritan.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Raritan Power Distribution Unit (PDU) metrics for efficient power management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Raritan PDU Exporter](https://github.com/psyinfra/prometheus-raritan-pdu-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-routeros
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: MikroTik RouterOS devices
+ link: https://github.com/welbymcroberts/routeros_exporter
+ icon_filename: routeros.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track MikroTik RouterOS metrics for efficient network device management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [RouterOS exporter](https://github.com/welbymcroberts/routeros_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sabnzbd
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: SABnzbd
+ link: https://github.com/msroest/sabnzbd_exporter
+ icon_filename: sabnzbd.png
+ categories:
+ - data-collection.media-streaming-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor SABnzbd Usenet client metrics for efficient file downloads and resource management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [SABnzbd Exporter](https://github.com/msroest/sabnzbd_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-salicru_eqx
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Salicru EQX inverter
+ link: https://github.com/alejandroscf/prometheus_salicru_exporter
+ icon_filename: salicru.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Salicru EQX solar inverter metrics for efficient solar energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Salicru EQX inverter](https://github.com/alejandroscf/prometheus_salicru_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sense_energy
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Sense Energy
+ link: https://github.com/ejsuncy/sense_energy_prometheus_exporter
+ icon_filename: sense.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Sense Energy smart meter metrics for efficient energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Sense Energy exporter](https://github.com/ejsuncy/sense_energy_prometheus_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-servertech
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: ServerTech
+ link: https://github.com/tynany/servertech_exporter
+ icon_filename: servertech.png
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Server Technology power distribution unit (PDU) metrics for efficient power management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [ServerTech Exporter](https://github.com/tynany/servertech_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [ServerTech Exporter](https://github.com/tynany/servertech_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-shelly
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Shelly humidity sensor
+ link: https://github.com/aexel90/shelly_exporter
+ icon_filename: shelly.jpg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Shelly smart home device metrics for efficient home automation and energy management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Shelly Exporter](https://github.com/aexel90/shelly_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Shelly Exporter](https://github.com/aexel90/shelly_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sia
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Sia
+ link: https://github.com/tbenz9/sia_exporter
+ icon_filename: sia.png
+ categories:
+ - data-collection.blockchain-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Sia decentralized storage platform metrics for efficient storage management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Sia Exporter](https://github.com/tbenz9/sia_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Sia Exporter](https://github.com/tbenz9/sia_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-s7_plc
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Siemens S7 PLC
+ link: https://github.com/MarcusCalidus/s7-plc-exporter
+ icon_filename: siemens.svg
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Siemens S7 Programmable Logic Controller (PLC) metrics for efficient industrial automation and control.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Siemens S7 PLC exporter](https://github.com/MarcusCalidus/s7-plc-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-site24x7
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Site24x7
+ link: https://github.com/svenstaro/site24x7_exporter
+ icon_filename: site24x7.svg
+ categories:
+ - data-collection.synthetic-checks
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Site24x7 website and infrastructure monitoring metrics for efficient performance tracking and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [site24x7 Exporter](https://github.com/svenstaro/site24x7_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sma_inverter
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: SMA Inverters
+ link: https://github.com/dr0ps/sma_inverter_exporter
+ icon_filename: sma.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor SMA solar inverter metrics for efficient solar energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [sma-exporter](https://github.com/dr0ps/sma_inverter_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-smartrg808ac
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: SmartRG 808AC Cable Modem
+ link: https://github.com/AdamIsrael/smartrg808ac_exporter
+ icon_filename: smartr.jpeg
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor SmartRG SR808ac router metrics for efficient network device management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [smartrg808ac_exporter](https://github.com/AdamIsrael/smartrg808ac_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sml
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Smart meters SML
+ link: https://github.com/mweinelt/sml-exporter
+ icon_filename: sml.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Smart Message Language (SML) metrics for efficient smart metering and energy management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [SML Exporter](https://github.com/mweinelt/sml-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [SML Exporter](https://github.com/mweinelt/sml-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-softether
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: SoftEther VPN Server
+ link: https://github.com/dalance/softether_exporter
+ icon_filename: softether.svg
+ categories:
+ - data-collection.vpns
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor SoftEther VPN Server metrics for efficient virtual private network (VPN) management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [SoftEther Exporter](https://github.com/dalance/softether_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [SoftEther Exporter](https://github.com/dalance/softether_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-lsx
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Solar logging stick
+ link: https://gitlab.com/bhavin192/lsx-exporter
+ icon_filename: solar.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor solar energy metrics using a solar logging stick for efficient solar energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Solar logging stick exporter](https://gitlab.com/bhavin192/lsx-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-solaredge
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: SolarEdge inverters
+ link: https://github.com/dave92082/SolarEdge-Exporter
+ icon_filename: solaredge.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track SolarEdge solar inverter metrics for efficient solar energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [SolarEdge Exporter](https://github.com/dave92082/SolarEdge-Exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-solis
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Solis Ginlong 5G inverters
+ link: https://github.com/candlerb/solis_exporter
+ icon_filename: solis.jpg
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Solis solar inverter metrics for efficient solar energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Solis Exporter](https://github.com/candlerb/solis_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Solis Exporter](https://github.com/candlerb/solis_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sonic
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: SONiC NOS
+ link: https://github.com/kamelnetworks/sonic_exporter
+ icon_filename: sonic.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on Software for Open Networking in the Cloud (SONiC) metrics for efficient network switch management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [SONiC Exporter](https://github.com/kamelnetworks/sonic_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-ibm_spectrum_virtualize
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: IBM Spectrum Virtualize
+ link: https://github.com/bluecmd/spectrum_virtualize_exporter
+ icon_filename: ibm.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor IBM Spectrum Virtualize metrics for efficient storage virtualization and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [spectrum_virtualize_exporter](https://github.com/bluecmd/spectrum_virtualize_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-speedify
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Speedify CLI
+ link: https://github.com/willshen/speedify_exporter
+ icon_filename: speedify.png
+ categories:
+ - data-collection.vpns
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Speedify VPN metrics for efficient virtual private network (VPN) management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Speedify Exporter](https://github.com/willshen/speedify_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Speedify Exporter](https://github.com/willshen/speedify_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sphinx
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Sphinx
+ link: https://github.com/foxdalas/sphinx_exporter
+ icon_filename: sphinx.png
+ categories:
+ - data-collection.search-engines
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Sphinx search engine metrics for efficient search and indexing performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Sphinx Exporter](https://github.com/foxdalas/sphinx_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sql
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: SQL Database agnostic
+ link: https://github.com/free/sql_exporter
+ icon_filename: sql.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - database
+ - relational db
+ - data querying
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Query SQL databases for efficient database performance monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [SQL Exporter](https://github.com/free/sql_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [SQL Exporter](https://github.com/free/sql_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-starwind_vsan
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: StarWind VSAN vSphere Edition
+ link: https://github.com/evoicefire/starwind-vsan-exporter
+ icon_filename: starwind.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep tabs on StarWind Virtual SAN metrics for efficient storage virtualization and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Starwind vSAN Exporter](https://github.com/evoicefire/starwind-vsan-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-storidge
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Storidge
+ link: https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md
+ icon_filename: storidge.png
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Storidge storage metrics for efficient storage management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Storidge exporter](https://github.com/Storidge/cio-user-docs/blob/master/integrations/prometheus.md) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-stream_generic
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Stream
+ link: https://github.com/carlpett/stream_exporter
+ icon_filename: stream.png
+ categories:
+ - data-collection.media-streaming-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor streaming metrics for efficient media streaming and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Stream exporter](https://github.com/carlpett/stream_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Stream exporter](https://github.com/carlpett/stream_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-strongswan
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: strongSwan
+ link: https://github.com/jlti-dev/ipsec_exporter
+ icon_filename: strongswan.svg
+ categories:
+ - data-collection.vpns
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track strongSwan VPN and IPSec metrics using the vici interface for efficient virtual private network (VPN) management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [strongSwan/IPSec/vici Exporter](https://github.com/jlti-dev/ipsec_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sunspec
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Sunspec Solar Energy
+ link: https://github.com/inosion/prometheus-sunspec-exporter
+ icon_filename: sunspec.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor SunSpec Alliance solar energy metrics for efficient solar energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Sunspec Solar Energy Exporter](https://github.com/inosion/prometheus-sunspec-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-suricata
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Suricata
+ link: https://github.com/corelight/suricata_exporter
+ icon_filename: suricata.png
+ categories:
+ - data-collection.security-systems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Keep an eye on Suricata network intrusion detection and prevention system (IDS/IPS) metrics for efficient network security and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Suricata Exporter](https://github.com/corelight/suricata_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Suricata Exporter](https://github.com/corelight/suricata_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-synology_activebackup
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Synology ActiveBackup
+ link: https://github.com/codemonauts/activebackup-prometheus-exporter
+ icon_filename: synology.png
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Synology Active Backup metrics for efficient backup and data protection management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Synology ActiveBackup Exporter](https://github.com/codemonauts/activebackup-prometheus-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-sysload
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Sysload
+ link: https://github.com/egmc/sysload_exporter
+ icon_filename: sysload.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor system load metrics for efficient system performance and resource management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Sysload Exporter](https://github.com/egmc/sysload_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Sysload Exporter](https://github.com/egmc/sysload_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-trex
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: T-Rex NVIDIA GPU Miner
+ link: https://github.com/dennisstritzke/trex_exporter
+ icon_filename: trex.png
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor T-Rex NVIDIA GPU miner metrics for efficient cryptocurrency mining and GPU performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [T-Rex NVIDIA GPU Miner Exporter](https://github.com/dennisstritzke/trex_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-tado
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Tado smart heating solution
+ link: https://github.com/eko/tado-exporter
+ icon_filename: tado.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Tado smart thermostat metrics for efficient home heating and cooling management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Tado° Exporter](https://github.com/eko/tado-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Tado Exporter](https://github.com/eko/tado-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-tankerkoenig
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Tankerkoenig API
+ link: https://github.com/lukasmalkmus/tankerkoenig_exporter
+ icon_filename: tanker.png
+ categories:
+ - data-collection.generic-data-collection
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Tankerkönig API fuel price metrics for efficient fuel price monitoring and management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Tankerkönig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Tankerkönig API Exporter](https://github.com/lukasmalkmus/tankerkoenig_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-tesla_wall_connector
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Tesla Wall Connector
+ link: https://github.com/benclapp/tesla_wall_connector_exporter
+ icon_filename: tesla.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Tesla Wall Connector charging station metrics for efficient electric vehicle charging management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Tesla Wall Connector Exporter](https://github.com/benclapp/tesla_wall_connector_exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-tplink_p110
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: TP-Link P110
+ link: https://github.com/ijohanne/prometheus-tplink-p110-exporter
+ icon_filename: tplink.png
+ categories:
+ - data-collection.iot-devices
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track TP-Link P110 smart plug metrics for efficient energy management and monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [TP-Link P110 Exporter](https://github.com/ijohanne/prometheus-tplink-p110-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-traceroute
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Traceroute
+ link: https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter
+ icon_filename: traceroute.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Export traceroute metrics for efficient network path analysis and performance monitoring.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [traceroute exporter](https://github.com/jeanfabrice/prometheus-tcptraceroute-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-twincat_ads_webservice
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: TwinCAT ADS Web Service
+ link: https://github.com/MarcusCalidus/twincat-ads-webservice-exporter
+ icon_filename: twincat.png
+ categories:
+ - data-collection.generic-data-collection
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor TwinCAT ADS (Automation Device Specification) Web Service metrics for efficient industrial automation and control.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [TwinCAT ADS Web Service exporter](https://github.com/MarcusCalidus/twincat-ads-webservice-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-warp10
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Warp10
+ link: https://github.com/centreon/warp10-sensision-exporter
+ icon_filename: warp10.svg
+ categories:
+ - data-collection.database-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Warp 10 time-series database metrics for efficient time-series data management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Warp10 Exporter](https://github.com/centreon/warp10-sensision-exporter) by following the instructions mentioned in the exporter README.
+
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-yourls
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: YOURLS URL Shortener
+ link: https://github.com/just1not2/prometheus-exporter-yourls
+ icon_filename: yourls.png
+ categories:
+ - data-collection.apm
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor YOURLS (Your Own URL Shortener) metrics for efficient URL shortening service management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [YOURLS exporter](https://github.com/just1not2/prometheus-exporter-yourls) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-zerto
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Zerto
+ link: https://github.com/claranet/zerto-exporter
+ icon_filename: zerto.png
+ categories:
+ - data-collection.cloud-provider-managed
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Zerto disaster recovery and data protection metrics for efficient backup and recovery management.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Zerto Exporter](https://github.com/claranet/zerto-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Zerto Exporter](https://github.com/claranet/zerto-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-zulip
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Zulip
+ link: https://github.com/brokenpip3/zulip-exporter
+ icon_filename: zulip.png
+ categories:
+ - data-collection.media-streaming-servers
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Monitor Zulip open-source group chat application metrics for efficient team communication management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Zulip Exporter](https://github.com/brokenpip3/zulip-exporter) by following the instructions mentioned in the exporter README.
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-prometheus-zyxel_gs1200
+ most_popular: false
+ community: true
+ monitored_instance:
+ name: Zyxel GS1200-8
+ link: https://github.com/robinelfrink/gs1200-exporter
+ icon_filename: zyxel.png
+ categories:
+ - data-collection.networking-stack-and-network-interfaces
+ keywords: []
+ overview:
+ <<: *overview
+ data_collection:
+ metrics_description: |
+ Track Zyxel GS1200 network switch metrics for efficient network device management and performance.
+ method_description: |
+ Metrics are gathered by periodically sending HTTP requests to [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter).
+ setup:
+ <<: *setup
+ prerequisites:
+ list:
+ - title: Install Exporter
+ description: |
+ Install [Zyxel GS1200 Exporter](https://github.com/robinelfrink/gs1200-exporter) by following the instructions mentioned in the exporter README.
+
diff --git a/src/go/plugin/go.d/modules/prometheus/prometheus.go b/src/go/plugin/go.d/modules/prometheus/prometheus.go
new file mode 100644
index 000000000..b3f97fbd3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/prometheus.go
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("prometheus", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Prometheus {
+ return &Prometheus{
+ Config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 10),
+ },
+ },
+ MaxTS: 2000,
+ MaxTSPerMetric: 200,
+ },
+ charts: &module.Charts{},
+ cache: newCache(),
+ }
+}
+
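+// Config holds the per-job settings for this module. A minimal job sketch
+// (illustrative values only; testdata/config.yaml below exercises the full
+// set of accepted keys):
+//
+//	jobs:
+//	  - name: local
+//	    url: http://127.0.0.1:9090/metrics
+//	    max_time_series: 2000
+//	    fallback_type:
+//	      gauge:
+//	        - some_untyped_metric_*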
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ Name string `yaml:"name,omitempty" json:"name"`
+ Application string `yaml:"app,omitempty" json:"app"`
+ BearerTokenFile string `yaml:"bearer_token_file,omitempty" json:"bearer_token_file"`
+ Selector selector.Expr `yaml:"selector,omitempty" json:"selector"`
+ ExpectedPrefix string `yaml:"expected_prefix,omitempty" json:"expected_prefix"`
+ MaxTS int `yaml:"max_time_series" json:"max_time_series"`
+ MaxTSPerMetric int `yaml:"max_time_series_per_metric" json:"max_time_series_per_metric"`
+ FallbackType struct {
+ Gauge []string `yaml:"gauge,omitempty" json:"gauge"`
+ Counter []string `yaml:"counter,omitempty" json:"counter"`
+ } `yaml:"fallback_type,omitempty" json:"fallback_type"`
+}
+
+type Prometheus struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prom prometheus.Prometheus
+
+ cache *cache
+ fallbackType struct {
+ counter matcher.Matcher
+ gauge matcher.Matcher
+ }
+}
+
+func (p *Prometheus) Configuration() any {
+ return p.Config
+}
+
+func (p *Prometheus) Init() error {
+ if err := p.validateConfig(); err != nil {
+ p.Errorf("validating config: %v", err)
+ return err
+ }
+
+ prom, err := p.initPrometheusClient()
+ if err != nil {
+ p.Errorf("init prometheus client: %v", err)
+ return err
+ }
+ p.prom = prom
+
+ m, err := p.initFallbackTypeMatcher(p.FallbackType.Counter)
+ if err != nil {
+ p.Errorf("init counter fallback type matcher: %v", err)
+ return err
+ }
+ p.fallbackType.counter = m
+
+ m, err = p.initFallbackTypeMatcher(p.FallbackType.Gauge)
+ if err != nil {
+ p.Errorf("init counter fallback type matcher: %v", err)
+ return err
+ }
+ p.fallbackType.gauge = m
+
+ return nil
+}
+
+func (p *Prometheus) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *Prometheus) Charts() *module.Charts {
+ return p.charts
+}
+
+func (p *Prometheus) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (p *Prometheus) Cleanup() {
+ if p.prom != nil && p.prom.HTTPClient() != nil {
+ p.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/prometheus/prometheus_test.go b/src/go/plugin/go.d/modules/prometheus/prometheus_test.go
new file mode 100644
index 000000000..5a5475cc9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/prometheus_test.go
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPrometheus_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Prometheus{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPrometheus_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "non empty URL": {
+ wantFail: false,
+ config: Config{HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:9090/metric"}}},
+ },
+ "invalid selector syntax": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:9090/metric"}},
+ Selector: selector.Expr{Allow: []string{`name{label=#"value"}`}},
+ },
+ },
+ "default": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ prom := New()
+ prom.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, prom.Init())
+ } else {
+ assert.NoError(t, prom.Init())
+ }
+ })
+ }
+}
+
+func TestPrometheus_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+
+ prom := New()
+ prom.URL = "http://127.0.0.1"
+ require.NoError(t, prom.Init())
+ assert.NotPanics(t, prom.Cleanup)
+}
+
+func TestPrometheus_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (prom *Prometheus, cleanup func())
+ wantFail bool
+ }{
+ "success if endpoint returns valid metrics in prometheus format": {
+ wantFail: false,
+ prepare: func() (prom *Prometheus, cleanup func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(`test_counter_no_meta_metric_1_total{label1="value1"} 11`))
+ }))
+ prom = New()
+ prom.URL = srv.URL
+
+ return prom, srv.Close
+ },
+ },
+ "fail if the total num of metrics exceeds the limit": {
+ wantFail: true,
+ prepare: func() (prom *Prometheus, cleanup func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(`
+test_counter_no_meta_metric_1_total{label1="value1"} 11
+test_counter_no_meta_metric_1_total{label1="value2"} 11
+`))
+ }))
+ prom = New()
+ prom.URL = srv.URL
+ prom.MaxTS = 1
+
+ return prom, srv.Close
+ },
+ },
+ "fail if the num time series in the metric exceeds the limit": {
+ wantFail: true,
+ prepare: func() (prom *Prometheus, cleanup func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(`
+test_counter_no_meta_metric_1_total{label1="value1"} 11
+test_counter_no_meta_metric_1_total{label1="value2"} 11
+`))
+ }))
+ prom = New()
+ prom.URL = srv.URL
+ prom.MaxTSPerMetric = 1
+
+ return prom, srv.Close
+ },
+ },
+ "fail if metrics have no expected prefix": {
+ wantFail: true,
+ prepare: func() (prom *Prometheus, cleanup func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(`test_counter_no_meta_metric_1_total{label1="value1"} 11`))
+ }))
+ prom = New()
+ prom.URL = srv.URL
+ prom.ExpectedPrefix = "prefix_"
+
+ return prom, srv.Close
+ },
+ },
+ "fail if endpoint returns data not in prometheus format": {
+ wantFail: true,
+ prepare: func() (prom *Prometheus, cleanup func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ prom = New()
+ prom.URL = srv.URL
+
+ return prom, srv.Close
+ },
+ },
+ "fail if connection refused": {
+ wantFail: true,
+ prepare: func() (prom *Prometheus, cleanup func()) {
+ prom = New()
+ prom.URL = "http://127.0.0.1:38001/metrics"
+
+ return prom, func() {}
+ },
+ },
+ "fail if endpoint returns 404": {
+ wantFail: true,
+ prepare: func() (prom *Prometheus, cleanup func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ prom = New()
+ prom.URL = srv.URL
+
+ return prom, srv.Close
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ prom, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, prom.Init())
+
+ if test.wantFail {
+ assert.Error(t, prom.Check())
+ } else {
+ assert.NoError(t, prom.Check())
+ }
+ })
+ }
+}
+
+func TestPrometheus_Collect(t *testing.T) {
+ type testCaseStep struct {
+ desc string
+ input string
+ wantCollected map[string]int64
+ wantCharts int
+ }
+ tests := map[string]struct {
+ prepare func() *Prometheus
+ steps []testCaseStep
+ }{
+ "Gauge": {
+ prepare: New,
+ steps: []testCaseStep{
+ {
+ desc: "Two first seen series, no meta series ignored",
+ input: `
+# HELP test_gauge_metric_1 Test Gauge Metric 1
+# TYPE test_gauge_metric_1 gauge
+test_gauge_metric_1{label1="value1"} 11
+test_gauge_metric_1{label1="value2"} 12
+test_gauge_no_meta_metric_1{label1="value1"} 11
+test_gauge_no_meta_metric_1{label1="value2"} 12
+`,
+ wantCollected: map[string]int64{
+ "test_gauge_metric_1-label1=value1": 11000,
+ "test_gauge_metric_1-label1=value2": 12000,
+ },
+ wantCharts: 2,
+ },
+ {
+ desc: "One series removed",
+ input: `
+# HELP test_gauge_metric_1 Test Gauge Metric 1
+# TYPE test_gauge_metric_1 gauge
+test_gauge_metric_1{label1="value1"} 11
+`,
+ wantCollected: map[string]int64{
+ "test_gauge_metric_1-label1=value1": 11000,
+ },
+ wantCharts: 1,
+ },
+ {
+ desc: "One series (re)added",
+ input: `
+# HELP test_gauge_metric_1 Test Gauge Metric 1
+# TYPE test_gauge_metric_1 gauge
+test_gauge_metric_1{label1="value1"} 11
+test_gauge_metric_1{label1="value2"} 12
+`,
+ wantCollected: map[string]int64{
+ "test_gauge_metric_1-label1=value1": 11000,
+ "test_gauge_metric_1-label1=value2": 12000,
+ },
+ wantCharts: 2,
+ },
+ },
+ },
+ "Counter": {
+ prepare: New,
+ steps: []testCaseStep{
+ {
+ desc: "Four first seen series, no meta series collected",
+ input: `
+# HELP test_counter_metric_1_total Test Counter Metric 1
+# TYPE test_counter_metric_1_total counter
+test_counter_metric_1_total{label1="value1"} 11
+test_counter_metric_1_total{label1="value2"} 12
+test_counter_no_meta_metric_1_total{label1="value1"} 11
+test_counter_no_meta_metric_1_total{label1="value2"} 12
+`,
+ wantCollected: map[string]int64{
+ "test_counter_metric_1_total-label1=value1": 11000,
+ "test_counter_metric_1_total-label1=value2": 12000,
+ "test_counter_no_meta_metric_1_total-label1=value1": 11000,
+ "test_counter_no_meta_metric_1_total-label1=value2": 12000,
+ },
+ wantCharts: 4,
+ },
+ {
+ desc: "Two series removed",
+ input: `
+# HELP test_counter_metric_1_total Test Counter Metric 1
+# TYPE test_counter_metric_1_total counter
+test_counter_metric_1_total{label1="value1"} 11
+test_counter_no_meta_metric_1_total{label1="value1"} 11
+`,
+ wantCollected: map[string]int64{
+ "test_counter_metric_1_total-label1=value1": 11000,
+ "test_counter_no_meta_metric_1_total-label1=value1": 11000,
+ },
+ wantCharts: 2,
+ },
+ {
+ desc: "Two series (re)added",
+ input: `
+# HELP test_counter_metric_1_total Test Counter Metric 1
+# TYPE test_counter_metric_1_total counter
+test_counter_metric_1_total{label1="value1"} 11
+test_counter_metric_1_total{label1="value2"} 12
+test_counter_no_meta_metric_1_total{label1="value1"} 11
+test_counter_no_meta_metric_1_total{label1="value2"} 12
+`,
+ wantCollected: map[string]int64{
+ "test_counter_metric_1_total-label1=value1": 11000,
+ "test_counter_metric_1_total-label1=value2": 12000,
+ "test_counter_no_meta_metric_1_total-label1=value1": 11000,
+ "test_counter_no_meta_metric_1_total-label1=value2": 12000,
+ },
+ wantCharts: 4,
+ },
+ },
+ },
+ "Summary": {
+ prepare: New,
+ steps: []testCaseStep{
+ {
+ desc: "Two first seen series, no meta series collected",
+ input: `
+# HELP test_summary_1_duration_microseconds Test Summary Metric 1
+# TYPE test_summary_1_duration_microseconds summary
+test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921
+test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_1_duration_microseconds_count{label1="value1"} 31
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921
+test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31
+`,
+ wantCollected: map[string]int64{
+ "test_summary_1_duration_microseconds-label1=value1_count": 31,
+ "test_summary_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000,
+ "test_summary_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000,
+ "test_summary_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000,
+ "test_summary_1_duration_microseconds-label1=value1_sum": 283201290,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_count": 31,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_sum": 283201290,
+ },
+ wantCharts: 6,
+ },
+ {
+ desc: "One series removed",
+ input: `
+# HELP test_summary_1_duration_microseconds Test Summary Metric 1
+# TYPE test_summary_1_duration_microseconds summary
+test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921
+test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_1_duration_microseconds_count{label1="value1"} 31
+`,
+ wantCollected: map[string]int64{
+ "test_summary_1_duration_microseconds-label1=value1_count": 31,
+ "test_summary_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000,
+ "test_summary_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000,
+ "test_summary_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000,
+ "test_summary_1_duration_microseconds-label1=value1_sum": 283201290,
+ },
+ wantCharts: 3,
+ },
+ {
+ desc: "One series (re)added",
+ input: `
+# HELP test_summary_1_duration_microseconds Test Summary Metric 1
+# TYPE test_summary_1_duration_microseconds summary
+test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921
+test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_1_duration_microseconds_count{label1="value1"} 31
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921
+test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31
+`,
+ wantCollected: map[string]int64{
+ "test_summary_1_duration_microseconds-label1=value1_count": 31,
+ "test_summary_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000,
+ "test_summary_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000,
+ "test_summary_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000,
+ "test_summary_1_duration_microseconds-label1=value1_sum": 283201290,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_count": 31,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.5": 4931921000,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.9": 4932921000,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_quantile=0.99": 4933921000,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_sum": 283201290,
+ },
+ wantCharts: 6,
+ },
+ },
+ },
+ "Summary with NaN": {
+ prepare: New,
+ steps: []testCaseStep{
+ {
+ desc: "Two first seen series, no meta series collected",
+ input: `
+# HELP test_summary_1_duration_microseconds Test Summary Metric 1
+# TYPE test_summary_1_duration_microseconds summary
+test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} NaN
+test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} NaN
+test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} NaN
+test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_1_duration_microseconds_count{label1="value1"} 31
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} NaN
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} NaN
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} NaN
+test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31
+`,
+ wantCollected: map[string]int64{
+ "test_summary_1_duration_microseconds-label1=value1_count": 31,
+ "test_summary_1_duration_microseconds-label1=value1_sum": 283201290,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_count": 31,
+ "test_summary_no_meta_1_duration_microseconds-label1=value1_sum": 283201290,
+ },
+ wantCharts: 6,
+ },
+ },
+ },
+ "Histogram": {
+ prepare: New,
+ steps: []testCaseStep{
+ {
+ desc: "Two first seen series, no meta series collected",
+ input: `
+# HELP test_histogram_1_duration_seconds Test Histogram Metric 1
+# TYPE test_histogram_1_duration_seconds histogram
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5
+test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6
+test_histogram_1_duration_seconds_sum{label1="value1"} 0.00147889
+test_histogram_1_duration_seconds_count{label1="value1"} 6
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.1"} 4
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.5"} 5
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6
+test_histogram_no_meta_1_duration_seconds_sum{label1="value1"} 0.00147889
+test_histogram_no_meta_1_duration_seconds_count{label1="value1"} 6
+`,
+ wantCollected: map[string]int64{
+ "test_histogram_1_duration_seconds-label1=value1_bucket=+Inf": 6,
+ "test_histogram_1_duration_seconds-label1=value1_bucket=0.1": 4,
+ "test_histogram_1_duration_seconds-label1=value1_bucket=0.5": 5,
+ "test_histogram_1_duration_seconds-label1=value1_count": 6,
+ "test_histogram_1_duration_seconds-label1=value1_sum": 1,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=+Inf": 6,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=0.1": 4,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=0.5": 5,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_count": 6,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_sum": 1,
+ },
+ wantCharts: 6,
+ },
+ {
+ desc: "One series removed",
+ input: `
+# HELP test_histogram_1_duration_seconds Test Histogram Metric 1
+# TYPE test_histogram_1_duration_seconds histogram
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5
+test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6
+`,
+ wantCollected: map[string]int64{
+ "test_histogram_1_duration_seconds-label1=value1_bucket=+Inf": 6,
+ "test_histogram_1_duration_seconds-label1=value1_bucket=0.1": 4,
+ "test_histogram_1_duration_seconds-label1=value1_bucket=0.5": 5,
+ "test_histogram_1_duration_seconds-label1=value1_count": 0,
+ "test_histogram_1_duration_seconds-label1=value1_sum": 0,
+ },
+ wantCharts: 3,
+ },
+ {
+ desc: "One series (re)added",
+ input: `
+# HELP test_histogram_1_duration_seconds Test Histogram Metric 1
+# TYPE test_histogram_1_duration_seconds histogram
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5
+test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6
+test_histogram_1_duration_seconds_sum{label1="value1"} 0.00147889
+test_histogram_1_duration_seconds_count{label1="value1"} 6
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.1"} 4
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.5"} 5
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6
+test_histogram_no_meta_1_duration_seconds_sum{label1="value1"} 0.00147889
+test_histogram_no_meta_1_duration_seconds_count{label1="value1"} 6
+`,
+ wantCollected: map[string]int64{
+ "test_histogram_1_duration_seconds-label1=value1_bucket=+Inf": 6,
+ "test_histogram_1_duration_seconds-label1=value1_bucket=0.1": 4,
+ "test_histogram_1_duration_seconds-label1=value1_bucket=0.5": 5,
+ "test_histogram_1_duration_seconds-label1=value1_count": 6,
+ "test_histogram_1_duration_seconds-label1=value1_sum": 1,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=+Inf": 6,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=0.1": 4,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_bucket=0.5": 5,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_count": 6,
+ "test_histogram_no_meta_1_duration_seconds-label1=value1_sum": 1,
+ },
+ wantCharts: 6,
+ },
+ },
+ },
+ "match Untyped as Gauge": {
+ prepare: func() *Prometheus {
+ prom := New()
+ prom.FallbackType.Gauge = []string{"test_gauge_no_meta*"}
+ return prom
+ },
+ steps: []testCaseStep{
+ {
+ desc: "Two first seen series, meta series processed as Gauge",
+ input: `
+# HELP test_gauge_metric_1 Test Untyped Metric 1
+# TYPE test_gauge_metric_1 gauge
+test_gauge_metric_1{label1="value1"} 11
+test_gauge_metric_1{label1="value2"} 12
+test_gauge_no_meta_metric_1{label1="value1"} 11
+test_gauge_no_meta_metric_1{label1="value2"} 12
+`,
+ wantCollected: map[string]int64{
+ "test_gauge_metric_1-label1=value1": 11000,
+ "test_gauge_metric_1-label1=value2": 12000,
+ "test_gauge_no_meta_metric_1-label1=value1": 11000,
+ "test_gauge_no_meta_metric_1-label1=value2": 12000,
+ },
+ wantCharts: 4,
+ },
+ },
+ },
+ "match Untyped as Counter": {
+ prepare: func() *Prometheus {
+ prom := New()
+ prom.FallbackType.Counter = []string{"test_gauge_no_meta*"}
+ return prom
+ },
+ steps: []testCaseStep{
+ {
+ desc: "Two first seen series, meta series processed as Counter",
+ input: `
+# HELP test_gauge_metric_1 Test Untyped Metric 1
+# TYPE test_gauge_metric_1 gauge
+test_gauge_metric_1{label1="value1"} 11
+test_gauge_metric_1{label1="value2"} 12
+test_gauge_no_meta_metric_1{label1="value1"} 11
+test_gauge_no_meta_metric_1{label1="value2"} 12
+`,
+ wantCollected: map[string]int64{
+ "test_gauge_metric_1-label1=value1": 11000,
+ "test_gauge_metric_1-label1=value2": 12000,
+ "test_gauge_no_meta_metric_1-label1=value1": 11000,
+ "test_gauge_no_meta_metric_1-label1=value2": 12000,
+ },
+ wantCharts: 4,
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ prom := test.prepare()
+
+ var metrics []byte
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(metrics)
+ }))
+ defer srv.Close()
+
+ prom.URL = srv.URL
+ require.NoError(t, prom.Init())
+
+ for num, step := range test.steps {
+ t.Run(fmt.Sprintf("step num %d ('%s')", num+1, step.desc), func(t *testing.T) {
+
+ metrics = []byte(step.input)
+
+ var mx map[string]int64
+
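+ // Collect repeatedly with the same payload so that any series missing from
+ // this step's input stays absent for more than maxNotSeenTimes (defined
+ // elsewhere in this module); removeObsoleteCharts below then prunes the
+ // charts marked obsolete before the chart count is asserted.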
+ for i := 0; i < maxNotSeenTimes+1; i++ {
+ mx = prom.Collect()
+ }
+
+ assert.Equal(t, step.wantCollected, mx)
+ removeObsoleteCharts(prom.Charts())
+ assert.Len(t, *prom.Charts(), step.wantCharts)
+ })
+ }
+ })
+ }
+}
+
+func removeObsoleteCharts(charts *module.Charts) {
+ var i int
+ for _, chart := range *charts {
+ if !chart.Obsolete {
+ (*charts)[i] = chart
+ i++
+ }
+ }
+ *charts = (*charts)[:i]
+}
diff --git a/src/go/plugin/go.d/modules/prometheus/testdata/config.json b/src/go/plugin/go.d/modules/prometheus/testdata/config.json
new file mode 100644
index 000000000..2e9b2e138
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/testdata/config.json
@@ -0,0 +1,42 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "name": "ok",
+ "app": "ok",
+ "bearer_token_file": "ok",
+ "selector": {
+ "allow": [
+ "ok"
+ ],
+ "deny": [
+ "ok"
+ ]
+ },
+ "expected_prefix": "ok",
+ "max_time_series": 123,
+ "max_time_series_per_metric": 123,
+ "fallback_type": {
+ "gauge": [
+ "ok"
+ ],
+ "counter": [
+ "ok"
+ ]
+ }
+}
diff --git a/src/go/plugin/go.d/modules/prometheus/testdata/config.yaml b/src/go/plugin/go.d/modules/prometheus/testdata/config.yaml
new file mode 100644
index 000000000..37a411b9a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/prometheus/testdata/config.yaml
@@ -0,0 +1,33 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+name: "ok"
+app: "ok"
+bearer_token_file: "ok"
+selector:
+ allow:
+ - "ok"
+ deny:
+ - "ok"
+expected_prefix: "ok"
+max_time_series: 123
+max_time_series_per_metric: 123
+fallback_type:
+ gauge:
+ - "ok"
+ counter:
+ - "ok"
diff --git a/src/go/plugin/go.d/modules/proxysql/README.md b/src/go/plugin/go.d/modules/proxysql/README.md
new file mode 120000
index 000000000..06223157d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/README.md
@@ -0,0 +1 @@
+integrations/proxysql.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/proxysql/cache.go b/src/go/plugin/go.d/modules/proxysql/cache.go
new file mode 100644
index 000000000..c4fccefff
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/cache.go
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package proxysql
+
+type (
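+ // cache keeps track of the commands, users and backends seen while
+ // collecting. reset clears the updated flag on every entry while keeping
+ // hasCharts; collect marks an entry as updated when its row is seen again,
+ // so per-entry charts can be added for new entries and removed for ones
+ // that disappear (see the add/remove helpers in charts.go).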
+ cache struct {
+ commands map[string]*commandCache
+ users map[string]*userCache
+ backends map[string]*backendCache
+ }
+ commandCache struct {
+ command string
+ hasCharts, updated bool
+ }
+ userCache struct {
+ user string
+ hasCharts, updated bool
+ }
+ backendCache struct {
+ hg, host, port string
+ hasCharts, updated bool
+ }
+)
+
+func (c *cache) reset() {
+ for k, m := range c.commands {
+ c.commands[k] = &commandCache{command: m.command, hasCharts: m.hasCharts}
+ }
+ for k, m := range c.users {
+ c.users[k] = &userCache{user: m.user, hasCharts: m.hasCharts}
+ }
+ for k, m := range c.backends {
+ c.backends[k] = &backendCache{hg: m.hg, host: m.host, port: m.port, hasCharts: m.hasCharts}
+ }
+}
+
+func (c *cache) getCommand(command string) *commandCache {
+ v, ok := c.commands[command]
+ if !ok {
+ v = &commandCache{command: command}
+ c.commands[command] = v
+ }
+ return v
+}
+
+func (c *cache) getUser(user string) *userCache {
+ v, ok := c.users[user]
+ if !ok {
+ v = &userCache{user: user}
+ c.users[user] = v
+ }
+ return v
+}
+
+func (c *cache) getBackend(hg, host, port string) *backendCache {
+ id := backendID(hg, host, port)
+ v, ok := c.backends[id]
+ if !ok {
+ v = &backendCache{hg: hg, host: host, port: port}
+ c.backends[id] = v
+ }
+ return v
+}
diff --git a/src/go/plugin/go.d/modules/proxysql/charts.go b/src/go/plugin/go.d/modules/proxysql/charts.go
new file mode 100644
index 000000000..c36efa5ce
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/charts.go
@@ -0,0 +1,726 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package proxysql
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+// TODO: check https://github.com/ProxySQL/proxysql-grafana-prometheus/blob/main/grafana/provisioning/dashboards/ProxySQL-Host-Statistics.json
+
+const (
+ prioClientConnectionsCount = module.Priority + iota
+ prioClientConnectionsRate
+ prioServerConnectionsCount
+ prioServerConnectionsRate
+ prioBackendsTraffic
+ prioFrontendsTraffic
+ prioActiveTransactionsCount
+ prioQuestionsRate
+ prioSlowQueriesRate
+ prioQueriesRate
+ prioBackendStatementsCount
+ prioBackendStatementsRate
+ prioFrontendStatementsCount
+ prioFrontendStatementsRate
+ prioCachedStatementsCount
+ prioQueryCacheEntriesCount
+ prioQueryCacheIO
+ prioQueryCacheRequestsRate
+ prioQueryCacheMemoryUsed
+ prioMySQLMonitorWorkersCount
+ prioMySQLMonitorWorkersRate
+ prioMySQLMonitorConnectChecksRate
+ prioMySQLMonitorPingChecksRate
+ prioMySQLMonitorReadOnlyChecksRate
+ prioMySQLMonitorReplicationLagChecksRate
+ prioJemallocMemoryUsed
+ prioMemoryUsed
+ prioMySQLCommandExecutionsRate
+ prioMySQLCommandExecutionTime
+ prioMySQLCommandExecutionDurationHistogram
+ prioMySQLUserConnectionsUtilization
+ prioMySQLUserConnectionsCount
+ prioBackendStatus
+ prioBackendConnectionsUsage
+ prioBackendConnectionsRate
+ prioBackendQueriesRateRate
+ prioBackendTraffic
+ prioBackendLatency
+ prioUptime
+)
+
+var (
+ baseCharts = module.Charts{
+ clientConnectionsCountChart.Copy(),
+ clientConnectionsRateChart.Copy(),
+ serverConnectionsCountChart.Copy(),
+ serverConnectionsRateChart.Copy(),
+ backendsTrafficChart.Copy(),
+ frontendsTrafficChart.Copy(),
+ activeTransactionsCountChart.Copy(),
+ questionsRateChart.Copy(),
+ slowQueriesRateChart.Copy(),
+ queriesRateChart.Copy(),
+ backendStatementsCountChart.Copy(),
+ backendStatementsRateChart.Copy(),
+ clientStatementsCountChart.Copy(),
+ clientStatementsRateChart.Copy(),
+ cachedStatementsCountChart.Copy(),
+ queryCacheEntriesCountChart.Copy(),
+ queryCacheIOChart.Copy(),
+ queryCacheRequestsRateChart.Copy(),
+ queryCacheMemoryUsedChart.Copy(),
+ mySQLMonitorWorkersCountChart.Copy(),
+ mySQLMonitorWorkersRateChart.Copy(),
+ mySQLMonitorConnectChecksRateChart.Copy(),
+ mySQLMonitorPingChecksRateChart.Copy(),
+ mySQLMonitorReadOnlyChecksRateChart.Copy(),
+ mySQLMonitorReplicationLagChecksRateChart.Copy(),
+ jemallocMemoryUsedChart.Copy(),
+ memoryUsedCountChart.Copy(),
+ uptimeChart.Copy(),
+ }
+
+ clientConnectionsCountChart = module.Chart{
+ ID: "client_connections_count",
+ Title: "Client connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "proxysql.client_connections_count",
+ Priority: prioClientConnectionsCount,
+ Dims: module.Dims{
+ {ID: "Client_Connections_connected", Name: "connected"},
+ {ID: "Client_Connections_non_idle", Name: "non_idle"},
+ {ID: "Client_Connections_hostgroup_locked", Name: "hostgroup_locked"},
+ },
+ }
+ clientConnectionsRateChart = module.Chart{
+ ID: "client_connections_rate",
+ Title: "Client connections rate",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "proxysql.client_connections_rate",
+ Priority: prioClientConnectionsRate,
+ Dims: module.Dims{
+ {ID: "Client_Connections_created", Name: "created", Algo: module.Incremental},
+ {ID: "Client_Connections_aborted", Name: "aborted", Algo: module.Incremental},
+ },
+ }
+
+ serverConnectionsCountChart = module.Chart{
+ ID: "server_connections_count",
+ Title: "Server connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "proxysql.server_connections_count",
+ Priority: prioServerConnectionsCount,
+ Dims: module.Dims{
+ {ID: "Server_Connections_connected", Name: "connected"},
+ },
+ }
+ serverConnectionsRateChart = module.Chart{
+ ID: "server_connections_rate",
+ Title: "Server connections rate",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "proxysql.server_connections_rate",
+ Priority: prioServerConnectionsRate,
+ Dims: module.Dims{
+ {ID: "Server_Connections_created", Name: "created", Algo: module.Incremental},
+ {ID: "Server_Connections_aborted", Name: "aborted", Algo: module.Incremental},
+ {ID: "Server_Connections_delayed", Name: "delayed", Algo: module.Incremental},
+ },
+ }
+
+ backendsTrafficChart = module.Chart{
+ ID: "backends_traffic",
+ Title: "Backends traffic",
+ Units: "B/s",
+ Fam: "traffic",
+ Ctx: "proxysql.backends_traffic",
+ Priority: prioBackendsTraffic,
+ Dims: module.Dims{
+ {ID: "Queries_backends_bytes_recv", Name: "recv", Algo: module.Incremental},
+ {ID: "Queries_backends_bytes_sent", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ frontendsTrafficChart = module.Chart{
+ ID: "clients_traffic",
+ Title: "Clients traffic",
+ Units: "B/s",
+ Fam: "traffic",
+ Ctx: "proxysql.clients_traffic",
+ Priority: prioFrontendsTraffic,
+ Dims: module.Dims{
+ {ID: "Queries_frontends_bytes_recv", Name: "recv", Algo: module.Incremental},
+ {ID: "Queries_frontends_bytes_sent", Name: "sent", Algo: module.Incremental},
+ },
+ }
+
+ activeTransactionsCountChart = module.Chart{
+ ID: "active_transactions_count",
+ Title: "Client connections that are currently processing a transaction",
+ Units: "transactions",
+ Fam: "transactions",
+ Ctx: "proxysql.active_transactions_count",
+ Priority: prioActiveTransactionsCount,
+ Dims: module.Dims{
+ {ID: "Active_Transactions", Name: "active"},
+ },
+ }
+ questionsRateChart = module.Chart{
+ ID: "questions_rate",
+ Title: "Client requests / statements executed",
+ Units: "questions/s",
+ Fam: "queries",
+ Ctx: "proxysql.questions_rate",
+ Priority: prioQuestionsRate,
+ Dims: module.Dims{
+ {ID: "Questions", Name: "questions", Algo: module.Incremental},
+ },
+ }
+ slowQueriesRateChart = module.Chart{
+ ID: "slow_queries_rate",
+ Title: "Slow queries",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "proxysql.slow_queries_rate",
+ Priority: prioSlowQueriesRate,
+ Dims: module.Dims{
+ {ID: "Slow_queries", Name: "slow", Algo: module.Incremental},
+ },
+ }
+ queriesRateChart = module.Chart{
+ ID: "queries_rate",
+ Title: "Queries rate",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "proxysql.queries_rate",
+ Priority: prioQueriesRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "Com_autocommit", Name: "autocommit", Algo: module.Incremental},
+ {ID: "Com_autocommit_filtered", Name: "autocommit_filtered", Algo: module.Incremental},
+ {ID: "Com_commit", Name: "commit", Algo: module.Incremental},
+ {ID: "Com_commit_filtered", Name: "commit_filtered", Algo: module.Incremental},
+ {ID: "Com_rollback", Name: "rollback", Algo: module.Incremental},
+ {ID: "Com_rollback_filtered", Name: "rollback_filtered", Algo: module.Incremental},
+ {ID: "Com_backend_change_user", Name: "backend_change_user", Algo: module.Incremental},
+ {ID: "Com_backend_init_db", Name: "backend_init_db", Algo: module.Incremental},
+ {ID: "Com_backend_set_names", Name: "backend_set_names", Algo: module.Incremental},
+ {ID: "Com_frontend_init_db", Name: "frontend_init_db", Algo: module.Incremental},
+ {ID: "Com_frontend_set_names", Name: "frontend_set_names", Algo: module.Incremental},
+ {ID: "Com_frontend_use_db", Name: "frontend_use_db", Algo: module.Incremental},
+ },
+ }
+
+ backendStatementsCountChart = module.Chart{
+ ID: "backend_statements_count",
+ Title: "Statements available across all backend connections",
+ Units: "statements",
+ Fam: "statements",
+ Ctx: "proxysql.backend_statements_count",
+ Priority: prioBackendStatementsCount,
+ Dims: module.Dims{
+ {ID: "Stmt_Server_Active_Total", Name: "total"},
+ {ID: "Stmt_Server_Active_Unique", Name: "unique"},
+ },
+ }
+ backendStatementsRateChart = module.Chart{
+ ID: "backend_statements_rate",
+ Title: "Statements executed against the backends",
+ Units: "statements/s",
+ Fam: "statements",
+ Ctx: "proxysql.backend_statements_rate",
+ Priority: prioBackendStatementsRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "Com_backend_stmt_prepare", Name: "prepare", Algo: module.Incremental},
+ {ID: "Com_backend_stmt_execute", Name: "execute", Algo: module.Incremental},
+ {ID: "Com_backend_stmt_close", Name: "close", Algo: module.Incremental},
+ },
+ }
+ clientStatementsCountChart = module.Chart{
+ ID: "client_statements_count",
+ Title: "Statements that are in use by clients",
+ Units: "statements",
+ Fam: "statements",
+ Ctx: "proxysql.client_statements_count",
+ Priority: prioFrontendStatementsCount,
+ Dims: module.Dims{
+ {ID: "Stmt_Client_Active_Total", Name: "total"},
+ {ID: "Stmt_Client_Active_Unique", Name: "unique"},
+ },
+ }
+ clientStatementsRateChart = module.Chart{
+ ID: "client_statements_rate",
+ Title: "Statements executed by clients",
+ Units: "statements/s",
+ Fam: "statements",
+ Ctx: "proxysql.client_statements_rate",
+ Priority: prioFrontendStatementsRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "Com_frontend_stmt_prepare", Name: "prepare", Algo: module.Incremental},
+ {ID: "Com_frontend_stmt_execute", Name: "execute", Algo: module.Incremental},
+ {ID: "Com_frontend_stmt_close", Name: "close", Algo: module.Incremental},
+ },
+ }
+ cachedStatementsCountChart = module.Chart{
+ ID: "cached_statements_count",
+ Title: "Global prepared statements",
+ Units: "statements",
+ Fam: "statements",
+ Ctx: "proxysql.cached_statements_count",
+ Priority: prioCachedStatementsCount,
+ Dims: module.Dims{
+ {ID: "Stmt_Cached", Name: "cached"},
+ },
+ }
+
+ queryCacheEntriesCountChart = module.Chart{
+ ID: "query_cache_entries_count",
+ Title: "Query Cache entries",
+ Units: "entries",
+ Fam: "query cache",
+ Ctx: "proxysql.query_cache_entries_count",
+ Priority: prioQueryCacheEntriesCount,
+ Dims: module.Dims{
+ {ID: "Query_Cache_Entries", Name: "entries"},
+ },
+ }
+ queryCacheMemoryUsedChart = module.Chart{
+ ID: "query_cache_memory_used",
+ Title: "Query Cache memory used",
+ Units: "B",
+ Fam: "query cache",
+ Ctx: "proxysql.query_cache_memory_used",
+ Priority: prioQueryCacheMemoryUsed,
+ Dims: module.Dims{
+ {ID: "Query_Cache_Memory_bytes", Name: "used"},
+ },
+ }
+ queryCacheIOChart = module.Chart{
+ ID: "query_cache_io",
+ Title: "Query Cache I/O",
+ Units: "B/s",
+ Fam: "query cache",
+ Ctx: "proxysql.query_cache_io",
+ Priority: prioQueryCacheIO,
+ Dims: module.Dims{
+ {ID: "Query_Cache_bytes_IN", Name: "in", Algo: module.Incremental},
+ {ID: "Query_Cache_bytes_OUT", Name: "out", Algo: module.Incremental},
+ },
+ }
+ queryCacheRequestsRateChart = module.Chart{
+ ID: "query_cache_requests_rate",
+ Title: "Query Cache requests",
+ Units: "requests/s",
+ Fam: "query cache",
+ Ctx: "proxysql.query_cache_requests_rate",
+ Priority: prioQueryCacheRequestsRate,
+ Dims: module.Dims{
+ {ID: "Query_Cache_count_GET", Name: "read", Algo: module.Incremental},
+ {ID: "Query_Cache_count_SET", Name: "write", Algo: module.Incremental},
+ {ID: "Query_Cache_count_GET_OK", Name: "read_success", Algo: module.Incremental},
+ },
+ }
+
+ mySQLMonitorWorkersCountChart = module.Chart{
+ ID: "mysql_monitor_workers_count",
+ Title: "MySQL monitor workers",
+ Units: "threads",
+ Fam: "monitor",
+ Ctx: "proxysql.mysql_monitor_workers_count",
+ Priority: prioMySQLMonitorWorkersCount,
+ Dims: module.Dims{
+ {ID: "MySQL_Monitor_Workers", Name: "workers"},
+ {ID: "MySQL_Monitor_Workers_Aux", Name: "auxiliary"},
+ },
+ }
+ mySQLMonitorWorkersRateChart = module.Chart{
+ ID: "mysql_monitor_workers_rate",
+ Title: "MySQL monitor workers rate",
+ Units: "workers/s",
+ Fam: "monitor",
+ Ctx: "proxysql.mysql_monitor_workers_rate",
+ Priority: prioMySQLMonitorWorkersRate,
+ Dims: module.Dims{
+ {ID: "MySQL_Monitor_Workers_Started", Name: "started", Algo: module.Incremental},
+ },
+ }
+ mySQLMonitorConnectChecksRateChart = module.Chart{
+ ID: "mysql_monitor_connect_checks_rate",
+ Title: "MySQL monitor connect checks",
+ Units: "checks/s",
+ Fam: "monitor",
+ Ctx: "proxysql.mysql_monitor_connect_checks_rate",
+ Priority: prioMySQLMonitorConnectChecksRate,
+ Dims: module.Dims{
+ {ID: "MySQL_Monitor_connect_check_OK", Name: "succeed", Algo: module.Incremental},
+ {ID: "MySQL_Monitor_connect_check_ERR", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ mySQLMonitorPingChecksRateChart = module.Chart{
+ ID: "mysql_monitor_ping_checks_rate",
+ Title: "MySQL monitor ping checks",
+ Units: "checks/s",
+ Fam: "monitor",
+ Ctx: "proxysql.mysql_monitor_ping_checks_rate",
+ Priority: prioMySQLMonitorPingChecksRate,
+ Dims: module.Dims{
+ {ID: "MySQL_Monitor_ping_check_OK", Name: "succeed", Algo: module.Incremental},
+ {ID: "MySQL_Monitor_ping_check_ERR", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ mySQLMonitorReadOnlyChecksRateChart = module.Chart{
+ ID: "mysql_monitor_read_only_checks_rate",
+ Title: "MySQL monitor read only checks",
+ Units: "checks/s",
+ Fam: "monitor",
+ Ctx: "proxysql.mysql_monitor_read_only_checks_rate",
+ Priority: prioMySQLMonitorReadOnlyChecksRate,
+ Dims: module.Dims{
+ {ID: "MySQL_Monitor_read_only_check_OK", Name: "succeed", Algo: module.Incremental},
+ {ID: "MySQL_Monitor_read_only_check_ERR", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ mySQLMonitorReplicationLagChecksRateChart = module.Chart{
+ ID: "mysql_monitor_replication_lag_checks_rate",
+ Title: "MySQL monitor replication lag checks",
+ Units: "checks/s",
+ Fam: "monitor",
+ Ctx: "proxysql.mysql_monitor_replication_lag_checks_rate",
+ Priority: prioMySQLMonitorReplicationLagChecksRate,
+ Dims: module.Dims{
+ {ID: "MySQL_Monitor_replication_lag_check_OK", Name: "succeed", Algo: module.Incremental},
+ {ID: "MySQL_Monitor_replication_lag_check_ERR", Name: "failed", Algo: module.Incremental},
+ },
+ }
+
+ jemallocMemoryUsedChart = module.Chart{
+ ID: "jemalloc_memory_used",
+ Title: "Jemalloc used memory",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "proxysql.jemalloc_memory_used",
+ Type: module.Stacked,
+ Priority: prioJemallocMemoryUsed,
+ Dims: module.Dims{
+ {ID: "jemalloc_active", Name: "active"},
+ {ID: "jemalloc_allocated", Name: "allocated"},
+ {ID: "jemalloc_mapped", Name: "mapped"},
+ {ID: "jemalloc_metadata", Name: "metadata"},
+ {ID: "jemalloc_resident", Name: "resident"},
+ {ID: "jemalloc_retained", Name: "retained"},
+ },
+ }
+ memoryUsedCountChart = module.Chart{
+ ID: "memory_used",
+ Title: "Memory used",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "proxysql.memory_used",
+ Priority: prioMemoryUsed,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "Auth_memory", Name: "auth"},
+ {ID: "SQLite3_memory_bytes", Name: "sqlite3"},
+ {ID: "query_digest_memory", Name: "query_digest"},
+ {ID: "mysql_query_rules_memory", Name: "query_rules"},
+ {ID: "mysql_firewall_users_table", Name: "firewall_users_table"},
+ {ID: "mysql_firewall_users_config", Name: "firewall_users_config"},
+ {ID: "mysql_firewall_rules_table", Name: "firewall_rules_table"},
+ {ID: "mysql_firewall_rules_config", Name: "firewall_rules_config"},
+ {ID: "stack_memory_mysql_threads", Name: "mysql_threads"},
+ {ID: "stack_memory_admin_threads", Name: "admin_threads"},
+ {ID: "stack_memory_cluster_threads", Name: "cluster_threads"},
+ },
+ }
+ uptimeChart = module.Chart{
+ ID: "proxysql_uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "proxysql.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "ProxySQL_Uptime", Name: "uptime"},
+ },
+ }
+)
+
+var (
+ mySQLCommandChartsTmpl = module.Charts{
+ mySQLCommandExecutionRateChartTmpl.Copy(),
+ mySQLCommandExecutionTimeChartTmpl.Copy(),
+ mySQLCommandExecutionDurationHistogramChartTmpl.Copy(),
+ }
+
+ mySQLCommandExecutionRateChartTmpl = module.Chart{
+ ID: "mysql_command_%s_execution_rate",
+ Title: "MySQL command execution",
+ Units: "commands/s",
+ Fam: "command exec",
+ Ctx: "proxysql.mysql_command_execution_rate",
+ Priority: prioMySQLCommandExecutionsRate,
+ Dims: module.Dims{
+ {ID: "mysql_command_%s_Total_cnt", Name: "commands", Algo: module.Incremental},
+ },
+ }
+ mySQLCommandExecutionTimeChartTmpl = module.Chart{
+ ID: "mysql_command_%s_execution_time",
+ Title: "MySQL command execution time",
+ Units: "microseconds",
+ Fam: "command exec time",
+ Ctx: "proxysql.mysql_command_execution_time",
+ Priority: prioMySQLCommandExecutionTime,
+ Dims: module.Dims{
+ {ID: "mysql_command_%s_Total_Time_us", Name: "time", Algo: module.Incremental},
+ },
+ }
+ mySQLCommandExecutionDurationHistogramChartTmpl = module.Chart{
+ ID: "mysql_command_%s_execution_duration",
+ Title: "MySQL command execution duration histogram",
+ Units: "commands/s",
+ Fam: "command exec duration",
+ Ctx: "proxysql.mysql_command_execution_duration",
+ Type: module.Stacked,
+ Priority: prioMySQLCommandExecutionDurationHistogram,
+ Dims: module.Dims{
+ {ID: "mysql_command_%s_cnt_100us", Name: "100us", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_500us", Name: "500us", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_1ms", Name: "1ms", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_5ms", Name: "5ms", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_10ms", Name: "10ms", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_50ms", Name: "50ms", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_100ms", Name: "100ms", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_500ms", Name: "500ms", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_1s", Name: "1s", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_5s", Name: "5s", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_10s", Name: "10s", Algo: module.Incremental},
+ {ID: "mysql_command_%s_cnt_INFs", Name: "+Inf", Algo: module.Incremental},
+ },
+ }
+)
+
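+// newMySQLCommandCountersCharts instantiates the per-command chart templates.
+// For example, command "SELECT" produces chart IDs such as
+// "mysql_command_select_execution_rate" (the command is lower-cased in the
+// chart ID) with dimension IDs such as "mysql_command_SELECT_Total_cnt",
+// matching the keys built in collect.go.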
+func newMySQLCommandCountersCharts(command string) *module.Charts {
+ charts := mySQLCommandChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(command))
+ chart.Labels = []module.Label{{Key: "command", Value: command}}
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, command)
+ }
+ }
+
+ return charts
+}
+
+func (p *ProxySQL) addMySQLCommandCountersCharts(command string) {
+ charts := newMySQLCommandCountersCharts(command)
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *ProxySQL) removeMySQLCommandCountersCharts(command string) {
+ prefix := "mysql_command_" + strings.ToLower(command)
+
+ for _, chart := range *p.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ mySQLUserChartsTmpl = module.Charts{
+ mySQLUserConnectionsUtilizationChartTmpl.Copy(),
+ mySQLUserConnectionsCountChartTmpl.Copy(),
+ }
+
+ mySQLUserConnectionsUtilizationChartTmpl = module.Chart{
+ ID: "mysql_user_%s_connections_utilization",
+ Title: "MySQL user connections utilization",
+ Units: "percentage",
+ Fam: "user conns",
+ Ctx: "proxysql.mysql_user_connections_utilization",
+ Priority: prioMySQLUserConnectionsUtilization,
+ Dims: module.Dims{
+ {ID: "mysql_user_%s_frontend_connections_utilization", Name: "used"},
+ },
+ }
+ mySQLUserConnectionsCountChartTmpl = module.Chart{
+ ID: "mysql_user_%s_connections_count",
+ Title: "MySQL user connections used",
+ Units: "connections",
+ Fam: "user conns",
+ Ctx: "proxysql.mysql_user_connections_count",
+ Priority: prioMySQLUserConnectionsCount,
+ Dims: module.Dims{
+ {ID: "mysql_user_%s_frontend_connections", Name: "used"},
+ },
+ }
+)
+
+func newMySQLUserCharts(username string) *module.Charts {
+ charts := mySQLUserChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, username)
+ chart.Labels = []module.Label{{Key: "user", Value: username}}
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, username)
+ }
+ }
+
+ return charts
+}
+
+func (p *ProxySQL) addMySQLUsersCharts(username string) {
+ charts := newMySQLUserCharts(username)
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *ProxySQL) removeMySQLUserCharts(user string) {
+ prefix := "mysql_user_" + user
+
+ for _, chart := range *p.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ backendChartsTmpl = module.Charts{
+ backendStatusChartTmpl.Copy(),
+ backendConnectionsUsageChartTmpl.Copy(),
+ backendConnectionsRateChartTmpl.Copy(),
+ backendQueriesRateRateChartTmpl.Copy(),
+ backendTrafficChartTmpl.Copy(),
+ backendLatencyChartTmpl.Copy(),
+ }
+
+ backendStatusChartTmpl = module.Chart{
+ ID: "backend_%s_status",
+ Title: "Backend status",
+ Units: "status",
+ Fam: "backend status",
+ Ctx: "proxysql.backend_status",
+ Priority: prioBackendStatus,
+ Dims: module.Dims{
+ {ID: "backend_%s_status_ONLINE", Name: "online"},
+ {ID: "backend_%s_status_SHUNNED", Name: "shunned"},
+ {ID: "backend_%s_status_OFFLINE_SOFT", Name: "offline_soft"},
+ {ID: "backend_%s_status_OFFLINE_HARD", Name: "offline_hard"},
+ },
+ }
+ backendConnectionsUsageChartTmpl = module.Chart{
+ ID: "backend_%s_connections_usage",
+ Title: "Backend connections usage",
+ Units: "connections",
+ Fam: "backend conns usage",
+ Ctx: "proxysql.backend_connections_usage",
+ Type: module.Stacked,
+ Priority: prioBackendConnectionsUsage,
+ Dims: module.Dims{
+ {ID: "backend_%s_ConnFree", Name: "free"},
+ {ID: "backend_%s_ConnUsed", Name: "used"},
+ },
+ }
+ backendConnectionsRateChartTmpl = module.Chart{
+ ID: "backend_%s_connections_rate",
+ Title: "Backend connections established",
+ Units: "connections/s",
+ Fam: "backend conns established",
+ Ctx: "proxysql.backend_connections_rate",
+ Priority: prioBackendConnectionsRate,
+ Dims: module.Dims{
+ {ID: "backend_%s_ConnOK", Name: "succeed", Algo: module.Incremental},
+ {ID: "backend_%s_ConnERR", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ backendQueriesRateRateChartTmpl = module.Chart{
+ ID: "backend_%s_queries_rate",
+ Title: "Backend queries",
+ Units: "queries/s",
+ Fam: "backend queries",
+ Ctx: "proxysql.backend_queries_rate",
+ Priority: prioBackendQueriesRateRate,
+ Dims: module.Dims{
+ {ID: "backend_%s_Queries", Name: "queries", Algo: module.Incremental},
+ },
+ }
+ backendTrafficChartTmpl = module.Chart{
+ ID: "backend_%s_traffic",
+ Title: "Backend traffic",
+ Units: "B/s",
+ Fam: "backend traffic",
+ Ctx: "proxysql.backend_traffic",
+ Priority: prioBackendTraffic,
+ Dims: module.Dims{
+ {ID: "backend_%s_Bytes_data_recv", Name: "recv", Algo: module.Incremental},
+ {ID: "backend_%s_Bytes_data_sent", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ backendLatencyChartTmpl = module.Chart{
+ ID: "backend_%s_latency",
+ Title: "Backend latency",
+ Units: "microseconds",
+ Fam: "backend latency",
+ Ctx: "proxysql.backend_latency",
+ Priority: prioBackendLatency,
+ Dims: module.Dims{
+ {ID: "backend_%s_Latency_us", Name: "latency"},
+ },
+ }
+)
+
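+// newBackendCharts instantiates the per-backend chart templates. Chart and
+// dimension IDs are parameterized with backendID(hg, host, port), which is
+// defined elsewhere in this module, and every chart carries the backend's
+// host and port as labels.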
+func newBackendCharts(hg, host, port string) *module.Charts {
+ charts := backendChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, backendID(hg, host, port))
+ chart.Labels = []module.Label{
+ {Key: "host", Value: host},
+ {Key: "port", Value: port},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, backendID(hg, host, port))
+ }
+ }
+
+ return charts
+}
+
+func (p *ProxySQL) addBackendCharts(hg, host, port string) {
+ charts := newBackendCharts(hg, host, port)
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *ProxySQL) removeBackendCharts(hg, host, port string) {
+ prefix := "backend_" + backendID(hg, host, port)
+
+ for _, chart := range *p.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/proxysql/collect.go b/src/go/plugin/go.d/modules/proxysql/collect.go
new file mode 100644
index 000000000..dfc559a97
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/collect.go
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package proxysql
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+ queryVersion = "select version();"
+ queryStatsMySQLGlobal = "SELECT * FROM stats_mysql_global;"
+ queryStatsMySQLMemoryMetrics = "SELECT * FROM stats_memory_metrics;"
+ queryStatsMySQLCommandsCounters = "SELECT * FROM stats_mysql_commands_counters;"
+ queryStatsMySQLUsers = "SELECT * FROM stats_mysql_users;"
+ queryStatsMySQLConnectionPool = "SELECT * FROM stats_mysql_connection_pool;"
+)
+
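+// collect lazily opens the database connection, logs the ProxySQL version once,
+// resets the cache, gathers all stats tables into a flat metrics map, and then
+// adds/removes charts for commands, users and backends that appeared or disappeared.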
+func (p *ProxySQL) collect() (map[string]int64, error) {
+ if p.db == nil {
+ if err := p.openConnection(); err != nil {
+ return nil, err
+ }
+ }
+
+ p.once.Do(func() {
+ v, err := p.doQueryVersion()
+ if err != nil {
+ p.Warningf("error on querying version: %v", err)
+ } else {
+ p.Debugf("connected to ProxySQL version: %s", v)
+ }
+ })
+
+ p.cache.reset()
+
+ mx := make(map[string]int64)
+
+ if err := p.collectStatsMySQLGlobal(mx); err != nil {
+ return nil, fmt.Errorf("error on collecting mysql global status: %v", err)
+ }
+ if err := p.collectStatsMySQLMemoryMetrics(mx); err != nil {
+ return nil, fmt.Errorf("error on collecting memory metrics: %v", err)
+ }
+ if err := p.collectStatsMySQLCommandsCounters(mx); err != nil {
+ return nil, fmt.Errorf("error on collecting mysql command counters: %v", err)
+ }
+ if err := p.collectStatsMySQLUsers(mx); err != nil {
+ return nil, fmt.Errorf("error on collecting mysql users: %v", err)
+ }
+ if err := p.collectStatsMySQLConnectionPool(mx); err != nil {
+ return nil, fmt.Errorf("error on collecting mysql connection pool: %v", err)
+ }
+
+ p.updateCharts()
+
+ return mx, nil
+}
+
+func (p *ProxySQL) doQueryVersion() (string, error) {
+ q := queryVersion
+ p.Debugf("executing query: '%s'", q)
+
+ var v string
+ if err := p.doQueryRow(q, &v); err != nil {
+ return "", err
+ }
+
+ return v, nil
+}
+
+func (p *ProxySQL) collectStatsMySQLGlobal(mx map[string]int64) error {
+ // https://proxysql.com/documentation/stats-statistics/#stats_mysql_global
+ q := queryStatsMySQLGlobal
+ p.Debugf("executing query: '%s'", q)
+
+ var name string
+ return p.doQuery(q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "Variable_Name":
+ name = value
+ case "Variable_Value":
+ mx[name] = parseInt(value)
+ }
+ })
+}
+
+func (p *ProxySQL) collectStatsMySQLMemoryMetrics(mx map[string]int64) error {
+ // https://proxysql.com/documentation/stats-statistics/#stats_mysql_memory_metrics
+ q := queryStatsMySQLMemoryMetrics
+ p.Debugf("executing query: '%s'", q)
+
+ var name string
+ return p.doQuery(q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "Variable_Name":
+ name = value
+ case "Variable_Value":
+ mx[name] = parseInt(value)
+ }
+ })
+}
+
+func (p *ProxySQL) collectStatsMySQLCommandsCounters(mx map[string]int64) error {
+ // https://proxysql.com/documentation/stats-statistics/#stats_mysql_commands_counters
+ q := queryStatsMySQLCommandsCounters
+ p.Debugf("executing query: '%s'", q)
+
+ var command string
+ return p.doQuery(q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "Command":
+ command = value
+ p.cache.getCommand(command).updated = true
+ default:
+ mx["mysql_command_"+command+"_"+column] = parseInt(value)
+ }
+ })
+}
+
+func (p *ProxySQL) collectStatsMySQLUsers(mx map[string]int64) error {
+ // https://proxysql.com/documentation/stats-statistics/#stats_mysql_users
+ q := queryStatsMySQLUsers
+ p.Debugf("executing query: '%s'", q)
+
+ var user string
+ var used int64
+ return p.doQuery(q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "username":
+ user = value
+ p.cache.getUser(user).updated = true
+ case "frontend_connections":
+ used = parseInt(value)
+ mx["mysql_user_"+user+"_"+column] = used
+ case "frontend_max_connections":
+ mx["mysql_user_"+user+"_frontend_connections_utilization"] = calcPercentage(used, parseInt(value))
+ }
+ })
+}
+
+func (p *ProxySQL) collectStatsMySQLConnectionPool(mx map[string]int64) error {
+ // https://proxysql.com/documentation/stats-statistics/#stats_mysql_connection_pool
+ q := queryStatsMySQLConnectionPool
+ p.Debugf("executing query: '%s'", q)
+
+ var hg, host, port string
+ var px string
+ return p.doQuery(q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "hg", "hostgroup":
+ hg = value
+ case "srv_host":
+ host = value
+ case "srv_port":
+ port = value
+ p.cache.getBackend(hg, host, port).updated = true
+ px = "backend_" + backendID(hg, host, port) + "_"
+ case "status":
+ mx[px+"status_ONLINE"] = boolToInt(value == "1")
+ mx[px+"status_SHUNNED"] = boolToInt(value == "2")
+ mx[px+"status_OFFLINE_SOFT"] = boolToInt(value == "3")
+ mx[px+"status_OFFLINE_HARD"] = boolToInt(value == "4")
+ default:
+ mx[px+column] = parseInt(value)
+ }
+ })
+}
+
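+// updateCharts adds charts for commands, users and backends seen in this collection cycle
+// and drops the cache entries and charts of those that were not updated.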
+func (p *ProxySQL) updateCharts() {
+ for k, m := range p.cache.commands {
+ if !m.updated {
+ delete(p.cache.commands, k)
+ p.removeMySQLCommandCountersCharts(m.command)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addMySQLCommandCountersCharts(m.command)
+ }
+ }
+ for k, m := range p.cache.users {
+ if !m.updated {
+ delete(p.cache.users, k)
+ p.removeMySQLUserCharts(m.user)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addMySQLUsersCharts(m.user)
+ }
+ }
+ for k, m := range p.cache.backends {
+ if !m.updated {
+ delete(p.cache.backends, k)
+ p.removeBackendCharts(m.hg, m.host, m.port)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addBackendCharts(m.hg, m.host, m.port)
+ }
+ }
+}
+
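+// openConnection opens a MySQL-protocol connection using the configured DSN and verifies it with a ping.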
+func (p *ProxySQL) openConnection() error {
+ db, err := sql.Open("mysql", p.DSN)
+ if err != nil {
+ return fmt.Errorf("error on opening a connection with the proxysql instance [%s]: %v", p.DSN, err)
+ }
+
+ db.SetConnMaxLifetime(10 * time.Minute)
+
+ if err := db.Ping(); err != nil {
+ _ = db.Close()
+ return fmt.Errorf("error on pinging the proxysql instance [%s]: %v", p.DSN, err)
+ }
+
+ p.db = db
+ return nil
+}
+
+func (p *ProxySQL) doQueryRow(query string, v any) error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ return p.db.QueryRowContext(ctx, query).Scan(v)
+}
+
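+// doQuery runs the query with the configured timeout and streams every column value of every row to assign.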
+func (p *ProxySQL) doQuery(query string, assign func(column, value string, rowEnd bool)) error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ rows, err := p.db.QueryContext(ctx, query)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = rows.Close() }()
+
+ return readRows(rows, assign)
+}
+
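+// readRows scans each row into sql.NullString values and invokes assign for every column,
+// with rowEnd set to true on the last column of a row.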
+func readRows(rows *sql.Rows, assign func(column, value string, rowEnd bool)) error {
+ columns, err := rows.Columns()
+ if err != nil {
+ return err
+ }
+
+ values := makeValues(len(columns))
+
+ for rows.Next() {
+ if err := rows.Scan(values...); err != nil {
+ return err
+ }
+ for i, l := 0, len(values); i < l; i++ {
+ assign(columns[i], valueToString(values[i]), i == l-1)
+ }
+ }
+ return rows.Err()
+}
+
+func valueToString(value any) string {
+ v, ok := value.(*sql.NullString)
+ if !ok || !v.Valid {
+ return ""
+ }
+ return v.String
+}
+
+func makeValues(size int) []any {
+ vs := make([]any, size)
+ for i := range vs {
+ vs[i] = &sql.NullString{}
+ }
+ return vs
+}
+
+func parseInt(value string) int64 {
+ v, _ := strconv.ParseInt(value, 10, 64)
+ return v
+}
+
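+// calcPercentage returns value as a percentage of total, guarding against division by zero
+// and normalizing a negative result to its absolute value.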
+func calcPercentage(value, total int64) (v int64) {
+ if total == 0 {
+ return 0
+ }
+ if v = value * 100 / total; v < 0 {
+ v = -v
+ }
+ return v
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
+
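+// backendID builds the identifier used in metric and chart IDs: the hostgroup is lowercased
+// with spaces replaced by underscores, and dots in the host are replaced by underscores.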
+func backendID(hg, host, port string) string {
+ hg = strings.ReplaceAll(strings.ToLower(hg), " ", "_")
+ host = strings.ReplaceAll(host, ".", "_")
+ return hg + "_" + host + "_" + port
+}
diff --git a/src/go/plugin/go.d/modules/proxysql/config_schema.json b/src/go/plugin/go.d/modules/proxysql/config_schema.json
new file mode 100644
index 000000000..c0c880a2e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/config_schema.json
@@ -0,0 +1,47 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ProxySQL collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "dsn": {
+ "title": "DSN",
+ "description": "ProxySQL server [Data Source Name (DSN)](https://github.com/go-sql-driver/mysql#dsn-data-source-name) specifying the connection details.",
+ "type": "string",
+ "default": "stats:stats@tcp(127.0.0.1:6032)/"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for queries, in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ },
+ "required": [
+ "dsn"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "dsn": {
+ "ui:placeholder": "username:password@protocol(address)/dbname"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/proxysql/integrations/proxysql.md b/src/go/plugin/go.d/modules/proxysql/integrations/proxysql.md
new file mode 100644
index 000000000..90d42114e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/integrations/proxysql.md
@@ -0,0 +1,309 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/proxysql/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/proxysql/metadata.yaml"
+sidebar_label: "ProxySQL"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ProxySQL
+
+
+<img src="https://netdata.cloud/img/proxysql.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: proxysql
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors ProxySQL servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ProxySQL instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| proxysql.client_connections_count | connected, non_idle, hostgroup_locked | connections |
+| proxysql.client_connections_rate | created, aborted | connections/s |
+| proxysql.server_connections_count | connected | connections |
+| proxysql.server_connections_rate | created, aborted, delayed | connections/s |
+| proxysql.backends_traffic | recv, sent | B/s |
+| proxysql.clients_traffic | recv, sent | B/s |
+| proxysql.active_transactions_count | client | connections |
+| proxysql.questions_rate | questions | questions/s |
+| proxysql.slow_queries_rate | slow | queries/s |
+| proxysql.queries_rate | autocommit, autocommit_filtered, commit_filtered, rollback, rollback_filtered, backend_change_user, backend_init_db, backend_set_names, frontend_init_db, frontend_set_names, frontend_use_db | queries/s |
+| proxysql.backend_statements_count | total, unique | statements |
+| proxysql.backend_statements_rate | prepare, execute, close | statements/s |
+| proxysql.client_statements_count | total, unique | statements |
+| proxysql.client_statements_rate | prepare, execute, close | statements/s |
+| proxysql.cached_statements_count | cached | statements |
+| proxysql.query_cache_entries_count | entries | entries |
+| proxysql.query_cache_memory_used | used | B |
+| proxysql.query_cache_io | in, out | B/s |
+| proxysql.query_cache_requests_rate | read, write, read_success | requests/s |
+| proxysql.mysql_monitor_workers_count | workers, auxiliary | threads |
+| proxysql.mysql_monitor_workers_rate | started | workers/s |
+| proxysql.mysql_monitor_connect_checks_rate | succeed, failed | checks/s |
+| proxysql.mysql_monitor_ping_checks_rate | succeed, failed | checks/s |
+| proxysql.mysql_monitor_read_only_checks_rate | succeed, failed | checks/s |
+| proxysql.mysql_monitor_replication_lag_checks_rate | succeed, failed | checks/s |
+| proxysql.jemalloc_memory_used | active, allocated, mapped, metadata, resident, retained | B |
+| proxysql.memory_used | auth, sqlite3, query_digest, query_rules, firewall_users_table, firewall_users_config, firewall_rules_table, firewall_rules_config, mysql_threads, admin_threads, cluster_threads | B |
+| proxysql.uptime | uptime | seconds |
+
+### Per command
+
+These metrics refer to the SQL command.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| command | SQL command. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| proxysql.mysql_command_execution_rate | uptime | seconds |
+| proxysql.mysql_command_execution_time | time | microseconds |
+| proxysql.mysql_command_execution_duration | 100us, 500us, 1ms, 5ms, 10ms, 50ms, 100ms, 500ms, 1s, 5s, 10s, +Inf | microseconds |
+
+### Per user
+
+These metrics refer to the user.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| user | username from the mysql_users table |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| proxysql.mysql_user_connections_utilization | used | percentage |
+| proxysql.mysql_user_connections_count | used | connections |
+
+### Per backend
+
+These metrics refer to the backend server.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| host | backend server host |
+| port | backend server port |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| proxysql.backend_status | online, shunned, offline_soft, offline_hard | status |
+| proxysql.backend_connections_usage | free, used | connections |
+| proxysql.backend_connections_rate | succeed, failed | connections/s |
+| proxysql.backend_queries_rate | queries | queries/s |
+| proxysql.backend_traffic | recv, sent | B/s |
+| proxysql.backend_latency | latency | microseconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/proxysql.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/proxysql.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| dsn | Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name). | stats:stats@tcp(127.0.0.1:6032)/ | yes |
+| timeout | Query timeout in seconds. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: stats:stats@tcp(127.0.0.1:6032)/
+
+```
+</details>
+
+##### my.cnf
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ my.cnf: '/etc/my.cnf'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: stats:stats@tcp(127.0.0.1:6032)/
+
+ - name: remote
+ dsn: stats:stats@tcp(203.0.113.0:6032)/
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `proxysql` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m proxysql
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `proxysql` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep proxysql
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep proxysql /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep proxysql
+```
+
+
diff --git a/src/go/plugin/go.d/modules/proxysql/metadata.yaml b/src/go/plugin/go.d/modules/proxysql/metadata.yaml
new file mode 100644
index 000000000..2c9562d99
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/metadata.yaml
@@ -0,0 +1,430 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-proxysql
+ plugin_name: go.d.plugin
+ module_name: proxysql
+ monitored_instance:
+ name: ProxySQL
+ link: https://www.proxysql.com/
+ icon_filename: proxysql.png
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - proxysql
+ - databases
+ - sql
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors ProxySQL servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/proxysql.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: dsn
+ description: Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name).
+ default_value: stats:stats@tcp(127.0.0.1:6032)/
+ required: true
+ - name: timeout
+ description: Query timeout in seconds.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: TCP socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ dsn: stats:stats@tcp(127.0.0.1:6032)/
+ - name: my.cnf
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ my.cnf: '/etc/my.cnf'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ dsn: stats:stats@tcp(127.0.0.1:6032)/
+
+ - name: remote
+ dsn: stats:stats@tcp(203.0.113.0:6032)/
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: proxysql.client_connections_count
+ description: Client connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: connected
+ - name: non_idle
+ - name: hostgroup_locked
+ - name: proxysql.client_connections_rate
+ description: Client connections rate
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: aborted
+ - name: proxysql.server_connections_count
+ description: Server connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: connected
+ - name: proxysql.server_connections_rate
+ description: Server connections rate
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: aborted
+ - name: delayed
+ - name: proxysql.backends_traffic
+ description: Backends traffic
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: recv
+ - name: sent
+ - name: proxysql.clients_traffic
+ description: Clients traffic
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: recv
+ - name: sent
+ - name: proxysql.active_transactions_count
+ description: Client connections that are currently processing a transaction
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: client
+ - name: proxysql.questions_rate
+ description: Client requests / statements executed
+ unit: questions/s
+ chart_type: line
+ dimensions:
+ - name: questions
+ - name: proxysql.slow_queries_rate
+ description: Slow queries
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: slow
+ - name: proxysql.queries_rate
+ description: Queries rate
+ unit: queries/s
+ chart_type: stacked
+ dimensions:
+ - name: autocommit
+ - name: autocommit_filtered
+ - name: commit_filtered
+ - name: rollback
+ - name: rollback_filtered
+ - name: backend_change_user
+ - name: backend_init_db
+ - name: backend_set_names
+ - name: frontend_init_db
+ - name: frontend_set_names
+ - name: frontend_use_db
+ - name: proxysql.backend_statements_count
+ description: Statements available across all backend connections
+ unit: statements
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: unique
+ - name: proxysql.backend_statements_rate
+ description: Statements executed against the backends
+ unit: statements/s
+ chart_type: stacked
+ dimensions:
+ - name: prepare
+ - name: execute
+ - name: close
+ - name: proxysql.client_statements_count
+ description: Statements that are in use by clients
+ unit: statements
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: unique
+ - name: proxysql.client_statements_rate
+ description: Statements executed by clients
+ unit: statements/s
+ chart_type: stacked
+ dimensions:
+ - name: prepare
+ - name: execute
+ - name: close
+ - name: proxysql.cached_statements_count
+ description: Global prepared statements
+ unit: statements
+ chart_type: line
+ dimensions:
+ - name: cached
+ - name: proxysql.query_cache_entries_count
+ description: Query Cache entries
+ unit: entries
+ chart_type: line
+ dimensions:
+ - name: entries
+ - name: proxysql.query_cache_memory_used
+ description: Query Cache memory used
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: proxysql.query_cache_io
+ description: Query Cache I/O
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: out
+ - name: proxysql.query_cache_requests_rate
+ description: Query Cache requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: read_success
+ - name: proxysql.mysql_monitor_workers_count
+ description: MySQL monitor workers
+ unit: threads
+ chart_type: line
+ dimensions:
+ - name: workers
+ - name: auxiliary
+ - name: proxysql.mysql_monitor_workers_rate
+ description: MySQL monitor workers rate
+ unit: workers/s
+ chart_type: line
+ dimensions:
+ - name: started
+ - name: proxysql.mysql_monitor_connect_checks_rate
+ description: MySQL monitor connect checks
+ unit: checks/s
+ chart_type: line
+ dimensions:
+ - name: succeed
+ - name: failed
+ - name: proxysql.mysql_monitor_ping_checks_rate
+ description: MySQL monitor ping checks
+ unit: checks/s
+ chart_type: line
+ dimensions:
+ - name: succeed
+ - name: failed
+ - name: proxysql.mysql_monitor_read_only_checks_rate
+ description: MySQL monitor read only checks
+ unit: checks/s
+ chart_type: line
+ dimensions:
+ - name: succeed
+ - name: failed
+ - name: proxysql.mysql_monitor_replication_lag_checks_rate
+ description: MySQL monitor replication lag checks
+ unit: checks/s
+ chart_type: line
+ dimensions:
+ - name: succeed
+ - name: failed
+ - name: proxysql.jemalloc_memory_used
+ description: Jemalloc used memory
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: active
+ - name: allocated
+ - name: mapped
+ - name: metadata
+ - name: resident
+ - name: retained
+ - name: proxysql.memory_used
+ description: Memory used
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: auth
+ - name: sqlite3
+ - name: query_digest
+ - name: query_rules
+ - name: firewall_users_table
+ - name: firewall_users_config
+ - name: firewall_rules_table
+ - name: firewall_rules_config
+ - name: mysql_threads
+ - name: admin_threads
+ - name: cluster_threads
+ - name: proxysql.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: command
+ description: These metrics refer to the SQL command.
+ labels:
+ - name: command
+ description: SQL command.
+ metrics:
+ - name: proxysql.mysql_command_execution_rate
+ description: MySQL command execution
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: proxysql.mysql_command_execution_time
+ description: MySQL command execution time
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: proxysql.mysql_command_execution_duration
+ description: MySQL command execution duration histogram
+ unit: microseconds
+ chart_type: stacked
+ dimensions:
+ - name: 100us
+ - name: 500us
+ - name: 1ms
+ - name: 5ms
+ - name: 10ms
+ - name: 50ms
+ - name: 100ms
+ - name: 500ms
+ - name: 1s
+ - name: 5s
+ - name: 10s
+ - name: +Inf
+ - name: user
+ description: These metrics refer to the user.
+ labels:
+ - name: user
+ description: username from the mysql_users table
+ metrics:
+ - name: proxysql.mysql_user_connections_utilization
+ description: MySQL user connections utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: proxysql.mysql_user_connections_count
+ description: MySQL user connections used
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: backend
+ description: These metrics refer to the backend server.
+ labels:
+ - name: host
+ description: backend server host
+ - name: port
+ description: backend server port
+ metrics:
+ - name: proxysql.backend_status
+ description: Backend status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: online
+ - name: shunned
+ - name: offline_soft
+ - name: offline_hard
+ - name: proxysql.backend_connections_usage
+ description: Backend connections usage
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: free
+ - name: used
+ - name: proxysql.backend_connections_rate
+ description: Backend connections established
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: succeed
+ - name: failed
+ - name: proxysql.backend_queries_rate
+ description: Backend queries
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: proxysql.backend_traffic
+ description: Backend traffic
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: recv
+ - name: sent
+ - name: proxysql.backend_latency
+ description: Backend latency
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: latency
diff --git a/src/go/plugin/go.d/modules/proxysql/proxysql.go b/src/go/plugin/go.d/modules/proxysql/proxysql.go
new file mode 100644
index 000000000..fc4677b1d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/proxysql.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package proxysql
+
+import (
+ "database/sql"
+ _ "embed"
+ "errors"
+ "sync"
+ "time"
+
+ _ "github.com/go-sql-driver/mysql"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("proxysql", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
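+// New returns a ProxySQL collector preconfigured with the default stats interface DSN
+// (stats:stats@tcp(127.0.0.1:6032)/), a 1-second query timeout, base charts and empty caches.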
+func New() *ProxySQL {
+ return &ProxySQL{
+ Config: Config{
+ DSN: "stats:stats@tcp(127.0.0.1:6032)/",
+ Timeout: web.Duration(time.Second),
+ },
+
+ charts: baseCharts.Copy(),
+ once: &sync.Once{},
+ cache: &cache{
+ commands: make(map[string]*commandCache),
+ users: make(map[string]*userCache),
+ backends: make(map[string]*backendCache),
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ DSN string `yaml:"dsn" json:"dsn"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type ProxySQL struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ db *sql.DB
+
+ once *sync.Once
+ cache *cache
+}
+
+func (p *ProxySQL) Configuration() any {
+ return p.Config
+}
+
+func (p *ProxySQL) Init() error {
+ if p.DSN == "" {
+ p.Error("dsn not set")
+ return errors.New("dsn not set")
+ }
+
+ p.Debugf("using DSN [%s]", p.DSN)
+
+ return nil
+}
+
+func (p *ProxySQL) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *ProxySQL) Charts() *module.Charts {
+ return p.charts
+}
+
+func (p *ProxySQL) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (p *ProxySQL) Cleanup() {
+ if p.db == nil {
+ return
+ }
+ if err := p.db.Close(); err != nil {
+ p.Errorf("cleanup: error on closing the ProxySQL instance [%s]: %v", p.DSN, err)
+ }
+ p.db = nil
+}
diff --git a/src/go/plugin/go.d/modules/proxysql/proxysql_test.go b/src/go/plugin/go.d/modules/proxysql/proxysql_test.go
new file mode 100644
index 000000000..860e9032f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/proxysql_test.go
@@ -0,0 +1,1240 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package proxysql
+
+import (
+ "bufio"
+ "bytes"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer2010Version, _ = os.ReadFile("testdata/v2.0.10/version.txt")
+ dataVer2010StatsMySQLGlobal, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_global.txt")
+ dataVer2010StatsMemoryMetrics, _ = os.ReadFile("testdata/v2.0.10/stats_memory_metrics.txt")
+ dataVer2010StatsMySQLCommandsCounters, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_commands_counters.txt")
+ dataVer2010StatsMySQLUsers, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_users.txt")
+ dataVer2010StatsMySQLConnectionPool, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_connection_pool .txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer2010Version": dataVer2010Version,
+ "dataVer2010StatsMySQLGlobal": dataVer2010StatsMySQLGlobal,
+ "dataVer2010StatsMemoryMetrics": dataVer2010StatsMemoryMetrics,
+ "dataVer2010StatsMySQLCommandsCounters": dataVer2010StatsMySQLCommandsCounters,
+ "dataVer2010StatsMySQLUsers": dataVer2010StatsMySQLUsers,
+ "dataVer2010StatsMySQLConnectionPool": dataVer2010StatsMySQLConnectionPool,
+ } {
+ require.NotNil(t, data, name)
+ _, err := prepareMockRows(data)
+ require.NoError(t, err, name)
+ }
+}
+
+func TestProxySQL_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &ProxySQL{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestProxySQL_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "empty DSN": {
+ wantFail: true,
+ config: Config{DSN: ""},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ proxySQL := New()
+ proxySQL.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, proxySQL.Init())
+ } else {
+ assert.NoError(t, proxySQL.Init())
+ }
+ })
+ }
+}
+
+func TestProxySQL_Cleanup(t *testing.T) {
+ tests := map[string]func(t *testing.T) (proxySQL *ProxySQL, cleanup func()){
+ "db connection not initialized": func(t *testing.T) (proxySQL *ProxySQL, cleanup func()) {
+ return New(), func() {}
+ },
+ "db connection initialized": func(t *testing.T) (proxySQL *ProxySQL, cleanup func()) {
+ db, mock, err := sqlmock.New()
+ require.NoError(t, err)
+
+ mock.ExpectClose()
+ proxySQL = New()
+ proxySQL.db = db
+ cleanup = func() { _ = db.Close() }
+
+ return proxySQL, cleanup
+ },
+ }
+
+ for name, prepare := range tests {
+ t.Run(name, func(t *testing.T) {
+ proxySQL, cleanup := prepare(t)
+ defer cleanup()
+
+ assert.NotPanics(t, proxySQL.Cleanup)
+ assert.Nil(t, proxySQL.db)
+ })
+ }
+}
+
+func TestProxySQL_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestProxySQL_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func(t *testing.T, m sqlmock.Sqlmock)
+ wantFail bool
+ }{
+ "success on all queries": {
+ wantFail: false,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
+ mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataVer2010StatsMemoryMetrics)
+ mockExpect(t, m, queryStatsMySQLCommandsCounters, dataVer2010StatsMySQLCommandsCounters)
+ mockExpect(t, m, queryStatsMySQLUsers, dataVer2010StatsMySQLUsers)
+ mockExpect(t, m, queryStatsMySQLConnectionPool, dataVer2010StatsMySQLConnectionPool)
+ },
+ },
+ "fails when error on querying global stats": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpectErr(m, queryStatsMySQLGlobal)
+ },
+ },
+ "fails when error on querying memory metrics": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
+ mockExpectErr(m, queryStatsMySQLMemoryMetrics)
+ },
+ },
+ "fails when error on querying mysql command counters": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
+ mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataVer2010StatsMemoryMetrics)
+ mockExpectErr(m, queryStatsMySQLCommandsCounters)
+ },
+ },
+ "fails when error on querying mysql users": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
+ mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataVer2010StatsMemoryMetrics)
+ mockExpect(t, m, queryStatsMySQLCommandsCounters, dataVer2010StatsMySQLCommandsCounters)
+ mockExpectErr(m, queryStatsMySQLUsers)
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ proxySQL := New()
+ proxySQL.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, proxySQL.Init())
+
+ test.prepareMock(t, mock)
+
+ if test.wantFail {
+ assert.Error(t, proxySQL.Check())
+ } else {
+ assert.NoError(t, proxySQL.Check())
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
+func TestProxySQL_Collect(t *testing.T) {
+ type testCaseStep struct {
+ prepareMock func(t *testing.T, m sqlmock.Sqlmock)
+ check func(t *testing.T, my *ProxySQL)
+ }
+ tests := map[string][]testCaseStep{
+
+ "success on all queries (v2.0.10)": {
+ {
+ prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
+ mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataVer2010StatsMemoryMetrics)
+ mockExpect(t, m, queryStatsMySQLCommandsCounters, dataVer2010StatsMySQLCommandsCounters)
+ mockExpect(t, m, queryStatsMySQLUsers, dataVer2010StatsMySQLUsers)
+ mockExpect(t, m, queryStatsMySQLConnectionPool, dataVer2010StatsMySQLConnectionPool)
+ },
+ check: func(t *testing.T, my *ProxySQL) {
+ mx := my.Collect()
+
+ expected := map[string]int64{
+ "Access_Denied_Max_Connections": 0,
+ "Access_Denied_Max_User_Connections": 0,
+ "Access_Denied_Wrong_Password": 2,
+ "Active_Transactions": 0,
+ "Auth_memory": 1044,
+ "Backend_query_time_nsec": 0,
+ "Client_Connections_aborted": 2,
+ "Client_Connections_connected": 3,
+ "Client_Connections_created": 5458991,
+ "Client_Connections_hostgroup_locked": 0,
+ "Client_Connections_non_idle": 3,
+ "Com_autocommit": 0,
+ "Com_autocommit_filtered": 0,
+ "Com_backend_change_user": 188694,
+ "Com_backend_init_db": 0,
+ "Com_backend_set_names": 1517893,
+ "Com_backend_stmt_close": 0,
+ "Com_backend_stmt_execute": 36303146,
+ "Com_backend_stmt_prepare": 16858208,
+ "Com_commit": 0,
+ "Com_commit_filtered": 0,
+ "Com_frontend_init_db": 2,
+ "Com_frontend_set_names": 0,
+ "Com_frontend_stmt_close": 32137933,
+ "Com_frontend_stmt_execute": 36314138,
+ "Com_frontend_stmt_prepare": 32185987,
+ "Com_frontend_use_db": 0,
+ "Com_rollback": 0,
+ "Com_rollback_filtered": 0,
+ "ConnPool_get_conn_failure": 212943,
+ "ConnPool_get_conn_immediate": 13361,
+ "ConnPool_get_conn_latency_awareness": 0,
+ "ConnPool_get_conn_success": 36319474,
+ "ConnPool_memory_bytes": 932248,
+ "GTID_consistent_queries": 0,
+ "GTID_session_collected": 0,
+ "Mirror_concurrency": 0,
+ "Mirror_queue_length": 0,
+ "MyHGM_myconnpoll_destroy": 15150,
+ "MyHGM_myconnpoll_get": 36519056,
+ "MyHGM_myconnpoll_get_ok": 36306113,
+ "MyHGM_myconnpoll_push": 37358734,
+ "MyHGM_myconnpoll_reset": 2,
+ "MySQL_Monitor_Workers": 10,
+ "MySQL_Monitor_Workers_Aux": 0,
+ "MySQL_Monitor_Workers_Started": 10,
+ "MySQL_Monitor_connect_check_ERR": 130,
+ "MySQL_Monitor_connect_check_OK": 3548306,
+ "MySQL_Monitor_ping_check_ERR": 108271,
+ "MySQL_Monitor_ping_check_OK": 21289849,
+ "MySQL_Monitor_read_only_check_ERR": 19610,
+ "MySQL_Monitor_read_only_check_OK": 106246409,
+ "MySQL_Monitor_replication_lag_check_ERR": 482,
+ "MySQL_Monitor_replication_lag_check_OK": 28702388,
+ "MySQL_Thread_Workers": 4,
+ "ProxySQL_Uptime": 26748286,
+ "Queries_backends_bytes_recv": 5896210168,
+ "Queries_backends_bytes_sent": 4329581500,
+ "Queries_frontends_bytes_recv": 7434816962,
+ "Queries_frontends_bytes_sent": 11643634097,
+ "Query_Cache_Entries": 0,
+ "Query_Cache_Memory_bytes": 0,
+ "Query_Cache_Purged": 0,
+ "Query_Cache_bytes_IN": 0,
+ "Query_Cache_bytes_OUT": 0,
+ "Query_Cache_count_GET": 0,
+ "Query_Cache_count_GET_OK": 0,
+ "Query_Cache_count_SET": 0,
+ "Query_Processor_time_nsec": 0,
+ "Questions": 100638067,
+ "SQLite3_memory_bytes": 6017144,
+ "Selects_for_update__autocommit0": 0,
+ "Server_Connections_aborted": 9979,
+ "Server_Connections_connected": 13,
+ "Server_Connections_created": 2122254,
+ "Server_Connections_delayed": 0,
+ "Servers_table_version": 37,
+ "Slow_queries": 405818,
+ "Stmt_Cached": 65,
+ "Stmt_Client_Active_Total": 18,
+ "Stmt_Client_Active_Unique": 18,
+ "Stmt_Max_Stmt_id": 66,
+ "Stmt_Server_Active_Total": 101,
+ "Stmt_Server_Active_Unique": 39,
+ "automatic_detected_sql_injection": 0,
+ "aws_aurora_replicas_skipped_during_query": 0,
+ "backend_10_back001-db-master_6001_Bytes_data_recv": 145193069937,
+ "backend_10_back001-db-master_6001_Bytes_data_sent": 9858463664,
+ "backend_10_back001-db-master_6001_ConnERR": 0,
+ "backend_10_back001-db-master_6001_ConnFree": 423,
+ "backend_10_back001-db-master_6001_ConnOK": 524,
+ "backend_10_back001-db-master_6001_ConnUsed": 69,
+ "backend_10_back001-db-master_6001_Latency_us": 17684,
+ "backend_10_back001-db-master_6001_Queries": 8970367,
+ "backend_10_back001-db-master_6001_status_OFFLINE_HARD": 0,
+ "backend_10_back001-db-master_6001_status_OFFLINE_SOFT": 0,
+ "backend_10_back001-db-master_6001_status_ONLINE": 0,
+ "backend_10_back001-db-master_6001_status_SHUNNED": 0,
+ "backend_11_back001-db-master_6002_Bytes_data_recv": 2903,
+ "backend_11_back001-db-master_6002_Bytes_data_sent": 187675,
+ "backend_11_back001-db-master_6002_ConnERR": 0,
+ "backend_11_back001-db-master_6002_ConnFree": 1,
+ "backend_11_back001-db-master_6002_ConnOK": 1,
+ "backend_11_back001-db-master_6002_ConnUsed": 0,
+ "backend_11_back001-db-master_6002_Latency_us": 17684,
+ "backend_11_back001-db-master_6002_Queries": 69,
+ "backend_11_back001-db-master_6002_status_OFFLINE_HARD": 0,
+ "backend_11_back001-db-master_6002_status_OFFLINE_SOFT": 0,
+ "backend_11_back001-db-master_6002_status_ONLINE": 0,
+ "backend_11_back001-db-master_6002_status_SHUNNED": 0,
+ "backend_11_back001-db-reader_6003_Bytes_data_recv": 4994101,
+ "backend_11_back001-db-reader_6003_Bytes_data_sent": 163690013,
+ "backend_11_back001-db-reader_6003_ConnERR": 0,
+ "backend_11_back001-db-reader_6003_ConnFree": 11,
+ "backend_11_back001-db-reader_6003_ConnOK": 11,
+ "backend_11_back001-db-reader_6003_ConnUsed": 0,
+ "backend_11_back001-db-reader_6003_Latency_us": 113,
+ "backend_11_back001-db-reader_6003_Queries": 63488,
+ "backend_11_back001-db-reader_6003_status_OFFLINE_HARD": 0,
+ "backend_11_back001-db-reader_6003_status_OFFLINE_SOFT": 0,
+ "backend_11_back001-db-reader_6003_status_ONLINE": 0,
+ "backend_11_back001-db-reader_6003_status_SHUNNED": 0,
+ "backend_20_back002-db-master_6004_Bytes_data_recv": 266034339,
+ "backend_20_back002-db-master_6004_Bytes_data_sent": 1086994186,
+ "backend_20_back002-db-master_6004_ConnERR": 2,
+ "backend_20_back002-db-master_6004_ConnFree": 188,
+ "backend_20_back002-db-master_6004_ConnOK": 197,
+ "backend_20_back002-db-master_6004_ConnUsed": 9,
+ "backend_20_back002-db-master_6004_Latency_us": 101981,
+ "backend_20_back002-db-master_6004_Queries": 849461,
+ "backend_20_back002-db-master_6004_status_OFFLINE_HARD": 0,
+ "backend_20_back002-db-master_6004_status_OFFLINE_SOFT": 0,
+ "backend_20_back002-db-master_6004_status_ONLINE": 0,
+ "backend_20_back002-db-master_6004_status_SHUNNED": 0,
+ "backend_21_back002-db-reader_6005_Bytes_data_recv": 984,
+ "backend_21_back002-db-reader_6005_Bytes_data_sent": 6992,
+ "backend_21_back002-db-reader_6005_ConnERR": 0,
+ "backend_21_back002-db-reader_6005_ConnFree": 1,
+ "backend_21_back002-db-reader_6005_ConnOK": 1,
+ "backend_21_back002-db-reader_6005_ConnUsed": 0,
+ "backend_21_back002-db-reader_6005_Latency_us": 230,
+ "backend_21_back002-db-reader_6005_Queries": 8,
+ "backend_21_back002-db-reader_6005_status_OFFLINE_HARD": 0,
+ "backend_21_back002-db-reader_6005_status_OFFLINE_SOFT": 0,
+ "backend_21_back002-db-reader_6005_status_ONLINE": 0,
+ "backend_21_back002-db-reader_6005_status_SHUNNED": 0,
+ "backend_31_back003-db-master_6006_Bytes_data_recv": 81438709,
+ "backend_31_back003-db-master_6006_Bytes_data_sent": 712803,
+ "backend_31_back003-db-master_6006_ConnERR": 0,
+ "backend_31_back003-db-master_6006_ConnFree": 3,
+ "backend_31_back003-db-master_6006_ConnOK": 3,
+ "backend_31_back003-db-master_6006_ConnUsed": 0,
+ "backend_31_back003-db-master_6006_Latency_us": 231,
+ "backend_31_back003-db-master_6006_Queries": 3276,
+ "backend_31_back003-db-master_6006_status_OFFLINE_HARD": 0,
+ "backend_31_back003-db-master_6006_status_OFFLINE_SOFT": 0,
+ "backend_31_back003-db-master_6006_status_ONLINE": 0,
+ "backend_31_back003-db-master_6006_status_SHUNNED": 0,
+ "backend_31_back003-db-reader_6007_Bytes_data_recv": 115810708275,
+ "backend_31_back003-db-reader_6007_Bytes_data_sent": 411900849,
+ "backend_31_back003-db-reader_6007_ConnERR": 0,
+ "backend_31_back003-db-reader_6007_ConnFree": 70,
+ "backend_31_back003-db-reader_6007_ConnOK": 71,
+ "backend_31_back003-db-reader_6007_ConnUsed": 1,
+ "backend_31_back003-db-reader_6007_Latency_us": 230,
+ "backend_31_back003-db-reader_6007_Queries": 2356904,
+ "backend_31_back003-db-reader_6007_status_OFFLINE_HARD": 0,
+ "backend_31_back003-db-reader_6007_status_OFFLINE_SOFT": 0,
+ "backend_31_back003-db-reader_6007_status_ONLINE": 0,
+ "backend_31_back003-db-reader_6007_status_SHUNNED": 0,
+ "backend_lagging_during_query": 8880,
+ "backend_offline_during_query": 8,
+ "generated_error_packets": 231,
+ "hostgroup_locked_queries": 0,
+ "hostgroup_locked_set_cmds": 0,
+ "jemalloc_active": 385101824,
+ "jemalloc_allocated": 379402432,
+ "jemalloc_mapped": 430993408,
+ "jemalloc_metadata": 17418872,
+ "jemalloc_resident": 403759104,
+ "jemalloc_retained": 260542464,
+ "max_connect_timeouts": 227,
+ "mysql_backend_buffers_bytes": 0,
+ "mysql_command_ALTER_TABLE_Total_Time_us": 0,
+ "mysql_command_ALTER_TABLE_Total_cnt": 0,
+ "mysql_command_ALTER_TABLE_cnt_100ms": 0,
+ "mysql_command_ALTER_TABLE_cnt_100us": 0,
+ "mysql_command_ALTER_TABLE_cnt_10ms": 0,
+ "mysql_command_ALTER_TABLE_cnt_10s": 0,
+ "mysql_command_ALTER_TABLE_cnt_1ms": 0,
+ "mysql_command_ALTER_TABLE_cnt_1s": 0,
+ "mysql_command_ALTER_TABLE_cnt_500ms": 0,
+ "mysql_command_ALTER_TABLE_cnt_500us": 0,
+ "mysql_command_ALTER_TABLE_cnt_50ms": 0,
+ "mysql_command_ALTER_TABLE_cnt_5ms": 0,
+ "mysql_command_ALTER_TABLE_cnt_5s": 0,
+ "mysql_command_ALTER_TABLE_cnt_INFs": 0,
+ "mysql_command_ALTER_VIEW_Total_Time_us": 0,
+ "mysql_command_ALTER_VIEW_Total_cnt": 0,
+ "mysql_command_ALTER_VIEW_cnt_100ms": 0,
+ "mysql_command_ALTER_VIEW_cnt_100us": 0,
+ "mysql_command_ALTER_VIEW_cnt_10ms": 0,
+ "mysql_command_ALTER_VIEW_cnt_10s": 0,
+ "mysql_command_ALTER_VIEW_cnt_1ms": 0,
+ "mysql_command_ALTER_VIEW_cnt_1s": 0,
+ "mysql_command_ALTER_VIEW_cnt_500ms": 0,
+ "mysql_command_ALTER_VIEW_cnt_500us": 0,
+ "mysql_command_ALTER_VIEW_cnt_50ms": 0,
+ "mysql_command_ALTER_VIEW_cnt_5ms": 0,
+ "mysql_command_ALTER_VIEW_cnt_5s": 0,
+ "mysql_command_ALTER_VIEW_cnt_INFs": 0,
+ "mysql_command_ANALYZE_TABLE_Total_Time_us": 0,
+ "mysql_command_ANALYZE_TABLE_Total_cnt": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_100ms": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_100us": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_10ms": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_10s": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_1ms": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_1s": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_500ms": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_500us": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_50ms": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_5ms": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_5s": 0,
+ "mysql_command_ANALYZE_TABLE_cnt_INFs": 0,
+ "mysql_command_BEGIN_Total_Time_us": 0,
+ "mysql_command_BEGIN_Total_cnt": 0,
+ "mysql_command_BEGIN_cnt_100ms": 0,
+ "mysql_command_BEGIN_cnt_100us": 0,
+ "mysql_command_BEGIN_cnt_10ms": 0,
+ "mysql_command_BEGIN_cnt_10s": 0,
+ "mysql_command_BEGIN_cnt_1ms": 0,
+ "mysql_command_BEGIN_cnt_1s": 0,
+ "mysql_command_BEGIN_cnt_500ms": 0,
+ "mysql_command_BEGIN_cnt_500us": 0,
+ "mysql_command_BEGIN_cnt_50ms": 0,
+ "mysql_command_BEGIN_cnt_5ms": 0,
+ "mysql_command_BEGIN_cnt_5s": 0,
+ "mysql_command_BEGIN_cnt_INFs": 0,
+ "mysql_command_CALL_Total_Time_us": 0,
+ "mysql_command_CALL_Total_cnt": 0,
+ "mysql_command_CALL_cnt_100ms": 0,
+ "mysql_command_CALL_cnt_100us": 0,
+ "mysql_command_CALL_cnt_10ms": 0,
+ "mysql_command_CALL_cnt_10s": 0,
+ "mysql_command_CALL_cnt_1ms": 0,
+ "mysql_command_CALL_cnt_1s": 0,
+ "mysql_command_CALL_cnt_500ms": 0,
+ "mysql_command_CALL_cnt_500us": 0,
+ "mysql_command_CALL_cnt_50ms": 0,
+ "mysql_command_CALL_cnt_5ms": 0,
+ "mysql_command_CALL_cnt_5s": 0,
+ "mysql_command_CALL_cnt_INFs": 0,
+ "mysql_command_CHANGE_MASTER_Total_Time_us": 0,
+ "mysql_command_CHANGE_MASTER_Total_cnt": 0,
+ "mysql_command_CHANGE_MASTER_cnt_100ms": 0,
+ "mysql_command_CHANGE_MASTER_cnt_100us": 0,
+ "mysql_command_CHANGE_MASTER_cnt_10ms": 0,
+ "mysql_command_CHANGE_MASTER_cnt_10s": 0,
+ "mysql_command_CHANGE_MASTER_cnt_1ms": 0,
+ "mysql_command_CHANGE_MASTER_cnt_1s": 0,
+ "mysql_command_CHANGE_MASTER_cnt_500ms": 0,
+ "mysql_command_CHANGE_MASTER_cnt_500us": 0,
+ "mysql_command_CHANGE_MASTER_cnt_50ms": 0,
+ "mysql_command_CHANGE_MASTER_cnt_5ms": 0,
+ "mysql_command_CHANGE_MASTER_cnt_5s": 0,
+ "mysql_command_CHANGE_MASTER_cnt_INFs": 0,
+ "mysql_command_COMMIT_Total_Time_us": 0,
+ "mysql_command_COMMIT_Total_cnt": 0,
+ "mysql_command_COMMIT_cnt_100ms": 0,
+ "mysql_command_COMMIT_cnt_100us": 0,
+ "mysql_command_COMMIT_cnt_10ms": 0,
+ "mysql_command_COMMIT_cnt_10s": 0,
+ "mysql_command_COMMIT_cnt_1ms": 0,
+ "mysql_command_COMMIT_cnt_1s": 0,
+ "mysql_command_COMMIT_cnt_500ms": 0,
+ "mysql_command_COMMIT_cnt_500us": 0,
+ "mysql_command_COMMIT_cnt_50ms": 0,
+ "mysql_command_COMMIT_cnt_5ms": 0,
+ "mysql_command_COMMIT_cnt_5s": 0,
+ "mysql_command_COMMIT_cnt_INFs": 0,
+ "mysql_command_CREATE_DATABASE_Total_Time_us": 0,
+ "mysql_command_CREATE_DATABASE_Total_cnt": 0,
+ "mysql_command_CREATE_DATABASE_cnt_100ms": 0,
+ "mysql_command_CREATE_DATABASE_cnt_100us": 0,
+ "mysql_command_CREATE_DATABASE_cnt_10ms": 0,
+ "mysql_command_CREATE_DATABASE_cnt_10s": 0,
+ "mysql_command_CREATE_DATABASE_cnt_1ms": 0,
+ "mysql_command_CREATE_DATABASE_cnt_1s": 0,
+ "mysql_command_CREATE_DATABASE_cnt_500ms": 0,
+ "mysql_command_CREATE_DATABASE_cnt_500us": 0,
+ "mysql_command_CREATE_DATABASE_cnt_50ms": 0,
+ "mysql_command_CREATE_DATABASE_cnt_5ms": 0,
+ "mysql_command_CREATE_DATABASE_cnt_5s": 0,
+ "mysql_command_CREATE_DATABASE_cnt_INFs": 0,
+ "mysql_command_CREATE_INDEX_Total_Time_us": 0,
+ "mysql_command_CREATE_INDEX_Total_cnt": 0,
+ "mysql_command_CREATE_INDEX_cnt_100ms": 0,
+ "mysql_command_CREATE_INDEX_cnt_100us": 0,
+ "mysql_command_CREATE_INDEX_cnt_10ms": 0,
+ "mysql_command_CREATE_INDEX_cnt_10s": 0,
+ "mysql_command_CREATE_INDEX_cnt_1ms": 0,
+ "mysql_command_CREATE_INDEX_cnt_1s": 0,
+ "mysql_command_CREATE_INDEX_cnt_500ms": 0,
+ "mysql_command_CREATE_INDEX_cnt_500us": 0,
+ "mysql_command_CREATE_INDEX_cnt_50ms": 0,
+ "mysql_command_CREATE_INDEX_cnt_5ms": 0,
+ "mysql_command_CREATE_INDEX_cnt_5s": 0,
+ "mysql_command_CREATE_INDEX_cnt_INFs": 0,
+ "mysql_command_CREATE_TABLE_Total_Time_us": 0,
+ "mysql_command_CREATE_TABLE_Total_cnt": 0,
+ "mysql_command_CREATE_TABLE_cnt_100ms": 0,
+ "mysql_command_CREATE_TABLE_cnt_100us": 0,
+ "mysql_command_CREATE_TABLE_cnt_10ms": 0,
+ "mysql_command_CREATE_TABLE_cnt_10s": 0,
+ "mysql_command_CREATE_TABLE_cnt_1ms": 0,
+ "mysql_command_CREATE_TABLE_cnt_1s": 0,
+ "mysql_command_CREATE_TABLE_cnt_500ms": 0,
+ "mysql_command_CREATE_TABLE_cnt_500us": 0,
+ "mysql_command_CREATE_TABLE_cnt_50ms": 0,
+ "mysql_command_CREATE_TABLE_cnt_5ms": 0,
+ "mysql_command_CREATE_TABLE_cnt_5s": 0,
+ "mysql_command_CREATE_TABLE_cnt_INFs": 0,
+ "mysql_command_CREATE_TEMPORARY_Total_Time_us": 0,
+ "mysql_command_CREATE_TEMPORARY_Total_cnt": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_100ms": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_100us": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_10ms": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_10s": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_1ms": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_1s": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_500ms": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_500us": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_50ms": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_5ms": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_5s": 0,
+ "mysql_command_CREATE_TEMPORARY_cnt_INFs": 0,
+ "mysql_command_CREATE_TRIGGER_Total_Time_us": 0,
+ "mysql_command_CREATE_TRIGGER_Total_cnt": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_100ms": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_100us": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_10ms": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_10s": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_1ms": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_1s": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_500ms": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_500us": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_50ms": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_5ms": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_5s": 0,
+ "mysql_command_CREATE_TRIGGER_cnt_INFs": 0,
+ "mysql_command_CREATE_USER_Total_Time_us": 0,
+ "mysql_command_CREATE_USER_Total_cnt": 0,
+ "mysql_command_CREATE_USER_cnt_100ms": 0,
+ "mysql_command_CREATE_USER_cnt_100us": 0,
+ "mysql_command_CREATE_USER_cnt_10ms": 0,
+ "mysql_command_CREATE_USER_cnt_10s": 0,
+ "mysql_command_CREATE_USER_cnt_1ms": 0,
+ "mysql_command_CREATE_USER_cnt_1s": 0,
+ "mysql_command_CREATE_USER_cnt_500ms": 0,
+ "mysql_command_CREATE_USER_cnt_500us": 0,
+ "mysql_command_CREATE_USER_cnt_50ms": 0,
+ "mysql_command_CREATE_USER_cnt_5ms": 0,
+ "mysql_command_CREATE_USER_cnt_5s": 0,
+ "mysql_command_CREATE_USER_cnt_INFs": 0,
+ "mysql_command_CREATE_VIEW_Total_Time_us": 0,
+ "mysql_command_CREATE_VIEW_Total_cnt": 0,
+ "mysql_command_CREATE_VIEW_cnt_100ms": 0,
+ "mysql_command_CREATE_VIEW_cnt_100us": 0,
+ "mysql_command_CREATE_VIEW_cnt_10ms": 0,
+ "mysql_command_CREATE_VIEW_cnt_10s": 0,
+ "mysql_command_CREATE_VIEW_cnt_1ms": 0,
+ "mysql_command_CREATE_VIEW_cnt_1s": 0,
+ "mysql_command_CREATE_VIEW_cnt_500ms": 0,
+ "mysql_command_CREATE_VIEW_cnt_500us": 0,
+ "mysql_command_CREATE_VIEW_cnt_50ms": 0,
+ "mysql_command_CREATE_VIEW_cnt_5ms": 0,
+ "mysql_command_CREATE_VIEW_cnt_5s": 0,
+ "mysql_command_CREATE_VIEW_cnt_INFs": 0,
+ "mysql_command_DEALLOCATE_Total_Time_us": 0,
+ "mysql_command_DEALLOCATE_Total_cnt": 0,
+ "mysql_command_DEALLOCATE_cnt_100ms": 0,
+ "mysql_command_DEALLOCATE_cnt_100us": 0,
+ "mysql_command_DEALLOCATE_cnt_10ms": 0,
+ "mysql_command_DEALLOCATE_cnt_10s": 0,
+ "mysql_command_DEALLOCATE_cnt_1ms": 0,
+ "mysql_command_DEALLOCATE_cnt_1s": 0,
+ "mysql_command_DEALLOCATE_cnt_500ms": 0,
+ "mysql_command_DEALLOCATE_cnt_500us": 0,
+ "mysql_command_DEALLOCATE_cnt_50ms": 0,
+ "mysql_command_DEALLOCATE_cnt_5ms": 0,
+ "mysql_command_DEALLOCATE_cnt_5s": 0,
+ "mysql_command_DEALLOCATE_cnt_INFs": 0,
+ "mysql_command_DELETE_Total_Time_us": 0,
+ "mysql_command_DELETE_Total_cnt": 0,
+ "mysql_command_DELETE_cnt_100ms": 0,
+ "mysql_command_DELETE_cnt_100us": 0,
+ "mysql_command_DELETE_cnt_10ms": 0,
+ "mysql_command_DELETE_cnt_10s": 0,
+ "mysql_command_DELETE_cnt_1ms": 0,
+ "mysql_command_DELETE_cnt_1s": 0,
+ "mysql_command_DELETE_cnt_500ms": 0,
+ "mysql_command_DELETE_cnt_500us": 0,
+ "mysql_command_DELETE_cnt_50ms": 0,
+ "mysql_command_DELETE_cnt_5ms": 0,
+ "mysql_command_DELETE_cnt_5s": 0,
+ "mysql_command_DELETE_cnt_INFs": 0,
+ "mysql_command_DESCRIBE_Total_Time_us": 0,
+ "mysql_command_DESCRIBE_Total_cnt": 0,
+ "mysql_command_DESCRIBE_cnt_100ms": 0,
+ "mysql_command_DESCRIBE_cnt_100us": 0,
+ "mysql_command_DESCRIBE_cnt_10ms": 0,
+ "mysql_command_DESCRIBE_cnt_10s": 0,
+ "mysql_command_DESCRIBE_cnt_1ms": 0,
+ "mysql_command_DESCRIBE_cnt_1s": 0,
+ "mysql_command_DESCRIBE_cnt_500ms": 0,
+ "mysql_command_DESCRIBE_cnt_500us": 0,
+ "mysql_command_DESCRIBE_cnt_50ms": 0,
+ "mysql_command_DESCRIBE_cnt_5ms": 0,
+ "mysql_command_DESCRIBE_cnt_5s": 0,
+ "mysql_command_DESCRIBE_cnt_INFs": 0,
+ "mysql_command_DROP_DATABASE_Total_Time_us": 0,
+ "mysql_command_DROP_DATABASE_Total_cnt": 0,
+ "mysql_command_DROP_DATABASE_cnt_100ms": 0,
+ "mysql_command_DROP_DATABASE_cnt_100us": 0,
+ "mysql_command_DROP_DATABASE_cnt_10ms": 0,
+ "mysql_command_DROP_DATABASE_cnt_10s": 0,
+ "mysql_command_DROP_DATABASE_cnt_1ms": 0,
+ "mysql_command_DROP_DATABASE_cnt_1s": 0,
+ "mysql_command_DROP_DATABASE_cnt_500ms": 0,
+ "mysql_command_DROP_DATABASE_cnt_500us": 0,
+ "mysql_command_DROP_DATABASE_cnt_50ms": 0,
+ "mysql_command_DROP_DATABASE_cnt_5ms": 0,
+ "mysql_command_DROP_DATABASE_cnt_5s": 0,
+ "mysql_command_DROP_DATABASE_cnt_INFs": 0,
+ "mysql_command_DROP_INDEX_Total_Time_us": 0,
+ "mysql_command_DROP_INDEX_Total_cnt": 0,
+ "mysql_command_DROP_INDEX_cnt_100ms": 0,
+ "mysql_command_DROP_INDEX_cnt_100us": 0,
+ "mysql_command_DROP_INDEX_cnt_10ms": 0,
+ "mysql_command_DROP_INDEX_cnt_10s": 0,
+ "mysql_command_DROP_INDEX_cnt_1ms": 0,
+ "mysql_command_DROP_INDEX_cnt_1s": 0,
+ "mysql_command_DROP_INDEX_cnt_500ms": 0,
+ "mysql_command_DROP_INDEX_cnt_500us": 0,
+ "mysql_command_DROP_INDEX_cnt_50ms": 0,
+ "mysql_command_DROP_INDEX_cnt_5ms": 0,
+ "mysql_command_DROP_INDEX_cnt_5s": 0,
+ "mysql_command_DROP_INDEX_cnt_INFs": 0,
+ "mysql_command_DROP_TABLE_Total_Time_us": 0,
+ "mysql_command_DROP_TABLE_Total_cnt": 0,
+ "mysql_command_DROP_TABLE_cnt_100ms": 0,
+ "mysql_command_DROP_TABLE_cnt_100us": 0,
+ "mysql_command_DROP_TABLE_cnt_10ms": 0,
+ "mysql_command_DROP_TABLE_cnt_10s": 0,
+ "mysql_command_DROP_TABLE_cnt_1ms": 0,
+ "mysql_command_DROP_TABLE_cnt_1s": 0,
+ "mysql_command_DROP_TABLE_cnt_500ms": 0,
+ "mysql_command_DROP_TABLE_cnt_500us": 0,
+ "mysql_command_DROP_TABLE_cnt_50ms": 0,
+ "mysql_command_DROP_TABLE_cnt_5ms": 0,
+ "mysql_command_DROP_TABLE_cnt_5s": 0,
+ "mysql_command_DROP_TABLE_cnt_INFs": 0,
+ "mysql_command_DROP_TRIGGER_Total_Time_us": 0,
+ "mysql_command_DROP_TRIGGER_Total_cnt": 0,
+ "mysql_command_DROP_TRIGGER_cnt_100ms": 0,
+ "mysql_command_DROP_TRIGGER_cnt_100us": 0,
+ "mysql_command_DROP_TRIGGER_cnt_10ms": 0,
+ "mysql_command_DROP_TRIGGER_cnt_10s": 0,
+ "mysql_command_DROP_TRIGGER_cnt_1ms": 0,
+ "mysql_command_DROP_TRIGGER_cnt_1s": 0,
+ "mysql_command_DROP_TRIGGER_cnt_500ms": 0,
+ "mysql_command_DROP_TRIGGER_cnt_500us": 0,
+ "mysql_command_DROP_TRIGGER_cnt_50ms": 0,
+ "mysql_command_DROP_TRIGGER_cnt_5ms": 0,
+ "mysql_command_DROP_TRIGGER_cnt_5s": 0,
+ "mysql_command_DROP_TRIGGER_cnt_INFs": 0,
+ "mysql_command_DROP_USER_Total_Time_us": 0,
+ "mysql_command_DROP_USER_Total_cnt": 0,
+ "mysql_command_DROP_USER_cnt_100ms": 0,
+ "mysql_command_DROP_USER_cnt_100us": 0,
+ "mysql_command_DROP_USER_cnt_10ms": 0,
+ "mysql_command_DROP_USER_cnt_10s": 0,
+ "mysql_command_DROP_USER_cnt_1ms": 0,
+ "mysql_command_DROP_USER_cnt_1s": 0,
+ "mysql_command_DROP_USER_cnt_500ms": 0,
+ "mysql_command_DROP_USER_cnt_500us": 0,
+ "mysql_command_DROP_USER_cnt_50ms": 0,
+ "mysql_command_DROP_USER_cnt_5ms": 0,
+ "mysql_command_DROP_USER_cnt_5s": 0,
+ "mysql_command_DROP_USER_cnt_INFs": 0,
+ "mysql_command_DROP_VIEW_Total_Time_us": 0,
+ "mysql_command_DROP_VIEW_Total_cnt": 0,
+ "mysql_command_DROP_VIEW_cnt_100ms": 0,
+ "mysql_command_DROP_VIEW_cnt_100us": 0,
+ "mysql_command_DROP_VIEW_cnt_10ms": 0,
+ "mysql_command_DROP_VIEW_cnt_10s": 0,
+ "mysql_command_DROP_VIEW_cnt_1ms": 0,
+ "mysql_command_DROP_VIEW_cnt_1s": 0,
+ "mysql_command_DROP_VIEW_cnt_500ms": 0,
+ "mysql_command_DROP_VIEW_cnt_500us": 0,
+ "mysql_command_DROP_VIEW_cnt_50ms": 0,
+ "mysql_command_DROP_VIEW_cnt_5ms": 0,
+ "mysql_command_DROP_VIEW_cnt_5s": 0,
+ "mysql_command_DROP_VIEW_cnt_INFs": 0,
+ "mysql_command_EXECUTE_Total_Time_us": 0,
+ "mysql_command_EXECUTE_Total_cnt": 0,
+ "mysql_command_EXECUTE_cnt_100ms": 0,
+ "mysql_command_EXECUTE_cnt_100us": 0,
+ "mysql_command_EXECUTE_cnt_10ms": 0,
+ "mysql_command_EXECUTE_cnt_10s": 0,
+ "mysql_command_EXECUTE_cnt_1ms": 0,
+ "mysql_command_EXECUTE_cnt_1s": 0,
+ "mysql_command_EXECUTE_cnt_500ms": 0,
+ "mysql_command_EXECUTE_cnt_500us": 0,
+ "mysql_command_EXECUTE_cnt_50ms": 0,
+ "mysql_command_EXECUTE_cnt_5ms": 0,
+ "mysql_command_EXECUTE_cnt_5s": 0,
+ "mysql_command_EXECUTE_cnt_INFs": 0,
+ "mysql_command_EXPLAIN_Total_Time_us": 0,
+ "mysql_command_EXPLAIN_Total_cnt": 0,
+ "mysql_command_EXPLAIN_cnt_100ms": 0,
+ "mysql_command_EXPLAIN_cnt_100us": 0,
+ "mysql_command_EXPLAIN_cnt_10ms": 0,
+ "mysql_command_EXPLAIN_cnt_10s": 0,
+ "mysql_command_EXPLAIN_cnt_1ms": 0,
+ "mysql_command_EXPLAIN_cnt_1s": 0,
+ "mysql_command_EXPLAIN_cnt_500ms": 0,
+ "mysql_command_EXPLAIN_cnt_500us": 0,
+ "mysql_command_EXPLAIN_cnt_50ms": 0,
+ "mysql_command_EXPLAIN_cnt_5ms": 0,
+ "mysql_command_EXPLAIN_cnt_5s": 0,
+ "mysql_command_EXPLAIN_cnt_INFs": 0,
+ "mysql_command_FLUSH_Total_Time_us": 0,
+ "mysql_command_FLUSH_Total_cnt": 0,
+ "mysql_command_FLUSH_cnt_100ms": 0,
+ "mysql_command_FLUSH_cnt_100us": 0,
+ "mysql_command_FLUSH_cnt_10ms": 0,
+ "mysql_command_FLUSH_cnt_10s": 0,
+ "mysql_command_FLUSH_cnt_1ms": 0,
+ "mysql_command_FLUSH_cnt_1s": 0,
+ "mysql_command_FLUSH_cnt_500ms": 0,
+ "mysql_command_FLUSH_cnt_500us": 0,
+ "mysql_command_FLUSH_cnt_50ms": 0,
+ "mysql_command_FLUSH_cnt_5ms": 0,
+ "mysql_command_FLUSH_cnt_5s": 0,
+ "mysql_command_FLUSH_cnt_INFs": 0,
+ "mysql_command_GRANT_Total_Time_us": 0,
+ "mysql_command_GRANT_Total_cnt": 0,
+ "mysql_command_GRANT_cnt_100ms": 0,
+ "mysql_command_GRANT_cnt_100us": 0,
+ "mysql_command_GRANT_cnt_10ms": 0,
+ "mysql_command_GRANT_cnt_10s": 0,
+ "mysql_command_GRANT_cnt_1ms": 0,
+ "mysql_command_GRANT_cnt_1s": 0,
+ "mysql_command_GRANT_cnt_500ms": 0,
+ "mysql_command_GRANT_cnt_500us": 0,
+ "mysql_command_GRANT_cnt_50ms": 0,
+ "mysql_command_GRANT_cnt_5ms": 0,
+ "mysql_command_GRANT_cnt_5s": 0,
+ "mysql_command_GRANT_cnt_INFs": 0,
+ "mysql_command_INSERT_Total_Time_us": 0,
+ "mysql_command_INSERT_Total_cnt": 0,
+ "mysql_command_INSERT_cnt_100ms": 0,
+ "mysql_command_INSERT_cnt_100us": 0,
+ "mysql_command_INSERT_cnt_10ms": 0,
+ "mysql_command_INSERT_cnt_10s": 0,
+ "mysql_command_INSERT_cnt_1ms": 0,
+ "mysql_command_INSERT_cnt_1s": 0,
+ "mysql_command_INSERT_cnt_500ms": 0,
+ "mysql_command_INSERT_cnt_500us": 0,
+ "mysql_command_INSERT_cnt_50ms": 0,
+ "mysql_command_INSERT_cnt_5ms": 0,
+ "mysql_command_INSERT_cnt_5s": 0,
+ "mysql_command_INSERT_cnt_INFs": 0,
+ "mysql_command_KILL_Total_Time_us": 0,
+ "mysql_command_KILL_Total_cnt": 0,
+ "mysql_command_KILL_cnt_100ms": 0,
+ "mysql_command_KILL_cnt_100us": 0,
+ "mysql_command_KILL_cnt_10ms": 0,
+ "mysql_command_KILL_cnt_10s": 0,
+ "mysql_command_KILL_cnt_1ms": 0,
+ "mysql_command_KILL_cnt_1s": 0,
+ "mysql_command_KILL_cnt_500ms": 0,
+ "mysql_command_KILL_cnt_500us": 0,
+ "mysql_command_KILL_cnt_50ms": 0,
+ "mysql_command_KILL_cnt_5ms": 0,
+ "mysql_command_KILL_cnt_5s": 0,
+ "mysql_command_KILL_cnt_INFs": 0,
+ "mysql_command_LOAD_Total_Time_us": 0,
+ "mysql_command_LOAD_Total_cnt": 0,
+ "mysql_command_LOAD_cnt_100ms": 0,
+ "mysql_command_LOAD_cnt_100us": 0,
+ "mysql_command_LOAD_cnt_10ms": 0,
+ "mysql_command_LOAD_cnt_10s": 0,
+ "mysql_command_LOAD_cnt_1ms": 0,
+ "mysql_command_LOAD_cnt_1s": 0,
+ "mysql_command_LOAD_cnt_500ms": 0,
+ "mysql_command_LOAD_cnt_500us": 0,
+ "mysql_command_LOAD_cnt_50ms": 0,
+ "mysql_command_LOAD_cnt_5ms": 0,
+ "mysql_command_LOAD_cnt_5s": 0,
+ "mysql_command_LOAD_cnt_INFs": 0,
+ "mysql_command_LOCK_TABLE_Total_Time_us": 0,
+ "mysql_command_LOCK_TABLE_Total_cnt": 0,
+ "mysql_command_LOCK_TABLE_cnt_100ms": 0,
+ "mysql_command_LOCK_TABLE_cnt_100us": 0,
+ "mysql_command_LOCK_TABLE_cnt_10ms": 0,
+ "mysql_command_LOCK_TABLE_cnt_10s": 0,
+ "mysql_command_LOCK_TABLE_cnt_1ms": 0,
+ "mysql_command_LOCK_TABLE_cnt_1s": 0,
+ "mysql_command_LOCK_TABLE_cnt_500ms": 0,
+ "mysql_command_LOCK_TABLE_cnt_500us": 0,
+ "mysql_command_LOCK_TABLE_cnt_50ms": 0,
+ "mysql_command_LOCK_TABLE_cnt_5ms": 0,
+ "mysql_command_LOCK_TABLE_cnt_5s": 0,
+ "mysql_command_LOCK_TABLE_cnt_INFs": 0,
+ "mysql_command_OPTIMIZE_Total_Time_us": 0,
+ "mysql_command_OPTIMIZE_Total_cnt": 0,
+ "mysql_command_OPTIMIZE_cnt_100ms": 0,
+ "mysql_command_OPTIMIZE_cnt_100us": 0,
+ "mysql_command_OPTIMIZE_cnt_10ms": 0,
+ "mysql_command_OPTIMIZE_cnt_10s": 0,
+ "mysql_command_OPTIMIZE_cnt_1ms": 0,
+ "mysql_command_OPTIMIZE_cnt_1s": 0,
+ "mysql_command_OPTIMIZE_cnt_500ms": 0,
+ "mysql_command_OPTIMIZE_cnt_500us": 0,
+ "mysql_command_OPTIMIZE_cnt_50ms": 0,
+ "mysql_command_OPTIMIZE_cnt_5ms": 0,
+ "mysql_command_OPTIMIZE_cnt_5s": 0,
+ "mysql_command_OPTIMIZE_cnt_INFs": 0,
+ "mysql_command_PREPARE_Total_Time_us": 0,
+ "mysql_command_PREPARE_Total_cnt": 0,
+ "mysql_command_PREPARE_cnt_100ms": 0,
+ "mysql_command_PREPARE_cnt_100us": 0,
+ "mysql_command_PREPARE_cnt_10ms": 0,
+ "mysql_command_PREPARE_cnt_10s": 0,
+ "mysql_command_PREPARE_cnt_1ms": 0,
+ "mysql_command_PREPARE_cnt_1s": 0,
+ "mysql_command_PREPARE_cnt_500ms": 0,
+ "mysql_command_PREPARE_cnt_500us": 0,
+ "mysql_command_PREPARE_cnt_50ms": 0,
+ "mysql_command_PREPARE_cnt_5ms": 0,
+ "mysql_command_PREPARE_cnt_5s": 0,
+ "mysql_command_PREPARE_cnt_INFs": 0,
+ "mysql_command_PURGE_Total_Time_us": 0,
+ "mysql_command_PURGE_Total_cnt": 0,
+ "mysql_command_PURGE_cnt_100ms": 0,
+ "mysql_command_PURGE_cnt_100us": 0,
+ "mysql_command_PURGE_cnt_10ms": 0,
+ "mysql_command_PURGE_cnt_10s": 0,
+ "mysql_command_PURGE_cnt_1ms": 0,
+ "mysql_command_PURGE_cnt_1s": 0,
+ "mysql_command_PURGE_cnt_500ms": 0,
+ "mysql_command_PURGE_cnt_500us": 0,
+ "mysql_command_PURGE_cnt_50ms": 0,
+ "mysql_command_PURGE_cnt_5ms": 0,
+ "mysql_command_PURGE_cnt_5s": 0,
+ "mysql_command_PURGE_cnt_INFs": 0,
+ "mysql_command_RENAME_TABLE_Total_Time_us": 0,
+ "mysql_command_RENAME_TABLE_Total_cnt": 0,
+ "mysql_command_RENAME_TABLE_cnt_100ms": 0,
+ "mysql_command_RENAME_TABLE_cnt_100us": 0,
+ "mysql_command_RENAME_TABLE_cnt_10ms": 0,
+ "mysql_command_RENAME_TABLE_cnt_10s": 0,
+ "mysql_command_RENAME_TABLE_cnt_1ms": 0,
+ "mysql_command_RENAME_TABLE_cnt_1s": 0,
+ "mysql_command_RENAME_TABLE_cnt_500ms": 0,
+ "mysql_command_RENAME_TABLE_cnt_500us": 0,
+ "mysql_command_RENAME_TABLE_cnt_50ms": 0,
+ "mysql_command_RENAME_TABLE_cnt_5ms": 0,
+ "mysql_command_RENAME_TABLE_cnt_5s": 0,
+ "mysql_command_RENAME_TABLE_cnt_INFs": 0,
+ "mysql_command_REPLACE_Total_Time_us": 0,
+ "mysql_command_REPLACE_Total_cnt": 0,
+ "mysql_command_REPLACE_cnt_100ms": 0,
+ "mysql_command_REPLACE_cnt_100us": 0,
+ "mysql_command_REPLACE_cnt_10ms": 0,
+ "mysql_command_REPLACE_cnt_10s": 0,
+ "mysql_command_REPLACE_cnt_1ms": 0,
+ "mysql_command_REPLACE_cnt_1s": 0,
+ "mysql_command_REPLACE_cnt_500ms": 0,
+ "mysql_command_REPLACE_cnt_500us": 0,
+ "mysql_command_REPLACE_cnt_50ms": 0,
+ "mysql_command_REPLACE_cnt_5ms": 0,
+ "mysql_command_REPLACE_cnt_5s": 0,
+ "mysql_command_REPLACE_cnt_INFs": 0,
+ "mysql_command_RESET_MASTER_Total_Time_us": 0,
+ "mysql_command_RESET_MASTER_Total_cnt": 0,
+ "mysql_command_RESET_MASTER_cnt_100ms": 0,
+ "mysql_command_RESET_MASTER_cnt_100us": 0,
+ "mysql_command_RESET_MASTER_cnt_10ms": 0,
+ "mysql_command_RESET_MASTER_cnt_10s": 0,
+ "mysql_command_RESET_MASTER_cnt_1ms": 0,
+ "mysql_command_RESET_MASTER_cnt_1s": 0,
+ "mysql_command_RESET_MASTER_cnt_500ms": 0,
+ "mysql_command_RESET_MASTER_cnt_500us": 0,
+ "mysql_command_RESET_MASTER_cnt_50ms": 0,
+ "mysql_command_RESET_MASTER_cnt_5ms": 0,
+ "mysql_command_RESET_MASTER_cnt_5s": 0,
+ "mysql_command_RESET_MASTER_cnt_INFs": 0,
+ "mysql_command_RESET_SLAVE_Total_Time_us": 0,
+ "mysql_command_RESET_SLAVE_Total_cnt": 0,
+ "mysql_command_RESET_SLAVE_cnt_100ms": 0,
+ "mysql_command_RESET_SLAVE_cnt_100us": 0,
+ "mysql_command_RESET_SLAVE_cnt_10ms": 0,
+ "mysql_command_RESET_SLAVE_cnt_10s": 0,
+ "mysql_command_RESET_SLAVE_cnt_1ms": 0,
+ "mysql_command_RESET_SLAVE_cnt_1s": 0,
+ "mysql_command_RESET_SLAVE_cnt_500ms": 0,
+ "mysql_command_RESET_SLAVE_cnt_500us": 0,
+ "mysql_command_RESET_SLAVE_cnt_50ms": 0,
+ "mysql_command_RESET_SLAVE_cnt_5ms": 0,
+ "mysql_command_RESET_SLAVE_cnt_5s": 0,
+ "mysql_command_RESET_SLAVE_cnt_INFs": 0,
+ "mysql_command_REVOKE_Total_Time_us": 0,
+ "mysql_command_REVOKE_Total_cnt": 0,
+ "mysql_command_REVOKE_cnt_100ms": 0,
+ "mysql_command_REVOKE_cnt_100us": 0,
+ "mysql_command_REVOKE_cnt_10ms": 0,
+ "mysql_command_REVOKE_cnt_10s": 0,
+ "mysql_command_REVOKE_cnt_1ms": 0,
+ "mysql_command_REVOKE_cnt_1s": 0,
+ "mysql_command_REVOKE_cnt_500ms": 0,
+ "mysql_command_REVOKE_cnt_500us": 0,
+ "mysql_command_REVOKE_cnt_50ms": 0,
+ "mysql_command_REVOKE_cnt_5ms": 0,
+ "mysql_command_REVOKE_cnt_5s": 0,
+ "mysql_command_REVOKE_cnt_INFs": 0,
+ "mysql_command_ROLLBACK_Total_Time_us": 0,
+ "mysql_command_ROLLBACK_Total_cnt": 0,
+ "mysql_command_ROLLBACK_cnt_100ms": 0,
+ "mysql_command_ROLLBACK_cnt_100us": 0,
+ "mysql_command_ROLLBACK_cnt_10ms": 0,
+ "mysql_command_ROLLBACK_cnt_10s": 0,
+ "mysql_command_ROLLBACK_cnt_1ms": 0,
+ "mysql_command_ROLLBACK_cnt_1s": 0,
+ "mysql_command_ROLLBACK_cnt_500ms": 0,
+ "mysql_command_ROLLBACK_cnt_500us": 0,
+ "mysql_command_ROLLBACK_cnt_50ms": 0,
+ "mysql_command_ROLLBACK_cnt_5ms": 0,
+ "mysql_command_ROLLBACK_cnt_5s": 0,
+ "mysql_command_ROLLBACK_cnt_INFs": 0,
+ "mysql_command_SAVEPOINT_Total_Time_us": 0,
+ "mysql_command_SAVEPOINT_Total_cnt": 0,
+ "mysql_command_SAVEPOINT_cnt_100ms": 0,
+ "mysql_command_SAVEPOINT_cnt_100us": 0,
+ "mysql_command_SAVEPOINT_cnt_10ms": 0,
+ "mysql_command_SAVEPOINT_cnt_10s": 0,
+ "mysql_command_SAVEPOINT_cnt_1ms": 0,
+ "mysql_command_SAVEPOINT_cnt_1s": 0,
+ "mysql_command_SAVEPOINT_cnt_500ms": 0,
+ "mysql_command_SAVEPOINT_cnt_500us": 0,
+ "mysql_command_SAVEPOINT_cnt_50ms": 0,
+ "mysql_command_SAVEPOINT_cnt_5ms": 0,
+ "mysql_command_SAVEPOINT_cnt_5s": 0,
+ "mysql_command_SAVEPOINT_cnt_INFs": 0,
+ "mysql_command_SELECT_FOR_UPDATE_Total_Time_us": 0,
+ "mysql_command_SELECT_FOR_UPDATE_Total_cnt": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_100ms": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_100us": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_10ms": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_10s": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_1ms": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_1s": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_500ms": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_500us": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_50ms": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_5ms": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_5s": 0,
+ "mysql_command_SELECT_FOR_UPDATE_cnt_INFs": 0,
+ "mysql_command_SELECT_Total_Time_us": 4673958076637,
+ "mysql_command_SELECT_Total_cnt": 68490650,
+ "mysql_command_SELECT_cnt_100ms": 4909816,
+ "mysql_command_SELECT_cnt_100us": 32185976,
+ "mysql_command_SELECT_cnt_10ms": 2955830,
+ "mysql_command_SELECT_cnt_10s": 497,
+ "mysql_command_SELECT_cnt_1ms": 481335,
+ "mysql_command_SELECT_cnt_1s": 1321917,
+ "mysql_command_SELECT_cnt_500ms": 11123900,
+ "mysql_command_SELECT_cnt_500us": 36650,
+ "mysql_command_SELECT_cnt_50ms": 10468460,
+ "mysql_command_SELECT_cnt_5ms": 4600948,
+ "mysql_command_SELECT_cnt_5s": 403451,
+ "mysql_command_SELECT_cnt_INFs": 1870,
+ "mysql_command_SET_Total_Time_us": 0,
+ "mysql_command_SET_Total_cnt": 0,
+ "mysql_command_SET_cnt_100ms": 0,
+ "mysql_command_SET_cnt_100us": 0,
+ "mysql_command_SET_cnt_10ms": 0,
+ "mysql_command_SET_cnt_10s": 0,
+ "mysql_command_SET_cnt_1ms": 0,
+ "mysql_command_SET_cnt_1s": 0,
+ "mysql_command_SET_cnt_500ms": 0,
+ "mysql_command_SET_cnt_500us": 0,
+ "mysql_command_SET_cnt_50ms": 0,
+ "mysql_command_SET_cnt_5ms": 0,
+ "mysql_command_SET_cnt_5s": 0,
+ "mysql_command_SET_cnt_INFs": 0,
+ "mysql_command_SHOW_TABLE_STATUS_Total_Time_us": 0,
+ "mysql_command_SHOW_TABLE_STATUS_Total_cnt": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_100ms": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_100us": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_10ms": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_10s": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_1ms": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_1s": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_500ms": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_500us": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_50ms": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_5ms": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_5s": 0,
+ "mysql_command_SHOW_TABLE_STATUS_cnt_INFs": 0,
+ "mysql_command_SHOW_Total_Time_us": 2158,
+ "mysql_command_SHOW_Total_cnt": 1,
+ "mysql_command_SHOW_cnt_100ms": 0,
+ "mysql_command_SHOW_cnt_100us": 0,
+ "mysql_command_SHOW_cnt_10ms": 0,
+ "mysql_command_SHOW_cnt_10s": 0,
+ "mysql_command_SHOW_cnt_1ms": 0,
+ "mysql_command_SHOW_cnt_1s": 0,
+ "mysql_command_SHOW_cnt_500ms": 0,
+ "mysql_command_SHOW_cnt_500us": 0,
+ "mysql_command_SHOW_cnt_50ms": 0,
+ "mysql_command_SHOW_cnt_5ms": 1,
+ "mysql_command_SHOW_cnt_5s": 0,
+ "mysql_command_SHOW_cnt_INFs": 0,
+ "mysql_command_START_TRANSACTION_Total_Time_us": 0,
+ "mysql_command_START_TRANSACTION_Total_cnt": 0,
+ "mysql_command_START_TRANSACTION_cnt_100ms": 0,
+ "mysql_command_START_TRANSACTION_cnt_100us": 0,
+ "mysql_command_START_TRANSACTION_cnt_10ms": 0,
+ "mysql_command_START_TRANSACTION_cnt_10s": 0,
+ "mysql_command_START_TRANSACTION_cnt_1ms": 0,
+ "mysql_command_START_TRANSACTION_cnt_1s": 0,
+ "mysql_command_START_TRANSACTION_cnt_500ms": 0,
+ "mysql_command_START_TRANSACTION_cnt_500us": 0,
+ "mysql_command_START_TRANSACTION_cnt_50ms": 0,
+ "mysql_command_START_TRANSACTION_cnt_5ms": 0,
+ "mysql_command_START_TRANSACTION_cnt_5s": 0,
+ "mysql_command_START_TRANSACTION_cnt_INFs": 0,
+ "mysql_command_TRUNCATE_TABLE_Total_Time_us": 0,
+ "mysql_command_TRUNCATE_TABLE_Total_cnt": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_100ms": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_100us": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_10ms": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_10s": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_1ms": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_1s": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_500ms": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_500us": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_50ms": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_5ms": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_5s": 0,
+ "mysql_command_TRUNCATE_TABLE_cnt_INFs": 0,
+ "mysql_command_UNKNOWN_Total_Time_us": 0,
+ "mysql_command_UNKNOWN_Total_cnt": 0,
+ "mysql_command_UNKNOWN_cnt_100ms": 0,
+ "mysql_command_UNKNOWN_cnt_100us": 0,
+ "mysql_command_UNKNOWN_cnt_10ms": 0,
+ "mysql_command_UNKNOWN_cnt_10s": 0,
+ "mysql_command_UNKNOWN_cnt_1ms": 0,
+ "mysql_command_UNKNOWN_cnt_1s": 0,
+ "mysql_command_UNKNOWN_cnt_500ms": 0,
+ "mysql_command_UNKNOWN_cnt_500us": 0,
+ "mysql_command_UNKNOWN_cnt_50ms": 0,
+ "mysql_command_UNKNOWN_cnt_5ms": 0,
+ "mysql_command_UNKNOWN_cnt_5s": 0,
+ "mysql_command_UNKNOWN_cnt_INFs": 0,
+ "mysql_command_UNLOCK_TABLES_Total_Time_us": 0,
+ "mysql_command_UNLOCK_TABLES_Total_cnt": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_100ms": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_100us": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_10ms": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_10s": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_1ms": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_1s": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_500ms": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_500us": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_50ms": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_5ms": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_5s": 0,
+ "mysql_command_UNLOCK_TABLES_cnt_INFs": 0,
+ "mysql_command_UPDATE_Total_Time_us": 0,
+ "mysql_command_UPDATE_Total_cnt": 0,
+ "mysql_command_UPDATE_cnt_100ms": 0,
+ "mysql_command_UPDATE_cnt_100us": 0,
+ "mysql_command_UPDATE_cnt_10ms": 0,
+ "mysql_command_UPDATE_cnt_10s": 0,
+ "mysql_command_UPDATE_cnt_1ms": 0,
+ "mysql_command_UPDATE_cnt_1s": 0,
+ "mysql_command_UPDATE_cnt_500ms": 0,
+ "mysql_command_UPDATE_cnt_500us": 0,
+ "mysql_command_UPDATE_cnt_50ms": 0,
+ "mysql_command_UPDATE_cnt_5ms": 0,
+ "mysql_command_UPDATE_cnt_5s": 0,
+ "mysql_command_UPDATE_cnt_INFs": 0,
+ "mysql_command_USE_Total_Time_us": 0,
+ "mysql_command_USE_Total_cnt": 0,
+ "mysql_command_USE_cnt_100ms": 0,
+ "mysql_command_USE_cnt_100us": 0,
+ "mysql_command_USE_cnt_10ms": 0,
+ "mysql_command_USE_cnt_10s": 0,
+ "mysql_command_USE_cnt_1ms": 0,
+ "mysql_command_USE_cnt_1s": 0,
+ "mysql_command_USE_cnt_500ms": 0,
+ "mysql_command_USE_cnt_500us": 0,
+ "mysql_command_USE_cnt_50ms": 0,
+ "mysql_command_USE_cnt_5ms": 0,
+ "mysql_command_USE_cnt_5s": 0,
+ "mysql_command_USE_cnt_INFs": 0,
+ "mysql_firewall_rules_config": 329,
+ "mysql_firewall_rules_table": 0,
+ "mysql_firewall_users_config": 0,
+ "mysql_firewall_users_table": 0,
+ "mysql_frontend_buffers_bytes": 196608,
+ "mysql_killed_backend_connections": 0,
+ "mysql_killed_backend_queries": 0,
+ "mysql_query_rules_memory": 22825,
+ "mysql_session_internal_bytes": 20232,
+ "mysql_unexpected_frontend_com_quit": 0,
+ "mysql_unexpected_frontend_packets": 0,
+ "mysql_user_first_user_frontend_connections": 0,
+ "mysql_user_first_user_frontend_connections_utilization": 0,
+ "mysql_user_second_user_frontend_connections": 3,
+ "mysql_user_second_user_frontend_connections_utilization": 20,
+ "queries_with_max_lag_ms": 0,
+ "queries_with_max_lag_ms__delayed": 0,
+ "queries_with_max_lag_ms__total_wait_time_us": 0,
+ "query_digest_memory": 13688,
+ "stack_memory_admin_threads": 16777216,
+ "stack_memory_cluster_threads": 0,
+ "stack_memory_mysql_threads": 33554432,
+ "whitelisted_sqli_fingerprint": 0,
+ }
+
+ require.Equal(t, expected, mx)
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ my := New()
+ my.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, my.Init())
+
+ for i, step := range test {
+ t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
+ step.prepareMock(t, mock)
+ step.check(t, my)
+ })
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
+func mustMockRows(t *testing.T, data []byte) *sqlmock.Rows {
+ rows, err := prepareMockRows(data)
+ require.NoError(t, err)
+ return rows
+}
+
+func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) {
+ mock.ExpectQuery(query).WillReturnRows(mustMockRows(t, rows)).RowsWillBeClosed()
+}
+
+func mockExpectErr(mock sqlmock.Sqlmock, query string) {
+ mock.ExpectQuery(query).WillReturnError(fmt.Errorf("mock error (%s)", query))
+}
+
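+// prepareMockRows converts a mysql-CLI-style ASCII table dump (see the testdata/*.txt fixtures)
+// into sqlmock rows: border and blank lines are skipped, the first pipe-delimited line supplies
+// the column names, and every remaining line becomes one row of string values.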
+func prepareMockRows(data []byte) (*sqlmock.Rows, error) {
+ if len(data) == 0 {
+ return sqlmock.NewRows(nil), nil
+ }
+
+ r := bytes.NewReader(data)
+ sc := bufio.NewScanner(r)
+
+ var numColumns int
+ var rows *sqlmock.Rows
+
+ for sc.Scan() {
+ s := strings.TrimSpace(strings.Trim(sc.Text(), "|"))
+ switch {
+ case s == "",
+ strings.HasPrefix(s, "+"),
+ strings.HasPrefix(s, "ft_boolean_syntax"):
+ continue
+ }
+
+ parts := strings.Split(s, "|")
+ for i, v := range parts {
+ parts[i] = strings.TrimSpace(v)
+ }
+
+ if rows == nil {
+ numColumns = len(parts)
+ rows = sqlmock.NewRows(parts)
+ continue
+ }
+
+ if len(parts) != numColumns {
+ return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts))
+ }
+
+ values := make([]driver.Value, len(parts))
+ for i, v := range parts {
+ values[i] = v
+ }
+ rows.AddRow(values...)
+ }
+
+ if rows == nil {
+ return nil, errors.New("prepareMockRows(): nil rows result")
+ }
+
+ return rows, sc.Err()
+}
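For reference, prepareMockRows is what turns the mysql-CLI-style dumps under testdata/ into sqlmock result sets. A minimal usage sketch, assuming the same test package and imports; the fixture literal and helper name below are made up for illustration and are not part of this change:

// hypothetical fixture in the same format as the testdata files
var sampleDump = []byte(`
+---------------+----------------+
| Variable_Name | Variable_Value |
+---------------+----------------+
| Auth_memory   | 1044           |
+---------------+----------------+
`)

// expectSample wires the dump above to a query on the mock,
// mirroring what mockExpect does with the real testdata files.
func expectSample(t *testing.T, mock sqlmock.Sqlmock, query string) {
	rows, err := prepareMockRows(sampleDump) // columns: Variable_Name, Variable_Value; one row: "Auth_memory", "1044"
	require.NoError(t, err)
	mock.ExpectQuery(query).WillReturnRows(rows).RowsWillBeClosed()
}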
diff --git a/src/go/plugin/go.d/modules/proxysql/testdata/config.json b/src/go/plugin/go.d/modules/proxysql/testdata/config.json
new file mode 100644
index 000000000..ed8b72dcb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "dsn": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/proxysql/testdata/config.yaml b/src/go/plugin/go.d/modules/proxysql/testdata/config.yaml
new file mode 100644
index 000000000..caff49039
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+dsn: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_memory_metrics.txt b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_memory_metrics.txt
new file mode 100644
index 000000000..99ec093e1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_memory_metrics.txt
@@ -0,0 +1,21 @@
++------------------------------+----------------+
+| Variable_Name | Variable_Value |
++------------------------------+----------------+
+| SQLite3_memory_bytes | 6017144 |
+| jemalloc_resident | 403759104 |
+| jemalloc_active | 385101824 |
+| jemalloc_allocated | 379402432 |
+| jemalloc_mapped | 430993408 |
+| jemalloc_metadata | 17418872 |
+| jemalloc_retained | 260542464 |
+| Auth_memory | 1044 |
+| query_digest_memory | 13688 |
+| mysql_query_rules_memory | 22825 |
+| mysql_firewall_users_table | 0 |
+| mysql_firewall_users_config | 0 |
+| mysql_firewall_rules_table | 0 |
+| mysql_firewall_rules_config | 329 |
+| stack_memory_mysql_threads | 33554432 |
+| stack_memory_admin_threads | 16777216 |
+| stack_memory_cluster_threads | 0 |
++------------------------------+----------------+
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_commands_counters.txt b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_commands_counters.txt
new file mode 100644
index 000000000..6ab6bb830
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_commands_counters.txt
@@ -0,0 +1,56 @@
++-------------------+---------------+-----------+-----------+-----------+---------+---------+----------+----------+-----------+-----------+---------+--------+---------+----------+
+| Command | Total_Time_us | Total_cnt | cnt_100us | cnt_500us | cnt_1ms | cnt_5ms | cnt_10ms | cnt_50ms | cnt_100ms | cnt_500ms | cnt_1s | cnt_5s | cnt_10s | cnt_INFs |
++-------------------+---------------+-----------+-----------+-----------+---------+---------+----------+----------+-----------+-----------+---------+--------+---------+----------+
+| ALTER_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| ALTER_VIEW | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| ANALYZE_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| BEGIN | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| CALL | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| CHANGE_MASTER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| COMMIT | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| CREATE_DATABASE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| CREATE_INDEX | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| CREATE_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| CREATE_TEMPORARY | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| CREATE_TRIGGER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| CREATE_USER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| CREATE_VIEW | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| DEALLOCATE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| DELETE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| DESCRIBE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| DROP_DATABASE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| DROP_INDEX | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| DROP_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| DROP_TRIGGER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| DROP_USER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| DROP_VIEW | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| GRANT | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| EXECUTE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| EXPLAIN | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| FLUSH | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| INSERT | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| KILL | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| LOAD | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| LOCK_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| OPTIMIZE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| PREPARE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| PURGE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| RENAME_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| RESET_MASTER | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| RESET_SLAVE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| REPLACE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| REVOKE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| ROLLBACK | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| SAVEPOINT | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| SELECT | 4673958076637 | 68490650 | 32185976 | 36650 | 481335 | 4600948 | 2955830 | 10468460 | 4909816 | 11123900 | 1321917 | 403451 | 497 | 1870 |
+| SELECT_FOR_UPDATE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| SET | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| SHOW_TABLE_STATUS | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| START_TRANSACTION | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| TRUNCATE_TABLE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| UNLOCK_TABLES | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| UPDATE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| USE | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| SHOW | 2158 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+| UNKNOWN | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
++-------------------+---------------+-----------+-----------+-----------+---------+---------+----------+----------+-----------+-----------+---------+--------+---------+----------+
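Each data row of this fixture is flattened into the mysql_command_<Command>_<column> keys checked in the expected map earlier in the test; the SELECT row, for example, yields mysql_command_SELECT_Total_cnt = 68490650 and mysql_command_SELECT_cnt_100us = 32185976. A rough sketch of that mapping (variable names are illustrative; this is not the module's actual collect code):

// one fixture row -> one metric key per non-Command column
for col, val := range rowValues { // rowValues: column name -> numeric value for a single command
	mx["mysql_command_"+command+"_"+col] = val
}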
diff --git a/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_connection_pool .txt b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_connection_pool .txt
new file mode 100644
index 000000000..80b53e1af
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_connection_pool .txt
@@ -0,0 +1,11 @@
++----+-------------------+--------+--------+----------+----------+--------+---------+---------+-----------------+-----------------+------------+
+| hostgroup | srv_host | srv_port | status | ConnUsed | ConnFree | ConnOK | ConnERR | Queries | Bytes_data_sent | Bytes_data_recv | Latency_us |
++-----------+-------------------+----------+--------+----------+----------+--------+---------+---------+-----------------+-----------------+------------+
+| 10 | back001-db-master | 6001 | ONLINE | 69 | 423 | 524 | 0 | 8970367 | 9858463664 | 145193069937 | 17684 |
+| 11 | back001-db-master | 6002 | ONLINE | 0 | 1 | 1 | 0 | 69 | 187675 | 2903 | 17684 |
+| 11 | back001-db-reader | 6003 | ONLINE | 0 | 11 | 11 | 0 | 63488 | 163690013 | 4994101 | 113 |
+| 20 | back002-db-master | 6004 | ONLINE | 9 | 188 | 197 | 2 | 849461 | 1086994186 | 266034339 | 101981 |
+| 21 | back002-db-reader | 6005 | ONLINE | 0 | 1 | 1 | 0 | 8 | 6992 | 984 | 230 |
+| 31 | back003-db-master | 6006 | ONLINE | 0 | 3 | 3 | 0 | 3276 | 712803 | 81438709 | 231 |
+| 31 | back003-db-reader | 6007 | ONLINE | 1 | 70 | 71 | 0 | 2356904 | 411900849 | 115810708275 | 230 |
++-----------+-------------------+--------+--------+----------+----------+--------+---------+---------+-----------------+-----------------+--------------+
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_global.txt b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_global.txt
new file mode 100644
index 000000000..442266c45
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_global.txt
@@ -0,0 +1,106 @@
++---------------------------------------------+----------------+
+| Variable_Name | Variable_Value |
++---------------------------------------------+----------------+
+| ProxySQL_Uptime | 26748286 |
+| Active_Transactions | 0 |
+| Client_Connections_aborted | 2 |
+| Client_Connections_connected | 3 |
+| Client_Connections_created | 5458991 |
+| Server_Connections_aborted | 9979 |
+| Server_Connections_connected | 13 |
+| Server_Connections_created | 2122254 |
+| Server_Connections_delayed | 0 |
+| Client_Connections_non_idle | 3 |
+| Queries_backends_bytes_recv | 5896210168 |
+| Queries_backends_bytes_sent | 4329581500 |
+| Queries_frontends_bytes_recv | 7434816962 |
+| Queries_frontends_bytes_sent | 11643634097 |
+| Query_Processor_time_nsec | 0 |
+| Backend_query_time_nsec | 0 |
+| mysql_backend_buffers_bytes | 0 |
+| mysql_frontend_buffers_bytes | 196608 |
+| mysql_session_internal_bytes | 20232 |
+| Com_autocommit | 0 |
+| Com_autocommit_filtered | 0 |
+| Com_commit | 0 |
+| Com_commit_filtered | 0 |
+| Com_rollback | 0 |
+| Com_rollback_filtered | 0 |
+| Com_backend_change_user | 188694 |
+| Com_backend_init_db | 0 |
+| Com_backend_set_names | 1517893 |
+| Com_frontend_init_db | 2 |
+| Com_frontend_set_names | 0 |
+| Com_frontend_use_db | 0 |
+| Com_backend_stmt_prepare | 16858208 |
+| Com_backend_stmt_execute | 36303146 |
+| Com_backend_stmt_close | 0 |
+| Com_frontend_stmt_prepare | 32185987 |
+| Com_frontend_stmt_execute | 36314138 |
+| Com_frontend_stmt_close | 32137933 |
+| Mirror_concurrency | 0 |
+| Mirror_queue_length | 0 |
+| Questions | 100638067 |
+| Selects_for_update__autocommit0 | 0 |
+| Slow_queries | 405818 |
+| GTID_consistent_queries | 0 |
+| GTID_session_collected | 0 |
+| Servers_table_version | 37 |
+| MySQL_Thread_Workers | 4 |
+| Access_Denied_Wrong_Password | 2 |
+| Access_Denied_Max_Connections | 0 |
+| Access_Denied_Max_User_Connections | 0 |
+| MySQL_Monitor_Workers | 10 |
+| MySQL_Monitor_Workers_Aux | 0 |
+| MySQL_Monitor_Workers_Started | 10 |
+| MySQL_Monitor_connect_check_OK | 3548306 |
+| MySQL_Monitor_connect_check_ERR | 130 |
+| MySQL_Monitor_ping_check_OK | 21289849 |
+| MySQL_Monitor_ping_check_ERR | 108271 |
+| MySQL_Monitor_read_only_check_OK | 106246409 |
+| MySQL_Monitor_read_only_check_ERR | 19610 |
+| MySQL_Monitor_replication_lag_check_OK | 28702388 |
+| MySQL_Monitor_replication_lag_check_ERR | 482 |
+| ConnPool_get_conn_latency_awareness | 0 |
+| ConnPool_get_conn_immediate | 13361 |
+| ConnPool_get_conn_success | 36319474 |
+| ConnPool_get_conn_failure | 212943 |
+| generated_error_packets | 231 |
+| max_connect_timeouts | 227 |
+| backend_lagging_during_query | 8880 |
+| backend_offline_during_query | 8 |
+| queries_with_max_lag_ms | 0 |
+| queries_with_max_lag_ms__delayed | 0 |
+| queries_with_max_lag_ms__total_wait_time_us | 0 |
+| mysql_unexpected_frontend_com_quit | 0 |
+| Client_Connections_hostgroup_locked | 0 |
+| hostgroup_locked_set_cmds | 0 |
+| hostgroup_locked_queries | 0 |
+| mysql_unexpected_frontend_packets | 0 |
+| aws_aurora_replicas_skipped_during_query | 0 |
+| automatic_detected_sql_injection | 0 |
+| whitelisted_sqli_fingerprint | 0 |
+| mysql_killed_backend_connections | 0 |
+| mysql_killed_backend_queries | 0 |
+| MyHGM_myconnpoll_get | 36519056 |
+| MyHGM_myconnpoll_get_ok | 36306113 |
+| MyHGM_myconnpoll_push | 37358734 |
+| MyHGM_myconnpoll_destroy | 15150 |
+| MyHGM_myconnpoll_reset | 2 |
+| SQLite3_memory_bytes | 6021248 |
+| ConnPool_memory_bytes | 932248 |
+| Stmt_Client_Active_Total | 18 |
+| Stmt_Client_Active_Unique | 18 |
+| Stmt_Server_Active_Total | 101 |
+| Stmt_Server_Active_Unique | 39 |
+| Stmt_Max_Stmt_id | 66 |
+| Stmt_Cached | 65 |
+| Query_Cache_Memory_bytes | 0 |
+| Query_Cache_count_GET | 0 |
+| Query_Cache_count_GET_OK | 0 |
+| Query_Cache_count_SET | 0 |
+| Query_Cache_bytes_IN | 0 |
+| Query_Cache_bytes_OUT | 0 |
+| Query_Cache_Purged | 0 |
+| Query_Cache_Entries | 0 |
++---------------------------------------------+----------------+
diff --git a/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_users.txt b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_users.txt
new file mode 100644
index 000000000..900776b76
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/stats_mysql_users.txt
@@ -0,0 +1,6 @@
++-------------------------+----------------------+--------------------------+
+| username | frontend_connections | frontend_max_connections |
++-------------------------+----------------------+--------------------------+
+| first_user | 0 | 200 |
+| second_user | 3 | 15 |
++-------------------------+----------------------+--------------------------+
diff --git a/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/version.txt b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/version.txt
new file mode 100644
index 000000000..429a880b7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/proxysql/testdata/v2.0.10/version.txt
@@ -0,0 +1,5 @@
++---------------------+
+| version() |
++---------------------+
+| 2.0.10-27-g5b319972 |
++---------------------+
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pulsar/README.md b/src/go/plugin/go.d/modules/pulsar/README.md
new file mode 120000
index 000000000..dfa55301c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/README.md
@@ -0,0 +1 @@
+integrations/apache_pulsar.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pulsar/cache.go b/src/go/plugin/go.d/modules/pulsar/cache.go
new file mode 100644
index 000000000..7f113bf86
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/cache.go
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+func newCache() *cache {
+ return &cache{
+ namespaces: make(map[namespace]bool),
+ topics: make(map[topic]bool),
+ }
+}
+
+type (
+ namespace struct{ name string }
+ topic struct{ namespace, name string }
+ cache struct {
+ namespaces map[namespace]bool
+ topics map[topic]bool
+ }
+)
diff --git a/src/go/plugin/go.d/modules/pulsar/charts.go b/src/go/plugin/go.d/modules/pulsar/charts.go
new file mode 100644
index 000000000..e6bb9bde6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/charts.go
@@ -0,0 +1,664 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+type (
+ Charts = module.Charts
+ Chart = module.Chart
+ Dims = module.Dims
+ Dim = module.Dim
+ Opts = module.Opts
+)
+
+var summaryCharts = Charts{
+ sumBrokerComponentsChart.Copy(),
+
+ sumMessagesRateChart.Copy(),
+ sumThroughputRateChart.Copy(),
+
+ sumStorageSizeChart.Copy(),
+ sumStorageOperationsRateChart.Copy(), // optional
+ sumMsgBacklogSizeChart.Copy(),
+ sumStorageWriteLatencyChart.Copy(),
+ sumEntrySizeChart.Copy(),
+
+ sumSubsDelayedChart.Copy(),
+ sumSubsMsgRateRedeliverChart.Copy(), // optional
+ sumSubsBlockedOnUnackedMsgChart.Copy(), // optional
+
+ sumReplicationRateChart.Copy(), // optional
+ sumReplicationThroughputRateChart.Copy(), // optional
+ sumReplicationBacklogChart.Copy(), // optional
+}
+
+var (
+ sumBrokerComponentsChart = Chart{
+ ID: "broker_components",
+ Title: "Broker Components",
+ Units: "components",
+ Fam: "ns summary",
+ Ctx: "pulsar.broker_components",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: "pulsar_namespaces_count", Name: "namespaces"},
+ {ID: metricPulsarTopicsCount, Name: "topics"},
+ {ID: metricPulsarSubscriptionsCount, Name: "subscriptions"},
+ {ID: metricPulsarProducersCount, Name: "producers"},
+ {ID: metricPulsarConsumersCount, Name: "consumers"},
+ },
+ }
+ sumMessagesRateChart = Chart{
+ ID: "messages_rate",
+ Title: "Messages Rate",
+ Units: "messages/s",
+ Fam: "ns summary",
+ Ctx: "pulsar.messages_rate",
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarRateIn, Name: "publish", Div: 1000},
+ {ID: metricPulsarRateOut, Name: "dispatch", Mul: -1, Div: 1000},
+ },
+ }
+ sumThroughputRateChart = Chart{
+ ID: "throughput_rate",
+ Title: "Throughput Rate",
+ Units: "KiB/s",
+ Fam: "ns summary",
+ Ctx: "pulsar.throughput_rate",
+ Type: module.Area,
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarThroughputIn, Name: "publish", Div: 1024 * 1000},
+ {ID: metricPulsarThroughputOut, Name: "dispatch", Mul: -1, Div: 1024 * 1000},
+ },
+ }
+ sumStorageSizeChart = Chart{
+ ID: "storage_size",
+ Title: "Storage Size",
+ Units: "KiB",
+ Fam: "ns summary",
+ Ctx: "pulsar.storage_size",
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarStorageSize, Name: "used", Div: 1024},
+ },
+ }
+ sumStorageOperationsRateChart = Chart{
+ ID: "storage_operations_rate",
+ Title: "Storage Read/Write Operations Rate",
+ Units: "message batches/s",
+ Fam: "ns summary",
+ Ctx: "pulsar.storage_operations_rate",
+ Type: module.Area,
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarStorageReadRate, Name: "read", Div: 1000},
+ {ID: metricPulsarStorageWriteRate, Name: "write", Mul: -1, Div: 1000},
+ },
+ }
+ sumMsgBacklogSizeChart = Chart{
+ ID: "msg_backlog",
+ Title: "Messages Backlog Size",
+ Units: "messages",
+ Fam: "ns summary",
+ Ctx: "pulsar.msg_backlog",
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarMsgBacklog, Name: "backlog"},
+ },
+ }
+ sumStorageWriteLatencyChart = Chart{
+ ID: "storage_write_latency",
+ Title: "Storage Write Latency",
+ Units: "entries/s",
+ Fam: "ns summary",
+ Ctx: "pulsar.storage_write_latency",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: "pulsar_storage_write_latency_le_0_5", Name: "<=0.5ms", Div: 60},
+ {ID: "pulsar_storage_write_latency_le_1", Name: "<=1ms", Div: 60},
+ {ID: "pulsar_storage_write_latency_le_5", Name: "<=5ms", Div: 60},
+ {ID: "pulsar_storage_write_latency_le_10", Name: "<=10ms", Div: 60},
+ {ID: "pulsar_storage_write_latency_le_20", Name: "<=20ms", Div: 60},
+ {ID: "pulsar_storage_write_latency_le_50", Name: "<=50ms", Div: 60},
+ {ID: "pulsar_storage_write_latency_le_100", Name: "<=100ms", Div: 60},
+ {ID: "pulsar_storage_write_latency_le_200", Name: "<=200ms", Div: 60},
+ {ID: "pulsar_storage_write_latency_le_1000", Name: "<=1s", Div: 60},
+ {ID: "pulsar_storage_write_latency_overflow", Name: ">1s", Div: 60},
+ },
+ }
+ sumEntrySizeChart = Chart{
+ ID: "entry_size",
+ Title: "Entry Size",
+ Units: "entries/s",
+ Fam: "ns summary",
+ Ctx: "pulsar.entry_size",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: "pulsar_entry_size_le_128", Name: "<=128B", Div: 60},
+ {ID: "pulsar_entry_size_le_512", Name: "<=512B", Div: 60},
+ {ID: "pulsar_entry_size_le_1_kb", Name: "<=1KB", Div: 60},
+ {ID: "pulsar_entry_size_le_2_kb", Name: "<=2KB", Div: 60},
+ {ID: "pulsar_entry_size_le_4_kb", Name: "<=4KB", Div: 60},
+ {ID: "pulsar_entry_size_le_16_kb", Name: "<=16KB", Div: 60},
+ {ID: "pulsar_entry_size_le_100_kb", Name: "<=100KB", Div: 60},
+ {ID: "pulsar_entry_size_le_1_mb", Name: "<=1MB", Div: 60},
+ {ID: "pulsar_entry_size_le_overflow", Name: ">1MB", Div: 60},
+ },
+ }
+ sumSubsDelayedChart = Chart{
+ ID: "subscription_delayed",
+ Title: "Subscriptions Delayed for Dispatching",
+ Units: "message batches",
+ Fam: "ns summary",
+ Ctx: "pulsar.subscription_delayed",
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarSubscriptionDelayed, Name: "delayed"},
+ },
+ }
+ sumSubsMsgRateRedeliverChart = Chart{
+ ID: "subscription_msg_rate_redeliver",
+ Title: "Subscriptions Redelivered Message Rate",
+ Units: "messages/s",
+ Fam: "ns summary",
+ Ctx: "pulsar.subscription_msg_rate_redeliver",
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarSubscriptionMsgRateRedeliver, Name: "redelivered", Div: 1000},
+ },
+ }
+ sumSubsBlockedOnUnackedMsgChart = Chart{
+ ID: "subscription_blocked_on_unacked_messages",
+ Title: "Subscriptions Blocked On Unacked Messages",
+ Units: "subscriptions",
+ Fam: "ns summary",
+ Ctx: "pulsar.subscription_blocked_on_unacked_messages",
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarSubscriptionBlockedOnUnackedMessages, Name: "blocked"},
+ },
+ }
+ sumReplicationRateChart = Chart{
+ ID: "replication_rate",
+ Title: "Replication Rate",
+ Units: "messages/s",
+ Fam: "ns summary",
+ Ctx: "pulsar.replication_rate",
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarReplicationRateIn, Name: "in", Div: 1000},
+ {ID: metricPulsarReplicationRateOut, Name: "out", Mul: -1, Div: 1000},
+ },
+ }
+ sumReplicationThroughputRateChart = Chart{
+ ID: "replication_throughput_rate",
+ Title: "Replication Throughput Rate",
+ Units: "KiB/s",
+ Fam: "ns summary",
+ Ctx: "pulsar.replication_throughput_rate",
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarReplicationThroughputIn, Name: "in", Div: 1024 * 1000},
+ {ID: metricPulsarReplicationThroughputOut, Name: "out", Mul: -1, Div: 1024 * 1000},
+ },
+ }
+ sumReplicationBacklogChart = Chart{
+ ID: "replication_backlog",
+ Title: "Replication Backlog",
+ Units: "messages",
+ Fam: "ns summary",
+ Ctx: "pulsar.replication_backlog",
+ Opts: Opts{StoreFirst: true},
+ Dims: Dims{
+ {ID: metricPulsarReplicationBacklog, Name: "backlog"},
+ },
+ }
+)
+
+var namespaceCharts = Charts{
+ nsBrokerComponentsChart.Copy(),
+ topicProducersChart.Copy(),
+ topicSubscriptionsChart.Copy(),
+ topicConsumersChart.Copy(),
+
+ nsMessagesRateChart.Copy(),
+ topicMessagesRateInChart.Copy(),
+ topicMessagesRateOutChart.Copy(),
+ nsThroughputRateCharts.Copy(),
+ topicThroughputRateInChart.Copy(),
+ topicThroughputRateOutChart.Copy(),
+
+ nsStorageSizeChart.Copy(),
+ topicStorageSizeChart.Copy(),
+ nsStorageOperationsChart.Copy(), // optional
+ topicStorageReadRateChart.Copy(), // optional
+ topicStorageWriteRateChart.Copy(), // optional
+ nsMsgBacklogSizeChart.Copy(),
+ topicMsgBacklogSizeChart.Copy(),
+ nsStorageWriteLatencyChart.Copy(),
+ nsEntrySizeChart.Copy(),
+
+ nsSubsDelayedChart.Copy(),
+ topicSubsDelayedChart.Copy(),
+ nsSubsMsgRateRedeliverChart.Copy(), // optional
+ topicSubsMsgRateRedeliverChart.Copy(), // optional
+ nsSubsBlockedOnUnackedMsgChart.Copy(), // optional
+ topicSubsBlockedOnUnackedMsgChart.Copy(), // optional
+
+ nsReplicationRateChart.Copy(), // optional
+ topicReplicationRateInChart.Copy(), // optional
+ topicReplicationRateOutChart.Copy(), // optional
+ nsReplicationThroughputChart.Copy(), // optional
+ topicReplicationThroughputRateInChart.Copy(), // optional
+ topicReplicationThroughputRateOutChart.Copy(), // optional
+ nsReplicationBacklogChart.Copy(), // optional
+ topicReplicationBacklogChart.Copy(), // optional
+}
+
+func toNamespaceChart(chart Chart) Chart {
+ chart = *chart.Copy()
+ if chart.ID == sumBrokerComponentsChart.ID {
+ _ = chart.RemoveDim("pulsar_namespaces_count")
+ }
+ chart.ID += "_namespace_%s"
+ chart.Fam = "ns %s"
+ if idx := strings.IndexByte(chart.Ctx, '.'); idx > 0 {
+ // pulsar.messages_rate => pulsar.namespace_messages_rate
+ chart.Ctx = chart.Ctx[:idx+1] + "namespace_" + chart.Ctx[idx+1:]
+ }
+ for _, dim := range chart.Dims {
+ dim.ID += "_%s"
+ }
+ return chart
+}
+
+var (
+ nsBrokerComponentsChart = toNamespaceChart(sumBrokerComponentsChart)
+ nsMessagesRateChart = toNamespaceChart(sumMessagesRateChart)
+ nsThroughputRateCharts = toNamespaceChart(sumThroughputRateChart)
+ nsStorageSizeChart = toNamespaceChart(sumStorageSizeChart)
+ nsStorageOperationsChart = toNamespaceChart(sumStorageOperationsRateChart)
+ nsMsgBacklogSizeChart = toNamespaceChart(sumMsgBacklogSizeChart)
+ nsStorageWriteLatencyChart = toNamespaceChart(sumStorageWriteLatencyChart)
+ nsEntrySizeChart = toNamespaceChart(sumEntrySizeChart)
+ nsSubsDelayedChart = toNamespaceChart(sumSubsDelayedChart)
+ nsSubsMsgRateRedeliverChart = toNamespaceChart(sumSubsMsgRateRedeliverChart)
+ nsSubsBlockedOnUnackedMsgChart = toNamespaceChart(sumSubsBlockedOnUnackedMsgChart)
+ nsReplicationRateChart = toNamespaceChart(sumReplicationRateChart)
+ nsReplicationThroughputChart = toNamespaceChart(sumReplicationThroughputRateChart)
+ nsReplicationBacklogChart = toNamespaceChart(sumReplicationBacklogChart)
+
+ topicProducersChart = Chart{
+ ID: "topic_producers_namespace_%s",
+ Title: "Topic Producers",
+ Units: "producers",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_producers",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicSubscriptionsChart = Chart{
+ ID: "topic_subscriptions_namespace_%s",
+ Title: "Topic Subscriptions",
+ Units: "subscriptions",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_subscriptions",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicConsumersChart = Chart{
+ ID: "topic_consumers_namespace_%s",
+ Title: "Topic Consumers",
+ Units: "consumers",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_consumers",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicMessagesRateInChart = Chart{
+ ID: "topic_messages_rate_in_namespace_%s",
+ Title: "Topic Publish Messages Rate",
+ Units: "publishes/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_messages_rate_in",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicMessagesRateOutChart = Chart{
+ ID: "topic_messages_rate_out_namespace_%s",
+ Title: "Topic Dispatch Messages Rate",
+ Units: "dispatches/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_messages_rate_out",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicThroughputRateInChart = Chart{
+ ID: "topic_throughput_rate_in_namespace_%s",
+ Title: "Topic Publish Throughput Rate",
+ Units: "KiB/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_throughput_rate_in",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicThroughputRateOutChart = Chart{
+ ID: "topic_throughput_rate_out_namespace_%s",
+ Title: "Topic Dispatch Throughput Rate",
+ Units: "KiB/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_throughput_rate_out",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicStorageSizeChart = Chart{
+ ID: "topic_storage_size_namespace_%s",
+ Title: "Topic Storage Size",
+ Units: "KiB",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_storage_size",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicStorageReadRateChart = Chart{
+ ID: "topic_storage_read_rate_namespace_%s",
+ Title: "Topic Storage Read Rate",
+ Units: "message batches/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_storage_read_rate",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicStorageWriteRateChart = Chart{
+ ID: "topic_storage_write_rate_namespace_%s",
+ Title: "Topic Storage Write Rate",
+ Units: "message batches/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_storage_write_rate",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicMsgBacklogSizeChart = Chart{
+ ID: "topic_msg_backlog_namespace_%s",
+ Title: "Topic Messages Backlog Size",
+ Units: "messages",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_msg_backlog",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicSubsDelayedChart = Chart{
+ ID: "topic_subscription_delayed_namespace_%s",
+ Title: "Topic Subscriptions Delayed for Dispatching",
+ Units: "message batches",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_subscription_delayed",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicSubsMsgRateRedeliverChart = Chart{
+ ID: "topic_subscription_msg_rate_redeliver_namespace_%s",
+ Title: "Topic Subscriptions Redelivered Message Rate",
+ Units: "messages/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_subscription_msg_rate_redeliver",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicSubsBlockedOnUnackedMsgChart = Chart{
+ ID: "topic_subscription_blocked_on_unacked_messages_namespace_%s",
+ Title: "Topic Subscriptions Blocked On Unacked Messages",
+ Units: "blocked subscriptions",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_subscription_blocked_on_unacked_messages",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicReplicationRateInChart = Chart{
+ ID: "topic_replication_rate_in_namespace_%s",
+ Title: "Topic Replication Rate From Remote Cluster",
+ Units: "messages/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_replication_rate_in",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicReplicationRateOutChart = Chart{
+ ID: "replication_rate_out_namespace_%s",
+ Title: "Topic Replication Rate To Remote Cluster",
+ Units: "messages/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_replication_rate_out",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicReplicationThroughputRateInChart = Chart{
+ ID: "topic_replication_throughput_rate_in_namespace_%s",
+ Title: "Topic Replication Throughput Rate From Remote Cluster",
+ Units: "KiB/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_replication_throughput_rate_in",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicReplicationThroughputRateOutChart = Chart{
+ ID: "topic_replication_throughput_rate_out_namespace_%s",
+ Title: "Topic Replication Throughput Rate To Remote Cluster",
+ Units: "KiB/s",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_replication_throughput_rate_out",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+ topicReplicationBacklogChart = Chart{
+ ID: "topic_replication_backlog_namespace_%s",
+ Title: "Topic Replication Backlog",
+ Units: "messages",
+ Fam: "ns %s",
+ Ctx: "pulsar.topic_replication_backlog",
+ Type: module.Stacked,
+ Opts: Opts{StoreFirst: true},
+ }
+)
+
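+// adjustCharts removes the charts marked "optional" above when the scraped series they depend on are not present.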
+func (p *Pulsar) adjustCharts(pms prometheus.Series) {
+ if pms := pms.FindByName(metricPulsarStorageReadRate); pms.Len() == 0 || pms[0].Labels.Get("namespace") == "" {
+ p.removeSummaryChart(sumStorageOperationsRateChart.ID)
+ p.removeNamespaceChart(nsStorageOperationsChart.ID)
+ p.removeNamespaceChart(topicStorageReadRateChart.ID)
+ p.removeNamespaceChart(topicStorageWriteRateChart.ID)
+ delete(p.topicChartsMapping, topicStorageReadRateChart.ID)
+ delete(p.topicChartsMapping, topicStorageWriteRateChart.ID)
+ }
+ if pms.FindByName(metricPulsarSubscriptionMsgRateRedeliver).Len() == 0 {
+ p.removeSummaryChart(sumSubsMsgRateRedeliverChart.ID)
+ p.removeSummaryChart(sumSubsBlockedOnUnackedMsgChart.ID)
+ p.removeNamespaceChart(nsSubsMsgRateRedeliverChart.ID)
+ p.removeNamespaceChart(nsSubsBlockedOnUnackedMsgChart.ID)
+ p.removeNamespaceChart(topicSubsMsgRateRedeliverChart.ID)
+ p.removeNamespaceChart(topicSubsBlockedOnUnackedMsgChart.ID)
+ delete(p.topicChartsMapping, topicSubsMsgRateRedeliverChart.ID)
+ delete(p.topicChartsMapping, topicSubsBlockedOnUnackedMsgChart.ID)
+ }
+ if pms.FindByName(metricPulsarReplicationBacklog).Len() == 0 {
+ p.removeSummaryChart(sumReplicationRateChart.ID)
+ p.removeSummaryChart(sumReplicationThroughputRateChart.ID)
+ p.removeSummaryChart(sumReplicationBacklogChart.ID)
+ p.removeNamespaceChart(nsReplicationRateChart.ID)
+ p.removeNamespaceChart(nsReplicationThroughputChart.ID)
+ p.removeNamespaceChart(nsReplicationBacklogChart.ID)
+ p.removeNamespaceChart(topicReplicationRateInChart.ID)
+ p.removeNamespaceChart(topicReplicationRateOutChart.ID)
+ p.removeNamespaceChart(topicReplicationThroughputRateInChart.ID)
+ p.removeNamespaceChart(topicReplicationThroughputRateOutChart.ID)
+ p.removeNamespaceChart(topicReplicationBacklogChart.ID)
+ delete(p.topicChartsMapping, topicReplicationRateInChart.ID)
+ delete(p.topicChartsMapping, topicReplicationRateOutChart.ID)
+ delete(p.topicChartsMapping, topicReplicationThroughputRateInChart.ID)
+ delete(p.topicChartsMapping, topicReplicationThroughputRateOutChart.ID)
+ delete(p.topicChartsMapping, topicReplicationBacklogChart.ID)
+ }
+}
+
+func (p *Pulsar) removeSummaryChart(chartID string) {
+ if err := p.Charts().Remove(chartID); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Pulsar) removeNamespaceChart(chartID string) {
+ if err := p.nsCharts.Remove(chartID); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Pulsar) updateCharts() {
+ // NOTE: order is important
+ for ns := range p.curCache.namespaces {
+ if !p.cache.namespaces[ns] {
+ p.cache.namespaces[ns] = true
+ p.addNamespaceCharts(ns)
+ }
+ }
+ for top := range p.curCache.topics {
+ if !p.cache.topics[top] {
+ p.cache.topics[top] = true
+ p.addTopicToCharts(top)
+ }
+ }
+ for top := range p.cache.topics {
+ if p.curCache.topics[top] {
+ continue
+ }
+ delete(p.cache.topics, top)
+ p.removeTopicFromCharts(top)
+ }
+ for ns := range p.cache.namespaces {
+ if p.curCache.namespaces[ns] {
+ continue
+ }
+ delete(p.cache.namespaces, ns)
+ p.removeNamespaceFromCharts(ns)
+ }
+}
+
+func (p *Pulsar) addNamespaceCharts(ns namespace) {
+ charts := p.nsCharts.Copy()
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, ns.name)
+ chart.Fam = fmt.Sprintf(chart.Fam, ns.name)
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, ns.name)
+ }
+ }
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Pulsar) removeNamespaceFromCharts(ns namespace) {
+ for _, chart := range *p.nsCharts {
+ id := fmt.Sprintf(chart.ID, ns.name)
+ if chart = p.Charts().Get(id); chart != nil {
+ chart.MarkRemove()
+ } else {
+ p.Warningf("could not remove namespace chart '%s'", id)
+ }
+ }
+}
+
+func (p *Pulsar) addTopicToCharts(top topic) {
+ for id, metric := range p.topicChartsMapping {
+ id = fmt.Sprintf(id, top.namespace)
+ chart := p.Charts().Get(id)
+ if chart == nil {
+ p.Warningf("could not add topic '%s' to chart '%s': chart not found", top.name, id)
+ continue
+ }
+
+ dim := Dim{ID: metric + "_" + top.name, Name: extractTopicName(top)}
+ switch metric {
+ case metricPulsarThroughputIn,
+ metricPulsarThroughputOut,
+ metricPulsarReplicationThroughputIn,
+ metricPulsarReplicationThroughputOut:
+ dim.Div = 1024 * 1000
+ case metricPulsarRateIn,
+ metricPulsarRateOut,
+ metricPulsarStorageWriteRate,
+ metricPulsarStorageReadRate,
+ metricPulsarSubscriptionMsgRateRedeliver,
+ metricPulsarReplicationRateIn,
+ metricPulsarReplicationRateOut:
+ dim.Div = 1000
+ case metricPulsarStorageSize:
+ dim.Div = 1024
+ }
+
+ if err := chart.AddDim(&dim); err != nil {
+ p.Warning(err)
+ }
+ chart.MarkNotCreated()
+ }
+}
+
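+// removeTopicFromCharts marks the topic's dimension for removal on each per-namespace topic chart
+// and marks the chart as not created so the change is pushed on the next update.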
+func (p *Pulsar) removeTopicFromCharts(top topic) {
+ for id, metric := range p.topicChartsMapping {
+ id = fmt.Sprintf(id, top.namespace)
+ chart := p.Charts().Get(id)
+ if chart == nil {
+ p.Warningf("could not remove topic '%s' from chart '%s': chart not found", top.name, id)
+ continue
+ }
+
+ if err := chart.MarkDimRemove(metric+"_"+top.name, true); err != nil {
+ p.Warning(err)
+ }
+ chart.MarkNotCreated()
+ }
+}
+
+func topicChartsMapping() map[string]string {
+ return map[string]string{
+ topicSubscriptionsChart.ID: metricPulsarSubscriptionsCount,
+ topicProducersChart.ID: metricPulsarProducersCount,
+ topicConsumersChart.ID: metricPulsarConsumersCount,
+ topicMessagesRateInChart.ID: metricPulsarRateIn,
+ topicMessagesRateOutChart.ID: metricPulsarRateOut,
+ topicThroughputRateInChart.ID: metricPulsarThroughputIn,
+ topicThroughputRateOutChart.ID: metricPulsarThroughputOut,
+ topicStorageSizeChart.ID: metricPulsarStorageSize,
+ topicStorageReadRateChart.ID: metricPulsarStorageReadRate,
+ topicStorageWriteRateChart.ID: metricPulsarStorageWriteRate,
+ topicMsgBacklogSizeChart.ID: metricPulsarMsgBacklog,
+ topicSubsDelayedChart.ID: metricPulsarSubscriptionDelayed,
+ topicSubsMsgRateRedeliverChart.ID: metricPulsarSubscriptionMsgRateRedeliver,
+ topicSubsBlockedOnUnackedMsgChart.ID: metricPulsarSubscriptionBlockedOnUnackedMessages,
+ topicReplicationRateInChart.ID: metricPulsarReplicationRateIn,
+ topicReplicationRateOutChart.ID: metricPulsarReplicationRateOut,
+ topicReplicationThroughputRateInChart.ID: metricPulsarReplicationThroughputIn,
+ topicReplicationThroughputRateOutChart.ID: metricPulsarReplicationThroughputOut,
+ topicReplicationBacklogChart.ID: metricPulsarReplicationBacklog,
+ }
+}
+
+func extractTopicName(top topic) string {
+ // persistent://sample/ns1/demo-1 => p:demo-1
+ if idx := strings.LastIndexByte(top.name, '/'); idx > 0 {
+ return top.name[:1] + ":" + top.name[idx+1:]
+ }
+ return top.name
+}
diff --git a/src/go/plugin/go.d/modules/pulsar/collect.go b/src/go/plugin/go.d/modules/pulsar/collect.go
new file mode 100644
index 000000000..10ff48b3e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/collect.go
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+func isValidPulsarMetrics(pms prometheus.Series) bool {
+ return pms.FindByName(metricPulsarTopicsCount).Len() > 0
+}
+
+func (p *Pulsar) resetCurCache() {
+ for ns := range p.curCache.namespaces {
+ delete(p.curCache.namespaces, ns)
+ }
+ for top := range p.curCache.topics {
+ delete(p.curCache.topics, top)
+ }
+}
+
+func (p *Pulsar) collect() (map[string]int64, error) {
+ pms, err := p.prom.ScrapeSeries()
+ if err != nil {
+ return nil, err
+ }
+
+ if !isValidPulsarMetrics(pms) {
+ return nil, errors.New("returned metrics aren't Apache Pulsar metrics")
+ }
+
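+	// On the first successful scrape, drop charts for metric families the broker does not expose.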
+ p.once.Do(func() {
+ p.adjustCharts(pms)
+ })
+
+ mx := p.collectMetrics(pms)
+ p.updateCharts()
+ p.resetCurCache()
+
+ return stm.ToMap(mx), nil
+}
+
+func (p *Pulsar) collectMetrics(pms prometheus.Series) map[string]float64 {
+ mx := make(map[string]float64)
+ p.collectBroker(mx, pms)
+ return mx
+}
+
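+// collectBroker accumulates every metric three ways: a broker-wide total (metric name),
+// a per-namespace sum (name + "_" + namespace) and, for topics that pass the topic filter,
+// a per-topic sum (name + "_" + topic).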
+func (p *Pulsar) collectBroker(mx map[string]float64, pms prometheus.Series) {
+ pms = findPulsarMetrics(pms)
+ for _, pm := range pms {
+ ns, top := newNamespace(pm), newTopic(pm)
+ if ns.name == "" {
+ continue
+ }
+
+ p.curCache.namespaces[ns] = true
+
+ value := pm.Value * precision(pm.Name())
+ mx[pm.Name()] += value
+ mx[pm.Name()+"_"+ns.name] += value
+
+ if top.name == "" || !p.topicFilter.MatchString(top.name) {
+ continue
+ }
+
+ p.curCache.topics[top] = true
+ mx[pm.Name()+"_"+top.name] += value
+ }
+ mx["pulsar_namespaces_count"] = float64(len(p.curCache.namespaces))
+}
+
+func newNamespace(pm prometheus.SeriesSample) namespace {
+ return namespace{
+ name: pm.Labels.Get("namespace"),
+ }
+}
+
+func newTopic(pm prometheus.SeriesSample) topic {
+ return topic{
+ namespace: pm.Labels.Get("namespace"),
+ name: pm.Labels.Get("topic"),
+ }
+}
+
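+// findPulsarMetrics keeps the histogram series (pulsar_storage_write_latency_*, pulsar_entry_size_*)
+// plus the known per-namespace/per-topic series; all other scraped series are dropped.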
+func findPulsarMetrics(pms prometheus.Series) prometheus.Series {
+ var ms prometheus.Series
+ for _, pm := range pms {
+ if isPulsarHistogram(pm) {
+ ms = append(ms, pm)
+ }
+ }
+ pms = pms.FindByNames(
+ metricPulsarTopicsCount,
+ metricPulsarSubscriptionDelayed,
+ metricPulsarSubscriptionsCount,
+ metricPulsarProducersCount,
+ metricPulsarConsumersCount,
+ metricPulsarRateIn,
+ metricPulsarRateOut,
+ metricPulsarThroughputIn,
+ metricPulsarThroughputOut,
+ metricPulsarStorageSize,
+ metricPulsarStorageWriteRate,
+ metricPulsarStorageReadRate,
+ metricPulsarMsgBacklog,
+ metricPulsarSubscriptionMsgRateRedeliver,
+ metricPulsarSubscriptionBlockedOnUnackedMessages,
+ )
+ return append(ms, pms...)
+}
+
+func isPulsarHistogram(pm prometheus.SeriesSample) bool {
+ s := pm.Name()
+ return strings.HasPrefix(s, "pulsar_storage_write_latency") || strings.HasPrefix(s, "pulsar_entry_size")
+}
+
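+// precision returns the multiplier applied to a metric before the collected float values are
+// converted to int64: rates and throughput are scaled by 1000 so fractional values survive the
+// conversion, and the matching chart dimensions divide the value back (see addTopicToCharts).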
+func precision(metric string) float64 {
+ switch metric {
+ case metricPulsarRateIn,
+ metricPulsarRateOut,
+ metricPulsarThroughputIn,
+ metricPulsarThroughputOut,
+ metricPulsarStorageWriteRate,
+ metricPulsarStorageReadRate,
+ metricPulsarSubscriptionMsgRateRedeliver,
+ metricPulsarReplicationRateIn,
+ metricPulsarReplicationRateOut,
+ metricPulsarReplicationThroughputIn,
+ metricPulsarReplicationThroughputOut:
+ return 1000
+ }
+ return 1
+}
diff --git a/src/go/plugin/go.d/modules/pulsar/config_schema.json b/src/go/plugin/go.d/modules/pulsar/config_schema.json
new file mode 100644
index 000000000..b4bc8b45f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Pulsar collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 60
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Pulsar metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:8080/metrics",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 5
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pulsar/init.go b/src/go/plugin/go.d/modules/pulsar/init.go
new file mode 100644
index 000000000..f165327a5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/init.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (p *Pulsar) validateConfig() error {
+ if p.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (p *Pulsar) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(p.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(client, p.Request), nil
+}
+
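+// initTopicFilterMatcher builds the topic filter matcher. An empty topic_filter yields
+// matcher.FALSE(), i.e. no topic matches and per-topic collection stays disabled.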
+func (p *Pulsar) initTopicFilterMatcher() (matcher.Matcher, error) {
+ if p.TopicFilter.Empty() {
+ return matcher.FALSE(), nil
+ }
+ return p.TopicFilter.Parse()
+}
diff --git a/src/go/plugin/go.d/modules/pulsar/integrations/apache_pulsar.md b/src/go/plugin/go.d/modules/pulsar/integrations/apache_pulsar.md
new file mode 100644
index 000000000..8538fbf9c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/integrations/apache_pulsar.md
@@ -0,0 +1,314 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/pulsar/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/pulsar/metadata.yaml"
+sidebar_label: "Apache Pulsar"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Apache Pulsar
+
+
+<img src="https://netdata.cloud/img/pulsar.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: pulsar
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Pulsar servers.
+
+
+It collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Pulsar instances running on localhost.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+- topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.
+- subscription_* and namespace_subscription_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.
+- replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.
+
+
+### Per Apache Pulsar instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| pulsar.broker_components | namespaces, topics, subscriptions, producers, consumers | components |
+| pulsar.messages_rate | publish, dispatch | messages/s |
+| pulsar.throughput_rate | publish, dispatch | KiB/s |
+| pulsar.storage_size | used | KiB |
+| pulsar.storage_operations_rate | read, write | message batches/s |
+| pulsar.msg_backlog | backlog | messages |
+| pulsar.storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |
+| pulsar.entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |
+| pulsar.subscription_delayed | delayed | message batches |
+| pulsar.subscription_msg_rate_redeliver | redelivered | messages/s |
+| pulsar.subscription_blocked_on_unacked_messages | blocked | subscriptions |
+| pulsar.replication_rate | in, out | messages/s |
+| pulsar.replication_throughput_rate | in, out | KiB/s |
+| pulsar.replication_backlog | backlog | messages |
+
+### Per namespace
+
+These metrics refer to a namespace.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| pulsar.namespace_broker_components | topics, subscriptions, producers, consumers | components |
+| pulsar.namespace_messages_rate | publish, dispatch | messages/s |
+| pulsar.namespace_throughput_rate | publish, dispatch | KiB/s |
+| pulsar.namespace_storage_size | used | KiB |
+| pulsar.namespace_storage_operations_rate | read, write | message batches/s |
+| pulsar.namespace_msg_backlog | backlog | messages |
+| pulsar.namespace_storage_write_latency | <=0.5ms, <=1ms, <=5ms, <=10ms, <=20ms, <=50ms, <=100ms, <=200ms, <=1s, >1s | entries/s |
+| pulsar.namespace_entry_size | <=128B, <=512B, <=1KB, <=2KB, <=4KB, <=16KB, <=100KB, <=1MB, >1MB | entries/s |
+| pulsar.namespace_subscription_delayed | delayed | message batches |
+| pulsar.namespace_subscription_msg_rate_redeliver | redelivered | messages/s |
+| pulsar.namespace_subscription_blocked_on_unacked_messages | blocked | subscriptions |
+| pulsar.namespace_replication_rate | in, out | messages/s |
+| pulsar.namespace_replication_throughput_rate | in, out | KiB/s |
+| pulsar.namespace_replication_backlog | backlog | messages |
+| pulsar.topic_producers | a dimension per topic | producers |
+| pulsar.topic_subscriptions | a dimension per topic | subscriptions |
+| pulsar.topic_consumers | a dimension per topic | consumers |
+| pulsar.topic_messages_rate_in | a dimension per topic | publishes/s |
+| pulsar.topic_messages_rate_out | a dimension per topic | dispatches/s |
+| pulsar.topic_throughput_rate_in | a dimension per topic | KiB/s |
+| pulsar.topic_throughput_rate_out | a dimension per topic | KiB/s |
+| pulsar.topic_storage_size | a dimension per topic | KiB |
+| pulsar.topic_storage_read_rate | a dimension per topic | message batches/s |
+| pulsar.topic_storage_write_rate | a dimension per topic | message batches/s |
+| pulsar.topic_msg_backlog | a dimension per topic | messages |
+| pulsar.topic_subscription_delayed | a dimension per topic | message batches |
+| pulsar.topic_subscription_msg_rate_redeliver | a dimension per topic | messages/s |
+| pulsar.topic_subscription_blocked_on_unacked_messages | a dimension per topic | blocked subscriptions |
+| pulsar.topic_replication_rate_in | a dimension per topic | messages/s |
+| pulsar.topic_replication_rate_out | a dimension per topic | messages/s |
+| pulsar.topic_replication_throughput_rate_in | a dimension per topic | KiB/s |
+| pulsar.topic_replication_throughput_rate_out | a dimension per topic | KiB/s |
+| pulsar.topic_replication_backlog | a dimension per topic | messages |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/pulsar.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/pulsar.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 60 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8080/metrics | yes |
+| timeout | HTTP request timeout. | 5 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080/metrics
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:8080/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080/metrics
+
+ - name: remote
+ url: http://192.0.2.1:8080/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `pulsar` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m pulsar
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `pulsar` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep pulsar
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep pulsar /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep pulsar
+```
+
+
diff --git a/src/go/plugin/go.d/modules/pulsar/metadata.yaml b/src/go/plugin/go.d/modules/pulsar/metadata.yaml
new file mode 100644
index 000000000..f21389fd2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/metadata.yaml
@@ -0,0 +1,519 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-pulsar
+ plugin_name: go.d.plugin
+ module_name: pulsar
+ monitored_instance:
+ name: Apache Pulsar
+ link: https://pulsar.apache.org/
+ icon_filename: pulsar.svg
+ categories:
+ - data-collection.message-brokers
+ keywords:
+ - pulsar
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Pulsar servers.
+ method_description: |
+ It collects broker statistics using Pulsar's [Prometheus endpoint](https://pulsar.apache.org/docs/en/deploy-monitoring/#broker-stats).
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Pulsar instances running on localhost.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/pulsar.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+              default_value: 60
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8080/metrics
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+              default_value: 5
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080/metrics
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080/metrics
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: |
+ Do not validate server certificate chain and hostname.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:8080/metrics
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080/metrics
+
+ - name: remote
+ url: http://192.0.2.1:8080/metrics
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: |
+ - topic_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.
+        - subscription_* and namespace_subscription_* metrics are available when `exposeTopicLevelMetricsInPrometheus` is set to true.
+ - replication_* and namespace_replication_* metrics are available when replication is configured and `replicationMetricsEnabled` is set to true.
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: pulsar.broker_components
+ description: Broker Components
+ unit: components
+ chart_type: line
+ dimensions:
+ - name: namespaces
+ - name: topics
+ - name: subscriptions
+ - name: producers
+ - name: consumers
+ - name: pulsar.messages_rate
+ description: Messages Rate
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: publish
+ - name: dispatch
+ - name: pulsar.throughput_rate
+ description: Throughput Rate
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: publish
+ - name: dispatch
+ - name: pulsar.storage_size
+ description: Storage Size
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: pulsar.storage_operations_rate
+ description: Storage Read/Write Operations Rate
+ unit: message batches/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: pulsar.msg_backlog
+ description: Messages Backlog Size
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: backlog
+ - name: pulsar.storage_write_latency
+ description: Storage Write Latency
+ unit: entries/s
+ chart_type: stacked
+ dimensions:
+ - name: <=0.5ms
+ - name: <=1ms
+ - name: <=5ms
+                - name: <=10ms
+ - name: <=20ms
+ - name: <=50ms
+ - name: <=100ms
+ - name: <=200ms
+ - name: <=1s
+ - name: '>1s'
+ - name: pulsar.entry_size
+ description: Entry Size
+ unit: entries/s
+ chart_type: stacked
+ dimensions:
+ - name: <=128B
+ - name: <=512B
+ - name: <=1KB
+ - name: <=2KB
+ - name: <=4KB
+ - name: <=16KB
+ - name: <=100KB
+ - name: <=1MB
+ - name: '>1MB'
+ - name: pulsar.subscription_delayed
+ description: Subscriptions Delayed for Dispatching
+ unit: message batches
+ chart_type: line
+ dimensions:
+ - name: delayed
+ - name: pulsar.subscription_msg_rate_redeliver
+ description: Subscriptions Redelivered Message Rate
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: redelivered
+ - name: pulsar.subscription_blocked_on_unacked_messages
+ description: Subscriptions Blocked On Unacked Messages
+ unit: subscriptions
+ chart_type: line
+ dimensions:
+ - name: blocked
+ - name: pulsar.replication_rate
+ description: Replication Rate
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: out
+ - name: pulsar.replication_throughput_rate
+ description: Replication Throughput Rate
+ unit: KiB/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: out
+ - name: pulsar.replication_backlog
+ description: Replication Backlog
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: backlog
+ - name: namespace
+          description: These metrics refer to a namespace.
+ labels: []
+ metrics:
+ - name: pulsar.namespace_broker_components
+ description: Broker Components
+ unit: components
+ chart_type: line
+ dimensions:
+ - name: topics
+ - name: subscriptions
+ - name: producers
+ - name: consumers
+ - name: pulsar.namespace_messages_rate
+ description: Messages Rate
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: publish
+ - name: dispatch
+ - name: pulsar.namespace_throughput_rate
+ description: Throughput Rate
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: publish
+ - name: dispatch
+ - name: pulsar.namespace_storage_size
+ description: Storage Size
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: pulsar.namespace_storage_operations_rate
+ description: Storage Read/Write Operations Rate
+ unit: message batches/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: pulsar.namespace_msg_backlog
+ description: Messages Backlog Size
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: backlog
+ - name: pulsar.namespace_storage_write_latency
+ description: Storage Write Latency
+ unit: entries/s
+ chart_type: stacked
+ dimensions:
+ - name: <=0.5ms
+ - name: <=1ms
+ - name: <=5ms
+                - name: <=10ms
+ - name: <=20ms
+ - name: <=50ms
+ - name: <=100ms
+ - name: <=200ms
+ - name: <=1s
+ - name: '>1s'
+ - name: pulsar.namespace_entry_size
+ description: Entry Size
+ unit: entries/s
+ chart_type: stacked
+ dimensions:
+ - name: <=128B
+ - name: <=512B
+ - name: <=1KB
+ - name: <=2KB
+ - name: <=4KB
+ - name: <=16KB
+ - name: <=100KB
+ - name: <=1MB
+ - name: '>1MB'
+ - name: pulsar.namespace_subscription_delayed
+ description: Subscriptions Delayed for Dispatching
+ unit: message batches
+ chart_type: line
+ dimensions:
+ - name: delayed
+ - name: pulsar.namespace_subscription_msg_rate_redeliver
+ description: Subscriptions Redelivered Message Rate
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: redelivered
+ - name: pulsar.namespace_subscription_blocked_on_unacked_messages
+ description: Subscriptions Blocked On Unacked Messages
+ unit: subscriptions
+ chart_type: line
+ dimensions:
+ - name: blocked
+ - name: pulsar.namespace_replication_rate
+ description: Replication Rate
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: out
+ - name: pulsar.namespace_replication_throughput_rate
+ description: Replication Throughput Rate
+ unit: KiB/s
+ chart_type: line
+ dimensions:
+ - name: in
+ - name: out
+ - name: pulsar.namespace_replication_backlog
+ description: Replication Backlog
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: backlog
+ - name: pulsar.topic_producers
+ description: Topic Producers
+ unit: producers
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_subscriptions
+ description: Topic Subscriptions
+ unit: subscriptions
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_consumers
+ description: Topic Consumers
+ unit: consumers
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_messages_rate_in
+ description: Topic Publish Messages Rate
+ unit: publishes/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_messages_rate_out
+ description: Topic Dispatch Messages Rate
+ unit: dispatches/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_throughput_rate_in
+ description: Topic Publish Throughput Rate
+ unit: KiB/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_throughput_rate_out
+ description: Topic Dispatch Throughput Rate
+ unit: KiB/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_storage_size
+ description: Topic Storage Size
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_storage_read_rate
+ description: Topic Storage Read Rate
+ unit: message batches/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_storage_write_rate
+ description: Topic Storage Write Rate
+ unit: message batches/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_msg_backlog
+ description: Topic Messages Backlog Size
+ unit: messages
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_subscription_delayed
+ description: Topic Subscriptions Delayed for Dispatching
+ unit: message batches
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_subscription_msg_rate_redeliver
+ description: Topic Subscriptions Redelivered Message Rate
+ unit: messages/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_subscription_blocked_on_unacked_messages
+ description: Topic Subscriptions Blocked On Unacked Messages
+ unit: blocked subscriptions
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_replication_rate_in
+ description: Topic Replication Rate From Remote Cluster
+ unit: messages/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_replication_rate_out
+ description: Topic Replication Rate To Remote Cluster
+ unit: messages/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_replication_throughput_rate_in
+ description: Topic Replication Throughput Rate From Remote Cluster
+              unit: KiB/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_replication_throughput_rate_out
+ description: Topic Replication Throughput Rate To Remote Cluster
+              unit: KiB/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
+ - name: pulsar.topic_replication_backlog
+ description: Topic Replication Backlog
+ unit: messages
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per topic
diff --git a/src/go/plugin/go.d/modules/pulsar/metrics.go b/src/go/plugin/go.d/modules/pulsar/metrics.go
new file mode 100644
index 000000000..9e38e5b9a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/metrics.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+/*
+Architecture:
+ - https://pulsar.apache.org/docs/en/concepts-overview/
+
+Terminology:
+ - https://pulsar.apache.org/docs/en/reference-terminology/
+
+Deploy Monitoring:
+ - http://pulsar.apache.org/docs/en/deploy-monitoring/
+
+Metrics Reference:
+ - https://github.com/apache/pulsar/blob/master/site2/docs/reference-metrics.md
+
+REST API:
+ - http://pulsar.apache.org/admin-rest-api/?version=master
+
+Grafana Dashboards:
+ - https://github.com/apache/pulsar/tree/master/docker/grafana/dashboards
+
+Stats in the source code:
+ - https://github.com/apache/pulsar/blob/master/pulsar-common/src/main/java/org/apache/pulsar/common/policies/data/
+ - https://github.com/apache/pulsar/tree/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus
+
+If 'exposeTopicLevelMetricsInPrometheus' is false:
+ - https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/NamespaceStatsAggregator.java
+else:
+ - https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/TopicStats.java
+
+Metrics updates parameters:
+ - statsUpdateFrequencyInSecs=60
+ - statsUpdateInitialDelayInSecs=60
+
+Metrics Exposing:
+ - Namespace : 'exposeTopicLevelMetricsInPrometheus' is set to false.
+ - Replication : 'replicationMetricsEnabled' is enabled.
+ - Topic : 'exposeTopicLevelMetricsInPrometheus' is set to true.
+ - Subscription: 'exposeTopicLevelMetricsInPrometheus' is set to true
+ - Consumer : 'exposeTopicLevelMetricsInPrometheus' and 'exposeConsumerLevelMetricsInPrometheus' are set to true.
+ - Publisher   : 'exposePublisherStats' is set to true. REST API option. (/admin/v2/broker-stats/topics)
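+
+An example broker.conf fragment (the settings named above; check the Pulsar docs for your version)
+that exposes topic, subscription and replication metrics:
+  exposeTopicLevelMetricsInPrometheus=true
+  replicationMetricsEnabled=true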
+*/
+
+/*
+TODO:
+Unused broker metrics:
+ - "pulsar_storage_backlog_size" : ?? is the estimated total unconsumed or backlog size in bytes for the managed ledger, without accounting for replicas.
+ - "pulsar_storage_offloaded_size" : ?? is the size of all ledgers offloaded to 2nd tier storage.
+ - "pulsar_storage_backlog_quota_limit" : ?? is the total amount of data in this topic that limits the backlog quota.
+ - "pulsar_in_bytes_total" : use "pulsar_throughput_in" for the same data.
+ - "pulsar_in_messages_total" : use "pulsar_rate_in" for the same data.
+ - "pulsar_subscription_unacked_messages" : negative values (https://github.com/apache/pulsar/issues/6510)
+ - "pulsar_subscription_back_log" : too detailed, we have a summary per topic. Part of "pulsar_msg_backlog" (msgBacklog).
+ - "pulsar_subscription_msg_rate_out" : too detailed, we have a summary per topic. Part of "pulsar_rate_out".
+ - "pulsar_subscription_msg_throughput_out": too detailed, we have a summary per topic. Part of "pulsar_throughput_out".
+
+ + All Consumer metrics (for each namespace, topic, subscription).
+ + JVM metrics.
+ + Zookeeper metrics.
+ + Bookkeeper metrics.
+
+Hardcoded update interval? (60)
+ - pulsar_storage_write_latency_le_*
+ - pulsar_entry_size_le_*
+*/
+
+/*
+https://github.com/apache/pulsar/blob/master/pulsar-broker/src/main/java/org/apache/pulsar/broker/stats/prometheus/NamespaceStatsAggregator.java
+Zero metrics which always present (labels: cluster):
+ - "pulsar_topics_count"
+ - "pulsar_subscriptions_count"
+ - "pulsar_producers_count"
+ - "pulsar_consumers_count"
+ - "pulsar_rate_in"
+ - "pulsar_rate_out"
+ - "pulsar_throughput_in"
+ - "pulsar_throughput_out"
+ - "pulsar_storage_size"
+ - "pulsar_storage_write_rate"
+ - "pulsar_storage_read_rate"
+ - "pulsar_msg_backlog"
+*/
+
+const (
+ // Namespace metrics (labels: namespace)
+ metricPulsarTopicsCount = "pulsar_topics_count"
+ // Namespace, Topic metrics (labels: namespace || namespace, topic)
+ metricPulsarSubscriptionsCount = "pulsar_subscriptions_count"
+ metricPulsarProducersCount = "pulsar_producers_count"
+ metricPulsarConsumersCount = "pulsar_consumers_count"
+ metricPulsarRateIn = "pulsar_rate_in"
+ metricPulsarRateOut = "pulsar_rate_out"
+ metricPulsarThroughputIn = "pulsar_throughput_in"
+ metricPulsarThroughputOut = "pulsar_throughput_out"
+ metricPulsarStorageSize = "pulsar_storage_size"
+ metricPulsarStorageWriteRate = "pulsar_storage_write_rate" // exposed with labels only if there is Bookie
+ metricPulsarStorageReadRate = "pulsar_storage_read_rate" // exposed with labels only if there is Bookie
+ metricPulsarMsgBacklog = "pulsar_msg_backlog" // has 'remote_cluster' label if no topic stats
+ // pulsar_storage_write_latency_le_*
+ // pulsar_entry_size_le_*
+
+ // Subscriptions metrics (labels: namespace, topic, subscription)
+ metricPulsarSubscriptionDelayed = "pulsar_subscription_delayed" // Number of delayed messages currently being tracked
+ metricPulsarSubscriptionMsgRateRedeliver = "pulsar_subscription_msg_rate_redeliver"
+ metricPulsarSubscriptionBlockedOnUnackedMessages = "pulsar_subscription_blocked_on_unacked_messages"
+
+ // Replication metrics (labels: namespace, remote_cluster || namespace, topic, remote_cluster)
+ // Exposed only when replication is enabled.
+ metricPulsarReplicationRateIn = "pulsar_replication_rate_in"
+ metricPulsarReplicationRateOut = "pulsar_replication_rate_out"
+ metricPulsarReplicationThroughputIn = "pulsar_replication_throughput_in"
+ metricPulsarReplicationThroughputOut = "pulsar_replication_throughput_out"
+ metricPulsarReplicationBacklog = "pulsar_replication_backlog"
+)
diff --git a/src/go/plugin/go.d/modules/pulsar/pulsar.go b/src/go/plugin/go.d/modules/pulsar/pulsar.go
new file mode 100644
index 000000000..aa5ac35fc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/pulsar.go
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+import (
+ _ "embed"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("pulsar", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 60,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Pulsar {
+ return &Pulsar{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8080/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 5),
+ },
+ },
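+			// Exclude all topics by default: per-topic chart dimensions stay disabled
+			// until a topic_filter is configured.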
+ TopicFilter: matcher.SimpleExpr{
+ Includes: nil,
+ Excludes: []string{"*"},
+ },
+ },
+ once: &sync.Once{},
+ charts: summaryCharts.Copy(),
+ nsCharts: namespaceCharts.Copy(),
+ topicChartsMapping: topicChartsMapping(),
+ cache: newCache(),
+ curCache: newCache(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ TopicFilter matcher.SimpleExpr `yaml:"topic_filter,omitempty" json:"topic_filter"`
+}
+
+type Pulsar struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+ nsCharts *Charts
+
+ prom prometheus.Prometheus
+
+ topicFilter matcher.Matcher
+ cache *cache
+ curCache *cache
+ once *sync.Once
+ topicChartsMapping map[string]string
+}
+
+func (p *Pulsar) Configuration() any {
+ return p.Config
+}
+
+func (p *Pulsar) Init() error {
+ if err := p.validateConfig(); err != nil {
+ p.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prom, err := p.initPrometheusClient()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ p.prom = prom
+
+	m, err := p.initTopicFilterMatcher()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ p.topicFilter = m
+
+ return nil
+}
+
+func (p *Pulsar) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *Pulsar) Charts() *Charts {
+ return p.charts
+}
+
+func (p *Pulsar) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (p *Pulsar) Cleanup() {
+ if p.prom != nil && p.prom.HTTPClient() != nil {
+ p.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pulsar/pulsar_test.go b/src/go/plugin/go.d/modules/pulsar/pulsar_test.go
new file mode 100644
index 000000000..330656156
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/pulsar_test.go
@@ -0,0 +1,1024 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNonPulsarMetrics, _ = os.ReadFile("testdata/non-pulsar.txt")
+ dataVer250Namespaces, _ = os.ReadFile("testdata/standalone-v2.5.0-namespaces.txt")
+ dataVer250Topics, _ = os.ReadFile("testdata/standalone-v2.5.0-topics.txt")
+ dataVer250Topics2, _ = os.ReadFile("testdata/standalone-v2.5.0-topics-2.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNonPulsarMetrics": dataNonPulsarMetrics,
+ "dataVer250Namespaces": dataVer250Namespaces,
+ "dataVer250Topics": dataVer250Topics,
+ "dataVer250Topics2": dataVer250Topics2,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPulsar_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Pulsar{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPulsar_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "default": {
+ config: New().Config,
+ },
+ "empty topic filter": {
+ config: Config{HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:8080/metric"}}},
+ },
+		"bad syntax topic filter": {
+ config: Config{
+ HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:8080/metrics"}},
+ TopicFilter: matcher.SimpleExpr{Includes: []string{"+"}}},
+ wantFail: true,
+ },
+ "empty URL": {
+ config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}},
+ wantFail: true,
+ },
+ "nonexistent TLS CA": {
+ config: Config{HTTP: web.HTTP{
+ Request: web.Request{URL: "http://127.0.0.1:8080/metric"},
+ Client: web.Client{TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}}}},
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pulsar := New()
+ pulsar.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, pulsar.Init())
+ } else {
+ assert.NoError(t, pulsar.Init())
+ }
+ })
+ }
+}
+
+func TestPulsar_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestPulsar_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (*Pulsar, *httptest.Server)
+ wantFail bool
+ }{
+ "standalone v2.5.0 namespaces": {prepare: prepareClientServerStdV250Namespaces},
+ "standalone v2.5.0 topics": {prepare: prepareClientServerStdV250Topics},
+ "non pulsar": {prepare: prepareClientServerNonPulsar, wantFail: true},
+ "invalid data": {prepare: prepareClientServerInvalidData, wantFail: true},
+ "404": {prepare: prepareClientServer404, wantFail: true},
+ "connection refused": {prepare: prepareClientServerConnectionRefused, wantFail: true},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pulsar, srv := test.prepare(t)
+ defer srv.Close()
+
+ if test.wantFail {
+ assert.Error(t, pulsar.Check())
+ } else {
+ assert.NoError(t, pulsar.Check())
+ }
+ })
+ }
+}
+
+func TestPulsar_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestPulsar_Collect_ReturnsNilOnErrors(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (*Pulsar, *httptest.Server)
+ }{
+ "non pulsar": {prepare: prepareClientServerNonPulsar},
+ "invalid data": {prepare: prepareClientServerInvalidData},
+ "404": {prepare: prepareClientServer404},
+ "connection refused": {prepare: prepareClientServerConnectionRefused},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pulsar, srv := test.prepare(t)
+ defer srv.Close()
+
+ assert.Nil(t, pulsar.Collect())
+ })
+ }
+}
+
+func TestPulsar_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(*testing.T) (*Pulsar, *httptest.Server)
+ expected map[string]int64
+ }{
+ "standalone v2.5.0 namespaces": {
+ prepare: prepareClientServerStdV250Namespaces,
+ expected: expectedStandaloneV250Namespaces,
+ },
+ "standalone v2.5.0 topics": {
+ prepare: prepareClientServerStdV250Topics,
+ expected: expectedStandaloneV250Topics,
+ },
+ "standalone v2.5.0 topics filtered": {
+ prepare: prepareClientServerStdV250TopicsFiltered,
+ expected: expectedStandaloneV250TopicsFiltered,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pulsar, srv := test.prepare(t)
+ defer srv.Close()
+
+ for i := 0; i < 10; i++ {
+ _ = pulsar.Collect()
+ }
+ collected := pulsar.Collect()
+
+ require.NotNil(t, collected)
+ require.Equal(t, test.expected, collected)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, pulsar, collected)
+ })
+ }
+}
+
+func TestPulsar_Collect_RemoveAddNamespacesTopicsInRuntime(t *testing.T) {
+ pulsar, srv := prepareClientServersDynamicStdV250Topics(t)
+ defer srv.Close()
+
+ oldNsCharts := Charts{}
+
+ require.NotNil(t, pulsar.Collect())
+ oldLength := len(*pulsar.Charts())
+
+ for _, chart := range *pulsar.Charts() {
+ for ns := range pulsar.cache.namespaces {
+ if ns.name != "public/functions" && chart.Fam == "ns "+ns.name {
+ _ = oldNsCharts.Add(chart)
+ }
+ }
+ }
+
+ require.NotNil(t, pulsar.Collect())
+
+ l := oldLength + len(*pulsar.nsCharts)*2 // 2 new namespaces
+ assert.Truef(t, len(*pulsar.Charts()) == l, "expected %d charts, but got %d", l, len(*pulsar.Charts()))
+
+ for _, chart := range oldNsCharts {
+ assert.Truef(t, chart.Obsolete, "expected chart '%s' Obsolete flag is set", chart.ID)
+ for _, dim := range chart.Dims {
+ if strings.HasPrefix(chart.ID, "topic_") {
+ assert.Truef(t, dim.Obsolete, "expected chart '%s' dim '%s' Obsolete flag is set", chart.ID, dim.ID)
+ }
+ }
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, pulsar *Pulsar, collected map[string]int64) {
+ for _, chart := range *pulsar.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareClientServerStdV250Namespaces(t *testing.T) (*Pulsar, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer250Namespaces)
+ }))
+
+ pulsar := New()
+ pulsar.URL = srv.URL
+ require.NoError(t, pulsar.Init())
+
+ return pulsar, srv
+}
+
+func prepareClientServerStdV250Topics(t *testing.T) (*Pulsar, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer250Topics)
+ }))
+
+ pulsar := New()
+ pulsar.URL = srv.URL
+ require.NoError(t, pulsar.Init())
+
+ return pulsar, srv
+}
+
+func prepareClientServerStdV250TopicsFiltered(t *testing.T) (*Pulsar, *httptest.Server) {
+ t.Helper()
+ pulsar, srv := prepareClientServerStdV250Topics(t)
+ pulsar.topicFilter = matcher.FALSE()
+
+ return pulsar, srv
+}
+
+func prepareClientServersDynamicStdV250Topics(t *testing.T) (*Pulsar, *httptest.Server) {
+ t.Helper()
+ var i int
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ if i%2 == 0 {
+ _, _ = w.Write(dataVer250Topics)
+ } else {
+ _, _ = w.Write(dataVer250Topics2)
+ }
+ i++
+ }))
+
+ pulsar := New()
+ pulsar.URL = srv.URL
+ require.NoError(t, pulsar.Init())
+
+ return pulsar, srv
+}
+
+func prepareClientServerNonPulsar(t *testing.T) (*Pulsar, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataNonPulsarMetrics)
+ }))
+
+ pulsar := New()
+ pulsar.URL = srv.URL
+ require.NoError(t, pulsar.Init())
+
+ return pulsar, srv
+}
+
+func prepareClientServerInvalidData(t *testing.T) (*Pulsar, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ pulsar := New()
+ pulsar.URL = srv.URL
+ require.NoError(t, pulsar.Init())
+
+ return pulsar, srv
+}
+
+func prepareClientServer404(t *testing.T) (*Pulsar, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+
+ pulsar := New()
+ pulsar.URL = srv.URL
+ require.NoError(t, pulsar.Init())
+
+ return pulsar, srv
+}
+
+func prepareClientServerConnectionRefused(t *testing.T) (*Pulsar, *httptest.Server) {
+ t.Helper()
+ srv := httptest.NewServer(nil)
+
+ pulsar := New()
+ pulsar.URL = "http://127.0.0.1:38001/metrics"
+ require.NoError(t, pulsar.Init())
+
+ return pulsar, srv
+}
+
+var expectedStandaloneV250Namespaces = map[string]int64{
+ "pulsar_consumers_count": 21,
+ "pulsar_consumers_count_public/functions": 3,
+ "pulsar_consumers_count_sample/dev": 10,
+ "pulsar_consumers_count_sample/prod": 8,
+ "pulsar_entry_size_count": 6013,
+ "pulsar_entry_size_count_public/functions": 0,
+ "pulsar_entry_size_count_sample/dev": 3012,
+ "pulsar_entry_size_count_sample/prod": 3001,
+ "pulsar_entry_size_le_100_kb": 0,
+ "pulsar_entry_size_le_100_kb_public/functions": 0,
+ "pulsar_entry_size_le_100_kb_sample/dev": 0,
+ "pulsar_entry_size_le_100_kb_sample/prod": 0,
+ "pulsar_entry_size_le_128": 6013,
+ "pulsar_entry_size_le_128_public/functions": 0,
+ "pulsar_entry_size_le_128_sample/dev": 3012,
+ "pulsar_entry_size_le_128_sample/prod": 3001,
+ "pulsar_entry_size_le_16_kb": 0,
+ "pulsar_entry_size_le_16_kb_public/functions": 0,
+ "pulsar_entry_size_le_16_kb_sample/dev": 0,
+ "pulsar_entry_size_le_16_kb_sample/prod": 0,
+ "pulsar_entry_size_le_1_kb": 0,
+ "pulsar_entry_size_le_1_kb_public/functions": 0,
+ "pulsar_entry_size_le_1_kb_sample/dev": 0,
+ "pulsar_entry_size_le_1_kb_sample/prod": 0,
+ "pulsar_entry_size_le_1_mb": 0,
+ "pulsar_entry_size_le_1_mb_public/functions": 0,
+ "pulsar_entry_size_le_1_mb_sample/dev": 0,
+ "pulsar_entry_size_le_1_mb_sample/prod": 0,
+ "pulsar_entry_size_le_2_kb": 0,
+ "pulsar_entry_size_le_2_kb_public/functions": 0,
+ "pulsar_entry_size_le_2_kb_sample/dev": 0,
+ "pulsar_entry_size_le_2_kb_sample/prod": 0,
+ "pulsar_entry_size_le_4_kb": 0,
+ "pulsar_entry_size_le_4_kb_public/functions": 0,
+ "pulsar_entry_size_le_4_kb_sample/dev": 0,
+ "pulsar_entry_size_le_4_kb_sample/prod": 0,
+ "pulsar_entry_size_le_512": 0,
+ "pulsar_entry_size_le_512_public/functions": 0,
+ "pulsar_entry_size_le_512_sample/dev": 0,
+ "pulsar_entry_size_le_512_sample/prod": 0,
+ "pulsar_entry_size_le_overflow": 0,
+ "pulsar_entry_size_le_overflow_public/functions": 0,
+ "pulsar_entry_size_le_overflow_sample/dev": 0,
+ "pulsar_entry_size_le_overflow_sample/prod": 0,
+ "pulsar_entry_size_sum": 6013,
+ "pulsar_entry_size_sum_public/functions": 0,
+ "pulsar_entry_size_sum_sample/dev": 3012,
+ "pulsar_entry_size_sum_sample/prod": 3001,
+ "pulsar_msg_backlog": 8,
+ "pulsar_msg_backlog_public/functions": 0,
+ "pulsar_msg_backlog_sample/dev": 8,
+ "pulsar_msg_backlog_sample/prod": 0,
+ "pulsar_namespaces_count": 3,
+ "pulsar_producers_count": 10,
+ "pulsar_producers_count_public/functions": 2,
+ "pulsar_producers_count_sample/dev": 4,
+ "pulsar_producers_count_sample/prod": 4,
+ "pulsar_rate_in": 96023,
+ "pulsar_rate_in_public/functions": 0,
+ "pulsar_rate_in_sample/dev": 48004,
+ "pulsar_rate_in_sample/prod": 48019,
+ "pulsar_rate_out": 242057,
+ "pulsar_rate_out_public/functions": 0,
+ "pulsar_rate_out_sample/dev": 146018,
+ "pulsar_rate_out_sample/prod": 96039,
+ "pulsar_storage_read_rate": 0,
+ "pulsar_storage_read_rate_public/functions": 0,
+ "pulsar_storage_read_rate_sample/dev": 0,
+ "pulsar_storage_read_rate_sample/prod": 0,
+ "pulsar_storage_size": 5468424,
+ "pulsar_storage_size_public/functions": 0,
+ "pulsar_storage_size_sample/dev": 2684208,
+ "pulsar_storage_size_sample/prod": 2784216,
+ "pulsar_storage_write_latency_count": 6012,
+ "pulsar_storage_write_latency_count_public/functions": 0,
+ "pulsar_storage_write_latency_count_sample/dev": 3012,
+ "pulsar_storage_write_latency_count_sample/prod": 3000,
+ "pulsar_storage_write_latency_le_0_5": 0,
+ "pulsar_storage_write_latency_le_0_5_public/functions": 0,
+ "pulsar_storage_write_latency_le_0_5_sample/dev": 0,
+ "pulsar_storage_write_latency_le_0_5_sample/prod": 0,
+ "pulsar_storage_write_latency_le_1": 43,
+ "pulsar_storage_write_latency_le_10": 163,
+ "pulsar_storage_write_latency_le_100": 0,
+ "pulsar_storage_write_latency_le_1000": 0,
+ "pulsar_storage_write_latency_le_1000_public/functions": 0,
+ "pulsar_storage_write_latency_le_1000_sample/dev": 0,
+ "pulsar_storage_write_latency_le_1000_sample/prod": 0,
+ "pulsar_storage_write_latency_le_100_public/functions": 0,
+ "pulsar_storage_write_latency_le_100_sample/dev": 0,
+ "pulsar_storage_write_latency_le_100_sample/prod": 0,
+ "pulsar_storage_write_latency_le_10_public/functions": 0,
+ "pulsar_storage_write_latency_le_10_sample/dev": 82,
+ "pulsar_storage_write_latency_le_10_sample/prod": 81,
+ "pulsar_storage_write_latency_le_1_public/functions": 0,
+ "pulsar_storage_write_latency_le_1_sample/dev": 23,
+ "pulsar_storage_write_latency_le_1_sample/prod": 20,
+ "pulsar_storage_write_latency_le_20": 7,
+ "pulsar_storage_write_latency_le_200": 2,
+ "pulsar_storage_write_latency_le_200_public/functions": 0,
+ "pulsar_storage_write_latency_le_200_sample/dev": 1,
+ "pulsar_storage_write_latency_le_200_sample/prod": 1,
+ "pulsar_storage_write_latency_le_20_public/functions": 0,
+ "pulsar_storage_write_latency_le_20_sample/dev": 6,
+ "pulsar_storage_write_latency_le_20_sample/prod": 1,
+ "pulsar_storage_write_latency_le_5": 5797,
+ "pulsar_storage_write_latency_le_50": 0,
+ "pulsar_storage_write_latency_le_50_public/functions": 0,
+ "pulsar_storage_write_latency_le_50_sample/dev": 0,
+ "pulsar_storage_write_latency_le_50_sample/prod": 0,
+ "pulsar_storage_write_latency_le_5_public/functions": 0,
+ "pulsar_storage_write_latency_le_5_sample/dev": 2900,
+ "pulsar_storage_write_latency_le_5_sample/prod": 2897,
+ "pulsar_storage_write_latency_overflow": 0,
+ "pulsar_storage_write_latency_overflow_public/functions": 0,
+ "pulsar_storage_write_latency_overflow_sample/dev": 0,
+ "pulsar_storage_write_latency_overflow_sample/prod": 0,
+ "pulsar_storage_write_latency_sum": 6012,
+ "pulsar_storage_write_latency_sum_public/functions": 0,
+ "pulsar_storage_write_latency_sum_sample/dev": 3012,
+ "pulsar_storage_write_latency_sum_sample/prod": 3000,
+ "pulsar_storage_write_rate": 100216,
+ "pulsar_storage_write_rate_public/functions": 0,
+ "pulsar_storage_write_rate_sample/dev": 50200,
+ "pulsar_storage_write_rate_sample/prod": 50016,
+ "pulsar_subscription_delayed": 0,
+ "pulsar_subscription_delayed_public/functions": 0,
+ "pulsar_subscription_delayed_sample/dev": 0,
+ "pulsar_subscription_delayed_sample/prod": 0,
+ "pulsar_subscriptions_count": 13,
+ "pulsar_subscriptions_count_public/functions": 3,
+ "pulsar_subscriptions_count_sample/dev": 6,
+ "pulsar_subscriptions_count_sample/prod": 4,
+ "pulsar_throughput_in": 5569401,
+ "pulsar_throughput_in_public/functions": 0,
+ "pulsar_throughput_in_sample/dev": 2736243,
+ "pulsar_throughput_in_sample/prod": 2833158,
+ "pulsar_throughput_out": 13989373,
+ "pulsar_throughput_out_public/functions": 0,
+ "pulsar_throughput_out_sample/dev": 8323043,
+ "pulsar_throughput_out_sample/prod": 5666330,
+ "pulsar_topics_count": 7,
+ "pulsar_topics_count_public/functions": 3,
+ "pulsar_topics_count_sample/dev": 2,
+ "pulsar_topics_count_sample/prod": 2,
+}
+
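+// expected metrics collected from the standalone v2.5.0 topics testdata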
+var expectedStandaloneV250Topics = map[string]int64{
+ "pulsar_consumers_count": 21,
+ "pulsar_consumers_count_persistent://public/functions/assignments": 1,
+ "pulsar_consumers_count_persistent://public/functions/coordinate": 1,
+ "pulsar_consumers_count_persistent://public/functions/metadata": 1,
+ "pulsar_consumers_count_persistent://sample/dev/dev-1": 4,
+ "pulsar_consumers_count_persistent://sample/dev/dev-2": 6,
+ "pulsar_consumers_count_persistent://sample/prod/prod-1": 4,
+ "pulsar_consumers_count_persistent://sample/prod/prod-2": 4,
+ "pulsar_consumers_count_public/functions": 3,
+ "pulsar_consumers_count_sample/dev": 10,
+ "pulsar_consumers_count_sample/prod": 8,
+ "pulsar_entry_size_count": 5867,
+ "pulsar_entry_size_count_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_count_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_count_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_count_persistent://sample/dev/dev-1": 1448,
+ "pulsar_entry_size_count_persistent://sample/dev/dev-2": 1477,
+ "pulsar_entry_size_count_persistent://sample/prod/prod-1": 1469,
+ "pulsar_entry_size_count_persistent://sample/prod/prod-2": 1473,
+ "pulsar_entry_size_count_public/functions": 0,
+ "pulsar_entry_size_count_sample/dev": 2925,
+ "pulsar_entry_size_count_sample/prod": 2942,
+ "pulsar_entry_size_le_100_kb": 0,
+ "pulsar_entry_size_le_100_kb_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_le_100_kb_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_le_100_kb_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_le_100_kb_persistent://sample/dev/dev-1": 0,
+ "pulsar_entry_size_le_100_kb_persistent://sample/dev/dev-2": 0,
+ "pulsar_entry_size_le_100_kb_persistent://sample/prod/prod-1": 0,
+ "pulsar_entry_size_le_100_kb_persistent://sample/prod/prod-2": 0,
+ "pulsar_entry_size_le_100_kb_public/functions": 0,
+ "pulsar_entry_size_le_100_kb_sample/dev": 0,
+ "pulsar_entry_size_le_100_kb_sample/prod": 0,
+ "pulsar_entry_size_le_128": 5867,
+ "pulsar_entry_size_le_128_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_le_128_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_le_128_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_le_128_persistent://sample/dev/dev-1": 1448,
+ "pulsar_entry_size_le_128_persistent://sample/dev/dev-2": 1477,
+ "pulsar_entry_size_le_128_persistent://sample/prod/prod-1": 1469,
+ "pulsar_entry_size_le_128_persistent://sample/prod/prod-2": 1473,
+ "pulsar_entry_size_le_128_public/functions": 0,
+ "pulsar_entry_size_le_128_sample/dev": 2925,
+ "pulsar_entry_size_le_128_sample/prod": 2942,
+ "pulsar_entry_size_le_16_kb": 0,
+ "pulsar_entry_size_le_16_kb_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_le_16_kb_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_le_16_kb_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_le_16_kb_persistent://sample/dev/dev-1": 0,
+ "pulsar_entry_size_le_16_kb_persistent://sample/dev/dev-2": 0,
+ "pulsar_entry_size_le_16_kb_persistent://sample/prod/prod-1": 0,
+ "pulsar_entry_size_le_16_kb_persistent://sample/prod/prod-2": 0,
+ "pulsar_entry_size_le_16_kb_public/functions": 0,
+ "pulsar_entry_size_le_16_kb_sample/dev": 0,
+ "pulsar_entry_size_le_16_kb_sample/prod": 0,
+ "pulsar_entry_size_le_1_kb": 0,
+ "pulsar_entry_size_le_1_kb_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_le_1_kb_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_le_1_kb_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_le_1_kb_persistent://sample/dev/dev-1": 0,
+ "pulsar_entry_size_le_1_kb_persistent://sample/dev/dev-2": 0,
+ "pulsar_entry_size_le_1_kb_persistent://sample/prod/prod-1": 0,
+ "pulsar_entry_size_le_1_kb_persistent://sample/prod/prod-2": 0,
+ "pulsar_entry_size_le_1_kb_public/functions": 0,
+ "pulsar_entry_size_le_1_kb_sample/dev": 0,
+ "pulsar_entry_size_le_1_kb_sample/prod": 0,
+ "pulsar_entry_size_le_1_mb": 0,
+ "pulsar_entry_size_le_1_mb_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_le_1_mb_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_le_1_mb_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_le_1_mb_persistent://sample/dev/dev-1": 0,
+ "pulsar_entry_size_le_1_mb_persistent://sample/dev/dev-2": 0,
+ "pulsar_entry_size_le_1_mb_persistent://sample/prod/prod-1": 0,
+ "pulsar_entry_size_le_1_mb_persistent://sample/prod/prod-2": 0,
+ "pulsar_entry_size_le_1_mb_public/functions": 0,
+ "pulsar_entry_size_le_1_mb_sample/dev": 0,
+ "pulsar_entry_size_le_1_mb_sample/prod": 0,
+ "pulsar_entry_size_le_2_kb": 0,
+ "pulsar_entry_size_le_2_kb_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_le_2_kb_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_le_2_kb_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_le_2_kb_persistent://sample/dev/dev-1": 0,
+ "pulsar_entry_size_le_2_kb_persistent://sample/dev/dev-2": 0,
+ "pulsar_entry_size_le_2_kb_persistent://sample/prod/prod-1": 0,
+ "pulsar_entry_size_le_2_kb_persistent://sample/prod/prod-2": 0,
+ "pulsar_entry_size_le_2_kb_public/functions": 0,
+ "pulsar_entry_size_le_2_kb_sample/dev": 0,
+ "pulsar_entry_size_le_2_kb_sample/prod": 0,
+ "pulsar_entry_size_le_4_kb": 0,
+ "pulsar_entry_size_le_4_kb_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_le_4_kb_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_le_4_kb_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_le_4_kb_persistent://sample/dev/dev-1": 0,
+ "pulsar_entry_size_le_4_kb_persistent://sample/dev/dev-2": 0,
+ "pulsar_entry_size_le_4_kb_persistent://sample/prod/prod-1": 0,
+ "pulsar_entry_size_le_4_kb_persistent://sample/prod/prod-2": 0,
+ "pulsar_entry_size_le_4_kb_public/functions": 0,
+ "pulsar_entry_size_le_4_kb_sample/dev": 0,
+ "pulsar_entry_size_le_4_kb_sample/prod": 0,
+ "pulsar_entry_size_le_512": 0,
+ "pulsar_entry_size_le_512_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_le_512_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_le_512_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_le_512_persistent://sample/dev/dev-1": 0,
+ "pulsar_entry_size_le_512_persistent://sample/dev/dev-2": 0,
+ "pulsar_entry_size_le_512_persistent://sample/prod/prod-1": 0,
+ "pulsar_entry_size_le_512_persistent://sample/prod/prod-2": 0,
+ "pulsar_entry_size_le_512_public/functions": 0,
+ "pulsar_entry_size_le_512_sample/dev": 0,
+ "pulsar_entry_size_le_512_sample/prod": 0,
+ "pulsar_entry_size_le_overflow": 0,
+ "pulsar_entry_size_le_overflow_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_le_overflow_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_le_overflow_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_le_overflow_persistent://sample/dev/dev-1": 0,
+ "pulsar_entry_size_le_overflow_persistent://sample/dev/dev-2": 0,
+ "pulsar_entry_size_le_overflow_persistent://sample/prod/prod-1": 0,
+ "pulsar_entry_size_le_overflow_persistent://sample/prod/prod-2": 0,
+ "pulsar_entry_size_le_overflow_public/functions": 0,
+ "pulsar_entry_size_le_overflow_sample/dev": 0,
+ "pulsar_entry_size_le_overflow_sample/prod": 0,
+ "pulsar_entry_size_sum": 5867,
+ "pulsar_entry_size_sum_persistent://public/functions/assignments": 0,
+ "pulsar_entry_size_sum_persistent://public/functions/coordinate": 0,
+ "pulsar_entry_size_sum_persistent://public/functions/metadata": 0,
+ "pulsar_entry_size_sum_persistent://sample/dev/dev-1": 1448,
+ "pulsar_entry_size_sum_persistent://sample/dev/dev-2": 1477,
+ "pulsar_entry_size_sum_persistent://sample/prod/prod-1": 1469,
+ "pulsar_entry_size_sum_persistent://sample/prod/prod-2": 1473,
+ "pulsar_entry_size_sum_public/functions": 0,
+ "pulsar_entry_size_sum_sample/dev": 2925,
+ "pulsar_entry_size_sum_sample/prod": 2942,
+ "pulsar_msg_backlog": 0,
+ "pulsar_msg_backlog_persistent://public/functions/assignments": 0,
+ "pulsar_msg_backlog_persistent://public/functions/coordinate": 0,
+ "pulsar_msg_backlog_persistent://public/functions/metadata": 0,
+ "pulsar_msg_backlog_persistent://sample/dev/dev-1": 0,
+ "pulsar_msg_backlog_persistent://sample/dev/dev-2": 0,
+ "pulsar_msg_backlog_persistent://sample/prod/prod-1": 0,
+ "pulsar_msg_backlog_persistent://sample/prod/prod-2": 0,
+ "pulsar_msg_backlog_public/functions": 0,
+ "pulsar_msg_backlog_sample/dev": 0,
+ "pulsar_msg_backlog_sample/prod": 0,
+ "pulsar_namespaces_count": 3,
+ "pulsar_producers_count": 10,
+ "pulsar_producers_count_persistent://public/functions/assignments": 1,
+ "pulsar_producers_count_persistent://public/functions/coordinate": 0,
+ "pulsar_producers_count_persistent://public/functions/metadata": 1,
+ "pulsar_producers_count_persistent://sample/dev/dev-1": 2,
+ "pulsar_producers_count_persistent://sample/dev/dev-2": 2,
+ "pulsar_producers_count_persistent://sample/prod/prod-1": 2,
+ "pulsar_producers_count_persistent://sample/prod/prod-2": 2,
+ "pulsar_producers_count_public/functions": 2,
+ "pulsar_producers_count_sample/dev": 4,
+ "pulsar_producers_count_sample/prod": 4,
+ "pulsar_rate_in": 102064,
+ "pulsar_rate_in_persistent://public/functions/assignments": 0,
+ "pulsar_rate_in_persistent://public/functions/coordinate": 0,
+ "pulsar_rate_in_persistent://public/functions/metadata": 0,
+ "pulsar_rate_in_persistent://sample/dev/dev-1": 25013,
+ "pulsar_rate_in_persistent://sample/dev/dev-2": 25014,
+ "pulsar_rate_in_persistent://sample/prod/prod-1": 26019,
+ "pulsar_rate_in_persistent://sample/prod/prod-2": 26018,
+ "pulsar_rate_in_public/functions": 0,
+ "pulsar_rate_in_sample/dev": 50027,
+ "pulsar_rate_in_sample/prod": 52037,
+ "pulsar_rate_out": 254162,
+ "pulsar_rate_out_persistent://public/functions/assignments": 0,
+ "pulsar_rate_out_persistent://public/functions/coordinate": 0,
+ "pulsar_rate_out_persistent://public/functions/metadata": 0,
+ "pulsar_rate_out_persistent://sample/dev/dev-1": 50027,
+ "pulsar_rate_out_persistent://sample/dev/dev-2": 100060,
+ "pulsar_rate_out_persistent://sample/prod/prod-1": 52038,
+ "pulsar_rate_out_persistent://sample/prod/prod-2": 52037,
+ "pulsar_rate_out_public/functions": 0,
+ "pulsar_rate_out_sample/dev": 150087,
+ "pulsar_rate_out_sample/prod": 104075,
+ "pulsar_storage_size": 8112300,
+ "pulsar_storage_size_persistent://public/functions/assignments": 0,
+ "pulsar_storage_size_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_size_persistent://public/functions/metadata": 0,
+ "pulsar_storage_size_persistent://sample/dev/dev-1": 1951642,
+ "pulsar_storage_size_persistent://sample/dev/dev-2": 2029478,
+ "pulsar_storage_size_persistent://sample/prod/prod-1": 2022420,
+ "pulsar_storage_size_persistent://sample/prod/prod-2": 2108760,
+ "pulsar_storage_size_public/functions": 0,
+ "pulsar_storage_size_sample/dev": 3981120,
+ "pulsar_storage_size_sample/prod": 4131180,
+ "pulsar_storage_write_latency_count": 5867,
+ "pulsar_storage_write_latency_count_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_count_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_count_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_count_persistent://sample/dev/dev-1": 1448,
+ "pulsar_storage_write_latency_count_persistent://sample/dev/dev-2": 1477,
+ "pulsar_storage_write_latency_count_persistent://sample/prod/prod-1": 1469,
+ "pulsar_storage_write_latency_count_persistent://sample/prod/prod-2": 1473,
+ "pulsar_storage_write_latency_count_public/functions": 0,
+ "pulsar_storage_write_latency_count_sample/dev": 2925,
+ "pulsar_storage_write_latency_count_sample/prod": 2942,
+ "pulsar_storage_write_latency_le_0_5": 0,
+ "pulsar_storage_write_latency_le_0_5_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_le_0_5_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_le_0_5_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_le_0_5_persistent://sample/dev/dev-1": 0,
+ "pulsar_storage_write_latency_le_0_5_persistent://sample/dev/dev-2": 0,
+ "pulsar_storage_write_latency_le_0_5_persistent://sample/prod/prod-1": 0,
+ "pulsar_storage_write_latency_le_0_5_persistent://sample/prod/prod-2": 0,
+ "pulsar_storage_write_latency_le_0_5_public/functions": 0,
+ "pulsar_storage_write_latency_le_0_5_sample/dev": 0,
+ "pulsar_storage_write_latency_le_0_5_sample/prod": 0,
+ "pulsar_storage_write_latency_le_1": 41,
+ "pulsar_storage_write_latency_le_10": 341,
+ "pulsar_storage_write_latency_le_100": 3,
+ "pulsar_storage_write_latency_le_1000": 0,
+ "pulsar_storage_write_latency_le_1000_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_le_1000_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_le_1000_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_le_1000_persistent://sample/dev/dev-1": 0,
+ "pulsar_storage_write_latency_le_1000_persistent://sample/dev/dev-2": 0,
+ "pulsar_storage_write_latency_le_1000_persistent://sample/prod/prod-1": 0,
+ "pulsar_storage_write_latency_le_1000_persistent://sample/prod/prod-2": 0,
+ "pulsar_storage_write_latency_le_1000_public/functions": 0,
+ "pulsar_storage_write_latency_le_1000_sample/dev": 0,
+ "pulsar_storage_write_latency_le_1000_sample/prod": 0,
+ "pulsar_storage_write_latency_le_100_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_le_100_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_le_100_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_le_100_persistent://sample/dev/dev-1": 0,
+ "pulsar_storage_write_latency_le_100_persistent://sample/dev/dev-2": 1,
+ "pulsar_storage_write_latency_le_100_persistent://sample/prod/prod-1": 1,
+ "pulsar_storage_write_latency_le_100_persistent://sample/prod/prod-2": 1,
+ "pulsar_storage_write_latency_le_100_public/functions": 0,
+ "pulsar_storage_write_latency_le_100_sample/dev": 1,
+ "pulsar_storage_write_latency_le_100_sample/prod": 2,
+ "pulsar_storage_write_latency_le_10_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_le_10_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_le_10_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_le_10_persistent://sample/dev/dev-1": 95,
+ "pulsar_storage_write_latency_le_10_persistent://sample/dev/dev-2": 82,
+ "pulsar_storage_write_latency_le_10_persistent://sample/prod/prod-1": 84,
+ "pulsar_storage_write_latency_le_10_persistent://sample/prod/prod-2": 80,
+ "pulsar_storage_write_latency_le_10_public/functions": 0,
+ "pulsar_storage_write_latency_le_10_sample/dev": 177,
+ "pulsar_storage_write_latency_le_10_sample/prod": 164,
+ "pulsar_storage_write_latency_le_1_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_le_1_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_le_1_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_le_1_persistent://sample/dev/dev-1": 10,
+ "pulsar_storage_write_latency_le_1_persistent://sample/dev/dev-2": 15,
+ "pulsar_storage_write_latency_le_1_persistent://sample/prod/prod-1": 7,
+ "pulsar_storage_write_latency_le_1_persistent://sample/prod/prod-2": 9,
+ "pulsar_storage_write_latency_le_1_public/functions": 0,
+ "pulsar_storage_write_latency_le_1_sample/dev": 25,
+ "pulsar_storage_write_latency_le_1_sample/prod": 16,
+ "pulsar_storage_write_latency_le_20": 114,
+ "pulsar_storage_write_latency_le_200": 0,
+ "pulsar_storage_write_latency_le_200_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_le_200_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_le_200_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_le_200_persistent://sample/dev/dev-1": 0,
+ "pulsar_storage_write_latency_le_200_persistent://sample/dev/dev-2": 0,
+ "pulsar_storage_write_latency_le_200_persistent://sample/prod/prod-1": 0,
+ "pulsar_storage_write_latency_le_200_persistent://sample/prod/prod-2": 0,
+ "pulsar_storage_write_latency_le_200_public/functions": 0,
+ "pulsar_storage_write_latency_le_200_sample/dev": 0,
+ "pulsar_storage_write_latency_le_200_sample/prod": 0,
+ "pulsar_storage_write_latency_le_20_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_le_20_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_le_20_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_le_20_persistent://sample/dev/dev-1": 26,
+ "pulsar_storage_write_latency_le_20_persistent://sample/dev/dev-2": 28,
+ "pulsar_storage_write_latency_le_20_persistent://sample/prod/prod-1": 26,
+ "pulsar_storage_write_latency_le_20_persistent://sample/prod/prod-2": 34,
+ "pulsar_storage_write_latency_le_20_public/functions": 0,
+ "pulsar_storage_write_latency_le_20_sample/dev": 54,
+ "pulsar_storage_write_latency_le_20_sample/prod": 60,
+ "pulsar_storage_write_latency_le_5": 5328,
+ "pulsar_storage_write_latency_le_50": 40,
+ "pulsar_storage_write_latency_le_50_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_le_50_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_le_50_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_le_50_persistent://sample/dev/dev-1": 9,
+ "pulsar_storage_write_latency_le_50_persistent://sample/dev/dev-2": 9,
+ "pulsar_storage_write_latency_le_50_persistent://sample/prod/prod-1": 12,
+ "pulsar_storage_write_latency_le_50_persistent://sample/prod/prod-2": 10,
+ "pulsar_storage_write_latency_le_50_public/functions": 0,
+ "pulsar_storage_write_latency_le_50_sample/dev": 18,
+ "pulsar_storage_write_latency_le_50_sample/prod": 22,
+ "pulsar_storage_write_latency_le_5_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_le_5_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_le_5_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_le_5_persistent://sample/dev/dev-1": 1308,
+ "pulsar_storage_write_latency_le_5_persistent://sample/dev/dev-2": 1342,
+ "pulsar_storage_write_latency_le_5_persistent://sample/prod/prod-1": 1339,
+ "pulsar_storage_write_latency_le_5_persistent://sample/prod/prod-2": 1339,
+ "pulsar_storage_write_latency_le_5_public/functions": 0,
+ "pulsar_storage_write_latency_le_5_sample/dev": 2650,
+ "pulsar_storage_write_latency_le_5_sample/prod": 2678,
+ "pulsar_storage_write_latency_overflow": 0,
+ "pulsar_storage_write_latency_overflow_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_overflow_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_overflow_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_overflow_persistent://sample/dev/dev-1": 0,
+ "pulsar_storage_write_latency_overflow_persistent://sample/dev/dev-2": 0,
+ "pulsar_storage_write_latency_overflow_persistent://sample/prod/prod-1": 0,
+ "pulsar_storage_write_latency_overflow_persistent://sample/prod/prod-2": 0,
+ "pulsar_storage_write_latency_overflow_public/functions": 0,
+ "pulsar_storage_write_latency_overflow_sample/dev": 0,
+ "pulsar_storage_write_latency_overflow_sample/prod": 0,
+ "pulsar_storage_write_latency_sum": 5867,
+ "pulsar_storage_write_latency_sum_persistent://public/functions/assignments": 0,
+ "pulsar_storage_write_latency_sum_persistent://public/functions/coordinate": 0,
+ "pulsar_storage_write_latency_sum_persistent://public/functions/metadata": 0,
+ "pulsar_storage_write_latency_sum_persistent://sample/dev/dev-1": 1448,
+ "pulsar_storage_write_latency_sum_persistent://sample/dev/dev-2": 1477,
+ "pulsar_storage_write_latency_sum_persistent://sample/prod/prod-1": 1469,
+ "pulsar_storage_write_latency_sum_persistent://sample/prod/prod-2": 1473,
+ "pulsar_storage_write_latency_sum_public/functions": 0,
+ "pulsar_storage_write_latency_sum_sample/dev": 2925,
+ "pulsar_storage_write_latency_sum_sample/prod": 2942,
+ "pulsar_subscription_blocked_on_unacked_messages": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_persistent://public/functions/assignments": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_persistent://public/functions/coordinate": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_persistent://public/functions/metadata": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_persistent://sample/dev/dev-1": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_persistent://sample/dev/dev-2": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_persistent://sample/prod/prod-1": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_persistent://sample/prod/prod-2": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_public/functions": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_sample/dev": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_sample/prod": 0,
+ "pulsar_subscription_delayed": 0,
+ "pulsar_subscription_delayed_persistent://public/functions/assignments": 0,
+ "pulsar_subscription_delayed_persistent://public/functions/coordinate": 0,
+ "pulsar_subscription_delayed_persistent://public/functions/metadata": 0,
+ "pulsar_subscription_delayed_persistent://sample/dev/dev-1": 0,
+ "pulsar_subscription_delayed_persistent://sample/dev/dev-2": 0,
+ "pulsar_subscription_delayed_persistent://sample/prod/prod-1": 0,
+ "pulsar_subscription_delayed_persistent://sample/prod/prod-2": 0,
+ "pulsar_subscription_delayed_public/functions": 0,
+ "pulsar_subscription_delayed_sample/dev": 0,
+ "pulsar_subscription_delayed_sample/prod": 0,
+ "pulsar_subscription_msg_rate_redeliver": 0,
+ "pulsar_subscription_msg_rate_redeliver_persistent://public/functions/assignments": 0,
+ "pulsar_subscription_msg_rate_redeliver_persistent://public/functions/coordinate": 0,
+ "pulsar_subscription_msg_rate_redeliver_persistent://public/functions/metadata": 0,
+ "pulsar_subscription_msg_rate_redeliver_persistent://sample/dev/dev-1": 0,
+ "pulsar_subscription_msg_rate_redeliver_persistent://sample/dev/dev-2": 0,
+ "pulsar_subscription_msg_rate_redeliver_persistent://sample/prod/prod-1": 0,
+ "pulsar_subscription_msg_rate_redeliver_persistent://sample/prod/prod-2": 0,
+ "pulsar_subscription_msg_rate_redeliver_public/functions": 0,
+ "pulsar_subscription_msg_rate_redeliver_sample/dev": 0,
+ "pulsar_subscription_msg_rate_redeliver_sample/prod": 0,
+ "pulsar_subscriptions_count": 13,
+ "pulsar_subscriptions_count_persistent://public/functions/assignments": 1,
+ "pulsar_subscriptions_count_persistent://public/functions/coordinate": 1,
+ "pulsar_subscriptions_count_persistent://public/functions/metadata": 1,
+ "pulsar_subscriptions_count_persistent://sample/dev/dev-1": 2,
+ "pulsar_subscriptions_count_persistent://sample/dev/dev-2": 4,
+ "pulsar_subscriptions_count_persistent://sample/prod/prod-1": 2,
+ "pulsar_subscriptions_count_persistent://sample/prod/prod-2": 2,
+ "pulsar_subscriptions_count_public/functions": 3,
+ "pulsar_subscriptions_count_sample/dev": 6,
+ "pulsar_subscriptions_count_sample/prod": 4,
+ "pulsar_throughput_in": 6023912,
+ "pulsar_throughput_in_persistent://public/functions/assignments": 0,
+ "pulsar_throughput_in_persistent://public/functions/coordinate": 0,
+ "pulsar_throughput_in_persistent://public/functions/metadata": 0,
+ "pulsar_throughput_in_persistent://sample/dev/dev-1": 1450789,
+ "pulsar_throughput_in_persistent://sample/dev/dev-2": 1450862,
+ "pulsar_throughput_in_persistent://sample/prod/prod-1": 1561151,
+ "pulsar_throughput_in_persistent://sample/prod/prod-2": 1561110,
+ "pulsar_throughput_in_public/functions": 0,
+ "pulsar_throughput_in_sample/dev": 2901651,
+ "pulsar_throughput_in_sample/prod": 3122261,
+ "pulsar_throughput_out": 14949677,
+ "pulsar_throughput_out_persistent://public/functions/assignments": 0,
+ "pulsar_throughput_out_persistent://public/functions/coordinate": 0,
+ "pulsar_throughput_out_persistent://public/functions/metadata": 0,
+ "pulsar_throughput_out_persistent://sample/dev/dev-1": 2901607,
+ "pulsar_throughput_out_persistent://sample/dev/dev-2": 5803500,
+ "pulsar_throughput_out_persistent://sample/prod/prod-1": 3122322,
+ "pulsar_throughput_out_persistent://sample/prod/prod-2": 3122248,
+ "pulsar_throughput_out_public/functions": 0,
+ "pulsar_throughput_out_sample/dev": 8705107,
+ "pulsar_throughput_out_sample/prod": 6244570,
+ "pulsar_topics_count": 14,
+ "pulsar_topics_count_public/functions": 5,
+ "pulsar_topics_count_sample/dev": 2,
+ "pulsar_topics_count_sample/prod": 7,
+}
+
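+// expected metrics for the standalone v2.5.0 topics testdata with the topic filter applied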
+var expectedStandaloneV250TopicsFiltered = map[string]int64{
+ "pulsar_consumers_count": 21,
+ "pulsar_consumers_count_public/functions": 3,
+ "pulsar_consumers_count_sample/dev": 10,
+ "pulsar_consumers_count_sample/prod": 8,
+ "pulsar_entry_size_count": 5867,
+ "pulsar_entry_size_count_public/functions": 0,
+ "pulsar_entry_size_count_sample/dev": 2925,
+ "pulsar_entry_size_count_sample/prod": 2942,
+ "pulsar_entry_size_le_100_kb": 0,
+ "pulsar_entry_size_le_100_kb_public/functions": 0,
+ "pulsar_entry_size_le_100_kb_sample/dev": 0,
+ "pulsar_entry_size_le_100_kb_sample/prod": 0,
+ "pulsar_entry_size_le_128": 5867,
+ "pulsar_entry_size_le_128_public/functions": 0,
+ "pulsar_entry_size_le_128_sample/dev": 2925,
+ "pulsar_entry_size_le_128_sample/prod": 2942,
+ "pulsar_entry_size_le_16_kb": 0,
+ "pulsar_entry_size_le_16_kb_public/functions": 0,
+ "pulsar_entry_size_le_16_kb_sample/dev": 0,
+ "pulsar_entry_size_le_16_kb_sample/prod": 0,
+ "pulsar_entry_size_le_1_kb": 0,
+ "pulsar_entry_size_le_1_kb_public/functions": 0,
+ "pulsar_entry_size_le_1_kb_sample/dev": 0,
+ "pulsar_entry_size_le_1_kb_sample/prod": 0,
+ "pulsar_entry_size_le_1_mb": 0,
+ "pulsar_entry_size_le_1_mb_public/functions": 0,
+ "pulsar_entry_size_le_1_mb_sample/dev": 0,
+ "pulsar_entry_size_le_1_mb_sample/prod": 0,
+ "pulsar_entry_size_le_2_kb": 0,
+ "pulsar_entry_size_le_2_kb_public/functions": 0,
+ "pulsar_entry_size_le_2_kb_sample/dev": 0,
+ "pulsar_entry_size_le_2_kb_sample/prod": 0,
+ "pulsar_entry_size_le_4_kb": 0,
+ "pulsar_entry_size_le_4_kb_public/functions": 0,
+ "pulsar_entry_size_le_4_kb_sample/dev": 0,
+ "pulsar_entry_size_le_4_kb_sample/prod": 0,
+ "pulsar_entry_size_le_512": 0,
+ "pulsar_entry_size_le_512_public/functions": 0,
+ "pulsar_entry_size_le_512_sample/dev": 0,
+ "pulsar_entry_size_le_512_sample/prod": 0,
+ "pulsar_entry_size_le_overflow": 0,
+ "pulsar_entry_size_le_overflow_public/functions": 0,
+ "pulsar_entry_size_le_overflow_sample/dev": 0,
+ "pulsar_entry_size_le_overflow_sample/prod": 0,
+ "pulsar_entry_size_sum": 5867,
+ "pulsar_entry_size_sum_public/functions": 0,
+ "pulsar_entry_size_sum_sample/dev": 2925,
+ "pulsar_entry_size_sum_sample/prod": 2942,
+ "pulsar_msg_backlog": 0,
+ "pulsar_msg_backlog_public/functions": 0,
+ "pulsar_msg_backlog_sample/dev": 0,
+ "pulsar_msg_backlog_sample/prod": 0,
+ "pulsar_namespaces_count": 3,
+ "pulsar_producers_count": 10,
+ "pulsar_producers_count_public/functions": 2,
+ "pulsar_producers_count_sample/dev": 4,
+ "pulsar_producers_count_sample/prod": 4,
+ "pulsar_rate_in": 102064,
+ "pulsar_rate_in_public/functions": 0,
+ "pulsar_rate_in_sample/dev": 50027,
+ "pulsar_rate_in_sample/prod": 52037,
+ "pulsar_rate_out": 254162,
+ "pulsar_rate_out_public/functions": 0,
+ "pulsar_rate_out_sample/dev": 150087,
+ "pulsar_rate_out_sample/prod": 104075,
+ "pulsar_storage_size": 8112300,
+ "pulsar_storage_size_public/functions": 0,
+ "pulsar_storage_size_sample/dev": 3981120,
+ "pulsar_storage_size_sample/prod": 4131180,
+ "pulsar_storage_write_latency_count": 5867,
+ "pulsar_storage_write_latency_count_public/functions": 0,
+ "pulsar_storage_write_latency_count_sample/dev": 2925,
+ "pulsar_storage_write_latency_count_sample/prod": 2942,
+ "pulsar_storage_write_latency_le_0_5": 0,
+ "pulsar_storage_write_latency_le_0_5_public/functions": 0,
+ "pulsar_storage_write_latency_le_0_5_sample/dev": 0,
+ "pulsar_storage_write_latency_le_0_5_sample/prod": 0,
+ "pulsar_storage_write_latency_le_1": 41,
+ "pulsar_storage_write_latency_le_10": 341,
+ "pulsar_storage_write_latency_le_100": 3,
+ "pulsar_storage_write_latency_le_1000": 0,
+ "pulsar_storage_write_latency_le_1000_public/functions": 0,
+ "pulsar_storage_write_latency_le_1000_sample/dev": 0,
+ "pulsar_storage_write_latency_le_1000_sample/prod": 0,
+ "pulsar_storage_write_latency_le_100_public/functions": 0,
+ "pulsar_storage_write_latency_le_100_sample/dev": 1,
+ "pulsar_storage_write_latency_le_100_sample/prod": 2,
+ "pulsar_storage_write_latency_le_10_public/functions": 0,
+ "pulsar_storage_write_latency_le_10_sample/dev": 177,
+ "pulsar_storage_write_latency_le_10_sample/prod": 164,
+ "pulsar_storage_write_latency_le_1_public/functions": 0,
+ "pulsar_storage_write_latency_le_1_sample/dev": 25,
+ "pulsar_storage_write_latency_le_1_sample/prod": 16,
+ "pulsar_storage_write_latency_le_20": 114,
+ "pulsar_storage_write_latency_le_200": 0,
+ "pulsar_storage_write_latency_le_200_public/functions": 0,
+ "pulsar_storage_write_latency_le_200_sample/dev": 0,
+ "pulsar_storage_write_latency_le_200_sample/prod": 0,
+ "pulsar_storage_write_latency_le_20_public/functions": 0,
+ "pulsar_storage_write_latency_le_20_sample/dev": 54,
+ "pulsar_storage_write_latency_le_20_sample/prod": 60,
+ "pulsar_storage_write_latency_le_5": 5328,
+ "pulsar_storage_write_latency_le_50": 40,
+ "pulsar_storage_write_latency_le_50_public/functions": 0,
+ "pulsar_storage_write_latency_le_50_sample/dev": 18,
+ "pulsar_storage_write_latency_le_50_sample/prod": 22,
+ "pulsar_storage_write_latency_le_5_public/functions": 0,
+ "pulsar_storage_write_latency_le_5_sample/dev": 2650,
+ "pulsar_storage_write_latency_le_5_sample/prod": 2678,
+ "pulsar_storage_write_latency_overflow": 0,
+ "pulsar_storage_write_latency_overflow_public/functions": 0,
+ "pulsar_storage_write_latency_overflow_sample/dev": 0,
+ "pulsar_storage_write_latency_overflow_sample/prod": 0,
+ "pulsar_storage_write_latency_sum": 5867,
+ "pulsar_storage_write_latency_sum_public/functions": 0,
+ "pulsar_storage_write_latency_sum_sample/dev": 2925,
+ "pulsar_storage_write_latency_sum_sample/prod": 2942,
+ "pulsar_subscription_blocked_on_unacked_messages": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_public/functions": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_sample/dev": 0,
+ "pulsar_subscription_blocked_on_unacked_messages_sample/prod": 0,
+ "pulsar_subscription_delayed": 0,
+ "pulsar_subscription_delayed_public/functions": 0,
+ "pulsar_subscription_delayed_sample/dev": 0,
+ "pulsar_subscription_delayed_sample/prod": 0,
+ "pulsar_subscription_msg_rate_redeliver": 0,
+ "pulsar_subscription_msg_rate_redeliver_public/functions": 0,
+ "pulsar_subscription_msg_rate_redeliver_sample/dev": 0,
+ "pulsar_subscription_msg_rate_redeliver_sample/prod": 0,
+ "pulsar_subscriptions_count": 13,
+ "pulsar_subscriptions_count_public/functions": 3,
+ "pulsar_subscriptions_count_sample/dev": 6,
+ "pulsar_subscriptions_count_sample/prod": 4,
+ "pulsar_throughput_in": 6023912,
+ "pulsar_throughput_in_public/functions": 0,
+ "pulsar_throughput_in_sample/dev": 2901651,
+ "pulsar_throughput_in_sample/prod": 3122261,
+ "pulsar_throughput_out": 14949677,
+ "pulsar_throughput_out_public/functions": 0,
+ "pulsar_throughput_out_sample/dev": 8705107,
+ "pulsar_throughput_out_sample/prod": 6244570,
+ "pulsar_topics_count": 14,
+ "pulsar_topics_count_public/functions": 5,
+ "pulsar_topics_count_sample/dev": 2,
+ "pulsar_topics_count_sample/prod": 7,
+}
diff --git a/src/go/plugin/go.d/modules/pulsar/testdata/config.json b/src/go/plugin/go.d/modules/pulsar/testdata/config.json
new file mode 100644
index 000000000..ab4f38fe0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/testdata/config.json
@@ -0,0 +1,28 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "topic_filter": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/src/go/plugin/go.d/modules/pulsar/testdata/config.yaml b/src/go/plugin/go.d/modules/pulsar/testdata/config.yaml
new file mode 100644
index 000000000..f2645d9e9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/testdata/config.yaml
@@ -0,0 +1,22 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+topic_filter:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/src/go/plugin/go.d/modules/pulsar/testdata/non-pulsar.txt b/src/go/plugin/go.d/modules/pulsar/testdata/non-pulsar.txt
new file mode 100644
index 000000000..f5f0ae082
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/testdata/non-pulsar.txt
@@ -0,0 +1,27 @@
+# HELP wmi_os_process_memory_limix_bytes OperatingSystem.MaxProcessMemorySize
+# TYPE wmi_os_process_memory_limix_bytes gauge
+wmi_os_process_memory_limix_bytes 1.40737488224256e+14
+# HELP wmi_os_processes OperatingSystem.NumberOfProcesses
+# TYPE wmi_os_processes gauge
+wmi_os_processes 124
+# HELP wmi_os_processes_limit OperatingSystem.MaxNumberOfProcesses
+# TYPE wmi_os_processes_limit gauge
+wmi_os_processes_limit 4.294967295e+09
+# HELP wmi_os_time OperatingSystem.LocalDateTime
+# TYPE wmi_os_time gauge
+wmi_os_time 1.57804974e+09
+# HELP wmi_os_timezone OperatingSystem.LocalDateTime
+# TYPE wmi_os_timezone gauge
+wmi_os_timezone{timezone="MSK"} 1
+# HELP wmi_os_users OperatingSystem.NumberOfUsers
+# TYPE wmi_os_users gauge
+wmi_os_users 2
+# HELP wmi_os_virtual_memory_bytes OperatingSystem.TotalVirtualMemorySize
+# TYPE wmi_os_virtual_memory_bytes gauge
+wmi_os_virtual_memory_bytes 5.770891264e+09
+# HELP wmi_os_virtual_memory_free_bytes OperatingSystem.FreeVirtualMemory
+# TYPE wmi_os_virtual_memory_free_bytes gauge
+wmi_os_virtual_memory_free_bytes 3.76489984e+09
+# HELP wmi_os_visible_memory_bytes OperatingSystem.TotalVisibleMemorySize
+# TYPE wmi_os_visible_memory_bytes gauge
+wmi_os_visible_memory_bytes 4.294496256e+09
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-namespaces.txt b/src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-namespaces.txt
new file mode 100644
index 000000000..bbc3de4a0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-namespaces.txt
@@ -0,0 +1,500 @@
+# TYPE zk_read_latency summary
+zk_read_latency{cluster="standalone",quantile="0.5"} NaN
+zk_read_latency{cluster="standalone",quantile="0.75"} NaN
+zk_read_latency{cluster="standalone",quantile="0.95"} NaN
+zk_read_latency{cluster="standalone",quantile="0.99"} NaN
+zk_read_latency{cluster="standalone",quantile="0.999"} NaN
+zk_read_latency{cluster="standalone",quantile="0.9999"} NaN
+zk_read_latency_count{cluster="standalone"} 0.0
+zk_read_latency_sum{cluster="standalone"} 0.0
+# TYPE zk_write_latency summary
+zk_write_latency{cluster="standalone",quantile="0.5"} NaN
+zk_write_latency{cluster="standalone",quantile="0.75"} NaN
+zk_write_latency{cluster="standalone",quantile="0.95"} NaN
+zk_write_latency{cluster="standalone",quantile="0.99"} NaN
+zk_write_latency{cluster="standalone",quantile="0.999"} NaN
+zk_write_latency{cluster="standalone",quantile="0.9999"} NaN
+zk_write_latency_count{cluster="standalone"} 0.0
+zk_write_latency_sum{cluster="standalone"} 0.0
+# TYPE jvm_memory_direct_bytes_max gauge
+jvm_memory_direct_bytes_max{cluster="standalone"} 4.294967296E9
+# TYPE log4j2_appender_total counter
+log4j2_appender_total{cluster="standalone",level="debug"} 0.0
+log4j2_appender_total{cluster="standalone",level="warn"} 68.0
+log4j2_appender_total{cluster="standalone",level="trace"} 0.0
+log4j2_appender_total{cluster="standalone",level="error"} 0.0
+log4j2_appender_total{cluster="standalone",level="fatal"} 0.0
+log4j2_appender_total{cluster="standalone",level="info"} 3773.0
+# TYPE jvm_threads_current gauge
+jvm_threads_current{cluster="standalone"} 293.0
+# TYPE jvm_threads_daemon gauge
+jvm_threads_daemon{cluster="standalone"} 49.0
+# TYPE jvm_threads_peak gauge
+jvm_threads_peak{cluster="standalone"} 295.0
+# TYPE jvm_threads_started_total counter
+jvm_threads_started_total{cluster="standalone"} 343.0
+# TYPE jvm_threads_deadlocked gauge
+jvm_threads_deadlocked{cluster="standalone"} 0.0
+# TYPE jvm_threads_deadlocked_monitor gauge
+jvm_threads_deadlocked_monitor{cluster="standalone"} 0.0
+# TYPE zookeeper_server_requests counter
+zookeeper_server_requests{cluster="standalone",type="getData"} 1091.0
+zookeeper_server_requests{cluster="standalone",type="setData"} 56.0
+zookeeper_server_requests{cluster="standalone",type="ping"} 1673.0
+zookeeper_server_requests{cluster="standalone",type="unknown"} 2.0
+zookeeper_server_requests{cluster="standalone",type="sync"} 53.0
+zookeeper_server_requests{cluster="standalone",type="delete"} 189.0
+zookeeper_server_requests{cluster="standalone",type="createSession"} 14.0
+zookeeper_server_requests{cluster="standalone",type="multi"} 54.0
+zookeeper_server_requests{cluster="standalone",type="getChildren"} 172.0
+zookeeper_server_requests{cluster="standalone",type="getChildren2"} 250.0
+zookeeper_server_requests{cluster="standalone",type="closeSession"} 5.0
+zookeeper_server_requests{cluster="standalone",type="create"} 119.0
+zookeeper_server_requests{cluster="standalone",type="exists"} 577.0
+# TYPE jetty_requests_total counter
+jetty_requests_total{cluster="standalone"} 2182.0
+# TYPE jetty_requests_active gauge
+jetty_requests_active{cluster="standalone"} 1.0
+# TYPE jetty_requests_active_max gauge
+jetty_requests_active_max{cluster="standalone"} 2.0
+# TYPE jetty_request_time_max_seconds gauge
+jetty_request_time_max_seconds{cluster="standalone"} 0.539
+# TYPE jetty_request_time_seconds_total counter
+jetty_request_time_seconds_total{cluster="standalone"} 10.786
+# TYPE jetty_dispatched_total counter
+jetty_dispatched_total{cluster="standalone"} 2182.0
+# TYPE jetty_dispatched_active gauge
+jetty_dispatched_active{cluster="standalone"} 0.0
+# TYPE jetty_dispatched_active_max gauge
+jetty_dispatched_active_max{cluster="standalone"} 2.0
+# TYPE jetty_dispatched_time_max gauge
+jetty_dispatched_time_max{cluster="standalone"} 539.0
+# TYPE jetty_dispatched_time_seconds_total counter
+jetty_dispatched_time_seconds_total{cluster="standalone"} 1.745
+# TYPE jetty_async_requests_total counter
+jetty_async_requests_total{cluster="standalone"} 1070.0
+# TYPE jetty_async_requests_waiting gauge
+jetty_async_requests_waiting{cluster="standalone"} 1.0
+# TYPE jetty_async_requests_waiting_max gauge
+jetty_async_requests_waiting_max{cluster="standalone"} 1.0
+# TYPE jetty_async_dispatches_total counter
+jetty_async_dispatches_total{cluster="standalone"} 0.0
+# TYPE jetty_expires_total counter
+jetty_expires_total{cluster="standalone"} 0.0
+# TYPE jetty_responses_total counter
+jetty_responses_total{cluster="standalone",code="1xx"} 0.0
+jetty_responses_total{cluster="standalone",code="2xx"} 1113.0
+jetty_responses_total{cluster="standalone",code="3xx"} 1067.0
+jetty_responses_total{cluster="standalone",code="4xx"} 1.0
+jetty_responses_total{cluster="standalone",code="5xx"} 0.0
+# TYPE jetty_stats_seconds gauge
+jetty_stats_seconds{cluster="standalone"} 1001.006
+# TYPE jetty_responses_bytes_total counter
+jetty_responses_bytes_total{cluster="standalone"} 3.7698452E7
+# TYPE pulsar_broker_publish_latency summary
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.0"} 1.821
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.5"} 2.559
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.95"} 6.8
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.99"} 10.992
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.999"} 10.992
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.9999"} 10.992
+pulsar_broker_publish_latency{cluster="standalone",quantile="1.0"} 10.992
+pulsar_broker_publish_latency_count{cluster="standalone"} 95832.0
+pulsar_broker_publish_latency_sum{cluster="standalone"} 234677.0
+# TYPE zookeeper_server_connections gauge
+zookeeper_server_connections{cluster="standalone"} 10.0
+# TYPE jvm_info gauge
+jvm_info{cluster="standalone",version="1.8.0_232-b09",vendor="Oracle Corporation",runtime="OpenJDK Runtime Environment"} 1.0
+# TYPE topic_load_times summary
+topic_load_times{cluster="standalone",quantile="0.5"} NaN
+topic_load_times{cluster="standalone",quantile="0.75"} NaN
+topic_load_times{cluster="standalone",quantile="0.95"} NaN
+topic_load_times{cluster="standalone",quantile="0.99"} NaN
+topic_load_times{cluster="standalone",quantile="0.999"} NaN
+topic_load_times{cluster="standalone",quantile="0.9999"} NaN
+topic_load_times_count{cluster="standalone"} 0.0
+topic_load_times_sum{cluster="standalone"} 0.0
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total{cluster="standalone"} 492.64
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds{cluster="standalone"} 1.583774770759E9
+# TYPE process_open_fds gauge
+process_open_fds{cluster="standalone"} 676.0
+# TYPE process_max_fds gauge
+process_max_fds{cluster="standalone"} 1048576.0
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes{cluster="standalone"} 8.727437312E9
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes{cluster="standalone"} 1.642012672E9
+# TYPE jvm_classes_loaded gauge
+jvm_classes_loaded{cluster="standalone"} 14402.0
+# TYPE jvm_classes_loaded_total counter
+jvm_classes_loaded_total{cluster="standalone"} 14402.0
+# TYPE jvm_classes_unloaded_total counter
+jvm_classes_unloaded_total{cluster="standalone"} 0.0
+# TYPE zookeeper_server_requests_latency_ms summary
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.5"} 0.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.75"} 0.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.95"} 1.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.99"} 1.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.999"} 2.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.9999"} 2.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="1.0"} 2.0
+zookeeper_server_requests_latency_ms_count{cluster="standalone",type="read"} 3819.0
+zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="read"} 2033.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.5"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.75"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.95"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.99"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.999"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.9999"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="1.0"} NaN
+zookeeper_server_requests_latency_ms_count{cluster="standalone",type="write"} 436.0
+zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="write"} 832.0
+# TYPE zookeeper_server_watches_count gauge
+zookeeper_server_watches_count{cluster="standalone"} 37.0
+# TYPE zookeeper_server_ephemerals_count gauge
+zookeeper_server_ephemerals_count{cluster="standalone"} 12.0
+# TYPE caffeine_cache_hit_total counter
+caffeine_cache_hit_total{cluster="standalone",cache="owned-bundles"} 143.0
+caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="local-zk-exists"} 2.0
+caffeine_cache_hit_total{cluster="standalone",cache="local-zk-children"} 2.0
+caffeine_cache_hit_total{cluster="standalone",cache="bundles"} 156.0
+caffeine_cache_hit_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="local-zk-data"} 7.0
+caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_miss_total counter
+caffeine_cache_miss_total{cluster="standalone",cache="owned-bundles"} 11.0
+caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-children"} 4.0
+caffeine_cache_miss_total{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-data"} 22.0
+caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_requests_total counter
+caffeine_cache_requests_total{cluster="standalone",cache="owned-bundles"} 154.0
+caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="local-zk-exists"} 9.0
+caffeine_cache_requests_total{cluster="standalone",cache="local-zk-children"} 6.0
+caffeine_cache_requests_total{cluster="standalone",cache="bundles"} 160.0
+caffeine_cache_requests_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="local-zk-data"} 29.0
+caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_eviction_total counter
+caffeine_cache_eviction_total{cluster="standalone",cache="owned-bundles"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-exists"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-children"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bundles"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-data"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_eviction_weight gauge
+caffeine_cache_eviction_weight{cluster="standalone",cache="owned-bundles"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-exists"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-children"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bundles"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-data"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_load_failure_total counter
+caffeine_cache_load_failure_total{cluster="standalone",cache="owned-bundles"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-exists"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-children"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bundles"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-data"} 17.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_loads_total counter
+caffeine_cache_loads_total{cluster="standalone",cache="owned-bundles"} 6.0
+caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_loads_total{cluster="standalone",cache="local-zk-children"} 4.0
+caffeine_cache_loads_total{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_loads_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="local-zk-data"} 22.0
+caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_estimated_size gauge
+caffeine_cache_estimated_size{cluster="standalone",cache="owned-bundles"} 6.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-children"} 4.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-data"} 5.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_load_duration_seconds summary
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="owned-bundles"} 6.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="owned-bundles"} 0.05334063
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-exists"} 0.039758752
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-children"} 4.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-children"} 0.027705247
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bundles"} 0.076995851
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-data"} 22.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-data"} 0.156849343
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-exists"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE jvm_memory_direct_bytes_used gauge
+jvm_memory_direct_bytes_used{cluster="standalone"} 2.28189827E9
+# TYPE jvm_memory_bytes_used gauge
+jvm_memory_bytes_used{cluster="standalone",area="heap"} 3.01123632E8
+jvm_memory_bytes_used{cluster="standalone",area="nonheap"} 1.27959784E8
+# TYPE jvm_memory_bytes_committed gauge
+jvm_memory_bytes_committed{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_committed{cluster="standalone",area="nonheap"} 1.33287936E8
+# TYPE jvm_memory_bytes_max gauge
+jvm_memory_bytes_max{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_max{cluster="standalone",area="nonheap"} -1.0
+# TYPE jvm_memory_bytes_init gauge
+jvm_memory_bytes_init{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_init{cluster="standalone",area="nonheap"} 2555904.0
+# TYPE jvm_memory_pool_bytes_used gauge
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Code Cache"} 3.5528384E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Metaspace"} 8.2704856E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Compressed Class Space"} 9726544.0
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Eden Space"} 1.75112192E8
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Survivor Space"} 6.3963136E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Old Gen"} 6.2048304E7
+# TYPE jvm_memory_pool_bytes_committed gauge
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Code Cache"} 3.5782656E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Metaspace"} 8.6863872E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Compressed Class Space"} 1.0641408E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Eden Space"} 1.06430464E9
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Survivor Space"} 6.3963136E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9
+# TYPE jvm_memory_pool_bytes_max gauge
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Code Cache"} 2.5165824E8
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Metaspace"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Compressed Class Space"} 1.073741824E9
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Eden Space"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Survivor Space"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Old Gen"} 2.147483648E9
+# TYPE jvm_memory_pool_bytes_init gauge
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Code Cache"} 2555904.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Metaspace"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Compressed Class Space"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Eden Space"} 1.128267776E9
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Survivor Space"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9
+# TYPE jvm_buffer_pool_used_bytes gauge
+jvm_buffer_pool_used_bytes{cluster="standalone",pool="direct"} 697534.0
+jvm_buffer_pool_used_bytes{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_buffer_pool_capacity_bytes gauge
+jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="direct"} 697533.0
+jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_buffer_pool_used_buffers gauge
+jvm_buffer_pool_used_buffers{cluster="standalone",pool="direct"} 82.0
+jvm_buffer_pool_used_buffers{cluster="standalone",pool="mapped"} 0.0
+# TYPE zookeeper_server_znode_count gauge
+zookeeper_server_znode_count{cluster="standalone"} 4175.0
+# TYPE zookeeper_server_data_size_bytes gauge
+zookeeper_server_data_size_bytes{cluster="standalone"} 459126.0
+# TYPE jvm_gc_collection_seconds summary
+jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Young Generation"} 14.0
+jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Young Generation"} 3.13
+jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Old Generation"} 0.0
+jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Old Generation"} 0.0
+# TYPE pulsar_topics_count gauge
+pulsar_topics_count{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_subscriptions_count gauge
+pulsar_subscriptions_count{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_producers_count gauge
+pulsar_producers_count{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_consumers_count gauge
+pulsar_consumers_count{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_rate_in gauge
+pulsar_rate_in{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_rate_out gauge
+pulsar_rate_out{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_throughput_in gauge
+pulsar_throughput_in{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_throughput_out gauge
+pulsar_throughput_out{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_storage_size gauge
+pulsar_storage_size{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_storage_write_rate gauge
+pulsar_storage_write_rate{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_storage_read_rate gauge
+pulsar_storage_read_rate{cluster="standalone"} 0 1583775788853
+# TYPE pulsar_msg_backlog gauge
+pulsar_msg_backlog{cluster="standalone"} 0 1583775788853
+pulsar_topics_count{cluster="standalone",namespace="sample/dev"} 2 1583775788853
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/dev"} 6 1583775788853
+pulsar_producers_count{cluster="standalone",namespace="sample/dev"} 4 1583775788853
+pulsar_consumers_count{cluster="standalone",namespace="sample/dev"} 10 1583775788853
+pulsar_rate_in{cluster="standalone",namespace="sample/dev"} 48.004 1583775788853
+pulsar_rate_out{cluster="standalone",namespace="sample/dev"} 146.018 1583775788853
+pulsar_throughput_in{cluster="standalone",namespace="sample/dev"} 2736.243 1583775788853
+pulsar_throughput_out{cluster="standalone",namespace="sample/dev"} 8323.043 1583775788853
+pulsar_storage_size{cluster="standalone",namespace="sample/dev"} 2684208 1583775788853
+# TYPE pulsar_storage_backlog_size gauge
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/dev"} 35452322 1583775788853
+# TYPE pulsar_storage_offloaded_size gauge
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+pulsar_storage_write_rate{cluster="standalone",namespace="sample/dev"} 50.200 1583775788853
+pulsar_storage_read_rate{cluster="standalone",namespace="sample/dev"} 0.0 1583775788853
+# TYPE pulsar_subscription_delayed gauge
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+pulsar_msg_backlog{cluster="standalone",namespace="sample/dev",remote_cluster="local"} 8.0 1583775788853
+# TYPE pulsar_storage_write_latency_le_0_5 gauge
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_le_1 gauge
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/dev"} 23 1583775788853
+# TYPE pulsar_storage_write_latency_le_5 gauge
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/dev"} 2900 1583775788853
+# TYPE pulsar_storage_write_latency_le_10 gauge
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/dev"} 82 1583775788853
+# TYPE pulsar_storage_write_latency_le_20 gauge
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/dev"} 6 1583775788853
+# TYPE pulsar_storage_write_latency_le_50 gauge
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_le_100 gauge
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_le_200 gauge
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/dev"} 1 1583775788853
+# TYPE pulsar_storage_write_latency_le_1000 gauge
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_overflow gauge
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_storage_write_latency_count gauge
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+# TYPE pulsar_storage_write_latency_sum gauge
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+# TYPE pulsar_entry_size_le_128 gauge
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+# TYPE pulsar_entry_size_le_512 gauge
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_1_kb gauge
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_2_kb gauge
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_4_kb gauge
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_16_kb gauge
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_100_kb gauge
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_1_mb gauge
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_le_overflow gauge
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/dev"} 0 1583775788853
+# TYPE pulsar_entry_size_count gauge
+pulsar_entry_size_count{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+# TYPE pulsar_entry_size_sum gauge
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/dev"} 3012 1583775788853
+pulsar_topics_count{cluster="standalone",namespace="public/functions"} 3 1583775788853
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions"} 3 1583775788853
+pulsar_producers_count{cluster="standalone",namespace="public/functions"} 2 1583775788853
+pulsar_consumers_count{cluster="standalone",namespace="public/functions"} 3 1583775788853
+pulsar_rate_in{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_rate_out{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_throughput_in{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_throughput_out{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_storage_size{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions"} 35452322 1583775788853
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_rate{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_storage_read_rate{cluster="standalone",namespace="public/functions"} 0.0 1583775788853
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",remote_cluster="local"} 0.0 1583775788853
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions"} 0 1583775788853
+pulsar_topics_count{cluster="standalone",namespace="sample/prod"} 2 1583775788853
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/prod"} 4 1583775788853
+pulsar_producers_count{cluster="standalone",namespace="sample/prod"} 4 1583775788853
+pulsar_consumers_count{cluster="standalone",namespace="sample/prod"} 8 1583775788853
+pulsar_rate_in{cluster="standalone",namespace="sample/prod"} 48.019 1583775788853
+pulsar_rate_out{cluster="standalone",namespace="sample/prod"} 96.039 1583775788853
+pulsar_throughput_in{cluster="standalone",namespace="sample/prod"} 2833.158 1583775788853
+pulsar_throughput_out{cluster="standalone",namespace="sample/prod"} 5666.330 1583775788853
+pulsar_storage_size{cluster="standalone",namespace="sample/prod"} 2784216 1583775788853
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/prod"} 35455322 1583775788853
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_rate{cluster="standalone",namespace="sample/prod"} 50.016 1583775788853
+pulsar_storage_read_rate{cluster="standalone",namespace="sample/prod"} 0.0 1583775788853
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_msg_backlog{cluster="standalone",namespace="sample/prod",remote_cluster="local"} 0.0 1583775788853
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/prod"} 20 1583775788853
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/prod"} 2897 1583775788853
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/prod"} 81 1583775788853
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/prod"} 1 1583775788853
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/prod"} 1 1583775788853
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/prod"} 3000 1583775788853
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/prod"} 3000 1583775788853
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/prod"} 3001 1583775788853
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/prod"} 0 1583775788853
+pulsar_entry_size_count{cluster="standalone",namespace="sample/prod"} 3001 1583775788853
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/prod"} 3001 1583775788853 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-topics-2.txt b/src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-topics-2.txt
new file mode 100644
index 000000000..ba5006094
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-topics-2.txt
@@ -0,0 +1,748 @@
+# TYPE zookeeper_server_requests counter
+zookeeper_server_requests{cluster="standalone",type="getData"} 777.0
+zookeeper_server_requests{cluster="standalone",type="setData"} 14.0
+zookeeper_server_requests{cluster="standalone",type="ping"} 955.0
+zookeeper_server_requests{cluster="standalone",type="unknown"} 2.0
+zookeeper_server_requests{cluster="standalone",type="sync"} 21.0
+zookeeper_server_requests{cluster="standalone",type="delete"} 29.0
+zookeeper_server_requests{cluster="standalone",type="createSession"} 14.0
+zookeeper_server_requests{cluster="standalone",type="multi"} 3.0
+zookeeper_server_requests{cluster="standalone",type="getChildren"} 47.0
+zookeeper_server_requests{cluster="standalone",type="getChildren2"} 121.0
+zookeeper_server_requests{cluster="standalone",type="closeSession"} 5.0
+zookeeper_server_requests{cluster="standalone",type="create"} 99.0
+zookeeper_server_requests{cluster="standalone",type="exists"} 340.0
+# TYPE zk_write_latency summary
+zk_write_latency{cluster="standalone",quantile="0.5"} NaN
+zk_write_latency{cluster="standalone",quantile="0.75"} NaN
+zk_write_latency{cluster="standalone",quantile="0.95"} NaN
+zk_write_latency{cluster="standalone",quantile="0.99"} NaN
+zk_write_latency{cluster="standalone",quantile="0.999"} NaN
+zk_write_latency{cluster="standalone",quantile="0.9999"} NaN
+zk_write_latency_count{cluster="standalone"} 0.0
+zk_write_latency_sum{cluster="standalone"} 0.0
+# TYPE jetty_requests_total counter
+jetty_requests_total{cluster="standalone"} 106.0
+# TYPE jetty_requests_active gauge
+jetty_requests_active{cluster="standalone"} 1.0
+# TYPE jetty_requests_active_max gauge
+jetty_requests_active_max{cluster="standalone"} 2.0
+# TYPE jetty_request_time_max_seconds gauge
+jetty_request_time_max_seconds{cluster="standalone"} 0.453
+# TYPE jetty_request_time_seconds_total counter
+jetty_request_time_seconds_total{cluster="standalone"} 1.595
+# TYPE jetty_dispatched_total counter
+jetty_dispatched_total{cluster="standalone"} 106.0
+# TYPE jetty_dispatched_active gauge
+jetty_dispatched_active{cluster="standalone"} 0.0
+# TYPE jetty_dispatched_active_max gauge
+jetty_dispatched_active_max{cluster="standalone"} 2.0
+# TYPE jetty_dispatched_time_max gauge
+jetty_dispatched_time_max{cluster="standalone"} 453.0
+# TYPE jetty_dispatched_time_seconds_total counter
+jetty_dispatched_time_seconds_total{cluster="standalone"} 0.737
+# TYPE jetty_async_requests_total counter
+jetty_async_requests_total{cluster="standalone"} 39.0
+# TYPE jetty_async_requests_waiting gauge
+jetty_async_requests_waiting{cluster="standalone"} 1.0
+# TYPE jetty_async_requests_waiting_max gauge
+jetty_async_requests_waiting_max{cluster="standalone"} 1.0
+# TYPE jetty_async_dispatches_total counter
+jetty_async_dispatches_total{cluster="standalone"} 0.0
+# TYPE jetty_expires_total counter
+jetty_expires_total{cluster="standalone"} 0.0
+# TYPE jetty_responses_total counter
+jetty_responses_total{cluster="standalone",code="1xx"} 0.0
+jetty_responses_total{cluster="standalone",code="2xx"} 66.0
+jetty_responses_total{cluster="standalone",code="3xx"} 38.0
+jetty_responses_total{cluster="standalone",code="4xx"} 1.0
+jetty_responses_total{cluster="standalone",code="5xx"} 0.0
+# TYPE jetty_stats_seconds gauge
+jetty_stats_seconds{cluster="standalone"} 565.434
+# TYPE jetty_responses_bytes_total counter
+jetty_responses_bytes_total{cluster="standalone"} 2865485.0
+# TYPE jvm_info gauge
+jvm_info{cluster="standalone",version="1.8.0_232-b09",vendor="Oracle Corporation",runtime="OpenJDK Runtime Environment"} 1.0
+# TYPE log4j2_appender_total counter
+log4j2_appender_total{cluster="standalone",level="debug"} 0.0
+log4j2_appender_total{cluster="standalone",level="warn"} 44.0
+log4j2_appender_total{cluster="standalone",level="trace"} 0.0
+log4j2_appender_total{cluster="standalone",level="error"} 0.0
+log4j2_appender_total{cluster="standalone",level="fatal"} 0.0
+log4j2_appender_total{cluster="standalone",level="info"} 1437.0
+# TYPE zookeeper_server_connections gauge
+zookeeper_server_connections{cluster="standalone"} 10.0
+# TYPE jvm_memory_bytes_used gauge
+jvm_memory_bytes_used{cluster="standalone",area="heap"} 1.30309152E8
+jvm_memory_bytes_used{cluster="standalone",area="nonheap"} 1.21050512E8
+# TYPE jvm_memory_bytes_committed gauge
+jvm_memory_bytes_committed{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_committed{cluster="standalone",area="nonheap"} 1.26242816E8
+# TYPE jvm_memory_bytes_max gauge
+jvm_memory_bytes_max{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_max{cluster="standalone",area="nonheap"} -1.0
+# TYPE jvm_memory_bytes_init gauge
+jvm_memory_bytes_init{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_init{cluster="standalone",area="nonheap"} 2555904.0
+# TYPE jvm_memory_pool_bytes_used gauge
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Code Cache"} 2.9851008E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Metaspace"} 8.1522184E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Compressed Class Space"} 9677320.0
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Eden Space"} 2.2020096E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Survivor Space"} 7.0254592E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Old Gen"} 3.8034464E7
+# TYPE jvm_memory_pool_bytes_committed gauge
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Code Cache"} 3.014656E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Metaspace"} 8.5532672E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Compressed Class Space"} 1.0563584E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Eden Space"} 1.058013184E9
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Survivor Space"} 7.0254592E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9
+# TYPE jvm_memory_pool_bytes_max gauge
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Code Cache"} 2.5165824E8
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Metaspace"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Compressed Class Space"} 1.073741824E9
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Eden Space"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Survivor Space"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Old Gen"} 2.147483648E9
+# TYPE jvm_memory_pool_bytes_init gauge
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Code Cache"} 2555904.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Metaspace"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Compressed Class Space"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Eden Space"} 1.128267776E9
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Survivor Space"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9
+# TYPE jvm_threads_current gauge
+jvm_threads_current{cluster="standalone"} 291.0
+# TYPE jvm_threads_daemon gauge
+jvm_threads_daemon{cluster="standalone"} 49.0
+# TYPE jvm_threads_peak gauge
+jvm_threads_peak{cluster="standalone"} 291.0
+# TYPE jvm_threads_started_total counter
+jvm_threads_started_total{cluster="standalone"} 331.0
+# TYPE jvm_threads_deadlocked gauge
+jvm_threads_deadlocked{cluster="standalone"} 0.0
+# TYPE jvm_threads_deadlocked_monitor gauge
+jvm_threads_deadlocked_monitor{cluster="standalone"} 0.0
+# TYPE caffeine_cache_hit_total counter
+caffeine_cache_hit_total{cluster="standalone",cache="owned-bundles"} 95.0
+caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="local-zk-exists"} 2.0
+caffeine_cache_hit_total{cluster="standalone",cache="local-zk-children"} 2.0
+caffeine_cache_hit_total{cluster="standalone",cache="bundles"} 126.0
+caffeine_cache_hit_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="local-zk-data"} 7.0
+caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_miss_total counter
+caffeine_cache_miss_total{cluster="standalone",cache="owned-bundles"} 11.0
+caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-exists"} 9.0
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-children"} 7.0
+caffeine_cache_miss_total{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-data"} 21.0
+caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_requests_total counter
+caffeine_cache_requests_total{cluster="standalone",cache="owned-bundles"} 106.0
+caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="local-zk-exists"} 11.0
+caffeine_cache_requests_total{cluster="standalone",cache="local-zk-children"} 9.0
+caffeine_cache_requests_total{cluster="standalone",cache="bundles"} 130.0
+caffeine_cache_requests_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="local-zk-data"} 28.0
+caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_eviction_total counter
+caffeine_cache_eviction_total{cluster="standalone",cache="owned-bundles"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-exists"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-children"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bundles"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-data"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_eviction_weight gauge
+caffeine_cache_eviction_weight{cluster="standalone",cache="owned-bundles"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-exists"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-children"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bundles"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-data"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_load_failure_total counter
+caffeine_cache_load_failure_total{cluster="standalone",cache="owned-bundles"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-exists"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-children"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bundles"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-data"} 16.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_loads_total counter
+caffeine_cache_loads_total{cluster="standalone",cache="owned-bundles"} 6.0
+caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="local-zk-exists"} 9.0
+caffeine_cache_loads_total{cluster="standalone",cache="local-zk-children"} 7.0
+caffeine_cache_loads_total{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_loads_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="local-zk-data"} 21.0
+caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_estimated_size gauge
+caffeine_cache_estimated_size{cluster="standalone",cache="owned-bundles"} 6.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-children"} 4.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-data"} 5.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_load_duration_seconds summary
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="owned-bundles"} 6.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="owned-bundles"} 0.136975304
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-exists"} 9.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-exists"} 0.064067898
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-children"} 7.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-children"} 0.100136473
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bundles"} 0.079620575
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-data"} 21.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-data"} 0.117346453
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-exists"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE zk_read_latency summary
+zk_read_latency{cluster="standalone",quantile="0.5"} NaN
+zk_read_latency{cluster="standalone",quantile="0.75"} NaN
+zk_read_latency{cluster="standalone",quantile="0.95"} NaN
+zk_read_latency{cluster="standalone",quantile="0.99"} NaN
+zk_read_latency{cluster="standalone",quantile="0.999"} NaN
+zk_read_latency{cluster="standalone",quantile="0.9999"} NaN
+zk_read_latency_count{cluster="standalone"} 0.0
+zk_read_latency_sum{cluster="standalone"} 0.0
+# TYPE topic_load_times summary
+topic_load_times{cluster="standalone",quantile="0.5"} NaN
+topic_load_times{cluster="standalone",quantile="0.75"} NaN
+topic_load_times{cluster="standalone",quantile="0.95"} NaN
+topic_load_times{cluster="standalone",quantile="0.99"} NaN
+topic_load_times{cluster="standalone",quantile="0.999"} NaN
+topic_load_times{cluster="standalone",quantile="0.9999"} NaN
+topic_load_times_count{cluster="standalone"} 0.0
+topic_load_times_sum{cluster="standalone"} 0.0
+# TYPE jvm_classes_loaded gauge
+jvm_classes_loaded{cluster="standalone"} 14323.0
+# TYPE jvm_classes_loaded_total counter
+jvm_classes_loaded_total{cluster="standalone"} 14323.0
+# TYPE jvm_classes_unloaded_total counter
+jvm_classes_unloaded_total{cluster="standalone"} 0.0
+# TYPE zookeeper_server_requests_latency_ms summary
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.5"} 0.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.75"} 0.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.95"} 1.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.99"} 1.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.999"} 1.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.9999"} 1.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="1.0"} 1.0
+zookeeper_server_requests_latency_ms_count{cluster="standalone",type="read"} 2245.0
+zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="read"} 1340.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.5"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.75"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.95"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.99"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.999"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.9999"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="1.0"} NaN
+zookeeper_server_requests_latency_ms_count{cluster="standalone",type="write"} 182.0
+zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="write"} 461.0
+# TYPE zookeeper_server_watches_count gauge
+zookeeper_server_watches_count{cluster="standalone"} 49.0
+# TYPE zookeeper_server_ephemerals_count gauge
+zookeeper_server_ephemerals_count{cluster="standalone"} 12.0
+# TYPE jvm_buffer_pool_used_bytes gauge
+jvm_buffer_pool_used_bytes{cluster="standalone",pool="direct"} 688964.0
+jvm_buffer_pool_used_bytes{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_buffer_pool_capacity_bytes gauge
+jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="direct"} 688963.0
+jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_buffer_pool_used_buffers gauge
+jvm_buffer_pool_used_buffers{cluster="standalone",pool="direct"} 82.0
+jvm_buffer_pool_used_buffers{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_gc_collection_seconds summary
+jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Young Generation"} 9.0
+jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Young Generation"} 2.211
+jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Old Generation"} 0.0
+jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Old Generation"} 0.0
+# TYPE pulsar_broker_publish_latency summary
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.0"} 1.01
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.5"} 2.333
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.95"} 6.313
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.99"} 11.05
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.999"} 11.05
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.9999"} 11.05
+pulsar_broker_publish_latency{cluster="standalone",quantile="1.0"} 11.05
+pulsar_broker_publish_latency_count{cluster="standalone"} 50123.0
+pulsar_broker_publish_latency_sum{cluster="standalone"} 116757.0
+# TYPE jvm_memory_direct_bytes_used gauge
+jvm_memory_direct_bytes_used{cluster="standalone"} 2.28189827E9
+# TYPE zookeeper_server_znode_count gauge
+zookeeper_server_znode_count{cluster="standalone"} 4215.0
+# TYPE zookeeper_server_data_size_bytes gauge
+zookeeper_server_data_size_bytes{cluster="standalone"} 465029.0
+# TYPE jvm_memory_direct_bytes_max gauge
+jvm_memory_direct_bytes_max{cluster="standalone"} 4.294967296E9
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total{cluster="standalone"} 284.97
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds{cluster="standalone"} 1.583777691467E9
+# TYPE process_open_fds gauge
+process_open_fds{cluster="standalone"} 678.0
+# TYPE process_max_fds gauge
+process_max_fds{cluster="standalone"} 1048576.0
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes{cluster="standalone"} 8.720920576E9
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes{cluster="standalone"} 1.597915136E9
+# TYPE pulsar_topics_count gauge
+pulsar_topics_count{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_subscriptions_count gauge
+pulsar_subscriptions_count{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_producers_count gauge
+pulsar_producers_count{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_consumers_count gauge
+pulsar_consumers_count{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_rate_in gauge
+pulsar_rate_in{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_rate_out gauge
+pulsar_rate_out{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_throughput_in gauge
+pulsar_throughput_in{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_throughput_out gauge
+pulsar_throughput_out{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_storage_size gauge
+pulsar_storage_size{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_storage_write_rate gauge
+pulsar_storage_write_rate{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_storage_read_rate gauge
+pulsar_storage_read_rate{cluster="standalone"} 0 1583778276679
+# TYPE pulsar_msg_backlog gauge
+pulsar_msg_backlog{cluster="standalone"} 0 1583778276679
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 2.0 1583778276679
+pulsar_producers_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 2.0 1583778276679
+pulsar_consumers_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 4.0 1583778276679
+pulsar_rate_in{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 19.999 1583778276679
+pulsar_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 39.999 1583778276679
+pulsar_throughput_in{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1119.988 1583778276679
+pulsar_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 2239.979 1583778276679
+pulsar_storage_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 698700.0 1583778276679
+pulsar_msg_backlog{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_backlog_size gauge
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 10045.0 1583778276679
+# TYPE pulsar_storage_offloaded_size gauge
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_backlog_quota_limit gauge
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 10737418240.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_0_5 gauge
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_1 gauge
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 13.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_5 gauge
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1457.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_10 gauge
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 20.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_20 gauge
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 7.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_50 gauge
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_100 gauge
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_200 gauge
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_write_latency_le_1000 gauge
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_write_latency_overflow gauge
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_storage_write_latency_count gauge
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1498.0 1583778276679
+# TYPE pulsar_storage_write_latency_sum gauge
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1498.0 1583778276679
+# TYPE pulsar_entry_size_le_128 gauge
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1497.0 1583778276679
+# TYPE pulsar_entry_size_le_512 gauge
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_1_kb gauge
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_2_kb gauge
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_4_kb gauge
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_16_kb gauge
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_100_kb gauge
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_1_mb gauge
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_le_overflow gauge
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 0.0 1583778276679
+# TYPE pulsar_entry_size_count gauge
+pulsar_entry_size_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1497.0 1583778276679
+# TYPE pulsar_entry_size_sum gauge
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 1497.0 1583778276679
+# TYPE pulsar_subscription_back_log gauge
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0 1583778276679
+# TYPE pulsar_subscription_delayed gauge
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0 1583778276679
+# TYPE pulsar_subscription_msg_rate_redeliver gauge
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0.0 1583778276679
+# TYPE pulsar_subscription_unacked_messages gauge
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0 1583778276679
+# TYPE pulsar_subscription_blocked_on_unacked_messages gauge
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 0 1583778276679
+# TYPE pulsar_subscription_msg_rate_out gauge
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 19.999 1583778276679
+# TYPE pulsar_subscription_msg_throughput_out gauge
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-2"} 1119.990 1583778276679
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0 1583778276679
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0 1583778276679
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0.0 1583778276679
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0 1583778276679
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 0 1583778276679
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 19.999 1583778276679
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1",subscription="dev-dev-1-sub-1"} 1119.989 1583778276679
+# TYPE pulsar_in_bytes_total gauge
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 698700.0 1583778276679
+# TYPE pulsar_in_messages_total gauge
+pulsar_in_messages_total{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-1"} 12521.0 1583778276679
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 4.0 1583778276679
+pulsar_producers_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 2.0 1583778276679
+pulsar_consumers_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 6.0 1583778276679
+pulsar_rate_in{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 21.0 1583778276679
+pulsar_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 84.0 1583778276679
+pulsar_throughput_in{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1176.007 1583778276679
+pulsar_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 4704.023 1583778276679
+pulsar_storage_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 698532.0 1583778276679
+pulsar_msg_backlog{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 10042.0 1583778276679
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 10737418240.0 1583778276679
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1475.0 1583778276679
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 16.0 1583778276679
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 12.0 1583778276679
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1.0 1583778276679
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 0.0 1583778276679
+pulsar_entry_size_count{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 1504.0 1583778276679
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0 1583778276679
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0 1583778276679
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0.0 1583778276679
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0 1583778276679
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 0 1583778276679
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 21.0 1583778276679
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-1"} 1176.005 1583778276679
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0 1583778276679
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0 1583778276679
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0.0 1583778276679
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0 1583778276679
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 0 1583778276679
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 21.0 1583778276679
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-3"} 1176.007 1583778276679
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0 1583778276679
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0 1583778276679
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0.0 1583778276679
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0 1583778276679
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 0 1583778276679
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 21.0 1583778276679
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-2"} 1176.004 1583778276679
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0 1583778276679
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0 1583778276679
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0.0 1583778276679
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0 1583778276679
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 0 1583778276679
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 21.0 1583778276679
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2",subscription="dev-dev-2-sub-4"} 1176.006 1583778276679
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 698532.0 1583778276679
+pulsar_in_messages_total{cluster="standalone",namespace="sample/playground",topic="persistent://sample/playground/playground-2"} 12518.0 1583778276679
+pulsar_topics_count{cluster="standalone",namespace="sample/playground"} 2 1583778276679
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 2.0 1583778276680
+pulsar_producers_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 2.0 1583778276680
+pulsar_consumers_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 4.0 1583778276680
+pulsar_rate_in{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 18.999 1583778276680
+pulsar_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 37.998 1583778276680
+pulsar_throughput_in{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1101.966 1583778276680
+pulsar_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 2203.924 1583778276680
+pulsar_storage_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 725250.0 1583778276680
+pulsar_msg_backlog{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 10071.0 1583778276680
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 10737418240.0 1583778276680
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 5.0 1583778276680
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1474.0 1583778276680
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 24.0 1583778276680
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 7.0 1583778276680
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 0.0 1583778276680
+pulsar_entry_size_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 1510.0 1583778276680
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0 1583778276680
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0 1583778276680
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0.0 1583778276680
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0 1583778276680
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 0 1583778276680
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 18.999 1583778276680
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-1"} 1101.962 1583778276680
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0 1583778276680
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0 1583778276680
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0.0 1583778276680
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0 1583778276680
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 0 1583778276680
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 18.999 1583778276680
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2",subscription="prod-prod-2-sub-2"} 1101.961 1583778276680
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 725250.0 1583778276680
+pulsar_in_messages_total{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-2"} 12547.0 1583778276680
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 2.0 1583778276680
+pulsar_producers_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 2.0 1583778276680
+pulsar_consumers_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 4.0 1583778276680
+pulsar_rate_in{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 19.999 1583778276680
+pulsar_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 39.998 1583778276680
+pulsar_throughput_in{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1159.956 1583778276680
+pulsar_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 2319.911 1583778276680
+pulsar_storage_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 724728.0 1583778276680
+pulsar_msg_backlog{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 10062.0 1583778276680
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 10737418240.0 1583778276680
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 4.0 1583778276680
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1487.0 1583778276680
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 19.0 1583778276680
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 5.0 1583778276680
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 2.0 1583778276680
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 0.0 1583778276680
+pulsar_entry_size_count{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 1517.0 1583778276680
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0 1583778276680
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0 1583778276680
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0.0 1583778276680
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0 1583778276680
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 0 1583778276680
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 19.999 1583778276680
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-1"} 1159.955 1583778276680
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0 1583778276680
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0 1583778276680
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0.0 1583778276680
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0 1583778276680
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 0 1583778276680
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 19.999 1583778276680
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1",subscription="prod-prod-1-sub-2"} 1159.955 1583778276680
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 724844.0 1583778276680
+pulsar_in_messages_total{cluster="standalone",namespace="sample/test",topic="persistent://sample/test/test-1"} 12540.0 1583778276680
+pulsar_topics_count{cluster="standalone",namespace="sample/test"} 4 1583778276680
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583778276680
+pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583778276680
+pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583778276680
+pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 10737418240.0 1583778276680
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0 1583778276680
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0 1583778276680
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0.0 1583778276680
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0 1583778276680
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0 1583778276680
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0.0 1583778276680
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-867af721a1"} 0.0 1583778276680
+pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583778276680
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 1.0 1583778276680
+pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 1.0 1583778276680
+pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 10737418240.0 1583778276680
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583778276680
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583778276680
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583778276680
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583778276680
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583778276680
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583778276680
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583778276680
+pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583778276680
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583778276680
+pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583778276680
+pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583778276680
+pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 10737418240.0 1583778276680
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0 1583778276680
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0 1583778276680
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0.0 1583778276680
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0 1583778276680
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0 1583778276680
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0.0 1583778276680
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-e3ab56439a"} 0.0 1583778276680
+pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583778276680
+pulsar_topics_count{cluster="standalone",namespace="public/functions"} 7 1583778276680 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-topics.txt b/src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-topics.txt
new file mode 100644
index 000000000..7e0f0212a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/pulsar/testdata/standalone-v2.5.0-topics.txt
@@ -0,0 +1,748 @@
+# TYPE jvm_buffer_pool_used_bytes gauge
+jvm_buffer_pool_used_bytes{cluster="standalone",pool="direct"} 698586.0
+jvm_buffer_pool_used_bytes{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_buffer_pool_capacity_bytes gauge
+jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="direct"} 698585.0
+jvm_buffer_pool_capacity_bytes{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_buffer_pool_used_buffers gauge
+jvm_buffer_pool_used_buffers{cluster="standalone",pool="direct"} 82.0
+jvm_buffer_pool_used_buffers{cluster="standalone",pool="mapped"} 0.0
+# TYPE jvm_memory_direct_bytes_used gauge
+jvm_memory_direct_bytes_used{cluster="standalone"} 2.28189827E9
+# TYPE zk_write_latency summary
+zk_write_latency{cluster="standalone",quantile="0.5"} NaN
+zk_write_latency{cluster="standalone",quantile="0.75"} NaN
+zk_write_latency{cluster="standalone",quantile="0.95"} NaN
+zk_write_latency{cluster="standalone",quantile="0.99"} NaN
+zk_write_latency{cluster="standalone",quantile="0.999"} NaN
+zk_write_latency{cluster="standalone",quantile="0.9999"} NaN
+zk_write_latency_count{cluster="standalone"} 0.0
+zk_write_latency_sum{cluster="standalone"} 0.0
+# TYPE jvm_memory_bytes_used gauge
+jvm_memory_bytes_used{cluster="standalone",area="heap"} 1.05170488E9
+jvm_memory_bytes_used{cluster="standalone",area="nonheap"} 1.35478104E8
+# TYPE jvm_memory_bytes_committed gauge
+jvm_memory_bytes_committed{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_committed{cluster="standalone",area="nonheap"} 1.41377536E8
+# TYPE jvm_memory_bytes_max gauge
+jvm_memory_bytes_max{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_max{cluster="standalone",area="nonheap"} -1.0
+# TYPE jvm_memory_bytes_init gauge
+jvm_memory_bytes_init{cluster="standalone",area="heap"} 2.147483648E9
+jvm_memory_bytes_init{cluster="standalone",area="nonheap"} 2555904.0
+# TYPE jvm_memory_pool_bytes_used gauge
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Code Cache"} 4.147872E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Metaspace"} 8.4205296E7
+jvm_memory_pool_bytes_used{cluster="standalone",pool="Compressed Class Space"} 9794088.0
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Eden Space"} 9.17504E8
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Survivor Space"} 4194304.0
+jvm_memory_pool_bytes_used{cluster="standalone",pool="G1 Old Gen"} 1.30006576E8
+# TYPE jvm_memory_pool_bytes_committed gauge
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Code Cache"} 4.1811968E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Metaspace"} 8.8817664E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="Compressed Class Space"} 1.0747904E7
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Eden Space"} 1.124073472E9
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Survivor Space"} 4194304.0
+jvm_memory_pool_bytes_committed{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9
+# TYPE jvm_memory_pool_bytes_max gauge
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Code Cache"} 2.5165824E8
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Metaspace"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="Compressed Class Space"} 1.073741824E9
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Eden Space"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Survivor Space"} -1.0
+jvm_memory_pool_bytes_max{cluster="standalone",pool="G1 Old Gen"} 2.147483648E9
+# TYPE jvm_memory_pool_bytes_init gauge
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Code Cache"} 2555904.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Metaspace"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="Compressed Class Space"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Eden Space"} 1.128267776E9
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Survivor Space"} 0.0
+jvm_memory_pool_bytes_init{cluster="standalone",pool="G1 Old Gen"} 1.019215872E9
+# TYPE log4j2_appender_total counter
+log4j2_appender_total{cluster="standalone",level="debug"} 0.0
+log4j2_appender_total{cluster="standalone",level="warn"} 307.0
+log4j2_appender_total{cluster="standalone",level="trace"} 0.0
+log4j2_appender_total{cluster="standalone",level="error"} 0.0
+log4j2_appender_total{cluster="standalone",level="fatal"} 0.0
+log4j2_appender_total{cluster="standalone",level="info"} 17746.0
+# TYPE jetty_requests_total counter
+jetty_requests_total{cluster="standalone"} 13063.0
+# TYPE jetty_requests_active gauge
+jetty_requests_active{cluster="standalone"} 1.0
+# TYPE jetty_requests_active_max gauge
+jetty_requests_active_max{cluster="standalone"} 2.0
+# TYPE jetty_request_time_max_seconds gauge
+jetty_request_time_max_seconds{cluster="standalone"} 1.02
+# TYPE jetty_request_time_seconds_total counter
+jetty_request_time_seconds_total{cluster="standalone"} 64.787
+# TYPE jetty_dispatched_total counter
+jetty_dispatched_total{cluster="standalone"} 13063.0
+# TYPE jetty_dispatched_active gauge
+jetty_dispatched_active{cluster="standalone"} 0.0
+# TYPE jetty_dispatched_active_max gauge
+jetty_dispatched_active_max{cluster="standalone"} 2.0
+# TYPE jetty_dispatched_time_max gauge
+jetty_dispatched_time_max{cluster="standalone"} 345.0
+# TYPE jetty_dispatched_time_seconds_total counter
+jetty_dispatched_time_seconds_total{cluster="standalone"} 5.054
+# TYPE jetty_async_requests_total counter
+jetty_async_requests_total{cluster="standalone"} 6480.0
+# TYPE jetty_async_requests_waiting gauge
+jetty_async_requests_waiting{cluster="standalone"} 1.0
+# TYPE jetty_async_requests_waiting_max gauge
+jetty_async_requests_waiting_max{cluster="standalone"} 2.0
+# TYPE jetty_async_dispatches_total counter
+jetty_async_dispatches_total{cluster="standalone"} 0.0
+# TYPE jetty_expires_total counter
+jetty_expires_total{cluster="standalone"} 0.0
+# TYPE jetty_responses_total counter
+jetty_responses_total{cluster="standalone",code="1xx"} 0.0
+jetty_responses_total{cluster="standalone",code="2xx"} 6683.0
+jetty_responses_total{cluster="standalone",code="3xx"} 6378.0
+jetty_responses_total{cluster="standalone",code="4xx"} 1.0
+jetty_responses_total{cluster="standalone",code="5xx"} 0.0
+# TYPE jetty_stats_seconds gauge
+jetty_stats_seconds{cluster="standalone"} 5822.682
+# TYPE jetty_responses_bytes_total counter
+jetty_responses_bytes_total{cluster="standalone"} 4.89996508E8
+# TYPE zookeeper_server_requests_latency_ms summary
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.5"} 0.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.75"} 1.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.95"} 1.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.99"} 2.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.999"} 5.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="0.9999"} 5.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="read",quantile="1.0"} 5.0
+zookeeper_server_requests_latency_ms_count{cluster="standalone",type="read"} 17769.0
+zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="read"} 9455.0
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.5"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.75"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.95"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.99"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.999"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="0.9999"} NaN
+zookeeper_server_requests_latency_ms{cluster="standalone",type="write",quantile="1.0"} NaN
+zookeeper_server_requests_latency_ms_count{cluster="standalone",type="write"} 2091.0
+zookeeper_server_requests_latency_ms_sum{cluster="standalone",type="write"} 3930.0
+# TYPE jvm_info gauge
+jvm_info{cluster="standalone",version="1.8.0_232-b09",vendor="Oracle Corporation",runtime="OpenJDK Runtime Environment"} 1.0
+# TYPE zookeeper_server_connections gauge
+zookeeper_server_connections{cluster="standalone"} 10.0
+# TYPE caffeine_cache_hit_total counter
+caffeine_cache_hit_total{cluster="standalone",cache="owned-bundles"} 714.0
+caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="local-zk-exists"} 2.0
+caffeine_cache_hit_total{cluster="standalone",cache="local-zk-children"} 2.0
+caffeine_cache_hit_total{cluster="standalone",cache="bundles"} 758.0
+caffeine_cache_hit_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="local-zk-data"} 10.0
+caffeine_cache_hit_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_hit_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_miss_total counter
+caffeine_cache_miss_total{cluster="standalone",cache="owned-bundles"} 11.0
+caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-children"} 8.0
+caffeine_cache_miss_total{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="local-zk-data"} 79.0
+caffeine_cache_miss_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_miss_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_requests_total counter
+caffeine_cache_requests_total{cluster="standalone",cache="owned-bundles"} 725.0
+caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="local-zk-exists"} 9.0
+caffeine_cache_requests_total{cluster="standalone",cache="local-zk-children"} 10.0
+caffeine_cache_requests_total{cluster="standalone",cache="bundles"} 762.0
+caffeine_cache_requests_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="local-zk-data"} 89.0
+caffeine_cache_requests_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_requests_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_eviction_total counter
+caffeine_cache_eviction_total{cluster="standalone",cache="owned-bundles"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-exists"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-children"} 2.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bundles"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="local-zk-data"} 5.0
+caffeine_cache_eviction_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_eviction_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_eviction_weight gauge
+caffeine_cache_eviction_weight{cluster="standalone",cache="owned-bundles"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-exists"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-children"} 2.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bundles"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="local-zk-data"} 5.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_eviction_weight{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_load_failure_total counter
+caffeine_cache_load_failure_total{cluster="standalone",cache="owned-bundles"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-exists"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-children"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bundles"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="local-zk-data"} 74.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_failure_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_loads_total counter
+caffeine_cache_loads_total{cluster="standalone",cache="owned-bundles"} 6.0
+caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_loads_total{cluster="standalone",cache="local-zk-children"} 8.0
+caffeine_cache_loads_total{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_loads_total{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="local-zk-data"} 79.0
+caffeine_cache_loads_total{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_loads_total{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_estimated_size gauge
+caffeine_cache_estimated_size{cluster="standalone",cache="owned-bundles"} 6.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-children"} 2.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="local-zk-data"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_estimated_size{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE caffeine_cache_load_duration_seconds summary
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="owned-bundles"} 6.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="owned-bundles"} 0.064524869
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-exists"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-children"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-children"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-exists"} 7.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-exists"} 0.020761008
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-children"} 8.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-children"} 0.075053592
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bundles"} 4.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bundles"} 0.022866292
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-data"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="local-zk-data"} 79.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="local-zk-data"} 0.424431063
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="bookies-racks-data"} 0.0
+caffeine_cache_load_duration_seconds_count{cluster="standalone",cache="global-zk-exists"} 0.0
+caffeine_cache_load_duration_seconds_sum{cluster="standalone",cache="global-zk-exists"} 0.0
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total{cluster="standalone"} 2554.5
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds{cluster="standalone"} 1.583768876396E9
+# TYPE process_open_fds gauge
+process_open_fds{cluster="standalone"} 678.0
+# TYPE process_max_fds gauge
+process_max_fds{cluster="standalone"} 1048576.0
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes{cluster="standalone"} 8.749596672E9
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes{cluster="standalone"} 1.679040512E9
+# TYPE jvm_classes_loaded gauge
+jvm_classes_loaded{cluster="standalone"} 14522.0
+# TYPE jvm_classes_loaded_total counter
+jvm_classes_loaded_total{cluster="standalone"} 14522.0
+# TYPE jvm_classes_unloaded_total counter
+jvm_classes_unloaded_total{cluster="standalone"} 0.0
+# TYPE zk_read_latency summary
+zk_read_latency{cluster="standalone",quantile="0.5"} NaN
+zk_read_latency{cluster="standalone",quantile="0.75"} NaN
+zk_read_latency{cluster="standalone",quantile="0.95"} NaN
+zk_read_latency{cluster="standalone",quantile="0.99"} NaN
+zk_read_latency{cluster="standalone",quantile="0.999"} NaN
+zk_read_latency{cluster="standalone",quantile="0.9999"} NaN
+zk_read_latency_count{cluster="standalone"} 0.0
+zk_read_latency_sum{cluster="standalone"} 0.0
+# TYPE zookeeper_server_requests counter
+zookeeper_server_requests{cluster="standalone",type="getData"} 2948.0
+zookeeper_server_requests{cluster="standalone",type="setData"} 270.0
+zookeeper_server_requests{cluster="standalone",type="ping"} 9679.0
+zookeeper_server_requests{cluster="standalone",type="unknown"} 2.0
+zookeeper_server_requests{cluster="standalone",type="sync"} 225.0
+zookeeper_server_requests{cluster="standalone",type="delete"} 1099.0
+zookeeper_server_requests{cluster="standalone",type="createSession"} 14.0
+zookeeper_server_requests{cluster="standalone",type="multi"} 311.0
+zookeeper_server_requests{cluster="standalone",type="getChildren"} 840.0
+zookeeper_server_requests{cluster="standalone",type="getChildren2"} 889.0
+zookeeper_server_requests{cluster="standalone",type="closeSession"} 5.0
+zookeeper_server_requests{cluster="standalone",type="create"} 478.0
+zookeeper_server_requests{cluster="standalone",type="exists"} 3100.0
+# TYPE pulsar_broker_publish_latency summary
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.0"} 1.521
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.5"} 2.295
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.95"} 6.139
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.99"} 19.977
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.999"} 19.977
+pulsar_broker_publish_latency{cluster="standalone",quantile="0.9999"} 19.977
+pulsar_broker_publish_latency{cluster="standalone",quantile="1.0"} 19.977
+pulsar_broker_publish_latency_count{cluster="standalone"} 540306.0
+pulsar_broker_publish_latency_sum{cluster="standalone"} 1410934.0
+# TYPE zookeeper_server_watches_count gauge
+zookeeper_server_watches_count{cluster="standalone"} 37.0
+# TYPE zookeeper_server_ephemerals_count gauge
+zookeeper_server_ephemerals_count{cluster="standalone"} 12.0
+# TYPE topic_load_times summary
+topic_load_times{cluster="standalone",quantile="0.5"} NaN
+topic_load_times{cluster="standalone",quantile="0.75"} NaN
+topic_load_times{cluster="standalone",quantile="0.95"} NaN
+topic_load_times{cluster="standalone",quantile="0.99"} NaN
+topic_load_times{cluster="standalone",quantile="0.999"} NaN
+topic_load_times{cluster="standalone",quantile="0.9999"} NaN
+topic_load_times_count{cluster="standalone"} 0.0
+topic_load_times_sum{cluster="standalone"} 0.0
+# TYPE jvm_gc_collection_seconds summary
+jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Young Generation"} 64.0
+jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Young Generation"} 13.761
+jvm_gc_collection_seconds_count{cluster="standalone",gc="G1 Old Generation"} 0.0
+jvm_gc_collection_seconds_sum{cluster="standalone",gc="G1 Old Generation"} 0.0
+# TYPE jvm_memory_direct_bytes_max gauge
+jvm_memory_direct_bytes_max{cluster="standalone"} 4.294967296E9
+# TYPE zookeeper_server_znode_count gauge
+zookeeper_server_znode_count{cluster="standalone"} 4157.0
+# TYPE zookeeper_server_data_size_bytes gauge
+zookeeper_server_data_size_bytes{cluster="standalone"} 457035.0
+# TYPE jvm_threads_current gauge
+jvm_threads_current{cluster="standalone"} 303.0
+# TYPE jvm_threads_daemon gauge
+jvm_threads_daemon{cluster="standalone"} 49.0
+# TYPE jvm_threads_peak gauge
+jvm_threads_peak{cluster="standalone"} 306.0
+# TYPE jvm_threads_started_total counter
+jvm_threads_started_total{cluster="standalone"} 474.0
+# TYPE jvm_threads_deadlocked gauge
+jvm_threads_deadlocked{cluster="standalone"} 0.0
+# TYPE jvm_threads_deadlocked_monitor gauge
+jvm_threads_deadlocked_monitor{cluster="standalone"} 0.0
+# TYPE pulsar_topics_count gauge
+pulsar_topics_count{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_subscriptions_count gauge
+pulsar_subscriptions_count{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_producers_count gauge
+pulsar_producers_count{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_consumers_count gauge
+pulsar_consumers_count{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_rate_in gauge
+pulsar_rate_in{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_rate_out gauge
+pulsar_rate_out{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_throughput_in gauge
+pulsar_throughput_in{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_throughput_out gauge
+pulsar_throughput_out{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_storage_size gauge
+pulsar_storage_size{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_storage_write_rate gauge
+pulsar_storage_write_rate{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_storage_read_rate gauge
+pulsar_storage_read_rate{cluster="standalone"} 0 1583774714170
+# TYPE pulsar_msg_backlog gauge
+pulsar_msg_backlog{cluster="standalone"} 0 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 2.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 2.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 4.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 25.013 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 50.027 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1450.789 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 2901.607 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1951642.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_backlog_size gauge
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_offloaded_size gauge
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_backlog_quota_limit gauge
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 10737418240.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_0_5 gauge
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_1 gauge
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 10.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_5 gauge
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1308.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_10 gauge
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 95.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_20 gauge
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 26.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_50 gauge
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 9.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_100 gauge
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_200 gauge
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_le_1000 gauge
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_overflow gauge
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_storage_write_latency_count gauge
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_storage_write_latency_sum gauge
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_entry_size_le_128 gauge
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_entry_size_le_512 gauge
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_1_kb gauge
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_2_kb gauge
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_4_kb gauge
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_16_kb gauge
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_100_kb gauge
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_1_mb gauge
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_le_overflow gauge
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 0.0 1583774714170
+# TYPE pulsar_entry_size_count gauge
+pulsar_entry_size_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_entry_size_sum gauge
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 1448.0 1583774714170
+# TYPE pulsar_subscription_back_log gauge
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0 1583774714170
+# TYPE pulsar_subscription_delayed gauge
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0 1583774714170
+# TYPE pulsar_subscription_msg_rate_redeliver gauge
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0.0 1583774714170
+# TYPE pulsar_subscription_unacked_messages gauge
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0 1583774714170
+# TYPE pulsar_subscription_blocked_on_unacked_messages gauge
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 0 1583774714170
+# TYPE pulsar_subscription_msg_rate_out gauge
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 25.013 1583774714170
+# TYPE pulsar_subscription_msg_throughput_out gauge
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-2"} 1450.808 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 25.013 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1",subscription="dev-dev-1-sub-1"} 1450.799 1583774714170
+# TYPE pulsar_in_bytes_total gauge
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 7657655.0 1583774714170
+# TYPE pulsar_in_messages_total gauge
+pulsar_in_messages_total{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-1"} 133649.0 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 4.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 2.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 6.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 25.014 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 100.060 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1450.862 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 5803.500 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 2029478.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 10737418240.0 1583774714170
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 15.0 1583774714170
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1342.0 1583774714170
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 82.0 1583774714170
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 28.0 1583774714170
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 9.0 1583774714170
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1.0 1583774714170
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 0.0 1583774714170
+pulsar_entry_size_count{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 1477.0 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 25.015 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-1"} 1450.873 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 25.015 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-3"} 1450.878 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 25.015 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-2"} 1450.881 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 25.014 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2",subscription="dev-dev-2-sub-4"} 1450.866 1583774714170
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 7730949.0 1583774714170
+pulsar_in_messages_total{cluster="standalone",namespace="sample/dev",topic="persistent://sample/dev/dev-2"} 134992.0 1583774714170
+pulsar_topics_count{cluster="standalone",namespace="sample/dev"} 2 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 1.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 10737418240.0 1583774714170
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0.0 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata",subscription="reader-b8cf46412d"} 0.0 1583774714170
+pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/metadata"} 0.0 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 1.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 1.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 10737418240.0 1583774714170
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583774714170
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583774714170
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583774714170
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583774714170
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0 1583774714170
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583774714170
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate",subscription="participants"} 0.0 1583774714170
+pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/coordinate"} 0.0 1583774714170
+pulsar_subscriptions_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583774714170
+pulsar_producers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583774714170
+pulsar_consumers_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 1.0 1583774714170
+pulsar_rate_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_throughput_in{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_msg_backlog{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_backlog_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_offloaded_size{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 10737418240.0 1583774714170
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_128{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_512{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714170
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_entry_size_count{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_entry_size_sum{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0.0 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments",subscription="reader-d3be8b651a"} 0.0 1583774714171
+pulsar_in_bytes_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_in_messages_total{cluster="standalone",namespace="public/functions",topic="persistent://public/functions/assignments"} 0.0 1583774714171
+pulsar_topics_count{cluster="standalone",namespace="public/functions"} 5 1583774714171
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 2.0 1583774714171
+pulsar_producers_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 2.0 1583774714171
+pulsar_consumers_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 4.0 1583774714171
+pulsar_rate_in{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 26.018 1583774714171
+pulsar_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 52.037 1583774714171
+pulsar_throughput_in{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1561.110 1583774714171
+pulsar_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 3122.248 1583774714171
+pulsar_storage_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 2108760.0 1583774714171
+pulsar_msg_backlog{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 10737418240.0 1583774714171
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 9.0 1583774714171
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1339.0 1583774714171
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 80.0 1583774714171
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 34.0 1583774714171
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 10.0 1583774714171
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1.0 1583774714171
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 0.0 1583774714171
+pulsar_entry_size_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 1473.0 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 26.018 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-1"} 1561.118 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 26.018 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2",subscription="prod-prod-2-sub-2"} 1561.130 1583774714171
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 8010057.0 1583774714171
+pulsar_in_messages_total{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-2"} 135146.0 1583774714171
+pulsar_subscriptions_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 2.0 1583774714171
+pulsar_producers_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 2.0 1583774714171
+pulsar_consumers_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 4.0 1583774714171
+pulsar_rate_in{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 26.019 1583774714171
+pulsar_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 52.038 1583774714171
+pulsar_throughput_in{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1561.151 1583774714171
+pulsar_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 3122.322 1583774714171
+pulsar_storage_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 2022420.0 1583774714171
+pulsar_msg_backlog{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_backlog_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_offloaded_size{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_backlog_quota_limit{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 10737418240.0 1583774714171
+pulsar_storage_write_latency_le_0_5{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_write_latency_le_1{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 7.0 1583774714171
+pulsar_storage_write_latency_le_5{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1339.0 1583774714171
+pulsar_storage_write_latency_le_10{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 84.0 1583774714171
+pulsar_storage_write_latency_le_20{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 26.0 1583774714171
+pulsar_storage_write_latency_le_50{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 12.0 1583774714171
+pulsar_storage_write_latency_le_100{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1.0 1583774714171
+pulsar_storage_write_latency_le_200{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_write_latency_le_1000{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_write_latency_overflow{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_storage_write_latency_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_storage_write_latency_sum{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_entry_size_le_128{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_entry_size_le_512{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_1_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_2_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_4_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_16_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_100_kb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_1_mb{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_le_overflow{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 0.0 1583774714171
+pulsar_entry_size_count{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_entry_size_sum{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 1469.0 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 26.019 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-1"} 1561.165 1583774714171
+pulsar_subscription_back_log{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0 1583774714171
+pulsar_subscription_delayed{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0 1583774714171
+pulsar_subscription_msg_rate_redeliver{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0.0 1583774714171
+pulsar_subscription_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0 1583774714171
+pulsar_subscription_blocked_on_unacked_messages{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 0 1583774714171
+pulsar_subscription_msg_rate_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 26.019 1583774714171
+pulsar_subscription_msg_throughput_out{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1",subscription="prod-prod-1-sub-2"} 1561.157 1583774714171
+pulsar_in_bytes_total{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 7928433.0 1583774714171
+pulsar_in_messages_total{cluster="standalone",namespace="sample/prod",topic="persistent://sample/prod/prod-1"} 133707.0 1583774714171
+pulsar_topics_count{cluster="standalone",namespace="sample/prod"} 7 1583774714171 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/puppet/README.md b/src/go/plugin/go.d/modules/puppet/README.md
new file mode 120000
index 000000000..b6c4c83f9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/README.md
@@ -0,0 +1 @@
+integrations/puppet.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/puppet/charts.go b/src/go/plugin/go.d/modules/puppet/charts.go
new file mode 100644
index 000000000..c1da8d162
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/charts.go
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package puppet
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioJVMHeap = module.Priority + iota
+ prioJVMNonHeap
+ prioCPUUsage
+ prioFileDescriptors
+)
+
+const (
+ byteToMiB = 1 << 20
+)
+
+var charts = module.Charts{
+ jvmHeapChart.Copy(),
+ jvmNonHeapChart.Copy(),
+ cpuUsageChart.Copy(),
+ fileDescriptorsChart.Copy(),
+}
+
+var (
+ jvmHeapChart = module.Chart{
+ ID: "jvm_heap",
+ Title: "JVM Heap",
+ Units: "MiB",
+ Fam: "resources",
+ Ctx: "puppet.jvm_heap",
+ Type: module.Area,
+ Priority: prioJVMHeap,
+ Dims: module.Dims{
+ {ID: "jvm_heap_committed", Name: "committed", Div: byteToMiB},
+ {ID: "jvm_heap_used", Name: "used", Div: byteToMiB},
+ },
+ Vars: module.Vars{
+ {ID: "jvm_heap_max"},
+ {ID: "jvm_heap_init"},
+ },
+ }
+
+ jvmNonHeapChart = module.Chart{
+ ID: "jvm_nonheap",
+ Title: "JVM Non-Heap",
+ Units: "MiB",
+ Fam: "resources",
+ Ctx: "puppet.jvm_nonheap",
+ Type: module.Area,
+ Priority: prioJVMNonHeap,
+ Dims: module.Dims{
+ {ID: "jvm_nonheap_committed", Name: "committed", Div: byteToMiB},
+ {ID: "jvm_nonheap_used", Name: "used", Div: byteToMiB},
+ },
+ Vars: module.Vars{
+ {ID: "jvm_nonheap_max"},
+ {ID: "jvm_nonheap_init"},
+ },
+ }
+
+ cpuUsageChart = module.Chart{
+ ID: "cpu",
+ Title: "CPU usage",
+ Units: "percentage",
+ Fam: "resources",
+ Ctx: "puppet.cpu",
+ Type: module.Stacked,
+ Priority: prioCPUUsage,
+ Dims: module.Dims{
+ {ID: "cpu_usage", Name: "execution", Div: 1000},
+ {ID: "gc_cpu_usage", Name: "GC", Div: 1000},
+ },
+ }
+
+ fileDescriptorsChart = module.Chart{
+ ID: "fd_open",
+ Title: "File Descriptors",
+ Units: "descriptors",
+ Fam: "resources",
+ Ctx: "puppet.fdopen",
+ Type: module.Line,
+ Priority: prioFileDescriptors,
+ Dims: module.Dims{
+ {ID: "fd_used", Name: "used"},
+ },
+ Vars: module.Vars{
+ {ID: "fd_max"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/puppet/collect.go b/src/go/plugin/go.d/modules/puppet/collect.go
new file mode 100644
index 000000000..a1b95e09c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/collect.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package puppet
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+var (
+	// https://puppet.com/docs/puppet/8/server/status-api/v1/services
+	// level=debug is requested so the response includes the detailed (experimental) JVM metrics parsed below.
+ urlPathStatusService = "/status/v1/services"
+ urlQueryStatusService = url.Values{"level": {"debug"}}.Encode()
+)
+
+func (p *Puppet) collect() (map[string]int64, error) {
+ stats, err := p.queryStatsService()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := stm.ToMap(stats)
+
+ return mx, nil
+}
+
+func (p *Puppet) queryStatsService() (*statusServiceResponse, error) {
+ req, err := web.NewHTTPRequestWithPath(p.Request, urlPathStatusService)
+ if err != nil {
+ return nil, err
+ }
+
+ req.URL.RawQuery = urlQueryStatusService
+
+ var stats statusServiceResponse
+ if err := p.doOKDecode(req, &stats); err != nil {
+ return nil, err
+ }
+
+ if stats.StatusService == nil {
+		return nil, fmt.Errorf("unexpected response: no puppet status-service data")
+ }
+
+ return &stats, nil
+}
+
+func (p *Puppet) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := p.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/puppet/config_schema.json b/src/go/plugin/go.d/modules/puppet/config_schema.json
new file mode 100644
index 000000000..92cbcb87f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/config_schema.json
@@ -0,0 +1,177 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Puppet collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL where the Puppet instance can be accessed.",
+ "type": "string",
+ "default": "https://127.0.0.1:8140",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/puppet/integrations/puppet.md b/src/go/plugin/go.d/modules/puppet/integrations/puppet.md
new file mode 100644
index 000000000..23e85dc4d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/integrations/puppet.md
@@ -0,0 +1,233 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/puppet/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/puppet/metadata.yaml"
+sidebar_label: "Puppet"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/CICD Platforms"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Puppet
+
+
+<img src="https://netdata.cloud/img/puppet.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: puppet
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Puppet metrics, including JVM heap and non-heap memory, CPU usage, and file descriptors.
+
+
+It uses Puppet's metrics API endpoint [/status/v1/services](https://www.puppet.com/docs/puppetserver/5.3/status-api/v1/services.html) to gather the metrics.
+
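+As a quick way to verify what the collector will see, you can query the same endpoint manually. This is only an illustrative sketch: it assumes the default `https://127.0.0.1:8140` address and skips certificate verification with `-k` (as you would with a self-signed certificate); depending on your `auth.conf`, client certificates may also be required.
+
+```bash
+# Request debug-level status for all services, the same detail level this collector uses.
+curl -k "https://127.0.0.1:8140/status/v1/services?level=debug"
+```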
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Puppet instances running on localhost that are listening on port 8140.
+On startup, it tries to collect metrics from:
+
+- https://127.0.0.1:8140
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Puppet instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| puppet.jvm_heap | committed, used | MiB |
+| puppet.jvm_nonheap | committed, used | MiB |
+| puppet.cpu | execution, GC | percentage |
+| puppet.fdopen | used | descriptors |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/puppet.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/puppet.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary></summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| url | The base URL where the Puppet instance can be accessed. | https://127.0.0.1:8140 | yes |
+| timeout | HTTPS request timeout. | 1 | no |
+| username | Username for basic HTTPS authentication. | | no |
+| password | Password for basic HTTPS authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTPS authentication. | | no |
+| proxy_password | Password for proxy basic HTTPS authentication. | | no |
+| method | HTTPS request method. | POST | no |
+| body | HTTPS request body. | | no |
+| headers | HTTPS request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic with self-signed certificate
+
+Puppet with self-signed TLS certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:8140
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:8140
+ tls_skip_verify: yes
+
+ - name: remote
+ url: https://192.0.2.1:8140
+ tls_skip_verify: yes
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `puppet` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m puppet
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `puppet` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep puppet
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep puppet /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep puppet
+```
+
+
diff --git a/src/go/plugin/go.d/modules/puppet/metadata.yaml b/src/go/plugin/go.d/modules/puppet/metadata.yaml
new file mode 100644
index 000000000..fa96ea8f2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/metadata.yaml
@@ -0,0 +1,184 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-puppet
+ plugin_name: go.d.plugin
+ module_name: puppet
+ monitored_instance:
+ name: Puppet
+ link: "https://www.puppet.com/"
+ categories:
+ - data-collection.ci-cd-systems
+ icon_filename: "puppet.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - puppet
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Puppet metrics, including JVM heap and non-heap memory, CPU usage, and file descriptors.
+ method_description: |
+ It uses Puppet's metrics API endpoint [/status/v1/services](https://www.puppet.com/docs/puppetserver/5.3/status-api/v1/services.html) to gather the metrics.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Puppet instances running on localhost that are listening on port 8140.
+ On startup, it tries to collect metrics from:
+
+ - https://127.0.0.1:8140
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "go.d/puppet.conf"
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: ""
+ enabled: true
+ list:
+ - name: url
+ description: The base URL where the Puppet instance can be accessed.
+ default_value: https://127.0.0.1:8140
+ required: true
+ - name: timeout
+ description: HTTPS request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTPS authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTPS authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTPS authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTPS authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTPS request method.
+ default_value: POST
+ required: false
+ - name: body
+ description: HTTPS request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTPS request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic with self-signed certificate
+ description: Puppet with self-signed TLS certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:8140
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:8140
+ tls_skip_verify: yes
+
+ - name: remote
+ url: https://192.0.2.1:8140
+ tls_skip_verify: yes
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: puppet.jvm_heap
+ description: JVM Heap
+ unit: "MiB"
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: puppet.jvm_nonheap
+ description: JVM Non-Heap
+ unit: "MiB"
+ chart_type: area
+ dimensions:
+ - name: committed
+ - name: used
+ - name: puppet.cpu
+ description: CPU usage
+ unit: "percentage"
+ chart_type: stacked
+ dimensions:
+ - name: execution
+ - name: GC
+ - name: puppet.fdopen
+ description: File Descriptors
+ unit: "descriptors"
+ chart_type: line
+ dimensions:
+ - name: used
diff --git a/src/go/plugin/go.d/modules/puppet/puppet.go b/src/go/plugin/go.d/modules/puppet/puppet.go
new file mode 100644
index 000000000..e6eb7b058
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/puppet.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package puppet
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("puppet", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Puppet {
+ return &Puppet{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "https://127.0.0.1:8140",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 1),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Puppet struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (p *Puppet) Configuration() any {
+ return p.Config
+}
+
+func (p *Puppet) Init() error {
+ if p.URL == "" {
+ p.Error("URL not set")
+ return errors.New("url not set")
+ }
+
+ client, err := web.NewHTTPClient(p.Client)
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ p.httpClient = client
+
+ p.Debugf("using URL %s", p.URL)
+ p.Debugf("using timeout: %s", p.Timeout)
+
+ return nil
+}
+
+func (p *Puppet) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (p *Puppet) Charts() *module.Charts {
+ return p.charts
+}
+
+func (p *Puppet) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (p *Puppet) Cleanup() {
+ if p.httpClient != nil {
+ p.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/puppet/puppet_test.go b/src/go/plugin/go.d/modules/puppet/puppet_test.go
new file mode 100644
index 000000000..7c80a638a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/puppet_test.go
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package puppet
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ serviceStatusResponse, _ = os.ReadFile("testdata/serviceStatusResponse.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "serviceStatusResponse": serviceStatusResponse,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPuppet_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Puppet{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPuppet_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ puppet := New()
+ puppet.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, puppet.Init())
+ } else {
+ assert.NoError(t, puppet.Init())
+ }
+ })
+ }
+}
+
+func TestPuppet_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestPuppet_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (*Puppet, func())
+ }{
+ "success default config": {
+ wantFail: false,
+ prepare: prepareCaseOkDefault,
+ },
+ "fails on unexpected json response": {
+ wantFail: true,
+ prepare: prepareCaseUnexpectedJsonResponse,
+ },
+ "fails on invalid format response": {
+ wantFail: true,
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ puppet, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, puppet.Check())
+ } else {
+ assert.NoError(t, puppet.Check())
+ }
+ })
+ }
+}
+
+func TestPuppet_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (*Puppet, func())
+ wantMetrics map[string]int64
+ }{
+ "success default config": {
+ prepare: prepareCaseOkDefault,
+ wantMetrics: map[string]int64{
+ "cpu_usage": 49,
+ "fd_max": 524288,
+ "fd_used": 234,
+ "gc_cpu_usage": 0,
+ "jvm_heap_committed": 1073741824,
+ "jvm_heap_init": 1073741824,
+ "jvm_heap_max": 1073741824,
+ "jvm_heap_used": 550502400,
+ "jvm_nonheap_committed": 334102528,
+ "jvm_nonheap_init": 7667712,
+ "jvm_nonheap_max": -1,
+ "jvm_nonheap_used": 291591160,
+ },
+ },
+ "fails on unexpected json response": {
+ prepare: prepareCaseUnexpectedJsonResponse,
+ },
+ "fails on invalid format response": {
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ puppet, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := puppet.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ testMetricsHasAllChartsDims(t, puppet, mx)
+ }
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, puppet *Puppet, mx map[string]int64) {
+ for _, chart := range *puppet.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareCaseOkDefault(t *testing.T) (*Puppet, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/status/v1/services":
+ if r.URL.RawQuery != urlQueryStatusService {
+ w.WriteHeader(http.StatusNotFound)
+ } else {
+ _, _ = w.Write(serviceStatusResponse)
+ }
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ puppet := New()
+ puppet.URL = srv.URL
+ require.NoError(t, puppet.Init())
+
+ return puppet, srv.Close
+}
+
+func prepareCaseUnexpectedJsonResponse(t *testing.T) (*Puppet, func()) {
+ t.Helper()
+ resp := `
+{
+ "elephant": {
+ "burn": false,
+ "mountain": true,
+ "fog": false,
+ "skin": -1561907625,
+ "burst": "anyway",
+ "shadow": 1558616893
+ },
+ "start": "ever",
+ "base": 2093056027,
+ "mission": -2007590351,
+ "victory": 999053756,
+ "die": false
+}
+`
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(resp))
+ }))
+
+ puppet := New()
+ puppet.URL = srv.URL
+ require.NoError(t, puppet.Init())
+
+ return puppet, srv.Close
+}
+
+func prepareCaseInvalidFormatResponse(t *testing.T) (*Puppet, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ puppet := New()
+ puppet.URL = srv.URL
+ require.NoError(t, puppet.Init())
+
+ return puppet, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*Puppet, func()) {
+ t.Helper()
+ puppet := New()
+ puppet.URL = "http://127.0.0.1:65001"
+ require.NoError(t, puppet.Init())
+
+ return puppet, func() {}
+}
diff --git a/src/go/plugin/go.d/modules/puppet/response.go b/src/go/plugin/go.d/modules/puppet/response.go
new file mode 100644
index 000000000..dc903d0a9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/response.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package puppet
+
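+// statusServiceResponse models the subset of the /status/v1/services payload used by the collector.
+// The stm tags flatten the nested JSON into metric keys (e.g. "jvm_heap_used"); tags of the form
+// "name,mul,div" also scale the value, so cpu-usage is multiplied by 1000 here and the chart
+// dimension divides it back by 1000 to report a percentage.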
+type statusServiceResponse struct {
+ StatusService *struct {
+ Status struct {
+ Experimental struct {
+ JVMMetrics *struct {
+ CPUUsage float64 `json:"cpu-usage" stm:"cpu_usage,1000,1"`
+ GCCPUUsage float64 `json:"gc-cpu-usage" stm:"gc_cpu_usage,1000,1"`
+ HeapMemory struct {
+ Committed int64 `json:"committed" stm:"committed"`
+ Init int64 `json:"init" stm:"init"`
+ Max int64 `json:"max" stm:"max"`
+ Used int64 `json:"used" stm:"used"`
+ } `json:"heap-memory" stm:"jvm_heap"`
+ FileDescriptors struct {
+ Used int `json:"used" stm:"used"`
+ Max int `json:"max" stm:"max"`
+ } `json:"file-descriptors" stm:"fd"`
+ NonHeapMemory struct {
+ Committed int64 `json:"committed" stm:"committed"`
+ Init int64 `json:"init" stm:"init"`
+ Max int64 `json:"max" stm:"max"`
+ Used int64 `json:"used" stm:"used"`
+ } `json:"non-heap-memory" stm:"jvm_nonheap"`
+ } `json:"jvm-metrics" stm:""`
+ } `json:"experimental" stm:""`
+ } `json:"status" stm:""`
+ } `json:"status-service" stm:""`
+}
diff --git a/src/go/plugin/go.d/modules/puppet/testdata/config.json b/src/go/plugin/go.d/modules/puppet/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/puppet/testdata/config.yaml b/src/go/plugin/go.d/modules/puppet/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/puppet/testdata/serviceStatusResponse.json b/src/go/plugin/go.d/modules/puppet/testdata/serviceStatusResponse.json
new file mode 100644
index 000000000..a0eee8693
--- /dev/null
+++ b/src/go/plugin/go.d/modules/puppet/testdata/serviceStatusResponse.json
@@ -0,0 +1,497 @@
+{
+ "puppet-profiler": {
+ "service_version": "8.4.0",
+ "service_status_version": 1,
+ "detail_level": "debug",
+ "state": "running",
+ "status": {
+ "experimental": {
+ "function-metrics": [],
+ "resource-metrics": [],
+ "catalog-metrics": [],
+ "puppetdb-metrics": [],
+ "inline-metrics": []
+ }
+ },
+ "active_alerts": []
+ },
+ "jruby-metrics": {
+ "service_version": "8.4.0",
+ "service_status_version": 1,
+ "detail_level": "debug",
+ "state": "running",
+ "status": {
+ "experimental": {
+ "jruby-pool-lock-status": {
+ "current-state": ":not-in-use",
+ "last-change-time": "2024-07-05T06:23:20.120Z"
+ },
+ "metrics": {
+ "average-lock-wait-time": 0,
+ "num-free-jrubies": 4,
+ "borrow-count": 0,
+ "average-requested-jrubies": 0.0,
+ "borrow-timeout-count": 0,
+ "return-count": 0,
+ "borrow-timers": {
+ "total": {
+ "count": 0,
+ "mean": 0,
+ "max": 0,
+ "rate": 0.0
+ }
+ },
+ "borrow-retry-count": 0,
+ "borrowed-instances": [],
+ "average-borrow-time": 0,
+ "num-jrubies": 4,
+ "requested-count": 0,
+ "queue-limit-hit-rate": 0.0,
+ "average-lock-held-time": 0,
+ "requested-instances": [],
+ "queue-limit-hit-count": 0,
+ "average-free-jrubies": 3.3019592583652217,
+ "num-pool-locks": 0,
+ "average-wait-time": 0
+ }
+ }
+ },
+ "active_alerts": []
+ },
+ "ca": {
+ "service_version": "8.4.0",
+ "service_status_version": 1,
+ "detail_level": "debug",
+ "state": "running",
+ "status": {},
+ "active_alerts": []
+ },
+ "master": {
+ "service_version": "8.4.0",
+ "service_status_version": 1,
+ "detail_level": "debug",
+ "state": "running",
+ "status": {
+ "experimental": {
+ "http-metrics": [
+ {
+ "route-id": "puppet-v3-static_file_content-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-file_content-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-environments",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-tasks-:module-name-:task-name",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-file_metadata-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-facts-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "other",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-tasks",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-compile",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-report-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-node-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-catalog-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-plans-:module-name-:plan-name",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-file_metadatas-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-file_bucket_file-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v4-catalog",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "total",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-environment_modules-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-environment_classes-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-plans",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-environment_transports-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ }
+ ],
+ "http-client-metrics": []
+ }
+ },
+ "active_alerts": []
+ },
+ "server": {
+ "service_version": "8.4.0",
+ "service_status_version": 1,
+ "detail_level": "debug",
+ "state": "running",
+ "status": {
+ "experimental": {
+ "http-metrics": [
+ {
+ "route-id": "puppet-v3-static_file_content-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-file_content-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-environments",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-tasks-:module-name-:task-name",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-file_metadata-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-facts-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "other",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-tasks",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-compile",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-report-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-node-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-catalog-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-plans-:module-name-:plan-name",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-file_metadatas-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-file_bucket_file-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v4-catalog",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "total",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-environment_modules-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-environment_classes-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-plans",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ },
+ {
+ "route-id": "puppet-v3-environment_transports-/*/",
+ "count": 0,
+ "mean": 0,
+ "aggregate": 0
+ }
+ ],
+ "http-client-metrics": []
+ }
+ },
+ "active_alerts": []
+ },
+ "status-service": {
+ "service_version": "1.1.1",
+ "service_status_version": 1,
+ "detail_level": "debug",
+ "state": "running",
+ "status": {
+ "experimental": {
+ "jvm-metrics": {
+ "cpu-usage": 0.04997002,
+ "up-time-ms": 51328,
+ "memory-pools": {
+ "Metaspace": {
+ "type": "NON_HEAP",
+ "usage": {
+ "committed": 214106112,
+ "init": 0,
+ "max": -1,
+ "used": 183450600
+ }
+ },
+ "CodeHeap 'non-nmethods'": {
+ "type": "NON_HEAP",
+ "usage": {
+ "committed": 2555904,
+ "init": 2555904,
+ "max": 5840896,
+ "used": 1923072
+ }
+ },
+ "CodeHeap 'profiled nmethods'": {
+ "type": "NON_HEAP",
+ "usage": {
+ "committed": 52559872,
+ "init": 2555904,
+ "max": 122908672,
+ "used": 52545664
+ }
+ },
+ "Compressed Class Space": {
+ "type": "NON_HEAP",
+ "usage": {
+ "committed": 49020928,
+ "init": 0,
+ "max": 1073741824,
+ "used": 37887856
+ }
+ },
+ "G1 Eden Space": {
+ "type": "HEAP",
+ "usage": {
+ "committed": 542113792,
+ "init": 53477376,
+ "max": -1,
+ "used": 146800640
+ }
+ },
+ "G1 Old Gen": {
+ "type": "HEAP",
+ "usage": {
+ "committed": 462422016,
+ "init": 1020264448,
+ "max": 1073741824,
+ "used": 335020032
+ }
+ },
+ "G1 Survivor Space": {
+ "type": "HEAP",
+ "usage": {
+ "committed": 69206016,
+ "init": 0,
+ "max": -1,
+ "used": 68681728
+ }
+ },
+ "CodeHeap 'non-profiled nmethods'": {
+ "type": "NON_HEAP",
+ "usage": {
+ "committed": 15597568,
+ "init": 2555904,
+ "max": 122908672,
+ "used": 15588736
+ }
+ }
+ },
+ "gc-cpu-usage": 0.0,
+ "threading": {
+ "thread-count": 59,
+ "peak-thread-count": 59
+ },
+ "heap-memory": {
+ "committed": 1073741824,
+ "init": 1073741824,
+ "max": 1073741824,
+ "used": 550502400
+ },
+ "gc-stats": {
+ "G1 Young Generation": {
+ "count": 18,
+ "total-time-ms": 550,
+ "last-gc-info": {
+ "duration-ms": 75
+ }
+ },
+ "G1 Old Generation": {
+ "count": 0,
+ "total-time-ms": 0
+ },
+ "G1 Concurrent GC": {
+ "count": 10,
+ "total-time-ms": 49,
+ "last-gc-info": {
+ "duration-ms": 0
+ }
+ }
+ },
+ "start-time-ms": 1720160584298,
+ "file-descriptors": {
+ "used": 234,
+ "max": 524288
+ },
+ "non-heap-memory": {
+ "committed": 334102528,
+ "init": 7667712,
+ "max": -1,
+ "used": 291591160
+ },
+ "nio-buffer-pools": {
+ "mapped": {
+ "count": 0,
+ "memory-used": 0,
+ "total-capacity": 0
+ },
+ "direct": {
+ "count": 11,
+ "memory-used": 197631,
+ "total-capacity": 197631
+ },
+ "mapped - 'non-volatile memory'": {
+ "count": 0,
+ "memory-used": 0,
+ "total-capacity": 0
+ }
+ }
+ }
+ }
+ },
+ "active_alerts": []
+ }
+}
diff --git a/src/go/plugin/go.d/modules/rabbitmq/README.md b/src/go/plugin/go.d/modules/rabbitmq/README.md
new file mode 120000
index 000000000..0119db91a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/README.md
@@ -0,0 +1 @@
+integrations/rabbitmq.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/rabbitmq/charts.go b/src/go/plugin/go.d/modules/rabbitmq/charts.go
new file mode 100644
index 000000000..f580a2f26
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/charts.go
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rabbitmq
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioMessagesCount = module.Priority + iota
+ prioMessagesRate
+
+ prioObjectsCount
+
+ prioConnectionChurnRate
+ prioChannelChurnRate
+ prioQueueChurnRate
+
+ prioFileDescriptorsCount
+ prioSocketsCount
+ prioErlangProcessesCount
+ prioErlangRunQueueProcessesCount
+ prioMemoryUsage
+ prioDiskSpaceFreeSize
+
+ prioVhostMessagesCount
+ prioVhostMessagesRate
+
+ prioQueueMessagesCount
+ prioQueueMessagesRate
+)
+
+var baseCharts = module.Charts{
+ chartMessagesCount.Copy(),
+ chartMessagesRate.Copy(),
+
+ chartObjectsCount.Copy(),
+
+ chartConnectionChurnRate.Copy(),
+ chartChannelChurnRate.Copy(),
+ chartQueueChurnRate.Copy(),
+
+ chartFileDescriptorsCount.Copy(),
+ chartSocketsCount.Copy(),
+ chartErlangProcessesCount.Copy(),
+ chartErlangRunQueueProcessesCount.Copy(),
+ chartMemoryUsage.Copy(),
+ chartDiskSpaceFreeSize.Copy(),
+}
+
+var chartsTmplVhost = module.Charts{
+ chartTmplVhostMessagesCount.Copy(),
+ chartTmplVhostMessagesRate.Copy(),
+}
+
+var chartsTmplQueue = module.Charts{
+ chartTmplQueueMessagesCount.Copy(),
+ chartTmplQueueMessagesRate.Copy(),
+}
+
+var (
+ chartMessagesCount = module.Chart{
+ ID: "messages_count",
+ Title: "Messages",
+ Units: "messages",
+ Fam: "messages",
+ Ctx: "rabbitmq.messages_count",
+ Type: module.Stacked,
+ Priority: prioMessagesCount,
+ Dims: module.Dims{
+ {ID: "queue_totals_messages_ready", Name: "ready"},
+ {ID: "queue_totals_messages_unacknowledged", Name: "unacknowledged"},
+ },
+ }
+ chartMessagesRate = module.Chart{
+ ID: "messages_rate",
+ Title: "Messages",
+ Units: "messages/s",
+ Fam: "messages",
+ Ctx: "rabbitmq.messages_rate",
+ Priority: prioMessagesRate,
+ Dims: module.Dims{
+ {ID: "message_stats_ack", Name: "ack", Algo: module.Incremental},
+ {ID: "message_stats_publish", Name: "publish", Algo: module.Incremental},
+ {ID: "message_stats_publish_in", Name: "publish_in", Algo: module.Incremental},
+ {ID: "message_stats_publish_out", Name: "publish_out", Algo: module.Incremental},
+ {ID: "message_stats_confirm", Name: "confirm", Algo: module.Incremental},
+ {ID: "message_stats_deliver", Name: "deliver", Algo: module.Incremental},
+ {ID: "message_stats_deliver_no_ack", Name: "deliver_no_ack", Algo: module.Incremental},
+ {ID: "message_stats_get", Name: "get", Algo: module.Incremental},
+ {ID: "message_stats_get_no_ack", Name: "get_no_ack", Algo: module.Incremental},
+ {ID: "message_stats_deliver_get", Name: "deliver_get", Algo: module.Incremental},
+ {ID: "message_stats_redeliver", Name: "redeliver", Algo: module.Incremental},
+ {ID: "message_stats_return_unroutable", Name: "return_unroutable", Algo: module.Incremental},
+ },
+ }
+ chartObjectsCount = module.Chart{
+ ID: "objects_count",
+ Title: "Objects",
+ Units: "objects",
+ Fam: "objects",
+ Ctx: "rabbitmq.objects_count",
+ Priority: prioObjectsCount,
+ Dims: module.Dims{
+ {ID: "object_totals_channels", Name: "channels"},
+ {ID: "object_totals_consumers", Name: "consumers"},
+ {ID: "object_totals_connections", Name: "connections"},
+ {ID: "object_totals_queues", Name: "queues"},
+ {ID: "object_totals_exchanges", Name: "exchanges"},
+ },
+ }
+
+ chartConnectionChurnRate = module.Chart{
+ ID: "connection_churn_rate",
+ Title: "Connection churn",
+ Units: "operations/s",
+ Fam: "churn",
+ Ctx: "rabbitmq.connection_churn_rate",
+ Priority: prioConnectionChurnRate,
+ Dims: module.Dims{
+ {ID: "churn_rates_connection_created", Name: "created", Algo: module.Incremental},
+ {ID: "churn_rates_connection_closed", Name: "closed", Algo: module.Incremental},
+ },
+ }
+ chartChannelChurnRate = module.Chart{
+ ID: "channel_churn_rate",
+ Title: "Channel churn",
+ Units: "operations/s",
+ Fam: "churn",
+ Ctx: "rabbitmq.channel_churn_rate",
+ Priority: prioChannelChurnRate,
+ Dims: module.Dims{
+ {ID: "churn_rates_channel_created", Name: "created", Algo: module.Incremental},
+ {ID: "churn_rates_channel_closed", Name: "closed", Algo: module.Incremental},
+ },
+ }
+ chartQueueChurnRate = module.Chart{
+ ID: "queue_churn_rate",
+ Title: "Queue churn",
+ Units: "operations/s",
+ Fam: "churn",
+ Ctx: "rabbitmq.queue_churn_rate",
+ Priority: prioQueueChurnRate,
+ Dims: module.Dims{
+ {ID: "churn_rates_queue_created", Name: "created", Algo: module.Incremental},
+ {ID: "churn_rates_queue_deleted", Name: "deleted", Algo: module.Incremental},
+ {ID: "churn_rates_queue_declared", Name: "declared", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartFileDescriptorsCount = module.Chart{
+ ID: "file_descriptors_count",
+ Title: "File descriptors",
+ Units: "fd",
+ Fam: "node stats",
+ Ctx: "rabbitmq.file_descriptors_count",
+ Type: module.Stacked,
+ Priority: prioFileDescriptorsCount,
+ Dims: module.Dims{
+ {ID: "fd_total", Name: "available"},
+ {ID: "fd_used", Name: "used"},
+ },
+ }
+ chartSocketsCount = module.Chart{
+ ID: "sockets_used_count",
+ Title: "Used sockets",
+ Units: "sockets",
+ Fam: "node stats",
+ Ctx: "rabbitmq.sockets_count",
+ Type: module.Stacked,
+ Priority: prioSocketsCount,
+ Dims: module.Dims{
+ {ID: "sockets_total", Name: "available"},
+ {ID: "sockets_used", Name: "used"},
+ },
+ }
+ chartErlangProcessesCount = module.Chart{
+ ID: "erlang_processes_count",
+ Title: "Erlang processes",
+ Units: "processes",
+ Fam: "node stats",
+ Ctx: "rabbitmq.erlang_processes_count",
+ Type: module.Stacked,
+ Priority: prioErlangProcessesCount,
+ Dims: module.Dims{
+ {ID: "proc_available", Name: "available"},
+ {ID: "proc_used", Name: "used"},
+ },
+ }
+ chartErlangRunQueueProcessesCount = module.Chart{
+ ID: "erlang_run_queue_processes_count",
+ Title: "Erlang run queue",
+ Units: "processes",
+ Fam: "node stats",
+ Ctx: "rabbitmq.erlang_run_queue_processes_count",
+ Priority: prioErlangRunQueueProcessesCount,
+ Dims: module.Dims{
+ {ID: "run_queue", Name: "length"},
+ },
+ }
+ chartMemoryUsage = module.Chart{
+ ID: "memory_usage",
+ Title: "Memory",
+ Units: "bytes",
+ Fam: "node stats",
+ Ctx: "rabbitmq.memory_usage",
+ Priority: prioMemoryUsage,
+ Dims: module.Dims{
+ {ID: "mem_used", Name: "used"},
+ },
+ }
+ chartDiskSpaceFreeSize = module.Chart{
+ ID: "disk_space_free_size",
+ Title: "Free disk space",
+ Units: "bytes",
+ Fam: "node stats",
+ Ctx: "rabbitmq.disk_space_free_size",
+ Type: module.Area,
+ Priority: prioDiskSpaceFreeSize,
+ Dims: module.Dims{
+ {ID: "disk_free", Name: "free"},
+ },
+ }
+)
+
+var (
+ chartTmplVhostMessagesCount = module.Chart{
+ ID: "vhost_%s_message_count",
+ Title: "Vhost messages",
+ Units: "messages",
+ Fam: "vhost messages",
+ Ctx: "rabbitmq.vhost_messages_count",
+ Type: module.Stacked,
+ Priority: prioVhostMessagesCount,
+ Dims: module.Dims{
+ {ID: "vhost_%s_messages_ready", Name: "ready"},
+ {ID: "vhost_%s_messages_unacknowledged", Name: "unacknowledged"},
+ },
+ }
+ chartTmplVhostMessagesRate = module.Chart{
+ ID: "vhost_%s_message_stats",
+ Title: "Vhost messages rate",
+ Units: "messages/s",
+ Fam: "vhost messages",
+ Ctx: "rabbitmq.vhost_messages_rate",
+ Type: module.Stacked,
+ Priority: prioVhostMessagesRate,
+ Dims: module.Dims{
+ {ID: "vhost_%s_message_stats_ack", Name: "ack", Algo: module.Incremental},
+ {ID: "vhost_%s_message_stats_confirm", Name: "confirm", Algo: module.Incremental},
+ {ID: "vhost_%s_message_stats_deliver", Name: "deliver", Algo: module.Incremental},
+ {ID: "vhost_%s_message_stats_get", Name: "get", Algo: module.Incremental},
+ {ID: "vhost_%s_message_stats_get_no_ack", Name: "get_no_ack", Algo: module.Incremental},
+ {ID: "vhost_%s_message_stats_publish", Name: "publish", Algo: module.Incremental},
+ {ID: "vhost_%s_message_stats_redeliver", Name: "redeliver", Algo: module.Incremental},
+ {ID: "vhost_%s_message_stats_return_unroutable", Name: "return_unroutable", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartTmplQueueMessagesCount = module.Chart{
+ ID: "queue_%s_vhost_%s_message_count",
+ Title: "Queue messages",
+ Units: "messages",
+ Fam: "queue messages",
+ Ctx: "rabbitmq.queue_messages_count",
+ Type: module.Stacked,
+ Priority: prioQueueMessagesCount,
+ Dims: module.Dims{
+ {ID: "queue_%s_vhost_%s_messages_ready", Name: "ready"},
+ {ID: "queue_%s_vhost_%s_messages_unacknowledged", Name: "unacknowledged"},
+ {ID: "queue_%s_vhost_%s_messages_paged_out", Name: "paged_out"},
+ {ID: "queue_%s_vhost_%s_messages_persistent", Name: "persistent"},
+ },
+ }
+ chartTmplQueueMessagesRate = module.Chart{
+ ID: "queue_%s_vhost_%s_message_stats",
+ Title: "Queue messages rate",
+ Units: "messages/s",
+ Fam: "queue messages",
+ Ctx: "rabbitmq.queue_messages_rate",
+ Type: module.Stacked,
+ Priority: prioQueueMessagesRate,
+ Dims: module.Dims{
+ {ID: "queue_%s_vhost_%s_message_stats_ack", Name: "ack", Algo: module.Incremental},
+ {ID: "queue_%s_vhost_%s_message_stats_confirm", Name: "confirm", Algo: module.Incremental},
+ {ID: "queue_%s_vhost_%s_message_stats_deliver", Name: "deliver", Algo: module.Incremental},
+ {ID: "queue_%s_vhost_%s_message_stats_get", Name: "get", Algo: module.Incremental},
+ {ID: "queue_%s_vhost_%s_message_stats_get_no_ack", Name: "get_no_ack", Algo: module.Incremental},
+ {ID: "queue_%s_vhost_%s_message_stats_publish", Name: "publish", Algo: module.Incremental},
+ {ID: "queue_%s_vhost_%s_message_stats_redeliver", Name: "redeliver", Algo: module.Incremental},
+ {ID: "queue_%s_vhost_%s_message_stats_return_unroutable", Name: "return_unroutable", Algo: module.Incremental},
+ },
+ }
+)
+
+func (r *RabbitMQ) addVhostCharts(name string) {
+ charts := chartsTmplVhost.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, forbiddenCharsReplacer.Replace(name))
+ chart.Labels = []module.Label{
+ {Key: "vhost", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := r.Charts().Add(*charts...); err != nil {
+ r.Warning(err)
+ }
+}
+
+func (r *RabbitMQ) removeVhostCharts(vhost string) {
+ px := fmt.Sprintf("vhost_%s_", forbiddenCharsReplacer.Replace(vhost))
+ for _, chart := range *r.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func (r *RabbitMQ) addQueueCharts(queue, vhost string) {
+ charts := chartsTmplQueue.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, forbiddenCharsReplacer.Replace(queue), forbiddenCharsReplacer.Replace(vhost))
+ chart.Labels = []module.Label{
+ {Key: "queue", Value: queue},
+ {Key: "vhost", Value: vhost},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, queue, vhost)
+ }
+ }
+
+ if err := r.Charts().Add(*charts...); err != nil {
+ r.Warning(err)
+ }
+}
+
+func (r *RabbitMQ) removeQueueCharts(queue, vhost string) {
+ px := fmt.Sprintf("queue_%s_vhost_%s_", forbiddenCharsReplacer.Replace(queue), forbiddenCharsReplacer.Replace(vhost))
+ for _, chart := range *r.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
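+// forbiddenCharsReplacer sanitizes vhost and queue names before they are embedded in chart IDs;
+// dimension IDs keep the original names so they match the collected metric keys, and chart labels
+// expose the original names.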
+var forbiddenCharsReplacer = strings.NewReplacer(" ", "_", ".", "_")
diff --git a/src/go/plugin/go.d/modules/rabbitmq/collect.go b/src/go/plugin/go.d/modules/rabbitmq/collect.go
new file mode 100644
index 000000000..70b2aa033
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/collect.go
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rabbitmq
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ urlPathAPIOverview = "/api/overview"
+ urlPathAPINodes = "/api/nodes/"
+ urlPathAPIVhosts = "/api/vhosts"
+ urlPathAPIQueues = "/api/queues"
+)
+
+// TODO: RabbitMQ has a built-in Prometheus collector since v3.8.0 (https://www.rabbitmq.com/prometheus.html).
+// We should use it (possibly in addition to this API); it is the recommended option according to the docs.
+func (r *RabbitMQ) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := r.collectOverviewStats(mx); err != nil {
+ return nil, err
+ }
+ if err := r.collectNodeStats(mx); err != nil {
+ return mx, err
+ }
+ if err := r.collectVhostsStats(mx); err != nil {
+ return mx, err
+ }
+ if r.CollectQueues {
+ if err := r.collectQueuesStats(mx); err != nil {
+ return mx, err
+ }
+ }
+
+ return mx, nil
+}
+
+func (r *RabbitMQ) collectOverviewStats(mx map[string]int64) error {
+ var stats overviewStats
+ if err := r.doOKDecode(urlPathAPIOverview, &stats); err != nil {
+ return err
+ }
+
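+ // Remember the node name reported by the overview endpoint; collectNodeStats needs it to query per-node stats.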
+ if r.nodeName == "" {
+ r.nodeName = stats.Node
+ }
+
+ for k, v := range stm.ToMap(stats) {
+ mx[k] = v
+ }
+
+ return nil
+}
+
+func (r *RabbitMQ) collectNodeStats(mx map[string]int64) error {
+ if r.nodeName == "" {
+ return nil
+ }
+
+ var stats nodeStats
+ if err := r.doOKDecode(filepath.Join(urlPathAPINodes, r.nodeName), &stats); err != nil {
+ return err
+ }
+
+ for k, v := range stm.ToMap(stats) {
+ mx[k] = v
+ }
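+ // The API does not report available processes directly; derive it from the total and used counts.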
+ mx["proc_available"] = int64(stats.ProcTotal - stats.ProcUsed)
+
+ return nil
+}
+
+func (r *RabbitMQ) collectVhostsStats(mx map[string]int64) error {
+ var stats []vhostStats
+ if err := r.doOKDecode(urlPathAPIVhosts, &stats); err != nil {
+ return err
+ }
+
+ seen := make(map[string]bool)
+
+ for _, vhost := range stats {
+ seen[vhost.Name] = true
+ for k, v := range stm.ToMap(vhost) {
+ mx[fmt.Sprintf("vhost_%s_%s", vhost.Name, k)] = v
+ }
+ }
+
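+ // Create charts for newly discovered vhosts and remove charts for vhosts that are gone.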
+ for name := range seen {
+ if !r.vhosts[name] {
+ r.vhosts[name] = true
+ r.Debugf("new vhost name='%s': creating charts", name)
+ r.addVhostCharts(name)
+ }
+ }
+ for name := range r.vhosts {
+ if !seen[name] {
+ delete(r.vhosts, name)
+ r.Debugf("stale vhost name='%s': removing charts", name)
+ r.removeVhostCharts(name)
+ }
+ }
+
+ return nil
+}
+
+func (r *RabbitMQ) collectQueuesStats(mx map[string]int64) error {
+ var stats []queueStats
+ if err := r.doOKDecode(urlPathAPIQueues, &stats); err != nil {
+ return err
+ }
+
+ seen := make(map[string]queueCache)
+
+ for _, queue := range stats {
+ seen[queue.Name+"|"+queue.Vhost] = queueCache{name: queue.Name, vhost: queue.Vhost}
+ for k, v := range stm.ToMap(queue) {
+ mx[fmt.Sprintf("queue_%s_vhost_%s_%s", queue.Name, queue.Vhost, k)] = v
+ }
+ }
+
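+ // Create charts for newly discovered queues and remove charts for queues that are gone.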
+ for key, queue := range seen {
+ if _, ok := r.queues[key]; !ok {
+ r.queues[key] = queue
+ r.Debugf("new queue name='%s', vhost='%s': creating charts", queue.name, queue.vhost)
+ r.addQueueCharts(queue.name, queue.vhost)
+ }
+ }
+ for key, queue := range r.queues {
+ if _, ok := seen[key]; !ok {
+ delete(r.queues, key)
+ r.Debugf("stale queue name='%s', vhost='%s': removing charts", queue.name, queue.vhost)
+ r.removeQueueCharts(queue.name, queue.vhost)
+ }
+ }
+
+ return nil
+}
+
+func (r *RabbitMQ) doOKDecode(urlPath string, in interface{}) error {
+ req, err := web.NewHTTPRequestWithPath(r.Request, urlPath)
+ if err != nil {
+ return fmt.Errorf("error on creating request: %v", err)
+ }
+
+ r.Debugf("doing HTTP %s to '%s'", req.Method, req.URL)
+ resp, err := r.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on request to %s: %v", req.URL, err)
+ }
+
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("%s returned HTTP status %d (%s)", req.URL, resp.StatusCode, resp.Status)
+ }
+
+ if err = json.NewDecoder(resp.Body).Decode(&in); err != nil {
+ return fmt.Errorf("error on decoding response from %s: %v", req.URL, err)
+ }
+
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
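+ // Drain the body before closing it so the underlying keep-alive connection can be reused.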
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/rabbitmq/config_schema.json b/src/go/plugin/go.d/modules/rabbitmq/config_schema.json
new file mode 100644
index 000000000..defa70142
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/config_schema.json
@@ -0,0 +1,192 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "RabbitMQ collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the RabbitMQ [management API](https://rabbitmq-website.pages.dev/docs/management).",
+ "type": "string",
+ "default": "http://localhost:15672",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "collect_queues_metrics": {
+ "title": "Collect Queues Metrics",
+ "description": "Collect stats for each queue of each virtual host. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used.",
+ "type": "boolean",
+ "default": false
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "default": "guest",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "default": "guest",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "collect_queues_metrics"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/rabbitmq/integrations/rabbitmq.md b/src/go/plugin/go.d/modules/rabbitmq/integrations/rabbitmq.md
new file mode 100644
index 000000000..e4c9df588
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/integrations/rabbitmq.md
@@ -0,0 +1,300 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/rabbitmq/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/rabbitmq/metadata.yaml"
+sidebar_label: "RabbitMQ"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# RabbitMQ
+
+
+<img src="https://netdata.cloud/img/rabbitmq.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: rabbitmq
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors RabbitMQ instances.
+
+It collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).
+The following endpoints are used:
+
+- `/api/overview`
+- `/api/nodes/{node_name}`
+- `/api/vhosts`
+- `/api/queues` (disabled by default)
+
+
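+You can inspect the raw data these endpoints return with `curl` (a quick sketch, assuming the default `guest:guest` credentials and management port):
+
+```bash
+curl -s -u guest:guest http://localhost:15672/api/overview
+```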
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per RabbitMQ instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| rabbitmq.messages_count | ready, unacknowledged | messages |
+| rabbitmq.messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |
+| rabbitmq.objects_count | channels, consumers, connections, queues, exchanges | messages |
+| rabbitmq.connection_churn_rate | created, closed | operations/s |
+| rabbitmq.channel_churn_rate | created, closed | operations/s |
+| rabbitmq.queue_churn_rate | created, deleted, declared | operations/s |
+| rabbitmq.file_descriptors_count | available, used | fd |
+| rabbitmq.sockets_count | available, used | sockets |
+| rabbitmq.erlang_processes_count | available, used | processes |
+| rabbitmq.erlang_run_queue_processes_count | length | processes |
+| rabbitmq.memory_usage | used | bytes |
+| rabbitmq.disk_space_free_size | free | bytes |
+
+### Per vhost
+
+These metrics refer to the virtual host.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vhost | virtual host name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| rabbitmq.vhost_messages_count | ready, unacknowledged | messages |
+| rabbitmq.vhost_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |
+
+### Per queue
+
+These metrics refer to the virtual host queue.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vhost | virtual host name |
+| queue | queue name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| rabbitmq.queue_messages_count | ready, unacknowledged, paged_out, persistent | messages |
+| rabbitmq.queue_messages_rate | ack, publish, publish_in, publish_out, confirm, deliver, deliver_no_ack, get, get_no_ack, deliver_get, redeliver, return_unroutable | messages/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable management plugin.
+
+The management plugin is included in the RabbitMQ distribution, but it is disabled by default.
+To enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.
+
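+For example, a common way to enable it is (a sketch; the exact invocation depends on your installation):
+
+```bash
+rabbitmq-plugins enable rabbitmq_management
+```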
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/rabbitmq.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/rabbitmq.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://localhost:15672 | yes |
+| collect_queues_metrics | Collect stats for each queue of each virtual host. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used. | no | no |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:15672
+
+```
+</details>
+
+##### Basic HTTP auth
+
+Local server with basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:15672
+ username: admin
+ password: password
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:15672
+
+ - name: remote
+ url: http://192.0.2.0:15672
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `rabbitmq` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m rabbitmq
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `rabbitmq` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep rabbitmq
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep rabbitmq /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep rabbitmq
+```
+
+
diff --git a/src/go/plugin/go.d/modules/rabbitmq/metadata.yaml b/src/go/plugin/go.d/modules/rabbitmq/metadata.yaml
new file mode 100644
index 000000000..f0a17b9e7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/metadata.yaml
@@ -0,0 +1,341 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-rabbitmq
+ plugin_name: go.d.plugin
+ module_name: rabbitmq
+ monitored_instance:
+ name: RabbitMQ
+ link: https://www.rabbitmq.com/
+ icon_filename: rabbitmq.svg
+ categories:
+ - data-collection.message-brokers
+ keywords:
+ - rabbitmq
+ - message brokers
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors RabbitMQ instances.
+
+ It collects data using an HTTP-based API provided by the [management plugin](https://www.rabbitmq.com/management.html).
+ The following endpoints are used:
+
+ - `/api/overview`
+ - `/api/nodes/{node_name}`
+ - `/api/vhosts`
+ - `/api/queues` (disabled by default)
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable management plugin.
+ description: |
+ The management plugin is included in the RabbitMQ distribution, but it is disabled by default.
+ To enable it, see the [Management Plugin](https://www.rabbitmq.com/management.html#getting-started) documentation.
+ configuration:
+ file:
+ name: go.d/rabbitmq.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://localhost:15672
+ required: true
+ - name: collect_queues_metrics
+ description: Collect stats for each queue of each virtual host. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used.
+ default_value: false
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:15672
+ - name: Basic HTTP auth
+ description: Local server with basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:15672
+ username: admin
+ password: password
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:15672
+
+ - name: remote
+ url: http://192.0.2.0:15672
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: rabbitmq.messages_count
+ description: Messages
+ unit: messages
+ chart_type: stacked
+ dimensions:
+ - name: ready
+ - name: unacknowledged
+ - name: rabbitmq.messages_rate
+ description: Messages
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: ack
+ - name: publish
+ - name: publish_in
+ - name: publish_out
+ - name: confirm
+ - name: deliver
+ - name: deliver_no_ack
+ - name: get
+ - name: get_no_ack
+ - name: deliver_get
+ - name: redeliver
+ - name: return_unroutable
+ - name: rabbitmq.objects_count
+ description: Objects
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: channels
+ - name: consumers
+ - name: connections
+ - name: queues
+ - name: exchanges
+ - name: rabbitmq.connection_churn_rate
+ description: Connection churn
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: closed
+ - name: rabbitmq.channel_churn_rate
+ description: Channel churn
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: closed
+ - name: rabbitmq.queue_churn_rate
+ description: Queue churn
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: deleted
+ - name: declared
+ - name: rabbitmq.file_descriptors_count
+ description: File descriptors
+ unit: fd
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: rabbitmq.sockets_count
+ description: Used sockets
+ unit: sockets
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: rabbitmq.erlang_processes_count
+ description: Erlang processes
+ unit: processes
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: rabbitmq.erlang_run_queue_processes_count
+ description: Erlang run queue
+ unit: processes
+ chart_type: line
+ dimensions:
+ - name: length
+ - name: rabbitmq.memory_usage
+ description: Memory
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: rabbitmq.disk_space_free_size
+ description: Free disk space
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: free
+ - name: vhost
+ description: These metrics refer to the virtual host.
+ labels:
+ - name: vhost
+ description: virtual host name
+ metrics:
+ - name: rabbitmq.vhost_messages_count
+ description: Vhost messages
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: ready
+ - name: unacknowledged
+ - name: rabbitmq.vhost_messages_rate
+ description: Vhost messages rate
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: ack
+ - name: publish
+ - name: publish_in
+ - name: publish_out
+ - name: confirm
+ - name: deliver
+ - name: deliver_no_ack
+ - name: get
+ - name: get_no_ack
+ - name: deliver_get
+ - name: redeliver
+ - name: return_unroutable
+ - name: queue
+ description: These metrics refer to the virtual host queue.
+ labels:
+ - name: vhost
+ description: virtual host name
+ - name: queue
+ description: queue name
+ metrics:
+ - name: rabbitmq.queue_messages_count
+ description: Queue messages
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: ready
+ - name: unacknowledged
+ - name: paged_out
+ - name: persistent
+ - name: rabbitmq.queue_messages_rate
+ description: Queue messages rate
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: ack
+ - name: publish
+ - name: publish_in
+ - name: publish_out
+ - name: confirm
+ - name: deliver
+ - name: deliver_no_ack
+ - name: get
+ - name: get_no_ack
+ - name: deliver_get
+ - name: redeliver
+ - name: return_unroutable
diff --git a/src/go/plugin/go.d/modules/rabbitmq/metrics.go b/src/go/plugin/go.d/modules/rabbitmq/metrics.go
new file mode 100644
index 000000000..871dfd57e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/metrics.go
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rabbitmq
+
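+// Metric keys are produced by stm.ToMap, which flattens these structs by joining
+// the nested `stm` tags with "_" (for example, overviewStats.ObjectTotals.Queues
+// becomes "object_totals_queues").
+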
+// https://www.rabbitmq.com/monitoring.html#cluster-wide-metrics
+type overviewStats struct {
+ ObjectTotals struct {
+ Consumers int64 `json:"consumers" stm:"consumers"`
+ Queues int64 `json:"queues" stm:"queues"`
+ Exchanges int64 `json:"exchanges" stm:"exchanges"`
+ Connections int64 `json:"connections" stm:"connections"`
+ Channels int64 `json:"channels" stm:"channels"`
+ } `json:"object_totals" stm:"object_totals"`
+ ChurnRates struct {
+ ChannelClosed int64 `json:"channel_closed" stm:"channel_closed"`
+ ChannelCreated int64 `json:"channel_created" stm:"channel_created"`
+ ConnectionClosed int64 `json:"connection_closed" stm:"connection_closed"`
+ ConnectionCreated int64 `json:"connection_created" stm:"connection_created"`
+ QueueCreated int64 `json:"queue_created" stm:"queue_created"`
+ QueueDeclared int64 `json:"queue_declared" stm:"queue_declared"`
+ QueueDeleted int64 `json:"queue_deleted" stm:"queue_deleted"`
+ } `json:"churn_rates" stm:"churn_rates"`
+ QueueTotals struct {
+ Messages int64 `json:"messages" stm:"messages"`
+ MessagesReady int64 `json:"messages_ready" stm:"messages_ready"`
+ MessagesUnacknowledged int64 `json:"messages_unacknowledged" stm:"messages_unacknowledged"`
+ } `json:"queue_totals" stm:"queue_totals"`
+ MessageStats messageStats `json:"message_stats" stm:"message_stats"`
+ Node string
+}
+
+// https://www.rabbitmq.com/monitoring.html#node-metrics
+type nodeStats struct {
+ FDTotal int64 `json:"fd_total" stm:"fd_total"`
+ FDUsed int64 `json:"fd_used" stm:"fd_used"`
+ MemLimit int64 `json:"mem_limit" stm:"mem_limit"`
+ MemUsed int64 `json:"mem_used" stm:"mem_used"`
+ SocketsTotal int64 `json:"sockets_total" stm:"sockets_total"`
+ SocketsUsed int64 `json:"sockets_used" stm:"sockets_used"`
+ ProcTotal int64 `json:"proc_total" stm:"proc_total"`
+ ProcUsed int64 `json:"proc_used" stm:"proc_used"`
+ DiskFree int64 `json:"disk_free" stm:"disk_free"`
+ RunQueue int64 `json:"run_queue" stm:"run_queue"`
+}
+
+type vhostStats struct {
+ Name string `json:"name"`
+ Messages int64 `json:"messages" stm:"messages"`
+ MessagesReady int64 `json:"messages_ready" stm:"messages_ready"`
+ MessagesUnacknowledged int64 `json:"messages_unacknowledged" stm:"messages_unacknowledged"`
+ MessageStats messageStats `json:"message_stats" stm:"message_stats"`
+}
+
+// https://www.rabbitmq.com/monitoring.html#queue-metrics
+type queueStats struct {
+ Name string `json:"name"`
+ Vhost string `json:"vhost"`
+ State string `json:"state"`
+ Type string `json:"type"`
+ Messages int64 `json:"messages" stm:"messages"`
+ MessagesReady int64 `json:"messages_ready" stm:"messages_ready"`
+ MessagesUnacknowledged int64 `json:"messages_unacknowledged" stm:"messages_unacknowledged"`
+ MessagesPagedOut int64 `json:"messages_paged_out" stm:"messages_paged_out"`
+ MessagesPersistent int64 `json:"messages_persistent" stm:"messages_persistent"`
+ MessageStats messageStats `json:"message_stats" stm:"message_stats"`
+}
+
+// https://rawcdn.githack.com/rabbitmq/rabbitmq-server/v3.11.5/deps/rabbitmq_management/priv/www/api/index.html
+type messageStats struct {
+ Ack int64 `json:"ack" stm:"ack"`
+ Publish int64 `json:"publish" stm:"publish"`
+ PublishIn int64 `json:"publish_in" stm:"publish_in"`
+ PublishOut int64 `json:"publish_out" stm:"publish_out"`
+ Confirm int64 `json:"confirm" stm:"confirm"`
+ Deliver int64 `json:"deliver" stm:"deliver"`
+ DeliverNoAck int64 `json:"deliver_no_ack" stm:"deliver_no_ack"`
+ Get int64 `json:"get" stm:"get"`
+ GetNoAck int64 `json:"get_no_ack" stm:"get_no_ack"`
+ DeliverGet int64 `json:"deliver_get" stm:"deliver_get"`
+ Redeliver int64 `json:"redeliver" stm:"redeliver"`
+ ReturnUnroutable int64 `json:"return_unroutable" stm:"return_unroutable"`
+}
diff --git a/src/go/plugin/go.d/modules/rabbitmq/rabbitmq.go b/src/go/plugin/go.d/modules/rabbitmq/rabbitmq.go
new file mode 100644
index 000000000..74805dab7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/rabbitmq.go
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rabbitmq
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("rabbitmq", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *RabbitMQ {
+ return &RabbitMQ{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://localhost:15672",
+ Username: "guest",
+ Password: "guest",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ CollectQueues: false,
+ },
+ charts: baseCharts.Copy(),
+ vhosts: make(map[string]bool),
+ queues: make(map[string]queueCache),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ CollectQueues bool `yaml:"collect_queues_metrics" json:"collect_queues_metrics"`
+}
+
+type (
+ RabbitMQ struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ nodeName string
+ vhosts map[string]bool
+ queues map[string]queueCache
+ }
+ queueCache struct {
+ name, vhost string
+ }
+)
+
+func (r *RabbitMQ) Configuration() any {
+ return r.Config
+}
+
+func (r *RabbitMQ) Init() error {
+ if r.URL == "" {
+ r.Error("'url' can not be empty")
+ return errors.New("url not set")
+ }
+
+ client, err := web.NewHTTPClient(r.Client)
+ if err != nil {
+ r.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ r.httpClient = client
+
+ r.Debugf("using URL %s", r.URL)
+ r.Debugf("using timeout: %s", r.Timeout)
+
+ return nil
+}
+
+func (r *RabbitMQ) Check() error {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (r *RabbitMQ) Charts() *module.Charts {
+ return r.charts
+}
+
+func (r *RabbitMQ) Collect() map[string]int64 {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (r *RabbitMQ) Cleanup() {
+ if r.httpClient != nil {
+ r.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/rabbitmq/rabbitmq_test.go b/src/go/plugin/go.d/modules/rabbitmq/rabbitmq_test.go
new file mode 100644
index 000000000..7c4fe719e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/rabbitmq_test.go
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rabbitmq
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "path/filepath"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataOverviewStats, _ = os.ReadFile("testdata/v3.11.5/api-overview.json")
+ dataNodeStats, _ = os.ReadFile("testdata/v3.11.5/api-nodes-node.json")
+ dataVhostsStats, _ = os.ReadFile("testdata/v3.11.5/api-vhosts.json")
+ dataQueuesStats, _ = os.ReadFile("testdata/v3.11.5/api-queues.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataOverviewStats": dataOverviewStats,
+ "dataNodeStats": dataNodeStats,
+ "dataVhostsStats": dataVhostsStats,
+ "dataQueuesStats": dataQueuesStats,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestRabbitMQ_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &RabbitMQ{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestRabbitMQ_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rabbit := New()
+ rabbit.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, rabbit.Init())
+ } else {
+ assert.NoError(t, rabbit.Init())
+ }
+ })
+ }
+}
+
+func TestRabbitMQ_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestRabbitMQ_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+
+ rabbit := New()
+ require.NoError(t, rabbit.Init())
+
+ assert.NotPanics(t, rabbit.Cleanup)
+}
+
+func TestRabbitMQ_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (*RabbitMQ, func())
+ wantFail bool
+ }{
+ "success on valid response": {wantFail: false, prepare: caseSuccessAllRequests},
+ "fails on invalid response": {wantFail: true, prepare: caseInvalidDataResponse},
+ "fails on 404": {wantFail: true, prepare: case404},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rabbit, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, rabbit.Init())
+
+ if test.wantFail {
+ assert.Error(t, rabbit.Check())
+ } else {
+ assert.NoError(t, rabbit.Check())
+ }
+ })
+ }
+}
+
+func TestRabbitMQ_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (*RabbitMQ, func())
+ wantCollected map[string]int64
+ wantCharts int
+ }{
+ "success on valid response": {
+ prepare: caseSuccessAllRequests,
+ wantCharts: len(baseCharts) + len(chartsTmplVhost)*3 + len(chartsTmplQueue)*4,
+ wantCollected: map[string]int64{
+ "churn_rates_channel_closed": 0,
+ "churn_rates_channel_created": 0,
+ "churn_rates_connection_closed": 0,
+ "churn_rates_connection_created": 0,
+ "churn_rates_queue_created": 6,
+ "churn_rates_queue_declared": 6,
+ "churn_rates_queue_deleted": 2,
+ "disk_free": 189799186432,
+ "fd_total": 1048576,
+ "fd_used": 43,
+ "mem_limit": 6713820774,
+ "mem_used": 172720128,
+ "message_stats_ack": 0,
+ "message_stats_confirm": 0,
+ "message_stats_deliver": 0,
+ "message_stats_deliver_get": 0,
+ "message_stats_deliver_no_ack": 0,
+ "message_stats_get": 0,
+ "message_stats_get_no_ack": 0,
+ "message_stats_publish": 0,
+ "message_stats_publish_in": 0,
+ "message_stats_publish_out": 0,
+ "message_stats_redeliver": 0,
+ "message_stats_return_unroutable": 0,
+ "object_totals_channels": 0,
+ "object_totals_connections": 0,
+ "object_totals_consumers": 0,
+ "object_totals_exchanges": 21,
+ "object_totals_queues": 4,
+ "proc_available": 1048135,
+ "proc_total": 1048576,
+ "proc_used": 441,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_ack": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_confirm": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_deliver": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_deliver_get": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_deliver_no_ack": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_get": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_get_no_ack": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_publish": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_publish_in": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_publish_out": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_redeliver": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_message_stats_return_unroutable": 0,
+ "queue_MyFirstQueue_vhost_mySecondVhost_messages": 1,
+ "queue_MyFirstQueue_vhost_mySecondVhost_messages_paged_out": 1,
+ "queue_MyFirstQueue_vhost_mySecondVhost_messages_persistent": 1,
+ "queue_MyFirstQueue_vhost_mySecondVhost_messages_ready": 1,
+ "queue_MyFirstQueue_vhost_mySecondVhost_messages_unacknowledged": 1,
+ "queue_myFirstQueue_vhost_/_message_stats_ack": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_confirm": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_deliver": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_deliver_get": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_deliver_no_ack": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_get": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_get_no_ack": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_publish": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_publish_in": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_publish_out": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_redeliver": 0,
+ "queue_myFirstQueue_vhost_/_message_stats_return_unroutable": 0,
+ "queue_myFirstQueue_vhost_/_messages": 1,
+ "queue_myFirstQueue_vhost_/_messages_paged_out": 1,
+ "queue_myFirstQueue_vhost_/_messages_persistent": 1,
+ "queue_myFirstQueue_vhost_/_messages_ready": 1,
+ "queue_myFirstQueue_vhost_/_messages_unacknowledged": 1,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_ack": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_confirm": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_deliver": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_deliver_get": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_deliver_no_ack": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_get": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_get_no_ack": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_publish": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_publish_in": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_publish_out": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_redeliver": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_message_stats_return_unroutable": 0,
+ "queue_myFirstQueue_vhost_myFirstVhost_messages": 1,
+ "queue_myFirstQueue_vhost_myFirstVhost_messages_paged_out": 1,
+ "queue_myFirstQueue_vhost_myFirstVhost_messages_persistent": 1,
+ "queue_myFirstQueue_vhost_myFirstVhost_messages_ready": 1,
+ "queue_myFirstQueue_vhost_myFirstVhost_messages_unacknowledged": 1,
+ "queue_mySecondQueue_vhost_/_message_stats_ack": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_confirm": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_deliver": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_deliver_get": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_deliver_no_ack": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_get": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_get_no_ack": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_publish": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_publish_in": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_publish_out": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_redeliver": 0,
+ "queue_mySecondQueue_vhost_/_message_stats_return_unroutable": 0,
+ "queue_mySecondQueue_vhost_/_messages": 1,
+ "queue_mySecondQueue_vhost_/_messages_paged_out": 1,
+ "queue_mySecondQueue_vhost_/_messages_persistent": 1,
+ "queue_mySecondQueue_vhost_/_messages_ready": 1,
+ "queue_mySecondQueue_vhost_/_messages_unacknowledged": 1,
+ "queue_totals_messages": 0,
+ "queue_totals_messages_ready": 0,
+ "queue_totals_messages_unacknowledged": 0,
+ "run_queue": 1,
+ "sockets_total": 943629,
+ "sockets_used": 0,
+ "vhost_/_message_stats_ack": 0,
+ "vhost_/_message_stats_confirm": 0,
+ "vhost_/_message_stats_deliver": 0,
+ "vhost_/_message_stats_deliver_get": 0,
+ "vhost_/_message_stats_deliver_no_ack": 0,
+ "vhost_/_message_stats_get": 0,
+ "vhost_/_message_stats_get_no_ack": 0,
+ "vhost_/_message_stats_publish": 0,
+ "vhost_/_message_stats_publish_in": 0,
+ "vhost_/_message_stats_publish_out": 0,
+ "vhost_/_message_stats_redeliver": 0,
+ "vhost_/_message_stats_return_unroutable": 0,
+ "vhost_/_messages": 1,
+ "vhost_/_messages_ready": 1,
+ "vhost_/_messages_unacknowledged": 1,
+ "vhost_myFirstVhost_message_stats_ack": 0,
+ "vhost_myFirstVhost_message_stats_confirm": 0,
+ "vhost_myFirstVhost_message_stats_deliver": 0,
+ "vhost_myFirstVhost_message_stats_deliver_get": 0,
+ "vhost_myFirstVhost_message_stats_deliver_no_ack": 0,
+ "vhost_myFirstVhost_message_stats_get": 0,
+ "vhost_myFirstVhost_message_stats_get_no_ack": 0,
+ "vhost_myFirstVhost_message_stats_publish": 0,
+ "vhost_myFirstVhost_message_stats_publish_in": 0,
+ "vhost_myFirstVhost_message_stats_publish_out": 0,
+ "vhost_myFirstVhost_message_stats_redeliver": 0,
+ "vhost_myFirstVhost_message_stats_return_unroutable": 0,
+ "vhost_myFirstVhost_messages": 1,
+ "vhost_myFirstVhost_messages_ready": 1,
+ "vhost_myFirstVhost_messages_unacknowledged": 1,
+ "vhost_mySecondVhost_message_stats_ack": 0,
+ "vhost_mySecondVhost_message_stats_confirm": 0,
+ "vhost_mySecondVhost_message_stats_deliver": 0,
+ "vhost_mySecondVhost_message_stats_deliver_get": 0,
+ "vhost_mySecondVhost_message_stats_deliver_no_ack": 0,
+ "vhost_mySecondVhost_message_stats_get": 0,
+ "vhost_mySecondVhost_message_stats_get_no_ack": 0,
+ "vhost_mySecondVhost_message_stats_publish": 0,
+ "vhost_mySecondVhost_message_stats_publish_in": 0,
+ "vhost_mySecondVhost_message_stats_publish_out": 0,
+ "vhost_mySecondVhost_message_stats_redeliver": 0,
+ "vhost_mySecondVhost_message_stats_return_unroutable": 0,
+ "vhost_mySecondVhost_messages": 1,
+ "vhost_mySecondVhost_messages_ready": 1,
+ "vhost_mySecondVhost_messages_unacknowledged": 1,
+ },
+ },
+ "fails on invalid response": {
+ prepare: caseInvalidDataResponse,
+ wantCollected: nil,
+ wantCharts: len(baseCharts),
+ },
+ "fails on 404": {
+ prepare: case404,
+ wantCollected: nil,
+ wantCharts: len(baseCharts),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rabbit, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, rabbit.Init())
+
+ mx := rabbit.Collect()
+
+ assert.Equal(t, test.wantCollected, mx)
+ assert.Equal(t, test.wantCharts, len(*rabbit.Charts()))
+ })
+ }
+}
+
+func caseSuccessAllRequests() (*RabbitMQ, func()) {
+ srv := prepareRabbitMQEndpoint()
+ rabbit := New()
+ rabbit.URL = srv.URL
+ rabbit.CollectQueues = true
+
+ return rabbit, srv.Close
+}
+
+func caseInvalidDataResponse() (*RabbitMQ, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ rabbit := New()
+ rabbit.URL = srv.URL
+
+ return rabbit, srv.Close
+}
+
+func case404() (*RabbitMQ, func()) {
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ rabbit := New()
+ rabbit.URL = srv.URL
+
+ return rabbit, srv.Close
+}
+
+func prepareRabbitMQEndpoint() *httptest.Server {
+ srv := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathAPIOverview:
+ _, _ = w.Write(dataOverviewStats)
+ case filepath.Join(urlPathAPINodes, "rabbit@localhost"):
+ _, _ = w.Write(dataNodeStats)
+ case urlPathAPIVhosts:
+ _, _ = w.Write(dataVhostsStats)
+ case urlPathAPIQueues:
+ _, _ = w.Write(dataQueuesStats)
+ default:
+ w.WriteHeader(404)
+ }
+ }))
+ return srv
+}
diff --git a/src/go/plugin/go.d/modules/rabbitmq/testdata/config.json b/src/go/plugin/go.d/modules/rabbitmq/testdata/config.json
new file mode 100644
index 000000000..b3f637f06
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "collect_queues_metrics": true
+}
diff --git a/src/go/plugin/go.d/modules/rabbitmq/testdata/config.yaml b/src/go/plugin/go.d/modules/rabbitmq/testdata/config.yaml
new file mode 100644
index 000000000..12bb79bec
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+collect_queues_metrics: yes
diff --git a/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-nodes-node.json b/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-nodes-node.json
new file mode 100644
index 000000000..cc0a0ceb0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-nodes-node.json
@@ -0,0 +1,453 @@
+{
+ "partitions": [],
+ "os_pid": "49",
+ "fd_total": 1048576,
+ "sockets_total": 943629,
+ "mem_limit": 6713820774,
+ "mem_alarm": false,
+ "disk_free_limit": 16784551936,
+ "disk_free_alarm": false,
+ "proc_total": 1048576,
+ "rates_mode": "basic",
+ "uptime": 10098336,
+ "run_queue": 1,
+ "processors": 12,
+ "exchange_types": [
+ {
+ "name": "topic",
+ "description": "AMQP topic exchange, as per the AMQP specification",
+ "enabled": true
+ },
+ {
+ "name": "headers",
+ "description": "AMQP headers exchange, as per the AMQP specification",
+ "enabled": true
+ },
+ {
+ "name": "fanout",
+ "description": "AMQP fanout exchange, as per the AMQP specification",
+ "enabled": true
+ },
+ {
+ "name": "direct",
+ "description": "AMQP direct exchange, as per the AMQP specification",
+ "enabled": true
+ }
+ ],
+ "auth_mechanisms": [
+ {
+ "name": "PLAIN",
+ "description": "SASL PLAIN authentication mechanism",
+ "enabled": true
+ },
+ {
+ "name": "AMQPLAIN",
+ "description": "QPid AMQPLAIN mechanism",
+ "enabled": true
+ },
+ {
+ "name": "RABBIT-CR-DEMO",
+ "description": "RabbitMQ Demo challenge-response authentication mechanism",
+ "enabled": false
+ }
+ ],
+ "applications": [
+ {
+ "name": "accept",
+ "description": "Accept header(s) for Erlang/Elixir",
+ "version": "0.3.5"
+ },
+ {
+ "name": "amqp10_common",
+ "description": "Modules shared by rabbitmq-amqp1.0 and rabbitmq-amqp1.0-client",
+ "version": "3.11.5"
+ },
+ {
+ "name": "amqp_client",
+ "description": "RabbitMQ AMQP Client",
+ "version": "3.11.5"
+ },
+ {
+ "name": "asn1",
+ "description": "The Erlang ASN1 compiler version 5.0.21",
+ "version": "5.0.21"
+ },
+ {
+ "name": "aten",
+ "description": "Erlang node failure detector",
+ "version": "0.5.8"
+ },
+ {
+ "name": "compiler",
+ "description": "ERTS CXC 138 10",
+ "version": "8.2.2"
+ },
+ {
+ "name": "cowboy",
+ "description": "Small, fast, modern HTTP server.",
+ "version": "2.8.0"
+ },
+ {
+ "name": "cowlib",
+ "description": "Support library for manipulating Web protocols.",
+ "version": "2.9.1"
+ },
+ {
+ "name": "credentials_obfuscation",
+ "description": "Helper library that obfuscates sensitive values in process state",
+ "version": "3.2.0"
+ },
+ {
+ "name": "crypto",
+ "description": "CRYPTO",
+ "version": "5.1.2"
+ },
+ {
+ "name": "cuttlefish",
+ "description": "cuttlefish configuration abstraction",
+ "version": "3.1.0"
+ },
+ {
+ "name": "enough",
+ "description": "A gen_server implementation with additional, overload-protected call type",
+ "version": "0.1.0"
+ },
+ {
+ "name": "gen_batch_server",
+ "description": "Generic batching server",
+ "version": "0.8.8"
+ },
+ {
+ "name": "inets",
+ "description": "INETS CXC 138 49",
+ "version": "8.2"
+ },
+ {
+ "name": "kernel",
+ "description": "ERTS CXC 138 10",
+ "version": "8.5.2"
+ },
+ {
+ "name": "mnesia",
+ "description": "MNESIA CXC 138 12",
+ "version": "4.21.3"
+ },
+ {
+ "name": "observer_cli",
+ "description": "Visualize Erlang Nodes On The Command Line",
+ "version": "1.7.3"
+ },
+ {
+ "name": "os_mon",
+ "description": "CPO CXC 138 46",
+ "version": "2.8"
+ },
+ {
+ "name": "osiris",
+ "description": "New project",
+ "version": "1.3.3"
+ },
+ {
+ "name": "prometheus",
+ "description": "Prometheus.io client in Erlang",
+ "version": "4.9.1"
+ },
+ {
+ "name": "public_key",
+ "description": "Public key infrastructure",
+ "version": "1.13.2"
+ },
+ {
+ "name": "ra",
+ "description": "Raft library",
+ "version": "2.4.5"
+ },
+ {
+ "name": "rabbit",
+ "description": "RabbitMQ",
+ "version": "3.11.5"
+ },
+ {
+ "name": "rabbit_common",
+ "description": "Modules shared by rabbitmq-server and rabbitmq-erlang-client",
+ "version": "3.11.5"
+ },
+ {
+ "name": "rabbitmq_management",
+ "description": "RabbitMQ Management Console",
+ "version": "3.11.5"
+ },
+ {
+ "name": "rabbitmq_management_agent",
+ "description": "RabbitMQ Management Agent",
+ "version": "3.11.5"
+ },
+ {
+ "name": "rabbitmq_prelaunch",
+ "description": "RabbitMQ prelaunch setup",
+ "version": "3.11.5"
+ },
+ {
+ "name": "rabbitmq_prometheus",
+ "description": "",
+ "version": "3.11.5"
+ },
+ {
+ "name": "rabbitmq_web_dispatch",
+ "description": "RabbitMQ Web Dispatcher",
+ "version": "3.11.5"
+ },
+ {
+ "name": "ranch",
+ "description": "Socket acceptor pool for TCP protocols.",
+ "version": "2.1.0"
+ },
+ {
+ "name": "recon",
+ "description": "Diagnostic tools for production use",
+ "version": "2.5.2"
+ },
+ {
+ "name": "redbug",
+ "description": "Erlang Tracing Debugger",
+ "version": "2.0.7"
+ },
+ {
+ "name": "runtime_tools",
+ "description": "RUNTIME_TOOLS",
+ "version": "1.19"
+ },
+ {
+ "name": "sasl",
+ "description": "SASL CXC 138 11",
+ "version": "4.2"
+ },
+ {
+ "name": "seshat",
+ "description": "Counters registry",
+ "version": "0.4.0"
+ },
+ {
+ "name": "ssl",
+ "description": "Erlang/OTP SSL application",
+ "version": "10.8.6"
+ },
+ {
+ "name": "stdlib",
+ "description": "ERTS CXC 138 10",
+ "version": "4.2"
+ },
+ {
+ "name": "stdout_formatter",
+ "description": "Tools to format paragraphs, lists and tables as plain text",
+ "version": "0.2.4"
+ },
+ {
+ "name": "syntax_tools",
+ "description": "Syntax tools",
+ "version": "3.0"
+ },
+ {
+ "name": "sysmon_handler",
+ "description": "Rate-limiting system_monitor event handler",
+ "version": "1.3.0"
+ },
+ {
+ "name": "systemd",
+ "description": "systemd integration for Erlang applications",
+ "version": "0.6.1"
+ },
+ {
+ "name": "thoas",
+ "description": "A blazing fast JSON parser and generator in pure Erlang.",
+ "version": "0.4.0"
+ },
+ {
+ "name": "tools",
+ "description": "DEVTOOLS CXC 138 16",
+ "version": "3.5.3"
+ },
+ {
+ "name": "xmerl",
+ "description": "XML parser",
+ "version": "1.3.30"
+ }
+ ],
+ "contexts": [
+ {
+ "description": "RabbitMQ Management",
+ "path": "/",
+ "cowboy_opts": "[{sendfile,false}]",
+ "ip": "0.0.0.0",
+ "port": "15672"
+ },
+ {
+ "description": "RabbitMQ Prometheus",
+ "path": "/",
+ "cowboy_opts": "[{sendfile,false}]",
+ "port": "15692",
+ "protocol": "'http/prometheus'"
+ }
+ ],
+ "log_files": [
+ "/opt/bitnami/rabbitmq/var/log/rabbitmq/rabbit@localhost.log",
+ "/opt/bitnami/rabbitmq/var/log/rabbitmq/rabbit@localhost_upgrade.log",
+ "<stdout>"
+ ],
+ "db_dir": "/bitnami/rabbitmq/mnesia/rabbit@localhost",
+ "config_files": [
+ "/opt/bitnami/rabbitmq/etc/rabbitmq/rabbitmq.conf"
+ ],
+ "net_ticktime": 60,
+ "enabled_plugins": [
+ "rabbitmq_management",
+ "rabbitmq_prometheus"
+ ],
+ "mem_calculation_strategy": "rss",
+ "ra_open_file_metrics": {
+ "ra_log_wal": 1,
+ "ra_log_segment_writer": 0
+ },
+ "name": "rabbit@localhost",
+ "running": true,
+ "type": "disc",
+ "mem_used": 172720128,
+ "mem_used_details": {
+ "rate": 0
+ },
+ "fd_used": 43,
+ "fd_used_details": {
+ "rate": -0.2
+ },
+ "sockets_used": 0,
+ "sockets_used_details": {
+ "rate": 0
+ },
+ "proc_used": 441,
+ "proc_used_details": {
+ "rate": -0.2
+ },
+ "disk_free": 189799186432,
+ "disk_free_details": {
+ "rate": 0
+ },
+ "gc_num": 74226,
+ "gc_num_details": {
+ "rate": 4.8
+ },
+ "gc_bytes_reclaimed": 1847200664,
+ "gc_bytes_reclaimed_details": {
+ "rate": 101998.4
+ },
+ "context_switches": 839195,
+ "context_switches_details": {
+ "rate": 59.4
+ },
+ "io_read_count": 1,
+ "io_read_count_details": {
+ "rate": 0
+ },
+ "io_read_bytes": 1,
+ "io_read_bytes_details": {
+ "rate": 0
+ },
+ "io_read_avg_time": 0.043,
+ "io_read_avg_time_details": {
+ "rate": 0
+ },
+ "io_write_count": 0,
+ "io_write_count_details": {
+ "rate": 0
+ },
+ "io_write_bytes": 0,
+ "io_write_bytes_details": {
+ "rate": 0
+ },
+ "io_write_avg_time": 0,
+ "io_write_avg_time_details": {
+ "rate": 0
+ },
+ "io_sync_count": 0,
+ "io_sync_count_details": {
+ "rate": 0
+ },
+ "io_sync_avg_time": 0,
+ "io_sync_avg_time_details": {
+ "rate": 0
+ },
+ "io_seek_count": 0,
+ "io_seek_count_details": {
+ "rate": 0
+ },
+ "io_seek_avg_time": 0,
+ "io_seek_avg_time_details": {
+ "rate": 0
+ },
+ "io_reopen_count": 0,
+ "io_reopen_count_details": {
+ "rate": 0
+ },
+ "mnesia_ram_tx_count": 272,
+ "mnesia_ram_tx_count_details": {
+ "rate": 0
+ },
+ "mnesia_disk_tx_count": 58,
+ "mnesia_disk_tx_count_details": {
+ "rate": 0
+ },
+ "msg_store_read_count": 0,
+ "msg_store_read_count_details": {
+ "rate": 0
+ },
+ "msg_store_write_count": 0,
+ "msg_store_write_count_details": {
+ "rate": 0
+ },
+ "queue_index_write_count": 0,
+ "queue_index_write_count_details": {
+ "rate": 0
+ },
+ "queue_index_read_count": 0,
+ "queue_index_read_count_details": {
+ "rate": 0
+ },
+ "connection_created": 0,
+ "connection_created_details": {
+ "rate": 0
+ },
+ "connection_closed": 0,
+ "connection_closed_details": {
+ "rate": 0
+ },
+ "channel_created": 0,
+ "channel_created_details": {
+ "rate": 0
+ },
+ "channel_closed": 0,
+ "channel_closed_details": {
+ "rate": 0
+ },
+ "queue_declared": 6,
+ "queue_declared_details": {
+ "rate": 0
+ },
+ "queue_created": 6,
+ "queue_created_details": {
+ "rate": 0
+ },
+ "queue_deleted": 2,
+ "queue_deleted_details": {
+ "rate": 0
+ },
+ "cluster_links": [],
+ "metrics_gc_queue_length": {
+ "connection_closed": 0,
+ "channel_closed": 0,
+ "consumer_deleted": 0,
+ "exchange_deleted": 0,
+ "queue_deleted": 0,
+ "vhost_deleted": 0,
+ "node_node_deleted": 0,
+ "channel_consumer_deleted": 0
+ }
+}
diff --git a/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-overview.json b/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-overview.json
new file mode 100644
index 000000000..5c71aaf5d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-overview.json
@@ -0,0 +1,183 @@
+{
+ "management_version": "3.11.5",
+ "rates_mode": "basic",
+ "sample_retention_policies": {
+ "global": [
+ 600,
+ 3600,
+ 28800,
+ 86400
+ ],
+ "basic": [
+ 600,
+ 3600
+ ],
+ "detailed": [
+ 600
+ ]
+ },
+ "exchange_types": [
+ {
+ "name": "direct",
+ "description": "AMQP direct exchange, as per the AMQP specification",
+ "enabled": true
+ },
+ {
+ "name": "fanout",
+ "description": "AMQP fanout exchange, as per the AMQP specification",
+ "enabled": true
+ },
+ {
+ "name": "headers",
+ "description": "AMQP headers exchange, as per the AMQP specification",
+ "enabled": true
+ },
+ {
+ "name": "topic",
+ "description": "AMQP topic exchange, as per the AMQP specification",
+ "enabled": true
+ }
+ ],
+ "product_version": "3.11.5",
+ "product_name": "RabbitMQ",
+ "rabbitmq_version": "3.11.5",
+ "cluster_name": "rabbit@f705ea2a1bec",
+ "erlang_version": "25.2",
+ "erlang_full_version": "Erlang/OTP 25 [erts-13.1.3] [source] [64-bit] [smp:12:12] [ds:12:12:10] [async-threads:1] [jit:ns]",
+ "release_series_support_status": "supported",
+ "disable_stats": false,
+ "enable_queue_totals": false,
+ "message_stats": {
+ "disk_reads": 0,
+ "disk_reads_details": {
+ "rate": 0
+ },
+ "disk_writes": 0,
+ "disk_writes_details": {
+ "rate": 0
+ }
+ },
+ "churn_rates": {
+ "channel_closed": 0,
+ "channel_closed_details": {
+ "rate": 0
+ },
+ "channel_created": 0,
+ "channel_created_details": {
+ "rate": 0
+ },
+ "connection_closed": 0,
+ "connection_closed_details": {
+ "rate": 0
+ },
+ "connection_created": 0,
+ "connection_created_details": {
+ "rate": 0
+ },
+ "queue_created": 6,
+ "queue_created_details": {
+ "rate": 0
+ },
+ "queue_declared": 6,
+ "queue_declared_details": {
+ "rate": 0
+ },
+ "queue_deleted": 2,
+ "queue_deleted_details": {
+ "rate": 0
+ }
+ },
+ "queue_totals": {
+ "messages": 0,
+ "messages_details": {
+ "rate": 0
+ },
+ "messages_ready": 0,
+ "messages_ready_details": {
+ "rate": 0
+ },
+ "messages_unacknowledged": 0,
+ "messages_unacknowledged_details": {
+ "rate": 0
+ }
+ },
+ "object_totals": {
+ "channels": 0,
+ "connections": 0,
+ "consumers": 0,
+ "exchanges": 21,
+ "queues": 4
+ },
+ "statistics_db_event_queue": 0,
+ "node": "rabbit@localhost",
+ "listeners": [
+ {
+ "node": "rabbit@localhost",
+ "protocol": "amqp",
+ "ip_address": "::",
+ "port": 5672,
+ "socket_opts": {
+ "backlog": 128,
+ "nodelay": true,
+ "linger": [
+ true,
+ 0
+ ],
+ "exit_on_close": false
+ }
+ },
+ {
+ "node": "rabbit@localhost",
+ "protocol": "clustering",
+ "ip_address": "::",
+ "port": 25672,
+ "socket_opts": []
+ },
+ {
+ "node": "rabbit@localhost",
+ "protocol": "http",
+ "ip_address": "::",
+ "port": 15672,
+ "socket_opts": {
+ "cowboy_opts": {
+ "sendfile": false
+ },
+ "ip": "0.0.0.0",
+ "port": 15672
+ }
+ },
+ {
+ "node": "rabbit@localhost",
+ "protocol": "http/prometheus",
+ "ip_address": "::",
+ "port": 15692,
+ "socket_opts": {
+ "cowboy_opts": {
+ "sendfile": false
+ },
+ "port": 15692,
+ "protocol": "http/prometheus"
+ }
+ }
+ ],
+ "contexts": [
+ {
+ "ssl_opts": [],
+ "node": "rabbit@localhost",
+ "description": "RabbitMQ Management",
+ "path": "/",
+ "cowboy_opts": "[{sendfile,false}]",
+ "ip": "0.0.0.0",
+ "port": "15672"
+ },
+ {
+ "ssl_opts": [],
+ "node": "rabbit@localhost",
+ "description": "RabbitMQ Prometheus",
+ "path": "/",
+ "cowboy_opts": "[{sendfile,false}]",
+ "port": "15692",
+ "protocol": "'http/prometheus'"
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-queues.json b/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-queues.json
new file mode 100644
index 000000000..40c6e6c80
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-queues.json
@@ -0,0 +1,334 @@
+[
+ {
+ "arguments": {},
+ "auto_delete": false,
+ "backing_queue_status": {
+ "avg_ack_egress_rate": 0,
+ "avg_ack_ingress_rate": 0,
+ "avg_egress_rate": 0,
+ "avg_ingress_rate": 0,
+ "delta": [
+ "delta",
+ "undefined",
+ 0,
+ 0,
+ "undefined"
+ ],
+ "len": 0,
+ "mode": "default",
+ "next_deliver_seq_id": 0,
+ "next_seq_id": 0,
+ "num_pending_acks": 0,
+ "num_unconfirmed": 0,
+ "q1": 0,
+ "q2": 0,
+ "q3": 0,
+ "q4": 0,
+ "target_ram_count": "infinity",
+ "version": 1
+ },
+ "consumer_capacity": 0,
+ "consumer_utilisation": 0,
+ "consumers": 0,
+ "durable": true,
+ "effective_policy_definition": {},
+ "exclusive": false,
+ "exclusive_consumer_tag": null,
+ "garbage_collection": {
+ "fullsweep_after": 65535,
+ "max_heap_size": 0,
+ "min_bin_vheap_size": 46422,
+ "min_heap_size": 233,
+ "minor_gcs": 74
+ },
+ "head_message_timestamp": null,
+ "idle_since": "2023-01-02T15:51:49.985+00:00",
+ "memory": 55408,
+ "message_bytes": 0,
+ "message_bytes_paged_out": 0,
+ "message_bytes_persistent": 0,
+ "message_bytes_ram": 0,
+ "message_bytes_ready": 0,
+ "message_bytes_unacknowledged": 0,
+ "messages": 1,
+ "messages_details": {
+ "rate": 0
+ },
+ "messages_paged_out": 1,
+ "messages_persistent": 1,
+ "messages_ram": 0,
+ "messages_ready": 1,
+ "messages_ready_details": {
+ "rate": 0
+ },
+ "messages_ready_ram": 0,
+ "messages_unacknowledged": 1,
+ "messages_unacknowledged_details": {
+ "rate": 0
+ },
+ "messages_unacknowledged_ram": 0,
+ "name": "myFirstQueue",
+ "node": "rabbit@localhost",
+ "operator_policy": null,
+ "policy": null,
+ "recoverable_slaves": null,
+ "reductions": 91946,
+ "reductions_details": {
+ "rate": 0
+ },
+ "single_active_consumer_tag": null,
+ "state": "running",
+ "type": "classic",
+ "vhost": "/"
+ },
+ {
+ "arguments": {},
+ "auto_delete": false,
+ "backing_queue_status": {
+ "avg_ack_egress_rate": 0,
+ "avg_ack_ingress_rate": 0,
+ "avg_egress_rate": 0,
+ "avg_ingress_rate": 0,
+ "delta": [
+ "delta",
+ "undefined",
+ 0,
+ 0,
+ "undefined"
+ ],
+ "len": 0,
+ "mode": "default",
+ "next_deliver_seq_id": 0,
+ "next_seq_id": 0,
+ "num_pending_acks": 0,
+ "num_unconfirmed": 0,
+ "q1": 0,
+ "q2": 0,
+ "q3": 0,
+ "q4": 0,
+ "target_ram_count": "infinity",
+ "version": 1
+ },
+ "consumer_capacity": 0,
+ "consumer_utilisation": 0,
+ "consumers": 0,
+ "durable": true,
+ "effective_policy_definition": {},
+ "exclusive": false,
+ "exclusive_consumer_tag": null,
+ "garbage_collection": {
+ "fullsweep_after": 65535,
+ "max_heap_size": 0,
+ "min_bin_vheap_size": 46422,
+ "min_heap_size": 233,
+ "minor_gcs": 74
+ },
+ "head_message_timestamp": null,
+ "idle_since": "2023-01-02T15:51:49.296+00:00",
+ "memory": 55408,
+ "message_bytes": 0,
+ "message_bytes_paged_out": 0,
+ "message_bytes_persistent": 0,
+ "message_bytes_ram": 0,
+ "message_bytes_ready": 0,
+ "message_bytes_unacknowledged": 0,
+ "messages": 1,
+ "messages_details": {
+ "rate": 0
+ },
+ "messages_paged_out": 1,
+ "messages_persistent": 1,
+ "messages_ram": 0,
+ "messages_ready": 1,
+ "messages_ready_details": {
+ "rate": 0
+ },
+ "messages_ready_ram": 0,
+ "messages_unacknowledged": 1,
+ "messages_unacknowledged_details": {
+ "rate": 0
+ },
+ "messages_unacknowledged_ram": 0,
+ "name": "mySecondQueue",
+ "node": "rabbit@localhost",
+ "operator_policy": null,
+ "policy": null,
+ "recoverable_slaves": null,
+ "reductions": 91878,
+ "reductions_details": {
+ "rate": 0
+ },
+ "single_active_consumer_tag": null,
+ "state": "running",
+ "type": "classic",
+ "vhost": "/"
+ },
+ {
+ "arguments": {
+ "x-queue-type": "classic"
+ },
+ "auto_delete": false,
+ "backing_queue_status": {
+ "avg_ack_egress_rate": 0,
+ "avg_ack_ingress_rate": 0,
+ "avg_egress_rate": 0,
+ "avg_ingress_rate": 0,
+ "delta": [
+ "delta",
+ "undefined",
+ 0,
+ 0,
+ "undefined"
+ ],
+ "len": 0,
+ "mode": "default",
+ "next_deliver_seq_id": 0,
+ "next_seq_id": 0,
+ "num_pending_acks": 0,
+ "num_unconfirmed": 0,
+ "q1": 0,
+ "q2": 0,
+ "q3": 0,
+ "q4": 0,
+ "target_ram_count": "infinity",
+ "version": 1
+ },
+ "consumer_capacity": 0,
+ "consumer_utilisation": 0,
+ "consumers": 0,
+ "durable": true,
+ "effective_policy_definition": {},
+ "exclusive": false,
+ "exclusive_consumer_tag": null,
+ "garbage_collection": {
+ "fullsweep_after": 65535,
+ "max_heap_size": 0,
+ "min_bin_vheap_size": 46422,
+ "min_heap_size": 233,
+ "minor_gcs": 7
+ },
+ "head_message_timestamp": null,
+ "idle_since": "2023-01-02T15:52:57.855+00:00",
+ "memory": 55408,
+ "message_bytes": 0,
+ "message_bytes_paged_out": 0,
+ "message_bytes_persistent": 0,
+ "message_bytes_ram": 0,
+ "message_bytes_ready": 0,
+ "message_bytes_unacknowledged": 0,
+ "messages": 1,
+ "messages_details": {
+ "rate": 0
+ },
+ "messages_paged_out": 1,
+ "messages_persistent": 1,
+ "messages_ram": 0,
+ "messages_ready": 1,
+ "messages_ready_details": {
+ "rate": 0
+ },
+ "messages_ready_ram": 0,
+ "messages_unacknowledged": 1,
+ "messages_unacknowledged_details": {
+ "rate": 0
+ },
+ "messages_unacknowledged_ram": 0,
+ "name": "myFirstQueue",
+ "node": "rabbit@localhost",
+ "operator_policy": null,
+ "policy": null,
+ "recoverable_slaves": null,
+ "reductions": 7431,
+ "reductions_details": {
+ "rate": 0
+ },
+ "single_active_consumer_tag": null,
+ "state": "running",
+ "type": "classic",
+ "vhost": "myFirstVhost"
+ },
+ {
+ "arguments": {
+ "x-queue-type": "classic"
+ },
+ "auto_delete": false,
+ "backing_queue_status": {
+ "avg_ack_egress_rate": 0,
+ "avg_ack_ingress_rate": 0,
+ "avg_egress_rate": 0,
+ "avg_ingress_rate": 0,
+ "delta": [
+ "delta",
+ "undefined",
+ 0,
+ 0,
+ "undefined"
+ ],
+ "len": 0,
+ "mode": "default",
+ "next_deliver_seq_id": 0,
+ "next_seq_id": 0,
+ "num_pending_acks": 0,
+ "num_unconfirmed": 0,
+ "q1": 0,
+ "q2": 0,
+ "q3": 0,
+ "q4": 0,
+ "target_ram_count": "infinity",
+ "version": 1
+ },
+ "consumer_capacity": 0,
+ "consumer_utilisation": 0,
+ "consumers": 0,
+ "durable": true,
+ "effective_policy_definition": {},
+ "exclusive": false,
+ "exclusive_consumer_tag": null,
+ "garbage_collection": {
+ "fullsweep_after": 65535,
+ "max_heap_size": 0,
+ "min_bin_vheap_size": 46422,
+ "min_heap_size": 233,
+ "minor_gcs": 7
+ },
+ "head_message_timestamp": null,
+ "idle_since": "2023-01-02T15:53:08.260+00:00",
+ "memory": 55408,
+ "message_bytes": 0,
+ "message_bytes_paged_out": 0,
+ "message_bytes_persistent": 0,
+ "message_bytes_ram": 0,
+ "message_bytes_ready": 0,
+ "message_bytes_unacknowledged": 0,
+ "messages": 1,
+ "messages_details": {
+ "rate": 0
+ },
+ "messages_paged_out": 1,
+ "messages_persistent": 1,
+ "messages_ram": 0,
+ "messages_ready": 1,
+ "messages_ready_details": {
+ "rate": 0
+ },
+ "messages_ready_ram": 0,
+ "messages_unacknowledged": 1,
+ "messages_unacknowledged_details": {
+ "rate": 0
+ },
+ "messages_unacknowledged_ram": 0,
+ "name": "MyFirstQueue",
+ "node": "rabbit@localhost",
+ "operator_policy": null,
+ "policy": null,
+ "recoverable_slaves": null,
+ "reductions": 7436,
+ "reductions_details": {
+ "rate": 0
+ },
+ "single_active_consumer_tag": null,
+ "state": "running",
+ "type": "classic",
+ "vhost": "mySecondVhost"
+ }
+]
diff --git a/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-vhosts.json b/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-vhosts.json
new file mode 100644
index 000000000..ed2c3418d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rabbitmq/testdata/v3.11.5/api-vhosts.json
@@ -0,0 +1,82 @@
+[
+ {
+ "cluster_state": {
+ "rabbit@localhost": "running"
+ },
+ "default_queue_type": "undefined",
+ "description": "Default virtual host",
+ "messages": 1,
+ "messages_details": {
+ "rate": 0
+ },
+ "messages_ready": 1,
+ "messages_ready_details": {
+ "rate": 0
+ },
+ "messages_unacknowledged": 1,
+ "messages_unacknowledged_details": {
+ "rate": 0
+ },
+ "metadata": {
+ "description": "Default virtual host",
+ "tags": []
+ },
+ "name": "/",
+ "tags": [],
+ "tracing": false
+ },
+ {
+ "cluster_state": {
+ "rabbit@localhost": "running"
+ },
+ "default_queue_type": "classic",
+ "description": "",
+ "messages": 1,
+ "messages_details": {
+ "rate": 0
+ },
+ "messages_ready": 1,
+ "messages_ready_details": {
+ "rate": 0
+ },
+ "messages_unacknowledged": 1,
+ "messages_unacknowledged_details": {
+ "rate": 0
+ },
+ "metadata": {
+ "default_queue_type": "classic",
+ "description": "",
+ "tags": []
+ },
+ "name": "myFirstVhost",
+ "tags": [],
+ "tracing": false
+ },
+ {
+ "cluster_state": {
+ "rabbit@localhost": "running"
+ },
+ "default_queue_type": "classic",
+ "description": "",
+ "messages": 1,
+ "messages_details": {
+ "rate": 0
+ },
+ "messages_ready": 1,
+ "messages_ready_details": {
+ "rate": 0
+ },
+ "messages_unacknowledged": 1,
+ "messages_unacknowledged_details": {
+ "rate": 0
+ },
+ "metadata": {
+ "default_queue_type": "classic",
+ "description": "",
+ "tags": []
+ },
+ "name": "mySecondVhost",
+ "tags": [],
+ "tracing": false
+ }
+]
diff --git a/src/go/plugin/go.d/modules/redis/README.md b/src/go/plugin/go.d/modules/redis/README.md
new file mode 120000
index 000000000..e41666257
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/README.md
@@ -0,0 +1 @@
+integrations/redis.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/redis/charts.go b/src/go/plugin/go.d/modules/redis/charts.go
new file mode 100644
index 000000000..6d4f638bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/charts.go
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package redis
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+const (
+ prioConnections = module.Priority + iota
+ prioClients
+
+ prioPingLatency
+ prioCommands
+ prioKeyLookupHitRate
+
+ prioMemory
+ prioMemoryFragmentationRatio
+ prioKeyEviction
+
+ prioNet
+
+ prioConnectedReplicas
+ prioMasterLinkStatus
+ prioMasterLastIOSinceTime
+ prioMasterLinkDownSinceTime
+
+ prioPersistenceRDBChanges
+ prioPersistenceRDBBgSaveNow
+ prioPersistenceRDBBgSaveHealth
+ prioPersistenceRDBBgSaveLastSaveSinceTime
+ prioPersistenceAOFSize
+
+ prioCommandsCalls
+ prioCommandsUsec
+ prioCommandsUsecPerSec
+
+ prioKeyExpiration
+ prioKeys
+ prioExpiresKeys
+
+ prioUptime
+)
+
+var redisCharts = module.Charts{
+ chartConnections.Copy(),
+ chartClients.Copy(),
+
+ pingLatencyCommands.Copy(),
+ chartCommands.Copy(),
+ chartKeyLookupHitRate.Copy(),
+
+ chartMemory.Copy(),
+ chartMemoryFragmentationRatio.Copy(),
+ chartKeyEviction.Copy(),
+
+ chartNet.Copy(),
+
+ chartConnectedReplicas.Copy(),
+
+ chartPersistenceRDBChanges.Copy(),
+ chartPersistenceRDBBgSaveNow.Copy(),
+ chartPersistenceRDBBgSaveHealth.Copy(),
+ chartPersistenceRDBLastSaveSinceTime.Copy(),
+
+ chartCommandsCalls.Copy(),
+ chartCommandsUsec.Copy(),
+ chartCommandsUsecPerSec.Copy(),
+
+ chartKeyExpiration.Copy(),
+ chartKeys.Copy(),
+ chartExpiresKeys.Copy(),
+
+ chartUptime.Copy(),
+}
+
+var (
+ chartConnections = module.Chart{
+ ID: "connections",
+ Title: "Accepted and rejected (maxclients limit) connections",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "redis.connections",
+ Priority: prioConnections,
+ Dims: module.Dims{
+ {ID: "total_connections_received", Name: "accepted", Algo: module.Incremental},
+ {ID: "rejected_connections", Name: "rejected", Algo: module.Incremental},
+ },
+ }
+ chartClients = module.Chart{
+ ID: "clients",
+ Title: "Clients",
+ Units: "clients",
+ Fam: "connections",
+ Ctx: "redis.clients",
+ Priority: prioClients,
+ Dims: module.Dims{
+ {ID: "connected_clients", Name: "connected"},
+ {ID: "blocked_clients", Name: "blocked"},
+ {ID: "tracking_clients", Name: "tracking"},
+ {ID: "clients_in_timeout_table", Name: "in_timeout_table"},
+ },
+ }
+)
+
+var (
+ pingLatencyCommands = module.Chart{
+ ID: "ping_latency",
+ Title: "Ping latency",
+ Units: "seconds",
+ Fam: "performance",
+ Ctx: "redis.ping_latency",
+ Priority: prioPingLatency,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "ping_latency_min", Name: "min", Div: 1e6},
+ {ID: "ping_latency_max", Name: "max", Div: 1e6},
+ {ID: "ping_latency_avg", Name: "avg", Div: 1e6},
+ },
+ }
+ chartCommands = module.Chart{
+ ID: "commands",
+ Title: "Processed commands",
+ Units: "commands/s",
+ Fam: "performance",
+ Ctx: "redis.commands",
+ Priority: prioCommands,
+ Dims: module.Dims{
+ {ID: "total_commands_processed", Name: "processed", Algo: module.Incremental},
+ },
+ }
+ chartKeyLookupHitRate = module.Chart{
+ ID: "key_lookup_hit_rate",
+ Title: "Keys lookup hit rate",
+ Units: "percentage",
+ Fam: "performance",
+ Ctx: "redis.keyspace_lookup_hit_rate",
+ Priority: prioKeyLookupHitRate,
+ Dims: module.Dims{
+ {ID: "keyspace_hit_rate", Name: "lookup_hit_rate", Div: precision},
+ },
+ }
+)
+
+var (
+ chartMemory = module.Chart{
+ ID: "memory",
+ Title: "Memory usage",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "redis.memory",
+ Type: module.Area,
+ Priority: prioMemory,
+ Dims: module.Dims{
+ {ID: "maxmemory", Name: "max"},
+ {ID: "used_memory", Name: "used"},
+ {ID: "used_memory_rss", Name: "rss"},
+ {ID: "used_memory_peak", Name: "peak"},
+ {ID: "used_memory_dataset", Name: "dataset"},
+ {ID: "used_memory_lua", Name: "lua"},
+ {ID: "used_memory_scripts", Name: "scripts"},
+ },
+ }
+ chartMemoryFragmentationRatio = module.Chart{
+ ID: "mem_fragmentation_ratio",
+ Title: "Ratio between used_memory_rss and used_memory",
+ Units: "ratio",
+ Fam: "memory",
+ Ctx: "redis.mem_fragmentation_ratio",
+ Priority: prioMemoryFragmentationRatio,
+ Dims: module.Dims{
+ {ID: "mem_fragmentation_ratio", Name: "mem_fragmentation", Div: precision},
+ },
+ }
+ chartKeyEviction = module.Chart{
+ ID: "key_eviction_events",
+ Title: "Evicted keys due to maxmemory limit",
+ Units: "keys/s",
+ Fam: "memory",
+ Ctx: "redis.key_eviction_events",
+ Priority: prioKeyEviction,
+ Dims: module.Dims{
+ {ID: "evicted_keys", Name: "evicted", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartNet = module.Chart{
+ ID: "net",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "network",
+ Ctx: "redis.net",
+ Type: module.Area,
+ Priority: prioNet,
+ Dims: module.Dims{
+ {ID: "total_net_input_bytes", Name: "received", Mul: 8, Div: 1024, Algo: module.Incremental},
+ {ID: "total_net_output_bytes", Name: "sent", Mul: -8, Div: 1024, Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartPersistenceRDBChanges = module.Chart{
+ ID: "persistence",
+ Title: "Operations that produced changes since the last SAVE or BGSAVE",
+ Units: "operations",
+ Fam: "persistence",
+ Ctx: "redis.rdb_changes",
+ Priority: prioPersistenceRDBChanges,
+ Dims: module.Dims{
+ {ID: "rdb_changes_since_last_save", Name: "changes"},
+ },
+ }
+ chartPersistenceRDBBgSaveNow = module.Chart{
+ ID: "bgsave_now",
+ Title: "Duration of the on-going RDB save operation if any",
+ Units: "seconds",
+ Fam: "persistence",
+ Ctx: "redis.bgsave_now",
+ Priority: prioPersistenceRDBBgSaveNow,
+ Dims: module.Dims{
+ {ID: "rdb_current_bgsave_time_sec", Name: "current_bgsave_time"},
+ },
+ }
+ chartPersistenceRDBBgSaveHealth = module.Chart{
+ ID: "bgsave_health",
+ Title: "Status of the last RDB save operation (0: ok, 1: err)",
+ Units: "status",
+ Fam: "persistence",
+ Ctx: "redis.bgsave_health",
+ Priority: prioPersistenceRDBBgSaveHealth,
+ Dims: module.Dims{
+ {ID: "rdb_last_bgsave_status", Name: "last_bgsave"},
+ },
+ }
+ chartPersistenceRDBLastSaveSinceTime = module.Chart{
+ ID: "bgsave_last_rdb_save_since_time",
+ Title: "Time elapsed since the last successful RDB save",
+ Units: "seconds",
+ Fam: "persistence",
+ Ctx: "redis.bgsave_last_rdb_save_since_time",
+ Priority: prioPersistenceRDBBgSaveLastSaveSinceTime,
+ Dims: module.Dims{
+ {ID: "rdb_last_save_time", Name: "last_bgsave_time"},
+ },
+ }
+
+ chartPersistenceAOFSize = module.Chart{
+ ID: "persistence_aof_size",
+ Title: "AOF file size",
+ Units: "bytes",
+ Fam: "persistence",
+ Ctx: "redis.aof_file_size",
+ Priority: prioPersistenceAOFSize,
+ Dims: module.Dims{
+ {ID: "aof_current_size", Name: "current"},
+ {ID: "aof_base_size", Name: "base"},
+ },
+ }
+)
+
+var (
+ chartCommandsCalls = module.Chart{
+ ID: "commands_calls",
+ Title: "Calls per command",
+ Units: "calls/s",
+ Fam: "commands",
+ Ctx: "redis.commands_calls",
+ Type: module.Stacked,
+ Priority: prioCommandsCalls,
+ }
+ chartCommandsUsec = module.Chart{
+ ID: "commands_usec",
+ Title: "Total CPU time consumed by the commands",
+ Units: "microseconds",
+ Fam: "commands",
+ Ctx: "redis.commands_usec",
+ Type: module.Stacked,
+ Priority: prioCommandsUsec,
+ }
+ chartCommandsUsecPerSec = module.Chart{
+ ID: "commands_usec_per_sec",
+ Title: "Average CPU consumed per command execution",
+ Units: "microseconds/s",
+ Fam: "commands",
+ Ctx: "redis.commands_usec_per_sec",
+ Priority: prioCommandsUsecPerSec,
+ }
+)
+
+var (
+ chartKeyExpiration = module.Chart{
+ ID: "key_expiration_events",
+ Title: "Expired keys",
+ Units: "keys/s",
+ Fam: "keyspace",
+ Ctx: "redis.key_expiration_events",
+ Priority: prioKeyExpiration,
+ Dims: module.Dims{
+ {ID: "expired_keys", Name: "expired", Algo: module.Incremental},
+ },
+ }
+ chartKeys = module.Chart{
+ ID: "keys",
+ Title: "Keys per database",
+ Units: "keys",
+ Fam: "keyspace",
+ Ctx: "redis.database_keys",
+ Type: module.Stacked,
+ Priority: prioKeys,
+ }
+ chartExpiresKeys = module.Chart{
+ ID: "expires_keys",
+ Title: "Keys with an expiration per database",
+ Units: "keys",
+ Fam: "keyspace",
+ Ctx: "redis.database_expires_keys",
+ Type: module.Stacked,
+ Priority: prioExpiresKeys,
+ }
+)
+
+var (
+ chartConnectedReplicas = module.Chart{
+ ID: "connected_replicas",
+ Title: "Connected replicas",
+ Units: "replicas",
+ Fam: "replication",
+ Ctx: "redis.connected_replicas",
+ Priority: prioConnectedReplicas,
+ Dims: module.Dims{
+ {ID: "connected_slaves", Name: "connected"},
+ },
+ }
+ masterLinkStatusChart = module.Chart{
+ ID: "master_last_status",
+ Title: "Master link status",
+ Units: "status",
+ Fam: "replication",
+ Ctx: "redis.master_link_status",
+ Priority: prioMasterLinkStatus,
+ Dims: module.Dims{
+ {ID: "master_link_status_up", Name: "up"},
+ {ID: "master_link_status_down", Name: "down"},
+ },
+ }
+ masterLastIOSinceTimeChart = module.Chart{
+ ID: "master_last_io_since_time",
+ Title: "Time elapsed since the last interaction with master",
+ Units: "seconds",
+ Fam: "replication",
+ Ctx: "redis.master_last_io_since_time",
+ Priority: prioMasterLastIOSinceTime,
+ Dims: module.Dims{
+ {ID: "master_last_io_seconds_ago", Name: "time"},
+ },
+ }
+ masterLinkDownSinceTimeChart = module.Chart{
+ ID: "master_link_down_since_stime",
+ Title: "Time elapsed since the link between master and slave is down",
+ Units: "seconds",
+ Fam: "replication",
+ Ctx: "redis.master_link_down_since_time",
+ Priority: prioMasterLinkDownSinceTime,
+ Dims: module.Dims{
+ {ID: "master_link_down_since_seconds", Name: "time"},
+ },
+ }
+)
+
+var (
+ chartUptime = module.Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "redis.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "uptime_in_seconds", Name: "uptime"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/redis/collect.go b/src/go/plugin/go.d/modules/redis/collect.go
new file mode 100644
index 000000000..026164672
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/collect.go
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package redis
+
+import (
+ "bufio"
+ "context"
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/blang/semver/v4"
+)
+
+const precision = 1000 // float values multiplier and dimensions divisor
+
+func (r *Redis) collect() (map[string]int64, error) {
+ info, err := r.rdb.Info(context.Background(), "all").Result()
+ if err != nil {
+ return nil, err
+ }
+
+ if r.server == "" {
+ s, v, err := extractServerVersion(info)
+ if err != nil {
+ return nil, fmt.Errorf("can not extract server app and version: %v", err)
+ }
+ r.server, r.version = s, v
+ r.Debugf(`server="%s",version="%s"`, s, v)
+ }
+
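+ // Only a genuine Redis server is supported: Redis-compatible servers (e.g. Pika)
+ // report "<app>_version" in INFO and are rejected by the check below.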
+ if r.server != "redis" {
+ return nil, fmt.Errorf("unsupported server app, want=redis, got=%s", r.server)
+ }
+
+ mx := make(map[string]int64)
+ r.collectInfo(mx, info)
+ r.collectPingLatency(mx)
+
+ return mx, nil
+}
+
+// redis_version:6.0.9
+var reVersion = regexp.MustCompile(`([a-z]+)_version:(\d+\.\d+\.\d+)`)
+
+func extractServerVersion(info string) (string, *semver.Version, error) {
+ var versionLine string
+ for sc := bufio.NewScanner(strings.NewReader(info)); sc.Scan(); {
+ line := sc.Text()
+ if strings.Contains(line, "_version") {
+ versionLine = strings.TrimSpace(line)
+ break
+ }
+ }
+ if versionLine == "" {
+ return "", nil, errors.New("no version property")
+ }
+
+ match := reVersion.FindStringSubmatch(versionLine)
+ if match == nil {
+ return "", nil, fmt.Errorf("can not parse version property '%s'", versionLine)
+ }
+
+ server, version := match[1], match[2]
+ ver, err := semver.New(version)
+ if err != nil {
+ return "", nil, err
+ }
+
+ return server, ver, nil
+}
diff --git a/src/go/plugin/go.d/modules/redis/collect_info.go b/src/go/plugin/go.d/modules/redis/collect_info.go
new file mode 100644
index 000000000..81f3646de
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/collect_info.go
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package redis
+
+import (
+ "bufio"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ infoSectionServer = "# Server"
+ infoSectionData = "# Data"
+ infoSectionClients = "# Clients"
+ infoSectionStats = "# Stats"
+ infoSectionCommandstats = "# Commandstats"
+ infoSectionCPU = "# CPU"
+ infoSectionRepl = "# Replication"
+ infoSectionKeyspace = "# Keyspace"
+)
+
+var infoSections = map[string]struct{}{
+ infoSectionServer: {},
+ infoSectionData: {},
+ infoSectionClients: {},
+ infoSectionStats: {},
+ infoSectionCommandstats: {},
+ infoSectionCPU: {},
+ infoSectionRepl: {},
+ infoSectionKeyspace: {},
+}
+
+func isInfoSection(line string) bool { _, ok := infoSections[line]; return ok }
+
+func (r *Redis) collectInfo(mx map[string]int64, info string) {
+ // https://redis.io/commands/info
+ // Lines can contain a section name (starting with a # character) or a property.
+ // All the properties are in the form of field:value terminated by \r\n.
+
+ var curSection string
+ sc := bufio.NewScanner(strings.NewReader(info))
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+ if len(line) == 0 {
+ curSection = ""
+ continue
+ }
+ if strings.HasPrefix(line, "#") {
+ if isInfoSection(line) {
+ curSection = line
+ }
+ continue
+ }
+
+ field, value, ok := parseProperty(line)
+ if !ok {
+ continue
+ }
+
+ switch {
+ case curSection == infoSectionCommandstats:
+ r.collectInfoCommandstatsProperty(mx, field, value)
+ case curSection == infoSectionKeyspace:
+ r.collectInfoKeyspaceProperty(mx, field, value)
+ case field == "rdb_last_bgsave_status":
+ collectNumericValue(mx, field, convertBgSaveStatus(value))
+ case field == "rdb_current_bgsave_time_sec" && value == "-1":
+ // TODO: https://github.com/netdata/dashboard/issues/198
+ // "-1" means there is no on-going bgsave operation;
+ // netdata has 'Convert seconds to time' feature (enabled by default),
+ // looks like it doesn't respect negative values and does abs().
+ // "-1" => "00:00:01".
+ collectNumericValue(mx, field, "0")
+ case field == "rdb_last_save_time":
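+ // rdb_last_save_time is a unix timestamp of the last successful save; expose it as seconds elapsed since then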
+ v, _ := strconv.ParseInt(value, 10, 64)
+ mx[field] = int64(time.Since(time.Unix(v, 0)).Seconds())
+ case field == "aof_enabled" && value == "1":
+ r.addAOFChartsOnce.Do(r.addAOFCharts)
+ case field == "master_link_status":
+ mx["master_link_status_up"] = boolToInt(value == "up")
+ mx["master_link_status_down"] = boolToInt(value == "down")
+ default:
+ collectNumericValue(mx, field, value)
+ }
+ }
+
+ if has(mx, "keyspace_hits", "keyspace_misses") {
+ mx["keyspace_hit_rate"] = int64(calcKeyspaceHitRate(mx) * precision)
+ }
+ if has(mx, "master_last_io_seconds_ago") {
+ r.addReplSlaveChartsOnce.Do(r.addReplSlaveCharts)
+ if !has(mx, "master_link_down_since_seconds") {
+ mx["master_link_down_since_seconds"] = 0
+ }
+ }
+}
+
+var reKeyspaceValue = regexp.MustCompile(`^keys=(\d+),expires=(\d+)`)
+
+func (r *Redis) collectInfoKeyspaceProperty(ms map[string]int64, field, value string) {
+ match := reKeyspaceValue.FindStringSubmatch(value)
+ if match == nil {
+ return
+ }
+
+ keys, expires := match[1], match[2]
+ collectNumericValue(ms, field+"_keys", keys)
+ collectNumericValue(ms, field+"_expires_keys", expires)
+
+ if !r.collectedDbs[field] {
+ r.collectedDbs[field] = true
+ r.addDbToKeyspaceCharts(field)
+ }
+}
+
+var reCommandstatsValue = regexp.MustCompile(`^calls=(\d+),usec=(\d+),usec_per_call=([\d.]+)`)
+
+func (r *Redis) collectInfoCommandstatsProperty(ms map[string]int64, field, value string) {
+ if !strings.HasPrefix(field, "cmdstat_") {
+ return
+ }
+ cmd := field[len("cmdstat_"):]
+
+ match := reCommandstatsValue.FindStringSubmatch(value)
+ if match == nil {
+ return
+ }
+
+ calls, usec, usecPerCall := match[1], match[2], match[3]
+ collectNumericValue(ms, "cmd_"+cmd+"_calls", calls)
+ collectNumericValue(ms, "cmd_"+cmd+"_usec", usec)
+ collectNumericValue(ms, "cmd_"+cmd+"_usec_per_call", usecPerCall)
+
+ if !r.collectedCommands[cmd] {
+ r.collectedCommands[cmd] = true
+ r.addCmdToCommandsCharts(cmd)
+ }
+}
+
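+// collectNumericValue parses the value as a float; integers are stored as-is, while values containing
+// a decimal point are multiplied by 'precision' (chart dims use Div: precision to scale them back).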
+func collectNumericValue(ms map[string]int64, field, value string) {
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return
+ }
+ if strings.IndexByte(value, '.') == -1 {
+ ms[field] = int64(v)
+ } else {
+ ms[field] = int64(v * precision)
+ }
+}
+
+func convertBgSaveStatus(status string) string {
+ // https://github.com/redis/redis/blob/unstable/src/server.c
+ // "ok" or "err"
+ if status == "ok" {
+ return "0"
+ }
+ return "1"
+}
+
+func parseProperty(prop string) (field, value string, ok bool) {
+ i := strings.IndexByte(prop, ':')
+ if i == -1 {
+ return "", "", false
+ }
+ field, value = prop[:i], prop[i+1:]
+ return field, value, field != "" && value != ""
+}
+
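+// calcKeyspaceHitRate returns keyspace_hits / (keyspace_hits + keyspace_misses) as a percentage (0 when there were no lookups).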
+func calcKeyspaceHitRate(ms map[string]int64) float64 {
+ hits := ms["keyspace_hits"]
+ misses := ms["keyspace_misses"]
+ if hits+misses == 0 {
+ return 0
+ }
+ return float64(hits) * 100 / float64(hits+misses)
+}
+
+func (r *Redis) addCmdToCommandsCharts(cmd string) {
+ r.addDimToChart(chartCommandsCalls.ID, &module.Dim{
+ ID: "cmd_" + cmd + "_calls",
+ Name: strings.ToUpper(cmd),
+ Algo: module.Incremental,
+ })
+ r.addDimToChart(chartCommandsUsec.ID, &module.Dim{
+ ID: "cmd_" + cmd + "_usec",
+ Name: strings.ToUpper(cmd),
+ Algo: module.Incremental,
+ })
+ r.addDimToChart(chartCommandsUsecPerSec.ID, &module.Dim{
+ ID: "cmd_" + cmd + "_usec_per_call",
+ Name: strings.ToUpper(cmd),
+ Div: precision,
+ })
+}
+
+func (r *Redis) addDbToKeyspaceCharts(db string) {
+ r.addDimToChart(chartKeys.ID, &module.Dim{
+ ID: db + "_keys",
+ Name: db,
+ })
+ r.addDimToChart(chartExpiresKeys.ID, &module.Dim{
+ ID: db + "_expires_keys",
+ Name: db,
+ })
+}
+
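+// addDimToChart adds a dimension to an existing chart and marks the chart as not created so it is resent to netdata.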
+func (r *Redis) addDimToChart(chartID string, dim *module.Dim) {
+ chart := r.Charts().Get(chartID)
+ if chart == nil {
+ r.Warningf("error on adding '%s' dimension: can not find '%s' chart", dim.ID, chartID)
+ return
+ }
+ if err := chart.AddDim(dim); err != nil {
+ r.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (r *Redis) addAOFCharts() {
+ err := r.Charts().Add(chartPersistenceAOFSize.Copy())
+ if err != nil {
+ r.Warningf("error on adding '%s' chart", chartPersistenceAOFSize.ID)
+ }
+}
+
+func (r *Redis) addReplSlaveCharts() {
+ if err := r.Charts().Add(masterLinkStatusChart.Copy()); err != nil {
+ r.Warningf("error on adding '%s' chart", masterLinkStatusChart.ID)
+ }
+ if err := r.Charts().Add(masterLastIOSinceTimeChart.Copy()); err != nil {
+ r.Warningf("error on adding '%s' chart", masterLastIOSinceTimeChart.ID)
+ }
+ if err := r.Charts().Add(masterLinkDownSinceTimeChart.Copy()); err != nil {
+ r.Warningf("error on adding '%s' chart", masterLinkDownSinceTimeChart.ID)
+ }
+}
+
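+// has reports whether every given key is present in the map.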
+func has(m map[string]int64, key string, keys ...string) bool {
+ switch _, ok := m[key]; len(keys) {
+ case 0:
+ return ok
+ default:
+ return ok && has(m, keys[0], keys[1:]...)
+ }
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
diff --git a/src/go/plugin/go.d/modules/redis/collect_ping_latency.go b/src/go/plugin/go.d/modules/redis/collect_ping_latency.go
new file mode 100644
index 000000000..063673c2c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/collect_ping_latency.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package redis
+
+import (
+ "context"
+ "time"
+)
+
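+// collectPingLatency sends PingSamples PING commands per collection cycle and records each
+// round-trip time in microseconds; the resulting summary is written to mx under the ping_latency_* keys.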
+func (r *Redis) collectPingLatency(mx map[string]int64) {
+ r.pingSummary.Reset()
+
+ for i := 0; i < r.PingSamples; i++ {
+ now := time.Now()
+ _, err := r.rdb.Ping(context.Background()).Result()
+ elapsed := time.Since(now)
+
+ if err != nil {
+ r.Debug(err)
+ continue
+ }
+
+ r.pingSummary.Observe(float64(elapsed.Microseconds()))
+ }
+
+ r.pingSummary.WriteTo(mx, "ping_latency", 1, 1)
+}
diff --git a/src/go/plugin/go.d/modules/redis/config_schema.json b/src/go/plugin/go.d/modules/redis/config_schema.json
new file mode 100644
index 000000000..c57b06ac0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/config_schema.json
@@ -0,0 +1,123 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "title": "Redis collector configuration.",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "URI",
+ "description": "The URI specifying the connection details for the Redis server.",
+ "type": "string",
+ "default": "redis://@localhost:6379"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "ping_samples": {
+ "title": "Ping samples",
+ "description": "The number of PING commands to send per data collection interval. Used to calculate latency.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "address": {
+ "ui:placeholder": "redis://user:password@host:port",
+ "ui:help": "Tcp connection: `redis://user:password@host:port`. Unix connection: `unix://user:password@/path/to/redis.sock`."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout",
+ "ping_samples"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/redis/init.go b/src/go/plugin/go.d/modules/redis/init.go
new file mode 100644
index 000000000..8190be778
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/init.go
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package redis
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/go-redis/redis/v8"
+)
+
+func (r *Redis) validateConfig() error {
+ if r.Address == "" {
+ return errors.New("'address' not set")
+ }
+ return nil
+}
+
+func (r *Redis) initRedisClient() (*redis.Client, error) {
+ opts, err := redis.ParseURL(r.Address)
+ if err != nil {
+ return nil, err
+ }
+
+ tlsConfig, err := tlscfg.NewTLSConfig(r.TLSConfig)
+ if err != nil {
+ return nil, err
+ }
+
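+ // redis.ParseURL sets opts.TLSConfig for rediss:// addresses; keep its ServerName when swapping in the user-provided TLS config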
+ if opts.TLSConfig != nil && tlsConfig != nil {
+ tlsConfig.ServerName = opts.TLSConfig.ServerName
+ }
+
+ if opts.Username == "" && r.Username != "" {
+ opts.Username = r.Username
+ }
+ if opts.Password == "" && r.Password != "" {
+ opts.Password = r.Password
+ }
+
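+ // limit the pool to a single connection; the collector only issues sequential INFO/PING calls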
+ opts.PoolSize = 1
+ opts.TLSConfig = tlsConfig
+ opts.DialTimeout = r.Timeout.Duration()
+ opts.ReadTimeout = r.Timeout.Duration()
+ opts.WriteTimeout = r.Timeout.Duration()
+
+ return redis.NewClient(opts), nil
+}
+
+func (r *Redis) initCharts() (*module.Charts, error) {
+ return redisCharts.Copy(), nil
+}
diff --git a/src/go/plugin/go.d/modules/redis/integrations/redis.md b/src/go/plugin/go.d/modules/redis/integrations/redis.md
new file mode 100644
index 000000000..52dfbf8f2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/integrations/redis.md
@@ -0,0 +1,287 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/redis/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/redis/metadata.yaml"
+sidebar_label: "Redis"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Redis
+
+
+<img src="https://netdata.cloud/img/redis.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: redis
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.
+
+
+It connects to the Redis instance via a TCP or UNIX socket and executes the following commands:
+
+- [INFO ALL](https://redis.io/commands/info)
+- [PING](https://redis.io/commands/ping/)
+
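+If the collector cannot connect, you can run the same commands manually with `redis-cli` to confirm that the server is reachable and responding (a minimal sketch; adjust the host and port to match your setup):
+
+```bash
+# should reply with PONG
+redis-cli -h 127.0.0.1 -p 6379 PING
+
+# the collector parses the full INFO output; the Server section alone is enough to confirm access
+redis-cli -h 127.0.0.1 -p 6379 INFO server
+```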
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:
+
+- 127.0.0.1:6379
+- /tmp/redis.sock
+- /var/run/redis/redis.sock
+- /var/lib/redis/redis.sock
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Redis instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| redis.connections | accepted, rejected | connections/s |
+| redis.clients | connected, blocked, tracking, in_timeout_table | clients |
+| redis.ping_latency | min, max, avg | seconds |
+| redis.commands | processed | commands/s |
+| redis.keyspace_lookup_hit_rate | lookup_hit_rate | percentage |
+| redis.memory | max, used, rss, peak, dataset, lua, scripts | bytes |
+| redis.mem_fragmentation_ratio | mem_fragmentation | ratio |
+| redis.key_eviction_events | evicted | keys/s |
+| redis.net | received, sent | kilobits/s |
+| redis.rdb_changes | changes | operations |
+| redis.bgsave_now | current_bgsave_time | seconds |
+| redis.bgsave_health | last_bgsave | status |
+| redis.bgsave_last_rdb_save_since_time | last_bgsave_time | seconds |
+| redis.aof_file_size | current, base | bytes |
+| redis.commands_calls | a dimension per command | calls/s |
+| redis.commands_usec | a dimension per command | microseconds |
+| redis.commands_usec_per_sec | a dimension per command | microseconds/s |
+| redis.key_expiration_events | expired | keys/s |
+| redis.database_keys | a dimension per database | keys |
+| redis.database_expires_keys | a dimension per database | keys |
+| redis.connected_replicas | connected | replicas |
+| redis.master_link_status | up, down | status |
+| redis.master_last_io_since_time | time | seconds |
+| redis.master_link_down_since_time | time | seconds |
+| redis.uptime | uptime | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ redis_connections_rejected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.connections | connections rejected because of maxclients limit in the last minute |
+| [ redis_bgsave_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_now | duration of the on-going RDB save operation |
+| [ redis_bgsave_broken ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.bgsave_health | status of the last RDB save operation (0: ok, 1: error) |
+| [ redis_master_link_down ](https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf) | redis.master_link_down_since_time | time elapsed since the link between master and slave is down |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/redis.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/redis.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Redis server address. | redis://@localhost:6379 | yes |
+| timeout | Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds. | 1 | no |
+| username | Username used for authentication. | | no |
+| password | Password used for authentication. | | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 'redis://@127.0.0.1:6379'
+
+```
+</details>
+
+##### Unix socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 'unix://@/tmp/redis.sock'
+
+```
+</details>
+
+##### TCP socket with password
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 'redis://:password@127.0.0.1:6379'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 'redis://:password@127.0.0.1:6379'
+
+ - name: remote
+ address: 'redis://user:password@203.0.113.0:6379'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `redis` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m redis
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `redis` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep redis
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep redis /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep redis
+```
+
+
diff --git a/src/go/plugin/go.d/modules/redis/metadata.yaml b/src/go/plugin/go.d/modules/redis/metadata.yaml
new file mode 100644
index 000000000..2d94017d6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/metadata.yaml
@@ -0,0 +1,343 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-redis
+ plugin_name: go.d.plugin
+ module_name: redis
+ monitored_instance:
+ name: Redis
+ link: https://redis.com/
+ categories:
+ - data-collection.database-servers
+ icon_filename: redis.svg
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ alternative_monitored_instances: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - redis
+ - databases
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the health and performance of Redis servers and collects general statistics, CPU and memory consumption, replication information, command statistics, and more.
+ method_description: |
+ It connects to the Redis instance via a TCP or UNIX socket and executes the following commands:
+
+ - [INFO ALL](https://redis.io/commands/info)
+ - [PING](https://redis.io/commands/ping/)
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects instances running on localhost by attempting to connect using known Redis TCP and UNIX sockets:
+
+ - 127.0.0.1:6379
+ - /tmp/redis.sock
+ - /var/run/redis/redis.sock
+ - /var/lib/redis/redis.sock
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/redis.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: Redis server address.
+ default_value: redis://@localhost:6379
+ required: true
+ details: |
+ There are two connection types: by TCP socket and by UNIX socket.
+
+ - TCP connection: `redis://<user>:<password>@<host>:<port>/<db_number>`
+ - UNIX connection: `unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>`
+ - name: timeout
+ description: Dial (establishing new connections), read (socket reads) and write (socket writes) timeout in seconds.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username used for authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password used for authentication.
+ default_value: ""
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certificate authority that the client uses when verifying server certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: TCP socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 'redis://@127.0.0.1:6379'
+ - name: Unix socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 'unix://@/tmp/redis.sock'
+ - name: TCP socket with password
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 'redis://:password@127.0.0.1:6379'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 'redis://:password@127.0.0.1:6379'
+
+ - name: remote
+ address: 'redis://user:password@203.0.113.0:6379'
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: redis_connections_rejected
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf
+ metric: redis.connections
+ info: connections rejected because of maxclients limit in the last minute
+ - name: redis_bgsave_slow
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf
+ metric: redis.bgsave_now
+ info: duration of the on-going RDB save operation
+ - name: redis_bgsave_broken
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf
+ metric: redis.bgsave_health
+ info: 'status of the last RDB save operation (0: ok, 1: error)'
+ - name: redis_master_link_down
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/redis.conf
+ metric: redis.master_link_down_since_time
+ info: time elapsed since the link between master and slave is down
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: redis.connections
+ description: Accepted and rejected (maxclients limit) connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: accepted
+ - name: rejected
+ - name: redis.clients
+ description: Clients
+ unit: clients
+ chart_type: line
+ dimensions:
+ - name: connected
+ - name: blocked
+ - name: tracking
+ - name: in_timeout_table
+ - name: redis.ping_latency
+ description: Ping latency
+ unit: seconds
+ chart_type: area
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: redis.commands
+ description: Processed commands
+ unit: commands/s
+ chart_type: line
+ dimensions:
+ - name: processed
+ - name: redis.keyspace_lookup_hit_rate
+ description: Keys lookup hit rate
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: lookup_hit_rate
+ - name: redis.memory
+ description: Memory usage
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: max
+ - name: used
+ - name: rss
+ - name: peak
+ - name: dataset
+ - name: lua
+ - name: scripts
+ - name: redis.mem_fragmentation_ratio
+ description: Ratio between used_memory_rss and used_memory
+ unit: ratio
+ chart_type: line
+ dimensions:
+ - name: mem_fragmentation
+ - name: redis.key_eviction_events
+ description: Evicted keys due to maxmemory limit
+ unit: keys/s
+ chart_type: line
+ dimensions:
+ - name: evicted
+ - name: redis.net
+ description: Bandwidth
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: redis.rdb_changes
+ description: Operations that produced changes since the last SAVE or BGSAVE
+ unit: operations
+ chart_type: line
+ dimensions:
+ - name: changes
+ - name: redis.bgsave_now
+ description: Duration of the on-going RDB save operation if any
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: current_bgsave_time
+ - name: redis.bgsave_health
+ description: 'Status of the last RDB save operation (0: ok, 1: err)'
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: last_bgsave
+ - name: redis.bgsave_last_rdb_save_since_time
+ description: Time elapsed since the last successful RDB save
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: last_bgsave_time
+ - name: redis.aof_file_size
+ description: AOF file size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: current
+ - name: base
+ - name: redis.commands_calls
+ description: Calls per command
+ unit: calls/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per command
+ - name: redis.commands_usec
+ description: Total CPU time consumed by the commands
+ unit: microseconds
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per command
+ - name: redis.commands_usec_per_sec
+ description: Average CPU consumed per command execution
+ unit: microseconds/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per command
+ - name: redis.key_expiration_events
+ description: Expired keys
+ unit: keys/s
+ chart_type: line
+ dimensions:
+ - name: expired
+ - name: redis.database_keys
+ description: Keys per database
+ unit: keys
+ chart_type: line
+ dimensions:
+ - name: a dimension per database
+ - name: redis.database_expires_keys
+ description: Keys with an expiration per database
+ unit: keys
+ chart_type: line
+ dimensions:
+ - name: a dimension per database
+ - name: redis.connected_replicas
+ description: Connected replicas
+ unit: replicas
+ chart_type: line
+ dimensions:
+ - name: connected
+ - name: redis.master_link_status
+ description: Master link status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: up
+ - name: down
+ - name: redis.master_last_io_since_time
+ description: Time elapsed since the last interaction with master
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: redis.master_link_down_since_time
+ description: Time elapsed since the link between master and slave is down
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: redis.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
diff --git a/src/go/plugin/go.d/modules/redis/redis.go b/src/go/plugin/go.d/modules/redis/redis.go
new file mode 100644
index 000000000..954205e1e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/redis.go
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package redis
+
+import (
+ "context"
+ _ "embed"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/blang/semver/v4"
+ "github.com/go-redis/redis/v8"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("redis", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Redis {
+ return &Redis{
+ Config: Config{
+ Address: "redis://@localhost:6379",
+ Timeout: web.Duration(time.Second),
+ PingSamples: 5,
+ },
+
+ addAOFChartsOnce: &sync.Once{},
+ addReplSlaveChartsOnce: &sync.Once{},
+ pingSummary: metrics.NewSummary(),
+ collectedCommands: make(map[string]bool),
+ collectedDbs: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ Username string `yaml:"username,omitempty" json:"username"`
+ Password string `yaml:"password,omitempty" json:"password"`
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+ PingSamples int `yaml:"ping_samples" json:"ping_samples"`
+}
+
+type (
+ Redis struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+ addAOFChartsOnce *sync.Once
+ addReplSlaveChartsOnce *sync.Once
+
+ rdb redisClient
+
+ server string
+ version *semver.Version
+ pingSummary metrics.Summary
+ collectedCommands map[string]bool
+ collectedDbs map[string]bool
+ }
+ redisClient interface {
+ Info(ctx context.Context, section ...string) *redis.StringCmd
+ Ping(context.Context) *redis.StatusCmd
+ Close() error
+ }
+)
+
+func (r *Redis) Configuration() any {
+ return r.Config
+}
+
+func (r *Redis) Init() error {
+ err := r.validateConfig()
+ if err != nil {
+ r.Errorf("config validation: %v", err)
+ return err
+ }
+
+ rdb, err := r.initRedisClient()
+ if err != nil {
+ r.Errorf("init redis client: %v", err)
+ return err
+ }
+ r.rdb = rdb
+
+ charts, err := r.initCharts()
+ if err != nil {
+ r.Errorf("init charts: %v", err)
+ return err
+ }
+ r.charts = charts
+
+ return nil
+}
+
+func (r *Redis) Check() error {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (r *Redis) Charts() *module.Charts {
+ return r.charts
+}
+
+func (r *Redis) Collect() map[string]int64 {
+ ms, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+
+func (r *Redis) Cleanup() {
+ if r.rdb == nil {
+ return
+ }
+ err := r.rdb.Close()
+ if err != nil {
+ r.Warningf("cleanup: error on closing redis client [%s]: %v", r.Address, err)
+ }
+ r.rdb = nil
+}
diff --git a/src/go/plugin/go.d/modules/redis/redis_test.go b/src/go/plugin/go.d/modules/redis/redis_test.go
new file mode 100644
index 000000000..e295f0f97
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/redis_test.go
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package redis
+
+import (
+ "context"
+ "errors"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/go-redis/redis/v8"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataPikaInfoAll, _ = os.ReadFile("testdata/pika/info_all.txt")
+ dataVer609InfoAll, _ = os.ReadFile("testdata/v6.0.9/info_all.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataPikaInfoAll": dataPikaInfoAll,
+ "dataVer609InfoAll": dataVer609InfoAll,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestRedis_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Redis{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestRedis_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset 'address'": {
+ wantFail: true,
+ config: Config{Address: ""},
+ },
+ "fails on invalid 'address' format": {
+ wantFail: true,
+ config: Config{Address: "127.0.0.1:6379"},
+ },
+ "fails on invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ Address: "redis://127.0.0.1:6379",
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rdb := New()
+ rdb.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, rdb.Init())
+ } else {
+ assert.NoError(t, rdb.Init())
+ }
+ })
+ }
+}
+
+func TestRedis_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) *Redis
+ wantFail bool
+ }{
+ "success on valid response v6.0.9": {
+ prepare: prepareRedisV609,
+ },
+ "fails on error on Info": {
+ wantFail: true,
+ prepare: prepareRedisErrorOnInfo,
+ },
+ "fails on response from not Redis instance": {
+ wantFail: true,
+ prepare: prepareRedisWithPikaMetrics,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rdb := test.prepare(t)
+
+ if test.wantFail {
+ assert.Error(t, rdb.Check())
+ } else {
+ assert.NoError(t, rdb.Check())
+ }
+ })
+ }
+}
+
+func TestRedis_Charts(t *testing.T) {
+ rdb := New()
+ require.NoError(t, rdb.Init())
+
+ assert.NotNil(t, rdb.Charts())
+}
+
+func TestRedis_Cleanup(t *testing.T) {
+ rdb := New()
+ assert.NotPanics(t, rdb.Cleanup)
+
+ require.NoError(t, rdb.Init())
+ m := &mockRedisClient{}
+ rdb.rdb = m
+
+ rdb.Cleanup()
+
+ assert.True(t, m.calledClose)
+}
+
+func TestRedis_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) *Redis
+ wantCollected map[string]int64
+ }{
+ "success on valid response v6.0.9": {
+ prepare: prepareRedisV609,
+ wantCollected: map[string]int64{
+ "active_defrag_hits": 0,
+ "active_defrag_key_hits": 0,
+ "active_defrag_key_misses": 0,
+ "active_defrag_misses": 0,
+ "active_defrag_running": 0,
+ "allocator_active": 1208320,
+ "allocator_allocated": 903408,
+ "allocator_frag_bytes": 304912,
+ "allocator_frag_ratio": 1340,
+ "allocator_resident": 3723264,
+ "allocator_rss_bytes": 2514944,
+ "allocator_rss_ratio": 3080,
+ "aof_base_size": 116,
+ "aof_buffer_length": 0,
+ "aof_current_rewrite_time_sec": -1,
+ "aof_current_size": 294,
+ "aof_delayed_fsync": 0,
+ "aof_enabled": 0,
+ "aof_last_cow_size": 0,
+ "aof_last_rewrite_time_sec": -1,
+ "aof_pending_bio_fsync": 0,
+ "aof_pending_rewrite": 0,
+ "aof_rewrite_buffer_length": 0,
+ "aof_rewrite_in_progress": 0,
+ "aof_rewrite_scheduled": 0,
+ "arch_bits": 64,
+ "blocked_clients": 0,
+ "client_recent_max_input_buffer": 8,
+ "client_recent_max_output_buffer": 0,
+ "clients_in_timeout_table": 0,
+ "cluster_enabled": 0,
+ "cmd_command_calls": 2,
+ "cmd_command_usec": 2182,
+ "cmd_command_usec_per_call": 1091000,
+ "cmd_get_calls": 2,
+ "cmd_get_usec": 29,
+ "cmd_get_usec_per_call": 14500,
+ "cmd_hello_calls": 1,
+ "cmd_hello_usec": 15,
+ "cmd_hello_usec_per_call": 15000,
+ "cmd_hmset_calls": 2,
+ "cmd_hmset_usec": 408,
+ "cmd_hmset_usec_per_call": 204000,
+ "cmd_info_calls": 132,
+ "cmd_info_usec": 37296,
+ "cmd_info_usec_per_call": 282550,
+ "cmd_ping_calls": 19,
+ "cmd_ping_usec": 286,
+ "cmd_ping_usec_per_call": 15050,
+ "cmd_set_calls": 3,
+ "cmd_set_usec": 140,
+ "cmd_set_usec_per_call": 46670,
+ "configured_hz": 10,
+ "connected_clients": 1,
+ "connected_slaves": 0,
+ "db0_expires_keys": 0,
+ "db0_keys": 4,
+ "evicted_keys": 0,
+ "expire_cycle_cpu_milliseconds": 28362,
+ "expired_keys": 0,
+ "expired_stale_perc": 0,
+ "expired_time_cap_reached_count": 0,
+ "hz": 10,
+ "instantaneous_input_kbps": 0,
+ "instantaneous_ops_per_sec": 0,
+ "instantaneous_output_kbps": 0,
+ "io_threaded_reads_processed": 0,
+ "io_threaded_writes_processed": 0,
+ "io_threads_active": 0,
+ "keyspace_hit_rate": 100000,
+ "keyspace_hits": 2,
+ "keyspace_misses": 0,
+ "latest_fork_usec": 810,
+ "lazyfree_pending_objects": 0,
+ "loading": 0,
+ "lru_clock": 13181377,
+ "master_repl_offset": 0,
+ "master_replid2": 0,
+ "maxmemory": 0,
+ "mem_aof_buffer": 0,
+ "mem_clients_normal": 0,
+ "mem_clients_slaves": 0,
+ "mem_fragmentation_bytes": 3185848,
+ "mem_fragmentation_ratio": 4960,
+ "mem_not_counted_for_evict": 0,
+ "mem_replication_backlog": 0,
+ "migrate_cached_sockets": 0,
+ "module_fork_in_progress": 0,
+ "module_fork_last_cow_size": 0,
+ "number_of_cached_scripts": 0,
+ "ping_latency_avg": 0,
+ "ping_latency_count": 5,
+ "ping_latency_max": 0,
+ "ping_latency_min": 0,
+ "ping_latency_sum": 0,
+ "process_id": 1,
+ "pubsub_channels": 0,
+ "pubsub_patterns": 0,
+ "rdb_bgsave_in_progress": 0,
+ "rdb_changes_since_last_save": 0,
+ "rdb_current_bgsave_time_sec": 0,
+ "rdb_last_bgsave_status": 0,
+ "rdb_last_bgsave_time_sec": 0,
+ "rdb_last_cow_size": 290816,
+ "rdb_last_save_time": 56978305,
+ "redis_git_dirty": 0,
+ "redis_git_sha1": 0,
+ "rejected_connections": 0,
+ "repl_backlog_active": 0,
+ "repl_backlog_first_byte_offset": 0,
+ "repl_backlog_histlen": 0,
+ "repl_backlog_size": 1048576,
+ "rss_overhead_bytes": 266240,
+ "rss_overhead_ratio": 1070,
+ "second_repl_offset": -1,
+ "slave_expires_tracked_keys": 0,
+ "sync_full": 0,
+ "sync_partial_err": 0,
+ "sync_partial_ok": 0,
+ "tcp_port": 6379,
+ "total_commands_processed": 161,
+ "total_connections_received": 87,
+ "total_net_input_bytes": 2301,
+ "total_net_output_bytes": 507187,
+ "total_reads_processed": 250,
+ "total_system_memory": 2084032512,
+ "total_writes_processed": 163,
+ "tracking_clients": 0,
+ "tracking_total_items": 0,
+ "tracking_total_keys": 0,
+ "tracking_total_prefixes": 0,
+ "unexpected_error_replies": 0,
+ "uptime_in_days": 2,
+ "uptime_in_seconds": 252812,
+ "used_cpu_sys": 630829,
+ "used_cpu_sys_children": 20,
+ "used_cpu_user": 188394,
+ "used_cpu_user_children": 2,
+ "used_memory": 867160,
+ "used_memory_dataset": 63816,
+ "used_memory_lua": 37888,
+ "used_memory_overhead": 803344,
+ "used_memory_peak": 923360,
+ "used_memory_rss": 3989504,
+ "used_memory_scripts": 0,
+ "used_memory_startup": 803152,
+ },
+ },
+ "fails on error on Info": {
+ prepare: prepareRedisErrorOnInfo,
+ },
+ "fails on response from not Redis instance": {
+ prepare: prepareRedisWithPikaMetrics,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rdb := test.prepare(t)
+
+ ms := rdb.Collect()
+
+ copyTimeRelatedMetrics(ms, test.wantCollected)
+
+ assert.Equal(t, test.wantCollected, ms)
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, rdb, ms)
+ ensureCollectedCommandsAddedToCharts(t, rdb)
+ ensureCollectedDbsAddedToCharts(t, rdb)
+ }
+ })
+ }
+}
+
+func prepareRedisV609(t *testing.T) *Redis {
+ rdb := New()
+ require.NoError(t, rdb.Init())
+ rdb.rdb = &mockRedisClient{
+ result: dataVer609InfoAll,
+ }
+ return rdb
+}
+
+func prepareRedisErrorOnInfo(t *testing.T) *Redis {
+ rdb := New()
+ require.NoError(t, rdb.Init())
+ rdb.rdb = &mockRedisClient{
+ errOnInfo: true,
+ }
+ return rdb
+}
+
+func prepareRedisWithPikaMetrics(t *testing.T) *Redis {
+ rdb := New()
+ require.NoError(t, rdb.Init())
+ rdb.rdb = &mockRedisClient{
+ result: dataPikaInfoAll,
+ }
+ return rdb
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, rdb *Redis, ms map[string]int64) {
+ for _, chart := range *rdb.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := ms[dim.ID]
+ assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := ms[v.ID]
+ assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID)
+ }
+ }
+}
+
+func ensureCollectedCommandsAddedToCharts(t *testing.T, rdb *Redis) {
+ for _, id := range []string{
+ chartCommandsCalls.ID,
+ chartCommandsUsec.ID,
+ chartCommandsUsecPerSec.ID,
+ } {
+ chart := rdb.Charts().Get(id)
+ require.NotNilf(t, chart, "'%s' chart is not in charts", id)
+ assert.Lenf(t, chart.Dims, len(rdb.collectedCommands),
+ "'%s' chart unexpected number of dimensions", id)
+ }
+}
+
+func ensureCollectedDbsAddedToCharts(t *testing.T, rdb *Redis) {
+ for _, id := range []string{
+ chartKeys.ID,
+ chartExpiresKeys.ID,
+ } {
+ chart := rdb.Charts().Get(id)
+ require.NotNilf(t, chart, "'%s' chart is not in charts", id)
+ assert.Lenf(t, chart.Dims, len(rdb.collectedDbs),
+ "'%s' chart unexpected number of dimensions", id)
+ }
+}
+
+func copyTimeRelatedMetrics(dst, src map[string]int64) {
+ for k, v := range src {
+ switch {
+ case k == "rdb_last_save_time",
+ strings.HasPrefix(k, "ping_latency"):
+
+ if _, ok := dst[k]; ok {
+ dst[k] = v
+ }
+ }
+ }
+}
+
+type mockRedisClient struct {
+ errOnInfo bool
+ result []byte
+ calledClose bool
+}
+
+func (m *mockRedisClient) Info(_ context.Context, _ ...string) (cmd *redis.StringCmd) {
+ if m.errOnInfo {
+ cmd = redis.NewStringResult("", errors.New("error on Info"))
+ } else {
+ cmd = redis.NewStringResult(string(m.result), nil)
+ }
+ return cmd
+}
+
+func (m *mockRedisClient) Ping(_ context.Context) (cmd *redis.StatusCmd) {
+ return redis.NewStatusResult("PONG", nil)
+}
+
+func (m *mockRedisClient) Close() error {
+ m.calledClose = true
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/redis/testdata/config.json b/src/go/plugin/go.d/modules/redis/testdata/config.json
new file mode 100644
index 000000000..050cfa3f4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/testdata/config.json
@@ -0,0 +1,12 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "username": "ok",
+ "password": "ok",
+ "ping_samples": 123,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/redis/testdata/config.yaml b/src/go/plugin/go.d/modules/redis/testdata/config.yaml
new file mode 100644
index 000000000..57c5cf7ea
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/testdata/config.yaml
@@ -0,0 +1,10 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+username: "ok"
+password: "ok"
+ping_samples: 123
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/redis/testdata/pika/info_all.txt b/src/go/plugin/go.d/modules/redis/testdata/pika/info_all.txt
new file mode 100644
index 000000000..a2bebf720
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/testdata/pika/info_all.txt
@@ -0,0 +1,67 @@
+$1315
+# Server
+pika_version:3.4.0
+pika_git_sha:bd30511bf82038c2c6531b3d84872c9825fe836a
+pika_build_compile_date: Dec 1 2020
+os:Linux 5.4.39-linuxkit x86_64
+arch_bits:64
+process_id:1
+tcp_port:9221
+thread_num:1
+sync_thread_num:6
+uptime_in_seconds:72089
+uptime_in_days:2
+config_file:/pika/conf/pika.conf
+server_id:1
+
+# Data
+db_size:473558
+db_size_human:0M
+log_size:4272095
+log_size_human:4M
+compression:snappy
+used_memory:8430
+used_memory_human:0M
+db_memtable_usage:8304
+db_tablereader_usage:126
+db_fatal:0
+db_fatal_msg:NULL
+
+# Clients
+connected_clients:1
+
+# Stats
+total_connections_received:14
+instantaneous_ops_per_sec:0
+total_commands_processed:14
+is_bgsaving:No
+is_scaning_keyspace:No
+is_compact:No
+compact_cron:
+compact_interval:
+
+# Command_Exec_Count
+INFO:9
+GET:2
+SET:1
+HGETALL:1
+HMSET:1
+
+# CPU
+used_cpu_sys:3638.63
+used_cpu_user:494.58
+used_cpu_sys_children:0.04
+used_cpu_user_children:0.02
+
+# Replication(MASTER)
+role:master
+connected_slaves:0
+db0 binlog_offset=0 440,safety_purge=none
+
+# Keyspace
+# Time:1970-01-01 08:00:00
+db0 Strings_keys=0, expires=0, invalid_keys=0
+db0 Hashes_keys=0, expires=0, invalid_keys=0
+db0 Lists_keys=0, expires=0, invalid_keys=0
+db0 Zsets_keys=0, expires=0, invalid_keys=0
+db0 Sets_keys=0, expires=0, invalid_keys=0
diff --git a/src/go/plugin/go.d/modules/redis/testdata/v6.0.9/info_all.txt b/src/go/plugin/go.d/modules/redis/testdata/v6.0.9/info_all.txt
new file mode 100644
index 000000000..9f1618982
--- /dev/null
+++ b/src/go/plugin/go.d/modules/redis/testdata/v6.0.9/info_all.txt
@@ -0,0 +1,172 @@
+$4050
+# Server
+redis_version:6.0.9
+redis_git_sha1:00000000
+redis_git_dirty:0
+redis_build_id:12c354e6793cb936
+redis_mode:standalone
+os:Linux 5.4.39-linuxkit x86_64
+arch_bits:64
+multiplexing_api:epoll
+atomicvar_api:atomic-builtin
+gcc_version:8.3.0
+process_id:1
+run_id:5d97fd948bbf6cb68458685fc747f9f9019c3fc4
+tcp_port:6379
+uptime_in_seconds:252812
+uptime_in_days:2
+hz:10
+configured_hz:10
+lru_clock:13181377
+executable:/data/redis-server
+config_file:
+io_threads_active:0
+
+# Clients
+connected_clients:1
+client_recent_max_input_buffer:8
+client_recent_max_output_buffer:0
+blocked_clients:0
+tracking_clients:0
+clients_in_timeout_table:0
+
+# Memory
+used_memory:867160
+used_memory_human:846.84K
+used_memory_rss:3989504
+used_memory_rss_human:3.80M
+used_memory_peak:923360
+used_memory_peak_human:901.72K
+used_memory_peak_perc:93.91%
+used_memory_overhead:803344
+used_memory_startup:803152
+used_memory_dataset:63816
+used_memory_dataset_perc:99.70%
+allocator_allocated:903408
+allocator_active:1208320
+allocator_resident:3723264
+total_system_memory:2084032512
+total_system_memory_human:1.94G
+used_memory_lua:37888
+used_memory_lua_human:37.00K
+used_memory_scripts:0
+used_memory_scripts_human:0B
+number_of_cached_scripts:0
+maxmemory:0
+maxmemory_human:0B
+maxmemory_policy:noeviction
+allocator_frag_ratio:1.34
+allocator_frag_bytes:304912
+allocator_rss_ratio:3.08
+allocator_rss_bytes:2514944
+rss_overhead_ratio:1.07
+rss_overhead_bytes:266240
+mem_fragmentation_ratio:4.96
+mem_fragmentation_bytes:3185848
+mem_not_counted_for_evict:0
+mem_replication_backlog:0
+mem_clients_slaves:0
+mem_clients_normal:0
+mem_aof_buffer:0
+mem_allocator:jemalloc-5.1.0
+active_defrag_running:0
+lazyfree_pending_objects:0
+
+# Persistence
+loading:0
+rdb_changes_since_last_save:0
+rdb_bgsave_in_progress:0
+rdb_last_save_time:1606951667
+rdb_last_bgsave_status:ok
+rdb_last_bgsave_time_sec:0
+rdb_current_bgsave_time_sec:-1
+rdb_last_cow_size:290816
+aof_enabled:0
+aof_rewrite_in_progress:0
+aof_rewrite_scheduled:0
+aof_last_rewrite_time_sec:-1
+aof_current_rewrite_time_sec:-1
+aof_last_bgrewrite_status:ok
+aof_last_write_status:ok
+aof_last_cow_size:0
+module_fork_in_progress:0
+module_fork_last_cow_size:0
+aof_current_size:294
+aof_base_size:116
+aof_pending_rewrite:0
+aof_buffer_length:0
+aof_rewrite_buffer_length:0
+aof_pending_bio_fsync:0
+aof_delayed_fsync:0
+
+# Stats
+total_connections_received:87
+total_commands_processed:161
+instantaneous_ops_per_sec:0
+total_net_input_bytes:2301
+total_net_output_bytes:507187
+instantaneous_input_kbps:0.00
+instantaneous_output_kbps:0.00
+rejected_connections:0
+sync_full:0
+sync_partial_ok:0
+sync_partial_err:0
+expired_keys:0
+expired_stale_perc:0.00
+expired_time_cap_reached_count:0
+expire_cycle_cpu_milliseconds:28362
+evicted_keys:0
+keyspace_hits:2
+keyspace_misses:0
+pubsub_channels:0
+pubsub_patterns:0
+latest_fork_usec:810
+migrate_cached_sockets:0
+slave_expires_tracked_keys:0
+active_defrag_hits:0
+active_defrag_misses:0
+active_defrag_key_hits:0
+active_defrag_key_misses:0
+tracking_total_keys:0
+tracking_total_items:0
+tracking_total_prefixes:0
+unexpected_error_replies:0
+total_reads_processed:250
+total_writes_processed:163
+io_threaded_reads_processed:0
+io_threaded_writes_processed:0
+
+# Replication
+role:master
+connected_slaves:0
+master_replid:3f0ad529c9c59a17834bde8ae85f09f77609ecb1
+master_replid2:0000000000000000000000000000000000000000
+master_repl_offset:0
+second_repl_offset:-1
+repl_backlog_active:0
+repl_backlog_size:1048576
+repl_backlog_first_byte_offset:0
+repl_backlog_histlen:0
+
+# CPU
+used_cpu_sys:630.829091
+used_cpu_user:188.394908
+used_cpu_sys_children:0.020626
+used_cpu_user_children:0.002731
+
+# Modules
+
+# Commandstats
+cmdstat_set:calls=3,usec=140,usec_per_call=46.67
+cmdstat_command:calls=2,usec=2182,usec_per_call=1091.00
+cmdstat_get:calls=2,usec=29,usec_per_call=14.50
+cmdstat_hmset:calls=2,usec=408,usec_per_call=204.00
+cmdstat_hello:calls=1,usec=15,usec_per_call=15.00
+cmdstat_ping:calls=19,usec=286,usec_per_call=15.05
+cmdstat_info:calls=132,usec=37296,usec_per_call=282.55
+
+# Cluster
+cluster_enabled:0
+
+# Keyspace
+db0:keys=4,expires=0,avg_ttl=0
diff --git a/src/go/plugin/go.d/modules/rethinkdb/README.md b/src/go/plugin/go.d/modules/rethinkdb/README.md
new file mode 120000
index 000000000..78ddcfa18
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/README.md
@@ -0,0 +1 @@
+integrations/rethinkdb.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/rethinkdb/charts.go b/src/go/plugin/go.d/modules/rethinkdb/charts.go
new file mode 100644
index 000000000..989a8c1e9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/charts.go
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rethinkdb
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioClusterServersStatsRequest = module.Priority + iota
+ prioClusterClientConnections
+ prioClusterActiveClients
+ prioClusterQueries
+ prioClusterDocuments
+
+ prioServerStatsRequestStatus
+ prioServerClientConnections
+ prioServerActiveClients
+ prioServerQueries
+ prioServerDocuments
+)
+
+var clusterCharts = module.Charts{
+ clusterServersStatsRequestChart.Copy(),
+ clusterClientConnectionsChart.Copy(),
+ clusterActiveClientsChart.Copy(),
+ clusterQueriesChart.Copy(),
+ clusterDocumentsChart.Copy(),
+}
+
+var (
+ clusterServersStatsRequestChart = module.Chart{
+ ID: "cluster_cluster_servers_stats_request",
+ Title: "Cluster Servers Stats Request",
+ Units: "servers",
+ Fam: "servers",
+ Ctx: "rethinkdb.cluster_servers_stats_request",
+ Priority: prioClusterServersStatsRequest,
+ Dims: module.Dims{
+ {ID: "cluster_servers_stats_request_success", Name: "success"},
+ {ID: "cluster_servers_stats_request_timeout", Name: "timeout"},
+ },
+ }
+ clusterClientConnectionsChart = module.Chart{
+ ID: "cluster_client_connections",
+ Title: "Cluster Client Connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "rethinkdb.cluster_client_connections",
+ Priority: prioClusterClientConnections,
+ Dims: module.Dims{
+ {ID: "cluster_client_connections", Name: "connections"},
+ },
+ }
+ clusterActiveClientsChart = module.Chart{
+ ID: "cluster_active_clients",
+ Title: "Cluster Active Clients",
+ Units: "clients",
+ Fam: "clients",
+ Ctx: "rethinkdb.cluster_active_clients",
+ Priority: prioClusterActiveClients,
+ Dims: module.Dims{
+ {ID: "cluster_clients_active", Name: "active"},
+ },
+ }
+ clusterQueriesChart = module.Chart{
+ ID: "cluster_queries",
+ Title: "Cluster Queries",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "rethinkdb.cluster_queries",
+ Priority: prioClusterQueries,
+ Dims: module.Dims{
+ {ID: "cluster_queries_total", Name: "queries", Algo: module.Incremental},
+ },
+ }
+ clusterDocumentsChart = module.Chart{
+ ID: "cluster_documents",
+ Title: "Cluster Documents",
+ Units: "documents/s",
+ Fam: "documents",
+ Ctx: "rethinkdb.cluster_documents",
+ Priority: prioClusterDocuments,
+ Dims: module.Dims{
+ {ID: "cluster_read_docs_total", Name: "read", Algo: module.Incremental},
+ {ID: "cluster_written_docs_total", Name: "written", Mul: -1, Algo: module.Incremental},
+ },
+ }
+)
+
+var serverChartsTmpl = module.Charts{
+ serverStatsRequestStatusChartTmpl.Copy(),
+ serverConnectionsChartTmpl.Copy(),
+ serverActiveClientsChartTmpl.Copy(),
+ serverQueriesChartTmpl.Copy(),
+ serverDocumentsChartTmpl.Copy(),
+}
+
+var (
+ serverStatsRequestStatusChartTmpl = module.Chart{
+ ID: "server_%s_stats_request_status",
+ Title: "Server Stats Request Status",
+ Units: "status",
+ Fam: "srv status",
+ Ctx: "rethinkdb.server_stats_request_status",
+ Priority: prioServerStatsRequestStatus,
+ Dims: module.Dims{
+ {ID: "server_%s_stats_request_status_success", Name: "success"},
+ {ID: "server_%s_stats_request_status_timeout", Name: "timeout"},
+ },
+ }
+ serverConnectionsChartTmpl = module.Chart{
+ ID: "server_%s_client_connections",
+ Title: "Server Client Connections",
+ Units: "connections",
+ Fam: "srv connections",
+ Ctx: "rethinkdb.server_client_connections",
+ Priority: prioServerClientConnections,
+ Dims: module.Dims{
+ {ID: "server_%s_client_connections", Name: "connections"},
+ },
+ }
+ serverActiveClientsChartTmpl = module.Chart{
+ ID: "server_%s_active_clients",
+ Title: "Server Active Clients",
+ Units: "clients",
+ Fam: "srv clients",
+ Ctx: "rethinkdb.server_active_clients",
+ Priority: prioServerActiveClients,
+ Dims: module.Dims{
+ {ID: "server_%s_clients_active", Name: "active"},
+ },
+ }
+ serverQueriesChartTmpl = module.Chart{
+ ID: "server_%s_queries",
+ Title: "Server Queries",
+ Units: "queries/s",
+ Fam: "srv queries",
+ Ctx: "rethinkdb.server_queries",
+ Priority: prioServerQueries,
+ Dims: module.Dims{
+ {ID: "server_%s_queries_total", Name: "queries", Algo: module.Incremental},
+ },
+ }
+ serverDocumentsChartTmpl = module.Chart{
+ ID: "server_%s_documents",
+ Title: "Server Documents",
+ Units: "documents/s",
+ Fam: "srv documents",
+ Ctx: "rethinkdb.server_documents",
+ Priority: prioServerDocuments,
+ Dims: module.Dims{
+ {ID: "server_%s_read_docs_total", Name: "read", Algo: module.Incremental},
+ {ID: "server_%s_written_docs_total", Name: "written", Mul: -1, Algo: module.Incremental},
+ },
+ }
+)
+
+func (r *Rethinkdb) addServerCharts(srvUUID, srvName string) {
+ charts := serverChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, srvUUID)
+ chart.Labels = []module.Label{
+ {Key: "sever_uuid", Value: srvUUID},
+ {Key: "sever_name", Value: srvName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, srvUUID)
+ }
+ }
+
+ if err := r.Charts().Add(*charts...); err != nil {
+ r.Warningf("failed to add chart for '%s' server: %v", srvName, err)
+ }
+}
+
+func (r *Rethinkdb) removeServerCharts(srvUUID string) {
+ px := fmt.Sprintf("server_%s_", srvUUID)
+ for _, chart := range *r.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/rethinkdb/client.go b/src/go/plugin/go.d/modules/rethinkdb/client.go
new file mode 100644
index 000000000..d790d5439
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/client.go
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rethinkdb
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "gopkg.in/rethinkdb/rethinkdb-go.v6"
+)
+
+type rdbConn interface {
+ stats() ([][]byte, error)
+ close() error
+}
+
+func newRethinkdbConn(cfg Config) (rdbConn, error) {
+ sess, err := rethinkdb.Connect(rethinkdb.ConnectOpts{
+ Address: cfg.Address,
+ Username: cfg.Username,
+ Password: cfg.Password,
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ client := &rethinkdbClient{
+ timeout: cfg.Timeout.Duration(),
+ sess: sess,
+ }
+
+ return client, nil
+}
+
+type rethinkdbClient struct {
+ timeout time.Duration
+
+ sess *rethinkdb.Session
+}
+
+func (c *rethinkdbClient) stats() ([][]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), c.timeout)
+ defer cancel()
+
+ opts := rethinkdb.RunOpts{Context: ctx}
+
+ cur, err := rethinkdb.DB("rethinkdb").Table("stats").Run(c.sess, opts)
+ if err != nil {
+ return nil, err
+ }
+
+ if cur.IsNil() {
+ return nil, errors.New("no stats found (cursor is nil)")
+ }
+ defer func() { _ = cur.Close() }()
+
+ var stats [][]byte
+ for {
+ bs, ok := cur.NextResponse()
+ if !ok {
+ break
+ }
+ stats = append(stats, bs)
+ }
+
+ return stats, nil
+}
+
+func (c *rethinkdbClient) close() (err error) {
+ return c.sess.Close()
+}
diff --git a/src/go/plugin/go.d/modules/rethinkdb/collect.go b/src/go/plugin/go.d/modules/rethinkdb/collect.go
new file mode 100644
index 000000000..6c2bc32c9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/collect.go
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rethinkdb
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+type (
+ // https://rethinkdb.com/docs/system-stats/
+ serverStats struct {
+ ID []string `json:"id"`
+ Server string `json:"server"`
+ QueryEngine struct {
+ ClientConnections int64 `json:"client_connections" stm:"client_connections"`
+ ClientsActive int64 `json:"clients_active" stm:"clients_active"`
+ QueriesTotal int64 `json:"queries_total" stm:"queries_total"`
+ ReadDocsTotal int64 `json:"read_docs_total" stm:"read_docs_total"`
+ WrittenDocsTotal int64 `json:"written_docs_total" stm:"written_docs_total"`
+ } `json:"query_engine" stm:""`
+
+ Error string `json:"error"`
+ }
+)
+
+func (r *Rethinkdb) collect() (map[string]int64, error) {
+ if r.rdb == nil {
+ conn, err := r.newConn(r.Config)
+ if err != nil {
+ return nil, err
+ }
+ r.rdb = conn
+ }
+
+ mx := make(map[string]int64)
+
+ if err := r.collectStats(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (r *Rethinkdb) collectStats(mx map[string]int64) error {
+ resp, err := r.rdb.stats()
+ if err != nil {
+ return err
+ }
+
+ if len(resp) == 0 {
+ return errors.New("empty stats response from server")
+ }
+
+ for _, v := range []string{
+ "cluster_servers_stats_request_success",
+ "cluster_servers_stats_request_timeout",
+ "cluster_client_connections",
+ "cluster_clients_active",
+ "cluster_queries_total",
+ "cluster_read_docs_total",
+ "cluster_written_docs_total",
+ } {
+ mx[v] = 0
+ }
+
+ seen := make(map[string]bool)
+
+ for _, bs := range resp[1:] { // skip cluster
+ var srv serverStats
+
+ if err := json.Unmarshal(bs, &srv); err != nil {
+ return fmt.Errorf("invalid stats response: failed to unmarshal server data: %v", err)
+ }
+ if len(srv.ID) == 0 {
+ return errors.New("invalid stats response: empty id")
+ }
+ if srv.ID[0] != "server" {
+ continue
+ }
+ if len(srv.ID) != 2 {
+ return fmt.Errorf("invalid stats response: unexpected server id: '%v'", srv.ID)
+ }
+
+ srvUUID := srv.ID[1]
+
+ seen[srvUUID] = true
+
+ if !r.seenServers[srvUUID] {
+ r.seenServers[srvUUID] = true
+ r.addServerCharts(srvUUID, srv.Server)
+ }
+
+ px := fmt.Sprintf("server_%s_", srv.ID[1]) // uuid
+
+ mx[px+"stats_request_status_success"] = 0
+ mx[px+"stats_request_status_timeout"] = 0
+ if srv.Error != "" {
+ mx["cluster_servers_stats_request_timeout"]++
+ mx[px+"stats_request_status_timeout"] = 1
+ continue
+ }
+ mx["cluster_servers_stats_request_success"]++
+ mx[px+"stats_request_status_success"] = 1
+
+ for k, v := range stm.ToMap(srv.QueryEngine) {
+ mx["cluster_"+k] += v
+ mx[px+k] = v
+ }
+ }
+
+ for k := range r.seenServers {
+ if !seen[k] {
+ delete(r.seenServers, k)
+ r.removeServerCharts(k)
+ }
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/rethinkdb/config_schema.json b/src/go/plugin/go.d/modules/rethinkdb/config_schema.json
new file mode 100644
index 000000000..9a84aeca4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/config_schema.json
@@ -0,0 +1,82 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "RethinkDB collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the RethinkDB service listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:28015"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/rethinkdb/integrations/rethinkdb.md b/src/go/plugin/go.d/modules/rethinkdb/integrations/rethinkdb.md
new file mode 100644
index 000000000..3cc116e40
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/integrations/rethinkdb.md
@@ -0,0 +1,257 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/rethinkdb/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/rethinkdb/metadata.yaml"
+sidebar_label: "RethinkDB"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# RethinkDB
+
+
+<img src="https://netdata.cloud/img/rethinkdb.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: rethinkdb
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector gathers cluster-wide metrics such as server status, client connections, active clients, query rate, and document read/write rates.
+It reports similar metrics for each individual server.
+
+
+The data is gathered by querying the stats table in RethinkDB, which stores real-time statistics related to the cluster and its individual servers.
+
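+For reference, here is a minimal sketch of that query using the same `rethinkdb-go` driver the collector itself relies on (it assumes the default `127.0.0.1:28015` address and empty credentials; adjust for your setup):
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"gopkg.in/rethinkdb/rethinkdb-go.v6"
+)
+
+func main() {
+	// Connect with default (empty) credentials; adjust the address for your setup.
+	sess, err := rethinkdb.Connect(rethinkdb.ConnectOpts{Address: "127.0.0.1:28015"})
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer func() { _ = sess.Close() }()
+
+	// Read every row of the built-in system table rethinkdb.stats.
+	cur, err := rethinkdb.DB("rethinkdb").Table("stats").Run(sess)
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer func() { _ = cur.Close() }()
+
+	var row map[string]any
+	for cur.Next(&row) {
+		// The id field is ["cluster"], ["server", "<uuid>"], ["table", ...], and so on.
+		fmt.Println(row["id"])
+	}
+}
+```
+
+The collector aggregates the `server` rows into cluster-level totals and skips the rest.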
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If no configuration is given, the collector will attempt to connect to a RethinkDB instance at `127.0.0.1:28015`.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per RethinkDB instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| rethinkdb.cluster_servers_stats_request | success, timeout | servers |
+| rethinkdb.cluster_client_connections | connections | connections |
+| rethinkdb.cluster_active_clients | active | clients |
+| rethinkdb.cluster_queries | queries | queries/s |
+| rethinkdb.cluster_documents | read, written | documents/s |
+
+### Per server
+
+These metrics refer to the server (cluster member).
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| server_uuid | Server UUID. |
+| server_name | Server name. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| rethinkdb.server_stats_request_status | success, timeout | status |
+| rethinkdb.server_client_connections | connections | connections |
+| rethinkdb.server_active_clients | active | clients |
+| rethinkdb.server_queries | queries | queries/s |
+| rethinkdb.server_documents | read, written | documents/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/rethinkdb.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/rethinkdb.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | The IP address and port where the RethinkDB service listens for connections. | 127.0.0.1:28015 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
+| username | Username used for authentication. | | no |
+| password | Password used for authentication. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:28015
+
+```
+</details>
+
+##### With authentication
+
+An example configuration with authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:28015
+ username: name
+ password: pass
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:28015
+
+ - name: remote
+ address: 203.0.113.0:28015
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `rethinkdb` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m rethinkdb
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `rethinkdb` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep rethinkdb
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep rethinkdb /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep rethinkdb
+```
+
+
diff --git a/src/go/plugin/go.d/modules/rethinkdb/metadata.yaml b/src/go/plugin/go.d/modules/rethinkdb/metadata.yaml
new file mode 100644
index 000000000..057d71a06
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/metadata.yaml
@@ -0,0 +1,198 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-rethinkdb
+ plugin_name: go.d.plugin
+ module_name: rethinkdb
+ monitored_instance:
+ name: RethinkDB
+ link: https://rethinkdb.com
+ categories:
+ - data-collection.database-servers
+ icon_filename: "rethinkdb.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - rethinkdb
+ - database
+ - db
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector gathers cluster-wide metrics such as server status, client connections, active clients, query rate, and document read/write rates.
+ It reports similar metrics for each individual server.
+ method_description: |
+ The data is gathered by querying the stats table in RethinkDB, which stores real-time statistics related to the cluster and its individual servers.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ If no configuration is given, the collector will attempt to connect to a RethinkDB instance at `127.0.0.1:28015`.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/rethinkdb.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: The IP address and port where the RethinkDB service listens for connections.
+ default_value: 127.0.0.1:28015
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username used for authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password used for authentication.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:28015
+ - name: With authentication
+ description: An example configuration with authentication.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:28015
+ username: name
+ password: pass
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:28015
+
+ - name: remote
+ address: 203.0.113.0:28015
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: rethinkdb.cluster_servers_stats_request
+ description: Cluster Servers Stats Request
+ unit: "servers"
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: timeout
+ - name: rethinkdb.cluster_client_connections
+ description: Cluster Client Connections
+ unit: "connections"
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: rethinkdb.cluster_active_clients
+ description: Cluster Active Clients
+ unit: "clients"
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: rethinkdb.cluster_queries
+ description: Cluster Queries
+ unit: "queries/s"
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: rethinkdb.cluster_documents
+ description: Cluster Documents
+ unit: "documents/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: written
+ - name: server
+ description: "These metrics refer to the server (cluster member)."
+ labels:
+ - name: server_uuid
+ description: Server UUID.
+ - name: server_name
+ description: Server name.
+ metrics:
+ - name: rethinkdb.server_stats_request_status
+ description: Server Stats Request Status
+ unit: "status"
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: timeout
+ - name: rethinkdb.server_client_connections
+ description: Server Client Connections
+ unit: "connections"
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: rethinkdb.server_active_clients
+ description: Server Active Clients
+ unit: "clients"
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: rethinkdb.server_queries
+ description: Server Queries
+ unit: "queries/s"
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: rethinkdb.server_documents
+ description: Server Documents
+ unit: "documents/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: written
diff --git a/src/go/plugin/go.d/modules/rethinkdb/rethinkdb.go b/src/go/plugin/go.d/modules/rethinkdb/rethinkdb.go
new file mode 100644
index 000000000..ccde593de
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/rethinkdb.go
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rethinkdb
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("rethinkdb", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Rethinkdb {
+ return &Rethinkdb{
+ Config: Config{
+ Address: "127.0.0.1:28015",
+ Timeout: web.Duration(time.Second * 1),
+ },
+
+ charts: clusterCharts.Copy(),
+ newConn: newRethinkdbConn,
+ seenServers: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ Username string `yaml:"username,omitempty" json:"username"`
+ Password string `yaml:"password,omitempty" json:"password"`
+}
+
+type (
+ Rethinkdb struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ newConn func(cfg Config) (rdbConn, error)
+ rdb rdbConn
+
+ seenServers map[string]bool
+ }
+)
+
+func (r *Rethinkdb) Configuration() any {
+ return r.Config
+}
+
+func (r *Rethinkdb) Init() error {
+ if r.Address == "" {
+ r.Error("address is not set")
+ return errors.New("address is not set")
+ }
+ return nil
+}
+
+func (r *Rethinkdb) Check() error {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (r *Rethinkdb) Charts() *module.Charts {
+ return r.charts
+}
+
+func (r *Rethinkdb) Collect() map[string]int64 {
+ ms, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+
+func (r *Rethinkdb) Cleanup() {
+ if r.rdb != nil {
+ if err := r.rdb.close(); err != nil {
+ r.Warningf("cleanup: error on closing client [%s]: %v", r.Address, err)
+ }
+ r.rdb = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/rethinkdb/rethinkdb_test.go b/src/go/plugin/go.d/modules/rethinkdb/rethinkdb_test.go
new file mode 100644
index 000000000..f23c49747
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/rethinkdb_test.go
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rethinkdb
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStats, _ = os.ReadFile("testdata/v2.4.4/stats.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataStats": dataStats,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestRethinkdb_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Rethinkdb{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestRethinkdb_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success with default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if address not set": {
+ wantFail: true,
+ config: func() Config {
+ conf := New().Config
+ conf.Address = ""
+ return conf
+ }(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rdb := New()
+ rdb.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, rdb.Init())
+ } else {
+ assert.NoError(t, rdb.Init())
+ }
+ })
+ }
+}
+
+func TestRethinkdb_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Rethinkdb
+ }{
+ "not initialized": {
+ prepare: func() *Rethinkdb {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Rethinkdb {
+ rdb := New()
+ rdb.newConn = func(config Config) (rdbConn, error) {
+ return &mockRethinkdbConn{dataStats: dataStats}, nil
+ }
+ _ = rdb.Check()
+ return rdb
+ },
+ },
+ "after collect": {
+ prepare: func() *Rethinkdb {
+ rdb := New()
+ rdb.newConn = func(config Config) (rdbConn, error) {
+ return &mockRethinkdbConn{dataStats: dataStats}, nil
+ }
+ _ = rdb.Collect()
+ return rdb
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rdb := test.prepare()
+
+ assert.NotPanics(t, rdb.Cleanup)
+ })
+ }
+}
+
+func TestRethinkdb_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Rethinkdb
+ wantFail bool
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: prepareCaseOk,
+ },
+ "fails if error on stats": {
+ wantFail: true,
+ prepare: prepareCaseErrOnStats,
+ },
+ "fails if error on connect": {
+ wantFail: true,
+ prepare: prepareCaseErrOnConnect,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rdb := test.prepare()
+
+ if test.wantFail {
+ assert.Error(t, rdb.Check())
+ } else {
+ assert.NoError(t, rdb.Check())
+ }
+
+ if m, ok := rdb.rdb.(*mockRethinkdbConn); ok {
+ assert.False(t, m.disconnectCalled, "rdb close before cleanup")
+ rdb.Cleanup()
+ assert.True(t, m.disconnectCalled, "rdb close after cleanup")
+ }
+ })
+ }
+}
+
+func TestRethinkdb_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Rethinkdb
+ wantMetrics map[string]int64
+ wantCharts int
+ skipChart func(chart *module.Chart) bool
+ }{
+ "success on valid response": {
+ prepare: prepareCaseOk,
+ wantCharts: len(clusterCharts) + len(serverChartsTmpl)*3,
+ skipChart: func(chart *module.Chart) bool {
+ return strings.HasPrefix(chart.ID, "server_0f74c641-af5f-48d6-a005-35b8983c576a") &&
+ !strings.Contains(chart.ID, "stats_request_status")
+ },
+ wantMetrics: map[string]int64{
+ "cluster_client_connections": 3,
+ "cluster_clients_active": 3,
+ "cluster_queries_total": 27,
+ "cluster_read_docs_total": 3,
+ "cluster_servers_stats_request_success": 2,
+ "cluster_servers_stats_request_timeout": 1,
+ "cluster_written_docs_total": 3,
+ "server_0f74c641-af5f-48d6-a005-35b8983c576a_stats_request_status_success": 0,
+ "server_0f74c641-af5f-48d6-a005-35b8983c576a_stats_request_status_timeout": 1,
+ "server_b7730db2-4303-4719-aef8-2a3c339c672b_client_connections": 1,
+ "server_b7730db2-4303-4719-aef8-2a3c339c672b_clients_active": 1,
+ "server_b7730db2-4303-4719-aef8-2a3c339c672b_queries_total": 13,
+ "server_b7730db2-4303-4719-aef8-2a3c339c672b_read_docs_total": 1,
+ "server_b7730db2-4303-4719-aef8-2a3c339c672b_stats_request_status_success": 1,
+ "server_b7730db2-4303-4719-aef8-2a3c339c672b_stats_request_status_timeout": 0,
+ "server_b7730db2-4303-4719-aef8-2a3c339c672b_written_docs_total": 1,
+ "server_f325e3c3-22d9-4005-b4b2-1f561d384edc_client_connections": 2,
+ "server_f325e3c3-22d9-4005-b4b2-1f561d384edc_clients_active": 2,
+ "server_f325e3c3-22d9-4005-b4b2-1f561d384edc_queries_total": 14,
+ "server_f325e3c3-22d9-4005-b4b2-1f561d384edc_read_docs_total": 2,
+ "server_f325e3c3-22d9-4005-b4b2-1f561d384edc_stats_request_status_success": 1,
+ "server_f325e3c3-22d9-4005-b4b2-1f561d384edc_stats_request_status_timeout": 0,
+ "server_f325e3c3-22d9-4005-b4b2-1f561d384edc_written_docs_total": 2,
+ },
+ },
+ "fails if error on stats": {
+ wantCharts: len(clusterCharts),
+ prepare: prepareCaseErrOnStats,
+ },
+ "fails if error on connect": {
+ wantCharts: len(clusterCharts),
+ prepare: prepareCaseErrOnConnect,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rdb := test.prepare()
+
+ require.NoError(t, rdb.Init())
+
+ mx := rdb.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ assert.Equal(t, test.wantCharts, len(*rdb.Charts()))
+
+ if len(test.wantMetrics) > 0 {
+ module.TestMetricsHasAllChartsDimsSkip(t, rdb.Charts(), mx, test.skipChart)
+ }
+
+ if m, ok := rdb.rdb.(*mockRethinkdbConn); ok {
+ assert.False(t, m.disconnectCalled, "rdb close before cleanup")
+ rdb.Cleanup()
+ assert.True(t, m.disconnectCalled, "rdb close after cleanup")
+ }
+ })
+ }
+}
+
+func prepareCaseOk() *Rethinkdb {
+ rdb := New()
+ rdb.newConn = func(cfg Config) (rdbConn, error) {
+ return &mockRethinkdbConn{dataStats: dataStats}, nil
+ }
+ return rdb
+}
+
+func prepareCaseErrOnStats() *Rethinkdb {
+ rdb := New()
+ rdb.newConn = func(cfg Config) (rdbConn, error) {
+ return &mockRethinkdbConn{errOnStats: true}, nil
+ }
+ return rdb
+}
+
+func prepareCaseErrOnConnect() *Rethinkdb {
+ rdb := New()
+ rdb.newConn = func(cfg Config) (rdbConn, error) {
+ return nil, errors.New("mock failed to connect")
+ }
+ return rdb
+}
+
+type mockRethinkdbConn struct {
+ dataStats []byte
+ errOnStats bool
+ disconnectCalled bool
+}
+
+func (m *mockRethinkdbConn) stats() ([][]byte, error) {
+ if m.errOnStats {
+ return nil, fmt.Errorf("mock.stats() error")
+ }
+ return bytes.Split(bytes.TrimSpace(m.dataStats), []byte("\n")), nil
+}
+
+func (m *mockRethinkdbConn) close() error {
+ m.disconnectCalled = true
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/rethinkdb/testdata/config.json b/src/go/plugin/go.d/modules/rethinkdb/testdata/config.json
new file mode 100644
index 000000000..47f755ea4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/testdata/config.json
@@ -0,0 +1,7 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "username": "ok",
+ "password": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/rethinkdb/testdata/config.yaml b/src/go/plugin/go.d/modules/rethinkdb/testdata/config.yaml
new file mode 100644
index 000000000..6857aae7c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+username: "ok"
+password: "ok"
diff --git a/src/go/plugin/go.d/modules/rethinkdb/testdata/v2.4.4/stats.txt b/src/go/plugin/go.d/modules/rethinkdb/testdata/v2.4.4/stats.txt
new file mode 100644
index 000000000..0d3ab6a62
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rethinkdb/testdata/v2.4.4/stats.txt
@@ -0,0 +1,4 @@
+{"id":["cluster"],"query_engine":{"client_connections":1,"clients_active":1,"queries_per_sec":1,"read_docs_per_sec":0,"written_docs_per_sec":0}}
+{"id":["server","b7730db2-4303-4719-aef8-2a3c339c672b"],"query_engine":{"client_connections":1,"clients_active":1,"queries_per_sec":1,"queries_total":13,"read_docs_per_sec":0,"read_docs_total":1,"written_docs_per_sec":0,"written_docs_total":1},"server":"some_hostname_182"}
+{"id":["server","f325e3c3-22d9-4005-b4b2-1f561d384edc"],"query_engine":{"client_connections":2,"clients_active":2,"queries_per_sec":1,"queries_total":14,"read_docs_per_sec":0,"read_docs_total":2,"written_docs_per_sec":0,"written_docs_total":2},"server":"pve_deb_work_183"}
+{"id":["server","0f74c641-af5f-48d6-a005-35b8983c576a"],"server":"pve_deb_work_184","error":"Timed out. Unable to retrieve stats."}
diff --git a/src/go/plugin/go.d/modules/riakkv/README.md b/src/go/plugin/go.d/modules/riakkv/README.md
new file mode 120000
index 000000000..963843756
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/README.md
@@ -0,0 +1 @@
+integrations/riak_kv.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/riakkv/charts.go b/src/go/plugin/go.d/modules/riakkv/charts.go
new file mode 100644
index 000000000..345f01d69
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/charts.go
@@ -0,0 +1,461 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package riakkv
+
+import (
+ "slices"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioKvNodeOperations = module.Priority + iota
+ prioDtVnodeUpdates
+ prioSearchQueries
+ prioSearchDocuments
+ prioConsistentOperations
+
+ prioKvLatencyGet
+ prioKvLatencyPut
+ prioDtLatencyCounter
+ prioDtLatencySet
+ prioDtLatencyMap
+ prioSearchLatencyQuery
+ prioSearchLatencyIndex
+ prioConsistentLatencyGet
+ prioConsistentLatencyPut
+
+ prioVmProcessesCount
+ prioVmProcessesMemory
+
+ prioKvSiblingsEncounteredGet
+ prioKvObjSizeGet
+ prioSearchVnodeqSize
+ prioSearchIndexErrors
+ prioCorePbc
+ prioCoreRepairs
+ prioCoreFsmActive
+ prioCoreFsmREjected
+)
+
+var charts = module.Charts{
+ kvNodeOperationsChart.Copy(),
+ dtVnodeUpdatesChart.Copy(),
+ searchQueriesChart.Copy(),
+ searchDocumentsChart.Copy(),
+ consistentOperationsChart.Copy(),
+
+ kvLatencyGetChart.Copy(),
+ kvLatencyPutChart.Copy(),
+ dtLatencyCounterChart.Copy(),
+ dtLatencySetChart.Copy(),
+ dtLatencyMapChart.Copy(),
+ searchLatencyQueryChart.Copy(),
+ searchLatencyIndexChart.Copy(),
+ consistentLatencyGetChart.Copy(),
+ consistentLatencyPutChart.Copy(),
+
+ vmProcessesCountChart.Copy(),
+ vmProcessesMemoryChart.Copy(),
+
+ kvSiblingsEncounteredGetChart.Copy(),
+ kvObjectSizeGetChart.Copy(),
+ searchVnodeqSizeChart.Copy(),
+ searchIndexErrorsChart.Copy(),
+ corePbsChart.Copy(),
+ coreRepairsChart.Copy(),
+ coreFsmActiveChart.Copy(),
+ coreFsmRejectedChart.Copy(),
+}
+
+/*
+Throughput metrics
+https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#throughput-metrics
+
+Riak exposes these as cumulative totals; the charts render them as per-second rates (Algo: Incremental).
+*/
+var (
+ kvNodeOperationsChart = module.Chart{
+ ID: "kv_node_operations",
+ Title: "Reads & writes coordinated by this node",
+ Units: "operations/s",
+ Fam: "throughput",
+ Ctx: "riak.kv.throughput",
+ Priority: prioKvNodeOperations,
+ Dims: module.Dims{
+ {ID: "node_gets_total", Name: "gets", Algo: module.Incremental},
+ {ID: "node_puts_total", Name: "puts", Algo: module.Incremental},
+ },
+ }
+ dtVnodeUpdatesChart = module.Chart{
+ ID: "dt_vnode_updates",
+ Title: "Update operations coordinated by local vnodes by data type",
+ Units: "operations/s",
+ Fam: "throughput",
+ Ctx: "riak.dt.vnode_updates",
+ Priority: prioDtVnodeUpdates,
+ Dims: module.Dims{
+ {ID: "vnode_counter_update_total", Name: "counters", Algo: module.Incremental},
+ {ID: "vnode_set_update_total", Name: "sets", Algo: module.Incremental},
+ {ID: "vnode_map_update_total", Name: "maps", Algo: module.Incremental},
+ },
+ }
+ searchQueriesChart = module.Chart{
+ ID: "dt_vnode_updates",
+ Title: "Search queries on the node",
+ Units: "queries/s",
+ Fam: "throughput",
+ Ctx: "riak.search",
+ Priority: prioSearchQueries,
+ Dims: module.Dims{
+ {ID: "search_query_throughput_count", Name: "queries", Algo: module.Incremental},
+ },
+ }
+ searchDocumentsChart = module.Chart{
+ ID: "search_documents",
+ Title: "Documents indexed by search",
+ Units: "documents/s",
+ Fam: "throughput",
+ Ctx: "riak.search.documents",
+ Priority: prioSearchDocuments,
+ Dims: module.Dims{
+ {ID: "search_index_throughput_count", Name: "indexed", Algo: module.Incremental},
+ },
+ }
+ consistentOperationsChart = module.Chart{
+ ID: "consistent_operations",
+ Title: "Consistent node operations",
+ Units: "operations/s",
+ Fam: "throughput",
+ Ctx: "riak.consistent.operations",
+ Priority: prioConsistentOperations,
+ Dims: module.Dims{
+ {ID: "consistent_gets_total", Name: "gets", Algo: module.Incremental},
+ {ID: "consistent_puts_total", Name: "puts", Algo: module.Incremental},
+ },
+ }
+)
+
+/*
+Latency metrics
+https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#latency-metrics
+
+Riak reports these for the past minute in microseconds;
+the charts convert them to milliseconds (Div: 1000).
+*/
+var (
+ kvLatencyGetChart = module.Chart{
+ ID: "kv_latency_get",
+ Title: "Time between reception of a client GET request and subsequent response to client",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "riak.kv.latency.get",
+ Priority: prioKvLatencyGet,
+ Dims: module.Dims{
+ {ID: "node_get_fsm_time_mean", Name: "mean", Div: 1000},
+ {ID: "node_get_fsm_time_median", Name: "median", Div: 1000},
+ {ID: "node_get_fsm_time_95", Name: "95", Div: 1000},
+ {ID: "node_get_fsm_time_99", Name: "99", Div: 1000},
+ {ID: "node_get_fsm_time_100", Name: "100", Div: 1000},
+ },
+ }
+ kvLatencyPutChart = module.Chart{
+ ID: "kv_latency_put",
+ Title: "Time between reception of a client PUT request and subsequent response to client",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "riak.kv.latency.put",
+ Priority: prioKvLatencyPut,
+ Dims: module.Dims{
+ {ID: "node_put_fsm_time_mean", Name: "mean", Div: 1000},
+ {ID: "node_put_fsm_time_median", Name: "median", Div: 1000},
+ {ID: "node_put_fsm_time_95", Name: "95", Div: 1000},
+ {ID: "node_put_fsm_time_99", Name: "99", Div: 1000},
+ {ID: "node_put_fsm_time_100", Name: "100", Div: 1000},
+ },
+ }
+ dtLatencyCounterChart = module.Chart{
+ ID: "dt_latency_counter",
+ Title: "Time it takes to perform an Update Counter operation",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "riak.dt.latency.counter_merge",
+ Priority: prioDtLatencyCounter,
+ Dims: module.Dims{
+ {ID: "object_counter_merge_time_mean", Name: "mean", Div: 1000},
+ {ID: "object_counter_merge_time_median", Name: "median", Div: 1000},
+ {ID: "object_counter_merge_time_95", Name: "95", Div: 1000},
+ {ID: "object_counter_merge_time_99", Name: "99", Div: 1000},
+ {ID: "object_counter_merge_time_100", Name: "100", Div: 1000},
+ },
+ }
+ dtLatencySetChart = module.Chart{
+ ID: "dt_latency_counter",
+ Title: "Time it takes to perform an Update Set operation",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "riak.dt.latency.set_merge",
+ Priority: prioDtLatencySet,
+ Dims: module.Dims{
+ {ID: "object_set_merge_time_mean", Name: "mean", Div: 1000},
+ {ID: "object_set_merge_time_median", Name: "median", Div: 1000},
+ {ID: "object_set_merge_time_95", Name: "95", Div: 1000},
+ {ID: "object_set_merge_time_99", Name: "99", Div: 1000},
+ {ID: "object_set_merge_time_100", Name: "100", Div: 1000},
+ },
+ }
+ dtLatencyMapChart = module.Chart{
+ ID: "dt_latency_map",
+ Title: "Time it takes to perform an Update Map operation",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "riak.dt.latency.map_merge",
+ Priority: prioDtLatencyMap,
+ Dims: module.Dims{
+ {ID: "object_map_merge_time_mean", Name: "mean", Div: 1000},
+ {ID: "object_map_merge_time_median", Name: "median", Div: 1000},
+ {ID: "object_map_merge_time_95", Name: "95", Div: 1000},
+ {ID: "object_map_merge_time_99", Name: "99", Div: 1000},
+ {ID: "object_map_merge_time_100", Name: "100", Div: 1000},
+ },
+ }
+ searchLatencyQueryChart = module.Chart{
+ ID: "search_latency_query",
+ Title: "Search query latency",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "riak.search.latency.query",
+ Priority: prioSearchLatencyQuery,
+ Dims: module.Dims{
+ {ID: "search_query_latency_median", Name: "median", Div: 1000},
+ {ID: "search_query_latency_min", Name: "min", Div: 1000},
+ {ID: "search_query_latency_95", Name: "95", Div: 1000},
+ {ID: "search_query_latency_99", Name: "99", Div: 1000},
+ {ID: "search_query_latency_999", Name: "999", Div: 1000},
+ {ID: "search_query_latency_max", Name: "max", Div: 1000},
+ },
+ }
+ searchLatencyIndexChart = module.Chart{
+ ID: "search_latency_index",
+ Title: "Time it takes Search to index a new document",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "riak.search.latency.index",
+ Priority: prioSearchLatencyIndex,
+ Dims: module.Dims{
+ {ID: "search_index_latency_median", Name: "median", Div: 1000},
+ {ID: "search_index_latency_min", Name: "min", Div: 1000},
+ {ID: "search_index_latency_95", Name: "95", Div: 1000},
+ {ID: "search_index_latency_99", Name: "99", Div: 1000},
+ {ID: "search_index_latency_999", Name: "999", Div: 1000},
+ {ID: "search_index_latency_max", Name: "max", Div: 1000},
+ },
+ }
+ consistentLatencyGetChart = module.Chart{
+ ID: "consistent_latency_get",
+ Title: "Strongly consistent read latency",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "riak.consistent.latency.get",
+ Priority: prioConsistentLatencyGet,
+ Dims: module.Dims{
+ {ID: "consistent_get_time_mean", Name: "mean", Div: 1000},
+ {ID: "consistent_get_time_median", Name: "median", Div: 1000},
+ {ID: "consistent_get_time_95", Name: "95", Div: 1000},
+ {ID: "consistent_get_time_99", Name: "99", Div: 1000},
+ {ID: "consistent_get_time_100", Name: "100", Div: 1000},
+ },
+ }
+ consistentLatencyPutChart = module.Chart{
+ ID: "consistent_latency_put",
+ Title: "Strongly consistent write latency",
+ Units: "ms",
+ Fam: "latency",
+ Ctx: "riak.consistent.latency.put",
+ Priority: prioConsistentLatencyPut,
+ Dims: module.Dims{
+ {ID: "consistent_put_time_mean", Name: "mean", Div: 1000},
+ {ID: "consistent_put_time_median", Name: "median", Div: 1000},
+ {ID: "consistent_put_time_95", Name: "95", Div: 1000},
+ {ID: "consistent_put_time_99", Name: "99", Div: 1000},
+ {ID: "consistent_put_time_100", Name: "100", Div: 1000},
+ },
+ }
+)
+
+/*
+Erlang's resource usage metrics
+https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#erlang-resource-usage-metrics
+
+Processes are collected as a gauge.
+Memory is reported by Riak in bytes and charted as bytes.
+*/
+var (
+ vmProcessesCountChart = module.Chart{
+ ID: "vm_processes",
+ Title: "Total processes running in the Erlang VM",
+ Units: "processes",
+ Fam: "vm",
+ Ctx: "riak.vm.processes.count",
+ Priority: prioVmProcessesCount,
+ Dims: module.Dims{
+ {ID: "sys_processes", Name: "processes"},
+ },
+ }
+ vmProcessesMemoryChart = module.Chart{
+ ID: "vm_processes",
+ Title: "Memory allocated & used by Erlang processes",
+ Units: "bytes",
+ Fam: "vm",
+ Ctx: "riak.vm.processes.memory",
+ Priority: prioVmProcessesMemory,
+ Dims: module.Dims{
+ {ID: "memory_processes", Name: "allocated"},
+ {ID: "memory_processes_used", Name: "used"},
+ },
+ }
+)
+
+/*
+General Riak Load / Health metrics
+https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-load-health-metrics
+*/
+var (
+ // Collected by Riak over the past minute
+
+ kvSiblingsEncounteredGetChart = module.Chart{
+ ID: "kv_siblings_encountered_get",
+ Title: "Siblings encountered during GET operations by this node during the past minute",
+ Units: "siblings",
+ Fam: "load",
+ Ctx: "riak.kv.siblings_encountered.get",
+ Priority: prioKvSiblingsEncounteredGet,
+ Dims: module.Dims{
+ {ID: "node_get_fsm_siblings_mean", Name: "mean"},
+ {ID: "node_get_fsm_siblings_median", Name: "median"},
+ {ID: "node_get_fsm_siblings_95", Name: "95"},
+ {ID: "node_get_fsm_siblings_99", Name: "99"},
+ {ID: "node_get_fsm_siblings_100", Name: "100"},
+ },
+ }
+ kvObjectSizeGetChart = module.Chart{
+ ID: "kv_siblings_encountered_get",
+ Title: "Object size encountered by this node during the past minute",
+ Units: "bytes",
+ Fam: "load",
+ Ctx: "riak.kv.objsize.get",
+ Priority: prioKvObjSizeGet,
+ Dims: module.Dims{
+ {ID: "node_get_fsm_objsize_mean", Name: "mean"},
+ {ID: "node_get_fsm_objsize_median", Name: "median"},
+ {ID: "node_get_fsm_objsize_95", Name: "95"},
+ {ID: "node_get_fsm_objsize_99", Name: "99"},
+ {ID: "node_get_fsm_objsize_100", Name: "100"},
+ },
+ }
+ searchVnodeqSizeChart = module.Chart{
+ ID: "kv_siblings_encountered_get",
+ Title: "Unprocessed messages in the vnode message queues of Search in the past minute",
+ Units: "messages",
+ Fam: "load",
+ Ctx: "riak.search.vnodeq_size",
+ Priority: prioSearchVnodeqSize,
+ Dims: module.Dims{
+ {ID: "riak_search_vnodeq_mean", Name: "mean"},
+ {ID: "riak_search_vnodeq_median", Name: "median"},
+ {ID: "riak_search_vnodeq_95", Name: "95"},
+ {ID: "riak_search_vnodeq_99", Name: "99"},
+ {ID: "riak_search_vnodeq_100", Name: "100"},
+ },
+ }
+
+ // General Riak Search Load / Health metrics
+ // https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-search-load-health-metrics
+ // Reported as counters.
+
+ searchIndexErrorsChart = module.Chart{
+ ID: "search_index_errors",
+ Title: "Errors encountered by Search",
+ Units: "errors",
+ Fam: "load",
+ Ctx: "riak.search.index.errors",
+ Priority: prioSearchIndexErrors,
+ Dims: module.Dims{
+ {ID: "search_index_fail_count", Name: "index_fail"},
+ {ID: "search_index_bad_entry_count", Name: "bad_entry"},
+ {ID: "search_index_extract_fail_count", Name: "extract_fail"},
+ },
+ }
+ corePbsChart = module.Chart{
+ ID: "core_pbc",
+ Title: "Protocol buffer connections by status",
+ Units: "connections",
+ Fam: "load",
+ Ctx: "riak.core.protobuf_connections",
+ Priority: prioCorePbc,
+ Dims: module.Dims{
+ {ID: "pbc_active", Name: "active"},
+ },
+ }
+ coreRepairsChart = module.Chart{
+ ID: "core_repairs",
+ Title: "Number of repair operations this node has coordinated",
+ Units: "repairs",
+ Fam: "load",
+ Ctx: "riak.core.protobuf_connections",
+ Priority: prioCoreRepairs,
+ Dims: module.Dims{
+ {ID: "read_repairs", Name: "read"},
+ },
+ }
+ coreFsmActiveChart = module.Chart{
+ ID: "core_fsm_active",
+ Title: "Active finite state machines by kind",
+ Units: "fsms",
+ Fam: "load",
+ Ctx: "riak.core.fsm_active",
+ Priority: prioCoreFsmActive,
+ Dims: module.Dims{
+ {ID: "node_get_fsm_active", Name: "get"},
+ {ID: "node_put_fsm_active", Name: "put"},
+ {ID: "index_fsm_active", Name: "secondary_index"},
+ {ID: "list_fsm_active", Name: "list_keys"},
+ },
+ }
+ coreFsmRejectedChart = module.Chart{
+ ID: "core_fsm_rejected",
+ Title: "Finite state machines being rejected by Sidejobs overload protection",
+ Units: "fsms",
+ Fam: "load",
+ Ctx: "riak.core.fsm_rejected",
+ Priority: prioCoreFsmRejected,
+ Dims: module.Dims{
+ {ID: "node_get_fsm_rejected", Name: "get"},
+ {ID: "node_put_fsm_rejected", Name: "put"},
+ },
+ }
+)
+
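+// adjustCharts prunes the job's charts after the first successful collection:
+// dimensions whose metric IDs are missing from the collected sample are removed,
+// and charts left without dimensions are dropped, so only stats reported by this
+// Riak node are charted.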
+func (r *RiakKv) adjustCharts(mx map[string]int64) {
+ var i int
+ for _, chart := range *r.Charts() {
+ chart.Dims = slices.DeleteFunc(chart.Dims, func(dim *module.Dim) bool {
+ _, ok := mx[dim.ID]
+ if !ok {
+ r.Debugf("removing dimension '%s' from chart '%s': metric not found", dim.ID, chart.ID)
+ }
+ return !ok
+ })
+
+ if len(chart.Dims) == 0 {
+ r.Debugf("removing chart '%s': no metrics found", chart.ID)
+ continue
+ }
+
+ (*r.Charts())[i] = chart
+ i++
+ }
+
+ *r.Charts() = (*r.Charts())[:i]
+}
diff --git a/src/go/plugin/go.d/modules/riakkv/collect.go b/src/go/plugin/go.d/modules/riakkv/collect.go
new file mode 100644
index 000000000..0b3be9438
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/collect.go
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package riakkv
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
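+// collect fetches the /stats endpoint, flattens the decoded payload into a
+// metric map via the stm tags on riakStats, and on the first successful run
+// prunes charts for stats this node does not report.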
+func (r *RiakKv) collect() (map[string]int64, error) {
+ stats, err := r.getStats()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := stm.ToMap(stats)
+
+ if len(mx) == 0 {
+ return nil, errors.New("no stats")
+ }
+
+ r.once.Do(func() { r.adjustCharts(mx) })
+
+ return mx, nil
+}
+
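+// getStats builds the HTTP request from the job configuration and decodes
+// the /stats response into riakStats.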
+func (r *RiakKv) getStats() (*riakStats, error) {
+ req, err := web.NewHTTPRequest(r.Request)
+ if err != nil {
+ return nil, err
+ }
+
+ var stats riakStats
+ if err := r.doOKDecode(req, &stats); err != nil {
+ return nil, err
+ }
+
+ return &stats, nil
+}
+
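+// doOKDecode executes the request, requires a 200 OK status (a 404 typically
+// means riak_kv_stat is not enabled) and JSON-decodes the response body into in.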
+func (r *RiakKv) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := r.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ msg := fmt.Sprintf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ if resp.StatusCode == http.StatusNotFound {
+ msg = fmt.Sprintf("%s (riak_kv_stat is not enabled)", msg)
+ }
+ return errors.New(msg)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+
+ return nil
+}
+
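+// closeBody drains and closes the response body so the underlying connection
+// can be reused by the HTTP client's keep-alive pool.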
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
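For context on the collect() flow above, here is a minimal sketch (not part of this patch) of how `stm.ToMap` turns a stats struct with `stm` tags into the flat `map[string]int64` the charts consume. The field names and values are illustrative only, and it assumes nil pointer fields are simply skipped, which is what later lets `adjustCharts()` drop dimensions this node never reports.

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
)

// illustrative subset of riakStats; the real field set lives in stats.go
type sampleStats struct {
	NodeGetsTotal *int64 `stm:"node_gets_total"`
	NodePutsTotal *int64 `stm:"node_puts_total"` // left nil below: assumed to be omitted from the map
}

func main() {
	gets := int64(422626)
	s := sampleStats{NodeGetsTotal: &gets}

	mx := stm.ToMap(&s)
	fmt.Println(mx) // expected, under the assumptions above: map[node_gets_total:422626]
}
```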
diff --git a/src/go/plugin/go.d/modules/riakkv/config_schema.json b/src/go/plugin/go.d/modules/riakkv/config_schema.json
new file mode 100644
index 000000000..402c2c106
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/config_schema.json
@@ -0,0 +1,186 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "RiakKV collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 2
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the RiakKV [Stat](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html) endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:8098/stats",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "update_every": {
+ "ui:help": "Riak updates metrics on the `/stats` endpoint every second. To ensure accurate data representation, a polling interval of 2 seconds or more is suggested."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/riakkv/integrations/riak_kv.md b/src/go/plugin/go.d/modules/riakkv/integrations/riak_kv.md
new file mode 100644
index 000000000..872736277
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/integrations/riak_kv.md
@@ -0,0 +1,283 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/riakkv/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/riakkv/metadata.yaml"
+sidebar_label: "Riak KV"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Riak KV
+
+
+<img src="https://netdata.cloud/img/riak.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: riakkv
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors RiakKV metrics about throughput, latency, resources and more.
+
+
+It sends HTTP requests to the Riak [/stats](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html) endpoint.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Riak instances running on localhost that are listening on port 8098.
+On startup, it tries to collect metrics from:
+
+- http://127.0.0.1:8098/stats
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Riak KV instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| riak.kv.throughput | gets, puts | operations/s |
+| riak.dt.vnode_updates | counters, sets, maps | operations/s |
+| riak.search | queries | queries/s |
+| riak.search.documents | indexed | documents/s |
+| riak.consistent.operations | gets, puts | operations/s |
+| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |
+| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |
+| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |
+| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |
+| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |
+| riak.search.latency.query | median, min, 95, 99, 999, max | ms |
+| riak.search.latency.index | median, min, 95, 99, 999, max | ms |
+| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |
+| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |
+| riak.vm | processes | total |
+| riak.vm.memory.processes | allocated, used | MB |
+| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |
+| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |
+| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |
+| riak.search.index | index_fail, bad_entry, extract_fail | errors |
+| riak.core.protobuf_connections | active | connections |
+| riak.core.repairs | read | repairs |
+| riak.core.fsm_active | get, put, secondary index, list keys | fsms |
+| riak.core.fsm_rejected | get, put | fsms |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable /stats endpoint
+
+See the RiakKV [configuration reference](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/riakkv.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/riakkv.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8098/stats | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+With enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+
+ - name: remote
+ url: http://192.0.2.1:8098/stats
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `riakkv` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m riakkv
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `riakkv` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep riakkv
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
+
+```bash
+grep riakkv /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep riakkv
+```
+
+
diff --git a/src/go/plugin/go.d/modules/riakkv/metadata.yaml b/src/go/plugin/go.d/modules/riakkv/metadata.yaml
new file mode 100644
index 000000000..435cc4f9b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/metadata.yaml
@@ -0,0 +1,390 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-riakkv
+ plugin_name: go.d.plugin
+ module_name: riakkv
+ monitored_instance:
+ name: Riak KV
+ link: https://riak.com/products/riak-kv/index.html
+ categories:
+ - data-collection.database-servers
+ icon_filename: "riak.svg"
+ related_resources:
+ integrations:
+ list: []
+ alternative_monitored_instances: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - database
+ - nosql
+ - big data
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors RiakKV metrics about throughput, latency, resources and more.
+ method_description: |
+ It sends HTTP requests to the Riak [/stats](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html) endpoint.
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Riak instances running on localhost that are listening on port 8098.
+ On startup, it tries to collect metrics from:
+
+ - http://127.0.0.1:8098/stats
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list:
+ - title: Enable /stats endpoint
+ description: |
+ See the RiakKV [configuration reference](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html).
+ configuration:
+ file:
+ name: go.d/riakkv.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8098/stats
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: With enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+
+ - name: remote
+ url: http://192.0.2.1:8098/stats
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: riak.kv.throughput
+ description: Reads & writes coordinated by this node
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: gets
+ - name: puts
+ - name: riak.dt.vnode_updates
+ description: Update operations coordinated by local vnodes by data type
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: counters
+ - name: sets
+ - name: maps
+ - name: riak.search
+ description: Search queries on the node
+ unit: "queries/s"
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: riak.search.documents
+ description: Documents indexed by search
+ unit: "documents/s"
+ chart_type: line
+ dimensions:
+ - name: indexed
+ - name: riak.consistent.operations
+ description: Consistent node operations
+ unit: "operations/s"
+ chart_type: line
+ dimensions:
+ - name: gets
+ - name: puts
+ - name: riak.kv.latency.get
+ description: Time between reception of a client GET request and subsequent response to client
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.kv.latency.put
+ description: Time between reception of a client PUT request and subsequent response to client
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.dt.latency.counter_merge
+ description: Time it takes to perform an Update Counter operation
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.dt.latency.set_merge
+ description: Time it takes to perform an Update Set operation
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.dt.latency.map_merge
+ description: Time it takes to perform an Update Map operation
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.search.latency.query
+ description: Search query latency
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: median
+ - name: min
+ - name: "95"
+ - name: "99"
+ - name: "999"
+ - name: max
+ - name: riak.search.latency.index
+ description: Time it takes Search to index a new document
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: median
+ - name: min
+ - name: "95"
+ - name: "99"
+ - name: "999"
+ - name: max
+ - name: riak.consistent.latency.get
+ description: Strongly consistent read latency
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.consistent.latency.put
+ description: Strongly consistent write latency
+ unit: "ms"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.vm
+ description: Total processes running in the Erlang VM
+ unit: "total"
+ chart_type: line
+ dimensions:
+ - name: processes
+ - name: riak.vm.memory.processes
+ description: Memory allocated & used by Erlang processes
+ unit: "MB"
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: used
+ - name: riak.kv.siblings_encountered.get
+ description: Number of siblings encountered during GET operations by this node during the past minute
+ unit: "siblings"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.kv.objsize.get
+ description: Object size encountered by this node during the past minute
+ unit: "KB"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.search.vnodeq_size
+ description: Number of unprocessed messages in the vnode message queues of Search on this node in the past minute
+ unit: "messages"
+ chart_type: line
+ dimensions:
+ - name: mean
+ - name: median
+ - name: "95"
+ - name: "99"
+ - name: "100"
+ - name: riak.search.index
+ description: Errors encountered by Search
+ unit: "errors"
+ chart_type: line
+ dimensions:
+ - name: index_fail
+ - name: bad_entry
+ - name: extract_fail
+ - name: riak.core.protobuf_connections
+ description: Protocol buffer connections by status
+ unit: "connections"
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: riak.core.repairs
+ description: Number of repair operations this node has coordinated
+ unit: "repairs"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: riak.core.fsm_active
+ description: Active finite state machines by kind
+ unit: "fsms"
+ chart_type: line
+ dimensions:
+ - name: get
+ - name: put
+ - name: secondary index
+ - name: list keys
+ - name: riak.core.fsm_rejected
+ description: Finite state machines being rejected by Sidejob's overload protection
+ unit: "fsms"
+ chart_type: line
+ dimensions:
+ - name: get
+ - name: put
diff --git a/src/go/plugin/go.d/modules/riakkv/riakkv.go b/src/go/plugin/go.d/modules/riakkv/riakkv.go
new file mode 100644
index 000000000..64aeda1c1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/riakkv.go
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package riakkv
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("riakkv", module.Creator{
+ Create: func() module.Module { return New() },
+ // Riak updates the metrics on the /stats endpoint every 1 second.
+ // If we use 1 here, it means we might get weird jitter in the graph,
+ // so the default is set to 2 seconds to prevent that.
+ Defaults: module.Defaults{
+ UpdateEvery: 2,
+ },
+ JobConfigSchema: configSchema,
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *RiakKv {
+ return &RiakKv{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ // https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html
+ URL: "http://127.0.0.1:8098/stats",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ once: &sync.Once{},
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type RiakKv struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ once *sync.Once
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (r *RiakKv) Configuration() any {
+ return r.Config
+}
+
+func (r *RiakKv) Init() error {
+ if r.URL == "" {
+ r.Errorf("url required but not set")
+ return errors.New("url not set")
+ }
+
+ httpClient, err := web.NewHTTPClient(r.Client)
+ if err != nil {
+ r.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ r.httpClient = httpClient
+
+ r.Debugf("using URL %s", r.URL)
+ r.Debugf("using timeout: %s", r.Timeout)
+
+ return nil
+}
+
+func (r *RiakKv) Check() error {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (r *RiakKv) Charts() *module.Charts {
+ return r.charts
+}
+
+func (r *RiakKv) Collect() map[string]int64 {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (r *RiakKv) Cleanup() {
+ if r.httpClient != nil {
+ r.httpClient.CloseIdleConnections()
+ }
+}
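A minimal standalone sketch (not part of this patch) of the module lifecycle that the agent and the tests below drive: New → Init → Check → Collect → Cleanup. It assumes a local Riak node with `riak_kv_stat` enabled, and an import path following the pattern of the imports used in this patch; inside Netdata the agent performs these calls for you.

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/modules/riakkv"
)

func main() {
	collr := riakkv.New()
	collr.URL = "http://127.0.0.1:8098/stats" // assumption: local node with /stats enabled

	if err := collr.Init(); err != nil {
		panic(err)
	}
	defer collr.Cleanup()

	if err := collr.Check(); err != nil {
		panic(err)
	}

	fmt.Println(collr.Collect()) // flat map of metric IDs -> int64 values
}
```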
diff --git a/src/go/plugin/go.d/modules/riakkv/riakkv_test.go b/src/go/plugin/go.d/modules/riakkv/riakkv_test.go
new file mode 100644
index 000000000..de4e24092
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/riakkv_test.go
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package riakkv
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStats, _ = os.ReadFile("testdata/stats.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStats": dataStats,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestRiakKv_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &RiakKv{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestRiakKv_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ riak := New()
+ riak.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, riak.Init())
+ } else {
+ assert.NoError(t, riak.Init())
+ }
+ })
+ }
+}
+
+func TestRiakKv_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (riak *RiakKv, cleanup func())
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: caseOkResponse,
+ },
+ "fail on invalid data response": {
+ wantFail: true,
+ prepare: caseInvalidDataResponse,
+ },
+ "fail on connection refused": {
+ wantFail: true,
+ prepare: caseConnectionRefused,
+ },
+ "fail on 404 response": {
+ wantFail: true,
+ prepare: case404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ riak, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, riak.Check())
+ } else {
+ assert.NoError(t, riak.Check())
+ }
+ })
+ }
+}
+
+func TestRiakKv_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestRiakKv_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (riak *RiakKv, cleanup func())
+ wantMetrics map[string]int64
+ }{
+ "success on valid response": {
+ prepare: caseOkResponse,
+ wantMetrics: map[string]int64{
+ "consistent_get_time_100": 1,
+ "consistent_get_time_95": 1,
+ "consistent_get_time_99": 1,
+ "consistent_get_time_mean": 1,
+ "consistent_get_time_median": 1,
+ "consistent_gets_total": 1,
+ "consistent_put_time_100": 1,
+ "consistent_put_time_95": 1,
+ "consistent_put_time_99": 1,
+ "consistent_put_time_mean": 1,
+ "consistent_put_time_median": 1,
+ "consistent_puts_total": 1,
+ "index_fsm_active": 1,
+ "list_fsm_active": 1,
+ "memory_processes": 274468041,
+ "memory_processes_used": 274337336,
+ "node_get_fsm_active": 1,
+ "node_get_fsm_objsize_100": 1037,
+ "node_get_fsm_objsize_95": 1,
+ "node_get_fsm_objsize_99": 1025,
+ "node_get_fsm_objsize_mean": 791,
+ "node_get_fsm_objsize_median": 669,
+ "node_get_fsm_rejected": 1,
+ "node_get_fsm_siblings_100": 1,
+ "node_get_fsm_siblings_95": 1,
+ "node_get_fsm_siblings_99": 1,
+ "node_get_fsm_siblings_mean": 1,
+ "node_get_fsm_siblings_median": 1,
+ "node_get_fsm_time_100": 678351,
+ "node_get_fsm_time_95": 1,
+ "node_get_fsm_time_99": 10148,
+ "node_get_fsm_time_mean": 2161,
+ "node_get_fsm_time_median": 1022,
+ "node_gets_total": 422626,
+ "node_put_fsm_active": 1,
+ "node_put_fsm_rejected": 1,
+ "node_put_fsm_time_100": 1049568,
+ "node_put_fsm_time_95": 19609,
+ "node_put_fsm_time_99": 37735,
+ "node_put_fsm_time_mean": 11828,
+ "node_put_fsm_time_median": 5017,
+ "node_puts_total": 490965,
+ "object_counter_merge_time_100": 1,
+ "object_counter_merge_time_95": 1,
+ "object_counter_merge_time_99": 1,
+ "object_counter_merge_time_mean": 1,
+ "object_counter_merge_time_median": 1,
+ "object_map_merge_time_100": 1,
+ "object_map_merge_time_95": 1,
+ "object_map_merge_time_99": 1,
+ "object_map_merge_time_mean": 1,
+ "object_map_merge_time_median": 1,
+ "object_set_merge_time_100": 1,
+ "object_set_merge_time_95": 1,
+ "object_set_merge_time_99": 1,
+ "object_set_merge_time_mean": 1,
+ "object_set_merge_time_median": 1,
+ "pbc_active": 46,
+ "read_repairs": 1,
+ "vnode_counter_update_total": 1,
+ "vnode_map_update_total": 1,
+ "vnode_set_update_total": 1,
+ },
+ },
+ "fail on invalid data response": {
+ prepare: caseInvalidDataResponse,
+ wantMetrics: nil,
+ },
+ "fail on connection refused": {
+ prepare: caseConnectionRefused,
+ wantMetrics: nil,
+ },
+ "fail on 404 response": {
+ prepare: case404,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ riak, cleanup := test.prepare(t)
+ defer cleanup()
+
+ _ = riak.Check()
+
+ mx := riak.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ require.True(t, len(*riak.Charts()) > 0, "charts > 0")
+ module.TestMetricsHasAllChartsDims(t, riak.Charts(), mx)
+ }
+ })
+ }
+}
+
+func caseOkResponse(t *testing.T) (*RiakKv, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStats)
+ }))
+ riak := New()
+ riak.URL = srv.URL
+ require.NoError(t, riak.Init())
+
+ return riak, srv.Close
+}
+
+func caseInvalidDataResponse(t *testing.T) (*RiakKv, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+ riak := New()
+ riak.URL = srv.URL
+ require.NoError(t, riak.Init())
+
+ return riak, srv.Close
+}
+
+func caseConnectionRefused(t *testing.T) (*RiakKv, func()) {
+ t.Helper()
+ rk := New()
+ rk.URL = "http://127.0.0.1:65001"
+ require.NoError(t, rk.Init())
+
+ return rk, func() {}
+}
+
+func case404(t *testing.T) (*RiakKv, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ riak := New()
+ riak.URL = srv.URL
+ require.NoError(t, riak.Init())
+
+ return riak, srv.Close
+}
diff --git a/src/go/plugin/go.d/modules/riakkv/stats.go b/src/go/plugin/go.d/modules/riakkv/stats.go
new file mode 100644
index 000000000..ed2927583
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/stats.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package riakkv
+
+// FIXME: old data (likely wrong) from https://github.com/netdata/netdata/issues/2413#issuecomment-500867044
+type riakStats struct {
+ NodeGetsTotal *int64 `json:"node_gets_total" stm:"node_gets_total"`
+ NodePutsTotal *int64 `json:"node_puts_total" stm:"node_puts_total"`
+
+ VnodeCounterUpdateTotal *int64 `json:"vnode_counter_update_total" stm:"vnode_counter_update_total"`
+ VnodeSetUpdateTotal *int64 `json:"vnode_set_update_total" stm:"vnode_set_update_total"`
+ VnodeMapUpdateTotal *int64 `json:"vnode_map_update_total" stm:"vnode_map_update_total"`
+
+ SearchQueryThroughputCount *int64 `json:"search_query_throughput_count" stm:"search_query_throughput_count"`
+ SearchIndexThroughputCount *int64 `json:"search_index_throughput_count" stm:"search_index_throughput_count"`
+
+ ConsistentGetsTotal *int64 `json:"consistent_gets_total" stm:"consistent_gets_total"`
+ ConsistentPutsTotal *int64 `json:"consistent_puts_total" stm:"consistent_puts_total"`
+
+ NodeGetFsmTimeMean *int64 `json:"node_get_fsm_time_mean" stm:"node_get_fsm_time_mean"`
+ NodeGetFsmTimeMedian *int64 `json:"node_get_fsm_time_median" stm:"node_get_fsm_time_median"`
+ NodeGetFsmTime95 *int64 `json:"node_get_fsm_time_95" stm:"node_get_fsm_time_95"`
+ NodeGetFsmTime99 *int64 `json:"node_get_fsm_time_99" stm:"node_get_fsm_time_99"`
+ NodeGetFsmTime100 *int64 `json:"node_get_fsm_time_100" stm:"node_get_fsm_time_100"`
+
+ NodePutFsmTimeMean *int64 `json:"node_put_fsm_time_mean" stm:"node_put_fsm_time_mean"`
+ NodePutFsmTimeMedian *int64 `json:"node_put_fsm_time_median" stm:"node_put_fsm_time_median"`
+ NodePutFsmTime95 *int64 `json:"node_put_fsm_time_95" stm:"node_put_fsm_time_95"`
+ NodePutFsmTime99 *int64 `json:"node_put_fsm_time_99" stm:"node_put_fsm_time_99"`
+ NodePutFsmTime100 *int64 `json:"node_put_fsm_time_100" stm:"node_put_fsm_time_100"`
+
+ ObjectCounterMergeTimeMean *int64 `json:"object_counter_merge_time_mean" stm:"object_counter_merge_time_mean"`
+ ObjectCounterMergeTimeMedian *int64 `json:"object_counter_merge_time_median" stm:"object_counter_merge_time_median"`
+ ObjectCounterMergeTime95 *int64 `json:"object_counter_merge_time_95" stm:"object_counter_merge_time_95"`
+ ObjectCounterMergeTime99 *int64 `json:"object_counter_merge_time_99" stm:"object_counter_merge_time_99"`
+ ObjectCounterMergeTime100 *int64 `json:"object_counter_merge_time_100" stm:"object_counter_merge_time_100"`
+
+ ObjectSetMergeTimeMean *int64 `json:"object_set_merge_time_mean" stm:"object_set_merge_time_mean"`
+ ObjectSetMergeTimeMedian *int64 `json:"object_set_merge_time_median" stm:"object_set_merge_time_median"`
+ ObjectSetMergeTime95 *int64 `json:"object_set_merge_time_95" stm:"object_set_merge_time_95"`
+ ObjectSetMergeTime99 *int64 `json:"object_set_merge_time_99" stm:"object_set_merge_time_99"`
+ ObjectSetMergeTime100 *int64 `json:"object_set_merge_time_100" stm:"object_set_merge_time_100"`
+
+ ObjectMapMergeTimeMean *int64 `json:"object_map_merge_time_mean" stm:"object_map_merge_time_mean"`
+ ObjectMapMergeTimeMedian *int64 `json:"object_map_merge_time_median" stm:"object_map_merge_time_median"`
+ ObjectMapMergeTime95 *int64 `json:"object_map_merge_time_95" stm:"object_map_merge_time_95"`
+ ObjectMapMergeTime99 *int64 `json:"object_map_merge_time_99" stm:"object_map_merge_time_99"`
+ ObjectMapMergeTime100 *int64 `json:"object_map_merge_time_100" stm:"object_map_merge_time_100"`
+
+ SearchQueryLatencyMin *int64 `json:"search_query_latency_min" stm:"search_query_latency_min"`
+ SearchQueryLatencyMedian *int64 `json:"search_query_latency_median" stm:"search_query_latency_median"`
+ SearchQueryLatency95 *int64 `json:"search_query_latency_95" stm:"search_query_latency_95"`
+ SearchQueryLatency99 *int64 `json:"search_query_latency_99" stm:"search_query_latency_99"`
+ SearchQueryLatency999 *int64 `json:"search_query_latency_999" stm:"search_query_latency_999"`
+ SearchQueryLatencyMax *int64 `json:"search_query_latency_max" stm:"search_query_latency_max"`
+
+ SearchIndexLatencyMin *int64 `json:"search_index_latency_min" stm:"search_index_latency_min"`
+ SearchIndexLatencyMedian *int64 `json:"search_index_latency_median" stm:"search_index_latency_median"`
+ SearchIndexLatency95 *int64 `json:"search_index_latency_95" stm:"search_index_latency_95"`
+ SearchIndexLatency99 *int64 `json:"search_index_latency_99" stm:"search_index_latency_99"`
+ SearchIndexLatency999 *int64 `json:"search_index_latency_999" stm:"search_index_latency_999"`
+ SearchIndexLatencyMax *int64 `json:"search_index_latency_max" stm:"search_index_latency_max"`
+
+ ConsistentGetTimeMean *int64 `json:"consistent_get_time_mean" stm:"consistent_get_time_mean"`
+ ConsistentGetTimeMedian *int64 `json:"consistent_get_time_median" stm:"consistent_get_time_median"`
+ ConsistentGetTime95 *int64 `json:"consistent_get_time_95" stm:"consistent_get_time_95"`
+ ConsistentGetTime99 *int64 `json:"consistent_get_time_99" stm:"consistent_get_time_99"`
+ ConsistentGetTime100 *int64 `json:"consistent_get_time_100" stm:"consistent_get_time_100"`
+
+ ConsistentPutTimeMean *int64 `json:"consistent_put_time_mean" stm:"consistent_put_time_mean"`
+ ConsistentPutTimeMedian *int64 `json:"consistent_put_time_median" stm:"consistent_put_time_median"`
+ ConsistentPutTime95 *int64 `json:"consistent_put_time_95" stm:"consistent_put_time_95"`
+ ConsistentPutTime99 *int64 `json:"consistent_put_time_99" stm:"consistent_put_time_99"`
+ ConsistentPutTime100 *int64 `json:"consistent_put_time_100" stm:"consistent_put_time_100"`
+
+ SysProcesses *int64 `json:"sys_processes" stm:"sys_processes"`
+ MemoryProcesses *int64 `json:"memory_processes" stm:"memory_processes"`
+ MemoryProcessesUsed *int64 `json:"memory_processes_used" stm:"memory_processes_used"`
+
+ NodeGetFsmSiblingsMean *int64 `json:"node_get_fsm_siblings_mean" stm:"node_get_fsm_siblings_mean"`
+ NodeGetFsmSiblingsMedian *int64 `json:"node_get_fsm_siblings_median" stm:"node_get_fsm_siblings_median"`
+ NodeGetFsmSiblings99 *int64 `json:"node_get_fsm_siblings_99" stm:"node_get_fsm_siblings_99"`
+ NodeGetFsmSiblings95 *int64 `json:"node_get_fsm_siblings_95" stm:"node_get_fsm_siblings_95"`
+ NodeGetFsmSiblings100 *int64 `json:"node_get_fsm_siblings_100" stm:"node_get_fsm_siblings_100"`
+
+ NodeGetFsmObjsizeMean *int64 `json:"node_get_fsm_objsize_mean" stm:"node_get_fsm_objsize_mean"`
+ NodeGetFsmObjsizeMedian *int64 `json:"node_get_fsm_objsize_median" stm:"node_get_fsm_objsize_median"`
+ NodeGetFsmObjsize95 *int64 `json:"node_get_fsm_objsize_95" stm:"node_get_fsm_objsize_95"`
+ NodeGetFsmObjsize99 *int64 `json:"node_get_fsm_objsize_99" stm:"node_get_fsm_objsize_99"`
+ NodeGetFsmObjsize100 *int64 `json:"node_get_fsm_objsize_100" stm:"node_get_fsm_objsize_100"`
+
+ RiakSearchVnodeqMean *int64 `json:"riak_search_vnodeq_mean" stm:"riak_search_vnodeq_mean"`
+ RiakSearchVnodeqMedian *int64 `json:"riak_search_vnodeq_median" stm:"riak_search_vnodeq_median"`
+ RiakSearchVnodeq95 *int64 `json:"riak_search_vnodeq_95" stm:"riak_search_vnodeq_95"`
+ RiakSearchVnodeq99 *int64 `json:"riak_search_vnodeq_99" stm:"riak_search_vnodeq_99"`
+ RiakSearchVnodeq100 *int64 `json:"riak_search_vnodeq_100" stm:"riak_search_vnodeq_100"`
+
+ SearchIndexFailCount *int64 `json:"search_index_fail_count" stm:"search_index_fail_count"`
+ PbcActive *int64 `json:"pbc_active" stm:"pbc_active"`
+ ReadRepairs *int64 `json:"read_repairs" stm:"read_repairs"`
+
+ NodeGetFsmActive *int64 `json:"node_get_fsm_active" stm:"node_get_fsm_active"`
+ NodePutFsmActive *int64 `json:"node_put_fsm_active" stm:"node_put_fsm_active"`
+ IndexFsmActive *int64 `json:"index_fsm_active" stm:"index_fsm_active"`
+ ListFsmActive *int64 `json:"list_fsm_active" stm:"list_fsm_active"`
+
+ NodeGetFsmRejected *int64 `json:"node_get_fsm_rejected" stm:"node_get_fsm_rejected"`
+ NodePutFsmRejected *int64 `json:"node_put_fsm_rejected" stm:"node_put_fsm_rejected"`
+
+ SearchIndexBadEntryCount *int64 `json:"search_index_bad_entry_count" stm:"search_index_bad_entry_count"`
+ SearchIndexExtractFailCount *int64 `json:"search_index_extract_fail_count" stm:"search_index_extract_fail_count"`
+}
diff --git a/src/go/plugin/go.d/modules/riakkv/testdata/config.json b/src/go/plugin/go.d/modules/riakkv/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/riakkv/testdata/config.yaml b/src/go/plugin/go.d/modules/riakkv/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/riakkv/testdata/stats.json b/src/go/plugin/go.d/modules/riakkv/testdata/stats.json
new file mode 100644
index 000000000..8dd836f20
--- /dev/null
+++ b/src/go/plugin/go.d/modules/riakkv/testdata/stats.json
@@ -0,0 +1,478 @@
+{
+ "connected_nodes": [],
+ "consistent_get_objsize_100": 1,
+ "consistent_get_objsize_95": 1,
+ "consistent_get_objsize_99": 1,
+ "consistent_get_objsize_mean": 1,
+ "consistent_get_objsize_median": 1,
+ "consistent_get_time_100": 1,
+ "consistent_get_time_95": 1,
+ "consistent_get_time_99": 1,
+ "consistent_get_time_mean": 1,
+ "consistent_get_time_median": 1,
+ "consistent_gets": 1,
+ "consistent_gets_total": 1,
+ "consistent_put_objsize_100": 1,
+ "consistent_put_objsize_95": 1,
+ "consistent_put_objsize_99": 1,
+ "consistent_put_objsize_mean": 1,
+ "consistent_put_objsize_median": 1,
+ "consistent_put_time_100": 1,
+ "consistent_put_time_95": 1,
+ "consistent_put_time_99": 1,
+ "consistent_put_time_mean": 1,
+ "consistent_put_time_median": 1,
+ "consistent_puts": 1,
+ "consistent_puts_total": 1,
+ "converge_delay_last": 1,
+ "converge_delay_max": 1,
+ "converge_delay_mean": 1,
+ "converge_delay_min": 1,
+ "coord_redirs_total": 1,
+ "counter_actor_counts_100": 1,
+ "counter_actor_counts_95": 1,
+ "counter_actor_counts_99": 1,
+ "counter_actor_counts_mean": 1,
+ "counter_actor_counts_median": 1,
+ "cpu_avg1": 2276,
+ "cpu_avg15": 661,
+ "cpu_avg5": 1267,
+ "cpu_nprocs": 1443,
+ "dropped_vnode_requests_total": 1,
+ "executing_mappers": 1,
+ "gossip_received": 1,
+ "handoff_timeouts": 1,
+ "hll_bytes": 1,
+ "hll_bytes_100": 1,
+ "hll_bytes_95": 1,
+ "hll_bytes_99": 1,
+ "hll_bytes_mean": 1,
+ "hll_bytes_median": 1,
+ "hll_bytes_total": 1,
+ "ignored_gossip_total": 1,
+ "index_fsm_active": 1,
+ "index_fsm_create": 1,
+ "index_fsm_create_error": 1,
+ "late_put_fsm_coordinator_ack": 1,
+ "leveldb_read_block_error": "undefined",
+ "list_fsm_active": 1,
+ "list_fsm_create": 1,
+ "list_fsm_create_error": 1,
+ "list_fsm_create_error_total": 1,
+ "list_fsm_create_total": 1,
+ "map_actor_counts_100": 1,
+ "map_actor_counts_95": 1,
+ "map_actor_counts_99": 1,
+ "map_actor_counts_mean": 1,
+ "map_actor_counts_median": 1,
+ "mem_allocated": 14529916928,
+ "mem_total": 16728453121,
+ "memory_atom": 695185,
+ "memory_atom_used": 670675,
+ "memory_binary": 15413608,
+ "memory_code": 15375111,
+ "memory_ets": 7728584,
+ "memory_processes": 274468041,
+ "memory_processes_used": 274337336,
+ "memory_system": 126058328,
+ "memory_total": 400526368,
+ "node_get_fsm_active": 1,
+ "node_get_fsm_active_60s": 20079,
+ "node_get_fsm_counter_objsize_100": 1,
+ "node_get_fsm_counter_objsize_95": 1,
+ "node_get_fsm_counter_objsize_99": 1,
+ "node_get_fsm_counter_objsize_mean": 1,
+ "node_get_fsm_counter_objsize_median": 1,
+ "node_get_fsm_counter_siblings_100": 1,
+ "node_get_fsm_counter_siblings_95": 1,
+ "node_get_fsm_counter_siblings_99": 1,
+ "node_get_fsm_counter_siblings_mean": 1,
+ "node_get_fsm_counter_siblings_median": 1,
+ "node_get_fsm_counter_time_100": 1,
+ "node_get_fsm_counter_time_95": 1,
+ "node_get_fsm_counter_time_99": 1,
+ "node_get_fsm_counter_time_mean": 1,
+ "node_get_fsm_counter_time_median": 1,
+ "node_get_fsm_errors": 1,
+ "node_get_fsm_errors_total": 1,
+ "node_get_fsm_hll_objsize_100": 1,
+ "node_get_fsm_hll_objsize_95": 1,
+ "node_get_fsm_hll_objsize_99": 1,
+ "node_get_fsm_hll_objsize_mean": 1,
+ "node_get_fsm_hll_objsize_median": 1,
+ "node_get_fsm_hll_siblings_100": 1,
+ "node_get_fsm_hll_siblings_95": 1,
+ "node_get_fsm_hll_siblings_99": 1,
+ "node_get_fsm_hll_siblings_mean": 1,
+ "node_get_fsm_hll_siblings_median": 1,
+ "node_get_fsm_hll_time_100": 1,
+ "node_get_fsm_hll_time_95": 1,
+ "node_get_fsm_hll_time_99": 1,
+ "node_get_fsm_hll_time_mean": 1,
+ "node_get_fsm_hll_time_median": 1,
+ "node_get_fsm_in_rate": 181,
+ "node_get_fsm_map_objsize_100": 1,
+ "node_get_fsm_map_objsize_95": 1,
+ "node_get_fsm_map_objsize_99": 1,
+ "node_get_fsm_map_objsize_mean": 1,
+ "node_get_fsm_map_objsize_median": 1,
+ "node_get_fsm_map_siblings_100": 1,
+ "node_get_fsm_map_siblings_95": 1,
+ "node_get_fsm_map_siblings_99": 1,
+ "node_get_fsm_map_siblings_mean": 1,
+ "node_get_fsm_map_siblings_median": 1,
+ "node_get_fsm_map_time_100": 1,
+ "node_get_fsm_map_time_95": 1,
+ "node_get_fsm_map_time_99": 1,
+ "node_get_fsm_map_time_mean": 1,
+ "node_get_fsm_map_time_median": 1,
+ "node_get_fsm_objsize_100": 1037,
+ "node_get_fsm_objsize_95": 1,
+ "node_get_fsm_objsize_99": 1025,
+ "node_get_fsm_objsize_mean": 791,
+ "node_get_fsm_objsize_median": 669,
+ "node_get_fsm_out_rate": 191,
+ "node_get_fsm_rejected": 1,
+ "node_get_fsm_rejected_60s": 1,
+ "node_get_fsm_rejected_total": 1,
+ "node_get_fsm_set_objsize_100": 1,
+ "node_get_fsm_set_objsize_95": 1,
+ "node_get_fsm_set_objsize_99": 1,
+ "node_get_fsm_set_objsize_mean": 1,
+ "node_get_fsm_set_objsize_median": 1,
+ "node_get_fsm_set_siblings_100": 1,
+ "node_get_fsm_set_siblings_95": 1,
+ "node_get_fsm_set_siblings_99": 1,
+ "node_get_fsm_set_siblings_mean": 1,
+ "node_get_fsm_set_siblings_median": 1,
+ "node_get_fsm_set_time_100": 1,
+ "node_get_fsm_set_time_95": 1,
+ "node_get_fsm_set_time_99": 1,
+ "node_get_fsm_set_time_mean": 1,
+ "node_get_fsm_set_time_median": 1,
+ "node_get_fsm_siblings_100": 1,
+ "node_get_fsm_siblings_95": 1,
+ "node_get_fsm_siblings_99": 1,
+ "node_get_fsm_siblings_mean": 1,
+ "node_get_fsm_siblings_median": 1,
+ "node_get_fsm_time_100": 678351,
+ "node_get_fsm_time_95": 1,
+ "node_get_fsm_time_99": 10148,
+ "node_get_fsm_time_mean": 2161,
+ "node_get_fsm_time_median": 1022,
+ "node_gets": 19875,
+ "node_gets_counter": 1,
+ "node_gets_counter_total": 1,
+ "node_gets_hll": 1,
+ "node_gets_hll_total": 1,
+ "node_gets_map": 1,
+ "node_gets_map_total": 1,
+ "node_gets_set": 1,
+ "node_gets_set_total": 1,
+ "node_gets_total": 422626,
+ "node_put_fsm_active": 1,
+ "node_put_fsm_active_60s": 10498,
+ "node_put_fsm_counter_time_100": 1,
+ "node_put_fsm_counter_time_95": 1,
+ "node_put_fsm_counter_time_99": 1,
+ "node_put_fsm_counter_time_mean": 1,
+ "node_put_fsm_counter_time_median": 1,
+ "node_put_fsm_hll_time_100": 1,
+ "node_put_fsm_hll_time_95": 1,
+ "node_put_fsm_hll_time_99": 1,
+ "node_put_fsm_hll_time_mean": 1,
+ "node_put_fsm_hll_time_median": 1,
+ "node_put_fsm_in_rate": 116,
+ "node_put_fsm_map_time_100": 1,
+ "node_put_fsm_map_time_95": 1,
+ "node_put_fsm_map_time_99": 1,
+ "node_put_fsm_map_time_mean": 1,
+ "node_put_fsm_map_time_median": 1,
+ "node_put_fsm_out_rate": 127,
+ "node_put_fsm_rejected": 1,
+ "node_put_fsm_rejected_60s": 1,
+ "node_put_fsm_rejected_total": 1,
+ "node_put_fsm_set_time_100": 1,
+ "node_put_fsm_set_time_95": 1,
+ "node_put_fsm_set_time_99": 1,
+ "node_put_fsm_set_time_mean": 1,
+ "node_put_fsm_set_time_median": 1,
+ "node_put_fsm_time_100": 1049568,
+ "node_put_fsm_time_95": 19609,
+ "node_put_fsm_time_99": 37735,
+ "node_put_fsm_time_mean": 11828,
+ "node_put_fsm_time_median": 5017,
+ "node_puts": 10283,
+ "node_puts_counter": 1,
+ "node_puts_counter_total": 1,
+ "node_puts_hll": 1,
+ "node_puts_hll_total": 1,
+ "node_puts_map": 1,
+ "node_puts_map_total": 1,
+ "node_puts_set": 1,
+ "node_puts_set_total": 1,
+ "node_puts_total": 490965,
+ "nodename": "riak@127.0.0.1",
+ "object_counter_merge": 1,
+ "object_counter_merge_time_100": 1,
+ "object_counter_merge_time_95": 1,
+ "object_counter_merge_time_99": 1,
+ "object_counter_merge_time_mean": 1,
+ "object_counter_merge_time_median": 1,
+ "object_counter_merge_total": 1,
+ "object_hll_merge": 1,
+ "object_hll_merge_time_100": 1,
+ "object_hll_merge_time_95": 1,
+ "object_hll_merge_time_99": 1,
+ "object_hll_merge_time_mean": 1,
+ "object_hll_merge_time_median": 1,
+ "object_hll_merge_total": 1,
+ "object_map_merge": 1,
+ "object_map_merge_time_100": 1,
+ "object_map_merge_time_95": 1,
+ "object_map_merge_time_99": 1,
+ "object_map_merge_time_mean": 1,
+ "object_map_merge_time_median": 1,
+ "object_map_merge_total": 1,
+ "object_merge": 1,
+ "object_merge_time_100": 1,
+ "object_merge_time_95": 1,
+ "object_merge_time_99": 1,
+ "object_merge_time_mean": 1,
+ "object_merge_time_median": 1,
+ "object_merge_total": 7167,
+ "object_set_merge": 1,
+ "object_set_merge_time_100": 1,
+ "object_set_merge_time_95": 1,
+ "object_set_merge_time_99": 1,
+ "object_set_merge_time_mean": 1,
+ "object_set_merge_time_median": 1,
+ "object_set_merge_total": 1,
+ "pbc_active": 46,
+ "pbc_connects": 1,
+ "pbc_connects_total": 48,
+ "pipeline_active": 1,
+ "pipeline_create_count": 1,
+ "pipeline_create_error_count": 1,
+ "pipeline_create_error_one": 1,
+ "pipeline_create_one": 1,
+ "postcommit_fail": 1,
+ "precommit_fail": 1,
+ "read_repairs": 1,
+ "read_repairs_counter": 1,
+ "read_repairs_counter_total": 1,
+ "read_repairs_fallback_notfound_count": "undefined",
+ "read_repairs_fallback_notfound_one": "undefined",
+ "read_repairs_fallback_outofdate_count": "undefined",
+ "read_repairs_fallback_outofdate_one": "undefined",
+ "read_repairs_hll": 1,
+ "read_repairs_hll_total": 1,
+ "read_repairs_map": 1,
+ "read_repairs_map_total": 1,
+ "read_repairs_primary_notfound_count": 186,
+ "read_repairs_primary_notfound_one": 1,
+ "read_repairs_primary_outofdate_count": 24,
+ "read_repairs_primary_outofdate_one": 1,
+ "read_repairs_set": 1,
+ "read_repairs_set_total": 1,
+ "read_repairs_total": 105,
+ "rebalance_delay_last": 1,
+ "rebalance_delay_max": 1,
+ "rebalance_delay_mean": 1,
+ "rebalance_delay_min": 1,
+ "rejected_handoffs": 1,
+ "riak_kv_vnodeq_max": 3,
+ "riak_kv_vnodeq_mean": 0.078125,
+ "riak_kv_vnodeq_median": 1,
+ "riak_kv_vnodeq_min": 1,
+ "riak_kv_vnodeq_total": 5,
+ "riak_kv_vnodes_running": 64,
+ "riak_pipe_vnodeq_max": 1,
+ "riak_pipe_vnodeq_mean": 1,
+ "riak_pipe_vnodeq_median": 1,
+ "riak_pipe_vnodeq_min": 1,
+ "riak_pipe_vnodeq_total": 1,
+ "riak_pipe_vnodes_running": 64,
+ "ring_creation_size": 64,
+ "ring_members": [
+ "riak@127.0.0.1"
+ ],
+ "ring_num_partitions": 64,
+ "ring_ownership": "[{'riak@127.0.0.1',64}]",
+ "rings_reconciled": 1,
+ "rings_reconciled_total": 1,
+ "set_actor_counts_100": 1,
+ "set_actor_counts_95": 1,
+ "set_actor_counts_99": 1,
+ "set_actor_counts_mean": 1,
+ "set_actor_counts_median": 1,
+ "skipped_read_repairs": 1,
+ "skipped_read_repairs_total": 1,
+ "storage_backend": "riak_kv_bitcask_backend",
+ "sys_driver_version": "2.2",
+ "sys_global_heaps_size": "deprecated",
+ "sys_heap_type": "private",
+ "sys_logical_processors": 4,
+ "sys_monitor_count": 966,
+ "sys_otp_release": "R16B02_basho10",
+ "sys_port_count": 336,
+ "sys_process_count": 2169,
+ "sys_smp_support": true,
+ "sys_system_architecture": "x86_64-unknown-linux-gnu",
+ "sys_system_version": "Erlang R16B02_basho10 (erts-5.10.3) [source] [64-bit] [smp:4:4] [async-threads:64] [hipe] [kernel-poll:true] [frame-pointer]",
+ "sys_thread_pool_size": 64,
+ "sys_threads_enabled": true,
+ "sys_wordsize": 8,
+ "vnode_counter_update": 1,
+ "vnode_counter_update_time_100": 1,
+ "vnode_counter_update_time_95": 1,
+ "vnode_counter_update_time_99": 1,
+ "vnode_counter_update_time_mean": 1,
+ "vnode_counter_update_time_median": 1,
+ "vnode_counter_update_total": 1,
+ "vnode_get_fsm_time_100": 836988,
+ "vnode_get_fsm_time_95": 3415,
+ "vnode_get_fsm_time_99": 7394,
+ "vnode_get_fsm_time_mean": 1159,
+ "vnode_get_fsm_time_median": 461,
+ "vnode_gets": 59641,
+ "vnode_gets_total": 1267893,
+ "vnode_hll_update": 1,
+ "vnode_hll_update_time_100": 1,
+ "vnode_hll_update_time_95": 1,
+ "vnode_hll_update_time_99": 1,
+ "vnode_hll_update_time_mean": 1,
+ "vnode_hll_update_time_median": 1,
+ "vnode_hll_update_total": 1,
+ "vnode_index_deletes": 1,
+ "vnode_index_deletes_postings": 1,
+ "vnode_index_deletes_postings_total": 1,
+ "vnode_index_deletes_total": 1,
+ "vnode_index_reads": 1,
+ "vnode_index_reads_total": 1,
+ "vnode_index_refreshes": 1,
+ "vnode_index_refreshes_total": 1,
+ "vnode_index_writes": 1,
+ "vnode_index_writes_postings": 1,
+ "vnode_index_writes_postings_total": 1,
+ "vnode_index_writes_total": 1,
+ "vnode_map_update": 1,
+ "vnode_map_update_time_100": 1,
+ "vnode_map_update_time_95": 1,
+ "vnode_map_update_time_99": 1,
+ "vnode_map_update_time_mean": 1,
+ "vnode_map_update_time_median": 1,
+ "vnode_map_update_total": 1,
+ "vnode_put_fsm_time_100": 1034955,
+ "vnode_put_fsm_time_95": 10302,
+ "vnode_put_fsm_time_99": 16813,
+ "vnode_put_fsm_time_mean": 4511,
+ "vnode_put_fsm_time_median": 1927,
+ "vnode_puts": 30852,
+ "vnode_puts_total": 1473108,
+ "vnode_set_update": 1,
+ "vnode_set_update_time_100": 1,
+ "vnode_set_update_time_95": 1,
+ "vnode_set_update_time_99": 1,
+ "vnode_set_update_time_mean": 1,
+ "vnode_set_update_time_median": 1,
+ "vnode_set_update_total": 1,
+ "write_once_merge": 1,
+ "write_once_put_objsize_100": 1,
+ "write_once_put_objsize_95": 1,
+ "write_once_put_objsize_99": 1,
+ "write_once_put_objsize_mean": 1,
+ "write_once_put_objsize_median": 1,
+ "write_once_put_time_100": 1,
+ "write_once_put_time_95": 1,
+ "write_once_put_time_99": 1,
+ "write_once_put_time_mean": 1,
+ "write_once_put_time_median": 1,
+ "write_once_puts": 1,
+ "write_once_puts_total": 1,
+ "disk": [
+ {
+ "id": "/",
+ "size": 488386584,
+ "used": 11
+ },
+ {
+ "id": "/dev",
+ "size": 65536,
+ "used": 0
+ },
+ {
+ "id": "/sys/fs/cgroup",
+ "size": 8168188,
+ "used": 0
+ },
+ {
+ "id": "/etc/hosts",
+ "size": 488386584,
+ "used": 11
+ },
+ {
+ "id": "/dev/shm",
+ "size": 65536,
+ "used": 0
+ },
+ {
+ "id": "/proc/asound",
+ "size": 8168188,
+ "used": 0
+ },
+ {
+ "id": "/proc/acpi",
+ "size": 8168188,
+ "used": 0
+ },
+ {
+ "id": "/sys/firmware",
+ "size": 8168188,
+ "used": 0
+ }
+ ],
+ "riak_auth_mods_version": "2.1.0-0-g31b8b30",
+ "erlydtl_version": "0.7.0",
+ "riak_control_version": "2.1.6-0-gcbf605a",
+ "cluster_info_version": "2.0.5-0-gd61d055",
+ "yokozuna_version": "2.1.10-0-gb53d999",
+ "fuse_version": "2.1.0",
+ "ibrowse_version": "4.0.2",
+ "riak_search_version": "2.1.6-0-g0d398f2",
+ "merge_index_version": "2.0.4-0-gc5efac6",
+ "riak_kv_version": "2.1.7-0-gbd8e312",
+ "riak_api_version": "2.1.6-0-ga678e25",
+ "riak_pb_version": "2.2.0.0-0-gf5af9ff",
+ "protobuffs_version": "0.9.0-0-g0dde9d3",
+ "riak_dt_version": "2.1.3-0-g9450044",
+ "sidejob_version": "2.0.1-0-g8ac6803",
+ "riak_pipe_version": "2.1.5-0-g8b2c842",
+ "riak_core_version": "2.1.9-0-gb8a11b4",
+ "exometer_core_version": "1.0.0-basho9-0-gfcc8662",
+ "poolboy_version": "0.8.1p3-0-g8bb45fb",
+ "pbkdf2_version": "2.0.0-0-g7076584",
+ "eleveldb_version": "2.0.34-0-g55abc57",
+ "clique_version": "0.3.9-0-ge7114e9",
+ "bitcask_version": "2.0.3",
+ "basho_stats_version": "1.0.3",
+ "webmachine_version": "1.10.8-basho1-0-g494d14f",
+ "mochiweb_version": "2.9.0",
+ "inets_version": "5.9.6",
+ "xmerl_version": "1.3.4",
+ "erlang_js_version": "1.3.0-0-g07467d8",
+ "runtime_tools_version": "1.8.12",
+ "os_mon_version": "2.2.13",
+ "riak_sysmon_version": "2.1.5-0-g0ab94b3",
+ "ssl_version": "5.3.1",
+ "public_key_version": "0.20",
+ "crypto_version": "3.1",
+ "asn1_version": "2.0.3",
+ "sasl_version": "2.3.3",
+ "lager_version": "3.2.2",
+ "goldrush_version": "0.1.9",
+ "compiler_version": "4.9.3",
+ "syntax_tools_version": "1.6.11",
+ "stdlib_version": "1.19.3",
+ "kernel_version": "2.16.3"
+}
diff --git a/src/go/plugin/go.d/modules/rspamd/README.md b/src/go/plugin/go.d/modules/rspamd/README.md
new file mode 120000
index 000000000..b18fa0599
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/README.md
@@ -0,0 +1 @@
+integrations/rspamd.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/rspamd/charts.go b/src/go/plugin/go.d/modules/rspamd/charts.go
new file mode 100644
index 000000000..3d28ab21d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/charts.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rspamd
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+const (
+ prioClassifications = module.Priority + iota
+ prioActions
+ prioScans
+ prioLearns
+ prioConnections
+ prioControlConnections
+)
+
+var charts = module.Charts{
+ classificationsChartTmpl.Copy(),
+
+ actionsChart.Copy(),
+
+ scanChartTmpl.Copy(),
+ learnChartTmpl.Copy(),
+
+ connectionsChartTmpl.Copy(),
+ controlConnectionsChartTmpl.Copy(),
+}
+
+var (
+ classificationsChartTmpl = module.Chart{
+ ID: "classifications",
+ Title: "Classifications",
+ Units: "messages/s",
+ Fam: "classification",
+ Ctx: "rspamd.classifications",
+ Type: module.Stacked,
+ Priority: prioClassifications,
+ Dims: module.Dims{
+ {ID: "ham_count", Name: "ham", Algo: module.Incremental},
+ {ID: "spam_count", Name: "spam", Algo: module.Incremental},
+ },
+ }
+
+ actionsChart = module.Chart{
+ ID: "actions",
+ Title: "Actions",
+ Units: "messages/s",
+ Fam: "actions",
+ Ctx: "rspamd.actions",
+ Type: module.Stacked,
+ Priority: prioActions,
+ Dims: module.Dims{
+ {ID: "actions_reject", Name: "reject", Algo: module.Incremental},
+ {ID: "actions_soft_reject", Name: "soft_reject", Algo: module.Incremental},
+ {ID: "actions_rewrite_subject", Name: "rewrite_subject", Algo: module.Incremental},
+ {ID: "actions_add_header", Name: "add_header", Algo: module.Incremental},
+ {ID: "actions_greylist", Name: "greylist", Algo: module.Incremental},
+ {ID: "actions_custom", Name: "custom", Algo: module.Incremental},
+ {ID: "actions_discard", Name: "discard", Algo: module.Incremental},
+ {ID: "actions_quarantine", Name: "quarantine", Algo: module.Incremental},
+ {ID: "actions_no_action", Name: "no_action", Algo: module.Incremental},
+ },
+ }
+
+ scanChartTmpl = module.Chart{
+ ID: "scans",
+ Title: "Scanned messages",
+ Units: "messages/s",
+ Fam: "training",
+ Ctx: "rspamd.scans",
+ Priority: prioScans,
+ Dims: module.Dims{
+ {ID: "scanned", Name: "scanned", Algo: module.Incremental},
+ },
+ }
+
+ learnChartTmpl = module.Chart{
+ ID: "learns",
+ Title: "Learned messages",
+ Units: "messages/s",
+ Fam: "training",
+ Ctx: "rspamd.learns",
+ Priority: prioLearns,
+ Dims: module.Dims{
+ {ID: "learned", Name: "learned", Algo: module.Incremental},
+ },
+ }
+
+ connectionsChartTmpl = module.Chart{
+ ID: "connections",
+ Title: "Connections",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "rspamd.connections",
+ Priority: prioConnections,
+ Dims: module.Dims{
+ {ID: "connections", Name: "connections", Algo: module.Incremental},
+ },
+ }
+
+ controlConnectionsChartTmpl = module.Chart{
+ ID: "control_connections",
+ Title: "Control connections",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "rspamd.control_connections",
+ Priority: prioControlConnections,
+ Dims: module.Dims{
+ {ID: "control_connections", Name: "control_connections", Algo: module.Incremental},
+ },
+ }
+)
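Each dimension ID above has to match a key in the map returned by `collect()`, and `Algo: module.Incremental` makes the agent chart the per-second rate of the cumulative counters exposed by `/stat`. As a rough sketch of how one more counter would be wired in the same style (the `fuzzy_checked` key and the chart below are invented for illustration only, and the snippet assumes it sits in this package next to the constants above):

```go
// Hypothetical chart, not part of this change: the dim ID must match a key
// emitted by collect(), and Incremental turns the raw counter into a rate.
var fuzzyChecksChart = module.Chart{
	ID:       "fuzzy_checks",
	Title:    "Fuzzy checks",
	Units:    "checks/s",
	Fam:      "fuzzy",
	Ctx:      "rspamd.fuzzy_checks",
	Priority: prioControlConnections + 1,
	Dims: module.Dims{
		{ID: "fuzzy_checked", Name: "checked", Algo: module.Incremental},
	},
}
```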
diff --git a/src/go/plugin/go.d/modules/rspamd/collect.go b/src/go/plugin/go.d/modules/rspamd/collect.go
new file mode 100644
index 000000000..ecbe4a034
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/collect.go
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rspamd
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
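+// rspamdStats models the fields of the controller's /stat response that the collector uses.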
+type rspamdStats struct {
+ Version string `json:"version"`
+ ConfigId string `json:"config_id"`
+ Scanned *int64 `json:"scanned" stm:"scanned"`
+ Learned *int64 `json:"learned" stm:"learned"`
+ Actions struct {
+ Reject int64 `json:"reject" stm:"reject"`
+ SoftReject int64 `json:"soft reject" stm:"soft_reject"`
+ RewriteSubject int64 `json:"rewrite subject" stm:"rewrite_subject"`
+ AddHeader int64 `json:"add header" stm:"add_header"`
+ Greylist int64 `json:"greylist" stm:"greylist"`
+ NoAction int64 `json:"no action" stm:"no_action"`
+ InvalidMaxAction int64 `json:"invalid max action" stm:"invalid_max_action"`
+ Custom int64 `json:"custom" stm:"custom"`
+ Discard int64 `json:"discard" stm:"discard"`
+ Quarantine int64 `json:"quarantine" stm:"quarantine"`
+ UnknownAction int64 `json:"unknown action" stm:"unknown_action"`
+ } `json:"actions" stm:"actions"`
+ ScanTimes []float64 `json:"scan_times"`
+ SpamCount int64 `json:"spam_count" stm:"spam_count"`
+ HamCount int64 `json:"ham_count" stm:"ham_count"`
+ Connections int64 `json:"connections" stm:"connections"`
+ ControlConnections int64 `json:"control_connections" stm:"control_connections"`
+ FuzzyHashes map[string]int64 `json:"fuzzy_hashes"`
+}
+
+func (r *Rspamd) collect() (map[string]int64, error) {
+ stats, err := r.queryRspamdStats()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := stm.ToMap(stats)
+
+ return mx, nil
+}
+
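+// queryRspamdStats fetches the controller's /stat endpoint and rejects responses that lack the scanned/learned fields.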
+func (r *Rspamd) queryRspamdStats() (*rspamdStats, error) {
+ req, err := web.NewHTTPRequestWithPath(r.Request, "/stat")
+ if err != nil {
+ return nil, err
+ }
+
+ var stats rspamdStats
+ if err := r.doOKDecode(req, &stats); err != nil {
+ return nil, err
+ }
+
+ if stats.Scanned == nil || stats.Learned == nil {
+ return nil, fmt.Errorf("unexpected response: not rspamd data")
+ }
+
+ return &stats, nil
+}
+
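+// doOKDecode executes the request, requires a 200 OK status, and JSON-decodes the response body into in.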
+func (r *Rspamd) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := r.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
+ }
+ return nil
+}
+
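+// closeBody drains and closes the response body so the underlying connection can be reused.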
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
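`stm.ToMap` is what turns the tagged struct into the flat `map[string]int64` that the charts reference: nested `stm` tags are joined with `_`, which is why `Actions.Reject` shows up as `actions_reject` both in charts.go and in the expectations in rspamd_test.go. A minimal sketch of that behavior, assuming the same `pkg/stm` import path used above:

```go
package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
)

// sample mirrors the tagging pattern of rspamdStats on a much smaller struct.
type sample struct {
	Scanned *int64 `stm:"scanned"`
	Actions struct {
		Reject int64 `stm:"reject"`
	} `stm:"actions"`
}

func main() {
	scanned := int64(3)
	s := sample{Scanned: &scanned}
	s.Actions.Reject = 7
	// Expected to print something like: map[actions_reject:7 scanned:3]
	fmt.Println(stm.ToMap(&s))
}
```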
diff --git a/src/go/plugin/go.d/modules/rspamd/config_schema.json b/src/go/plugin/go.d/modules/rspamd/config_schema.json
new file mode 100644
index 000000000..c7b866d87
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Rspamd collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the Rspamd [controller worker](https://rspamd.com/doc/workers/controller.html).",
+ "type": "string",
+ "default": "http://127.0.0.1:11334",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/rspamd/integrations/rspamd.md b/src/go/plugin/go.d/modules/rspamd/integrations/rspamd.md
new file mode 100644
index 000000000..fe0949422
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/integrations/rspamd.md
@@ -0,0 +1,243 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/rspamd/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/rspamd/metadata.yaml"
+sidebar_label: "Rspamd"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Security Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Rspamd
+
+
+<img src="https://netdata.cloud/img/globe.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: rspamd
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the activity and performance of Rspamd servers. It gathers various metrics including scanned emails, learned messages, spam/ham counts, and actions taken on emails (reject, rewrite, etc.).
+
+
+It retrieves statistics from Rspamd's [built-in web server](https://rspamd.com/doc/workers/controller.html) by making HTTP requests to the `/stat` endpoint.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Rspamd instances running on localhost that are listening on port 11334.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Rspamd instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| rspamd.classifications | ham, spam | messages/s |
+| rspamd.actions | reject, soft_reject, rewrite_subject, add_header, greylist, custom, discard, quarantine, no_action | messages/s |
+| rspamd.scans | scanned | messages/s |
+| rspamd.learns | learned | messages/s |
+| rspamd.connections | connections | connections/s |
+| rspamd.control_connections | control_connections | connections/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/rspamd.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/rspamd.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:11334 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:11334
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:11334
+ username: username
+ password: password
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:11334
+
+ - name: remote
+ url: http://192.0.2.1:11334
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `rspamd` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m rspamd
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `rspamd` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep rspamd
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep rspamd /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep rspamd
+```
+
+
diff --git a/src/go/plugin/go.d/modules/rspamd/metadata.yaml b/src/go/plugin/go.d/modules/rspamd/metadata.yaml
new file mode 100644
index 000000000..a8ab16b49
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/metadata.yaml
@@ -0,0 +1,221 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-rspamd
+ plugin_name: go.d.plugin
+ module_name: rspamd
+ monitored_instance:
+ name: Rspamd
+ link: https://rspamd.com/
+ categories:
+ - data-collection.security-systems
+ icon_filename: globe.svg
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: go.d.plugin
+ module_name: httpcheck
+ - plugin_name: apps.plugin
+ module_name: apps
+ alternative_monitored_instances: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - spam
+ - rspamd
+ - email
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the activity and performance of Rspamd servers. It gathers various metrics including scanned emails, learned messages, spam/ham counts, and actions taken on emails (reject, rewrite, etc.).
+ method_description: |
+ It retrieves statistics from Rspamd's [built-in web server](https://rspamd.com/doc/workers/controller.html) by making HTTP requests to the `/stat` endpoint.
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Rspamd instances running on localhost that are listening on port 11334.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/rspamd.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:11334
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:11334
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:11334
+ username: username
+ password: password
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:11334
+
+ - name: remote
+ url: http://192.0.2.1:11334
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: rspamd.classifications
+ description: Classifications
+ unit: messages/s
+ chart_type: stacked
+ dimensions:
+ - name: ham
+ - name: spam
+ - name: rspamd.actions
+ description: Actions
+ unit: messages/s
+ chart_type: stacked
+ dimensions:
+ - name: reject
+ - name: soft_reject
+ - name: rewrite_subject
+ - name: add_header
+ - name: greylist
+ - name: custom
+ - name: discard
+ - name: quarantine
+ - name: no_action
+ - name: rspamd.scans
+ description: Scanned messages
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: scanned
+ - name: rspamd.learns
+ description: Learned messages
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: learned
+ - name: rspamd.connections
+ description: Connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: rspamd.control_connections
+ description: Control connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: control_connections
diff --git a/src/go/plugin/go.d/modules/rspamd/rspamd.go b/src/go/plugin/go.d/modules/rspamd/rspamd.go
new file mode 100644
index 000000000..0a5c4ffe5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/rspamd.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rspamd
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("rspamd", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Rspamd {
+ return &Rspamd{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:11334",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 1),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Rspamd struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (r *Rspamd) Configuration() any {
+ return r.Config
+}
+
+func (r *Rspamd) Init() error {
+ if r.URL == "" {
+ r.Error("URL not set")
+ return errors.New("url not set")
+ }
+
+ client, err := web.NewHTTPClient(r.Client)
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+ r.httpClient = client
+
+ r.Debugf("using URL %s", r.URL)
+ r.Debugf("using timeout: %s", r.Timeout)
+
+ return nil
+}
+
+func (r *Rspamd) Check() error {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (r *Rspamd) Charts() *module.Charts {
+ return r.charts
+}
+
+func (r *Rspamd) Collect() map[string]int64 {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (r *Rspamd) Cleanup() {
+ if r.httpClient != nil {
+ r.httpClient.CloseIdleConnections()
+ }
+}
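Outside the agent, the module can be driven by hand in the same order the tests below use (New, Init, Check, Collect, Cleanup). A rough usage sketch; the import path is assumed from the repository layout and the metric keys follow charts.go:

```go
package main

import (
	"fmt"
	"log"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/modules/rspamd"
)

func main() {
	rsp := rspamd.New()
	rsp.URL = "http://127.0.0.1:11334" // the default, set explicitly for clarity

	// Init builds the HTTP client, Check does a first collection,
	// Collect returns the flat metric map keyed as in charts.go.
	if err := rsp.Init(); err != nil {
		log.Fatal(err)
	}
	if err := rsp.Check(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(rsp.Collect()["actions_no_action"])

	rsp.Cleanup()
}
```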
diff --git a/src/go/plugin/go.d/modules/rspamd/rspamd_test.go b/src/go/plugin/go.d/modules/rspamd/rspamd_test.go
new file mode 100644
index 000000000..0c8cc8e5b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/rspamd_test.go
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package rspamd
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataV34Stat, _ = os.ReadFile("testdata/v3.4-stat.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataV34Stat": dataV34Stat,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestRspamd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Rspamd{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestRspamd_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rsp := New()
+ rsp.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, rsp.Init())
+ } else {
+ assert.NoError(t, rsp.Init())
+ }
+ })
+ }
+}
+
+func TestRspamd_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestRspamd_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (*Rspamd, func())
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: prepareCaseOk,
+ },
+ "fails on unexpected json response": {
+ wantFail: true,
+ prepare: prepareCaseUnexpectedJsonResponse,
+ },
+ "fails on invalid format response": {
+ wantFail: true,
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rsp, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, rsp.Check())
+ } else {
+ assert.NoError(t, rsp.Check())
+ }
+ })
+ }
+}
+
+func TestRspamd_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (*Rspamd, func())
+ wantMetrics map[string]int64
+ }{
+ "success on valid response": {
+ prepare: prepareCaseOk,
+ wantMetrics: map[string]int64{
+ "actions_add_header": 1,
+ "actions_custom": 0,
+ "actions_discard": 0,
+ "actions_greylist": 1,
+ "actions_invalid_max_action": 0,
+ "actions_no_action": 1,
+ "actions_quarantine": 0,
+ "actions_reject": 1,
+ "actions_rewrite_subject": 1,
+ "actions_soft_reject": 1,
+ "actions_unknown_action": 0,
+ "connections": 1,
+ "control_connections": 117,
+ "ham_count": 1,
+ "learned": 1,
+ "scanned": 1,
+ "spam_count": 1,
+ },
+ },
+ "fails on unexpected json response": {
+ prepare: prepareCaseUnexpectedJsonResponse,
+ },
+ "fails on invalid format response": {
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rsp, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := rsp.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ testMetricsHasAllChartsDims(t, rsp, mx)
+ }
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, rsp *Rspamd, mx map[string]int64) {
+ for _, chart := range *rsp.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareCaseOk(t *testing.T) (*Rspamd, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/stat":
+ _, _ = w.Write(dataV34Stat)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ rsp := New()
+ rsp.URL = srv.URL
+ require.NoError(t, rsp.Init())
+
+ return rsp, srv.Close
+}
+
+func prepareCaseUnexpectedJsonResponse(t *testing.T) (*Rspamd, func()) {
+ t.Helper()
+ resp := `
+{
+ "elephant": {
+ "burn": false,
+ "mountain": true,
+ "fog": false,
+ "skin": -1561907625,
+ "burst": "anyway",
+ "shadow": 1558616893
+ },
+ "start": "ever",
+ "base": 2093056027,
+ "mission": -2007590351,
+ "victory": 999053756,
+ "die": false
+}
+`
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case "/stat":
+ _, _ = w.Write([]byte(resp))
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ rsp := New()
+ rsp.URL = srv.URL
+ require.NoError(t, rsp.Init())
+
+ return rsp, srv.Close
+}
+
+func prepareCaseInvalidFormatResponse(t *testing.T) (*Rspamd, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ rsp := New()
+ rsp.URL = srv.URL
+ require.NoError(t, rsp.Init())
+
+ return rsp, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*Rspamd, func()) {
+ t.Helper()
+ rsp := New()
+ rsp.URL = "http://127.0.0.1:65001/stat"
+ require.NoError(t, rsp.Init())
+
+ return rsp, func() {}
+}
diff --git a/src/go/plugin/go.d/modules/rspamd/testdata/config.json b/src/go/plugin/go.d/modules/rspamd/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/rspamd/testdata/config.yaml b/src/go/plugin/go.d/modules/rspamd/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/rspamd/testdata/v3.4-stat.json b/src/go/plugin/go.d/modules/rspamd/testdata/v3.4-stat.json
new file mode 100644
index 000000000..38145477e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/rspamd/testdata/v3.4-stat.json
@@ -0,0 +1,66 @@
+{
+ "version": "3.4",
+ "config_id": "gkwm3ysiqrx96kj1mwnfashx9hkypj833w1tgjaw4nysgwwxqthh7q78hyrezi9gzamke3n9ea7u8cjrzru7i5p4z7r9xhcoitjpjyy",
+ "uptime": 1774,
+ "read_only": false,
+ "scanned": 1,
+ "learned": 1,
+ "actions": {
+ "reject": 1,
+ "soft reject": 1,
+ "rewrite subject": 1,
+ "add header": 1,
+ "greylist": 1,
+ "no action": 1
+ },
+ "scan_times": [
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null,
+ null
+ ],
+ "spam_count": 1,
+ "ham_count": 1,
+ "connections": 1,
+ "control_connections": 117,
+ "pools_allocated": 184,
+ "pools_freed": 147,
+ "bytes_allocated": 28807460,
+ "chunks_allocated": 282,
+ "shared_chunks_allocated": 4,
+ "chunks_freed": 0,
+ "chunks_oversized": 2,
+ "fragmented": 0,
+ "total_learns": 0,
+ "statfiles": [],
+ "fuzzy_hashes": {
+ "rspamd.com": 446607461
+ }
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/README.md b/src/go/plugin/go.d/modules/scaleio/README.md
new file mode 120000
index 000000000..1836d2805
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/README.md
@@ -0,0 +1 @@
+integrations/dell_emc_scaleio.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/scaleio/charts.go b/src/go/plugin/go.d/modules/scaleio/charts.go
new file mode 100644
index 000000000..9efd52c77
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/charts.go
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scaleio
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio/client"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+type (
+ // Charts is an alias for module.Charts.
+ Charts = module.Charts
+ // Dims is an alias for module.Dims.
+ Dims = module.Dims
+ // Vars is an alias for module.Vars.
+ Vars = module.Vars
+)
+
+var (
+ prioStoragePool = module.Priority + len(systemCharts) + 10
+ prioSdc = prioStoragePool + len(storagePoolCharts) + 10
+)
+
+var systemCharts = Charts{
+ // Capacity
+ {
+ ID: "system_capacity_total",
+ Title: "Total Capacity",
+ Units: "KiB",
+ Fam: "capacity",
+ Ctx: "scaleio.system_capacity_total",
+ Dims: Dims{
+ {ID: "system_capacity_max_capacity", Name: "total"},
+ },
+ },
+ {
+ ID: "system_capacity_in_use",
+ Title: "Capacity In Use",
+ Units: "KiB",
+ Fam: "capacity",
+ Ctx: "scaleio.system_capacity_in_use",
+ Dims: Dims{
+ {ID: "system_capacity_in_use", Name: "in_use"},
+ },
+ },
+ {
+ ID: "system_capacity_usage",
+ Title: "Capacity Usage",
+ Units: "KiB",
+ Fam: "capacity",
+ Type: module.Stacked,
+ Ctx: "scaleio.system_capacity_usage",
+ Dims: Dims{
+ {ID: "system_capacity_thick_in_use", Name: "thick"},
+ {ID: "system_capacity_decreased", Name: "decreased"},
+ {ID: "system_capacity_thin_in_use", Name: "thin"},
+ {ID: "system_capacity_snapshot", Name: "snapshot"},
+ {ID: "system_capacity_spare", Name: "spare"},
+ {ID: "system_capacity_unused", Name: "unused"},
+ },
+ },
+ {
+ ID: "system_capacity_available_volume_allocation",
+ Title: "Available For Volume Allocation",
+ Units: "KiB",
+ Fam: "capacity",
+ Ctx: "scaleio.system_capacity_available_volume_allocation",
+ Dims: Dims{
+ {ID: "system_capacity_available_for_volume_allocation", Name: "available"},
+ },
+ },
+ {
+ ID: "system_capacity_health_state",
+ Title: "Capacity Health State",
+ Units: "KiB",
+ Fam: "health",
+ Type: module.Stacked,
+ Ctx: "scaleio.system_capacity_health_state",
+ Dims: Dims{
+ {ID: "system_capacity_protected", Name: "protected"},
+ {ID: "system_capacity_degraded", Name: "degraded"},
+ {ID: "system_capacity_in_maintenance", Name: "in_maintenance"},
+ {ID: "system_capacity_failed", Name: "failed"},
+ {ID: "system_capacity_unreachable_unused", Name: "unavailable"},
+ },
+ },
+ // I/O Workload BW
+ {
+ ID: "system_workload_primary_bandwidth_total",
+ Title: "Primary Backend Bandwidth Total (Read and Write)",
+ Units: "KiB/s",
+ Fam: "workload",
+ Ctx: "scaleio.system_workload_primary_bandwidth_total",
+ Dims: Dims{
+ {ID: "system_backend_primary_bandwidth_read_write", Name: "total", Div: 1000},
+ },
+ },
+ {
+ ID: "system_workload_primary_bandwidth",
+ Title: "Primary Backend Bandwidth",
+ Units: "KiB/s",
+ Fam: "workload",
+ Ctx: "scaleio.system_workload_primary_bandwidth",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "system_backend_primary_bandwidth_read", Name: "read", Div: 1000},
+ {ID: "system_backend_primary_bandwidth_write", Name: "write", Mul: -1, Div: 1000},
+ },
+ },
+ // I/O Workload IOPS
+ {
+ ID: "system_workload_primary_iops_total",
+ Title: "Primary Backend IOPS Total (Read and Write)",
+ Units: "iops/s",
+ Fam: "workload",
+ Ctx: "scaleio.system_workload_primary_iops_total",
+ Dims: Dims{
+ {ID: "system_backend_primary_iops_read_write", Name: "total", Div: 1000},
+ },
+ },
+ {
+ ID: "system_workload_primary_iops",
+ Title: "Primary Backend IOPS",
+ Units: "iops/s",
+ Fam: "workload",
+ Ctx: "scaleio.system_workload_primary_iops",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "system_backend_primary_iops_read", Name: "read", Div: 1000},
+ {ID: "system_backend_primary_iops_write", Name: "write", Mul: -1, Div: 1000},
+ },
+ },
+ {
+ ID: "system_workload_primary_io_size_total",
+ Title: "Primary Backend I/O Size Total (Read and Write)",
+ Units: "KiB",
+ Fam: "workload",
+ Ctx: "scaleio.system_workload_primary_io_size_total",
+ Dims: Dims{
+ {ID: "system_backend_primary_io_size_read_write", Name: "io_size", Div: 1000},
+ },
+ },
+ // Rebalance
+ {
+ ID: "system_rebalance",
+ Title: "Rebalance",
+ Units: "KiB/s",
+ Fam: "rebalance",
+ Type: module.Area,
+ Ctx: "scaleio.system_rebalance",
+ Dims: Dims{
+ {ID: "system_rebalance_bandwidth_read", Name: "read", Div: 1000},
+ {ID: "system_rebalance_bandwidth_write", Name: "write", Mul: -1, Div: 1000},
+ },
+ },
+ {
+ ID: "system_rebalance_left",
+ Title: "Rebalance Pending Capacity",
+ Units: "KiB",
+ Fam: "rebalance",
+ Ctx: "scaleio.system_rebalance_left",
+ Dims: Dims{
+ {ID: "system_rebalance_pending_capacity_in_Kb", Name: "left"},
+ },
+ },
+ {
+ ID: "system_rebalance_time_until_finish",
+ Title: "Rebalance Approximate Time Until Finish",
+ Units: "seconds",
+ Fam: "rebalance",
+ Ctx: "scaleio.system_rebalance_time_until_finish",
+ Dims: Dims{
+ {ID: "system_rebalance_time_until_finish", Name: "time"},
+ },
+ },
+ // Rebuild
+ {
+ ID: "system_rebuild",
+ Title: "Rebuild Bandwidth Total (Forward, Backward and Normal)",
+ Units: "KiB/s",
+ Fam: "rebuild",
+ Ctx: "scaleio.system_rebuild",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "system_rebuild_total_bandwidth_read", Name: "read", Div: 1000},
+ {ID: "system_rebuild_total_bandwidth_write", Name: "write", Mul: -1, Div: 1000},
+ },
+ },
+ {
+ ID: "system_rebuild_left",
+ Title: "Rebuild Pending Capacity Total (Forward, Backward and Normal)",
+ Units: "KiB",
+ Fam: "rebuild",
+ Ctx: "scaleio.system_rebuild_left",
+ Dims: Dims{
+ {ID: "system_rebuild_total_pending_capacity_in_Kb", Name: "left"},
+ },
+ },
+ // Components
+ {
+ ID: "system_defined_components",
+ Title: "Components",
+ Units: "components",
+ Fam: "components",
+ Ctx: "scaleio.system_defined_components",
+ Dims: Dims{
+ {ID: "system_num_of_devices", Name: "devices"},
+ {ID: "system_num_of_fault_sets", Name: "fault_sets"},
+ {ID: "system_num_of_protection_domains", Name: "protection_domains"},
+ {ID: "system_num_of_rfcache_devices", Name: "rfcache_devices"},
+ {ID: "system_num_of_sdc", Name: "sdc"},
+ {ID: "system_num_of_sds", Name: "sds"},
+ {ID: "system_num_of_snapshots", Name: "snapshots"},
+ {ID: "system_num_of_storage_pools", Name: "storage_pools"},
+ {ID: "system_num_of_volumes", Name: "volumes"},
+ {ID: "system_num_of_vtrees", Name: "vtrees"},
+ },
+ },
+ {
+ ID: "system_components_volumes_by_type",
+ Title: "Volumes By Type",
+ Units: "volumes",
+ Fam: "components",
+ Ctx: "scaleio.system_components_volumes_by_type",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "system_num_of_thick_base_volumes", Name: "thick"},
+ {ID: "system_num_of_thin_base_volumes", Name: "thin"},
+ },
+ },
+ {
+ ID: "system_components_volumes_by_mapping",
+ Title: "Volumes By Mapping",
+ Units: "volumes",
+ Fam: "components",
+ Ctx: "scaleio.system_components_volumes_by_mapping",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "system_num_of_mapped_volumes", Name: "mapped"},
+ {ID: "system_num_of_unmapped_volumes", Name: "unmapped"},
+ },
+ },
+}
+
+var storagePoolCharts = Charts{
+ {
+ ID: "storage_pool_%s_capacity_total",
+ Title: "Total Capacity",
+ Units: "KiB",
+ Fam: "pool %s",
+ Ctx: "scaleio.storage_pool_capacity_total",
+ Dims: Dims{
+ {ID: "storage_pool_%s_capacity_max_capacity", Name: "total"},
+ },
+ },
+ {
+ ID: "storage_pool_%s_capacity_in_use",
+ Title: "Capacity In Use",
+ Units: "KiB",
+ Fam: "pool %s",
+ Ctx: "scaleio.storage_pool_capacity_in_use",
+ Dims: Dims{
+ {ID: "storage_pool_%s_capacity_in_use", Name: "in_use"},
+ },
+ },
+ {
+ ID: "storage_pool_%s_capacity_usage",
+ Title: "Capacity Usage",
+ Units: "KiB",
+ Fam: "pool %s",
+ Type: module.Stacked,
+ Ctx: "scaleio.storage_pool_capacity_usage",
+ Dims: Dims{
+ {ID: "storage_pool_%s_capacity_thick_in_use", Name: "thick"},
+ {ID: "storage_pool_%s_capacity_decreased", Name: "decreased"},
+ {ID: "storage_pool_%s_capacity_thin_in_use", Name: "thin"},
+ {ID: "storage_pool_%s_capacity_snapshot", Name: "snapshot"},
+ {ID: "storage_pool_%s_capacity_spare", Name: "spare"},
+ {ID: "storage_pool_%s_capacity_unused", Name: "unused"},
+ },
+ },
+ {
+ ID: "storage_pool_%s_capacity_utilization",
+ Title: "Capacity Utilization",
+ Units: "percentage",
+ Fam: "pool %s",
+ Ctx: "scaleio.storage_pool_capacity_utilization",
+ Dims: Dims{
+ {ID: "storage_pool_%s_capacity_utilization", Name: "used", Div: 100},
+ },
+ Vars: Vars{
+ {ID: "storage_pool_%s_capacity_alert_high_threshold"},
+ {ID: "storage_pool_%s_capacity_alert_critical_threshold"},
+ },
+ },
+ {
+ ID: "storage_pool_%s_capacity_available_volume_allocation",
+ Title: "Available For Volume Allocation",
+ Units: "KiB",
+ Fam: "pool %s",
+ Ctx: "scaleio.storage_pool_capacity_available_volume_allocation",
+ Dims: Dims{
+ {ID: "storage_pool_%s_capacity_available_for_volume_allocation", Name: "available"},
+ },
+ },
+ {
+ ID: "storage_pool_%s_capacity_health_state",
+ Title: "Capacity Health State",
+ Units: "KiB",
+ Fam: "pool %s",
+ Type: module.Stacked,
+ Ctx: "scaleio.storage_pool_capacity_health_state",
+ Dims: Dims{
+ {ID: "storage_pool_%s_capacity_protected", Name: "protected"},
+ {ID: "storage_pool_%s_capacity_degraded", Name: "degraded"},
+ {ID: "storage_pool_%s_capacity_in_maintenance", Name: "in_maintenance"},
+ {ID: "storage_pool_%s_capacity_failed", Name: "failed"},
+ {ID: "storage_pool_%s_capacity_unreachable_unused", Name: "unavailable"},
+ },
+ },
+ {
+ ID: "storage_pool_%s_components",
+ Title: "Components",
+ Units: "components",
+ Fam: "pool %s",
+ Ctx: "scaleio.storage_pool_components",
+ Dims: Dims{
+ {ID: "storage_pool_%s_num_of_devices", Name: "devices"},
+ {ID: "storage_pool_%s_num_of_snapshots", Name: "snapshots"},
+ {ID: "storage_pool_%s_num_of_volumes", Name: "volumes"},
+ {ID: "storage_pool_%s_num_of_vtrees", Name: "vtrees"},
+ },
+ },
+}
+
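+// newStoragePoolCharts instantiates the storage pool chart templates, substituting the pool ID
+// into chart, dimension and variable IDs and the pool name into the chart family.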
+func newStoragePoolCharts(pool client.StoragePool) *Charts {
+ charts := storagePoolCharts.Copy()
+ for i, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, pool.ID)
+ chart.Fam = fmt.Sprintf(chart.Fam, pool.Name)
+ chart.Priority = prioStoragePool + i
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, pool.ID)
+ }
+ for _, v := range chart.Vars {
+ v.ID = fmt.Sprintf(v.ID, pool.ID)
+ }
+ }
+ return charts
+}
+
+var sdcCharts = Charts{
+ {
+ ID: "sdc_%s_mdm_connection_state",
+ Title: "MDM Connection State",
+ Units: "boolean",
+ Fam: "sdc %s",
+ Ctx: "scaleio.sdc_mdm_connection_state",
+ Dims: Dims{
+ {ID: "sdc_%s_mdm_connection_state", Name: "connected"},
+ },
+ },
+ {
+ ID: "sdc_%s_bandwidth",
+ Title: "Bandwidth",
+ Units: "KiB/s",
+ Fam: "sdc %s",
+ Ctx: "scaleio.sdc_bandwidth",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "sdc_%s_bandwidth_read", Name: "read", Div: 1000},
+ {ID: "sdc_%s_bandwidth_write", Name: "write", Mul: -1, Div: 1000},
+ },
+ },
+ {
+ ID: "sdc_%s_iops",
+ Title: "IOPS",
+ Units: "iops/s",
+ Fam: "sdc %s",
+ Ctx: "scaleio.sdc_iops",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "sdc_%s_iops_read", Name: "read", Div: 1000},
+ {ID: "sdc_%s_iops_write", Name: "write", Mul: -1, Div: 1000},
+ },
+ },
+ {
+ ID: "sdc_%s_io_size",
+ Title: "I/O Size",
+ Units: "KiB",
+ Fam: "sdc %s",
+ Ctx: "scaleio.sdc_io_size",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "sdc_%s_io_size_read", Name: "read", Div: 1000},
+ {ID: "sdc_%s_io_size_write", Name: "write", Mul: -1, Div: 1000},
+ },
+ },
+ {
+ ID: "sdc_%s_num_of_mapped_volumed",
+ Title: "Mapped Volumes",
+ Units: "volumes",
+ Fam: "sdc %s",
+ Ctx: "scaleio.sdc_num_of_mapped_volumed",
+ Dims: Dims{
+ {ID: "sdc_%s_num_of_mapped_volumes", Name: "mapped"},
+ },
+ },
+}
+
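+// newSdcCharts instantiates the SDC chart templates, substituting the SDC ID into chart and
+// dimension IDs and the SDC IP into the chart family.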
+func newSdcCharts(sdc client.Sdc) *Charts {
+ charts := sdcCharts.Copy()
+ for i, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, sdc.ID)
+ chart.Fam = fmt.Sprintf(chart.Fam, sdc.SdcIp)
+ chart.Priority = prioSdc + i
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, sdc.ID)
+ }
+ }
+ return charts
+}
+
+// TODO: remove stale charts?
+func (s *ScaleIO) updateCharts() {
+ s.updateStoragePoolCharts()
+ s.updateSdcCharts()
+}
+
+func (s *ScaleIO) updateStoragePoolCharts() {
+ for _, pool := range s.discovered.pool {
+ if s.charted[pool.ID] {
+ continue
+ }
+ s.charted[pool.ID] = true
+ s.addStoragePoolCharts(pool)
+ }
+}
+
+func (s *ScaleIO) updateSdcCharts() {
+ for _, sdc := range s.discovered.sdc {
+ if s.charted[sdc.ID] {
+ continue
+ }
+ s.charted[sdc.ID] = true
+ s.addSdcCharts(sdc)
+ }
+}
+
+func (s *ScaleIO) addStoragePoolCharts(pool client.StoragePool) {
+ charts := newStoragePoolCharts(pool)
+ if err := s.Charts().Add(*charts...); err != nil {
+ s.Warningf("couldn't add charts for storage pool '%s(%s)': %v", pool.ID, pool.Name, err)
+ }
+}
+
+func (s *ScaleIO) addSdcCharts(sdc client.Sdc) {
+ charts := newSdcCharts(sdc)
+ if err := s.Charts().Add(*charts...); err != nil {
+ s.Warningf("couldn't add charts for sdc '%s(%s)': %v", sdc.ID, sdc.SdcIp, err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/client/client.go b/src/go/plugin/go.d/modules/scaleio/client/client.go
new file mode 100644
index 000000000..698b2d174
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/client/client.go
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+/*
+The REST API is served from the VxFlex OS Gateway.
+The Gateway connects to a single MDM and serves requests by querying the MDM
+and reformatting the answers it receives from the MDM in a RESTful manner.
+The Gateway is stateless. It requires the MDM username and password for the login request.
+The login returns a token in the response, which is used to authenticate subsequent requests.
+
+The token is valid for 8 hours from the time it was created, unless there has been no activity
+for 10 minutes, or if the client has sent a logout request.
+
+General URI:
+- /api/login
+- /api/logout
+- /api/version
+- /api/instances/ // GET all instances
+- /api/types/{type}/instances // POST (create) / GET all objects for a given type
+- /api/instances/{type::id} // GET by ID
+- /api/instances/{type::id}/relationships/{Relationship name} // GET
+- /api/instances/querySelectedStatistics // POST Query selected statistics
+- /api/instances/{type::id}/action/{actionName} // POST a special action on an object
+- /api/types/{type}/instances/action/{actionName} // POST a special action on a given type
+
+Types:
+- System
+- Sds
+- StoragePool
+- ProtectionDomain
+- Device
+- Volume
+- VTree
+- Sdc
+- User
+- FaultSet
+- RfcacheDevice
+- Alerts
+
+Actions:
+- querySelectedStatistics // All types except Alarm and User
+- querySystemLimits // System
+- queryDisconnectedSdss // Sds
+- querySdsNetworkLatencyMeters // Sds
+- queryFailedDevices" // Device. Note: works strange!
+
+Relationships:
+- Statistics // All types except Alarm and User
+- ProtectionDomain // System
+- Sdc // System
+- User // System
+- StoragePool // ProtectionDomain
+- FaultSet // ProtectionDomain
+- Sds // ProtectionDomain
+- RfcacheDevice // Sds
+- Device // Sds, StoragePool
+- Volume // Sdc, StoragePool
+- VTree // StoragePool
+*/
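+
+// An illustrative usage sketch (the URL, credentials and the property name below are placeholders):
+//
+//	client, err := New(web.Client{}, web.Request{URL: "https://gateway", Username: "admin", Password: "password"})
+//	if err != nil { /* handle error */ }
+//	if err := client.Login(); err != nil { /* handle error */ }
+//	defer func() { _ = client.Logout() }()
+//
+//	stats, err := client.SelectedStatistics(SelectedStatisticsQuery{
+//		List: []SelectedObject{{Type: "System", Properties: []string{"..."}}},
+//	})
+//
+// Requests made with an expired token are re-authenticated and retried once (see doJSONWithRetry).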
+
+// New creates a new ScaleIO client.
+func New(client web.Client, request web.Request) (*Client, error) {
+ httpClient, err := web.NewHTTPClient(client)
+ if err != nil {
+ return nil, err
+ }
+ return &Client{
+ Request: request,
+ httpClient: httpClient,
+ token: newToken(),
+ }, nil
+}
+
+// Client represents ScaleIO client.
+type Client struct {
+ Request web.Request
+ httpClient *http.Client
+ token *token
+}
+
+// LoggedIn reports whether the client is logged in.
+func (c *Client) LoggedIn() bool {
+ return c.token.isSet()
+}
+
+// Login connects to the VxFlex Gateway and obtains the token used to authenticate subsequent requests.
+func (c *Client) Login() error {
+ if c.LoggedIn() {
+ _ = c.Logout()
+ }
+ req := c.createLoginRequest()
+ resp, err := c.doOK(req)
+ defer closeBody(resp)
+ if err != nil {
+ return err
+ }
+
+ token, err := decodeToken(resp.Body)
+ if err != nil {
+ return err
+ }
+
+ c.token.set(token)
+ return nil
+}
+
+// Logout sends logout request and unsets token.
+func (c *Client) Logout() error {
+ if !c.LoggedIn() {
+ return nil
+ }
+ req := c.createLogoutRequest()
+ c.token.unset()
+
+ resp, err := c.do(req)
+ defer closeBody(resp)
+ return err
+}
+
+// APIVersion returns the VxFlex Gateway API version.
+func (c *Client) APIVersion() (Version, error) {
+ req := c.createAPIVersionRequest()
+ resp, err := c.doOK(req)
+ defer closeBody(resp)
+ if err != nil {
+ return Version{}, err
+ }
+ return decodeVersion(resp.Body)
+}
+
+// SelectedStatistics returns selected statistics.
+func (c *Client) SelectedStatistics(query SelectedStatisticsQuery) (SelectedStatistics, error) {
+ b, _ := json.Marshal(query)
+ req := c.createSelectedStatisticsRequest(b)
+ var stats SelectedStatistics
+ err := c.doJSONWithRetry(&stats, req)
+ return stats, err
+}
+
+// Instances returns all instances.
+func (c *Client) Instances() (Instances, error) {
+ req := c.createInstancesRequest()
+ var instances Instances
+ err := c.doJSONWithRetry(&instances, req)
+ return instances, err
+}
+
+func (c *Client) createLoginRequest() web.Request {
+ req := c.Request.Copy()
+ u, _ := url.Parse(req.URL)
+ u.Path = path.Join(u.Path, "/api/login")
+ req.URL = u.String()
+ return req
+}
+
+func (c *Client) createLogoutRequest() web.Request {
+ req := c.Request.Copy()
+ u, _ := url.Parse(req.URL)
+ u.Path = path.Join(u.Path, "/api/logout")
+ req.URL = u.String()
+ req.Password = c.token.get()
+ return req
+}
+
+func (c *Client) createAPIVersionRequest() web.Request {
+ req := c.Request.Copy()
+ u, _ := url.Parse(req.URL)
+ u.Path = path.Join(u.Path, "/api/version")
+ req.URL = u.String()
+ req.Password = c.token.get()
+ return req
+}
+
+func (c *Client) createSelectedStatisticsRequest(query []byte) web.Request {
+ req := c.Request.Copy()
+ u, _ := url.Parse(req.URL)
+ u.Path = path.Join(u.Path, "/api/instances/querySelectedStatistics")
+ req.URL = u.String()
+ req.Password = c.token.get()
+ req.Method = http.MethodPost
+ req.Headers = map[string]string{
+ "Content-Type": "application/json",
+ }
+ req.Body = string(query)
+ return req
+}
+
+func (c *Client) createInstancesRequest() web.Request {
+ req := c.Request.Copy()
+ u, _ := url.Parse(req.URL)
+ u.Path = path.Join(u.Path, "/api/instances")
+ req.URL = u.String()
+ req.Password = c.token.get()
+ return req
+}
+
+func (c *Client) do(req web.Request) (*http.Response, error) {
+ httpReq, err := web.NewHTTPRequest(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating http request to %s: %v", req.URL, err)
+ }
+ return c.httpClient.Do(httpReq)
+}
+
+func (c *Client) doOK(req web.Request) (*http.Response, error) {
+ resp, err := c.do(req)
+ if err != nil {
+ return nil, err
+ }
+ if err = checkStatusCode(resp); err != nil {
+ err = fmt.Errorf("%s returned %v", req.URL, err)
+ }
+ return resp, err
+}
+
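+// doOKWithRetry performs the request and, on a 401 Unauthorized response, logs in again and retries once with the refreshed token.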
+func (c *Client) doOKWithRetry(req web.Request) (*http.Response, error) {
+ resp, err := c.do(req)
+ if err != nil {
+ return nil, err
+ }
+ if resp.StatusCode == http.StatusUnauthorized {
+ if err = c.Login(); err != nil {
+ return resp, err
+ }
+ req.Password = c.token.get()
+ return c.doOK(req)
+ }
+ if err = checkStatusCode(resp); err != nil {
+ err = fmt.Errorf("%s returned %v", req.URL, err)
+ }
+ return resp, err
+}
+
+func (c *Client) doJSONWithRetry(dst interface{}, req web.Request) error {
+ resp, err := c.doOKWithRetry(req)
+ defer closeBody(resp)
+ if err != nil {
+ return err
+ }
+ return json.NewDecoder(resp.Body).Decode(dst)
+}
+
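+// closeBody drains and closes the response body so the underlying connection can be reused.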
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
+func checkStatusCode(resp *http.Response) error {
+ // For all 4xx and 5xx return codes, the body may contain an apiError
+ // instance with more specifics about the failure.
+ if resp.StatusCode >= 400 {
+ e := error(&apiError{})
+ if err := json.NewDecoder(resp.Body).Decode(e); err != nil {
+ e = err
+ }
+ return fmt.Errorf("HTTP status code %d : %v", resp.StatusCode, e)
+ }
+
+ // 200(OK), 201(Created), 202(Accepted), 204 (No Content).
+ if resp.StatusCode < 200 || resp.StatusCode > 299 {
+ return fmt.Errorf("HTTP status code %d", resp.StatusCode)
+ }
+ return nil
+}
+
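+// decodeVersion parses the Gateway version response (e.g. "2.5") into its major and minor parts.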
+func decodeVersion(reader io.Reader) (ver Version, err error) {
+ bs, err := io.ReadAll(reader)
+ if err != nil {
+ return ver, err
+ }
+ parts := strings.Split(strings.Trim(string(bs), "\n "), ".")
+ if len(parts) != 2 {
+ return ver, fmt.Errorf("can't parse: %s", string(bs))
+ }
+ if ver.Major, err = strconv.ParseInt(parts[0], 10, 64); err != nil {
+ return ver, err
+ }
+ ver.Minor, err = strconv.ParseInt(parts[1], 10, 64)
+ return ver, err
+}
+
+func decodeToken(reader io.Reader) (string, error) {
+ bs, err := io.ReadAll(reader)
+ if err != nil {
+ return "", err
+ }
+ return strings.Trim(string(bs), `"`), nil
+}
+
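+// token is a concurrency-safe holder for the Gateway authentication token.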
+type token struct {
+ mux *sync.RWMutex
+ value string
+}
+
+func newToken() *token { return &token{mux: &sync.RWMutex{}} }
+func (t *token) get() string { t.mux.RLock(); defer t.mux.RUnlock(); return t.value }
+func (t *token) set(v string) { t.mux.Lock(); defer t.mux.Unlock(); t.value = v }
+func (t *token) unset() { t.set("") }
+func (t *token) isSet() bool { return t.get() != "" }
diff --git a/src/go/plugin/go.d/modules/scaleio/client/client_test.go b/src/go/plugin/go.d/modules/scaleio/client/client_test.go
new file mode 100644
index 000000000..02e1988b0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/client/client_test.go
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "net/http/httptest"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNew(t *testing.T) {
+ _, err := New(web.Client{}, web.Request{})
+ assert.NoError(t, err)
+}
+
+func TestClient_Login(t *testing.T) {
+ srv, client := prepareSrvClient(t)
+ defer srv.Close()
+
+ assert.NoError(t, client.Login())
+ assert.Equal(t, testToken, client.token.get())
+}
+
+func TestClient_Logout(t *testing.T) {
+ srv, client := prepareSrvClient(t)
+ defer srv.Close()
+
+ require.NoError(t, client.Login())
+
+ assert.NoError(t, client.Logout())
+ assert.False(t, client.token.isSet())
+}
+
+func TestClient_LoggedIn(t *testing.T) {
+ srv, client := prepareSrvClient(t)
+ defer srv.Close()
+
+ assert.False(t, client.LoggedIn())
+ assert.NoError(t, client.Login())
+ assert.True(t, client.LoggedIn())
+}
+
+func TestClient_APIVersion(t *testing.T) {
+ srv, client := prepareSrvClient(t)
+ defer srv.Close()
+
+ err := client.Login()
+ require.NoError(t, err)
+
+ version, err := client.APIVersion()
+ assert.NoError(t, err)
+ assert.Equal(t, Version{Major: 2, Minor: 5}, version)
+}
+
+func TestClient_Instances(t *testing.T) {
+ srv, client := prepareSrvClient(t)
+ defer srv.Close()
+
+ err := client.Login()
+ require.NoError(t, err)
+
+ instances, err := client.Instances()
+ assert.NoError(t, err)
+ assert.Equal(t, testInstances, instances)
+}
+
+func TestClient_Instances_RetryOnExpiredToken(t *testing.T) {
+ srv, client := prepareSrvClient(t)
+ defer srv.Close()
+
+ instances, err := client.Instances()
+ assert.NoError(t, err)
+ assert.Equal(t, testInstances, instances)
+}
+
+func TestClient_SelectedStatistics(t *testing.T) {
+ srv, client := prepareSrvClient(t)
+ defer srv.Close()
+
+ err := client.Login()
+ require.NoError(t, err)
+
+ stats, err := client.SelectedStatistics(SelectedStatisticsQuery{})
+ assert.NoError(t, err)
+ assert.Equal(t, testStatistics, stats)
+}
+
+func TestClient_SelectedStatistics_RetryOnExpiredToken(t *testing.T) {
+ srv, client := prepareSrvClient(t)
+ defer srv.Close()
+
+ stats, err := client.SelectedStatistics(SelectedStatisticsQuery{})
+ assert.NoError(t, err)
+ assert.Equal(t, testStatistics, stats)
+}
+
+func prepareSrvClient(t *testing.T) (*httptest.Server, *Client) {
+ t.Helper()
+ srv := httptest.NewServer(MockScaleIOAPIServer{
+ User: testUser,
+ Password: testPassword,
+ Version: testVersion,
+ Token: testToken,
+ Instances: testInstances,
+ Statistics: testStatistics,
+ })
+ client, err := New(web.Client{}, web.Request{
+ URL: srv.URL,
+ Username: testUser,
+ Password: testPassword,
+ })
+ assert.NoError(t, err)
+ return srv, client
+}
+
+var (
+ testUser = "user"
+ testPassword = "password"
+ testVersion = "2.5"
+ testToken = "token"
+ testInstances = Instances{
+ StoragePoolList: []StoragePool{
+ {ID: "id1", Name: "Marketing", SparePercentage: 10},
+ {ID: "id2", Name: "Finance", SparePercentage: 10},
+ },
+ SdcList: []Sdc{
+ {ID: "id1", SdcIp: "10.0.0.1", MdmConnectionState: "Connected"},
+ {ID: "id2", SdcIp: "10.0.0.2", MdmConnectionState: "Connected"},
+ },
+ }
+ testStatistics = SelectedStatistics{
+ System: SystemStatistics{NumOfDevices: 1},
+ Sdc: map[string]SdcStatistics{"id1": {}, "id2": {}},
+ StoragePool: map[string]StoragePoolStatistics{"id1": {}, "id2": {}},
+ }
+)
diff --git a/src/go/plugin/go.d/modules/scaleio/client/server.go b/src/go/plugin/go.d/modules/scaleio/client/server.go
new file mode 100644
index 000000000..b7269d339
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/client/server.go
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+)
+
+// MockScaleIOAPIServer represents a mock VxFlex OS Gateway.
+type MockScaleIOAPIServer struct {
+ User string
+ Password string
+ Token string
+ Version string
+ Instances Instances
+ Statistics SelectedStatistics
+}
+
+func (s MockScaleIOAPIServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ msg := fmt.Sprintf("unknown URL path: %s", r.URL.Path)
+ writeAPIError(w, msg)
+ case "/api/login":
+ s.handleLogin(w, r)
+ case "/api/logout":
+ s.handleLogout(w, r)
+ case "/api/version":
+ s.handleVersion(w, r)
+ case "/api/instances":
+ s.handleInstances(w, r)
+ case "/api/instances/querySelectedStatistics":
+ s.handleQuerySelectedStatistics(w, r)
+ }
+}
+
+func (s MockScaleIOAPIServer) handleLogin(w http.ResponseWriter, r *http.Request) {
+ if user, pass, ok := r.BasicAuth(); !ok || user != s.User || pass != s.Password {
+ w.WriteHeader(http.StatusUnauthorized)
+ msg := fmt.Sprintf("user got/expected: %s/%s, pass got/expected: %s/%s", user, s.User, pass, s.Password)
+ writeAPIError(w, msg)
+ return
+ }
+ if r.Method != http.MethodGet {
+ w.WriteHeader(http.StatusBadRequest)
+ msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodGet)
+ writeAPIError(w, msg)
+ return
+ }
+ _, _ = w.Write([]byte(s.Token))
+}
+
+func (s MockScaleIOAPIServer) handleLogout(w http.ResponseWriter, r *http.Request) {
+ if _, pass, ok := r.BasicAuth(); !ok || pass != s.Token {
+ w.WriteHeader(http.StatusUnauthorized)
+ msg := fmt.Sprintf("token got/expected: %s/%s", pass, s.Token)
+ writeAPIError(w, msg)
+ return
+ }
+ if r.Method != http.MethodGet {
+ w.WriteHeader(http.StatusBadRequest)
+ msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodGet)
+ writeAPIError(w, msg)
+ return
+ }
+}
+
+func (s MockScaleIOAPIServer) handleVersion(w http.ResponseWriter, r *http.Request) {
+ if _, pass, ok := r.BasicAuth(); !ok || pass != s.Token {
+ w.WriteHeader(http.StatusUnauthorized)
+ msg := fmt.Sprintf("token got/expected: %s/%s", pass, s.Token)
+ writeAPIError(w, msg)
+ return
+ }
+ if r.Method != http.MethodGet {
+ w.WriteHeader(http.StatusBadRequest)
+ msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodGet)
+ writeAPIError(w, msg)
+ return
+ }
+ _, _ = w.Write([]byte(s.Version))
+}
+
+func (s MockScaleIOAPIServer) handleInstances(w http.ResponseWriter, r *http.Request) {
+ if _, pass, ok := r.BasicAuth(); !ok || pass != s.Token {
+ w.WriteHeader(http.StatusUnauthorized)
+ msg := fmt.Sprintf("token got/expected: %s/%s", pass, s.Token)
+ writeAPIError(w, msg)
+ return
+ }
+ if r.Method != http.MethodGet {
+ w.WriteHeader(http.StatusBadRequest)
+ msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodGet)
+ writeAPIError(w, msg)
+ return
+ }
+ b, err := json.Marshal(s.Instances)
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ msg := fmt.Sprintf("marshal Instances: %v", err)
+ writeAPIError(w, msg)
+ return
+ }
+ _, _ = w.Write(b)
+}
+
+func (s MockScaleIOAPIServer) handleQuerySelectedStatistics(w http.ResponseWriter, r *http.Request) {
+ if _, pass, ok := r.BasicAuth(); !ok || pass != s.Token {
+ w.WriteHeader(http.StatusUnauthorized)
+ msg := fmt.Sprintf("token got/expected: %s/%s", pass, s.Token)
+ writeAPIError(w, msg)
+ return
+ }
+ if r.Method != http.MethodPost {
+ w.WriteHeader(http.StatusBadRequest)
+ msg := fmt.Sprintf("wrong method: '%s', expected '%s'", r.Method, http.MethodPost)
+ writeAPIError(w, msg)
+ return
+ }
+ if r.Header.Get("Content-Type") != "application/json" {
+ w.WriteHeader(http.StatusBadRequest)
+ writeAPIError(w, "no \"Content-Type: application/json\" in the header")
+ return
+ }
+ if err := json.NewDecoder(r.Body).Decode(&SelectedStatisticsQuery{}); err != nil {
+ w.WriteHeader(http.StatusBadRequest)
+ msg := fmt.Sprintf("body decode error: %v", err)
+ writeAPIError(w, msg)
+ return
+ }
+ b, err := json.Marshal(s.Statistics)
+ if err != nil {
+ w.WriteHeader(http.StatusInternalServerError)
+ msg := fmt.Sprintf("marshal SelectedStatistics: %v", err)
+ writeAPIError(w, msg)
+ return
+ }
+ _, _ = w.Write(b)
+}
+
+func writeAPIError(w io.Writer, msg string) {
+ err := apiError{Message: msg}
+ b, _ := json.Marshal(err)
+ _, _ = w.Write(b)
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/client/types.go b/src/go/plugin/go.d/modules/scaleio/client/types.go
new file mode 100644
index 000000000..c85bddf8d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/client/types.go
@@ -0,0 +1,1096 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+// https://github.com/dell/goscaleio/blob/master/types/v1/types.go
+
+// For all 4xx and 5xx return codes, the body may contain an apiError instance
+// with more specifics about the failure.
+type apiError struct {
+ Message string
+ HTTPStatusCode int
+ ErrorCode int
+}
+
+func (e apiError) Error() string {
+ return e.Message
+}
+
+// Version represents API version.
+type Version struct {
+ Major int64
+ Minor int64
+}
+
+// Bwc represents a bandwidth counter (number of occurrences, seconds, total weight in KiB).
+type Bwc struct {
+ NumOccured int64
+ NumSeconds int64
+ TotalWeightInKb int64
+}
+
+// Sdc represents ScaleIO Data Client.
+type Sdc struct {
+ ID string
+ SdcIp string
+ MdmConnectionState string
+}
+
+// StoragePool represents ScaleIO Storage Pool.
+type StoragePool struct {
+ ID string
+ Name string
+ SparePercentage int64
+ CapacityAlertCriticalThreshold int64
+ CapacityAlertHighThreshold int64
+}
+
+// Instances represents '/api/instances' response.
+type Instances struct {
+ StoragePoolList []StoragePool
+ SdcList []Sdc
+}
+
+type (
+ // SelectedStatisticsQuery represents '/api/instances/querySelectedStatistics' query.
+ SelectedStatisticsQuery struct {
+ List []SelectedObject `json:"selectedStatisticsList"`
+ }
+ // SelectedObject represents '/api/instances/querySelectedStatistics' query object.
+ SelectedObject struct {
+ Type string `json:"type"` // object type (System, ProtectionDomain, Sds, StoragePool, Device, Volume, VTree, Sdc, FaultSet, RfcacheDevice).
+
+ // the following parameters are not relevant to the System type and can be omitted:
+ IDs []string `json:"ids,omitempty"` // list of objects ids
+ AllIDs allIds `json:"allIds,omitempty"` // all available objects
+
+ Properties []string `json:"properties"` // list of properties to fetch
+ }
+ allIds bool
+)
+
+func (b allIds) MarshalJSON() ([]byte, error) {
+ // marshal to an empty JSON list when AllIDs is true (when false, omitempty drops the field entirely).
+ if b {
+ return []byte("[]"), nil
+ }
+ return nil, nil
+}
+func (b *allIds) UnmarshalJSON([]byte) error {
+ *b = true
+ return nil
+}
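+
+// As an illustrative sketch, a query for all objects of a given type marshals to
+// (property names elided):
+//
+//	{"selectedStatisticsList":[{"type":"Sdc","allIds":[],"properties":["..."]}]}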
+
+// SelectedStatistics represents '/api/instances/querySelectedStatistics' response.
+type SelectedStatistics struct {
+ System SystemStatistics
+ Sdc map[string]SdcStatistics
+ StoragePool map[string]StoragePoolStatistics
+}
+
+// The commented-out structure fields are kept on purpose: they show what other metrics could be collected.
+type (
+ // CapacityStatistics is System/StoragePool capacity statistics.
+ CapacityStatistics struct {
+ CapacityAvailableForVolumeAllocationInKb int64
+ MaxCapacityInKb int64
+ CapacityLimitInKb int64
+ ProtectedCapacityInKb int64
+ DegradedFailedCapacityInKb int64
+ DegradedHealthyCapacityInKb int64
+ SpareCapacityInKb int64
+ FailedCapacityInKb int64
+ UnreachableUnusedCapacityInKb int64
+ InMaintenanceCapacityInKb int64
+ ThinCapacityAllocatedInKb int64
+ ThinCapacityInUseInKb int64
+ ThickCapacityInUseInKb int64
+ SnapCapacityInUseOccupiedInKb int64
+ CapacityInUseInKb int64
+ }
+ SystemStatistics struct {
+ CapacityStatistics
+
+ NumOfDevices int64
+ NumOfFaultSets int64
+ NumOfProtectionDomains int64
+ NumOfRfcacheDevices int64
+ NumOfSdc int64
+ NumOfSds int64
+ NumOfSnapshots int64
+ NumOfStoragePools int64
+ NumOfVolumes int64
+ NumOfVtrees int64
+ NumOfThickBaseVolumes int64
+ NumOfThinBaseVolumes int64
+ NumOfMappedToAllVolumes int64
+ NumOfUnmappedVolumes int64
+
+ RebalanceReadBwc Bwc
+ RebalanceWriteBwc Bwc
+ PendingRebalanceCapacityInKb int64
+
+ PendingNormRebuildCapacityInKb int64
+ PendingBckRebuildCapacityInKb int64
+ PendingFwdRebuildCapacityInKb int64
+ NormRebuildReadBwc Bwc // TODO: ???
+ NormRebuildWriteBwc Bwc // TODO: ???
+ BckRebuildReadBwc Bwc // failed node/disk is back alive
+ BckRebuildWriteBwc Bwc // failed node/disk is back alive
+ FwdRebuildReadBwc Bwc // node/disk fails
+ FwdRebuildWriteBwc Bwc // node/disk fails
+
+ PrimaryReadBwc Bwc // Backend (SDSs + Devices) Primary - Master MDM
+ PrimaryWriteBwc Bwc // Backend (SDSs + Devices) Primary - Master MDM
+ SecondaryReadBwc Bwc // Backend (SDSs + Devices, 2nd) Secondary - Slave MDM
+ SecondaryWriteBwc Bwc // Backend (SDSs + Devices, 2nd) Secondary - Slave MDM
+ UserDataReadBwc Bwc // Frontend (Volumes + SDCs)
+ UserDataWriteBwc Bwc // Frontend (Volumes + SDCs)
+ TotalReadBwc Bwc // *ReadBwc
+ TotalWriteBwc Bwc // *WriteBwc
+
+ //SnapCapacityInUseInKb int64
+ //BackgroundScanCompareCount int64
+ //BackgroundScannedInMB int64
+ //ActiveBckRebuildCapacityInKb int64
+ //ActiveFwdRebuildCapacityInKb int64
+ //ActiveMovingCapacityInKb int64
+ //ActiveMovingInBckRebuildJobs int64
+ //ActiveMovingInFwdRebuildJobs int64
+ //ActiveMovingInNormRebuildJobs int64
+ //ActiveMovingInRebalanceJobs int64
+ //ActiveMovingOutBckRebuildJobs int64
+ //ActiveMovingOutFwdRebuildJobs int64
+ //ActiveMovingOutNormRebuildJobs int64
+ //ActiveMovingRebalanceJobs int64
+ //ActiveNormRebuildCapacityInKb int64
+ //ActiveRebalanceCapacityInKb int64
+ //AtRestCapacityInKb int64
+ //BckRebuildCapacityInKb int64
+ //DegradedFailedVacInKb int64
+ //DegradedHealthyVacInKb int64
+ //FailedVacInKb int64
+ //FixedReadErrorCount int64
+ //FwdRebuildCapacityInKb int64
+ //InMaintenanceVacInKb int64
+ //InUseVacInKb int64
+ //MovingCapacityInKb int64
+ //NormRebuildCapacityInKb int64
+ //NumOfScsiInitiators int64 // removed from version 3 of ScaleIO/VxFlex API
+ //PendingMovingCapacityInKb int64
+ //PendingMovingInBckRebuildJobs int64
+ //PendingMovingInFwdRebuildJobs int64
+ //PendingMovingInNormRebuildJobs int64
+ //PendingMovingInRebalanceJobs int64
+ //PendingMovingOutBckRebuildJobs int64
+ //PendingMovingOutFwdRebuildJobs int64
+ //PendingMovingOutNormrebuildJobs int64
+ //PendingMovingRebalanceJobs int64
+ //PrimaryReadFromDevBwc int64
+ //PrimaryReadFromRmcacheBwc int64
+ //PrimaryVacInKb int64
+ //ProtectedVacInKb int64
+ //ProtectionDomainIds int64
+ //RebalanceCapacityInKb int64
+ //RebalancePerReceiveJobNetThrottlingInKbps int64
+ //RebalanceWaitSendQLength int64
+ //RebuildPerReceiveJobNetThrottlingInKbps int64
+ //RebuildWaitSendQLength int64
+ //RfacheReadHit int64
+ //RfacheWriteHit int64
+ //RfcacheAvgReadTime int64
+ //RfcacheAvgWriteTime int64
+ //RfcacheFdAvgReadTime int64
+ //RfcacheFdAvgWriteTime int64
+ //RfcacheFdCacheOverloaded int64
+ //RfcacheFdInlightReads int64
+ //RfcacheFdInlightWrites int64
+ //RfcacheFdIoErrors int64
+ //RfcacheFdMonitorErrorStuckIo int64
+ //RfcacheFdReadTimeGreater1Min int64
+ //RfcacheFdReadTimeGreater1Sec int64
+ //RfcacheFdReadTimeGreater500Millis int64
+ //RfcacheFdReadTimeGreater5Sec int64
+ //RfcacheFdReadsReceived int64
+ //RfcacheFdWriteTimeGreater1Min int64
+ //RfcacheFdWriteTimeGreater1Sec int64
+ //RfcacheFdWriteTimeGreater500Millis int64
+ //RfcacheFdWriteTimeGreater5Sec int64
+ //RfcacheFdWritesReceived int64
+ //RfcacheIoErrors int64
+ //RfcacheIosOutstanding int64
+ //RfcacheIosSkipped int64
+ //RfcachePooIosOutstanding int64
+ //RfcachePoolCachePages int64
+ //RfcachePoolEvictions int64
+ //RfcachePoolInLowMemoryCondition int64
+ //RfcachePoolIoTimeGreater1Min int64
+ //RfcachePoolLockTimeGreater1Sec int64
+ //RfcachePoolLowResourcesInitiatedPassthroughMode int64
+ //RfcachePoolNumCacheDevs int64
+ //RfcachePoolNumSrcDevs int64
+ //RfcachePoolPagesInuse int64
+ //RfcachePoolReadHit int64
+ //RfcachePoolReadMiss int64
+ //RfcachePoolReadPendingG10Millis int64
+ //RfcachePoolReadPendingG1Millis int64
+ //RfcachePoolReadPendingG1Sec int64
+ //RfcachePoolReadPendingG500Micro int64
+ //RfcachePoolReadsPending int64
+ //RfcachePoolSize int64
+ //RfcachePoolSourceIdMismatch int64
+ //RfcachePoolSuspendedIos int64
+ //RfcachePoolSuspendedPequestsRedundantSearchs int64
+ //RfcachePoolWriteHit int64
+ //RfcachePoolWriteMiss int64
+ //RfcachePoolWritePending int64
+ //RfcachePoolWritePendingG10Millis int64
+ //RfcachePoolWritePendingG1Millis int64
+ //RfcachePoolWritePendingG1Sec int64
+ //RfcachePoolWritePendingG500Micro int64
+ //RfcacheReadMiss int64
+ //RfcacheReadsFromCache int64
+ //RfcacheReadsPending int64
+ //RfcacheReadsReceived int64
+ //RfcacheReadsSkipped int64
+ //RfcacheReadsSkippedAlignedSizeTooLarge int64
+ //RfcacheReadsSkippedHeavyLoad int64
+ //RfcacheReadsSkippedInternalError int64
+ //RfcacheReadsSkippedLockIos int64
+ //RfcacheReadsSkippedLowResources int64
+ //RfcacheReadsSkippedMaxIoSize int64
+ //RfcacheReadsSkippedStuckIo int64
+ //RfcacheSkippedUnlinedWrite int64
+ //RfcacheSourceDeviceReads int64
+ //RfcacheSourceDeviceWrites int64
+ //RfcacheWriteMiss int64
+ //RfcacheWritePending int64
+ //RfcacheWritesReceived int64
+ //RfcacheWritesSkippedCacheMiss int64
+ //RfcacheWritesSkippedHeavyLoad int64
+ //RfcacheWritesSkippedInternalError int64
+ //RfcacheWritesSkippedLowResources int64
+ //RfcacheWritesSkippedMaxIoSize int64
+ //RfcacheWritesSkippedStuckIo int64
+ //RmPendingAllocatedInKb int64
+ //Rmcache128kbEntryCount int64
+ //Rmcache16kbEntryCount int64
+ //Rmcache32kbEntryCount int64
+ //Rmcache4kbEntryCount int64
+ //Rmcache64kbEntryCount int64
+ //Rmcache8kbEntryCount int64
+ //RmcacheBigBlockEvictionCount int64
+ //RmcacheBigBlockEvictionSizeCountInKb int64
+ //RmcacheCurrNumOf128kbEntries int64
+ //RmcacheCurrNumOf16kbEntries int64
+ //RmcacheCurrNumOf32kbEntries int64
+ //RmcacheCurrNumOf4kbEntries int64
+ //RmcacheCurrNumOf64kbEntries int64
+ //RmcacheCurrNumOf8kbEntries int64
+ //RmcacheEntryEvictionCount int64
+ //RmcacheEntryEvictionSizeCountInKb int64
+ //RmcacheNoEvictionCount int64
+ //RmcacheSizeInKb int64
+ //RmcacheSizeInUseInKb int64
+ //RmcacheSkipCountCacheAllBusy int64
+ //RmcacheSkipCountLargeIo int64
+ //RmcacheSkipCountUnaligned4kbIo int64
+ //ScsiInitiatorIds int64
+ //SdcIds int64
+ //SecondaryReadFromDevBwc int64
+ //SecondaryReadFromRmcacheBwc int64
+ //SecondaryVacInKb int64
+ //SemiProtectedCapacityInKb int64
+ //SemiProtectedVacInKb int64
+ //SnapCapacityInUseOccupiedInKb int64
+ //UnusedCapacityInKb int64
+ }
+ SdcStatistics struct {
+ NumOfMappedVolumes int64
+ UserDataReadBwc Bwc
+ UserDataWriteBwc Bwc
+ //VolumeIds int64
+ }
+ StoragePoolStatistics struct {
+ CapacityStatistics
+
+ NumOfDevices int64
+ NumOfVolumes int64
+ NumOfVtrees int64
+ NumOfSnapshots int64
+
+ //SnapCapacityInUseInKb int64
+ //BackgroundScanCompareCount int64
+ //BackgroundScannedInMB int64
+ //ActiveBckRebuildCapacityInKb int64
+ //ActiveFwdRebuildCapacityInKb int64
+ //ActiveMovingCapacityInKb int64
+ //ActiveMovingInBckRebuildJobs int64
+ //ActiveMovingInFwdRebuildJobs int64
+ //ActiveMovingInNormRebuildJobs int64
+ //ActiveMovingInRebalanceJobs int64
+ //ActiveMovingOutBckRebuildJobs int64
+ //ActiveMovingOutFwdRebuildJobs int64
+ //ActiveMovingOutNormRebuildJobs int64
+ //ActiveMovingRebalanceJobs int64
+ //ActiveNormRebuildCapacityInKb int64
+ //ActiveRebalanceCapacityInKb int64
+ //AtRestCapacityInKb int64
+ //BckRebuildCapacityInKb int64
+ //BckRebuildReadBwc int64
+ //BckRebuildWriteBwc int64
+ //DegradedFailedVacInKb int64
+ //DegradedHealthyVacInKb int64
+ //DeviceIds int64
+ //FailedVacInKb int64
+ //FixedReadErrorCount int64
+ //FwdRebuildCapacityInKb int64
+ //FwdRebuildReadBwc int64
+ //FwdRebuildWriteBwc int64
+ //InMaintenanceVacInKb int64
+ //InUseVacInKb int64
+ //MovingCapacityInKb int64
+ //NormRebuildCapacityInKb int64
+ //NormRebuildReadBwc int64
+ //NormRebuildWriteBwc int64
+ //NumOfMappedToAllVolumes int64
+ //NumOfThickBaseVolumes int64
+ //NumOfThinBaseVolumes int64
+ //NumOfUnmappedVolumes int64
+ //NumOfVolumesInDeletion int64
+ //PendingBckRebuildCapacityInKb int64
+ //PendingFwdRebuildCapacityInKb int64
+ //PendingMovingCapacityInKb int64
+ //PendingMovingInBckRebuildJobs int64
+ //PendingMovingInFwdRebuildJobs int64
+ //PendingMovingInNormRebuildJobs int64
+ //PendingMovingInRebalanceJobs int64
+ //PendingMovingOutBckRebuildJobs int64
+ //PendingMovingOutFwdRebuildJobs int64
+ //PendingMovingOutNormrebuildJobs int64
+ //PendingMovingRebalanceJobs int64
+ //PendingNormRebuildCapacityInKb int64
+ //PendingRebalanceCapacityInKb int64
+ //PrimaryReadBwc int64
+ //PrimaryReadFromDevBwc int64
+ //PrimaryReadFromRmcacheBwc int64
+ //PrimaryVacInKb int64
+ //PrimaryWriteBwc int64
+ //ProtectedVacInKb int64
+ //RebalanceCapacityInKb int64
+ //RebalanceReadBwc int64
+ //RebalanceWriteBwc int64
+ //RfacheReadHit int64
+ //RfacheWriteHit int64
+ //RfcacheAvgReadTime int64
+ //RfcacheAvgWriteTime int64
+ //RfcacheIoErrors int64
+ //RfcacheIosOutstanding int64
+ //RfcacheIosSkipped int64
+ //RfcacheReadMiss int64
+ //RfcacheReadsFromCache int64
+ //RfcacheReadsPending int64
+ //RfcacheReadsReceived int64
+ //RfcacheReadsSkipped int64
+ //RfcacheReadsSkippedAlignedSizeTooLarge int64
+ //RfcacheReadsSkippedHeavyLoad int64
+ //RfcacheReadsSkippedInternalError int64
+ //RfcacheReadsSkippedLockIos int64
+ //RfcacheReadsSkippedLowResources int64
+ //RfcacheReadsSkippedMaxIoSize int64
+ //RfcacheReadsSkippedStuckIo int64
+ //RfcacheSkippedUnlinedWrite int64
+ //RfcacheSourceDeviceReads int64
+ //RfcacheSourceDeviceWrites int64
+ //RfcacheWriteMiss int64
+ //RfcacheWritePending int64
+ //RfcacheWritesReceived int64
+ //RfcacheWritesSkippedCacheMiss int64
+ //RfcacheWritesSkippedHeavyLoad int64
+ //RfcacheWritesSkippedInternalError int64
+ //RfcacheWritesSkippedLowResources int64
+ //RfcacheWritesSkippedMaxIoSize int64
+ //RfcacheWritesSkippedStuckIo int64
+ //RmPendingAllocatedInKb int64
+ //SecondaryReadBwc int64
+ //SecondaryReadFromDevBwc int64
+ //SecondaryReadFromRmcacheBwc int64
+ //SecondaryVacInKb int64
+ //SecondaryWriteBwc int64
+ //SemiProtectedCapacityInKb int64
+ //SemiProtectedVacInKb int64
+ //SnapCapacityInUseOccupiedInKb int64
+ //TotalReadBwc int64
+ //TotalWriteBwc int64
+ //UnusedCapacityInKb int64
+ //UserDataReadBwc int64
+ //UserDataWriteBwc int64
+ //VolumeIds int64
+ //VtreeIds int64
+ }
+ DeviceStatistic struct {
+ // BackgroundScanCompareCount int64
+ // BackgroundScannedInMB int64
+ // ActiveMovingInBckRebuildJobs int64
+ // ActiveMovingInFwdRebuildJobs int64
+ // ActiveMovingInNormRebuildJobs int64
+ // ActiveMovingInRebalanceJobs int64
+ // ActiveMovingOutBckRebuildJobs int64
+ // ActiveMovingOutFwdRebuildJobs int64
+ // ActiveMovingOutNormRebuildJobs int64
+ // ActiveMovingRebalanceJobs int64
+ // AvgReadLatencyInMicrosec int64
+ // AvgReadSizeInBytes int64
+ // AvgWriteLatencyInMicrosec int64
+ // AvgWriteSizeInBytes int64
+ // BckRebuildReadBwc int64
+ // BckRebuildWriteBwc int64
+ // CapacityInUseInKb int64
+ // CapacityLimitInKb int64
+ // DegradedFailedVacInKb int64
+ // DegradedHealthyVacInKb int64
+ // FailedVacInKb int64
+ // FixedReadErrorCount int64
+ // FwdRebuildReadBwc int64
+ // FwdRebuildWriteBwc int64
+ // InMaintenanceVacInKb int64
+ // InUseVacInKb int64
+ // MaxCapacityInKb int64
+ // NormRebuildReadBwc int64
+ // NormRebuildWriteBwc int64
+ // PendingMovingInBckRebuildJobs int64
+ // PendingMovingInFwdRebuildJobs int64
+ // PendingMovingInNormRebuildJobs int64
+ // PendingMovingInRebalanceJobs int64
+ // PendingMovingOutBckRebuildJobs int64
+ // PendingMovingOutFwdRebuildJobs int64
+ // PendingMovingOutNormrebuildJobs int64
+ // PendingMovingRebalanceJobs int64
+ // PrimaryReadBwc int64
+ // PrimaryReadFromDevBwc int64
+ // PrimaryReadFromRmcacheBwc int64
+ // PrimaryVacInKb int64
+ // PrimaryWriteBwc int64
+ // ProtectedVacInKb int64
+ // RebalanceReadBwc int64
+ // RebalanceWriteBwc int64
+ // RfacheReadHit int64
+ // RfacheWriteHit int64
+ // RfcacheAvgReadTime int64
+ // RfcacheAvgWriteTime int64
+ // RfcacheIoErrors int64
+ // RfcacheIosOutstanding int64
+ // RfcacheIosSkipped int64
+ // RfcacheReadMiss int64
+ // RfcacheReadsFromCache int64
+ // RfcacheReadsPending int64
+ // RfcacheReadsReceived int64
+ // RfcacheReadsSkipped int64
+ // RfcacheReadsSkippedAlignedSizeTooLarge int64
+ // RfcacheReadsSkippedHeavyLoad int64
+ // RfcacheReadsSkippedInternalError int64
+ // RfcacheReadsSkippedLockIos int64
+ // RfcacheReadsSkippedLowResources int64
+ // RfcacheReadsSkippedMaxIoSize int64
+ // RfcacheReadsSkippedStuckIo int64
+ // RfcacheSkippedUnlinedWrite int64
+ // RfcacheSourceDeviceReads int64
+ // RfcacheSourceDeviceWrites int64
+ // RfcacheWriteMiss int64
+ // RfcacheWritePending int64
+ // RfcacheWritesReceived int64
+ // RfcacheWritesSkippedCacheMiss int64
+ // RfcacheWritesSkippedHeavyLoad int64
+ // RfcacheWritesSkippedInternalError int64
+ // RfcacheWritesSkippedLowResources int64
+ // RfcacheWritesSkippedMaxIoSize int64
+ // RfcacheWritesSkippedStuckIo int64
+ // RmPendingAllocatedInKb int64
+ // SecondaryReadBwc int64
+ // SecondaryReadFromDevBwc int64
+ // SecondaryReadFromRmcacheBwc int64
+ // SecondaryVacInKb int64
+ // SecondaryWriteBwc int64
+ // SemiProtectedVacInKb int64
+ // SnapCapacityInUseInKb int64
+ // SnapCapacityInUseOccupiedInKb int64
+ // ThickCapacityInUseInKb int64
+ // ThinCapacityAllocatedInKb int64
+ // ThinCapacityInUseInKb int64
+ // TotalReadBwc int64
+ // TotalWriteBwc int64
+ // UnreachableUnusedCapacityInKb int64
+ // UnusedCapacityInKb int64
+ }
+ FaultSetStatistics struct {
+ // BackgroundScanCompareCount int64
+ // BackgroundScannedInMB int64
+ // ActiveMovingInBckRebuildJobs int64
+ // ActiveMovingInFwdRebuildJobs int64
+ // ActiveMovingInNormRebuildJobs int64
+ // ActiveMovingInRebalanceJobs int64
+ // ActiveMovingOutBckRebuildJobs int64
+ // ActiveMovingOutFwdRebuildJobs int64
+ // ActiveMovingOutNormRebuildJobs int64
+ // ActiveMovingRebalanceJobs int64
+ // BckRebuildReadBwc int64
+ // BckRebuildWriteBwc int64
+ // CapacityInUseInKb int64
+ // CapacityLimitInKb int64
+ // DegradedFailedVacInKb int64
+ // DegradedHealthyVacInKb int64
+ // FailedVacInKb int64
+ // FixedReadErrorCount int64
+ // FwdRebuildReadBwc int64
+ // FwdRebuildWriteBwc int64
+ // InMaintenanceVacInKb int64
+ // InUseVacInKb int64
+ // MaxCapacityInKb int64
+ // NormRebuildReadBwc int64
+ // NormRebuildWriteBwc int64
+ // NumOfSds int64
+ // PendingMovingInBckRebuildJobs int64
+ // PendingMovingInFwdRebuildJobs int64
+ // PendingMovingInNormRebuildJobs int64
+ // PendingMovingInRebalanceJobs int64
+ // PendingMovingOutBckRebuildJobs int64
+ // PendingMovingOutFwdRebuildJobs int64
+ // PendingMovingOutNormrebuildJobs int64
+ // PendingMovingRebalanceJobs int64
+ // PrimaryReadBwc int64
+ // PrimaryReadFromDevBwc int64
+ // PrimaryReadFromRmcacheBwc int64
+ // PrimaryVacInKb int64
+ // PrimaryWriteBwc int64
+ // ProtectedVacInKb int64
+ // RebalancePerReceiveJobNetThrottlingInKbps int64
+ // RebalanceReadBwc int64
+ // RebalanceWaitSendQLength int64
+ // RebalanceWriteBwc int64
+ // RebuildPerReceiveJobNetThrottlingInKbps int64
+ // RebuildWaitSendQLength int64
+ // RfacheReadHit int64
+ // RfacheWriteHit int64
+ // RfcacheAvgReadTime int64
+ // RfcacheAvgWriteTime int64
+ // RfcacheFdAvgReadTime int64
+ // RfcacheFdAvgWriteTime int64
+ // RfcacheFdCacheOverloaded int64
+ // RfcacheFdInlightReads int64
+ // RfcacheFdInlightWrites int64
+ // RfcacheFdIoErrors int64
+ // RfcacheFdMonitorErrorStuckIo int64
+ // RfcacheFdReadTimeGreater1Min int64
+ // RfcacheFdReadTimeGreater1Sec int64
+ // RfcacheFdReadTimeGreater500Millis int64
+ // RfcacheFdReadTimeGreater5Sec int64
+ // RfcacheFdReadsReceived int64
+ // RfcacheFdWriteTimeGreater1Min int64
+ // RfcacheFdWriteTimeGreater1Sec int64
+ // RfcacheFdWriteTimeGreater500Millis int64
+ // RfcacheFdWriteTimeGreater5Sec int64
+ // RfcacheFdWritesReceived int64
+ // RfcacheIoErrors int64
+ // RfcacheIosOutstanding int64
+ // RfcacheIosSkipped int64
+ // RfcachePooIosOutstanding int64
+ // RfcachePoolCachePages int64
+ // RfcachePoolEvictions int64
+ // RfcachePoolInLowMemoryCondition int64
+ // RfcachePoolIoTimeGreater1Min int64
+ // RfcachePoolLockTimeGreater1Sec int64
+ // RfcachePoolLowResourcesInitiatedPassthroughMode int64
+ // RfcachePoolNumCacheDevs int64
+ // RfcachePoolNumSrcDevs int64
+ // RfcachePoolPagesInuse int64
+ // RfcachePoolReadHit int64
+ // RfcachePoolReadMiss int64
+ // RfcachePoolReadPendingG10Millis int64
+ // RfcachePoolReadPendingG1Millis int64
+ // RfcachePoolReadPendingG1Sec int64
+ // RfcachePoolReadPendingG500Micro int64
+ // RfcachePoolReadsPending int64
+ // RfcachePoolSize int64
+ // RfcachePoolSourceIdMismatch int64
+ // RfcachePoolSuspendedIos int64
+ // RfcachePoolSuspendedPequestsRedundantSearchs int64
+ // RfcachePoolWriteHit int64
+ // RfcachePoolWriteMiss int64
+ // RfcachePoolWritePending int64
+ // RfcachePoolWritePendingG10Millis int64
+ // RfcachePoolWritePendingG1Millis int64
+ // RfcachePoolWritePendingG1Sec int64
+ // RfcachePoolWritePendingG500Micro int64
+ // RfcacheReadMiss int64
+ // RfcacheReadsFromCache int64
+ // RfcacheReadsPending int64
+ // RfcacheReadsReceived int64
+ // RfcacheReadsSkipped int64
+ // RfcacheReadsSkippedAlignedSizeTooLarge int64
+ // RfcacheReadsSkippedHeavyLoad int64
+ // RfcacheReadsSkippedInternalError int64
+ // RfcacheReadsSkippedLockIos int64
+ // RfcacheReadsSkippedLowResources int64
+ // RfcacheReadsSkippedMaxIoSize int64
+ // RfcacheReadsSkippedStuckIo int64
+ // RfcacheSkippedUnlinedWrite int64
+ // RfcacheSourceDeviceReads int64
+ // RfcacheSourceDeviceWrites int64
+ // RfcacheWriteMiss int64
+ // RfcacheWritePending int64
+ // RfcacheWritesReceived int64
+ // RfcacheWritesSkippedCacheMiss int64
+ // RfcacheWritesSkippedHeavyLoad int64
+ // RfcacheWritesSkippedInternalError int64
+ // RfcacheWritesSkippedLowResources int64
+ // RfcacheWritesSkippedMaxIoSize int64
+ // RfcacheWritesSkippedStuckIo int64
+ // RmPendingAllocatedInKb int64
+ // Rmcache128kbEntryCount int64
+ // Rmcache16kbEntryCount int64
+ // Rmcache32kbEntryCount int64
+ // Rmcache4kbEntryCount int64
+ // Rmcache64kbEntryCount int64
+ // Rmcache8kbEntryCount int64
+ // RmcacheBigBlockEvictionCount int64
+ // RmcacheBigBlockEvictionSizeCountInKb int64
+ // RmcacheCurrNumOf128kbEntries int64
+ // RmcacheCurrNumOf16kbEntries int64
+ // RmcacheCurrNumOf32kbEntries int64
+ // RmcacheCurrNumOf4kbEntries int64
+ // RmcacheCurrNumOf64kbEntries int64
+ // RmcacheCurrNumOf8kbEntries int64
+ // RmcacheEntryEvictionCount int64
+ // RmcacheEntryEvictionSizeCountInKb int64
+ // RmcacheNoEvictionCount int64
+ // RmcacheSizeInKb int64
+ // RmcacheSizeInUseInKb int64
+ // RmcacheSkipCountCacheAllBusy int64
+ // RmcacheSkipCountLargeIo int64
+ // RmcacheSkipCountUnaligned4kbIo int64
+ // SdsIds int64
+ // SecondaryReadBwc int64
+ // SecondaryReadFromDevBwc int64
+ // SecondaryReadFromRmcacheBwc int64
+ // SecondaryVacInKb int64
+ // SecondaryWriteBwc int64
+ // SemiProtectedVacInKb int64
+ // SnapCapacityInUseInKb int64
+ // SnapCapacityInUseOccupiedInKb int64
+ // ThickCapacityInUseInKb int64
+ // ThinCapacityAllocatedInKb int64
+ // ThinCapacityInUseInKb int64
+ // TotalReadBwc int64
+ // TotalWriteBwc int64
+ // UnreachableUnusedCapacityInKb int64
+ // UnusedCapacityInKb int64
+ }
+ ProtectionDomainStatistics struct {
+ // BackgroundScanCompareCount int64
+ // BackgroundScannedInMB int64
+ // ActiveBckRebuildCapacityInKb int64
+ // ActiveFwdRebuildCapacityInKb int64
+ // ActiveMovingCapacityInKb int64
+ // ActiveMovingInBckRebuildJobs int64
+ // ActiveMovingInFwdRebuildJobs int64
+ // ActiveMovingInNormRebuildJobs int64
+ // ActiveMovingInRebalanceJobs int64
+ // ActiveMovingOutBckRebuildJobs int64
+ // ActiveMovingOutFwdRebuildJobs int64
+ // ActiveMovingOutNormRebuildJobs int64
+ // ActiveMovingRebalanceJobs int64
+ // ActiveNormRebuildCapacityInKb int64
+ // ActiveRebalanceCapacityInKb int64
+ // AtRestCapacityInKb int64
+ // BckRebuildCapacityInKb int64
+ // BckRebuildReadBwc int64
+ // BckRebuildWriteBwc int64
+ // CapacityAvailableForVolumeAllocationInKb int64
+ // CapacityInUseInKb int64
+ // CapacityLimitInKb int64
+ // DegradedFailedCapacityInKb int64
+ // DegradedFailedVacInKb int64
+ // DegradedHealthyCapacityInKb int64
+ // DegradedHealthyVacInKb int64
+ // FailedCapacityInKb int64
+ // FailedVacInKb int64
+ // FaultSetIds int64
+ // FixedReadErrorCount int64
+ // FwdRebuildCapacityInKb int64
+ // FwdRebuildReadBwc int64
+ // FwdRebuildWriteBwc int64
+ // InMaintenanceCapacityInKb int64
+ // InMaintenanceVacInKb int64
+ // InUseVacInKb int64
+ // MaxCapacityInKb int64
+ // MovingCapacityInKb int64
+ // NormRebuildCapacityInKb int64
+ // NormRebuildReadBwc int64
+ // NormRebuildWriteBwc int64
+ // NumOfFaultSets int64
+ // NumOfMappedToAllVolumes int64
+ // NumOfSds int64
+ // NumOfSnapshots int64
+ // NumOfStoragePools int64
+ // NumOfThickBaseVolumes int64
+ // NumOfThinBaseVolumes int64
+ // NumOfUnmappedVolumes int64
+ // NumOfVolumesInDeletion int64
+ // PendingBckRebuildCapacityInKb int64
+ // PendingFwdRebuildCapacityInKb int64
+ // PendingMovingCapacityInKb int64
+ // PendingMovingInBckRebuildJobs int64
+ // PendingMovingInFwdRebuildJobs int64
+ // PendingMovingInNormRebuildJobs int64
+ // PendingMovingInRebalanceJobs int64
+ // PendingMovingOutBckRebuildJobs int64
+ // PendingMovingOutFwdRebuildJobs int64
+ // PendingMovingOutNormrebuildJobs int64
+ // PendingMovingRebalanceJobs int64
+ // PendingNormRebuildCapacityInKb int64
+ // PendingRebalanceCapacityInKb int64
+ // PrimaryReadBwc int64
+ // PrimaryReadFromDevBwc int64
+ // PrimaryReadFromRmcacheBwc int64
+ // PrimaryVacInKb int64
+ // PrimaryWriteBwc int64
+ // ProtectedCapacityInKb int64
+ // ProtectedVacInKb int64
+ // RebalanceCapacityInKb int64
+ // RebalancePerReceiveJobNetThrottlingInKbps int64
+ // RebalanceReadBwc int64
+ // RebalanceWaitSendQLength int64
+ // RebalanceWriteBwc int64
+ // RebuildPerReceiveJobNetThrottlingInKbps int64
+ // RebuildWaitSendQLength int64
+ // RfacheReadHit int64
+ // RfacheWriteHit int64
+ // RfcacheAvgReadTime int64
+ // RfcacheAvgWriteTime int64
+ // RfcacheFdAvgReadTime int64
+ // RfcacheFdAvgWriteTime int64
+ // RfcacheFdCacheOverloaded int64
+ // RfcacheFdInlightReads int64
+ // RfcacheFdInlightWrites int64
+ // RfcacheFdIoErrors int64
+ // RfcacheFdMonitorErrorStuckIo int64
+ // RfcacheFdReadTimeGreater1Min int64
+ // RfcacheFdReadTimeGreater1Sec int64
+ // RfcacheFdReadTimeGreater500Millis int64
+ // RfcacheFdReadTimeGreater5Sec int64
+ // RfcacheFdReadsReceived int64
+ // RfcacheFdWriteTimeGreater1Min int64
+ // RfcacheFdWriteTimeGreater1Sec int64
+ // RfcacheFdWriteTimeGreater500Millis int64
+ // RfcacheFdWriteTimeGreater5Sec int64
+ // RfcacheFdWritesReceived int64
+ // RfcacheIoErrors int64
+ // RfcacheIosOutstanding int64
+ // RfcacheIosSkipped int64
+ // RfcachePooIosOutstanding int64
+ // RfcachePoolCachePages int64
+ // RfcachePoolEvictions int64
+ // RfcachePoolInLowMemoryCondition int64
+ // RfcachePoolIoTimeGreater1Min int64
+ // RfcachePoolLockTimeGreater1Sec int64
+ // RfcachePoolLowResourcesInitiatedPassthroughMode int64
+ // RfcachePoolNumCacheDevs int64
+ // RfcachePoolNumSrcDevs int64
+ // RfcachePoolPagesInuse int64
+ // RfcachePoolReadHit int64
+ // RfcachePoolReadMiss int64
+ // RfcachePoolReadPendingG10Millis int64
+ // RfcachePoolReadPendingG1Millis int64
+ // RfcachePoolReadPendingG1Sec int64
+ // RfcachePoolReadPendingG500Micro int64
+ // RfcachePoolReadsPending int64
+ // RfcachePoolSize int64
+ // RfcachePoolSourceIdMismatch int64
+ // RfcachePoolSuspendedIos int64
+ // RfcachePoolSuspendedPequestsRedundantSearchs int64
+ // RfcachePoolWriteHit int64
+ // RfcachePoolWriteMiss int64
+ // RfcachePoolWritePending int64
+ // RfcachePoolWritePendingG10Millis int64
+ // RfcachePoolWritePendingG1Millis int64
+ // RfcachePoolWritePendingG1Sec int64
+ // RfcachePoolWritePendingG500Micro int64
+ // RfcacheReadMiss int64
+ // RfcacheReadsFromCache int64
+ // RfcacheReadsPending int64
+ // RfcacheReadsReceived int64
+ // RfcacheReadsSkipped int64
+ // RfcacheReadsSkippedAlignedSizeTooLarge int64
+ // RfcacheReadsSkippedHeavyLoad int64
+ // RfcacheReadsSkippedInternalError int64
+ // RfcacheReadsSkippedLockIos int64
+ // RfcacheReadsSkippedLowResources int64
+ // RfcacheReadsSkippedMaxIoSize int64
+ // RfcacheReadsSkippedStuckIo int64
+ // RfcacheSkippedUnlinedWrite int64
+ // RfcacheSourceDeviceReads int64
+ // RfcacheSourceDeviceWrites int64
+ // RfcacheWriteMiss int64
+ // RfcacheWritePending int64
+ // RfcacheWritesReceived int64
+ // RfcacheWritesSkippedCacheMiss int64
+ // RfcacheWritesSkippedHeavyLoad int64
+ // RfcacheWritesSkippedInternalError int64
+ // RfcacheWritesSkippedLowResources int64
+ // RfcacheWritesSkippedMaxIoSize int64
+ // RfcacheWritesSkippedStuckIo int64
+ // RmPendingAllocatedInKb int64
+ // Rmcache128kbEntryCount int64
+ // Rmcache16kbEntryCount int64
+ // Rmcache32kbEntryCount int64
+ // Rmcache4kbEntryCount int64
+ // Rmcache64kbEntryCount int64
+ // Rmcache8kbEntryCount int64
+ // RmcacheBigBlockEvictionCount int64
+ // RmcacheBigBlockEvictionSizeCountInKb int64
+ // RmcacheCurrNumOf128kbEntries int64
+ // RmcacheCurrNumOf16kbEntries int64
+ // RmcacheCurrNumOf32kbEntries int64
+ // RmcacheCurrNumOf4kbEntries int64
+ // RmcacheCurrNumOf64kbEntries int64
+ // RmcacheCurrNumOf8kbEntries int64
+ // RmcacheEntryEvictionCount int64
+ // RmcacheEntryEvictionSizeCountInKb int64
+ // RmcacheNoEvictionCount int64
+ // RmcacheSizeInKb int64
+ // RmcacheSizeInUseInKb int64
+ // RmcacheSkipCountCacheAllBusy int64
+ // RmcacheSkipCountLargeIo int64
+ // RmcacheSkipCountUnaligned4kbIo int64
+ // SdsIds int64
+ // SecondaryReadBwc int64
+ // SecondaryReadFromDevBwc int64
+ // SecondaryReadFromRmcacheBwc int64
+ // SecondaryVacInKb int64
+ // SecondaryWriteBwc int64
+ // SemiProtectedCapacityInKb int64
+ // SemiProtectedVacInKb int64
+ // SnapCapacityInUseInKb int64
+ // SnapCapacityInUseOccupiedInKb int64
+ // SpareCapacityInKb int64
+ // StoragePoolIds int64
+ // ThickCapacityInUseInKb int64
+ // ThinCapacityAllocatedInKb int64
+ // ThinCapacityInUseInKb int64
+ // TotalReadBwc int64
+ // TotalWriteBwc int64
+ // UnreachableUnusedCapacityInKb int64
+ // UnusedCapacityInKb int64
+ // UserDataReadBwc int64
+ // UserDataWriteBwc int64
+ }
+ RFCacheDeviceStatistics struct {
+ // RfcacheFdAvgReadTime int64
+ // RfcacheFdAvgWriteTime int64
+ // RfcacheFdCacheOverloaded int64
+ // RfcacheFdInlightReads int64
+ // RfcacheFdInlightWrites int64
+ // RfcacheFdIoErrors int64
+ // RfcacheFdMonitorErrorStuckIo int64
+ // RfcacheFdReadTimeGreater1Min int64
+ // RfcacheFdReadTimeGreater1Sec int64
+ // RfcacheFdReadTimeGreater500Millis int64
+ // RfcacheFdReadTimeGreater5Sec int64
+ // RfcacheFdReadsReceived int64
+ // RfcacheFdWriteTimeGreater1Min int64
+ // RfcacheFdWriteTimeGreater1Sec int64
+ // RfcacheFdWriteTimeGreater500Millis int64
+ // RfcacheFdWriteTimeGreater5Sec int64
+ // RfcacheFdWritesReceived int64
+ }
+ SdsStatistics struct {
+ // BackgroundScanCompareCount int64
+ // BackgroundScannedInMB int64
+ // ActiveMovingInBckRebuildJobs int64
+ // ActiveMovingInFwdRebuildJobs int64
+ // ActiveMovingInNormRebuildJobs int64
+ // ActiveMovingInRebalanceJobs int64
+ // ActiveMovingOutBckRebuildJobs int64
+ // ActiveMovingOutFwdRebuildJobs int64
+ // ActiveMovingOutNormRebuildJobs int64
+ // ActiveMovingRebalanceJobs int64
+ // BckRebuildReadBwc int64
+ // BckRebuildWriteBwc int64
+ // CapacityInUseInKb int64
+ // CapacityLimitInKb int64
+ // DegradedFailedVacInKb int64
+ // DegradedHealthyVacInKb int64
+ // DeviceIds int64
+ // FailedVacInKb int64
+ // FixedReadErrorCount int64
+ // FwdRebuildReadBwc int64
+ // FwdRebuildWriteBwc int64
+ // InMaintenanceVacInKb int64
+ // InUseVacInKb int64
+ // MaxCapacityInKb int64
+ // NormRebuildReadBwc int64
+ // NormRebuildWriteBwc int64
+ // NumOfDevices int64
+ // NumOfRfcacheDevices int64
+ // PendingMovingInBckRebuildJobs int64
+ // PendingMovingInFwdRebuildJobs int64
+ // PendingMovingInNormRebuildJobs int64
+ // PendingMovingInRebalanceJobs int64
+ // PendingMovingOutBckRebuildJobs int64
+ // PendingMovingOutFwdRebuildJobs int64
+ // PendingMovingOutNormrebuildJobs int64
+ // PendingMovingRebalanceJobs int64
+ // PrimaryReadBwc int64
+ // PrimaryReadFromDevBwc int64
+ // PrimaryReadFromRmcacheBwc int64
+ // PrimaryVacInKb int64
+ // PrimaryWriteBwc int64
+ // ProtectedVacInKb int64
+ // RebalancePerReceiveJobNetThrottlingInKbps int64
+ // RebalanceReadBwc int64
+ // RebalanceWaitSendQLength int64
+ // RebalanceWriteBwc int64
+ // RebuildPerReceiveJobNetThrottlingInKbps int64
+ // RebuildWaitSendQLength int64
+ // RfacheReadHit int64
+ // RfacheWriteHit int64
+ // RfcacheAvgReadTime int64
+ // RfcacheAvgWriteTime int64
+ // RfcacheDeviceIds int64
+ // RfcacheFdAvgReadTime int64
+ // RfcacheFdAvgWriteTime int64
+ // RfcacheFdCacheOverloaded int64
+ // RfcacheFdInlightReads int64
+ // RfcacheFdInlightWrites int64
+ // RfcacheFdIoErrors int64
+ // RfcacheFdMonitorErrorStuckIo int64
+ // RfcacheFdReadTimeGreater1Min int64
+ // RfcacheFdReadTimeGreater1Sec int64
+ // RfcacheFdReadTimeGreater500Millis int64
+ // RfcacheFdReadTimeGreater5Sec int64
+ // RfcacheFdReadsReceived int64
+ // RfcacheFdWriteTimeGreater1Min int64
+ // RfcacheFdWriteTimeGreater1Sec int64
+ // RfcacheFdWriteTimeGreater500Millis int64
+ // RfcacheFdWriteTimeGreater5Sec int64
+ // RfcacheFdWritesReceived int64
+ // RfcacheIoErrors int64
+ // RfcacheIosOutstanding int64
+ // RfcacheIosSkipped int64
+ // RfcachePooIosOutstanding int64
+ // RfcachePoolCachePages int64
+ // RfcachePoolContinuosMem int64
+ // RfcachePoolEvictions int64
+ // RfcachePoolInLowMemoryCondition int64
+ // RfcachePoolIoTimeGreater1Min int64
+ // RfcachePoolLockTimeGreater1Sec int64
+ // RfcachePoolLowResourcesInitiatedPassthroughMode int64
+ // RfcachePoolMaxIoSize int64
+ // RfcachePoolNumCacheDevs int64
+ // RfcachePoolNumOfDriverTheads int64
+ // RfcachePoolNumSrcDevs int64
+ // RfcachePoolOpmode int64
+ // RfcachePoolPageSize int64
+ // RfcachePoolPagesInuse int64
+ // RfcachePoolReadHit int64
+ // RfcachePoolReadMiss int64
+ // RfcachePoolReadPendingG10Millis int64
+ // RfcachePoolReadPendingG1Millis int64
+ // RfcachePoolReadPendingG1Sec int64
+ // RfcachePoolReadPendingG500Micro int64
+ // RfcachePoolReadsPending int64
+ // RfcachePoolSize int64
+ // RfcachePoolSourceIdMismatch int64
+ // RfcachePoolSuspendedIos int64
+ // RfcachePoolSuspendedIosMax int64
+ // RfcachePoolSuspendedPequestsRedundantSearchs int64
+ // RfcachePoolWriteHit int64
+ // RfcachePoolWriteMiss int64
+ // RfcachePoolWritePending int64
+ // RfcachePoolWritePendingG10Millis int64
+ // RfcachePoolWritePendingG1Millis int64
+ // RfcachePoolWritePendingG1Sec int64
+ // RfcachePoolWritePendingG500Micro int64
+ // RfcacheReadMiss int64
+ // RfcacheReadsFromCache int64
+ // RfcacheReadsPending int64
+ // RfcacheReadsReceived int64
+ // RfcacheReadsSkipped int64
+ // RfcacheReadsSkippedAlignedSizeTooLarge int64
+ // RfcacheReadsSkippedHeavyLoad int64
+ // RfcacheReadsSkippedInternalError int64
+ // RfcacheReadsSkippedLockIos int64
+ // RfcacheReadsSkippedLowResources int64
+ // RfcacheReadsSkippedMaxIoSize int64
+ // RfcacheReadsSkippedStuckIo int64
+ // RfcacheSkippedUnlinedWrite int64
+ // RfcacheSourceDeviceReads int64
+ // RfcacheSourceDeviceWrites int64
+ // RfcacheWriteMiss int64
+ // RfcacheWritePending int64
+ // RfcacheWritesReceived int64
+ // RfcacheWritesSkippedCacheMiss int64
+ // RfcacheWritesSkippedHeavyLoad int64
+ // RfcacheWritesSkippedInternalError int64
+ // RfcacheWritesSkippedLowResources int64
+ // RfcacheWritesSkippedMaxIoSize int64
+ // RfcacheWritesSkippedStuckIo int64
+ // RmPendingAllocatedInKb int64
+ // Rmcache128kbEntryCount int64
+ // Rmcache16kbEntryCount int64
+ // Rmcache32kbEntryCount int64
+ // Rmcache4kbEntryCount int64
+ // Rmcache64kbEntryCount int64
+ // Rmcache8kbEntryCount int64
+ // RmcacheBigBlockEvictionCount int64
+ // RmcacheBigBlockEvictionSizeCountInKb int64
+ // RmcacheCurrNumOf128kbEntries int64
+ // RmcacheCurrNumOf16kbEntries int64
+ // RmcacheCurrNumOf32kbEntries int64
+ // RmcacheCurrNumOf4kbEntries int64
+ // RmcacheCurrNumOf64kbEntries int64
+ // RmcacheCurrNumOf8kbEntries int64
+ // RmcacheEntryEvictionCount int64
+ // RmcacheEntryEvictionSizeCountInKb int64
+ // RmcacheNoEvictionCount int64
+ // RmcacheSizeInKb int64
+ // RmcacheSizeInUseInKb int64
+ // RmcacheSkipCountCacheAllBusy int64
+ // RmcacheSkipCountLargeIo int64
+ // RmcacheSkipCountUnaligned4kbIo int64
+ // SecondaryReadBwc int64
+ // SecondaryReadFromDevBwc int64
+ // SecondaryReadFromRmcacheBwc int64
+ // SecondaryVacInKb int64
+ // SecondaryWriteBwc int64
+ // SemiProtectedVacInKb int64
+ // SnapCapacityInUseInKb int64
+ // SnapCapacityInUseOccupiedInKb int64
+ // ThickCapacityInUseInKb int64
+ // ThinCapacityAllocatedInKb int64
+ // ThinCapacityInUseInKb int64
+ // TotalReadBwc int64
+ // TotalWriteBwc int64
+ // UnreachableUnusedCapacityInKb int64
+ // UnusedCapacityInKb int64
+ }
+ VolumeStatistics struct {
+ // ChildVolumeIds int64
+ // DescendantVolumeIds int64
+ // MappedSdcIds int64
+ // NumOfChildVolumes int64
+ // NumOfDescendantVolumes int64
+ // NumOfMappedScsiInitiators int64
+ // NumOfMappedSdcs int64
+ // UserDataReadBwc int64
+ // UserDataWriteBwc int64
+ }
+ VTreeStatistics struct {
+ // BaseNetCapacityInUseInKb int64
+ // NetCapacityInUseInKb int64
+ // NumOfVolumes int64
+ // SnapNetCapacityInUseInKb int64
+ // TrimmedCapacityInKb int64
+ // VolumeIds int64
+ }
+)
diff --git a/src/go/plugin/go.d/modules/scaleio/collect.go b/src/go/plugin/go.d/modules/scaleio/collect.go
new file mode 100644
index 000000000..a7782a7d9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/collect.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scaleio
+
+import (
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio/client"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
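+// discoveryEvery controls how often instances are re-discovered: discovery runs on the
+// first collection (and after any failed attempt), then on every discoveryEvery-th run.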
+const discoveryEvery = 5
+
+func (s *ScaleIO) collect() (map[string]int64, error) {
+ s.runs += 1
+ if !s.lastDiscoveryOK || s.runs%discoveryEvery == 0 {
+ if err := s.discovery(); err != nil {
+ return nil, err
+ }
+ }
+
+ stats, err := s.client.SelectedStatistics(query)
+ if err != nil {
+ return nil, err
+ }
+
+ mx := metrics{
+ System: s.collectSystem(stats.System),
+ StoragePool: s.collectStoragePool(stats.StoragePool),
+ Sdc: s.collectSdc(stats.Sdc),
+ }
+
+ s.updateCharts()
+ return stm.ToMap(mx), nil
+}
+
+func (s *ScaleIO) discovery() error {
+ start := time.Now()
+ s.Debugf("starting discovery")
+ ins, err := s.client.Instances()
+ if err != nil {
+ s.lastDiscoveryOK = false
+ return err
+ }
+ s.Debugf("discovery: found %d storage pools, %d sdcs (took %s)",
+ len(ins.StoragePoolList), len(ins.SdcList), time.Since(start))
+
+ s.discovered.pool = make(map[string]client.StoragePool, len(ins.StoragePoolList))
+ for _, pool := range ins.StoragePoolList {
+ s.discovered.pool[pool.ID] = pool
+ }
+ s.discovered.sdc = make(map[string]client.Sdc, len(ins.SdcList))
+ for _, sdc := range ins.SdcList {
+ s.discovered.sdc[sdc.ID] = sdc
+ }
+ s.lastDiscoveryOK = true
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/collect_sdc.go b/src/go/plugin/go.d/modules/scaleio/collect_sdc.go
new file mode 100644
index 000000000..f62626707
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/collect_sdc.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scaleio
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio/client"
+
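+// collectSdc converts the per-SDC selected statistics into metrics. SDCs that are not
+// present in the last discovery snapshot are skipped.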
+func (s *ScaleIO) collectSdc(ss map[string]client.SdcStatistics) map[string]sdcMetrics {
+ ms := make(map[string]sdcMetrics, len(ss))
+
+ for id, stats := range ss {
+ sdc, ok := s.discovered.sdc[id]
+ if !ok {
+ continue
+ }
+ var m sdcMetrics
+ m.BW.set(
+ calcBW(stats.UserDataReadBwc),
+ calcBW(stats.UserDataWriteBwc),
+ )
+ m.IOPS.set(
+ calcIOPS(stats.UserDataReadBwc),
+ calcIOPS(stats.UserDataWriteBwc),
+ )
+ m.IOSize.set(
+ calcIOSize(stats.UserDataReadBwc),
+ calcIOSize(stats.UserDataWriteBwc),
+ )
+ m.MappedVolumes = stats.NumOfMappedVolumes
+ m.MDMConnectionState = isSdcConnected(sdc.MdmConnectionState)
+
+ ms[id] = m
+ }
+ return ms
+}
+
+func isSdcConnected(state string) bool {
+ return state == "Connected"
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/collect_storage_pool.go b/src/go/plugin/go.d/modules/scaleio/collect_storage_pool.go
new file mode 100644
index 000000000..4a347a64c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/collect_storage_pool.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scaleio
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio/client"
+
+func (s *ScaleIO) collectStoragePool(ss map[string]client.StoragePoolStatistics) map[string]storagePoolMetrics {
+ ms := make(map[string]storagePoolMetrics, len(ss))
+
+ for id, stats := range ss {
+ pool, ok := s.discovered.pool[id]
+ if !ok {
+ continue
+ }
+ var pm storagePoolMetrics
+ collectStoragePoolCapacity(&pm, stats, pool)
+ collectStoragePoolComponents(&pm, stats)
+
+ ms[id] = pm
+ }
+ return ms
+}
+
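+// collectStoragePoolCapacity combines the statistics response with pool settings obtained
+// during discovery (spare percentage and capacity alert thresholds).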
+func collectStoragePoolCapacity(pm *storagePoolMetrics, ps client.StoragePoolStatistics, pool client.StoragePool) {
+ collectCapacity(&pm.Capacity.capacity, ps.CapacityStatistics)
+ pm.Capacity.Utilization = calcCapacityUtilization(ps.CapacityInUseInKb, ps.MaxCapacityInKb, pool.SparePercentage)
+ pm.Capacity.AlertThreshold.Critical = pool.CapacityAlertCriticalThreshold
+ pm.Capacity.AlertThreshold.High = pool.CapacityAlertHighThreshold
+}
+
+func collectStoragePoolComponents(pm *storagePoolMetrics, ps client.StoragePoolStatistics) {
+ pm.Components.Devices = ps.NumOfDevices
+ pm.Components.Snapshots = ps.NumOfSnapshots
+ pm.Components.Volumes = ps.NumOfVolumes
+ pm.Components.Vtrees = ps.NumOfVtrees
+}
+
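+// calcCapacityUtilization returns used capacity as a percentage of the usable capacity,
+// i.e. the pool's max capacity minus its configured spare percentage.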
+func calcCapacityUtilization(inUse int64, max int64, sparePercent int64) float64 {
+ spare := float64(max) / 100 * float64(sparePercent)
+ return divFloat(float64(100*inUse), float64(max)-spare)
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/collect_system.go b/src/go/plugin/go.d/modules/scaleio/collect_system.go
new file mode 100644
index 000000000..ae6e89aa9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/collect_system.go
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scaleio
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio/client"
+
+func (s *ScaleIO) collectSystem(ss client.SystemStatistics) systemMetrics {
+ var sm systemMetrics
+ collectSystemCapacity(&sm, ss)
+ collectSystemWorkload(&sm, ss)
+ collectSystemRebalance(&sm, ss)
+ collectSystemRebuild(&sm, ss)
+ collectSystemComponents(&sm, ss)
+ return sm
+}
+
+func collectSystemCapacity(sm *systemMetrics, ss client.SystemStatistics) {
+ collectCapacity(&sm.Capacity, ss.CapacityStatistics)
+}
+
+func collectCapacity(m *capacity, ss client.CapacityStatistics) {
+ // Health
+ m.Protected = ss.ProtectedCapacityInKb
+ m.InMaintenance = ss.InMaintenanceCapacityInKb
+ m.Degraded = sum(ss.DegradedFailedCapacityInKb, ss.DegradedHealthyCapacityInKb)
+ m.Failed = ss.FailedCapacityInKb
+ m.UnreachableUnused = ss.UnreachableUnusedCapacityInKb
+
+ // Capacity
+ m.MaxCapacity = ss.MaxCapacityInKb
+ m.ThickInUse = ss.ThickCapacityInUseInKb
+ m.ThinInUse = ss.ThinCapacityInUseInKb
+ m.Snapshot = ss.SnapCapacityInUseOccupiedInKb
+ m.Spare = ss.SpareCapacityInKb
+ m.Decreased = sum(ss.MaxCapacityInKb, -ss.CapacityLimitInKb) // TODO: probably wrong
+ // Note: can't use 'UnusedCapacityInKb' directly, dashboard shows calculated value
+ used := sum(
+ ss.ProtectedCapacityInKb,
+ ss.InMaintenanceCapacityInKb,
+ m.Decreased,
+ m.Degraded,
+ ss.FailedCapacityInKb,
+ ss.SpareCapacityInKb,
+ ss.UnreachableUnusedCapacityInKb,
+ ss.SnapCapacityInUseOccupiedInKb,
+ )
+ m.Unused = sum(ss.MaxCapacityInKb, -used)
+
+ // Other
+ m.InUse = ss.CapacityInUseInKb
+ m.AvailableForVolumeAllocation = ss.CapacityAvailableForVolumeAllocationInKb
+}
+
+func collectSystemComponents(sm *systemMetrics, ss client.SystemStatistics) {
+ m := &sm.Components
+
+ m.Devices = ss.NumOfDevices
+ m.FaultSets = ss.NumOfFaultSets
+ m.MappedToAllVolumes = ss.NumOfMappedToAllVolumes
+ m.ProtectionDomains = ss.NumOfProtectionDomains
+ m.RfcacheDevices = ss.NumOfRfcacheDevices
+ m.Sdc = ss.NumOfSdc
+ m.Sds = ss.NumOfSds
+ m.Snapshots = ss.NumOfSnapshots
+ m.StoragePools = ss.NumOfStoragePools
+ m.VTrees = ss.NumOfVtrees
+ m.Volumes = ss.NumOfVolumes
+ m.ThickBaseVolumes = ss.NumOfThickBaseVolumes
+ m.ThinBaseVolumes = ss.NumOfThinBaseVolumes
+ m.UnmappedVolumes = ss.NumOfUnmappedVolumes
+ m.MappedVolumes = sum(ss.NumOfVolumes, -ss.NumOfUnmappedVolumes)
+}
+
+func collectSystemWorkload(sm *systemMetrics, ss client.SystemStatistics) {
+ m := &sm.Workload
+
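+ // Backend traffic is reported per copy (primary and secondary); backend totals are their sum.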
+ m.Total.BW.set(
+ calcBW(ss.TotalReadBwc),
+ calcBW(ss.TotalWriteBwc),
+ )
+ m.Frontend.BW.set(
+ calcBW(ss.UserDataReadBwc),
+ calcBW(ss.UserDataWriteBwc),
+ )
+ m.Backend.Primary.BW.set(
+ calcBW(ss.PrimaryReadBwc),
+ calcBW(ss.PrimaryWriteBwc),
+ )
+ m.Backend.Secondary.BW.set(
+ calcBW(ss.SecondaryReadBwc),
+ calcBW(ss.SecondaryWriteBwc),
+ )
+ m.Backend.Total.BW.set(
+ sumFloat(m.Backend.Primary.BW.Read, m.Backend.Secondary.BW.Read),
+ sumFloat(m.Backend.Primary.BW.Write, m.Backend.Secondary.BW.Write),
+ )
+
+ m.Total.IOPS.set(
+ calcIOPS(ss.TotalReadBwc),
+ calcIOPS(ss.TotalWriteBwc),
+ )
+ m.Frontend.IOPS.set(
+ calcIOPS(ss.UserDataReadBwc),
+ calcIOPS(ss.UserDataWriteBwc),
+ )
+ m.Backend.Primary.IOPS.set(
+ calcIOPS(ss.PrimaryReadBwc),
+ calcIOPS(ss.PrimaryWriteBwc),
+ )
+ m.Backend.Secondary.IOPS.set(
+ calcIOPS(ss.SecondaryReadBwc),
+ calcIOPS(ss.SecondaryWriteBwc),
+ )
+ m.Backend.Total.IOPS.set(
+ sumFloat(m.Backend.Primary.IOPS.Read, m.Backend.Secondary.IOPS.Read),
+ sumFloat(m.Backend.Primary.IOPS.Write, m.Backend.Secondary.IOPS.Write),
+ )
+
+ m.Total.IOSize.set(
+ calcIOSize(ss.TotalReadBwc),
+ calcIOSize(ss.TotalWriteBwc),
+ )
+ m.Frontend.IOSize.set(
+ calcIOSize(ss.UserDataReadBwc),
+ calcIOSize(ss.UserDataWriteBwc),
+ )
+ m.Backend.Primary.IOSize.set(
+ calcIOSize(ss.PrimaryReadBwc),
+ calcIOSize(ss.PrimaryWriteBwc),
+ )
+ m.Backend.Secondary.IOSize.set(
+ calcIOSize(ss.SecondaryReadBwc),
+ calcIOSize(ss.SecondaryWriteBwc),
+ )
+ m.Backend.Total.IOSize.set(
+ sumFloat(m.Backend.Primary.IOSize.Read, m.Backend.Secondary.IOSize.Read),
+ sumFloat(m.Backend.Primary.IOSize.Write, m.Backend.Secondary.IOSize.Write),
+ )
+}
+
+func collectSystemRebuild(sm *systemMetrics, ss client.SystemStatistics) {
+ m := &sm.Rebuild
+
+ m.Forward.BW.set(
+ calcBW(ss.FwdRebuildReadBwc),
+ calcBW(ss.FwdRebuildWriteBwc),
+ )
+ m.Backward.BW.set(
+ calcBW(ss.BckRebuildReadBwc),
+ calcBW(ss.BckRebuildWriteBwc),
+ )
+ m.Normal.BW.set(
+ calcBW(ss.NormRebuildReadBwc),
+ calcBW(ss.NormRebuildWriteBwc),
+ )
+ m.Total.BW.set(
+ sumFloat(m.Forward.BW.Read, m.Backward.BW.Read, m.Normal.BW.Read),
+ sumFloat(m.Forward.BW.Write, m.Backward.BW.Write, m.Normal.BW.Write),
+ )
+
+ m.Forward.IOPS.set(
+ calcIOPS(ss.FwdRebuildReadBwc),
+ calcIOPS(ss.FwdRebuildWriteBwc),
+ )
+ m.Backward.IOPS.set(
+ calcIOPS(ss.BckRebuildReadBwc),
+ calcIOPS(ss.BckRebuildWriteBwc),
+ )
+ m.Normal.IOPS.set(
+ calcIOPS(ss.NormRebuildReadBwc),
+ calcIOPS(ss.NormRebuildWriteBwc),
+ )
+ m.Total.IOPS.set(
+ sumFloat(m.Forward.IOPS.Read, m.Backward.IOPS.Read, m.Normal.IOPS.Read),
+ sumFloat(m.Forward.IOPS.Write, m.Backward.IOPS.Write, m.Normal.IOPS.Write),
+ )
+
+ m.Forward.IOSize.set(
+ calcIOSize(ss.FwdRebuildReadBwc),
+ calcIOSize(ss.FwdRebuildWriteBwc),
+ )
+ m.Backward.IOSize.set(
+ calcIOSize(ss.BckRebuildReadBwc),
+ calcIOSize(ss.BckRebuildWriteBwc),
+ )
+ m.Normal.IOSize.set(
+ calcIOSize(ss.NormRebuildReadBwc),
+ calcIOSize(ss.NormRebuildWriteBwc),
+ )
+ m.Total.IOSize.set(
+ sumFloat(m.Forward.IOSize.Read, m.Backward.IOSize.Read, m.Normal.IOSize.Read),
+ sumFloat(m.Forward.IOSize.Write, m.Backward.IOSize.Write, m.Normal.IOSize.Write),
+ )
+
+ m.Forward.Pending = ss.PendingFwdRebuildCapacityInKb
+ m.Backward.Pending = ss.PendingBckRebuildCapacityInKb
+ m.Normal.Pending = ss.PendingNormRebuildCapacityInKb
+ m.Total.Pending = sum(m.Forward.Pending, m.Backward.Pending, m.Normal.Pending)
+}
+
+func collectSystemRebalance(sm *systemMetrics, ss client.SystemStatistics) {
+ m := &sm.Rebalance
+
+ m.BW.set(
+ calcBW(ss.RebalanceReadBwc),
+ calcBW(ss.RebalanceWriteBwc),
+ )
+
+ m.IOPS.set(
+ calcIOPS(ss.RebalanceReadBwc),
+ calcIOPS(ss.RebalanceWriteBwc),
+ )
+
+ m.IOSize.set(
+ calcIOSize(ss.RebalanceReadBwc),
+ calcIOSize(ss.RebalanceWriteBwc),
+ )
+
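+ // Rough estimate: seconds left = pending capacity (KiB) / current rebalance read+write bandwidth (KiB/s).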
+ m.Pending = ss.PendingRebalanceCapacityInKb
+ m.TimeUntilFinish = divFloat(float64(m.Pending), m.BW.ReadWrite)
+}
+
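+// A Bwc sample describes I/O over a time window: TotalWeightInKb is the amount of data moved,
+// NumOccured the number of I/O operations, and NumSeconds the window length. The helpers
+// below derive bandwidth (KiB/s), IOPS (operations/s) and I/O size (KiB per operation).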
+func calcBW(bwc client.Bwc) float64 { return div(bwc.TotalWeightInKb, bwc.NumSeconds) }
+func calcIOPS(bwc client.Bwc) float64 { return div(bwc.NumOccured, bwc.NumSeconds) }
+func calcIOSize(bwc client.Bwc) float64 { return div(bwc.TotalWeightInKb, bwc.NumOccured) }
+
+func sum(a, b int64, others ...int64) (res int64) {
+ for _, v := range others {
+ res += v
+ }
+ return res + a + b
+}
+
+func sumFloat(a, b float64, others ...float64) (res float64) {
+ for _, v := range others {
+ res += v
+ }
+ return res + a + b
+}
+
+func div(a, b int64) float64 {
+ return divFloat(float64(a), float64(b))
+}
+
+func divFloat(a, b float64) float64 {
+ if b == 0 {
+ return 0
+ }
+ return a / b
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/config_schema.json b/src/go/plugin/go.d/modules/scaleio/config_schema.json
new file mode 100644
index 000000000..97aea7faf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ScaleIO collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the VxFlex OS Gateway API.",
+ "type": "string",
+ "default": "http://127.0.0.1:80",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/integrations/dell_emc_scaleio.md b/src/go/plugin/go.d/modules/scaleio/integrations/dell_emc_scaleio.md
new file mode 100644
index 000000000..36d022526
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/integrations/dell_emc_scaleio.md
@@ -0,0 +1,290 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/scaleio/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/scaleio/metadata.yaml"
+sidebar_label: "Dell EMC ScaleIO"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Dell EMC ScaleIO
+
+
+<img src="https://netdata.cloud/img/dell.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: scaleio
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors ScaleIO (VxFlex OS) instances via the VxFlex OS Gateway API.
+
+It collects metrics for the following ScaleIO components:
+
+- System
+- Storage Pool
+- Sdc
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Dell EMC ScaleIO instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| scaleio.system_capacity_total | total | KiB |
+| scaleio.system_capacity_in_use | in_use | KiB |
+| scaleio.system_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |
+| scaleio.system_capacity_available_volume_allocation | available | KiB |
+| scaleio.system_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |
+| scaleio.system_workload_primary_bandwidth_total | total | KiB/s |
+| scaleio.system_workload_primary_bandwidth | read, write | KiB/s |
+| scaleio.system_workload_primary_iops_total | total | iops/s |
+| scaleio.system_workload_primary_iops | read, write | iops/s |
+| scaleio.system_workload_primary_io_size_total | io_size | KiB |
+| scaleio.system_rebalance | read, write | KiB/s |
+| scaleio.system_rebalance_left | left | KiB |
+| scaleio.system_rebalance_time_until_finish | time | seconds |
+| scaleio.system_rebuild | read, write | KiB/s |
+| scaleio.system_rebuild_left | left | KiB |
+| scaleio.system_defined_components | devices, fault_sets, protection_domains, rfcache_devices, sdc, sds, snapshots, storage_pools, volumes, vtrees | components |
+| scaleio.system_components_volumes_by_type | thick, thin | volumes |
+| scaleio.system_components_volumes_by_mapping | mapped, unmapped | volumes |
+
+### Per storage pool
+
+These metrics refer to the storage pool.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| scaleio.storage_pool_capacity_total | total | KiB |
+| scaleio.storage_pool_capacity_in_use | in_use | KiB |
+| scaleio.storage_pool_capacity_usage | thick, decreased, thin, snapshot, spare, unused | KiB |
+| scaleio.storage_pool_capacity_utilization | used | percentage |
+| scaleio.storage_pool_capacity_available_volume_allocation | available | KiB |
+| scaleio.storage_pool_capacity_health_state | protected, degraded, in_maintenance, failed, unavailable | KiB |
+| scaleio.storage_pool_components | devices, snapshots, volumes, vtrees | components |
+
+### Per sdc
+
+These metrics refer to the SDC (ScaleIO Data Client).
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| scaleio.sdc_mdm_connection_state | connected | boolean |
+| scaleio.sdc_bandwidth | read, write | KiB/s |
+| scaleio.sdc_iops | read, write | iops/s |
+| scaleio.sdc_io_size | read, write | KiB |
+| scaleio.sdc_num_of_mapped_volumed | mapped | volumes |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/scaleio.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/scaleio.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | https://127.0.0.1:80 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | yes |
+| password | Password for basic HTTP authentication. | | yes |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1
+ username: admin
+ password: password
+ tls_skip_verify: yes # self-signed certificate
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instance.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1
+ username: admin
+ password: password
+ tls_skip_verify: yes # self-signed certificate
+
+ - name: remote
+ url: https://203.0.113.10
+ username: admin
+ password: password
+ tls_skip_verify: yes
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `scaleio` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m scaleio
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `scaleio` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep scaleio
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep scaleio /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep scaleio
+```
+
+
diff --git a/src/go/plugin/go.d/modules/scaleio/metadata.yaml b/src/go/plugin/go.d/modules/scaleio/metadata.yaml
new file mode 100644
index 000000000..edee6fc8b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/metadata.yaml
@@ -0,0 +1,399 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-scaleio
+ plugin_name: go.d.plugin
+ module_name: scaleio
+ monitored_instance:
+ name: Dell EMC ScaleIO
+ link: https://www.dell.com/en-ca/dt/storage/scaleio/scaleioreadynode.htm
+ icon_filename: dell.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - scaleio
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors ScaleIO (VxFlex OS) instances via the VxFlex OS Gateway API.
+
+ It collects metrics for the following ScaleIO components:
+
+ - System
+ - Storage Pool
+ - Sdc
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/scaleio.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: https://127.0.0.1:80
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: true
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: true
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1
+ username: admin
+ password: password
+ tls_skip_verify: yes # self-signed certificate
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instance.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1
+ username: admin
+ password: password
+ tls_skip_verify: yes # self-signed certificate
+
+ - name: remote
+ url: https://203.0.113.10
+ username: admin
+ password: password
+ tls_skip_verify: yes
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: scaleio.system_capacity_total
+ description: Total Capacity
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: scaleio.system_capacity_in_use
+ description: Capacity In Use
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: in_use
+ - name: scaleio.system_capacity_usage
+ description: Capacity Usage
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: thick
+ - name: decreased
+ - name: thin
+ - name: snapshot
+ - name: spare
+ - name: unused
+ - name: scaleio.system_capacity_available_volume_allocation
+ description: Available For Volume Allocation
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: available
+ - name: scaleio.system_capacity_health_state
+ description: Capacity Health State
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: protected
+ - name: degraded
+ - name: in_maintenance
+ - name: failed
+ - name: unavailable
+ - name: scaleio.system_workload_primary_bandwidth_total
+ description: Primary Backend Bandwidth Total (Read and Write)
+ unit: KiB/s
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: scaleio.system_workload_primary_bandwidth
+ description: Primary Backend Bandwidth
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: scaleio.system_workload_primary_iops_total
+ description: Primary Backend IOPS Total (Read and Write)
+ unit: iops/s
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: scaleio.system_workload_primary_iops
+ description: Primary Backend IOPS
+ unit: iops/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: scaleio.system_workload_primary_io_size_total
+ description: Primary Backend I/O Size Total (Read and Write)
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: io_size
+ - name: scaleio.system_rebalance
+ description: Rebalance
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: scaleio.system_rebalance_left
+ description: Rebalance Pending Capacity
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: left
+ - name: scaleio.system_rebalance_time_until_finish
+ description: Rebalance Approximate Time Until Finish
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: scaleio.system_rebuild
+ description: Rebuild Bandwidth Total (Forward, Backward and Normal)
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: scaleio.system_rebuild_left
+ description: Rebuild Pending Capacity Total (Forward, Backward and Normal)
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: left
+ - name: scaleio.system_defined_components
+ description: Components
+ unit: components
+ chart_type: line
+ dimensions:
+ - name: devices
+ - name: fault_sets
+ - name: protection_domains
+ - name: rfcache_devices
+ - name: sdc
+ - name: sds
+ - name: snapshots
+ - name: storage_pools
+ - name: volumes
+ - name: vtrees
+ - name: scaleio.system_components_volumes_by_type
+ description: Volumes By Type
+ unit: volumes
+ chart_type: stacked
+ dimensions:
+ - name: thick
+ - name: thin
+ - name: scaleio.system_components_volumes_by_mapping
+ description: Volumes By Mapping
+ unit: volumes
+ chart_type: stacked
+ dimensions:
+ - name: mapped
+ - name: unmapped
+ - name: storage pool
+ description: These metrics refer to the storage pool.
+ labels: []
+ metrics:
+ - name: scaleio.storage_pool_capacity_total
+ description: Total Capacity
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: scaleio.storage_pool_capacity_in_use
+ description: Capacity In Use
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: in_use
+ - name: scaleio.storage_pool_capacity_usage
+ description: Capacity Usage
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: thick
+ - name: decreased
+ - name: thin
+ - name: snapshot
+ - name: spare
+ - name: unused
+ - name: scaleio.storage_pool_capacity_utilization
+ description: Capacity Utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: scaleio.storage_pool_capacity_available_volume_allocation
+ description: Available For Volume Allocation
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: available
+ - name: scaleio.storage_pool_capacity_health_state
+ description: Capacity Health State
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: protected
+ - name: degraded
+ - name: in_maintenance
+ - name: failed
+ - name: unavailable
+ - name: scaleio.storage_pool_components
+ description: Components
+ unit: components
+ chart_type: line
+ dimensions:
+ - name: devices
+ - name: snapshots
+ - name: volumes
+ - name: vtrees
+ - name: sdc
+ description: These metrics refer to the SDC (ScaleIO Data Client).
+ labels: []
+ metrics:
+ - name: scaleio.sdc_mdm_connection_state
+ description: MDM Connection State
+ unit: boolean
+ chart_type: line
+ dimensions:
+ - name: connected
+ - name: scaleio.sdc_bandwidth
+ description: Bandwidth
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: scaleio.sdc_iops
+ description: IOPS
+ unit: iops/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: scaleio.sdc_io_size
+ description: I/O Size
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: scaleio.sdc_num_of_mapped_volumed
+ description: Mapped Volumes
+ unit: volumes
+ chart_type: line
+ dimensions:
+ - name: mapped
diff --git a/src/go/plugin/go.d/modules/scaleio/metrics.go b/src/go/plugin/go.d/modules/scaleio/metrics.go
new file mode 100644
index 000000000..a5a9b9810
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/metrics.go
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scaleio
+
+type metrics struct {
+ System systemMetrics `stm:"system"`
+ Sdc map[string]sdcMetrics `stm:"sdc"`
+ StoragePool map[string]storagePoolMetrics `stm:"storage_pool"`
+}
+
+type capacity struct {
+ MaxCapacity int64 `stm:"max_capacity"`
+ ThickInUse int64 `stm:"thick_in_use"`
+ ThinInUse int64 `stm:"thin_in_use"`
+ Snapshot int64 `stm:"snapshot"`
+ Spare int64 `stm:"spare"`
+ Decreased int64 `stm:"decreased"` // not in statistics, should be calculated
+ Unused int64 `stm:"unused"`
+
+ InUse int64 `stm:"in_use"`
+ AvailableForVolumeAllocation int64 `stm:"available_for_volume_allocation"`
+
+ Protected int64 `stm:"protected"`
+ InMaintenance int64 `stm:"in_maintenance"`
+ Degraded int64 `stm:"degraded"`
+ Failed int64 `stm:"failed"`
+ UnreachableUnused int64 `stm:"unreachable_unused"`
+}
+
+type (
+ systemMetrics struct {
+ Capacity systemCapacity `stm:"capacity"`
+ Workload systemWorkload `stm:""`
+ Rebalance systemRebalance `stm:"rebalance"`
+ Rebuild systemRebuild `stm:"rebuild"`
+ Components systemComponents `stm:"num_of"`
+ }
+ systemCapacity = capacity
+ systemComponents struct {
+ Devices int64 `stm:"devices"`
+ FaultSets int64 `stm:"fault_sets"`
+ ProtectionDomains int64 `stm:"protection_domains"`
+ RfcacheDevices int64 `stm:"rfcache_devices"`
+ Sdc int64 `stm:"sdc"`
+ Sds int64 `stm:"sds"`
+ Snapshots int64 `stm:"snapshots"`
+ StoragePools int64 `stm:"storage_pools"`
+ MappedToAllVolumes int64 `stm:"mapped_to_all_volumes"`
+ ThickBaseVolumes int64 `stm:"thick_base_volumes"`
+ ThinBaseVolumes int64 `stm:"thin_base_volumes"`
+ UnmappedVolumes int64 `stm:"unmapped_volumes"`
+ MappedVolumes int64 `stm:"mapped_volumes"`
+ Volumes int64 `stm:"volumes"`
+ VTrees int64 `stm:"vtrees"`
+ }
+ systemWorkload struct {
+ Total bwIOPS `stm:"total"`
+ Backend struct {
+ Total bwIOPS `stm:"total"`
+ Primary bwIOPS `stm:"primary"`
+ Secondary bwIOPS `stm:"secondary"`
+ } `stm:"backend"`
+ Frontend bwIOPS `stm:"frontend_user_data"`
+ }
+ systemRebalance struct {
+ TimeUntilFinish float64 `stm:"time_until_finish"`
+ bwIOPSPending `stm:""`
+ }
+ systemRebuild struct {
+ Total bwIOPSPending `stm:"total"`
+ Forward bwIOPSPending `stm:"forward"`
+ Backward bwIOPSPending `stm:"backward"`
+ Normal bwIOPSPending `stm:"normal"`
+ }
+)
+
+type (
+ sdcMetrics struct {
+ bwIOPS `stm:""`
+ MappedVolumes int64 `stm:"num_of_mapped_volumes"`
+ MDMConnectionState bool `stm:"mdm_connection_state"`
+ }
+)
+
+type (
+ storagePoolMetrics struct {
+ Capacity storagePoolCapacity `stm:"capacity"`
+ Components struct {
+ Devices int64 `stm:"devices"`
+ Volumes int64 `stm:"volumes"`
+ Vtrees int64 `stm:"vtrees"`
+ Snapshots int64 `stm:"snapshots"`
+ } `stm:"num_of"`
+ }
+ storagePoolCapacity struct {
+ capacity `stm:""`
+ Utilization float64 `stm:"utilization,100,1"` // TODO: only StoragePool (sparePercentage)
+ AlertThreshold struct {
+ Critical int64 `stm:"critical_threshold"`
+ High int64 `stm:"high_threshold"`
+ } `stm:"alert"`
+ }
+)
+
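+// The ",1000,1" suffix in the stm tags below scales the float values by 1000 (keeping three
+// decimal places) when stm.ToMap converts them to int64 chart dimensions.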
+type (
+ readWrite struct {
+ Read float64 `stm:"read,1000,1"`
+ Write float64 `stm:"write,1000,1"`
+ ReadWrite float64 `stm:"read_write,1000,1"`
+ }
+ bwIOPS struct {
+ BW readWrite `stm:"bandwidth"`
+ IOPS readWrite `stm:"iops"`
+ IOSize readWrite `stm:"io_size"`
+ }
+ bwIOPSPending struct {
+ bwIOPS `stm:""`
+ Pending int64 `stm:"pending_capacity_in_Kb"`
+ }
+)
+
+func (rw *readWrite) set(r, w float64) {
+ rw.Read = r
+ rw.Write = w
+ rw.ReadWrite = r + w
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/queries.go b/src/go/plugin/go.d/modules/scaleio/queries.go
new file mode 100644
index 000000000..4f38f9976
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/queries.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scaleio
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio/client"
+
+/*
+Starting from version 3 of the ScaleIO/VxFlex OS API, the numOfScsiInitiators property is removed from the system selectedStatisticsQuery.
+Reference: VxFlex OS v3.x REST API Reference Guide.pdf
+*/
+
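+// query is the selected-statistics request sent to the gateway on every collection:
+// one entry per object type, with AllIDs requesting per-instance statistics for every
+// StoragePool and Sdc.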
+var query = client.SelectedStatisticsQuery{
+ List: []client.SelectedObject{
+ {
+ Type: "System",
+ Properties: []string{
+ "maxCapacityInKb",
+ "thickCapacityInUseInKb",
+ "thinCapacityInUseInKb",
+ "snapCapacityInUseOccupiedInKb",
+ "spareCapacityInKb",
+ "capacityLimitInKb",
+
+ "protectedCapacityInKb",
+ "degradedHealthyCapacityInKb",
+ "degradedFailedCapacityInKb",
+ "failedCapacityInKb",
+ "unreachableUnusedCapacityInKb",
+ "inMaintenanceCapacityInKb",
+
+ "capacityInUseInKb",
+ "capacityAvailableForVolumeAllocationInKb",
+
+ "numOfDevices",
+ "numOfFaultSets",
+ "numOfProtectionDomains",
+ "numOfRfcacheDevices",
+ "numOfSdc",
+ "numOfSds",
+ "numOfSnapshots",
+ "numOfStoragePools",
+ "numOfVolumes",
+ "numOfVtrees",
+ "numOfThickBaseVolumes",
+ "numOfThinBaseVolumes",
+ "numOfMappedToAllVolumes",
+ "numOfUnmappedVolumes",
+
+ "rebalanceReadBwc",
+ "rebalanceWriteBwc",
+ "pendingRebalanceCapacityInKb",
+
+ "pendingNormRebuildCapacityInKb",
+ "pendingBckRebuildCapacityInKb",
+ "pendingFwdRebuildCapacityInKb",
+ "normRebuildReadBwc",
+ "normRebuildWriteBwc",
+ "bckRebuildReadBwc",
+ "bckRebuildWriteBwc",
+ "fwdRebuildReadBwc",
+ "fwdRebuildWriteBwc",
+
+ "primaryReadBwc",
+ "primaryWriteBwc",
+ "secondaryReadBwc",
+ "secondaryWriteBwc",
+ "userDataReadBwc",
+ "userDataWriteBwc",
+ "totalReadBwc",
+ "totalWriteBwc",
+ },
+ },
+ {
+ Type: "StoragePool",
+ AllIDs: true,
+ Properties: []string{
+ "maxCapacityInKb",
+ "thickCapacityInUseInKb",
+ "thinCapacityInUseInKb",
+ "snapCapacityInUseOccupiedInKb",
+ "spareCapacityInKb",
+ "capacityLimitInKb",
+
+ "protectedCapacityInKb",
+ "degradedHealthyCapacityInKb",
+ "degradedFailedCapacityInKb",
+ "failedCapacityInKb",
+ "unreachableUnusedCapacityInKb",
+ "inMaintenanceCapacityInKb",
+
+ "capacityInUseInKb",
+ "capacityAvailableForVolumeAllocationInKb",
+
+ "numOfDevices",
+ "numOfVolumes",
+ "numOfVtrees",
+ "numOfSnapshots",
+ },
+ },
+ {
+ Type: "Sdc",
+ AllIDs: true,
+ Properties: []string{
+ "userDataReadBwc",
+ "userDataWriteBwc",
+
+ "numOfMappedVolumes",
+ },
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/scaleio.go b/src/go/plugin/go.d/modules/scaleio/scaleio.go
new file mode 100644
index 000000000..d32ccbffe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/scaleio.go
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scaleio
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio/client"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("scaleio", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *ScaleIO {
+ return &ScaleIO{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "https://127.0.0.1",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: systemCharts.Copy(),
+ charted: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type (
+ ScaleIO struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ client *client.Client
+
+ discovered instances
+ charted map[string]bool
+ lastDiscoveryOK bool
+ runs int
+ }
+ instances struct {
+ sdc map[string]client.Sdc
+ pool map[string]client.StoragePool
+ }
+)
+
+func (s *ScaleIO) Configuration() any {
+ return s.Config
+}
+
+func (s *ScaleIO) Init() error {
+ if s.Username == "" || s.Password == "" {
+ s.Error("username and password aren't set")
+ return errors.New("username and password aren't set")
+ }
+
+ c, err := client.New(s.Client, s.Request)
+ if err != nil {
+ s.Errorf("error on creating ScaleIO client: %v", err)
+ return err
+ }
+ s.client = c
+
+ s.Debugf("using URL %s", s.URL)
+ s.Debugf("using timeout: %s", s.Timeout)
+
+ return nil
+}
+
+func (s *ScaleIO) Check() error {
+ if err := s.client.Login(); err != nil {
+ s.Error(err)
+ return err
+ }
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (s *ScaleIO) Charts() *module.Charts {
+ return s.charts
+}
+
+func (s *ScaleIO) Collect() map[string]int64 {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return nil
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (s *ScaleIO) Cleanup() {
+ if s.client == nil {
+ return
+ }
+ _ = s.client.Logout()
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/scaleio_test.go b/src/go/plugin/go.d/modules/scaleio/scaleio_test.go
new file mode 100644
index 000000000..bb906333e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/scaleio_test.go
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scaleio
+
+import (
+ "encoding/json"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/scaleio/client"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataSelectedStatistics, _ = os.ReadFile("testdata/selected_statistics.json")
+ dataInstances, _ = os.ReadFile("testdata/instances.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataSelectedStatistics": dataSelectedStatistics,
+ "dataInstances": dataInstances,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestScaleIO_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &ScaleIO{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestScaleIO_Init(t *testing.T) {
+ scaleIO := New()
+ scaleIO.Username = "username"
+ scaleIO.Password = "password"
+
+ assert.NoError(t, scaleIO.Init())
+}
+func TestScaleIO_Init_UsernameAndPasswordNotSet(t *testing.T) {
+ assert.Error(t, New().Init())
+}
+
+func TestScaleIO_Init_ErrorOnCreatingClientWrongTLSCA(t *testing.T) {
+ job := New()
+ job.Username = "username"
+ job.Password = "password"
+ job.Client.TLSConfig.TLSCA = "testdata/tls"
+
+ assert.Error(t, job.Init())
+}
+
+func TestScaleIO_Check(t *testing.T) {
+ srv, _, scaleIO := prepareSrvMockScaleIO(t)
+ defer srv.Close()
+ require.NoError(t, scaleIO.Init())
+
+ assert.NoError(t, scaleIO.Check())
+}
+
+func TestScaleIO_Check_ErrorOnLogin(t *testing.T) {
+ srv, mock, scaleIO := prepareSrvMockScaleIO(t)
+ defer srv.Close()
+ require.NoError(t, scaleIO.Init())
+ mock.Password = "new password"
+
+ assert.Error(t, scaleIO.Check())
+}
+
+func TestScaleIO_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestScaleIO_Cleanup(t *testing.T) {
+ srv, _, scaleIO := prepareSrvMockScaleIO(t)
+ defer srv.Close()
+ require.NoError(t, scaleIO.Init())
+ require.NoError(t, scaleIO.Check())
+
+ scaleIO.Cleanup()
+ assert.False(t, scaleIO.client.LoggedIn())
+}
+
+func TestScaleIO_Collect(t *testing.T) {
+ srv, _, scaleIO := prepareSrvMockScaleIO(t)
+ defer srv.Close()
+ require.NoError(t, scaleIO.Init())
+ require.NoError(t, scaleIO.Check())
+
+ expected := map[string]int64{
+ "sdc_6076fd0f00000000_bandwidth_read": 0,
+ "sdc_6076fd0f00000000_bandwidth_read_write": 0,
+ "sdc_6076fd0f00000000_bandwidth_write": 0,
+ "sdc_6076fd0f00000000_io_size_read": 0,
+ "sdc_6076fd0f00000000_io_size_read_write": 0,
+ "sdc_6076fd0f00000000_io_size_write": 0,
+ "sdc_6076fd0f00000000_iops_read": 0,
+ "sdc_6076fd0f00000000_iops_read_write": 0,
+ "sdc_6076fd0f00000000_iops_write": 0,
+ "sdc_6076fd0f00000000_mdm_connection_state": 1,
+ "sdc_6076fd0f00000000_num_of_mapped_volumes": 1,
+ "sdc_6076fd1000000001_bandwidth_read": 1000,
+ "sdc_6076fd1000000001_bandwidth_read_write": 117400000,
+ "sdc_6076fd1000000001_bandwidth_write": 117399000,
+ "sdc_6076fd1000000001_io_size_read": 1000,
+ "sdc_6076fd1000000001_io_size_read_write": 695668,
+ "sdc_6076fd1000000001_io_size_write": 694668,
+ "sdc_6076fd1000000001_iops_read": 1000,
+ "sdc_6076fd1000000001_iops_read_write": 170000,
+ "sdc_6076fd1000000001_iops_write": 169000,
+ "sdc_6076fd1000000001_mdm_connection_state": 0,
+ "sdc_6076fd1000000001_num_of_mapped_volumes": 1,
+ "sdc_6076fd1100000002_bandwidth_read": 0,
+ "sdc_6076fd1100000002_bandwidth_read_write": 118972000,
+ "sdc_6076fd1100000002_bandwidth_write": 118972000,
+ "sdc_6076fd1100000002_io_size_read": 0,
+ "sdc_6076fd1100000002_io_size_read_write": 820496,
+ "sdc_6076fd1100000002_io_size_write": 820496,
+ "sdc_6076fd1100000002_iops_read": 0,
+ "sdc_6076fd1100000002_iops_read_write": 145000,
+ "sdc_6076fd1100000002_iops_write": 145000,
+ "sdc_6076fd1100000002_mdm_connection_state": 0,
+ "sdc_6076fd1100000002_num_of_mapped_volumes": 1,
+ "storage_pool_40395b7b00000000_capacity_alert_critical_threshold": 90,
+ "storage_pool_40395b7b00000000_capacity_alert_high_threshold": 80,
+ "storage_pool_40395b7b00000000_capacity_available_for_volume_allocation": 100663296,
+ "storage_pool_40395b7b00000000_capacity_decreased": 0,
+ "storage_pool_40395b7b00000000_capacity_degraded": 0,
+ "storage_pool_40395b7b00000000_capacity_failed": 0,
+ "storage_pool_40395b7b00000000_capacity_in_maintenance": 0,
+ "storage_pool_40395b7b00000000_capacity_in_use": 50110464,
+ "storage_pool_40395b7b00000000_capacity_max_capacity": 311424000,
+ "storage_pool_40395b7b00000000_capacity_protected": 50110464,
+ "storage_pool_40395b7b00000000_capacity_snapshot": 749568,
+ "storage_pool_40395b7b00000000_capacity_spare": 31141888,
+ "storage_pool_40395b7b00000000_capacity_thick_in_use": 0,
+ "storage_pool_40395b7b00000000_capacity_thin_in_use": 49360896,
+ "storage_pool_40395b7b00000000_capacity_unreachable_unused": 0,
+ "storage_pool_40395b7b00000000_capacity_unused": 229422080,
+ "storage_pool_40395b7b00000000_capacity_utilization": 1787,
+ "storage_pool_40395b7b00000000_num_of_devices": 3,
+ "storage_pool_40395b7b00000000_num_of_snapshots": 1,
+ "storage_pool_40395b7b00000000_num_of_volumes": 3,
+ "storage_pool_40395b7b00000000_num_of_vtrees": 2,
+ "storage_pool_4039828b00000001_capacity_alert_critical_threshold": 90,
+ "storage_pool_4039828b00000001_capacity_alert_high_threshold": 80,
+ "storage_pool_4039828b00000001_capacity_available_for_volume_allocation": 142606336,
+ "storage_pool_4039828b00000001_capacity_decreased": 0,
+ "storage_pool_4039828b00000001_capacity_degraded": 0,
+ "storage_pool_4039828b00000001_capacity_failed": 0,
+ "storage_pool_4039828b00000001_capacity_in_maintenance": 0,
+ "storage_pool_4039828b00000001_capacity_in_use": 0,
+ "storage_pool_4039828b00000001_capacity_max_capacity": 332395520,
+ "storage_pool_4039828b00000001_capacity_protected": 0,
+ "storage_pool_4039828b00000001_capacity_snapshot": 0,
+ "storage_pool_4039828b00000001_capacity_spare": 33239040,
+ "storage_pool_4039828b00000001_capacity_thick_in_use": 0,
+ "storage_pool_4039828b00000001_capacity_thin_in_use": 0,
+ "storage_pool_4039828b00000001_capacity_unreachable_unused": 0,
+ "storage_pool_4039828b00000001_capacity_unused": 299156480,
+ "storage_pool_4039828b00000001_capacity_utilization": 0,
+ "storage_pool_4039828b00000001_num_of_devices": 3,
+ "storage_pool_4039828b00000001_num_of_snapshots": 0,
+ "storage_pool_4039828b00000001_num_of_volumes": 0,
+ "storage_pool_4039828b00000001_num_of_vtrees": 0,
+ "system_backend_primary_bandwidth_read": 800,
+ "system_backend_primary_bandwidth_read_write": 238682400,
+ "system_backend_primary_bandwidth_write": 238681600,
+ "system_backend_primary_io_size_read": 4000,
+ "system_backend_primary_io_size_read_write": 770971,
+ "system_backend_primary_io_size_write": 766971,
+ "system_backend_primary_iops_read": 200,
+ "system_backend_primary_iops_read_write": 311400,
+ "system_backend_primary_iops_write": 311200,
+ "system_backend_secondary_bandwidth_read": 0,
+ "system_backend_secondary_bandwidth_read_write": 233926400,
+ "system_backend_secondary_bandwidth_write": 233926400,
+ "system_backend_secondary_io_size_read": 0,
+ "system_backend_secondary_io_size_read_write": 764465,
+ "system_backend_secondary_io_size_write": 764465,
+ "system_backend_secondary_iops_read": 0,
+ "system_backend_secondary_iops_read_write": 306000,
+ "system_backend_secondary_iops_write": 306000,
+ "system_backend_total_bandwidth_read": 800,
+ "system_backend_total_bandwidth_read_write": 472608800,
+ "system_backend_total_bandwidth_write": 472608000,
+ "system_backend_total_io_size_read": 4000,
+ "system_backend_total_io_size_read_write": 1535437,
+ "system_backend_total_io_size_write": 1531437,
+ "system_backend_total_iops_read": 200,
+ "system_backend_total_iops_read_write": 617400,
+ "system_backend_total_iops_write": 617200,
+ "system_capacity_available_for_volume_allocation": 243269632,
+ "system_capacity_decreased": 0,
+ "system_capacity_degraded": 0,
+ "system_capacity_failed": 0,
+ "system_capacity_in_maintenance": 0,
+ "system_capacity_in_use": 50110464,
+ "system_capacity_max_capacity": 643819520,
+ "system_capacity_protected": 50110464,
+ "system_capacity_snapshot": 749568,
+ "system_capacity_spare": 64380928,
+ "system_capacity_thick_in_use": 0,
+ "system_capacity_thin_in_use": 49360896,
+ "system_capacity_unreachable_unused": 0,
+ "system_capacity_unused": 528578560,
+ "system_frontend_user_data_bandwidth_read": 0,
+ "system_frontend_user_data_bandwidth_read_write": 227170000,
+ "system_frontend_user_data_bandwidth_write": 227170000,
+ "system_frontend_user_data_io_size_read": 0,
+ "system_frontend_user_data_io_size_read_write": 797087,
+ "system_frontend_user_data_io_size_write": 797087,
+ "system_frontend_user_data_iops_read": 0,
+ "system_frontend_user_data_iops_read_write": 285000,
+ "system_frontend_user_data_iops_write": 285000,
+ "system_num_of_devices": 6,
+ "system_num_of_fault_sets": 0,
+ "system_num_of_mapped_to_all_volumes": 0,
+ "system_num_of_mapped_volumes": 3,
+ "system_num_of_protection_domains": 1,
+ "system_num_of_rfcache_devices": 0,
+ "system_num_of_sdc": 3,
+ "system_num_of_sds": 3,
+ "system_num_of_snapshots": 1,
+ "system_num_of_storage_pools": 2,
+ "system_num_of_thick_base_volumes": 0,
+ "system_num_of_thin_base_volumes": 2,
+ "system_num_of_unmapped_volumes": 0,
+ "system_num_of_volumes": 3,
+ "system_num_of_vtrees": 2,
+ "system_rebalance_bandwidth_read": 0,
+ "system_rebalance_bandwidth_read_write": 0,
+ "system_rebalance_bandwidth_write": 0,
+ "system_rebalance_io_size_read": 0,
+ "system_rebalance_io_size_read_write": 0,
+ "system_rebalance_io_size_write": 0,
+ "system_rebalance_iops_read": 0,
+ "system_rebalance_iops_read_write": 0,
+ "system_rebalance_iops_write": 0,
+ "system_rebalance_pending_capacity_in_Kb": 0,
+ "system_rebalance_time_until_finish": 0,
+ "system_rebuild_backward_bandwidth_read": 0,
+ "system_rebuild_backward_bandwidth_read_write": 0,
+ "system_rebuild_backward_bandwidth_write": 0,
+ "system_rebuild_backward_io_size_read": 0,
+ "system_rebuild_backward_io_size_read_write": 0,
+ "system_rebuild_backward_io_size_write": 0,
+ "system_rebuild_backward_iops_read": 0,
+ "system_rebuild_backward_iops_read_write": 0,
+ "system_rebuild_backward_iops_write": 0,
+ "system_rebuild_backward_pending_capacity_in_Kb": 0,
+ "system_rebuild_forward_bandwidth_read": 0,
+ "system_rebuild_forward_bandwidth_read_write": 0,
+ "system_rebuild_forward_bandwidth_write": 0,
+ "system_rebuild_forward_io_size_read": 0,
+ "system_rebuild_forward_io_size_read_write": 0,
+ "system_rebuild_forward_io_size_write": 0,
+ "system_rebuild_forward_iops_read": 0,
+ "system_rebuild_forward_iops_read_write": 0,
+ "system_rebuild_forward_iops_write": 0,
+ "system_rebuild_forward_pending_capacity_in_Kb": 0,
+ "system_rebuild_normal_bandwidth_read": 0,
+ "system_rebuild_normal_bandwidth_read_write": 0,
+ "system_rebuild_normal_bandwidth_write": 0,
+ "system_rebuild_normal_io_size_read": 0,
+ "system_rebuild_normal_io_size_read_write": 0,
+ "system_rebuild_normal_io_size_write": 0,
+ "system_rebuild_normal_iops_read": 0,
+ "system_rebuild_normal_iops_read_write": 0,
+ "system_rebuild_normal_iops_write": 0,
+ "system_rebuild_normal_pending_capacity_in_Kb": 0,
+ "system_rebuild_total_bandwidth_read": 0,
+ "system_rebuild_total_bandwidth_read_write": 0,
+ "system_rebuild_total_bandwidth_write": 0,
+ "system_rebuild_total_io_size_read": 0,
+ "system_rebuild_total_io_size_read_write": 0,
+ "system_rebuild_total_io_size_write": 0,
+ "system_rebuild_total_iops_read": 0,
+ "system_rebuild_total_iops_read_write": 0,
+ "system_rebuild_total_iops_write": 0,
+ "system_rebuild_total_pending_capacity_in_Kb": 0,
+ "system_total_bandwidth_read": 800,
+ "system_total_bandwidth_read_write": 472608800,
+ "system_total_bandwidth_write": 472608000,
+ "system_total_io_size_read": 4000,
+ "system_total_io_size_read_write": 769729,
+ "system_total_io_size_write": 765729,
+ "system_total_iops_read": 200,
+ "system_total_iops_read_write": 617400,
+ "system_total_iops_write": 617200,
+ }
+
+ collected := scaleIO.Collect()
+ assert.Equal(t, expected, collected)
+ testCharts(t, scaleIO, collected)
+}
+
+func TestScaleIO_Collect_ConnectionRefused(t *testing.T) {
+ srv, _, scaleIO := prepareSrvMockScaleIO(t)
+ defer srv.Close()
+ require.NoError(t, scaleIO.Init())
+ require.NoError(t, scaleIO.Check())
+ scaleIO.client.Request.URL = "http://127.0.0.1:38001"
+
+ assert.Nil(t, scaleIO.Collect())
+}
+
+func testCharts(t *testing.T, scaleIO *ScaleIO, collected map[string]int64) {
+ t.Helper()
+ ensureStoragePoolChartsAreCreated(t, scaleIO)
+ ensureSdcChartsAreCreated(t, scaleIO)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, scaleIO, collected)
+}
+
+func ensureStoragePoolChartsAreCreated(t *testing.T, scaleIO *ScaleIO) {
+ for _, pool := range scaleIO.discovered.pool {
+ for _, chart := range *newStoragePoolCharts(pool) {
+ assert.Truef(t, scaleIO.Charts().Has(chart.ID), "chart '%s' is not created", chart.ID)
+ }
+ }
+}
+
+func ensureSdcChartsAreCreated(t *testing.T, scaleIO *ScaleIO) {
+ for _, sdc := range scaleIO.discovered.sdc {
+ for _, chart := range *newSdcCharts(sdc) {
+ assert.Truef(t, scaleIO.Charts().Has(chart.ID), "chart '%s' is not created", chart.ID)
+ }
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, scaleIO *ScaleIO, collected map[string]int64) {
+ for _, chart := range *scaleIO.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareSrvMockScaleIO(t *testing.T) (*httptest.Server, *client.MockScaleIOAPIServer, *ScaleIO) {
+ t.Helper()
+ const (
+ user = "user"
+ password = "password"
+ version = "2.5"
+ token = "token"
+ )
+ var stats client.SelectedStatistics
+ err := json.Unmarshal(dataSelectedStatistics, &stats)
+ require.NoError(t, err)
+
+ var ins client.Instances
+ err = json.Unmarshal(dataInstances, &ins)
+ require.NoError(t, err)
+
+ mock := client.MockScaleIOAPIServer{
+ User: user,
+ Password: password,
+ Version: version,
+ Token: token,
+ Instances: ins,
+ Statistics: stats,
+ }
+ srv := httptest.NewServer(&mock)
+
+ scaleIO := New()
+ scaleIO.URL = srv.URL
+ scaleIO.Username = user
+ scaleIO.Password = password
+ return srv, &mock, scaleIO
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/testdata/config.json b/src/go/plugin/go.d/modules/scaleio/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/scaleio/testdata/config.yaml b/src/go/plugin/go.d/modules/scaleio/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/scaleio/testdata/instances.json b/src/go/plugin/go.d/modules/scaleio/testdata/instances.json
new file mode 100644
index 000000000..bc8c6e8ac
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/testdata/instances.json
@@ -0,0 +1,1160 @@
+{
+ "System": {
+ "authenticationMethod": "Native",
+ "capacityAlertCriticalThresholdPercent": 90,
+ "capacityAlertHighThresholdPercent": 80,
+ "capacityTimeLeftInDays": "Unlimited",
+ "cliPasswordAllowed": true,
+ "daysInstalled": 17,
+ "defaultIsVolumeObfuscated": false,
+ "enterpriseFeaturesEnabled": true,
+ "id": "499634a44778afc0",
+ "installId": "3e9fc5811a7efb00",
+ "isInitialLicense": true,
+ "links": [
+ {
+ "href": "/api/instances/System::499634a44778afc0",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/System::499634a44778afc0/relationships/Statistics",
+ "rel": "/api/System/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/System::499634a44778afc0/relationships/ProtectionDomain",
+ "rel": "/api/System/relationship/ProtectionDomain"
+ },
+ {
+ "href": "/api/instances/System::499634a44778afc0/relationships/Sdc",
+ "rel": "/api/System/relationship/Sdc"
+ },
+ {
+ "href": "/api/instances/System::499634a44778afc0/relationships/User",
+ "rel": "/api/System/relationship/User"
+ }
+ ],
+ "managementClientSecureCommunicationEnabled": true,
+ "maxCapacityInGb": "Unlimited",
+ "mdmCluster": {
+ "clusterMode": "ThreeNodes",
+ "clusterState": "ClusteredNormal",
+ "goodNodesNum": 3,
+ "goodReplicasNum": 2,
+ "id": "5302483491453710272",
+ "master": {
+ "id": "65ed0ee3247d0a00",
+ "ips": [
+ "100.127.0.10"
+ ],
+ "managementIPs": [
+ "100.127.0.10"
+ ],
+ "name": "Manager1",
+ "opensslVersion": "OpenSSL 1.0.2g 1 Mar 2016",
+ "port": 9011,
+ "role": "Manager",
+ "versionInfo": "R2_6.11000.0",
+ "virtualInterfaces": []
+ },
+ "slaves": [
+ {
+ "id": "4cc44104130ce7b1",
+ "ips": [
+ "100.127.0.11"
+ ],
+ "managementIPs": [
+ "100.127.0.11"
+ ],
+ "name": "Manager2",
+ "opensslVersion": "OpenSSL 1.0.2g 1 Mar 2016",
+ "port": 9011,
+ "role": "Manager",
+ "status": "Normal",
+ "versionInfo": "R2_6.11000.0",
+ "virtualInterfaces": []
+ }
+ ],
+ "tieBreakers": [
+ {
+ "id": "35bf9d62661a6db2",
+ "ips": [
+ "100.127.0.12"
+ ],
+ "managementIPs": [
+ "100.127.0.12"
+ ],
+ "name": "Tie-Breaker1",
+ "opensslVersion": "N/A",
+ "port": 9011,
+ "role": "TieBreaker",
+ "status": "Normal",
+ "versionInfo": "R2_6.11000.0"
+ }
+ ]
+ },
+ "mdmManagementPort": 6611,
+ "mdmToSdsPolicy": "Authentication",
+ "perfProfile": "Default",
+ "remoteReadOnlyLimitState": false,
+ "restrictedSdcMode": "None",
+ "restrictedSdcModeEnabled": false,
+ "sdcLongOperationsCounterParameters": {
+ "longWindow": {
+ "threshold": 1000000,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 100000,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 10000,
+ "windowSizeInSec": 60
+ }
+ },
+ "sdcMdmNetworkDisconnectionsCounterParameters": {
+ "longWindow": {
+ "threshold": 700,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 500,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 300,
+ "windowSizeInSec": 60
+ }
+ },
+ "sdcMemoryAllocationFailuresCounterParameters": {
+ "longWindow": {
+ "threshold": 700,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 500,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 300,
+ "windowSizeInSec": 60
+ }
+ },
+ "sdcSdsConnectivityInfo": {
+ "disconnectedSdcId": null,
+ "disconnectedSdcName": null,
+ "disconnectedSdsId": null,
+ "disconnectedSdsIp": null,
+ "disconnectedSdsName": null,
+ "sdcSdsConnectivityStatus": "AllConnected"
+ },
+ "sdcSdsNetworkDisconnectionsCounterParameters": {
+ "longWindow": {
+ "threshold": 20000,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 4000,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 800,
+ "windowSizeInSec": 60
+ }
+ },
+ "sdcSocketAllocationFailuresCounterParameters": {
+ "longWindow": {
+ "threshold": 700,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 500,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 300,
+ "windowSizeInSec": 60
+ }
+ },
+ "showGuid": true,
+ "swid": "",
+ "systemVersionName": "DellEMC ScaleIO Version: R2_6.11000.113",
+ "tlsVersion": "TLSv1.2",
+ "upgradeState": "NoUpgrade"
+ },
+ "deviceList": [
+ {
+ "aggregatedState": "NeverFailed",
+ "capacityLimitInKb": 103808000,
+ "deviceCurrentPathName": "/dev/sdb",
+ "deviceOriginalPathName": "/dev/sdb",
+ "deviceState": "Normal",
+ "errorState": "None",
+ "id": "ebbf9d6500010000",
+ "ledSetting": "Off",
+ "links": [
+ {
+ "href": "/api/instances/Device::ebbf9d6500010000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Device::ebbf9d6500010000/relationships/Statistics",
+ "rel": "/api/Device/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabe00000001",
+ "rel": "/api/parent/relationship/sdsId"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "longSuccessfulIos": {
+ "longWindow": null,
+ "mediumWindow": null,
+ "shortWindow": null
+ },
+ "maxCapacityInKb": 103808000,
+ "name": "sdb",
+ "rfcacheErrorDeviceDoesNotExist": false,
+ "sdsId": "130dcabe00000001",
+ "ssdEndOfLifeState": "NeverFailed",
+ "storagePoolId": "40395b7b00000000",
+ "temperatureState": "NeverFailed"
+ },
+ {
+ "aggregatedState": "NeverFailed",
+ "capacityLimitInKb": 114293760,
+ "deviceCurrentPathName": "/dev/sdc",
+ "deviceOriginalPathName": "/dev/sdc",
+ "deviceState": "Normal",
+ "errorState": "None",
+ "id": "ebbfc47300010001",
+ "ledSetting": "Off",
+ "links": [
+ {
+ "href": "/api/instances/Device::ebbfc47300010001",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Device::ebbfc47300010001/relationships/Statistics",
+ "rel": "/api/Device/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabe00000001",
+ "rel": "/api/parent/relationship/sdsId"
+ },
+ {
+ "href": "/api/instances/StoragePool::4039828b00000001",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "longSuccessfulIos": {
+ "longWindow": null,
+ "mediumWindow": null,
+ "shortWindow": null
+ },
+ "maxCapacityInKb": 114293760,
+ "name": "sdc",
+ "rfcacheErrorDeviceDoesNotExist": false,
+ "sdsId": "130dcabe00000001",
+ "ssdEndOfLifeState": "NeverFailed",
+ "storagePoolId": "4039828b00000001",
+ "temperatureState": "NeverFailed"
+ },
+ {
+ "aggregatedState": "NeverFailed",
+ "capacityLimitInKb": 103808000,
+ "deviceCurrentPathName": "/dev/sdb",
+ "deviceOriginalPathName": "/dev/sdb",
+ "deviceState": "Normal",
+ "errorState": "None",
+ "id": "ebbd9d6400000000",
+ "ledSetting": "Off",
+ "links": [
+ {
+ "href": "/api/instances/Device::ebbd9d6400000000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Device::ebbd9d6400000000/relationships/Statistics",
+ "rel": "/api/Device/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabd00000000",
+ "rel": "/api/parent/relationship/sdsId"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "longSuccessfulIos": {
+ "longWindow": null,
+ "mediumWindow": null,
+ "shortWindow": null
+ },
+ "maxCapacityInKb": 103808000,
+ "name": "sdb",
+ "rfcacheErrorDeviceDoesNotExist": false,
+ "sdsId": "130dcabd00000000",
+ "ssdEndOfLifeState": "NeverFailed",
+ "storagePoolId": "40395b7b00000000",
+ "temperatureState": "NeverFailed"
+ },
+ {
+ "aggregatedState": "NeverFailed",
+ "capacityLimitInKb": 114293760,
+ "deviceCurrentPathName": "/dev/sdc",
+ "deviceOriginalPathName": "/dev/sdc",
+ "deviceState": "Normal",
+ "errorState": "None",
+ "id": "ebbfc47700020001",
+ "ledSetting": "Off",
+ "links": [
+ {
+ "href": "/api/instances/Device::ebbfc47700020001",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Device::ebbfc47700020001/relationships/Statistics",
+ "rel": "/api/Device/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabf00000002",
+ "rel": "/api/parent/relationship/sdsId"
+ },
+ {
+ "href": "/api/instances/StoragePool::4039828b00000001",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "longSuccessfulIos": {
+ "longWindow": null,
+ "mediumWindow": null,
+ "shortWindow": null
+ },
+ "maxCapacityInKb": 114293760,
+ "name": "sdc",
+ "rfcacheErrorDeviceDoesNotExist": false,
+ "sdsId": "130dcabf00000002",
+ "ssdEndOfLifeState": "NeverFailed",
+ "storagePoolId": "4039828b00000001",
+ "temperatureState": "NeverFailed"
+ },
+ {
+ "aggregatedState": "NeverFailed",
+ "capacityLimitInKb": 103808000,
+ "deviceCurrentPathName": "/dev/sdc",
+ "deviceOriginalPathName": "/dev/sdc",
+ "deviceState": "Normal",
+ "errorState": "None",
+ "id": "ebbdc47600000001",
+ "ledSetting": "Off",
+ "links": [
+ {
+ "href": "/api/instances/Device::ebbdc47600000001",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Device::ebbdc47600000001/relationships/Statistics",
+ "rel": "/api/Device/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabd00000000",
+ "rel": "/api/parent/relationship/sdsId"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "longSuccessfulIos": {
+ "longWindow": null,
+ "mediumWindow": null,
+ "shortWindow": null
+ },
+ "maxCapacityInKb": 103808000,
+ "name": "sdc",
+ "rfcacheErrorDeviceDoesNotExist": false,
+ "sdsId": "130dcabd00000000",
+ "ssdEndOfLifeState": "NeverFailed",
+ "storagePoolId": "40395b7b00000000",
+ "temperatureState": "NeverFailed"
+ },
+ {
+ "aggregatedState": "NeverFailed",
+ "capacityLimitInKb": 103808000,
+ "deviceCurrentPathName": "/dev/sdb",
+ "deviceOriginalPathName": "/dev/sdb",
+ "deviceState": "Normal",
+ "errorState": "None",
+ "id": "ebbfc47800020000",
+ "ledSetting": "Off",
+ "links": [
+ {
+ "href": "/api/instances/Device::ebbfc47800020000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Device::ebbfc47800020000/relationships/Statistics",
+ "rel": "/api/Device/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabf00000002",
+ "rel": "/api/parent/relationship/sdsId"
+ },
+ {
+ "href": "/api/instances/StoragePool::4039828b00000001",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "longSuccessfulIos": {
+ "longWindow": null,
+ "mediumWindow": null,
+ "shortWindow": null
+ },
+ "maxCapacityInKb": 103808000,
+ "name": "sdb",
+ "rfcacheErrorDeviceDoesNotExist": false,
+ "sdsId": "130dcabf00000002",
+ "ssdEndOfLifeState": "NeverFailed",
+ "storagePoolId": "4039828b00000001",
+ "temperatureState": "NeverFailed"
+ }
+ ],
+ "faultSetList": [
+ {
+ "id": "a6a7b4cf00000000",
+ "links": [
+ {
+ "href": "/api/instances/FaultSet::a6a7b4cf00000000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/FaultSet::a6a7b4cf00000000/relationships/Statistics",
+ "rel": "/api/FaultSet/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/FaultSet::a6a7b4cf00000000/relationships/Sds",
+ "rel": "/api/FaultSet/relationship/Sds"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000",
+ "rel": "/api/parent/relationship/protectionDomainId"
+ }
+ ],
+ "name": "MyFaultSet",
+ "protectionDomainId": "74d855a900000000"
+ }
+ ],
+ "isDirty": false,
+ "lastDeviceVersion": 47,
+ "lastFaultSetVersion": 2,
+ "lastProtectionDomainVersion": 2,
+ "lastRfcacheDeviceVersion": 1,
+ "lastSdcVersion": 7,
+ "lastSdsVersion": 19,
+ "lastStoragePoolVersion": 4,
+ "lastSystemVersion": 2,
+ "lastVTreeVersion": 3,
+ "lastVolumeVersion": 3,
+ "protectionDomainList": [
+ {
+ "id": "74d855a900000000",
+ "links": [
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000/relationships/Statistics",
+ "rel": "/api/ProtectionDomain/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000/relationships/StoragePool",
+ "rel": "/api/ProtectionDomain/relationship/StoragePool"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000/relationships/Sds",
+ "rel": "/api/ProtectionDomain/relationship/Sds"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000/relationships/FaultSet",
+ "rel": "/api/ProtectionDomain/relationship/FaultSet"
+ },
+ {
+ "href": "/api/instances/System::499634a44778afc0",
+ "rel": "/api/parent/relationship/systemId"
+ }
+ ],
+ "mdmSdsNetworkDisconnectionsCounterParameters": {
+ "longWindow": {
+ "threshold": 700,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 500,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 300,
+ "windowSizeInSec": 60
+ }
+ },
+ "name": "default",
+ "overallIoNetworkThrottlingEnabled": false,
+ "overallIoNetworkThrottlingInKbps": null,
+ "protectionDomainState": "Active",
+ "rebalanceNetworkThrottlingEnabled": false,
+ "rebalanceNetworkThrottlingInKbps": null,
+ "rebuildNetworkThrottlingEnabled": false,
+ "rebuildNetworkThrottlingInKbps": null,
+ "rfcacheEnabled": true,
+ "rfcacheMaxIoSizeKb": 128,
+ "rfcacheOpertionalMode": "WriteMiss",
+ "rfcachePageSizeKb": 64,
+ "sdsConfigurationFailureCounterParameters": {
+ "longWindow": {
+ "threshold": 700,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 500,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 300,
+ "windowSizeInSec": 60
+ }
+ },
+ "sdsDecoupledCounterParameters": {
+ "longWindow": {
+ "threshold": 700,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 500,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 300,
+ "windowSizeInSec": 60
+ }
+ },
+ "sdsReceiveBufferAllocationFailuresCounterParameters": {
+ "longWindow": {
+ "threshold": 2000000,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 200000,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 20000,
+ "windowSizeInSec": 60
+ }
+ },
+ "sdsSdsNetworkDisconnectionsCounterParameters": {
+ "longWindow": {
+ "threshold": 700,
+ "windowSizeInSec": 86400
+ },
+ "mediumWindow": {
+ "threshold": 500,
+ "windowSizeInSec": 3600
+ },
+ "shortWindow": {
+ "threshold": 300,
+ "windowSizeInSec": 60
+ }
+ },
+ "systemId": "499634a44778afc0"
+ }
+ ],
+ "rfcacheDeviceList": null,
+ "sdcList": [
+ {
+ "id": "6076fd1100000002",
+ "installedSoftwareVersionInfo": "R2_6.11000.0",
+ "kernelBuildNumber": null,
+ "kernelVersion": "4.15.18",
+ "links": [
+ {
+ "href": "/api/instances/Sdc::6076fd1100000002",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Sdc::6076fd1100000002/relationships/Statistics",
+ "rel": "/api/Sdc/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sdc::6076fd1100000002/relationships/Volume",
+ "rel": "/api/Sdc/relationship/Volume"
+ },
+ {
+ "href": "/api/instances/System::499634a44778afc0",
+ "rel": "/api/parent/relationship/systemId"
+ }
+ ],
+ "mdmConnectionState": "Disconnected",
+ "memoryAllocationFailure": null,
+ "name": null,
+ "osType": "Linux",
+ "perfProfile": "Default",
+ "sdcApproved": true,
+ "sdcApprovedIps": [
+ "100.127.0.12"
+ ],
+ "sdcGuid": "B71F01AE-FF7A-47C5-A303-583FFD416818",
+ "sdcIp": "100.127.0.12",
+ "socketAllocationFailure": null,
+ "softwareVersionInfo": null,
+ "systemId": "499634a44778afc0",
+ "versionInfo": null
+ },
+ {
+ "id": "6076fd1000000001",
+ "installedSoftwareVersionInfo": "R2_6.11000.0",
+ "kernelBuildNumber": null,
+ "kernelVersion": "4.15.18",
+ "links": [
+ {
+ "href": "/api/instances/Sdc::6076fd1000000001",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Sdc::6076fd1000000001/relationships/Statistics",
+ "rel": "/api/Sdc/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sdc::6076fd1000000001/relationships/Volume",
+ "rel": "/api/Sdc/relationship/Volume"
+ },
+ {
+ "href": "/api/instances/System::499634a44778afc0",
+ "rel": "/api/parent/relationship/systemId"
+ }
+ ],
+ "mdmConnectionState": "Disconnected",
+ "memoryAllocationFailure": null,
+ "name": null,
+ "osType": "Linux",
+ "perfProfile": "Default",
+ "sdcApproved": true,
+ "sdcApprovedIps": [
+ "100.127.0.11"
+ ],
+ "sdcGuid": "5D2B24F9-5D49-4688-A67D-88AF8790BC05",
+ "sdcIp": "100.127.0.11",
+ "socketAllocationFailure": null,
+ "softwareVersionInfo": null,
+ "systemId": "499634a44778afc0",
+ "versionInfo": null
+ },
+ {
+ "id": "6076fd0f00000000",
+ "installedSoftwareVersionInfo": "R2_6.11000.0",
+ "kernelBuildNumber": null,
+ "kernelVersion": "4.15.18",
+ "links": [
+ {
+ "href": "/api/instances/Sdc::6076fd0f00000000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Sdc::6076fd0f00000000/relationships/Statistics",
+ "rel": "/api/Sdc/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sdc::6076fd0f00000000/relationships/Volume",
+ "rel": "/api/Sdc/relationship/Volume"
+ },
+ {
+ "href": "/api/instances/System::499634a44778afc0",
+ "rel": "/api/parent/relationship/systemId"
+ }
+ ],
+ "mdmConnectionState": "Connected",
+ "memoryAllocationFailure": null,
+ "name": null,
+ "osType": "Linux",
+ "perfProfile": "Default",
+ "sdcApproved": true,
+ "sdcApprovedIps": [
+ "100.127.0.10"
+ ],
+ "sdcGuid": "974F4AC7-FF37-4909-8713-D1BD3F002843",
+ "sdcIp": "100.127.0.10",
+ "socketAllocationFailure": null,
+ "softwareVersionInfo": "R2_6.11000.0",
+ "systemId": "499634a44778afc0",
+ "versionInfo": "R2_6.11000.0"
+ }
+ ],
+ "sdsList": [
+ {
+ "authenticationError": "None",
+ "certificateInfo": {
+ "issuer": "/GN=MDM/CN=test-VirtualBox/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD",
+ "subject": "/GN=SDS-000/CN=scaleIOslave1/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD",
+ "thumbprint": "FD:00:99:E9:40:90:A5:CE:85:B8:A9:07:86:BB:7E:F0:E0:DE:F9:75",
+ "validFrom": "Nov 12 19:17:22 2019 GMT",
+ "validFromAsn1Format": "191112191722Z",
+ "validTo": "Nov 10 20:17:22 2029 GMT",
+ "validToAsn1Format": "291110201722Z"
+ },
+ "drlMode": "Volatile",
+ "faultSetId": null,
+ "id": "130dcabd00000000",
+ "ipList": [
+ {
+ "ip": "100.127.0.11",
+ "role": "all"
+ }
+ ],
+ "links": [
+ {
+ "href": "/api/instances/Sds::130dcabd00000000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabd00000000/relationships/Statistics",
+ "rel": "/api/Sds/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabd00000000/relationships/Device",
+ "rel": "/api/Sds/relationship/Device"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabd00000000/relationships/RfcacheDevice",
+ "rel": "/api/Sds/relationship/RfcacheDevice"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000",
+ "rel": "/api/parent/relationship/protectionDomainId"
+ }
+ ],
+ "maintenanceState": "NoMaintenance",
+ "mdmConnectionState": "Connected",
+ "membershipState": "Joined",
+ "name": "SDS_[100.127.0.11]",
+ "numOfIoBuffers": null,
+ "onVmWare": false,
+ "perfProfile": "Default",
+ "port": 7072,
+ "protectionDomainId": "74d855a900000000",
+ "rfcacheEnabled": true,
+ "rfcacheErrorApiVersionMismatch": false,
+ "rfcacheErrorDeviceDoesNotExist": false,
+ "rfcacheErrorInconsistentCacheConfiguration": false,
+ "rfcacheErrorInconsistentSourceConfiguration": false,
+ "rfcacheErrorInvalidDriverPath": false,
+ "rfcacheErrorLowResources": false,
+ "rmcacheEnabled": true,
+ "rmcacheFrozen": false,
+ "rmcacheMemoryAllocationState": "AllocationPending",
+ "rmcacheSizeInKb": 131072,
+ "sdsConfigurationFailure": null,
+ "sdsDecoupled": null,
+ "sdsReceiveBufferAllocationFailures": null,
+ "sdsState": "Normal",
+ "softwareVersionInfo": "R2_6.11000.0"
+ },
+ {
+ "authenticationError": "None",
+ "certificateInfo": {
+ "issuer": "/GN=MDM/CN=test-VirtualBox/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD",
+ "subject": "/GN=SDS-001/CN=test-VirtualBox/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD",
+ "thumbprint": "B2:F9:88:84:55:94:A1:D8:7F:C1:4F:50:81:17:56:AC:72:B7:A2:AD",
+ "validFrom": "Nov 12 19:17:22 2019 GMT",
+ "validFromAsn1Format": "191112191722Z",
+ "validTo": "Nov 10 20:17:22 2029 GMT",
+ "validToAsn1Format": "291110201722Z"
+ },
+ "drlMode": "Volatile",
+ "faultSetId": null,
+ "id": "130dcabe00000001",
+ "ipList": [
+ {
+ "ip": "100.127.0.10",
+ "role": "all"
+ }
+ ],
+ "links": [
+ {
+ "href": "/api/instances/Sds::130dcabe00000001",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabe00000001/relationships/Statistics",
+ "rel": "/api/Sds/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabe00000001/relationships/Device",
+ "rel": "/api/Sds/relationship/Device"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabe00000001/relationships/RfcacheDevice",
+ "rel": "/api/Sds/relationship/RfcacheDevice"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000",
+ "rel": "/api/parent/relationship/protectionDomainId"
+ }
+ ],
+ "maintenanceState": "NoMaintenance",
+ "mdmConnectionState": "Connected",
+ "membershipState": "Joined",
+ "name": "SDS_[100.127.0.10]",
+ "numOfIoBuffers": null,
+ "onVmWare": false,
+ "perfProfile": "Default",
+ "port": 7072,
+ "protectionDomainId": "74d855a900000000",
+ "rfcacheEnabled": true,
+ "rfcacheErrorApiVersionMismatch": false,
+ "rfcacheErrorDeviceDoesNotExist": false,
+ "rfcacheErrorInconsistentCacheConfiguration": false,
+ "rfcacheErrorInconsistentSourceConfiguration": false,
+ "rfcacheErrorInvalidDriverPath": false,
+ "rfcacheErrorLowResources": false,
+ "rmcacheEnabled": true,
+ "rmcacheFrozen": false,
+ "rmcacheMemoryAllocationState": "AllocationPending",
+ "rmcacheSizeInKb": 131072,
+ "sdsConfigurationFailure": null,
+ "sdsDecoupled": null,
+ "sdsReceiveBufferAllocationFailures": null,
+ "sdsState": "Normal",
+ "softwareVersionInfo": "R2_6.11000.0"
+ },
+ {
+ "authenticationError": "None",
+ "certificateInfo": {
+ "issuer": "/GN=MDM/CN=test-VirtualBox/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD",
+ "subject": "/GN=SDS-002/CN=scaleIOSlave2/L=Hopkinton/ST=Massachusetts/C=US/O=EMC/OU=ASD",
+ "thumbprint": "CC:A0:E8:B7:84:9B:E5:D1:2E:F6:7C:3A:AC:21:D6:5C:5F:D1:47:D1",
+ "validFrom": "Nov 12 19:17:21 2019 GMT",
+ "validFromAsn1Format": "191112191721Z",
+ "validTo": "Nov 10 20:17:21 2029 GMT",
+ "validToAsn1Format": "291110201721Z"
+ },
+ "drlMode": "Volatile",
+ "faultSetId": null,
+ "id": "130dcabf00000002",
+ "ipList": [
+ {
+ "ip": "100.127.0.12",
+ "role": "all"
+ }
+ ],
+ "links": [
+ {
+ "href": "/api/instances/Sds::130dcabf00000002",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabf00000002/relationships/Statistics",
+ "rel": "/api/Sds/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabf00000002/relationships/Device",
+ "rel": "/api/Sds/relationship/Device"
+ },
+ {
+ "href": "/api/instances/Sds::130dcabf00000002/relationships/RfcacheDevice",
+ "rel": "/api/Sds/relationship/RfcacheDevice"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000",
+ "rel": "/api/parent/relationship/protectionDomainId"
+ }
+ ],
+ "maintenanceState": "NoMaintenance",
+ "mdmConnectionState": "Connected",
+ "membershipState": "Joined",
+ "name": "SDS_[100.127.0.12]",
+ "numOfIoBuffers": null,
+ "onVmWare": false,
+ "perfProfile": "Default",
+ "port": 7072,
+ "protectionDomainId": "74d855a900000000",
+ "rfcacheEnabled": true,
+ "rfcacheErrorApiVersionMismatch": false,
+ "rfcacheErrorDeviceDoesNotExist": false,
+ "rfcacheErrorInconsistentCacheConfiguration": false,
+ "rfcacheErrorInconsistentSourceConfiguration": false,
+ "rfcacheErrorInvalidDriverPath": false,
+ "rfcacheErrorLowResources": false,
+ "rmcacheEnabled": true,
+ "rmcacheFrozen": false,
+ "rmcacheMemoryAllocationState": "AllocationPending",
+ "rmcacheSizeInKb": 131072,
+ "sdsConfigurationFailure": null,
+ "sdsDecoupled": null,
+ "sdsReceiveBufferAllocationFailures": null,
+ "sdsState": "Normal",
+ "softwareVersionInfo": "R2_6.11000.0"
+ }
+ ],
+ "sessionTag": 19,
+ "storagePoolList": [
+ {
+ "backgroundScannerBWLimitKBps": 0,
+ "backgroundScannerMode": "Disabled",
+ "capacityAlertCriticalThreshold": 90,
+ "capacityAlertHighThreshold": 80,
+ "checksumEnabled": false,
+ "id": "4039828b00000001",
+ "links": [
+ {
+ "href": "/api/instances/StoragePool::4039828b00000001",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/StoragePool::4039828b00000001/relationships/Statistics",
+ "rel": "/api/StoragePool/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/StoragePool::4039828b00000001/relationships/Volume",
+ "rel": "/api/StoragePool/relationship/Volume"
+ },
+ {
+ "href": "/api/instances/StoragePool::4039828b00000001/relationships/Device",
+ "rel": "/api/StoragePool/relationship/Device"
+ },
+ {
+ "href": "/api/instances/StoragePool::4039828b00000001/relationships/VTree",
+ "rel": "/api/StoragePool/relationship/VTree"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000",
+ "rel": "/api/parent/relationship/protectionDomainId"
+ }
+ ],
+ "name": "StoragePool2",
+ "numOfParallelRebuildRebalanceJobsPerDevice": 2,
+ "protectionDomainId": "74d855a900000000",
+ "rebalanceEnabled": true,
+ "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null,
+ "rebalanceIoPriorityAppIopsPerDeviceThreshold": null,
+ "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240,
+ "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1,
+ "rebalanceIoPriorityPolicy": "favorAppIos",
+ "rebalanceIoPriorityQuietPeriodInMsec": null,
+ "rebuildEnabled": true,
+ "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": null,
+ "rebuildIoPriorityAppIopsPerDeviceThreshold": null,
+ "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240,
+ "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1,
+ "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos",
+ "rebuildIoPriorityQuietPeriodInMsec": null,
+ "rmcacheWriteHandlingMode": "Cached",
+ "sparePercentage": 10,
+ "useRfcache": false,
+ "useRmcache": false,
+ "zeroPaddingEnabled": false
+ },
+ {
+ "backgroundScannerBWLimitKBps": 0,
+ "backgroundScannerMode": "Disabled",
+ "capacityAlertCriticalThreshold": 90,
+ "capacityAlertHighThreshold": 80,
+ "checksumEnabled": false,
+ "id": "40395b7b00000000",
+ "links": [
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000/relationships/Statistics",
+ "rel": "/api/StoragePool/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000/relationships/Volume",
+ "rel": "/api/StoragePool/relationship/Volume"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000/relationships/Device",
+ "rel": "/api/StoragePool/relationship/Device"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000/relationships/VTree",
+ "rel": "/api/StoragePool/relationship/VTree"
+ },
+ {
+ "href": "/api/instances/ProtectionDomain::74d855a900000000",
+ "rel": "/api/parent/relationship/protectionDomainId"
+ }
+ ],
+ "name": "StoragePool1",
+ "numOfParallelRebuildRebalanceJobsPerDevice": 2,
+ "protectionDomainId": "74d855a900000000",
+ "rebalanceEnabled": true,
+ "rebalanceIoPriorityAppBwPerDeviceThresholdInKbps": null,
+ "rebalanceIoPriorityAppIopsPerDeviceThreshold": null,
+ "rebalanceIoPriorityBwLimitPerDeviceInKbps": 10240,
+ "rebalanceIoPriorityNumOfConcurrentIosPerDevice": 1,
+ "rebalanceIoPriorityPolicy": "favorAppIos",
+ "rebalanceIoPriorityQuietPeriodInMsec": null,
+ "rebuildEnabled": true,
+ "rebuildIoPriorityAppBwPerDeviceThresholdInKbps": null,
+ "rebuildIoPriorityAppIopsPerDeviceThreshold": null,
+ "rebuildIoPriorityBwLimitPerDeviceInKbps": 10240,
+ "rebuildIoPriorityNumOfConcurrentIosPerDevice": 1,
+ "rebuildIoPriorityPolicy": "limitNumOfConcurrentIos",
+ "rebuildIoPriorityQuietPeriodInMsec": null,
+ "rmcacheWriteHandlingMode": "Cached",
+ "sparePercentage": 10,
+ "useRfcache": false,
+ "useRmcache": false,
+ "zeroPaddingEnabled": false
+ }
+ ],
+ "vTreeList": [
+ {
+ "baseVolumeId": "993a355e00000001",
+ "id": "252fd6e400000001",
+ "links": [
+ {
+ "href": "/api/instances/VTree::252fd6e400000001",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/VTree::252fd6e400000001/relationships/Statistics",
+ "rel": "/api/VTree/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/VTree::252fd6e400000001/relationships/Volume",
+ "rel": "/api/VTree/relationship/Volume"
+ },
+ {
+ "href": "/api/instances/Volume::993a355e00000001",
+ "rel": "/api/parent/relationship/baseVolumeId"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "name": null,
+ "storagePoolId": "40395b7b00000000"
+ },
+ {
+ "baseVolumeId": "993a355d00000000",
+ "id": "252fd6e300000000",
+ "links": [
+ {
+ "href": "/api/instances/VTree::252fd6e300000000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/VTree::252fd6e300000000/relationships/Statistics",
+ "rel": "/api/VTree/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/VTree::252fd6e300000000/relationships/Volume",
+ "rel": "/api/VTree/relationship/Volume"
+ },
+ {
+ "href": "/api/instances/Volume::993a355d00000000",
+ "rel": "/api/parent/relationship/baseVolumeId"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "name": null,
+ "storagePoolId": "40395b7b00000000"
+ }
+ ],
+ "volumeList": [
+ {
+ "ancestorVolumeId": null,
+ "consistencyGroupId": null,
+ "creationTime": 1574882772,
+ "id": "993a355e00000001",
+ "isObfuscated": false,
+ "isVvol": false,
+ "links": [
+ {
+ "href": "/api/instances/Volume::993a355e00000001",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Volume::993a355e00000001/relationships/Statistics",
+ "rel": "/api/Volume/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/VTree::252fd6e400000001",
+ "rel": "/api/parent/relationship/vtreeId"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "mappedSdcInfo": [
+ {
+ "limitBwInMbps": 0,
+ "limitIops": 0,
+ "sdcId": "6076fd1100000002",
+ "sdcIp": "100.127.0.12"
+ }
+ ],
+ "mappingToAllSdcsEnabled": false,
+ "name": "volume2-16",
+ "sizeInKb": 16777216,
+ "storagePoolId": "40395b7b00000000",
+ "useRmcache": false,
+ "volumeType": "ThinProvisioned",
+ "vtreeId": "252fd6e400000001"
+ },
+ {
+ "ancestorVolumeId": null,
+ "consistencyGroupId": null,
+ "creationTime": 1574882580,
+ "id": "993a355d00000000",
+ "isObfuscated": false,
+ "isVvol": false,
+ "links": [
+ {
+ "href": "/api/instances/Volume::993a355d00000000",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/Volume::993a355d00000000/relationships/Statistics",
+ "rel": "/api/Volume/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/VTree::252fd6e300000000",
+ "rel": "/api/parent/relationship/vtreeId"
+ },
+ {
+ "href": "/api/instances/StoragePool::40395b7b00000000",
+ "rel": "/api/parent/relationship/storagePoolId"
+ }
+ ],
+ "mappedSdcInfo": [
+ {
+ "limitBwInMbps": 0,
+ "limitIops": 0,
+ "sdcId": "6076fd1000000001",
+ "sdcIp": "100.127.0.11"
+ }
+ ],
+ "mappingToAllSdcsEnabled": false,
+ "name": "volume1-16",
+ "sizeInKb": 16777216,
+ "storagePoolId": "40395b7b00000000",
+ "useRmcache": false,
+ "volumeType": "ThinProvisioned",
+ "vtreeId": "252fd6e300000000"
+ }
+ ]
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/scaleio/testdata/selected_statistics.json b/src/go/plugin/go.d/modules/scaleio/testdata/selected_statistics.json
new file mode 100644
index 000000000..0b141bbe6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/scaleio/testdata/selected_statistics.json
@@ -0,0 +1,777 @@
+{
+ "Sdc": {
+ "6076fd0f00000000": {
+ "numOfMappedVolumes": 1,
+ "userDataReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 1,
+ "totalWeightInKb": 0
+ },
+ "userDataWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 1,
+ "totalWeightInKb": 0
+ },
+ "volumeIds": [
+ "993a5c6d00000002"
+ ]
+ },
+ "6076fd1000000001": {
+ "numOfMappedVolumes": 1,
+ "userDataReadBwc": {
+ "numOccured": 1,
+ "numSeconds": 1,
+ "totalWeightInKb": 1
+ },
+ "userDataWriteBwc": {
+ "numOccured": 169,
+ "numSeconds": 1,
+ "totalWeightInKb": 117399
+ },
+ "volumeIds": [
+ "993a355d00000000"
+ ]
+ },
+ "6076fd1100000002": {
+ "numOfMappedVolumes": 1,
+ "userDataReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 1,
+ "totalWeightInKb": 0
+ },
+ "userDataWriteBwc": {
+ "numOccured": 145,
+ "numSeconds": 1,
+ "totalWeightInKb": 118972
+ },
+ "volumeIds": [
+ "993a355e00000001"
+ ]
+ }
+ },
+ "StoragePool": {
+ "40395b7b00000000": {
+ "BackgroundScanCompareCount": 0,
+ "BackgroundScannedInMB": 0,
+ "activeBckRebuildCapacityInKb": 0,
+ "activeFwdRebuildCapacityInKb": 0,
+ "activeMovingCapacityInKb": 0,
+ "activeMovingInBckRebuildJobs": 0,
+ "activeMovingInFwdRebuildJobs": 0,
+ "activeMovingInNormRebuildJobs": 0,
+ "activeMovingInRebalanceJobs": 0,
+ "activeMovingOutBckRebuildJobs": 0,
+ "activeMovingOutFwdRebuildJobs": 0,
+ "activeMovingOutNormRebuildJobs": 0,
+ "activeMovingRebalanceJobs": 0,
+ "activeNormRebuildCapacityInKb": 0,
+ "activeRebalanceCapacityInKb": 0,
+ "atRestCapacityInKb": 50110464,
+ "bckRebuildCapacityInKb": 0,
+ "bckRebuildReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "bckRebuildWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "capacityAvailableForVolumeAllocationInKb": 100663296,
+ "capacityInUseInKb": 50110464,
+ "capacityLimitInKb": 311424000,
+ "degradedFailedCapacityInKb": 0,
+ "degradedFailedVacInKb": 0,
+ "degradedHealthyCapacityInKb": 0,
+ "degradedHealthyVacInKb": 0,
+ "deviceIds": [
+ "ebbdc47a00000000",
+ "ebbf9d6500010000",
+ "ebbfc47900020000"
+ ],
+ "failedCapacityInKb": 0,
+ "failedVacInKb": 0,
+ "fixedReadErrorCount": 0,
+ "fwdRebuildCapacityInKb": 0,
+ "fwdRebuildReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "fwdRebuildWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "inMaintenanceCapacityInKb": 0,
+ "inMaintenanceVacInKb": 0,
+ "inUseVacInKb": 67108864,
+ "maxCapacityInKb": 311424000,
+ "movingCapacityInKb": 0,
+ "normRebuildCapacityInKb": 0,
+ "normRebuildReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "normRebuildWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "numOfDevices": 3,
+ "numOfMappedToAllVolumes": 0,
+ "numOfSnapshots": 1,
+ "numOfThickBaseVolumes": 0,
+ "numOfThinBaseVolumes": 2,
+ "numOfUnmappedVolumes": 0,
+ "numOfVolumes": 3,
+ "numOfVolumesInDeletion": 0,
+ "numOfVtrees": 2,
+ "pendingBckRebuildCapacityInKb": 0,
+ "pendingFwdRebuildCapacityInKb": 0,
+ "pendingMovingCapacityInKb": 0,
+ "pendingMovingInBckRebuildJobs": 0,
+ "pendingMovingInFwdRebuildJobs": 0,
+ "pendingMovingInNormRebuildJobs": 0,
+ "pendingMovingInRebalanceJobs": 0,
+ "pendingMovingOutBckRebuildJobs": 0,
+ "pendingMovingOutFwdRebuildJobs": 0,
+ "pendingMovingOutNormrebuildJobs": 0,
+ "pendingMovingRebalanceJobs": 0,
+ "pendingNormRebuildCapacityInKb": 0,
+ "pendingRebalanceCapacityInKb": 0,
+ "primaryReadBwc": {
+ "numOccured": 1,
+ "numSeconds": 5,
+ "totalWeightInKb": 4
+ },
+ "primaryReadFromDevBwc": {
+ "numOccured": 1,
+ "numSeconds": 5,
+ "totalWeightInKb": 4
+ },
+ "primaryReadFromRmcacheBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "primaryVacInKb": 33554432,
+ "primaryWriteBwc": {
+ "numOccured": 1556,
+ "numSeconds": 5,
+ "totalWeightInKb": 1193408
+ },
+ "protectedCapacityInKb": 50110464,
+ "protectedVacInKb": 67108864,
+ "rebalanceCapacityInKb": 0,
+ "rebalanceReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "rebalanceWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "rfacheReadHit": 0,
+ "rfacheWriteHit": 0,
+ "rfcacheAvgReadTime": 0,
+ "rfcacheAvgWriteTime": 0,
+ "rfcacheIoErrors": 0,
+ "rfcacheIosOutstanding": 0,
+ "rfcacheIosSkipped": 0,
+ "rfcacheReadMiss": 0,
+ "rfcacheReadsFromCache": 0,
+ "rfcacheReadsPending": 0,
+ "rfcacheReadsReceived": 0,
+ "rfcacheReadsSkipped": 0,
+ "rfcacheReadsSkippedAlignedSizeTooLarge": 0,
+ "rfcacheReadsSkippedHeavyLoad": 0,
+ "rfcacheReadsSkippedInternalError": 0,
+ "rfcacheReadsSkippedLockIos": 0,
+ "rfcacheReadsSkippedLowResources": 0,
+ "rfcacheReadsSkippedMaxIoSize": 0,
+ "rfcacheReadsSkippedStuckIo": 0,
+ "rfcacheSkippedUnlinedWrite": 0,
+ "rfcacheSourceDeviceReads": 0,
+ "rfcacheSourceDeviceWrites": 0,
+ "rfcacheWriteMiss": 0,
+ "rfcacheWritePending": 0,
+ "rfcacheWritesReceived": 0,
+ "rfcacheWritesSkippedCacheMiss": 0,
+ "rfcacheWritesSkippedHeavyLoad": 0,
+ "rfcacheWritesSkippedInternalError": 0,
+ "rfcacheWritesSkippedLowResources": 0,
+ "rfcacheWritesSkippedMaxIoSize": 0,
+ "rfcacheWritesSkippedStuckIo": 0,
+ "rmPendingAllocatedInKb": 0,
+ "secondaryReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "secondaryReadFromDevBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "secondaryReadFromRmcacheBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "secondaryVacInKb": 33554432,
+ "secondaryWriteBwc": {
+ "numOccured": 1530,
+ "numSeconds": 5,
+ "totalWeightInKb": 1169632
+ },
+ "semiProtectedCapacityInKb": 0,
+ "semiProtectedVacInKb": 0,
+ "snapCapacityInUseInKb": 16699392,
+ "snapCapacityInUseOccupiedInKb": 749568,
+ "spareCapacityInKb": 31141888,
+ "thickCapacityInUseInKb": 0,
+ "thinCapacityAllocatedInKb": 67108864,
+ "thinCapacityInUseInKb": 49360896,
+ "totalReadBwc": {
+ "numOccured": 1,
+ "numSeconds": 5,
+ "totalWeightInKb": 4
+ },
+ "totalWriteBwc": {
+ "numOccured": 3086,
+ "numSeconds": 5,
+ "totalWeightInKb": 2363040
+ },
+ "unreachableUnusedCapacityInKb": 0,
+ "unusedCapacityInKb": 230171648,
+ "userDataReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "userDataWriteBwc": {
+ "numOccured": 285,
+ "numSeconds": 1,
+ "totalWeightInKb": 227170
+ },
+ "volumeIds": [
+ "993a355d00000000",
+ "993a5c6d00000002",
+ "993a355e00000001"
+ ],
+ "vtreeIds": [
+ "252fd6e300000000",
+ "252fd6e400000001"
+ ]
+ },
+ "4039828b00000001": {
+ "BackgroundScanCompareCount": 0,
+ "BackgroundScannedInMB": 0,
+ "activeBckRebuildCapacityInKb": 0,
+ "activeFwdRebuildCapacityInKb": 0,
+ "activeMovingCapacityInKb": 0,
+ "activeMovingInBckRebuildJobs": 0,
+ "activeMovingInFwdRebuildJobs": 0,
+ "activeMovingInNormRebuildJobs": 0,
+ "activeMovingInRebalanceJobs": 0,
+ "activeMovingOutBckRebuildJobs": 0,
+ "activeMovingOutFwdRebuildJobs": 0,
+ "activeMovingOutNormRebuildJobs": 0,
+ "activeMovingRebalanceJobs": 0,
+ "activeNormRebuildCapacityInKb": 0,
+ "activeRebalanceCapacityInKb": 0,
+ "atRestCapacityInKb": 0,
+ "bckRebuildCapacityInKb": 0,
+ "bckRebuildReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "bckRebuildWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "capacityAvailableForVolumeAllocationInKb": 142606336,
+ "capacityInUseInKb": 0,
+ "capacityLimitInKb": 332395520,
+ "degradedFailedCapacityInKb": 0,
+ "degradedFailedVacInKb": 0,
+ "degradedHealthyCapacityInKb": 0,
+ "degradedHealthyVacInKb": 0,
+ "deviceIds": [
+ "ebbdc47b00000001",
+ "ebbfc47300010001",
+ "ebbfc47700020001"
+ ],
+ "failedCapacityInKb": 0,
+ "failedVacInKb": 0,
+ "fixedReadErrorCount": 0,
+ "fwdRebuildCapacityInKb": 0,
+ "fwdRebuildReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "fwdRebuildWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "inMaintenanceCapacityInKb": 0,
+ "inMaintenanceVacInKb": 0,
+ "inUseVacInKb": 0,
+ "maxCapacityInKb": 332395520,
+ "movingCapacityInKb": 0,
+ "normRebuildCapacityInKb": 0,
+ "normRebuildReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "normRebuildWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "numOfDevices": 3,
+ "numOfMappedToAllVolumes": 0,
+ "numOfSnapshots": 0,
+ "numOfThickBaseVolumes": 0,
+ "numOfThinBaseVolumes": 0,
+ "numOfUnmappedVolumes": 0,
+ "numOfVolumes": 0,
+ "numOfVolumesInDeletion": 0,
+ "numOfVtrees": 0,
+ "pendingBckRebuildCapacityInKb": 0,
+ "pendingFwdRebuildCapacityInKb": 0,
+ "pendingMovingCapacityInKb": 0,
+ "pendingMovingInBckRebuildJobs": 0,
+ "pendingMovingInFwdRebuildJobs": 0,
+ "pendingMovingInNormRebuildJobs": 0,
+ "pendingMovingInRebalanceJobs": 0,
+ "pendingMovingOutBckRebuildJobs": 0,
+ "pendingMovingOutFwdRebuildJobs": 0,
+ "pendingMovingOutNormrebuildJobs": 0,
+ "pendingMovingRebalanceJobs": 0,
+ "pendingNormRebuildCapacityInKb": 0,
+ "pendingRebalanceCapacityInKb": 0,
+ "primaryReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "primaryReadFromDevBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "primaryReadFromRmcacheBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "primaryVacInKb": 0,
+ "primaryWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "protectedCapacityInKb": 0,
+ "protectedVacInKb": 0,
+ "rebalanceCapacityInKb": 0,
+ "rebalanceReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "rebalanceWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "rfacheReadHit": 0,
+ "rfacheWriteHit": 0,
+ "rfcacheAvgReadTime": 0,
+ "rfcacheAvgWriteTime": 0,
+ "rfcacheIoErrors": 0,
+ "rfcacheIosOutstanding": 0,
+ "rfcacheIosSkipped": 0,
+ "rfcacheReadMiss": 0,
+ "rfcacheReadsFromCache": 0,
+ "rfcacheReadsPending": 0,
+ "rfcacheReadsReceived": 0,
+ "rfcacheReadsSkipped": 0,
+ "rfcacheReadsSkippedAlignedSizeTooLarge": 0,
+ "rfcacheReadsSkippedHeavyLoad": 0,
+ "rfcacheReadsSkippedInternalError": 0,
+ "rfcacheReadsSkippedLockIos": 0,
+ "rfcacheReadsSkippedLowResources": 0,
+ "rfcacheReadsSkippedMaxIoSize": 0,
+ "rfcacheReadsSkippedStuckIo": 0,
+ "rfcacheSkippedUnlinedWrite": 0,
+ "rfcacheSourceDeviceReads": 0,
+ "rfcacheSourceDeviceWrites": 0,
+ "rfcacheWriteMiss": 0,
+ "rfcacheWritePending": 0,
+ "rfcacheWritesReceived": 0,
+ "rfcacheWritesSkippedCacheMiss": 0,
+ "rfcacheWritesSkippedHeavyLoad": 0,
+ "rfcacheWritesSkippedInternalError": 0,
+ "rfcacheWritesSkippedLowResources": 0,
+ "rfcacheWritesSkippedMaxIoSize": 0,
+ "rfcacheWritesSkippedStuckIo": 0,
+ "rmPendingAllocatedInKb": 0,
+ "secondaryReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "secondaryReadFromDevBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "secondaryReadFromRmcacheBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "secondaryVacInKb": 0,
+ "secondaryWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "semiProtectedCapacityInKb": 0,
+ "semiProtectedVacInKb": 0,
+ "snapCapacityInUseInKb": 3145728,
+ "snapCapacityInUseOccupiedInKb": 0,
+ "spareCapacityInKb": 33239040,
+ "thickCapacityInUseInKb": 0,
+ "thinCapacityAllocatedInKb": 0,
+ "thinCapacityInUseInKb": 0,
+ "totalReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "totalWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "unreachableUnusedCapacityInKb": 0,
+ "unusedCapacityInKb": 299156480,
+ "userDataReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "userDataWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "volumeIds": [],
+ "vtreeIds": []
+ }
+ },
+ "System": {
+ "BackgroundScanCompareCount": 0,
+ "BackgroundScannedInMB": 0,
+ "activeBckRebuildCapacityInKb": 0,
+ "activeFwdRebuildCapacityInKb": 0,
+ "activeMovingCapacityInKb": 0,
+ "activeMovingInBckRebuildJobs": 0,
+ "activeMovingInFwdRebuildJobs": 0,
+ "activeMovingInNormRebuildJobs": 0,
+ "activeMovingInRebalanceJobs": 0,
+ "activeMovingOutBckRebuildJobs": 0,
+ "activeMovingOutFwdRebuildJobs": 0,
+ "activeMovingOutNormRebuildJobs": 0,
+ "activeMovingRebalanceJobs": 0,
+ "activeNormRebuildCapacityInKb": 0,
+ "activeRebalanceCapacityInKb": 0,
+ "atRestCapacityInKb": 50110464,
+ "bckRebuildCapacityInKb": 0,
+ "bckRebuildReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "bckRebuildWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "capacityAvailableForVolumeAllocationInKb": 243269632,
+ "capacityInUseInKb": 50110464,
+ "capacityLimitInKb": 643819520,
+ "degradedFailedCapacityInKb": 0,
+ "degradedFailedVacInKb": 0,
+ "degradedHealthyCapacityInKb": 0,
+ "degradedHealthyVacInKb": 0,
+ "failedCapacityInKb": 0,
+ "failedVacInKb": 0,
+ "fixedReadErrorCount": 0,
+ "fwdRebuildCapacityInKb": 0,
+ "fwdRebuildReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "fwdRebuildWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "inMaintenanceCapacityInKb": 0,
+ "inMaintenanceVacInKb": 0,
+ "inUseVacInKb": 67108864,
+ "maxCapacityInKb": 643819520,
+ "movingCapacityInKb": 0,
+ "normRebuildCapacityInKb": 0,
+ "normRebuildReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "normRebuildWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "numOfDevices": 6,
+ "numOfFaultSets": 0,
+ "numOfMappedToAllVolumes": 0,
+ "numOfProtectionDomains": 1,
+ "numOfRfcacheDevices": 0,
+ "numOfScsiInitiators": 0,
+ "numOfSdc": 3,
+ "numOfSds": 3,
+ "numOfSnapshots": 1,
+ "numOfStoragePools": 2,
+ "numOfThickBaseVolumes": 0,
+ "numOfThinBaseVolumes": 2,
+ "numOfUnmappedVolumes": 0,
+ "numOfVolumes": 3,
+ "numOfVolumesInDeletion": 0,
+ "numOfVtrees": 2,
+ "pendingBckRebuildCapacityInKb": 0,
+ "pendingFwdRebuildCapacityInKb": 0,
+ "pendingMovingCapacityInKb": 0,
+ "pendingMovingInBckRebuildJobs": 0,
+ "pendingMovingInFwdRebuildJobs": 0,
+ "pendingMovingInNormRebuildJobs": 0,
+ "pendingMovingInRebalanceJobs": 0,
+ "pendingMovingOutBckRebuildJobs": 0,
+ "pendingMovingOutFwdRebuildJobs": 0,
+ "pendingMovingOutNormrebuildJobs": 0,
+ "pendingMovingRebalanceJobs": 0,
+ "pendingNormRebuildCapacityInKb": 0,
+ "pendingRebalanceCapacityInKb": 0,
+ "primaryReadBwc": {
+ "numOccured": 1,
+ "numSeconds": 5,
+ "totalWeightInKb": 4
+ },
+ "primaryReadFromDevBwc": {
+ "numOccured": 1,
+ "numSeconds": 5,
+ "totalWeightInKb": 4
+ },
+ "primaryReadFromRmcacheBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "primaryVacInKb": 33554432,
+ "primaryWriteBwc": {
+ "numOccured": 1556,
+ "numSeconds": 5,
+ "totalWeightInKb": 1193408
+ },
+ "protectedCapacityInKb": 50110464,
+ "protectedVacInKb": 67108864,
+ "protectionDomainIds": [
+ "74d855a900000000"
+ ],
+ "rebalanceCapacityInKb": 0,
+ "rebalancePerReceiveJobNetThrottlingInKbps": 0,
+ "rebalanceReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "rebalanceWaitSendQLength": 0,
+ "rebalanceWriteBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "rebuildPerReceiveJobNetThrottlingInKbps": 0,
+ "rebuildWaitSendQLength": 0,
+ "rfacheReadHit": 0,
+ "rfacheWriteHit": 0,
+ "rfcacheAvgReadTime": 0,
+ "rfcacheAvgWriteTime": 0,
+ "rfcacheFdAvgReadTime": 0,
+ "rfcacheFdAvgWriteTime": 0,
+ "rfcacheFdCacheOverloaded": 0,
+ "rfcacheFdInlightReads": 0,
+ "rfcacheFdInlightWrites": 0,
+ "rfcacheFdIoErrors": 0,
+ "rfcacheFdMonitorErrorStuckIo": 0,
+ "rfcacheFdReadTimeGreater1Min": 0,
+ "rfcacheFdReadTimeGreater1Sec": 0,
+ "rfcacheFdReadTimeGreater500Millis": 0,
+ "rfcacheFdReadTimeGreater5Sec": 0,
+ "rfcacheFdReadsReceived": 0,
+ "rfcacheFdWriteTimeGreater1Min": 0,
+ "rfcacheFdWriteTimeGreater1Sec": 0,
+ "rfcacheFdWriteTimeGreater500Millis": 0,
+ "rfcacheFdWriteTimeGreater5Sec": 0,
+ "rfcacheFdWritesReceived": 0,
+ "rfcacheIoErrors": 0,
+ "rfcacheIosOutstanding": 0,
+ "rfcacheIosSkipped": 0,
+ "rfcachePooIosOutstanding": 0,
+ "rfcachePoolCachePages": 0,
+ "rfcachePoolEvictions": 0,
+ "rfcachePoolInLowMemoryCondition": 0,
+ "rfcachePoolIoTimeGreater1Min": 0,
+ "rfcachePoolLockTimeGreater1Sec": 0,
+ "rfcachePoolLowResourcesInitiatedPassthroughMode": 0,
+ "rfcachePoolNumCacheDevs": 0,
+ "rfcachePoolNumSrcDevs": 0,
+ "rfcachePoolPagesInuse": 0,
+ "rfcachePoolReadHit": 0,
+ "rfcachePoolReadMiss": 0,
+ "rfcachePoolReadPendingG10Millis": 0,
+ "rfcachePoolReadPendingG1Millis": 0,
+ "rfcachePoolReadPendingG1Sec": 0,
+ "rfcachePoolReadPendingG500Micro": 0,
+ "rfcachePoolReadsPending": 0,
+ "rfcachePoolSize": 0,
+ "rfcachePoolSourceIdMismatch": 0,
+ "rfcachePoolSuspendedIos": 0,
+ "rfcachePoolSuspendedPequestsRedundantSearchs": 0,
+ "rfcachePoolWriteHit": 0,
+ "rfcachePoolWriteMiss": 0,
+ "rfcachePoolWritePending": 0,
+ "rfcachePoolWritePendingG10Millis": 0,
+ "rfcachePoolWritePendingG1Millis": 0,
+ "rfcachePoolWritePendingG1Sec": 0,
+ "rfcachePoolWritePendingG500Micro": 0,
+ "rfcacheReadMiss": 0,
+ "rfcacheReadsFromCache": 0,
+ "rfcacheReadsPending": 0,
+ "rfcacheReadsReceived": 0,
+ "rfcacheReadsSkipped": 0,
+ "rfcacheReadsSkippedAlignedSizeTooLarge": 0,
+ "rfcacheReadsSkippedHeavyLoad": 0,
+ "rfcacheReadsSkippedInternalError": 0,
+ "rfcacheReadsSkippedLockIos": 0,
+ "rfcacheReadsSkippedLowResources": 0,
+ "rfcacheReadsSkippedMaxIoSize": 0,
+ "rfcacheReadsSkippedStuckIo": 0,
+ "rfcacheSkippedUnlinedWrite": 0,
+ "rfcacheSourceDeviceReads": 0,
+ "rfcacheSourceDeviceWrites": 0,
+ "rfcacheWriteMiss": 0,
+ "rfcacheWritePending": 0,
+ "rfcacheWritesReceived": 0,
+ "rfcacheWritesSkippedCacheMiss": 0,
+ "rfcacheWritesSkippedHeavyLoad": 0,
+ "rfcacheWritesSkippedInternalError": 0,
+ "rfcacheWritesSkippedLowResources": 0,
+ "rfcacheWritesSkippedMaxIoSize": 0,
+ "rfcacheWritesSkippedStuckIo": 0,
+ "rmPendingAllocatedInKb": 0,
+ "rmcache128kbEntryCount": 0,
+ "rmcache16kbEntryCount": 0,
+ "rmcache32kbEntryCount": 0,
+ "rmcache4kbEntryCount": 0,
+ "rmcache64kbEntryCount": 0,
+ "rmcache8kbEntryCount": 0,
+ "rmcacheBigBlockEvictionCount": 0,
+ "rmcacheBigBlockEvictionSizeCountInKb": 0,
+ "rmcacheCurrNumOf128kbEntries": 0,
+ "rmcacheCurrNumOf16kbEntries": 0,
+ "rmcacheCurrNumOf32kbEntries": 0,
+ "rmcacheCurrNumOf4kbEntries": 0,
+ "rmcacheCurrNumOf64kbEntries": 0,
+ "rmcacheCurrNumOf8kbEntries": 0,
+ "rmcacheEntryEvictionCount": 0,
+ "rmcacheEntryEvictionSizeCountInKb": 0,
+ "rmcacheNoEvictionCount": 0,
+ "rmcacheSizeInKb": 393216,
+ "rmcacheSizeInUseInKb": 0,
+ "rmcacheSkipCountCacheAllBusy": 0,
+ "rmcacheSkipCountLargeIo": 0,
+ "rmcacheSkipCountUnaligned4kbIo": 0,
+ "scsiInitiatorIds": [],
+ "sdcIds": [
+ "6076fd0f00000000",
+ "6076fd1000000001",
+ "6076fd1100000002"
+ ],
+ "secondaryReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "secondaryReadFromDevBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "secondaryReadFromRmcacheBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "secondaryVacInKb": 33554432,
+ "secondaryWriteBwc": {
+ "numOccured": 1530,
+ "numSeconds": 5,
+ "totalWeightInKb": 1169632
+ },
+ "semiProtectedCapacityInKb": 0,
+ "semiProtectedVacInKb": 0,
+ "snapCapacityInUseInKb": 19845120,
+ "snapCapacityInUseOccupiedInKb": 749568,
+ "spareCapacityInKb": 64380928,
+ "thickCapacityInUseInKb": 0,
+ "thinCapacityAllocatedInKb": 67108864,
+ "thinCapacityInUseInKb": 49360896,
+ "totalReadBwc": {
+ "numOccured": 1,
+ "numSeconds": 5,
+ "totalWeightInKb": 4
+ },
+ "totalWriteBwc": {
+ "numOccured": 3086,
+ "numSeconds": 5,
+ "totalWeightInKb": 2363040
+ },
+ "unreachableUnusedCapacityInKb": 0,
+ "unusedCapacityInKb": 529328128,
+ "userDataReadBwc": {
+ "numOccured": 0,
+ "numSeconds": 0,
+ "totalWeightInKb": 0
+ },
+ "userDataWriteBwc": {
+ "numOccured": 285,
+ "numSeconds": 1,
+ "totalWeightInKb": 227170
+ }
+ }
+} \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/sensors/README.md b/src/go/plugin/go.d/modules/sensors/README.md
new file mode 120000
index 000000000..4e92b0882
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/README.md
@@ -0,0 +1 @@
+integrations/linux_sensors_lm-sensors.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/sensors/charts.go b/src/go/plugin/go.d/modules/sensors/charts.go
new file mode 100644
index 000000000..05081e1ad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/charts.go
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sensors
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioSensorTemperature = module.Priority + iota
+ prioSensorVoltage
+ prioSensorCurrent
+ prioSensorPower
+ prioSensorFan
+ prioSensorEnergy
+ prioSensorHumidity
+)
+
+var sensorTemperatureChartTmpl = module.Chart{
+ ID: "sensor_chip_%s_feature_%s_subfeature_%s_temperature",
+ Title: "Sensor temperature",
+ Units: "Celsius",
+ Fam: "temperature",
+ Ctx: "sensors.sensor_temperature",
+ Type: module.Line,
+ Priority: prioSensorTemperature,
+ Dims: module.Dims{
+ {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "temperature", Div: precision},
+ },
+}
+
+var sensorVoltageChartTmpl = module.Chart{
+ ID: "sensor_chip_%s_feature_%s_subfeature_%s_voltage",
+ Title: "Sensor voltage",
+ Units: "Volts",
+ Fam: "voltage",
+ Ctx: "sensors.sensor_voltage",
+ Type: module.Line,
+ Priority: prioSensorVoltage,
+ Dims: module.Dims{
+ {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "voltage", Div: precision},
+ },
+}
+
+var sensorCurrentChartTmpl = module.Chart{
+ ID: "sensor_chip_%s_feature_%s_subfeature_%s_current",
+ Title: "Sensor current",
+ Units: "Amperes",
+ Fam: "current",
+ Ctx: "sensors.sensor_current",
+ Type: module.Line,
+ Priority: prioSensorCurrent,
+ Dims: module.Dims{
+ {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "current", Div: precision},
+ },
+}
+
+var sensorPowerChartTmpl = module.Chart{
+ ID: "sensor_chip_%s_feature_%s_subfeature_%s_power",
+ Title: "Sensor power",
+ Units: "Watts",
+ Fam: "power",
+ Ctx: "sensors.sensor_power",
+ Type: module.Line,
+ Priority: prioSensorPower,
+ Dims: module.Dims{
+ {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "power", Div: precision},
+ },
+}
+
+var sensorFanChartTmpl = module.Chart{
+ ID: "sensor_chip_%s_feature_%s_subfeature_%s_fan",
+ Title: "Sensor fan speed",
+ Units: "RPM",
+ Fam: "fan",
+ Ctx: "sensors.sensor_fan_speed",
+ Type: module.Line,
+ Priority: prioSensorFan,
+ Dims: module.Dims{
+ {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "fan", Div: precision},
+ },
+}
+
+var sensorEnergyChartTmpl = module.Chart{
+ ID: "sensor_chip_%s_feature_%s_subfeature_%s_energy",
+ Title: "Sensor energy",
+ Units: "Joules",
+ Fam: "energy",
+ Ctx: "sensors.sensor_energy",
+ Type: module.Line,
+ Priority: prioSensorEnergy,
+ Dims: module.Dims{
+ {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "energy", Div: precision},
+ },
+}
+
+var sensorHumidityChartTmpl = module.Chart{
+ ID: "sensor_chip_%s_feature_%s_subfeature_%s_humidity",
+ Title: "Sensor humidity",
+ Units: "percent",
+ Fam: "humidity",
+ Ctx: "sensors.sensor_humidity",
+ Type: module.Area,
+ Priority: prioSensorHumidity,
+ Dims: module.Dims{
+ {ID: "sensor_chip_%s_feature_%s_subfeature_%s", Name: "humidity", Div: precision},
+ },
+}
+
+func (s *Sensors) addSensorChart(sn sensorStats) {
+ var chart *module.Chart
+
+ switch sensorType(sn) {
+ case sensorTypeTemp:
+ chart = sensorTemperatureChartTmpl.Copy()
+ case sensorTypeVoltage:
+ chart = sensorVoltageChartTmpl.Copy()
+ case sensorTypePower:
+ chart = sensorPowerChartTmpl.Copy()
+ case sensorTypeHumidity:
+ chart = sensorHumidityChartTmpl.Copy()
+ case sensorTypeFan:
+ chart = sensorFanChartTmpl.Copy()
+ case sensorTypeCurrent:
+ chart = sensorCurrentChartTmpl.Copy()
+ case sensorTypeEnergy:
+ chart = sensorEnergyChartTmpl.Copy()
+ default:
+ return
+ }
+
+ chip, feat, subfeat := snakeCase(sn.chip), snakeCase(sn.feature), snakeCase(sn.subfeature)
+
+ chart.ID = fmt.Sprintf(chart.ID, chip, feat, subfeat)
+ chart.Labels = []module.Label{
+ {Key: "chip", Value: sn.chip},
+ {Key: "feature", Value: sn.feature},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, chip, feat, subfeat)
+ }
+
+ if err := s.Charts().Add(chart); err != nil {
+ s.Warning(err)
+ }
+}
+
+func (s *Sensors) removeSensorChart(px string) {
+ for _, chart := range *s.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ return
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/sensors/collect.go b/src/go/plugin/go.d/modules/sensors/collect.go
new file mode 100644
index 000000000..46e900ad0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/collect.go
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sensors
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type sensorStats struct {
+ chip string
+ feature string
+ subfeature string
+ value string
+}
+
+func (s *sensorStats) String() string {
+ return fmt.Sprintf("chip:%s feat:%s subfeat:%s value:%s", s.chip, s.feature, s.subfeature, s.value)
+}
+
+const (
+ sensorTypeTemp = "temperature"
+ sensorTypeVoltage = "voltage"
+ sensorTypePower = "power"
+ sensorTypeHumidity = "humidity"
+ sensorTypeFan = "fan"
+ sensorTypeCurrent = "current"
+ sensorTypeEnergy = "energy"
+)
+
+const precision = 1000
+
+func (s *Sensors) collect() (map[string]int64, error) {
+ bs, err := s.exec.sensorsInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(bs) == 0 {
+ return nil, errors.New("empty response from sensors")
+ }
+
+ sensors, err := parseSensors(bs)
+ if err != nil {
+ return nil, err
+ }
+ if len(sensors) == 0 {
+ return nil, errors.New("no sensors found")
+ }
+
+ mx := make(map[string]int64)
+ seen := make(map[string]bool)
+
+ for _, sn := range sensors {
+ // TODO: Most likely we need different values depending on the type of sensor.
+ if !strings.HasSuffix(sn.subfeature, "_input") {
+ s.Debugf("skipping non input sensor: '%s'", sn)
+ continue
+ }
+
+ v, err := strconv.ParseFloat(sn.value, 64)
+ if err != nil {
+ s.Debugf("parsing value for sensor '%s': %v", sn, err)
+ continue
+ }
+
+ if sensorType(sn) == "" {
+ s.Debugf("can not find type for sensor '%s'", sn)
+ continue
+ }
+
+ if minVal, maxVal, ok := sensorLimits(sn); ok && (v < minVal || v > maxVal) {
+ s.Debugf("value outside limits [%d/%d] for sensor '%s'", int64(minVal), int64(maxVal), sn)
+ continue
+ }
+
+ key := fmt.Sprintf("sensor_chip_%s_feature_%s_subfeature_%s", sn.chip, sn.feature, sn.subfeature)
+ key = snakeCase(key)
+ if !s.sensors[key] {
+ s.sensors[key] = true
+ s.addSensorChart(sn)
+ }
+
+ seen[key] = true
+
+ mx[key] = int64(v * precision)
+ }
+
+ for k := range s.sensors {
+ if !seen[k] {
+ delete(s.sensors, k)
+ s.removeSensorChart(k)
+ }
+ }
+
+ return mx, nil
+}
+
+func snakeCase(n string) string {
+ return strings.ToLower(strings.ReplaceAll(n, " ", "_"))
+}
+
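+// sensorLimits returns rough plausibility bounds for a sensor reading.
+// Readings outside these bounds are treated as bogus and skipped by collect().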
+func sensorLimits(sn sensorStats) (minVal float64, maxVal float64, ok bool) {
+ switch sensorType(sn) {
+ case sensorTypeTemp:
+ return -127, 1000, true
+ case sensorTypeVoltage:
+ return -400, 400, true
+ case sensorTypeCurrent:
+ return -127, 127, true
+ case sensorTypeFan:
+ return 0, 65535, true
+ default:
+ return 0, 0, false
+ }
+}
+
+func sensorType(sn sensorStats) string {
+ switch {
+ case strings.HasPrefix(sn.subfeature, "temp"):
+ return sensorTypeTemp
+ case strings.HasPrefix(sn.subfeature, "in"):
+ return sensorTypeVoltage
+ case strings.HasPrefix(sn.subfeature, "power"):
+ return sensorTypePower
+ case strings.HasPrefix(sn.subfeature, "humidity"):
+ return sensorTypeHumidity
+ case strings.HasPrefix(sn.subfeature, "fan"):
+ return sensorTypeFan
+ case strings.HasPrefix(sn.subfeature, "curr"):
+ return sensorTypeCurrent
+ case strings.HasPrefix(sn.subfeature, "energy"):
+ return sensorTypeEnergy
+ default:
+ return ""
+ }
+}
+
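+// parseSensors parses the raw `sensors -A -u` output, which has the form:
+//
+//	chip-name
+//	feature:
+//	  subfeature_input: 42.000
+//
+// A blank line ends the current chip; indented "name: value" lines are
+// subfeatures of the most recently seen feature.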
+func parseSensors(output []byte) ([]sensorStats, error) {
+ var sensors []sensorStats
+
+ sc := bufio.NewScanner(bytes.NewReader(output))
+
+ var chip, feat string
+
+ for sc.Scan() {
+ text := sc.Text()
+ if text == "" {
+ chip, feat = "", ""
+ continue
+ }
+
+ switch {
+ case strings.HasPrefix(text, " ") && chip != "" && feat != "":
+ parts := strings.Split(text, ":")
+ if len(parts) != 2 {
+ continue
+ }
+ subfeat, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])
+ sensors = append(sensors, sensorStats{
+ chip: chip,
+ feature: feat,
+ subfeature: subfeat,
+ value: value,
+ })
+ case strings.HasSuffix(text, ":") && chip != "":
+ feat = strings.TrimSpace(strings.TrimSuffix(text, ":"))
+ default:
+ chip = text
+ feat = ""
+ }
+ }
+
+ return sensors, nil
+}
diff --git a/src/go/plugin/go.d/modules/sensors/config_schema.json b/src/go/plugin/go.d/modules/sensors/config_schema.json
new file mode 100644
index 000000000..6c12ca9b8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/config_schema.json
@@ -0,0 +1,47 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Sensors collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "binary_path": {
+ "title": "Binary path",
+ "description": "Path to the `sensors` binary.",
+ "type": "string",
+ "default": "/usr/bin/sensors"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "required": [
+ "binary_path"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "binary_path": {
+ "ui:help": "If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/sensors/exec.go b/src/go/plugin/go.d/modules/sensors/exec.go
new file mode 100644
index 000000000..c386ddd7d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/exec.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sensors
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newSensorsCliExec(binPath string, timeout time.Duration) *sensorsCliExec {
+ return &sensorsCliExec{
+ binPath: binPath,
+ timeout: timeout,
+ }
+}
+
+type sensorsCliExec struct {
+ *logger.Logger
+
+ binPath string
+ timeout time.Duration
+}
+
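+// sensorsInfo executes `sensors -A -u` (hide adapter details, raw machine-readable
+// output) with a timeout and returns its stdout.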
+func (e *sensorsCliExec) sensorsInfo() ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.binPath, "-A", "-u")
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/sensors/init.go b/src/go/plugin/go.d/modules/sensors/init.go
new file mode 100644
index 000000000..6753693da
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/init.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sensors
+
+import (
+ "errors"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func (s *Sensors) validateConfig() error {
+ if s.BinaryPath == "" {
+ return errors.New("no sensors binary path specified")
+ }
+ return nil
+}
+
+func (s *Sensors) initSensorsCliExec() (sensorsCLI, error) {
+ binPath := s.BinaryPath
+
+ if !strings.HasPrefix(binPath, "/") {
+ path, err := exec.LookPath(binPath)
+ if err != nil {
+ return nil, err
+ }
+ binPath = path
+ }
+
+ if _, err := os.Stat(binPath); err != nil {
+ return nil, err
+ }
+
+ sensorsExec := newSensorsCliExec(binPath, s.Timeout.Duration())
+ sensorsExec.Logger = s.Logger
+
+ return sensorsExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md b/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md
new file mode 100644
index 000000000..d5e948c42
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/integrations/linux_sensors_lm-sensors.md
@@ -0,0 +1,215 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/sensors/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/sensors/metadata.yaml"
+sidebar_label: "Linux Sensors (lm-sensors)"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Linux Sensors (lm-sensors)
+
+
+<img src="https://netdata.cloud/img/microchip.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: sensors
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector gathers real-time system sensor statistics, including temperature, voltage, current, power, fan speed, energy consumption, and humidity, utilizing the [sensors](https://linux.die.net/man/1/sensors) binary.
+
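+Under the hood it periodically executes `sensors -A -u` and parses the raw output. A trimmed, illustrative fragment of that output:
+
+```
+k10temp-pci-00c3
+Tctl:
+  temp1_input: 62.000
+```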
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+The following types of sensors are auto-detected:
+
+- temperature
+- fan
+- voltage
+- current
+- power
+- energy
+- humidity
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per sensor
+
+These metrics refer to the sensor.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| chip | The hardware component responsible for the sensor monitoring. |
+| feature | The specific sensor or monitoring point provided by the chip. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| sensors.sensor_temperature | temperature | Celsius |
+| sensors.sensor_voltage | voltage | Volts |
+| sensors.sensor_current | current | Amperes |
+| sensors.sensor_power | power | Watts |
+| sensors.sensor_fan_speed | fan | RPM |
+| sensors.sensor_energy | energy | Joules |
+| sensors.sensor_humidity | humidity | percent |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install lm-sensors
+
+- Install `lm-sensors` using your distribution's package manager.
+- Run `sensors-detect` to detect hardware monitoring chips.
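+
+For example, on a Debian-based system this might look like the following (package names and commands can vary between distributions):
+
+```bash
+# install the lm-sensors userspace tools (assumes an apt-based distribution)
+sudo apt-get install lm-sensors
+# interactively probe for hardware monitoring chips and load the needed kernel modules
+sudo sensors-detect
+```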
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/sensors.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/sensors.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| binary_path | Path to the `sensors` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/sensors | yes |
+| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom binary path
+
+The executable is not in the directories specified in the PATH environment variable.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: sensors
+ binary_path: /usr/local/sbin/sensors
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `sensors` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m sensors
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `sensors` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep sensors
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep sensors /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep sensors
+```
+
+
diff --git a/src/go/plugin/go.d/modules/sensors/metadata.yaml b/src/go/plugin/go.d/modules/sensors/metadata.yaml
new file mode 100644
index 000000000..5ea94f398
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/metadata.yaml
@@ -0,0 +1,157 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-sensors
+ plugin_name: go.d.plugin
+ module_name: sensors
+ monitored_instance:
+ name: Linux Sensors (lm-sensors)
+ link: https://hwmon.wiki.kernel.org/lm_sensors
+ icon_filename: "microchip.svg"
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords:
+ - sensors
+ - temperature
+ - voltage
+ - current
+ - power
+ - fan
+ - energy
+ - humidity
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ This collector gathers real-time system sensor statistics,
+ including temperature, voltage, current, power, fan speed, energy consumption, and humidity,
+ utilizing the [sensors](https://linux.die.net/man/1/sensors) binary.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+            The following types of sensors are auto-detected:
+
+ - temperature
+ - fan
+ - voltage
+ - current
+ - power
+ - energy
+ - humidity
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Install lm-sensors
+ description: |
+ - Install `lm-sensors` using your distribution's package manager.
+ - Run `sensors-detect` to detect hardware monitoring chips.
+ configuration:
+ file:
+ name: go.d/sensors.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: binary_path
+ description: Path to the `sensors` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable.
+ default_value: /usr/bin/sensors
+ required: true
+ - name: timeout
+ description: Timeout for executing the binary, specified in seconds.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom binary path
+ description: The executable is not in the directories specified in the PATH environment variable.
+ config: |
+ jobs:
+ - name: sensors
+ binary_path: /usr/local/sbin/sensors
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: sensor
+ description: These metrics refer to the sensor.
+ labels:
+ - name: chip
+ description: The hardware component responsible for the sensor monitoring.
+ - name: feature
+ description: The specific sensor or monitoring point provided by the chip.
+ metrics:
+ - name: sensors.sensor_temperature
+ description: Sensor temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: sensors.sensor_voltage
+ description: Sensor voltage
+ unit: Volts
+ chart_type: line
+ dimensions:
+ - name: voltage
+ - name: sensors.sensor_current
+ description: Sensor current
+ unit: Amperes
+ chart_type: line
+ dimensions:
+ - name: current
+ - name: sensors.sensor_power
+ description: Sensor power
+ unit: Watts
+ chart_type: line
+ dimensions:
+ - name: power
+ - name: sensors.sensor_fan_speed
+ description: Sensor fan speed
+ unit: RPM
+ chart_type: line
+ dimensions:
+ - name: fan
+ - name: sensors.sensor_energy
+ description: Sensor energy
+ unit: Joules
+ chart_type: line
+ dimensions:
+ - name: energy
+ - name: sensors.sensor_humidity
+ description: Sensor humidity
+ unit: percent
+ chart_type: area
+ dimensions:
+ - name: humidity
diff --git a/src/go/plugin/go.d/modules/sensors/sensors.go b/src/go/plugin/go.d/modules/sensors/sensors.go
new file mode 100644
index 000000000..379d44deb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/sensors.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sensors
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("sensors", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Sensors {
+ return &Sensors{
+ Config: Config{
+ BinaryPath: "/usr/bin/sensors",
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ sensors: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ BinaryPath string `yaml:"binary_path" json:"binary_path"`
+}
+
+type (
+ Sensors struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec sensorsCLI
+
+ sensors map[string]bool
+ }
+ sensorsCLI interface {
+ sensorsInfo() ([]byte, error)
+ }
+)
+
+func (s *Sensors) Configuration() any {
+ return s.Config
+}
+
+func (s *Sensors) Init() error {
+ if err := s.validateConfig(); err != nil {
+ s.Errorf("config validation: %s", err)
+ return err
+ }
+
+ sensorsExec, err := s.initSensorsCliExec()
+ if err != nil {
+ s.Errorf("sensors exec initialization: %v", err)
+ return err
+ }
+ s.exec = sensorsExec
+
+ return nil
+}
+
+func (s *Sensors) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (s *Sensors) Charts() *module.Charts {
+ return s.charts
+}
+
+func (s *Sensors) Collect() map[string]int64 {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (s *Sensors) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/sensors/sensors_test.go b/src/go/plugin/go.d/modules/sensors/sensors_test.go
new file mode 100644
index 000000000..a370d7500
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/sensors_test.go
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package sensors
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataSensorsTemp, _ = os.ReadFile("testdata/sensors-temp.txt")
+ dataSensorsTempInCurrPowerFan, _ = os.ReadFile("testdata/sensors-temp-in-curr-power-fan.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataSensorsTemp": dataSensorsTemp,
+ "dataSensorsTempInCurrPowerFan": dataSensorsTempInCurrPowerFan,
+ } {
+ require.NotNil(t, data, name)
+
+ }
+}
+
+func TestSensors_Configuration(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Sensors{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestSensors_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if 'binary_path' is not set": {
+ wantFail: true,
+ config: Config{
+ BinaryPath: "",
+ },
+ },
+ "fails if failed to find binary": {
+ wantFail: true,
+ config: Config{
+ BinaryPath: "sensors!!!",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sensors := New()
+ sensors.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, sensors.Init())
+ } else {
+ assert.NoError(t, sensors.Init())
+ }
+ })
+ }
+}
+
+func TestSensors_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Sensors
+ }{
+ "not initialized exec": {
+ prepare: func() *Sensors {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Sensors {
+ sensors := New()
+ sensors.exec = prepareMockOkOnlyTemp()
+ _ = sensors.Check()
+ return sensors
+ },
+ },
+ "after collect": {
+ prepare: func() *Sensors {
+ sensors := New()
+ sensors.exec = prepareMockOkTempInCurrPowerFan()
+ _ = sensors.Collect()
+ return sensors
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sensors := test.prepare()
+
+ assert.NotPanics(t, sensors.Cleanup)
+ })
+ }
+}
+
+func TestSensors_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestSensors_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockSensorsCLIExec
+ wantFail bool
+ }{
+ "only temperature": {
+ wantFail: false,
+ prepareMock: prepareMockOkOnlyTemp,
+ },
+ "temperature and voltage": {
+ wantFail: false,
+ prepareMock: prepareMockOkTempInCurrPowerFan,
+ },
+ "error on sensors info call": {
+ wantFail: true,
+ prepareMock: prepareMockErr,
+ },
+ "empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sensors := New()
+ mock := test.prepareMock()
+ sensors.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, sensors.Check())
+ } else {
+ assert.NoError(t, sensors.Check())
+ }
+ })
+ }
+}
+
+func TestSensors_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockSensorsCLIExec
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "only temperature": {
+ prepareMock: prepareMockOkOnlyTemp,
+ wantCharts: 24,
+ wantMetrics: map[string]int64{
+ "sensor_chip_bnxt_en-pci-6200_feature_temp1_subfeature_temp1_input": 80000,
+ "sensor_chip_bnxt_en-pci-6201_feature_temp1_subfeature_temp1_input": 81000,
+ "sensor_chip_k10temp-pci-00c3_feature_tccd1_subfeature_temp3_input": 58250,
+ "sensor_chip_k10temp-pci-00c3_feature_tccd2_subfeature_temp4_input": 60250,
+ "sensor_chip_k10temp-pci-00c3_feature_tccd3_subfeature_temp5_input": 57000,
+ "sensor_chip_k10temp-pci-00c3_feature_tccd4_subfeature_temp6_input": 57250,
+ "sensor_chip_k10temp-pci-00c3_feature_tccd5_subfeature_temp7_input": 57750,
+ "sensor_chip_k10temp-pci-00c3_feature_tccd6_subfeature_temp8_input": 59500,
+ "sensor_chip_k10temp-pci-00c3_feature_tccd7_subfeature_temp9_input": 58500,
+ "sensor_chip_k10temp-pci-00c3_feature_tccd8_subfeature_temp10_input": 61250,
+ "sensor_chip_k10temp-pci-00c3_feature_tctl_subfeature_temp1_input": 62000,
+ "sensor_chip_k10temp-pci-00cb_feature_tccd1_subfeature_temp3_input": 54000,
+ "sensor_chip_k10temp-pci-00cb_feature_tccd2_subfeature_temp4_input": 55500,
+ "sensor_chip_k10temp-pci-00cb_feature_tccd3_subfeature_temp5_input": 56000,
+ "sensor_chip_k10temp-pci-00cb_feature_tccd4_subfeature_temp6_input": 52750,
+ "sensor_chip_k10temp-pci-00cb_feature_tccd5_subfeature_temp7_input": 53500,
+ "sensor_chip_k10temp-pci-00cb_feature_tccd6_subfeature_temp8_input": 55250,
+ "sensor_chip_k10temp-pci-00cb_feature_tccd7_subfeature_temp9_input": 53000,
+ "sensor_chip_k10temp-pci-00cb_feature_tccd8_subfeature_temp10_input": 53750,
+ "sensor_chip_k10temp-pci-00cb_feature_tctl_subfeature_temp1_input": 57500,
+ "sensor_chip_nouveau-pci-4100_feature_temp1_subfeature_temp1_input": 51000,
+ "sensor_chip_nvme-pci-0100_feature_composite_subfeature_temp1_input": 39850,
+ "sensor_chip_nvme-pci-6100_feature_composite_subfeature_temp1_input": 48850,
+ "sensor_chip_nvme-pci-8100_feature_composite_subfeature_temp1_input": 39850,
+ },
+ },
+ "multiple sensors": {
+ prepareMock: prepareMockOkTempInCurrPowerFan,
+ wantCharts: 19,
+ wantMetrics: map[string]int64{
+ "sensor_chip_acpitz-acpi-0_feature_temp1_subfeature_temp1_input": 88000,
+ "sensor_chip_amdgpu-pci-0300_feature_edge_subfeature_temp1_input": 53000,
+ "sensor_chip_amdgpu-pci-0300_feature_fan1_subfeature_fan1_input": 0,
+ "sensor_chip_amdgpu-pci-0300_feature_junction_subfeature_temp2_input": 58000,
+ "sensor_chip_amdgpu-pci-0300_feature_mem_subfeature_temp3_input": 57000,
+ "sensor_chip_amdgpu-pci-0300_feature_vddgfx_subfeature_in0_input": 787,
+ "sensor_chip_amdgpu-pci-6700_feature_edge_subfeature_temp1_input": 60000,
+ "sensor_chip_amdgpu-pci-6700_feature_ppt_subfeature_power1_input": 8144,
+ "sensor_chip_amdgpu-pci-6700_feature_vddgfx_subfeature_in0_input": 1335,
+ "sensor_chip_amdgpu-pci-6700_feature_vddnb_subfeature_in1_input": 973,
+ "sensor_chip_asus-isa-0000_feature_cpu_fan_subfeature_fan1_input": 5700000,
+ "sensor_chip_asus-isa-0000_feature_gpu_fan_subfeature_fan2_input": 6600000,
+ "sensor_chip_bat0-acpi-0_feature_in0_subfeature_in0_input": 17365,
+ "sensor_chip_k10temp-pci-00c3_feature_tctl_subfeature_temp1_input": 90000,
+ "sensor_chip_nvme-pci-0600_feature_composite_subfeature_temp1_input": 33850,
+ "sensor_chip_nvme-pci-0600_feature_sensor_1_subfeature_temp2_input": 48850,
+ "sensor_chip_nvme-pci-0600_feature_sensor_2_subfeature_temp3_input": 33850,
+ "sensor_chip_ucsi_source_psy_usbc000:001-isa-0000_feature_curr1_subfeature_curr1_input": 0,
+ "sensor_chip_ucsi_source_psy_usbc000:001-isa-0000_feature_in0_subfeature_in0_input": 0,
+ },
+ },
+ "error on sensors info call": {
+ prepareMock: prepareMockErr,
+ wantMetrics: nil,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sensors := New()
+ mock := test.prepareMock()
+ sensors.exec = mock
+
+ var mx map[string]int64
+ for i := 0; i < 10; i++ {
+ mx = sensors.Collect()
+ }
+
+ assert.Equal(t, test.wantMetrics, mx)
+ assert.Len(t, *sensors.Charts(), test.wantCharts)
+ testMetricsHasAllChartsDims(t, sensors, mx)
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, sensors *Sensors, mx map[string]int64) {
+ for _, chart := range *sensors.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareMockOkOnlyTemp() *mockSensorsCLIExec {
+ return &mockSensorsCLIExec{
+ sensorsInfoData: dataSensorsTemp,
+ }
+}
+
+func prepareMockOkTempInCurrPowerFan() *mockSensorsCLIExec {
+ return &mockSensorsCLIExec{
+ sensorsInfoData: dataSensorsTempInCurrPowerFan,
+ }
+}
+
+func prepareMockErr() *mockSensorsCLIExec {
+ return &mockSensorsCLIExec{
+ errOnSensorsInfo: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockSensorsCLIExec {
+ return &mockSensorsCLIExec{
+ sensorsInfoData: []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`),
+ }
+}
+
+func prepareMockEmptyResponse() *mockSensorsCLIExec {
+ return &mockSensorsCLIExec{}
+}
+
+type mockSensorsCLIExec struct {
+ errOnSensorsInfo bool
+ sensorsInfoData []byte
+}
+
+func (m *mockSensorsCLIExec) sensorsInfo() ([]byte, error) {
+ if m.errOnSensorsInfo {
+ return nil, errors.New("mock.sensorsInfo() error")
+ }
+
+ return m.sensorsInfoData, nil
+}
diff --git a/src/go/plugin/go.d/modules/sensors/testdata/config.json b/src/go/plugin/go.d/modules/sensors/testdata/config.json
new file mode 100644
index 000000000..095713193
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "binary_path": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/sensors/testdata/config.yaml b/src/go/plugin/go.d/modules/sensors/testdata/config.yaml
new file mode 100644
index 000000000..baf3bcd0b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+timeout: 123.123
+binary_path: "ok"
diff --git a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt b/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt
new file mode 100644
index 000000000..a38c7ab4e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp-in-curr-power-fan.txt
@@ -0,0 +1,72 @@
+asus-isa-0000
+cpu_fan:
+ fan1_input: 5700.000
+gpu_fan:
+ fan2_input: 6600.000
+nvme-pci-0600
+Composite:
+ temp1_input: 33.850
+ temp1_max: 83.850
+ temp1_min: -40.150
+ temp1_crit: 87.850
+ temp1_alarm: 0.000
+Sensor 1:
+ temp2_input: 48.850
+ temp2_max: 65261.850
+ temp2_min: -273.150
+Sensor 2:
+ temp3_input: 33.850
+ temp3_max: 65261.850
+ temp3_min: -273.150
+amdgpu-pci-6700
+vddgfx:
+ in0_input: 1.335
+vddnb:
+ in1_input: 0.973
+edge:
+ temp1_input: 60.000
+PPT:
+ power1_average: 5.088
+ power1_input: 8.144
+BAT0-acpi-0
+in0:
+ in0_input: 17.365
+ucsi_source_psy_USBC000:001-isa-0000
+in0:
+ in0_input: 0.000
+ in0_min: 0.000
+ in0_max: 0.000
+curr1:
+ curr1_input: 0.000
+ curr1_max: 0.000
+k10temp-pci-00c3
+Tctl:
+ temp1_input: 90.000
+amdgpu-pci-0300
+vddgfx:
+ in0_input: 0.787
+fan1:
+ fan1_input: 0.000
+ fan1_min: 0.000
+ fan1_max: 4900.000
+edge:
+ temp1_input: 53.000
+ temp1_crit: 100.000
+ temp1_crit_hyst: -273.150
+ temp1_emergency: 105.000
+junction:
+ temp2_input: 58.000
+ temp2_crit: 100.000
+ temp2_crit_hyst: -273.150
+ temp2_emergency: 105.000
+mem:
+ temp3_input: 57.000
+ temp3_crit: 105.000
+ temp3_crit_hyst: -273.150
+ temp3_emergency: 110.000
+PPT:
+ power1_average: 29.000
+ power1_cap: 120.000
+acpitz-acpi-0
+temp1:
+ temp1_input: 88.000
diff --git a/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt b/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt
new file mode 100644
index 000000000..decc7ee39
--- /dev/null
+++ b/src/go/plugin/go.d/modules/sensors/testdata/sensors-temp.txt
@@ -0,0 +1,81 @@
+k10temp-pci-00cb
+Tctl:
+ temp1_input: 57.500
+Tccd1:
+ temp3_input: 54.000
+Tccd2:
+ temp4_input: 55.500
+Tccd3:
+ temp5_input: 56.000
+Tccd4:
+ temp6_input: 52.750
+Tccd5:
+ temp7_input: 53.500
+Tccd6:
+ temp8_input: 55.250
+Tccd7:
+ temp9_input: 53.000
+Tccd8:
+ temp10_input: 53.750
+
+bnxt_en-pci-6201
+temp1:
+ temp1_input: 81.000
+
+nvme-pci-6100
+Composite:
+ temp1_input: 48.850
+ temp1_max: 89.850
+ temp1_min: -20.150
+ temp1_crit: 94.850
+ temp1_alarm: 0.000
+
+nvme-pci-0100
+Composite:
+ temp1_input: 39.850
+ temp1_max: 89.850
+ temp1_min: -20.150
+ temp1_crit: 94.850
+ temp1_alarm: 0.000
+
+nouveau-pci-4100
+temp1:
+ temp1_input: 51.000
+ temp1_max: 95.000
+ temp1_max_hyst: 3.000
+ temp1_crit: 105.000
+ temp1_crit_hyst: 5.000
+ temp1_emergency: 135.000
+ temp1_emergency_hyst: 5.000
+
+k10temp-pci-00c3
+Tctl:
+ temp1_input: 62.000
+Tccd1:
+ temp3_input: 58.250
+Tccd2:
+ temp4_input: 60.250
+Tccd3:
+ temp5_input: 57.000
+Tccd4:
+ temp6_input: 57.250
+Tccd5:
+ temp7_input: 57.750
+Tccd6:
+ temp8_input: 59.500
+Tccd7:
+ temp9_input: 58.500
+Tccd8:
+ temp10_input: 61.250
+
+bnxt_en-pci-6200
+temp1:
+ temp1_input: 80.000
+
+nvme-pci-8100
+Composite:
+ temp1_input: 39.850
+ temp1_max: 89.850
+ temp1_min: -20.150
+ temp1_crit: 94.850
+ temp1_alarm: 0.000
diff --git a/src/go/plugin/go.d/modules/smartctl/README.md b/src/go/plugin/go.d/modules/smartctl/README.md
new file mode 120000
index 000000000..63aad6c85
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/README.md
@@ -0,0 +1 @@
+integrations/s.m.a.r.t..md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/smartctl/charts.go b/src/go/plugin/go.d/modules/smartctl/charts.go
new file mode 100644
index 000000000..461f73501
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/charts.go
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package smartctl
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioDeviceSmartStatus = module.Priority + iota
+ prioDeviceAtaSmartErrorLogCount
+ prioDevicePowerOnTime
+ prioDeviceTemperature
+ prioDevicePowerCycleCount
+
+ prioDeviceScsiReadErrors
+ prioDeviceScsiWriteErrors
+ prioDeviceScsiVerifyErrors
+
+ prioDeviceSmartAttributeDecoded
+ prioDeviceSmartAttributeNormalized
+)
+
+var deviceChartsTmpl = module.Charts{
+ devicePowerOnTimeChartTmpl.Copy(),
+ deviceTemperatureChartTmpl.Copy(),
+ devicePowerCycleCountChartTmpl.Copy(),
+ deviceSmartStatusChartTmpl.Copy(),
+ deviceAtaSmartErrorLogCountChartTmpl.Copy(),
+}
+
+var (
+ deviceSmartStatusChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_smart_status",
+ Title: "Device smart status",
+ Units: "status",
+ Fam: "smart status",
+ Ctx: "smartctl.device_smart_status",
+ Type: module.Line,
+ Priority: prioDeviceSmartStatus,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_smart_status_passed", Name: "passed"},
+ {ID: "device_%s_type_%s_smart_status_failed", Name: "failed"},
+ },
+ }
+ deviceAtaSmartErrorLogCountChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_ata_smart_error_log_count",
+ Title: "Device ATA smart error log count",
+ Units: "logs",
+ Fam: "smart error log",
+ Ctx: "smartctl.device_ata_smart_error_log_count",
+ Type: module.Line,
+ Priority: prioDeviceAtaSmartErrorLogCount,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_ata_smart_error_log_summary_count", Name: "error_log"},
+ },
+ }
+ devicePowerOnTimeChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_power_on_time",
+ Title: "Device power on time",
+ Units: "seconds",
+ Fam: "power on time",
+ Ctx: "smartctl.device_power_on_time",
+ Type: module.Line,
+ Priority: prioDevicePowerOnTime,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_power_on_time", Name: "power_on_time"},
+ },
+ }
+ deviceTemperatureChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_temperature",
+ Title: "Device temperature",
+ Units: "Celsius",
+ Fam: "temperature",
+ Ctx: "smartctl.device_temperature",
+ Type: module.Line,
+ Priority: prioDeviceTemperature,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_temperature", Name: "temperature"},
+ },
+ }
+ devicePowerCycleCountChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_power_cycle_count",
+ Title: "Device power cycles",
+ Units: "cycles",
+ Fam: "power cycles",
+ Ctx: "smartctl.device_power_cycles_count",
+ Type: module.Line,
+ Priority: prioDevicePowerCycleCount,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_power_cycle_count", Name: "power"},
+ },
+ }
+)
+
+var deviceScsiErrorLogChartsTmpl = module.Charts{
+ deviceScsiReadErrorsChartTmpl.Copy(),
+ deviceScsiWriteErrorsChartTmpl.Copy(),
+ deviceScsiVerifyErrorsChartTmpl.Copy(),
+}
+
+var (
+ deviceScsiReadErrorsChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_read_errors_rate",
+ Title: "Device read errors",
+ Units: "errors/s",
+ Fam: "scsi errors",
+ Ctx: "smartctl.device_read_errors_rate",
+ Type: module.Line,
+ Priority: prioDeviceScsiReadErrors,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_scsi_error_log_read_total_errors_corrected", Name: "corrected", Algo: module.Incremental},
+ {ID: "device_%s_type_%s_scsi_error_log_read_total_uncorrected_errors", Name: "uncorrected", Algo: module.Incremental},
+ },
+ }
+ deviceScsiWriteErrorsChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_write_errors_rate",
+ Title: "Device write errors",
+ Units: "errors/s",
+ Fam: "scsi errors",
+ Ctx: "smartctl.device_write_errors_rate",
+ Type: module.Line,
+ Priority: prioDeviceScsiWriteErrors,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_scsi_error_log_write_total_errors_corrected", Name: "corrected", Algo: module.Incremental},
+ {ID: "device_%s_type_%s_scsi_error_log_write_total_uncorrected_errors", Name: "uncorrected", Algo: module.Incremental},
+ },
+ }
+ deviceScsiVerifyErrorsChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_verify_errors_rate",
+ Title: "Device verify errors",
+ Units: "errors/s",
+ Fam: "scsi errors",
+ Ctx: "smartctl.device_verify_errors_rate",
+ Type: module.Line,
+ Priority: prioDeviceScsiVerifyErrors,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_scsi_error_log_verify_total_errors_corrected", Name: "corrected", Algo: module.Incremental},
+ {ID: "device_%s_type_%s_scsi_error_log_verify_total_uncorrected_errors", Name: "uncorrected", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ deviceSmartAttributeDecodedChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_smart_attr_%s",
+ Title: "Device smart attribute %s",
+ Units: "value",
+ Fam: "attr %s",
+ Ctx: "smartctl.device_smart_attr_%s",
+ Type: module.Line,
+ Priority: prioDeviceSmartAttributeDecoded,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_attr_%s_decoded", Name: "%s"},
+ },
+ }
+ deviceSmartAttributeNormalizedChartTmpl = module.Chart{
+ ID: "device_%s_type_%s_smart_attr_%s_normalized",
+ Title: "Device smart attribute normalized %s",
+ Units: "value",
+ Fam: "attr %s",
+ Ctx: "smartctl.device_smart_attr_%s_normalized",
+ Type: module.Line,
+ Priority: prioDeviceSmartAttributeNormalized,
+ Dims: module.Dims{
+ {ID: "device_%s_type_%s_attr_%s_normalized", Name: "%s"},
+ },
+ }
+)
+
+func (s *Smartctl) addDeviceCharts(dev *smartDevice) {
+ charts := module.Charts{}
+
+ if cs := s.newDeviceCharts(dev); cs != nil && len(*cs) > 0 {
+ if err := charts.Add(*cs...); err != nil {
+ s.Warning(err)
+ }
+ }
+ if cs := s.newDeviceSmartAttrCharts(dev); cs != nil && len(*cs) > 0 {
+ if err := charts.Add(*cs...); err != nil {
+ s.Warning(err)
+ }
+ }
+ if cs := s.newDeviceScsiErrorLogCharts(dev); cs != nil && len(*cs) > 0 {
+ if err := charts.Add(*cs...); err != nil {
+ s.Warning(err)
+ }
+ }
+
+ if err := s.Charts().Add(charts...); err != nil {
+ s.Warning(err)
+ }
+}
+
+func (s *Smartctl) removeDeviceCharts(scanDev *scanDevice) {
+ px := fmt.Sprintf("device_%s_type_%s_", scanDev.shortName(), scanDev.typ)
+
+ for _, chart := range *s.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func (s *Smartctl) newDeviceCharts(dev *smartDevice) *module.Charts {
+
+ charts := deviceChartsTmpl.Copy()
+
+ if _, ok := dev.powerOnTime(); !ok {
+ _ = charts.Remove(devicePowerOnTimeChartTmpl.ID)
+ }
+ if _, ok := dev.temperature(); !ok {
+ _ = charts.Remove(deviceTemperatureChartTmpl.ID)
+ }
+ if _, ok := dev.powerCycleCount(); !ok {
+ _ = charts.Remove(devicePowerCycleCountChartTmpl.ID)
+ }
+ if _, ok := dev.smartStatusPassed(); !ok {
+ _ = charts.Remove(deviceSmartStatusChartTmpl.ID)
+ }
+ if _, ok := dev.ataSmartErrorLogCount(); !ok {
+ _ = charts.Remove(deviceAtaSmartErrorLogCountChartTmpl.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, dev.deviceName(), dev.deviceType())
+ chart.Labels = []module.Label{
+ {Key: "device_name", Value: dev.deviceName()},
+ {Key: "device_type", Value: dev.deviceType()},
+ {Key: "model_name", Value: dev.modelName()},
+ {Key: "serial_number", Value: dev.serialNumber()},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, dev.deviceName(), dev.deviceType())
+ }
+ }
+
+ return charts
+}
+
+func (s *Smartctl) newDeviceSmartAttrCharts(dev *smartDevice) *module.Charts {
+ attrs, ok := dev.ataSmartAttributeTable()
+ if !ok {
+ return nil
+ }
+ charts := module.Charts{}
+
+ for _, attr := range attrs {
+ if !isSmartAttrValid(attr) ||
+ strings.HasPrefix(attr.name(), "Unknown") ||
+ strings.HasPrefix(attr.name(), "Not_In_Use") {
+ continue
+ }
+
+ cs := module.Charts{
+ deviceSmartAttributeDecodedChartTmpl.Copy(),
+ deviceSmartAttributeNormalizedChartTmpl.Copy(),
+ }
+
+ attrName := attributeNameMap(attr.name())
+ cleanAttrName := cleanAttributeName(attrName)
+
+ for _, chart := range cs {
+ if chart.ID == deviceSmartAttributeDecodedChartTmpl.ID {
+ chart.Units = attributeUnit(attrName)
+ }
+ chart.ID = fmt.Sprintf(chart.ID, dev.deviceName(), dev.deviceType(), cleanAttrName)
+ chart.Title = fmt.Sprintf(chart.Title, attrName)
+ chart.Fam = fmt.Sprintf(chart.Fam, cleanAttrName)
+ chart.Ctx = fmt.Sprintf(chart.Ctx, cleanAttrName)
+ chart.Labels = []module.Label{
+ {Key: "device_name", Value: dev.deviceName()},
+ {Key: "device_type", Value: dev.deviceType()},
+ {Key: "model_name", Value: dev.modelName()},
+ {Key: "serial_number", Value: dev.serialNumber()},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, dev.deviceName(), dev.deviceType(), cleanAttrName)
+ dim.Name = fmt.Sprintf(dim.Name, cleanAttrName)
+ }
+ }
+
+ if err := charts.Add(cs...); err != nil {
+ s.Warning(err)
+ }
+ }
+
+ return &charts
+}
+
+func (s *Smartctl) newDeviceScsiErrorLogCharts(dev *smartDevice) *module.Charts {
+ if dev.deviceType() != "scsi" || !dev.data.Get("scsi_error_counter_log").Exists() {
+ return nil
+ }
+
+ charts := deviceScsiErrorLogChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, dev.deviceName(), dev.deviceType())
+ chart.Labels = []module.Label{
+ {Key: "device_name", Value: dev.deviceName()},
+ {Key: "device_type", Value: dev.deviceType()},
+ {Key: "model_name", Value: dev.modelName()},
+ {Key: "serial_number", Value: dev.serialNumber()},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, dev.deviceName(), dev.deviceType())
+ }
+ }
+
+ return charts
+}
+
+var attrNameReplacer = strings.NewReplacer(" ", "_", "/", "_")
+
+func cleanAttributeName(attrName string) string {
+ return strings.ToLower(attrNameReplacer.Replace(attrName))
+}
+
+func attributeUnit(attrName string) string {
+ units := map[string]string{
+ "Airflow_Temperature_Cel": "Celsius",
+ "Case_Temperature": "Celsius",
+ "Drive_Temperature": "Celsius",
+ "Temperature_Case": "Celsius",
+ "Temperature_Celsius": "Celsius",
+ "Temperature_Internal": "Celsius",
+ "Power_On_Hours": "hours",
+ "Spin_Up_Time": "milliseconds",
+ "Media_Wearout_Indicator": "percent",
+ "Percent_Life_Remaining": "percent",
+ "Percent_Lifetime_Remain": "percent",
+ "Total_LBAs_Read": "sectors",
+ "Total_LBAs_Written": "sectors",
+ "Offline_Uncorrectable": "sectors",
+ "Pending_Sector_Count": "sectors",
+ "Reallocated_Sector_Ct": "sectors",
+ "Current_Pending_Sector": "sectors",
+ "Reported_Uncorrect": "errors",
+ "Command_Timeout": "events",
+ }
+
+ if unit, ok := units[attrName]; ok {
+ return unit
+ }
+
+ // TODO: convert to bytes during data collection? (examples: NAND_Writes_32MiB, Flash_Writes_GiB)
+ if strings.HasSuffix(attrName, "MiB") || strings.HasSuffix(attrName, "GiB") {
+ if strings.Contains(attrName, "Writes") {
+ return "writes"
+ }
+ if strings.Contains(attrName, "Reads") {
+ return "reads"
+ }
+ }
+
+ if strings.Contains(attrName, "Error") {
+ return "errors"
+ }
+
+ for _, s := range []string{"_Count", "_Cnt", "_Ct"} {
+ if strings.HasSuffix(attrName, s) {
+ return "events"
+ }
+ }
+
+ return "value"
+}
+
+func attributeNameMap(attrName string) string {
+ // TODO: Handle Vendor-Specific S.M.A.R.T. Attribute Naming
+ // S.M.A.R.T. attribute names can vary slightly between vendors (e.g., "Thermal_Throttle_St" vs. "Thermal_Throttle_Status").
+ // This function ensures consistent naming.
+ return attrName
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/collect.go b/src/go/plugin/go.d/modules/smartctl/collect.go
new file mode 100644
index 000000000..35585db62
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/collect.go
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package smartctl
+
+import (
+ "fmt"
+ "maps"
+ "slices"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/tidwall/gjson"
+)
+
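+// collect runs on every update cycle, but the expensive work is rate limited:
+// devices are rediscovered at most once per ScanEvery, polled at most once per
+// PollDevicesEvery, and the cached metrics in s.mx are returned in between.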
+func (s *Smartctl) collect() (map[string]int64, error) {
+ now := time.Now()
+
+ if s.forceScan || s.isTimeToScan(now) {
+ devices, err := s.scanDevices()
+ if err != nil {
+ return nil, err
+ }
+
+ for k, dev := range s.scannedDevices {
+ if _, ok := devices[k]; !ok {
+ delete(s.scannedDevices, k)
+ delete(s.seenDevices, k)
+ s.removeDeviceCharts(dev)
+ }
+ }
+
+ s.forceDevicePoll = !maps.Equal(s.scannedDevices, devices)
+ s.scannedDevices = devices
+ s.lastScanTime = now
+ s.forceScan = false
+ }
+
+ if s.forceDevicePoll || s.isTimeToPollDevices(now) {
+ mx := make(map[string]int64)
+
+ // TODO: make it concurrent
+ for _, d := range s.scannedDevices {
+ if err := s.collectScannedDevice(mx, d); err != nil {
+ s.Warning(err)
+ continue
+ }
+ }
+
+ s.forceDevicePoll = false
+ s.lastDevicePollTime = now
+ s.mx = mx
+ }
+
+ return s.mx, nil
+}
+
+func (s *Smartctl) collectScannedDevice(mx map[string]int64, scanDev *scanDevice) error {
+ resp, err := s.exec.deviceInfo(scanDev.name, scanDev.typ, s.NoCheckPowerMode)
+ if err != nil {
+ if resp != nil && isDeviceOpenFailedNoSuchDevice(resp) && !scanDev.extra {
+ s.Infof("smartctl reported that device '%s' type '%s' no longer exists", scanDev.name, scanDev.typ)
+ s.forceScan = true
+ return nil
+ }
+ return fmt.Errorf("failed to get device info for '%s' type '%s': %v", scanDev.name, scanDev.typ, err)
+ }
+
+ if isDeviceInLowerPowerMode(resp) {
+ s.Debugf("device '%s' type '%s' is in a low-power mode, skipping", scanDev.name, scanDev.typ)
+ return nil
+ }
+
+ dev := newSmartDevice(resp)
+ if !isSmartDeviceValid(dev) {
+ return nil
+ }
+
+ if !s.seenDevices[scanDev.key()] {
+ s.seenDevices[scanDev.key()] = true
+ s.addDeviceCharts(dev)
+ }
+
+ s.collectSmartDevice(mx, dev)
+
+ return nil
+}
+
+func (s *Smartctl) collectSmartDevice(mx map[string]int64, dev *smartDevice) {
+ px := fmt.Sprintf("device_%s_type_%s_", dev.deviceName(), dev.deviceType())
+
+ if v, ok := dev.powerOnTime(); ok {
+ mx[px+"power_on_time"] = v
+ }
+ if v, ok := dev.temperature(); ok {
+ mx[px+"temperature"] = v
+ }
+ if v, ok := dev.powerCycleCount(); ok {
+ mx[px+"power_cycle_count"] = v
+ }
+ if v, ok := dev.smartStatusPassed(); ok {
+ mx[px+"smart_status_passed"] = 0
+ mx[px+"smart_status_failed"] = 0
+ if v {
+ mx[px+"smart_status_passed"] = 1
+ } else {
+ mx[px+"smart_status_failed"] = 1
+ }
+ }
+ if v, ok := dev.ataSmartErrorLogCount(); ok {
+ mx[px+"ata_smart_error_log_summary_count"] = v
+ }
+
+ if attrs, ok := dev.ataSmartAttributeTable(); ok {
+ for _, attr := range attrs {
+ if !isSmartAttrValid(attr) {
+ continue
+ }
+ n := strings.ToLower(attr.name())
+ n = strings.ReplaceAll(n, " ", "_")
+ px := fmt.Sprintf("%sattr_%s_", px, n)
+
+ if v, err := strconv.ParseInt(attr.value(), 10, 64); err == nil {
+ mx[px+"normalized"] = v
+ }
+
+ if v, err := strconv.ParseInt(attr.rawValue(), 10, 64); err == nil {
+ mx[px+"raw"] = v
+ }
+
+ rs := strings.TrimSpace(attr.rawString())
+ if i := strings.IndexByte(rs, ' '); i != -1 {
+ rs = rs[:i]
+ }
+ if v, err := strconv.ParseInt(rs, 10, 64); err == nil {
+ mx[px+"decoded"] = v
+ }
+ }
+ }
+
+ if dev.deviceType() == "scsi" {
+ sel := dev.data.Get("scsi_error_counter_log")
+ if !sel.Exists() {
+ return
+ }
+
+ for _, v := range []string{"read", "write", "verify"} {
+ for _, n := range []string{
+ //"errors_corrected_by_eccdelayed",
+ //"errors_corrected_by_eccfast",
+ //"errors_corrected_by_rereads_rewrites",
+ "total_errors_corrected",
+ "total_uncorrected_errors",
+ } {
+ key := fmt.Sprintf("%sscsi_error_log_%s_%s", px, v, n)
+ metric := fmt.Sprintf("%s.%s", v, n)
+
+ if m := sel.Get(metric); m.Exists() {
+ mx[key] = m.Int()
+ }
+ }
+ }
+ }
+}
+
+func (s *Smartctl) isTimeToScan(now time.Time) bool {
+ return s.ScanEvery.Duration().Seconds() != 0 && now.After(s.lastScanTime.Add(s.ScanEvery.Duration()))
+}
+
+func (s *Smartctl) isTimeToPollDevices(now time.Time) bool {
+ return now.After(s.lastDevicePollTime.Add(s.PollDevicesEvery.Duration()))
+
+}
+
+func isSmartDeviceValid(d *smartDevice) bool {
+ return d.deviceName() != "" && d.deviceType() != ""
+}
+
+func isSmartAttrValid(a *smartAttribute) bool {
+ return a.id() != "" && a.name() != ""
+}
+
+func isDeviceInLowerPowerMode(r *gjson.Result) bool {
+ if !isExitStatusHasBit(r, 1) {
+ return false
+ }
+
+ messages := r.Get("smartctl.messages").Array()
+
+ return slices.ContainsFunc(messages, func(msg gjson.Result) bool {
+ text := msg.Get("string").String()
+ return strings.HasPrefix(text, "Device is in") && strings.Contains(text, "mode")
+ })
+}
+
+func isDeviceOpenFailedNoSuchDevice(r *gjson.Result) bool {
+ if !isExitStatusHasBit(r, 1) {
+ return false
+ }
+
+ messages := r.Get("smartctl.messages").Array()
+
+ return slices.ContainsFunc(messages, func(msg gjson.Result) bool {
+ text := msg.Get("string").String()
+ return strings.HasSuffix(text, "No such device")
+ })
+}
+
+func isExitStatusHasBit(r *gjson.Result, bit int) bool {
+ // https://manpages.debian.org/bullseye/smartmontools/smartctl.8.en.html#EXIT_STATUS
+ status := int(r.Get("smartctl.exit_status").Int())
+ mask := 1 << bit
+ return (status & mask) != 0
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/config_schema.json b/src/go/plugin/go.d/modules/smartctl/config_schema.json
new file mode 100644
index 000000000..afe7ce1a9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/config_schema.json
@@ -0,0 +1,140 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Smartctl collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Interval for updating Netdata charts, measured in seconds. The collector may return cached data if this is shorter than the **Devices poll interval**.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the `smartctl` binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 5
+ },
+ "scan_every": {
+ "title": "Scan interval",
+ "description": "Interval for discovering new devices using `smartctl --scan`, measured in seconds. Set to 0 to scan devices only once on startup.",
+ "type": "number",
+ "minimum": 1,
+ "default": 900
+ },
+ "poll_devices_every": {
+ "title": "Devices poll interval",
+ "description": "Interval for gathering data for every device, measured in seconds. Data is cached for this interval.",
+ "type": "number",
+ "minimum": 1,
+ "default": 300
+ },
+ "no_check_power_mode": {
+ "title": "No check power mode",
+ "description": "ATA only. Skip data collection when the device is in a low-power mode. Prevents unnecessary disk spin-up.",
+ "type": "string",
+ "enum": [
+ "standby",
+ "never",
+ "sleep",
+ "idle"
+ ],
+ "default": "standby"
+ },
+ "device_selector": {
+ "title": "Device selector",
+ "description": "Specifies a [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme) to match the 'info name' of devices as reported by `smartctl --scan --json`. Only devices whose 'info name' matches this pattern will be collected.",
+ "type": "string",
+ "minimum": 1,
+ "default": "*"
+ },
+ "extra_devices": {
+ "title": "Extra devices",
+ "description": "Allows manual specification of devices not automatically detected by `smartctl --scan`. Each device entry must include both a name and a type.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "items": {
+ "title": "Device",
+ "type": [
+ "object",
+ "null"
+ ],
+ "required": [
+ "name",
+ "type"
+ ],
+ "properties": {
+ "name": {
+ "title": "Name",
+ "type": "string"
+ },
+ "type": {
+ "title": "Type",
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "timeout",
+ "scan_every",
+ "poll_devices_every",
+ "no_check_power_mode"
+ ]
+ },
+ {
+ "title": "Devices",
+ "fields": [
+ "device_selector",
+ "extra_devices"
+ ]
+ }
+ ]
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "no_check_power_mode": {
+ "ui:help": "`never` - check the device always; `sleep` - check the device unless it is in SLEEP mode; `standby` - check the device unless it is in SLEEP or STANDBY mode; `idle` - check the device unless it is in SLEEP, STANDBY or IDLE mode.",
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "device_selector": {
+ "ui:help": "Leave blank or use `*` to collect data for all devices."
+ },
+ "extra_devices": {
+ "items": {
+ "name": {
+ "ui:placeholder": "/dev/sda"
+ },
+ "type": {
+ "ui:placeholder": "jmb39x-q,3"
+ }
+ }
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/exec.go b/src/go/plugin/go.d/modules/smartctl/exec.go
new file mode 100644
index 000000000..94974c0d3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/exec.go
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package smartctl
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+
+ "github.com/tidwall/gjson"
+)
+
+func newSmartctlCliExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *smartctlCliExec {
+ return &smartctlCliExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ }
+}
+
+type smartctlCliExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+}
+
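+// scan requests a device scan via ndsudo; with open=true it uses the scan-open variant, which opens each device to reliably determine its type.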
+func (e *smartctlCliExec) scan(open bool) (*gjson.Result, error) {
+ if open {
+ return e.execute("smartctl-json-scan-open")
+ }
+ return e.execute("smartctl-json-scan")
+}
+
+func (e *smartctlCliExec) deviceInfo(deviceName, deviceType, powerMode string) (*gjson.Result, error) {
+ return e.execute("smartctl-json-device-info",
+ "--deviceName", deviceName,
+ "--deviceType", deviceType,
+ "--powerMode", powerMode,
+ )
+}
+
+func (e *smartctlCliExec) execute(args ...string) (*gjson.Result, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.ndsudoPath, args...)
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
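+ // smartctl encodes device conditions in its exit-status bits and may exit non-zero while still producing valid JSON,
+ // so only timeouts, command-line errors (exit code 1) and empty output are treated as fatal here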
+ if err != nil {
+ if errors.Is(err, context.DeadlineExceeded) || isExecExitCode(err, 1) || len(bs) == 0 {
+ return nil, fmt.Errorf("'%s' execution failed: %v", cmd, err)
+ }
+ }
+ if len(bs) == 0 {
+ return nil, fmt.Errorf("'%s' returned no output", cmd)
+ }
+
+ if !gjson.ValidBytes(bs) {
+ return nil, fmt.Errorf("'%s' returned invalid JSON output", cmd)
+ }
+
+ res := gjson.ParseBytes(bs)
+ if !res.Get("smartctl.exit_status").Exists() {
+ return nil, fmt.Errorf("'%s' returned unexpected data", cmd)
+ }
+
+ for _, msg := range res.Get("smartctl.messages").Array() {
+ if msg.Get("severity").String() == "error" {
+ return &res, fmt.Errorf("'%s' reported an error: %s", cmd, msg.Get("string"))
+ }
+ }
+
+ return &res, nil
+}
+
+func isExecExitCode(err error, exitCode int) bool {
+ var v *exec.ExitError
+ return errors.As(err, &v) && v.ExitCode() == exitCode
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/init.go b/src/go/plugin/go.d/modules/smartctl/init.go
new file mode 100644
index 000000000..6d3731a18
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/init.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package smartctl
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+func (s *Smartctl) validateConfig() error {
+ switch s.NoCheckPowerMode {
+ case "never", "sleep", "standby", "idle":
+ default:
+ return fmt.Errorf("invalid power mode '%s'", s.NoCheckPowerMode)
+ }
+
+ for _, v := range s.ExtraDevices {
+ if v.Name == "" || v.Type == "" {
+ return fmt.Errorf("invalid extra device: name and type must both be provided, got name='%s' type='%s'", v.Name, v.Type)
+ }
+ }
+
+ return nil
+}
+
+func (s *Smartctl) initDeviceSelector() (matcher.Matcher, error) {
+ if s.DeviceSelector == "" {
+ return matcher.TRUE(), nil
+ }
+
+ m, err := matcher.NewSimplePatternsMatcher(s.DeviceSelector)
+ if err != nil {
+ return nil, err
+ }
+
+ return m, nil
+}
+
+func (s *Smartctl) initSmartctlCli() (smartctlCli, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ smartctlExec := newSmartctlCliExec(ndsudoPath, s.Timeout.Duration(), s.Logger)
+
+ return smartctlExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/integrations/s.m.a.r.t..md b/src/go/plugin/go.d/modules/smartctl/integrations/s.m.a.r.t..md
new file mode 100644
index 000000000..b9eb9f368
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/integrations/s.m.a.r.t..md
@@ -0,0 +1,284 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/smartctl/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/smartctl/metadata.yaml"
+sidebar_label: "S.M.A.R.T."
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Hardware Devices and Sensors"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# S.M.A.R.T.
+
+
+<img src="https://netdata.cloud/img/smart.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: smartctl
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the health status of storage devices by analyzing S.M.A.R.T. (Self-Monitoring, Analysis, and Reporting Technology) counters.
+It relies on the [`smartctl`](https://linux.die.net/man/8/smartctl) CLI tool but avoids directly executing the binary.
+Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+Executed commands:
+- `smartctl --json --scan`
+- `smartctl --json --all {deviceName} --device {deviceType} --nocheck {powerMode}`
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per controller
+
+These metrics refer to the Storage Device.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device_name | Device name |
+| device_type | Device type |
+| model_name | Model name |
+| serial_number | Serial number |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| smartctl.device_smart_status | passed, failed | status |
+| smartctl.device_ata_smart_error_log_count | error_log | logs |
+| smartctl.device_power_on_time | power_on_time | seconds |
+| smartctl.device_temperature | temperature | Celsius |
+| smartctl.device_power_cycles_count | power | cycles |
+| smartctl.device_read_errors_rate | corrected, uncorrected | errors/s |
+| smartctl.device_write_errors_rate | corrected, uncorrected | errors/s |
+| smartctl.device_verify_errors_rate | corrected, uncorrected | errors/s |
+| smartctl.device_smart_attr_{attribute_name} | {attribute_name} | {attribute_unit} |
+| smartctl.device_smart_attr_{attribute_name}_normalized | {attribute_name} | value |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Install smartmontools (v7.0+)
+
+Install `smartmontools` version 7.0 or later using your distribution's package manager. Version 7.0 introduced the `--json` output mode, which is required for this collector to function properly.
+
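+For example, on Debian or Ubuntu (adjust the package manager command for your distribution):
+
+```bash
+sudo apt install smartmontools
+smartctl --version   # should report 7.0 or later
+```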
+
+#### For Netdata running in a Docker container
+
+1. **Install smartmontools**.
+
+ Ensure `smartctl` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=smartmontools` when starting the container.
+
+2. **Provide access to storage devices**.
+
+ Netdata requires the `SYS_RAWIO` capability and access to the storage devices to run the `smartctl` collector inside a Docker container. Here's how you can achieve this:
+
+ - `docker run`
+
+ ```bash
+ docker run --cap-add SYS_RAWIO --device /dev/sda:/dev/sda ...
+ ```
+
+ - `docker-compose.yml`
+
+ ```yaml
+ services:
+ netdata:
+ cap_add:
+ - SYS_PTRACE
+ - SYS_ADMIN
+ - SYS_RAWIO # smartctl
+ devices:
+ - "/dev/sda:/dev/sda"
+ ```
+
+ > **Multiple Devices**: These examples only show the mapping of one device (`/dev/sda`). You'll need to add additional `--device` options (in docker run) or entries in the `devices` list (in docker-compose.yml) for each storage device you want Netdata's smartctl collector to monitor.
+
+ > **NVMe Devices**: Do not map NVMe devices using this method. Netdata uses a [dedicated collector](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvme#readme) to monitor NVMe devices.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/smartctl.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/smartctl.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Interval for updating Netdata charts, measured in seconds. The collector may return cached data if this interval is shorter than the **Devices poll interval**. | 10 | no |
+| timeout | Timeout for executing the `smartctl` binary, in seconds. | 5 | no |
+| scan_every | Interval for discovering new devices using `smartctl --scan`, measured in seconds. Set to 0 to scan devices only once on startup. | 900 | no |
+| poll_devices_every | Interval for gathering data for every device, measured in seconds. Data is cached for this interval. | 300 | no |
+| device_selector | Specifies a pattern to match the 'info name' of devices as reported by `smartctl --scan --json`. | * | no |
+| extra_devices | Allows manual specification of devices not automatically detected by `smartctl --scan`. Each device entry must include both a name and a type. See "Configuration Examples" for details. | [] | no |
+| no_check_power_mode | Skip data collection when the device is in a low-power mode. Prevents unnecessary disk spin-up. | standby | no |
+
+##### no_check_power_mode
+
+The valid arguments to this option are:
+
+| Mode | Description |
+|---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| never | Check the device always. |
+| sleep | Check the device unless it is in SLEEP mode. |
+| standby | Check the device unless it is in SLEEP or STANDBY mode. In these modes most disks are not spinning, so if you want to prevent a disk from spinning up, this is probably what you want. |
+| idle | Check the device unless it is in SLEEP, STANDBY or IDLE mode. In the IDLE state, most disks are still spinning, so this is probably not what you want. |
+
+
+</details>
+
+#### Examples
+
+##### Custom devices poll interval
+
+Allows you to override the default devices poll interval (data collection).
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: smartctl
+ poll_devices_every: 60 # Collect S.M.A.R.T. statistics every 60 seconds
+
+```
+</details>
+
+##### Extra devices
+
+This example demonstrates using `extra_devices` to manually add a storage device (`/dev/sdc`) not automatically detected by `smartctl --scan`.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: smartctl
+ extra_devices:
+ - name: /dev/sdc
+ type: jmb39x-q,3
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `smartctl` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m smartctl
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `smartctl` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep smartctl
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep smartctl /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep smartctl
+```
+
+
diff --git a/src/go/plugin/go.d/modules/smartctl/metadata.yaml b/src/go/plugin/go.d/modules/smartctl/metadata.yaml
new file mode 100644
index 000000000..e748e82ae
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/metadata.yaml
@@ -0,0 +1,240 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-smartctl
+ plugin_name: go.d.plugin
+ module_name: smartctl
+ monitored_instance:
+ name: S.M.A.R.T.
+ link: "https://linux.die.net/man/8/smartd"
+ icon_filename: "smart.png"
+ categories:
+ - data-collection.hardware-devices-and-sensors
+ keywords:
+ - smart
+ - S.M.A.R.T.
+ - SCSI devices
+ - ATA devices
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the health status of storage devices by analyzing S.M.A.R.T. (Self-Monitoring, Analysis, and Reporting Technology) counters.
+ It relies on the [`smartctl`](https://linux.die.net/man/8/smartctl) CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+ Executed commands:
+ - `smartctl --json --scan`
+ - `smartctl --json --all {deviceName} --device {deviceType} --nocheck {powerMode}`
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Install smartmontools (v7.0+)
+ description: |
+ Install `smartmontools` version 7.0 or later using your distribution's package manager. Version 7.0 introduced the `--json` output mode, which is required for this collector to function properly.
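+
+ For example, on Debian or Ubuntu (adjust the package manager command for your distribution):
+
+ ```bash
+ sudo apt install smartmontools
+ smartctl --version   # should report 7.0 or later
+ ```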
+ - title: For Netdata running in a Docker container
+ description: |
+ 1. **Install smartmontools**.
+
+ Ensure `smartctl` is available in the container by setting the environment variable `NETDATA_EXTRA_DEB_PACKAGES=smartmontools` when starting the container.
+
+ 2. **Provide access to storage devices**.
+
+ Netdata requires the `SYS_RAWIO` capability and access to the storage devices to run the `smartctl` collector inside a Docker container. Here's how you can achieve this:
+
+ - `docker run`
+
+ ```bash
+ docker run --cap-add SYS_RAWIO --device /dev/sda:/dev/sda ...
+ ```
+
+ - `docker-compose.yml`
+
+ ```yaml
+ services:
+ netdata:
+ cap_add:
+ - SYS_PTRACE
+ - SYS_ADMIN
+ - SYS_RAWIO # smartctl
+ devices:
+ - "/dev/sda:/dev/sda"
+ ```
+
+ > **Multiple Devices**: These examples only show the mapping of one device (`/dev/sda`). You'll need to add additional `--device` options (in docker run) or entries in the `devices` list (in docker-compose.yml) for each storage device you want Netdata's smartctl collector to monitor.
+
+ > **NVMe Devices**: Do not map NVMe devices using this method. Netdata uses a [dedicated collector](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/modules/nvme#readme) to monitor NVMe devices.
+ configuration:
+ file:
+ name: go.d/smartctl.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Interval for updating Netdata charts, measured in seconds. The collector may return cached data if this interval is shorter than the **Devices poll interval**.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: Timeout for executing the `smartctl` binary, in seconds.
+ default_value: 5
+ required: false
+ - name: scan_every
+ description: Interval for discovering new devices using `smartctl --scan`, measured in seconds. Set to 0 to scan devices only once on startup.
+ default_value: 900
+ required: false
+ - name: poll_devices_every
+ description: Interval for gathering data for every device, measured in seconds. Data is cached for this interval.
+ default_value: 300
+ required: false
+ - name: device_selector
+ description: "Specifies a pattern to match the 'info name' of devices as reported by `smartctl --scan --json`."
+ default_value: "*"
+ required: false
+ - name: extra_devices
+ description: "Allows manual specification of devices not automatically detected by `smartctl --scan`. Each device entry must include both a name and a type. See \"Configuration Examples\" for details."
+ default_value: "[]"
+ required: false
+ - name: no_check_power_mode
+ description: "Skip data collection when the device is in a low-power mode. Prevents unnecessary disk spin-up."
+ default_value: standby
+ required: false
+ detailed_description: |
+ The valid arguments to this option are:
+
+ | Mode | Description |
+ |---------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+ | never | Check the device always. |
+ | sleep | Check the device unless it is in SLEEP mode. |
+ | standby | Check the device unless it is in SLEEP or STANDBY mode. In these modes most disks are not spinning, so if you want to prevent a disk from spinning up, this is probably what you want. |
+ | idle | Check the device unless it is in SLEEP, STANDBY or IDLE mode. In the IDLE state, most disks are still spinning, so this is probably not what you want. |
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom devices poll interval
+ description: Allows you to override the default devices poll interval (data collection).
+ config: |
+ jobs:
+ - name: smartctl
+ poll_devices_every: 60 # Collect S.M.A.R.T. statistics every 60 seconds
+ - name: Extra devices
+ description: |
+ This example demonstrates using `extra_devices` to manually add a storage device (`/dev/sdc`) not automatically detected by `smartctl --scan`.
+ config: |
+ jobs:
+ - name: smartctl
+ extra_devices:
+ - name: /dev/sdc
+ type: jmb39x-q,3
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: controller
+ description: These metrics refer to the Storage Device.
+ labels:
+ - name: device_name
+ description: Device name
+ - name: device_type
+ description: Device type
+ - name: model_name
+ description: Model name
+ - name: serial_number
+ description: Serial number
+ metrics:
+ - name: smartctl.device_smart_status
+ description: Device smart status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: passed
+ - name: failed
+ - name: smartctl.device_ata_smart_error_log_count
+ description: Device ATA smart error log count
+ unit: logs
+ chart_type: line
+ dimensions:
+ - name: error_log
+ - name: smartctl.device_power_on_time
+ description: Device power on time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: power_on_time
+ - name: smartctl.device_temperature
+ description: Device temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: smartctl.device_power_cycles_count
+ description: Device power cycles
+ unit: cycles
+ chart_type: line
+ dimensions:
+ - name: power
+ - name: smartctl.device_read_errors_rate
+ description: Device read errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: corrected
+ - name: uncorrected
+ - name: smartctl.device_write_errors_rate
+ description: Device write errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: corrected
+ - name: uncorrected
+ - name: smartctl.device_verify_errors_rate
+ description: Device verify errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: corrected
+ - name: uncorrected
+ - name: smartctl.device_smart_attr_{attribute_name}
+ description: Device smart attribute {attribute_name}
+ unit: '{attribute_unit}'
+ chart_type: line
+ dimensions:
+ - name: '{attribute_name}'
+ - name: smartctl.device_smart_attr_{attribute_name}_normalized
+ description: Device smart attribute {attribute_name} normalized
+ unit: value
+ chart_type: line
+ dimensions:
+ - name: '{attribute_name}'
diff --git a/src/go/plugin/go.d/modules/smartctl/scan.go b/src/go/plugin/go.d/modules/smartctl/scan.go
new file mode 100644
index 000000000..5564897a4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/scan.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package smartctl
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+)
+
+type scanDevice struct {
+ name string
+ infoName string
+ typ string
+ extra bool // added via config "extra_devices"
+}
+
+func (s *scanDevice) key() string {
+ return fmt.Sprintf("%s|%s", s.name, s.typ)
+}
+
+func (s *scanDevice) shortName() string {
+ return strings.TrimPrefix(s.name, "/dev/")
+}
+
+func (s *Smartctl) scanDevices() (map[string]*scanDevice, error) {
+ // Issue on Discord: https://discord.com/channels/847502280503590932/1261747175361347644/1261747175361347644
+ // "sat" devices being identified as "scsi" with --scan, and then later
+ // code attempts to validate the type by calling `smartctl` with the "scsi" type.
+ // This validation can trigger unintended "Enabling discard_zeroes_data" messages in system logs (dmesg).
+ // To address this specific issue we use `smartctl --scan-open` as a workaround.
+ // This method reliably identifies device types.
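+ // Note: --scan-open opens every device, which can wake up drives in low-power states,
+ // so it is only used when power-mode checking is disabled ("never").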
+ scanOpen := s.NoCheckPowerMode == "never"
+
+ resp, err := s.exec.scan(scanOpen)
+ if err != nil {
+ return nil, fmt.Errorf("failed to scan devices: %v", err)
+ }
+
+ devices := make(map[string]*scanDevice)
+
+ for _, d := range resp.Get("devices").Array() {
+ dev := &scanDevice{
+ name: d.Get("name").String(),
+ infoName: d.Get("info_name").String(),
+ typ: d.Get("type").String(),
+ }
+
+ if dev.name == "" || dev.typ == "" {
+ s.Warningf("device info missing required fields (name: '%s', type: '%s'), skipping", dev.name, dev.typ)
+ continue
+ }
+
+ if !s.deviceSr.MatchString(dev.infoName) {
+ s.Debugf("device %s does not match selector, skipping it", dev.infoName)
+ continue
+ }
+
+ if !scanOpen && dev.typ == "scsi" {
+ // `smartctl --scan` attempts to guess the device type based on the path, but this can be unreliable.
+ // Accurate device type information is crucial because we use the `--device` option to gather data.
+ // Using the wrong type can lead to issues.
+ // For example, using 'scsi' for 'sat' devices prevents `smartctl` from issuing the necessary ATA commands.
+
+ s.handleGuessedScsiScannedDevice(dev)
+ }
+
+ s.Debugf("smartctl scan found device '%s' type '%s' info_name '%s'", dev.name, dev.typ, dev.infoName)
+
+ devices[dev.key()] = dev
+ }
+
+ s.Debugf("smartctl scan found %d devices", len(devices))
+
+ for _, v := range s.ExtraDevices {
+ dev := &scanDevice{name: v.Name, typ: v.Type, extra: true}
+
+ if _, ok := devices[dev.key()]; !ok {
+ devices[dev.key()] = dev
+ }
+ }
+
+ if len(devices) == 0 {
+ return nil, errors.New("no devices found during scan")
+ }
+
+ return devices, nil
+}
+
+func (s *Smartctl) handleGuessedScsiScannedDevice(dev *scanDevice) {
+ if dev.typ != "scsi" || s.hasScannedDevice(dev) {
+ return
+ }
+
+ d := &scanDevice{name: dev.name, typ: "sat"}
+
+ if s.hasScannedDevice(d) {
+ dev.typ = d.typ
+ return
+ }
+
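+ // probe the device as "sat": if it responds with ATA SMART attributes, prefer "sat" over the guessed "scsi" type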
+ resp, _ := s.exec.deviceInfo(dev.name, "sat", s.NoCheckPowerMode)
+ if resp == nil || resp.Get("smartctl.exit_status").Int() != 0 {
+ return
+ }
+
+ atts, ok := newSmartDevice(resp).ataSmartAttributeTable()
+ if !ok || len(atts) == 0 {
+ return
+ }
+
+ s.Debugf("changing device '%s' type 'scsi' -> 'sat'", dev.name)
+ dev.typ = "sat"
+}
+
+func (s *Smartctl) hasScannedDevice(d *scanDevice) bool {
+ _, ok := s.scannedDevices[d.key()]
+ return ok
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/smart_device.go b/src/go/plugin/go.d/modules/smartctl/smart_device.go
new file mode 100644
index 000000000..280281aad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/smart_device.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package smartctl
+
+import (
+ "strings"
+
+ "github.com/tidwall/gjson"
+)
+
+func newSmartDevice(deviceData *gjson.Result) *smartDevice {
+ return &smartDevice{
+ data: deviceData,
+ }
+}
+
+type smartDevice struct {
+ data *gjson.Result
+}
+
+func (d *smartDevice) deviceName() string {
+ v := d.data.Get("device.name").String()
+ return strings.TrimPrefix(v, "/dev/")
+}
+
+func (d *smartDevice) deviceType() string {
+ return d.data.Get("device.type").String()
+}
+
+func (d *smartDevice) serialNumber() string {
+ return d.data.Get("serial_number").String()
+}
+
+func (d *smartDevice) modelName() string {
+ for _, s := range []string{"model_name", "scsi_model_name"} {
+ if v := d.data.Get(s); v.Exists() {
+ return v.String()
+ }
+ }
+ return "unknown"
+}
+
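+// powerOnTime returns the accumulated power-on time in seconds (smartctl reports hours and, for some drives, minutes)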
+func (d *smartDevice) powerOnTime() (int64, bool) {
+ h := d.data.Get("power_on_time.hours")
+ if !h.Exists() {
+ return 0, false
+ }
+ m := d.data.Get("power_on_time.minutes")
+ return h.Int()*60*60 + m.Int()*60, true
+}
+
+func (d *smartDevice) temperature() (int64, bool) {
+ v := d.data.Get("temperature.current")
+ return v.Int(), v.Exists()
+}
+
+func (d *smartDevice) powerCycleCount() (int64, bool) {
+ for _, s := range []string{"power_cycle_count", "scsi_start_stop_cycle_counter.accumulated_start_stop_cycles"} {
+ if v := d.data.Get(s); v.Exists() {
+ return v.Int(), true
+ }
+ }
+ return 0, false
+}
+
+func (d *smartDevice) smartStatusPassed() (bool, bool) {
+ v := d.data.Get("smart_status.passed")
+ return v.Bool(), v.Exists()
+}
+
+func (d *smartDevice) ataSmartErrorLogCount() (int64, bool) {
+ v := d.data.Get("ata_smart_error_log.summary.count")
+ return v.Int(), v.Exists()
+}
+
+func (d *smartDevice) ataSmartAttributeTable() ([]*smartAttribute, bool) {
+ table := d.data.Get("ata_smart_attributes.table")
+ if !table.Exists() || !table.IsArray() {
+ return nil, false
+ }
+
+ var attrs []*smartAttribute
+
+ for _, data := range table.Array() {
+ attrs = append(attrs, newSmartDeviceAttribute(data))
+ }
+
+ return attrs, true
+}
+
+func newSmartDeviceAttribute(attrData gjson.Result) *smartAttribute {
+ return &smartAttribute{
+ data: attrData,
+ }
+}
+
+type smartAttribute struct {
+ data gjson.Result
+}
+
+func (a *smartAttribute) id() string {
+ return a.data.Get("id").String()
+}
+
+func (a *smartAttribute) name() string {
+ return a.data.Get("name").String()
+}
+
+func (a *smartAttribute) value() string {
+ return a.data.Get("value").String()
+}
+
+func (a *smartAttribute) rawValue() string {
+ return a.data.Get("raw.value").String()
+}
+
+func (a *smartAttribute) rawString() string {
+ return a.data.Get("raw.string").String()
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/smartctl.go b/src/go/plugin/go.d/modules/smartctl/smartctl.go
new file mode 100644
index 000000000..36f390a37
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/smartctl.go
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package smartctl
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/tidwall/gjson"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("smartctl", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Smartctl {
+ return &Smartctl{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 5),
+ ScanEvery: web.Duration(time.Minute * 15),
+ PollDevicesEvery: web.Duration(time.Minute * 5),
+ NoCheckPowerMode: "standby",
+ DeviceSelector: "*",
+ },
+ charts: &module.Charts{},
+ forceScan: true,
+ deviceSr: matcher.TRUE(),
+ seenDevices: make(map[string]bool),
+ }
+}
+
+type (
+ Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ ScanEvery web.Duration `yaml:"scan_every,omitempty" json:"scan_every"`
+ PollDevicesEvery web.Duration `yaml:"poll_devices_every,omitempty" json:"poll_devices_every"`
+ NoCheckPowerMode string `yaml:"no_check_power_mode,omitempty" json:"no_check_power_mode"`
+ DeviceSelector string `yaml:"device_selector,omitempty" json:"device_selector"`
+ ExtraDevices []ConfigExtraDevice `yaml:"extra_devices,omitempty" json:"extra_devices"`
+ }
+ ConfigExtraDevice struct {
+ Name string `yaml:"name" json:"name"`
+ Type string `yaml:"type" json:"type"`
+ }
+)
+
+type (
+ Smartctl struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec smartctlCli
+
+ deviceSr matcher.Matcher
+
+ lastScanTime time.Time
+ forceScan bool
+ scannedDevices map[string]*scanDevice
+
+ lastDevicePollTime time.Time
+ forceDevicePoll bool
+
+ seenDevices map[string]bool
+ mx map[string]int64
+ }
+ smartctlCli interface {
+ scan(open bool) (*gjson.Result, error)
+ deviceInfo(deviceName, deviceType, powerMode string) (*gjson.Result, error)
+ }
+)
+
+func (s *Smartctl) Configuration() any {
+ return s.Config
+}
+
+func (s *Smartctl) Init() error {
+ if err := s.validateConfig(); err != nil {
+ s.Errorf("config validation error: %s", err)
+ return err
+ }
+
+ sr, err := s.initDeviceSelector()
+ if err != nil {
+ s.Errorf("device selector initialization: %v", err)
+ return err
+ }
+ s.deviceSr = sr
+
+ smartctlExec, err := s.initSmartctlCli()
+ if err != nil {
+ s.Errorf("smartctl exec initialization: %v", err)
+ return err
+ }
+ s.exec = smartctlExec
+
+ return nil
+}
+
+func (s *Smartctl) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (s *Smartctl) Charts() *module.Charts {
+ return s.charts
+}
+
+func (s *Smartctl) Collect() map[string]int64 {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (s *Smartctl) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/smartctl/smartctl_test.go b/src/go/plugin/go.d/modules/smartctl/smartctl_test.go
new file mode 100644
index 000000000..7c56605f6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/smartctl_test.go
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package smartctl
+
+import (
+ "fmt"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/tidwall/gjson"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataTypeSataScan, _ = os.ReadFile("testdata/type-sat/scan.json")
+ dataTypeSataDeviceHDDSda, _ = os.ReadFile("testdata/type-sat/device-hdd-sda.json")
+ dataTypeSataDeviceSSDSdc, _ = os.ReadFile("testdata/type-sat/device-ssd-sdc.json")
+
+ dataTypeNvmeScan, _ = os.ReadFile("testdata/type-nvme/scan.json")
+ dataTypeNvmeDeviceNvme0, _ = os.ReadFile("testdata/type-nvme/device-nvme0.json")
+ dataTypeNvmeDeviceNvme1, _ = os.ReadFile("testdata/type-nvme/device-nvme1.json")
+
+ dataTypeScsiScan, _ = os.ReadFile("testdata/type-scsi/scan.json")
+ dataTypeScsiDeviceSda, _ = os.ReadFile("testdata/type-scsi/device-sda.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataTypeSataScan": dataTypeSataScan,
+ "dataTypeSataDeviceHDDSda": dataTypeSataDeviceHDDSda,
+ "dataTypeSataDeviceSSDSdc": dataTypeSataDeviceSSDSdc,
+
+ "dataTypeNvmeScan": dataTypeNvmeScan,
+ "dataTypeNvmeDeviceNvme0": dataTypeNvmeDeviceNvme0,
+ "dataTypeNvmeDeviceNvme1": dataTypeNvmeDeviceNvme1,
+
+ "dataTypeScsiScan": dataTypeScsiScan,
+ "dataTypeScsiDeviceSda": dataTypeScsiDeviceSda,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSmartctl_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Smartctl{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestSmartctl_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if invalid power mode": {
+ wantFail: true,
+ config: func() Config {
+ cfg := New().Config
+ cfg.NoCheckPowerMode = "invalid"
+ return cfg
+ }(),
+ },
+ "fails if 'ndsudo' not found": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ smart := New()
+
+ if test.wantFail {
+ assert.Error(t, smart.Init())
+ } else {
+ assert.NoError(t, smart.Init())
+ }
+ })
+ }
+}
+
+func TestSmartctl_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Smartctl
+ }{
+ "not initialized exec": {
+ prepare: func() *Smartctl {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Smartctl {
+ smart := New()
+ smart.exec = prepareMockOkTypeSata()
+ _ = smart.Check()
+ return smart
+ },
+ },
+ "after collect": {
+ prepare: func() *Smartctl {
+ smart := New()
+ smart.exec = prepareMockOkTypeSata()
+ _ = smart.Collect()
+ return smart
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ smart := test.prepare()
+
+ assert.NotPanics(t, smart.Cleanup)
+ })
+ }
+}
+
+func TestSmartctl_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockSmartctlCliExec
+ wantFail bool
+ }{
+ "success type sata devices": {
+ wantFail: false,
+ prepareMock: prepareMockOkTypeSata,
+ },
+ "success type nvme devices": {
+ wantFail: false,
+ prepareMock: prepareMockOkTypeNvme,
+ },
+ "error on scan": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnScan,
+ },
+ "unexpected response on scan": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response on scan": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ smart := New()
+ mock := test.prepareMock()
+ smart.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, smart.Check())
+ } else {
+ assert.NoError(t, smart.Check())
+ }
+ })
+ }
+}
+
+func TestSmartctl_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockSmartctlCliExec
+ prepareConfig func() Config
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success type sata devices": {
+ prepareMock: prepareMockOkTypeSata,
+ wantCharts: 68,
+ wantMetrics: map[string]int64{
+ "device_sda_type_sat_ata_smart_error_log_summary_count": 0,
+ "device_sda_type_sat_attr_current_pending_sector_decoded": 0,
+ "device_sda_type_sat_attr_current_pending_sector_normalized": 100,
+ "device_sda_type_sat_attr_current_pending_sector_raw": 0,
+ "device_sda_type_sat_attr_load_cycle_count_decoded": 360,
+ "device_sda_type_sat_attr_load_cycle_count_normalized": 100,
+ "device_sda_type_sat_attr_load_cycle_count_raw": 360,
+ "device_sda_type_sat_attr_offline_uncorrectable_decoded": 0,
+ "device_sda_type_sat_attr_offline_uncorrectable_normalized": 100,
+ "device_sda_type_sat_attr_offline_uncorrectable_raw": 0,
+ "device_sda_type_sat_attr_power-off_retract_count_decoded": 360,
+ "device_sda_type_sat_attr_power-off_retract_count_normalized": 100,
+ "device_sda_type_sat_attr_power-off_retract_count_raw": 360,
+ "device_sda_type_sat_attr_power_cycle_count_decoded": 12,
+ "device_sda_type_sat_attr_power_cycle_count_normalized": 100,
+ "device_sda_type_sat_attr_power_cycle_count_raw": 12,
+ "device_sda_type_sat_attr_power_on_hours_decoded": 8244,
+ "device_sda_type_sat_attr_power_on_hours_normalized": 99,
+ "device_sda_type_sat_attr_power_on_hours_raw": 8244,
+ "device_sda_type_sat_attr_raw_read_error_rate_decoded": 0,
+ "device_sda_type_sat_attr_raw_read_error_rate_normalized": 100,
+ "device_sda_type_sat_attr_raw_read_error_rate_raw": 0,
+ "device_sda_type_sat_attr_reallocated_event_count_decoded": 0,
+ "device_sda_type_sat_attr_reallocated_event_count_normalized": 100,
+ "device_sda_type_sat_attr_reallocated_event_count_raw": 0,
+ "device_sda_type_sat_attr_reallocated_sector_ct_decoded": 0,
+ "device_sda_type_sat_attr_reallocated_sector_ct_normalized": 100,
+ "device_sda_type_sat_attr_reallocated_sector_ct_raw": 0,
+ "device_sda_type_sat_attr_seek_error_rate_decoded": 0,
+ "device_sda_type_sat_attr_seek_error_rate_normalized": 100,
+ "device_sda_type_sat_attr_seek_error_rate_raw": 0,
+ "device_sda_type_sat_attr_seek_time_performance_decoded": 15,
+ "device_sda_type_sat_attr_seek_time_performance_normalized": 140,
+ "device_sda_type_sat_attr_seek_time_performance_raw": 15,
+ "device_sda_type_sat_attr_spin_retry_count_decoded": 0,
+ "device_sda_type_sat_attr_spin_retry_count_normalized": 100,
+ "device_sda_type_sat_attr_spin_retry_count_raw": 0,
+ "device_sda_type_sat_attr_spin_up_time_decoded": 281,
+ "device_sda_type_sat_attr_spin_up_time_normalized": 86,
+ "device_sda_type_sat_attr_spin_up_time_raw": 25788088601,
+ "device_sda_type_sat_attr_start_stop_count_decoded": 12,
+ "device_sda_type_sat_attr_start_stop_count_normalized": 100,
+ "device_sda_type_sat_attr_start_stop_count_raw": 12,
+ "device_sda_type_sat_attr_temperature_celsius_decoded": 49,
+ "device_sda_type_sat_attr_temperature_celsius_normalized": 43,
+ "device_sda_type_sat_attr_temperature_celsius_raw": 240519741489,
+ "device_sda_type_sat_attr_throughput_performance_decoded": 48,
+ "device_sda_type_sat_attr_throughput_performance_normalized": 148,
+ "device_sda_type_sat_attr_throughput_performance_raw": 48,
+ "device_sda_type_sat_attr_udma_crc_error_count_decoded": 0,
+ "device_sda_type_sat_attr_udma_crc_error_count_normalized": 100,
+ "device_sda_type_sat_attr_udma_crc_error_count_raw": 0,
+ "device_sda_type_sat_attr_unknown_attribute_decoded": 100,
+ "device_sda_type_sat_attr_unknown_attribute_normalized": 100,
+ "device_sda_type_sat_attr_unknown_attribute_raw": 100,
+ "device_sda_type_sat_power_cycle_count": 12,
+ "device_sda_type_sat_power_on_time": 29678400,
+ "device_sda_type_sat_smart_status_failed": 0,
+ "device_sda_type_sat_smart_status_passed": 1,
+ "device_sda_type_sat_temperature": 49,
+ "device_sdc_type_sat_ata_smart_error_log_summary_count": 0,
+ "device_sdc_type_sat_attr_available_reservd_space_decoded": 100,
+ "device_sdc_type_sat_attr_available_reservd_space_normalized": 100,
+ "device_sdc_type_sat_attr_available_reservd_space_raw": 100,
+ "device_sdc_type_sat_attr_command_timeout_decoded": 0,
+ "device_sdc_type_sat_attr_command_timeout_normalized": 100,
+ "device_sdc_type_sat_attr_command_timeout_raw": 0,
+ "device_sdc_type_sat_attr_end-to-end_error_decoded": 0,
+ "device_sdc_type_sat_attr_end-to-end_error_normalized": 100,
+ "device_sdc_type_sat_attr_end-to-end_error_raw": 0,
+ "device_sdc_type_sat_attr_media_wearout_indicator_decoded": 65406,
+ "device_sdc_type_sat_attr_media_wearout_indicator_normalized": 100,
+ "device_sdc_type_sat_attr_media_wearout_indicator_raw": 65406,
+ "device_sdc_type_sat_attr_power_cycle_count_decoded": 13,
+ "device_sdc_type_sat_attr_power_cycle_count_normalized": 100,
+ "device_sdc_type_sat_attr_power_cycle_count_raw": 13,
+ "device_sdc_type_sat_attr_power_on_hours_decoded": 8244,
+ "device_sdc_type_sat_attr_power_on_hours_normalized": 100,
+ "device_sdc_type_sat_attr_power_on_hours_raw": 8244,
+ "device_sdc_type_sat_attr_reallocated_sector_ct_decoded": 0,
+ "device_sdc_type_sat_attr_reallocated_sector_ct_normalized": 100,
+ "device_sdc_type_sat_attr_reallocated_sector_ct_raw": 0,
+ "device_sdc_type_sat_attr_reported_uncorrect_decoded": 0,
+ "device_sdc_type_sat_attr_reported_uncorrect_normalized": 100,
+ "device_sdc_type_sat_attr_reported_uncorrect_raw": 0,
+ "device_sdc_type_sat_attr_temperature_celsius_decoded": 27,
+ "device_sdc_type_sat_attr_temperature_celsius_normalized": 73,
+ "device_sdc_type_sat_attr_temperature_celsius_raw": 184684970011,
+ "device_sdc_type_sat_attr_total_lbas_read_decoded": 76778,
+ "device_sdc_type_sat_attr_total_lbas_read_normalized": 253,
+ "device_sdc_type_sat_attr_total_lbas_read_raw": 76778,
+ "device_sdc_type_sat_attr_total_lbas_written_decoded": 173833,
+ "device_sdc_type_sat_attr_total_lbas_written_normalized": 253,
+ "device_sdc_type_sat_attr_total_lbas_written_raw": 173833,
+ "device_sdc_type_sat_attr_udma_crc_error_count_decoded": 0,
+ "device_sdc_type_sat_attr_udma_crc_error_count_normalized": 100,
+ "device_sdc_type_sat_attr_udma_crc_error_count_raw": 0,
+ "device_sdc_type_sat_attr_unknown_attribute_decoded": 0,
+ "device_sdc_type_sat_attr_unknown_attribute_normalized": 0,
+ "device_sdc_type_sat_attr_unknown_attribute_raw": 0,
+ "device_sdc_type_sat_attr_unknown_ssd_attribute_decoded": 4694419309637,
+ "device_sdc_type_sat_attr_unknown_ssd_attribute_normalized": 4,
+ "device_sdc_type_sat_attr_unknown_ssd_attribute_raw": 4694419309637,
+ "device_sdc_type_sat_power_cycle_count": 13,
+ "device_sdc_type_sat_power_on_time": 29678400,
+ "device_sdc_type_sat_smart_status_failed": 0,
+ "device_sdc_type_sat_smart_status_passed": 1,
+ "device_sdc_type_sat_temperature": 27,
+ },
+ },
+ "success type nvme devices": {
+ prepareMock: prepareMockOkTypeNvme,
+ wantCharts: 4,
+ wantMetrics: map[string]int64{
+ "device_nvme0_type_nvme_power_cycle_count": 2,
+ "device_nvme0_type_nvme_power_on_time": 11206800,
+ "device_nvme0_type_nvme_smart_status_failed": 0,
+ "device_nvme0_type_nvme_smart_status_passed": 1,
+ "device_nvme0_type_nvme_temperature": 39,
+ },
+ },
+ "success type nvme devices with extra": {
+ prepareMock: prepareMockOkTypeNvme,
+ prepareConfig: func() Config {
+ cfg := New().Config
+ cfg.ExtraDevices = []ConfigExtraDevice{
+ {Name: "/dev/nvme1", Type: "nvme"},
+ }
+ return cfg
+ },
+ wantCharts: 8,
+ wantMetrics: map[string]int64{
+ "device_nvme0_type_nvme_power_cycle_count": 2,
+ "device_nvme0_type_nvme_power_on_time": 11206800,
+ "device_nvme0_type_nvme_smart_status_failed": 0,
+ "device_nvme0_type_nvme_smart_status_passed": 1,
+ "device_nvme0_type_nvme_temperature": 39,
+ "device_nvme1_type_nvme_power_cycle_count": 5,
+ "device_nvme1_type_nvme_power_on_time": 17038800,
+ "device_nvme1_type_nvme_smart_status_failed": 0,
+ "device_nvme1_type_nvme_smart_status_passed": 1,
+ "device_nvme1_type_nvme_temperature": 36,
+ },
+ },
+ "success type scsi devices": {
+ prepareMock: prepareMockOkTypeScsi,
+ wantCharts: 7,
+ wantMetrics: map[string]int64{
+ "device_sda_type_scsi_power_cycle_count": 4,
+ "device_sda_type_scsi_power_on_time": 5908920,
+ "device_sda_type_scsi_scsi_error_log_read_total_errors_corrected": 647736,
+ "device_sda_type_scsi_scsi_error_log_read_total_uncorrected_errors": 0,
+ "device_sda_type_scsi_scsi_error_log_verify_total_errors_corrected": 0,
+ "device_sda_type_scsi_scsi_error_log_verify_total_uncorrected_errors": 0,
+ "device_sda_type_scsi_scsi_error_log_write_total_errors_corrected": 0,
+ "device_sda_type_scsi_scsi_error_log_write_total_uncorrected_errors": 0,
+ "device_sda_type_scsi_smart_status_failed": 0,
+ "device_sda_type_scsi_smart_status_passed": 1,
+ "device_sda_type_scsi_temperature": 34,
+ },
+ },
+ "error on scan": {
+ prepareMock: prepareMockErrOnScan,
+ },
+ "unexpected response on scan": {
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response on scan": {
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ smart := New()
+ if test.prepareConfig != nil {
+ smart.Config = test.prepareConfig()
+ }
+ mock := test.prepareMock()
+ smart.exec = mock
+ smart.ScanEvery = web.Duration(time.Microsecond * 1)
+ smart.PollDevicesEvery = web.Duration(time.Microsecond * 1)
+
+ var mx map[string]int64
+ for i := 0; i < 10; i++ {
+ mx = smart.Collect()
+ }
+
+ assert.Equal(t, test.wantMetrics, mx)
+ assert.Len(t, *smart.Charts(), test.wantCharts)
+ testMetricsHasAllChartsDims(t, smart, mx)
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, smart *Smartctl, mx map[string]int64) {
+ for _, chart := range *smart.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareMockOkTypeSata() *mockSmartctlCliExec {
+ return &mockSmartctlCliExec{
+ errOnScan: false,
+ scanData: dataTypeSataScan,
+ deviceDataFunc: func(deviceName, deviceType, powerMode string) ([]byte, error) {
+ if deviceType != "sat" {
+ return nil, fmt.Errorf("unexpected device type %s", deviceType)
+ }
+ switch deviceName {
+ case "/dev/sda":
+ return dataTypeSataDeviceHDDSda, nil
+ case "/dev/sdc":
+ return dataTypeSataDeviceSSDSdc, nil
+ default:
+ return nil, fmt.Errorf("unexpected device name %s", deviceName)
+ }
+ },
+ }
+}
+
+func prepareMockOkTypeNvme() *mockSmartctlCliExec {
+ return &mockSmartctlCliExec{
+ errOnScan: false,
+ scanData: dataTypeNvmeScan,
+ deviceDataFunc: func(deviceName, deviceType, powerMode string) ([]byte, error) {
+ if deviceType != "nvme" {
+ return nil, fmt.Errorf("unexpected device type %s", deviceType)
+ }
+ switch deviceName {
+ case "/dev/nvme0":
+ return dataTypeNvmeDeviceNvme0, nil
+ case "/dev/nvme1":
+ return dataTypeNvmeDeviceNvme1, nil
+ default:
+ return nil, fmt.Errorf("unexpected device name %s", deviceName)
+ }
+ },
+ }
+}
+
+func prepareMockOkTypeScsi() *mockSmartctlCliExec {
+ return &mockSmartctlCliExec{
+ errOnScan: false,
+ scanData: dataTypeScsiScan,
+ deviceDataFunc: func(deviceName, deviceType, powerMode string) ([]byte, error) {
+ if deviceType != "scsi" {
+ return nil, fmt.Errorf("unexpected device type %s", deviceType)
+ }
+ switch deviceName {
+ case "/dev/sda":
+ return dataTypeScsiDeviceSda, nil
+ default:
+ return nil, fmt.Errorf("unexpected device name %s", deviceName)
+ }
+ },
+ }
+}
+
+func prepareMockErrOnScan() *mockSmartctlCliExec {
+ return &mockSmartctlCliExec{
+ errOnScan: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockSmartctlCliExec {
+ return &mockSmartctlCliExec{
+ scanData: []byte(randomJsonData),
+ deviceDataFunc: func(_, _, _ string) ([]byte, error) { return []byte(randomJsonData), nil },
+ }
+}
+
+func prepareMockEmptyResponse() *mockSmartctlCliExec {
+ return &mockSmartctlCliExec{}
+}
+
+type mockSmartctlCliExec struct {
+ errOnScan bool
+ scanData []byte
+ deviceDataFunc func(deviceName, deviceType, powerMode string) ([]byte, error)
+}
+
+func (m *mockSmartctlCliExec) scan(_ bool) (*gjson.Result, error) {
+ if m.errOnScan {
+ return nil, fmt.Errorf("mock.scan() error")
+ }
+ res := gjson.ParseBytes(m.scanData)
+ return &res, nil
+}
+
+func (m *mockSmartctlCliExec) deviceInfo(deviceName, deviceType, powerMode string) (*gjson.Result, error) {
+ if m.deviceDataFunc == nil {
+ return nil, nil
+ }
+ bs, err := m.deviceDataFunc(deviceName, deviceType, powerMode)
+ if err != nil {
+ return nil, err
+ }
+ res := gjson.ParseBytes(bs)
+ return &res, nil
+}
+
+var randomJsonData = `
+{
+ "elephant": {
+ "burn": false,
+ "mountain": true,
+ "fog": false,
+ "skin": -1561907625,
+ "burst": "anyway",
+ "shadow": 1558616893
+ },
+ "start": "ever",
+ "base": 2093056027,
+ "mission": -2007590351,
+ "victory": 999053756,
+ "die": false
+}
+`
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/config.json b/src/go/plugin/go.d/modules/smartctl/testdata/config.json
new file mode 100644
index 000000000..41c69da51
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/config.json
@@ -0,0 +1,14 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "scan_every": 123.123,
+ "poll_devices_every": 123.123,
+ "no_check_power_mode": "ok",
+ "device_selector": "ok",
+ "extra_devices": [
+ {
+ "name": "ok",
+ "type": "ok"
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/config.yaml b/src/go/plugin/go.d/modules/smartctl/testdata/config.yaml
new file mode 100644
index 000000000..b0b77d53d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/config.yaml
@@ -0,0 +1,9 @@
+update_every: 123
+timeout: 123.123
+scan_every: 123.123
+poll_devices_every: 123.123
+no_check_power_mode: "ok"
+device_selector: "ok"
+extra_devices:
+ - name: "ok"
+ type: "ok"
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/device-nvme0.json b/src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/device-nvme0.json
new file mode 100644
index 000000000..1b31d322d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/device-nvme0.json
@@ -0,0 +1,112 @@
+{
+ "json_format_version": [
+ 1,
+ 0
+ ],
+ "smartctl": {
+ "version": [
+ 7,
+ 3
+ ],
+ "svn_revision": "5338",
+ "platform_info": "REDACTED",
+ "build_info": "(local build)",
+ "argv": [
+ "smartctl",
+ "--all",
+ "--json",
+ "--device=nvme",
+ "/dev/nvme0"
+ ],
+ "exit_status": 0
+ },
+ "local_time": {
+ "time_t": 1714480742,
+ "asctime": "Tue Apr 30 15:39:02 2024 EEST"
+ },
+ "device": {
+ "name": "/dev/nvme0",
+ "info_name": "/dev/nvme0",
+ "type": "nvme",
+ "protocol": "NVMe"
+ },
+ "model_name": "Seagate FireCuda 530 ZP4000GM30023",
+ "serial_number": "REDACTED",
+ "firmware_version": "REDACTED",
+ "nvme_pci_vendor": {
+ "id": 7089,
+ "subsystem_id": 7089
+ },
+ "nvme_ieee_oui_identifier": 6584743,
+ "nvme_total_capacity": 4000787030016,
+ "nvme_unallocated_capacity": 0,
+ "nvme_controller_id": 1,
+ "nvme_version": {
+ "string": "1.4",
+ "value": 66560
+ },
+ "nvme_number_of_namespaces": 1,
+ "nvme_namespaces": [
+ {
+ "id": 1,
+ "size": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "capacity": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "utilization": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "formatted_lba_size": 512,
+ "eui64": {
+ "oui": 6584743,
+ "ext_id": 553497146765
+ }
+ }
+ ],
+ "user_capacity": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "logical_block_size": 512,
+ "smart_support": {
+ "available": true,
+ "enabled": true
+ },
+ "smart_status": {
+ "passed": true,
+ "nvme": {
+ "value": 0
+ }
+ },
+ "nvme_smart_health_information_log": {
+ "critical_warning": 0,
+ "temperature": 39,
+ "available_spare": 100,
+ "available_spare_threshold": 5,
+ "percentage_used": 0,
+ "data_units_read": 52,
+ "data_units_written": 0,
+ "host_reads": 550,
+ "host_writes": 0,
+ "controller_busy_time": 0,
+ "power_cycles": 2,
+ "power_on_hours": 3113,
+ "unsafe_shutdowns": 1,
+ "media_errors": 0,
+ "num_err_log_entries": 4,
+ "warning_temp_time": 0,
+ "critical_comp_time": 0
+ },
+ "temperature": {
+ "current": 39
+ },
+ "power_cycle_count": 2,
+ "power_on_time": {
+ "hours": 3113
+ }
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/device-nvme1.json b/src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/device-nvme1.json
new file mode 100644
index 000000000..37faf7cfe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/device-nvme1.json
@@ -0,0 +1,113 @@
+{
+ "json_format_version": [
+ 1,
+ 0
+ ],
+ "smartctl": {
+ "version": [
+ 7,
+ 3
+ ],
+ "svn_revision": "5338",
+ "platform_info": "REDACTED",
+ "build_info": "(local build)",
+ "argv": [
+ "smartctl",
+ "--json",
+ "--all",
+ "/dev/nvme1",
+ "--device",
+ "nvme"
+ ],
+ "exit_status": 0
+ },
+ "local_time": {
+ "time_t": 1720897758,
+ "asctime": "Sat Jul 13 22:09:18 2024 EEST"
+ },
+ "device": {
+ "name": "/dev/nvme1",
+ "info_name": "/dev/nvme1",
+ "type": "nvme",
+ "protocol": "NVMe"
+ },
+ "model_name": "Seagate FireCuda 530 ZP4000GM30023",
+ "serial_number": "REDACTED",
+ "firmware_version": "REDACTED",
+ "nvme_pci_vendor": {
+ "id": 7089,
+ "subsystem_id": 7089
+ },
+ "nvme_ieee_oui_identifier": 6584743,
+ "nvme_total_capacity": 4000787030016,
+ "nvme_unallocated_capacity": 0,
+ "nvme_controller_id": 1,
+ "nvme_version": {
+ "string": "1.4",
+ "value": 66560
+ },
+ "nvme_number_of_namespaces": 1,
+ "nvme_namespaces": [
+ {
+ "id": 1,
+ "size": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "capacity": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "utilization": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "formatted_lba_size": 512,
+ "eui64": {
+ "oui": 6584743,
+ "ext_id": 553497146765
+ }
+ }
+ ],
+ "user_capacity": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "logical_block_size": 512,
+ "smart_support": {
+ "available": true,
+ "enabled": true
+ },
+ "smart_status": {
+ "passed": true,
+ "nvme": {
+ "value": 0
+ }
+ },
+ "nvme_smart_health_information_log": {
+ "critical_warning": 0,
+ "temperature": 36,
+ "available_spare": 100,
+ "available_spare_threshold": 5,
+ "percentage_used": 0,
+ "data_units_read": 202,
+ "data_units_written": 0,
+ "host_reads": 2509,
+ "host_writes": 0,
+ "controller_busy_time": 0,
+ "power_cycles": 5,
+ "power_on_hours": 4733,
+ "unsafe_shutdowns": 2,
+ "media_errors": 0,
+ "num_err_log_entries": 20,
+ "warning_temp_time": 0,
+ "critical_comp_time": 0
+ },
+ "temperature": {
+ "current": 36
+ },
+ "power_cycle_count": 5,
+ "power_on_time": {
+ "hours": 4733
+ }
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/scan.json b/src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/scan.json
new file mode 100644
index 000000000..b9f716cbd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/type-nvme/scan.json
@@ -0,0 +1,29 @@
+{
+ "json_format_version": [
+ 1,
+ 0
+ ],
+ "smartctl": {
+ "version": [
+ 7,
+ 3
+ ],
+ "svn_revision": "5338",
+ "platform_info": "REDACTED",
+ "build_info": "(local build)",
+ "argv": [
+ "smartctl",
+ "--scan",
+ "--json"
+ ],
+ "exit_status": 0
+ },
+ "devices": [
+ {
+ "name": "/dev/nvme0",
+ "info_name": "/dev/nvme0",
+ "type": "nvme",
+ "protocol": "NVMe"
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/type-sat/device-hdd-sda.json b/src/go/plugin/go.d/modules/smartctl/testdata/type-sat/device-hdd-sda.json
new file mode 100644
index 000000000..55cfe15f5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/type-sat/device-hdd-sda.json
@@ -0,0 +1,601 @@
+{
+ "json_format_version": [
+ 1,
+ 0
+ ],
+ "smartctl": {
+ "version": [
+ 7,
+ 3
+ ],
+ "svn_revision": "5338",
+ "platform_info": "REDACTED",
+ "build_info": "(local build)",
+ "argv": [
+ "smartctl",
+ "--all",
+ "--json",
+ "--device=sat",
+ "/dev/sda"
+ ],
+ "drive_database_version": {
+ "string": "7.3/5319"
+ },
+ "exit_status": 0
+ },
+ "local_time": {
+ "time_t": 1714480013,
+ "asctime": "Tue Apr 30 15:26:53 2024 EEST"
+ },
+ "device": {
+ "name": "/dev/sda",
+ "info_name": "/dev/sda [SAT]",
+ "type": "sat",
+ "protocol": "ATA"
+ },
+ "model_name": "WDC WD181KRYZ-01AGBB0",
+ "serial_number": "REDACTED",
+ "wwn": {
+ "naa": 5,
+ "oui": 3274,
+ "id": 11659362274
+ },
+ "firmware_version": "REDACTED",
+ "user_capacity": {
+ "blocks": 35156656128,
+ "bytes": 18000207937536
+ },
+ "logical_block_size": 512,
+ "physical_block_size": 4096,
+ "rotation_rate": 7200,
+ "form_factor": {
+ "ata_value": 2,
+ "name": "3.5 inches"
+ },
+ "trim": {
+ "supported": false
+ },
+ "in_smartctl_database": false,
+ "ata_version": {
+ "string": "ACS-4 published, ANSI INCITS 529-2018",
+ "major_value": 4092,
+ "minor_value": 156
+ },
+ "sata_version": {
+ "string": "SATA 3.3",
+ "value": 511
+ },
+ "interface_speed": {
+ "max": {
+ "sata_value": 14,
+ "string": "6.0 Gb/s",
+ "units_per_second": 60,
+ "bits_per_unit": 100000000
+ },
+ "current": {
+ "sata_value": 3,
+ "string": "6.0 Gb/s",
+ "units_per_second": 60,
+ "bits_per_unit": 100000000
+ }
+ },
+ "smart_support": {
+ "available": true,
+ "enabled": true
+ },
+ "smart_status": {
+ "passed": true
+ },
+ "ata_smart_data": {
+ "offline_data_collection": {
+ "status": {
+ "value": 130,
+ "string": "was completed without error",
+ "passed": true
+ },
+ "completion_seconds": 101
+ },
+ "self_test": {
+ "status": {
+ "value": 0,
+ "string": "completed without error",
+ "passed": true
+ },
+ "polling_minutes": {
+ "short": 2,
+ "extended": 1883
+ }
+ },
+ "capabilities": {
+ "values": [
+ 91,
+ 3
+ ],
+ "exec_offline_immediate_supported": true,
+ "offline_is_aborted_upon_new_cmd": false,
+ "offline_surface_scan_supported": true,
+ "self_tests_supported": true,
+ "conveyance_self_test_supported": false,
+ "selective_self_test_supported": true,
+ "attribute_autosave_enabled": true,
+ "error_logging_supported": true,
+ "gp_logging_supported": true
+ }
+ },
+ "ata_sct_capabilities": {
+ "value": 61,
+ "error_recovery_control_supported": true,
+ "feature_control_supported": true,
+ "data_table_supported": true
+ },
+ "ata_smart_attributes": {
+ "revision": 16,
+ "table": [
+ {
+ "id": 1,
+ "name": "Raw_Read_Error_Rate",
+ "value": 100,
+ "worst": 100,
+ "thresh": 1,
+ "when_failed": "",
+ "flags": {
+ "value": 11,
+ "string": "PO-R-- ",
+ "prefailure": true,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": true,
+ "event_count": false,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 2,
+ "name": "Throughput_Performance",
+ "value": 148,
+ "worst": 148,
+ "thresh": 54,
+ "when_failed": "",
+ "flags": {
+ "value": 5,
+ "string": "P-S--- ",
+ "prefailure": true,
+ "updated_online": false,
+ "performance": true,
+ "error_rate": false,
+ "event_count": false,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 48,
+ "string": "48"
+ }
+ },
+ {
+ "id": 3,
+ "name": "Spin_Up_Time",
+ "value": 86,
+ "worst": 86,
+ "thresh": 1,
+ "when_failed": "",
+ "flags": {
+ "value": 7,
+ "string": "POS--- ",
+ "prefailure": true,
+ "updated_online": true,
+ "performance": true,
+ "error_rate": false,
+ "event_count": false,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 25788088601,
+ "string": "281 (Average 279)"
+ }
+ },
+ {
+ "id": 4,
+ "name": "Start_Stop_Count",
+ "value": 100,
+ "worst": 100,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 18,
+ "string": "-O--C- ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 12,
+ "string": "12"
+ }
+ },
+ {
+ "id": 5,
+ "name": "Reallocated_Sector_Ct",
+ "value": 100,
+ "worst": 100,
+ "thresh": 1,
+ "when_failed": "",
+ "flags": {
+ "value": 51,
+ "string": "PO--CK ",
+ "prefailure": true,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 7,
+ "name": "Seek_Error_Rate",
+ "value": 100,
+ "worst": 100,
+ "thresh": 1,
+ "when_failed": "",
+ "flags": {
+ "value": 11,
+ "string": "PO-R-- ",
+ "prefailure": true,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": true,
+ "event_count": false,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 8,
+ "name": "Seek_Time_Performance",
+ "value": 140,
+ "worst": 140,
+ "thresh": 20,
+ "when_failed": "",
+ "flags": {
+ "value": 5,
+ "string": "P-S--- ",
+ "prefailure": true,
+ "updated_online": false,
+ "performance": true,
+ "error_rate": false,
+ "event_count": false,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 15,
+ "string": "15"
+ }
+ },
+ {
+ "id": 9,
+ "name": "Power_On_Hours",
+ "value": 99,
+ "worst": 99,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 18,
+ "string": "-O--C- ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 8244,
+ "string": "8244"
+ }
+ },
+ {
+ "id": 10,
+ "name": "Spin_Retry_Count",
+ "value": 100,
+ "worst": 100,
+ "thresh": 1,
+ "when_failed": "",
+ "flags": {
+ "value": 19,
+ "string": "PO--C- ",
+ "prefailure": true,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 12,
+ "name": "Power_Cycle_Count",
+ "value": 100,
+ "worst": 100,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 12,
+ "string": "12"
+ }
+ },
+ {
+ "id": 22,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "thresh": 25,
+ "when_failed": "",
+ "flags": {
+ "value": 35,
+ "string": "PO---K ",
+ "prefailure": true,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": false,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 100,
+ "string": "100"
+ }
+ },
+ {
+ "id": 192,
+ "name": "Power-Off_Retract_Count",
+ "value": 100,
+ "worst": 100,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 360,
+ "string": "360"
+ }
+ },
+ {
+ "id": 193,
+ "name": "Load_Cycle_Count",
+ "value": 100,
+ "worst": 100,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 18,
+ "string": "-O--C- ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 360,
+ "string": "360"
+ }
+ },
+ {
+ "id": 194,
+ "name": "Temperature_Celsius",
+ "value": 43,
+ "worst": 43,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 2,
+ "string": "-O---- ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": false,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 240519741489,
+ "string": "49 (Min/Max 24/56)"
+ }
+ },
+ {
+ "id": 196,
+ "name": "Reallocated_Event_Count",
+ "value": 100,
+ "worst": 100,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 197,
+ "name": "Current_Pending_Sector",
+ "value": 100,
+ "worst": 100,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 34,
+ "string": "-O---K ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": false,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 198,
+ "name": "Offline_Uncorrectable",
+ "value": 100,
+ "worst": 100,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 8,
+ "string": "---R-- ",
+ "prefailure": false,
+ "updated_online": false,
+ "performance": false,
+ "error_rate": true,
+ "event_count": false,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 199,
+ "name": "UDMA_CRC_Error_Count",
+ "value": 100,
+ "worst": 100,
+ "thresh": 0,
+ "when_failed": "",
+ "flags": {
+ "value": 10,
+ "string": "-O-R-- ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": true,
+ "event_count": false,
+ "auto_keep": false
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ }
+ ]
+ },
+ "power_on_time": {
+ "hours": 8244
+ },
+ "power_cycle_count": 12,
+ "temperature": {
+ "current": 49
+ },
+ "ata_smart_error_log": {
+ "summary": {
+ "revision": 1,
+ "count": 0
+ }
+ },
+ "ata_smart_self_test_log": {
+ "standard": {
+ "revision": 1,
+ "count": 0
+ }
+ },
+ "ata_smart_selective_self_test_log": {
+ "revision": 1,
+ "table": [
+ {
+ "lba_min": 0,
+ "lba_max": 0,
+ "status": {
+ "value": 0,
+ "string": "Not_testing"
+ }
+ },
+ {
+ "lba_min": 0,
+ "lba_max": 0,
+ "status": {
+ "value": 0,
+ "string": "Not_testing"
+ }
+ },
+ {
+ "lba_min": 0,
+ "lba_max": 0,
+ "status": {
+ "value": 0,
+ "string": "Not_testing"
+ }
+ },
+ {
+ "lba_min": 0,
+ "lba_max": 0,
+ "status": {
+ "value": 0,
+ "string": "Not_testing"
+ }
+ },
+ {
+ "lba_min": 0,
+ "lba_max": 0,
+ "status": {
+ "value": 0,
+ "string": "Not_testing"
+ }
+ }
+ ],
+ "flags": {
+ "value": 0,
+ "remainder_scan_enabled": false
+ },
+ "power_up_scan_resume_minutes": 0
+ }
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/type-sat/device-ssd-sdc.json b/src/go/plugin/go.d/modules/smartctl/testdata/type-sat/device-ssd-sdc.json
new file mode 100644
index 000000000..a2d8f0aaf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/type-sat/device-ssd-sdc.json
@@ -0,0 +1,652 @@
+{
+ "json_format_version": [
+ 1,
+ 0
+ ],
+ "smartctl": {
+ "version": [
+ 7,
+ 3
+ ],
+ "svn_revision": "5338",
+ "platform_info": "REDACTED",
+ "build_info": "(local build)",
+ "argv": [
+ "smartctl",
+ "--all",
+ "--json",
+ "--device=sat",
+ "/dev/sdc"
+ ],
+ "drive_database_version": {
+ "string": "7.3/5319"
+ },
+ "exit_status": 0
+ },
+ "local_time": {
+ "time_t": 1714480059,
+ "asctime": "Tue Apr 30 15:27:39 2024 EEST"
+ },
+ "device": {
+ "name": "/dev/sdc",
+ "info_name": "/dev/sdc [SAT]",
+ "type": "sat",
+ "protocol": "ATA"
+ },
+ "model_name": "WDC WDS400T1R0A-68A4W0",
+ "serial_number": "REDACTED",
+ "wwn": {
+ "naa": 5,
+ "oui": 6980,
+ "id": 37319905210
+ },
+ "firmware_version": "REDACTED",
+ "user_capacity": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "logical_block_size": 512,
+ "physical_block_size": 512,
+ "rotation_rate": 0,
+ "form_factor": {
+ "ata_value": 3,
+ "name": "2.5 inches"
+ },
+ "trim": {
+ "supported": true,
+ "deterministic": true,
+ "zeroed": true
+ },
+ "in_smartctl_database": false,
+ "ata_version": {
+ "string": "ACS-4 T13/BSR INCITS 529 revision 5",
+ "major_value": 4080,
+ "minor_value": 94
+ },
+ "sata_version": {
+ "string": "SATA 3.3",
+ "value": 511
+ },
+ "interface_speed": {
+ "max": {
+ "sata_value": 14,
+ "string": "6.0 Gb/s",
+ "units_per_second": 60,
+ "bits_per_unit": 100000000
+ },
+ "current": {
+ "sata_value": 3,
+ "string": "6.0 Gb/s",
+ "units_per_second": 60,
+ "bits_per_unit": 100000000
+ }
+ },
+ "smart_support": {
+ "available": true,
+ "enabled": true
+ },
+ "smart_status": {
+ "passed": true
+ },
+ "ata_smart_data": {
+ "offline_data_collection": {
+ "status": {
+ "value": 0,
+ "string": "was never started"
+ },
+ "completion_seconds": 0
+ },
+ "self_test": {
+ "status": {
+ "value": 0,
+ "string": "completed without error",
+ "passed": true
+ },
+ "polling_minutes": {
+ "short": 2,
+ "extended": 10
+ }
+ },
+ "capabilities": {
+ "values": [
+ 17,
+ 3
+ ],
+ "exec_offline_immediate_supported": true,
+ "offline_is_aborted_upon_new_cmd": false,
+ "offline_surface_scan_supported": false,
+ "self_tests_supported": true,
+ "conveyance_self_test_supported": false,
+ "selective_self_test_supported": false,
+ "attribute_autosave_enabled": true,
+ "error_logging_supported": true,
+ "gp_logging_supported": true
+ }
+ },
+ "ata_smart_attributes": {
+ "revision": 4,
+ "table": [
+ {
+ "id": 5,
+ "name": "Reallocated_Sector_Ct",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 9,
+ "name": "Power_On_Hours",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 8244,
+ "string": "8244"
+ }
+ },
+ {
+ "id": 12,
+ "name": "Power_Cycle_Count",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 13,
+ "string": "13"
+ }
+ },
+ {
+ "id": 165,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 515627355118,
+ "string": "515627355118"
+ }
+ },
+ {
+ "id": 166,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 1,
+ "string": "1"
+ }
+ },
+ {
+ "id": 167,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 84,
+ "string": "84"
+ }
+ },
+ {
+ "id": 168,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 34,
+ "string": "34"
+ }
+ },
+ {
+ "id": 169,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 1600,
+ "string": "1600"
+ }
+ },
+ {
+ "id": 170,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 171,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 172,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 173,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 15,
+ "string": "15"
+ }
+ },
+ {
+ "id": 174,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 4,
+ "string": "4"
+ }
+ },
+ {
+ "id": 184,
+ "name": "End-to-End_Error",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 187,
+ "name": "Reported_Uncorrect",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 188,
+ "name": "Command_Timeout",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 194,
+ "name": "Temperature_Celsius",
+ "value": 73,
+ "worst": 43,
+ "flags": {
+ "value": 34,
+ "string": "-O---K ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": false,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 184684970011,
+ "string": "27 (Min/Max 21/43)"
+ }
+ },
+ {
+ "id": 199,
+ "name": "UDMA_CRC_Error_Count",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ },
+ {
+ "id": 230,
+ "name": "Unknown_SSD_Attribute",
+ "value": 4,
+ "worst": 4,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 4694419309637,
+ "string": "4694419309637"
+ }
+ },
+ {
+ "id": 232,
+ "name": "Available_Reservd_Space",
+ "value": 100,
+ "worst": 100,
+ "thresh": 4,
+ "when_failed": "",
+ "flags": {
+ "value": 51,
+ "string": "PO--CK ",
+ "prefailure": true,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 100,
+ "string": "100"
+ }
+ },
+ {
+ "id": 233,
+ "name": "Media_Wearout_Indicator",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 65406,
+ "string": "65406"
+ }
+ },
+ {
+ "id": 234,
+ "name": "Unknown_Attribute",
+ "value": 100,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 190852,
+ "string": "190852"
+ }
+ },
+ {
+ "id": 241,
+ "name": "Total_LBAs_Written",
+ "value": 253,
+ "worst": 253,
+ "flags": {
+ "value": 48,
+ "string": "----CK ",
+ "prefailure": false,
+ "updated_online": false,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 173833,
+ "string": "173833"
+ }
+ },
+ {
+ "id": 242,
+ "name": "Total_LBAs_Read",
+ "value": 253,
+ "worst": 253,
+ "flags": {
+ "value": 48,
+ "string": "----CK ",
+ "prefailure": false,
+ "updated_online": false,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 76778,
+ "string": "76778"
+ }
+ },
+ {
+ "id": 244,
+ "name": "Unknown_Attribute",
+ "value": 0,
+ "worst": 100,
+ "flags": {
+ "value": 50,
+ "string": "-O--CK ",
+ "prefailure": false,
+ "updated_online": true,
+ "performance": false,
+ "error_rate": false,
+ "event_count": true,
+ "auto_keep": true
+ },
+ "raw": {
+ "value": 0,
+ "string": "0"
+ }
+ }
+ ]
+ },
+ "power_on_time": {
+ "hours": 8244
+ },
+ "power_cycle_count": 13,
+ "temperature": {
+ "current": 27
+ },
+ "ata_smart_error_log": {
+ "summary": {
+ "revision": 1,
+ "count": 0
+ }
+ },
+ "ata_smart_self_test_log": {
+ "standard": {
+ "revision": 1,
+ "count": 0
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/type-sat/scan.json b/src/go/plugin/go.d/modules/smartctl/testdata/type-sat/scan.json
new file mode 100644
index 000000000..c7a68ca8d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/type-sat/scan.json
@@ -0,0 +1,35 @@
+{
+ "json_format_version": [
+ 1,
+ 0
+ ],
+ "smartctl": {
+ "version": [
+ 7,
+ 3
+ ],
+ "svn_revision": "5338",
+ "platform_info": "REDACTED",
+ "build_info": "(local build)",
+ "argv": [
+ "smartctl",
+ "--scan",
+ "--json"
+ ],
+ "exit_status": 0
+ },
+ "devices": [
+ {
+ "name": "/dev/sda",
+ "info_name": "/dev/sda [SAT]",
+ "type": "sat",
+ "protocol": "ATA"
+ },
+ {
+ "name": "/dev/sdc",
+ "info_name": "/dev/sdc [SAT]",
+ "type": "sat",
+ "protocol": "ATA"
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/type-scsi/device-sda.json b/src/go/plugin/go.d/modules/smartctl/testdata/type-scsi/device-sda.json
new file mode 100644
index 000000000..0ab55d2c9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/type-scsi/device-sda.json
@@ -0,0 +1,128 @@
+{
+ "json_format_version": [
+ 1,
+ 0
+ ],
+ "smartctl": {
+ "version": [
+ 7,
+ 3
+ ],
+ "svn_revision": "5338",
+ "platform_info": "REDACTED",
+ "build_info": "(local build)",
+ "argv": [
+ "smartctl",
+ "--json",
+ "--all",
+ "/dev/sda",
+ "--device",
+ "scsi"
+ ],
+ "exit_status": 0
+ },
+ "local_time": {
+ "time_t": 1720689199,
+ "asctime": "Thu Jul 11 09:13:19 2024 UTC"
+ },
+ "device": {
+ "name": "/dev/sda",
+ "info_name": "/dev/sda",
+ "type": "scsi",
+ "protocol": "SCSI"
+ },
+ "scsi_vendor": "HGST",
+ "scsi_product": "REDACTED",
+ "scsi_model_name": "REDACTED",
+ "scsi_revision": "REDACTED",
+ "scsi_version": "REDACTED",
+ "user_capacity": {
+ "blocks": 7814037168,
+ "bytes": 4000787030016
+ },
+ "logical_block_size": 512,
+ "scsi_lb_provisioning": {
+ "name": "fully provisioned",
+ "value": 0,
+ "management_enabled": {
+ "name": "LBPME",
+ "value": 0
+ },
+ "read_zeros": {
+ "name": "LBPRZ",
+ "value": 0
+ }
+ },
+ "rotation_rate": 7200,
+ "form_factor": {
+ "scsi_value": 2,
+ "name": "3.5 inches"
+ },
+ "logical_unit_id": "REDACTED",
+ "serial_number": "REDACTED",
+ "device_type": {
+ "scsi_terminology": "Peripheral Device Type [PDT]",
+ "scsi_value": 0,
+ "name": "disk"
+ },
+ "scsi_transport_protocol": {
+ "name": "SAS (SPL-4)",
+ "value": 6
+ },
+ "smart_support": {
+ "available": true,
+ "enabled": true
+ },
+ "temperature_warning": {
+ "enabled": true
+ },
+ "smart_status": {
+ "passed": true
+ },
+ "temperature": {
+ "current": 34,
+ "drive_trip": 85
+ },
+ "power_on_time": {
+ "hours": 1641,
+ "minutes": 22
+ },
+ "scsi_start_stop_cycle_counter": {
+ "year_of_manufacture": "2013",
+ "week_of_manufacture": "51",
+ "specified_cycle_count_over_device_lifetime": 50000,
+ "accumulated_start_stop_cycles": 4,
+ "specified_load_unload_count_over_device_lifetime": 600000,
+ "accumulated_load_unload_cycles": 119
+ },
+ "scsi_grown_defect_list": 0,
+ "scsi_error_counter_log": {
+ "read": {
+ "errors_corrected_by_eccfast": 647707,
+ "errors_corrected_by_eccdelayed": 29,
+ "errors_corrected_by_rereads_rewrites": 0,
+ "total_errors_corrected": 647736,
+ "correction_algorithm_invocations": 586730,
+ "gigabytes_processed": "36537.378",
+ "total_uncorrected_errors": 0
+ },
+ "write": {
+ "errors_corrected_by_eccfast": 0,
+ "errors_corrected_by_eccdelayed": 0,
+ "errors_corrected_by_rereads_rewrites": 0,
+ "total_errors_corrected": 0,
+ "correction_algorithm_invocations": 13549,
+ "gigabytes_processed": "2811.293",
+ "total_uncorrected_errors": 0
+ },
+ "verify": {
+ "errors_corrected_by_eccfast": 0,
+ "errors_corrected_by_eccdelayed": 0,
+ "errors_corrected_by_rereads_rewrites": 0,
+ "total_errors_corrected": 0,
+ "correction_algorithm_invocations": 2146,
+ "gigabytes_processed": "0.000",
+ "total_uncorrected_errors": 0
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/smartctl/testdata/type-scsi/scan.json b/src/go/plugin/go.d/modules/smartctl/testdata/type-scsi/scan.json
new file mode 100644
index 000000000..398f5f4af
--- /dev/null
+++ b/src/go/plugin/go.d/modules/smartctl/testdata/type-scsi/scan.json
@@ -0,0 +1,29 @@
+{
+ "json_format_version": [
+ 1,
+ 0
+ ],
+ "smartctl": {
+ "version": [
+ 7,
+ 3
+ ],
+ "svn_revision": "5338",
+ "platform_info": "REDACTED",
+ "build_info": "(local build)",
+ "argv": [
+ "smartctl",
+ "--scan",
+ "--json"
+ ],
+ "exit_status": 0
+ },
+ "devices": [
+ {
+ "name": "/dev/sda",
+ "info_name": "/dev/sda",
+ "type": "scsi",
+ "protocol": "SCSI"
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/snmp/README.md b/src/go/plugin/go.d/modules/snmp/README.md
new file mode 120000
index 000000000..edf223bf9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/README.md
@@ -0,0 +1 @@
+integrations/snmp_devices.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/snmp/charts.go b/src/go/plugin/go.d/modules/snmp/charts.go
new file mode 100644
index 000000000..dd31f1cc7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/charts.go
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package snmp
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioNetIfaceTraffic = module.Priority + iota
+ prioNetIfaceUnicast
+ prioNetIfaceMulticast
+ prioNetIfaceBroadcast
+ prioNetIfaceErrors
+ prioNetIfaceDiscards
+ prioNetIfaceAdminStatus
+ prioNetIfaceOperStatus
+ prioSysUptime
+)
+
+var netIfaceChartsTmpl = module.Charts{
+ netIfaceTrafficChartTmpl.Copy(),
+ netIfacePacketsChartTmpl.Copy(),
+ netIfaceMulticastChartTmpl.Copy(),
+ netIfaceBroadcastChartTmpl.Copy(),
+ netIfaceErrorsChartTmpl.Copy(),
+ netIfaceDiscardsChartTmpl.Copy(),
+ netIfaceAdminStatusChartTmpl.Copy(),
+ netIfaceOperStatusChartTmpl.Copy(),
+}
+
+var (
+ netIfaceTrafficChartTmpl = module.Chart{
+ ID: "snmp_device_net_iface_%s_traffic",
+ Title: "SNMP device network interface traffic",
+ Units: "kilobits/s",
+ Fam: "traffic",
+ Ctx: "snmp.device_net_interface_traffic",
+ Priority: prioNetIfaceTraffic,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "net_iface_%s_traffic_in", Name: "received", Algo: module.Incremental},
+ {ID: "net_iface_%s_traffic_out", Name: "sent", Mul: -1, Algo: module.Incremental},
+ },
+ }
+
+ netIfacePacketsChartTmpl = module.Chart{
+ ID: "snmp_device_net_iface_%s_unicast",
+ Title: "SNMP device network interface unicast packets",
+ Units: "packets/s",
+ Fam: "packets",
+ Ctx: "snmp.device_net_interface_unicast",
+ Priority: prioNetIfaceUnicast,
+ Dims: module.Dims{
+ {ID: "net_iface_%s_ucast_in", Name: "received", Algo: module.Incremental},
+ {ID: "net_iface_%s_ucast_out", Name: "sent", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ netIfaceMulticastChartTmpl = module.Chart{
+ ID: "snmp_device_net_iface_%s_multicast",
+ Title: "SNMP device network interface multicast packets",
+ Units: "packets/s",
+ Fam: "packets",
+ Ctx: "snmp.device_net_interface_multicast",
+ Priority: prioNetIfaceMulticast,
+ Dims: module.Dims{
+ {ID: "net_iface_%s_mcast_in", Name: "received", Algo: module.Incremental},
+ {ID: "net_iface_%s_mcast_out", Name: "sent", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ netIfaceBroadcastChartTmpl = module.Chart{
+ ID: "snmp_device_net_iface_%s_broadcast",
+ Title: "SNMP device network interface broadcast packets",
+ Units: "packets/s",
+ Fam: "packets",
+ Ctx: "snmp.device_net_interface_broadcast",
+ Priority: prioNetIfaceBroadcast,
+ Dims: module.Dims{
+ {ID: "net_iface_%s_bcast_in", Name: "received", Algo: module.Incremental},
+ {ID: "net_iface_%s_bcast_out", Name: "sent", Mul: -1, Algo: module.Incremental},
+ },
+ }
+
+ netIfaceErrorsChartTmpl = module.Chart{
+ ID: "snmp_device_net_iface_%s_errors",
+ Title: "SNMP device network interface errors",
+ Units: "errors/s",
+ Fam: "errors",
+ Ctx: "snmp.device_net_interface_errors",
+ Priority: prioNetIfaceErrors,
+ Dims: module.Dims{
+ {ID: "net_iface_%s_errors_in", Name: "inbound", Algo: module.Incremental},
+ {ID: "net_iface_%s_errors_out", Name: "outbound", Mul: -1, Algo: module.Incremental},
+ },
+ }
+
+ netIfaceDiscardsChartTmpl = module.Chart{
+ ID: "snmp_device_net_iface_%s_discards",
+ Title: "SNMP device network interface discards",
+ Units: "discards/s",
+ Fam: "discards",
+ Ctx: "snmp.device_net_interface_discards",
+ Priority: prioNetIfaceDiscards,
+ Dims: module.Dims{
+ {ID: "net_iface_%s_discards_in", Name: "inbound", Algo: module.Incremental},
+ {ID: "net_iface_%s_discards_out", Name: "outbound", Mul: -1, Algo: module.Incremental},
+ },
+ }
+
+ netIfaceAdminStatusChartTmpl = module.Chart{
+ ID: "snmp_device_net_iface_%s_admin_status",
+ Title: "SNMP device network interface administrative status",
+ Units: "status",
+ Fam: "status",
+ Ctx: "snmp.device_net_interface_admin_status",
+ Priority: prioNetIfaceAdminStatus,
+ Dims: module.Dims{
+ {ID: "net_iface_%s_admin_status_up", Name: "up"},
+ {ID: "net_iface_%s_admin_status_down", Name: "down"},
+ {ID: "net_iface_%s_admin_status_testing", Name: "testing"},
+ },
+ }
+ netIfaceOperStatusChartTmpl = module.Chart{
+ ID: "snmp_device_net_iface_%s_oper_status",
+ Title: "SNMP device network interface operational status",
+ Units: "status",
+ Fam: "status",
+ Ctx: "snmp.device_net_interface_oper_status",
+ Priority: prioNetIfaceOperStatus,
+ Dims: module.Dims{
+ {ID: "net_iface_%s_oper_status_up", Name: "up"},
+ {ID: "net_iface_%s_oper_status_down", Name: "down"},
+ {ID: "net_iface_%s_oper_status_testing", Name: "testing"},
+ {ID: "net_iface_%s_oper_status_unknown", Name: "unknown"},
+ {ID: "net_iface_%s_oper_status_dormant", Name: "dormant"},
+ {ID: "net_iface_%s_oper_status_notPresent", Name: "not_present"},
+ {ID: "net_iface_%s_oper_status_lowerLayerDown", Name: "lower_layer_down"},
+ },
+ }
+)
+
+var (
+ uptimeChart = module.Chart{
+ ID: "snmp_device_uptime",
+ Title: "SNMP device uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "snmp.device_uptime",
+ Priority: prioSysUptime,
+ Dims: module.Dims{
+ {ID: "uptime", Name: "uptime"},
+ },
+ }
+)
+
+func (s *SNMP) addNetIfaceCharts(iface *netInterface) {
+ charts := netIfaceChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanIfaceName(iface.ifName))
+ chart.Labels = []module.Label{
+ {Key: "sysName", Value: s.sysName},
+ {Key: "ifDescr", Value: iface.ifDescr},
+ {Key: "ifName", Value: iface.ifName},
+ {Key: "ifType", Value: ifTypeMapping[iface.ifType]},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, iface.ifName)
+ }
+ }
+
+ if err := s.Charts().Add(*charts...); err != nil {
+ s.Warning(err)
+ }
+}
+
+func (s *SNMP) removeNetIfaceCharts(iface *netInterface) {
+ px := fmt.Sprintf("snmp_device_net_iface_%s_", cleanIfaceName(iface.ifName))
+ for _, chart := range *s.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
+func (s *SNMP) addSysUptimeChart() {
+ chart := uptimeChart.Copy()
+ chart.Labels = []module.Label{
+ {Key: "sysName", Value: s.sysName},
+ }
+ if err := s.Charts().Add(chart); err != nil {
+ s.Warning(err)
+ }
+}
+
+func cleanIfaceName(name string) string {
+ r := strings.NewReplacer(".", "_", " ", "_")
+ return r.Replace(name)
+}
+
+func newUserInputCharts(configs []ChartConfig) (*module.Charts, error) {
+ charts := &module.Charts{}
+ for _, cfg := range configs {
+ if len(cfg.IndexRange) == 2 {
+ cs, err := newUserInputChartsFromIndexRange(cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err := charts.Add(*cs...); err != nil {
+ return nil, err
+ }
+ } else {
+ chart, err := newUserInputChart(cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err = charts.Add(chart); err != nil {
+ return nil, err
+ }
+ }
+ }
+ return charts, nil
+}
+
+func newUserInputChartsFromIndexRange(cfg ChartConfig) (*module.Charts, error) {
+ var addPrio int
+ charts := &module.Charts{}
+ for i := cfg.IndexRange[0]; i <= cfg.IndexRange[1]; i++ {
+ chart, err := newUserInputChartWithOIDIndex(i, cfg)
+ if err != nil {
+ return nil, err
+ }
+ chart.Priority += addPrio
+ addPrio += 1
+ if err = charts.Add(chart); err != nil {
+ return nil, err
+ }
+ }
+ return charts, nil
+}
+
+func newUserInputChartWithOIDIndex(oidIndex int, cfg ChartConfig) (*module.Chart, error) {
+ chart, err := newUserInputChart(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ chart.ID = fmt.Sprintf("%s_%d", chart.ID, oidIndex)
+ chart.Title = fmt.Sprintf("%s %d", chart.Title, oidIndex)
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf("%s.%d", dim.ID, oidIndex)
+ }
+
+ return chart, nil
+}
+
+func newUserInputChart(cfg ChartConfig) (*module.Chart, error) {
+ chart := &module.Chart{
+ ID: cfg.ID,
+ Title: cfg.Title,
+ Units: cfg.Units,
+ Fam: cfg.Family,
+ Ctx: fmt.Sprintf("snmp.%s", cfg.ID),
+ Type: module.ChartType(cfg.Type),
+ Priority: cfg.Priority,
+ }
+
+ if chart.Title == "" {
+ chart.Title = "Untitled chart"
+ }
+ if chart.Units == "" {
+ chart.Units = "num"
+ }
+ if chart.Priority < module.Priority {
+ chart.Priority += module.Priority
+ }
+
+ seen := make(map[string]struct{})
+ var a string
+ for _, cfg := range cfg.Dimensions {
+ if cfg.Algorithm != "" {
+ seen[cfg.Algorithm] = struct{}{}
+ a = cfg.Algorithm
+ }
+ dim := &module.Dim{
+ ID: strings.TrimPrefix(cfg.OID, "."),
+ Name: cfg.Name,
+ Algo: module.DimAlgo(cfg.Algorithm),
+ Mul: cfg.Multiplier,
+ Div: cfg.Divisor,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+ }
+ if len(seen) == 1 && a != "" && len(chart.Dims) > 1 {
+ for _, d := range chart.Dims {
+ if d.Algo == "" {
+ d.Algo = module.DimAlgo(a)
+ }
+ }
+ }
+
+ return chart, nil
+}
diff --git a/src/go/plugin/go.d/modules/snmp/collect.go b/src/go/plugin/go.d/modules/snmp/collect.go
new file mode 100644
index 000000000..24cc49dbc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/collect.go
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package snmp
+
+import (
+ "errors"
+ "fmt"
+ "log/slog"
+ "sort"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+
+ "github.com/gosnmp/gosnmp"
+)
+
+const (
+ oidSysUptime = "1.3.6.1.2.1.1.3.0"
+ oidSysName = "1.3.6.1.2.1.1.5.0"
+ rootOidIfMibIfTable = "1.3.6.1.2.1.2.2"
+ rootOidIfMibIfXTable = "1.3.6.1.2.1.31.1.1"
+)
+
+func (s *SNMP) collect() (map[string]int64, error) {
+ if s.sysName == "" {
+ sysName, err := s.getSysName()
+ if err != nil {
+ return nil, err
+ }
+ s.sysName = sysName
+ s.addSysUptimeChart()
+ }
+
+ mx := make(map[string]int64)
+
+ if err := s.collectSysUptime(mx); err != nil {
+ return nil, err
+ }
+
+ if s.collectIfMib {
+ if err := s.collectNetworkInterfaces(mx); err != nil {
+ return nil, err
+ }
+ }
+
+ if len(s.oids) > 0 {
+ if err := s.collectOIDs(mx); err != nil {
+ return nil, err
+ }
+ }
+
+ return mx, nil
+}
+
+func (s *SNMP) getSysName() (string, error) {
+ resp, err := s.snmpClient.Get([]string{oidSysName})
+ if err != nil {
+ return "", err
+ }
+ if len(resp.Variables) == 0 {
+ return "", errors.New("no system name")
+ }
+ return pduToString(resp.Variables[0])
+}
+
+func (s *SNMP) collectSysUptime(mx map[string]int64) error {
+ resp, err := s.snmpClient.Get([]string{oidSysUptime})
+ if err != nil {
+ return err
+ }
+ if len(resp.Variables) == 0 {
+ return errors.New("no system uptime")
+ }
+ v, err := pduToInt(resp.Variables[0])
+ if err != nil {
+ return err
+ }
+
+ mx["uptime"] = v / 100 // the time is in hundredths of a second
+
+ return nil
+}
+
+func (s *SNMP) collectNetworkInterfaces(mx map[string]int64) error {
+ if s.checkMaxReps {
+ ok, err := s.adjustMaxRepetitions()
+ if err != nil {
+ return err
+ }
+
+ s.checkMaxReps = false
+
+ if !ok {
+ s.collectIfMib = false
+
+ if len(s.oids) == 0 {
+ return errors.New("no IF-MIB data returned")
+ }
+
+ s.Warning("no IF-MIB data returned")
+ return nil
+ }
+ }
+
+ ifMibTable, err := s.walkAll(rootOidIfMibIfTable)
+ if err != nil {
+ return err
+ }
+
+ ifMibXTable, err := s.walkAll(rootOidIfMibIfXTable)
+ if err != nil {
+ return err
+ }
+
+ if len(ifMibTable) == 0 && len(ifMibXTable) == 0 {
+ s.Warning("no IF-MIB data returned")
+ s.collectIfMib = false
+ return nil
+ }
+
+ for _, i := range s.netInterfaces {
+ i.updated = false
+ }
+
+ pdus := make([]gosnmp.SnmpPDU, 0, len(ifMibTable)+len(ifMibXTable))
+ pdus = append(pdus, ifMibTable...)
+ pdus = append(pdus, ifMibXTable...)
+
+ for _, pdu := range pdus {
+ i := strings.LastIndexByte(pdu.Name, '.')
+ if i == -1 {
+ continue
+ }
+
+ idx := pdu.Name[i+1:]
+ oid := strings.TrimPrefix(pdu.Name[:i], ".")
+
+ iface, ok := s.netInterfaces[idx]
+ if !ok {
+ iface = &netInterface{idx: idx}
+ }
+
+ switch oid {
+ case oidIfIndex:
+ iface.ifIndex, err = pduToInt(pdu)
+ case oidIfDescr:
+ iface.ifDescr, err = pduToString(pdu)
+ case oidIfType:
+ iface.ifType, err = pduToInt(pdu)
+ case oidIfMtu:
+ iface.ifMtu, err = pduToInt(pdu)
+ case oidIfSpeed:
+ iface.ifSpeed, err = pduToInt(pdu)
+ case oidIfAdminStatus:
+ iface.ifAdminStatus, err = pduToInt(pdu)
+ case oidIfOperStatus:
+ iface.ifOperStatus, err = pduToInt(pdu)
+ case oidIfInOctets:
+ iface.ifInOctets, err = pduToInt(pdu)
+ case oidIfInUcastPkts:
+ iface.ifInUcastPkts, err = pduToInt(pdu)
+ case oidIfInNUcastPkts:
+ iface.ifInNUcastPkts, err = pduToInt(pdu)
+ case oidIfInDiscards:
+ iface.ifInDiscards, err = pduToInt(pdu)
+ case oidIfInErrors:
+ iface.ifInErrors, err = pduToInt(pdu)
+ case oidIfInUnknownProtos:
+ iface.ifInUnknownProtos, err = pduToInt(pdu)
+ case oidIfOutOctets:
+ iface.ifOutOctets, err = pduToInt(pdu)
+ case oidIfOutUcastPkts:
+ iface.ifOutUcastPkts, err = pduToInt(pdu)
+ case oidIfOutNUcastPkts:
+ iface.ifOutNUcastPkts, err = pduToInt(pdu)
+ case oidIfOutDiscards:
+ iface.ifOutDiscards, err = pduToInt(pdu)
+ case oidIfOutErrors:
+ iface.ifOutErrors, err = pduToInt(pdu)
+ case oidIfName:
+ iface.ifName, err = pduToString(pdu)
+ case oidIfInMulticastPkts:
+ iface.ifInMulticastPkts, err = pduToInt(pdu)
+ case oidIfInBroadcastPkts:
+ iface.ifInBroadcastPkts, err = pduToInt(pdu)
+ case oidIfOutMulticastPkts:
+ iface.ifOutMulticastPkts, err = pduToInt(pdu)
+ case oidIfOutBroadcastPkts:
+ iface.ifOutBroadcastPkts, err = pduToInt(pdu)
+ case oidIfHCInOctets:
+ iface.ifHCInOctets, err = pduToInt(pdu)
+ case oidIfHCInUcastPkts:
+ iface.ifHCInUcastPkts, err = pduToInt(pdu)
+ case oidIfHCInMulticastPkts:
+ iface.ifHCInMulticastPkts, err = pduToInt(pdu)
+ case oidIfHCInBroadcastPkts:
+ iface.ifHCInBroadcastPkts, err = pduToInt(pdu)
+ case oidIfHCOutOctets:
+ iface.ifHCOutOctets, err = pduToInt(pdu)
+ case oidIfHCOutUcastPkts:
+ iface.ifHCOutUcastPkts, err = pduToInt(pdu)
+ case oidIfHCOutMulticastPkts:
+ iface.ifHCOutMulticastPkts, err = pduToInt(pdu)
+ case oidIfHCOutBroadcastPkts:
+ iface.ifHCOutBroadcastPkts, err = pduToInt(pdu)
+ case oidIfHighSpeed:
+ iface.ifHighSpeed, err = pduToInt(pdu)
+ case oidIfAlias:
+ iface.ifAlias, err = pduToString(pdu)
+ default:
+ continue
+ }
+
+ if err != nil {
+ return fmt.Errorf("OID '%s': %v", pdu.Name, err)
+ }
+
+ s.netInterfaces[idx] = iface
+ iface.updated = true
+ }
+
+ for _, iface := range s.netInterfaces {
+ if iface.ifName == "" {
+ continue
+ }
+
+ typeStr := ifTypeMapping[iface.ifType]
+ if s.netIfaceFilterByName.MatchString(iface.ifName) || s.netIfaceFilterByType.MatchString(typeStr) {
+ continue
+ }
+
+ if !iface.updated {
+ delete(s.netInterfaces, iface.idx)
+ if iface.hasCharts {
+ s.removeNetIfaceCharts(iface)
+ }
+ continue
+ }
+ if !iface.hasCharts {
+ iface.hasCharts = true
+ s.addNetIfaceCharts(iface)
+ }
+
+ px := fmt.Sprintf("net_iface_%s_", iface.ifName)
+ mx[px+"traffic_in"] = iface.ifHCInOctets * 8 / 1000 // kilobits
+ mx[px+"traffic_out"] = iface.ifHCOutOctets * 8 / 1000 // kilobits
+ mx[px+"ucast_in"] = iface.ifHCInUcastPkts
+ mx[px+"ucast_out"] = iface.ifHCOutUcastPkts
+ mx[px+"mcast_in"] = iface.ifHCInMulticastPkts
+ mx[px+"mcast_out"] = iface.ifHCOutMulticastPkts
+ mx[px+"bcast_in"] = iface.ifHCInBroadcastPkts
+ mx[px+"bcast_out"] = iface.ifHCOutBroadcastPkts
+ mx[px+"errors_in"] = iface.ifInErrors
+ mx[px+"errors_out"] = iface.ifOutErrors
+ mx[px+"discards_in"] = iface.ifInDiscards
+ mx[px+"discards_out"] = iface.ifOutDiscards
+
+ for _, v := range ifAdminStatusMapping {
+ mx[px+"admin_status_"+v] = 0
+ }
+ mx[px+"admin_status_"+ifAdminStatusMapping[iface.ifAdminStatus]] = 1
+
+ for _, v := range ifOperStatusMapping {
+ mx[px+"oper_status_"+v] = 0
+ }
+ mx[px+"oper_status_"+ifOperStatusMapping[iface.ifOperStatus]] = 1
+ }
+
+ if logger.Level.Enabled(slog.LevelDebug) {
+ ifaces := make([]*netInterface, 0, len(s.netInterfaces))
+ for _, nif := range s.netInterfaces {
+ ifaces = append(ifaces, nif)
+ }
+ sort.Slice(ifaces, func(i, j int) bool { return ifaces[i].ifIndex < ifaces[j].ifIndex })
+ for _, iface := range ifaces {
+ s.Debugf("found %s", iface)
+ }
+ }
+
+ return nil
+}
+
+func (s *SNMP) adjustMaxRepetitions() (bool, error) {
+ orig := s.Config.Options.MaxRepetitions
+ maxReps := s.Config.Options.MaxRepetitions
+
+ for {
+ v, err := s.walkAll(oidIfIndex)
+ if err != nil {
+ return false, err
+ }
+
+ if len(v) > 0 {
+ if orig != maxReps {
+ s.Infof("changed 'max_repetitions' %d => %d", orig, maxReps)
+ }
+ return true, nil
+ }
+
+ if maxReps > 5 {
+ maxReps = max(5, maxReps-5)
+ } else {
+ maxReps--
+ }
+
+ if maxReps <= 0 {
+ return false, nil
+ }
+
+ s.Debugf("no IF-MIB data returned, trying to decrease 'max_repetitions' to %d", maxReps)
+ s.snmpClient.SetMaxRepetitions(uint32(maxReps))
+ }
+}
+
+func (s *SNMP) walkAll(rootOid string) ([]gosnmp.SnmpPDU, error) {
+ if s.snmpClient.Version() == gosnmp.Version1 {
+ return s.snmpClient.WalkAll(rootOid)
+ }
+ return s.snmpClient.BulkWalkAll(rootOid)
+}
+
+func pduToString(pdu gosnmp.SnmpPDU) (string, error) {
+ switch pdu.Type {
+ case gosnmp.OctetString:
+ // TODO: this isn't reliable (e.g. for physAddress we need hex.EncodeToString())
+ bs, ok := pdu.Value.([]byte)
+ if !ok {
+ return "", fmt.Errorf("OctetString is not a []byte but %T", pdu.Value)
+ }
+ return strings.ToValidUTF8(string(bs), "�"), nil
+ case gosnmp.Counter32, gosnmp.Counter64, gosnmp.Integer, gosnmp.Gauge32:
+ return gosnmp.ToBigInt(pdu.Value).String(), nil
+ default:
+ return "", fmt.Errorf("unsupported type: '%v'", pdu.Type)
+ }
+}
+
+func pduToInt(pdu gosnmp.SnmpPDU) (int64, error) {
+ switch pdu.Type {
+ case gosnmp.Counter32, gosnmp.Counter64, gosnmp.Integer, gosnmp.Gauge32, gosnmp.TimeTicks:
+ return gosnmp.ToBigInt(pdu.Value).Int64(), nil
+ default:
+ return 0, fmt.Errorf("unsupported type: '%v'", pdu.Type)
+ }
+}
+
+//func physAddressToString(pdu gosnmp.SnmpPDU) (string, error) {
+// address, ok := pdu.Value.([]uint8)
+// if !ok {
+// return "", errors.New("physAddress is not a []uint8")
+// }
+// parts := make([]string, 0, 6)
+// for _, v := range address {
+// parts = append(parts, fmt.Sprintf("%02X", v))
+// }
+// return strings.Join(parts, ":"), nil
+//}
+
+func (s *SNMP) collectOIDs(mx map[string]int64) error {
+ for i, end := 0, 0; i < len(s.oids); i += s.Options.MaxOIDs {
+ if end = i + s.Options.MaxOIDs; end > len(s.oids) {
+ end = len(s.oids)
+ }
+
+ oids := s.oids[i:end]
+ resp, err := s.snmpClient.Get(oids)
+ if err != nil {
+ s.Errorf("cannot get SNMP data: %v", err)
+ return err
+ }
+
+ for i, oid := range oids {
+ if i >= len(resp.Variables) {
+ continue
+ }
+
+ switch v := resp.Variables[i]; v.Type {
+ case gosnmp.Boolean,
+ gosnmp.Counter32,
+ gosnmp.Counter64,
+ gosnmp.Gauge32,
+ gosnmp.TimeTicks,
+ gosnmp.Uinteger32,
+ gosnmp.OpaqueFloat,
+ gosnmp.OpaqueDouble,
+ gosnmp.Integer:
+ mx[oid] = gosnmp.ToBigInt(v.Value).Int64()
+ default:
+ s.Debugf("skipping OID '%s' (unsupported type '%s')", oid, v.Type)
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/snmp/config.go b/src/go/plugin/go.d/modules/snmp/config.go
new file mode 100644
index 000000000..631c47d39
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/config.go
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package snmp
+
+type (
+ Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Hostname string `yaml:"hostname" json:"hostname"`
+ Community string `yaml:"community,omitempty" json:"community"`
+ User User `yaml:"user,omitempty" json:"user"`
+ Options Options `yaml:"options,omitempty" json:"options"`
+ ChartsInput []ChartConfig `yaml:"charts,omitempty" json:"charts"`
+ NetworkInterfaceFilter NetworkInterfaceFilter `yaml:"network_interface_filter,omitempty" json:"network_interface_filter"`
+ }
+ NetworkInterfaceFilter struct {
+ ByName string `yaml:"by_name,omitempty" json:"by_name"`
+ ByType string `yaml:"by_type,omitempty" json:"by_type"`
+ }
+ User struct {
+ Name string `yaml:"name,omitempty" json:"name"`
+ SecurityLevel string `yaml:"level,omitempty" json:"level"`
+ AuthProto string `yaml:"auth_proto,omitempty" json:"auth_proto"`
+ AuthKey string `yaml:"auth_key,omitempty" json:"auth_key"`
+ PrivProto string `yaml:"priv_proto,omitempty" json:"priv_proto"`
+ PrivKey string `yaml:"priv_key,omitempty" json:"priv_key"`
+ }
+ Options struct {
+ Port int `yaml:"port,omitempty" json:"port"`
+ Retries int `yaml:"retries,omitempty" json:"retries"`
+ Timeout int `yaml:"timeout,omitempty" json:"timeout"`
+ Version string `yaml:"version,omitempty" json:"version"`
+ MaxOIDs int `yaml:"max_request_size,omitempty" json:"max_request_size"`
+ MaxRepetitions int `yaml:"max_repetitions,omitempty" json:"max_repetitions"`
+ }
+ ChartConfig struct {
+ ID string `yaml:"id" json:"id"`
+ Title string `yaml:"title" json:"title"`
+ Units string `yaml:"units" json:"units"`
+ Family string `yaml:"family" json:"family"`
+ Type string `yaml:"type" json:"type"`
+ Priority int `yaml:"priority" json:"priority"`
+ IndexRange []int `yaml:"multiply_range,omitempty" json:"multiply_range"`
+ Dimensions []DimensionConfig `yaml:"dimensions" json:"dimensions"`
+ }
+ DimensionConfig struct {
+ OID string `yaml:"oid" json:"oid"`
+ Name string `yaml:"name" json:"name"`
+ Algorithm string `yaml:"algorithm" json:"algorithm"`
+ Multiplier int `yaml:"multiplier" json:"multiplier"`
+ Divisor int `yaml:"divisor" json:"divisor"`
+ }
+)
diff --git a/src/go/plugin/go.d/modules/snmp/config_schema.json b/src/go/plugin/go.d/modules/snmp/config_schema.json
new file mode 100644
index 000000000..8deb4f6c8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/config_schema.json
@@ -0,0 +1,422 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "hostname": {
+ "title": "Hostname",
+ "description": "The hostname or IP address of the SNMP-enabled device.",
+ "type": "string"
+ },
+ "community": {
+ "title": "SNMPv1/2 community",
+ "description": "The SNMP community string for SNMPv1/v2c authentication.",
+ "type": "string",
+ "default": "public"
+ },
+ "network_interface_filter": {
+ "title": "Network interface filter",
+ "description": "Configuration for filtering specific network interfaces. If left empty, no interfaces will be filtered. You can filter interfaces by name or type using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "by_name": {
+ "title": "By Name",
+ "description": "Specify the interface name or a pattern to match against the [ifName](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.31.1.1.1.1) label.",
+ "type": "string"
+ },
+ "by_type": {
+ "title": "By Type",
+ "description": "Specify the interface type or a pattern to match against the [ifType](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.2.2.1.3) label.",
+ "type": "string"
+ }
+ }
+ },
+ "options": {
+ "title": "Options",
+ "description": "Configuration options for SNMP monitoring.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "version": {
+ "title": "SNMP version",
+ "type": "string",
+ "enum": [
+ "1",
+ "2c",
+ "3"
+ ],
+ "default": "2c"
+ },
+ "port": {
+ "title": "Port",
+ "description": "The port number on which the SNMP service is running.",
+ "type": "integer",
+ "exclusiveMinimum": 0,
+ "default": 161
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout duration in seconds for SNMP requests.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "retries": {
+ "title": "Retries",
+ "description": "The number of retries to attempt for SNMP requests.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 1
+ },
+ "max_repetitions": {
+ "title": "Max repetitions",
+ "description": "Controls how many SNMP variables to retrieve in a single GETBULK request.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 25
+ },
+ "max_request_size": {
+ "title": "Max OIDs",
+ "description": "The maximum number of OIDs allowed in a single GET request.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 60
+ }
+ },
+ "required": [
+ "version",
+ "port",
+ "retries",
+ "timeout",
+ "max_request_size"
+ ]
+ },
+ "user": {
+ "title": "SNMPv3 configuration",
+ "description": "Configuration options for SNMPv3 authentication and encryption.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "name": {
+ "title": "Username",
+ "description": "The username for SNMPv3 authentication.",
+ "type": "string"
+ },
+ "level": {
+ "title": "Security level",
+ "description": "Controls the security aspects of SNMPv3 communication, including authentication and encryption.",
+ "type": "string",
+ "enum": [
+ "none",
+ "authNoPriv",
+ "authPriv"
+ ],
+ "default": "authPriv"
+ },
+ "auth_proto": {
+ "title": "Authentication protocol",
+ "type": "string",
+ "enum": [
+ "none",
+ "md5",
+ "sha",
+ "sha224",
+ "sha256",
+ "sha384",
+ "sha512"
+ ],
+ "default": "sha512"
+ },
+ "auth_key": {
+ "title": "Authentication passphrase",
+ "type": "string"
+ },
+ "priv_proto": {
+ "title": "Privacy protocol",
+ "type": "string",
+ "enum": [
+ "none",
+ "des",
+ "aes",
+ "aes192",
+ "aes256",
+ "aes192c"
+ ],
+ "default": "aes192c"
+ },
+ "priv_key": {
+ "title": "Privacy passphrase",
+ "type": "string"
+ }
+ }
+ },
+ "charts": {
+ "title": "Charts configuration",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "items": {
+ "title": "Chart",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "id": {
+ "title": "ID",
+ "description": "Unique identifier for the chart.",
+ "type": "string"
+ },
+ "title": {
+ "title": "Title",
+ "description": "Title of the chart.",
+ "type": "string"
+ },
+ "units": {
+ "title": "Units",
+ "description": "Unit label for the vertical axis on charts.",
+ "type": "string"
+ },
+ "family": {
+ "title": "Family",
+ "description": "Subsection on the dashboard where the chart will be displayed.",
+ "type": "string"
+ },
+ "type": {
+ "title": "Type",
+ "type": "string",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ],
+ "default": "line"
+ },
+ "priority": {
+ "title": "Priority",
+ "description": "Rendering priority of the chart on the dashboard. Lower priority values will cause the chart to appear before those with higher priority values.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 90000
+ },
+ "multiply_range": {
+ "title": "OID index range",
+ "description": "Specifies the range of indexes used to create multiple charts. If set, a chart will be created for each index in the specified range. Each chart will have the index appended to the OID dimension.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Index",
+ "type": "integer",
+ "minimum": 0
+ },
+ "uniqueItems": true,
+ "maxItems": 2
+ },
+ "dimensions": {
+ "title": "Dimensions",
+ "description": "Configuration for dimensions of the chart.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "minItems": 1,
+ "items": {
+ "title": "Dimension configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "oid": {
+ "title": "OID",
+ "description": "SNMP OID.",
+ "type": "string"
+ },
+ "name": {
+ "title": "Dimension",
+ "description": "Name of the dimension.",
+ "type": "string"
+ },
+ "algorithm": {
+ "title": "Algorithm",
+ "description": "Algorithm of the dimension.",
+ "type": "string",
+ "enum": [
+ "absolute",
+ "incremental"
+ ],
+ "default": "absolute"
+ },
+ "multiplier": {
+ "title": "Multiplier",
+ "description": "The value by which the collected value is multiplied.",
+ "type": "integer",
+ "not": {
+ "const": 0
+ },
+ "default": 1
+ },
+ "divisor": {
+ "title": "Divisor",
+ "description": "The value by which the collected value is divided.",
+ "type": "integer",
+ "not": {
+ "const": 0
+ },
+ "default": 1
+ }
+ },
+ "required": [
+ "oid",
+ "name",
+ "algorithm",
+ "multiplier",
+ "divisor"
+ ]
+ }
+ }
+ },
+ "required": [
+ "id",
+ "title",
+ "units",
+ "family",
+ "type",
+ "priority",
+ "dimensions"
+ ]
+ }
+ }
+ },
+ "required": [
+ "hostname",
+ "community",
+ "options"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "network_interface_filter": {
+ "ui:collapsible": true
+ },
+ "community": {
+ "ui:widget": "password"
+ },
+ "options": {
+ "version": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "max_repetitions": {
+ "ui:help": "A higher value retrieves more data in fewer round trips, potentially improving efficiency. This reduces network overhead compared to sending multiple individual requests. **Important**: Setting a value too high might cause the target device to return no data."
+ }
+ },
+ "user": {
+ "name": {
+ "ui:widget": "password"
+ },
+ "level": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "auth_proto": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "priv_proto": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ }
+ },
+ "charts": {
+ "items": {
+ "ui:collapsible": true,
+ "type": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "multiply_range": {
+ "ui:listFlavour": "list"
+ },
+ "dimensions": {
+ "items": {
+ "ui:collapsible": true,
+ "algorithm": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ }
+ }
+ }
+ }
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "hostname",
+ "community"
+ ]
+ },
+ {
+ "title": "Options",
+ "fields": [
+ "network_interface_filter",
+ "options"
+ ]
+ },
+ {
+ "title": "SNMPv3",
+ "fields": [
+ "user"
+ ]
+ },
+ {
+ "title": "Charts",
+ "fields": [
+ "charts"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/snmp/init.go b/src/go/plugin/go.d/modules/snmp/init.go
new file mode 100644
index 000000000..acde4b9b8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/init.go
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package snmp
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+ "github.com/gosnmp/gosnmp"
+)
+
+func (s *SNMP) validateConfig() error {
+ if s.Hostname == "" {
+ return errors.New("SNMP hostname is required")
+ }
+ return nil
+}
+
+func (s *SNMP) initSNMPClient() (gosnmp.Handler, error) {
+ client := s.newSnmpClient()
+
+ client.SetTarget(s.Hostname)
+ client.SetPort(uint16(s.Options.Port))
+ client.SetRetries(s.Options.Retries)
+ client.SetTimeout(time.Duration(s.Options.Timeout) * time.Second)
+ client.SetMaxOids(s.Options.MaxOIDs)
+ client.SetMaxRepetitions(uint32(s.Options.MaxRepetitions))
+
+ ver := parseSNMPVersion(s.Options.Version)
+ comm := s.Community
+
+ switch ver {
+ case gosnmp.Version1:
+ client.SetCommunity(comm)
+ client.SetVersion(gosnmp.Version1)
+ case gosnmp.Version2c:
+ client.SetCommunity(comm)
+ client.SetVersion(gosnmp.Version2c)
+ case gosnmp.Version3:
+ if s.User.Name == "" {
+ return nil, errors.New("username is required for SNMPv3")
+ }
+ client.SetVersion(gosnmp.Version3)
+ client.SetSecurityModel(gosnmp.UserSecurityModel)
+ client.SetMsgFlags(parseSNMPv3SecurityLevel(s.User.SecurityLevel))
+ client.SetSecurityParameters(&gosnmp.UsmSecurityParameters{
+ UserName: s.User.Name,
+ AuthenticationProtocol: parseSNMPv3AuthProtocol(s.User.AuthProto),
+ AuthenticationPassphrase: s.User.AuthKey,
+ PrivacyProtocol: parseSNMPv3PrivProtocol(s.User.PrivProto),
+ PrivacyPassphrase: s.User.PrivKey,
+ })
+ default:
+ return nil, fmt.Errorf("invalid SNMP version: %s", s.Options.Version)
+ }
+
+ s.Info(snmpClientConnInfo(client))
+
+ return client, nil
+}
+
+func (s *SNMP) initNetIfaceFilters() (matcher.Matcher, matcher.Matcher, error) {
+ byName, byType := matcher.FALSE(), matcher.FALSE()
+
+ if v := s.NetworkInterfaceFilter.ByName; v != "" {
+ m, err := matcher.NewSimplePatternsMatcher(v)
+ if err != nil {
+ return nil, nil, err
+ }
+ byName = m
+ }
+
+ if v := s.NetworkInterfaceFilter.ByType; v != "" {
+ m, err := matcher.NewSimplePatternsMatcher(v)
+ if err != nil {
+ return nil, nil, err
+ }
+ byType = m
+ }
+
+ return byName, byType, nil
+}
+
+func (s *SNMP) initOIDs() (oids []string) {
+ for _, c := range *s.charts {
+ for _, d := range c.Dims {
+ oids = append(oids, d.ID)
+ }
+ }
+ return oids
+}
+
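+// parseSNMPVersion maps the configured version string to a gosnmp.SnmpVersion, falling back to SNMPv2c for unrecognized values.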
+func parseSNMPVersion(version string) gosnmp.SnmpVersion {
+ switch version {
+ case "0", "1":
+ return gosnmp.Version1
+ case "2", "2c", "":
+ return gosnmp.Version2c
+ case "3":
+ return gosnmp.Version3
+ default:
+ return gosnmp.Version2c
+ }
+}
+
+func parseSNMPv3SecurityLevel(level string) gosnmp.SnmpV3MsgFlags {
+ switch level {
+ case "1", "none", "noAuthNoPriv", "":
+ return gosnmp.NoAuthNoPriv
+ case "2", "authNoPriv":
+ return gosnmp.AuthNoPriv
+ case "3", "authPriv":
+ return gosnmp.AuthPriv
+ default:
+ return gosnmp.NoAuthNoPriv
+ }
+}
+
+func parseSNMPv3AuthProtocol(protocol string) gosnmp.SnmpV3AuthProtocol {
+ switch protocol {
+ case "1", "none", "noAuth", "":
+ return gosnmp.NoAuth
+ case "2", "md5":
+ return gosnmp.MD5
+ case "3", "sha":
+ return gosnmp.SHA
+ case "4", "sha224":
+ return gosnmp.SHA224
+ case "5", "sha256":
+ return gosnmp.SHA256
+ case "6", "sha384":
+ return gosnmp.SHA384
+ case "7", "sha512":
+ return gosnmp.SHA512
+ default:
+ return gosnmp.NoAuth
+ }
+}
+
+func parseSNMPv3PrivProtocol(protocol string) gosnmp.SnmpV3PrivProtocol {
+ switch protocol {
+ case "1", "none", "noPriv", "":
+ return gosnmp.NoPriv
+ case "2", "des":
+ return gosnmp.DES
+ case "3", "aes":
+ return gosnmp.AES
+ case "4", "aes192":
+ return gosnmp.AES192
+ case "5", "aes256":
+ return gosnmp.AES256
+ case "6", "aes192c":
+ return gosnmp.AES192C
+ case "7", "aes256c":
+ return gosnmp.AES256C
+ default:
+ return gosnmp.NoPriv
+ }
+}
+
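+// snmpClientConnInfo returns a short, human-readable summary of the SNMP client connection settings for logging.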
+func snmpClientConnInfo(c gosnmp.Handler) string {
+ var info strings.Builder
+ info.WriteString(fmt.Sprintf("hostname='%s',port='%d',snmp_version='%s'", c.Target(), c.Port(), c.Version()))
+ switch c.Version() {
+ case gosnmp.Version1, gosnmp.Version2c:
+ info.WriteString(fmt.Sprintf(",community='%s'", c.Community()))
+ case gosnmp.Version3:
+ info.WriteString(fmt.Sprintf(",security_level='%d,%s'", c.MsgFlags(), c.SecurityParameters().Description()))
+ }
+ return info.String()
+}
diff --git a/src/go/plugin/go.d/modules/snmp/integrations/snmp_devices.md b/src/go/plugin/go.d/modules/snmp/integrations/snmp_devices.md
new file mode 100644
index 000000000..a2431b006
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/integrations/snmp_devices.md
@@ -0,0 +1,496 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/snmp/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/snmp/metadata.yaml"
+sidebar_label: "SNMP devices"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Generic Collecting Metrics"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# SNMP devices
+
+
+<img src="https://netdata.cloud/img/snmp.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: snmp
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This SNMP collector discovers and gathers statistics for network interfaces on SNMP-enabled devices:
+
+- Traffic
+- Packets (unicast, multicast, broadcast)
+- Errors
+- Discards
+- Administrative and operational status
+
+Additionally, it collects overall device uptime.
+
+It is compatible with all SNMP versions (v1, v2c, and v3) and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package.
+
+**For advanced users**:
+
+- You can manually specify custom OIDs (Object Identifiers) to retrieve specific data points beyond the default metrics.
+- However, defining custom charts with dimensions for these OIDs requires manual configuration.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+**Device limitations**: Many SNMP switches and routers have limited processing power. They might not be able to report data as frequently as desired. You can monitor response times using go.d.plugin in debug mode to identify potential bottlenecks.
+
+**Concurrent access**: If multiple collectors or tools access the same SNMP device simultaneously, data points might be skipped. This is a limitation of the device itself, not this collector. To mitigate this, consider increasing the collection interval (update_every) to reduce the frequency of requests.
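+
+As a minimal sketch (the device address and interval below are illustrative), a job for a slow device can simply raise `update_every`:
+
+```yaml
+jobs:
+  - name: slow_switch        # hypothetical job name
+    hostname: 192.0.2.10     # hypothetical device address
+    community: public
+    update_every: 30         # poll every 30 seconds instead of the default 10
+```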
+
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The metrics that will be collected are defined in the configuration file.
+
+### Per snmp device
+
+These metrics refer to the SNMP device.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| sysName | SNMP device's system name (OID: [1.3.6.1.2.1.1.5](https://oidref.com/1.3.6.1.2.1.1.5)). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| snmp.device_uptime | uptime | seconds |
+
+### Per network interface
+
+Network interfaces of the SNMP device being monitored. These metrics refer to each interface.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| sysName | SNMP device's system name (OID: [1.3.6.1.2.1.1.5](https://oidref.com/1.3.6.1.2.1.1.5)). |
+| ifDescr | Network interface description (OID: [1.3.6.1.2.1.2.2.1.2](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.2.2.1.2)). |
+| ifName | Network interface name (OID: [1.3.6.1.2.1.31.1.1.1.1](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.31.1.1.1.1)). |
+| ifType | Network interface type (OID: [1.3.6.1.2.1.2.2.1.3](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.2.2.1.3)). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| snmp.device_net_interface_traffic | received, sent | kilobits/s |
+| snmp.device_net_interface_unicast | received, sent | packets/s |
+| snmp.device_net_interface_multicast | received, sent | packets/s |
+| snmp.device_net_interface_broadcast | received, sent | packets/s |
+| snmp.device_net_interface_errors | inbound, outbound | errors/s |
+| snmp.device_net_interface_discards | inbound, outbound | discards/s |
+| snmp.device_net_interface_admin_status | up, down, testing | status |
+| snmp.device_net_interface_oper_status | up, down, testing, unknown, dormant, not_present, lower_layer_down | status |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/snmp.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/snmp.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
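+A minimal sketch (values are illustrative) of how the global options relate to per-job settings in `go.d/snmp.conf`:
+
+```yaml
+# go.d/snmp.conf
+update_every: 30           # global default for all jobs in this file
+autodetection_retry: 60    # global default: recheck failed jobs every 60 seconds
+
+jobs:
+  - name: switch
+    hostname: 192.0.2.1
+    community: public
+    update_every: 10       # per-job value overrides the global default
+```
+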
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| hostname | Target IPv4 address. | | yes |
+| community | SNMPv1/2 community string. | public | no |
+| options.version | SNMP version. Available versions: 1, 2, 3. | 2 | no |
+| options.port | Target port. | 161 | no |
+| options.retries | Retries to attempt. | 1 | no |
+| options.timeout | SNMP request/response timeout. | 5 | no |
+| options.max_repetitions | Controls how many SNMP variables to retrieve in a single GETBULK request. | 25 | no |
+| options.max_request_size | Maximum number of OIDs allowed in a single GET request. | 60 | no |
+| network_interface_filter.by_name | Filter interfaces by their names using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |
+| network_interface_filter.by_type | Filter interfaces by their types using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns). | | no |
+| user.name | SNMPv3 user name. | | no |
+| user.level | Security level of SNMPv3 messages. | | no |
+| user.auth_proto | Authentication protocol for SNMPv3 messages. | | no |
+| user.auth_key | Authentication protocol pass phrase. | | no |
+| user.priv_proto | Privacy protocol for SNMPv3 messages. | | no |
+| user.priv_key | Privacy protocol pass phrase. | | no |
+| charts | List of charts. | [] | yes |
+| charts.id | Chart ID. Used to uniquely identify the chart. | | yes |
+| charts.title | Chart title. | Untitled chart | no |
+| charts.units | Chart units. | num | no |
+| charts.family | Chart family. | charts.id | no |
+| charts.type | Chart type (line, area, stacked). | line | no |
+| charts.priority | Chart priority. | 70000 | no |
+| charts.multiply_range | Used when you need to define many charts using incremental OIDs. | [] | no |
+| charts.dimensions | List of chart dimensions. | [] | yes |
+| charts.dimensions.oid | Collected metric OID. | | yes |
+| charts.dimensions.name | Dimension name. | | yes |
+| charts.dimensions.algorithm | Dimension algorithm (absolute, incremental). | absolute | no |
+| charts.dimensions.multiplier | Collected value multiplier, applied to convert it properly to units. | 1 | no |
+| charts.dimensions.divisor | Collected value divisor, applied to convert it properly to units. | 1 | no |
+
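+##### network_interface_filter
+
+A minimal sketch (pattern values are illustrative) showing where the interface filters go in a job definition; both fields accept [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns) matched against the interface name and type:
+
+```yaml
+jobs:
+  - name: switch
+    hostname: 192.0.2.1
+    community: public
+    network_interface_filter:
+      by_name: "ether*"          # pattern matched against interface names
+      by_type: "ethernetCsmacd"  # pattern matched against interface types
+```
+
+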
+##### user.level
+
+The security level of SNMPv3 messages, as defined in RFC 3414 (`user.level`):
+
+| String value | Int value | Description |
+|:------------:|:---------:|------------------------------------------|
+| none | 1 | no message authentication or encryption |
+| authNoPriv | 2 | message authentication and no encryption |
+| authPriv | 3 | message authentication and encryption |
+
+
+##### user.auth_proto
+
+The digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):
+
+| String value | Int value | Description |
+|:------------:|:---------:|-------------------------------------------|
+| none | 1 | no message authentication |
+| md5 | 2 | MD5 message authentication (HMAC-MD5-96) |
+| sha | 3 | SHA message authentication (HMAC-SHA-96) |
+| sha224 | 4 | SHA message authentication (HMAC-SHA-224) |
+| sha256 | 5 | SHA message authentication (HMAC-SHA-256) |
+| sha384 | 6 | SHA message authentication (HMAC-SHA-384) |
+| sha512 | 7 | SHA message authentication (HMAC-SHA-512) |
+
+
+##### user.priv_proto
+
+The encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):
+
+| String value | Int value | Description |
+|:------------:|:---------:|-------------------------------------------------------------------------|
+| none | 1 | no message encryption |
+| des | 2 | DES encryption (CBC-DES) |
+| aes | 3 | 128-bit AES encryption (CFB-AES-128) |
+| aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with "Blumenthal" key localization |
+| aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with "Blumenthal" key localization |
+| aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with "Reeder" key localization |
+| aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with "Reeder" key localization |
+
+
+</details>
+
+#### Examples
+
+##### SNMPv1/2
+
+In this example:
+
+- the SNMP device is `192.0.2.1`.
+- the SNMP version is `2`.
+- the SNMP community is `public`.
+- we will update the values every 10 seconds.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: switch
+ update_every: 10
+ hostname: 192.0.2.1
+ community: public
+ options:
+ version: 2
+
+```
+</details>
+
+##### SNMPv3
+
+To use SNMPv3:
+
+- use `user` instead of `community`.
+- set `options.version` to 3.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: switch
+ update_every: 10
+ hostname: 192.0.2.1
+ options:
+ version: 3
+ user:
+ name: username
+ level: authPriv
+ auth_proto: sha256
+ auth_key: auth_protocol_passphrase
+ priv_proto: aes256
+ priv_key: priv_protocol_passphrase
+
+```
+</details>
+
+##### Custom OIDs
+
+In this example:
+
+- the SNMP device is `192.0.2.1`.
+- the SNMP version is `2`.
+- the SNMP community is `public`.
+- we will update the values every 10 seconds.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: switch
+ update_every: 10
+ hostname: 192.0.2.1
+ community: public
+ options:
+ version: 2
+ charts:
+ - id: "bandwidth_port1"
+ title: "Switch Bandwidth for port 1"
+ units: "kilobits/s"
+ type: "area"
+ family: "ports"
+ dimensions:
+ - name: "in"
+ oid: "1.3.6.1.2.1.2.2.1.10.1"
+ algorithm: "incremental"
+ multiplier: 8
+ divisor: 1000
+ - name: "out"
+ oid: "1.3.6.1.2.1.2.2.1.16.1"
+ multiplier: -8
+ divisor: 1000
+ - id: "bandwidth_port2"
+ title: "Switch Bandwidth for port 2"
+ units: "kilobits/s"
+ type: "area"
+ family: "ports"
+ dimensions:
+ - name: "in"
+ oid: "1.3.6.1.2.1.2.2.1.10.2"
+ algorithm: "incremental"
+ multiplier: 8
+ divisor: 1000
+ - name: "out"
+ oid: "1.3.6.1.2.1.2.2.1.16.2"
+ multiplier: -8
+ divisor: 1000
+
+```
+</details>
+
+##### Custom OIDs with multiply range
+
+If you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option.
+
+This is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.
+
+Each of the 24 new charts will have its index (1-24) appended to:
+
+- its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.
+- its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.
+- its `oid` (for all dimensions), i.e. the `in` dimension OID will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.
+- its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: switch
+ update_every: 10
+ hostname: "192.0.2.1"
+ community: public
+ options:
+ version: 2
+ charts:
+ - id: "bandwidth_port"
+ title: "Switch Bandwidth for port"
+ units: "kilobits/s"
+ type: "area"
+ family: "ports"
+ multiply_range: [1, 24]
+ dimensions:
+ - name: "in"
+ oid: "1.3.6.1.2.1.2.2.1.10"
+ algorithm: "incremental"
+ multiplier: 8
+ divisor: 1000
+ - name: "out"
+ oid: "1.3.6.1.2.1.2.2.1.16"
+ multiplier: -8
+ divisor: 1000
+
+```
+</details>
+
+##### Multiple devices with a common configuration
+
+YAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases).
+The `&` defines and names an anchor, and the `*` references it. `<<: *anchor` means: inject the anchor's contents, then override or extend them. We can use anchors to share a common configuration across multiple devices.
+
+The following example:
+
+- adds an `anchor` to the first job.
+- injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.
+- injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - &anchor
+ name: switch
+ update_every: 10
+ hostname: "192.0.2.1"
+ community: public
+ options:
+ version: 2
+ charts:
+ - id: "bandwidth_port1"
+ title: "Switch Bandwidth for port 1"
+ units: "kilobits/s"
+ type: "area"
+ family: "ports"
+ dimensions:
+ - name: "in"
+ oid: "1.3.6.1.2.1.2.2.1.10.1"
+ algorithm: "incremental"
+ multiplier: 8
+ divisor: 1000
+ - name: "out"
+ oid: "1.3.6.1.2.1.2.2.1.16.1"
+ multiplier: -8
+ divisor: 1000
+ - <<: *anchor
+ name: switch2
+ hostname: "192.0.2.2"
+ - <<: *anchor
+ name: switch3
+ hostname: "192.0.2.3"
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `snmp` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m snmp
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `snmp` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep snmp
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep snmp /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep snmp
+```
+
+
diff --git a/src/go/plugin/go.d/modules/snmp/metadata.yaml b/src/go/plugin/go.d/modules/snmp/metadata.yaml
new file mode 100644
index 000000000..0475a2f21
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/metadata.yaml
@@ -0,0 +1,496 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-snmp
+ plugin_name: go.d.plugin
+ module_name: snmp
+ monitored_instance:
+ name: SNMP devices
+ link: ""
+ icon_filename: snmp.png
+ categories:
+ - data-collection.generic-data-collection
+ keywords:
+ - snmp
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This SNMP collector discovers and gathers statistics for network interfaces on SNMP-enabled devices:
+
+ - Traffic
+ - Packets (unicast, multicast, broadcast)
+ - Errors
+ - Discards
+ - Administrative and operational status
+
+ Additionally, it collects overall device uptime.
+
+ It is compatible with all SNMP versions (v1, v2c, and v3) and uses the [gosnmp](https://github.com/gosnmp/gosnmp) package.
+
+ **For advanced users**:
+
+ - You can manually specify custom OIDs (Object Identifiers) to retrieve specific data points beyond the default metrics.
+ - However, defining custom charts with dimensions for these OIDs requires manual configuration.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: |
+ **Device limitations**: Many SNMP switches and routers have limited processing power. They might not be able to report data as frequently as desired. You can monitor response times using go.d.plugin in debug mode to identify potential bottlenecks.
+
+ **Concurrent access**: If multiple collectors or tools access the same SNMP device simultaneously, data points might be skipped. This is a limitation of the device itself, not this collector. To mitigate this, consider increasing the collection interval (update_every) to reduce the frequency of requests.
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/snmp.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: hostname
+ description: Target ipv4 address.
+ default_value: ""
+ required: true
+ - name: community
+ description: SNMPv1/2 community string.
+ default_value: public
+ required: false
+ - name: options.version
+ description: "SNMP version. Available versions: 1, 2, 3."
+ default_value: 2
+ required: false
+ - name: options.port
+ description: Target port.
+ default_value: 161
+ required: false
+ - name: options.retries
+ description: Retries to attempt.
+ default_value: 1
+ required: false
+ - name: options.timeout
+ description: SNMP request/response timeout.
+ default_value: 5
+ required: false
+ - name: options.max_repetitions
+ description: Controls how many SNMP variables to retrieve in a single GETBULK request.
+ default_value: 25
+ required: false
+ - name: options.max_request_size
+ description: Maximum number of OIDs allowed in a single GET request.
+ default_value: 60
+ required: false
+ - name: network_interface_filter.by_name
+ description: "Filter interfaces by their names using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns)."
+ default_value: ""
+ required: false
+ - name: network_interface_filter.by_type
+ description: "Filter interfaces by their types using [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns)."
+ default_value: ""
+ required: false
+ - name: user.name
+ description: SNMPv3 user name.
+ default_value: ""
+ required: false
+ - name: user.level
+ description: Security level of SNMPv3 messages.
+ default_value: ""
+ required: false
+ detailed_description: |
+ The security of an SNMPv3 message as per RFC 3414 (`user.level`):
+
+ | String value | Int value | Description |
+ |:------------:|:---------:|------------------------------------------|
+ | none | 1 | no message authentication or encryption |
+ | authNoPriv | 2 | message authentication and no encryption |
+ | authPriv | 3 | message authentication and encryption |
+ - name: user.auth_proto
+ description: Authentication protocol for SNMPv3 messages.
+ default_value: ""
+ required: false
+ detailed_description: |
+ The digest algorithm for SNMPv3 messages that require authentication (`user.auth_proto`):
+
+ | String value | Int value | Description |
+ |:------------:|:---------:|-------------------------------------------|
+ | none | 1 | no message authentication |
+ | md5 | 2 | MD5 message authentication (HMAC-MD5-96) |
+ | sha | 3 | SHA message authentication (HMAC-SHA-96) |
+ | sha224 | 4 | SHA message authentication (HMAC-SHA-224) |
+ | sha256 | 5 | SHA message authentication (HMAC-SHA-256) |
+ | sha384 | 6 | SHA message authentication (HMAC-SHA-384) |
+ | sha512 | 7 | SHA message authentication (HMAC-SHA-512) |
+ - name: user.auth_key
+ description: Authentication protocol pass phrase.
+ default_value: ""
+ required: false
+ - name: user.priv_proto
+ description: Privacy protocol for SNMPv3 messages.
+ default_value: ""
+ required: false
+ detailed_description: |
+ The encryption algorithm for SNMPv3 messages that require privacy (`user.priv_proto`):
+
+ | String value | Int value | Description |
+ |:------------:|:---------:|-------------------------------------------------------------------------|
+ | none | 1 | no message encryption |
+ | des | 2 | DES encryption (CBC-DES) |
+ | aes | 3 | 128-bit AES encryption (CFB-AES-128) |
+ | aes192 | 4 | 192-bit AES encryption (CFB-AES-192) with "Blumenthal" key localization |
+ | aes256 | 5 | 256-bit AES encryption (CFB-AES-256) with "Blumenthal" key localization |
+ | aes192c | 6 | 192-bit AES encryption (CFB-AES-192) with "Reeder" key localization |
+ | aes256c | 7 | 256-bit AES encryption (CFB-AES-256) with "Reeder" key localization |
+ - name: user.priv_key
+ description: Privacy protocol pass phrase.
+ default_value: ""
+ required: false
+ - name: charts
+ description: List of charts.
+ default_value: "[]"
+ required: true
+ - name: charts.id
+ description: Chart ID. Used to uniquely identify the chart.
+ default_value: ""
+ required: true
+ - name: charts.title
+ description: Chart title.
+ default_value: "Untitled chart"
+ required: false
+ - name: charts.units
+ description: Chart units.
+ default_value: num
+ required: false
+ - name: charts.family
+ description: Chart family.
+ default_value: charts.id
+ required: false
+ - name: charts.type
+ description: Chart type (line, area, stacked).
+ default_value: line
+ required: false
+ - name: charts.priority
+ description: Chart priority.
+ default_value: 70000
+ required: false
+ - name: charts.multiply_range
+ description: Used when you need to define many charts using incremental OIDs.
+ default_value: "[]"
+ required: false
+ - name: charts.dimensions
+ description: List of chart dimensions.
+ default_value: "[]"
+ required: true
+ - name: charts.dimensions.oid
+ description: Collected metric OID.
+ default_value: ""
+ required: true
+ - name: charts.dimensions.name
+ description: Dimension name.
+ default_value: ""
+ required: true
+ - name: charts.dimensions.algorithm
+ description: Dimension algorithm (absolute, incremental).
+ default_value: absolute
+ required: false
+ - name: charts.dimensions.multiplier
+ description: Collected value multiplier, applied to convert it properly to units.
+ default_value: 1
+ required: false
+ - name: charts.dimensions.divisor
+ description: Collected value divisor, applied to convert it properly to units.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: SNMPv1/2
+ description: |
+ In this example:
+
+ - the SNMP device is `192.0.2.1`.
+ - the SNMP version is `2`.
+ - the SNMP community is `public`.
+ - we will update the values every 10 seconds.
+ config: |
+ jobs:
+ - name: switch
+ update_every: 10
+ hostname: 192.0.2.1
+ community: public
+ options:
+ version: 2
+ - name: SNMPv3
+ description: |
+ To use SNMPv3:
+
+ - use `user` instead of `community`.
+ - set `options.version` to 3.
+ config: |
+ jobs:
+ - name: switch
+ update_every: 10
+ hostname: 192.0.2.1
+ options:
+ version: 3
+ user:
+ name: username
+ level: authPriv
+ auth_proto: sha256
+ auth_key: auth_protocol_passphrase
+ priv_proto: aes256
+ priv_key: priv_protocol_passphrase
+ - name: Custom OIDs
+ description: |
+ In this example:
+
+ - the SNMP device is `192.0.2.1`.
+ - the SNMP version is `2`.
+ - the SNMP community is `public`.
+ - we will update the values every 10 seconds.
+ config: |
+ jobs:
+ - name: switch
+ update_every: 10
+ hostname: 192.0.2.1
+ community: public
+ options:
+ version: 2
+ charts:
+ - id: "bandwidth_port1"
+ title: "Switch Bandwidth for port 1"
+ units: "kilobits/s"
+ type: "area"
+ family: "ports"
+ dimensions:
+ - name: "in"
+ oid: "1.3.6.1.2.1.2.2.1.10.1"
+ algorithm: "incremental"
+ multiplier: 8
+ divisor: 1000
+ - name: "out"
+ oid: "1.3.6.1.2.1.2.2.1.16.1"
+ multiplier: -8
+ divisor: 1000
+ - id: "bandwidth_port2"
+ title: "Switch Bandwidth for port 2"
+ units: "kilobits/s"
+ type: "area"
+ family: "ports"
+ dimensions:
+ - name: "in"
+ oid: "1.3.6.1.2.1.2.2.1.10.2"
+ algorithm: "incremental"
+ multiplier: 8
+ divisor: 1000
+ - name: "out"
+ oid: "1.3.6.1.2.1.2.2.1.16.2"
+ multiplier: -8
+ divisor: 1000
+ - name: Custom OIDs with multiply range
+ description: |
+ If you need to define many charts using incremental OIDs, you can use the `charts.multiply_range` option.
+
+ This is like the SNMPv1/2 example, but the option will multiply the current chart from 1 to 24 inclusive, producing 24 charts in total for the 24 ports of the switch `192.0.2.1`.
+
+ Each of the 24 new charts will have its id (1-24) appended at:
+
+ - its chart unique `id`, i.e. `bandwidth_port_1` to `bandwidth_port_24`.
+ - its title, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`.
+ - its `oid` (for all dimensions), i.e. dimension in will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`.
+ - its `priority` will be incremented for each chart so that the charts will appear on the dashboard in this order.
+ config: |
+ jobs:
+ - name: switch
+ update_every: 10
+ hostname: "192.0.2.1"
+ community: public
+ options:
+ version: 2
+ charts:
+ - id: "bandwidth_port"
+ title: "Switch Bandwidth for port"
+ units: "kilobits/s"
+ type: "area"
+ family: "ports"
+ multiply_range: [1, 24]
+ dimensions:
+ - name: "in"
+ oid: "1.3.6.1.2.1.2.2.1.10"
+ algorithm: "incremental"
+ multiplier: 8
+ divisor: 1000
+ - name: "out"
+ oid: "1.3.6.1.2.1.2.2.1.16"
+ multiplier: -8
+ divisor: 1000
+ - name: Multiple devices with a common configuration
+ description: |
+ YAML supports [anchors](https://yaml.org/spec/1.2.2/#3222-anchors-and-aliases).
+ The `&` defines and names an anchor, and the `*` uses it. `<<: *anchor` means, inject the anchor, then extend. We can use anchors to share the common configuration for multiple devices.
+
+ The following example:
+
+ - adds an `anchor` to the first job.
+ - injects (copies) the first job configuration to the second and updates `name` and `hostname` parameters.
+ - injects (copies) the first job configuration to the third and updates `name` and `hostname` parameters.
+ config: |
+ jobs:
+ - &anchor
+ name: switch
+ update_every: 10
+ hostname: "192.0.2.1"
+ community: public
+ options:
+ version: 2
+ charts:
+ - id: "bandwidth_port1"
+ title: "Switch Bandwidth for port 1"
+ units: "kilobits/s"
+ type: "area"
+ family: "ports"
+ dimensions:
+ - name: "in"
+ oid: "1.3.6.1.2.1.2.2.1.10.1"
+ algorithm: "incremental"
+ multiplier: 8
+ divisor: 1000
+ - name: "out"
+ oid: "1.3.6.1.2.1.2.2.1.16.1"
+ multiplier: -8
+ divisor: 1000
+ - <<: *anchor
+ name: switch2
+ hostname: "192.0.2.2"
+ - <<: *anchor
+ name: switch3
+ hostname: "192.0.2.3"
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: The metrics that will be collected are defined in the configuration file.
+ availability: []
+ scopes:
+ - name: snmp device
+ description: These metrics refer to the SNMP device.
+ labels:
+ - name: sysName
+ description: "SNMP device's system name (OID: [1.3.6.1.2.1.1.5](https://oidref.com/1.3.6.1.2.1.1.5))."
+ metrics:
+ - name: snmp.device_uptime
+ description: SNMP device uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: network interface
+ description: Network interfaces of the SNMP device being monitored. These metrics refer to each interface.
+ labels:
+ - name: sysName
+ description: "SNMP device's system name (OID: [1.3.6.1.2.1.1.5](https://oidref.com/1.3.6.1.2.1.1.5))."
+ - name: ifDescr
+ description: "Network interface description (OID: [1.3.6.1.2.1.2.2.1.2](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.2.2.1.2))."
+ - name: ifName
+ description: "Network interface name (OID: [1.3.6.1.2.1.31.1.1.1.1](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.31.1.1.1.1))."
+ - name: ifType
+ description: "Network interface type (OID: [1.3.6.1.2.1.2.2.1.3](https://cric.grenoble.cnrs.fr/Administrateurs/Outils/MIBS/?oid=1.3.6.1.2.1.2.2.1.3))."
+ metrics:
+ - name: snmp.device_net_interface_traffic
+ description: SNMP device network interface traffic
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: snmp.device_net_interface_unicast
+ description: SNMP device network interface unicast packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: snmp.device_net_interface_multicast
+ description: SNMP device network interface multicast packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: snmp.device_net_interface_broadcast
+ description: SNMP device network interface broadcast packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: snmp.device_net_interface_errors
+ description: SNMP device network interface errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: snmp.device_net_interface_discards
+ description: SNMP device network interface discards
+ unit: discards/s
+ chart_type: line
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: snmp.device_net_interface_admin_status
+ description: SNMP device network interface administrative status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: up
+ - name: down
+ - name: testing
+ - name: snmp.device_net_interface_oper_status
+ description: SNMP device network interface operational status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: up
+ - name: down
+ - name: testing
+ - name: unknown
+ - name: dormant
+ - name: not_present
+ - name: lower_layer_down
diff --git a/src/go/plugin/go.d/modules/snmp/netif.go b/src/go/plugin/go.d/modules/snmp/netif.go
new file mode 100644
index 000000000..1345e5ee4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/netif.go
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package snmp
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ oidIfIndex = "1.3.6.1.2.1.2.2.1.1"
+ oidIfDescr = "1.3.6.1.2.1.2.2.1.2"
+ oidIfType = "1.3.6.1.2.1.2.2.1.3"
+ oidIfMtu = "1.3.6.1.2.1.2.2.1.4"
+ oidIfSpeed = "1.3.6.1.2.1.2.2.1.5"
+ oidIfPhysAddress = "1.3.6.1.2.1.2.2.1.6"
+ oidIfAdminStatus = "1.3.6.1.2.1.2.2.1.7"
+ oidIfOperStatus = "1.3.6.1.2.1.2.2.1.8"
+ oidIfLastChange = "1.3.6.1.2.1.2.2.1.9"
+ oidIfInOctets = "1.3.6.1.2.1.2.2.1.10"
+ oidIfInUcastPkts = "1.3.6.1.2.1.2.2.1.11"
+ oidIfInNUcastPkts = "1.3.6.1.2.1.2.2.1.12"
+ oidIfInDiscards = "1.3.6.1.2.1.2.2.1.13"
+ oidIfInErrors = "1.3.6.1.2.1.2.2.1.14"
+ oidIfInUnknownProtos = "1.3.6.1.2.1.2.2.1.15"
+ oidIfOutOctets = "1.3.6.1.2.1.2.2.1.16"
+ oidIfOutUcastPkts = "1.3.6.1.2.1.2.2.1.17"
+ oidIfOutNUcastPkts = "1.3.6.1.2.1.2.2.1.18"
+ oidIfOutDiscards = "1.3.6.1.2.1.2.2.1.19"
+ oidIfOutErrors = "1.3.6.1.2.1.2.2.1.20"
+
+ oidIfName = "1.3.6.1.2.1.31.1.1.1.1"
+ oidIfInMulticastPkts = "1.3.6.1.2.1.31.1.1.1.2"
+ oidIfInBroadcastPkts = "1.3.6.1.2.1.31.1.1.1.3"
+ oidIfOutMulticastPkts = "1.3.6.1.2.1.31.1.1.1.4"
+ oidIfOutBroadcastPkts = "1.3.6.1.2.1.31.1.1.1.5"
+ oidIfHCInOctets = "1.3.6.1.2.1.31.1.1.1.6"
+ oidIfHCInUcastPkts = "1.3.6.1.2.1.31.1.1.1.7"
+ oidIfHCInMulticastPkts = "1.3.6.1.2.1.31.1.1.1.8"
+ oidIfHCInBroadcastPkts = "1.3.6.1.2.1.31.1.1.1.9"
+ oidIfHCOutOctets = "1.3.6.1.2.1.31.1.1.1.10"
+ oidIfHCOutUcastPkts = "1.3.6.1.2.1.31.1.1.1.11"
+ oidIfHCOutMulticastPkts = "1.3.6.1.2.1.31.1.1.1.12"
+ oidIfHCOutBroadcastPkts = "1.3.6.1.2.1.31.1.1.1.13"
+ oidIfHighSpeed = "1.3.6.1.2.1.31.1.1.1.15"
+ oidIfAlias = "1.3.6.1.2.1.31.1.1.1.18"
+)
+
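+// netInterface holds the state and the most recently collected IF-MIB values for a single network interface.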
+type netInterface struct {
+ updated bool
+ hasCharts bool
+ idx string
+
+ ifIndex int64
+ ifDescr string
+ ifType int64
+ ifMtu int64
+ ifSpeed int64
+ //ifPhysAddress string
+ ifAdminStatus int64
+ ifOperStatus int64
+ //ifLastChange string
+ ifInOctets int64
+ ifInUcastPkts int64
+ ifInNUcastPkts int64
+ ifInDiscards int64
+ ifInErrors int64
+ ifInUnknownProtos int64
+ ifOutOctets int64
+ ifOutUcastPkts int64
+ ifOutNUcastPkts int64
+ ifOutDiscards int64
+ ifOutErrors int64
+ ifName string
+ ifInMulticastPkts int64
+ ifInBroadcastPkts int64
+ ifOutMulticastPkts int64
+ ifOutBroadcastPkts int64
+ ifHCInOctets int64
+ ifHCInUcastPkts int64
+ ifHCInMulticastPkts int64
+ ifHCInBroadcastPkts int64
+ ifHCOutOctets int64
+ ifHCOutUcastPkts int64
+ ifHCOutMulticastPkts int64
+ ifHCOutBroadcastPkts int64
+ ifHighSpeed int64
+ ifAlias string
+}
+
+func (n *netInterface) String() string {
+ return fmt.Sprintf("iface index='%d',type='%s',name='%s',descr='%s',alias='%s'",
+ n.ifIndex, ifTypeMapping[n.ifType], n.ifName, n.ifDescr, strings.ReplaceAll(n.ifAlias, "\n", "\\n"))
+}
+
+var ifAdminStatusMapping = map[int64]string{
+ 1: "up",
+ 2: "down",
+ 3: "testing",
+}
+
+var ifOperStatusMapping = map[int64]string{
+ 1: "up",
+ 2: "down",
+ 3: "testing",
+ 4: "unknown",
+ 5: "dormant",
+ 6: "notPresent",
+ 7: "lowerLayerDown",
+}
+
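+// ifTypeMapping maps IANAifType values (ifType, OID 1.3.6.1.2.1.2.2.1.3) to their textual names.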
+var ifTypeMapping = map[int64]string{
+ 1: "other",
+ 2: "regular1822",
+ 3: "hdh1822",
+ 4: "ddnX25",
+ 5: "rfc877x25",
+ 6: "ethernetCsmacd",
+ 7: "iso88023Csmacd",
+ 8: "iso88024TokenBus",
+ 9: "iso88025TokenRing",
+ 10: "iso88026Man",
+ 11: "starLan",
+ 12: "proteon10Mbit",
+ 13: "proteon80Mbit",
+ 14: "hyperchannel",
+ 15: "fddi",
+ 16: "lapb",
+ 17: "sdlc",
+ 18: "ds1",
+ 19: "e1",
+ 20: "basicISDN",
+ 21: "primaryISDN",
+ 22: "propPointToPointSerial",
+ 23: "ppp",
+ 24: "softwareLoopback",
+ 25: "eon",
+ 26: "ethernet3Mbit",
+ 27: "nsip",
+ 28: "slip",
+ 29: "ultra",
+ 30: "ds3",
+ 31: "sip",
+ 32: "frameRelay",
+ 33: "rs232",
+ 34: "para",
+ 35: "arcnet",
+ 36: "arcnetPlus",
+ 37: "atm",
+ 38: "miox25",
+ 39: "sonet",
+ 40: "x25ple",
+ 41: "iso88022llc",
+ 42: "localTalk",
+ 43: "smdsDxi",
+ 44: "frameRelayService",
+ 45: "v35",
+ 46: "hssi",
+ 47: "hippi",
+ 48: "modem",
+ 49: "aal5",
+ 50: "sonetPath",
+ 51: "sonetVT",
+ 52: "smdsIcip",
+ 53: "propVirtual",
+ 54: "propMultiplexor",
+ 55: "ieee80212",
+ 56: "fibreChannel",
+ 57: "hippiInterface",
+ 58: "frameRelayInterconnect",
+ 59: "aflane8023",
+ 60: "aflane8025",
+ 61: "cctEmul",
+ 62: "fastEther",
+ 63: "isdn",
+ 64: "v11",
+ 65: "v36",
+ 66: "g703at64k",
+ 67: "g703at2mb",
+ 68: "qllc",
+ 69: "fastEtherFX",
+ 70: "channel",
+ 71: "ieee80211",
+ 72: "ibm370parChan",
+ 73: "escon",
+ 74: "dlsw",
+ 75: "isdns",
+ 76: "isdnu",
+ 77: "lapd",
+ 78: "ipSwitch",
+ 79: "rsrb",
+ 80: "atmLogical",
+ 81: "ds0",
+ 82: "ds0Bundle",
+ 83: "bsc",
+ 84: "async",
+ 85: "cnr",
+ 86: "iso88025Dtr",
+ 87: "eplrs",
+ 88: "arap",
+ 89: "propCnls",
+ 90: "hostPad",
+ 91: "termPad",
+ 92: "frameRelayMPI",
+ 93: "x213",
+ 94: "adsl",
+ 95: "radsl",
+ 96: "sdsl",
+ 97: "vdsl",
+ 98: "iso88025CRFPInt",
+ 99: "myrinet",
+ 100: "voiceEM",
+ 101: "voiceFXO",
+ 102: "voiceFXS",
+ 103: "voiceEncap",
+ 104: "voiceOverIp",
+ 105: "atmDxi",
+ 106: "atmFuni",
+ 107: "atmIma",
+ 108: "pppMultilinkBundle",
+ 109: "ipOverCdlc",
+ 110: "ipOverClaw",
+ 111: "stackToStack",
+ 112: "virtualIpAddress",
+ 113: "mpc",
+ 114: "ipOverAtm",
+ 115: "iso88025Fiber",
+ 116: "tdlc",
+ 117: "gigabitEthernet",
+ 118: "hdlc",
+ 119: "lapf",
+ 120: "v37",
+ 121: "x25mlp",
+ 122: "x25huntGroup",
+ 123: "transpHdlc",
+ 124: "interleave",
+ 125: "fast",
+ 126: "ip",
+ 127: "docsCableMaclayer",
+ 128: "docsCableDownstream",
+ 129: "docsCableUpstream",
+ 130: "a12MppSwitch",
+ 131: "tunnel",
+ 132: "coffee",
+ 133: "ces",
+ 134: "atmSubInterface",
+ 135: "l2vlan",
+ 136: "l3ipvlan",
+ 137: "l3ipxvlan",
+ 138: "digitalPowerline",
+ 139: "mediaMailOverIp",
+ 140: "dtm",
+ 141: "dcn",
+ 142: "ipForward",
+ 143: "msdsl",
+ 144: "ieee1394",
+ 145: "if-gsn",
+ 146: "dvbRccMacLayer",
+ 147: "dvbRccDownstream",
+ 148: "dvbRccUpstream",
+ 149: "atmVirtual",
+ 150: "mplsTunnel",
+ 151: "srp",
+ 152: "voiceOverAtm",
+ 153: "voiceOverFrameRelay",
+ 154: "idsl",
+ 155: "compositeLink",
+ 156: "ss7SigLink",
+ 157: "propWirelessP2P",
+ 158: "frForward",
+ 159: "rfc1483",
+ 160: "usb",
+ 161: "ieee8023adLag",
+ 162: "bgppolicyaccounting",
+ 163: "frf16MfrBundle",
+ 164: "h323Gatekeeper",
+ 165: "h323Proxy",
+ 166: "mpls",
+ 167: "mfSigLink",
+ 168: "hdsl2",
+ 169: "shdsl",
+ 170: "ds1FDL",
+ 171: "pos",
+ 172: "dvbAsiIn",
+ 173: "dvbAsiOut",
+ 174: "plc",
+ 175: "nfas",
+ 176: "tr008",
+ 177: "gr303RDT",
+ 178: "gr303IDT",
+ 179: "isup",
+ 180: "propDocsWirelessMaclayer",
+ 181: "propDocsWirelessDownstream",
+ 182: "propDocsWirelessUpstream",
+ 183: "hiperlan2",
+ 184: "propBWAp2Mp",
+ 185: "sonetOverheadChannel",
+ 186: "digitalWrapperOverheadChannel",
+ 187: "aal2",
+ 188: "radioMAC",
+ 189: "atmRadio",
+ 190: "imt",
+ 191: "mvl",
+ 192: "reachDSL",
+ 193: "frDlciEndPt",
+ 194: "atmVciEndPt",
+ 195: "opticalChannel",
+ 196: "opticalTransport",
+ 197: "propAtm",
+ 198: "voiceOverCable",
+ 199: "infiniband",
+ 200: "teLink",
+ 201: "q2931",
+ 202: "virtualTg",
+ 203: "sipTg",
+ 204: "sipSig",
+ 205: "docsCableUpstreamChannel",
+ 206: "econet",
+ 207: "pon155",
+ 208: "pon622",
+ 209: "bridge",
+ 210: "linegroup",
+ 211: "voiceEMFGD",
+ 212: "voiceFGDEANA",
+ 213: "voiceDID",
+ 214: "mpegTransport",
+ 215: "sixToFour",
+ 216: "gtp",
+ 217: "pdnEtherLoop1",
+ 218: "pdnEtherLoop2",
+ 219: "opticalChannelGroup",
+ 220: "homepna",
+ 221: "gfp",
+ 222: "ciscoISLvlan",
+ 223: "actelisMetaLOOP",
+ 224: "fcipLink",
+ 225: "rpr",
+ 226: "qam",
+ 227: "lmp",
+ 228: "cblVectaStar",
+ 229: "docsCableMCmtsDownstream",
+ 230: "adsl2",
+ 231: "macSecControlledIF",
+ 232: "macSecUncontrolledIF",
+ 233: "aviciOpticalEther",
+ 234: "atmbond",
+ 235: "voiceFGDOS",
+ 236: "mocaVersion1",
+ 237: "ieee80216WMAN",
+ 238: "adsl2plus",
+ 239: "dvbRcsMacLayer",
+ 240: "dvbTdm",
+ 241: "dvbRcsTdma",
+ 242: "x86Laps",
+ 243: "wwanPP",
+ 244: "wwanPP2",
+ 245: "voiceEBS",
+ 246: "ifPwType",
+ 247: "ilan",
+ 248: "pip",
+ 249: "aluELP",
+ 250: "gpon",
+ 251: "vdsl2",
+ 252: "capwapDot11Profile",
+ 253: "capwapDot11Bss",
+ 254: "capwapWtpVirtualRadio",
+ 255: "bits",
+ 256: "docsCableUpstreamRfPort",
+ 257: "cableDownstreamRfPort",
+ 258: "vmwareVirtualNic",
+ 259: "ieee802154",
+ 260: "otnOdu",
+ 261: "otnOtu",
+ 262: "ifVfiType",
+ 263: "g9981",
+ 264: "g9982",
+ 265: "g9983",
+ 266: "aluEpon",
+ 267: "aluEponOnu",
+ 268: "aluEponPhysicalUni",
+ 269: "aluEponLogicalLink",
+ 270: "aluGponOnu",
+ 271: "aluGponPhysicalUni",
+ 272: "vmwareNicTeam",
+ 277: "docsOfdmDownstream",
+ 278: "docsOfdmaUpstream",
+ 279: "gfast",
+ 280: "sdci",
+ 281: "xboxWireless",
+ 282: "fastdsl",
+ 283: "docsCableScte55d1FwdOob",
+ 284: "docsCableScte55d1RetOob",
+ 285: "docsCableScte55d2DsOob",
+ 286: "docsCableScte55d2UsOob",
+ 287: "docsCableNdf",
+ 288: "docsCableNdr",
+ 289: "ptm",
+ 290: "ghn",
+ 291: "otnOtsi",
+ 292: "otnOtuc",
+ 293: "otnOduc",
+ 294: "otnOtsig",
+ 295: "microwaveCarrierTermination",
+ 296: "microwaveRadioLinkTerminal",
+ 297: "ieee8021axDrni",
+ 298: "ax25",
+ 299: "ieee19061nanocom",
+ 300: "cpri",
+ 301: "omni",
+ 302: "roe",
+ 303: "p2pOverLan",
+}
diff --git a/src/go/plugin/go.d/modules/snmp/snmp.go b/src/go/plugin/go.d/modules/snmp/snmp.go
new file mode 100644
index 000000000..253d9f50d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/snmp.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package snmp
+
+import (
+ _ "embed"
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+ "github.com/gosnmp/gosnmp"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("snmp", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *SNMP {
+ return &SNMP{
+ Config: Config{
+ Community: "public",
+ Options: Options{
+ Port: 161,
+ Retries: 1,
+ Timeout: 5,
+ Version: gosnmp.Version2c.String(),
+ MaxOIDs: 60,
+ MaxRepetitions: 25,
+ },
+ User: User{
+ SecurityLevel: "authPriv",
+ AuthProto: "sha512",
+ PrivProto: "aes192c",
+ },
+ },
+
+ newSnmpClient: gosnmp.NewHandler,
+
+ checkMaxReps: true,
+ collectIfMib: true,
+ netInterfaces: make(map[string]*netInterface),
+ }
+}
+
+type SNMP struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ newSnmpClient func() gosnmp.Handler
+ snmpClient gosnmp.Handler
+
+ netIfaceFilterByName matcher.Matcher
+ netIfaceFilterByType matcher.Matcher
+
+ checkMaxReps bool
+ collectIfMib bool
+ netInterfaces map[string]*netInterface
+ sysName string
+
+ oids []string
+}
+
+func (s *SNMP) Configuration() any {
+ return s.Config
+}
+
+func (s *SNMP) Init() error {
+ err := s.validateConfig()
+ if err != nil {
+ s.Errorf("config validation failed: %v", err)
+ return err
+ }
+
+ snmpClient, err := s.initSNMPClient()
+ if err != nil {
+ s.Errorf("failed to initialize SNMP client: %v", err)
+ return err
+ }
+
+ err = snmpClient.Connect()
+ if err != nil {
+ s.Errorf("SNMP client connection failed: %v", err)
+ return err
+ }
+ s.snmpClient = snmpClient
+
+ byName, byType, err := s.initNetIfaceFilters()
+ if err != nil {
+ s.Errorf("failed to initialize network interface filters: %v", err)
+ return err
+ }
+ s.netIfaceFilterByName = byName
+ s.netIfaceFilterByType = byType
+
+ charts, err := newUserInputCharts(s.ChartsInput)
+ if err != nil {
+ s.Errorf("failed to create user charts: %v", err)
+ return err
+ }
+ s.charts = charts
+
+ s.oids = s.initOIDs()
+
+ return nil
+}
+
+func (s *SNMP) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (s *SNMP) Charts() *module.Charts {
+ return s.charts
+}
+
+func (s *SNMP) Collect() map[string]int64 {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (s *SNMP) Cleanup() {
+ if s.snmpClient != nil {
+ _ = s.snmpClient.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/snmp/snmp_test.go b/src/go/plugin/go.d/modules/snmp/snmp_test.go
new file mode 100644
index 000000000..1841235f1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/snmp_test.go
@@ -0,0 +1,754 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package snmp
+
+import (
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/golang/mock/gomock"
+ "github.com/gosnmp/gosnmp"
+ snmpmock "github.com/gosnmp/gosnmp/mocks"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSNMP_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &SNMP{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestSNMP_Init(t *testing.T) {
+ tests := map[string]struct {
+ prepareSNMP func() *SNMP
+ wantFail bool
+ }{
+ "fail with default config": {
+ wantFail: true,
+ prepareSNMP: func() *SNMP {
+ return New()
+ },
+ },
+ "fail when using SNMPv3 but 'user.name' not set": {
+ wantFail: true,
+ prepareSNMP: func() *SNMP {
+ snmp := New()
+ snmp.Config = prepareV3Config()
+ snmp.User.Name = ""
+ return snmp
+ },
+ },
+ "success when using SNMPv1 with valid config": {
+ wantFail: false,
+ prepareSNMP: func() *SNMP {
+ snmp := New()
+ snmp.Config = prepareV1Config()
+ return snmp
+ },
+ },
+ "success when using SNMPv2 with valid config": {
+ wantFail: false,
+ prepareSNMP: func() *SNMP {
+ snmp := New()
+ snmp.Config = prepareV2Config()
+ return snmp
+ },
+ },
+ "success when using SNMPv3 with valid config": {
+ wantFail: false,
+ prepareSNMP: func() *SNMP {
+ snmp := New()
+ snmp.Config = prepareV3Config()
+ return snmp
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ snmp := test.prepareSNMP()
+
+ if test.wantFail {
+ assert.Error(t, snmp.Init())
+ } else {
+ assert.NoError(t, snmp.Init())
+ }
+ })
+ }
+}
+
+func TestSNMP_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepareSNMP func(t *testing.T, m *snmpmock.MockHandler) *SNMP
+ }{
+ "cleanup call if snmpClient initialized": {
+ prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareV2Config()
+ snmp.newSnmpClient = func() gosnmp.Handler { return m }
+ setMockClientInitExpect(m)
+
+ require.NoError(t, snmp.Init())
+
+ m.EXPECT().Close().Times(1)
+
+ return snmp
+ },
+ },
+ "cleanup call does not panic if snmpClient not initialized": {
+ prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareV2Config()
+ snmp.newSnmpClient = func() gosnmp.Handler { return m }
+ setMockClientInitExpect(m)
+
+ require.NoError(t, snmp.Init())
+
+ snmp.snmpClient = nil
+
+ return snmp
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mockSNMP, cleanup := mockInit(t)
+ defer cleanup()
+
+ snmp := test.prepareSNMP(t, mockSNMP)
+
+ assert.NotPanics(t, snmp.Cleanup)
+ })
+ }
+}
+
+func TestSNMP_Charts(t *testing.T) {
+ tests := map[string]struct {
+ prepareSNMP func(t *testing.T, m *snmpmock.MockHandler) *SNMP
+ wantNumCharts int
+ doCollect bool
+ }{
+ "if-mib, no custom": {
+ doCollect: true,
+ wantNumCharts: len(netIfaceChartsTmpl)*4 + 1,
+ prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareV2Config()
+ setMockClientSysExpect(m)
+ setMockClientIfMibExpect(m)
+
+ return snmp
+ },
+ },
+ "custom, no if-mib": {
+ wantNumCharts: 10,
+ prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareConfigWithUserCharts(prepareV2Config(), 0, 9)
+ snmp.collectIfMib = false
+
+ return snmp
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mockSNMP, cleanup := mockInit(t)
+ defer cleanup()
+
+ setMockClientInitExpect(mockSNMP)
+
+ snmp := test.prepareSNMP(t, mockSNMP)
+ snmp.newSnmpClient = func() gosnmp.Handler { return mockSNMP }
+
+ require.NoError(t, snmp.Init())
+
+ if test.doCollect {
+ _ = snmp.Collect()
+ }
+
+ assert.Equal(t, test.wantNumCharts, len(*snmp.Charts()))
+ })
+ }
+}
+
+func TestSNMP_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepareSNMP func(m *snmpmock.MockHandler) *SNMP
+ }{
+ "success when collecting IF-MIB": {
+ wantFail: false,
+ prepareSNMP: func(m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareV2Config()
+ setMockClientIfMibExpect(m)
+
+ return snmp
+ },
+ },
+ "success only custom OIDs supported type": {
+ wantFail: false,
+ prepareSNMP: func(m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareConfigWithUserCharts(prepareV2Config(), 0, 3)
+ snmp.collectIfMib = false
+
+ m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{
+ Variables: []gosnmp.SnmpPDU{
+ {Value: 10, Type: gosnmp.Counter32},
+ {Value: 20, Type: gosnmp.Counter64},
+ {Value: 30, Type: gosnmp.Gauge32},
+ {Value: 1, Type: gosnmp.Boolean},
+ {Value: 40, Type: gosnmp.Gauge32},
+ {Value: 50, Type: gosnmp.TimeTicks},
+ {Value: 60, Type: gosnmp.Uinteger32},
+ {Value: 70, Type: gosnmp.Integer},
+ },
+ }, nil).Times(1)
+
+ return snmp
+ },
+ },
+ "fail when snmp client Get fails": {
+ wantFail: true,
+ prepareSNMP: func(m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareConfigWithUserCharts(prepareV2Config(), 0, 3)
+ snmp.collectIfMib = false
+
+ m.EXPECT().Get(gomock.Any()).Return(nil, errors.New("mock Get() error")).Times(1)
+
+ return snmp
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mockSNMP, cleanup := mockInit(t)
+ defer cleanup()
+
+ setMockClientInitExpect(mockSNMP)
+ setMockClientSysExpect(mockSNMP)
+
+ snmp := test.prepareSNMP(mockSNMP)
+ snmp.newSnmpClient = func() gosnmp.Handler { return mockSNMP }
+
+ require.NoError(t, snmp.Init())
+
+ if test.wantFail {
+ assert.Error(t, snmp.Check())
+ } else {
+ assert.NoError(t, snmp.Check())
+ }
+ })
+ }
+}
+
+func TestSNMP_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareSNMP func(m *snmpmock.MockHandler) *SNMP
+ wantCollected map[string]int64
+ }{
+ "success only IF-MIB": {
+ prepareSNMP: func(m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareV2Config()
+
+ setMockClientIfMibExpect(m)
+
+ return snmp
+ },
+ wantCollected: map[string]int64{
+ "net_iface_ether1_admin_status_down": 0,
+ "net_iface_ether1_admin_status_testing": 0,
+ "net_iface_ether1_admin_status_up": 1,
+ "net_iface_ether1_bcast_in": 0,
+ "net_iface_ether1_bcast_out": 0,
+ "net_iface_ether1_discards_in": 0,
+ "net_iface_ether1_discards_out": 0,
+ "net_iface_ether1_errors_in": 0,
+ "net_iface_ether1_errors_out": 0,
+ "net_iface_ether1_mcast_in": 0,
+ "net_iface_ether1_mcast_out": 0,
+ "net_iface_ether1_oper_status_dormant": 0,
+ "net_iface_ether1_oper_status_down": 1,
+ "net_iface_ether1_oper_status_lowerLayerDown": 0,
+ "net_iface_ether1_oper_status_notPresent": 0,
+ "net_iface_ether1_oper_status_testing": 0,
+ "net_iface_ether1_oper_status_unknown": 0,
+ "net_iface_ether1_oper_status_up": 0,
+ "net_iface_ether1_traffic_in": 0,
+ "net_iface_ether1_traffic_out": 0,
+ "net_iface_ether1_ucast_in": 0,
+ "net_iface_ether1_ucast_out": 0,
+ "net_iface_ether2_admin_status_down": 0,
+ "net_iface_ether2_admin_status_testing": 0,
+ "net_iface_ether2_admin_status_up": 1,
+ "net_iface_ether2_bcast_in": 0,
+ "net_iface_ether2_bcast_out": 0,
+ "net_iface_ether2_discards_in": 0,
+ "net_iface_ether2_discards_out": 0,
+ "net_iface_ether2_errors_in": 0,
+ "net_iface_ether2_errors_out": 0,
+ "net_iface_ether2_mcast_in": 1891,
+ "net_iface_ether2_mcast_out": 7386,
+ "net_iface_ether2_oper_status_dormant": 0,
+ "net_iface_ether2_oper_status_down": 0,
+ "net_iface_ether2_oper_status_lowerLayerDown": 0,
+ "net_iface_ether2_oper_status_notPresent": 0,
+ "net_iface_ether2_oper_status_testing": 0,
+ "net_iface_ether2_oper_status_unknown": 0,
+ "net_iface_ether2_oper_status_up": 1,
+ "net_iface_ether2_traffic_in": 615057509,
+ "net_iface_ether2_traffic_out": 159677206,
+ "net_iface_ether2_ucast_in": 71080332,
+ "net_iface_ether2_ucast_out": 39509661,
+ "net_iface_sfp-sfpplus1_admin_status_down": 0,
+ "net_iface_sfp-sfpplus1_admin_status_testing": 0,
+ "net_iface_sfp-sfpplus1_admin_status_up": 1,
+ "net_iface_sfp-sfpplus1_bcast_in": 0,
+ "net_iface_sfp-sfpplus1_bcast_out": 0,
+ "net_iface_sfp-sfpplus1_discards_in": 0,
+ "net_iface_sfp-sfpplus1_discards_out": 0,
+ "net_iface_sfp-sfpplus1_errors_in": 0,
+ "net_iface_sfp-sfpplus1_errors_out": 0,
+ "net_iface_sfp-sfpplus1_mcast_in": 0,
+ "net_iface_sfp-sfpplus1_mcast_out": 0,
+ "net_iface_sfp-sfpplus1_oper_status_dormant": 0,
+ "net_iface_sfp-sfpplus1_oper_status_down": 0,
+ "net_iface_sfp-sfpplus1_oper_status_lowerLayerDown": 0,
+ "net_iface_sfp-sfpplus1_oper_status_notPresent": 1,
+ "net_iface_sfp-sfpplus1_oper_status_testing": 0,
+ "net_iface_sfp-sfpplus1_oper_status_unknown": 0,
+ "net_iface_sfp-sfpplus1_oper_status_up": 0,
+ "net_iface_sfp-sfpplus1_traffic_in": 0,
+ "net_iface_sfp-sfpplus1_traffic_out": 0,
+ "net_iface_sfp-sfpplus1_ucast_in": 0,
+ "net_iface_sfp-sfpplus1_ucast_out": 0,
+ "net_iface_sfp-sfpplus2_admin_status_down": 0,
+ "net_iface_sfp-sfpplus2_admin_status_testing": 0,
+ "net_iface_sfp-sfpplus2_admin_status_up": 1,
+ "net_iface_sfp-sfpplus2_bcast_in": 0,
+ "net_iface_sfp-sfpplus2_bcast_out": 0,
+ "net_iface_sfp-sfpplus2_discards_in": 0,
+ "net_iface_sfp-sfpplus2_discards_out": 0,
+ "net_iface_sfp-sfpplus2_errors_in": 0,
+ "net_iface_sfp-sfpplus2_errors_out": 0,
+ "net_iface_sfp-sfpplus2_mcast_in": 0,
+ "net_iface_sfp-sfpplus2_mcast_out": 0,
+ "net_iface_sfp-sfpplus2_oper_status_dormant": 0,
+ "net_iface_sfp-sfpplus2_oper_status_down": 0,
+ "net_iface_sfp-sfpplus2_oper_status_lowerLayerDown": 0,
+ "net_iface_sfp-sfpplus2_oper_status_notPresent": 1,
+ "net_iface_sfp-sfpplus2_oper_status_testing": 0,
+ "net_iface_sfp-sfpplus2_oper_status_unknown": 0,
+ "net_iface_sfp-sfpplus2_oper_status_up": 0,
+ "net_iface_sfp-sfpplus2_traffic_in": 0,
+ "net_iface_sfp-sfpplus2_traffic_out": 0,
+ "net_iface_sfp-sfpplus2_ucast_in": 0,
+ "net_iface_sfp-sfpplus2_ucast_out": 0,
+ "uptime": 60,
+ },
+ },
+ "success only custom OIDs supported type": {
+ prepareSNMP: func(m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareConfigWithUserCharts(prepareV2Config(), 0, 3)
+ snmp.collectIfMib = false
+
+ m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{
+ Variables: []gosnmp.SnmpPDU{
+ {Value: 10, Type: gosnmp.Counter32},
+ {Value: 20, Type: gosnmp.Counter64},
+ {Value: 30, Type: gosnmp.Gauge32},
+ {Value: 1, Type: gosnmp.Boolean},
+ {Value: 40, Type: gosnmp.Gauge32},
+ {Value: 50, Type: gosnmp.TimeTicks},
+ {Value: 60, Type: gosnmp.Uinteger32},
+ {Value: 70, Type: gosnmp.Integer},
+ },
+ }, nil).Times(1)
+
+ return snmp
+ },
+ wantCollected: map[string]int64{
+ "1.3.6.1.2.1.2.2.1.10.0": 10,
+ "1.3.6.1.2.1.2.2.1.16.0": 20,
+ "1.3.6.1.2.1.2.2.1.10.1": 30,
+ "1.3.6.1.2.1.2.2.1.16.1": 1,
+ "1.3.6.1.2.1.2.2.1.10.2": 40,
+ "1.3.6.1.2.1.2.2.1.16.2": 50,
+ "1.3.6.1.2.1.2.2.1.10.3": 60,
+ "1.3.6.1.2.1.2.2.1.16.3": 70,
+ "uptime": 60,
+ },
+ },
+ "success only custom OIDs supported and unsupported type": {
+ prepareSNMP: func(m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareConfigWithUserCharts(prepareV2Config(), 0, 2)
+ snmp.collectIfMib = false
+
+ m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{
+ Variables: []gosnmp.SnmpPDU{
+ {Value: 10, Type: gosnmp.Counter32},
+ {Value: 20, Type: gosnmp.Counter64},
+ {Value: 30, Type: gosnmp.Gauge32},
+ {Value: nil, Type: gosnmp.NoSuchInstance},
+ {Value: nil, Type: gosnmp.NoSuchInstance},
+ {Value: nil, Type: gosnmp.NoSuchInstance},
+ },
+ }, nil).Times(1)
+
+ return snmp
+ },
+ wantCollected: map[string]int64{
+ "1.3.6.1.2.1.2.2.1.10.0": 10,
+ "1.3.6.1.2.1.2.2.1.16.0": 20,
+ "1.3.6.1.2.1.2.2.1.10.1": 30,
+ "uptime": 60,
+ },
+ },
+ "success only custom OIDs unsupported type": {
+ prepareSNMP: func(m *snmpmock.MockHandler) *SNMP {
+ snmp := New()
+ snmp.Config = prepareConfigWithUserCharts(prepareV2Config(), 0, 2)
+ snmp.collectIfMib = false
+
+ m.EXPECT().Get(gomock.Any()).Return(&gosnmp.SnmpPacket{
+ Variables: []gosnmp.SnmpPDU{
+ {Value: nil, Type: gosnmp.NoSuchInstance},
+ {Value: nil, Type: gosnmp.NoSuchInstance},
+ {Value: nil, Type: gosnmp.NoSuchObject},
+ {Value: "192.0.2.0", Type: gosnmp.NsapAddress},
+ {Value: []uint8{118, 101, 116}, Type: gosnmp.OctetString},
+ {Value: ".1.3.6.1.2.1.4.32.1.5.2.1.4.10.19.0.0.16", Type: gosnmp.ObjectIdentifier},
+ },
+ }, nil).Times(1)
+
+ return snmp
+ },
+ wantCollected: map[string]int64{
+ "uptime": 60,
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ mockSNMP, cleanup := mockInit(t)
+ defer cleanup()
+
+ setMockClientInitExpect(mockSNMP)
+ setMockClientSysExpect(mockSNMP)
+
+ snmp := test.prepareSNMP(mockSNMP)
+ snmp.newSnmpClient = func() gosnmp.Handler { return mockSNMP }
+
+ require.NoError(t, snmp.Init())
+
+ mx := snmp.Collect()
+
+ assert.Equal(t, test.wantCollected, mx)
+ })
+ }
+}
+
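+// mockInit creates a gomock controller and a mock SNMP handler; the returned
+// cleanup function finishes the controller, asserting that every registered
+// expectation was satisfied.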
+func mockInit(t *testing.T) (*snmpmock.MockHandler, func()) {
+ mockCtl := gomock.NewController(t)
+ cleanup := func() { mockCtl.Finish() }
+ mockSNMP := snmpmock.NewMockHandler(mockCtl)
+
+ return mockSNMP, cleanup
+}
+
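+// prepareV3Config extends the v2 config with SNMPv3 user credentials
+// (security level authPriv, MD5 authentication, AES privacy).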
+func prepareV3Config() Config {
+ cfg := prepareV2Config()
+ cfg.Options.Version = gosnmp.Version3.String()
+ cfg.User = User{
+ Name: "name",
+ SecurityLevel: "authPriv",
+ AuthProto: strings.ToLower(gosnmp.MD5.String()),
+ AuthKey: "auth_key",
+ PrivProto: strings.ToLower(gosnmp.AES.String()),
+ PrivKey: "priv_key",
+ }
+ return cfg
+}
+
+func prepareV2Config() Config {
+ cfg := prepareV1Config()
+ cfg.Options.Version = gosnmp.Version2c.String()
+ return cfg
+}
+
+func prepareV1Config() Config {
+ return Config{
+ UpdateEvery: 1,
+ Hostname: "192.0.2.1",
+ Community: "public",
+ Options: Options{
+ Port: 161,
+ Retries: 1,
+ Timeout: 5,
+ Version: gosnmp.Version1.String(),
+ MaxOIDs: 60,
+ MaxRepetitions: 25,
+ },
+ }
+}
+
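+// prepareConfigWithUserCharts appends a single user-defined chart with two
+// incremental dimensions and applies the given [start, end] index range.
+// It panics on an invalid range, since that indicates a broken test setup.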
+func prepareConfigWithUserCharts(cfg Config, start, end int) Config {
+ if start > end || start < 0 || end < 1 {
+ panic(fmt.Sprintf("invalid index range ('%d'-'%d')", start, end))
+ }
+ cfg.ChartsInput = []ChartConfig{
+ {
+ ID: "test_chart1",
+ Title: "This is Test Chart1",
+ Units: "kilobits/s",
+ Family: "family",
+ Type: module.Area.String(),
+ Priority: module.Priority,
+ Dimensions: []DimensionConfig{
+ {
+ OID: "1.3.6.1.2.1.2.2.1.10",
+ Name: "in",
+ Algorithm: module.Incremental.String(),
+ Multiplier: 8,
+ Divisor: 1000,
+ },
+ {
+ OID: "1.3.6.1.2.1.2.2.1.16",
+ Name: "out",
+ Algorithm: module.Incremental.String(),
+ Multiplier: 8,
+ Divisor: 1000,
+ },
+ },
+ },
+ }
+
+ for i := range cfg.ChartsInput {
+ cfg.ChartsInput[i].IndexRange = []int{start, end}
+ }
+
+ return cfg
+}
+
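+// setMockClientInitExpect registers permissive expectations for the client
+// setters and Connect(), so Init() can configure the mock handler freely.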
+func setMockClientInitExpect(m *snmpmock.MockHandler) {
+ m.EXPECT().Target().AnyTimes()
+ m.EXPECT().Port().AnyTimes()
+ m.EXPECT().Version().AnyTimes()
+ m.EXPECT().Community().AnyTimes()
+ m.EXPECT().SetTarget(gomock.Any()).AnyTimes()
+ m.EXPECT().SetPort(gomock.Any()).AnyTimes()
+ m.EXPECT().SetRetries(gomock.Any()).AnyTimes()
+ m.EXPECT().SetMaxRepetitions(gomock.Any()).AnyTimes()
+ m.EXPECT().SetMaxOids(gomock.Any()).AnyTimes()
+ m.EXPECT().SetLogger(gomock.Any()).AnyTimes()
+ m.EXPECT().SetTimeout(gomock.Any()).AnyTimes()
+ m.EXPECT().SetCommunity(gomock.Any()).AnyTimes()
+ m.EXPECT().SetVersion(gomock.Any()).AnyTimes()
+ m.EXPECT().SetSecurityModel(gomock.Any()).AnyTimes()
+ m.EXPECT().SetMsgFlags(gomock.Any()).AnyTimes()
+ m.EXPECT().SetSecurityParameters(gomock.Any()).AnyTimes()
+ m.EXPECT().Connect().Return(nil).AnyTimes()
+}
+
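+// setMockClientSysExpect stubs the sysName and sysUptime Get calls; 6048
+// timeticks (hundredths of a second) correspond to the expected "uptime" of 60.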
+func setMockClientSysExpect(m *snmpmock.MockHandler) {
+ m.EXPECT().Get([]string{oidSysName}).Return(&gosnmp.SnmpPacket{
+ Variables: []gosnmp.SnmpPDU{
+ {Value: []uint8("mock-host"), Type: gosnmp.OctetString},
+ },
+ }, nil).MinTimes(1)
+
+ m.EXPECT().Get([]string{oidSysUptime}).Return(&gosnmp.SnmpPacket{
+ Variables: []gosnmp.SnmpPDU{
+ {Value: uint32(6048), Type: gosnmp.TimeTicks},
+ },
+ }, nil).MinTimes(1)
+}
+
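+// setMockClientIfMibExpect stubs the IF-MIB ifTable and ifXTable walks for
+// four interfaces: ether1, ether2, sfp-sfpplus1 and sfp-sfpplus2.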
+func setMockClientIfMibExpect(m *snmpmock.MockHandler) {
+ m.EXPECT().WalkAll(oidIfIndex).Return([]gosnmp.SnmpPDU{
+ {Name: oidIfIndex + ".1", Value: 1, Type: gosnmp.Integer},
+ {Name: oidIfIndex + ".2", Value: 2, Type: gosnmp.Integer},
+ {Name: oidIfIndex + ".17", Value: 17, Type: gosnmp.Integer},
+ {Name: oidIfIndex + ".18", Value: 18, Type: gosnmp.Integer},
+ }, nil).MinTimes(1)
+ m.EXPECT().WalkAll(rootOidIfMibIfTable).Return([]gosnmp.SnmpPDU{
+ {Name: oidIfIndex + ".1", Value: 1, Type: gosnmp.Integer},
+ {Name: oidIfIndex + ".2", Value: 2, Type: gosnmp.Integer},
+ {Name: oidIfIndex + ".17", Value: 17, Type: gosnmp.Integer},
+ {Name: oidIfIndex + ".18", Value: 18, Type: gosnmp.Integer},
+ {Name: oidIfDescr + ".1", Value: []uint8("ether1"), Type: gosnmp.OctetString},
+ {Name: oidIfDescr + ".2", Value: []uint8("ether2"), Type: gosnmp.OctetString},
+ {Name: oidIfDescr + ".17", Value: []uint8("sfp-sfpplus2"), Type: gosnmp.OctetString},
+ {Name: oidIfDescr + ".18", Value: []uint8("sfp-sfpplus1"), Type: gosnmp.OctetString},
+ {Name: oidIfType + ".1", Value: 6, Type: gosnmp.Integer},
+ {Name: oidIfType + ".2", Value: 6, Type: gosnmp.Integer},
+ {Name: oidIfType + ".17", Value: 6, Type: gosnmp.Integer},
+ {Name: oidIfType + ".18", Value: 6, Type: gosnmp.Integer},
+ {Name: oidIfMtu + ".1", Value: 1500, Type: gosnmp.Integer},
+ {Name: oidIfMtu + ".2", Value: 1500, Type: gosnmp.Integer},
+ {Name: oidIfMtu + ".17", Value: 1500, Type: gosnmp.Integer},
+ {Name: oidIfMtu + ".18", Value: 1500, Type: gosnmp.Integer},
+ {Name: oidIfSpeed + ".1", Value: 0, Type: gosnmp.Gauge32},
+ {Name: oidIfSpeed + ".2", Value: 1000000000, Type: gosnmp.Gauge32},
+ {Name: oidIfSpeed + ".17", Value: 0, Type: gosnmp.Gauge32},
+ {Name: oidIfSpeed + ".18", Value: 0, Type: gosnmp.Gauge32},
+ {Name: oidIfPhysAddress + ".1", Value: decodePhysAddr("18:fd:74:7e:c5:80"), Type: gosnmp.OctetString},
+ {Name: oidIfPhysAddress + ".2", Value: decodePhysAddr("18:fd:74:7e:c5:81"), Type: gosnmp.OctetString},
+ {Name: oidIfPhysAddress + ".17", Value: decodePhysAddr("18:fd:74:7e:c5:90"), Type: gosnmp.OctetString},
+ {Name: oidIfPhysAddress + ".18", Value: decodePhysAddr("18:fd:74:7e:c5:91"), Type: gosnmp.OctetString},
+ {Name: oidIfAdminStatus + ".1", Value: 1, Type: gosnmp.Integer},
+ {Name: oidIfAdminStatus + ".2", Value: 1, Type: gosnmp.Integer},
+ {Name: oidIfAdminStatus + ".17", Value: 1, Type: gosnmp.Integer},
+ {Name: oidIfAdminStatus + ".18", Value: 1, Type: gosnmp.Integer},
+ {Name: oidIfOperStatus + ".1", Value: 2, Type: gosnmp.Integer},
+ {Name: oidIfOperStatus + ".2", Value: 1, Type: gosnmp.Integer},
+ {Name: oidIfOperStatus + ".17", Value: 6, Type: gosnmp.Integer},
+ {Name: oidIfOperStatus + ".18", Value: 6, Type: gosnmp.Integer},
+ {Name: oidIfLastChange + ".1", Value: 0, Type: gosnmp.TimeTicks},
+ {Name: oidIfLastChange + ".2", Value: 3243, Type: gosnmp.TimeTicks},
+ {Name: oidIfLastChange + ".17", Value: 0, Type: gosnmp.TimeTicks},
+ {Name: oidIfLastChange + ".18", Value: 0, Type: gosnmp.TimeTicks},
+ {Name: oidIfInOctets + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInOctets + ".2", Value: 3827243723, Type: gosnmp.Counter32},
+ {Name: oidIfInOctets + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInOctets + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInUcastPkts + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInUcastPkts + ".2", Value: 71035992, Type: gosnmp.Counter32},
+ {Name: oidIfInUcastPkts + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInUcastPkts + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInNUcastPkts + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInNUcastPkts + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInNUcastPkts + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInNUcastPkts + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInDiscards + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInDiscards + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInDiscards + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInDiscards + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInErrors + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInErrors + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInErrors + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInErrors + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInUnknownProtos + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInUnknownProtos + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInUnknownProtos + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInUnknownProtos + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutOctets + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutOctets + ".2", Value: 2769838772, Type: gosnmp.Counter32},
+ {Name: oidIfOutOctets + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutOctets + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutUcastPkts + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutUcastPkts + ".2", Value: 39482929, Type: gosnmp.Counter32},
+ {Name: oidIfOutUcastPkts + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutUcastPkts + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutNUcastPkts + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutNUcastPkts + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutNUcastPkts + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutNUcastPkts + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutDiscards + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutDiscards + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutDiscards + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutDiscards + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutErrors + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutErrors + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutErrors + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutErrors + ".18", Value: 0, Type: gosnmp.Counter32},
+ }, nil).MinTimes(1)
+
+ m.EXPECT().WalkAll(rootOidIfMibIfXTable).Return([]gosnmp.SnmpPDU{
+ {Name: oidIfName + ".1", Value: []uint8("ether1"), Type: gosnmp.OctetString},
+ {Name: oidIfName + ".2", Value: []uint8("ether2"), Type: gosnmp.OctetString},
+ {Name: oidIfName + ".17", Value: []uint8("sfp-sfpplus2"), Type: gosnmp.OctetString},
+ {Name: oidIfName + ".18", Value: []uint8("sfp-sfpplus1"), Type: gosnmp.OctetString},
+ {Name: oidIfInMulticastPkts + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInMulticastPkts + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInMulticastPkts + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInMulticastPkts + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInBroadcastPkts + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInBroadcastPkts + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInBroadcastPkts + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfInBroadcastPkts + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutMulticastPkts + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutMulticastPkts + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutMulticastPkts + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutMulticastPkts + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutBroadcastPkts + ".1", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutBroadcastPkts + ".2", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutBroadcastPkts + ".17", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfOutBroadcastPkts + ".18", Value: 0, Type: gosnmp.Counter32},
+ {Name: oidIfHCInOctets + ".1", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInOctets + ".2", Value: 76882188712, Type: gosnmp.Counter64},
+ {Name: oidIfHCInOctets + ".17", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInOctets + ".18", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInUcastPkts + ".1", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInUcastPkts + ".2", Value: 71080332, Type: gosnmp.Counter64},
+ {Name: oidIfHCInUcastPkts + ".17", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInUcastPkts + ".18", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInMulticastPkts + ".1", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInMulticastPkts + ".2", Value: 1891, Type: gosnmp.Counter64},
+ {Name: oidIfHCInMulticastPkts + ".17", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInMulticastPkts + ".18", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInBroadcastPkts + ".1", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInBroadcastPkts + ".2", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInBroadcastPkts + ".17", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCInBroadcastPkts + ".18", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutOctets + ".1", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutOctets + ".2", Value: 19959650810, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutOctets + ".17", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutOctets + ".18", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutUcastPkts + ".1", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutUcastPkts + ".2", Value: 39509661, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutUcastPkts + ".17", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutUcastPkts + ".18", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutMulticastPkts + ".1", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutMulticastPkts + ".2", Value: 28844, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutMulticastPkts + ".17", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutMulticastPkts + ".18", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutBroadcastPkts + ".1", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutBroadcastPkts + ".2", Value: 7386, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutBroadcastPkts + ".17", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHCOutBroadcastPkts + ".18", Value: 0, Type: gosnmp.Counter64},
+ {Name: oidIfHighSpeed + ".1", Value: 0, Type: gosnmp.Gauge32},
+ {Name: oidIfHighSpeed + ".2", Value: 1000, Type: gosnmp.Gauge32},
+ {Name: oidIfHighSpeed + ".17", Value: 0, Type: gosnmp.Gauge32},
+ {Name: oidIfHighSpeed + ".18", Value: 0, Type: gosnmp.Gauge32},
+ {Name: oidIfAlias + ".1", Value: []uint8(""), Type: gosnmp.OctetString},
+ {Name: oidIfAlias + ".2", Value: []uint8("UPLINK2 (2.1)"), Type: gosnmp.OctetString},
+ {Name: oidIfAlias + ".17", Value: []uint8(""), Type: gosnmp.OctetString},
+ {Name: oidIfAlias + ".18", Value: []uint8(""), Type: gosnmp.OctetString},
+ }, nil).MinTimes(1)
+}
+
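+// decodePhysAddr strips the colons from a MAC address string and hex-decodes
+// it into the raw bytes an agent returns for ifPhysAddress.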
+func decodePhysAddr(s string) []uint8 {
+ s = strings.ReplaceAll(s, ":", "")
+ v, _ := hex.DecodeString(s)
+ return v
+}
diff --git a/src/go/plugin/go.d/modules/snmp/testdata/config.json b/src/go/plugin/go.d/modules/snmp/testdata/config.json
new file mode 100644
index 000000000..b88ac1c25
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/testdata/config.json
@@ -0,0 +1,47 @@
+{
+ "update_every": 123,
+ "hostname": "ok",
+ "community": "ok",
+ "network_interface_filter": {
+ "by_name": "ok",
+ "by_type": "ok"
+ },
+ "user": {
+ "name": "ok",
+ "level": "ok",
+ "auth_proto": "ok",
+ "auth_key": "ok",
+ "priv_proto": "ok",
+ "priv_key": "ok"
+ },
+ "options": {
+ "port": 123,
+ "retries": 123,
+ "timeout": 123,
+ "version": "ok",
+ "max_request_size": 123,
+ "max_repetitions": 123
+ },
+ "charts": [
+ {
+ "id": "ok",
+ "title": "ok",
+ "units": "ok",
+ "family": "ok",
+ "type": "ok",
+ "priority": 123,
+ "multiply_range": [
+ 123
+ ],
+ "dimensions": [
+ {
+ "oid": "ok",
+ "name": "ok",
+ "algorithm": "ok",
+ "multiplier": 123,
+ "divisor": 123
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/snmp/testdata/config.yaml b/src/go/plugin/go.d/modules/snmp/testdata/config.yaml
new file mode 100644
index 000000000..f4ddbf91c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/snmp/testdata/config.yaml
@@ -0,0 +1,35 @@
+update_every: 123
+hostname: "ok"
+community: "ok"
+network_interface_filter:
+ by_name: "ok"
+ by_type: "ok"
+user:
+ name: "ok"
+ level: "ok"
+ auth_proto: "ok"
+ auth_key: "ok"
+ priv_proto: "ok"
+ priv_key: "ok"
+options:
+ port: 123
+ retries: 123
+ timeout: 123
+ version: "ok"
+ max_request_size: 123
+ max_repetitions: 123
+charts:
+ - id: "ok"
+ title: "ok"
+ units: "ok"
+ family: "ok"
+ type: "ok"
+ priority: 123
+ multiply_range:
+ - 123
+ dimensions:
+ - oid: "ok"
+ name: "ok"
+ algorithm: "ok"
+ multiplier: 123
+ divisor: 123
diff --git a/src/go/plugin/go.d/modules/squid/README.md b/src/go/plugin/go.d/modules/squid/README.md
new file mode 120000
index 000000000..c4e5a03d7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/README.md
@@ -0,0 +1 @@
+integrations/squid.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/squid/charts.go b/src/go/plugin/go.d/modules/squid/charts.go
new file mode 100644
index 000000000..47bab60f4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/charts.go
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squid
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioClientsNet = module.Priority + iota
+ prioClientsRequests
+ prioServersNet
+ prioServersRequests
+)
+
+var charts = module.Charts{
+ clientsNetChart.Copy(),
+ clientsRequestsChart.Copy(),
+ serversNetChart.Copy(),
+ serversRequestsChart.Copy(),
+}
+
+var (
+ clientsNetChart = module.Chart{
+ ID: "clients_net",
+ Title: "Squid Client Bandwidth",
+ Units: "kilobits/s",
+ Fam: "clients",
+ Ctx: "squid.clients_net",
+ Type: module.Area,
+ Priority: prioClientsNet,
+ Dims: module.Dims{
+ {ID: "client_http.kbytes_in", Name: "in", Algo: module.Incremental, Mul: 8},
+ {ID: "client_http.kbytes_out", Name: "out", Algo: module.Incremental, Mul: -8},
+ {ID: "client_http.hit_kbytes_out", Name: "hits", Algo: module.Incremental, Mul: -8},
+ },
+ }
+
+ clientsRequestsChart = module.Chart{
+ ID: "clients_requests",
+ Title: "Squid Client Requests",
+ Units: "requests/s",
+ Fam: "clients",
+ Ctx: "squid.clients_requests",
+ Type: module.Line,
+ Priority: prioClientsRequests,
+ Dims: module.Dims{
+ {ID: "client_http.requests", Name: "requests", Algo: module.Incremental},
+ {ID: "client_http.hits", Name: "hits", Algo: module.Incremental},
+ {ID: "client_http.errors", Name: "errors", Algo: module.Incremental, Mul: -1},
+ },
+ }
+
+ serversNetChart = module.Chart{
+ ID: "servers_net",
+ Title: "Squid Server Bandwidth",
+ Units: "kilobits/s",
+ Fam: "servers",
+ Ctx: "squid.servers_net",
+ Type: module.Area,
+ Priority: prioServersNet,
+ Dims: module.Dims{
+ {ID: "server.all.kbytes_in", Name: "in", Algo: module.Incremental, Mul: 8},
+ {ID: "server.all.kbytes_out", Name: "out", Algo: module.Incremental, Mul: -8},
+ },
+ }
+
+ serversRequestsChart = module.Chart{
+ ID: "servers_requests",
+ Title: "Squid Server Requests",
+ Units: "requests/s",
+ Fam: "servers",
+ Ctx: "squid.servers_requests",
+ Type: module.Line,
+ Priority: prioServersRequests,
+ Dims: module.Dims{
+ {ID: "server.all.requests", Name: "requests", Algo: module.Incremental},
+ {ID: "server.all.errors", Name: "errors", Algo: module.Incremental, Mul: -1},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/squid/collect.go b/src/go/plugin/go.d/modules/squid/collect.go
new file mode 100644
index 000000000..bb0cf1ab4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/collect.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squid
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ // https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager
+ urlPathServerStats = "/squid-internal-mgr/counters"
+)
+
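+// statsCounters is the set of counter names kept from the cache manager
+// "counters" page; each response line has the form `name = value`
+// (see testdata/counters.txt).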
+var statsCounters = map[string]bool{
+ "client_http.kbytes_in": true,
+ "client_http.kbytes_out": true,
+ "server.all.errors": true,
+ "server.all.requests": true,
+ "server.all.kbytes_out": true,
+ "server.all.kbytes_in": true,
+ "client_http.errors": true,
+ "client_http.hits": true,
+ "client_http.requests": true,
+ "client_http.hit_kbytes_out": true,
+}
+
+func (s *Squid) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := s.collectCounters(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
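+// collectCounters fetches the cache manager counters page and parses its
+// `name = value` lines, keeping only the counters listed in statsCounters.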
+func (s *Squid) collectCounters(mx map[string]int64) error {
+ req, err := web.NewHTTPRequestWithPath(s.Request, urlPathServerStats)
+ if err != nil {
+ return err
+ }
+
+ if err := s.doOK(req, func(body io.Reader) error {
+ sc := bufio.NewScanner(body)
+
+ for sc.Scan() {
+ key, value, ok := strings.Cut(sc.Text(), "=")
+ if !ok {
+ continue
+ }
+
+ key, value = strings.TrimSpace(key), strings.TrimSpace(value)
+
+ if !statsCounters[key] {
+ continue
+ }
+
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ s.Debugf("failed to parse key %s value %s: %v", key, value, err)
+ continue
+ }
+
+ mx[key] = v
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ if len(mx) == 0 {
+ return fmt.Errorf("unexpected response from '%s': no metrics found", req.URL)
+ }
+
+ return nil
+}
+
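+// doOK executes the request and passes the response body to parse only when
+// the server replies with 200 OK; the body is always drained and closed.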
+func (s *Squid) doOK(req *http.Request, parse func(body io.Reader) error) error {
+ resp, err := s.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ return parse(resp.Body)
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/squid/config_schema.json b/src/go/plugin/go.d/modules/squid/config_schema.json
new file mode 100644
index 000000000..b1264b2b1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/config_schema.json
@@ -0,0 +1,177 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Squid collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL where the Squid endpoint can be accessed.",
+ "type": "string",
+ "default": "http://127.0.0.1:1328",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/squid/integrations/squid.md b/src/go/plugin/go.d/modules/squid/integrations/squid.md
new file mode 100644
index 000000000..1a448de35
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/integrations/squid.md
@@ -0,0 +1,227 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/squid/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/squid/metadata.yaml"
+sidebar_label: "Squid"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Squid
+
+
+<img src="https://netdata.cloud/img/squid.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: squid
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors statistics about Squid clients and servers, such as bandwidth and requests.
+
+
+It collects metrics from the `squid-internal-mgr/counters` endpoint.
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Squid instances running on localhost that are listening on port 3128.
+On startup, it tries to collect metrics from:
+
+- http://127.0.0.1:3128
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Squid instance
+
+These metrics refer to each monitored Squid instance.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| squid.clients_net | in, out, hits | kilobits/s |
+| squid.clients_requests | requests, hits, errors | requests/s |
+| squid.servers_net | in, out | kilobits/s |
+| squid.servers_requests | requests, errors | requests/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/squid.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/squid.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:3128 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | POST | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:3128
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:3128
+
+ - name: remote
+ url: http://192.0.2.1:3128
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `squid` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m squid
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `squid` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep squid
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep squid /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep squid
+```
+
+
diff --git a/src/go/plugin/go.d/modules/squid/metadata.yaml b/src/go/plugin/go.d/modules/squid/metadata.yaml
new file mode 100644
index 000000000..fbe0202ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/metadata.yaml
@@ -0,0 +1,195 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-squid
+ plugin_name: go.d.plugin
+ module_name: squid
+ monitored_instance:
+ name: Squid
+ link: "https://www.squid-cache.org/"
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: "squid.png"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - squid
+ - web delivery
+ - squid caching proxy
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors statistics about Squid clients and servers, such as bandwidth and requests.
+ method_description: "It collects metrics from the `squid-internal-mgr/counters` endpoint."
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Squid instances running on localhost that are listening on port 3128.
+ On startup, it tries to collect metrics from:
+
+ - http://127.0.0.1:3128
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "go.d/squid.conf"
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:3128
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: POST
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: Config
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:3128
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:3128
+
+ - name: remote
+ url: http://192.0.2.1:3128
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: Squid instance
+ description: "These metrics refer to each monitored Squid instance."
+ labels: []
+ metrics:
+ - name: squid.clients_net
+ description: Squid Client Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: hits
+ - name: squid.clients_requests
+ description: Squid Client Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: hits
+ - name: errors
+ - name: squid.servers_net
+ description: Squid Server Bandwidth
+ unit: "kilobits/s"
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: squid.servers_requests
+ description: Squid Server Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: errors
diff --git a/src/go/plugin/go.d/modules/squid/squid.go b/src/go/plugin/go.d/modules/squid/squid.go
new file mode 100644
index 000000000..fe9c15ecb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/squid.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squid
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("squid", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Squid {
+ return &Squid{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:3128",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 1),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Squid struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (s *Squid) Configuration() any {
+ return s.Config
+}
+
+func (s *Squid) Init() error {
+ if s.URL == "" {
+ s.Error("URL not set")
+ return errors.New("url not set")
+ }
+
+ client, err := web.NewHTTPClient(s.Client)
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+ s.httpClient = client
+
+ s.Debugf("using URL %s", s.URL)
+ s.Debugf("using timeout: %s", s.Timeout)
+
+ return nil
+}
+
+func (s *Squid) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (s *Squid) Charts() *module.Charts {
+ return s.charts
+}
+
+func (s *Squid) Collect() map[string]int64 {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (s *Squid) Cleanup() {
+ if s.httpClient != nil {
+ s.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/squid/squid_test.go b/src/go/plugin/go.d/modules/squid/squid_test.go
new file mode 100644
index 000000000..c0856f89d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/squid_test.go
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squid
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+ dataCounters, _ = os.ReadFile("testdata/counters.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataCounters": dataCounters,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSquid_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Squid{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestSquid_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ squid := New()
+ squid.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, squid.Init())
+ } else {
+ assert.NoError(t, squid.Init())
+ }
+ })
+ }
+}
+
+func TestSquid_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestSquid_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (*Squid, func())
+ }{
+ "success case": {
+ wantFail: false,
+ prepare: prepareCaseSuccess,
+ },
+ "fails on unexpected response": {
+ wantFail: true,
+ prepare: prepareCaseUnexpectedResponse,
+ },
+ "fails on empty response": {
+ wantFail: true,
+ prepare: prepareCaseEmptyResponse,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ squid, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, squid.Check())
+ } else {
+ assert.NoError(t, squid.Check())
+ }
+ })
+ }
+}
+
+func TestSquid_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (*Squid, func())
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success case": {
+ prepare: prepareCaseSuccess,
+ wantCharts: len(charts),
+ wantMetrics: map[string]int64{
+ "client_http.errors": 5,
+ "client_http.hit_kbytes_out": 11,
+ "client_http.hits": 1,
+ "client_http.kbytes_in": 566,
+ "client_http.kbytes_out": 16081,
+ "client_http.requests": 9019,
+ "server.all.errors": 0,
+ "server.all.kbytes_in": 0,
+ "server.all.kbytes_out": 0,
+ "server.all.requests": 0,
+ },
+ },
+ "fails on unexpected response": {
+ prepare: prepareCaseUnexpectedResponse,
+ },
+ "fails on empty response": {
+ prepare: prepareCaseEmptyResponse,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ squid, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := squid.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ assert.Equal(t, test.wantCharts, len(*squid.Charts()))
+ module.TestMetricsHasAllChartsDims(t, squid.Charts(), mx)
+ }
+ })
+ }
+}
+
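+// prepareCaseSuccess starts a test server that serves testdata/counters.txt
+// on the counters endpoint and returns 404 for any other path.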
+func prepareCaseSuccess(t *testing.T) (*Squid, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathServerStats:
+ _, _ = w.Write(dataCounters)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ squid := New()
+ squid.URL = srv.URL
+ require.NoError(t, squid.Init())
+
+ return squid, srv.Close
+}
+
+func prepareCaseUnexpectedResponse(t *testing.T) (*Squid, func()) {
+ t.Helper()
+ resp := []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.`)
+
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(resp))
+ }))
+
+ squid := New()
+ squid.URL = srv.URL
+ require.NoError(t, squid.Init())
+
+ return squid, srv.Close
+}
+
+func prepareCaseEmptyResponse(t *testing.T) (*Squid, func()) {
+ t.Helper()
+ resp := []byte(``)
+
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(resp))
+ }))
+
+ squid := New()
+ squid.URL = srv.URL
+ require.NoError(t, squid.Init())
+
+ return squid, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*Squid, func()) {
+ t.Helper()
+ squid := New()
+ squid.URL = "http://127.0.0.1:65001"
+ require.NoError(t, squid.Init())
+
+ return squid, func() {}
+}
diff --git a/src/go/plugin/go.d/modules/squid/testdata/config.json b/src/go/plugin/go.d/modules/squid/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/squid/testdata/config.yaml b/src/go/plugin/go.d/modules/squid/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/squid/testdata/counters.txt b/src/go/plugin/go.d/modules/squid/testdata/counters.txt
new file mode 100644
index 000000000..250a003d3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squid/testdata/counters.txt
@@ -0,0 +1,59 @@
+sample_time = 1723030944.784818 (Wed, 07 Aug 2024 11:42:24 GMT)
+client_http.requests = 9019
+client_http.hits = 1
+client_http.errors = 5
+client_http.kbytes_in = 566
+client_http.kbytes_out = 16081
+client_http.hit_kbytes_out = 11
+server.all.requests = 0
+server.all.errors = 0
+server.all.kbytes_in = 0
+server.all.kbytes_out = 0
+server.http.requests = 0
+server.http.errors = 0
+server.http.kbytes_in = 0
+server.http.kbytes_out = 0
+server.ftp.requests = 0
+server.ftp.errors = 0
+server.ftp.kbytes_in = 0
+server.ftp.kbytes_out = 0
+server.other.requests = 0
+server.other.errors = 0
+server.other.kbytes_in = 0
+server.other.kbytes_out = 0
+icp.pkts_sent = 0
+icp.pkts_recv = 0
+icp.queries_sent = 0
+icp.replies_sent = 0
+icp.queries_recv = 0
+icp.replies_recv = 0
+icp.query_timeouts = 0
+icp.replies_queued = 0
+icp.kbytes_sent = 0
+icp.kbytes_recv = 0
+icp.q_kbytes_sent = 0
+icp.r_kbytes_sent = 0
+icp.q_kbytes_recv = 0
+icp.r_kbytes_recv = 0
+icp.times_used = 0
+cd.times_used = 0
+cd.msgs_sent = 0
+cd.msgs_recv = 0
+cd.memory = 0
+cd.local_memory = 0
+cd.kbytes_sent = 0
+cd.kbytes_recv = 0
+unlink.requests = 0
+page_faults = 874
+select_loops = 91146
+cpu_time = 8.501572
+wall_time = 13.524214
+swap.outs = 0
+swap.ins = 0
+swap.files_cleaned = 0
+aborted_requests = 0
+hit_validation.attempts = 0
+hit_validation.refusals.due_to_locking = 0
+hit_validation.refusals.due_to_zeroSize = 0
+hit_validation.refusals.due_to_timeLimit = 0
+hit_validation.failures = 0
diff --git a/src/go/plugin/go.d/modules/squidlog/README.md b/src/go/plugin/go.d/modules/squidlog/README.md
new file mode 120000
index 000000000..876d4b47a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/README.md
@@ -0,0 +1 @@
+integrations/squid_log_files.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/squidlog/charts.go b/src/go/plugin/go.d/modules/squidlog/charts.go
new file mode 100644
index 000000000..92875eaf9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/charts.go
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+type (
+ Charts = module.Charts
+ Chart = module.Chart
+ Dims = module.Dims
+ Dim = module.Dim
+)
+
+const (
+ prioReqTotal = module.Priority + iota
+ prioReqExcluded
+ prioReqType
+
+ prioHTTPRespCodesClass
+ prioHTTPRespCodes
+
+ prioUniqClients
+
+ prioBandwidth
+
+ prioRespTime
+
+ prioCacheCode
+ prioCacheTransportTag
+ prioCacheHandlingTag
+ prioCacheObjectTag
+ prioCacheLoadSourceTag
+ prioCacheErrorTag
+
+ prioReqMethod
+
+ prioHierCode
+ prioServers
+
+ prioMimeType
+)
+
+var (
+ // Requests
+ reqTotalChart = Chart{
+ ID: "requests",
+ Title: "Total Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "squidlog.requests",
+ Priority: prioReqTotal,
+ Dims: Dims{
+ {ID: "requests", Algo: module.Incremental},
+ },
+ }
+ reqExcludedChart = Chart{
+ ID: "excluded_requests",
+ Title: "Excluded Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "squidlog.excluded_requests",
+ Priority: prioReqExcluded,
+ Dims: Dims{
+ {ID: "unmatched", Algo: module.Incremental},
+ },
+ }
+ reqTypesChart = Chart{
+ ID: "requests_by_type",
+ Title: "Requests By Type",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "squidlog.type_requests",
+ Type: module.Stacked,
+ Priority: prioReqType,
+ Dims: Dims{
+ {ID: "req_type_success", Name: "success", Algo: module.Incremental},
+ {ID: "req_type_bad", Name: "bad", Algo: module.Incremental},
+ {ID: "req_type_redirect", Name: "redirect", Algo: module.Incremental},
+ {ID: "req_type_error", Name: "error", Algo: module.Incremental},
+ },
+ }
+
+ // HTTP Code
+ httpRespCodeClassChart = Chart{
+ ID: "responses_by_http_status_code_class",
+ Title: "Responses By HTTP Status Code Class",
+ Units: "responses/s",
+ Fam: "http code",
+ Ctx: "squidlog.http_status_code_class_responses",
+ Type: module.Stacked,
+ Priority: prioHTTPRespCodesClass,
+ Dims: Dims{
+ {ID: "http_resp_2xx", Name: "2xx", Algo: module.Incremental},
+ {ID: "http_resp_5xx", Name: "5xx", Algo: module.Incremental},
+ {ID: "http_resp_3xx", Name: "3xx", Algo: module.Incremental},
+ {ID: "http_resp_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: "http_resp_1xx", Name: "1xx", Algo: module.Incremental},
+ {ID: "http_resp_0xx", Name: "0xx", Algo: module.Incremental},
+ {ID: "http_resp_6xx", Name: "6xx", Algo: module.Incremental},
+ },
+ }
+ httpRespCodesChart = Chart{
+ ID: "responses_by_http_status_code",
+ Title: "Responses By HTTP Status Code",
+ Units: "responses/s",
+ Fam: "http code",
+ Ctx: "squidlog.http_status_code_responses",
+ Type: module.Stacked,
+ Priority: prioHTTPRespCodes,
+ }
+
+ // Bandwidth
+ bandwidthChart = Chart{
+ ID: "bandwidth",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "bandwidth",
+ Ctx: "squidlog.bandwidth",
+ Priority: prioBandwidth,
+ Dims: Dims{
+ {ID: "bytes_sent", Name: "sent", Algo: module.Incremental, Div: 1000},
+ },
+ }
+
+ // Response Time
+ respTimeChart = Chart{
+ ID: "response_time",
+ Title: "Response Time",
+ Units: "milliseconds",
+ Fam: "timings",
+ Ctx: "squidlog.response_time",
+ Priority: prioRespTime,
+ Dims: Dims{
+ {ID: "resp_time_min", Name: "min", Div: 1000},
+ {ID: "resp_time_max", Name: "max", Div: 1000},
+ {ID: "resp_time_avg", Name: "avg", Div: 1000},
+ },
+ }
+
+ // Clients
+ uniqClientsChart = Chart{
+ ID: "uniq_clients",
+ Title: "Unique Clients",
+ Units: "clients/s",
+ Fam: "clients",
+ Ctx: "squidlog.uniq_clients",
+ Priority: prioUniqClients,
+ Dims: Dims{
+ {ID: "uniq_clients", Name: "clients"},
+ },
+ }
+
+ // Cache Code Result
+ cacheCodeChart = Chart{
+ ID: "requests_by_cache_result_code",
+ Title: "Requests By Cache Result Code",
+ Units: "requests/s",
+ Fam: "cache result",
+ Ctx: "squidlog.cache_result_code_requests",
+ Priority: prioCacheCode,
+ Type: module.Stacked,
+ }
+ cacheCodeTransportTagChart = Chart{
+ ID: "requests_by_cache_result_code_transport_tag",
+ Title: "Requests By Cache Result Delivery Transport Tag",
+ Units: "requests/s",
+ Fam: "cache result",
+ Ctx: "squidlog.cache_result_code_transport_tag_requests",
+ Type: module.Stacked,
+ Priority: prioCacheTransportTag,
+ }
+ cacheCodeHandlingTagChart = Chart{
+ ID: "requests_by_cache_result_code_handling_tag",
+ Title: "Requests By Cache Result Handling Tag",
+ Units: "requests/s",
+ Fam: "cache result",
+ Ctx: "squidlog.cache_result_code_handling_tag_requests",
+ Type: module.Stacked,
+ Priority: prioCacheHandlingTag,
+ }
+ cacheCodeObjectTagChart = Chart{
+ ID: "requests_by_cache_code_object_tag",
+ Title: "Requests By Cache Result Produced Object Tag",
+ Units: "requests/s",
+ Fam: "cache result",
+ Ctx: "squidlog.cache_code_object_tag_requests",
+ Type: module.Stacked,
+ Priority: prioCacheObjectTag,
+ }
+ cacheCodeLoadSourceTagChart = Chart{
+ ID: "requests_by_cache_code_load_source_tag",
+ Title: "Requests By Cache Result Load Source Tag",
+ Units: "requests/s",
+ Fam: "cache result",
+ Ctx: "squidlog.cache_code_load_source_tag_requests",
+ Type: module.Stacked,
+ Priority: prioCacheLoadSourceTag,
+ }
+ cacheCodeErrorTagChart = Chart{
+ ID: "requests_by_cache_code_error_tag",
+ Title: "Requests By Cache Result Errors Tag",
+ Units: "requests/s",
+ Fam: "cache result",
+ Ctx: "squidlog.cache_code_error_tag_requests",
+ Type: module.Stacked,
+ Priority: prioCacheErrorTag,
+ }
+
+ // HTTP Method
+ reqMethodChart = Chart{
+ ID: "requests_by_http_method",
+ Title: "Requests By HTTP Method",
+ Units: "requests/s",
+ Fam: "http method",
+ Ctx: "squidlog.http_method_requests",
+ Type: module.Stacked,
+ Priority: prioReqMethod,
+ }
+
+ // MIME Type
+ mimeTypeChart = Chart{
+ ID: "requests_by_mime_type",
+ Title: "Requests By MIME Type",
+ Units: "requests/s",
+ Fam: "mime type",
+ Ctx: "squidlog.mime_type_requests",
+ Type: module.Stacked,
+ Priority: prioMimeType,
+ }
+
+ // Hierarchy
+ hierCodeChart = Chart{
+ ID: "requests_by_hier_code",
+ Title: "Requests By Hierarchy Code",
+ Units: "requests/s",
+ Fam: "hierarchy",
+ Ctx: "squidlog.hier_code_requests",
+ Type: module.Stacked,
+ Priority: prioHierCode,
+ }
+ serverAddrChart = Chart{
+ ID: "forwarded_requests_by_server_address",
+ Title: "Forwarded Requests By Server Address",
+ Units: "requests/s",
+ Fam: "hierarchy",
+ Ctx: "squidlog.server_address_forwarded_requests",
+ Type: module.Stacked,
+ Priority: prioServers,
+ }
+)
+
+func (s *SquidLog) createCharts(line *logLine) error {
+ if line.empty() {
+ return errors.New("empty line")
+ }
+ charts := &Charts{
+ reqTotalChart.Copy(),
+ reqExcludedChart.Copy(),
+ }
+ if line.hasRespTime() {
+ if err := addRespTimeCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasClientAddress() {
+ if err := addClientAddressCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasCacheCode() {
+ if err := addCacheCodeCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasHTTPCode() {
+ if err := addHTTPRespCodeCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasRespSize() {
+ if err := addRespSizeCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasReqMethod() {
+ if err := addMethodCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasHierCode() {
+ if err := addHierCodeCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasServerAddress() {
+ if err := addServerAddressCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasMimeType() {
+ if err := addMimeTypeCharts(charts); err != nil {
+ return err
+ }
+ }
+ s.charts = charts
+ return nil
+}
+
+func addRespTimeCharts(charts *Charts) error {
+ return charts.Add(respTimeChart.Copy())
+}
+
+func addClientAddressCharts(charts *Charts) error {
+ return charts.Add(uniqClientsChart.Copy())
+}
+
+func addCacheCodeCharts(charts *Charts) error {
+ cs := []Chart{
+ cacheCodeChart,
+ cacheCodeTransportTagChart,
+ cacheCodeHandlingTagChart,
+ cacheCodeObjectTagChart,
+ cacheCodeLoadSourceTagChart,
+ cacheCodeErrorTagChart,
+ }
+ for _, chart := range cs {
+ if err := charts.Add(chart.Copy()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+func addHTTPRespCodeCharts(charts *Charts) error {
+ cs := []Chart{
+ reqTypesChart,
+ httpRespCodeClassChart,
+ httpRespCodesChart,
+ }
+ for _, chart := range cs {
+ if err := charts.Add(chart.Copy()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func addRespSizeCharts(charts *Charts) error {
+ return charts.Add(bandwidthChart.Copy())
+}
+
+func addMethodCharts(charts *Charts) error {
+ return charts.Add(reqMethodChart.Copy())
+}
+
+func addHierCodeCharts(charts *Charts) error {
+ return charts.Add(hierCodeChart.Copy())
+}
+func addServerAddressCharts(charts *Charts) error {
+ return charts.Add(serverAddrChart.Copy())
+}
+
+func addMimeTypeCharts(charts *Charts) error {
+ return charts.Add(mimeTypeChart.Copy())
+}
diff --git a/src/go/plugin/go.d/modules/squidlog/collect.go b/src/go/plugin/go.d/modules/squidlog/collect.go
new file mode 100644
index 000000000..ee548b5be
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/collect.go
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import (
+ "io"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (s *SquidLog) logPanicStackIfAny() {
+ err := recover()
+ if err == nil {
+ return
+ }
+ s.Errorf("[ERROR] %s\n", err)
+ for depth := 0; ; depth++ {
+ _, file, line, ok := runtime.Caller(depth)
+ if !ok {
+ break
+ }
+ s.Errorf("======> %d: %v:%d", depth, file, line)
+ }
+ panic(err)
+}
+
+func (s *SquidLog) collect() (map[string]int64, error) {
+ defer s.logPanicStackIfAny()
+ s.mx.reset()
+
+ var mx map[string]int64
+
+ n, err := s.collectLogLines()
+
+ if n > 0 || err == nil {
+ mx = stm.ToMap(s.mx)
+ }
+ return mx, err
+}
+
+func (s *SquidLog) collectLogLines() (int, error) {
+ var n int
+ for {
+ s.line.reset()
+ err := s.parser.ReadLine(s.line)
+ if err != nil {
+ if err == io.EOF {
+ return n, nil
+ }
+ if !logs.IsParseError(err) {
+ return n, err
+ }
+ n++
+ s.collectUnmatched()
+ continue
+ }
+ n++
+ if s.line.empty() {
+ s.collectUnmatched()
+ } else {
+ s.collectLogLine()
+ }
+ }
+}
+
+func (s *SquidLog) collectLogLine() {
+ s.mx.Requests.Inc()
+ s.collectRespTime()
+ s.collectClientAddress()
+ s.collectCacheCode()
+ s.collectHTTPCode()
+ s.collectRespSize()
+ s.collectReqMethod()
+ s.collectHierCode()
+ s.collectServerAddress()
+ s.collectMimeType()
+}
+
+func (s *SquidLog) collectUnmatched() {
+ s.mx.Requests.Inc()
+ s.mx.Unmatched.Inc()
+}
+
+func (s *SquidLog) collectRespTime() {
+ if !s.line.hasRespTime() {
+ return
+ }
+ s.mx.RespTime.Observe(float64(s.line.respTime))
+}
+
+func (s *SquidLog) collectClientAddress() {
+ if !s.line.hasClientAddress() {
+ return
+ }
+ s.mx.UniqueClients.Insert(s.line.clientAddr)
+}
+
+func (s *SquidLog) collectCacheCode() {
+ if !s.line.hasCacheCode() {
+ return
+ }
+
+ c, ok := s.mx.CacheCode.GetP(s.line.cacheCode)
+ if !ok {
+ s.addDimToCacheCodeChart(s.line.cacheCode)
+ }
+ c.Inc()
+
+ tags := strings.Split(s.line.cacheCode, "_")
+ for _, tag := range tags {
+ s.collectCacheCodeTag(tag)
+ }
+}
+
+func (s *SquidLog) collectHTTPCode() {
+ if !s.line.hasHTTPCode() {
+ return
+ }
+
+ code := s.line.httpCode
+ switch {
+ case code >= 100 && code < 300, code == 0, code == 304, code == 401:
+ s.mx.ReqSuccess.Inc()
+ case code >= 300 && code < 400:
+ s.mx.ReqRedirect.Inc()
+ case code >= 400 && code < 500:
+ s.mx.ReqBad.Inc()
+ case code >= 500 && code <= 603:
+ s.mx.ReqError.Inc()
+ }
+
+ switch code / 100 {
+ case 0:
+ s.mx.HTTPResp0xx.Inc()
+ case 1:
+ s.mx.HTTPResp1xx.Inc()
+ case 2:
+ s.mx.HTTPResp2xx.Inc()
+ case 3:
+ s.mx.HTTPResp3xx.Inc()
+ case 4:
+ s.mx.HTTPResp4xx.Inc()
+ case 5:
+ s.mx.HTTPResp5xx.Inc()
+ case 6:
+ s.mx.HTTPResp6xx.Inc()
+ }
+
+ codeStr := strconv.Itoa(code)
+ c, ok := s.mx.HTTPRespCode.GetP(codeStr)
+ if !ok {
+ s.addDimToHTTPRespCodesChart(codeStr)
+ }
+ c.Inc()
+}
+
+func (s *SquidLog) collectRespSize() {
+ if !s.line.hasRespSize() {
+ return
+ }
+ s.mx.BytesSent.Add(float64(s.line.respSize))
+}
+
+func (s *SquidLog) collectReqMethod() {
+ if !s.line.hasReqMethod() {
+ return
+ }
+ c, ok := s.mx.ReqMethod.GetP(s.line.reqMethod)
+ if !ok {
+ s.addDimToReqMethodChart(s.line.reqMethod)
+ }
+ c.Inc()
+}
+
+func (s *SquidLog) collectHierCode() {
+ if !s.line.hasHierCode() {
+ return
+ }
+ c, ok := s.mx.HierCode.GetP(s.line.hierCode)
+ if !ok {
+ s.addDimToHierCodeChart(s.line.hierCode)
+ }
+ c.Inc()
+}
+
+func (s *SquidLog) collectServerAddress() {
+ if !s.line.hasServerAddress() {
+ return
+ }
+ c, ok := s.mx.Server.GetP(s.line.serverAddr)
+ if !ok {
+ s.addDimToServerAddressChart(s.line.serverAddr)
+ }
+ c.Inc()
+}
+
+func (s *SquidLog) collectMimeType() {
+ if !s.line.hasMimeType() {
+ return
+ }
+ c, ok := s.mx.MimeType.GetP(s.line.mimeType)
+ if !ok {
+ s.addDimToMimeTypeChart(s.line.mimeType)
+ }
+ c.Inc()
+}
+
+func (s *SquidLog) collectCacheCodeTag(tag string) {
+ // https://wiki.squid-cache.org/SquidFaq/SquidLogs#Squid_result_codes
+ switch tag {
+ default:
+ case "TCP", "UDP", "NONE":
+ c, ok := s.mx.CacheCodeTransportTag.GetP(tag)
+ if !ok {
+ s.addDimToCacheCodeTransportTagChart(tag)
+ }
+ c.Inc()
+ case "CF", "CLIENT", "IMS", "ASYNC", "SWAPFAIL", "REFRESH", "SHARED", "REPLY":
+ c, ok := s.mx.CacheCodeHandlingTag.GetP(tag)
+ if !ok {
+ s.addDimToCacheCodeHandlingTagChart(tag)
+ }
+ c.Inc()
+ case "NEGATIVE", "STALE", "OFFLINE", "INVALID", "FAIL", "MODIFIED", "UNMODIFIED", "REDIRECT":
+ c, ok := s.mx.CacheCodeObjectTag.GetP(tag)
+ if !ok {
+ s.addDimToCacheCodeObjectTagChart(tag)
+ }
+ c.Inc()
+ case "HIT", "MEM", "MISS", "DENIED", "NOFETCH", "TUNNEL":
+ c, ok := s.mx.CacheCodeLoadSourceTag.GetP(tag)
+ if !ok {
+ s.addDimToCacheCodeLoadSourceTagChart(tag)
+ }
+ c.Inc()
+ case "ABORTED", "TIMEOUT", "IGNORED":
+ c, ok := s.mx.CacheCodeErrorTag.GetP(tag)
+ if !ok {
+ s.addDimToCacheCodeErrorTagChart(tag)
+ }
+ c.Inc()
+ }
+}
+
+func (s *SquidLog) addDimToCacheCodeChart(code string) {
+ chartID := cacheCodeChart.ID
+ dimID := pxCacheCode + code
+ s.addDimToChart(chartID, dimID, code)
+}
+
+func (s *SquidLog) addDimToCacheCodeTransportTagChart(tag string) {
+ chartID := cacheCodeTransportTagChart.ID
+ dimID := pxTransportTag + tag
+ s.addDimToChart(chartID, dimID, tag)
+}
+
+func (s *SquidLog) addDimToCacheCodeHandlingTagChart(tag string) {
+ chartID := cacheCodeHandlingTagChart.ID
+ dimID := pxHandlingTag + tag
+ s.addDimToChart(chartID, dimID, tag)
+}
+
+func (s *SquidLog) addDimToCacheCodeObjectTagChart(tag string) {
+ chartID := cacheCodeObjectTagChart.ID
+ dimID := pxObjectTag + tag
+ s.addDimToChart(chartID, dimID, tag)
+}
+
+func (s *SquidLog) addDimToCacheCodeLoadSourceTagChart(tag string) {
+ chartID := cacheCodeLoadSourceTagChart.ID
+ dimID := pxSourceTag + tag
+ s.addDimToChart(chartID, dimID, tag)
+}
+
+func (s *SquidLog) addDimToCacheCodeErrorTagChart(tag string) {
+ chartID := cacheCodeErrorTagChart.ID
+ dimID := pxErrorTag + tag
+ s.addDimToChart(chartID, dimID, tag)
+}
+
+func (s *SquidLog) addDimToHTTPRespCodesChart(tag string) {
+ chartID := httpRespCodesChart.ID
+ dimID := pxHTTPCode + tag
+ s.addDimToChart(chartID, dimID, tag)
+}
+
+func (s *SquidLog) addDimToReqMethodChart(method string) {
+ chartID := reqMethodChart.ID
+ dimID := pxReqMethod + method
+ s.addDimToChart(chartID, dimID, method)
+}
+
+func (s *SquidLog) addDimToHierCodeChart(code string) {
+ chartID := hierCodeChart.ID
+ dimID := pxHierCode + code
+ dimName := code[5:] // remove "HIER_"
+ s.addDimToChart(chartID, dimID, dimName)
+}
+
+func (s *SquidLog) addDimToServerAddressChart(address string) {
+ chartID := serverAddrChart.ID
+ dimID := pxSrvAddr + address
+ s.addDimToChartOrCreateIfNotExist(chartID, dimID, address)
+}
+
+func (s *SquidLog) addDimToMimeTypeChart(mimeType string) {
+ chartID := mimeTypeChart.ID
+ dimID := pxMimeType + mimeType
+ s.addDimToChartOrCreateIfNotExist(chartID, dimID, mimeType)
+}
+
+func (s *SquidLog) addDimToChart(chartID, dimID, dimName string) {
+ chart := s.Charts().Get(chartID)
+ if chart == nil {
+ s.Warningf("add '%s' dim: couldn't find '%s' chart in charts", dimID, chartID)
+ return
+ }
+
+ dim := &Dim{ID: dimID, Name: dimName, Algo: module.Incremental}
+
+ if err := chart.AddDim(dim); err != nil {
+ s.Warningf("add '%s' dim: %v", dimID, err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (s *SquidLog) addDimToChartOrCreateIfNotExist(chartID, dimID, dimName string) {
+ if s.Charts().Has(chartID) {
+ s.addDimToChart(chartID, dimID, dimName)
+ return
+ }
+
+ chart := newChartByID(chartID)
+ if chart == nil {
+ s.Warningf("add '%s' dim: couldn't create '%s' chart", dimID, chartID)
+ return
+ }
+ if err := s.Charts().Add(chart); err != nil {
+ s.Warning(err)
+ return
+ }
+ s.addDimToChart(chartID, dimID, dimName)
+}
+
+func newChartByID(chartID string) *Chart {
+ switch chartID {
+ case serverAddrChart.ID:
+ return serverAddrChart.Copy()
+ case mimeTypeChart.ID:
+ return mimeTypeChart.Copy()
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/squidlog/config_schema.json b/src/go/plugin/go.d/modules/squidlog/config_schema.json
new file mode 100644
index 000000000..47e55b09b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/config_schema.json
@@ -0,0 +1,217 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "path": {
+ "title": "Log file",
+ "description": "The file path to the Squid server log file.",
+ "type": "string",
+ "default": "/var/log/squid/access.log",
+ "pattern": "^$|^/"
+ },
+ "exclude_path": {
+ "title": "Exclude path",
+ "description": "Pattern to exclude log files.",
+ "type": "string",
+ "default": "*.gz"
+ },
+ "log_type": {
+ "title": "Log parser",
+ "description": "Type of parser to use for parsing the Squid server log file.",
+ "type": "string",
+ "enum": [
+ "csv",
+ "regexp",
+ "json",
+ "ltsv"
+ ],
+ "default": "csv"
+ }
+ },
+ "required": [
+ "path",
+ "log_type"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ },
+ "dependencies": {
+ "log_type": {
+ "oneOf": [
+ {
+ "properties": {
+ "log_type": {
+ "const": "csv"
+ },
+ "csv_config": {
+ "title": "CSV parser configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "format": {
+ "title": "Format",
+ "description": "Log format.",
+ "type": "string",
+ "default": "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type"
+ },
+ "delimiter": {
+ "title": "Delimiter",
+ "description": "Delimiter used to separate fields in the log file. Default: space (' ').",
+ "type": "string",
+ "default": " "
+ }
+ },
+ "required": [
+ "format",
+ "delimiter"
+ ]
+ }
+ },
+ "required": [
+ "csv_config"
+ ]
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "regexp"
+ },
+ "regexp_config": {
+ "title": "Regular expression parser configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "pattern": {
+ "title": "Pattern with named groups",
+ "description": "Regular expression pattern with named groups. Use named groups for known fields.",
+ "type": "string",
+ "default": ""
+ }
+ },
+ "required": [
+ "pattern"
+ ]
+ }
+ },
+ "required": [
+ "regexp_config"
+ ]
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "json"
+ },
+ "json_config": {
+ "title": "JSON parser configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "mapping": {
+ "title": "Field mapping",
+ "description": "Dictionary mapping fields in logs to known fields.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "ltsv"
+ },
+ "ltsv_config": {
+ "title": "LTSV parser configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "field_delimiter": {
+ "title": "Field delimiter",
+ "description": "Delimiter used to separate fields in LTSV logs. Default: tab ('\\t').",
+ "type": "string",
+ "default": "\t"
+ },
+ "value_delimiter": {
+ "title": "Value delimiter",
+ "description": "Delimiter used to separate label-value pairs in LTSV logs.",
+ "type": "string",
+ "default": ":"
+ },
+ "mapping": {
+ "title": "Field mapping",
+ "description": "Dictionary mapping fields in logs to known fields.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "log_type": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "path",
+ "exclude_path"
+ ]
+ },
+ {
+ "title": "Parser",
+ "fields": [
+ "log_type",
+ "csv_config",
+ "ltsv_config",
+ "regexp_config",
+ "json_config"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/squidlog/init.go b/src/go/plugin/go.d/modules/squidlog/init.go
new file mode 100644
index 000000000..fd3a76c9a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/init.go
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+)
+
+func (s *SquidLog) createLogReader() error {
+ s.Cleanup()
+ s.Debug("starting log reader creating")
+
+ reader, err := logs.Open(s.Path, s.ExcludePath, s.Logger)
+ if err != nil {
+ return fmt.Errorf("creating log reader: %v", err)
+ }
+
+ s.Debugf("created log reader, current file '%s'", reader.CurrentFilename())
+ s.file = reader
+ return nil
+}
+
+func (s *SquidLog) createParser() error {
+ s.Debug("starting parser creating")
+
+ const readLastLinesNum = 100
+
+ lines, err := logs.ReadLastLines(s.file.CurrentFilename(), readLastLinesNum)
+ if err != nil {
+ return fmt.Errorf("failed to read last lines: %v", err)
+ }
+
+ var found bool
+ for _, line := range lines {
+ if line = strings.TrimSpace(line); line == "" {
+ continue
+ }
+
+ s.Debugf("last line: '%s'", line)
+
+ s.parser, err = logs.NewParser(s.ParserConfig, s.file)
+ if err != nil {
+ s.Debugf("failed to create parser from line: %v", err)
+ continue
+ }
+
+ s.line.reset()
+
+ if err = s.parser.Parse([]byte(line), s.line); err != nil {
+ s.Debugf("failed to parse line: %v", err)
+ continue
+ }
+
+ if err = s.line.verify(); err != nil {
+ s.Debugf("failed to verify line: %v", err)
+ continue
+ }
+
+ found = true
+ break
+ }
+
+ if !found {
+ return fmt.Errorf("failed to create log parser (file '%s')", s.file.CurrentFilename())
+ }
+
+ return nil
+}
+
+func checkCSVFormatField(name string) (newName string, offset int, valid bool) {
+ name = cleanField(name)
+ if !knownField(name) {
+ return "", 0, false
+ }
+ return name, 0, true
+}
+
+func cleanField(name string) string {
+ return strings.TrimLeft(name, "$%")
+}
+
+func knownField(name string) bool {
+ switch name {
+ case fieldRespTime, fieldClientAddr, fieldCacheCode, fieldHTTPCode, fieldRespSize, fieldReqMethod:
+ fallthrough
+ case fieldHierCode, fieldServerAddr, fieldMimeType, fieldResultCode, fieldHierarchy:
+ return true
+ }
+ return false
+}
diff --git a/src/go/plugin/go.d/modules/squidlog/integrations/squid_log_files.md b/src/go/plugin/go.d/modules/squidlog/integrations/squid_log_files.md
new file mode 100644
index 000000000..7d1e4799e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/integrations/squid_log_files.md
@@ -0,0 +1,284 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/squidlog/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/squidlog/metadata.yaml"
+sidebar_label: "Squid log files"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Squid log files
+
+
+<img src="https://netdata.cloud/img/squid.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: squidlog
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Squid servers by parsing their access log files.
+
+
+It automatically detects log files of Squid servers running on localhost.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Squid log files instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| squidlog.requests | requests | requests/s |
+| squidlog.excluded_requests | unmatched | requests/s |
+| squidlog.type_requests | success, bad, redirect, error | requests/s |
+| squidlog.http_status_code_class_responses | 0xx, 1xx, 2xx, 3xx, 4xx, 5xx, 6xx | responses/s |
+| squidlog.http_status_code_responses | a dimension per HTTP response code | responses/s |
+| squidlog.bandwidth | sent | kilobits/s |
+| squidlog.response_time | min, max, avg | milliseconds |
+| squidlog.uniq_clients | clients | clients |
+| squidlog.cache_result_code_requests | a dimension per cache result code | requests/s |
+| squidlog.cache_result_code_transport_tag_requests | a dimension per cache result delivery transport tag | requests/s |
+| squidlog.cache_result_code_handling_tag_requests | a dimension per cache result handling tag | requests/s |
+| squidlog.cache_code_object_tag_requests | a dimension per cache result produced object tag | requests/s |
+| squidlog.cache_code_load_source_tag_requests | a dimension per cache result load source tag | requests/s |
+| squidlog.cache_code_error_tag_requests | a dimension per cache result error tag | requests/s |
+| squidlog.http_method_requests | a dimension per HTTP method | requests/s |
+| squidlog.mime_type_requests | a dimension per MIME type | requests/s |
+| squidlog.hier_code_requests | a dimension per hierarchy code | requests/s |
+| squidlog.server_address_forwarded_requests | a dimension per server address | requests/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/squidlog.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/squidlog.conf
+```
+
+#### Options
+
+Squid [log format codes](http://www.squid-cache.org/Doc/config/logformat/).
+
+Squidlog knows how to parse and interpret the following codes:
+
+| field | squid format code | description |
+|----------------|-------------------|---------------------------------------------------------------|
+| resp_time | %tr | Response time (milliseconds). |
+| client_address | %>a | Client source IP address. |
+| client_address | %>A | Client FQDN. |
+| cache_code | %Ss | Squid request status (TCP_MISS etc). |
+| http_code | %>Hs | The HTTP response status code from Content Gateway to client. |
+| resp_size | %<st | Total size of reply sent to client (after adaptation). |
+| req_method | %rm | Request method (GET/POST etc). |
+| hier_code | %Sh | Squid hierarchy status (DEFAULT_PARENT etc). |
+| server_address | %<a | Server IP address of the last server or peer connection. |
+| server_address | %<A | Server FQDN or peer name. |
+| mime_type | %mt | MIME content type. |
+
+In addition, to make `Squid` [native log format](https://wiki.squid-cache.org/Features/LogFormat#Squid_native_access.log_format_in_detail) csv parsable, squidlog understands these groups of codes:
+
+| field | squid format code | description |
+|-------------|-------------------|------------------------------------|
+| result_code | %Ss/%>Hs | Cache code and http code. |
+| hierarchy | %Sh/%<a | Hierarchy code and server address. |
+
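+For illustration, here is a made-up access log line in Squid's native format (all field values are invented):
+
+```text
+1689335735.123     56 203.0.113.7 TCP_MISS/200 4316 GET http://example.com/index.html - HIER_DIRECT/198.51.100.25 text/html
+```
+
+Here `TCP_MISS/200` is the combined `result_code` (cache code and HTTP code), and `HIER_DIRECT/198.51.100.25` is the combined `hierarchy` (hierarchy code and server address) described in the table above.
+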
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| path | Path to the Squid access log file. | /var/log/squid/access.log | yes |
+| exclude_path | Path to exclude. | *.gz | no |
+| parser | Log parser configuration. | | no |
+| parser.log_type | Log parser type. | auto | no |
+| parser.csv_config | CSV log parser config. | | no |
+| parser.csv_config.delimiter | CSV field delimiter. | space | no |
+| parser.csv_config.format | CSV log format. | - $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type | yes |
+| parser.ltsv_config | LTSV log parser config. | | no |
+| parser.ltsv_config.field_delimiter | LTSV field delimiter. | \t | no |
+| parser.ltsv_config.value_delimiter | LTSV value delimiter. | : | no |
+| parser.ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |
+| parser.regexp_config | RegExp log parser config. | | no |
+| parser.regexp_config.pattern | RegExp pattern with named groups. | | yes |
+
+##### parser.log_type
+
+Squidlog supports 3 different log parsers:
+
+| Parser type | Description |
+|-------------|-------------------------------------------|
+| csv         | Comma-separated values                    |
+| ltsv | [LTSV](http://ltsv.org/) |
+| regexp | Regular expression with named groups |
+
+Syntax:
+
+```yaml
+parser:
+ log_type: csv
+```
+
+
+##### parser.csv_config.format
+
+
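+The format lists the log fields in order using **known field** names; a `-` in the format marks a field in that position that squidlog does not collect. As a minimal sketch, the default format from the options table above can be set explicitly like this:
+
+```yaml
+parser:
+  log_type: csv
+  csv_config:
+    format: '- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type'
+```
+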
+
+##### parser.ltsv_config.mapping
+
+The mapping is a dictionary where the key is the field name as it appears in the logs and the value is the corresponding **known field**.
+
+> **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+```yaml
+parser:
+ log_type: ltsv
+ ltsv_config:
+ mapping:
+ label1: field1
+ label2: field2
+```
+
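+For instance, assuming your Squid logs used the hypothetical LTSV labels `rt`, `addr` and `status`, the mapping could look like:
+
+```yaml
+parser:
+  log_type: ltsv
+  ltsv_config:
+    mapping:
+      rt: resp_time
+      addr: client_address
+      status: http_code
+```
+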
+
+##### parser.regexp_config.pattern
+
+Use a pattern with named subexpressions (capture groups). These group names should be **known fields**.
+
+> **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+Syntax:
+
+```yaml
+parser:
+ log_type: regexp
+ regexp_config:
+ pattern: PATTERN
+```
+
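+As an illustrative (untested) sketch, a pattern for Squid's native log format could look like:
+
+```yaml
+parser:
+  log_type: regexp
+  regexp_config:
+    pattern: '^\S+ +(?P<resp_time>\d+) (?P<client_address>\S+) (?P<result_code>\S+) (?P<resp_size>\d+) (?P<req_method>\S+) \S+ \S+ (?P<hierarchy>\S+) (?P<mime_type>\S+)'
+```
+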
+
+</details>
+
+#### Examples
+There are no configuration examples.
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `squidlog` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m squidlog
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `squidlog` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep squidlog
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep squidlog /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep squidlog
+```
+
+
diff --git a/src/go/plugin/go.d/modules/squidlog/logline.go b/src/go/plugin/go.d/modules/squidlog/logline.go
new file mode 100644
index 000000000..47a8bf8f9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/logline.go
@@ -0,0 +1,413 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// https://wiki.squid-cache.org/Features/LogFormat
+// http://www.squid-cache.org/Doc/config/logformat/
+// https://wiki.squid-cache.org/SquidFaq/SquidLogs#Squid_result_codes
+// https://www.websense.com/content/support/library/web/v773/wcg_help/squid.aspx
+
+/*
+4.6.1:
+logformat squid %ts.%03tu %6tr %>a %Ss/%03>Hs %<st %rm %ru %[un %Sh/%<a %mt
+logformat common %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st %Ss:%Sh
+logformat combined %>a %[ui %[un [%tl] "%rm %ru HTTP/%rv" %>Hs %<st "%{Referer}>h" "%{User-Agent}>h" %Ss:%Sh
+logformat referrer %ts.%03tu %>a %{Referer}>h %ru
+logformat useragent %>a [%tl] "%{User-Agent}>h"
+logformat icap_squid %ts.%03tu %6icap::tr %>A %icap::to/%03icap::Hs %icap::<st %icap::rm %icap::ru %un -/%icap::<A -
+*/
+
+/*
+Valid Capture Name: [A-Za-z0-9_]+
+// TODO: namings
+
+| local | squid format code | description |
+|-------------------------|-------------------|------------------------------------------------------------------------|
+| resp_time | %tr | Response time (milliseconds).
+| client_address | %>a | Client source IP address.
+| client_address | %>A | Client FQDN.
+| cache_code | %Ss | Squid request status (TCP_MISS etc).
+| http_code | %>Hs | The HTTP response status code from Content Gateway to client.
+| resp_size | %<st | Total size of reply sent to client (after adaptation).
+| req_method | %rm | Request method (GET/POST etc).
+| hier_code | %Sh | Squid hierarchy status (DEFAULT_PARENT etc).
+| server_address | %<a | Server IP address of the last server or peer connection.
+| server_address | %<A | Server FQDN or peer name.
+| mime_type | %mt | MIME content type.
+
+// Following needed to make default log format csv parsable
+| result_code | %Ss/%03>Hs | cache code and http code.
+| hierarchy | %Sh/%<a | hierarchy code and server address.
+
+Notes:
+- %<a: older versions of Squid would put the origin server hostname here.
+*/
+
+var (
+ errEmptyLine = errors.New("empty line")
+ errBadRespTime = errors.New("bad response time")
+ errBadClientAddr = errors.New("bad client address")
+ errBadCacheCode = errors.New("bad cache code")
+ errBadHTTPCode = errors.New("bad http code")
+ errBadRespSize = errors.New("bad response size")
+ errBadReqMethod = errors.New("bad request method")
+ errBadHierCode = errors.New("bad hier code")
+ errBadServerAddr = errors.New("bad server address")
+ errBadMimeType = errors.New("bad mime type")
+ errBadResultCode = errors.New("bad result code")
+ errBadHierarchy = errors.New("bad hierarchy")
+)
+
+func newEmptyLogLine() *logLine {
+ var l logLine
+ l.reset()
+ return &l
+}
+
+type (
+ logLine struct {
+ clientAddr string
+ serverAddr string
+
+ respTime int
+ respSize int
+ httpCode int
+
+ reqMethod string
+ mimeType string
+
+ cacheCode string
+ hierCode string
+ }
+)
+
+const (
+ fieldRespTime = "resp_time"
+ fieldClientAddr = "client_address"
+ fieldCacheCode = "cache_code"
+ fieldHTTPCode = "http_code"
+ fieldRespSize = "resp_size"
+ fieldReqMethod = "req_method"
+ fieldHierCode = "hier_code"
+ fieldServerAddr = "server_address"
+ fieldMimeType = "mime_type"
+ fieldResultCode = "result_code"
+ fieldHierarchy = "hierarchy"
+)
+
+func (l *logLine) Assign(field string, value string) (err error) {
+ if value == "" {
+ return
+ }
+
+ switch field {
+ case fieldRespTime:
+ err = l.assignRespTime(value)
+ case fieldClientAddr:
+ err = l.assignClientAddress(value)
+ case fieldCacheCode:
+ err = l.assignCacheCode(value)
+ case fieldHTTPCode:
+ err = l.assignHTTPCode(value)
+ case fieldRespSize:
+ err = l.assignRespSize(value)
+ case fieldReqMethod:
+ err = l.assignReqMethod(value)
+ case fieldHierCode:
+ err = l.assignHierCode(value)
+ case fieldMimeType:
+ err = l.assignMimeType(value)
+ case fieldServerAddr:
+ err = l.assignServerAddress(value)
+ case fieldResultCode:
+ err = l.assignResultCode(value)
+ case fieldHierarchy:
+ err = l.assignHierarchy(value)
+ }
+ return err
+}
+
+const hyphen = "-"
+
+func (l *logLine) assignRespTime(time string) error {
+ if time == hyphen {
+ return fmt.Errorf("assign '%s': %w", time, errBadRespTime)
+ }
+ v, err := strconv.Atoi(time)
+ if err != nil || !isRespTimeValid(v) {
+ return fmt.Errorf("assign '%s': %w", time, errBadRespTime)
+ }
+ l.respTime = v
+ return nil
+}
+
+func (l *logLine) assignClientAddress(address string) error {
+ if address == hyphen {
+ return fmt.Errorf("assign '%s': %w", address, errBadClientAddr)
+ }
+ l.clientAddr = address
+ return nil
+}
+
+func (l *logLine) assignCacheCode(code string) error {
+ if code == hyphen || !isCacheCodeValid(code) {
+ return fmt.Errorf("assign '%s': %w", code, errBadCacheCode)
+ }
+ l.cacheCode = code
+ return nil
+}
+
+func (l *logLine) assignHTTPCode(code string) error {
+ if code == hyphen {
+ return fmt.Errorf("assign '%s': %w", code, errBadHTTPCode)
+ }
+ v, err := strconv.Atoi(code)
+ if err != nil || !isHTTPCodeValid(v) {
+ return fmt.Errorf("assign '%s': %w", code, errBadHTTPCode)
+ }
+ l.httpCode = v
+ return nil
+}
+
+func (l *logLine) assignResultCode(code string) error {
+ i := strings.IndexByte(code, '/')
+ if i <= 0 {
+ return fmt.Errorf("assign '%s': %w", code, errBadResultCode)
+ }
+ if err := l.assignCacheCode(code[:i]); err != nil {
+ return err
+ }
+ return l.assignHTTPCode(code[i+1:])
+}
+
+func (l *logLine) assignRespSize(size string) error {
+ if size == hyphen {
+ return fmt.Errorf("assign '%s': %w", size, errBadRespSize)
+ }
+ v, err := strconv.Atoi(size)
+ if err != nil || !isRespSizeValid(v) {
+ return fmt.Errorf("assign '%s': %w", size, errBadRespSize)
+ }
+ l.respSize = v
+ return nil
+}
+
+func (l *logLine) assignReqMethod(method string) error {
+ if method == hyphen || !isReqMethodValid(method) {
+ return fmt.Errorf("assign '%s': %w", method, errBadReqMethod)
+ }
+ l.reqMethod = method
+ return nil
+}
+
+func (l *logLine) assignHierCode(code string) error {
+ if code == hyphen || !isHierCodeValid(code) {
+ return fmt.Errorf("assign '%s': %w", code, errBadHierCode)
+ }
+ l.hierCode = code
+ return nil
+}
+
+func (l *logLine) assignServerAddress(address string) error {
+ // Logged as "-" if there is no hierarchy information.
+ // For TCP HIT, TCP failures, cachemgr requests and all UDP requests, there is no hierarchy information.
+ if address == hyphen {
+ return nil
+ }
+ l.serverAddr = address
+ return nil
+}
+
+func (l *logLine) assignHierarchy(hierarchy string) error {
+ i := strings.IndexByte(hierarchy, '/')
+ if i <= 0 {
+ return fmt.Errorf("assign '%s': %w", hierarchy, errBadHierarchy)
+ }
+ if err := l.assignHierCode(hierarchy[:i]); err != nil {
+ return err
+ }
+ return l.assignServerAddress(hierarchy[i+1:])
+}
+
+func (l *logLine) assignMimeType(mime string) error {
+ // ICP exchanges usually don't have any content type, and thus are logged "-".
+ // Also, some weird replies have content types ":" or even empty ones.
+ if mime == hyphen || mime == ":" {
+ return nil
+ }
+ // format: type/subtype, type/subtype;parameter=value
+ i := strings.IndexByte(mime, '/')
+ if i <= 0 {
+ return fmt.Errorf("assign '%s': %w", mime, errBadMimeType)
+ }
+
+ if !isMimeTypeValid(mime[:i]) {
+ return nil
+ }
+
+ l.mimeType = mime[:i] // drop subtype
+
+ return nil
+}
+
+func (l logLine) verify() error {
+ if l.empty() {
+ return fmt.Errorf("verify: %w", errEmptyLine)
+ }
+ if l.hasRespTime() && !l.isRespTimeValid() {
+ return fmt.Errorf("verify '%d': %w", l.respTime, errBadRespTime)
+ }
+ if l.hasClientAddress() && !l.isClientAddressValid() {
+ return fmt.Errorf("verify '%s': %w", l.clientAddr, errBadClientAddr)
+ }
+ if l.hasCacheCode() && !l.isCacheCodeValid() {
+ return fmt.Errorf("verify '%s': %w", l.cacheCode, errBadCacheCode)
+ }
+ if l.hasHTTPCode() && !l.isHTTPCodeValid() {
+ return fmt.Errorf("verify '%d': %w", l.httpCode, errBadHTTPCode)
+ }
+ if l.hasRespSize() && !l.isRespSizeValid() {
+ return fmt.Errorf("verify '%d': %w", l.respSize, errBadRespSize)
+ }
+ if l.hasReqMethod() && !l.isReqMethodValid() {
+ return fmt.Errorf("verify '%s': %w", l.reqMethod, errBadReqMethod)
+ }
+ if l.hasHierCode() && !l.isHierCodeValid() {
+ return fmt.Errorf("verify '%s': %w", l.hierCode, errBadHierCode)
+ }
+ if l.hasServerAddress() && !l.isServerAddressValid() {
+ return fmt.Errorf("verify '%s': %w", l.serverAddr, errBadServerAddr)
+ }
+ if l.hasMimeType() && !l.isMimeTypeValid() {
+ return fmt.Errorf("verify '%s': %w", l.mimeType, errBadMimeType)
+ }
+ return nil
+}
+
+func (l logLine) empty() bool { return l == emptyLogLine }
+func (l logLine) hasRespTime() bool { return !isEmptyNumber(l.respTime) }
+func (l logLine) hasClientAddress() bool { return !isEmptyString(l.clientAddr) }
+func (l logLine) hasCacheCode() bool { return !isEmptyString(l.cacheCode) }
+func (l logLine) hasHTTPCode() bool { return !isEmptyNumber(l.httpCode) }
+func (l logLine) hasRespSize() bool { return !isEmptyNumber(l.respSize) }
+func (l logLine) hasReqMethod() bool { return !isEmptyString(l.reqMethod) }
+func (l logLine) hasHierCode() bool { return !isEmptyString(l.hierCode) }
+func (l logLine) hasServerAddress() bool { return !isEmptyString(l.serverAddr) }
+func (l logLine) hasMimeType() bool { return !isEmptyString(l.mimeType) }
+func (l logLine) isRespTimeValid() bool { return isRespTimeValid(l.respTime) }
+func (l logLine) isClientAddressValid() bool { return reAddress.MatchString(l.clientAddr) }
+func (l logLine) isCacheCodeValid() bool { return isCacheCodeValid(l.cacheCode) }
+func (l logLine) isHTTPCodeValid() bool { return isHTTPCodeValid(l.httpCode) }
+func (l logLine) isRespSizeValid() bool { return isRespSizeValid(l.respSize) }
+func (l logLine) isReqMethodValid() bool { return isReqMethodValid(l.reqMethod) }
+func (l logLine) isHierCodeValid() bool { return isHierCodeValid(l.hierCode) }
+func (l logLine) isServerAddressValid() bool { return reAddress.MatchString(l.serverAddr) }
+func (l logLine) isMimeTypeValid() bool { return isMimeTypeValid(l.mimeType) }
+
+func (l *logLine) reset() {
+ l.respTime = emptyNumber
+ l.clientAddr = emptyString
+ l.cacheCode = emptyString
+ l.httpCode = emptyNumber
+ l.respSize = emptyNumber
+ l.reqMethod = emptyString
+ l.hierCode = emptyString
+ l.serverAddr = emptyString
+ l.mimeType = emptyString
+}
+
+var emptyLogLine = *newEmptyLogLine()
+
+const (
+ emptyString = "__empty_string__"
+ emptyNumber = -9999
+)
+
+var (
+ // IPv4, IPv6, FQDN.
+ reAddress = regexp.MustCompile(`^(?:(?:[0-9]{1,3}\.){3}[0-9]{1,3}|[a-f0-9:]{3,}|[a-zA-Z0-9-.]{3,})$`)
+)
+
+func isEmptyString(s string) bool {
+ return s == emptyString || s == ""
+}
+
+func isEmptyNumber(n int) bool {
+ return n == emptyNumber
+}
+
+func isRespTimeValid(time int) bool {
+ return time >= 0
+}
+
+// isCacheCodeValid does not guarantee cache result code is valid, but it is very likely.
+func isCacheCodeValid(code string) bool {
+ // https://wiki.squid-cache.org/SquidFaq/SquidLogs#Squid_result_codes
+ if code == "NONE" || code == "NONE_NONE" {
+ return true
+ }
+ return len(code) > 5 && (code[:4] == "TCP_" || code[:4] == "UDP_")
+}
+
+func isHTTPCodeValid(code int) bool {
+ // https://wiki.squid-cache.org/SquidFaq/SquidLogs#HTTP_status_codes
+ return code == 0 || code >= 100 && code <= 603
+}
+
+func isRespSizeValid(size int) bool {
+ return size >= 0
+}
+
+func isReqMethodValid(method string) bool {
+ // https://wiki.squid-cache.org/SquidFaq/SquidLogs#Request_methods
+ switch method {
+ case "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ "PATCH",
+ "DELETE",
+ "CONNECT",
+ "OPTIONS",
+ "TRACE",
+ "ICP_QUERY",
+ "PURGE",
+ "PROPFIND",
+ "PROPATCH",
+ "MKCOL",
+ "COPY",
+ "MOVE",
+ "LOCK",
+ "UNLOCK",
+ "NONE":
+ return true
+ }
+ return false
+}
+
+// isHierCodeValid does not guarantee hierarchy code is valid, but it is very likely.
+func isHierCodeValid(code string) bool {
+ // https://wiki.squid-cache.org/SquidFaq/SquidLogs#Hierarchy_Codes
+ return len(code) > 6 && code[:5] == "HIER_"
+}
+
+// isMimeTypeValid expects only mime type part.
+func isMimeTypeValid(mimeType string) bool {
+ // https://www.iana.org/assignments/media-types/media-types.xhtml
+ if mimeType == "text" {
+ return true
+ }
+ switch mimeType {
+ case "application", "audio", "font", "image", "message", "model", "multipart", "video":
+ return true
+ }
+ return false
+}
diff --git a/src/go/plugin/go.d/modules/squidlog/logline_test.go b/src/go/plugin/go.d/modules/squidlog/logline_test.go
new file mode 100644
index 000000000..cb3f399fe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/logline_test.go
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import (
+ "errors"
+ "fmt"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const emptyStr = ""
+
+func TestLogLine_Assign(t *testing.T) {
+ type subTest struct {
+ input string
+ wantLine logLine
+ wantErr error
+ }
+ type test struct {
+ name string
+ field string
+ cases []subTest
+ }
+ tests := []test{
+ {
+ name: "Response Time",
+ field: fieldRespTime,
+ cases: []subTest{
+ {input: "0", wantLine: logLine{respTime: 0}},
+ {input: "1000", wantLine: logLine{respTime: 1000}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine, wantErr: errBadRespTime},
+ {input: "-1", wantLine: emptyLogLine, wantErr: errBadRespTime},
+ {input: "0.000", wantLine: emptyLogLine, wantErr: errBadRespTime},
+ },
+ },
+ {
+ name: "Client Address",
+ field: fieldClientAddr,
+ cases: []subTest{
+ {input: "127.0.0.1", wantLine: logLine{clientAddr: "127.0.0.1"}},
+ {input: "::1", wantLine: logLine{clientAddr: "::1"}},
+ {input: "kadr20.m1.netdata.lan", wantLine: logLine{clientAddr: "kadr20.m1.netdata.lan"}},
+ {input: "±!@#$%^&*()", wantLine: logLine{clientAddr: "±!@#$%^&*()"}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine, wantErr: errBadClientAddr},
+ },
+ },
+ {
+ name: "Cache Code",
+ field: fieldCacheCode,
+ cases: []subTest{
+ {input: "TCP_MISS", wantLine: logLine{cacheCode: "TCP_MISS"}},
+ {input: "TCP_DENIED", wantLine: logLine{cacheCode: "TCP_DENIED"}},
+ {input: "TCP_CLIENT_REFRESH_MISS", wantLine: logLine{cacheCode: "TCP_CLIENT_REFRESH_MISS"}},
+ {input: "UDP_MISS_NOFETCH", wantLine: logLine{cacheCode: "UDP_MISS_NOFETCH"}},
+ {input: "UDP_INVALID", wantLine: logLine{cacheCode: "UDP_INVALID"}},
+ {input: "NONE", wantLine: logLine{cacheCode: "NONE"}},
+ {input: "NONE_NONE", wantLine: logLine{cacheCode: "NONE_NONE"}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine, wantErr: errBadCacheCode},
+ {input: "TCP", wantLine: emptyLogLine, wantErr: errBadCacheCode},
+ {input: "UDP_", wantLine: emptyLogLine, wantErr: errBadCacheCode},
+ {input: "NONE_MISS", wantLine: emptyLogLine, wantErr: errBadCacheCode},
+ },
+ },
+ {
+ name: "HTTP Code",
+ field: fieldHTTPCode,
+ cases: []subTest{
+ {input: "000", wantLine: logLine{httpCode: 0}},
+ {input: "100", wantLine: logLine{httpCode: 100}},
+ {input: "200", wantLine: logLine{httpCode: 200}},
+ {input: "300", wantLine: logLine{httpCode: 300}},
+ {input: "400", wantLine: logLine{httpCode: 400}},
+ {input: "500", wantLine: logLine{httpCode: 500}},
+ {input: "603", wantLine: logLine{httpCode: 603}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine, wantErr: errBadHTTPCode},
+ {input: "1", wantLine: emptyLogLine, wantErr: errBadHTTPCode},
+ {input: "604", wantLine: emptyLogLine, wantErr: errBadHTTPCode},
+ {input: "1000", wantLine: emptyLogLine, wantErr: errBadHTTPCode},
+ {input: "TCP_MISS", wantLine: emptyLogLine, wantErr: errBadHTTPCode},
+ },
+ },
+ {
+ name: "Response Size",
+ field: fieldRespSize,
+ cases: []subTest{
+ {input: "0", wantLine: logLine{respSize: 0}},
+ {input: "1000", wantLine: logLine{respSize: 1000}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine, wantErr: errBadRespSize},
+ {input: "-1", wantLine: emptyLogLine, wantErr: errBadRespSize},
+ {input: "0.000", wantLine: emptyLogLine, wantErr: errBadRespSize},
+ },
+ },
+ {
+ name: "Request Method",
+ field: fieldReqMethod,
+ cases: []subTest{
+ {input: "GET", wantLine: logLine{reqMethod: "GET"}},
+ {input: "HEAD", wantLine: logLine{reqMethod: "HEAD"}},
+ {input: "POST", wantLine: logLine{reqMethod: "POST"}},
+ {input: "PUT", wantLine: logLine{reqMethod: "PUT"}},
+ {input: "PATCH", wantLine: logLine{reqMethod: "PATCH"}},
+ {input: "DELETE", wantLine: logLine{reqMethod: "DELETE"}},
+ {input: "CONNECT", wantLine: logLine{reqMethod: "CONNECT"}},
+ {input: "OPTIONS", wantLine: logLine{reqMethod: "OPTIONS"}},
+ {input: "TRACE", wantLine: logLine{reqMethod: "TRACE"}},
+ {input: "ICP_QUERY", wantLine: logLine{reqMethod: "ICP_QUERY"}},
+ {input: "PURGE", wantLine: logLine{reqMethod: "PURGE"}},
+ {input: "PROPFIND", wantLine: logLine{reqMethod: "PROPFIND"}},
+ {input: "PROPATCH", wantLine: logLine{reqMethod: "PROPATCH"}},
+ {input: "MKCOL", wantLine: logLine{reqMethod: "MKCOL"}},
+ {input: "COPY", wantLine: logLine{reqMethod: "COPY"}},
+ {input: "MOVE", wantLine: logLine{reqMethod: "MOVE"}},
+ {input: "LOCK", wantLine: logLine{reqMethod: "LOCK"}},
+ {input: "UNLOCK", wantLine: logLine{reqMethod: "UNLOCK"}},
+ {input: "NONE", wantLine: logLine{reqMethod: "NONE"}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine, wantErr: errBadReqMethod},
+ {input: "get", wantLine: emptyLogLine, wantErr: errBadReqMethod},
+ {input: "0.000", wantLine: emptyLogLine, wantErr: errBadReqMethod},
+ {input: "TCP_MISS", wantLine: emptyLogLine, wantErr: errBadReqMethod},
+ },
+ },
+ {
+ name: "Hier Code",
+ field: fieldHierCode,
+ cases: []subTest{
+ {input: "HIER_NONE", wantLine: logLine{hierCode: "HIER_NONE"}},
+ {input: "HIER_SIBLING_HIT", wantLine: logLine{hierCode: "HIER_SIBLING_HIT"}},
+ {input: "HIER_NO_CACHE_DIGEST_DIRECT", wantLine: logLine{hierCode: "HIER_NO_CACHE_DIGEST_DIRECT"}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine, wantErr: errBadHierCode},
+ {input: "0.000", wantLine: emptyLogLine, wantErr: errBadHierCode},
+ {input: "TCP_MISS", wantLine: emptyLogLine, wantErr: errBadHierCode},
+ {input: "HIER", wantLine: emptyLogLine, wantErr: errBadHierCode},
+ {input: "HIER_", wantLine: emptyLogLine, wantErr: errBadHierCode},
+ {input: "NONE", wantLine: emptyLogLine, wantErr: errBadHierCode},
+ {input: "SIBLING_HIT", wantLine: emptyLogLine, wantErr: errBadHierCode},
+ {input: "NO_CACHE_DIGEST_DIRECT", wantLine: emptyLogLine, wantErr: errBadHierCode},
+ },
+ },
+ {
+ name: "Server Address",
+ field: fieldServerAddr,
+ cases: []subTest{
+ {input: "127.0.0.1", wantLine: logLine{serverAddr: "127.0.0.1"}},
+ {input: "::1", wantLine: logLine{serverAddr: "::1"}},
+ {input: "kadr20.m1.netdata.lan", wantLine: logLine{serverAddr: "kadr20.m1.netdata.lan"}},
+ {input: "±!@#$%^&*()", wantLine: logLine{serverAddr: "±!@#$%^&*()"}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ },
+ },
+ {
+ name: "Mime Type",
+ field: fieldMimeType,
+ cases: []subTest{
+ {input: "application/zstd", wantLine: logLine{mimeType: "application"}},
+ {input: "audio/3gpp2", wantLine: logLine{mimeType: "audio"}},
+ {input: "font/otf", wantLine: logLine{mimeType: "font"}},
+ {input: "image/tiff", wantLine: logLine{mimeType: "image"}},
+ {input: "message/global", wantLine: logLine{mimeType: "message"}},
+ {input: "model/example", wantLine: logLine{mimeType: "model"}},
+ {input: "multipart/encrypted", wantLine: logLine{mimeType: "multipart"}},
+ {input: "text/html", wantLine: logLine{mimeType: "text"}},
+ {input: "video/3gpp", wantLine: logLine{mimeType: "video"}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "example/example", wantLine: emptyLogLine},
+ {input: "unknown/example", wantLine: emptyLogLine},
+ {input: "audio", wantLine: emptyLogLine, wantErr: errBadMimeType},
+ {input: "/", wantLine: emptyLogLine, wantErr: errBadMimeType},
+ },
+ },
+ {
+ name: "Result Code",
+ field: fieldResultCode,
+ cases: []subTest{
+ {input: "TCP_MISS/000", wantLine: logLine{cacheCode: "TCP_MISS", httpCode: 0}},
+ {input: "TCP_DENIED/603", wantLine: logLine{cacheCode: "TCP_DENIED", httpCode: 603}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine, wantErr: errBadResultCode},
+ {input: "TCP_MISS:000", wantLine: emptyLogLine, wantErr: errBadResultCode},
+ {input: "TCP_MISS 000", wantLine: emptyLogLine, wantErr: errBadResultCode},
+ {input: "/", wantLine: emptyLogLine, wantErr: errBadResultCode},
+ {input: "tcp/000", wantLine: emptyLogLine, wantErr: errBadCacheCode},
+ {input: "TCP_MISS/", wantLine: logLine{cacheCode: "TCP_MISS", httpCode: emptyNumber}, wantErr: errBadHTTPCode},
+ },
+ },
+ {
+ name: "Hierarchy",
+ field: fieldHierarchy,
+ cases: []subTest{
+ {input: "HIER_NONE/-", wantLine: logLine{hierCode: "HIER_NONE", serverAddr: emptyString}},
+ {input: "HIER_SIBLING_HIT/127.0.0.1", wantLine: logLine{hierCode: "HIER_SIBLING_HIT", serverAddr: "127.0.0.1"}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine, wantErr: errBadHierarchy},
+ {input: "HIER_NONE:-", wantLine: emptyLogLine, wantErr: errBadHierarchy},
+ {input: "HIER_SIBLING_HIT 127.0.0.1", wantLine: emptyLogLine, wantErr: errBadHierarchy},
+ {input: "/", wantLine: emptyLogLine, wantErr: errBadHierarchy},
+ {input: "HIER/-", wantLine: emptyLogLine, wantErr: errBadHierCode},
+ {input: "HIER_NONE/", wantLine: logLine{hierCode: "HIER_NONE", serverAddr: emptyStr}},
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ for i, tc := range tt.cases {
+ name := fmt.Sprintf("[%s:%d]field='%s'|input='%s'", tt.name, i+1, tt.field, tc.input)
+ t.Run(name, func(t *testing.T) {
+
+ line := newEmptyLogLine()
+ err := line.Assign(tt.field, tc.input)
+
+ if tc.wantErr != nil {
+ require.Error(t, err)
+ assert.Truef(t, errors.Is(err, tc.wantErr), "expected '%v' error, got '%v'", tc.wantErr, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ expected := prepareAssignLogLine(t, tt.field, tc.wantLine)
+ assert.Equal(t, expected, *line)
+ })
+ }
+ }
+}
+
+func TestLogLine_verify(t *testing.T) {
+ type subTest struct {
+ input string
+ wantErr error
+ }
+ type test = struct {
+ name string
+ field string
+ cases []subTest
+ }
+ tests := []test{
+ {
+ name: "Response Time",
+ field: fieldRespTime,
+ cases: []subTest{
+ {input: "0"},
+ {input: "1000"},
+ {input: "-1", wantErr: errBadRespTime},
+ },
+ },
+ {
+ name: "Client Address",
+ field: fieldClientAddr,
+ cases: []subTest{
+ {input: "127.0.0.1"},
+ {input: "::1"},
+ {input: "kadr20.m1.netdata.lan"},
+ {input: emptyStr},
+ {input: "±!@#$%^&*()", wantErr: errBadClientAddr},
+ },
+ },
+ {
+ name: "Cache Code",
+ field: fieldCacheCode,
+ cases: []subTest{
+ {input: "TCP_MISS"},
+ {input: "TCP_DENIED"},
+ {input: "TCP_CLIENT_REFRESH_MISS"},
+ {input: "UDP_MISS_NOFETCH"},
+ {input: "UDP_INVALID"},
+ {input: "NONE"},
+ {input: "NONE_NONE"},
+ {input: emptyStr},
+ {input: "TCP", wantErr: errBadCacheCode},
+ {input: "UDP", wantErr: errBadCacheCode},
+ {input: "NONE_MISS", wantErr: errBadCacheCode},
+ },
+ },
+ {
+ name: "HTTP Code",
+ field: fieldHTTPCode,
+ cases: []subTest{
+ {input: "000"},
+ {input: "100"},
+ {input: "200"},
+ {input: "300"},
+ {input: "400"},
+ {input: "500"},
+ {input: "603"},
+ {input: "1", wantErr: errBadHTTPCode},
+ {input: "604", wantErr: errBadHTTPCode},
+ },
+ },
+ {
+ name: "Response Size",
+ field: fieldRespSize,
+ cases: []subTest{
+ {input: "0"},
+ {input: "1000"},
+ {input: "-1", wantErr: errBadRespSize},
+ },
+ },
+ {
+ name: "Request Method",
+ field: fieldReqMethod,
+ cases: []subTest{
+ {input: "GET"},
+ {input: "HEAD"},
+ {input: "POST"},
+ {input: "PUT"},
+ {input: "PATCH"},
+ {input: "DELETE"},
+ {input: "CONNECT"},
+ {input: "OPTIONS"},
+ {input: "TRACE"},
+ {input: "ICP_QUERY"},
+ {input: "PURGE"},
+ {input: "PROPFIND"},
+ {input: "PROPATCH"},
+ {input: "MKCOL"},
+ {input: "COPY"},
+ {input: "MOVE"},
+ {input: "LOCK"},
+ {input: "UNLOCK"},
+ {input: "NONE"},
+ {input: emptyStr},
+ {input: "get", wantErr: errBadReqMethod},
+ {input: "TCP_MISS", wantErr: errBadReqMethod},
+ },
+ },
+ {
+ name: "Hier Code",
+ field: fieldHierCode,
+ cases: []subTest{
+ {input: "HIER_NONE"},
+ {input: "HIER_SIBLING_HIT"},
+ {input: "HIER_NO_CACHE_DIGEST_DIRECT"},
+ {input: emptyStr},
+ {input: "0.000", wantErr: errBadHierCode},
+ {input: "TCP_MISS", wantErr: errBadHierCode},
+ {input: "HIER", wantErr: errBadHierCode},
+ {input: "HIER_", wantErr: errBadHierCode},
+ {input: "NONE", wantErr: errBadHierCode},
+ {input: "SIBLING_HIT", wantErr: errBadHierCode},
+ {input: "NO_CACHE_DIGEST_DIRECT", wantErr: errBadHierCode},
+ },
+ },
+ {
+ name: "Server Address",
+ field: fieldServerAddr,
+ cases: []subTest{
+ {input: "127.0.0.1"},
+ {input: "::1"},
+ {input: "kadr20.m1.netdata.lan"},
+ {input: emptyStr},
+ {input: "±!@#$%^&*()", wantErr: errBadServerAddr},
+ },
+ },
+ {
+ name: "Mime Type",
+ field: fieldMimeType,
+ cases: []subTest{
+ {input: "application"},
+ {input: "audio"},
+ {input: "font"},
+ {input: "image"},
+ {input: "message"},
+ {input: "model"},
+ {input: "multipart"},
+ {input: "text"},
+ {input: "video"},
+ {input: emptyStr},
+ {input: "example/example", wantErr: errBadMimeType},
+ {input: "unknown", wantErr: errBadMimeType},
+ {input: "/", wantErr: errBadMimeType},
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ for i, tc := range tt.cases {
+ name := fmt.Sprintf("[%s:%d]field='%s'|input='%s'", tt.name, i+1, tt.field, tc.input)
+ t.Run(name, func(t *testing.T) {
+ line := prepareVerifyLogLine(t, tt.field, tc.input)
+
+ err := line.verify()
+
+ if tc.wantErr != nil {
+ require.Error(t, err)
+ assert.Truef(t, errors.Is(err, tc.wantErr), "expected '%v' error, got '%v'", tc.wantErr, err)
+ } else {
+ require.NoError(t, err)
+ }
+ })
+ }
+ }
+}
+
+func prepareAssignLogLine(t *testing.T, field string, template logLine) logLine {
+ t.Helper()
+ if template.empty() {
+ return template
+ }
+
+ var line logLine
+ line.reset()
+
+ switch field {
+ default:
+ t.Errorf("prepareAssignLogLine unknown field: '%s'", field)
+ case fieldRespTime:
+ line.respTime = template.respTime
+ case fieldClientAddr:
+ line.clientAddr = template.clientAddr
+ case fieldCacheCode:
+ line.cacheCode = template.cacheCode
+ case fieldHTTPCode:
+ line.httpCode = template.httpCode
+ case fieldRespSize:
+ line.respSize = template.respSize
+ case fieldReqMethod:
+ line.reqMethod = template.reqMethod
+ case fieldHierCode:
+ line.hierCode = template.hierCode
+ case fieldMimeType:
+ line.mimeType = template.mimeType
+ case fieldServerAddr:
+ line.serverAddr = template.serverAddr
+ case fieldResultCode:
+ line.cacheCode = template.cacheCode
+ line.httpCode = template.httpCode
+ case fieldHierarchy:
+ line.hierCode = template.hierCode
+ line.serverAddr = template.serverAddr
+ }
+ return line
+}
+
+func prepareVerifyLogLine(t *testing.T, field string, value string) logLine {
+ t.Helper()
+ var line logLine
+ line.reset()
+
+ switch field {
+ default:
+ t.Errorf("prepareVerifyLogLine unknown field: '%s'", field)
+ case fieldRespTime:
+ v, err := strconv.Atoi(value)
+ require.NoError(t, err)
+ line.respTime = v
+ case fieldClientAddr:
+ line.clientAddr = value
+ case fieldCacheCode:
+ line.cacheCode = value
+ case fieldHTTPCode:
+ v, err := strconv.Atoi(value)
+ require.NoError(t, err)
+ line.httpCode = v
+ case fieldRespSize:
+ v, err := strconv.Atoi(value)
+ require.NoError(t, err)
+ line.respSize = v
+ case fieldReqMethod:
+ line.reqMethod = value
+ case fieldHierCode:
+ line.hierCode = value
+ case fieldMimeType:
+ line.mimeType = value
+ case fieldServerAddr:
+ line.serverAddr = value
+ }
+ return line
+}
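
The verification tests in the file above compare returned errors against sentinel values with `errors.Is`. A minimal, self-contained sketch of that pattern (the names here are hypothetical, not the module's); it works whether the sentinel is returned directly or wrapped with `%w`:

```go
package main

import (
	"errors"
	"fmt"
)

// errBadField is a hypothetical sentinel, standing in for errBadRespSize, errBadReqMethod, etc.
var errBadField = errors.New("bad field")

func verifyField(v int) error {
	if v < 0 {
		// wrapping with %w keeps the sentinel discoverable via errors.Is
		return fmt.Errorf("value %d: %w", v, errBadField)
	}
	return nil
}

func main() {
	err := verifyField(-1)
	fmt.Println(errors.Is(err, errBadField)) // true
}
```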
diff --git a/src/go/plugin/go.d/modules/squidlog/metadata.yaml b/src/go/plugin/go.d/modules/squidlog/metadata.yaml
new file mode 100644
index 000000000..82712f9e5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/metadata.yaml
@@ -0,0 +1,315 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-squidlog
+ plugin_name: go.d.plugin
+ module_name: squidlog
+ monitored_instance:
+ name: Squid log files
+ link: https://www.squid-cache.org/
+ icon_filename: squid.png
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - squid
+ - logs
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Squid servers by parsing their access log files.
+ method_description: |
+ It automatically detects log files of Squid servers running on localhost.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/squidlog.conf
+ options:
+ description: |
+ Squid [log format codes](http://www.squid-cache.org/Doc/config/logformat/).
+
+ Squidlog knows how to parse and interpret the following codes:
+
+ | field | squid format code | description |
+ |----------------|-------------------|---------------------------------------------------------------|
+ | resp_time | %tr | Response time (milliseconds). |
+ | client_address | %>a | Client source IP address. |
+ | client_address | %>A | Client FQDN. |
+ | cache_code | %Ss | Squid request status (TCP_MISS etc). |
+ | http_code | %>Hs | The HTTP response status code from Content Gateway to client. |
+ | resp_size | %<st | Total size of reply sent to client (after adaptation). |
+ | req_method | %rm | Request method (GET/POST etc). |
+ | hier_code | %Sh | Squid hierarchy status (DEFAULT_PARENT etc). |
+ | server_address | %<a | Server IP address of the last server or peer connection. |
+ | server_address | %<A | Server FQDN or peer name. |
+ | mime_type | %mt | MIME content type. |
+
+ In addition, to make the Squid [native log format](https://wiki.squid-cache.org/Features/LogFormat#Squid_native_access.log_format_in_detail) parsable as CSV, squidlog understands these groups of codes:
+
+ | field | squid format code | description |
+ |-------------|-------------------|------------------------------------|
+ | result_code | %Ss/%>Hs | Cache code and http code. |
+ | hierarchy | %Sh/%<a | Hierarchy code and server address. |
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: path
+ description: Path to the Squid access log file.
+ default_value: /var/log/squid/access.log
+ required: true
+ - name: exclude_path
+ description: Path to exclude.
+ default_value: "*.gz"
+ required: false
+ - name: parser
+ description: Log parser configuration.
+ default_value: ""
+ required: false
+ - name: parser.log_type
+ description: Log parser type.
+ default_value: auto
+ required: false
+ detailed_description: |
+ Squidlog supports 3 different log parsers:
+
+ | Parser type | Description |
+ |-------------|-------------------------------------------|
+ | csv | Comma-separated values |
+ | ltsv | [LTSV](http://ltsv.org/) |
+ | regexp | Regular expression with named groups |
+
+ Syntax:
+
+ ```yaml
+ parser:
+ log_type: csv
+ ```
+ - name: parser.csv_config
+ description: CSV log parser config.
+ default_value: ""
+ required: false
+ - name: parser.csv_config.delimiter
+ description: CSV field delimiter.
+ default_value: space
+ required: false
+ - name: parser.csv_config.format
+ description: CSV log format.
+ default_value: "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type"
+ required: true
+ detailed_description: ""
+ - name: parser.ltsv_config
+ description: LTSV log parser config.
+ default_value: ""
+ required: false
+ - name: parser.ltsv_config.field_delimiter
+ description: LTSV field delimiter.
+ default_value: "\\t"
+ required: false
+ - name: parser.ltsv_config.value_delimiter
+ description: LTSV value delimiter.
+ default_value: ":"
+ required: false
+ - name: parser.ltsv_config.mapping
+ description: LTSV fields mapping to **known fields**.
+ default_value: ""
+ required: true
+ detailed_description: |
+ The mapping is a dictionary where the key is a field name as it appears in the log, and the value is the corresponding **known field**.
+
+ > **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+ ```yaml
+ parser:
+ log_type: ltsv
+ ltsv_config:
+ mapping:
+ label1: field1
+ label2: field2
+ ```
+ - name: parser.regexp_config
+ description: RegExp log parser config.
+ default_value: ""
+ required: false
+ - name: parser.regexp_config.pattern
+ description: RegExp pattern with named groups.
+ default_value: ""
+ required: true
+ detailed_description: |
+ Use a pattern with named subexpressions. These names should be **known fields**.
+
+ > **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+ Syntax:
+
+ ```yaml
+ parser:
+ log_type: regexp
+ regexp_config:
+ pattern: PATTERN
+ ```
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: squidlog.requests
+ description: Total Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: squidlog.excluded_requests
+ description: Excluded Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: unmatched
+ - name: squidlog.type_requests
+ description: Requests By Type
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: success
+ - name: bad
+ - name: redirect
+ - name: error
+ - name: squidlog.http_status_code_class_responses
+ description: Responses By HTTP Status Code Class
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: 1xx
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: squidlog.http_status_code_responses
+ description: Responses By HTTP Status Code
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per HTTP response code
+ - name: squidlog.bandwidth
+ description: Bandwidth
+ unit: kilobits/s
+ chart_type: line
+ dimensions:
+ - name: sent
+ - name: squidlog.response_time
+ description: Response Time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: squidlog.uniq_clients
+ description: Unique Clients
+ unit: clients
+ chart_type: line
+ dimensions:
+ - name: clients
+ - name: squidlog.cache_result_code_requests
+ description: Requests By Cache Result Code
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per cache result code
+ - name: squidlog.cache_result_code_transport_tag_requests
+ description: Requests By Cache Result Delivery Transport Tag
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per cache result delivery transport tag
+ - name: squidlog.cache_result_code_handling_tag_requests
+ description: Requests By Cache Result Handling Tag
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per cache result handling tag
+ - name: squidlog.cache_code_object_tag_requests
+ description: Requests By Cache Result Produced Object Tag
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per cache result produced object tag
+ - name: squidlog.cache_code_load_source_tag_requests
+ description: Requests By Cache Result Load Source Tag
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per cache result load source tag
+ - name: squidlog.cache_code_error_tag_requests
+ description: Requests By Cache Result Errors Tag
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per cache result error tag
+ - name: squidlog.http_method_requests
+ description: Requests By HTTP Method
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per HTTP method
+ - name: squidlog.mime_type_requests
+ description: Requests By MIME Type
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per MIME type
+ - name: squidlog.hier_code_requests
+ description: Requests By Hierarchy Code
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per hierarchy code
+ - name: squidlog.server_address_forwarded_requests
+ description: Forwarded Requests By Server Address
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per server address
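
The options description in the metadata above documents the default CSV format string and which Squid format codes map to squidlog's known fields. Below is a minimal illustrative sketch (not the module's parser) that zips the default format tokens onto one native-format log line; the sample line is the one shown in the commented generator in the test file further below:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// default format from the metadata; "-" marks columns squidlog ignores
	format := "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type"
	// one Squid native-format access.log line
	line := "1576177221.686 0 ::1 TCP_MISS/200 1621 GET cache_object://localhost/counters - HIER_NONE/- text/plain"

	names := strings.Fields(format)
	values := strings.Fields(line)
	for i, name := range names {
		if !strings.HasPrefix(name, "$") {
			continue // skipped column
		}
		fmt.Printf("%-15s = %s\n", strings.TrimPrefix(name, "$"), values[i])
	}
}
```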
diff --git a/src/go/plugin/go.d/modules/squidlog/metrics.go b/src/go/plugin/go.d/modules/squidlog/metrics.go
new file mode 100644
index 000000000..031f832a1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/metrics.go
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+
+func newSummary() metrics.Summary {
+ return &summary{metrics.NewSummary()}
+}
+
+type summary struct {
+ metrics.Summary
+}
+
+func (s summary) WriteTo(rv map[string]int64, key string, mul, div int) {
+ s.Summary.WriteTo(rv, key, mul, div)
+ if _, ok := rv[key+"_min"]; !ok {
+ rv[key+"_min"] = 0
+ rv[key+"_max"] = 0
+ rv[key+"_avg"] = 0
+ }
+}
+
+const (
+ pxHTTPCode = "http_resp_code_"
+ pxReqMethod = "req_method_"
+ pxCacheCode = "cache_result_code_"
+ pxTransportTag = "cache_transport_tag_"
+ pxHandlingTag = "cache_handling_tag_"
+ pxObjectTag = "cache_object_tag_"
+ pxSourceTag = "cache_load_source_tag_"
+ pxErrorTag = "cache_error_tag_"
+ pxHierCode = "hier_code_"
+ pxMimeType = "mime_type_"
+ pxSrvAddr = "server_address_"
+)
+
+type metricsData struct {
+ Requests metrics.Counter `stm:"requests"`
+ Unmatched metrics.Counter `stm:"unmatched"`
+
+ HTTPRespCode metrics.CounterVec `stm:"http_resp_code"`
+ HTTPResp0xx metrics.Counter `stm:"http_resp_0xx"`
+ HTTPResp1xx metrics.Counter `stm:"http_resp_1xx"`
+ HTTPResp2xx metrics.Counter `stm:"http_resp_2xx"`
+ HTTPResp3xx metrics.Counter `stm:"http_resp_3xx"`
+ HTTPResp4xx metrics.Counter `stm:"http_resp_4xx"`
+ HTTPResp5xx metrics.Counter `stm:"http_resp_5xx"`
+ HTTPResp6xx metrics.Counter `stm:"http_resp_6xx"`
+
+ ReqSuccess metrics.Counter `stm:"req_type_success"`
+ ReqRedirect metrics.Counter `stm:"req_type_redirect"`
+ ReqBad metrics.Counter `stm:"req_type_bad"`
+ ReqError metrics.Counter `stm:"req_type_error"`
+
+ BytesSent metrics.Counter `stm:"bytes_sent"`
+ RespTime metrics.Summary `stm:"resp_time,1000,1"`
+ UniqueClients metrics.UniqueCounter `stm:"uniq_clients"`
+
+ ReqMethod metrics.CounterVec `stm:"req_method"`
+ CacheCode metrics.CounterVec `stm:"cache_result_code"`
+ CacheCodeTransportTag metrics.CounterVec `stm:"cache_transport_tag"`
+ CacheCodeHandlingTag metrics.CounterVec `stm:"cache_handling_tag"`
+ CacheCodeObjectTag metrics.CounterVec `stm:"cache_object_tag"`
+ CacheCodeLoadSourceTag metrics.CounterVec `stm:"cache_load_source_tag"`
+ CacheCodeErrorTag metrics.CounterVec `stm:"cache_error_tag"`
+ HierCode metrics.CounterVec `stm:"hier_code"`
+ MimeType metrics.CounterVec `stm:"mime_type"`
+ Server metrics.CounterVec `stm:"server_address"`
+}
+
+func (m *metricsData) reset() {
+ m.RespTime.Reset()
+ m.UniqueClients.Reset()
+}
+
+func newMetricsData() *metricsData {
+ return &metricsData{
+ RespTime: newSummary(),
+ UniqueClients: metrics.NewUniqueCounter(true),
+ HTTPRespCode: metrics.NewCounterVec(),
+ ReqMethod: metrics.NewCounterVec(),
+ CacheCode: metrics.NewCounterVec(),
+ CacheCodeTransportTag: metrics.NewCounterVec(),
+ CacheCodeHandlingTag: metrics.NewCounterVec(),
+ CacheCodeObjectTag: metrics.NewCounterVec(),
+ CacheCodeLoadSourceTag: metrics.NewCounterVec(),
+ CacheCodeErrorTag: metrics.NewCounterVec(),
+ HierCode: metrics.NewCounterVec(),
+ Server: metrics.NewCounterVec(),
+ MimeType: metrics.NewCounterVec(),
+ }
+}
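
The summary wrapper above overrides WriteTo so that the `_min`, `_max`, and `_avg` keys are always present, even when nothing was observed between collections; the "ReturnOldDataIfNothingRead" test further below relies on that. A standalone sketch of the idea, assuming a simplified summary rather than the real pkg/metrics implementation:

```go
package main

import "fmt"

// summarySketch is a toy stand-in for metrics.Summary.
type summarySketch struct{ sum, count, min, max int64 }

func (s *summarySketch) observe(v int64) {
	if s.count == 0 || v < s.min {
		s.min = v
	}
	if v > s.max {
		s.max = v
	}
	s.sum += v
	s.count++
}

// writeTo mirrors the intent of WriteTo(rv, key, mul, div): emit derived keys and
// zero-fill min/max/avg when nothing was observed, so chart dimensions never disappear.
func (s *summarySketch) writeTo(rv map[string]int64, key string, mul, div int64) {
	rv[key+"_count"] = s.count
	rv[key+"_sum"] = s.sum * mul / div
	if s.count == 0 {
		rv[key+"_min"], rv[key+"_max"], rv[key+"_avg"] = 0, 0, 0
		return
	}
	rv[key+"_min"] = s.min * mul / div
	rv[key+"_max"] = s.max * mul / div
	rv[key+"_avg"] = s.sum / s.count * mul / div
}

func main() {
	s := &summarySketch{}
	rv := map[string]int64{}
	s.observe(1002) // response times in milliseconds, as in testdata/access.log
	s.observe(4988)
	s.writeTo(rv, "resp_time", 1000, 1) // mul 1000 echoes the `stm:"resp_time,1000,1"` tag
	fmt.Println(rv)
}
```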
diff --git a/src/go/plugin/go.d/modules/squidlog/squidlog.go b/src/go/plugin/go.d/modules/squidlog/squidlog.go
new file mode 100644
index 000000000..e2e743c69
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/squidlog.go
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import (
+ _ "embed"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("squidlog", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *SquidLog {
+ return &SquidLog{
+ Config: Config{
+ Path: "/var/log/squid/access.log",
+ ExcludePath: "*.gz",
+ ParserConfig: logs.ParserConfig{
+ LogType: logs.TypeCSV,
+ CSV: logs.CSVConfig{
+ FieldsPerRecord: -1,
+ Delimiter: " ",
+ TrimLeadingSpace: true,
+ Format: "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type",
+ CheckField: checkCSVFormatField,
+ },
+ },
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Path string `yaml:"path" json:"path"`
+ ExcludePath string `yaml:"exclude_path,omitempty" json:"exclude_path"`
+ logs.ParserConfig `yaml:",inline" json:""`
+}
+
+type SquidLog struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ file *logs.Reader
+ parser logs.Parser
+ line *logLine
+
+ mx *metricsData
+}
+
+func (s *SquidLog) Configuration() any {
+ return s.Config
+}
+
+func (s *SquidLog) Init() error {
+ s.line = newEmptyLogLine()
+ s.mx = newMetricsData()
+ return nil
+}
+
+func (s *SquidLog) Check() error {
+ // Note: these inits are here to make auto-detection retry work
+ if err := s.createLogReader(); err != nil {
+ s.Warning("check failed: ", err)
+ return err
+ }
+
+ if err := s.createParser(); err != nil {
+ s.Warning("check failed: ", err)
+ return err
+ }
+
+ if err := s.createCharts(s.line); err != nil {
+ s.Warning("check failed: ", err)
+ return err
+ }
+
+ return nil
+}
+
+func (s *SquidLog) Charts() *module.Charts {
+ return s.charts
+}
+
+func (s *SquidLog) Collect() map[string]int64 {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (s *SquidLog) Cleanup() {
+ if s.file != nil {
+ _ = s.file.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/squidlog/squidlog_test.go b/src/go/plugin/go.d/modules/squidlog/squidlog_test.go
new file mode 100644
index 000000000..eb5ce635f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/squidlog_test.go
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package squidlog
+
+import (
+ "bytes"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNativeFormatAccessLog, _ = os.ReadFile("testdata/access.log")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNativeFormatAccessLog": dataNativeFormatAccessLog,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSquidLog_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &SquidLog{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNew(t *testing.T) {
+ assert.Implements(t, (*module.Module)(nil), New())
+}
+
+func TestSquidLog_Init(t *testing.T) {
+ squidlog := New()
+
+ assert.NoError(t, squidlog.Init())
+}
+
+func TestSquidLog_Check(t *testing.T) {
+}
+
+func TestSquidLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) {
+ squid := New()
+ defer squid.Cleanup()
+ squid.Path = "testdata/not_exists.log"
+ require.NoError(t, squid.Init())
+
+ assert.Error(t, squid.Check())
+}
+
+func TestSquid_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) {
+ squid := New()
+ defer squid.Cleanup()
+ squid.Path = "testdata/unknown.log"
+ require.NoError(t, squid.Init())
+
+ assert.Error(t, squid.Check())
+}
+
+func TestSquid_Check_ErrorOnCreatingParserZeroKnownFields(t *testing.T) {
+ squid := New()
+ defer squid.Cleanup()
+ squid.Path = "testdata/access.log"
+ squid.ParserConfig.CSV.Format = "$one $two"
+ require.NoError(t, squid.Init())
+
+ assert.Error(t, squid.Check())
+}
+
+func TestSquidLog_Charts(t *testing.T) {
+ assert.Nil(t, New().Charts())
+
+ squid := prepareSquidCollect(t)
+ assert.NotNil(t, squid.Charts())
+}
+
+func TestSquidLog_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestSquidLog_Collect(t *testing.T) {
+ squid := prepareSquidCollect(t)
+
+ expected := map[string]int64{
+ "bytes_sent": 6827357,
+ "cache_error_tag_ABORTED": 326,
+ "cache_handling_tag_CF": 154,
+ "cache_handling_tag_CLIENT": 172,
+ "cache_load_source_tag_MEM": 172,
+ "cache_object_tag_NEGATIVE": 308,
+ "cache_object_tag_STALE": 172,
+ "cache_result_code_NONE": 158,
+ "cache_result_code_TCP_CF_NEGATIVE_NEGATIVE_ABORTED": 154,
+ "cache_result_code_UDP_CLIENT_STALE_MEM_ABORTED": 172,
+ "cache_transport_tag_NONE": 158,
+ "cache_transport_tag_TCP": 154,
+ "cache_transport_tag_UDP": 172,
+ "hier_code_HIER_CACHE_DIGEST_HIT": 128,
+ "hier_code_HIER_NO_CACHE_DIGEST_DIRECT": 130,
+ "hier_code_HIER_PARENT_HIT": 106,
+ "hier_code_HIER_SINGLE_PARENT": 120,
+ "http_resp_0xx": 51,
+ "http_resp_1xx": 45,
+ "http_resp_2xx": 62,
+ "http_resp_3xx": 120,
+ "http_resp_4xx": 112,
+ "http_resp_5xx": 46,
+ "http_resp_6xx": 48,
+ "http_resp_code_0": 51,
+ "http_resp_code_100": 45,
+ "http_resp_code_200": 62,
+ "http_resp_code_300": 58,
+ "http_resp_code_304": 62,
+ "http_resp_code_400": 56,
+ "http_resp_code_401": 56,
+ "http_resp_code_500": 46,
+ "http_resp_code_603": 48,
+ "mime_type_application": 52,
+ "mime_type_audio": 56,
+ "mime_type_font": 44,
+ "mime_type_image": 50,
+ "mime_type_message": 44,
+ "mime_type_model": 62,
+ "mime_type_multipart": 61,
+ "mime_type_text": 61,
+ "mime_type_video": 54,
+ "req_method_COPY": 84,
+ "req_method_GET": 70,
+ "req_method_HEAD": 59,
+ "req_method_OPTIONS": 99,
+ "req_method_POST": 74,
+ "req_method_PURGE": 98,
+ "req_type_bad": 56,
+ "req_type_error": 94,
+ "req_type_redirect": 58,
+ "req_type_success": 276,
+ "requests": 500,
+ "resp_time_avg": 3015931,
+ "resp_time_count": 484,
+ "resp_time_max": 4988000,
+ "resp_time_min": 1002000,
+ "resp_time_sum": 1459711000,
+ "server_address_2001:db8:2ce:a": 79,
+ "server_address_2001:db8:2ce:b": 89,
+ "server_address_203.0.113.100": 67,
+ "server_address_203.0.113.200": 70,
+ "server_address_content-gateway": 87,
+ "uniq_clients": 5,
+ "unmatched": 16,
+ }
+
+ collected := squid.Collect()
+
+ assert.Equal(t, expected, collected)
+ testCharts(t, squid, collected)
+}
+
+func TestSquidLog_Collect_ReturnOldDataIfNothingRead(t *testing.T) {
+ squid := prepareSquidCollect(t)
+
+ expected := map[string]int64{
+ "bytes_sent": 6827357,
+ "cache_error_tag_ABORTED": 326,
+ "cache_handling_tag_CF": 154,
+ "cache_handling_tag_CLIENT": 172,
+ "cache_load_source_tag_MEM": 172,
+ "cache_object_tag_NEGATIVE": 308,
+ "cache_object_tag_STALE": 172,
+ "cache_result_code_NONE": 158,
+ "cache_result_code_TCP_CF_NEGATIVE_NEGATIVE_ABORTED": 154,
+ "cache_result_code_UDP_CLIENT_STALE_MEM_ABORTED": 172,
+ "cache_transport_tag_NONE": 158,
+ "cache_transport_tag_TCP": 154,
+ "cache_transport_tag_UDP": 172,
+ "hier_code_HIER_CACHE_DIGEST_HIT": 128,
+ "hier_code_HIER_NO_CACHE_DIGEST_DIRECT": 130,
+ "hier_code_HIER_PARENT_HIT": 106,
+ "hier_code_HIER_SINGLE_PARENT": 120,
+ "http_resp_0xx": 51,
+ "http_resp_1xx": 45,
+ "http_resp_2xx": 62,
+ "http_resp_3xx": 120,
+ "http_resp_4xx": 112,
+ "http_resp_5xx": 46,
+ "http_resp_6xx": 48,
+ "http_resp_code_0": 51,
+ "http_resp_code_100": 45,
+ "http_resp_code_200": 62,
+ "http_resp_code_300": 58,
+ "http_resp_code_304": 62,
+ "http_resp_code_400": 56,
+ "http_resp_code_401": 56,
+ "http_resp_code_500": 46,
+ "http_resp_code_603": 48,
+ "mime_type_application": 52,
+ "mime_type_audio": 56,
+ "mime_type_font": 44,
+ "mime_type_image": 50,
+ "mime_type_message": 44,
+ "mime_type_model": 62,
+ "mime_type_multipart": 61,
+ "mime_type_text": 61,
+ "mime_type_video": 54,
+ "req_method_COPY": 84,
+ "req_method_GET": 70,
+ "req_method_HEAD": 59,
+ "req_method_OPTIONS": 99,
+ "req_method_POST": 74,
+ "req_method_PURGE": 98,
+ "req_type_bad": 56,
+ "req_type_error": 94,
+ "req_type_redirect": 58,
+ "req_type_success": 276,
+ "requests": 500,
+ "resp_time_avg": 0,
+ "resp_time_count": 0,
+ "resp_time_max": 0,
+ "resp_time_min": 0,
+ "resp_time_sum": 0,
+ "server_address_2001:db8:2ce:a": 79,
+ "server_address_2001:db8:2ce:b": 89,
+ "server_address_203.0.113.100": 67,
+ "server_address_203.0.113.200": 70,
+ "server_address_content-gateway": 87,
+ "uniq_clients": 0,
+ "unmatched": 16,
+ }
+
+ _ = squid.Collect()
+ collected := squid.Collect()
+
+ assert.Equal(t, expected, collected)
+ testCharts(t, squid, collected)
+}
+
+func testCharts(t *testing.T, squidlog *SquidLog, collected map[string]int64) {
+ t.Helper()
+ ensureChartsDynamicDimsCreated(t, squidlog)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, squidlog, collected)
+}
+
+func ensureChartsDynamicDimsCreated(t *testing.T, squid *SquidLog) {
+ ensureDynamicDimsCreated(t, squid, cacheCodeChart.ID, pxCacheCode, squid.mx.CacheCode)
+ ensureDynamicDimsCreated(t, squid, cacheCodeTransportTagChart.ID, pxTransportTag, squid.mx.CacheCodeTransportTag)
+ ensureDynamicDimsCreated(t, squid, cacheCodeHandlingTagChart.ID, pxHandlingTag, squid.mx.CacheCodeHandlingTag)
+ ensureDynamicDimsCreated(t, squid, cacheCodeObjectTagChart.ID, pxObjectTag, squid.mx.CacheCodeObjectTag)
+ ensureDynamicDimsCreated(t, squid, cacheCodeLoadSourceTagChart.ID, pxSourceTag, squid.mx.CacheCodeLoadSourceTag)
+ ensureDynamicDimsCreated(t, squid, cacheCodeErrorTagChart.ID, pxErrorTag, squid.mx.CacheCodeErrorTag)
+ ensureDynamicDimsCreated(t, squid, httpRespCodesChart.ID, pxHTTPCode, squid.mx.HTTPRespCode)
+ ensureDynamicDimsCreated(t, squid, reqMethodChart.ID, pxReqMethod, squid.mx.ReqMethod)
+ ensureDynamicDimsCreated(t, squid, hierCodeChart.ID, pxHierCode, squid.mx.HierCode)
+ ensureDynamicDimsCreated(t, squid, serverAddrChart.ID, pxSrvAddr, squid.mx.Server)
+ ensureDynamicDimsCreated(t, squid, mimeTypeChart.ID, pxMimeType, squid.mx.MimeType)
+}
+
+func ensureDynamicDimsCreated(t *testing.T, squid *SquidLog, chartID, dimPrefix string, data metrics.CounterVec) {
+ chart := squid.Charts().Get(chartID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", chartID)
+ if chart == nil {
+ return
+ }
+ for v := range data {
+ id := dimPrefix + v
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s', expected '%s'", chart.ID, v, id)
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, s *SquidLog, collected map[string]int64) {
+ for _, chart := range *s.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareSquidCollect(t *testing.T) *SquidLog {
+ t.Helper()
+ squid := New()
+ squid.Path = "testdata/access.log"
+ require.NoError(t, squid.Init())
+ require.NoError(t, squid.Check())
+ defer squid.Cleanup()
+
+ p, err := logs.NewCSVParser(squid.ParserConfig.CSV, bytes.NewReader(dataNativeFormatAccessLog))
+ require.NoError(t, err)
+ squid.parser = p
+ return squid
+}
+
+// generateLogs is used to populate 'testdata/access.log'
+//func generateLogs(w io.Writer, num int) error {
+// var (
+// client = []string{"localhost", "203.0.113.1", "203.0.113.2", "2001:db8:2ce:1", "2001:db8:2ce:2"}
+// cacheCode = []string{"TCP_CF_NEGATIVE_NEGATIVE_ABORTED", "UDP_CLIENT_STALE_MEM_ABORTED", "NONE"}
+// httpCode = []string{"000", "100", "200", "300", "304", "400", "401", "500", "603"}
+// method = []string{"GET", "HEAD", "POST", "COPY", "PURGE", "OPTIONS"}
+// hierCode = []string{"HIER_PARENT_HIT", "HIER_SINGLE_PARENT", "HIER_CACHE_DIGEST_HIT", "HIER_NO_CACHE_DIGEST_DIRECT"}
+// server = []string{"content-gateway", "203.0.113.100", "203.0.113.200", "2001:db8:2ce:a", "2001:db8:2ce:b", "-"}
+// mimeType = []string{"application", "audio", "font", "image", "message", "model", "multipart", "video", "text"}
+// )
+//
+// r := rand.New(rand.NewSource(time.Now().UnixNano()))
+// randFromString := func(s []string) string { return s[r.Intn(len(s))] }
+// randInt := func(min, max int) int { return r.Intn(max-min) + min }
+//
+// var line string
+// for i := 0; i < num; i++ {
+// unmatched := randInt(1, 100) > 95
+// if i > 0 && unmatched {
+// line = "Unmatched! The rat the cat the dog chased killed ate the malt!\n"
+// } else {
+// // 1576177221.686 0 ::1 TCP_MISS/200 1621 GET cache_object://localhost/counters - HIER_NONE/- text/plain
+// line = fmt.Sprintf(
+// "1576177221.686 %d %s %s/%s %d %s cache_object://localhost/counters - %s/%s %s/plain\n",
+// randInt(1000, 5000),
+// randFromString(client),
+// randFromString(cacheCode),
+// randFromString(httpCode),
+// randInt(9000, 19000),
+// randFromString(method),
+// randFromString(hierCode),
+// randFromString(server),
+// randFromString(mimeType),
+// )
+// }
+// _, err := fmt.Fprint(w, line)
+// if err != nil {
+// return err
+// }
+// }
+// return nil
+//}
diff --git a/src/go/plugin/go.d/modules/squidlog/testdata/access.log b/src/go/plugin/go.d/modules/squidlog/testdata/access.log
new file mode 100644
index 000000000..64a23d35b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/testdata/access.log
@@ -0,0 +1,500 @@
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 3976 203.0.113.1 NONE/000 13564 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 model/plain
+1576177221.686 1241 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/401 10309 GET cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 text/plain
+1576177221.686 4052 203.0.113.2 NONE/400 17349 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- text/plain
+1576177221.686 3828 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 16025 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a image/plain
+1576177221.686 1798 203.0.113.2 NONE/000 14548 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/- model/plain
+1576177221.686 3910 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/300 16516 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b multipart/plain
+1576177221.686 4343 2001:db8:2ce:1 NONE/401 13967 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway video/plain
+1576177221.686 4244 203.0.113.1 NONE/304 10096 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a model/plain
+1576177221.686 1686 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 10491 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 video/plain
+1576177221.686 3387 localhost UDP_CLIENT_STALE_MEM_ABORTED/000 15776 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 application/plain
+1576177221.686 1370 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/200 16088 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway image/plain
+1576177221.686 2023 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/500 11529 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b text/plain
+1576177221.686 2858 2001:db8:2ce:2 NONE/300 9358 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a multipart/plain
+1576177221.686 4616 2001:db8:2ce:1 NONE/603 13869 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 image/plain
+1576177221.686 3764 2001:db8:2ce:1 NONE/304 12091 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- video/plain
+1576177221.686 4239 2001:db8:2ce:2 NONE/500 17583 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/- text/plain
+1576177221.686 1925 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 18889 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a application/plain
+1576177221.686 1451 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/304 12461 GET cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b font/plain
+1576177221.686 3907 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 9292 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 image/plain
+1576177221.686 1215 localhost NONE/000 16993 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- message/plain
+1576177221.686 4544 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/603 13625 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 multipart/plain
+1576177221.686 1611 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 9459 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a text/plain
+1576177221.686 1051 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/603 17581 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway audio/plain
+1576177221.686 3681 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 13021 COPY cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a multipart/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 2511 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 13955 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- multipart/plain
+1576177221.686 1296 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 13138 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 text/plain
+1576177221.686 3000 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 10871 GET cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b audio/plain
+1576177221.686 4571 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 13636 COPY cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b video/plain
+1576177221.686 3775 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 16627 GET cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b font/plain
+1576177221.686 2390 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 17552 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway text/plain
+1576177221.686 1022 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/401 18857 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 image/plain
+1576177221.686 4507 2001:db8:2ce:2 NONE/500 15436 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a video/plain
+1576177221.686 1938 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 13470 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a text/plain
+1576177221.686 3071 localhost NONE/500 12033 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a audio/plain
+1576177221.686 3880 203.0.113.2 NONE/304 17929 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 model/plain
+1576177221.686 1077 2001:db8:2ce:2 NONE/401 16424 COPY cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a audio/plain
+1576177221.686 4478 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 11321 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b text/plain
+1576177221.686 2768 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 12640 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- image/plain
+1576177221.686 3803 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/400 16857 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/- multipart/plain
+1576177221.686 2111 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 11050 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 application/plain
+1576177221.686 2878 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/200 14757 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- video/plain
+1576177221.686 3053 2001:db8:2ce:1 NONE/500 10030 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- text/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 2423 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/200 10214 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/- font/plain
+1576177221.686 1407 localhost UDP_CLIENT_STALE_MEM_ABORTED/304 11029 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b font/plain
+1576177221.686 3327 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 15419 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a message/plain
+1576177221.686 2300 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/200 16423 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a video/plain
+1576177221.686 1094 localhost UDP_CLIENT_STALE_MEM_ABORTED/300 17171 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 font/plain
+1576177221.686 1800 203.0.113.2 NONE/100 13840 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- message/plain
+1576177221.686 1866 203.0.113.2 NONE/603 16746 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- multipart/plain
+1576177221.686 4130 203.0.113.1 NONE/603 11088 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b audio/plain
+1576177221.686 3022 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/500 16903 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- multipart/plain
+1576177221.686 4651 2001:db8:2ce:1 NONE/300 15830 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b text/plain
+1576177221.686 4265 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/401 10342 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 audio/plain
+1576177221.686 2189 localhost UDP_CLIENT_STALE_MEM_ABORTED/000 12576 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a application/plain
+1576177221.686 1621 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 17153 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 multipart/plain
+1576177221.686 2610 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/500 12526 GET cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway model/plain
+1576177221.686 1652 localhost UDP_CLIENT_STALE_MEM_ABORTED/400 15106 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway video/plain
+1576177221.686 1599 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/200 16609 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a multipart/plain
+1576177221.686 1954 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/401 13417 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/- text/plain
+1576177221.686 2338 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 16484 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b model/plain
+1576177221.686 2504 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 12935 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- model/plain
+1576177221.686 3482 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 10694 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway multipart/plain
+1576177221.686 4549 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 17110 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 audio/plain
+1576177221.686 3596 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/603 9690 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 multipart/plain
+1576177221.686 4491 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 9378 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a model/plain
+1576177221.686 1336 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/401 14364 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- application/plain
+1576177221.686 1637 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/603 13319 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- text/plain
+1576177221.686 2330 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 16509 COPY cache_object://localhost/counters - HIER_PARENT_HIT/- audio/plain
+1576177221.686 4278 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 9931 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a audio/plain
+1576177221.686 2264 localhost UDP_CLIENT_STALE_MEM_ABORTED/603 16366 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 model/plain
+1576177221.686 4271 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 12708 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway text/plain
+1576177221.686 4580 203.0.113.2 NONE/500 17652 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- application/plain
+1576177221.686 2739 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/400 16253 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway video/plain
+1576177221.686 4122 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/400 10108 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- message/plain
+1576177221.686 2810 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 15493 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/- message/plain
+1576177221.686 1257 localhost UDP_CLIENT_STALE_MEM_ABORTED/500 13626 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 video/plain
+1576177221.686 2117 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/200 9348 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a video/plain
+1576177221.686 2467 203.0.113.2 NONE/603 13519 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 video/plain
+1576177221.686 3796 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 12236 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/- model/plain
+1576177221.686 1218 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/304 10061 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b text/plain
+1576177221.686 4561 2001:db8:2ce:2 NONE/500 16695 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- multipart/plain
+1576177221.686 1880 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 18046 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway message/plain
+1576177221.686 3518 2001:db8:2ce:1 NONE/304 9991 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- font/plain
+1576177221.686 2092 203.0.113.1 NONE/400 12206 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 audio/plain
+1576177221.686 1483 2001:db8:2ce:1 NONE/200 11454 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 model/plain
+1576177221.686 3683 203.0.113.2 NONE/100 9002 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway model/plain
+1576177221.686 1823 localhost NONE/603 13991 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway font/plain
+1576177221.686 4948 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 18034 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a image/plain
+1576177221.686 2798 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/500 18660 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b font/plain
+1576177221.686 2004 localhost UDP_CLIENT_STALE_MEM_ABORTED/400 12089 GET cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 audio/plain
+1576177221.686 1087 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/603 14469 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway audio/plain
+1576177221.686 3055 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 11938 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/- image/plain
+1576177221.686 2908 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/300 13859 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b image/plain
+1576177221.686 3945 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/200 17255 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 model/plain
+1576177221.686 2225 203.0.113.2 NONE/304 11717 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/- video/plain
+1576177221.686 3439 localhost UDP_CLIENT_STALE_MEM_ABORTED/000 14459 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 audio/plain
+1576177221.686 4939 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/300 9184 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b text/plain
+1576177221.686 3629 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 18778 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a application/plain
+1576177221.686 3956 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/500 17471 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 font/plain
+1576177221.686 1258 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 15939 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b model/plain
+1576177221.686 3328 2001:db8:2ce:1 NONE/200 15416 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b message/plain
+1576177221.686 4055 203.0.113.1 NONE/100 14766 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b text/plain
+1576177221.686 2851 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 12938 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway text/plain
+1576177221.686 1578 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 16826 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/- video/plain
+1576177221.686 3340 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/400 14833 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b model/plain
+1576177221.686 4474 2001:db8:2ce:1 NONE/401 11354 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b application/plain
+1576177221.686 4172 localhost NONE/300 9138 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/- audio/plain
+1576177221.686 2732 localhost NONE/603 9105 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b model/plain
+1576177221.686 1581 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 17797 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b image/plain
+1576177221.686 2029 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 15806 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a font/plain
+1576177221.686 3624 203.0.113.1 NONE/500 9549 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 application/plain
+1576177221.686 2591 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 10950 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a audio/plain
+1576177221.686 3351 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 10848 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 image/plain
+1576177221.686 2927 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/603 11330 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway image/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 3418 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/401 13606 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway text/plain
+1576177221.686 3542 localhost NONE/000 18143 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a text/plain
+1576177221.686 1755 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/401 11437 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/- message/plain
+1576177221.686 4189 localhost NONE/300 17965 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway multipart/plain
+1576177221.686 2069 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 17754 COPY cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a message/plain
+1576177221.686 1151 localhost UDP_CLIENT_STALE_MEM_ABORTED/603 12324 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/- multipart/plain
+1576177221.686 2695 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 11931 COPY cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway model/plain
+1576177221.686 3557 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 18705 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 message/plain
+1576177221.686 3862 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/401 17928 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 font/plain
+1576177221.686 2512 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/300 18026 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 image/plain
+1576177221.686 3725 localhost NONE/304 13496 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway text/plain
+1576177221.686 3295 203.0.113.1 NONE/400 11396 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 message/plain
+1576177221.686 1469 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 9413 COPY cache_object://localhost/counters - HIER_PARENT_HIT/- text/plain
+1576177221.686 2766 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 10738 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway application/plain
+1576177221.686 4106 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 9115 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 text/plain
+1576177221.686 2025 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/200 13876 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway video/plain
+1576177221.686 2522 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/304 13867 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b text/plain
+1576177221.686 4089 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/100 18319 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway video/plain
+1576177221.686 2728 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 9139 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 audio/plain
+1576177221.686 2658 203.0.113.2 NONE/500 17938 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 multipart/plain
+1576177221.686 2630 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 17682 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway video/plain
+1576177221.686 4063 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/300 10435 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a video/plain
+1576177221.686 2231 localhost NONE/500 12792 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b image/plain
+1576177221.686 2259 2001:db8:2ce:1 NONE/400 10533 GET cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a image/plain
+1576177221.686 4155 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/500 18879 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- image/plain
+1576177221.686 2396 2001:db8:2ce:2 NONE/200 17470 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b application/plain
+1576177221.686 2350 localhost NONE/603 12025 COPY cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a text/plain
+1576177221.686 1684 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/603 12195 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b video/plain
+1576177221.686 3228 localhost NONE/400 9220 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/- image/plain
+1576177221.686 1251 localhost NONE/304 14902 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway video/plain
+1576177221.686 4987 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/401 11056 GET cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a font/plain
+1576177221.686 3477 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/500 10332 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a multipart/plain
+1576177221.686 3825 203.0.113.1 NONE/400 11344 GET cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway model/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 2301 2001:db8:2ce:2 NONE/300 14192 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b video/plain
+1576177221.686 4128 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 10167 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway model/plain
+1576177221.686 2638 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/500 11889 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a application/plain
+1576177221.686 3224 localhost UDP_CLIENT_STALE_MEM_ABORTED/400 16272 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/- video/plain
+1576177221.686 2606 203.0.113.1 NONE/304 14417 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a multipart/plain
+1576177221.686 3032 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/100 15002 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a image/plain
+1576177221.686 1704 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/200 16472 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 multipart/plain
+1576177221.686 2207 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 15584 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b font/plain
+1576177221.686 1805 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 13707 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 font/plain
+1576177221.686 3957 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 11342 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 audio/plain
+1576177221.686 1436 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 16561 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 image/plain
+1576177221.686 4693 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/000 15382 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway message/plain
+1576177221.686 2814 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 16601 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b message/plain
+1576177221.686 3705 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/300 12188 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a audio/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 2920 2001:db8:2ce:1 NONE/304 12360 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b application/plain
+1576177221.686 4746 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 17802 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a multipart/plain
+1576177221.686 1734 2001:db8:2ce:2 NONE/500 9076 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 model/plain
+1576177221.686 3903 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/200 15655 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b audio/plain
+1576177221.686 3627 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/304 17310 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a message/plain
+1576177221.686 2903 localhost NONE/401 13330 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- text/plain
+1576177221.686 3840 localhost UDP_CLIENT_STALE_MEM_ABORTED/000 9723 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a message/plain
+1576177221.686 4204 203.0.113.2 NONE/401 14758 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b video/plain
+1576177221.686 2531 203.0.113.2 NONE/401 16884 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b model/plain
+1576177221.686 4442 203.0.113.1 NONE/100 16154 GET cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 text/plain
+1576177221.686 1874 2001:db8:2ce:2 NONE/400 16960 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 image/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 3935 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/200 18310 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 image/plain
+1576177221.686 1444 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/100 14971 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 audio/plain
+1576177221.686 1598 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 11677 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 image/plain
+1576177221.686 1331 localhost UDP_CLIENT_STALE_MEM_ABORTED/500 11860 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 multipart/plain
+1576177221.686 3019 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/200 18581 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway multipart/plain
+1576177221.686 2439 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 9268 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 model/plain
+1576177221.686 4018 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 16046 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 application/plain
+1576177221.686 4852 localhost NONE/200 17419 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b application/plain
+1576177221.686 1002 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 15627 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a audio/plain
+1576177221.686 3092 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/603 10554 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a video/plain
+1576177221.686 4281 2001:db8:2ce:2 NONE/300 12359 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway message/plain
+1576177221.686 2099 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 16391 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway text/plain
+1576177221.686 2011 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/100 16159 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- model/plain
+1576177221.686 4830 2001:db8:2ce:2 NONE/200 15816 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway text/plain
+1576177221.686 4042 localhost UDP_CLIENT_STALE_MEM_ABORTED/500 12298 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway audio/plain
+1576177221.686 3197 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 15824 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway audio/plain
+1576177221.686 1370 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 9400 GET cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway message/plain
+1576177221.686 2845 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/603 9027 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- model/plain
+1576177221.686 1022 localhost NONE/603 10231 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a multipart/plain
+1576177221.686 1539 203.0.113.2 NONE/401 11300 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b image/plain
+1576177221.686 1106 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/400 14320 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a multipart/plain
+1576177221.686 3392 203.0.113.1 NONE/100 11618 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 text/plain
+1576177221.686 4047 localhost UDP_CLIENT_STALE_MEM_ABORTED/401 11760 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 font/plain
+1576177221.686 2558 localhost NONE/500 16090 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a message/plain
+1576177221.686 3852 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 12957 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/- audio/plain
+1576177221.686 4583 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 15348 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a video/plain
+1576177221.686 3861 localhost NONE/603 18438 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a video/plain
+1576177221.686 3642 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 11404 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 multipart/plain
+1576177221.686 4239 2001:db8:2ce:1 NONE/300 17424 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway multipart/plain
+1576177221.686 3559 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/100 17973 COPY cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 model/plain
+1576177221.686 2857 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 13890 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/- image/plain
+1576177221.686 4096 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/100 16852 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b image/plain
+1576177221.686 1711 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/401 18346 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a image/plain
+1576177221.686 4833 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 13810 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 multipart/plain
+1576177221.686 1067 localhost NONE/401 11033 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 video/plain
+1576177221.686 3736 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 9198 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/- multipart/plain
+1576177221.686 4877 203.0.113.2 NONE/200 13819 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a video/plain
+1576177221.686 1994 203.0.113.2 NONE/400 13995 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 text/plain
+1576177221.686 4724 localhost UDP_CLIENT_STALE_MEM_ABORTED/500 18856 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- font/plain
+1576177221.686 3491 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 15865 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 application/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 3964 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 12752 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway image/plain
+1576177221.686 4215 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/304 14142 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 application/plain
+1576177221.686 3803 2001:db8:2ce:1 NONE/304 14779 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a message/plain
+1576177221.686 4518 203.0.113.1 NONE/400 15824 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a model/plain
+1576177221.686 2816 2001:db8:2ce:2 NONE/304 14078 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 font/plain
+1576177221.686 1937 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 9563 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b model/plain
+1576177221.686 3870 2001:db8:2ce:1 NONE/400 15286 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b font/plain
+1576177221.686 4854 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 11432 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a text/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 4579 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 15670 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a image/plain
+1576177221.686 1316 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 13083 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway multipart/plain
+1576177221.686 2319 203.0.113.1 NONE/304 13725 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b model/plain
+1576177221.686 1640 localhost UDP_CLIENT_STALE_MEM_ABORTED/401 14085 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- application/plain
+1576177221.686 2368 203.0.113.2 NONE/400 17238 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b video/plain
+1576177221.686 2035 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/603 13357 GET cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway audio/plain
+1576177221.686 2063 2001:db8:2ce:1 NONE/200 11460 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a text/plain
+1576177221.686 4884 203.0.113.2 NONE/200 9333 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b message/plain
+1576177221.686 2917 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/000 9114 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 font/plain
+1576177221.686 3784 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/300 12414 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/- font/plain
+1576177221.686 2514 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/200 16860 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b text/plain
+1576177221.686 1272 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 10082 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 audio/plain
+1576177221.686 4408 203.0.113.2 NONE/400 11884 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- text/plain
+1576177221.686 3444 2001:db8:2ce:2 NONE/300 15683 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/- message/plain
+1576177221.686 3471 localhost UDP_CLIENT_STALE_MEM_ABORTED/401 9915 COPY cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 message/plain
+1576177221.686 2684 2001:db8:2ce:1 NONE/401 13787 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a font/plain
+1576177221.686 2711 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 14585 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway multipart/plain
+1576177221.686 4244 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 17274 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 multipart/plain
+1576177221.686 1967 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/300 11902 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway multipart/plain
+1576177221.686 2722 localhost UDP_CLIENT_STALE_MEM_ABORTED/304 13803 COPY cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b video/plain
+1576177221.686 2672 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 11989 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 font/plain
+1576177221.686 4308 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 14034 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway image/plain
+1576177221.686 4970 203.0.113.2 NONE/304 15711 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- model/plain
+1576177221.686 2801 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/603 13296 COPY cache_object://localhost/counters - HIER_PARENT_HIT/- audio/plain
+1576177221.686 1915 2001:db8:2ce:1 NONE/300 15831 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/- video/plain
+1576177221.686 4406 203.0.113.2 NONE/304 18616 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a application/plain
+1576177221.686 1881 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/300 17573 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a message/plain
+1576177221.686 3561 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/000 10073 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b application/plain
+1576177221.686 2957 203.0.113.2 NONE/400 12867 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 font/plain
+1576177221.686 2166 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 9753 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 font/plain
+1576177221.686 2905 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 18309 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 text/plain
+1576177221.686 3528 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 16146 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway font/plain
+1576177221.686 3021 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 16082 GET cache_object://localhost/counters - HIER_PARENT_HIT/- image/plain
+1576177221.686 3228 203.0.113.1 NONE/200 17715 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b image/plain
+1576177221.686 2618 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/401 18779 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b application/plain
+1576177221.686 2707 203.0.113.1 NONE/603 15920 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/- model/plain
+1576177221.686 2840 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 17752 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- application/plain
+1576177221.686 3352 localhost UDP_CLIENT_STALE_MEM_ABORTED/603 13179 POST cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 model/plain
+1576177221.686 3764 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 12217 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a video/plain
+1576177221.686 3903 203.0.113.1 NONE/200 15292 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 message/plain
+1576177221.686 1690 203.0.113.1 NONE/603 9206 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 multipart/plain
+1576177221.686 3432 localhost UDP_CLIENT_STALE_MEM_ABORTED/304 16707 POST cache_object://localhost/counters - HIER_PARENT_HIT/- text/plain
+1576177221.686 3239 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/000 12097 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 model/plain
+1576177221.686 3761 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 9167 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 audio/plain
+1576177221.686 3184 2001:db8:2ce:1 NONE/300 17832 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 message/plain
+1576177221.686 3226 203.0.113.2 NONE/000 16530 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 message/plain
+1576177221.686 1121 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/300 9632 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 application/plain
+1576177221.686 2454 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 13564 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b text/plain
+1576177221.686 2497 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/400 15475 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 text/plain
+1576177221.686 2433 localhost UDP_CLIENT_STALE_MEM_ABORTED/300 10124 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 application/plain
+1576177221.686 2652 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 12632 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- model/plain
+1576177221.686 4245 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/603 13060 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 multipart/plain
+1576177221.686 4365 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 13039 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 model/plain
+1576177221.686 1397 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/400 13462 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 model/plain
+1576177221.686 1958 203.0.113.1 NONE/304 14745 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b font/plain
+1576177221.686 2374 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/400 16475 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway text/plain
+1576177221.686 3926 localhost UDP_CLIENT_STALE_MEM_ABORTED/200 13928 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b model/plain
+1576177221.686 3628 203.0.113.1 NONE/401 9594 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway message/plain
+1576177221.686 2776 localhost NONE/304 17589 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b application/plain
+1576177221.686 4820 localhost UDP_CLIENT_STALE_MEM_ABORTED/401 11138 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 audio/plain
+1576177221.686 4759 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 18362 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 video/plain
+1576177221.686 4282 203.0.113.2 NONE/304 9048 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 model/plain
+1576177221.686 3308 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 15329 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway audio/plain
+1576177221.686 2067 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 17856 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 text/plain
+1576177221.686 1421 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/100 17391 GET cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a text/plain
+1576177221.686 2881 2001:db8:2ce:1 NONE/400 15805 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b application/plain
+1576177221.686 4457 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/400 18550 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b text/plain
+1576177221.686 4043 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 14399 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- model/plain
+1576177221.686 3516 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/300 9287 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b font/plain
+1576177221.686 2504 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 11278 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 audio/plain
+1576177221.686 1995 203.0.113.1 NONE/603 18002 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 font/plain
+1576177221.686 1661 203.0.113.2 NONE/300 18944 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway message/plain
+1576177221.686 3593 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/100 18815 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 message/plain
+1576177221.686 4296 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 10891 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway application/plain
+1576177221.686 1392 203.0.113.2 NONE/401 16764 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 message/plain
+1576177221.686 2265 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/000 15565 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway model/plain
+1576177221.686 1936 2001:db8:2ce:2 NONE/000 16715 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b audio/plain
+1576177221.686 4612 203.0.113.2 NONE/304 16972 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b model/plain
+1576177221.686 4473 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 13787 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b audio/plain
+1576177221.686 1606 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/200 11784 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b image/plain
+1576177221.686 1155 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 14832 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b font/plain
+1576177221.686 1637 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 10566 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway font/plain
+1576177221.686 3313 localhost NONE/300 18497 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b multipart/plain
+1576177221.686 2058 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 17875 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/- message/plain
+1576177221.686 2789 203.0.113.2 NONE/401 10608 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b model/plain
+1576177221.686 3250 2001:db8:2ce:2 NONE/603 12794 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a image/plain
+1576177221.686 4962 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/500 18755 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a video/plain
+1576177221.686 3845 localhost UDP_CLIENT_STALE_MEM_ABORTED/200 13988 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- text/plain
+1576177221.686 3395 203.0.113.1 NONE/400 11117 GET cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway model/plain
+1576177221.686 4615 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 16982 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway multipart/plain
+1576177221.686 2663 localhost NONE/304 13113 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 audio/plain
+1576177221.686 4313 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/400 17031 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b text/plain
+1576177221.686 4051 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/400 9037 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/- font/plain
+1576177221.686 4779 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 17329 GET cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a font/plain
+1576177221.686 1086 localhost NONE/400 12162 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- application/plain
+1576177221.686 3314 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 10419 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway audio/plain
+1576177221.686 3505 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/200 13025 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b model/plain
+1576177221.686 3715 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/304 10068 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway multipart/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 3891 2001:db8:2ce:2 NONE/100 12361 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 audio/plain
+1576177221.686 1420 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/400 15872 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- multipart/plain
+1576177221.686 4483 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 9958 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- message/plain
+1576177221.686 3689 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/400 18792 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b font/plain
+1576177221.686 4106 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 17681 COPY cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a application/plain
+1576177221.686 4988 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/304 11687 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway audio/plain
+1576177221.686 2794 203.0.113.2 NONE/000 10568 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 message/plain
+1576177221.686 2742 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/401 9006 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 audio/plain
+1576177221.686 4899 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 17927 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 font/plain
+1576177221.686 1505 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/400 16266 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a audio/plain
+1576177221.686 3867 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 17250 COPY cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 audio/plain
+1576177221.686 2744 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 16015 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway model/plain
+1576177221.686 3933 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 12507 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway application/plain
+1576177221.686 1413 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 9943 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- application/plain
+1576177221.686 1834 203.0.113.1 NONE/304 12716 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway image/plain
+1576177221.686 1019 2001:db8:2ce:1 NONE/100 13276 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 multipart/plain
+1576177221.686 3599 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 17836 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 application/plain
+1576177221.686 2532 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 9700 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b model/plain
+1576177221.686 1634 203.0.113.2 NONE/500 18644 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 video/plain
+1576177221.686 3055 203.0.113.1 NONE/400 17369 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 text/plain
+1576177221.686 2935 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 17022 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway model/plain
+1576177221.686 4749 2001:db8:2ce:2 NONE/000 9821 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway message/plain
+1576177221.686 2284 203.0.113.2 NONE/200 10006 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b application/plain
+1576177221.686 3371 localhost UDP_CLIENT_STALE_MEM_ABORTED/500 12975 GET cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway model/plain
+1576177221.686 1971 203.0.113.1 NONE/603 14557 POST cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 text/plain
+1576177221.686 2721 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 17072 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/- model/plain
+1576177221.686 2604 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 13570 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway audio/plain
+1576177221.686 1344 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 16820 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway multipart/plain
+1576177221.686 4890 2001:db8:2ce:1 NONE/000 15095 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- audio/plain
+1576177221.686 1005 2001:db8:2ce:2 NONE/000 18911 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b application/plain
+1576177221.686 2956 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/304 10496 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a video/plain
+1576177221.686 3475 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/000 17288 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway image/plain
+1576177221.686 4601 localhost NONE/603 12287 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 image/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 1899 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 18603 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway application/plain
+1576177221.686 2613 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 13216 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway audio/plain
+1576177221.686 3209 localhost UDP_CLIENT_STALE_MEM_ABORTED/603 9944 COPY cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway font/plain
+1576177221.686 2856 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 9548 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 video/plain
+1576177221.686 2651 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/500 11656 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 application/plain
+1576177221.686 1297 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/401 15477 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway model/plain
+1576177221.686 1261 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/200 17803 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/- video/plain
+1576177221.686 4251 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 11606 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a multipart/plain
+1576177221.686 3367 localhost NONE/300 14497 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway application/plain
+1576177221.686 2739 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/401 17643 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 text/plain
+1576177221.686 1362 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 16303 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b image/plain
+1576177221.686 3661 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 18344 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b video/plain
+1576177221.686 3703 203.0.113.1 NONE/304 13318 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a video/plain
+1576177221.686 1964 203.0.113.2 NONE/304 18000 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 message/plain
+1576177221.686 3324 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 11296 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/- model/plain
+1576177221.686 3112 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 16582 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b model/plain
+1576177221.686 3776 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/500 12386 GET cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b font/plain
+1576177221.686 3284 203.0.113.1 NONE/500 18718 POST cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b application/plain
+1576177221.686 3741 2001:db8:2ce:2 NONE/200 18218 GET cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 font/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 3133 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 10342 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b video/plain
+1576177221.686 2460 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 12281 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway message/plain
+1576177221.686 1684 localhost UDP_CLIENT_STALE_MEM_ABORTED/400 17194 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a text/plain
+1576177221.686 1859 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 10156 COPY cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a audio/plain
+1576177221.686 1351 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/200 16631 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 model/plain
+1576177221.686 2007 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/304 10447 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- video/plain
+1576177221.686 4439 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/400 16940 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b video/plain
+1576177221.686 2943 203.0.113.2 NONE/100 18289 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 video/plain
+1576177221.686 4980 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/304 11876 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 image/plain
+1576177221.686 1472 203.0.113.1 NONE/100 15230 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 image/plain
+1576177221.686 4144 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 14558 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- text/plain
+1576177221.686 2425 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/000 14740 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- application/plain
+1576177221.686 2402 2001:db8:2ce:2 NONE/000 14386 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a image/plain
+1576177221.686 1256 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/100 12101 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway image/plain
+1576177221.686 3705 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/401 17437 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway message/plain
+1576177221.686 1983 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/401 15588 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/- model/plain
+1576177221.686 1236 203.0.113.2 NONE/304 18272 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 video/plain
+1576177221.686 4591 203.0.113.2 NONE/300 12960 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 message/plain
+1576177221.686 3565 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 11710 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 multipart/plain
+1576177221.686 3587 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 12506 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 model/plain
+1576177221.686 1945 203.0.113.1 NONE/200 12382 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.200 model/plain
+1576177221.686 4322 203.0.113.1 NONE/603 16150 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a font/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 3492 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/100 10572 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/- multipart/plain
+1576177221.686 4113 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/500 13848 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 application/plain
+1576177221.686 4035 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 13398 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b model/plain
+1576177221.686 4015 localhost NONE/200 18793 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- message/plain
+1576177221.686 2857 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 16562 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 video/plain
+1576177221.686 3459 localhost NONE/603 16567 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/content-gateway text/plain
+1576177221.686 2454 203.0.113.2 NONE/200 18504 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway application/plain
+1576177221.686 4180 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/000 13615 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/- audio/plain
+1576177221.686 1112 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/304 16484 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a video/plain
+1576177221.686 1997 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 16335 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway model/plain
+1576177221.686 3738 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 16001 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 application/plain
+1576177221.686 3299 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 18931 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/- image/plain
+1576177221.686 2029 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/401 16480 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway image/plain
+1576177221.686 4454 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 10548 POST cache_object://localhost/counters - HIER_PARENT_HIT/- application/plain
+1576177221.686 1384 203.0.113.1 NONE/300 13589 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a application/plain
+1576177221.686 4863 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 17670 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway audio/plain
+1576177221.686 3503 2001:db8:2ce:1 NONE/300 11721 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/- audio/plain
+1576177221.686 1778 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/100 11316 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 application/plain
+1576177221.686 1875 203.0.113.2 NONE/100 16222 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/- font/plain
+1576177221.686 1190 203.0.113.1 NONE/500 14110 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b text/plain
+1576177221.686 2266 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/300 10557 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 application/plain
+1576177221.686 4058 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 18050 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 multipart/plain
+1576177221.686 2274 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/200 17840 COPY cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 text/plain
+1576177221.686 2355 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 10842 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway text/plain
+1576177221.686 3761 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 17980 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 application/plain
+1576177221.686 3691 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 14715 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 message/plain
+1576177221.686 2211 203.0.113.2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 11506 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- audio/plain
+1576177221.686 3064 203.0.113.2 NONE/100 18827 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a application/plain
+1576177221.686 3739 203.0.113.2 NONE/200 12758 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b text/plain
+1576177221.686 2402 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/300 18878 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway image/plain
+1576177221.686 1166 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/400 10853 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b multipart/plain
+1576177221.686 4350 localhost NONE/000 10188 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 message/plain
+1576177221.686 4605 localhost NONE/200 15088 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a text/plain
+1576177221.686 1984 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 14555 HEAD cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 multipart/plain
+1576177221.686 2350 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/304 9723 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/- audio/plain
+1576177221.686 4382 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/100 17163 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 audio/plain
+1576177221.686 1611 2001:db8:2ce:2 NONE/100 16545 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b message/plain
+1576177221.686 1912 2001:db8:2ce:1 NONE/000 14480 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 application/plain
+1576177221.686 3990 203.0.113.2 NONE/304 9821 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/- image/plain
+1576177221.686 1396 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/000 9406 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway multipart/plain
+1576177221.686 4461 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/000 12499 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b video/plain
+1576177221.686 2152 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/500 18415 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b model/plain
+1576177221.686 3568 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 16702 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 image/plain
+1576177221.686 4207 localhost UDP_CLIENT_STALE_MEM_ABORTED/304 15949 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- message/plain
+1576177221.686 4903 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/200 14688 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a multipart/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 2145 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/300 10230 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- text/plain
+1576177221.686 2795 2001:db8:2ce:2 NONE/300 12164 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- image/plain
+1576177221.686 2045 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/304 18161 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a text/plain
+1576177221.686 4960 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/401 12553 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a multipart/plain
+1576177221.686 1844 2001:db8:2ce:2 NONE/304 16443 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/- multipart/plain
+1576177221.686 1398 203.0.113.1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/304 10761 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b video/plain
+1576177221.686 3877 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 18332 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:b audio/plain
+1576177221.686 1542 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/400 15785 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway text/plain
+1576177221.686 3736 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/000 13586 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 video/plain
+1576177221.686 3822 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/000 11593 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b application/plain
+1576177221.686 4850 2001:db8:2ce:2 NONE/603 15130 OPTIONS cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.200 font/plain
+1576177221.686 2672 2001:db8:2ce:1 NONE/100 15113 GET cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway audio/plain
+1576177221.686 4189 localhost NONE/500 18364 HEAD cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway model/plain
+1576177221.686 4318 2001:db8:2ce:2 NONE/000 13752 COPY cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:b font/plain
+1576177221.686 4463 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 13991 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 multipart/plain
+1576177221.686 3605 2001:db8:2ce:2 NONE/400 10487 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- multipart/plain
+1576177221.686 4719 2001:db8:2ce:2 NONE/200 16659 POST cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.200 application/plain
+1576177221.686 1639 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/304 9976 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 video/plain
+1576177221.686 3542 localhost UDP_CLIENT_STALE_MEM_ABORTED/401 11698 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/- image/plain
+1576177221.686 4298 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/401 13045 GET cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 audio/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 4714 localhost NONE/200 11253 HEAD cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- audio/plain
+1576177221.686 2857 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/100 18801 POST cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- multipart/plain
+1576177221.686 1060 localhost UDP_CLIENT_STALE_MEM_ABORTED/000 9986 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b font/plain
+1576177221.686 4162 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/603 12053 PURGE cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/- multipart/plain
+1576177221.686 2210 203.0.113.2 NONE/300 14717 PURGE cache_object://localhost/counters - HIER_SINGLE_PARENT/203.0.113.100 message/plain
+1576177221.686 2985 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/304 11529 COPY cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a audio/plain
+1576177221.686 2836 2001:db8:2ce:1 NONE/300 18394 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a application/plain
+1576177221.686 3857 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/000 13056 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/content-gateway model/plain
+1576177221.686 3929 localhost UDP_CLIENT_STALE_MEM_ABORTED/304 17257 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway font/plain
+1576177221.686 2737 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/200 12718 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway video/plain
+1576177221.686 3312 2001:db8:2ce:2 NONE/200 11992 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b multipart/plain
+1576177221.686 3303 203.0.113.2 NONE/400 13606 GET cache_object://localhost/counters - HIER_PARENT_HIT/- text/plain
+1576177221.686 3666 203.0.113.1 NONE/500 13027 POST cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.200 multipart/plain
+1576177221.686 4233 203.0.113.2 UDP_CLIENT_STALE_MEM_ABORTED/200 16194 OPTIONS cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/- model/plain
+1576177221.686 1622 localhost NONE/200 18572 OPTIONS cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:b multipart/plain
+1576177221.686 3854 localhost UDP_CLIENT_STALE_MEM_ABORTED/400 9919 POST cache_object://localhost/counters - HIER_PARENT_HIT/203.0.113.100 multipart/plain
+Unmatched! The rat the cat the dog chased killed ate the malt!
+1576177221.686 3735 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/000 11979 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/2001:db8:2ce:a multipart/plain
+1576177221.686 3528 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/100 11686 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/203.0.113.100 image/plain
+1576177221.686 3447 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/100 15826 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:a video/plain
+1576177221.686 3509 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/400 17565 PURGE cache_object://localhost/counters - HIER_PARENT_HIT/2001:db8:2ce:a image/plain
+1576177221.686 3357 2001:db8:2ce:2 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/300 10714 COPY cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b video/plain
+1576177221.686 4608 localhost TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 10035 PURGE cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/content-gateway audio/plain
+1576177221.686 4717 203.0.113.1 UDP_CLIENT_STALE_MEM_ABORTED/300 12759 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/203.0.113.100 font/plain
+1576177221.686 1559 2001:db8:2ce:1 TCP_CF_NEGATIVE_NEGATIVE_ABORTED/200 17001 GET cache_object://localhost/counters - HIER_SINGLE_PARENT/- multipart/plain
+1576177221.686 4497 2001:db8:2ce:1 UDP_CLIENT_STALE_MEM_ABORTED/200 12530 OPTIONS cache_object://localhost/counters - HIER_CACHE_DIGEST_HIT/2001:db8:2ce:a text/plain
+1576177221.686 1142 2001:db8:2ce:2 UDP_CLIENT_STALE_MEM_ABORTED/304 15782 GET cache_object://localhost/counters - HIER_PARENT_HIT/content-gateway multipart/plain
+1576177221.686 2368 203.0.113.2 NONE/400 17664 HEAD cache_object://localhost/counters - HIER_NO_CACHE_DIGEST_DIRECT/2001:db8:2ce:b model/plain \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/squidlog/testdata/config.json b/src/go/plugin/go.d/modules/squidlog/testdata/config.json
new file mode 100644
index 000000000..5d563cc7e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/testdata/config.json
@@ -0,0 +1,27 @@
+{
+ "update_every": 123,
+ "path": "ok",
+ "exclude_path": "ok",
+ "log_type": "ok",
+ "csv_config": {
+ "fields_per_record": 123,
+ "delimiter": "ok",
+ "trim_leading_space": true,
+ "format": "ok"
+ },
+ "ltsv_config": {
+ "field_delimiter": "ok",
+ "value_delimiter": "ok",
+ "mapping": {
+ "ok": "ok"
+ }
+ },
+ "regexp_config": {
+ "pattern": "ok"
+ },
+ "json_config": {
+ "mapping": {
+ "ok": "ok"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/squidlog/testdata/config.yaml b/src/go/plugin/go.d/modules/squidlog/testdata/config.yaml
new file mode 100644
index 000000000..701205e23
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/testdata/config.yaml
@@ -0,0 +1,19 @@
+update_every: 123
+path: "ok"
+exclude_path: "ok"
+log_type: "ok"
+csv_config:
+ fields_per_record: 123
+ delimiter: "ok"
+ trim_leading_space: yes
+ format: "ok"
+ltsv_config:
+ field_delimiter: "ok"
+ value_delimiter: "ok"
+ mapping:
+ ok: "ok"
+regexp_config:
+ pattern: "ok"
+json_config:
+ mapping:
+ ok: "ok"
diff --git a/src/go/plugin/go.d/modules/squidlog/testdata/unknown.log b/src/go/plugin/go.d/modules/squidlog/testdata/unknown.log
new file mode 100644
index 000000000..0478a5c18
--- /dev/null
+++ b/src/go/plugin/go.d/modules/squidlog/testdata/unknown.log
@@ -0,0 +1 @@
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 300 2698 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/storcli/README.md b/src/go/plugin/go.d/modules/storcli/README.md
new file mode 120000
index 000000000..482049b19
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/README.md
@@ -0,0 +1 @@
+integrations/storecli_raid.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/storcli/charts.go b/src/go/plugin/go.d/modules/storcli/charts.go
new file mode 100644
index 000000000..3e0c07c1d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/charts.go
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package storcli
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioControllerHealthStatus = module.Priority + iota
+ prioControllerStatus
+ prioControllerBBUStatus
+
+ prioPhysDriveErrors
+ prioPhysDrivePredictiveFailures
+ prioPhysDriveSmartAlertStatus
+ prioPhysDriveTemperature
+
+ prioBBUTemperature
+)
+
+var controllerMegaraidChartsTmpl = module.Charts{
+ controllerHealthStatusChartTmpl.Copy(),
+ controllerStatusChartTmpl.Copy(),
+ controllerBBUStatusChartTmpl.Copy(),
+}
+
+var controllerMpt3sasChartsTmpl = module.Charts{
+ controllerHealthStatusChartTmpl.Copy(),
+}
+
+var (
+ controllerHealthStatusChartTmpl = module.Chart{
+ ID: "controller_%s_health_status",
+ Title: "Controller health status",
+ Units: "status",
+ Fam: "cntrl status",
+ Ctx: "storcli.controller_health_status",
+ Type: module.Line,
+ Priority: prioControllerHealthStatus,
+ Dims: module.Dims{
+ {ID: "cntrl_%s_health_status_healthy", Name: "healthy"},
+ {ID: "cntrl_%s_health_status_unhealthy", Name: "unhealthy"},
+ },
+ }
+ controllerStatusChartTmpl = module.Chart{
+ ID: "controller_%s_status",
+ Title: "Controller status",
+ Units: "status",
+ Fam: "cntrl status",
+ Ctx: "storcli.controller_status",
+ Type: module.Line,
+ Priority: prioControllerStatus,
+ Dims: module.Dims{
+ {ID: "cntrl_%s_status_optimal", Name: "optimal"},
+ {ID: "cntrl_%s_status_degraded", Name: "degraded"},
+ {ID: "cntrl_%s_status_partially_degraded", Name: "partially_degraded"},
+ {ID: "cntrl_%s_status_failed", Name: "failed"},
+ },
+ }
+ controllerBBUStatusChartTmpl = module.Chart{
+ ID: "controller_%s_bbu_status",
+ Title: "Controller BBU status",
+ Units: "status",
+ Fam: "cntrl status",
+ Ctx: "storcli.controller_bbu_status",
+ Type: module.Line,
+ Priority: prioControllerBBUStatus,
+ Dims: module.Dims{
+ {ID: "cntrl_%s_bbu_status_healthy", Name: "healthy"},
+ {ID: "cntrl_%s_bbu_status_unhealthy", Name: "unhealthy"},
+ {ID: "cntrl_%s_bbu_status_na", Name: "na"},
+ },
+ }
+)
+
+var physDriveChartsTmpl = module.Charts{
+ physDriveMediaErrorsRateChartTmpl.Copy(),
+ physDrivePredictiveFailuresRateChartTmpl.Copy(),
+ physDriveSmartAlertStatusChartTmpl.Copy(),
+ physDriveTemperatureChartTmpl.Copy(),
+}
+
+var (
+ physDriveMediaErrorsRateChartTmpl = module.Chart{
+ ID: "phys_drive_%s_cntrl_%s_media_errors_rate",
+ Title: "Physical Drive media errors rate",
+ Units: "errors/s",
+ Fam: "pd errors",
+ Ctx: "storcli.phys_drive_errors",
+ Type: module.Line,
+ Priority: prioPhysDriveErrors,
+ Dims: module.Dims{
+ {ID: "phys_drive_%s_cntrl_%s_media_error_count", Name: "media"},
+ {ID: "phys_drive_%s_cntrl_%s_other_error_count", Name: "other"},
+ },
+ }
+ physDrivePredictiveFailuresRateChartTmpl = module.Chart{
+ ID: "phys_drive_%s_cntrl_%s_predictive_failures_rate",
+ Title: "Physical Drive predictive failures rate",
+ Units: "failures/s",
+ Fam: "pd errors",
+ Ctx: "storcli.phys_drive_predictive_failures",
+ Type: module.Line,
+ Priority: prioPhysDrivePredictiveFailures,
+ Dims: module.Dims{
+ {ID: "phys_drive_%s_cntrl_%s_predictive_failure_count", Name: "predictive_failures"},
+ },
+ }
+ physDriveSmartAlertStatusChartTmpl = module.Chart{
+ ID: "phys_drive_%s_cntrl_%s_smart_alert_status",
+ Title: "Physical Drive SMART alert status",
+ Units: "status",
+ Fam: "pd smart",
+ Ctx: "storcli.phys_drive_smart_alert_status",
+ Type: module.Line,
+ Priority: prioPhysDriveSmartAlertStatus,
+ Dims: module.Dims{
+ {ID: "phys_drive_%s_cntrl_%s_smart_alert_status_active", Name: "active"},
+ {ID: "phys_drive_%s_cntrl_%s_smart_alert_status_inactive", Name: "inactive"},
+ },
+ }
+ physDriveTemperatureChartTmpl = module.Chart{
+ ID: "phys_drive_%s_cntrl_%s_temperature",
+ Title: "Physical Drive temperature",
+ Units: "Celsius",
+ Fam: "pd temperature",
+ Ctx: "storcli.phys_drive_temperature",
+ Type: module.Line,
+ Priority: prioPhysDriveTemperature,
+ Dims: module.Dims{
+ {ID: "phys_drive_%s_cntrl_%s_temperature", Name: "temperature"},
+ },
+ }
+)
+
+var bbuChartsTmpl = module.Charts{
+ bbuTemperatureChartTmpl.Copy(),
+}
+
+var (
+ bbuTemperatureChartTmpl = module.Chart{
+ ID: "bbu_%s_cntrl_%s_temperature",
+ Title: "BBU temperature",
+ Units: "Celsius",
+ Fam: "bbu temperature",
+ Ctx: "storcli.bbu_temperature",
+ Type: module.Line,
+ Priority: prioBBUTemperature,
+ Dims: module.Dims{
+ {ID: "bbu_%s_cntrl_%s_temperature", Name: "temperature"},
+ },
+ }
+)
+
+func (s *StorCli) addControllerCharts(cntrl controllerInfo) {
+ var charts *module.Charts
+
+ switch cntrl.Version.DriverName {
+ case driverNameMegaraid:
+ charts = controllerMegaraidChartsTmpl.Copy()
+ case driverNameSas:
+ charts = controllerMpt3sasChartsTmpl.Copy()
+ default:
+ return
+ }
+
+ num := strconv.Itoa(cntrl.Basics.Controller)
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, num)
+ chart.Labels = []module.Label{
+ {Key: "controller_number", Value: num},
+ {Key: "model", Value: strings.TrimSpace(cntrl.Basics.Model)},
+ {Key: "driver_name", Value: cntrl.Version.DriverName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, num)
+ }
+ }
+
+ if err := s.Charts().Add(*charts...); err != nil {
+ s.Warning(err)
+ }
+}
+
+func (s *StorCli) addPhysDriveCharts(cntrlNum int, di *driveInfo, ds *driveState, da *driveAttrs) {
+ charts := physDriveChartsTmpl.Copy()
+
+ if _, ok := parseInt(getTemperature(ds.DriveTemperature)); !ok {
+ _ = charts.Remove(physDriveTemperatureChartTmpl.ID)
+ }
+
+ num := strconv.Itoa(cntrlNum)
+
+ var enc, slot string
+ if parts := strings.Split(di.EIDSlt, ":"); len(parts) == 2 { // EID:Slt
+ enc, slot = parts[0], parts[1]
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, da.WWN, num)
+ chart.Labels = []module.Label{
+ {Key: "controller_number", Value: num},
+ {Key: "enclosure_number", Value: enc},
+ {Key: "slot_number", Value: slot},
+ {Key: "media_type", Value: di.Med},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, da.WWN, num)
+ }
+ }
+
+ if err := s.Charts().Add(*charts...); err != nil {
+ s.Warning(err)
+ }
+}
+
+func (s *StorCli) addBBUCharts(cntrlNum, bbuNum, model string) {
+ charts := bbuChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, bbuNum, cntrlNum)
+ chart.Labels = []module.Label{
+ {Key: "controller_number", Value: cntrlNum},
+ {Key: "bbu_number", Value: bbuNum},
+ {Key: "model", Value: model},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, bbuNum, cntrlNum)
+ }
+ }
+
+ if err := s.Charts().Add(*charts...); err != nil {
+ s.Warning(err)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/storcli/collect.go b/src/go/plugin/go.d/modules/storcli/collect.go
new file mode 100644
index 000000000..df2b09d87
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/collect.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package storcli
+
+import "fmt"
+
+const (
+ driverNameMegaraid = "megaraid_sas"
+ driverNameSas = "mpt3sas"
+)
+
+func (s *StorCli) collect() (map[string]int64, error) {
+ cntrlResp, err := s.queryControllersInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ driver := cntrlResp.Controllers[0].ResponseData.Version.DriverName
+
+ switch driver {
+ case driverNameMegaraid:
+ if err := s.collectMegaraidControllersInfo(mx, cntrlResp); err != nil {
+ return nil, fmt.Errorf("failed to collect megaraid controller info: %s", err)
+ }
+ if len(cntrlResp.Controllers[0].ResponseData.PDList) > 0 {
+ drivesResp, err := s.queryDrivesInfo()
+ if err != nil {
+ return nil, fmt.Errorf("failed to collect megaraid drive info: %s", err)
+ }
+ if err := s.collectMegaRaidDrives(mx, drivesResp); err != nil {
+ return nil, err
+ }
+ }
+ case driverNameSas:
+ if err := s.collectMpt3sasControllersInfo(mx, cntrlResp); err != nil {
+ return nil, fmt.Errorf("failed to collect mpt3sas controller info: %s", err)
+ }
+ default:
+ return nil, fmt.Errorf("unknown driver: %s", driver)
+ }
+
+ return mx, nil
+}
diff --git a/src/go/plugin/go.d/modules/storcli/collect_controllers.go b/src/go/plugin/go.d/modules/storcli/collect_controllers.go
new file mode 100644
index 000000000..64d615946
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/collect_controllers.go
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package storcli
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type (
+ controllersInfoResponse struct {
+ Controllers []struct {
+ CommandStatus struct {
+ Controller int `json:"Controller"`
+ Status string `json:"Status"`
+ } `json:"Command Status"`
+ ResponseData controllerInfo `json:"Response Data"`
+ } `json:"Controllers"`
+ }
+ controllerInfo struct {
+ Basics struct {
+ Controller int `json:"Controller"`
+ Model string `json:"Model"`
+ SerialNumber string `json:"Serial Number"`
+ } `json:"Basics"`
+ Version struct {
+ DriverName string `json:"Driver Name"`
+ } `json:"Version"`
+ Status struct {
+ ControllerStatus string `json:"Controller Status"`
+ BBUStatus *storNumber `json:"BBU Status"`
+ } `json:"Status"`
+ BBUInfo []struct {
+ Model string `json:"Model"`
+ State string `json:"State"`
+ Temp string `json:"Temp"`
+ } `json:"BBU_Info"`
+ PDList []struct {
+ } `json:"PD LIST"`
+ }
+)
+
+func (s *StorCli) collectMegaraidControllersInfo(mx map[string]int64, resp *controllersInfoResponse) error {
+ for _, v := range resp.Controllers {
+ cntrl := v.ResponseData
+
+ cntrlNum := strconv.Itoa(cntrl.Basics.Controller)
+
+ if !s.controllers[cntrlNum] {
+ s.controllers[cntrlNum] = true
+ s.addControllerCharts(cntrl)
+ }
+
+ px := fmt.Sprintf("cntrl_%s_", cntrlNum)
+
+ for _, st := range []string{"healthy", "unhealthy"} {
+ mx[px+"health_status_"+st] = 0
+ }
+ if strings.ToLower(cntrl.Status.ControllerStatus) == "optimal" {
+ mx[px+"health_status_healthy"] = 1
+ } else {
+ mx[px+"health_status_unhealthy"] = 1
+ }
+
+ for _, st := range []string{"optimal", "degraded", "partially_degraded", "failed"} {
+ mx[px+"status_"+st] = 0
+ }
+		// Normalize, e.g. "Partially Degraded" -> "partially_degraded", so the key matches the zeroed dimensions above.
+		mx[px+"status_"+strings.ReplaceAll(strings.ToLower(cntrl.Status.ControllerStatus), " ", "_")] = 1
+
+ if cntrl.Status.BBUStatus != nil {
+ for _, st := range []string{"healthy", "unhealthy", "na"} {
+ mx[px+"bbu_status_"+st] = 0
+ }
+ // https://github.com/prometheus-community/node-exporter-textfile-collector-scripts/issues/27
+ switch *cntrl.Status.BBUStatus {
+ case "0", "8", "4096": // 0 good, 8 charging
+ mx[px+"bbu_status_healthy"] = 1
+ case "NA", "N/A":
+ mx[px+"bbu_status_na"] = 1
+ default:
+ mx[px+"bbu_status_unhealthy"] = 1
+ }
+ }
+
+ for i, bbu := range cntrl.BBUInfo {
+ bbuNum := strconv.Itoa(i)
+ if k := cntrlNum + bbuNum; !s.bbu[k] {
+ s.bbu[k] = true
+ s.addBBUCharts(cntrlNum, bbuNum, bbu.Model)
+ }
+
+ px := fmt.Sprintf("bbu_%s_cntrl_%s_", bbuNum, cntrlNum)
+
+ if v, ok := parseInt(getTemperature(bbu.Temp)); ok {
+ mx[px+"temperature"] = v
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *StorCli) collectMpt3sasControllersInfo(mx map[string]int64, resp *controllersInfoResponse) error {
+ for _, v := range resp.Controllers {
+ cntrl := v.ResponseData
+
+ cntrlNum := strconv.Itoa(cntrl.Basics.Controller)
+
+ if !s.controllers[cntrlNum] {
+ s.controllers[cntrlNum] = true
+ s.addControllerCharts(cntrl)
+ }
+
+ px := fmt.Sprintf("cntrl_%s_", cntrlNum)
+
+ for _, st := range []string{"healthy", "unhealthy"} {
+ mx[px+"health_status_"+st] = 0
+ }
+ if strings.ToLower(cntrl.Status.ControllerStatus) == "ok" {
+ mx[px+"health_status_healthy"] = 1
+ } else {
+ mx[px+"health_status_unhealthy"] = 1
+ }
+ }
+
+ return nil
+}
+
+func (s *StorCli) queryControllersInfo() (*controllersInfoResponse, error) {
+ bs, err := s.exec.controllersInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(bs) == 0 {
+ return nil, errors.New("empty response")
+ }
+
+ var resp controllersInfoResponse
+ if err := json.Unmarshal(bs, &resp); err != nil {
+ return nil, err
+ }
+ if len(resp.Controllers) == 0 {
+ return nil, errors.New("no controllers found")
+ }
+ if st := resp.Controllers[0].CommandStatus.Status; st != "Success" {
+ return nil, fmt.Errorf("command status error: %s", st)
+ }
+
+ return &resp, nil
+}
diff --git a/src/go/plugin/go.d/modules/storcli/collect_drives.go b/src/go/plugin/go.d/modules/storcli/collect_drives.go
new file mode 100644
index 000000000..5c2ecb387
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/collect_drives.go
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package storcli
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type drivesInfoResponse struct {
+ Controllers []struct {
+ CommandStatus struct {
+ Controller int `json:"Controller"`
+ Status string `json:"Status"`
+ } `json:"Command Status"`
+ ResponseData map[string]json.RawMessage `json:"Response Data"`
+ } `json:"Controllers"`
+}
+
+type (
+ driveInfo struct {
+ EIDSlt string `json:"EID:Slt"`
+ DID int `json:"DID"`
+ State string `json:"State"`
+ DG int `json:"DG"`
+ Size string `json:"Size"`
+ Intf string `json:"Intf"`
+ Med string `json:"Med"`
+ SED string `json:"SED"`
+ PI string `json:"PI"`
+ SeSz string `json:"SeSz"`
+ Model string `json:"Model"`
+ Sp string `json:"Sp"`
+ Type string `json:"Type"`
+ }
+ driveState struct {
+ MediaErrorCount storNumber `json:"Media Error Count"`
+ OtherErrorCount storNumber `json:"Other Error Count"`
+ DriveTemperature string `json:"Drive Temperature"`
+ PredictiveFailureCount storNumber `json:"Predictive Failure Count"`
+ SmartAlertFlagged string `json:"S.M.A.R.T alert flagged by drive"`
+ }
+ driveAttrs struct {
+ WWN string `json:"WWN"`
+ DeviceSpeed string `json:"Device Speed"`
+ LinkSpeed string `json:"Link Speed"`
+ }
+)
+
+// storNumber holds values that storcli reports either as numbers or as strings like 'N/A'.
+type storNumber string
+
+// UnmarshalJSON stores the raw value with surrounding quotes stripped, so string values
+// ("N/A", "NA") compare and parse the same way as bare numbers.
+func (n *storNumber) UnmarshalJSON(b []byte) error { *n = storNumber(bytes.Trim(b, `"`)); return nil }
+
+func (s *StorCli) collectMegaRaidDrives(mx map[string]int64, resp *drivesInfoResponse) error {
+ if resp == nil {
+ return nil
+ }
+
+ for _, cntrl := range resp.Controllers {
+ var ids []string
+ for k := range cntrl.ResponseData {
+ if !strings.HasSuffix(k, "Detailed Information") {
+ continue
+ }
+ parts := strings.Fields(k) // Drive /c0/e252/s0 - Detailed Information
+ if len(parts) < 2 {
+ continue
+ }
+ id := parts[1]
+ if strings.IndexByte(id, '/') == -1 {
+ continue
+ }
+ ids = append(ids, id)
+ }
+
+ cntrlIdx := cntrl.CommandStatus.Controller
+
+ for _, id := range ids {
+ info, err := getDriveInfo(cntrl.ResponseData, id)
+ if err != nil {
+ return err
+ }
+ data, err := getDriveDetailedInfo(cntrl.ResponseData, id)
+ if err != nil {
+ return err
+ }
+ state, err := getDriveState(data, id)
+ if err != nil {
+ return err
+ }
+ attrs, err := getDriveAttrs(data, id)
+ if err != nil {
+ return err
+ }
+
+ if attrs.WWN == "" {
+ continue
+ }
+
+ if !s.drives[attrs.WWN] {
+ s.drives[attrs.WWN] = true
+ s.addPhysDriveCharts(cntrlIdx, info, state, attrs)
+ }
+
+ px := fmt.Sprintf("phys_drive_%s_cntrl_%d_", attrs.WWN, cntrlIdx)
+
+ if v, ok := parseInt(string(state.MediaErrorCount)); ok {
+ mx[px+"media_error_count"] = v
+ }
+ if v, ok := parseInt(string(state.OtherErrorCount)); ok {
+ mx[px+"other_error_count"] = v
+ }
+ if v, ok := parseInt(string(state.PredictiveFailureCount)); ok {
+ mx[px+"predictive_failure_count"] = v
+ }
+ if v, ok := parseInt(getTemperature(state.DriveTemperature)); ok {
+ mx[px+"temperature"] = v
+ }
+ for _, st := range []string{"active", "inactive"} {
+ mx[px+"smart_alert_status_"+st] = 0
+ }
+ if state.SmartAlertFlagged == "Yes" {
+ mx[px+"smart_alert_status_active"] = 1
+ } else {
+ mx[px+"smart_alert_status_inactive"] = 1
+ }
+ }
+ }
+
+ return nil
+}
+
+func (s *StorCli) queryDrivesInfo() (*drivesInfoResponse, error) {
+ bs, err := s.exec.drivesInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(bs) == 0 {
+ return nil, errors.New("empty response")
+ }
+
+ var resp drivesInfoResponse
+ if err := json.Unmarshal(bs, &resp); err != nil {
+ return nil, err
+ }
+
+ if len(resp.Controllers) == 0 {
+ return nil, errors.New("no controllers found")
+ }
+ if st := resp.Controllers[0].CommandStatus.Status; st != "Success" {
+ return nil, fmt.Errorf("command status error: %s", st)
+ }
+
+ return &resp, nil
+}
+
+func getDriveInfo(respData map[string]json.RawMessage, id string) (*driveInfo, error) {
+ k := fmt.Sprintf("Drive %s", id)
+ raw, ok := respData[k]
+ if !ok {
+ return nil, fmt.Errorf("drive info not found for '%s'", id)
+ }
+
+ var drive []driveInfo
+ if err := json.Unmarshal(raw, &drive); err != nil {
+ return nil, err
+ }
+
+ if len(drive) == 0 {
+ return nil, fmt.Errorf("drive info not found for '%s'", id)
+ }
+
+ return &drive[0], nil
+}
+
+func getDriveDetailedInfo(respData map[string]json.RawMessage, id string) (map[string]json.RawMessage, error) {
+ k := fmt.Sprintf("Drive %s - Detailed Information", id)
+ raw, ok := respData[k]
+ if !ok {
+ return nil, fmt.Errorf("drive detailed info not found for '%s'", id)
+ }
+
+ var info map[string]json.RawMessage
+ if err := json.Unmarshal(raw, &info); err != nil {
+ return nil, err
+ }
+
+ return info, nil
+}
+
+func getDriveState(driveDetailedInfo map[string]json.RawMessage, id string) (*driveState, error) {
+ k := fmt.Sprintf("Drive %s State", id)
+ raw, ok := driveDetailedInfo[k]
+ if !ok {
+ return nil, fmt.Errorf("drive detailed info state not found for '%s'", id)
+ }
+
+ var state driveState
+ if err := json.Unmarshal(raw, &state); err != nil {
+ return nil, err
+ }
+
+ return &state, nil
+}
+
+func getDriveAttrs(driveDetailedInfo map[string]json.RawMessage, id string) (*driveAttrs, error) {
+ k := fmt.Sprintf("Drive %s Device attributes", id)
+ raw, ok := driveDetailedInfo[k]
+ if !ok {
+		return nil, fmt.Errorf("drive device attributes not found for '%s'", id)
+	}
+
+	var attrs driveAttrs
+	if err := json.Unmarshal(raw, &attrs); err != nil {
+		return nil, err
+	}
+
+	return &attrs, nil
+}
+
+func getTemperature(temp string) string {
+ // ' 28C (82.40 F)' (drive) or '33C' (bbu)
+ i := strings.IndexByte(temp, 'C')
+ if i == -1 {
+ return ""
+ }
+ return strings.TrimSpace(temp[:i])
+}
+
+func parseInt(s string) (int64, bool) {
+ i, err := strconv.ParseInt(s, 10, 64)
+ return i, err == nil
+}
diff --git a/src/go/plugin/go.d/modules/storcli/config_schema.json b/src/go/plugin/go.d/modules/storcli/config_schema.json
new file mode 100644
index 000000000..226a370f4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/config_schema.json
@@ -0,0 +1,35 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "StorCLI collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/storcli/exec.go b/src/go/plugin/go.d/modules/storcli/exec.go
new file mode 100644
index 000000000..5be88a899
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/exec.go
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package storcli
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newStorCliExec(ndsudoPath string, timeout time.Duration, log *logger.Logger) *storCliExec {
+ return &storCliExec{
+ Logger: log,
+ ndsudoPath: ndsudoPath,
+ timeout: timeout,
+ }
+}
+
+type storCliExec struct {
+ *logger.Logger
+
+ ndsudoPath string
+ timeout time.Duration
+}
+
+func (e *storCliExec) controllersInfo() ([]byte, error) {
+ return e.execute("storcli-controllers-info")
+}
+
+func (e *storCliExec) drivesInfo() ([]byte, error) {
+ return e.execute("storcli-drives-info")
+}
+
+func (e *storCliExec) execute(args ...string) ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.ndsudoPath, args...)
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/storcli/init.go b/src/go/plugin/go.d/modules/storcli/init.go
new file mode 100644
index 000000000..d35ad07db
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/init.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package storcli
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+func (s *StorCli) initStorCliExec() (storCli, error) {
+ ndsudoPath := filepath.Join(executable.Directory, "ndsudo")
+
+ if _, err := os.Stat(ndsudoPath); err != nil {
+ return nil, fmt.Errorf("ndsudo executable not found: %v", err)
+ }
+
+ storExec := newStorCliExec(ndsudoPath, s.Timeout.Duration(), s.Logger)
+
+ return storExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/storcli/integrations/storecli_raid.md b/src/go/plugin/go.d/modules/storcli/integrations/storecli_raid.md
new file mode 100644
index 000000000..9b8b28480
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/integrations/storecli_raid.md
@@ -0,0 +1,252 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/storcli/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/storcli/metadata.yaml"
+sidebar_label: "StoreCLI RAID"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# StoreCLI RAID
+
+
+<img src="https://netdata.cloud/img/hard-drive.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: storcli
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitors the health of StoreCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.
+It relies on the [`storcli`](https://docs.broadcom.com/doc/12352476) CLI tool but avoids directly executing the binary.
+Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+Executed commands:
+- `storcli /cALL show all J nolog`
+- `storcli /cALL/eALL/sALL show all J nolog`
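+
+If collection fails, you can inspect the raw JSON the collector parses by running the same commands manually (a minimal sketch; running via `sudo` and piping to `jq` are assumptions here, adjust for your system):
+
+```bash
+# Controllers overview: the "Status" section (controller health, BBU status)
+sudo storcli /cALL show all J nolog | jq '.Controllers[]."Response Data".Status'
+
+# Per-drive details: confirm the command itself succeeds
+sudo storcli /cALL/eALL/sALL show all J nolog | jq '.Controllers[]."Command Status"'
+```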
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per controller
+
+These metrics refer to the Controller.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| controller_number | Controller number (index) |
+| model | Controller model |
+| driver_name | Controller driver (megaraid_sas or mpt3sas) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| storcli.controller_health_status | healthy, unhealthy | status |
+| storcli.controller_status | optimal, degraded, partially_degraded, failed | status |
+| storcli.controller_bbu_status | healthy, unhealthy, na | status |
+
+### Per physical drive
+
+These metrics refer to the Physical Drive.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| controller_number | Controller number (index) |
+| enclosure_number | Enclosure number (index) |
+| slot_number | Slot number (index) |
+| media_type | Media type (e.g. HDD) |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| storcli.phys_drive_errors | media, other | errors/s |
+| storcli.phys_drive_predictive_failures | predictive_failures | failures/s |
+| storcli.phys_drive_smart_alert_status | active, inactive | status |
+| storcli.phys_drive_temperature | temperature | Celsius |
+
+### Per bbu
+
+These metrics refer to the Backup Battery Unit.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| controller_number | Controller number (index) |
+| bbu_number | BBU number (index) |
+| model | BBU model |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| storcli.bbu_temperature | temperature | Celsius |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ storcli_controller_health_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_health_status | RAID controller ${label:controller_number} is unhealthy |
+| [ storcli_controller_bbu_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.controller_bbu_status | RAID controller ${label:controller_number} BBU is unhealthy |
+| [ storcli_phys_drive_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_errors | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} errors |
+| [ storcli_phys_drive_predictive_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf) | storcli.phys_drive_predictive_failures | RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} predictive failures |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/storcli.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/storcli.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| timeout | storcli binary execution timeout. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom update_every
+
+Allows you to override the default data collection interval.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: storcli
+ update_every: 5 # Collect StorCLI RAID statistics every 5 seconds
+
+```
+</details>
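+
+##### Custom timeout
+
+A variant that also raises the `storcli` binary execution timeout (the values are illustrative only):
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: storcli
+    update_every: 5
+    timeout: 5 # Allow up to 5 seconds for the storcli binary to respond
+```
+</details>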
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `storcli` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m storcli
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `storcli` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep storcli
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep storcli /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep storcli
+```
+
+
diff --git a/src/go/plugin/go.d/modules/storcli/metadata.yaml b/src/go/plugin/go.d/modules/storcli/metadata.yaml
new file mode 100644
index 000000000..7e807f056
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/metadata.yaml
@@ -0,0 +1,194 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-storcli
+ plugin_name: go.d.plugin
+ module_name: storcli
+ monitored_instance:
+ name: StoreCLI RAID
+ link: "https://docs.broadcom.com/doc/12352476"
+ icon_filename: "hard-drive.svg"
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - storage
+ - raid-controller
+ - manage-disks
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Monitors the health of StoreCLI Hardware RAID by tracking the status of RAID adapters, physical drives, and backup batteries in your storage system.
+ It relies on the [`storcli`](https://docs.broadcom.com/doc/12352476) CLI tool but avoids directly executing the binary.
+ Instead, it utilizes `ndsudo`, a Netdata helper specifically designed to run privileged commands securely within the Netdata environment.
+ This approach eliminates the need to use `sudo`, improving security and potentially simplifying permission management.
+
+ Executed commands:
+ - `storcli /cALL show all J nolog`
+ - `storcli /cALL/eALL/sALL show all J nolog`
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/storcli.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: timeout
+ description: storcli binary execution timeout.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom update_every
+ description: Allows you to override the default data collection interval.
+ config: |
+ jobs:
+ - name: storcli
+ update_every: 5 # Collect StorCLI RAID statistics every 5 seconds
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: storcli_controller_health_status
+ metric: storcli.controller_health_status
+ info: RAID controller ${label:controller_number} is unhealthy
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf
+ - name: storcli_controller_bbu_status
+ metric: storcli.controller_bbu_status
+ info: RAID controller ${label:controller_number} BBU is unhealthy
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf
+ - name: storcli_phys_drive_errors
+ metric: storcli.phys_drive_errors
+ info: RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} errors
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf
+ - name: storcli_phys_drive_predictive_failures
+ metric: storcli.phys_drive_predictive_failures
+ info: RAID physical drive c${label:controller_number}/e${label:enclosure_number}/s${label:slot_number} predictive failures
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/storcli.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: controller
+ description: These metrics refer to the Controller.
+ labels:
+ - name: controller_number
+ description: Controller number (index)
+ - name: model
+ description: Controller model
+ - name: driver_name
+ description: Controller driver (megaraid_sas or mpt3sas)
+ metrics:
+ - name: storcli.controller_health_status
+ description: Controller health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: healthy
+ - name: unhealthy
+ - name: storcli.controller_status
+ description: Controller status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: optimal
+ - name: degraded
+ - name: partially_degraded
+ - name: failed
+ - name: storcli.controller_bbu_status
+ description: Controller BBU status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: healthy
+ - name: unhealthy
+ - name: na
+ - name: physical drive
+ description: These metrics refer to the Physical Drive.
+ labels:
+ - name: controller_number
+ description: Controller number (index)
+ - name: enclosure_number
+ description: Enclosure number (index)
+ - name: slot_number
+ description: Slot number (index)
+            - name: media_type
+ description: Media type (e.g. HDD)
+ metrics:
+ - name: storcli.phys_drive_errors
+ description: Physical Drive media errors rate
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: media
+ - name: other
+ - name: storcli.phys_drive_predictive_failures
+ description: Physical Drive predictive failures rate
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: predictive_failures
+ - name: storcli.phys_drive_smart_alert_status
+ description: Physical Drive SMART alert status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: storcli.phys_drive_temperature
+ description: Physical Drive temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: bbu
+ description: These metrics refer to the Backup Battery Unit.
+ labels:
+ - name: controller_number
+ description: Controller number (index)
+ - name: bbu_number
+ description: BBU number (index)
+ - name: model
+ description: BBU model
+ metrics:
+ - name: storcli.bbu_temperature
+ description: BBU temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
diff --git a/src/go/plugin/go.d/modules/storcli/storcli.go b/src/go/plugin/go.d/modules/storcli/storcli.go
new file mode 100644
index 000000000..0133c4700
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/storcli.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package storcli
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("storcli", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *StorCli {
+ return &StorCli{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ controllers: make(map[string]bool),
+ drives: make(map[string]bool),
+ bbu: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+}
+
+type (
+ StorCli struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec storCli
+
+ controllers map[string]bool
+ drives map[string]bool
+ bbu map[string]bool
+ }
+ storCli interface {
+ controllersInfo() ([]byte, error)
+ drivesInfo() ([]byte, error)
+ }
+)
+
+func (s *StorCli) Configuration() any {
+ return s.Config
+}
+
+func (s *StorCli) Init() error {
+ storExec, err := s.initStorCliExec()
+ if err != nil {
+ s.Errorf("storcli exec initialization: %v", err)
+ return err
+ }
+ s.exec = storExec
+
+ return nil
+}
+
+func (s *StorCli) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (s *StorCli) Charts() *module.Charts {
+ return s.charts
+}
+
+func (s *StorCli) Collect() map[string]int64 {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (s *StorCli) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/storcli/storcli_test.go b/src/go/plugin/go.d/modules/storcli/storcli_test.go
new file mode 100644
index 000000000..63ee54b56
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/storcli_test.go
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package storcli
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataMegaControllerInfo, _ = os.ReadFile("testdata/megaraid-controllers-info.json")
+ dataMegaDrivesInfo, _ = os.ReadFile("testdata/megaraid-drives-info.json")
+
+ dataSasControllerInfo, _ = os.ReadFile("testdata/mpt3sas-controllers-info.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataMegaControllerInfo": dataMegaControllerInfo,
+ "dataMegaDrivesInfo": dataMegaDrivesInfo,
+ "dataSasControllerInfo": dataSasControllerInfo,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestStorCli_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &StorCli{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestStorCli_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if 'ndsudo' not found": {
+ wantFail: true,
+ config: New().Config,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ stor := New()
+
+ if test.wantFail {
+ assert.Error(t, stor.Init())
+ } else {
+ assert.NoError(t, stor.Init())
+ }
+ })
+ }
+}
+
+func TestStorCli_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *StorCli
+ }{
+ "not initialized exec": {
+ prepare: func() *StorCli {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *StorCli {
+ stor := New()
+ stor.exec = prepareMockMegaRaidOK()
+ _ = stor.Check()
+ return stor
+ },
+ },
+ "after collect": {
+ prepare: func() *StorCli {
+ stor := New()
+ stor.exec = prepareMockMegaRaidOK()
+ _ = stor.Collect()
+ return stor
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ stor := test.prepare()
+
+ assert.NotPanics(t, stor.Cleanup)
+ })
+ }
+}
+
+func TestStorCli_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestStorCli_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockStorCliExec
+ wantFail bool
+ }{
+ "success MegaRAID controller": {
+ wantFail: false,
+ prepareMock: prepareMockMegaRaidOK,
+ },
+ "err on exec": {
+ wantFail: true,
+ prepareMock: prepareMockErr,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ stor := New()
+ mock := test.prepareMock()
+ stor.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, stor.Check())
+ } else {
+ assert.NoError(t, stor.Check())
+ }
+ })
+ }
+}
+
+func TestStorCli_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockStorCliExec
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success MegaRAID controller": {
+ prepareMock: prepareMockMegaRaidOK,
+ wantCharts: len(controllerMegaraidChartsTmpl)*1 + len(physDriveChartsTmpl)*6 + len(bbuChartsTmpl)*1,
+ wantMetrics: map[string]int64{
+ "bbu_0_cntrl_0_temperature": 34,
+ "cntrl_0_bbu_status_healthy": 1,
+ "cntrl_0_bbu_status_na": 0,
+ "cntrl_0_bbu_status_unhealthy": 0,
+ "cntrl_0_health_status_healthy": 1,
+ "cntrl_0_health_status_unhealthy": 0,
+ "cntrl_0_status_degraded": 0,
+ "cntrl_0_status_failed": 0,
+ "cntrl_0_status_optimal": 1,
+ "cntrl_0_status_partially_degraded": 0,
+ "phys_drive_5000C500C36C8BCD_cntrl_0_media_error_count": 0,
+ "phys_drive_5000C500C36C8BCD_cntrl_0_other_error_count": 0,
+ "phys_drive_5000C500C36C8BCD_cntrl_0_predictive_failure_count": 0,
+ "phys_drive_5000C500C36C8BCD_cntrl_0_smart_alert_status_active": 0,
+ "phys_drive_5000C500C36C8BCD_cntrl_0_smart_alert_status_inactive": 1,
+ "phys_drive_5000C500C36C8BCD_cntrl_0_temperature": 28,
+ "phys_drive_5000C500D59840FE_cntrl_0_media_error_count": 0,
+ "phys_drive_5000C500D59840FE_cntrl_0_other_error_count": 0,
+ "phys_drive_5000C500D59840FE_cntrl_0_predictive_failure_count": 0,
+ "phys_drive_5000C500D59840FE_cntrl_0_smart_alert_status_active": 0,
+ "phys_drive_5000C500D59840FE_cntrl_0_smart_alert_status_inactive": 1,
+ "phys_drive_5000C500D59840FE_cntrl_0_temperature": 28,
+ "phys_drive_5000C500D6061539_cntrl_0_media_error_count": 0,
+ "phys_drive_5000C500D6061539_cntrl_0_other_error_count": 0,
+ "phys_drive_5000C500D6061539_cntrl_0_predictive_failure_count": 0,
+ "phys_drive_5000C500D6061539_cntrl_0_smart_alert_status_active": 0,
+ "phys_drive_5000C500D6061539_cntrl_0_smart_alert_status_inactive": 1,
+ "phys_drive_5000C500D6061539_cntrl_0_temperature": 28,
+ "phys_drive_5000C500DC79B194_cntrl_0_media_error_count": 0,
+ "phys_drive_5000C500DC79B194_cntrl_0_other_error_count": 0,
+ "phys_drive_5000C500DC79B194_cntrl_0_predictive_failure_count": 0,
+ "phys_drive_5000C500DC79B194_cntrl_0_smart_alert_status_active": 0,
+ "phys_drive_5000C500DC79B194_cntrl_0_smart_alert_status_inactive": 1,
+ "phys_drive_5000C500DC79B194_cntrl_0_temperature": 28,
+ "phys_drive_5000C500E54F4EBB_cntrl_0_media_error_count": 0,
+ "phys_drive_5000C500E54F4EBB_cntrl_0_other_error_count": 0,
+ "phys_drive_5000C500E54F4EBB_cntrl_0_predictive_failure_count": 0,
+ "phys_drive_5000C500E54F4EBB_cntrl_0_smart_alert_status_active": 0,
+ "phys_drive_5000C500E54F4EBB_cntrl_0_smart_alert_status_inactive": 1,
+ "phys_drive_5000C500E54F4EBB_cntrl_0_temperature": 28,
+ "phys_drive_5000C500E5659BA7_cntrl_0_media_error_count": 0,
+ "phys_drive_5000C500E5659BA7_cntrl_0_other_error_count": 0,
+ "phys_drive_5000C500E5659BA7_cntrl_0_predictive_failure_count": 0,
+ "phys_drive_5000C500E5659BA7_cntrl_0_smart_alert_status_active": 0,
+ "phys_drive_5000C500E5659BA7_cntrl_0_smart_alert_status_inactive": 1,
+ "phys_drive_5000C500E5659BA7_cntrl_0_temperature": 27,
+ },
+ },
+ "success SAS controller": {
+ prepareMock: prepareMockSasOK,
+ wantCharts: len(controllerMpt3sasChartsTmpl) * 1,
+ wantMetrics: map[string]int64{
+ "cntrl_0_health_status_healthy": 1,
+ "cntrl_0_health_status_unhealthy": 0,
+ },
+ },
+ "err on exec": {
+ prepareMock: prepareMockErr,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ stor := New()
+ mock := test.prepareMock()
+ stor.exec = mock
+
+ mx := stor.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+ assert.Len(t, *stor.Charts(), test.wantCharts)
+ testMetricsHasAllChartsDims(t, stor, mx)
+ })
+ }
+}
+
+func prepareMockMegaRaidOK() *mockStorCliExec {
+ return &mockStorCliExec{
+ controllersInfoData: dataMegaControllerInfo,
+ drivesInfoData: dataMegaDrivesInfo,
+ }
+}
+
+func prepareMockSasOK() *mockStorCliExec {
+ return &mockStorCliExec{
+ controllersInfoData: dataSasControllerInfo,
+ drivesInfoData: nil,
+ }
+}
+
+func prepareMockErr() *mockStorCliExec {
+ return &mockStorCliExec{
+ errOnInfo: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockStorCliExec {
+ resp := []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`)
+ return &mockStorCliExec{
+ controllersInfoData: resp,
+ drivesInfoData: resp,
+ }
+}
+
+func prepareMockEmptyResponse() *mockStorCliExec {
+ return &mockStorCliExec{}
+}
+
+type mockStorCliExec struct {
+ errOnInfo bool
+ controllersInfoData []byte
+ drivesInfoData []byte
+}
+
+func (m *mockStorCliExec) controllersInfo() ([]byte, error) {
+ if m.errOnInfo {
+ return nil, errors.New("mock.controllerInfo() error")
+ }
+ return m.controllersInfoData, nil
+}
+
+func (m *mockStorCliExec) drivesInfo() ([]byte, error) {
+ if m.errOnInfo {
+ return nil, errors.New("mock.drivesInfo() error")
+ }
+ return m.drivesInfoData, nil
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, stor *StorCli, mx map[string]int64) {
+ for _, chart := range *stor.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/storcli/testdata/config.json b/src/go/plugin/go.d/modules/storcli/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/storcli/testdata/config.yaml b/src/go/plugin/go.d/modules/storcli/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/storcli/testdata/megaraid-controllers-info.json b/src/go/plugin/go.d/modules/storcli/testdata/megaraid-controllers-info.json
new file mode 100644
index 000000000..e4e988d10
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/testdata/megaraid-controllers-info.json
@@ -0,0 +1,687 @@
+{
+ "Controllers": [
+ {
+ "Command Status": {
+ "CLI Version": "007.2807.0000.0000 Dec 22, 2023",
+ "Operating system": "Linux 6.5.13-1-pve",
+ "Controller": 0,
+ "Status": "Success",
+ "Description": "None"
+ },
+ "Response Data": {
+ "Basics": {
+ "Controller": 0,
+ "Model": "ServeRAID M5015 SAS/SATA Controller",
+ "Serial Number": "SV04616189",
+ "Current Controller Date/Time": "04/17/2024, 18:28:30",
+ "Current System Date/time": "04/17/2024, 18:30:05",
+ "SAS Address": "500605b002e04f10",
+ "PCI Address": "00:0a:00:00",
+ "Mfg Date": "11/11/10",
+ "Rework Date": "00/00/00",
+ "Revision No": ""
+ },
+ "Version": {
+ "Firmware Package Build": "12.15.0-0239",
+ "Firmware Version": "2.130.403-4660",
+ "Bios Version": "3.30.02.2_4.16.08.00_0x06060A05",
+ "Preboot CLI Version": "04.04-020:#%00009",
+ "WebBIOS Version": "6.0-54-e_50-Rel",
+ "NVDATA Version": "2.09.03-0058",
+ "Boot Block Version": "2.02.00.00-0000",
+ "Bootloader Version": "09.250.01.219",
+ "Driver Name": "megaraid_sas",
+ "Driver Version": "07.725.01.00-rc1"
+ },
+ "Bus": {
+ "Vendor Id": 4096,
+ "Device Id": 121,
+ "SubVendor Id": 4116,
+ "SubDevice Id": 946,
+ "Host Interface": "PCI-E",
+ "Device Interface": "SAS-6G",
+ "Bus Number": 10,
+ "Device Number": 0,
+ "Function Number": 0,
+ "Domain ID": 0
+ },
+ "Pending Images in Flash": {
+ "Image name": "No pending images"
+ },
+ "Status": {
+ "Controller Status": "Optimal",
+ "Memory Correctable Errors": 0,
+ "Memory Uncorrectable Errors": 0,
+ "ECC Bucket Count": 0,
+ "Any Offline VD Cache Preserved": "No",
+ "BBU Status": 0,
+ "PD Firmware Download in progress": "No",
+ "Support PD Firmware Download": "Yes",
+ "Lock Key Assigned": "No",
+ "Failed to get lock key on bootup": "No",
+ "Lock key has not been backed up": "No",
+ "Bios was not detected during boot": "No",
+ "Controller must be rebooted to complete security operation": "No",
+ "A rollback operation is in progress": "No",
+ "At least one PFK exists in NVRAM": "No",
+ "SSC Policy is WB": "No",
+ "Controller has booted into safe mode": "No",
+ "Controller shutdown required": "No",
+ "Controller has booted into certificate provision mode": "No"
+ },
+ "Supported Adapter Operations": {
+ "Rebuild Rate": "Yes",
+ "CC Rate": "Yes",
+ "BGI Rate ": "Yes",
+ "Reconstruct Rate": "Yes",
+ "Patrol Read Rate": "Yes",
+ "Alarm Control": "Yes",
+ "Cluster Support": "No",
+ "BBU": "Yes",
+ "Spanning": "Yes",
+ "Dedicated Hot Spare": "Yes",
+ "Revertible Hot Spares": "Yes",
+ "Foreign Config Import": "Yes",
+ "Self Diagnostic": "Yes",
+ "Allow Mixed Redundancy on Array": "No",
+ "Global Hot Spares": "Yes",
+ "Deny SCSI Passthrough": "No",
+ "Deny SMP Passthrough": "No",
+ "Deny STP Passthrough": "No",
+ "Support more than 8 Phys": "Yes",
+ "FW and Event Time in GMT": "No",
+ "Support Enhanced Foreign Import": "Yes",
+ "Support Enclosure Enumeration": "Yes",
+ "Support Allowed Operations": "Yes",
+ "Abort CC on Error": "Yes",
+ "Support Multipath": "Yes",
+ "Support Odd & Even Drive count in RAID1E": "No",
+ "Support Security": "Yes",
+ "Support Config Page Model": "Yes",
+ "Support the OCE without adding drives": "Yes",
+ "Support EKM": "Yes",
+ "Snapshot Enabled": "Yes",
+ "Support PFK": "No",
+ "Support PI": "No",
+ "Support Ld BBM Info": "No",
+ "Support Shield State": "No",
+ "Block SSD Write Disk Cache Change": "No",
+ "Support Suspend Resume BG ops": "No",
+ "Support Emergency Spares": "Yes",
+ "Support Set Link Speed": "No",
+ "Support Boot Time PFK Change": "No",
+ "Support JBOD": "No",
+ "Disable Online PFK Change": "No",
+ "Support Perf Tuning": "No",
+ "Support SSD PatrolRead": "Yes",
+ "Real Time Scheduler": "Yes",
+ "Support Reset Now": "Yes",
+ "Support Emulated Drives": "No",
+ "Headless Mode": "Yes",
+ "Dedicated HotSpares Limited": "No",
+ "Point In Time Progress": "No",
+ "Extended LD": "No",
+ "Support Uneven span ": "No",
+ "Support Config Auto Balance": "No",
+ "Support Maintenance Mode": "No",
+ "Support Diagnostic results": "No",
+ "Support Ext Enclosure": "No",
+ "Support Sesmonitoring": "No",
+ "Support SecurityonJBOD": "No",
+ "Support ForceFlash": "No",
+ "Support DisableImmediateIO": "Yes",
+ "Support LargeIOSupport": "No",
+ "Support DrvActivityLEDSetting": "Yes",
+ "Support FlushWriteVerify": "No",
+ "Support CPLDUpdate": "No",
+ "Support ForceTo512e": "No",
+ "Support discardCacheDuringLDDelete": "No",
+ "Support JBOD Write cache": "No",
+ "Support Large QD Support": "No",
+ "Support Ctrl Info Extended": "No",
+ "Support IButton less": "No",
+ "Support AES Encryption Algorithm": "No",
+ "Support Encrypted MFC": "No",
+ "Support Snapdump": "No",
+ "Support Force Personality Change": "No",
+ "Support Dual Fw Image": "No",
+ "Support PSOC Update": "No",
+ "Support Secure Boot": "No",
+ "Support Debug Queue": "No",
+ "Support Least Latency Mode": "Yes",
+ "Support OnDemand Snapdump": "No",
+ "Support Clear Snapdump": "No",
+ "Support PHY current speed": "No",
+ "Support Lane current speed": "No",
+ "Support NVMe Width": "No",
+ "Support Lane DeviceType": "No",
+ "Support Extended Drive performance Monitoring": "No",
+ "Support NVMe Repair": "No",
+ "Support Platform Security": "No",
+ "Support None Mode Params": "No",
+ "Support Extended Controller Property": "No",
+ "Support Smart Poll Interval for DirectAttached": "No",
+ "Support Write Journal Pinning": "No",
+ "Support SMP Passthru with Port Number": "No",
+ "Support SnapDump Preboot Trace Buffer Toggle": "No",
+ "Support Parity Read Cache Bypass": "No",
+ "Support NVMe Init Error Device ConnectorIndex": "No",
+ "Support VolatileKey": "No",
+ "Support PSOC Part Information": "No",
+ "Support Slow array threshold calculation": "No",
+ "Support PCIe Reference Clock override": "No",
+ "Support PCIe PERST override": "No",
+ "Support Drive FW Download Mask": "No",
+ "Support Start of day PL log capture": "No",
+ "Support Drive Unrecovered Medium Error Count": "No"
+ },
+ "Enterprise Key management": {
+ "Capability": "Supported",
+ "Boot Agent": "Not Available",
+ "Configured": "No"
+ },
+ "Supported PD Operations": {
+ "Force Online": "Yes",
+ "Force Offline": "Yes",
+ "Force Rebuild": "Yes",
+ "Deny Force Failed": "No",
+ "Deny Force Good/Bad": "No",
+ "Deny Missing Replace": "No",
+ "Deny Clear": "No",
+ "Deny Locate": "No",
+ "Support Power State": "No",
+ "Set Power State For Cfg": "No",
+ "Support T10 Power State": "No",
+ "Support Temperature": "Yes",
+ "NCQ": "No",
+ "Support Max Rate SATA": "No",
+ "Support Degraded Media": "No",
+ "Support Parallel FW Update": "No",
+ "Support Drive Crypto Erase": "No",
+ "Support SSD Wear Gauge": "No",
+ "Support Sanitize": "No",
+ "Support Extended Sanitize": "No"
+ },
+ "Supported VD Operations": {
+ "Read Policy": "Yes",
+ "Write Policy": "Yes",
+ "IO Policy": "Yes",
+ "Access Policy": "Yes",
+ "Disk Cache Policy": "Yes",
+ "Reconstruction": "Yes",
+ "Deny Locate": "No",
+ "Deny CC": "No",
+ "Allow Ctrl Encryption": "No",
+ "Enable LDBBM": "No",
+ "Support FastPath": "Yes",
+ "Performance Metrics": "Yes",
+ "Power Savings": "No",
+ "Support Powersave Max With Cache": "No",
+ "Support Breakmirror": "No",
+ "Support SSC WriteBack": "Yes",
+ "Support SSC Association": "Yes",
+ "Support VD Hide": "No",
+ "Support VD Cachebypass": "No",
+ "Support VD discardCacheDuringLDDelete": "No",
+ "Support VD Scsi Unmap": "No"
+ },
+ "HwCfg": {
+ "ChipRevision": " B2",
+ "BatteryFRU": "N/A",
+ "Front End Port Count": 0,
+ "Backend Port Count": 8,
+ "BBU": "Present",
+ "Alarm": "Disable",
+ "Serial Debugger": "Present",
+ "NVRAM Size": "32KB",
+ "Flash Size": "8MB",
+ "On Board Memory Size": "512MB",
+ "CacheVault Flash Size": "NA",
+ "TPM": "Absent",
+ "Upgrade Key": "Present",
+ "On Board Expander": "Absent",
+ "Temperature Sensor for ROC": "Absent",
+ "Temperature Sensor for Controller": "Absent",
+ "Upgradable CPLD": "Absent",
+ "Upgradable PSOC": "Absent",
+ "Current Size of CacheCade (GB)": 0,
+ "Current Size of FW Cache (MB)": 349
+ },
+ "Policies": {
+ "Policies Table": [
+ {
+ "Policy": "Predictive Fail Poll Interval",
+ "Current": "300 sec",
+ "Default": ""
+ },
+ {
+ "Policy": "Interrupt Throttle Active Count",
+ "Current": "16",
+ "Default": ""
+ },
+ {
+ "Policy": "Interrupt Throttle Completion",
+ "Current": "50 us",
+ "Default": ""
+ },
+ {
+ "Policy": "Rebuild Rate",
+ "Current": "30 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "PR Rate",
+ "Current": "30 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "BGI Rate",
+ "Current": "30 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "Check Consistency Rate",
+ "Current": "30 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "Reconstruction Rate",
+ "Current": "30 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "Cache Flush Interval",
+ "Current": "4s",
+ "Default": ""
+ }
+ ],
+ "Flush Time(Default)": "4s",
+ "Drive Coercion Mode": "1GB",
+ "Auto Rebuild": "On",
+ "Battery Warning": "On",
+ "ECC Bucket Size": 15,
+ "ECC Bucket Leak Rate (hrs)": 24,
+ "Restore Hot Spare on Insertion": "Off",
+ "Expose Enclosure Devices": "On",
+ "Maintain PD Fail History": "On",
+ "Reorder Host Requests": "On",
+ "Auto detect BackPlane": "SGPIO/i2c SEP",
+ "Load Balance Mode": "Auto",
+ "Security Key Assigned": "Off",
+ "Disable Online Controller Reset": "Off",
+ "Use drive activity for locate": "Off"
+ },
+ "Boot": {
+ "BIOS Enumerate VDs": 1,
+ "Stop BIOS on Error": "On",
+ "Delay during POST": 4,
+ "Spin Down Mode": "None",
+ "Enable Ctrl-R": "No",
+ "Enable Web BIOS": "Yes",
+ "Enable PreBoot CLI": "Yes",
+ "Enable BIOS": "Yes",
+ "Max Drives to Spinup at One Time": 4,
+ "Maximum number of direct attached drives to spin up in 1 min": 20,
+ "Delay Among Spinup Groups (sec)": 12,
+ "Allow Boot with Preserved Cache": "Off"
+ },
+ "High Availability": {
+ "Topology Type": "None",
+ "Cluster Permitted": "No",
+ "Cluster Active": "No"
+ },
+ "Defaults": {
+ "Phy Polarity": 0,
+ "Phy PolaritySplit": 0,
+ "Strip Size": "128 KB",
+ "Write Policy": "WB",
+ "Read Policy": "No Read Ahead",
+ "Cache When BBU Bad": "Off",
+ "Cached IO": "Off",
+ "VD PowerSave Policy": "Controller Defined",
+ "Default spin down time (mins)": 30,
+ "Coercion Mode": "1 GB",
+ "ZCR Config": "Unknown",
+ "Max Chained Enclosures": 16,
+ "Direct PD Mapping": "No",
+ "Restore Hot Spare on Insertion": "No",
+ "Expose Enclosure Devices": "Yes",
+ "Maintain PD Fail History": "Yes",
+ "Zero Based Enclosure Enumeration": "No",
+ "Disable Puncturing": "Yes",
+ "EnableLDBBM": "No",
+ "DisableHII": "No",
+ "Un-Certified Hard Disk Drives": "Allow",
+ "SMART Mode": "Mode 6",
+ "Enable LED Header": "No",
+ "LED Show Drive Activity": "No",
+ "Dirty LED Shows Drive Activity": "No",
+ "EnableCrashDump": "No",
+ "Disable Online Controller Reset": "No",
+ "Treat Single span R1E as R10": "No",
+ "Power Saving option": "Enabled",
+ "TTY Log In Flash": "No",
+ "Auto Enhanced Import": "No",
+ "BreakMirror RAID Support": "No",
+ "Disable Join Mirror": "No",
+ "Enable Shield State": "No",
+ "Time taken to detect CME": "60 sec"
+ },
+ "Capabilities": {
+ "Supported Drives": "SAS, SATA",
+ "RAID Level Supported": "RAID0, RAID1(2 or more drives), RAID5, RAID6, RAID00, RAID10(2 or more drives per span), RAID50, RAID60",
+ "Enable JBOD": "No",
+ "Mix in Enclosure": "Allowed",
+ "Mix of SAS/SATA of HDD type in VD": "Not Allowed",
+ "Mix of SAS/SATA of SSD type in VD": "Not Allowed",
+ "Mix of SSD/HDD in VD": "Not Allowed",
+ "SAS Disable": "No",
+ "Max Arms Per VD": 32,
+ "Max Spans Per VD": 8,
+ "Max Arrays": 128,
+ "Max VD per array": 16,
+ "Max Number of VDs": 64,
+ "Max Parallel Commands": 1008,
+ "Max SGE Count": 60,
+ "Max Data Transfer Size": "8192 sectors",
+ "Max Strips PerIO": 42,
+ "Max Configurable CacheCade Size(GB)": 512,
+ "Max Transportable DGs": 0,
+ "Enable Snapdump": "No",
+ "Enable SCSI Unmap": "Yes",
+ "Read cache bypass enabled for Parity RAID LDs": "No",
+ "FDE Drive Mix Support": "No",
+ "Min Strip Size": "8 KB",
+ "Max Strip Size": "1.000 MB"
+ },
+ "Scheduled Tasks": {
+ "Consistency Check Reoccurrence": "168 hrs",
+ "Next Consistency check launch": "04/20/2024, 03:00:00",
+ "Patrol Read Reoccurrence": "168 hrs",
+ "Next Patrol Read launch": "04/20/2024, 03:00:00",
+ "Battery learn Reoccurrence": "672 hrs",
+ "Next Battery Learn": "04/18/2024, 18:32:56",
+ "OEMID": "Lenovo"
+ },
+ "Security Protocol properties": {
+ "Security Protocol": "None"
+ },
+ "Drive Groups": 1,
+ "TOPOLOGY": [
+ {
+ "DG": 0,
+ "Arr": "-",
+ "Row": "-",
+ "EID:Slot": "-",
+ "DID": "-",
+ "Type": "RAID6",
+ "State": "Optl",
+ "BT": "N",
+ "Size": "58.207 TB",
+ "PDC": "dsbl",
+ "PI": "N",
+ "SED": "N",
+ "DS3": "none",
+ "FSpace": "N",
+ "TR": "N"
+ },
+ {
+ "DG": 0,
+ "Arr": 0,
+ "Row": "-",
+ "EID:Slot": "-",
+ "DID": "-",
+ "Type": "RAID6",
+ "State": "Optl",
+ "BT": "N",
+ "Size": "58.207 TB",
+ "PDC": "dsbl",
+ "PI": "N",
+ "SED": "N",
+ "DS3": "none",
+ "FSpace": "N",
+ "TR": "N"
+ },
+ {
+ "DG": 0,
+ "Arr": 0,
+ "Row": 0,
+ "EID:Slot": "252:3",
+ "DID": 35,
+ "Type": "DRIVE",
+ "State": "Onln",
+ "BT": "N",
+ "Size": "14.551 TB",
+ "PDC": "dsbl",
+ "PI": "N",
+ "SED": "N",
+ "DS3": "none",
+ "FSpace": "-",
+ "TR": "N"
+ },
+ {
+ "DG": 0,
+ "Arr": 0,
+ "Row": 1,
+ "EID:Slot": "252:5",
+ "DID": 31,
+ "Type": "DRIVE",
+ "State": "Onln",
+ "BT": "N",
+ "Size": "14.551 TB",
+ "PDC": "dsbl",
+ "PI": "N",
+ "SED": "N",
+ "DS3": "none",
+ "FSpace": "-",
+ "TR": "N"
+ },
+ {
+ "DG": 0,
+ "Arr": 0,
+ "Row": 2,
+ "EID:Slot": "252:4",
+ "DID": 30,
+ "Type": "DRIVE",
+ "State": "Onln",
+ "BT": "N",
+ "Size": "14.551 TB",
+ "PDC": "dsbl",
+ "PI": "N",
+ "SED": "N",
+ "DS3": "none",
+ "FSpace": "-",
+ "TR": "N"
+ },
+ {
+ "DG": 0,
+ "Arr": 0,
+ "Row": 3,
+ "EID:Slot": "252:7",
+ "DID": 32,
+ "Type": "DRIVE",
+ "State": "Onln",
+ "BT": "N",
+ "Size": "14.551 TB",
+ "PDC": "dsbl",
+ "PI": "N",
+ "SED": "N",
+ "DS3": "none",
+ "FSpace": "-",
+ "TR": "N"
+ },
+ {
+ "DG": 0,
+ "Arr": 0,
+ "Row": 4,
+ "EID:Slot": "252:0",
+ "DID": 34,
+ "Type": "DRIVE",
+ "State": "Onln",
+ "BT": "N",
+ "Size": "14.551 TB",
+ "PDC": "dsbl",
+ "PI": "N",
+ "SED": "N",
+ "DS3": "none",
+ "FSpace": "-",
+ "TR": "N"
+ },
+ {
+ "DG": 0,
+ "Arr": 0,
+ "Row": 5,
+ "EID:Slot": "252:1",
+ "DID": 33,
+ "Type": "DRIVE",
+ "State": "Onln",
+ "BT": "N",
+ "Size": "14.551 TB",
+ "PDC": "dsbl",
+ "PI": "N",
+ "SED": "N",
+ "DS3": "none",
+ "FSpace": "-",
+ "TR": "N"
+ }
+ ],
+ "Virtual Drives": 1,
+ "VD LIST": [
+ {
+ "DG/VD": "0/0",
+ "TYPE": "RAID6",
+ "State": "Optl",
+ "Access": "RW",
+ "Consist": "Yes",
+ "Cache": "RWBD",
+ "Cac": "-",
+ "sCC": "ON",
+ "Size": "58.207 TB",
+ "Name": "Sluthub"
+ }
+ ],
+ "Physical Drives": 6,
+ "PD LIST": [
+ {
+ "EID:Slt": "252:0",
+ "DID": 34,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ },
+ {
+ "EID:Slt": "252:1",
+ "DID": 33,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ },
+ {
+ "EID:Slt": "252:3",
+ "DID": 35,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ },
+ {
+ "EID:Slt": "252:4",
+ "DID": 30,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ },
+ {
+ "EID:Slt": "252:5",
+ "DID": 31,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ },
+ {
+ "EID:Slt": "252:7",
+ "DID": 32,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ }
+ ],
+ "Enclosures": 1,
+ "Enclosure LIST": [
+ {
+ "EID": 252,
+ "State": "OK",
+ "Slots": 8,
+ "PD": 6,
+ "PS": 0,
+ "Fans": 0,
+ "TSs": 0,
+ "Alms": 0,
+ "SIM": 1,
+ "Port#": "-",
+ "ProdID": "SGPIO",
+ "VendorSpecific": " "
+ }
+ ],
+ "BBU_Info": [
+ {
+ "Model": "iBBU08",
+ "State": "Optimal",
+ "RetentionTime": "48 hours +",
+ "Temp": "34C",
+ "Mode": "4",
+ "MfgDate": "2011/03/18",
+ "Next Learn": "2024/04/18 18:32:56"
+ }
+ ]
+ }
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/storcli/testdata/megaraid-drives-info.json b/src/go/plugin/go.d/modules/storcli/testdata/megaraid-drives-info.json
new file mode 100644
index 000000000..b8735d6a3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/testdata/megaraid-drives-info.json
@@ -0,0 +1,495 @@
+{
+ "Controllers": [
+ {
+ "Command Status": {
+ "CLI Version": "007.2807.0000.0000 Dec 22, 2023",
+ "Operating system": "Linux 6.5.13-1-pve",
+ "Controller": 0,
+ "Status": "Success",
+ "Description": "Show Drive Information Succeeded."
+ },
+ "Response Data": {
+ "Drive /c0/e252/s0": [
+ {
+ "EID:Slt": "252:0",
+ "DID": 34,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ }
+ ],
+ "Drive /c0/e252/s0 - Detailed Information": {
+ "Drive /c0/e252/s0 State": {
+ "Shield Counter": 0,
+ "Media Error Count": 0,
+ "Other Error Count": 0,
+ "BBM Error Count": 0,
+ "Drive Temperature": " 28C (82.40 F)",
+ "Predictive Failure Count": 0,
+ "S.M.A.R.T alert flagged by drive": "No"
+ },
+ "Drive /c0/e252/s0 Device attributes": {
+ "SN": " ZL2PVFA8",
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST16000NM001G-2KK103",
+ "NAND Vendor": "NA",
+ "WWN": "5000C500E54F4EBB",
+ "Firmware Revision": "SN03 ",
+ "Raw size": "14.552 TB [0x746c00000 Sectors]",
+ "Coerced size": "14.551 TB [0x746a52800 Sectors]",
+ "Non Coerced size": "14.551 TB [0x746b00000 Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "6.0Gb/s",
+ "NCQ setting": "N/A",
+ "Write Cache": "N/A",
+ "Logical Sector Size": "512B",
+ "Physical Sector Size": "512B",
+ "Connector Name": ""
+ },
+ "Drive /c0/e252/s0 Policies/Settings": {
+ "Drive position": "DriveGroup:0, Span:0, Row:4",
+ "Enclosure position": "1",
+ "Connected Port Number": "1(path0) ",
+ "Sequence Number": 2,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": 0,
+ "Successful diagnostics completion on": "N/A",
+ "FDE Type": "None",
+ "SED Capable": "No",
+ "SED Enabled": "No",
+ "Secured": "No",
+ "Cryptographic Erase Capable": "No",
+ "Sanitize Support": "Not supported",
+ "Locked": "No",
+ "Needs EKM Attention": "No",
+ "PI Eligible": "No",
+ "Drive is formatted for PI": "No",
+ "PI type": "No PI",
+ "Number of bytes of user data in LBA": "512B",
+ "Certified": "No",
+ "Wide Port Capable": "No",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "6.0Gb/s",
+ "SAS address": "0x4433221103000000"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 4c 5a 50 32 46 56 38 41 00 00 00 00 00 00 4e 53 33 30 20 20 20 20 54 53 36 31 30 30 4e 30 30 4d 31 30 2d 47 4b 32 31 4b 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e252/s1": [
+ {
+ "EID:Slt": "252:1",
+ "DID": 33,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ }
+ ],
+ "Drive /c0/e252/s1 - Detailed Information": {
+ "Drive /c0/e252/s1 State": {
+ "Shield Counter": 0,
+ "Media Error Count": 0,
+ "Other Error Count": 0,
+ "BBM Error Count": 0,
+ "Drive Temperature": " 27C (80.60 F)",
+ "Predictive Failure Count": 0,
+ "S.M.A.R.T alert flagged by drive": "No"
+ },
+ "Drive /c0/e252/s1 Device attributes": {
+ "SN": " ZL2PY6LF",
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST16000NM001G-2KK103",
+ "NAND Vendor": "NA",
+ "WWN": "5000C500E5659BA7",
+ "Firmware Revision": "SN03 ",
+ "Raw size": "14.552 TB [0x746c00000 Sectors]",
+ "Coerced size": "14.551 TB [0x746a52800 Sectors]",
+ "Non Coerced size": "14.551 TB [0x746b00000 Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "6.0Gb/s",
+ "NCQ setting": "N/A",
+ "Write Cache": "N/A",
+ "Logical Sector Size": "512B",
+ "Physical Sector Size": "512B",
+ "Connector Name": ""
+ },
+ "Drive /c0/e252/s1 Policies/Settings": {
+ "Drive position": "DriveGroup:0, Span:0, Row:5",
+ "Enclosure position": "1",
+ "Connected Port Number": "2(path0) ",
+ "Sequence Number": 2,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": 0,
+ "Successful diagnostics completion on": "N/A",
+ "FDE Type": "None",
+ "SED Capable": "No",
+ "SED Enabled": "No",
+ "Secured": "No",
+ "Cryptographic Erase Capable": "No",
+ "Sanitize Support": "Not supported",
+ "Locked": "No",
+ "Needs EKM Attention": "No",
+ "PI Eligible": "No",
+ "Drive is formatted for PI": "No",
+ "PI type": "No PI",
+ "Number of bytes of user data in LBA": "512B",
+ "Certified": "No",
+ "Wide Port Capable": "No",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "6.0Gb/s",
+ "SAS address": "0x4433221102000000"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 4c 5a 50 32 36 59 46 4c 00 00 00 00 00 00 4e 53 33 30 20 20 20 20 54 53 36 31 30 30 4e 30 30 4d 31 30 2d 47 4b 32 31 4b 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e252/s3": [
+ {
+ "EID:Slt": "252:3",
+ "DID": 35,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ }
+ ],
+ "Drive /c0/e252/s3 - Detailed Information": {
+ "Drive /c0/e252/s3 State": {
+ "Shield Counter": 0,
+ "Media Error Count": 0,
+ "Other Error Count": 0,
+ "BBM Error Count": 0,
+ "Drive Temperature": " 28C (82.40 F)",
+ "Predictive Failure Count": 0,
+ "S.M.A.R.T alert flagged by drive": "No"
+ },
+ "Drive /c0/e252/s3 Device attributes": {
+ "SN": " ZL2M2WQ3",
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST16000NM001G-2KK103",
+ "NAND Vendor": "NA",
+ "WWN": "5000C500DC79B194",
+ "Firmware Revision": "SN03 ",
+ "Raw size": "14.552 TB [0x746c00000 Sectors]",
+ "Coerced size": "14.551 TB [0x746a52800 Sectors]",
+ "Non Coerced size": "14.551 TB [0x746b00000 Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "6.0Gb/s",
+ "NCQ setting": "N/A",
+ "Write Cache": "N/A",
+ "Logical Sector Size": "512B",
+ "Physical Sector Size": "512B",
+ "Connector Name": ""
+ },
+ "Drive /c0/e252/s3 Policies/Settings": {
+ "Drive position": "DriveGroup:0, Span:0, Row:0",
+ "Enclosure position": "1",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 2,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": 0,
+ "Successful diagnostics completion on": "N/A",
+ "FDE Type": "None",
+ "SED Capable": "No",
+ "SED Enabled": "No",
+ "Secured": "No",
+ "Cryptographic Erase Capable": "No",
+ "Sanitize Support": "Not supported",
+ "Locked": "No",
+ "Needs EKM Attention": "No",
+ "PI Eligible": "No",
+ "Drive is formatted for PI": "No",
+ "PI type": "No PI",
+ "Number of bytes of user data in LBA": "512B",
+ "Certified": "No",
+ "Wide Port Capable": "No",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "6.0Gb/s",
+ "SAS address": "0x4433221100000000"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 4c 5a 4d 32 57 32 33 51 00 00 00 00 00 00 4e 53 33 30 20 20 20 20 54 53 36 31 30 30 4e 30 30 4d 31 30 2d 47 4b 32 31 4b 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e252/s4": [
+ {
+ "EID:Slt": "252:4",
+ "DID": 30,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ }
+ ],
+ "Drive /c0/e252/s4 - Detailed Information": {
+ "Drive /c0/e252/s4 State": {
+ "Shield Counter": 0,
+ "Media Error Count": 0,
+ "Other Error Count": 0,
+ "BBM Error Count": 0,
+ "Drive Temperature": " 28C (82.40 F)",
+ "Predictive Failure Count": 0,
+ "S.M.A.R.T alert flagged by drive": "No"
+ },
+ "Drive /c0/e252/s4 Device attributes": {
+ "SN": " WL201HYL",
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST16000NM001G-2KK103",
+ "NAND Vendor": "NA",
+ "WWN": "5000C500D59840FE",
+ "Firmware Revision": "SN04 ",
+ "Raw size": "14.552 TB [0x746c00000 Sectors]",
+ "Coerced size": "14.551 TB [0x746a52800 Sectors]",
+ "Non Coerced size": "14.551 TB [0x746b00000 Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "6.0Gb/s",
+ "NCQ setting": "N/A",
+ "Write Cache": "N/A",
+ "Logical Sector Size": "512B",
+ "Physical Sector Size": "512B",
+ "Connector Name": ""
+ },
+ "Drive /c0/e252/s4 Policies/Settings": {
+ "Drive position": "DriveGroup:0, Span:0, Row:2",
+ "Enclosure position": "1",
+ "Connected Port Number": "3(path0) ",
+ "Sequence Number": 2,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": 0,
+ "Successful diagnostics completion on": "N/A",
+ "FDE Type": "None",
+ "SED Capable": "No",
+ "SED Enabled": "No",
+ "Secured": "No",
+ "Cryptographic Erase Capable": "No",
+ "Sanitize Support": "Not supported",
+ "Locked": "No",
+ "Needs EKM Attention": "No",
+ "PI Eligible": "No",
+ "Drive is formatted for PI": "No",
+ "PI type": "No PI",
+ "Number of bytes of user data in LBA": "512B",
+ "Certified": "No",
+ "Wide Port Capable": "No",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "6.0Gb/s",
+ "SAS address": "0x4433221104000000"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 4c 57 30 32 48 31 4c 59 00 00 00 00 00 00 4e 53 34 30 20 20 20 20 54 53 36 31 30 30 4e 30 30 4d 31 30 2d 47 4b 32 31 4b 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e252/s5": [
+ {
+ "EID:Slt": "252:5",
+ "DID": 31,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ }
+ ],
+ "Drive /c0/e252/s5 - Detailed Information": {
+ "Drive /c0/e252/s5 State": {
+ "Shield Counter": 0,
+ "Media Error Count": 0,
+ "Other Error Count": 0,
+ "BBM Error Count": 0,
+ "Drive Temperature": " 28C (82.40 F)",
+ "Predictive Failure Count": 0,
+ "S.M.A.R.T alert flagged by drive": "No"
+ },
+ "Drive /c0/e252/s5 Device attributes": {
+ "SN": " ZL21DC50",
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST16000NM001G-2KK103",
+ "NAND Vendor": "NA",
+ "WWN": "5000C500C36C8BCD",
+ "Firmware Revision": "SN04 ",
+ "Raw size": "14.552 TB [0x746c00000 Sectors]",
+ "Coerced size": "14.551 TB [0x746a52800 Sectors]",
+ "Non Coerced size": "14.551 TB [0x746b00000 Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "6.0Gb/s",
+ "NCQ setting": "N/A",
+ "Write Cache": "N/A",
+ "Logical Sector Size": "512B",
+ "Physical Sector Size": "512B",
+ "Connector Name": ""
+ },
+ "Drive /c0/e252/s5 Policies/Settings": {
+ "Drive position": "DriveGroup:0, Span:0, Row:1",
+ "Enclosure position": "1",
+ "Connected Port Number": "4(path0) ",
+ "Sequence Number": 2,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": 0,
+ "Successful diagnostics completion on": "N/A",
+ "FDE Type": "None",
+ "SED Capable": "No",
+ "SED Enabled": "No",
+ "Secured": "No",
+ "Cryptographic Erase Capable": "No",
+ "Sanitize Support": "Not supported",
+ "Locked": "No",
+ "Needs EKM Attention": "No",
+ "PI Eligible": "No",
+ "Drive is formatted for PI": "No",
+ "PI type": "No PI",
+ "Number of bytes of user data in LBA": "512B",
+ "Certified": "No",
+ "Wide Port Capable": "No",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "6.0Gb/s",
+ "SAS address": "0x4433221105000000"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 4c 5a 31 32 43 44 30 35 00 00 00 00 00 00 4e 53 34 30 20 20 20 20 54 53 36 31 30 30 4e 30 30 4d 31 30 2d 47 4b 32 31 4b 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e252/s7": [
+ {
+ "EID:Slt": "252:7",
+ "DID": 32,
+ "State": "Onln",
+ "DG": 0,
+ "Size": "14.551 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "N",
+ "PI": "N",
+ "SeSz": "512B",
+ "Model": "ST16000NM001G-2KK103",
+ "Sp": "U",
+ "Type": "-"
+ }
+ ],
+ "Drive /c0/e252/s7 - Detailed Information": {
+ "Drive /c0/e252/s7 State": {
+ "Shield Counter": 0,
+ "Media Error Count": 0,
+ "Other Error Count": 0,
+ "BBM Error Count": 0,
+ "Drive Temperature": " 28C (82.40 F)",
+ "Predictive Failure Count": 0,
+ "S.M.A.R.T alert flagged by drive": "No"
+ },
+ "Drive /c0/e252/s7 Device attributes": {
+ "SN": " WL204LF2",
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST16000NM001G-2KK103",
+ "NAND Vendor": "NA",
+ "WWN": "5000C500D6061539",
+ "Firmware Revision": "SB30 ",
+ "Raw size": "14.552 TB [0x746c00000 Sectors]",
+ "Coerced size": "14.551 TB [0x746a52800 Sectors]",
+ "Non Coerced size": "14.551 TB [0x746b00000 Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "6.0Gb/s",
+ "NCQ setting": "N/A",
+ "Write Cache": "N/A",
+ "Logical Sector Size": "512B",
+ "Physical Sector Size": "512B",
+ "Connector Name": ""
+ },
+ "Drive /c0/e252/s7 Policies/Settings": {
+ "Drive position": "DriveGroup:0, Span:0, Row:3",
+ "Enclosure position": "1",
+ "Connected Port Number": "5(path0) ",
+ "Sequence Number": 4,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": 0,
+ "Successful diagnostics completion on": "N/A",
+ "FDE Type": "None",
+ "SED Capable": "No",
+ "SED Enabled": "No",
+ "Secured": "No",
+ "Cryptographic Erase Capable": "No",
+ "Sanitize Support": "Not supported",
+ "Locked": "No",
+ "Needs EKM Attention": "No",
+ "PI Eligible": "No",
+ "Drive is formatted for PI": "No",
+ "PI type": "No PI",
+ "Number of bytes of user data in LBA": "512B",
+ "Certified": "No",
+ "Wide Port Capable": "No",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "6.0Gb/s",
+ "SAS address": "0x4433221107000000"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 4c 57 30 32 4c 34 32 46 00 00 00 00 00 00 42 53 30 33 20 20 20 20 54 53 36 31 30 30 4e 30 30 4d 31 30 2d 47 4b 32 31 4b 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ }
+ }
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/storcli/testdata/mpt3sas-controllers-info.json b/src/go/plugin/go.d/modules/storcli/testdata/mpt3sas-controllers-info.json
new file mode 100644
index 000000000..02eefd719
--- /dev/null
+++ b/src/go/plugin/go.d/modules/storcli/testdata/mpt3sas-controllers-info.json
@@ -0,0 +1,2260 @@
+{
+ "Controllers": [
+ {
+ "Command Status": {
+ "CLI Version": "007.2703.0000.0000 July 03, 2023",
+ "Operating system": "Linux 6.9.4-dx",
+ "Controller": 0,
+ "Status": "Success",
+ "Description": "None"
+ },
+ "Response Data": {
+ "Basics": {
+ "Controller": 0,
+ "Adapter Type": " SAS3808(A0)",
+ "Model": "HBA 9500-8i",
+ "Serial Number": "REDACTED",
+ "Current System Date/time": "06/17/2024 18:39:45",
+ "Concurrent commands supported": 4992,
+ "SAS Address": " 500062b20828c940",
+ "PCI Address": "00:01:00:00"
+ },
+ "Version": {
+ "Firmware Package Build": "28.00.00.00",
+ "Firmware Version": "28.00.00.00",
+ "Bios Version": "09.55.00.00_28.00.00.00",
+ "NVDATA Version": "28.01.00.12",
+ "PSOC FW Version": "0x0064",
+ "PSOC Part Number": "14790",
+ "Driver Name": "mpt3sas",
+ "Driver Version": "48.100.00.00"
+ },
+ "PCI Version": {
+ "Vendor Id": 4096,
+ "Device Id": 230,
+ "SubVendor Id": 4096,
+ "SubDevice Id": 16512,
+ "Host Interface": "PCIE",
+ "Device Interface": "SAS-12G",
+ "Bus Number": 1,
+ "Device Number": 0,
+ "Function Number": 0,
+ "Domain ID": 0
+ },
+ "Pending Images in Flash": {
+ "Image name": "No pending images"
+ },
+ "Status": {
+ "Controller Status": "OK",
+ "Memory Correctable Errors": 0,
+ "Memory Uncorrectable Errors": 0,
+ "Bios was not detected during boot": "No",
+ "Controller has booted into safe mode": "No",
+ "Controller has booted into certificate provision mode": "No",
+ "Package Stamp Mismatch": "No"
+ },
+ "Supported Adapter Operations": {
+ "Alarm Control": "No",
+ "Cluster Support": "No",
+ "Self Diagnostic": "No",
+ "Deny SCSI Passthrough": "No",
+ "Deny SMP Passthrough": "No",
+ "Deny STP Passthrough": "No",
+ "Support more than 8 Phys": "Yes",
+ "FW and Event Time in GMT": "No",
+ "Support Enclosure Enumeration": "Yes",
+ "Support Allowed Operations": "Yes",
+ "Support Multipath": "Yes",
+ "Support Security": "Yes",
+ "Support Config Page Model": "No",
+ "Support the OCE without adding drives": "No",
+ "support EKM": "No",
+ "Snapshot Enabled": "No",
+ "Support PFK": "No",
+ "Support PI": "No",
+ "Support Shield State": "No",
+ "Support Set Link Speed": "No",
+ "Support JBOD": "No",
+ "Disable Online PFK Change": "No",
+ "Real Time Scheduler": "No",
+ "Support Reset Now": "No",
+ "Support Emulated Drives": "No",
+ "Support Secure Boot": "Yes",
+ "Support Platform Security": "No",
+ "Support Package Stamp Mismatch Reporting": "Yes",
+ "Support PSOC Update": "Yes",
+ "Support PSOC Part Information": "Yes",
+ "Support PSOC Version Information": "Yes"
+ },
+ "HwCfg": {
+ "ChipRevision": " A0",
+ "BatteryFRU": "N/A",
+ "Front End Port Count": 1,
+ "Backend Port Count": 11,
+ "Serial Debugger": "Absent",
+ "NVRAM Size": "0KB",
+ "Flash Size": "16MB",
+ "On Board Memory Size": "0MB",
+ "On Board Expander": "Absent",
+ "Temperature Sensor for ROC": "Present",
+ "Temperature Sensor for Controller": "Absent",
+ "Current Size of CacheCade (GB)": 0,
+ "Current Size of FW Cache (MB)": 0,
+ "ROC temperature(Degree Celsius)": 44
+ },
+ "Policies": {
+ "Policies Table": [
+ {
+ "Policy": "Predictive Fail Poll Interval",
+ "Current": "0 sec",
+ "Default": ""
+ },
+ {
+ "Policy": "Interrupt Throttle Active Count",
+ "Current": "0",
+ "Default": ""
+ },
+ {
+ "Policy": "Interrupt Throttle Completion",
+ "Current": "0 us",
+ "Default": ""
+ },
+ {
+ "Policy": "Rebuild Rate",
+ "Current": "0 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "PR Rate",
+ "Current": "0 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "BGI Rate",
+ "Current": "0 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "Check Consistency Rate",
+ "Current": "0 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "Reconstruction Rate",
+ "Current": "0 %",
+ "Default": "30%"
+ },
+ {
+ "Policy": "Cache Flush Interval",
+ "Current": "0s",
+ "Default": ""
+ }
+ ],
+ "Flush Time(Default)": "4s",
+ "Drive Coercion Mode": "none",
+ "Auto Rebuild": "Off",
+ "Battery Warning": "Off",
+ "ECC Bucket Size": 0,
+ "ECC Bucket Leak Rate (hrs)": 0,
+ "Restore HotSpare on Insertion": "Off",
+ "Expose Enclosure Devices": "Off",
+ "Maintain PD Fail History": "Off",
+ "Reorder Host Requests": "On",
+ "Auto detect BackPlane": "SGPIO/i2c SEP",
+ "Load Balance Mode": "None",
+ "Security Key Assigned": "Off",
+ "Disable Online Controller Reset": "Off",
+ "Use drive activity for locate": "Off"
+ },
+ "Boot": {
+ "Max Drives to Spinup at One Time": 2,
+ "Maximum number of direct attached drives to spin up in 1 min": 60,
+ "Delay Among Spinup Groups (sec)": 2,
+ "Allow Boot with Preserved Cache": "On"
+ },
+ "Defaults": {
+ "Phy Polarity": 0,
+ "Phy PolaritySplit": 0,
+ "Cached IO": "Off",
+ "Default spin down time (mins)": 0,
+ "Coercion Mode": "None",
+ "ZCR Config": "Unknown",
+ "Max Chained Enclosures": 0,
+ "Direct PD Mapping": "No",
+ "Restore Hot Spare on Insertion": "No",
+ "Expose Enclosure Devices": "No",
+ "Maintain PD Fail History": "No",
+ "Zero Based Enclosure Enumeration": "No",
+ "Disable Puncturing": "No",
+ "Un-Certified Hard Disk Drives": "Block",
+ "SMART Mode": "Mode 6",
+ "Enable LED Header": "No",
+ "LED Show Drive Activity": "No",
+ "Dirty LED Shows Drive Activity": "No",
+ "EnableCrashDump": "No",
+ "Disable Online Controller Reset": "No",
+ "Treat Single span R1E as R10": "No",
+ "Power Saving option": "Enable",
+ "TTY Log In Flash": "No",
+ "Auto Enhanced Import": "No",
+ "Enable Shield State": "No",
+ "Time taken to detect CME": "60 sec"
+ },
+ "Capabilities": {
+ "Supported Drives": "SAS, SATA, NVMe",
+ "Enable JBOD": "Yes",
+ "Max Parallel Commands": 4992,
+ "Max SGE Count": 128,
+ "Max Data Transfer Size": "32 sectors",
+ "Max Strips PerIO": 0,
+ "Max Configurable CacheCade Size": 0,
+ "Min Strip Size": "512Bytes",
+ "Max Strip Size": "512Bytes"
+ },
+ "Scheduled Tasks": "NA",
+ "Secure Boot": {
+ "Secure Boot Enabled": "Yes",
+ "Controller in Soft Secure Mode": "No",
+ "Controller in Hard Secure Mode": "Yes",
+ "Key Update Pending": "No",
+ "Remaining Secure Boot Key Slots": 7
+ },
+ "Security Protocol properties": {
+ "Security Protocol": "None"
+ },
+ "Enclosure Information": [
+ {
+ "EID": 23,
+ "State": "OK",
+ "Slots": 24,
+ "PD": 23,
+ "PS": 0,
+ "Fans": 0,
+ "TSs": 2,
+ "Alms": 0,
+ "SIM": 0,
+ "ProdID": "SC846-P",
+ "VendorSpecific": "x40-66.16.11.0"
+ },
+ {
+ "EID": 30,
+ "State": "OK",
+ "Slots": 12,
+ "PD": 6,
+ "PS": 0,
+ "Fans": 0,
+ "TSs": 2,
+ "Alms": 0,
+ "SIM": 0,
+ "ProdID": "SC826-P",
+ "VendorSpecific": "x28-66.16.11.0"
+ }
+ ],
+ "Physical Device Information": {
+ "Drive /c0/e23/s0": [
+ {
+ "EID:Slt": "23:0",
+ "DID": 0,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "12.732 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "ST14000NM001G-2KJ103",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s0 - Detailed Information": {
+ "Drive /c0/e23/s0 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s0 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST14000NM001G-2KJ103",
+ "NAND Vendor": "NA",
+ "SN": " REDACTED",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SN03 ",
+ "Raw size": "12.732 TB [0x65ddfffff Sectors]",
+ "Coerced size": "12.732 TB [0x65ddfffff Sectors]",
+ "Non Coerced size": "12.732 TB [0x65ddfffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 27344764927,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s0 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4540"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 4c 5a 4a 32 30 46 4b 34 00 00 00 00 00 00 4e 53 33 30 20 20 20 20 54 53 34 31 30 30 4e 30 30 4d 31 30 2d 47 4b 32 31 4a 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s1": [
+ {
+ "EID:Slt": "23:1",
+ "DID": 1,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "1.819 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "WDC WDS200T1R0A-68A4W0",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s1 - Detailed Information": {
+ "Drive /c0/e23/s1 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s1 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "WDC WDS200T1R0A-68A4W0",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "411000WR",
+ "Raw size": "1.819 TB [0xe8e088af Sectors]",
+ "Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Non Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 3907029167,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s1 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4541"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 30 32 35 33 34 43 34 34 38 30 35 31 20 20 20 20 20 20 20 20 00 00 00 00 00 00 31 34 30 31 30 30 52 57 44 57 20 43 57 20 53 44 30 32 54 30 52 31 41 30 36 2d 41 38 57 34 20 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 01 80 00 40 00 2f 00 40 00 02 00 00 06 00 ff 3f 10 00 3f 00 10 fc fb 00 01 91 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s2": [
+ {
+ "EID:Slt": "23:2",
+ "DID": 2,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "16.370 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "ST18000NM000J-2TV103",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s2 - Detailed Information": {
+ "Drive /c0/e23/s2 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s2 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST18000NM000J-2TV103",
+ "NAND Vendor": "NA",
+ "SN": " REDACTED",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SN02 ",
+ "Raw size": "16.370 TB [0x82f7fffff Sectors]",
+ "Coerced size": "16.370 TB [0x82f7fffff Sectors]",
+ "Non Coerced size": "16.370 TB [0x82f7fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 35156656127,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s2 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4542"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 52 5a 33 35 47 57 50 30 00 00 00 00 00 00 4e 53 32 30 20 20 20 20 54 53 38 31 30 30 4e 30 30 4d 30 30 2d 4a 54 32 31 56 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s3": [
+ {
+ "EID:Slt": "23:3",
+ "DID": 3,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "1.819 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "WDC WDS200T1R0A-68A4W0",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s3 - Detailed Information": {
+ "Drive /c0/e23/s3 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s3 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "WDC WDS200T1R0A-68A4W0",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "411000WR",
+ "Raw size": "1.819 TB [0xe8e088af Sectors]",
+ "Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Non Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 3907029167,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s3 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4543"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 31 32 30 35 44 37 30 38 36 30 33 31 20 20 20 20 20 20 20 20 00 00 00 00 00 00 31 34 30 31 30 30 52 57 44 57 20 43 57 20 53 44 30 32 54 30 52 31 41 30 36 2d 41 38 57 34 20 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 01 80 00 40 00 2f 00 40 00 02 00 00 06 00 ff 3f 10 00 3f 00 10 fc fb 00 01 91 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s4": [
+ {
+ "EID:Slt": "23:4",
+ "DID": 4,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "10.913 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "4 KB",
+ "Model": "HGST HUH721212ALN604",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s4 - Detailed Information": {
+ "Drive /c0/e23/s4 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s4 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "HGST HUH721212ALN604",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "LEGNW925",
+ "Raw size": "10.913 TB [0xae9fffff Sectors]",
+ "Coerced size": "10.913 TB [0xae9fffff Sectors]",
+ "Non Coerced size": "10.913 TB [0xae9fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "4 KB",
+ "Config ID": "NA",
+ "Number of Blocks": 2929721343,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s4 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4544"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 04 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 51 35 35 4a 34 44 42 32 20 20 20 20 20 20 20 20 20 20 20 20 03 00 00 00 38 00 45 4c 4e 47 39 57 35 32 47 48 54 53 48 20 48 55 32 37 32 31 32 31 4c 41 36 4e 34 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 02 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 00 59 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s5": [
+ {
+ "EID:Slt": "23:5",
+ "DID": 5,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "894.252 GB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "KINGSTON SEDC500R960G",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s5 - Detailed Information": {
+ "Drive /c0/e23/s5 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s5 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "KINGSTON SEDC500R960G",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SCEKJ2.7",
+ "Raw size": "894.252 GB [0x6fc81aaf Sectors]",
+ "Coerced size": "894.252 GB [0x6fc81aaf Sectors]",
+ "Non Coerced size": "894.252 GB [0x6fc81aaf Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 1875385007,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s5 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4545"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 30 35 32 30 42 36 32 37 32 38 41 41 41 32 45 45 20 20 20 20 00 00 00 00 00 00 43 53 4b 45 32 4a 37 2e 49 4b 47 4e 54 53 4e 4f 53 20 44 45 35 43 30 30 39 52 30 36 20 47 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 00 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 01 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s6": [
+ {
+ "EID:Slt": "23:6",
+ "DID": 6,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "12.732 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "ST14000NM001G-2KJ103",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s6 - Detailed Information": {
+ "Drive /c0/e23/s6 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s6 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST14000NM001G-2KJ103",
+ "NAND Vendor": "NA",
+ "SN": " REDACTED",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SN03 ",
+ "Raw size": "12.732 TB [0x65ddfffff Sectors]",
+ "Coerced size": "12.732 TB [0x65ddfffff Sectors]",
+ "Non Coerced size": "12.732 TB [0x65ddfffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 27344764927,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s6 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4546"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 4c 5a 48 32 31 4a 50 44 00 00 00 00 00 00 4e 53 33 30 20 20 20 20 54 53 34 31 30 30 4e 30 30 4d 31 30 2d 47 4b 32 31 4a 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s7": [
+ {
+ "EID:Slt": "23:7",
+ "DID": 7,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "1.819 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "Seagate BarraCuda 120 SSD ZA2000CM10003",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s7 - Detailed Information": {
+ "Drive /c0/e23/s7 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s7 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "Seagate BarraCuda 120 SSD ZA2000CM10003",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "STZSE014",
+ "Raw size": "1.819 TB [0xe8e088af Sectors]",
+ "Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Non Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 3907029167,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s7 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4547"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 51 37 30 58 44 30 45 35 20 20 20 20 20 20 20 20 20 20 20 20 00 00 00 00 00 00 54 53 53 5a 30 45 34 31 65 53 67 61 74 61 20 65 61 42 72 72 43 61 64 75 20 61 32 31 20 30 53 53 20 44 41 5a 30 32 30 30 4d 43 30 31 30 30 20 33 10 80 00 40 00 2f 00 40 00 00 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 01 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s8": [
+ {
+ "EID:Slt": "23:8",
+ "DID": 8,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "16.370 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "ST18000NM000J-2TV103",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s8 - Detailed Information": {
+ "Drive /c0/e23/s8 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s8 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST18000NM000J-2TV103",
+ "NAND Vendor": "NA",
+ "SN": " REDACTED",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SN02 ",
+ "Raw size": "16.370 TB [0x82f7fffff Sectors]",
+ "Coerced size": "16.370 TB [0x82f7fffff Sectors]",
+ "Non Coerced size": "16.370 TB [0x82f7fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 35156656127,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s8 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4548"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 52 5a 33 35 44 5a 36 54 00 00 00 00 00 00 4e 53 32 30 20 20 20 20 54 53 38 31 30 30 4e 30 30 4d 30 30 2d 4a 54 32 31 56 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s9": [
+ {
+ "EID:Slt": "23:9",
+ "DID": 9,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "1.819 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "WDC WDS200T1R0A-68A4W0",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s9 - Detailed Information": {
+ "Drive /c0/e23/s9 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s9 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "WDC WDS200T1R0A-68A4W0",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "411000WR",
+ "Raw size": "1.819 TB [0xe8e088af Sectors]",
+ "Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Non Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 3907029167,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s9 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4549"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 31 32 36 34 57 36 30 38 33 30 31 34 20 20 20 20 20 20 20 20 00 00 00 00 00 00 31 34 30 31 30 30 52 57 44 57 20 43 57 20 53 44 30 32 54 30 52 31 41 30 36 2d 41 38 57 34 20 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 01 80 00 40 00 2f 00 40 00 02 00 00 06 00 ff 3f 10 00 3f 00 10 fc fb 00 01 91 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s10": [
+ {
+ "EID:Slt": "23:10",
+ "DID": 10,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "10.913 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "4 KB",
+ "Model": "HGST HUH721212ALN604",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s10 - Detailed Information": {
+ "Drive /c0/e23/s10 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s10 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "HGST HUH721212ALN604",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "LEGNW925",
+ "Raw size": "10.913 TB [0xae9fffff Sectors]",
+ "Coerced size": "10.913 TB [0xae9fffff Sectors]",
+ "Non Coerced size": "10.913 TB [0xae9fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "4 KB",
+ "Config ID": "NA",
+ "Number of Blocks": 2929721343,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s10 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db454a"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 04 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 51 35 42 48 4b 52 42 42 20 20 20 20 20 20 20 20 20 20 20 20 03 00 00 00 38 00 45 4c 4e 47 39 57 35 32 47 48 54 53 48 20 48 55 32 37 32 31 32 31 4c 41 36 4e 34 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 02 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 00 59 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s11": [
+ {
+ "EID:Slt": "23:11",
+ "DID": 11,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "894.252 GB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "KINGSTON SEDC500R960G",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s11 - Detailed Information": {
+ "Drive /c0/e23/s11 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s11 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "KINGSTON SEDC500R960G",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SCEKJ2.7",
+ "Raw size": "894.252 GB [0x6fc81aaf Sectors]",
+ "Coerced size": "894.252 GB [0x6fc81aaf Sectors]",
+ "Non Coerced size": "894.252 GB [0x6fc81aaf Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 1875385007,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s11 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db454b"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 30 35 32 30 42 36 32 37 32 38 41 41 41 32 38 44 20 20 20 20 00 00 00 00 00 00 43 53 4b 45 32 4a 37 2e 49 4b 47 4e 54 53 4e 4f 53 20 44 45 35 43 30 30 39 52 30 36 20 47 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 00 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 01 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s12": [
+ {
+ "EID:Slt": "23:12",
+ "DID": 12,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "3.492 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "KINGSTON SEDC600M3840G",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s12 - Detailed Information": {
+ "Drive /c0/e23/s12 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s12 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "KINGSTON SEDC600M3840G",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SCEKH5.1",
+ "Raw size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Coerced size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Non Coerced size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 7501476527,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s12 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db455c"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 30 35 32 30 42 36 32 37 32 38 38 46 32 44 39 43 20 20 20 20 00 00 00 00 00 00 43 53 4b 45 35 48 31 2e 49 4b 47 4e 54 53 4e 4f 53 20 44 45 36 43 30 30 33 4d 34 38 47 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 00 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 01 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s14": [
+ {
+ "EID:Slt": "23:14",
+ "DID": 13,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "16.370 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "ST18000NM000J-2TV103",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s14 - Detailed Information": {
+ "Drive /c0/e23/s14 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s14 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST18000NM000J-2TV103",
+ "NAND Vendor": "NA",
+ "SN": " REDACTED",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SN02 ",
+ "Raw size": "16.370 TB [0x82f7fffff Sectors]",
+ "Coerced size": "16.370 TB [0x82f7fffff Sectors]",
+ "Non Coerced size": "16.370 TB [0x82f7fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 35156656127,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s14 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db455e"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 52 5a 33 35 4e 54 35 36 00 00 00 00 00 00 4e 53 32 30 20 20 20 20 54 53 38 31 30 30 4e 30 30 4d 30 30 2d 4a 54 32 31 56 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s15": [
+ {
+ "EID:Slt": "23:15",
+ "DID": 14,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "1.819 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "WDC WDS200T1R0A-68A4W0",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s15 - Detailed Information": {
+ "Drive /c0/e23/s15 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s15 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "WDC WDS200T1R0A-68A4W0",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "411000WR",
+ "Raw size": "1.819 TB [0xe8e088af Sectors]",
+ "Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Non Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 3907029167,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s15 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db455f"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 31 32 36 34 57 36 30 38 33 30 36 33 20 20 20 20 20 20 20 20 00 00 00 00 00 00 31 34 30 31 30 30 52 57 44 57 20 43 57 20 53 44 30 32 54 30 52 31 41 30 36 2d 41 38 57 34 20 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 01 80 00 40 00 2f 00 40 00 02 00 00 06 00 ff 3f 10 00 3f 00 10 fc fb 00 01 91 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s16": [
+ {
+ "EID:Slt": "23:16",
+ "DID": 15,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "10.913 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "4 KB",
+ "Model": "HGST HUH721212ALN604",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s16 - Detailed Information": {
+ "Drive /c0/e23/s16 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s16 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "HGST HUH721212ALN604",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "LEGNW925",
+ "Raw size": "10.913 TB [0xae9fffff Sectors]",
+ "Coerced size": "10.913 TB [0xae9fffff Sectors]",
+ "Non Coerced size": "10.913 TB [0xae9fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "4 KB",
+ "Config ID": "NA",
+ "Number of Blocks": 2929721343,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s16 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4560"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 04 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 51 35 34 4a 5a 4b 42 4a 20 20 20 20 20 20 20 20 20 20 20 20 03 00 00 00 38 00 45 4c 4e 47 39 57 35 32 47 48 54 53 48 20 48 55 32 37 32 31 32 31 4c 41 36 4e 34 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 02 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 00 59 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s17": [
+ {
+ "EID:Slt": "23:17",
+ "DID": 16,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "931.512 GB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "WDC WDS100T1R0A-68A4W0",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s17 - Detailed Information": {
+ "Drive /c0/e23/s17 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s17 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "WDC WDS100T1R0A-68A4W0",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "411000WR",
+ "Raw size": "931.512 GB [0x74706daf Sectors]",
+ "Coerced size": "931.512 GB [0x74706daf Sectors]",
+ "Non Coerced size": "931.512 GB [0x74706daf Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 1953525167,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s17 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4561"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 30 32 33 31 45 46 34 34 34 39 34 31 20 20 20 20 20 20 20 20 00 00 00 00 00 00 31 34 30 31 30 30 52 57 44 57 20 43 57 20 53 44 30 31 54 30 52 31 41 30 36 2d 41 38 57 34 20 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 01 80 00 40 00 2f 00 40 00 02 00 00 06 00 ff 3f 10 00 3f 00 10 fc fb 00 01 91 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s18": [
+ {
+ "EID:Slt": "23:18",
+ "DID": 17,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "3.492 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "KINGSTON SEDC600M3840G",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s18 - Detailed Information": {
+ "Drive /c0/e23/s18 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s18 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "KINGSTON SEDC600M3840G",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SCEKH5.1",
+ "Raw size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Coerced size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Non Coerced size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 7501476527,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s18 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4562"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 30 35 32 30 42 36 32 37 32 38 44 46 43 35 42 34 20 20 20 20 00 00 00 00 00 00 43 53 4b 45 35 48 31 2e 49 4b 47 4e 54 53 4e 4f 53 20 44 45 36 43 30 30 33 4d 34 38 47 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 00 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 01 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s19": [
+ {
+ "EID:Slt": "23:19",
+ "DID": 18,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "931.512 GB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "Seagate IronWolf ZA1000NM10002-2ZG102",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s19 - Detailed Information": {
+ "Drive /c0/e23/s19 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s19 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "Seagate IronWolf ZA1000NM10002-2ZG102",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SU3SC013",
+ "Raw size": "931.512 GB [0x74706daf Sectors]",
+ "Coerced size": "931.512 GB [0x74706daf Sectors]",
+ "Non Coerced size": "931.512 GB [0x74706daf Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 1953525167,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s19 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4563"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 54 37 30 43 35 30 32 57 20 20 20 20 20 20 20 20 20 20 20 20 00 00 00 00 00 00 55 53 53 33 30 43 33 31 65 53 67 61 74 61 20 65 72 49 6e 6f 6f 57 66 6c 5a 20 31 41 30 30 4e 30 31 4d 30 30 32 30 32 2d 47 5a 30 31 20 32 20 20 10 80 00 40 00 2f 00 40 00 00 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 01 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s20": [
+ {
+ "EID:Slt": "23:20",
+ "DID": 19,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "16.370 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "ST18000NM000J-2TV103",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s20 - Detailed Information": {
+ "Drive /c0/e23/s20 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s20 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST18000NM000J-2TV103",
+ "NAND Vendor": "NA",
+ "SN": " REDACTED",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SN02 ",
+ "Raw size": "16.370 TB [0x82f7fffff Sectors]",
+ "Coerced size": "16.370 TB [0x82f7fffff Sectors]",
+ "Non Coerced size": "16.370 TB [0x82f7fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 35156656127,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s20 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4564"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 52 5a 33 35 34 59 4a 54 00 00 00 00 00 00 4e 53 32 30 20 20 20 20 54 53 38 31 30 30 4e 30 30 4d 30 30 2d 4a 54 32 31 56 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s21": [
+ {
+ "EID:Slt": "23:21",
+ "DID": 20,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "1.819 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "WDC WDS200T1R0A-68A4W0",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s21 - Detailed Information": {
+ "Drive /c0/e23/s21 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s21 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "WDC WDS200T1R0A-68A4W0",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "411000WR",
+ "Raw size": "1.819 TB [0xe8e088af Sectors]",
+ "Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Non Coerced size": "1.819 TB [0xe8e088af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 3907029167,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s21 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4565"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 31 32 38 32 34 35 34 34 33 31 32 30 20 20 20 20 20 20 20 20 00 00 00 00 00 00 31 34 30 31 30 30 52 57 44 57 20 43 57 20 53 44 30 32 54 30 52 31 41 30 36 2d 41 38 57 34 20 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 01 80 00 40 00 2f 00 40 00 02 00 00 06 00 ff 3f 10 00 3f 00 10 fc fb 00 01 91 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s22": [
+ {
+ "EID:Slt": "23:22",
+ "DID": 21,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "10.913 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "ST12000NM001G-2MV103",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s22 - Detailed Information": {
+ "Drive /c0/e23/s22 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s22 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "ST12000NM001G-2MV103",
+ "NAND Vendor": "NA",
+ "SN": " REDACTED",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SN03 ",
+ "Raw size": "10.913 TB [0x574ffffff Sectors]",
+ "Coerced size": "10.913 TB [0x574ffffff Sectors]",
+ "Non Coerced size": "10.913 TB [0x574ffffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 23437770751,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s22 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4566"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 0c ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 20 20 20 20 4c 57 30 32 47 36 50 58 00 00 00 00 00 00 4e 53 33 30 20 20 20 20 54 53 32 31 30 30 4e 30 30 4d 31 30 2d 47 4d 32 31 56 33 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e23/s23": [
+ {
+ "EID:Slt": "23:23",
+ "DID": 22,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "931.512 GB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "WDC WDS100T1R0A-68A4W0",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e23/s23 - Detailed Information": {
+ "Drive /c0/e23/s23 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e23/s23 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "WDC WDS100T1R0A-68A4W0",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "411000WR",
+ "Raw size": "931.512 GB [0x74706daf Sectors]",
+ "Coerced size": "931.512 GB [0x74706daf Sectors]",
+ "Non Coerced size": "931.512 GB [0x74706daf Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 1953525167,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e23/s23 Policies/Settings": {
+ "Enclosure position": "0",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020db4567"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 30 32 33 31 45 46 34 34 36 35 35 30 20 20 20 20 20 20 20 20 00 00 00 00 00 00 31 34 30 31 30 30 52 57 44 57 20 43 57 20 53 44 30 31 54 30 52 31 41 30 36 2d 41 38 57 34 20 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 01 80 00 40 00 2f 00 40 00 02 00 00 06 00 ff 3f 10 00 3f 00 10 fc fb 00 01 91 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e30/s3": [
+ {
+ "EID:Slt": "30:3",
+ "DID": 24,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "18.189 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "WDC WUH722020BLE6L4",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e30/s3 - Detailed Information": {
+ "Drive /c0/e30/s3 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e30/s3 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "WDC WUH722020BLE6L4",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "PQGNW540",
+ "Raw size": "18.189 TB [0x9185fffff Sectors]",
+ "Coerced size": "18.189 TB [0x9185fffff Sectors]",
+ "Non Coerced size": "18.189 TB [0x9185fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 39063650303,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e30/s3 Policies/Settings": {
+ "Enclosure position": "1",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020e87d43"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 04 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 4c 38 34 47 48 56 41 59 20 20 20 20 20 20 20 20 20 20 20 20 03 00 00 00 38 00 51 50 4e 47 35 57 30 34 44 57 20 43 57 20 48 55 32 37 30 32 30 32 4c 42 36 45 34 4c 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 00 59 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e30/s5": [
+ {
+ "EID:Slt": "30:5",
+ "DID": 25,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "18.189 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "WDC WUH722020BLE6L4",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e30/s5 - Detailed Information": {
+ "Drive /c0/e30/s5 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e30/s5 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "WDC WUH722020BLE6L4",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "PQGNW540",
+ "Raw size": "18.189 TB [0x9185fffff Sectors]",
+ "Coerced size": "18.189 TB [0x9185fffff Sectors]",
+ "Non Coerced size": "18.189 TB [0x9185fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 39063650303,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e30/s5 Policies/Settings": {
+ "Enclosure position": "1",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020e87d45"
+ }
+ ]
+ },
+ "Inquiry Data": "5a 04 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 4c 38 34 47 52 56 41 4c 20 20 20 20 20 20 20 20 20 20 20 20 03 00 00 00 38 00 51 50 4e 47 35 57 30 34 44 57 20 43 57 20 48 55 32 37 30 32 30 32 4c 42 36 45 34 4c 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 02 07 00 ff 3f 10 00 3f 00 10 fc fb 00 00 59 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e30/s6": [
+ {
+ "EID:Slt": "30:6",
+ "DID": 26,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "18.189 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "TOSHIBA MG10ACA20TE",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e30/s6 - Detailed Information": {
+ "Drive /c0/e30/s6 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e30/s6 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "TOSHIBA MG10ACA20TE",
+ "NAND Vendor": "NA",
+ "SN": " REDACTED",
+ "WWN": "REDACTED",
+ "Firmware Revision": "0102 ",
+ "Raw size": "18.189 TB [0x9185fffff Sectors]",
+ "Coerced size": "18.189 TB [0x9185fffff Sectors]",
+ "Non Coerced size": "18.189 TB [0x9185fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 39063650303,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e30/s6 Policies/Settings": {
+ "Enclosure position": "1",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020e87d46"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 32 5a 30 42 30 41 5a 33 34 46 4a 4d 00 00 00 00 00 00 31 30 32 30 20 20 20 20 4f 54 48 53 42 49 20 41 47 4d 30 31 43 41 32 41 54 30 20 45 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 07 00 07 00 "
+ },
+ "Drive /c0/e30/s8": [
+ {
+ "EID:Slt": "30:8",
+ "DID": 27,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "18.189 TB",
+ "Intf": "SATA",
+ "Med": "HDD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "TOSHIBA MG10ACA20TE",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e30/s8 - Detailed Information": {
+ "Drive /c0/e30/s8 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e30/s8 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "TOSHIBA MG10ACA20TE",
+ "NAND Vendor": "NA",
+ "SN": " REDACTED",
+ "WWN": "REDACTED",
+ "Firmware Revision": "0102 ",
+ "Raw size": "18.189 TB [0x9185fffff Sectors]",
+ "Coerced size": "18.189 TB [0x9185fffff Sectors]",
+ "Non Coerced size": "18.189 TB [0x9185fffff Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 39063650303,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e30/s8 Policies/Settings": {
+ "Enclosure position": "1",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020e87d48"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 20 20 20 20 20 20 20 20 32 5a 30 41 31 41 35 4b 34 46 4a 4d 00 00 00 00 00 00 31 30 32 30 20 20 20 20 4f 54 48 53 42 49 20 41 47 4d 30 31 43 41 32 41 54 30 20 45 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 02 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 5d ff ff ff 0f 07 00 07 00 "
+ },
+ "Drive /c0/e30/s9": [
+ {
+ "EID:Slt": "30:9",
+ "DID": 28,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "3.492 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "KINGSTON SEDC600M3840G",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e30/s9 - Detailed Information": {
+ "Drive /c0/e30/s9 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e30/s9 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "KINGSTON SEDC600M3840G",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SCEKH5.1",
+ "Raw size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Coerced size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Non Coerced size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 7501476527,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e30/s9 Policies/Settings": {
+ "Enclosure position": "1",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020e87d49"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 30 35 32 30 42 36 32 37 32 38 44 46 43 35 42 42 20 20 20 20 00 00 00 00 00 00 43 53 4b 45 35 48 31 2e 49 4b 47 4e 54 53 4e 4f 53 20 44 45 36 43 30 30 33 4d 34 38 47 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 00 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 01 ff ff ff 0f 00 00 07 00 "
+ },
+ "Drive /c0/e30/s11": [
+ {
+ "EID:Slt": "30:11",
+ "DID": 29,
+ "State": "JBOD",
+ "DG": "-",
+ "Size": "3.492 TB",
+ "Intf": "SATA",
+ "Med": "SSD",
+ "SED": "-",
+ "PI": "-",
+ "SeSz": "512B",
+ "Model": "KINGSTON SEDC600M3840G",
+ "Sp": "-"
+ }
+ ],
+ "Drive /c0/e30/s11 - Detailed Information": {
+ "Drive /c0/e30/s11 State": {
+ "Shield Counter": "N/A",
+ "Media Error Count": "N/A",
+ "Other Error Count": "N/A",
+ "Predictive Failure Count": "N/A",
+ "S.M.A.R.T alert flagged by drive": "N/A"
+ },
+ "Drive /c0/e30/s11 Device attributes": {
+ "Manufacturer Id": "ATA ",
+ "Model Number": "KINGSTON SEDC600M3840G",
+ "NAND Vendor": "NA",
+ "SN": "REDACTED ",
+ "WWN": "REDACTED",
+ "Firmware Revision": "SCEKH5.1",
+ "Raw size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Coerced size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Non Coerced size": "3.492 TB [0x1bf1f72af Sectors]",
+ "Device Speed": "6.0Gb/s",
+ "Link Speed": "12.0Gb/s",
+ "NCQ setting": "N/A",
+ "Sector Size": "512B",
+ "Config ID": "NA",
+ "Number of Blocks": 7501476527,
+ "Connector Name": "C0 & C1 "
+ },
+ "Drive /c0/e30/s11 Policies/Settings": {
+ "Enclosure position": "1",
+ "Connected Port Number": "0(path0) ",
+ "Sequence Number": 0,
+ "Commissioned Spare": "No",
+ "Emergency Spare": "No",
+ "Last Predictive Failure Event Sequence Number": "N/A",
+ "Successful diagnostics completion on": "N/A",
+ "SED Capable": "N/A",
+ "SED Enabled": "N/A",
+ "Secured": "N/A",
+ "Needs EKM Attention": "N/A",
+ "PI Eligible": "N/A",
+ "Certified": "N/A",
+ "Wide Port Capable": "N/A",
+ "Multipath": "No",
+ "Port Information": [
+ {
+ "Port": 0,
+ "Status": "Active",
+ "Linkspeed": "12.0Gb/s",
+ "SAS address": "0x5003048020e87d4b"
+ }
+ ]
+ },
+ "Inquiry Data": "40 00 ff 3f 37 c8 10 00 00 00 00 00 3f 00 00 00 00 00 00 00 30 35 32 30 42 36 32 37 32 38 38 46 32 44 37 34 20 20 20 20 00 00 00 00 00 00 43 53 4b 45 35 48 31 2e 49 4b 47 4e 54 53 4e 4f 53 20 44 45 36 43 30 30 33 4d 34 38 47 30 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 20 10 80 00 40 00 2f 00 40 00 00 00 00 07 00 ff 3f 10 00 3f 00 10 fc fb 00 10 01 ff ff ff 0f 00 00 07 00 "
+ }
+ }
+ }
+ }
+ ]
+}
+
diff --git a/src/go/plugin/go.d/modules/supervisord/README.md b/src/go/plugin/go.d/modules/supervisord/README.md
new file mode 120000
index 000000000..a8b743484
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/README.md
@@ -0,0 +1 @@
+integrations/supervisor.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/supervisord/charts.go b/src/go/plugin/go.d/modules/supervisord/charts.go
new file mode 100644
index 000000000..c0f7c9018
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/charts.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ summaryChartsPriority = module.Priority
+ groupChartsPriority = summaryChartsPriority + 20
+)
+
+var summaryCharts = module.Charts{
+ {
+ ID: "processes",
+ Title: "Processes",
+ Units: "processes",
+ Fam: "summary",
+ Ctx: "supervisord.summary_processes",
+ Type: module.Stacked,
+ Priority: summaryChartsPriority,
+ Dims: module.Dims{
+ {ID: "running_processes", Name: "running"},
+ {ID: "non_running_processes", Name: "non-running"},
+ },
+ },
+}
+
+var (
+ groupChartsTmpl = module.Charts{
+ groupProcessesChartTmpl.Copy(),
+ groupProcessesStateCodeChartTmpl.Copy(),
+ groupProcessesExitStatusChartTmpl.Copy(),
+ groupProcessesUptimeChartTmpl.Copy(),
+ groupProcessesDowntimeChartTmpl.Copy(),
+ }
+
+ groupProcessesChartTmpl = module.Chart{
+ ID: "group_%s_processes",
+ Title: "Processes",
+ Units: "processes",
+ Fam: "group %s",
+ Ctx: "supervisord.processes",
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "group_%s_running_processes", Name: "running"},
+ {ID: "group_%s_non_running_processes", Name: "non-running"},
+ },
+ }
+ groupProcessesStateCodeChartTmpl = module.Chart{
+ ID: "group_%s_processes_state_code",
+ Title: "State code",
+ Units: "code",
+ Fam: "group %s",
+ Ctx: "supervisord.process_state_code",
+ }
+ groupProcessesExitStatusChartTmpl = module.Chart{
+ ID: "group_%s_processes_exit_status",
+ Title: "Exit status",
+ Units: "status",
+ Fam: "group %s",
+ Ctx: "supervisord.process_exit_status",
+ }
+ groupProcessesUptimeChartTmpl = module.Chart{
+ ID: "group_%s_processes_uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "group %s",
+ Ctx: "supervisord.process_uptime",
+ }
+ groupProcessesDowntimeChartTmpl = module.Chart{
+ ID: "group_%s_processes_downtime",
+ Title: "Downtime",
+ Units: "seconds",
+ Fam: "group %s",
+ Ctx: "supervisord.process_downtime",
+ }
+)
+
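+// newProcGroupCharts instantiates the per-group chart templates for a process
+// group, filling the group name into the chart IDs, families and dimension IDs.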
+func newProcGroupCharts(group string) *module.Charts {
+ charts := groupChartsTmpl.Copy()
+ for i, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, group)
+ c.Fam = fmt.Sprintf(c.Fam, group)
+ c.Priority = groupChartsPriority + i
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, group)
+ }
+ }
+ return charts
+}
diff --git a/src/go/plugin/go.d/modules/supervisord/client.go b/src/go/plugin/go.d/modules/supervisord/client.go
new file mode 100644
index 000000000..da62ca21c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/client.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/mattn/go-xmlrpc"
+)
+
+type supervisorRPCClient struct {
+ client *xmlrpc.Client
+}
+
+func newSupervisorRPCClient(serverURL *url.URL, httpClient *http.Client) (supervisorClient, error) {
+ switch serverURL.Scheme {
+ case "http", "https":
+ c := xmlrpc.NewClient(serverURL.String())
+ c.HttpClient = httpClient
+ return &supervisorRPCClient{client: c}, nil
+ case "unix":
+ c := xmlrpc.NewClient("http://unix/RPC2")
+ t, ok := httpClient.Transport.(*http.Transport)
+ if !ok {
+ return nil, errors.New("unexpected HTTP client transport")
+ }
+ t.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
+ d := net.Dialer{Timeout: httpClient.Timeout}
+ return d.DialContext(ctx, "unix", serverURL.Path)
+ }
+ c.HttpClient = httpClient
+ return &supervisorRPCClient{client: c}, nil
+ default:
+ return nil, fmt.Errorf("unexpected URL scheme: %s", serverURL)
+ }
+}
+
+// http://supervisord.org/api.html#process-control
+type processStatus struct {
+ name string // name of the process.
+ group string // name of the process’ group.
+ start int // UNIX timestamp of when the process was started.
+ stop int // UNIX timestamp of when the process last ended, or 0 if the process has never been stopped.
+ now int // UNIX timestamp of the current time, which can be used to calculate process up-time.
+ state int // state code.
+ stateName string // string description of state.
+ exitStatus int // exit status (errorlevel) of process, or 0 if the process is still running.
+}
+
+func (c *supervisorRPCClient) getAllProcessInfo() ([]processStatus, error) {
+ const fn = "supervisor.getAllProcessInfo"
+ resp, err := c.client.Call(fn)
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s' function call: %v", fn, err)
+ }
+ return parseGetAllProcessInfo(resp)
+}
+
+func (c *supervisorRPCClient) closeIdleConnections() {
+ c.client.HttpClient.CloseIdleConnections()
+}
+
+func parseGetAllProcessInfo(resp interface{}) ([]processStatus, error) {
+ arr, ok := resp.(xmlrpc.Array)
+ if !ok {
+ return nil, fmt.Errorf("unexpected response type, want=xmlrpc.Array, got=%T", resp)
+ }
+
+ var info []processStatus
+
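+ // Each array item is an xmlrpc.Struct describing one process; non-struct
+ // items are skipped, and entries missing name/group/statename are dropped.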
+ for _, item := range arr {
+ s, ok := item.(xmlrpc.Struct)
+ if !ok {
+ continue
+ }
+
+ var p processStatus
+ for k, v := range s {
+ switch strings.ToLower(k) {
+ case "name":
+ p.name, _ = v.(string)
+ case "group":
+ p.group, _ = v.(string)
+ case "start":
+ p.start, _ = v.(int)
+ case "stop":
+ p.stop, _ = v.(int)
+ case "now":
+ p.now, _ = v.(int)
+ case "state":
+ p.state, _ = v.(int)
+ case "statename":
+ p.stateName, _ = v.(string)
+ case "exitstatus":
+ p.exitStatus, _ = v.(int)
+ }
+ }
+ if p.name != "" && p.group != "" && p.stateName != "" {
+ info = append(info, p)
+ }
+ }
+ return info, nil
+}
diff --git a/src/go/plugin/go.d/modules/supervisord/collect.go b/src/go/plugin/go.d/modules/supervisord/collect.go
new file mode 100644
index 000000000..31a0d394b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/collect.go
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (s *Supervisord) collect() (map[string]int64, error) {
+ info, err := s.client.getAllProcessInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ ms := make(map[string]int64)
+ s.collectAllProcessInfo(ms, info)
+
+ return ms, nil
+}
+
+func (s *Supervisord) collectAllProcessInfo(ms map[string]int64, info []processStatus) {
+ s.resetCache()
+ ms["running_processes"] = 0
+ ms["non_running_processes"] = 0
+ for _, p := range info {
+ if _, ok := s.cache[p.group]; !ok {
+ s.cache[p.group] = make(map[string]bool)
+ s.addProcessGroupCharts(p)
+ }
+ if _, ok := s.cache[p.group][p.name]; !ok {
+ s.addProcessToCharts(p)
+ }
+ s.cache[p.group][p.name] = true
+
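+ // seed the per-group counters so both keys exist even when every process
+ // in the group is in the same state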
+ ms["group_"+p.group+"_running_processes"] += 0
+ ms["group_"+p.group+"_non_running_processes"] += 0
+ if isProcRunning(p) {
+ ms["running_processes"] += 1
+ ms["group_"+p.group+"_running_processes"] += 1
+ } else {
+ ms["non_running_processes"] += 1
+ ms["group_"+p.group+"_non_running_processes"] += 1
+ }
+ id := procID(p)
+ ms[id+"_state_code"] = int64(p.state)
+ ms[id+"_exit_status"] = int64(p.exitStatus)
+ ms[id+"_uptime"] = calcProcessUptime(p)
+ ms[id+"_downtime"] = calcProcessDowntime(p)
+ }
+ s.cleanupCache()
+}
+
+func (s *Supervisord) resetCache() {
+ for _, procs := range s.cache {
+ for name := range procs {
+ procs[name] = false
+ }
+ }
+}
+
+func (s *Supervisord) cleanupCache() {
+ for group, procs := range s.cache {
+ for name, ok := range procs {
+ if !ok {
+ s.removeProcessFromCharts(group, name)
+ delete(s.cache[group], name)
+ }
+ }
+ if len(s.cache[group]) == 0 {
+ s.removeProcessGroupCharts(group)
+ delete(s.cache, group)
+ }
+ }
+}
+
+func calcProcessUptime(p processStatus) int64 {
+ if !isProcRunning(p) {
+ return 0
+ }
+ return int64(p.now - p.start)
+}
+
+func calcProcessDowntime(p processStatus) int64 {
+ if isProcRunning(p) || p.stop == 0 {
+ return 0
+ }
+ return int64(p.now - p.stop)
+}
+
+func (s *Supervisord) addProcessGroupCharts(p processStatus) {
+ charts := newProcGroupCharts(p.group)
+ if err := s.Charts().Add(*charts...); err != nil {
+ s.Warning(err)
+ }
+}
+
+func (s *Supervisord) addProcessToCharts(p processStatus) {
+ id := procID(p)
+ for _, c := range *s.Charts() {
+ var dimID string
+ switch c.ID {
+ case fmt.Sprintf(groupProcessesStateCodeChartTmpl.ID, p.group):
+ dimID = id + "_state_code"
+ case fmt.Sprintf(groupProcessesExitStatusChartTmpl.ID, p.group):
+ dimID = id + "_exit_status"
+ case fmt.Sprintf(groupProcessesUptimeChartTmpl.ID, p.group):
+ dimID = id + "_uptime"
+ case fmt.Sprintf(groupProcessesDowntimeChartTmpl.ID, p.group):
+ dimID = id + "_downtime"
+ default:
+ continue
+ }
+ dim := &module.Dim{ID: dimID, Name: p.name}
+ if err := c.AddDim(dim); err != nil {
+ s.Warning(err)
+ return
+ }
+ c.MarkNotCreated()
+ }
+}
+
+func (s *Supervisord) removeProcessGroupCharts(group string) {
+ prefix := "group_" + group
+ for _, c := range *s.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+func (s *Supervisord) removeProcessFromCharts(group, name string) {
+ id := procID(processStatus{name: name, group: group})
+ for _, c := range *s.Charts() {
+ var dimID string
+ switch c.ID {
+ case fmt.Sprintf(groupProcessesStateCodeChartTmpl.ID, group):
+ dimID = id + "_state_code"
+ case fmt.Sprintf(groupProcessesExitStatusChartTmpl.ID, group):
+ dimID = id + "_exit_status"
+ case fmt.Sprintf(groupProcessesUptimeChartTmpl.ID, group):
+ dimID = id + "_uptime"
+ case fmt.Sprintf(groupProcessesDowntimeChartTmpl.ID, group):
+ dimID = id + "_downtime"
+ default:
+ continue
+ }
+ if err := c.MarkDimRemove(dimID, true); err != nil {
+ s.Warning(err)
+ return
+ }
+ c.MarkNotCreated()
+ }
+}
+
+func procID(p processStatus) string {
+ return fmt.Sprintf("group_%s_process_%s", p.group, p.name)
+}
+
+func isProcRunning(p processStatus) bool {
+ // http://supervisord.org/subprocess.html#process-states
+ // STOPPED (0)
+ // STARTING (10)
+ // RUNNING (20)
+ // BACKOFF (30)
+ // STOPPING (40)
+ // EXITED (100)
+ // FATAL (200)
+ // UNKNOWN (1000)
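+ // Only RUNNING (20) is treated as running; STARTING, BACKOFF and STOPPING
+ // are counted as non-running.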
+ return p.state == 20
+}
diff --git a/src/go/plugin/go.d/modules/supervisord/config_schema.json b/src/go/plugin/go.d/modules/supervisord/config_schema.json
new file mode 100644
index 000000000..8d3c4e943
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/config_schema.json
@@ -0,0 +1,87 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Supervisord collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Supervisord [XML-RPC interface](http://supervisord.org/xmlrpc.html#rpcinterface-factories).",
+ "type": "string",
+ "default": "http://127.0.0.1:9001/RPC2",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/supervisord/init.go b/src/go/plugin/go.d/modules/supervisord/init.go
new file mode 100644
index 000000000..c7ccc06b5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/init.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (s *Supervisord) verifyConfig() error {
+ if s.URL == "" {
+ return errors.New("'url' not set")
+ }
+ return nil
+}
+
+func (s *Supervisord) initSupervisorClient() (supervisorClient, error) {
+ u, err := url.Parse(s.URL)
+ if err != nil {
+ return nil, fmt.Errorf("parse 'url': %v (%s)", err, s.URL)
+ }
+ httpClient, err := web.NewHTTPClient(s.Client)
+ if err != nil {
+ return nil, fmt.Errorf("create HTTP client: %v", err)
+ }
+ return newSupervisorRPCClient(u, httpClient)
+}
diff --git a/src/go/plugin/go.d/modules/supervisord/integrations/supervisor.md b/src/go/plugin/go.d/modules/supervisord/integrations/supervisor.md
new file mode 100644
index 000000000..ba302e4a0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/integrations/supervisor.md
@@ -0,0 +1,249 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/supervisord/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/supervisord/metadata.yaml"
+sidebar_label: "Supervisor"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Processes and System Services"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Supervisor
+
+
+<img src="https://netdata.cloud/img/supervisord.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: supervisord
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Supervisor instances.
+
+It can collect metrics from:
+
+- [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)
+- [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)
+
+Used methods:
+
+- [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)
+
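+To double-check that the collector can reach this interface, you can call the same method by hand. The snippet below is only a verification sketch, assuming the default inet HTTP server listening on `127.0.0.1:9001`; adjust the URL, or use curl's `--unix-socket` option, to match your setup.
+
+```bash
+# Request the same process list the collector uses (supervisor.getAllProcessInfo).
+curl -s http://127.0.0.1:9001/RPC2 \
+  -H 'Content-Type: text/xml' \
+  -d '<?xml version="1.0"?><methodCall><methodName>supervisor.getAllProcessInfo</methodName></methodCall>'
+```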
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Supervisor instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| supervisord.summary_processes | running, non-running | processes |
+
+### Per process group
+
+These metrics refer to the process group.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| supervisord.processes | running, non-running | processes |
+| supervisord.process_state_code | a dimension per process | code |
+| supervisord.process_exit_status | a dimension per process | exit status |
+| supervisord.process_uptime | a dimension per process | seconds |
+| supervisord.process_downtime | a dimension per process | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/supervisord.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/supervisord.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:9001/RPC2 | yes |
+| timeout | HTTP (XML-RPC) request timeout, in seconds. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### HTTP
+
+Collect metrics via HTTP.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: 'http://127.0.0.1:9001/RPC2'
+
+```
+</details>
+
+##### Socket
+
+Collect metrics via Unix socket.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: 'unix:///run/supervisor.sock'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collect metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: 'http://127.0.0.1:9001/RPC2'
+
+ - name: remote
+ url: 'http://192.0.2.1:9001/RPC2'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `supervisord` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m supervisord
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `supervisord` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep supervisord
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep supervisord /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep supervisord
+```
+
+
diff --git a/src/go/plugin/go.d/modules/supervisord/metadata.yaml b/src/go/plugin/go.d/modules/supervisord/metadata.yaml
new file mode 100644
index 000000000..b5c81dd04
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/metadata.yaml
@@ -0,0 +1,161 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-supervisord
+ plugin_name: go.d.plugin
+ module_name: supervisord
+ monitored_instance:
+ name: Supervisor
+ link: http://supervisord.org/
+ icon_filename: supervisord.png
+ categories:
+ - data-collection.processes-and-system-services
+ keywords:
+ - supervisor
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Supervisor instances.
+
+ It can collect metrics from:
+
+ - [unix socket](http://supervisord.org/configuration.html?highlight=unix_http_server#unix-http-server-section-values)
+ - [internal http server](http://supervisord.org/configuration.html?highlight=unix_http_server#inet-http-server-section-settings)
+
+ Used methods:
+
+ - [`supervisor.getAllProcessInfo`](http://supervisord.org/api.html#supervisor.rpcinterface.SupervisorNamespaceRPCInterface.getAllProcessInfo)
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/supervisord.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:9001/RPC2
+ required: true
+ - name: timeout
+ description: HTTP (XML-RPC) request timeout, in seconds.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: HTTP
+ description: Collect metrics via HTTP.
+ config: |
+ jobs:
+ - name: local
+ url: 'http://127.0.0.1:9001/RPC2'
+ - name: Socket
+ description: Collect metrics via Unix socket.
+ config: |
+ jobs:
+ - name: local
+ url: 'unix:///run/supervisor.sock'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collect metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: 'http://127.0.0.1:9001/RPC2'
+
+ - name: remote
+ url: 'http://192.0.2.1:9001/RPC2'
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: supervisord.summary_processes
+ description: Processes
+ unit: processes
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: non-running
+ - name: process group
+ description: These metrics refer to the process group.
+ labels: []
+ metrics:
+ - name: supervisord.processes
+ description: Processes
+ unit: processes
+ chart_type: stacked
+ dimensions:
+ - name: running
+ - name: non-running
+ - name: supervisord.process_state_code
+ description: State code
+ unit: code
+ chart_type: line
+ dimensions:
+ - name: a dimension per process
+ - name: supervisord.process_exit_status
+ description: Exit status
+ unit: exit status
+ chart_type: line
+ dimensions:
+ - name: a dimension per process
+ - name: supervisord.process_uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: a dimension per process
+ - name: supervisord.process_downtime
+ description: Downtime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: a dimension per process
diff --git a/src/go/plugin/go.d/modules/supervisord/supervisord.go b/src/go/plugin/go.d/modules/supervisord/supervisord.go
new file mode 100644
index 000000000..0988cfc88
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/supervisord.go
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("supervisord", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
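+// New returns a Supervisord collector pre-configured to query the XML-RPC
+// endpoint at http://127.0.0.1:9001/RPC2 with a 1 second HTTP timeout.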
+func New() *Supervisord {
+ return &Supervisord{
+ Config: Config{
+ URL: "http://127.0.0.1:9001/RPC2",
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+
+ charts: summaryCharts.Copy(),
+ cache: make(map[string]map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ URL string `yaml:"url" json:"url"`
+ web.Client `yaml:",inline" json:""`
+}
+
+type (
+ Supervisord struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ client supervisorClient
+
+ cache map[string]map[string]bool // map[group][procName]collected
+ }
+ supervisorClient interface {
+ getAllProcessInfo() ([]processStatus, error)
+ closeIdleConnections()
+ }
+)
+
+func (s *Supervisord) Configuration() any {
+ return s.Config
+}
+
+func (s *Supervisord) Init() error {
+ err := s.verifyConfig()
+ if err != nil {
+ s.Errorf("verify config: %v", err)
+ return err
+ }
+
+ client, err := s.initSupervisorClient()
+ if err != nil {
+ s.Errorf("init supervisord client: %v", err)
+ return err
+ }
+ s.client = client
+
+ return nil
+}
+
+func (s *Supervisord) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (s *Supervisord) Charts() *module.Charts {
+ return s.charts
+}
+
+func (s *Supervisord) Collect() map[string]int64 {
+ ms, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+
+func (s *Supervisord) Cleanup() {
+ if s.client != nil {
+ s.client.closeIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/supervisord/supervisord_test.go b/src/go/plugin/go.d/modules/supervisord/supervisord_test.go
new file mode 100644
index 000000000..7eb5df53a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/supervisord_test.go
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package supervisord
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSupervisord_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Supervisord{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestSupervisord_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset 'url'": {
+ wantFail: true,
+ config: Config{URL: ""},
+ },
+ "fails on unexpected 'url' scheme": {
+ wantFail: true,
+ config: Config{URL: "tcp://127.0.0.1:9001/RPC2"},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ supvr := New()
+ supvr.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, supvr.Init())
+ } else {
+ assert.NoError(t, supvr.Init())
+ }
+ })
+ }
+}
+
+func TestSupervisord_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) *Supervisord
+ wantFail bool
+ }{
+ "success on valid response": {
+ prepare: prepareSupervisordSuccessOnGetAllProcessInfo,
+ },
+ "success on zero processes response": {
+ prepare: prepareSupervisordZeroProcessesOnGetAllProcessInfo,
+ },
+ "fails on error": {
+ wantFail: true,
+ prepare: prepareSupervisordErrorOnGetAllProcessInfo,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ supvr := test.prepare(t)
+ defer supvr.Cleanup()
+
+ if test.wantFail {
+ assert.Error(t, supvr.Check())
+ } else {
+ assert.NoError(t, supvr.Check())
+ }
+ })
+ }
+}
+
+func TestSupervisord_Charts(t *testing.T) {
+ supvr := New()
+ require.NoError(t, supvr.Init())
+
+ assert.NotNil(t, supvr.Charts())
+}
+
+func TestSupervisord_Cleanup(t *testing.T) {
+ supvr := New()
+ assert.NotPanics(t, supvr.Cleanup)
+
+ require.NoError(t, supvr.Init())
+ m := &mockSupervisorClient{}
+ supvr.client = m
+
+ supvr.Cleanup()
+
+ assert.True(t, m.calledCloseIdleConnections)
+}
+
+func TestSupervisord_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) *Supervisord
+ wantCollected map[string]int64
+ }{
+ "success on valid response": {
+ prepare: prepareSupervisordSuccessOnGetAllProcessInfo,
+ wantCollected: map[string]int64{
+ "group_proc1_non_running_processes": 1,
+ "group_proc1_process_00_downtime": 16276,
+ "group_proc1_process_00_exit_status": 0,
+ "group_proc1_process_00_state_code": 200,
+ "group_proc1_process_00_uptime": 0,
+ "group_proc1_running_processes": 0,
+ "group_proc2_non_running_processes": 0,
+ "group_proc2_process_00_downtime": 0,
+ "group_proc2_process_00_exit_status": 0,
+ "group_proc2_process_00_state_code": 20,
+ "group_proc2_process_00_uptime": 2,
+ "group_proc2_process_01_downtime": 0,
+ "group_proc2_process_01_exit_status": 0,
+ "group_proc2_process_01_state_code": 20,
+ "group_proc2_process_01_uptime": 2,
+ "group_proc2_process_02_downtime": 0,
+ "group_proc2_process_02_exit_status": 0,
+ "group_proc2_process_02_state_code": 20,
+ "group_proc2_process_02_uptime": 8,
+ "group_proc2_running_processes": 3,
+ "group_proc3_non_running_processes": 0,
+ "group_proc3_process_00_downtime": 0,
+ "group_proc3_process_00_exit_status": 0,
+ "group_proc3_process_00_state_code": 20,
+ "group_proc3_process_00_uptime": 16291,
+ "group_proc3_running_processes": 1,
+ "non_running_processes": 1,
+ "running_processes": 4,
+ },
+ },
+ "success on response with zero processes": {
+ prepare: prepareSupervisordZeroProcessesOnGetAllProcessInfo,
+ wantCollected: map[string]int64{
+ "non_running_processes": 0,
+ "running_processes": 0,
+ },
+ },
+ "fails on error on getAllProcessesInfo": {
+ prepare: prepareSupervisordErrorOnGetAllProcessInfo,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ supvr := test.prepare(t)
+ defer supvr.Cleanup()
+
+ ms := supvr.Collect()
+ assert.Equal(t, test.wantCollected, ms)
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, supvr, ms)
+ ensureCollectedProcessesAddedToCharts(t, supvr)
+ }
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, supvr *Supervisord, ms map[string]int64) {
+ for _, chart := range *supvr.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := ms[dim.ID]
+			assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := ms[v.ID]
+			assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+ }
+ }
+}
+
+func ensureCollectedProcessesAddedToCharts(t *testing.T, supvr *Supervisord) {
+ for group := range supvr.cache {
+ for _, c := range *newProcGroupCharts(group) {
+ assert.NotNilf(t, supvr.Charts().Get(c.ID), "'%s' chart is not in charts", c.ID)
+ }
+ }
+}
+
+func prepareSupervisordSuccessOnGetAllProcessInfo(t *testing.T) *Supervisord {
+ supvr := New()
+ require.NoError(t, supvr.Init())
+ supvr.client = &mockSupervisorClient{}
+ return supvr
+}
+
+func prepareSupervisordZeroProcessesOnGetAllProcessInfo(t *testing.T) *Supervisord {
+ supvr := New()
+ require.NoError(t, supvr.Init())
+ supvr.client = &mockSupervisorClient{returnZeroProcesses: true}
+ return supvr
+}
+
+func prepareSupervisordErrorOnGetAllProcessInfo(t *testing.T) *Supervisord {
+ supvr := New()
+ require.NoError(t, supvr.Init())
+ supvr.client = &mockSupervisorClient{errOnGetAllProcessInfo: true}
+ return supvr
+}
+
+type mockSupervisorClient struct {
+ errOnGetAllProcessInfo bool
+ returnZeroProcesses bool
+ calledCloseIdleConnections bool
+}
+
+func (m *mockSupervisorClient) getAllProcessInfo() ([]processStatus, error) {
+ if m.errOnGetAllProcessInfo {
+ return nil, errors.New("mock errOnGetAllProcessInfo")
+ }
+ if m.returnZeroProcesses {
+ return nil, nil
+ }
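+	// The timestamps below are chosen so that TestSupervisord_Collect's wantCollected values add up
+	// (e.g. proc1 downtime = now - stop = 1613391038 - 1613374762 = 16276 seconds).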
+ info := []processStatus{
+ {
+ name: "00", group: "proc1",
+ start: 1613374760, stop: 1613374762, now: 1613391038,
+ state: 200, stateName: "FATAL",
+ exitStatus: 0,
+ },
+ {
+ name: "00", group: "proc2",
+ start: 1613391036, stop: 1613391036, now: 1613391038,
+ state: 20, stateName: "RUNNING",
+ exitStatus: 0,
+ },
+ {
+ name: "01", group: "proc2",
+ start: 1613391036, stop: 1613391036, now: 1613391038,
+ state: 20, stateName: "RUNNING",
+ exitStatus: 0,
+ },
+ {
+ name: "02", group: "proc2",
+ start: 1613391030, stop: 1613391029, now: 1613391038,
+ state: 20, stateName: "RUNNING",
+ exitStatus: 0,
+ },
+ {
+ name: "00", group: "proc3",
+ start: 1613374747, stop: 0, now: 1613391038,
+ state: 20, stateName: "RUNNING",
+ exitStatus: 0,
+ },
+ }
+ return info, nil
+}
+
+func (m *mockSupervisorClient) closeIdleConnections() {
+ m.calledCloseIdleConnections = true
+}
diff --git a/src/go/plugin/go.d/modules/supervisord/testdata/config.json b/src/go/plugin/go.d/modules/supervisord/testdata/config.json
new file mode 100644
index 000000000..825b0c394
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/testdata/config.json
@@ -0,0 +1,11 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "proxy_url": "ok",
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/supervisord/testdata/config.yaml b/src/go/plugin/go.d/modules/supervisord/testdata/config.yaml
new file mode 100644
index 000000000..e1a01abd7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/supervisord/testdata/config.yaml
@@ -0,0 +1,9 @@
+update_every: 123
+url: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+proxy_url: "ok"
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/systemdunits/README.md b/src/go/plugin/go.d/modules/systemdunits/README.md
new file mode 120000
index 000000000..68dd433bf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/README.md
@@ -0,0 +1 @@
+integrations/systemd_units.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/systemdunits/charts.go b/src/go/plugin/go.d/modules/systemdunits/charts.go
new file mode 100644
index 000000000..9f1f56b70
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/charts.go
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package systemdunits
+
+import (
+ "fmt"
+ "path/filepath"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+)
+
+const (
+ prioUnitState = module.Priority + iota
+ prioUnitFileState
+)
+
+func (s *SystemdUnits) addUnitCharts(name, typ string) {
+ chart := module.Chart{
+ ID: "unit_%s_%s_state",
+ Title: "%s Unit State",
+ Units: "state",
+ Fam: "%s units",
+ Ctx: "systemd.%s_unit_state",
+ Priority: prioUnitState,
+ Labels: []module.Label{
+ {Key: "unit_name", Value: name},
+ },
+ Dims: module.Dims{
+ {ID: "unit_%s_%s_state_%s", Name: unitStateActive},
+ {ID: "unit_%s_%s_state_%s", Name: unitStateInactive},
+ {ID: "unit_%s_%s_state_%s", Name: unitStateActivating},
+ {ID: "unit_%s_%s_state_%s", Name: unitStateDeactivating},
+ {ID: "unit_%s_%s_state_%s", Name: unitStateFailed},
+ },
+ }
+
+ chart.ID = fmt.Sprintf(chart.ID, name, typ)
+ chart.Title = fmt.Sprintf(chart.Title, cases.Title(language.English, cases.Compact).String(typ))
+ chart.Fam = fmt.Sprintf(chart.Fam, typ)
+ chart.Ctx = fmt.Sprintf(chart.Ctx, typ)
+
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, name, typ, d.Name)
+ }
+
+ if err := s.Charts().Add(&chart); err != nil {
+ s.Warning(err)
+ }
+}
+
+func (s *SystemdUnits) removeUnitCharts(name, typ string) {
+ px := fmt.Sprintf("unit_%s_%s_", name, typ)
+ s.removeCharts(px)
+}
+
+func (s *SystemdUnits) addUnitFileCharts(unitPath string) {
+ _, unitName := filepath.Split(unitPath)
+ unitType := strings.TrimPrefix(filepath.Ext(unitPath), ".")
+
+ chart := module.Chart{
+ ID: "unit_file_%s_state",
+ Title: "Unit File State",
+ Units: "state",
+ Fam: "unit files",
+ Ctx: "systemd.unit_file_state",
+ Type: module.Line,
+ Priority: prioUnitFileState,
+ Labels: []module.Label{
+ {Key: "unit_file_name", Value: unitName},
+ {Key: "unit_file_type", Value: unitType},
+ },
+ Dims: module.Dims{
+ {ID: "unit_file_%s_state_enabled", Name: "enabled"},
+ {ID: "unit_file_%s_state_enabled-runtime", Name: "enabled-runtime"},
+ {ID: "unit_file_%s_state_linked", Name: "linked"},
+ {ID: "unit_file_%s_state_linked-runtime", Name: "linked-runtime"},
+ {ID: "unit_file_%s_state_alias", Name: "alias"},
+ {ID: "unit_file_%s_state_masked", Name: "masked"},
+ {ID: "unit_file_%s_state_masked-runtime", Name: "masked-runtime"},
+ {ID: "unit_file_%s_state_static", Name: "static"},
+ {ID: "unit_file_%s_state_disabled", Name: "disabled"},
+ {ID: "unit_file_%s_state_indirect", Name: "indirect"},
+ {ID: "unit_file_%s_state_generated", Name: "generated"},
+ {ID: "unit_file_%s_state_transient", Name: "transient"},
+ {ID: "unit_file_%s_state_bad", Name: "bad"},
+ },
+ }
+
+ chart.ID = fmt.Sprintf(chart.ID, strings.ReplaceAll(unitPath, ".", "_"))
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, unitPath)
+ }
+
+ if err := s.Charts().Add(&chart); err != nil {
+ s.Warning(err)
+ }
+}
+
+func (s *SystemdUnits) removeUnitFileCharts(unitPath string) {
+ px := fmt.Sprintf("unit_file_%s_", strings.ReplaceAll(unitPath, ".", "_"))
+ s.removeCharts(px)
+}
+
+func (s *SystemdUnits) removeCharts(prefix string) {
+ for _, chart := range *s.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/client.go b/src/go/plugin/go.d/modules/systemdunits/client.go
new file mode 100644
index 000000000..e6363d132
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/client.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package systemdunits
+
+import (
+ "context"
+
+ "github.com/coreos/go-systemd/v22/dbus"
+)
+
+type systemdClient interface {
+ connect() (systemdConnection, error)
+}
+type systemdConnection interface {
+ Close()
+ GetManagerProperty(string) (string, error)
+ GetUnitPropertyContext(ctx context.Context, unit string, propertyName string) (*dbus.Property, error)
+ ListUnitsContext(ctx context.Context) ([]dbus.UnitStatus, error)
+ ListUnitsByPatternsContext(ctx context.Context, states []string, patterns []string) ([]dbus.UnitStatus, error)
+ ListUnitFilesByPatternsContext(ctx context.Context, states []string, patterns []string) ([]dbus.UnitFile, error)
+}
+
+type systemdDBusClient struct{}
+
+func (systemdDBusClient) connect() (systemdConnection, error) {
+ return dbus.NewWithContext(context.Background())
+}
+
+func newSystemdDBusClient() *systemdDBusClient {
+ return &systemdDBusClient{}
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/collect.go b/src/go/plugin/go.d/modules/systemdunits/collect.go
new file mode 100644
index 000000000..0d61c9998
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/collect.go
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package systemdunits
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+)
+
+func (s *SystemdUnits) collect() (map[string]int64, error) {
+ conn, err := s.getConnection()
+ if err != nil {
+ return nil, err
+ }
+
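+	// Detect the systemd version once and cache it; it determines whether the
+	// pattern-based D-Bus list calls (available since v230) can be used.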
+ if s.systemdVersion == 0 {
+ ver, err := s.getSystemdVersion(conn)
+ if err != nil {
+ s.closeConnection()
+ return nil, err
+ }
+ s.systemdVersion = ver
+ }
+
+ mx := make(map[string]int64)
+
+ if err := s.collectUnits(mx, conn); err != nil {
+ s.closeConnection()
+ return nil, err
+ }
+
+ if s.CollectUnitFiles && len(s.IncludeUnitFiles) > 0 {
+ if err := s.collectUnitFiles(mx, conn); err != nil {
+ s.closeConnection()
+ return mx, err
+ }
+ }
+
+ return mx, nil
+}
+
+func (s *SystemdUnits) getConnection() (systemdConnection, error) {
+ if s.conn == nil {
+ conn, err := s.client.connect()
+ if err != nil {
+ return nil, fmt.Errorf("error on creating a connection: %v", err)
+ }
+ s.conn = conn
+ }
+ return s.conn, nil
+}
+
+func (s *SystemdUnits) closeConnection() {
+ if s.conn != nil {
+ s.conn.Close()
+ s.conn = nil
+ }
+}
+
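+// reVersion extracts the first three-digit run from the version string, e.g. "249.11-2" -> "249".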
+var reVersion = regexp.MustCompile(`[0-9][0-9][0-9]`)
+
+const versionProperty = "Version"
+
+func (s *SystemdUnits) getSystemdVersion(conn systemdConnection) (int, error) {
+ s.Debugf("calling function 'GetManagerProperty'")
+ version, err := conn.GetManagerProperty(versionProperty)
+ if err != nil {
+ return 0, fmt.Errorf("error on getting '%s' manager property: %v", versionProperty, err)
+ }
+
+ s.Debugf("systemd version: %s", version)
+
+ major := reVersion.FindString(version)
+ if major == "" {
+ return 0, fmt.Errorf("couldn't parse systemd version string '%s'", version)
+ }
+
+ ver, err := strconv.Atoi(major)
+ if err != nil {
+ return 0, fmt.Errorf("couldn't parse systemd version string '%s': %v", version, err)
+ }
+
+ return ver, nil
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/collect_unit_files.go b/src/go/plugin/go.d/modules/systemdunits/collect_unit_files.go
new file mode 100644
index 000000000..eff2d6ecb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/collect_unit_files.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package systemdunits
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/coreos/go-systemd/v22/dbus"
+)
+
+// https://github.com/systemd/systemd/blob/3d320785c4bbba74459096b07e85a79c4f0cdffb/src/shared/install.c#L3785
+// see "is-enabled" in https://www.man7.org/linux/man-pages/man1/systemctl.1.html
+var unitFileStates = []string{
+ "enabled",
+ "enabled-runtime",
+ "linked",
+ "linked-runtime",
+ "alias",
+ "masked",
+ "masked-runtime",
+ "static",
+ "disabled",
+ "indirect",
+ "generated",
+ "transient",
+ "bad",
+}
+
+func (s *SystemdUnits) collectUnitFiles(mx map[string]int64, conn systemdConnection) error {
+ if s.systemdVersion < 230 {
+ return nil
+ }
+
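+	// Re-query the unit file list only once per CollectUnitFilesEvery; between
+	// refreshes the cached result is reused to keep D-Bus traffic low.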
+ if now := time.Now(); now.After(s.lastListUnitFilesTime.Add(s.CollectUnitFilesEvery.Duration())) {
+ unitFiles, err := s.getUnitFilesByPatterns(conn)
+ if err != nil {
+ return err
+ }
+ s.lastListUnitFilesTime = now
+ s.cachedUnitFiles = unitFiles
+ }
+
+ seen := make(map[string]bool)
+
+ for _, unitFile := range s.cachedUnitFiles {
+ seen[unitFile.Path] = true
+
+ if !s.seenUnitFiles[unitFile.Path] {
+ s.seenUnitFiles[unitFile.Path] = true
+ s.addUnitFileCharts(unitFile.Path)
+ }
+
+ px := fmt.Sprintf("unit_file_%s_state_", unitFile.Path)
+ for _, st := range unitFileStates {
+ mx[px+st] = 0
+ }
+ mx[px+strings.ToLower(unitFile.Type)] = 1
+ }
+
+ for k := range s.seenUnitFiles {
+ if !seen[k] {
+ delete(s.seenUnitFiles, k)
+ s.removeUnitFileCharts(k)
+ }
+ }
+
+ return nil
+}
+
+func (s *SystemdUnits) getUnitFilesByPatterns(conn systemdConnection) ([]dbus.UnitFile, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration())
+ defer cancel()
+
+ s.Debugf("calling function 'ListUnitFilesByPatterns'")
+
+ unitFiles, err := conn.ListUnitFilesByPatternsContext(ctx, nil, s.IncludeUnitFiles)
+ if err != nil {
+ return nil, fmt.Errorf("error on ListUnitFilesByPatterns: %v", err)
+ }
+
+ for i := range unitFiles {
+ unitFiles[i].Path = cleanUnitName(unitFiles[i].Path)
+ }
+
+ s.Debugf("got %d unit files", len(unitFiles))
+
+ return unitFiles, nil
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/collect_units.go b/src/go/plugin/go.d/modules/systemdunits/collect_units.go
new file mode 100644
index 000000000..0cf97af03
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/collect_units.go
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package systemdunits
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/coreos/go-systemd/v22/dbus"
+)
+
+const transientProperty = "Transient"
+
+const (
+ // https://www.freedesktop.org/software/systemd/man/systemd.html
+ unitStateActive = "active"
+ unitStateInactive = "inactive"
+ unitStateActivating = "activating"
+ unitStateDeactivating = "deactivating"
+ unitStateFailed = "failed"
+)
+
+var unitStates = []string{
+ unitStateActive,
+ unitStateActivating,
+ unitStateFailed,
+ unitStateInactive,
+ unitStateDeactivating,
+}
+
+func (s *SystemdUnits) collectUnits(mx map[string]int64, conn systemdConnection) error {
+ var units []dbus.UnitStatus
+ var err error
+
+ if s.systemdVersion >= 230 {
+ // https://github.com/systemd/systemd/pull/3142
+ units, err = s.getLoadedUnitsByPatterns(conn)
+ } else {
+ units, err = s.getLoadedUnits(conn)
+ }
+ if err != nil {
+ return err
+ }
+
+ seen := make(map[string]bool)
+
+ for _, unit := range units {
+ name, typ, ok := extractUnitNameType(unit.Name)
+ if !ok {
+ continue
+ }
+
+ seen[unit.Name] = true
+
+ if s.SkipTransient {
+ if _, ok := s.unitTransient[unit.Name]; !ok {
+ prop, err := s.getUnitTransientProperty(conn, unit.Name)
+ if err != nil {
+ return err
+ }
+ prop = strings.Trim(prop, "\"")
+ s.unitTransient[unit.Name] = prop == "true"
+ }
+ if s.unitTransient[unit.Name] {
+ continue
+ }
+ }
+
+ if !s.seenUnits[unit.Name] {
+ s.seenUnits[unit.Name] = true
+ s.addUnitCharts(name, typ)
+ }
+
+		for _, st := range unitStates {
+			mx[fmt.Sprintf("unit_%s_%s_state_%s", name, typ, st)] = 0
+		}
+ mx[fmt.Sprintf("unit_%s_%s_state_%s", name, typ, unit.ActiveState)] = 1
+ }
+
+ for k := range s.seenUnits {
+ if !seen[k] {
+ delete(s.seenUnits, k)
+ if name, typ, ok := extractUnitNameType(k); ok {
+ s.removeUnitCharts(name, typ)
+ }
+ }
+ }
+
+ for k := range s.unitTransient {
+ if !seen[k] {
+ delete(s.unitTransient, k)
+ }
+ }
+
+ return nil
+}
+
+func (s *SystemdUnits) getLoadedUnits(conn systemdConnection) ([]dbus.UnitStatus, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration())
+ defer cancel()
+
+ s.Debugf("calling function 'ListUnits'")
+ units, err := conn.ListUnitsContext(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("error on ListUnits: %v", err)
+ }
+
+ for i := range units {
+ units[i].Name = cleanUnitName(units[i].Name)
+ }
+
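+	// Filter in place: 'loaded' shares the backing array with 'units', so no extra allocation is made.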
+ loaded := units[:0]
+ for _, unit := range units {
+ if unit.LoadState == "loaded" && s.unitSr.MatchString(unit.Name) {
+ loaded = append(loaded, unit)
+ }
+ }
+
+ s.Debugf("got total/loaded %d/%d units", len(units), len(loaded))
+
+ return loaded, nil
+}
+
+func (s *SystemdUnits) getLoadedUnitsByPatterns(conn systemdConnection) ([]dbus.UnitStatus, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration())
+ defer cancel()
+
+ s.Debugf("calling function 'ListUnitsByPatterns'")
+
+ units, err := conn.ListUnitsByPatternsContext(ctx, unitStates, s.Include)
+ if err != nil {
+ return nil, fmt.Errorf("error on ListUnitsByPatterns: %v", err)
+ }
+
+ for i := range units {
+ units[i].Name = cleanUnitName(units[i].Name)
+ }
+
+ loaded := units[:0]
+ for _, unit := range units {
+ if unit.LoadState == "loaded" {
+ loaded = append(loaded, unit)
+ }
+ }
+ s.Debugf("got total/loaded %d/%d units", len(units), len(loaded))
+
+ return loaded, nil
+}
+
+func (s *SystemdUnits) getUnitTransientProperty(conn systemdConnection, unit string) (string, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration())
+ defer cancel()
+
+ s.Debugf("calling function 'GetUnitProperty' for unit '%s'", unit)
+
+ prop, err := conn.GetUnitPropertyContext(ctx, unit, transientProperty)
+ if err != nil {
+ return "", fmt.Errorf("error on GetUnitProperty: %v", err)
+ }
+
+ return prop.Value.String(), nil
+}
+
+func extractUnitNameType(name string) (string, string, bool) {
+ idx := strings.LastIndexByte(name, '.')
+ if idx <= 0 {
+ return "", "", false
+ }
+ return name[:idx], name[idx+1:], true
+}
+
+func cleanUnitName(name string) string {
+ // dev-disk-by\x2duuid-DE44\x2dCEE0.device => dev-disk-by-uuid-DE44-CEE0.device
+ if strings.IndexByte(name, '\\') == -1 {
+ return name
+ }
+ v, err := strconv.Unquote("\"" + name + "\"")
+ if err != nil {
+ return name
+ }
+ return v
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/config_schema.json b/src/go/plugin/go.d/modules/systemdunits/config_schema.json
new file mode 100644
index 000000000..016e984ce
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/config_schema.json
@@ -0,0 +1,122 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Systemdunits collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout, in seconds, for connecting and querying systemd's D-Bus endpoint.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "skip_transient": {
+ "title": "Skip transient units",
+ "description": "If set, skip data collection for systemd transient units.",
+ "type": "boolean",
+ "default": false
+ },
+ "include": {
+ "title": "Include",
+ "description": "Configuration for monitoring specific systemd units. Include systemd units whose names match any of the specified [patterns](https://golang.org/pkg/path/filepath/#Match).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "minItems": 1,
+ "items": {
+ "title": "Unit pattern",
+ "type": "string"
+ },
+ "default": [
+ "*.service"
+ ]
+ },
+ "collect_unit_files": {
+ "title": "Collect unit files",
+ "description": "If set, collect the state of installed unit files. **Enabling this may increase system overhead**, particularly if the pattern matches a large number of unit files.",
+ "type": "boolean",
+ "default": false
+ },
+ "collect_unit_files_every": {
+ "title": "Unit files polling interval",
+ "description": "Interval for querying systemd about unit files and their enablement state, measured in seconds. Data is cached for this interval to reduce system overhead.",
+ "type": "number",
+ "minimum": 1,
+ "default": 300
+ },
+ "include_unit_files": {
+ "title": "Include unit files",
+ "description": "Configuration for monitoring specific systemd unit files. Include systemd unit files whose names match any of the specified [patterns](https://golang.org/pkg/path/filepath/#Match).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "minItems": 1,
+ "items": {
+ "title": "Unit file name pattern",
+ "type": "string"
+ },
+ "default": [
+ "*.service"
+ ]
+ }
+ },
+ "required": [
+ "include"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "timeout",
+ "skip_transient",
+ "include"
+ ]
+ },
+ {
+ "title": "Unit Files",
+ "fields": [
+ "collect_unit_files",
+ "collect_unit_files_every",
+ "include_unit_files"
+ ]
+ }
+ ]
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "skip_transient": {
+ "ui:help": "A systemd transient unit is a temporary unit created on-the-fly, typically used for ad-hoc tasks or testing purposes. They are created using the `systemd-run` command, which allows you to specify unit properties directly on the command line."
+ },
+ "include": {
+ "ui:listFlavour": "list"
+ },
+ "include_unit_files": {
+ "ui:listFlavour": "list"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/doc.go b/src/go/plugin/go.d/modules/systemdunits/doc.go
new file mode 100644
index 000000000..8bb45fab9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/doc.go
@@ -0,0 +1,4 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+// Package systemdunits collects the state of systemd units and unit files.
+package systemdunits
diff --git a/src/go/plugin/go.d/modules/systemdunits/init.go b/src/go/plugin/go.d/modules/systemdunits/init.go
new file mode 100644
index 000000000..8a1b579c1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/init.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package systemdunits
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+func (s *SystemdUnits) validateConfig() error {
+ if len(s.Include) == 0 {
+ return errors.New("'include' option not set")
+ }
+ return nil
+}
+
+func (s *SystemdUnits) initUnitSelector() (matcher.Matcher, error) {
+ if len(s.Include) == 0 {
+ return matcher.TRUE(), nil
+ }
+
+ expr := strings.Join(s.Include, " ")
+ return matcher.NewSimplePatternsMatcher(expr)
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/integrations/systemd_units.md b/src/go/plugin/go.d/modules/systemdunits/integrations/systemd_units.md
new file mode 100644
index 000000000..a2ff90b0d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/integrations/systemd_units.md
@@ -0,0 +1,324 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/systemdunits/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/systemdunits/metadata.yaml"
+sidebar_label: "Systemd Units"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Systemd"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Systemd Units
+
+
+<img src="https://netdata.cloud/img/systemd.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: systemdunits
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the state of Systemd units and unit files.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per unit
+
+These metrics refer to the systemd unit.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| unit_name | systemd unit name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| systemd.service_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.socket_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.target_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.path_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.device_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.mount_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.automount_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.swap_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.timer_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.scope_unit_state | active, inactive, activating, deactivating, failed | state |
+| systemd.slice_unit_state | active, inactive, activating, deactivating, failed | state |
+
+### Per unit file
+
+These metrics refer to the systemd unit file.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| unit_file_name | systemd unit file name |
+| unit_file_type | systemd unit file type |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| systemd.unit_file_state | enabled, enabled-runtime, linked, linked-runtime, alias, masked, masked-runtime, static, disabled, indirect, generated, transient, bad | state |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ systemd_service_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.service_unit_state | systemd service unit in the failed state |
+| [ systemd_socket_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.socket_unit_state | systemd socket unit in the failed state |
+| [ systemd_target_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.target_unit_state | systemd target unit in the failed state |
+| [ systemd_path_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.path_unit_state | systemd path unit in the failed state |
+| [ systemd_device_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.device_unit_state | systemd device unit in the failed state |
+| [ systemd_mount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.mount_unit_state | systemd mount unit in the failed state |
+| [ systemd_automount_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.automount_unit_state | systemd automount unit in the failed state |
+| [ systemd_swap_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.swap_unit_state | systemd swap unit in the failed state |
+| [ systemd_scope_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.scope_unit_state | systemd scope unit in the failed state |
+| [ systemd_slice_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.slice_unit_state | systemd slice unit in the failed state |
+| [ systemd_timer_unit_failed_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf) | systemd.timer_unit_state | systemd timer unit in the failed state |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/systemdunits.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/systemdunits.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| timeout | System bus requests timeout. | 2 | no |
+| include | Systemd units selector. | *.service | no |
+| skip_transient | If set, skip data collection for systemd transient units. | false | no |
+| collect_unit_files | If set to true, collect the state of installed unit files. Enabling this may increase system overhead. | false | no |
+| collect_unit_files_every | Interval for querying systemd about unit files and their enablement state, measured in seconds. Data is cached for this interval to reduce system overhead. | 300 | no |
+| include_unit_files | Systemd unit files selector. | *.service | no |
+
+##### include
+
+Systemd units matching the selector will be monitored.
+
+- Logic: (pattern1 OR pattern2)
+- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)
+- Syntax:
+
+```yaml
+include:
+ - pattern1
+ - pattern2
+```
+
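+A minimal, illustrative Go sketch of how this shell file-name pattern syntax behaves, using the standard library's `path/filepath.Match` (the function the link above documents). The pattern list and unit names are hypothetical, and the collector's own selector implementation may differ:
+
+```go
+package main
+
+import (
+	"fmt"
+	"path/filepath"
+)
+
+func main() {
+	// hypothetical 'include' patterns and unit names, for illustration only
+	patterns := []string{"*.service", "*.socket"}
+	units := []string{"docker.service", "dbus.socket", "tmp.mount"}
+
+	for _, unit := range units {
+		selected := false
+		for _, pattern := range patterns {
+			// filepath.Match implements the shell file-name pattern syntax
+			// referenced by the 'include' option.
+			if ok, _ := filepath.Match(pattern, unit); ok {
+				selected = true
+				break
+			}
+		}
+		fmt.Printf("%-16s selected=%v\n", unit, selected)
+	}
+}
+```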
+
+##### include_unit_files
+
+Systemd unit files matching the selector will be monitored.
+
+- Logic: (pattern1 OR pattern2)
+- Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)
+- Syntax:
+
+```yaml
+include:
+ - pattern1
+ - pattern2
+```
+
+
+</details>
+
+#### Examples
+
+##### Service units
+
+Collect state of all service type units.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: service
+ include:
+ - '*.service'
+
+```
+</details>
+
+##### One specific unit
+
+Collect state of one specific unit.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: my-specific-service
+ include:
+ - 'my-specific.service'
+
+```
+</details>
+
+##### All unit types
+
+Collect state of all units.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: my-specific-service-unit
+ include:
+ - '*'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collect state of all service and socket type units.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: service
+ include:
+ - '*.service'
+
+ - name: socket
+ include:
+ - '*.socket'
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `systemdunits` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m systemdunits
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `systemdunits` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep systemdunits
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep systemdunits /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep systemdunits
+```
+
+
diff --git a/src/go/plugin/go.d/modules/systemdunits/metadata.yaml b/src/go/plugin/go.d/modules/systemdunits/metadata.yaml
new file mode 100644
index 000000000..791e58400
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/metadata.yaml
@@ -0,0 +1,344 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-systemdunits
+ plugin_name: go.d.plugin
+ module_name: systemdunits
+ monitored_instance:
+ name: Systemd Units
+ link: https://www.freedesktop.org/wiki/Software/systemd/
+ icon_filename: systemd.svg
+ categories:
+ - data-collection.systemd
+ keywords:
+ - systemd
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the state of Systemd units and unit files.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/systemdunits.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+              default_value: 10
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: timeout
+ description: System bus requests timeout.
+              default_value: 2
+ required: false
+ - name: include
+ description: Systemd units selector.
+ default_value: "*.service"
+ required: false
+ detailed_description: |
+ Systemd units matching the selector will be monitored.
+
+ - Logic: (pattern1 OR pattern2)
+ - Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)
+ - Syntax:
+
+ ```yaml
+                include:
+ - pattern1
+ - pattern2
+ ```
+ - name: skip_transient
+ description: If set, skip data collection for systemd transient units.
+ default_value: "false"
+ required: false
+ - name: collect_unit_files
+ description: If set to true, collect the state of installed unit files. Enabling this may increase system overhead.
+ default_value: "false"
+ required: false
+ - name: collect_unit_files_every
+ description: Interval for querying systemd about unit files and their enablement state, measured in seconds. Data is cached for this interval to reduce system overhead.
+ default_value: 300
+ required: false
+ - name: include_unit_files
+ description: Systemd unit files selector.
+ default_value: "*.service"
+ required: false
+ detailed_description: |
+ Systemd unit files matching the selector will be monitored.
+
+ - Logic: (pattern1 OR pattern2)
+ - Pattern syntax: [shell file name pattern](https://golang.org/pkg/path/filepath/#Match)
+ - Syntax:
+
+ ```yaml
+                include:
+ - pattern1
+ - pattern2
+ ```
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Service units
+ description: Collect state of all service type units.
+ config: |
+ jobs:
+ - name: service
+ include:
+ - '*.service'
+ - name: One specific unit
+ description: Collect state of one specific unit.
+ config: |
+ jobs:
+ - name: my-specific-service
+ include:
+ - 'my-specific.service'
+ - name: All unit types
+ description: Collect state of all units.
+ config: |
+ jobs:
+ - name: my-specific-service-unit
+ include:
+ - '*'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collect state of all service and socket type units.
+ config: |
+ jobs:
+ - name: service
+ include:
+ - '*.service'
+
+ - name: socket
+ include:
+ - '*.socket'
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: systemd_service_unit_failed_state
+ metric: systemd.service_unit_state
+ info: systemd service unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_socket_unit_failed_state
+ metric: systemd.socket_unit_state
+ info: systemd socket unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_target_unit_failed_state
+ metric: systemd.target_unit_state
+ info: systemd target unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_path_unit_failed_state
+ metric: systemd.path_unit_state
+ info: systemd path unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_device_unit_failed_state
+ metric: systemd.device_unit_state
+ info: systemd device unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_mount_unit_failed_state
+ metric: systemd.mount_unit_state
+ info: systemd mount unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_automount_unit_failed_state
+ metric: systemd.automount_unit_state
+ info: systemd automount unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_swap_unit_failed_state
+ metric: systemd.swap_unit_state
+ info: systemd swap unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_scope_unit_failed_state
+ metric: systemd.scope_unit_state
+ info: systemd scope unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_slice_unit_failed_state
+ metric: systemd.slice_unit_state
+ info: systemd slice unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ - name: systemd_timer_unit_failed_state
+ metric: systemd.timer_unit_state
+ info: systemd timer unit in the failed state
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/systemdunits.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: unit
+ description: These metrics refer to the systemd unit.
+ labels:
+ - name: unit_name
+ description: systemd unit name
+ metrics:
+ - name: systemd.service_unit_state
+ description: Service Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.socket_unit_state
+ description: Socket Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.target_unit_state
+ description: Target Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.path_unit_state
+ description: Path Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.device_unit_state
+ description: Device Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.mount_unit_state
+ description: Mount Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.automount_unit_state
+ description: Automount Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.swap_unit_state
+ description: Swap Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.timer_unit_state
+ description: Timer Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.scope_unit_state
+ description: Scope Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: systemd.slice_unit_state
+ description: Slice Unit State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: inactive
+ - name: activating
+ - name: deactivating
+ - name: failed
+ - name: unit file
+ description: These metrics refer to the systemd unit file.
+ labels:
+ - name: unit_file_name
+ description: systemd unit file name
+ - name: unit_file_type
+ description: systemd unit file type
+ metrics:
+ - name: systemd.unit_file_state
+ description: Unit File State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: enabled
+ - name: enabled-runtime
+ - name: linked
+ - name: linked-runtime
+ - name: alias
+ - name: masked
+ - name: masked-runtime
+ - name: static
+ - name: disabled
+ - name: indirect
+ - name: generated
+ - name: transient
+ - name: bad
diff --git a/src/go/plugin/go.d/modules/systemdunits/systemdunits.go b/src/go/plugin/go.d/modules/systemdunits/systemdunits.go
new file mode 100644
index 000000000..9a3478768
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/systemdunits.go
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package systemdunits
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/coreos/go-systemd/v22/dbus"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("systemdunits", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10, // gathering systemd units can be a CPU intensive op
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *SystemdUnits {
+ return &SystemdUnits{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ Include: []string{"*.service"},
+ SkipTransient: false,
+ CollectUnitFiles: false,
+ IncludeUnitFiles: []string{"*.service"},
+ CollectUnitFilesEvery: web.Duration(time.Minute * 5),
+ },
+ charts: &module.Charts{},
+ client: newSystemdDBusClient(),
+ seenUnits: make(map[string]bool),
+ unitTransient: make(map[string]bool),
+ seenUnitFiles: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ Include []string `yaml:"include,omitempty" json:"include"`
+ SkipTransient bool `yaml:"skip_transient" json:"skip_transient"`
+ CollectUnitFiles bool `yaml:"collect_unit_files" json:"collect_unit_files"`
+ IncludeUnitFiles []string `yaml:"include_unit_files,omitempty" json:"include_unit_files"`
+ CollectUnitFilesEvery web.Duration `yaml:"collect_unit_files_every,omitempty" json:"collect_unit_files_every"`
+}
+
+type SystemdUnits struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ client systemdClient
+ conn systemdConnection
+
+ systemdVersion int
+
+ seenUnits map[string]bool
+ unitTransient map[string]bool
+ unitSr matcher.Matcher
+
+ lastListUnitFilesTime time.Time
+ cachedUnitFiles []dbus.UnitFile
+ seenUnitFiles map[string]bool
+
+ charts *module.Charts
+}
+
+func (s *SystemdUnits) Configuration() any {
+ return s.Config
+}
+
+func (s *SystemdUnits) Init() error {
+ if err := s.validateConfig(); err != nil {
+ s.Errorf("config validation: %v", err)
+ return err
+ }
+
+ sr, err := s.initUnitSelector()
+ if err != nil {
+ s.Errorf("init unit selector: %v", err)
+ return err
+ }
+ s.unitSr = sr
+
+ s.Debugf("timeout: %s", s.Timeout)
+ s.Debugf("units: patterns '%v'", s.Include)
+ s.Debugf("unit files: enabled '%v', every '%s', patterns: %v",
+ s.CollectUnitFiles, s.CollectUnitFilesEvery, s.IncludeUnitFiles)
+
+ return nil
+}
+
+func (s *SystemdUnits) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (s *SystemdUnits) Charts() *module.Charts {
+ return s.charts
+}
+
+func (s *SystemdUnits) Collect() map[string]int64 {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (s *SystemdUnits) Cleanup() {
+ s.closeConnection()
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/systemdunits_test.go b/src/go/plugin/go.d/modules/systemdunits/systemdunits_test.go
new file mode 100644
index 000000000..7074e186e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/systemdunits_test.go
@@ -0,0 +1,1156 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+//go:build linux
+// +build linux
+
+package systemdunits
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/coreos/go-systemd/v22/dbus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSystemdUnits_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &SystemdUnits{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestSystemdUnits_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "success when 'include' option set": {
+ config: Config{
+ Include: []string{"*"},
+ },
+ },
+ "fails when 'include' option not set": {
+ wantFail: true,
+ config: Config{Include: []string{}},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ systemd := New()
+ systemd.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, systemd.Init())
+ } else {
+ assert.NoError(t, systemd.Init())
+ }
+ })
+ }
+}
+
+func TestSystemdUnits_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *SystemdUnits
+ wantFail bool
+ }{
+ "success on systemd v230+": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*"}
+ systemd.client = prepareOKClient(230)
+ return systemd
+ },
+ },
+ "success on systemd v230-": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*"}
+ systemd.client = prepareOKClient(220)
+ return systemd
+ },
+ },
+		"fails when all units are filtered": {
+ wantFail: true,
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*.not_exists"}
+ systemd.client = prepareOKClient(230)
+ return systemd
+ },
+ },
+ "fails on error on connect": {
+ wantFail: true,
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.client = prepareClientErrOnConnect()
+ return systemd
+ },
+ },
+ "fails on error on get manager property": {
+ wantFail: true,
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.client = prepareClientErrOnGetManagerProperty()
+ return systemd
+ },
+ },
+ "fails on error on list units": {
+ wantFail: true,
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.client = prepareClientErrOnListUnits()
+ return systemd
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ systemd := test.prepare()
+ require.NoError(t, systemd.Init())
+
+ if test.wantFail {
+ assert.Error(t, systemd.Check())
+ } else {
+ assert.NoError(t, systemd.Check())
+ }
+ })
+ }
+}
+
+func TestSystemdUnits_Charts(t *testing.T) {
+ systemd := New()
+ require.NoError(t, systemd.Init())
+ assert.NotNil(t, systemd.Charts())
+}
+
+func TestSystemdUnits_Cleanup(t *testing.T) {
+ systemd := New()
+ systemd.Include = []string{"*"}
+ client := prepareOKClient(230)
+ systemd.client = client
+
+ require.NoError(t, systemd.Init())
+ require.NotNil(t, systemd.Collect())
+ conn := systemd.conn
+ systemd.Cleanup()
+
+ assert.Nil(t, systemd.conn)
+ v, _ := conn.(*mockConn)
+ assert.True(t, v.closeCalled)
+}
+
+func TestSystemdUnits_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *SystemdUnits
+ wantCollected map[string]int64
+ }{
+ "success v230+ on collecting all unit type": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*"}
+ systemd.client = prepareOKClient(230)
+ return systemd
+ },
+ wantCollected: map[string]int64{
+ "unit_dbus_socket_state_activating": 0,
+ "unit_dbus_socket_state_active": 1,
+ "unit_dbus_socket_state_deactivating": 0,
+ "unit_dbus_socket_state_failed": 0,
+ "unit_dbus_socket_state_inactive": 0,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_activating": 0,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_active": 1,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_deactivating": 0,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_failed": 0,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_inactive": 0,
+ "unit_dev-nvme0n1_device_state_activating": 0,
+ "unit_dev-nvme0n1_device_state_active": 1,
+ "unit_dev-nvme0n1_device_state_deactivating": 0,
+ "unit_dev-nvme0n1_device_state_failed": 0,
+ "unit_dev-nvme0n1_device_state_inactive": 0,
+ "unit_docker_socket_state_activating": 0,
+ "unit_docker_socket_state_active": 1,
+ "unit_docker_socket_state_deactivating": 0,
+ "unit_docker_socket_state_failed": 0,
+ "unit_docker_socket_state_inactive": 0,
+ "unit_getty-pre_target_state_activating": 0,
+ "unit_getty-pre_target_state_active": 0,
+ "unit_getty-pre_target_state_deactivating": 0,
+ "unit_getty-pre_target_state_failed": 0,
+ "unit_getty-pre_target_state_inactive": 1,
+ "unit_init_scope_state_activating": 0,
+ "unit_init_scope_state_active": 1,
+ "unit_init_scope_state_deactivating": 0,
+ "unit_init_scope_state_failed": 0,
+ "unit_init_scope_state_inactive": 0,
+ "unit_logrotate_timer_state_activating": 0,
+ "unit_logrotate_timer_state_active": 1,
+ "unit_logrotate_timer_state_deactivating": 0,
+ "unit_logrotate_timer_state_failed": 0,
+ "unit_logrotate_timer_state_inactive": 0,
+ "unit_lvm2-lvmetad_socket_state_activating": 0,
+ "unit_lvm2-lvmetad_socket_state_active": 1,
+ "unit_lvm2-lvmetad_socket_state_deactivating": 0,
+ "unit_lvm2-lvmetad_socket_state_failed": 0,
+ "unit_lvm2-lvmetad_socket_state_inactive": 0,
+ "unit_lvm2-lvmpolld_socket_state_activating": 0,
+ "unit_lvm2-lvmpolld_socket_state_active": 1,
+ "unit_lvm2-lvmpolld_socket_state_deactivating": 0,
+ "unit_lvm2-lvmpolld_socket_state_failed": 0,
+ "unit_lvm2-lvmpolld_socket_state_inactive": 0,
+ "unit_man-db_timer_state_activating": 0,
+ "unit_man-db_timer_state_active": 1,
+ "unit_man-db_timer_state_deactivating": 0,
+ "unit_man-db_timer_state_failed": 0,
+ "unit_man-db_timer_state_inactive": 0,
+ "unit_org.cups.cupsd_path_state_activating": 0,
+ "unit_org.cups.cupsd_path_state_active": 1,
+ "unit_org.cups.cupsd_path_state_deactivating": 0,
+ "unit_org.cups.cupsd_path_state_failed": 0,
+ "unit_org.cups.cupsd_path_state_inactive": 0,
+ "unit_pamac-cleancache_timer_state_activating": 0,
+ "unit_pamac-cleancache_timer_state_active": 1,
+ "unit_pamac-cleancache_timer_state_deactivating": 0,
+ "unit_pamac-cleancache_timer_state_failed": 0,
+ "unit_pamac-cleancache_timer_state_inactive": 0,
+ "unit_pamac-mirrorlist_timer_state_activating": 0,
+ "unit_pamac-mirrorlist_timer_state_active": 1,
+ "unit_pamac-mirrorlist_timer_state_deactivating": 0,
+ "unit_pamac-mirrorlist_timer_state_failed": 0,
+ "unit_pamac-mirrorlist_timer_state_inactive": 0,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_activating": 0,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_active": 1,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_deactivating": 0,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_failed": 0,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_inactive": 0,
+ "unit_remote-fs-pre_target_state_activating": 0,
+ "unit_remote-fs-pre_target_state_active": 0,
+ "unit_remote-fs-pre_target_state_deactivating": 0,
+ "unit_remote-fs-pre_target_state_failed": 0,
+ "unit_remote-fs-pre_target_state_inactive": 1,
+ "unit_rpc_pipefs_target_state_activating": 0,
+ "unit_rpc_pipefs_target_state_active": 0,
+ "unit_rpc_pipefs_target_state_deactivating": 0,
+ "unit_rpc_pipefs_target_state_failed": 0,
+ "unit_rpc_pipefs_target_state_inactive": 1,
+ "unit_run-user-1000-gvfs_mount_state_activating": 0,
+ "unit_run-user-1000-gvfs_mount_state_active": 1,
+ "unit_run-user-1000-gvfs_mount_state_deactivating": 0,
+ "unit_run-user-1000-gvfs_mount_state_failed": 0,
+ "unit_run-user-1000-gvfs_mount_state_inactive": 0,
+ "unit_run-user-1000_mount_state_activating": 0,
+ "unit_run-user-1000_mount_state_active": 1,
+ "unit_run-user-1000_mount_state_deactivating": 0,
+ "unit_run-user-1000_mount_state_failed": 0,
+ "unit_run-user-1000_mount_state_inactive": 0,
+ "unit_session-1_scope_state_activating": 0,
+ "unit_session-1_scope_state_active": 1,
+ "unit_session-1_scope_state_deactivating": 0,
+ "unit_session-1_scope_state_failed": 0,
+ "unit_session-1_scope_state_inactive": 0,
+ "unit_session-2_scope_state_activating": 0,
+ "unit_session-2_scope_state_active": 1,
+ "unit_session-2_scope_state_deactivating": 0,
+ "unit_session-2_scope_state_failed": 0,
+ "unit_session-2_scope_state_inactive": 0,
+ "unit_session-3_scope_state_activating": 0,
+ "unit_session-3_scope_state_active": 1,
+ "unit_session-3_scope_state_deactivating": 0,
+ "unit_session-3_scope_state_failed": 0,
+ "unit_session-3_scope_state_inactive": 0,
+ "unit_session-6_scope_state_activating": 0,
+ "unit_session-6_scope_state_active": 1,
+ "unit_session-6_scope_state_deactivating": 0,
+ "unit_session-6_scope_state_failed": 0,
+ "unit_session-6_scope_state_inactive": 0,
+ "unit_shadow_timer_state_activating": 0,
+ "unit_shadow_timer_state_active": 1,
+ "unit_shadow_timer_state_deactivating": 0,
+ "unit_shadow_timer_state_failed": 0,
+ "unit_shadow_timer_state_inactive": 0,
+ "unit_sound_target_state_activating": 0,
+ "unit_sound_target_state_active": 1,
+ "unit_sound_target_state_deactivating": 0,
+ "unit_sound_target_state_failed": 0,
+ "unit_sound_target_state_inactive": 0,
+ "unit_sys-devices-virtual-net-loopback1_device_state_activating": 0,
+ "unit_sys-devices-virtual-net-loopback1_device_state_active": 1,
+ "unit_sys-devices-virtual-net-loopback1_device_state_deactivating": 0,
+ "unit_sys-devices-virtual-net-loopback1_device_state_failed": 0,
+ "unit_sys-devices-virtual-net-loopback1_device_state_inactive": 0,
+ "unit_sys-module-fuse_device_state_activating": 0,
+ "unit_sys-module-fuse_device_state_active": 1,
+ "unit_sys-module-fuse_device_state_deactivating": 0,
+ "unit_sys-module-fuse_device_state_failed": 0,
+ "unit_sys-module-fuse_device_state_inactive": 0,
+ "unit_sysinit_target_state_activating": 0,
+ "unit_sysinit_target_state_active": 1,
+ "unit_sysinit_target_state_deactivating": 0,
+ "unit_sysinit_target_state_failed": 0,
+ "unit_sysinit_target_state_inactive": 0,
+ "unit_system-getty_slice_state_activating": 0,
+ "unit_system-getty_slice_state_active": 1,
+ "unit_system-getty_slice_state_deactivating": 0,
+ "unit_system-getty_slice_state_failed": 0,
+ "unit_system-getty_slice_state_inactive": 0,
+ "unit_system-netctl_slice_state_activating": 0,
+ "unit_system-netctl_slice_state_active": 1,
+ "unit_system-netctl_slice_state_deactivating": 0,
+ "unit_system-netctl_slice_state_failed": 0,
+ "unit_system-netctl_slice_state_inactive": 0,
+ "unit_system-systemd-fsck_slice_state_activating": 0,
+ "unit_system-systemd-fsck_slice_state_active": 1,
+ "unit_system-systemd-fsck_slice_state_deactivating": 0,
+ "unit_system-systemd-fsck_slice_state_failed": 0,
+ "unit_system-systemd-fsck_slice_state_inactive": 0,
+ "unit_system_slice_state_activating": 0,
+ "unit_system_slice_state_active": 1,
+ "unit_system_slice_state_deactivating": 0,
+ "unit_system_slice_state_failed": 0,
+ "unit_system_slice_state_inactive": 0,
+ "unit_systemd-ask-password-console_path_state_activating": 0,
+ "unit_systemd-ask-password-console_path_state_active": 1,
+ "unit_systemd-ask-password-console_path_state_deactivating": 0,
+ "unit_systemd-ask-password-console_path_state_failed": 0,
+ "unit_systemd-ask-password-console_path_state_inactive": 0,
+ "unit_systemd-ask-password-wall_path_state_activating": 0,
+ "unit_systemd-ask-password-wall_path_state_active": 1,
+ "unit_systemd-ask-password-wall_path_state_deactivating": 0,
+ "unit_systemd-ask-password-wall_path_state_failed": 0,
+ "unit_systemd-ask-password-wall_path_state_inactive": 0,
+ "unit_systemd-ask-password-wall_service_state_activating": 0,
+ "unit_systemd-ask-password-wall_service_state_active": 0,
+ "unit_systemd-ask-password-wall_service_state_deactivating": 0,
+ "unit_systemd-ask-password-wall_service_state_failed": 0,
+ "unit_systemd-ask-password-wall_service_state_inactive": 1,
+ "unit_systemd-fsck-root_service_state_activating": 0,
+ "unit_systemd-fsck-root_service_state_active": 0,
+ "unit_systemd-fsck-root_service_state_deactivating": 0,
+ "unit_systemd-fsck-root_service_state_failed": 0,
+ "unit_systemd-fsck-root_service_state_inactive": 1,
+ "unit_systemd-udevd-kernel_socket_state_activating": 0,
+ "unit_systemd-udevd-kernel_socket_state_active": 1,
+ "unit_systemd-udevd-kernel_socket_state_deactivating": 0,
+ "unit_systemd-udevd-kernel_socket_state_failed": 0,
+ "unit_systemd-udevd-kernel_socket_state_inactive": 0,
+ "unit_tmp_mount_state_activating": 0,
+ "unit_tmp_mount_state_active": 1,
+ "unit_tmp_mount_state_deactivating": 0,
+ "unit_tmp_mount_state_failed": 0,
+ "unit_tmp_mount_state_inactive": 0,
+ "unit_user-runtime-dir@1000_service_state_activating": 0,
+ "unit_user-runtime-dir@1000_service_state_active": 1,
+ "unit_user-runtime-dir@1000_service_state_deactivating": 0,
+ "unit_user-runtime-dir@1000_service_state_failed": 0,
+ "unit_user-runtime-dir@1000_service_state_inactive": 0,
+ "unit_user@1000_service_state_activating": 0,
+ "unit_user@1000_service_state_active": 1,
+ "unit_user@1000_service_state_deactivating": 0,
+ "unit_user@1000_service_state_failed": 0,
+ "unit_user@1000_service_state_inactive": 0,
+ "unit_user_slice_state_activating": 0,
+ "unit_user_slice_state_active": 1,
+ "unit_user_slice_state_deactivating": 0,
+ "unit_user_slice_state_failed": 0,
+ "unit_user_slice_state_inactive": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_activating": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_active": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_deactivating": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_failed": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_inactive": 1,
+ },
+ },
+ "success v230+ on collecting all unit type with skip transient": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*"}
+ systemd.SkipTransient = true
+ systemd.client = prepareOKClient(230)
+ return systemd
+ },
+ wantCollected: map[string]int64{
+ "unit_systemd-ask-password-wall_service_state_activating": 0,
+ "unit_systemd-ask-password-wall_service_state_active": 0,
+ "unit_systemd-ask-password-wall_service_state_deactivating": 0,
+ "unit_systemd-ask-password-wall_service_state_failed": 0,
+ "unit_systemd-ask-password-wall_service_state_inactive": 1,
+ "unit_systemd-fsck-root_service_state_activating": 0,
+ "unit_systemd-fsck-root_service_state_active": 0,
+ "unit_systemd-fsck-root_service_state_deactivating": 0,
+ "unit_systemd-fsck-root_service_state_failed": 0,
+ "unit_systemd-fsck-root_service_state_inactive": 1,
+ "unit_user-runtime-dir@1000_service_state_activating": 0,
+ "unit_user-runtime-dir@1000_service_state_active": 1,
+ "unit_user-runtime-dir@1000_service_state_deactivating": 0,
+ "unit_user-runtime-dir@1000_service_state_failed": 0,
+ "unit_user-runtime-dir@1000_service_state_inactive": 0,
+ "unit_user@1000_service_state_activating": 0,
+ "unit_user@1000_service_state_active": 1,
+ "unit_user@1000_service_state_deactivating": 0,
+ "unit_user@1000_service_state_failed": 0,
+ "unit_user@1000_service_state_inactive": 0,
+ },
+ },
+ "success v230- on collecting all unit types": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*"}
+ systemd.client = prepareOKClient(220)
+ return systemd
+ },
+ wantCollected: map[string]int64{
+ "unit_dbus_socket_state_activating": 0,
+ "unit_dbus_socket_state_active": 1,
+ "unit_dbus_socket_state_deactivating": 0,
+ "unit_dbus_socket_state_failed": 0,
+ "unit_dbus_socket_state_inactive": 0,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_activating": 0,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_active": 1,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_deactivating": 0,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_failed": 0,
+ "unit_dev-disk-by-uuid-DE44-CEE0_device_state_inactive": 0,
+ "unit_dev-nvme0n1_device_state_activating": 0,
+ "unit_dev-nvme0n1_device_state_active": 1,
+ "unit_dev-nvme0n1_device_state_deactivating": 0,
+ "unit_dev-nvme0n1_device_state_failed": 0,
+ "unit_dev-nvme0n1_device_state_inactive": 0,
+ "unit_docker_socket_state_activating": 0,
+ "unit_docker_socket_state_active": 1,
+ "unit_docker_socket_state_deactivating": 0,
+ "unit_docker_socket_state_failed": 0,
+ "unit_docker_socket_state_inactive": 0,
+ "unit_getty-pre_target_state_activating": 0,
+ "unit_getty-pre_target_state_active": 0,
+ "unit_getty-pre_target_state_deactivating": 0,
+ "unit_getty-pre_target_state_failed": 0,
+ "unit_getty-pre_target_state_inactive": 1,
+ "unit_init_scope_state_activating": 0,
+ "unit_init_scope_state_active": 1,
+ "unit_init_scope_state_deactivating": 0,
+ "unit_init_scope_state_failed": 0,
+ "unit_init_scope_state_inactive": 0,
+ "unit_logrotate_timer_state_activating": 0,
+ "unit_logrotate_timer_state_active": 1,
+ "unit_logrotate_timer_state_deactivating": 0,
+ "unit_logrotate_timer_state_failed": 0,
+ "unit_logrotate_timer_state_inactive": 0,
+ "unit_lvm2-lvmetad_socket_state_activating": 0,
+ "unit_lvm2-lvmetad_socket_state_active": 1,
+ "unit_lvm2-lvmetad_socket_state_deactivating": 0,
+ "unit_lvm2-lvmetad_socket_state_failed": 0,
+ "unit_lvm2-lvmetad_socket_state_inactive": 0,
+ "unit_lvm2-lvmpolld_socket_state_activating": 0,
+ "unit_lvm2-lvmpolld_socket_state_active": 1,
+ "unit_lvm2-lvmpolld_socket_state_deactivating": 0,
+ "unit_lvm2-lvmpolld_socket_state_failed": 0,
+ "unit_lvm2-lvmpolld_socket_state_inactive": 0,
+ "unit_man-db_timer_state_activating": 0,
+ "unit_man-db_timer_state_active": 1,
+ "unit_man-db_timer_state_deactivating": 0,
+ "unit_man-db_timer_state_failed": 0,
+ "unit_man-db_timer_state_inactive": 0,
+ "unit_org.cups.cupsd_path_state_activating": 0,
+ "unit_org.cups.cupsd_path_state_active": 1,
+ "unit_org.cups.cupsd_path_state_deactivating": 0,
+ "unit_org.cups.cupsd_path_state_failed": 0,
+ "unit_org.cups.cupsd_path_state_inactive": 0,
+ "unit_pamac-cleancache_timer_state_activating": 0,
+ "unit_pamac-cleancache_timer_state_active": 1,
+ "unit_pamac-cleancache_timer_state_deactivating": 0,
+ "unit_pamac-cleancache_timer_state_failed": 0,
+ "unit_pamac-cleancache_timer_state_inactive": 0,
+ "unit_pamac-mirrorlist_timer_state_activating": 0,
+ "unit_pamac-mirrorlist_timer_state_active": 1,
+ "unit_pamac-mirrorlist_timer_state_deactivating": 0,
+ "unit_pamac-mirrorlist_timer_state_failed": 0,
+ "unit_pamac-mirrorlist_timer_state_inactive": 0,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_activating": 0,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_active": 1,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_deactivating": 0,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_failed": 0,
+ "unit_proc-sys-fs-binfmt_misc_automount_state_inactive": 0,
+ "unit_remote-fs-pre_target_state_activating": 0,
+ "unit_remote-fs-pre_target_state_active": 0,
+ "unit_remote-fs-pre_target_state_deactivating": 0,
+ "unit_remote-fs-pre_target_state_failed": 0,
+ "unit_remote-fs-pre_target_state_inactive": 1,
+ "unit_rpc_pipefs_target_state_activating": 0,
+ "unit_rpc_pipefs_target_state_active": 0,
+ "unit_rpc_pipefs_target_state_deactivating": 0,
+ "unit_rpc_pipefs_target_state_failed": 0,
+ "unit_rpc_pipefs_target_state_inactive": 1,
+ "unit_run-user-1000-gvfs_mount_state_activating": 0,
+ "unit_run-user-1000-gvfs_mount_state_active": 1,
+ "unit_run-user-1000-gvfs_mount_state_deactivating": 0,
+ "unit_run-user-1000-gvfs_mount_state_failed": 0,
+ "unit_run-user-1000-gvfs_mount_state_inactive": 0,
+ "unit_run-user-1000_mount_state_activating": 0,
+ "unit_run-user-1000_mount_state_active": 1,
+ "unit_run-user-1000_mount_state_deactivating": 0,
+ "unit_run-user-1000_mount_state_failed": 0,
+ "unit_run-user-1000_mount_state_inactive": 0,
+ "unit_session-1_scope_state_activating": 0,
+ "unit_session-1_scope_state_active": 1,
+ "unit_session-1_scope_state_deactivating": 0,
+ "unit_session-1_scope_state_failed": 0,
+ "unit_session-1_scope_state_inactive": 0,
+ "unit_session-2_scope_state_activating": 0,
+ "unit_session-2_scope_state_active": 1,
+ "unit_session-2_scope_state_deactivating": 0,
+ "unit_session-2_scope_state_failed": 0,
+ "unit_session-2_scope_state_inactive": 0,
+ "unit_session-3_scope_state_activating": 0,
+ "unit_session-3_scope_state_active": 1,
+ "unit_session-3_scope_state_deactivating": 0,
+ "unit_session-3_scope_state_failed": 0,
+ "unit_session-3_scope_state_inactive": 0,
+ "unit_session-6_scope_state_activating": 0,
+ "unit_session-6_scope_state_active": 1,
+ "unit_session-6_scope_state_deactivating": 0,
+ "unit_session-6_scope_state_failed": 0,
+ "unit_session-6_scope_state_inactive": 0,
+ "unit_shadow_timer_state_activating": 0,
+ "unit_shadow_timer_state_active": 1,
+ "unit_shadow_timer_state_deactivating": 0,
+ "unit_shadow_timer_state_failed": 0,
+ "unit_shadow_timer_state_inactive": 0,
+ "unit_sound_target_state_activating": 0,
+ "unit_sound_target_state_active": 1,
+ "unit_sound_target_state_deactivating": 0,
+ "unit_sound_target_state_failed": 0,
+ "unit_sound_target_state_inactive": 0,
+ "unit_sys-devices-virtual-net-loopback1_device_state_activating": 0,
+ "unit_sys-devices-virtual-net-loopback1_device_state_active": 1,
+ "unit_sys-devices-virtual-net-loopback1_device_state_deactivating": 0,
+ "unit_sys-devices-virtual-net-loopback1_device_state_failed": 0,
+ "unit_sys-devices-virtual-net-loopback1_device_state_inactive": 0,
+ "unit_sys-module-fuse_device_state_activating": 0,
+ "unit_sys-module-fuse_device_state_active": 1,
+ "unit_sys-module-fuse_device_state_deactivating": 0,
+ "unit_sys-module-fuse_device_state_failed": 0,
+ "unit_sys-module-fuse_device_state_inactive": 0,
+ "unit_sysinit_target_state_activating": 0,
+ "unit_sysinit_target_state_active": 1,
+ "unit_sysinit_target_state_deactivating": 0,
+ "unit_sysinit_target_state_failed": 0,
+ "unit_sysinit_target_state_inactive": 0,
+ "unit_system-getty_slice_state_activating": 0,
+ "unit_system-getty_slice_state_active": 1,
+ "unit_system-getty_slice_state_deactivating": 0,
+ "unit_system-getty_slice_state_failed": 0,
+ "unit_system-getty_slice_state_inactive": 0,
+ "unit_system-netctl_slice_state_activating": 0,
+ "unit_system-netctl_slice_state_active": 1,
+ "unit_system-netctl_slice_state_deactivating": 0,
+ "unit_system-netctl_slice_state_failed": 0,
+ "unit_system-netctl_slice_state_inactive": 0,
+ "unit_system-systemd-fsck_slice_state_activating": 0,
+ "unit_system-systemd-fsck_slice_state_active": 1,
+ "unit_system-systemd-fsck_slice_state_deactivating": 0,
+ "unit_system-systemd-fsck_slice_state_failed": 0,
+ "unit_system-systemd-fsck_slice_state_inactive": 0,
+ "unit_system_slice_state_activating": 0,
+ "unit_system_slice_state_active": 1,
+ "unit_system_slice_state_deactivating": 0,
+ "unit_system_slice_state_failed": 0,
+ "unit_system_slice_state_inactive": 0,
+ "unit_systemd-ask-password-console_path_state_activating": 0,
+ "unit_systemd-ask-password-console_path_state_active": 1,
+ "unit_systemd-ask-password-console_path_state_deactivating": 0,
+ "unit_systemd-ask-password-console_path_state_failed": 0,
+ "unit_systemd-ask-password-console_path_state_inactive": 0,
+ "unit_systemd-ask-password-wall_path_state_activating": 0,
+ "unit_systemd-ask-password-wall_path_state_active": 1,
+ "unit_systemd-ask-password-wall_path_state_deactivating": 0,
+ "unit_systemd-ask-password-wall_path_state_failed": 0,
+ "unit_systemd-ask-password-wall_path_state_inactive": 0,
+ "unit_systemd-ask-password-wall_service_state_activating": 0,
+ "unit_systemd-ask-password-wall_service_state_active": 0,
+ "unit_systemd-ask-password-wall_service_state_deactivating": 0,
+ "unit_systemd-ask-password-wall_service_state_failed": 0,
+ "unit_systemd-ask-password-wall_service_state_inactive": 1,
+ "unit_systemd-fsck-root_service_state_activating": 0,
+ "unit_systemd-fsck-root_service_state_active": 0,
+ "unit_systemd-fsck-root_service_state_deactivating": 0,
+ "unit_systemd-fsck-root_service_state_failed": 0,
+ "unit_systemd-fsck-root_service_state_inactive": 1,
+ "unit_systemd-udevd-kernel_socket_state_activating": 0,
+ "unit_systemd-udevd-kernel_socket_state_active": 1,
+ "unit_systemd-udevd-kernel_socket_state_deactivating": 0,
+ "unit_systemd-udevd-kernel_socket_state_failed": 0,
+ "unit_systemd-udevd-kernel_socket_state_inactive": 0,
+ "unit_tmp_mount_state_activating": 0,
+ "unit_tmp_mount_state_active": 1,
+ "unit_tmp_mount_state_deactivating": 0,
+ "unit_tmp_mount_state_failed": 0,
+ "unit_tmp_mount_state_inactive": 0,
+ "unit_user-runtime-dir@1000_service_state_activating": 0,
+ "unit_user-runtime-dir@1000_service_state_active": 1,
+ "unit_user-runtime-dir@1000_service_state_deactivating": 0,
+ "unit_user-runtime-dir@1000_service_state_failed": 0,
+ "unit_user-runtime-dir@1000_service_state_inactive": 0,
+ "unit_user@1000_service_state_activating": 0,
+ "unit_user@1000_service_state_active": 1,
+ "unit_user@1000_service_state_deactivating": 0,
+ "unit_user@1000_service_state_failed": 0,
+ "unit_user@1000_service_state_inactive": 0,
+ "unit_user_slice_state_activating": 0,
+ "unit_user_slice_state_active": 1,
+ "unit_user_slice_state_deactivating": 0,
+ "unit_user_slice_state_failed": 0,
+ "unit_user_slice_state_inactive": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_activating": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_active": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_deactivating": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_failed": 0,
+ "unit_var-lib-nfs-rpc_pipefs_mount_state_inactive": 1,
+ },
+ },
+ "success v230+ on collecting only 'service' units": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*.service"}
+ systemd.client = prepareOKClient(230)
+ return systemd
+ },
+ wantCollected: map[string]int64{
+ "unit_systemd-ask-password-wall_service_state_activating": 0,
+ "unit_systemd-ask-password-wall_service_state_active": 0,
+ "unit_systemd-ask-password-wall_service_state_deactivating": 0,
+ "unit_systemd-ask-password-wall_service_state_failed": 0,
+ "unit_systemd-ask-password-wall_service_state_inactive": 1,
+ "unit_systemd-fsck-root_service_state_activating": 0,
+ "unit_systemd-fsck-root_service_state_active": 0,
+ "unit_systemd-fsck-root_service_state_deactivating": 0,
+ "unit_systemd-fsck-root_service_state_failed": 0,
+ "unit_systemd-fsck-root_service_state_inactive": 1,
+ "unit_user-runtime-dir@1000_service_state_activating": 0,
+ "unit_user-runtime-dir@1000_service_state_active": 1,
+ "unit_user-runtime-dir@1000_service_state_deactivating": 0,
+ "unit_user-runtime-dir@1000_service_state_failed": 0,
+ "unit_user-runtime-dir@1000_service_state_inactive": 0,
+ "unit_user@1000_service_state_activating": 0,
+ "unit_user@1000_service_state_active": 1,
+ "unit_user@1000_service_state_deactivating": 0,
+ "unit_user@1000_service_state_failed": 0,
+ "unit_user@1000_service_state_inactive": 0,
+ },
+ },
+ "success v230- on collecting only 'service' units": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*.service"}
+ systemd.client = prepareOKClient(220)
+ return systemd
+ },
+ wantCollected: map[string]int64{
+ "unit_systemd-ask-password-wall_service_state_activating": 0,
+ "unit_systemd-ask-password-wall_service_state_active": 0,
+ "unit_systemd-ask-password-wall_service_state_deactivating": 0,
+ "unit_systemd-ask-password-wall_service_state_failed": 0,
+ "unit_systemd-ask-password-wall_service_state_inactive": 1,
+ "unit_systemd-fsck-root_service_state_activating": 0,
+ "unit_systemd-fsck-root_service_state_active": 0,
+ "unit_systemd-fsck-root_service_state_deactivating": 0,
+ "unit_systemd-fsck-root_service_state_failed": 0,
+ "unit_systemd-fsck-root_service_state_inactive": 1,
+ "unit_user-runtime-dir@1000_service_state_activating": 0,
+ "unit_user-runtime-dir@1000_service_state_active": 1,
+ "unit_user-runtime-dir@1000_service_state_deactivating": 0,
+ "unit_user-runtime-dir@1000_service_state_failed": 0,
+ "unit_user-runtime-dir@1000_service_state_inactive": 0,
+ "unit_user@1000_service_state_activating": 0,
+ "unit_user@1000_service_state_active": 1,
+ "unit_user@1000_service_state_deactivating": 0,
+ "unit_user@1000_service_state_failed": 0,
+ "unit_user@1000_service_state_inactive": 0,
+ },
+ },
+ "success v230+ on collecting only 'service' units and files": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*.service"}
+ systemd.IncludeUnitFiles = []string{"*.service", "*.slice"}
+ systemd.CollectUnitFiles = true
+ systemd.client = prepareOKClient(230)
+ return systemd
+ },
+ wantCollected: map[string]int64{
+ "unit_file_/lib/systemd/system/machine.slice_state_alias": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_bad": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_disabled": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_enabled": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_enabled-runtime": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_generated": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_indirect": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_linked": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_linked-runtime": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_masked": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_masked-runtime": 0,
+ "unit_file_/lib/systemd/system/machine.slice_state_static": 1,
+ "unit_file_/lib/systemd/system/machine.slice_state_transient": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_alias": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_bad": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_disabled": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_enabled": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_enabled-runtime": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_generated": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_indirect": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_linked": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_linked-runtime": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_masked": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_masked-runtime": 0,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_static": 1,
+ "unit_file_/lib/systemd/system/system-systemd-cryptsetup.slice_state_transient": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_alias": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_bad": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_disabled": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_enabled": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_enabled-runtime": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_generated": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_indirect": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_linked": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_linked-runtime": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_masked": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_masked-runtime": 0,
+ "unit_file_/lib/systemd/system/user.slice_state_static": 1,
+ "unit_file_/lib/systemd/system/user.slice_state_transient": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_alias": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_bad": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_disabled": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_enabled": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_enabled-runtime": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_generated": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_indirect": 1,
+ "unit_file_/lib/systemd/system/uuidd.service_state_linked": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_linked-runtime": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_masked": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_masked-runtime": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_static": 0,
+ "unit_file_/lib/systemd/system/uuidd.service_state_transient": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_alias": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_bad": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_disabled": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_enabled": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_enabled-runtime": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_generated": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_indirect": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_linked": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_linked-runtime": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_masked": 1,
+ "unit_file_/lib/systemd/system/x11-common.service_state_masked-runtime": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_static": 0,
+ "unit_file_/lib/systemd/system/x11-common.service_state_transient": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_alias": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_bad": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_disabled": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_enabled": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_enabled-runtime": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_generated": 1,
+ "unit_file_/run/systemd/generator.late/monit.service_state_indirect": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_linked": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_linked-runtime": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_masked": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_masked-runtime": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_static": 0,
+ "unit_file_/run/systemd/generator.late/monit.service_state_transient": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_alias": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_bad": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_disabled": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_enabled": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_enabled-runtime": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_generated": 1,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_indirect": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_linked": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_linked-runtime": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_masked": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_masked-runtime": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_static": 0,
+ "unit_file_/run/systemd/generator.late/sendmail.service_state_transient": 0,
+ "unit_systemd-ask-password-wall_service_state_activating": 0,
+ "unit_systemd-ask-password-wall_service_state_active": 0,
+ "unit_systemd-ask-password-wall_service_state_deactivating": 0,
+ "unit_systemd-ask-password-wall_service_state_failed": 0,
+ "unit_systemd-ask-password-wall_service_state_inactive": 1,
+ "unit_systemd-fsck-root_service_state_activating": 0,
+ "unit_systemd-fsck-root_service_state_active": 0,
+ "unit_systemd-fsck-root_service_state_deactivating": 0,
+ "unit_systemd-fsck-root_service_state_failed": 0,
+ "unit_systemd-fsck-root_service_state_inactive": 1,
+ "unit_user-runtime-dir@1000_service_state_activating": 0,
+ "unit_user-runtime-dir@1000_service_state_active": 1,
+ "unit_user-runtime-dir@1000_service_state_deactivating": 0,
+ "unit_user-runtime-dir@1000_service_state_failed": 0,
+ "unit_user-runtime-dir@1000_service_state_inactive": 0,
+ "unit_user@1000_service_state_activating": 0,
+ "unit_user@1000_service_state_active": 1,
+ "unit_user@1000_service_state_deactivating": 0,
+ "unit_user@1000_service_state_failed": 0,
+ "unit_user@1000_service_state_inactive": 0,
+ },
+ },
+ "fails when all unites are filtered": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.Include = []string{"*.not_exists"}
+ systemd.client = prepareOKClient(230)
+ return systemd
+ },
+ wantCollected: nil,
+ },
+ "fails on error on connect": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.client = prepareClientErrOnConnect()
+ return systemd
+ },
+ wantCollected: nil,
+ },
+ "fails on error on get manager property": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.client = prepareClientErrOnGetManagerProperty()
+ return systemd
+ },
+ wantCollected: nil,
+ },
+ "fails on error on list units": {
+ prepare: func() *SystemdUnits {
+ systemd := New()
+ systemd.client = prepareClientErrOnListUnits()
+ return systemd
+ },
+ wantCollected: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ systemd := test.prepare()
+ require.NoError(t, systemd.Init())
+
+ var mx map[string]int64
+
+ for i := 0; i < 10; i++ {
+ mx = systemd.Collect()
+ }
+
+ assert.Equal(t, test.wantCollected, mx)
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, systemd, mx)
+ }
+ })
+ }
+}
+
+func TestSystemdUnits_connectionReuse(t *testing.T) {
+ systemd := New()
+ systemd.Include = []string{"*"}
+ client := prepareOKClient(230)
+ systemd.client = client
+ require.NoError(t, systemd.Init())
+
+ var collected map[string]int64
+ for i := 0; i < 10; i++ {
+ collected = systemd.Collect()
+ }
+
+ assert.NotEmpty(t, collected)
+ assert.Equal(t, 1, client.connectCalls)
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, sd *SystemdUnits, collected map[string]int64) {
+ for _, chart := range *sd.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareOKClient(ver int) *mockClient {
+ return &mockClient{
+ conn: &mockConn{
+ version: ver,
+ units: mockSystemdUnits,
+ unitFiles: mockSystemdUnitFiles,
+ },
+ }
+}
+
+func prepareClientErrOnConnect() *mockClient {
+ return &mockClient{
+ errOnConnect: true,
+ }
+}
+
+func prepareClientErrOnGetManagerProperty() *mockClient {
+ return &mockClient{
+ conn: &mockConn{
+ version: 230,
+ errOnGetManagerProperty: true,
+ units: mockSystemdUnits,
+ },
+ }
+}
+
+func prepareClientErrOnListUnits() *mockClient {
+ return &mockClient{
+ conn: &mockConn{
+ version: 230,
+ errOnListUnits: true,
+ units: mockSystemdUnits,
+ },
+ }
+}
+
+type mockClient struct {
+ conn systemdConnection
+ connectCalls int
+ errOnConnect bool
+}
+
+func (m *mockClient) connect() (systemdConnection, error) {
+ m.connectCalls++
+ if m.errOnConnect {
+ return nil, errors.New("mock 'connect' error")
+ }
+ return m.conn, nil
+}
+
+type mockConn struct {
+ version int
+ errOnGetManagerProperty bool
+
+ units []dbus.UnitStatus
+ errOnListUnits bool
+
+ unitFiles []dbus.UnitFile
+ errOnListUnitFiles bool
+
+ closeCalled bool
+}
+
+func (m *mockConn) Close() {
+ m.closeCalled = true
+}
+
+func (m *mockConn) GetManagerProperty(prop string) (string, error) {
+ if m.errOnGetManagerProperty {
+ return "", errors.New("'GetManagerProperty' call error")
+ }
+ if prop != versionProperty {
+ return "", fmt.Errorf("'GetManagerProperty' unkown property: %s", prop)
+ }
+
+ return fmt.Sprintf("%d.6-1-manjaro", m.version), nil
+}
+
+func (m *mockConn) GetUnitPropertyContext(_ context.Context, unit string, propertyName string) (*dbus.Property, error) {
+ if propertyName != transientProperty {
+ return nil, fmt.Errorf("'GetUnitProperty' unkown property name: %s", propertyName)
+ }
+
+ var prop dbus.Property
+
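+	// report non-service units as transient ("true") and services as not transient ("false"),
+	// so the SkipTransient filter can be exercised; dbus.PropDescription is only used here as a
+	// convenient way to build a string-valued property (its Name is overwritten below)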
+ if strings.HasSuffix(unit, ".service") {
+ prop = dbus.PropDescription("false")
+ } else {
+ prop = dbus.PropDescription("true")
+ }
+
+ prop.Name = propertyName
+
+ return &prop, nil
+}
+
+func (m *mockConn) ListUnitsContext(_ context.Context) ([]dbus.UnitStatus, error) {
+ if m.errOnListUnits {
+ return nil, errors.New("'ListUnits' call error")
+ }
+ if m.version >= 230 {
+ return nil, errors.New("'ListUnits' unsupported function error")
+ }
+
+ return append([]dbus.UnitStatus{}, m.units...), nil
+}
+
+func (m *mockConn) ListUnitsByPatternsContext(_ context.Context, _ []string, patterns []string) ([]dbus.UnitStatus, error) {
+ if m.errOnListUnits {
+ return nil, errors.New("'ListUnitsByPatterns' call error")
+ }
+ if m.version < 230 {
+ return nil, errors.New("'ListUnitsByPatterns' unsupported function error")
+ }
+
+ if len(m.units) == 0 {
+ return nil, nil
+ }
+
+ units := append([]dbus.UnitStatus{}, m.units...)
+
+ units = slices.DeleteFunc(units, func(u dbus.UnitStatus) bool {
+ name := cleanUnitName(u.Name)
+ for _, p := range patterns {
+ if ok, _ := filepath.Match(p, name); ok {
+ return false
+ }
+ }
+ return true
+ })
+
+ return units, nil
+}
+
+func (m *mockConn) ListUnitFilesByPatternsContext(_ context.Context, _ []string, patterns []string) ([]dbus.UnitFile, error) {
+ if m.errOnListUnitFiles {
+ return nil, errors.New("'ListUnitFilesByPatternsContex' call error")
+ }
+ if m.version < 230 {
+ return nil, errors.New("'ListUnitFilesByPatternsContex' unsupported function error")
+ }
+
+ if len(m.unitFiles) == 0 {
+ return nil, nil
+ }
+
+ unitFiles := append([]dbus.UnitFile{}, m.unitFiles...)
+
+ unitFiles = slices.DeleteFunc(unitFiles, func(file dbus.UnitFile) bool {
+ _, name := filepath.Split(file.Path)
+ for _, p := range patterns {
+ if ok, _ := filepath.Match(p, name); ok {
+ return false
+ }
+ }
+ return true
+ })
+
+ return unitFiles, nil
+}
+
+var mockSystemdUnits = []dbus.UnitStatus{
+ {Name: `proc-sys-fs-binfmt_misc.automount`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `dev-nvme0n1.device`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `sys-devices-virtual-net-loopback1.device`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `sys-module-fuse.device`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `dev-disk-by\x2duuid-DE44\x2dCEE0.device`, LoadState: "loaded", ActiveState: "active"},
+
+ {Name: `var-lib-nfs-rpc_pipefs.mount`, LoadState: "loaded", ActiveState: "inactive"},
+ {Name: `var.mount`, LoadState: "not-found", ActiveState: "inactive"},
+ {Name: `run-user-1000.mount`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `tmp.mount`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `run-user-1000-gvfs.mount`, LoadState: "loaded", ActiveState: "active"},
+
+ {Name: `org.cups.cupsd.path`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `systemd-ask-password-wall.path`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `systemd-ask-password-console.path`, LoadState: "loaded", ActiveState: "active"},
+
+ {Name: `init.scope`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `session-3.scope`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `session-6.scope`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `session-1.scope`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `session-2.scope`, LoadState: "loaded", ActiveState: "active"},
+
+ {Name: `systemd-fsck-root.service`, LoadState: "loaded", ActiveState: "inactive"},
+ {Name: `httpd.service`, LoadState: "not-found", ActiveState: "inactive"},
+ {Name: `user-runtime-dir@1000.service`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `systemd-ask-password-wall.service`, LoadState: "loaded", ActiveState: "inactive"},
+ {Name: `user@1000.service`, LoadState: "loaded", ActiveState: "active"},
+
+ {Name: `user.slice`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `system-getty.slice`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `system-netctl.slice`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `system.slice`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `system-systemd\x2dfsck.slice`, LoadState: "loaded", ActiveState: "active"},
+
+ {Name: `lvm2-lvmpolld.socket`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `docker.socket`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `systemd-udevd-kernel.socket`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `dbus.socket`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `lvm2-lvmetad.socket`, LoadState: "loaded", ActiveState: "active"},
+
+ {Name: `getty-pre.target`, LoadState: "loaded", ActiveState: "inactive"},
+ {Name: `rpc_pipefs.target`, LoadState: "loaded", ActiveState: "inactive"},
+ {Name: `remote-fs-pre.target`, LoadState: "loaded", ActiveState: "inactive"},
+ {Name: `sysinit.target`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `sound.target`, LoadState: "loaded", ActiveState: "active"},
+
+ {Name: `man-db.timer`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `pamac-mirrorlist.timer`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `pamac-cleancache.timer`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `shadow.timer`, LoadState: "loaded", ActiveState: "active"},
+ {Name: `logrotate.timer`, LoadState: "loaded", ActiveState: "active"},
+}
+
+var mockSystemdUnitFiles = []dbus.UnitFile{
+ {Path: "/lib/systemd/system/systemd-tmpfiles-clean.timer", Type: "static"},
+ {Path: "/lib/systemd/system/sysstat-summary.timer", Type: "disabled"},
+ {Path: "/lib/systemd/system/sysstat-collect.timer", Type: "disabled"},
+ {Path: "/lib/systemd/system/pg_dump@.timer", Type: "disabled"},
+
+ {Path: "/lib/systemd/system/veritysetup.target", Type: "static"},
+ {Path: "/lib/systemd/system/veritysetup-pre.target", Type: "static"},
+ {Path: "/lib/systemd/system/usb-gadget.target", Type: "static"},
+ {Path: "/lib/systemd/system/umount.target", Type: "static"},
+
+ {Path: "/lib/systemd/system/syslog.socket", Type: "static"},
+ {Path: "/lib/systemd/system/ssh.socket", Type: "disabled"},
+ {Path: "/lib/systemd/system/docker.socket", Type: "enabled"},
+ {Path: "/lib/systemd/system/dbus.socket", Type: "static"},
+
+ {Path: "/lib/systemd/system/user.slice", Type: "static"},
+ {Path: "/lib/systemd/system/system-systemd\x2dcryptsetup.slice", Type: "static"},
+ {Path: "/lib/systemd/system/machine.slice", Type: "static"},
+
+ {Path: "/run/systemd/generator.late/sendmail.service", Type: "generated"},
+ {Path: "/run/systemd/generator.late/monit.service", Type: "generated"},
+ {Path: "/lib/systemd/system/x11-common.service", Type: "masked"},
+ {Path: "/lib/systemd/system/uuidd.service", Type: "indirect"},
+
+ {Path: "/run/systemd/transient/session-144.scope", Type: "transient"},
+ {Path: "/run/systemd/transient/session-139.scope", Type: "transient"},
+ {Path: "/run/systemd/transient/session-132.scope", Type: "transient"},
+
+ {Path: "/lib/systemd/system/systemd-ask-password-wall.path", Type: "static"},
+ {Path: "/lib/systemd/system/systemd-ask-password-console.path", Type: "static"},
+ {Path: "/lib/systemd/system/postfix-resolvconf.path", Type: "disabled"},
+ {Path: "/lib/systemd/system/ntpsec-systemd-netif.path", Type: "enabled"},
+
+ {Path: "/run/systemd/generator/media-cdrom0.mount", Type: "generated"},
+ {Path: "/run/systemd/generator/boot.mount", Type: "generated"},
+ {Path: "/run/systemd/generator/-.mount", Type: "generated"},
+ {Path: "/lib/systemd/system/sys-kernel-tracing.mount", Type: "static"},
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/testdata/config.json b/src/go/plugin/go.d/modules/systemdunits/testdata/config.json
new file mode 100644
index 000000000..1ab5b47ea
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/testdata/config.json
@@ -0,0 +1,13 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "include": [
+ "ok"
+ ],
+ "skip_transient": true,
+ "collect_unit_files": true,
+ "collect_unit_files_every": 123.123,
+ "include_unit_files": [
+ "ok"
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/systemdunits/testdata/config.yaml b/src/go/plugin/go.d/modules/systemdunits/testdata/config.yaml
new file mode 100644
index 000000000..d1894aea1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/systemdunits/testdata/config.yaml
@@ -0,0 +1,9 @@
+update_every: 123
+timeout: 123.123
+include:
+ - ok
+skip_transient: true
+collect_unit_files: true
+collect_unit_files_every: 123.123
+include_unit_files:
+ - ok
diff --git a/src/go/plugin/go.d/modules/tengine/README.md b/src/go/plugin/go.d/modules/tengine/README.md
new file mode 120000
index 000000000..e016ea0c7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/README.md
@@ -0,0 +1 @@
+integrations/tengine.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/tengine/apiclient.go b/src/go/plugin/go.d/modules/tengine/apiclient.go
new file mode 100644
index 000000000..e91b99769
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/apiclient.go
@@ -0,0 +1,247 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tengine
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+const (
+ bytesIn = "bytes_in"
+ bytesOut = "bytes_out"
+ connTotal = "conn_total"
+ reqTotal = "req_total"
+ http2xx = "http_2xx"
+ http3xx = "http_3xx"
+ http4xx = "http_4xx"
+ http5xx = "http_5xx"
+ httpOtherStatus = "http_other_status"
+ rt = "rt"
+ upsReq = "ups_req"
+ upsRT = "ups_rt"
+ upsTries = "ups_tries"
+ http200 = "http_200"
+ http206 = "http_206"
+ http302 = "http_302"
+ http304 = "http_304"
+ http403 = "http_403"
+ http404 = "http_404"
+ http416 = "http_416"
+ http499 = "http_499"
+ http500 = "http_500"
+ http502 = "http_502"
+ http503 = "http_503"
+ http504 = "http_504"
+ http508 = "http_508"
+ httpOtherDetailStatus = "http_other_detail_status"
+ httpUps4xx = "http_ups_4xx"
+ httpUps5xx = "http_ups_5xx"
+)
+
+var defaultLineFormat = []string{
+ bytesIn,
+ bytesOut,
+ connTotal,
+ reqTotal,
+ http2xx,
+ http3xx,
+ http4xx,
+ http5xx,
+ httpOtherStatus,
+ rt,
+ upsReq,
+ upsRT,
+ upsTries,
+ http200,
+ http206,
+ http302,
+ http304,
+ http403,
+ http404,
+ http416,
+ http499,
+ http500,
+ http502,
+ http503,
+ http504,
+ http508,
+ httpOtherDetailStatus,
+ httpUps4xx,
+ httpUps5xx,
+}
+
+func newAPIClient(client *http.Client, request web.Request) *apiClient {
+ return &apiClient{httpClient: client, request: request}
+}
+
+type apiClient struct {
+ httpClient *http.Client
+ request web.Request
+}
+
+func (a apiClient) getStatus() (*tengineStatus, error) {
+ req, err := web.NewHTTPRequest(a.request)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating request : %v", err)
+ }
+
+ resp, err := a.doRequestOK(req)
+ defer closeBody(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ status, err := parseStatus(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error on parsing response : %v", err)
+ }
+
+ return status, nil
+}
+
+func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
+ resp, err := a.httpClient.Do(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on request : %v", err)
+ }
+ if resp.StatusCode != http.StatusOK {
+ return resp, fmt.Errorf("%s returned HTTP code %d", req.URL, resp.StatusCode)
+ }
+ return resp, nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
+
+func parseStatus(r io.Reader) (*tengineStatus, error) {
+ var status tengineStatus
+
+ s := bufio.NewScanner(r)
+ for s.Scan() {
+ m, err := parseStatusLine(s.Text(), defaultLineFormat)
+ if err != nil {
+ return nil, err
+ }
+ status = append(status, *m)
+ }
+
+ return &status, nil
+}
+
+func parseStatusLine(line string, lineFormat []string) (*metric, error) {
+ parts := strings.Split(line, ",")
+
+ // NOTE: only default line format is supported
+ // TODO: custom line format?
+ // www.example.com,127.0.0.1:80,162,6242,1,1,1,0,0,0,0,10,1,10,1....
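+	// In the sample above "www.example.com,127.0.0.1:80" is the reqstat key
+	// ("$host,$server_addr:$server_port"); the first numeric field starts the values,
+	// which follow defaultLineFormat order: bytes_in=162, bytes_out=6242,
+	// conn_total=1, req_total=1, http_2xx=1, and so on.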
+ i := findFirstInt(parts)
+ if i == -1 {
+ return nil, fmt.Errorf("invalid line : %s", line)
+ }
+ if len(parts[i:]) != len(lineFormat) {
+ return nil, fmt.Errorf("invalid line length, got %d, expected %d, line : %s",
+ len(parts[i:]), len(lineFormat), line)
+ }
+
+ // skip "$host,$server_addr:$server_port"
+ parts = parts[i:]
+
+ var m metric
+ for i, key := range lineFormat {
+ value := mustParseInt(parts[i])
+ switch key {
+ default:
+ return nil, fmt.Errorf("unknown line format key: %s", key)
+ case bytesIn:
+ m.BytesIn = value
+ case bytesOut:
+ m.BytesOut = value
+ case connTotal:
+ m.ConnTotal = value
+ case reqTotal:
+ m.ReqTotal = value
+ case http2xx:
+ m.HTTP2xx = value
+ case http3xx:
+ m.HTTP3xx = value
+ case http4xx:
+ m.HTTP4xx = value
+ case http5xx:
+ m.HTTP5xx = value
+ case httpOtherStatus:
+ m.HTTPOtherStatus = value
+ case rt:
+ m.RT = value
+ case upsReq:
+ m.UpsReq = value
+ case upsRT:
+ m.UpsRT = value
+ case upsTries:
+ m.UpsTries = value
+ case http200:
+ m.HTTP200 = value
+ case http206:
+ m.HTTP206 = value
+ case http302:
+ m.HTTP302 = value
+ case http304:
+ m.HTTP304 = value
+ case http403:
+ m.HTTP403 = value
+ case http404:
+ m.HTTP404 = value
+ case http416:
+ m.HTTP416 = value
+ case http499:
+ m.HTTP499 = value
+ case http500:
+ m.HTTP500 = value
+ case http502:
+ m.HTTP502 = value
+ case http503:
+ m.HTTP503 = value
+ case http504:
+ m.HTTP504 = value
+ case http508:
+ m.HTTP508 = value
+ case httpOtherDetailStatus:
+ m.HTTPOtherDetailStatus = value
+ case httpUps4xx:
+ m.HTTPUps4xx = value
+ case httpUps5xx:
+ m.HTTPUps5xx = value
+ }
+ }
+ return &m, nil
+}
+
+func findFirstInt(s []string) int {
+ for i, v := range s {
+ _, err := strconv.ParseInt(v, 10, 64)
+ if err != nil {
+ continue
+ }
+ return i
+ }
+ return -1
+}
+
+func mustParseInt(value string) *int64 {
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ panic(err)
+ }
+
+ return &v
+}
diff --git a/src/go/plugin/go.d/modules/tengine/charts.go b/src/go/plugin/go.d/modules/tengine/charts.go
new file mode 100644
index 000000000..bd0564aa2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/charts.go
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tengine
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+)
+
+var charts = Charts{
+ {
+ ID: "bandwidth_total",
+ Title: "Bandwidth",
+ Units: "B/s",
+ Fam: "bandwidth",
+ Ctx: "tengine.bandwidth_total",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: "bytes_in", Name: "in", Algo: module.Incremental},
+ {ID: "bytes_out", Name: "out", Algo: module.Incremental, Mul: -1},
+ },
+ },
+ {
+ ID: "connections_total",
+ Title: "Connections",
+ Units: "connections/s",
+ Fam: "connections",
+ Ctx: "tengine.connections_total",
+ Dims: Dims{
+ {ID: "conn_total", Name: "accepted", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "requests_total",
+ Title: "Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "tengine.requests_total",
+ Dims: Dims{
+ {ID: "req_total", Name: "processed", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "requests_per_response_code_family_total",
+ Title: "Requests Per Response Code Family",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "tengine.requests_per_response_code_family_total",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "http_2xx", Name: "2xx", Algo: module.Incremental},
+ {ID: "http_5xx", Name: "5xx", Algo: module.Incremental},
+ {ID: "http_3xx", Name: "3xx", Algo: module.Incremental},
+ {ID: "http_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: "http_other_status", Name: "other", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "requests_per_response_code_detailed_total",
+ Title: "Requests Per Response Code Detailed",
+ Units: "requests/s",
+ Ctx: "tengine.requests_per_response_code_detailed_total",
+ Fam: "requests",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: "http_200", Name: "200", Algo: module.Incremental},
+ {ID: "http_206", Name: "206", Algo: module.Incremental},
+ {ID: "http_302", Name: "302", Algo: module.Incremental},
+ {ID: "http_304", Name: "304", Algo: module.Incremental},
+ {ID: "http_403", Name: "403", Algo: module.Incremental},
+ {ID: "http_404", Name: "404", Algo: module.Incremental},
+ {ID: "http_416", Name: "419", Algo: module.Incremental},
+ {ID: "http_499", Name: "499", Algo: module.Incremental},
+ {ID: "http_500", Name: "500", Algo: module.Incremental},
+ {ID: "http_502", Name: "502", Algo: module.Incremental},
+ {ID: "http_503", Name: "503", Algo: module.Incremental},
+ {ID: "http_504", Name: "504", Algo: module.Incremental},
+ {ID: "http_508", Name: "508", Algo: module.Incremental},
+ {ID: "http_other_detail_status", Name: "other", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "requests_upstream_total",
+ Title: "Number Of Requests Calling For Upstream",
+ Units: "requests/s",
+ Fam: "upstream",
+ Ctx: "tengine.requests_upstream_total",
+ Dims: Dims{
+ {ID: "ups_req", Name: "requests", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "tries_upstream_total",
+ Title: "Number Of Times Calling For Upstream",
+ Units: "calls/s",
+ Fam: "upstream",
+ Ctx: "tengine.tries_upstream_total",
+ Dims: Dims{
+ {ID: "ups_tries", Name: "calls", Algo: module.Incremental},
+ },
+ },
+ {
+ ID: "requests_upstream_per_response_code_family_total",
+ Title: "Upstream Requests Per Response Code Family",
+ Units: "requests/s",
+ Fam: "upstream",
+ Type: module.Stacked,
+ Ctx: "tengine.requests_upstream_per_response_code_family_total",
+ Dims: Dims{
+ {ID: "http_ups_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: "http_ups_5xx", Name: "5xx", Algo: module.Incremental},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/tengine/collect.go b/src/go/plugin/go.d/modules/tengine/collect.go
new file mode 100644
index 000000000..ffa39019e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/collect.go
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tengine
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+func (t *Tengine) collect() (map[string]int64, error) {
+ status, err := t.apiClient.getStatus()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
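+	// status holds one entry per reqstat line (one per "$host,$server_addr:$server_port" key);
+	// aggregate them into a single metrics map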
+ for _, m := range *status {
+ for k, v := range stm.ToMap(m) {
+ mx[k] += v
+ }
+ }
+ return mx, nil
+}
diff --git a/src/go/plugin/go.d/modules/tengine/config_schema.json b/src/go/plugin/go.d/modules/tengine/config_schema.json
new file mode 100644
index 000000000..44f6968e1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Tengine collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Tengine [status page](https://tengine.taobao.org/document/http_reqstat.html).",
+ "type": "string",
+ "default": "http://127.0.0.1/us",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/tengine/integrations/tengine.md b/src/go/plugin/go.d/modules/tengine/integrations/tengine.md
new file mode 100644
index 000000000..44bec575b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/integrations/tengine.md
@@ -0,0 +1,267 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/tengine/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/tengine/metadata.yaml"
+sidebar_label: "Tengine"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tengine
+
+
+<img src="https://netdata.cloud/img/tengine.jpeg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: tengine
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Tengine servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Tengine instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| tengine.bandwidth_total | in, out | B/s |
+| tengine.connections_total | accepted | connections/s |
+| tengine.requests_total | processed | requests/s |
+| tengine.requests_per_response_code_family_total | 2xx, 3xx, 4xx, 5xx, other | requests/s |
+| tengine.requests_per_response_code_detailed_total | 200, 206, 302, 304, 403, 404, 416, 499, 500, 502, 503, 504, 508, other | requests/s |
+| tengine.requests_upstream_total | requests | requests/s |
+| tengine.tries_upstream_total | calls | calls/s |
+| tengine.requests_upstream_per_response_code_family_total | 4xx, 5xx | requests/s |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable the ngx_http_reqstat_module.
+
+To enable the module, see the [ngx_http_reqstat_module documentation](https://tengine.taobao.org/document/http_reqstat.html).
+The default line format is the only supported format.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/tengine.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/tengine.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1/us | yes |
+| timeout | HTTP request timeout. | 2 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/us
+
+```
+</details>
+
+##### HTTP authentication
+
+Local server with basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/us
+ username: foo
+ password: bar
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Tengine with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1/us
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1/us
+
+ - name: remote
+ url: http://203.0.113.10/us
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `tengine` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m tengine
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `tengine` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep tengine
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep tengine /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep tengine
+```
+
+
diff --git a/src/go/plugin/go.d/modules/tengine/metadata.yaml b/src/go/plugin/go.d/modules/tengine/metadata.yaml
new file mode 100644
index 000000000..b0778c9fc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/metadata.yaml
@@ -0,0 +1,245 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-tengine
+ plugin_name: go.d.plugin
+ module_name: tengine
+ monitored_instance:
+ name: Tengine
+ link: https://tengine.taobao.org/
+ icon_filename: tengine.jpeg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - tengine
+ - web
+ - webserver
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Tengine servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable ngx_http_reqstat_module module.
+ description: |
+              To enable the module, see the [official documentation](https://tengine.taobao.org/document/http_reqstat.html).
+ The default line format is the only supported format.
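+
+              As a rough sketch based on that documentation, the module is typically enabled with a shared memory zone and a location that exposes the stats (the zone size and the `/us` path below are illustrative and must match the `url` you configure for this collector):
+
+              ```text
+              http {
+                  req_status_zone server "$host,$server_addr:$server_port" 10M;
+                  req_status server;
+
+                  server {
+                      location /us {
+                          req_status_show on;
+                      }
+                  }
+              }
+              ```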
+ configuration:
+ file:
+ name: go.d/tengine.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1/us
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 2
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/us
+ - name: HTTP authentication
+ description: Local server with basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/us
+ username: foo
+ password: bar
+ - name: HTTPS with self-signed certificate
+ description: Tengine with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1/us
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1/us
+
+ - name: remote
+ url: http://203.0.113.10/us
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: tengine.bandwidth_total
+ description: Bandwidth
+ unit: B/s
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: tengine.connections_total
+ description: Connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: accepted
+ - name: tengine.requests_total
+ description: Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: processed
+ - name: tengine.requests_per_response_code_family_total
+ description: Requests Per Response Code Family
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: other
+ - name: tengine.requests_per_response_code_detailed_total
+ description: Requests Per Response Code Detailed
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: "200"
+ - name: "206"
+ - name: "302"
+ - name: "304"
+ - name: "403"
+ - name: "404"
+                - name: "416"
+ - name: "499"
+ - name: "500"
+ - name: "502"
+ - name: "503"
+ - name: "504"
+ - name: "508"
+ - name: other
+ - name: tengine.requests_upstream_total
+ description: Number Of Requests Calling For Upstream
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: tengine.tries_upstream_total
+ description: Number Of Times Calling For Upstream
+ unit: calls/s
+ chart_type: line
+ dimensions:
+ - name: calls
+ - name: tengine.requests_upstream_per_response_code_family_total
+ description: Upstream Requests Per Response Code Family
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: 4xx
+ - name: 5xx
diff --git a/src/go/plugin/go.d/modules/tengine/metrics.go b/src/go/plugin/go.d/modules/tengine/metrics.go
new file mode 100644
index 000000000..425559479
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/metrics.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tengine
+
+/*
+http://tengine.taobao.org/document/http_reqstat.html
+
+bytes_in total number of bytes received from client
+bytes_out total number of bytes sent to client
+conn_total total number of accepted connections
+req_total total number of processed requests
+http_2xx total number of 2xx requests
+http_3xx total number of 3xx requests
+http_4xx total number of 4xx requests
+http_5xx total number of 5xx requests
+http_other_status total number of other requests
+rt accumulation of rt
+ups_req total number of requests calling for upstream
+ups_rt accumulation of upstream rt
+ups_tries total number of times calling for upstream
+http_200 total number of 200 requests
+http_206 total number of 206 requests
+http_302 total number of 302 requests
+http_304 total number of 304 requests
+http_403 total number of 403 requests
+http_404 total number of 404 requests
+http_416 total number of 416 requests
+http_499 total number of 499 requests
+http_500 total number of 500 requests
+http_502 total number of 502 requests
+http_503 total number of 503 requests
+http_504 total number of 504 requests
+http_508 total number of 508 requests
+http_other_detail_status total number of requests of other status codes
+http_ups_4xx total number of requests of upstream 4xx
+http_ups_5xx total number of requests of upstream 5xx
+*/
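+
+// Illustrative example only (the module's actual parsing lives in its API client):
+// with the default reqstat line format, each status line starts with the host and
+// the server address, followed by the counters in the order listed above, e.g.
+//
+//	127.0.0.1,127.0.0.1:80,4350,18302,58,58,58,...
+//
+// maps to Host="127.0.0.1", ServerAddress="127.0.0.1:80", BytesIn=4350,
+// BytesOut=18302, ConnTotal=58, ReqTotal=58, HTTP2xx=58, and so on.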
+
+type (
+ tengineStatus []metric
+
+ metric struct {
+ Host string
+ ServerAddress string
+ BytesIn *int64 `stm:"bytes_in"`
+ BytesOut *int64 `stm:"bytes_out"`
+ ConnTotal *int64 `stm:"conn_total"`
+ ReqTotal *int64 `stm:"req_total"`
+ HTTP2xx *int64 `stm:"http_2xx"`
+ HTTP3xx *int64 `stm:"http_3xx"`
+ HTTP4xx *int64 `stm:"http_4xx"`
+ HTTP5xx *int64 `stm:"http_5xx"`
+ HTTPOtherStatus *int64 `stm:"http_other_status"`
+ RT *int64 `stm:"rt"`
+ UpsReq *int64 `stm:"ups_req"`
+ UpsRT *int64 `stm:"ups_rt"`
+ UpsTries *int64 `stm:"ups_tries"`
+ HTTP200 *int64 `stm:"http_200"`
+ HTTP206 *int64 `stm:"http_206"`
+ HTTP302 *int64 `stm:"http_302"`
+ HTTP304 *int64 `stm:"http_304"`
+ HTTP403 *int64 `stm:"http_403"`
+ HTTP404 *int64 `stm:"http_404"`
+ HTTP416 *int64 `stm:"http_416"`
+ HTTP499 *int64 `stm:"http_499"`
+ HTTP500 *int64 `stm:"http_500"`
+ HTTP502 *int64 `stm:"http_502"`
+ HTTP503 *int64 `stm:"http_503"`
+ HTTP504 *int64 `stm:"http_504"`
+ HTTP508 *int64 `stm:"http_508"`
+ HTTPOtherDetailStatus *int64 `stm:"http_other_detail_status"`
+ HTTPUps4xx *int64 `stm:"http_ups_4xx"`
+ HTTPUps5xx *int64 `stm:"http_ups_5xx"`
+ }
+)
diff --git a/src/go/plugin/go.d/modules/tengine/tengine.go b/src/go/plugin/go.d/modules/tengine/tengine.go
new file mode 100644
index 000000000..8f67fae46
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/tengine.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tengine
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("tengine", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Tengine {
+ return &Tengine{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1/us",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 2),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Tengine struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ apiClient *apiClient
+}
+
+func (t *Tengine) Configuration() any {
+ return t.Config
+}
+
+func (t *Tengine) Init() error {
+ if t.URL == "" {
+ t.Error("url not set")
+ return errors.New("url not set")
+ }
+
+ client, err := web.NewHTTPClient(t.Client)
+ if err != nil {
+ t.Errorf("error on creating http client : %v", err)
+ return err
+ }
+
+ t.apiClient = newAPIClient(client, t.Request)
+
+ t.Debugf("using URL: %s", t.URL)
+ t.Debugf("using timeout: %s", t.Timeout)
+
+ return nil
+}
+
+func (t *Tengine) Check() error {
+ mx, err := t.collect()
+ if err != nil {
+ t.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (t *Tengine) Charts() *module.Charts {
+ return t.charts
+}
+
+func (t *Tengine) Collect() map[string]int64 {
+ mx, err := t.collect()
+
+ if err != nil {
+ t.Error(err)
+ return nil
+ }
+
+ return mx
+}
+
+func (t *Tengine) Cleanup() {
+ if t.apiClient != nil && t.apiClient.httpClient != nil {
+ t.apiClient.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/tengine/tengine_test.go b/src/go/plugin/go.d/modules/tengine/tengine_test.go
new file mode 100644
index 000000000..e87e62b0c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/tengine_test.go
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tengine
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatusMetrics, _ = os.ReadFile("testdata/status.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStatusMetrics": dataStatusMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestTengine_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Tengine{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestTengine_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestTengine_Init(t *testing.T) {
+ job := New()
+
+ require.NoError(t, job.Init())
+ assert.NotNil(t, job.apiClient)
+}
+
+func TestTengine_Check(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
+}
+
+func TestTengine_CheckNG(t *testing.T) {
+ job := New()
+
+ job.URL = "http://127.0.0.1:38001/us"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestTengine_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
+
+func TestTengine_Collect(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataStatusMetrics)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
+
+ expected := map[string]int64{
+ "bytes_in": 5944,
+ "bytes_out": 20483,
+ "conn_total": 354,
+ "http_200": 1536,
+ "http_206": 0,
+ "http_2xx": 1536,
+ "http_302": 43,
+ "http_304": 0,
+ "http_3xx": 50,
+ "http_403": 1,
+ "http_404": 75,
+ "http_416": 0,
+ "http_499": 0,
+ "http_4xx": 80,
+ "http_500": 0,
+ "http_502": 1,
+ "http_503": 0,
+ "http_504": 0,
+ "http_508": 0,
+ "http_5xx": 1,
+ "http_other_detail_status": 11,
+ "http_other_status": 0,
+ "http_ups_4xx": 26,
+ "http_ups_5xx": 1,
+ "req_total": 1672,
+ "rt": 1339,
+ "ups_req": 268,
+ "ups_rt": 644,
+ "ups_tries": 268,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestTengine_InvalidData(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and goodbye"))
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
+
+func TestTengine_404(t *testing.T) {
+ ts := httptest.NewServer(
+ http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ defer ts.Close()
+
+ job := New()
+ job.URL = ts.URL
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
+}
diff --git a/src/go/plugin/go.d/modules/tengine/testdata/config.json b/src/go/plugin/go.d/modules/tengine/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/tengine/testdata/config.yaml b/src/go/plugin/go.d/modules/tengine/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/tengine/testdata/status.txt b/src/go/plugin/go.d/modules/tengine/testdata/status.txt
new file mode 100644
index 000000000..dff2ec2d6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tengine/testdata/status.txt
@@ -0,0 +1,3 @@
+100.127.0.91,100.127.0.91:80,1594,2181,6,7,7,0,0,0,0,0,0,0,0,7,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+127.0.0.1,127.0.0.1:80,4350,18302,58,58,58,0,0,0,0,0,0,0,0,58,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
+,0,0,290,1607,1471,50,80,1,0,1339,268,644,268,1471,0,43,0,1,75,0,0,0,1,0,0,0,11,26,1 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/tomcat/README.md b/src/go/plugin/go.d/modules/tomcat/README.md
new file mode 120000
index 000000000..997090c35
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/README.md
@@ -0,0 +1 @@
+integrations/tomcat.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/tomcat/charts.go b/src/go/plugin/go.d/modules/tomcat/charts.go
new file mode 100644
index 000000000..137f700b2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/charts.go
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tomcat
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioConnectorRequestsCount = module.Priority + iota
+ prioConnectorRequestsBandwidth
+ prioConnectorRequestsProcessingTime
+ prioConnectorRequestsErrors
+
+ prioConnectorRequestThreads
+
+ prioJvmMemoryUsage
+
+ prioJvmMemoryPoolMemoryUsage
+)
+
+var (
+ defaultCharts = module.Charts{
+ jvmMemoryUsageChart.Copy(),
+ }
+
+ jvmMemoryUsageChart = module.Chart{
+ ID: "jvm_memory_usage",
+ Title: "JVM Memory Usage",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "tomcat.jvm_memory_usage",
+ Type: module.Stacked,
+ Priority: prioJvmMemoryUsage,
+ Dims: module.Dims{
+ {ID: "jvm_memory_free", Name: "free"},
+ {ID: "jvm_memory_used", Name: "used"},
+ },
+ }
+)
+
+var (
+ connectorChartsTmpl = module.Charts{
+ connectorRequestsCountChartTmpl.Copy(),
+ connectorRequestsBandwidthChartTmpl.Copy(),
+ connectorRequestsProcessingTimeChartTmpl.Copy(),
+ connectorRequestsErrorsChartTmpl.Copy(),
+ connectorRequestThreadsChartTmpl.Copy(),
+ }
+
+ connectorRequestsCountChartTmpl = module.Chart{
+		ID:       "connector_%s_requests",
+ Title: "Connector Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "tomcat.connector_requests",
+ Type: module.Line,
+ Priority: prioConnectorRequestsCount,
+ Dims: module.Dims{
+ {ID: "connector_%s_request_info_request_count", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ connectorRequestsBandwidthChartTmpl = module.Chart{
+ ID: "connector_%s_requests_bandwidth",
+ Title: "Connector Requests Bandwidth",
+ Units: "bytes/s",
+ Fam: "requests",
+ Ctx: "tomcat.connector_bandwidth",
+ Type: module.Area,
+ Priority: prioConnectorRequestsBandwidth,
+ Dims: module.Dims{
+ {ID: "connector_%s_request_info_bytes_received", Name: "received", Algo: module.Incremental},
+ {ID: "connector_%s_request_info_bytes_sent", Name: "sent", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ connectorRequestsProcessingTimeChartTmpl = module.Chart{
+		ID:       "connector_%s_requests_processing_time",
+ Title: "Connector Requests Processing Time",
+ Units: "milliseconds",
+ Fam: "requests",
+ Ctx: "tomcat.connector_requests_processing_time",
+ Type: module.Line,
+ Priority: prioConnectorRequestsProcessingTime,
+ Dims: module.Dims{
+ {ID: "connector_%s_request_info_processing_time", Name: "processing_time", Algo: module.Incremental},
+ },
+ }
+ connectorRequestsErrorsChartTmpl = module.Chart{
+		ID:       "connector_%s_errors",
+ Title: "Connector Errors",
+ Units: "errors/s",
+ Fam: "requests",
+ Ctx: "tomcat.connector_errors",
+ Type: module.Line,
+ Priority: prioConnectorRequestsErrors,
+ Dims: module.Dims{
+ {ID: "connector_%s_request_info_error_count", Name: "errors", Algo: module.Incremental},
+ },
+ }
+
+ connectorRequestThreadsChartTmpl = module.Chart{
+ ID: "connector_%s_request_threads",
+ Title: "Connector Request Threads",
+ Units: "threads",
+ Fam: "threads",
+ Ctx: "tomcat.connector_request_threads",
+ Type: module.Stacked,
+ Priority: prioConnectorRequestThreads,
+ Dims: module.Dims{
+ {ID: "connector_%s_thread_info_idle", Name: "idle"},
+ {ID: "connector_%s_thread_info_busy", Name: "busy"},
+ },
+ }
+)
+
+var (
+ jvmMemoryPoolChartsTmpl = module.Charts{
+ jvmMemoryPoolMemoryUsageChartTmpl.Copy(),
+ }
+
+ jvmMemoryPoolMemoryUsageChartTmpl = module.Chart{
+ ID: "jvm_mem_pool_%s_memory_usage",
+ Title: "JVM Mem Pool Memory Usage",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "tomcat.jvm_mem_pool_memory_usage",
+ Type: module.Area,
+ Priority: prioJvmMemoryPoolMemoryUsage,
+ Dims: module.Dims{
+ {ID: "jvm_memorypool_%s_commited", Name: "commited"},
+ {ID: "jvm_memorypool_%s_used", Name: "used"},
+ {ID: "jvm_memorypool_%s_max", Name: "max"},
+ },
+ }
+)
+
+func (t *Tomcat) addConnectorCharts(name string) {
+ charts := connectorChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanName(name))
+ chart.Labels = []module.Label{
+ {Key: "connector_name", Value: strings.Trim(name, "\"")},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, cleanName(name))
+ }
+ }
+
+ if err := t.Charts().Add(*charts...); err != nil {
+ t.Warning(err)
+ }
+}
+
+func (t *Tomcat) addMemPoolCharts(name, typ string) {
+ name = strings.ReplaceAll(name, "'", "")
+
+ charts := jvmMemoryPoolChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanName(name))
+ chart.Labels = []module.Label{
+ {Key: "mempool_name", Value: name},
+ {Key: "mempool_type", Value: typ},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, cleanName(name))
+ }
+ }
+
+ if err := t.Charts().Add(*charts...); err != nil {
+ t.Warning(err)
+ }
+}
+
+func (t *Tomcat) removeConnectorCharts(name string) {
+ px := fmt.Sprintf("connector_%s_", cleanName(name))
+ t.removeCharts(px)
+}
+
+func (t *Tomcat) removeMemoryPoolCharts(name string) {
+ px := fmt.Sprintf("jvm_mem_pool_%s_", cleanName(name))
+ t.removeCharts(px)
+}
+
+func (t *Tomcat) removeCharts(prefix string) {
+ for _, chart := range *t.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/tomcat/collect.go b/src/go/plugin/go.d/modules/tomcat/collect.go
new file mode 100644
index 000000000..c6e2a74bd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/collect.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tomcat
+
+import (
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+var (
+ urlPathServerStatus = "/manager/status"
+ urlQueryServerStatus = url.Values{"XML": {"true"}}.Encode()
+)
+
+func (t *Tomcat) collect() (map[string]int64, error) {
+ mx, err := t.collectServerStatus()
+ if err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (t *Tomcat) collectServerStatus() (map[string]int64, error) {
+ resp, err := t.queryServerStatus()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(resp.Connectors) == 0 {
+ return nil, errors.New("unexpected response: not tomcat server status data")
+ }
+
+ seenConns, seenPools := make(map[string]bool), make(map[string]bool)
+
+ for i, v := range resp.Connectors {
+ resp.Connectors[i].STMKey = cleanName(v.Name)
+ ti := &resp.Connectors[i].ThreadInfo
+ ti.CurrentThreadsIdle = ti.CurrentThreadCount - ti.CurrentThreadsBusy
+
+ seenConns[v.Name] = true
+ if !t.seenConnectors[v.Name] {
+ t.seenConnectors[v.Name] = true
+ t.addConnectorCharts(v.Name)
+ }
+ }
+
+ for i, v := range resp.JVM.MemoryPools {
+ resp.JVM.MemoryPools[i].STMKey = cleanName(v.Name)
+
+ seenPools[v.Name] = true
+ if !t.seenMemPools[v.Name] {
+ t.seenMemPools[v.Name] = true
+ t.addMemPoolCharts(v.Name, v.Type)
+ }
+ }
+
+ for name := range t.seenConnectors {
+ if !seenConns[name] {
+ delete(t.seenConnectors, name)
+ t.removeConnectorCharts(name)
+ }
+ }
+
+ for name := range t.seenMemPools {
+ if !seenPools[name] {
+ delete(t.seenMemPools, name)
+ t.removeMemoryPoolCharts(name)
+ }
+ }
+
+ resp.JVM.Memory.Used = resp.JVM.Memory.Total - resp.JVM.Memory.Free
+
+ return stm.ToMap(resp), nil
+}
+
+func cleanName(name string) string {
+ r := strings.NewReplacer(" ", "_", ".", "_", "\"", "", "'", "")
+ return strings.ToLower(r.Replace(name))
+}
+
+func (t *Tomcat) queryServerStatus() (*serverStatusResponse, error) {
+ req, err := web.NewHTTPRequestWithPath(t.Request, urlPathServerStatus)
+ if err != nil {
+ return nil, err
+ }
+
+ req.URL.RawQuery = urlQueryServerStatus
+
+ var status serverStatusResponse
+
+ if err := t.doOKDecode(req, &status); err != nil {
+ return nil, err
+ }
+
+ return &status, nil
+}
+
+func (t *Tomcat) doOKDecode(req *http.Request, in interface{}) error {
+ resp, err := t.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ if err := xml.NewDecoder(resp.Body).Decode(in); err != nil {
+ return fmt.Errorf("error decoding XML response from '%s': %v", req.URL, err)
+ }
+
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/tomcat/config_schema.json b/src/go/plugin/go.d/modules/tomcat/config_schema.json
new file mode 100644
index 000000000..91d7096ee
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Tomcat collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the Tomcat server.",
+ "type": "string",
+ "default": "http://127.0.0.1:8080",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/tomcat/init.go b/src/go/plugin/go.d/modules/tomcat/init.go
new file mode 100644
index 000000000..2c2ee29e4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/init.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tomcat
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (t *Tomcat) validateConfig() error {
+ if t.URL == "" {
+ return fmt.Errorf("url not set")
+ }
+ return nil
+}
+
+func (t *Tomcat) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(t.Client)
+}
diff --git a/src/go/plugin/go.d/modules/tomcat/integrations/tomcat.md b/src/go/plugin/go.d/modules/tomcat/integrations/tomcat.md
new file mode 100644
index 000000000..b404e66e2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/integrations/tomcat.md
@@ -0,0 +1,275 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/tomcat/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/tomcat/metadata.yaml"
+sidebar_label: "Tomcat"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tomcat
+
+
+<img src="https://netdata.cloud/img/tomcat.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: tomcat
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Tomcat metrics about bandwidth, processing time, threads and more.
+
+
+It parses the information provided by the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) HTTP endpoint.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+By default, this Tomcat collector cannot access the server's status page. To enable data collection, you will need to configure access credentials with appropriate permissions.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+If the Netdata agent and Tomcat are on the same host, the collector will attempt to connect to the Tomcat server's status page at `http://localhost:8080/manager/status?XML=true`.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Tomcat instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| tomcat.jvm_memory_usage | free, used | bytes |
+
+### Per jvm memory pool
+
+These metrics refer to the JVM memory pool.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mempool_name | Memory Pool name. |
+| mempool_type | Memory Pool type. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| tomcat.jvm_mem_pool_memory_usage | commited, used, max | bytes |
+
+### Per connector
+
+These metrics refer to the connector.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| connector_name | Connector name. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| tomcat.connector_requests | requests | requests/s |
+| tomcat.connector_bandwidth | received, sent | bytes/s |
+| tomcat.connector_requests_processing_time | processing_time | milliseconds |
+| tomcat.connector_errors | errors | errors/s |
+| tomcat.connector_request_threads | idle, busy | threads |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Access to Tomcat Status Endpoint
+
+The Netdata agent needs read-only access to the Tomcat server's status endpoint to collect data.
+
+You can achieve this by creating a dedicated user named `netdata` with read-only permissions specifically for accessing the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) endpoint.
+
+Once you've created the `netdata` user, you'll need to configure the username and password in the collector configuration file.
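+
+As an illustrative sketch (not an official recommendation), a minimal `conf/tomcat-users.xml` entry for such a user could use the built-in `manager-status` role with a placeholder password:
+
+```xml
+<tomcat-users>
+  <role rolename="manager-status"/>
+  <user username="netdata" password="REPLACE_WITH_A_STRONG_PASSWORD" roles="manager-status"/>
+</tomcat-users>
+```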
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/tomcat.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/tomcat.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8080 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ username: John
+ password: Doe
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ username: admin1
+ password: hackme1
+
+ - name: remote
+ url: http://192.0.2.1:8080
+ username: admin2
+ password: hackme2
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `tomcat` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m tomcat
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `tomcat` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep tomcat
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep tomcat /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep tomcat
+```
+
+
diff --git a/src/go/plugin/go.d/modules/tomcat/metadata.yaml b/src/go/plugin/go.d/modules/tomcat/metadata.yaml
new file mode 100644
index 000000000..d5815cf70
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/metadata.yaml
@@ -0,0 +1,241 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ plugin_name: go.d.plugin
+ module_name: tomcat
+ monitored_instance:
+ name: Tomcat
+ link: "https://tomcat.apache.org/"
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: "tomcat.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - apache
+ - tomcat
+ - webserver
+ - websocket
+ - jakarta
+ - javaEE
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Tomcat metrics about bandwidth, processing time, threads and more.
+ method_description: |
+ It parses the information provided by the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) HTTP endpoint.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: >
+ By default, this Tomcat collector cannot access the server's status page.
+ To enable data collection, you will need to configure access credentials with appropriate permissions.
+ default_behavior:
+ auto_detection:
+ description: >
+ If the Netdata agent and Tomcat are on the same host, the collector will attempt to connect to the Tomcat server's status page at `http://localhost:8080/manager/status?XML=true`.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Access to Tomcat Status Endpoint
+ description: |
+              The Netdata agent needs read-only access to the Tomcat server's status endpoint to collect data.
+
+ You can achieve this by creating a dedicated user named `netdata` with read-only permissions specifically for accessing the [Server Status](https://tomcat.apache.org/tomcat-10.0-doc/manager-howto.html#Server_Status) endpoint.
+
+ Once you've created the `netdata` user, you'll need to configure the username and password in the collector configuration file.
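+
+              As an illustrative sketch (not an official recommendation), a minimal `conf/tomcat-users.xml` entry for such a user could use the built-in `manager-status` role with a placeholder password:
+
+              ```xml
+              <tomcat-users>
+                <role rolename="manager-status"/>
+                <user username="netdata" password="REPLACE_WITH_A_STRONG_PASSWORD" roles="manager-status"/>
+              </tomcat-users>
+              ```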
+ configuration:
+ file:
+ name: "go.d/tomcat.conf"
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8080
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+              default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: "Config"
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ username: John
+ password: Doe
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8080
+ username: admin1
+ password: hackme1
+
+ - name: remote
+ url: http://192.0.2.1:8080
+ username: admin2
+ password: hackme2
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: tomcat.jvm_memory_usage
+              description: JVM Memory Usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: jvm memory pool
+ description: "These metrics refer to the JVM memory pool."
+ labels:
+ - name: mempool_name
+ description: Memory Pool name.
+ - name: mempool_type
+ description: Memory Pool type.
+ metrics:
+ - name: tomcat.jvm_mem_pool_memory_usage
+ description: JVM Mem Pool Memory Usage
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: commited
+ - name: used
+ - name: max
+ - name: connector
+ description: "These metrics refer to the connector."
+ labels:
+ - name: connector_name
+ description: Connector name.
+ metrics:
+ - name: tomcat.connector_requests
+ description: Connector Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: tomcat.connector_bandwidth
+ description: Connector Bandwidth
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: tomcat.connector_requests_processing_time
+ description: Connector Requests Processing Time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: processing_time
+ - name: tomcat.connector_errors
+ description: Connector Errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: errors
+ - name: tomcat.connector_request_threads
+ description: Connector Request Threads
+ unit: threads
+ chart_type: stacked
+ dimensions:
+ - name: idle
+ - name: busy
diff --git a/src/go/plugin/go.d/modules/tomcat/status_response.go b/src/go/plugin/go.d/modules/tomcat/status_response.go
new file mode 100644
index 000000000..1459bd56d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/status_response.go
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tomcat
+
+import "encoding/xml"
+
+type serverStatusResponse struct {
+ XMLName xml.Name `xml:"status"`
+
+ JVM struct {
+ Memory struct {
+ Used int64 `stm:"used"` // calculated manually
+ Free int64 `xml:"free,attr" stm:"free"`
+ Total int64 `xml:"total,attr" stm:"total"`
+ Max int64 `xml:"max,attr"`
+ } `xml:"memory" stm:"memory"`
+
+ MemoryPools []struct {
+ STMKey string
+
+ Name string `xml:"name,attr"`
+ Type string `xml:"type,attr"`
+ UsageInit int64 `xml:"usageInit,attr"`
+ UsageCommitted int64 `xml:"usageCommitted,attr" stm:"commited"`
+ UsageMax int64 `xml:"usageMax,attr" stm:"max"`
+ UsageUsed int64 `xml:"usageUsed,attr" stm:"used"`
+ } `xml:"memorypool" stm:"memorypool"`
+ } `xml:"jvm" stm:"jvm"`
+
+ Connectors []struct {
+ STMKey string
+
+ Name string `xml:"name,attr"`
+
+ ThreadInfo struct {
+ MaxThreads int64 `xml:"maxThreads,attr"`
+ CurrentThreadCount int64 `xml:"currentThreadCount,attr" stm:"count"`
+ CurrentThreadsBusy int64 `xml:"currentThreadsBusy,attr" stm:"busy"`
+ CurrentThreadsIdle int64 `stm:"idle"` // calculated manually
+ } `xml:"threadInfo" stm:"thread_info"`
+
+ RequestInfo struct {
+ MaxTime int64 `xml:"maxTime,attr"`
+ ProcessingTime int64 `xml:"processingTime,attr" stm:"processing_time"`
+ RequestCount int64 `xml:"requestCount,attr" stm:"request_count"`
+ ErrorCount int64 `xml:"errorCount,attr" stm:"error_count"`
+ BytesReceived int64 `xml:"bytesReceived,attr" stm:"bytes_received"`
+ BytesSent int64 `xml:"bytesSent,attr" stm:"bytes_sent"`
+ } `xml:"requestInfo" stm:"request_info"`
+ } `xml:"connector" stm:"connector"`
+}
diff --git a/src/go/plugin/go.d/modules/tomcat/testdata/config.json b/src/go/plugin/go.d/modules/tomcat/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/tomcat/testdata/config.yaml b/src/go/plugin/go.d/modules/tomcat/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/tomcat/testdata/server_status.xml b/src/go/plugin/go.d/modules/tomcat/testdata/server_status.xml
new file mode 100644
index 000000000..e4d54f4e8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/testdata/server_status.xml
@@ -0,0 +1,54 @@
+<?xml version="1.0" encoding="utf-8"?><?xml-stylesheet type="text/xsl" href="/manager/xform.xsl" ?>
+<status>
+ <jvm>
+ <memory free='144529816' total='179306496' max='1914699776'/>
+ <memorypool name='G1 Eden Space' type='Heap memory' usageInit='24117248' usageCommitted='108003328'
+ usageMax='-1' usageUsed='23068672'/>
+ <memorypool name='G1 Old Gen' type='Heap memory' usageInit='97517568' usageCommitted='66060288'
+ usageMax='1914699776' usageUsed='6175120'/>
+ <memorypool name='G1 Survivor Space' type='Heap memory' usageInit='0' usageCommitted='5242880' usageMax='-1'
+ usageUsed='5040192'/>
+ <memorypool name='CodeHeap &apos;non-nmethods&apos;' type='Non-heap memory' usageInit='2555904'
+ usageCommitted='2555904' usageMax='5840896' usageUsed='1477888'/>
+ <memorypool name='CodeHeap &apos;non-profiled nmethods&apos;' type='Non-heap memory' usageInit='2555904'
+ usageCommitted='4587520' usageMax='122908672' usageUsed='4536704'/>
+ <memorypool name='CodeHeap &apos;profiled nmethods&apos;' type='Non-heap memory' usageInit='2555904'
+ usageCommitted='13172736' usageMax='122908672' usageUsed='13132032'/>
+ <memorypool name='Compressed Class Space' type='Non-heap memory' usageInit='0' usageCommitted='1900544'
+ usageMax='1073741824' usageUsed='1712872'/>
+ <memorypool name='Metaspace' type='Non-heap memory' usageInit='0' usageCommitted='18939904' usageMax='-1'
+ usageUsed='18537336'/>
+ </jvm>
+ <connector name='"http-nio-8080"'>
+ <threadInfo maxThreads="200" currentThreadCount="10" currentThreadsBusy="1"/>
+ <requestInfo maxTime="247" processingTime="28326" requestCount="4838" errorCount="24" bytesReceived="0"
+ bytesSent="12174519"/>
+ <workers>
+ <worker stage="R" requestProcessingTime="0" requestBytesSent="0" requestBytesReceived="0" remoteAddr="&#63;"
+ virtualHost="&#63;" method="&#63;" currentUri="&#63;" currentQueryString="&#63;" protocol="&#63;"/>
+ <worker stage="R" requestProcessingTime="0" requestBytesSent="0" requestBytesReceived="0" remoteAddr="&#63;"
+ virtualHost="&#63;" method="&#63;" currentUri="&#63;" currentQueryString="&#63;" protocol="&#63;"/>
+ <worker stage="R" requestProcessingTime="0" requestBytesSent="0" requestBytesReceived="0" remoteAddr="&#63;"
+ virtualHost="&#63;" method="&#63;" currentUri="&#63;" currentQueryString="&#63;" protocol="&#63;"/>
+ <worker stage="S" requestProcessingTime="30" requestBytesSent="0" requestBytesReceived="0"
+ remoteAddr="127.0.0.1" virtualHost="127.0.0.1" method="GET" currentUri="&#47;manager&#47;status"
+ currentQueryString="XML=true" protocol="HTTP&#47;1.1"/>
+ </workers>
+ </connector>
+ <connector name='"http-nio-8081"'>
+ <threadInfo maxThreads="200" currentThreadCount="10" currentThreadsBusy="1"/>
+ <requestInfo maxTime="247" processingTime="28326" requestCount="4838" errorCount="24" bytesReceived="0"
+ bytesSent="12174519"/>
+ <workers>
+ <worker stage="R" requestProcessingTime="0" requestBytesSent="0" requestBytesReceived="0" remoteAddr="&#63;"
+ virtualHost="&#63;" method="&#63;" currentUri="&#63;" currentQueryString="&#63;" protocol="&#63;"/>
+ <worker stage="R" requestProcessingTime="0" requestBytesSent="0" requestBytesReceived="0" remoteAddr="&#63;"
+ virtualHost="&#63;" method="&#63;" currentUri="&#63;" currentQueryString="&#63;" protocol="&#63;"/>
+ <worker stage="R" requestProcessingTime="0" requestBytesSent="0" requestBytesReceived="0" remoteAddr="&#63;"
+ virtualHost="&#63;" method="&#63;" currentUri="&#63;" currentQueryString="&#63;" protocol="&#63;"/>
+ <worker stage="S" requestProcessingTime="30" requestBytesSent="0" requestBytesReceived="0"
+ remoteAddr="127.0.0.1" virtualHost="127.0.0.1" method="GET" currentUri="&#47;manager&#47;status"
+ currentQueryString="XML=true" protocol="HTTP&#47;1.1"/>
+ </workers>
+ </connector>
+</status>
diff --git a/src/go/plugin/go.d/modules/tomcat/tomcat.go b/src/go/plugin/go.d/modules/tomcat/tomcat.go
new file mode 100644
index 000000000..540247063
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/tomcat.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tomcat
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("tomcat", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Tomcat {
+ return &Tomcat{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8080",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 1),
+ },
+ },
+ },
+ charts: defaultCharts.Copy(),
+ seenConnectors: make(map[string]bool),
+ seenMemPools: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type Tomcat struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ seenConnectors map[string]bool
+ seenMemPools map[string]bool
+}
+
+func (t *Tomcat) Configuration() any {
+ return t.Config
+}
+
+func (t *Tomcat) Init() error {
+ if err := t.validateConfig(); err != nil {
+ t.Errorf("config validation: %v", err)
+ return err
+ }
+
+ httpClient, err := t.initHTTPClient()
+ if err != nil {
+ t.Errorf("init HTTP client: %v", err)
+ return err
+ }
+
+ t.httpClient = httpClient
+
+ t.Debugf("using URL %s", t.URL)
+ t.Debugf("using timeout: %s", t.Timeout)
+
+ return nil
+}
+
+func (t *Tomcat) Check() error {
+ mx, err := t.collect()
+ if err != nil {
+ t.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (t *Tomcat) Charts() *module.Charts {
+ return t.charts
+}
+
+func (t *Tomcat) Collect() map[string]int64 {
+ mx, err := t.collect()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (t *Tomcat) Cleanup() {
+ if t.httpClient != nil {
+ t.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/tomcat/tomcat_test.go b/src/go/plugin/go.d/modules/tomcat/tomcat_test.go
new file mode 100644
index 000000000..7dfb6ff1a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tomcat/tomcat_test.go
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tomcat
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataServerStatus, _ = os.ReadFile("testdata/server_status.xml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataServerStatus": dataServerStatus,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestTomcat_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Tomcat{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestTomcat_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tomcat := New()
+ tomcat.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, tomcat.Init())
+ } else {
+ assert.NoError(t, tomcat.Init())
+ }
+ })
+ }
+}
+
+func TestTomcat_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestTomcat_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (*Tomcat, func())
+ }{
+ "success case": {
+ wantFail: false,
+ prepare: prepareCaseSuccess,
+ },
+ "fails on unexpected xml response": {
+ wantFail: true,
+ prepare: prepareCaseUnexpectedXMLResponse,
+ },
+ "fails on invalid format response": {
+ wantFail: true,
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tomcat, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, tomcat.Check())
+ } else {
+ assert.NoError(t, tomcat.Check())
+ }
+ })
+ }
+}
+
+func TestTomcat_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (tomcat *Tomcat, cleanup func())
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success case": {
+ prepare: prepareCaseSuccess,
+ wantCharts: len(defaultCharts) + len(jvmMemoryPoolChartsTmpl)*8 + len(connectorChartsTmpl)*2,
+ wantMetrics: map[string]int64{
+ "connector_http-nio-8080_request_info_bytes_received": 0,
+ "connector_http-nio-8080_request_info_bytes_sent": 12174519,
+ "connector_http-nio-8080_request_info_error_count": 24,
+ "connector_http-nio-8080_request_info_processing_time": 28326,
+ "connector_http-nio-8080_request_info_request_count": 4838,
+ "connector_http-nio-8080_thread_info_busy": 1,
+ "connector_http-nio-8080_thread_info_count": 10,
+ "connector_http-nio-8080_thread_info_idle": 9,
+ "connector_http-nio-8081_request_info_bytes_received": 0,
+ "connector_http-nio-8081_request_info_bytes_sent": 12174519,
+ "connector_http-nio-8081_request_info_error_count": 24,
+ "connector_http-nio-8081_request_info_processing_time": 28326,
+ "connector_http-nio-8081_request_info_request_count": 4838,
+ "connector_http-nio-8081_thread_info_busy": 1,
+ "connector_http-nio-8081_thread_info_count": 10,
+ "connector_http-nio-8081_thread_info_idle": 9,
+ "jvm_memory_free": 144529816,
+ "jvm_memory_total": 179306496,
+ "jvm_memory_used": 34776680,
+ "jvm_memorypool_codeheap_non-nmethods_commited": 2555904,
+ "jvm_memorypool_codeheap_non-nmethods_max": 5840896,
+ "jvm_memorypool_codeheap_non-nmethods_used": 1477888,
+ "jvm_memorypool_codeheap_non-profiled_nmethods_commited": 4587520,
+ "jvm_memorypool_codeheap_non-profiled_nmethods_max": 122908672,
+ "jvm_memorypool_codeheap_non-profiled_nmethods_used": 4536704,
+ "jvm_memorypool_codeheap_profiled_nmethods_commited": 13172736,
+ "jvm_memorypool_codeheap_profiled_nmethods_max": 122908672,
+ "jvm_memorypool_codeheap_profiled_nmethods_used": 13132032,
+ "jvm_memorypool_compressed_class_space_commited": 1900544,
+ "jvm_memorypool_compressed_class_space_max": 1073741824,
+ "jvm_memorypool_compressed_class_space_used": 1712872,
+ "jvm_memorypool_g1_eden_space_commited": 108003328,
+ "jvm_memorypool_g1_eden_space_max": -1,
+ "jvm_memorypool_g1_eden_space_used": 23068672,
+ "jvm_memorypool_g1_old_gen_commited": 66060288,
+ "jvm_memorypool_g1_old_gen_max": 1914699776,
+ "jvm_memorypool_g1_old_gen_used": 6175120,
+ "jvm_memorypool_g1_survivor_space_commited": 5242880,
+ "jvm_memorypool_g1_survivor_space_max": -1,
+ "jvm_memorypool_g1_survivor_space_used": 5040192,
+ "jvm_memorypool_metaspace_commited": 18939904,
+ "jvm_memorypool_metaspace_max": -1,
+ "jvm_memorypool_metaspace_used": 18537336,
+ },
+ },
+ "fails on unexpected xml response": {
+ prepare: prepareCaseUnexpectedXMLResponse,
+ },
+ "fails on invalid format response": {
+ prepare: prepareCaseInvalidFormatResponse,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tomcat, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := tomcat.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ assert.Equal(t, test.wantCharts, len(*tomcat.Charts()))
+ module.TestMetricsHasAllChartsDims(t, tomcat.Charts(), mx)
+ }
+ })
+ }
+}
+
+func prepareCaseSuccess(t *testing.T) (*Tomcat, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ case urlPathServerStatus:
+ if r.URL.RawQuery != urlQueryServerStatus {
+ w.WriteHeader(http.StatusNotFound)
+ } else {
+ _, _ = w.Write(dataServerStatus)
+ }
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+ tomcat := New()
+ tomcat.URL = srv.URL
+ require.NoError(t, tomcat.Init())
+
+ return tomcat, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*Tomcat, func()) {
+ t.Helper()
+ tomcat := New()
+ tomcat.URL = "http://127.0.0.1:65001"
+ require.NoError(t, tomcat.Init())
+
+ return tomcat, func() {}
+}
+
+func prepareCaseUnexpectedXMLResponse(t *testing.T) (*Tomcat, func()) {
+ t.Helper()
+ resp := `
+<?xml version="1.0" encoding="UTF-8" ?>
+ <root>
+ <elephant>
+ <burn>false</burn>
+ <mountain>true</mountain>
+ <fog>false</fog>
+ <skin>-1561907625</skin>
+ <burst>anyway</burst>
+ <shadow>1558616893</shadow>
+ </elephant>
+ <start>ever</start>
+ <base>2093056027</base>
+ <mission>-2007590351</mission>
+ <victory>999053756</victory>
+ <die>false</die>
+ </root>
+
+`
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(resp))
+ }))
+
+ tomcat := New()
+ tomcat.URL = srv.URL
+ require.NoError(t, tomcat.Init())
+
+ return tomcat, srv.Close
+}
+
+func prepareCaseInvalidFormatResponse(t *testing.T) (*Tomcat, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ tomcat := New()
+ tomcat.URL = srv.URL
+ require.NoError(t, tomcat.Init())
+
+ return tomcat, srv.Close
+}
diff --git a/src/go/plugin/go.d/modules/tor/README.md b/src/go/plugin/go.d/modules/tor/README.md
new file mode 120000
index 000000000..7c20cd40a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/README.md
@@ -0,0 +1 @@
+integrations/tor.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/tor/charts.go b/src/go/plugin/go.d/modules/tor/charts.go
new file mode 100644
index 000000000..1e2a1ef97
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/charts.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tor
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioTraffic = module.Priority + iota
+ prioUptime
+)
+
+var charts = module.Charts{
+ trafficChart.Copy(),
+ uptimeChart.Copy(),
+}
+
+var trafficChart = module.Chart{
+ ID: "traffic",
+ Title: "Tor Traffic",
+ Units: "KiB/s",
+ Fam: "traffic",
+ Ctx: "tor.traffic",
+ Type: module.Area,
+ Priority: prioTraffic,
+ Dims: module.Dims{
+ {ID: "traffic/read", Name: "read", Algo: module.Incremental, Div: 1024},
+ {ID: "traffic/written", Name: "write", Algo: module.Incremental, Mul: -1, Div: 1024},
+ },
+}
+
+var uptimeChart = module.Chart{
+ ID: "uptime",
+ Title: "Tor Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "tor.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "uptime"},
+ },
+}
diff --git a/src/go/plugin/go.d/modules/tor/client.go b/src/go/plugin/go.d/modules/tor/client.go
new file mode 100644
index 000000000..e4a8045a9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/client.go
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tor
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
+// https://spec.torproject.org/control-spec/index.html
+// https://github.com/torproject/stem/blob/master/stem/control.py
+
+const (
+ cmdAuthenticate = "AUTHENTICATE"
+ cmdQuit = "QUIT"
+ cmdGetInfo = "GETINFO"
+)
+
+type controlConn interface {
+ connect() error
+ disconnect()
+
+ getInfo(...string) ([]byte, error)
+}
+
+func newControlConn(conf Config) controlConn {
+ return &torControlClient{
+ password: conf.Password,
+ conn: socket.New(socket.Config{
+ Address: conf.Address,
+ ConnectTimeout: conf.Timeout.Duration(),
+ ReadTimeout: conf.Timeout.Duration(),
+ WriteTimeout: conf.Timeout.Duration(),
+ })}
+}
+
+type torControlClient struct {
+ password string
+ conn socket.Client
+}
+
+func (c *torControlClient) connect() error {
+ if err := c.conn.Connect(); err != nil {
+ return err
+ }
+
+ return c.authenticate()
+}
+
+func (c *torControlClient) authenticate() error {
+ // https://spec.torproject.org/control-spec/commands.html#authenticate
+
+ cmd := cmdAuthenticate
+ if c.password != "" {
+ cmd = fmt.Sprintf("%s \"%s\"", cmdAuthenticate, c.password)
+ }
+
+ var s string
+ err := c.conn.Command(cmd+"\n", func(bs []byte) bool {
+ s = string(bs)
+ return false
+ })
+ if err != nil {
+ return fmt.Errorf("authentication failed: %v", err)
+ }
+ if !strings.HasPrefix(s, "250") {
+ return fmt.Errorf("authentication failed: %s", s)
+ }
+ return nil
+}
+
+func (c *torControlClient) disconnect() {
+ // https://spec.torproject.org/control-spec/commands.html#quit
+
+ _ = c.conn.Command(cmdQuit+"\n", func(bs []byte) bool { return false })
+ _ = c.conn.Disconnect()
+}
+
+func (c *torControlClient) getInfo(keywords ...string) ([]byte, error) {
+ // https://spec.torproject.org/control-spec/commands.html#getinfo
+
+ if len(keywords) == 0 {
+ return nil, errors.New("no keywords specified")
+ }
+ cmd := fmt.Sprintf("%s %s", cmdGetInfo, strings.Join(keywords, " "))
+
+ var buf bytes.Buffer
+ var err error
+
+ clientErr := c.conn.Command(cmd+"\n", func(bs []byte) bool {
+ s := string(bs)
+
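+		// A successful GETINFO reply is a sequence of "250-keyword=value" lines
+		// terminated by a final "250 OK" line; any other prefix is treated as an error.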
+ switch {
+ case strings.HasPrefix(s, "250-"):
+ buf.WriteString(strings.TrimPrefix(s, "250-"))
+ buf.WriteByte('\n')
+ return true
+ case strings.HasPrefix(s, "250 "):
+ return false
+ default:
+ err = errors.New(s)
+ return false
+ }
+ })
+ if clientErr != nil {
+ return nil, fmt.Errorf("command '%s' failed: %v", cmd, clientErr)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("command '%s' failed: %v", cmd, err)
+ }
+
+ return buf.Bytes(), nil
+}
diff --git a/src/go/plugin/go.d/modules/tor/collect.go b/src/go/plugin/go.d/modules/tor/collect.go
new file mode 100644
index 000000000..6e6078df3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/collect.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tor
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func (t *Tor) collect() (map[string]int64, error) {
+ if t.conn == nil {
+ conn, err := t.establishConnection()
+ if err != nil {
+ return nil, err
+ }
+ t.conn = conn
+ }
+
+ mx := make(map[string]int64)
+ if err := t.collectServerInfo(mx); err != nil {
+ t.Cleanup()
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (t *Tor) collectServerInfo(mx map[string]int64) error {
+ resp, err := t.conn.getInfo("traffic/read", "traffic/written", "uptime")
+ if err != nil {
+ return err
+ }
+
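+	// Each reply line has the form "keyword=value", e.g. "traffic/read=123".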
+ sc := bufio.NewScanner(bytes.NewReader(resp))
+
+ for sc.Scan() {
+ line := sc.Text()
+
+ key, value, ok := strings.Cut(line, "=")
+ if !ok {
+ return fmt.Errorf("failed to parse metric: %s", line)
+ }
+
+ v, err := strconv.ParseInt(value, 10, 64)
+ if err != nil {
+ return fmt.Errorf("failed to parse metric %s value: %v", line, err)
+ }
+ mx[key] = v
+ }
+
+ return nil
+}
+
+func (t *Tor) establishConnection() (controlConn, error) {
+ conn := t.newConn(t.Config)
+
+ if err := conn.connect(); err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
diff --git a/src/go/plugin/go.d/modules/tor/config_schema.json b/src/go/plugin/go.d/modules/tor/config_schema.json
new file mode 100644
index 000000000..abfc40d95
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/config_schema.json
@@ -0,0 +1,53 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Tor collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the Tor Control Port listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:9051"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for authentication.",
+ "type": "string",
+ "sensitive": true
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/tor/integrations/tor.md b/src/go/plugin/go.d/modules/tor/integrations/tor.md
new file mode 100644
index 000000000..54b5a428c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/integrations/tor.md
@@ -0,0 +1,225 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/tor/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/tor/metadata.yaml"
+sidebar_label: "Tor"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/VPNs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Tor
+
+
+<img src="https://netdata.cloud/img/tor.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: tor
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Tracks Tor's download and upload traffic, as well as its uptime.
+
+
+It reads the server's response to the [GETINFO](https://spec.torproject.org/control-spec/commands.html#getinfo) command.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects Tor instances running on localhost that are listening on port 9051.
+On startup, it tries to collect metrics from:
+
+- 127.0.0.1:9051
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Tor instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| tor.traffic | read, write | KiB/s |
+| tor.uptime | uptime | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable Control Port
+
+Enable `ControlPort` in `/etc/tor/torrc`.
+
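+As a minimal sketch (the values are illustrative; adjust them to your setup), the relevant `torrc` lines could look like this:
+
+```text
+ControlPort 9051
+# Optional: require a password; create this value with: tor --hash-password yourPassword
+HashedControlPassword 16:...
+```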
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/tor.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/tor.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | The IP address and port where the Tor Control Port listens for connections. | 127.0.0.1:9051 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
+| password | Password for authentication. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:9051
+ password: somePassword
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:9051
+ password: somePassword
+
+ - name: remote
+ address: 203.0.113.0:9051
+ password: somePassword
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `tor` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m tor
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `tor` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep tor
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep tor /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep tor
+```
+
+
diff --git a/src/go/plugin/go.d/modules/tor/metadata.yaml b/src/go/plugin/go.d/modules/tor/metadata.yaml
new file mode 100644
index 000000000..7df589346
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/metadata.yaml
@@ -0,0 +1,135 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-tor
+ plugin_name: go.d.plugin
+ module_name: tor
+ monitored_instance:
+ name: Tor
+ link: https://www.torproject.org/
+ categories:
+ - data-collection.vpns
+ icon_filename: "tor.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - tor
+ - traffic
+ - vpn
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Tracks Tor's download and upload traffic, as well as its uptime.
+ method_description: |
+ It reads the server's response to the [GETINFO](https://spec.torproject.org/control-spec/commands.html#getinfo) command.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects Tor instances running on localhost that are listening on port 9051.
+ On startup, it tries to collect metrics from:
+
+ - 127.0.0.1:9051
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable Control Port
+ description: |
+ Enable `ControlPort` in `/etc/tor/torrc`.
+ configuration:
+ file:
+ name: go.d/tor.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: The IP address and port where the Tor Control Port listens for connections.
+ default_value: 127.0.0.1:9051
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
+ - name: password
+ description: Password for authentication.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:9051
+ password: somePassword
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:9051
+ password: somePassword
+
+ - name: remote
+ address: 203.0.113.0:9051
+ password: somePassword
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: tor.traffic
+ description: Tor Traffic
+ unit: "KiB/s"
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: tor.uptime
+ description: Tor Uptime
+ unit: "seconds"
+ chart_type: line
+ dimensions:
+ - name: uptime
diff --git a/src/go/plugin/go.d/modules/tor/testdata/config.json b/src/go/plugin/go.d/modules/tor/testdata/config.json
new file mode 100644
index 000000000..76769305c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "password": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/tor/testdata/config.yaml b/src/go/plugin/go.d/modules/tor/testdata/config.yaml
new file mode 100644
index 000000000..95ba970ba
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+password: "ok"
diff --git a/src/go/plugin/go.d/modules/tor/tor.go b/src/go/plugin/go.d/modules/tor/tor.go
new file mode 100644
index 000000000..bb6cacab1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/tor.go
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tor
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("tor", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Tor {
+ return &Tor{
+ Config: Config{
+ Address: "127.0.0.1:9051",
+ Timeout: web.Duration(time.Second * 1),
+ },
+ newConn: newControlConn,
+ charts: charts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ Password string `yaml:"password" json:"password"`
+}
+
+type Tor struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ newConn func(Config) controlConn
+ conn controlConn
+}
+
+func (t *Tor) Configuration() any {
+ return t.Config
+}
+
+func (t *Tor) Init() error {
+ if t.Address == "" {
+ t.Error("config: 'address' not set")
+ return errors.New("address not set")
+ }
+
+ return nil
+}
+
+func (t *Tor) Check() error {
+ mx, err := t.collect()
+ if err != nil {
+ t.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (t *Tor) Charts() *module.Charts {
+ return t.charts
+}
+
+func (t *Tor) Collect() map[string]int64 {
+ mx, err := t.collect()
+ if err != nil {
+ t.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (t *Tor) Cleanup() {
+ if t.conn != nil {
+ t.conn.disconnect()
+ t.conn = nil
+ }
+}
diff --git a/src/go/plugin/go.d/modules/tor/tor_test.go b/src/go/plugin/go.d/modules/tor/tor_test.go
new file mode 100644
index 000000000..35001c39a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/tor/tor_test.go
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tor
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestTor_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Tor{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestTor_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success with default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if address not set": {
+ wantFail: true,
+ config: func() Config {
+ conf := New().Config
+ conf.Address = ""
+ return conf
+ }(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tor := New()
+ tor.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, tor.Init())
+ } else {
+ assert.NoError(t, tor.Init())
+ }
+ })
+ }
+}
+
+func TestTor_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestTor_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (*Tor, *mockTorDaemon)
+ wantFail bool
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: prepareCaseOk,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tor, daemon := test.prepare()
+
+ defer func() {
+ assert.NoError(t, daemon.Close(), "daemon.Close()")
+ }()
+ go func() {
+ assert.NoError(t, daemon.Run(), "daemon.Run()")
+ }()
+
+ select {
+ case <-daemon.started:
+ case <-time.After(time.Second * 3):
+ t.Errorf("mock tor daemon start timed out")
+ }
+
+ require.NoError(t, tor.Init())
+
+ if test.wantFail {
+ assert.Error(t, tor.Check())
+ } else {
+ assert.NoError(t, tor.Check())
+ }
+
+ tor.Cleanup()
+
+ select {
+ case <-daemon.stopped:
+ case <-time.After(time.Second * 3):
+ t.Errorf("mock tor daemon stop timed out")
+ }
+ })
+ }
+}
+
+func TestTor_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (*Tor, *mockTorDaemon)
+ wantMetrics map[string]int64
+ wantCharts int
+ }{
+ "success on valid response": {
+ prepare: prepareCaseOk,
+ wantCharts: len(charts),
+ wantMetrics: map[string]int64{
+ "traffic/read": 100,
+ "traffic/written": 100,
+ "uptime": 100,
+ },
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ wantCharts: len(charts),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tor, daemon := test.prepare()
+
+ defer func() {
+ assert.NoError(t, daemon.Close(), "daemon.Close()")
+ }()
+ go func() {
+ assert.NoError(t, daemon.Run(), "daemon.Run()")
+ }()
+
+ select {
+ case <-daemon.started:
+ case <-time.After(time.Second * 3):
+ t.Errorf("mock tor daemon start timed out")
+ }
+
+ require.NoError(t, tor.Init())
+
+ mx := tor.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ assert.Equal(t, test.wantCharts, len(*tor.Charts()), "want charts")
+
+ if len(test.wantMetrics) > 0 {
+ module.TestMetricsHasAllChartsDims(t, tor.Charts(), mx)
+ }
+
+ tor.Cleanup()
+
+ select {
+ case <-daemon.stopped:
+ case <-time.After(time.Second * 3):
+ t.Errorf("mock tor daemon stop timed out")
+ }
+ })
+ }
+}
+
+func prepareCaseOk() (*Tor, *mockTorDaemon) {
+ daemon := &mockTorDaemon{
+ addr: "127.0.0.1:65001",
+ started: make(chan struct{}),
+ stopped: make(chan struct{}),
+ }
+
+ tor := New()
+ tor.Address = daemon.addr
+
+ return tor, daemon
+}
+
+func prepareCaseConnectionRefused() (*Tor, *mockTorDaemon) {
+ ch := make(chan struct{})
+ close(ch)
+
+ daemon := &mockTorDaemon{
+ addr: "127.0.0.1:65001",
+ dontStart: true,
+ started: ch,
+ stopped: ch,
+ }
+
+ tor := New()
+ tor.Address = daemon.addr
+
+ return tor, daemon
+}
+
+type mockTorDaemon struct {
+ addr string
+ srv net.Listener
+ started chan struct{}
+ stopped chan struct{}
+ dontStart bool
+ authenticated bool
+}
+
+func (m *mockTorDaemon) Run() error {
+ if m.dontStart {
+ return nil
+ }
+
+ srv, err := net.Listen("tcp", m.addr)
+ if err != nil {
+ return err
+ }
+
+ m.srv = srv
+
+ close(m.started)
+ defer close(m.stopped)
+
+ return m.handleConnections()
+}
+
+func (m *mockTorDaemon) Close() error {
+ if m.srv != nil {
+ err := m.srv.Close()
+ m.srv = nil
+ return err
+ }
+ return nil
+}
+
+func (m *mockTorDaemon) handleConnections() error {
+ conn, err := m.srv.Accept()
+ if err != nil || conn == nil {
+ return errors.New("could not accept connection")
+ }
+ return m.handleConnection(conn)
+}
+
+func (m *mockTorDaemon) handleConnection(conn net.Conn) error {
+ defer func() { _ = conn.Close() }()
+
+ rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
+ var line string
+ var err error
+
+ for {
+ if line, err = rw.ReadString('\n'); err != nil {
+ return fmt.Errorf("error reading from connection: %v", err)
+ }
+
+ line = strings.TrimSpace(line)
+
+ cmd, param, _ := strings.Cut(line, " ")
+
+ switch cmd {
+ case cmdQuit:
+ return m.handleQuit(conn)
+ case cmdAuthenticate:
+ err = m.handleAuthenticate(conn)
+ case cmdGetInfo:
+ err = m.handleGetInfo(conn, param)
+ default:
+ s := fmt.Sprintf("510 Unrecognized command \"%s\"\n", cmd)
+ _, _ = rw.WriteString(s)
+ return fmt.Errorf("unexpected command: %s", line)
+ }
+
+ _ = rw.Flush()
+
+ if err != nil {
+ return err
+ }
+ }
+}
+
+func (m *mockTorDaemon) handleQuit(conn io.Writer) error {
+ _, err := conn.Write([]byte("250 closing connection\n"))
+ return err
+}
+
+func (m *mockTorDaemon) handleAuthenticate(conn io.Writer) error {
+ m.authenticated = true
+ _, err := conn.Write([]byte("250 OK\n"))
+ return err
+}
+
+func (m *mockTorDaemon) handleGetInfo(conn io.Writer, keywords string) error {
+ if !m.authenticated {
+ _, _ = conn.Write([]byte("514 Authentication required\n"))
+ return errors.New("authentication required")
+ }
+
+ keywords = strings.Trim(keywords, "\"")
+
+ for _, k := range strings.Fields(keywords) {
+ s := fmt.Sprintf("250-%s=%d\n", k, 100)
+
+ if _, err := conn.Write([]byte(s)); err != nil {
+ return err
+ }
+ }
+
+ _, err := conn.Write([]byte("250 OK\n"))
+ return err
+}
diff --git a/src/go/plugin/go.d/modules/traefik/README.md b/src/go/plugin/go.d/modules/traefik/README.md
new file mode 120000
index 000000000..da5abad23
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/README.md
@@ -0,0 +1 @@
+integrations/traefik.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/traefik/charts.go b/src/go/plugin/go.d/modules/traefik/charts.go
new file mode 100644
index 000000000..7d67ef684
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/charts.go
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package traefik
+
+import (
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+var chartTmplEntrypointRequests = module.Chart{
+ ID: "entrypoint_requests_%s_%s",
+ Title: "Processed HTTP requests on <code>%s</code> entrypoint (protocol <code>%s</code>)",
+ Units: "requests/s",
+ Fam: "entrypoint %s %s",
+ Ctx: "traefik.entrypoint_requests",
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: prefixEntrypointRequests + "%s_%s_1xx", Name: "1xx", Algo: module.Incremental},
+ {ID: prefixEntrypointRequests + "%s_%s_2xx", Name: "2xx", Algo: module.Incremental},
+ {ID: prefixEntrypointRequests + "%s_%s_3xx", Name: "3xx", Algo: module.Incremental},
+ {ID: prefixEntrypointRequests + "%s_%s_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: prefixEntrypointRequests + "%s_%s_5xx", Name: "5xx", Algo: module.Incremental},
+ },
+}
+
+var chartTmplEntrypointRequestDuration = module.Chart{
+ ID: "entrypoint_request_duration_%s_%s",
+ Title: "Average HTTP request processing time on <code>%s</code> entrypoint (protocol <code>%s</code>)",
+ Units: "milliseconds",
+ Fam: "entrypoint %s %s",
+ Ctx: "traefik.entrypoint_request_duration_average",
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: prefixEntrypointReqDurAvg + "%s_%s_1xx", Name: "1xx"},
+ {ID: prefixEntrypointReqDurAvg + "%s_%s_2xx", Name: "2xx"},
+ {ID: prefixEntrypointReqDurAvg + "%s_%s_3xx", Name: "3xx"},
+ {ID: prefixEntrypointReqDurAvg + "%s_%s_4xx", Name: "4xx"},
+ {ID: prefixEntrypointReqDurAvg + "%s_%s_5xx", Name: "5xx"},
+ },
+}
+
+var chartTmplEntrypointOpenConnections = module.Chart{
+ ID: "entrypoint_open_connections_%s_%s",
+ Title: "Open connections on <code>%s</code> entrypoint (protocol <code>%s</code>)",
+ Units: "connections",
+ Fam: "entrypoint %s %s",
+ Ctx: "traefik.entrypoint_open_connections",
+ Type: module.Stacked,
+}
+
+func newChartEntrypointRequests(entrypoint, proto string) *module.Chart {
+ return newEntrypointChart(chartTmplEntrypointRequests, entrypoint, proto)
+}
+
+func newChartEntrypointRequestDuration(entrypoint, proto string) *module.Chart {
+ return newEntrypointChart(chartTmplEntrypointRequestDuration, entrypoint, proto)
+}
+
+func newChartEntrypointOpenConnections(entrypoint, proto string) *module.Chart {
+ return newEntrypointChart(chartTmplEntrypointOpenConnections, entrypoint, proto)
+}
+
+func newEntrypointChart(tmpl module.Chart, entrypoint, proto string) *module.Chart {
+ chart := tmpl.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, entrypoint, proto)
+ chart.Title = fmt.Sprintf(chart.Title, entrypoint, proto)
+ chart.Fam = fmt.Sprintf(chart.Fam, entrypoint, proto)
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, entrypoint, proto)
+ }
+ return chart
+}
diff --git a/src/go/plugin/go.d/modules/traefik/collect.go b/src/go/plugin/go.d/modules/traefik/collect.go
new file mode 100644
index 000000000..3f2556060
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/collect.go
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package traefik
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricEntrypointRequestsTotal = "traefik_entrypoint_requests_total"
+ metricEntrypointRequestDurationSecondsSum = "traefik_entrypoint_request_duration_seconds_sum"
+ metricEntrypointRequestDurationSecondsCount = "traefik_entrypoint_request_duration_seconds_count"
+ metricEntrypointOpenConnections = "traefik_entrypoint_open_connections"
+)
+
+const (
+ prefixEntrypointRequests = "entrypoint_requests_"
+ prefixEntrypointReqDurAvg = "entrypoint_request_duration_average_"
+ prefixEntrypointOpenConn = "entrypoint_open_connections_"
+)
+
+func isTraefikMetrics(pms prometheus.Series) bool {
+ for _, pm := range pms {
+ if strings.HasPrefix(pm.Name(), "traefik_") {
+ return true
+ }
+ }
+ return false
+}
+
+func (t *Traefik) collect() (map[string]int64, error) {
+ pms, err := t.prom.ScrapeSeries()
+ if err != nil {
+ return nil, err
+ }
+
+ if t.checkMetrics && !isTraefikMetrics(pms) {
+ return nil, errors.New("unexpected metrics (not Traefik)")
+ }
+ t.checkMetrics = false
+
+ mx := make(map[string]int64)
+
+ t.collectEntrypointRequestsTotal(mx, pms)
+ t.collectEntrypointRequestDuration(mx, pms)
+ t.collectEntrypointOpenConnections(mx, pms)
+ t.updateCodeClassMetrics(mx)
+
+ return mx, nil
+}
+
+func (t *Traefik) collectEntrypointRequestsTotal(mx map[string]int64, pms prometheus.Series) {
+ if pms = pms.FindByName(metricEntrypointRequestsTotal); pms.Len() == 0 {
+ return
+ }
+
+ for _, pm := range pms {
+ code := pm.Labels.Get("code")
+ ep := pm.Labels.Get("entrypoint")
+ proto := pm.Labels.Get("protocol")
+ codeClass := getCodeClass(code)
+ if code == "" || ep == "" || proto == "" || codeClass == "" {
+ continue
+ }
+
+ key := prefixEntrypointRequests + ep + "_" + proto + "_" + codeClass
+ mx[key] += int64(pm.Value)
+
+ id := ep + "_" + proto
+ ce := t.cacheGetOrPutEntrypoint(id)
+ if ce.requests == nil {
+ chart := newChartEntrypointRequests(ep, proto)
+ ce.requests = chart
+ if err := t.Charts().Add(chart); err != nil {
+ t.Warning(err)
+ }
+ }
+ }
+}
+
+func (t *Traefik) collectEntrypointRequestDuration(mx map[string]int64, pms prometheus.Series) {
+ if pms = pms.FindByNames(
+ metricEntrypointRequestDurationSecondsCount,
+ metricEntrypointRequestDurationSecondsSum,
+ ); pms.Len() == 0 {
+ return
+ }
+
+ for _, pm := range pms {
+ code := pm.Labels.Get("code")
+ ep := pm.Labels.Get("entrypoint")
+ proto := pm.Labels.Get("protocol")
+ codeClass := getCodeClass(code)
+ if code == "" || ep == "" || proto == "" || codeClass == "" {
+ continue
+ }
+
+ id := ep + "_" + proto
+ ce := t.cacheGetOrPutEntrypoint(id)
+ v := ce.reqDurData[codeClass]
+ if pm.Name() == metricEntrypointRequestDurationSecondsSum {
+ v.cur.secs += pm.Value
+ } else {
+ v.cur.reqs += pm.Value
+ }
+ ce.reqDurData[codeClass] = v
+ }
+
+ for id, ce := range t.cache.entrypoints {
+ if ce.reqDur == nil {
+ chart := newChartEntrypointRequestDuration(ce.name, ce.proto)
+ ce.reqDur = chart
+ if err := t.Charts().Add(chart); err != nil {
+ t.Warning(err)
+ }
+ }
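+		// Convert the delta of the Prometheus duration sum/count pair since the previous
+		// scrape into an average request time in milliseconds per response code class.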
+ for codeClass, v := range ce.reqDurData {
+ secs, reqs, seen := v.cur.secs-v.prev.secs, v.cur.reqs-v.prev.reqs, v.seen
+ v.prev.secs, v.prev.reqs, v.seen = v.cur.secs, v.cur.reqs, true
+ v.cur.secs, v.cur.reqs = 0, 0
+ ce.reqDurData[codeClass] = v
+
+ key := prefixEntrypointReqDurAvg + id + "_" + codeClass
+ if secs <= 0 || reqs <= 0 || !seen {
+ mx[key] = 0
+ } else {
+ mx[key] = int64(secs * 1000 / reqs)
+ }
+ }
+ }
+}
+
+func (t *Traefik) collectEntrypointOpenConnections(mx map[string]int64, pms prometheus.Series) {
+ if pms = pms.FindByName(metricEntrypointOpenConnections); pms.Len() == 0 {
+ return
+ }
+
+ for _, pm := range pms {
+ method := pm.Labels.Get("method")
+ ep := pm.Labels.Get("entrypoint")
+ proto := pm.Labels.Get("protocol")
+ if method == "" || ep == "" || proto == "" {
+ continue
+ }
+
+ key := prefixEntrypointOpenConn + ep + "_" + proto + "_" + method
+ mx[key] += int64(pm.Value)
+
+ id := ep + "_" + proto
+ ce := t.cacheGetOrPutEntrypoint(id)
+ if ce.openConn == nil {
+ chart := newChartEntrypointOpenConnections(ep, proto)
+ ce.openConn = chart
+ if err := t.Charts().Add(chart); err != nil {
+ t.Warning(err)
+ }
+ }
+
+ if !ce.openConnMethods[method] {
+ ce.openConnMethods[method] = true
+ dim := &module.Dim{ID: key, Name: method}
+ if err := ce.openConn.AddDim(dim); err != nil {
+ t.Warning(err)
+ }
+ ce.openConn.MarkNotCreated()
+ }
+ }
+}
+
+var httpRespCodeClasses = []string{"1xx", "2xx", "3xx", "4xx", "5xx"}
+
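+// updateCodeClassMetrics keeps the dimension set stable: for entrypoints that already have
+// requests or request duration charts, every response code class key is reported (at least 0).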
+func (t *Traefik) updateCodeClassMetrics(mx map[string]int64) {
+ for id, ce := range t.cache.entrypoints {
+ if ce.requests != nil {
+ for _, c := range httpRespCodeClasses {
+ key := prefixEntrypointRequests + id + "_" + c
+ mx[key] += 0
+ }
+ }
+ if ce.reqDur != nil {
+ for _, c := range httpRespCodeClasses {
+ key := prefixEntrypointReqDurAvg + id + "_" + c
+ mx[key] += 0
+ }
+ }
+ }
+}
+
+func getCodeClass(code string) string {
+ if len(code) != 3 {
+ return ""
+ }
+ return string(code[0]) + "xx"
+}
+
+func (t *Traefik) cacheGetOrPutEntrypoint(id string) *cacheEntrypoint {
+ if _, ok := t.cache.entrypoints[id]; !ok {
+ name, proto := id, id
+ if idx := strings.LastIndexByte(id, '_'); idx != -1 {
+ name, proto = id[:idx], id[idx+1:]
+ }
+ t.cache.entrypoints[id] = &cacheEntrypoint{
+ name: name,
+ proto: proto,
+ reqDurData: make(map[string]cacheEntrypointReqDur),
+ openConnMethods: make(map[string]bool),
+ }
+ }
+ return t.cache.entrypoints[id]
+}
diff --git a/src/go/plugin/go.d/modules/traefik/config_schema.json b/src/go/plugin/go.d/modules/traefik/config_schema.json
new file mode 100644
index 000000000..f027f20a0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Traefik collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Traefik metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:8082/metrics",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/traefik/init.go b/src/go/plugin/go.d/modules/traefik/init.go
new file mode 100644
index 000000000..02c1dde0d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/init.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package traefik
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (t *Traefik) validateConfig() error {
+ if t.URL == "" {
+ return errors.New("'url' is not set")
+ }
+ return nil
+}
+
+func (t *Traefik) initPrometheusClient() (prometheus.Prometheus, error) {
+ httpClient, err := web.NewHTTPClient(t.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ prom := prometheus.NewWithSelector(httpClient, t.Request, sr)
+ return prom, nil
+}
+
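+// sr restricts scraping to the entrypoint series this collector consumes,
+// filtering out the rest of the Traefik exposition at parse time.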
+var sr, _ = selector.Expr{
+ Allow: []string{
+ metricEntrypointRequestDurationSecondsSum,
+ metricEntrypointRequestDurationSecondsCount,
+ metricEntrypointRequestsTotal,
+ metricEntrypointOpenConnections,
+ },
+}.Parse()
diff --git a/src/go/plugin/go.d/modules/traefik/integrations/traefik.md b/src/go/plugin/go.d/modules/traefik/integrations/traefik.md
new file mode 100644
index 000000000..f5dc10eb9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/integrations/traefik.md
@@ -0,0 +1,246 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/traefik/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/traefik/metadata.yaml"
+sidebar_label: "Traefik"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Traefik
+
+
+<img src="https://netdata.cloud/img/traefik.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: traefik
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Traefik servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per entrypoint, protocol
+
+These metrics refer to the entrypoint.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| traefik.entrypoint_requests | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |
+| traefik.entrypoint_request_duration_average | 1xx, 2xx, 3xx, 4xx, 5xx | milliseconds |
+| traefik.entrypoint_open_connections | a dimension per HTTP method | connections |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable built-in Prometheus exporter
+
+To enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.
+
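+As a rough sketch (the file location and entrypoint name are assumptions; consult the Traefik docs for your version), a static configuration that exposes Prometheus metrics on a dedicated entrypoint matching this collector's default URL could look like this:
+
+```yaml
+# traefik.yml (static configuration)
+entryPoints:
+  metrics:
+    address: ":8082"
+
+metrics:
+  prometheus:
+    entryPoint: metrics
+```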
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/traefik.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/traefik.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>All options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8082/metrics | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8082/metrics
+
+```
+</details>
+
+##### Basic HTTP auth
+
+Local server with basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8082/metrics
+ username: foo
+ password: bar
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8082/metrics
+
+ - name: remote
+ url: http://192.0.2.0:8082/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `traefik` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m traefik
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `traefik` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep traefik
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep traefik /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep traefik
+```
+
+
diff --git a/src/go/plugin/go.d/modules/traefik/metadata.yaml b/src/go/plugin/go.d/modules/traefik/metadata.yaml
new file mode 100644
index 000000000..7fe182ea3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/metadata.yaml
@@ -0,0 +1,196 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-traefik
+ plugin_name: go.d.plugin
+ module_name: traefik
+ monitored_instance:
+ name: Traefik
+ link: https://traefik.io/
+ icon_filename: traefik.svg
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ keywords:
+ - traefik
+ - proxy
+ - webproxy
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Traefik servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable built-in Prometheus exporter
+ description: |
+ To enable it, see the [Prometheus exporter](https://doc.traefik.io/traefik/observability/metrics/prometheus/) documentation.
+ configuration:
+ file:
+ name: go.d/traefik.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: All options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8082/metrics
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8082/metrics
+ - name: Basic HTTP auth
+ description: Local server with basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8082/metrics
+ username: foo
+ password: bar
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8082/metrics
+
+ - name: remote
+ url: http://192.0.2.0:8082/metrics
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: entrypoint, protocol
+ description: These metrics refer to the entrypoint.
+ labels: []
+ metrics:
+ - name: traefik.entrypoint_requests
+ description: Processed HTTP requests
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: 1xx
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: traefik.entrypoint_request_duration_average
+ description: Average HTTP request processing time
+ unit: milliseconds
+ chart_type: stacked
+ dimensions:
+ - name: 1xx
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: traefik.entrypoint_open_connections
+ description: Open connections
+ unit: connections
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per HTTP method
diff --git a/src/go/plugin/go.d/modules/traefik/testdata/config.json b/src/go/plugin/go.d/modules/traefik/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/traefik/testdata/config.yaml b/src/go/plugin/go.d/modules/traefik/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/traefik/testdata/v2.2.1/metrics.txt b/src/go/plugin/go.d/modules/traefik/testdata/v2.2.1/metrics.txt
new file mode 100644
index 000000000..947a365c0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/testdata/v2.2.1/metrics.txt
@@ -0,0 +1,1170 @@
+# HELP traefik_entrypoint_open_connections How many open connections exist on an entrypoint, partitioned by method and protocol.
+# TYPE traefik_entrypoint_open_connections gauge
+traefik_entrypoint_open_connections{entrypoint="traefik",method="GET",protocol="http"} 1
+traefik_entrypoint_open_connections{entrypoint="web",method="DELETE",protocol="http"} 0
+traefik_entrypoint_open_connections{entrypoint="web",method="GET",protocol="http"} 0
+traefik_entrypoint_open_connections{entrypoint="web",method="GET",protocol="websocket"} 0
+traefik_entrypoint_open_connections{entrypoint="web",method="HEAD",protocol="http"} 0
+traefik_entrypoint_open_connections{entrypoint="web",method="OPTIONS",protocol="http"} 0
+traefik_entrypoint_open_connections{entrypoint="web",method="PATCH",protocol="http"} 0
+traefik_entrypoint_open_connections{entrypoint="web",method="POST",protocol="http"} 4
+traefik_entrypoint_open_connections{entrypoint="web",method="PUT",protocol="http"} 0
+# HELP traefik_entrypoint_request_duration_seconds How long it took to process the request on an entrypoint, partitioned by status code, protocol, and method.
+# TYPE traefik_entrypoint_request_duration_seconds histogram
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.1"} 2.839193e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.2"} 2.840809e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.3"} 2.840813e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.4"} 2.840813e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.5"} 2.840814e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.8"} 2.840814e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="0.9"} 2.840814e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="1"} 2.840814e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="1.1"} 2.840814e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="1.2"} 2.840814e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="5"} 2.840814e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="traefik",method="GET",protocol="http",le="+Inf"} 2.840814e+06
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="traefik",method="GET",protocol="http"} 5284.212647182563
+traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="traefik",method="GET",protocol="http"} 2.840814e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.1"} 6.77133599e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.2"} 7.53631104e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.3"} 7.72627022e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.4"} 7.79474876e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.5"} 7.81903287e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.8"} 7.8476649e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="0.9"} 7.85122472e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="1"} 7.85466352e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="1.1"} 7.85699767e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="1.2"} 7.85892303e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="5"} 7.86979178e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="GET",protocol="http",le="+Inf"} 7.87262719e+08
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="web",method="GET",protocol="http"} 3.573930237570157e+07
+traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="web",method="GET",protocol="http"} 7.87262719e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.1"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.2"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.3"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.4"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.5"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.8"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="0.9"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="1"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="1.1"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="1.2"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="5"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="HEAD",protocol="http",le="+Inf"} 6311
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="web",method="HEAD",protocol="http"} 7.36609426899999
+traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="web",method="HEAD",protocol="http"} 6311
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 5617
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 5828
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 5925
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 5968
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 5996
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 6027
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 6034
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="1"} 6035
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 6039
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 6039
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="5"} 6045
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 6047
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="web",method="PATCH",protocol="http"} 376.1973577400002
+traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="web",method="PATCH",protocol="http"} 6047
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.1"} 1.0407824e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.2"} 3.0289279e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.3"} 4.9925366e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.4"} 5.7915399e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.5"} 6.292114e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.8"} 6.826269e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="0.9"} 6.8979431e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="1"} 6.9399071e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="1.1"} 6.9717772e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="1.2"} 6.9953534e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="5"} 7.0917859e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="200",entrypoint="web",method="POST",protocol="http",le="+Inf"} 7.1907943e+07
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="web",method="POST",protocol="http"} 2.4994444082210593e+07
+traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="web",method="POST",protocol="http"} 7.1907943e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.1"} 1.75296233e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.2"} 1.75817375e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.3"} 1.76334316e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.4"} 1.76415232e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.5"} 1.76453514e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.8"} 1.76535963e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="0.9"} 1.76564373e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="1"} 1.76584473e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="1.1"} 1.76599247e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="1.2"} 1.76612342e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="5"} 1.76778007e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="201",entrypoint="web",method="POST",protocol="http",le="+Inf"} 1.76862498e+08
+traefik_entrypoint_request_duration_seconds_sum{code="201",entrypoint="web",method="POST",protocol="http"} 3.734233299392699e+06
+traefik_entrypoint_request_duration_seconds_count{code="201",entrypoint="web",method="POST",protocol="http"} 1.76862498e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 7980
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 8309
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 8412
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 8443
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 8451
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 8528
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 8568
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="1"} 8621
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 8730
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 8886
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="5"} 10410
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 10446
+traefik_entrypoint_request_duration_seconds_sum{code="204",entrypoint="web",method="DELETE",protocol="http"} 4241.144239078025
+traefik_entrypoint_request_duration_seconds_count{code="204",entrypoint="web",method="DELETE",protocol="http"} 10446
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 29818
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 30290
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 30456
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 30508
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 30534
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 30563
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 30571
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="1"} 30578
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 30581
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 30581
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="5"} 30602
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 30606
+traefik_entrypoint_request_duration_seconds_sum{code="204",entrypoint="web",method="PATCH",protocol="http"} 797.362519008993
+traefik_entrypoint_request_duration_seconds_count{code="204",entrypoint="web",method="PATCH",protocol="http"} 30606
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.1"} 54869
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.2"} 61844
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.3"} 63734
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.4"} 65053
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.5"} 66111
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.8"} 66489
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="0.9"} 66507
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="1"} 66512
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="1.1"} 66519
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="1.2"} 66526
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="5"} 66554
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="POST",protocol="http",le="+Inf"} 66555
+traefik_entrypoint_request_duration_seconds_sum{code="204",entrypoint="web",method="POST",protocol="http"} 3518.3602801470365
+traefik_entrypoint_request_duration_seconds_count{code="204",entrypoint="web",method="POST",protocol="http"} 66555
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.1"} 24769
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.2"} 46802
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.3"} 48080
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.4"} 48611
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.5"} 48903
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.8"} 49321
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="0.9"} 49412
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="1"} 49462
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="1.1"} 49518
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="1.2"} 49558
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="5"} 49829
+traefik_entrypoint_request_duration_seconds_bucket{code="204",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 49872
+traefik_entrypoint_request_duration_seconds_sum{code="204",entrypoint="web",method="PUT",protocol="http"} 5950.493801841983
+traefik_entrypoint_request_duration_seconds_count{code="204",entrypoint="web",method="PUT",protocol="http"} 49872
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.1"} 3037
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.2"} 3039
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.3"} 3040
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.4"} 3040
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.5"} 3041
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.8"} 3041
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="0.9"} 3041
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="1"} 3041
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="1.1"} 3041
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="1.2"} 3041
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="5"} 3043
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="GET",protocol="http",le="+Inf"} 3046
+traefik_entrypoint_request_duration_seconds_sum{code="206",entrypoint="web",method="GET",protocol="http"} 200.91194297900017
+traefik_entrypoint_request_duration_seconds_count{code="206",entrypoint="web",method="GET",protocol="http"} 3046
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.1"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.2"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.3"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.4"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.5"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.8"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="0.9"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="1"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="1.1"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="1.2"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="5"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="206",entrypoint="web",method="HEAD",protocol="http",le="+Inf"} 35
+traefik_entrypoint_request_duration_seconds_sum{code="206",entrypoint="web",method="HEAD",protocol="http"} 0.03518408899999999
+traefik_entrypoint_request_duration_seconds_count{code="206",entrypoint="web",method="HEAD",protocol="http"} 35
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 2767
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 2770
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 2772
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 2772
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 2772
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 2773
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 2773
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="1"} 2774
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 2774
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 2774
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="5"} 2775
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 2775
+traefik_entrypoint_request_duration_seconds_sum{code="207",entrypoint="web",method="DELETE",protocol="http"} 33.959802933999995
+traefik_entrypoint_request_duration_seconds_count{code="207",entrypoint="web",method="DELETE",protocol="http"} 2775
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.1"} 93
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.2"} 101
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.3"} 105
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.4"} 112
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.5"} 120
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.8"} 127
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="0.9"} 127
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="1"} 127
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="1.1"} 127
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="1.2"} 127
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="5"} 128
+traefik_entrypoint_request_duration_seconds_bucket{code="207",entrypoint="web",method="POST",protocol="http",le="+Inf"} 129
+traefik_entrypoint_request_duration_seconds_sum{code="207",entrypoint="web",method="POST",protocol="http"} 27.57962429700001
+traefik_entrypoint_request_duration_seconds_count{code="207",entrypoint="web",method="POST",protocol="http"} 129
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.1"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.2"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.3"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.4"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.5"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.8"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="0.9"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="1"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="1.1"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="1.2"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="5"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="301",entrypoint="web",method="GET",protocol="http",le="+Inf"} 248
+traefik_entrypoint_request_duration_seconds_sum{code="301",entrypoint="web",method="GET",protocol="http"} 0.25649611699999997
+traefik_entrypoint_request_duration_seconds_count{code="301",entrypoint="web",method="GET",protocol="http"} 248
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.1"} 30448
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.2"} 38318
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.3"} 41030
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.4"} 43988
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.5"} 46851
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.8"} 48508
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="0.9"} 48554
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="1"} 48571
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="1.1"} 48580
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="1.2"} 48587
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="5"} 48619
+traefik_entrypoint_request_duration_seconds_bucket{code="302",entrypoint="web",method="GET",protocol="http",le="+Inf"} 48623
+traefik_entrypoint_request_duration_seconds_sum{code="302",entrypoint="web",method="GET",protocol="http"} 5561.800275933011
+traefik_entrypoint_request_duration_seconds_count{code="302",entrypoint="web",method="GET",protocol="http"} 48623
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.1"} 367383
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.2"} 367384
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.3"} 367385
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.4"} 367385
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.5"} 367386
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.8"} 367387
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="0.9"} 367387
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="1"} 367387
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="1.1"} 367387
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="1.2"} 367387
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="5"} 367387
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="GET",protocol="http",le="+Inf"} 367387
+traefik_entrypoint_request_duration_seconds_sum{code="304",entrypoint="web",method="GET",protocol="http"} 418.3746390310068
+traefik_entrypoint_request_duration_seconds_count{code="304",entrypoint="web",method="GET",protocol="http"} 367387
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.1"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.2"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.3"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.4"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.5"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.8"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="0.9"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="1"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="1.1"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="1.2"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="5"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="304",entrypoint="web",method="HEAD",protocol="http",le="+Inf"} 4
+traefik_entrypoint_request_duration_seconds_sum{code="304",entrypoint="web",method="HEAD",protocol="http"} 0.0044282570000000005
+traefik_entrypoint_request_duration_seconds_count{code="304",entrypoint="web",method="HEAD",protocol="http"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.3"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.4"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.5"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.8"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="0.9"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="1.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="1.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="5"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="traefik",method="GET",protocol="http",le="+Inf"} 5
+traefik_entrypoint_request_duration_seconds_sum{code="400",entrypoint="traefik",method="GET",protocol="http"} 0.0006326610000000001
+traefik_entrypoint_request_duration_seconds_count{code="400",entrypoint="traefik",method="GET",protocol="http"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.1"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.2"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.3"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.4"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.5"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.8"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="0.9"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="1"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="1.1"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="1.2"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="5"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="GET",protocol="http",le="+Inf"} 8
+traefik_entrypoint_request_duration_seconds_sum{code="400",entrypoint="web",method="GET",protocol="http"} 0.010426270999999999
+traefik_entrypoint_request_duration_seconds_count{code="400",entrypoint="web",method="GET",protocol="http"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.1"} 42862
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.2"} 43468
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.3"} 43839
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.4"} 43940
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.5"} 43978
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.8"} 44029
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="0.9"} 44038
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="1"} 44049
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="1.1"} 44061
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="1.2"} 44066
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="5"} 44106
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="POST",protocol="http",le="+Inf"} 59417
+traefik_entrypoint_request_duration_seconds_sum{code="400",entrypoint="web",method="POST",protocol="http"} 77544.51951296844
+traefik_entrypoint_request_duration_seconds_count{code="400",entrypoint="web",method="POST",protocol="http"} 59417
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.1"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.2"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.3"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.4"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.5"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.8"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="0.9"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="1"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="1.1"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="1.2"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="5"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="400",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 4757
+traefik_entrypoint_request_duration_seconds_sum{code="400",entrypoint="web",method="PUT",protocol="http"} 7.191891319000009
+traefik_entrypoint_request_duration_seconds_count{code="400",entrypoint="web",method="PUT",protocol="http"} 4757
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="1"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="5"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 2
+traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="DELETE",protocol="http"} 0.0018184479999999999
+traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="DELETE",protocol="http"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.1"} 2.289379e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.2"} 2.2896175e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.3"} 2.2896199e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.4"} 2.2896204e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.5"} 2.2896211e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.8"} 2.2896212e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="0.9"} 2.2896213e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="1"} 2.2896213e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="1.1"} 2.2896213e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="1.2"} 2.2896213e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="5"} 2.2896213e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="GET",protocol="http",le="+Inf"} 2.2896213e+07
+traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="GET",protocol="http"} 25752.359368771624
+traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="GET",protocol="http"} 2.2896213e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="1"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="5"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 10
+traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="PATCH",protocol="http"} 0.010515436999999999
+traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="PATCH",protocol="http"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.1"} 927908
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.2"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.3"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.4"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.5"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.8"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="0.9"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="1"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="1.1"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="1.2"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="5"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="POST",protocol="http",le="+Inf"} 927912
+traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="POST",protocol="http"} 995.9855624980047
+traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="POST",protocol="http"} 927912
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.1"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.2"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.3"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.4"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.5"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.8"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="0.9"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="1"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="1.1"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="1.2"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="5"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="401",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 75
+traefik_entrypoint_request_duration_seconds_sum{code="401",entrypoint="web",method="PUT",protocol="http"} 0.16541799500000004
+traefik_entrypoint_request_duration_seconds_count{code="401",entrypoint="web",method="PUT",protocol="http"} 75
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 830
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 830
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 830
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 830
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 830
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 831
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 831
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="1"} 831
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 831
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 831
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="5"} 831
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 831
+traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="DELETE",protocol="http"} 9.061551029999986
+traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="DELETE",protocol="http"} 831
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.1"} 216932
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.2"} 217462
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.3"} 217600
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.4"} 217648
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.5"} 217684
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.8"} 217723
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="0.9"} 217728
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="1"} 217739
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="1.1"} 217744
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="1.2"} 217747
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="5"} 217766
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="GET",protocol="http",le="+Inf"} 217771
+traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="GET",protocol="http"} 1243.8479915990079
+traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="GET",protocol="http"} 217771
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 90
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 90
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 90
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 90
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="1"} 90
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 90
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 90
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="5"} 90
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 90
+traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="PATCH",protocol="http"} 1.039575084
+traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="PATCH",protocol="http"} 90
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.1"} 658814
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.2"} 667999
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.3"} 668305
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.4"} 668348
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.5"} 668368
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.8"} 668417
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="0.9"} 668427
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="1"} 668436
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="1.1"} 668441
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="1.2"} 668443
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="5"} 668485
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="POST",protocol="http",le="+Inf"} 668504
+traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="POST",protocol="http"} 5763.404909136024
+traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="POST",protocol="http"} 668504
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.1"} 387
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.2"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.3"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.4"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.5"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.8"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="0.9"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="1"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="1.1"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="1.2"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="5"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="403",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 388
+traefik_entrypoint_request_duration_seconds_sum{code="403",entrypoint="web",method="PUT",protocol="http"} 1.0210683440000006
+traefik_entrypoint_request_duration_seconds_count{code="403",entrypoint="web",method="PUT",protocol="http"} 388
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.1"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.2"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.3"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.4"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.5"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.8"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="0.9"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="1"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="1.1"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="1.2"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="5"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="traefik",method="GET",protocol="http",le="+Inf"} 3
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="traefik",method="GET",protocol="http"} 0.000172581
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="traefik",method="GET",protocol="http"} 3
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="5"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 5
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="DELETE",protocol="http"} 0.049077042999999994
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="DELETE",protocol="http"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.1"} 1.6708334e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.2"} 2.4431309e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.3"} 2.4897006e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.4"} 2.5060706e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.5"} 2.5158815e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.8"} 2.5319277e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="0.9"} 2.5348008e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="1"} 2.5366706e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="1.1"} 2.5380618e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="1.2"} 2.5390269e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="5"} 2.5431782e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="http",le="+Inf"} 2.5435602e+07
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="GET",protocol="http"} 1.5730236608823321e+06
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="GET",protocol="http"} 2.5435602e+07
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.1"} 76149
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.2"} 77389
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.3"} 78136
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.4"} 78736
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.5"} 78893
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.8"} 79100
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="0.9"} 79112
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="1"} 79125
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="1.1"} 79134
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="1.2"} 79137
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="5"} 79137
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="GET",protocol="websocket",le="+Inf"} 79137
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="GET",protocol="websocket"} 952.6657687000076
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="GET",protocol="websocket"} 79137
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.1"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.2"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.3"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.4"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.5"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.8"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="0.9"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="1"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="1.1"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="1.2"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="5"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="HEAD",protocol="http",le="+Inf"} 440
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="HEAD",protocol="http"} 0.8076752390000003
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="HEAD",protocol="http"} 440
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="1"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="5"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 10
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="PATCH",protocol="http"} 0.106270053
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="PATCH",protocol="http"} 10
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.1"} 11831
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.2"} 11996
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.3"} 12058
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.4"} 12066
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.5"} 12068
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.8"} 12080
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="0.9"} 12084
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="1"} 12086
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="1.1"} 12087
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="1.2"} 12091
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="5"} 12112
+traefik_entrypoint_request_duration_seconds_bucket{code="404",entrypoint="web",method="POST",protocol="http",le="+Inf"} 12125
+traefik_entrypoint_request_duration_seconds_sum{code="404",entrypoint="web",method="POST",protocol="http"} 354.48999692400014
+traefik_entrypoint_request_duration_seconds_count{code="404",entrypoint="web",method="POST",protocol="http"} 12125
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.1"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.2"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.3"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.4"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.5"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.8"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="0.9"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="1"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="1.1"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="1.2"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="5"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="OPTIONS",protocol="http",le="+Inf"} 89
+traefik_entrypoint_request_duration_seconds_sum{code="405",entrypoint="web",method="OPTIONS",protocol="http"} 0.111158589
+traefik_entrypoint_request_duration_seconds_count{code="405",entrypoint="web",method="OPTIONS",protocol="http"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 1
+traefik_entrypoint_request_duration_seconds_sum{code="405",entrypoint="web",method="PATCH",protocol="http"} 0.000997012
+traefik_entrypoint_request_duration_seconds_count{code="405",entrypoint="web",method="PATCH",protocol="http"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.1"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.2"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.3"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.4"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.5"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.8"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="0.9"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="1"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="1.1"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="1.2"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="5"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="POST",protocol="http",le="+Inf"} 13
+traefik_entrypoint_request_duration_seconds_sum{code="405",entrypoint="web",method="POST",protocol="http"} 0.015701319999999998
+traefik_entrypoint_request_duration_seconds_count{code="405",entrypoint="web",method="POST",protocol="http"} 13
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.1"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.2"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.3"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.4"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.5"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.8"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="0.9"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="1"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="1.1"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="1.2"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="5"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="405",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 518
+traefik_entrypoint_request_duration_seconds_sum{code="405",entrypoint="web",method="PUT",protocol="http"} 0.7715693390000001
+traefik_entrypoint_request_duration_seconds_count{code="405",entrypoint="web",method="PUT",protocol="http"} 518
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.1"} 2.12735267e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.2"} 2.12837945e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.3"} 2.12867308e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.4"} 2.12881286e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.5"} 2.12890892e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.8"} 2.12908516e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="0.9"} 2.12912307e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="1"} 2.12915414e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="1.1"} 2.12918123e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="1.2"} 2.12920839e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="5"} 2.12981945e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="GET",protocol="http",le="+Inf"} 2.13012914e+08
+traefik_entrypoint_request_duration_seconds_sum{code="409",entrypoint="web",method="GET",protocol="http"} 1.440885906018625e+06
+traefik_entrypoint_request_duration_seconds_count{code="409",entrypoint="web",method="GET",protocol="http"} 2.13012914e+08
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 289
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 289
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 289
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="1"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 290
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 291
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="5"} 293
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 293
+traefik_entrypoint_request_duration_seconds_sum{code="409",entrypoint="web",method="PATCH",protocol="http"} 8.790643885000003
+traefik_entrypoint_request_duration_seconds_count{code="409",entrypoint="web",method="PATCH",protocol="http"} 293
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.1"} 180
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.2"} 185
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.3"} 189
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.4"} 191
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.5"} 191
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.8"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="0.9"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="1"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="1.1"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="1.2"} 192
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="5"} 194
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="POST",protocol="http",le="+Inf"} 195
+traefik_entrypoint_request_duration_seconds_sum{code="409",entrypoint="web",method="POST",protocol="http"} 17.934394692999998
+traefik_entrypoint_request_duration_seconds_count{code="409",entrypoint="web",method="POST",protocol="http"} 195
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.1"} 38126
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.2"} 40054
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.3"} 40533
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.4"} 40866
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.5"} 41024
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.8"} 41282
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="0.9"} 41337
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="1"} 41373
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="1.1"} 41399
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="1.2"} 41422
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="5"} 41610
+traefik_entrypoint_request_duration_seconds_bucket{code="409",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 41665
+traefik_entrypoint_request_duration_seconds_sum{code="409",entrypoint="web",method="PUT",protocol="http"} 3606.133672342983
+traefik_entrypoint_request_duration_seconds_count{code="409",entrypoint="web",method="PUT",protocol="http"} 41665
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.1"} 1.706487e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.2"} 1.7067e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.3"} 1.706726e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.4"} 1.706742e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.5"} 1.706757e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.8"} 1.706779e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="0.9"} 1.706783e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="1"} 1.706789e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="1.1"} 1.706791e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="1.2"} 1.706797e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="5"} 1.706888e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="GET",protocol="http",le="+Inf"} 1.706931e+06
+traefik_entrypoint_request_duration_seconds_sum{code="410",entrypoint="web",method="GET",protocol="http"} 5115.734139137677
+traefik_entrypoint_request_duration_seconds_count{code="410",entrypoint="web",method="GET",protocol="http"} 1.706931e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="410",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 1
+traefik_entrypoint_request_duration_seconds_sum{code="410",entrypoint="web",method="PATCH",protocol="http"} 0.005254578
+traefik_entrypoint_request_duration_seconds_count{code="410",entrypoint="web",method="PATCH",protocol="http"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 1
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="DELETE",protocol="http"} 0.023973863
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="DELETE",protocol="http"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.1"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.2"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.3"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.4"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.5"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.8"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="0.9"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="1"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="1.1"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="1.2"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="5"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="GET",protocol="http",le="+Inf"} 20
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="GET",protocol="http"} 0.039623226
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="GET",protocol="http"} 20
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="1"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="5"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 26
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="PATCH",protocol="http"} 0.083693077
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="PATCH",protocol="http"} 26
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.1"} 939
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.2"} 948
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.3"} 953
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.4"} 953
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.5"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.8"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="0.9"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="1"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="1.1"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="1.2"} 954
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="5"} 955
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="POST",protocol="http",le="+Inf"} 955
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="POST",protocol="http"} 11.256437256000007
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="POST",protocol="http"} 955
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.1"} 12620
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.2"} 12624
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.3"} 12627
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.4"} 12627
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.5"} 12627
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.8"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="0.9"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="1"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="1.1"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="1.2"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="5"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="422",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 12628
+traefik_entrypoint_request_duration_seconds_sum{code="422",entrypoint="web",method="PUT",protocol="http"} 30.15632766300003
+traefik_entrypoint_request_duration_seconds_count{code="422",entrypoint="web",method="PUT",protocol="http"} 12628
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.1"} 2.103905e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.2"} 2.103908e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.3"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.4"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.5"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.8"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="0.9"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="1"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="1.1"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="1.2"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="5"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="GET",protocol="http",le="+Inf"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_sum{code="429",entrypoint="web",method="GET",protocol="http"} 336.7924126419656
+traefik_entrypoint_request_duration_seconds_count{code="429",entrypoint="web",method="GET",protocol="http"} 2.103909e+06
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.1"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.2"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.3"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.4"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.5"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.8"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="0.9"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="1"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="1.1"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="1.2"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="5"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="429",entrypoint="web",method="POST",protocol="http",le="+Inf"} 205
+traefik_entrypoint_request_duration_seconds_sum{code="429",entrypoint="web",method="POST",protocol="http"} 0.027288120999999995
+traefik_entrypoint_request_duration_seconds_count{code="429",entrypoint="web",method="POST",protocol="http"} 205
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.1"} 83
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.2"} 144
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.3"} 168
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.4"} 184
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.5"} 194
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.8"} 231
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="0.9"} 232
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="1"} 234
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="1.1"} 235
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="1.2"} 235
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="5"} 343
+traefik_entrypoint_request_duration_seconds_bucket{code="444",entrypoint="web",method="GET",protocol="http",le="+Inf"} 1255
+traefik_entrypoint_request_duration_seconds_sum{code="444",entrypoint="web",method="GET",protocol="http"} 29923.69344054194
+traefik_entrypoint_request_duration_seconds_count{code="444",entrypoint="web",method="GET",protocol="http"} 1255
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.3"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.4"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.8"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="0.9"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="1.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="1.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="445",entrypoint="web",method="GET",protocol="http",le="+Inf"} 269941
+traefik_entrypoint_request_duration_seconds_sum{code="445",entrypoint="web",method="GET",protocol="http"} 1.6198159394737784e+07
+traefik_entrypoint_request_duration_seconds_count{code="445",entrypoint="web",method="GET",protocol="http"} 269941
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.1"} 499
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.2"} 744
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.3"} 842
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.4"} 918
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.5"} 970
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.8"} 1061
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="0.9"} 1074
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="1"} 1094
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="1.1"} 1105
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="1.2"} 1132
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="5"} 1884
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="GET",protocol="http",le="+Inf"} 5075
+traefik_entrypoint_request_duration_seconds_sum{code="499",entrypoint="web",method="GET",protocol="http"} 138388.62840130684
+traefik_entrypoint_request_duration_seconds_count{code="499",entrypoint="web",method="GET",protocol="http"} 5075
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 2
+traefik_entrypoint_request_duration_seconds_sum{code="499",entrypoint="web",method="PATCH",protocol="http"} 45.061508693
+traefik_entrypoint_request_duration_seconds_count{code="499",entrypoint="web",method="PATCH",protocol="http"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.1"} 85786
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.2"} 125143
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.3"} 144101
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.4"} 151775
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.5"} 156313
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.8"} 163673
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="0.9"} 165387
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="1"} 166772
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="1.1"} 168246
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="1.2"} 169461
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="5"} 193067
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="POST",protocol="http",le="+Inf"} 194455
+traefik_entrypoint_request_duration_seconds_sum{code="499",entrypoint="web",method="POST",protocol="http"} 171588.70865418628
+traefik_entrypoint_request_duration_seconds_count{code="499",entrypoint="web",method="POST",protocol="http"} 194455
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.1"} 70
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.2"} 79
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.3"} 88
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.4"} 89
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.5"} 92
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.8"} 93
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="0.9"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="1"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="1.1"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="1.2"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="5"} 94
+traefik_entrypoint_request_duration_seconds_bucket{code="499",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 16127
+traefik_entrypoint_request_duration_seconds_sum{code="499",entrypoint="web",method="PUT",protocol="http"} 4.809399570415463e+06
+traefik_entrypoint_request_duration_seconds_count{code="499",entrypoint="web",method="PUT",protocol="http"} 16127
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 5
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="5"} 7
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 7
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="DELETE",protocol="http"} 2.9226568759999996
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="DELETE",protocol="http"} 7
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.1"} 4304
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.2"} 4314
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.3"} 4315
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.4"} 4317
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.5"} 4322
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.8"} 4333
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="0.9"} 4333
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="1"} 4333
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="1.1"} 4333
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="1.2"} 4334
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="5"} 4334
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="GET",protocol="http",le="+Inf"} 12951
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="GET",protocol="http"} 495411.215290646
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="GET",protocol="http"} 12951
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="1"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 11
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="5"} 12
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 12
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="PATCH",protocol="http"} 3.4746266410000004
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="PATCH",protocol="http"} 12
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.1"} 321
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.2"} 322
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.3"} 323
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.4"} 323
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.5"} 323
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.8"} 324
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="0.9"} 325
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="1"} 325
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="1.1"} 325
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="1.2"} 325
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="5"} 339
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="POST",protocol="http",le="+Inf"} 2196
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="POST",protocol="http"} 112599.76971862414
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="POST",protocol="http"} 2196
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.1"} 17
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.2"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.3"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.4"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.5"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.8"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="0.9"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="1"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="1.1"} 18
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="1.2"} 19
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="5"} 22
+traefik_entrypoint_request_duration_seconds_bucket{code="500",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 1551
+traefik_entrypoint_request_duration_seconds_sum{code="500",entrypoint="web",method="PUT",protocol="http"} 254492.6350865842
+traefik_entrypoint_request_duration_seconds_count{code="500",entrypoint="web",method="PUT",protocol="http"} 1551
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.1"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.2"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.3"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.4"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.5"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.8"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="0.9"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="1"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="1.1"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="1.2"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="5"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="DELETE",protocol="http",le="+Inf"} 4
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="DELETE",protocol="http"} 0.006532118999999999
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="DELETE",protocol="http"} 4
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.1"} 107436
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.2"} 107462
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.3"} 107466
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.4"} 107471
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.5"} 107478
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.8"} 107500
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="0.9"} 107508
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="1"} 107522
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="1.1"} 107568
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="1.2"} 107586
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="5"} 107931
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="GET",protocol="http",le="+Inf"} 115170
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="GET",protocol="http"} 241715.94925767966
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="GET",protocol="http"} 115170
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.3"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.4"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.8"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="0.9"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="1.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="1.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="5"} 1
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PATCH",protocol="http",le="+Inf"} 2
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="PATCH",protocol="http"} 27.351390443
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="PATCH",protocol="http"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.1"} 902
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.2"} 987
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.3"} 1046
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.4"} 1088
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.5"} 1104
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.8"} 1149
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="0.9"} 1158
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="1"} 1169
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="1.1"} 1182
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="1.2"} 1197
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="5"} 1400
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="POST",protocol="http",le="+Inf"} 2900
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="POST",protocol="http"} 1.0039723839193305e+06
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="POST",protocol="http"} 2900
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.1"} 36
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.2"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.3"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.4"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.5"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.8"} 37
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="0.9"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="1"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="1.1"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="1.2"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="5"} 38
+traefik_entrypoint_request_duration_seconds_bucket{code="502",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 40
+traefik_entrypoint_request_duration_seconds_sum{code="502",entrypoint="web",method="PUT",protocol="http"} 32.391189919
+traefik_entrypoint_request_duration_seconds_count{code="502",entrypoint="web",method="PUT",protocol="http"} 40
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.1"} 72447
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.2"} 72448
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.3"} 72448
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.4"} 72448
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.5"} 72448
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.8"} 72448
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="0.9"} 72448
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="1"} 72448
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="1.1"} 72448
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="1.2"} 72448
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="5"} 72454
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="GET",protocol="http",le="+Inf"} 72538
+traefik_entrypoint_request_duration_seconds_sum{code="503",entrypoint="web",method="GET",protocol="http"} 2883.984412680031
+traefik_entrypoint_request_duration_seconds_count{code="503",entrypoint="web",method="GET",protocol="http"} 72538
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.1"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.2"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.3"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.4"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.5"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.8"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="0.9"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="1"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="1.1"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="1.2"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="5"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="503",entrypoint="web",method="POST",protocol="http",le="+Inf"} 15648
+traefik_entrypoint_request_duration_seconds_sum{code="503",entrypoint="web",method="POST",protocol="http"} 18.386133866
+traefik_entrypoint_request_duration_seconds_count{code="503",entrypoint="web",method="POST",protocol="http"} 15648
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.3"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.4"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.8"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="0.9"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="1.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="1.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="GET",protocol="http",le="+Inf"} 8
+traefik_entrypoint_request_duration_seconds_sum{code="504",entrypoint="web",method="GET",protocol="http"} 240.012145339
+traefik_entrypoint_request_duration_seconds_count{code="504",entrypoint="web",method="GET",protocol="http"} 8
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.3"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.4"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.8"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="0.9"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="1.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="1.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="POST",protocol="http",le="+Inf"} 2
+traefik_entrypoint_request_duration_seconds_sum{code="504",entrypoint="web",method="POST",protocol="http"} 60.003337996
+traefik_entrypoint_request_duration_seconds_count{code="504",entrypoint="web",method="POST",protocol="http"} 2
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.3"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.4"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.8"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="0.9"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="1.1"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="1.2"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="5"} 0
+traefik_entrypoint_request_duration_seconds_bucket{code="504",entrypoint="web",method="PUT",protocol="http",le="+Inf"} 107
+traefik_entrypoint_request_duration_seconds_sum{code="504",entrypoint="web",method="PUT",protocol="http"} 3683.539644907
+traefik_entrypoint_request_duration_seconds_count{code="504",entrypoint="web",method="PUT",protocol="http"} 107
+# HELP traefik_entrypoint_requests_total How many HTTP requests processed on an entrypoint, partitioned by status code, protocol, and method.
+# TYPE traefik_entrypoint_requests_total counter
+traefik_entrypoint_requests_total{code="200",entrypoint="traefik",method="GET",protocol="http"} 2.840814e+06
+traefik_entrypoint_requests_total{code="200",entrypoint="web",method="GET",protocol="http"} 7.87262719e+08
+traefik_entrypoint_requests_total{code="200",entrypoint="web",method="HEAD",protocol="http"} 6311
+traefik_entrypoint_requests_total{code="200",entrypoint="web",method="PATCH",protocol="http"} 6047
+traefik_entrypoint_requests_total{code="200",entrypoint="web",method="POST",protocol="http"} 7.1907943e+07
+traefik_entrypoint_requests_total{code="201",entrypoint="web",method="POST",protocol="http"} 1.76862498e+08
+traefik_entrypoint_requests_total{code="204",entrypoint="web",method="DELETE",protocol="http"} 10446
+traefik_entrypoint_requests_total{code="204",entrypoint="web",method="PATCH",protocol="http"} 30606
+traefik_entrypoint_requests_total{code="204",entrypoint="web",method="POST",protocol="http"} 66555
+traefik_entrypoint_requests_total{code="204",entrypoint="web",method="PUT",protocol="http"} 49872
+traefik_entrypoint_requests_total{code="206",entrypoint="web",method="GET",protocol="http"} 3046
+traefik_entrypoint_requests_total{code="206",entrypoint="web",method="HEAD",protocol="http"} 35
+traefik_entrypoint_requests_total{code="207",entrypoint="web",method="DELETE",protocol="http"} 2775
+traefik_entrypoint_requests_total{code="207",entrypoint="web",method="POST",protocol="http"} 129
+traefik_entrypoint_requests_total{code="301",entrypoint="web",method="GET",protocol="http"} 248
+traefik_entrypoint_requests_total{code="302",entrypoint="web",method="GET",protocol="http"} 48623
+traefik_entrypoint_requests_total{code="304",entrypoint="web",method="GET",protocol="http"} 367387
+traefik_entrypoint_requests_total{code="304",entrypoint="web",method="HEAD",protocol="http"} 4
+traefik_entrypoint_requests_total{code="400",entrypoint="traefik",method="GET",protocol="http"} 5
+traefik_entrypoint_requests_total{code="400",entrypoint="web",method="GET",protocol="http"} 8
+traefik_entrypoint_requests_total{code="400",entrypoint="web",method="POST",protocol="http"} 59417
+traefik_entrypoint_requests_total{code="400",entrypoint="web",method="PUT",protocol="http"} 4757
+traefik_entrypoint_requests_total{code="401",entrypoint="web",method="DELETE",protocol="http"} 2
+traefik_entrypoint_requests_total{code="401",entrypoint="web",method="GET",protocol="http"} 2.2896213e+07
+traefik_entrypoint_requests_total{code="401",entrypoint="web",method="PATCH",protocol="http"} 10
+traefik_entrypoint_requests_total{code="401",entrypoint="web",method="POST",protocol="http"} 927912
+traefik_entrypoint_requests_total{code="401",entrypoint="web",method="PUT",protocol="http"} 75
+traefik_entrypoint_requests_total{code="403",entrypoint="web",method="DELETE",protocol="http"} 831
+traefik_entrypoint_requests_total{code="403",entrypoint="web",method="GET",protocol="http"} 217771
+traefik_entrypoint_requests_total{code="403",entrypoint="web",method="PATCH",protocol="http"} 90
+traefik_entrypoint_requests_total{code="403",entrypoint="web",method="POST",protocol="http"} 668504
+traefik_entrypoint_requests_total{code="403",entrypoint="web",method="PUT",protocol="http"} 388
+traefik_entrypoint_requests_total{code="404",entrypoint="traefik",method="GET",protocol="http"} 3
+traefik_entrypoint_requests_total{code="404",entrypoint="web",method="DELETE",protocol="http"} 5
+traefik_entrypoint_requests_total{code="404",entrypoint="web",method="GET",protocol="http"} 2.5435602e+07
+traefik_entrypoint_requests_total{code="404",entrypoint="web",method="GET",protocol="websocket"} 79137
+traefik_entrypoint_requests_total{code="404",entrypoint="web",method="HEAD",protocol="http"} 440
+traefik_entrypoint_requests_total{code="404",entrypoint="web",method="PATCH",protocol="http"} 10
+traefik_entrypoint_requests_total{code="404",entrypoint="web",method="POST",protocol="http"} 12125
+traefik_entrypoint_requests_total{code="405",entrypoint="web",method="OPTIONS",protocol="http"} 89
+traefik_entrypoint_requests_total{code="405",entrypoint="web",method="PATCH",protocol="http"} 1
+traefik_entrypoint_requests_total{code="405",entrypoint="web",method="POST",protocol="http"} 13
+traefik_entrypoint_requests_total{code="405",entrypoint="web",method="PUT",protocol="http"} 518
+traefik_entrypoint_requests_total{code="409",entrypoint="web",method="GET",protocol="http"} 2.13012914e+08
+traefik_entrypoint_requests_total{code="409",entrypoint="web",method="PATCH",protocol="http"} 293
+traefik_entrypoint_requests_total{code="409",entrypoint="web",method="POST",protocol="http"} 195
+traefik_entrypoint_requests_total{code="409",entrypoint="web",method="PUT",protocol="http"} 41665
+traefik_entrypoint_requests_total{code="410",entrypoint="web",method="GET",protocol="http"} 1.706931e+06
+traefik_entrypoint_requests_total{code="410",entrypoint="web",method="PATCH",protocol="http"} 1
+traefik_entrypoint_requests_total{code="422",entrypoint="web",method="DELETE",protocol="http"} 1
+traefik_entrypoint_requests_total{code="422",entrypoint="web",method="GET",protocol="http"} 20
+traefik_entrypoint_requests_total{code="422",entrypoint="web",method="PATCH",protocol="http"} 26
+traefik_entrypoint_requests_total{code="422",entrypoint="web",method="POST",protocol="http"} 955
+traefik_entrypoint_requests_total{code="422",entrypoint="web",method="PUT",protocol="http"} 12628
+traefik_entrypoint_requests_total{code="429",entrypoint="web",method="GET",protocol="http"} 2.103909e+06
+traefik_entrypoint_requests_total{code="429",entrypoint="web",method="POST",protocol="http"} 205
+traefik_entrypoint_requests_total{code="444",entrypoint="web",method="GET",protocol="http"} 1255
+traefik_entrypoint_requests_total{code="445",entrypoint="web",method="GET",protocol="http"} 269941
+traefik_entrypoint_requests_total{code="499",entrypoint="web",method="GET",protocol="http"} 5075
+traefik_entrypoint_requests_total{code="499",entrypoint="web",method="PATCH",protocol="http"} 2
+traefik_entrypoint_requests_total{code="499",entrypoint="web",method="POST",protocol="http"} 194455
+traefik_entrypoint_requests_total{code="499",entrypoint="web",method="PUT",protocol="http"} 16127
+traefik_entrypoint_requests_total{code="500",entrypoint="web",method="DELETE",protocol="http"} 7
+traefik_entrypoint_requests_total{code="500",entrypoint="web",method="GET",protocol="http"} 12951
+traefik_entrypoint_requests_total{code="500",entrypoint="web",method="PATCH",protocol="http"} 12
+traefik_entrypoint_requests_total{code="500",entrypoint="web",method="POST",protocol="http"} 2196
+traefik_entrypoint_requests_total{code="500",entrypoint="web",method="PUT",protocol="http"} 1551
+traefik_entrypoint_requests_total{code="502",entrypoint="web",method="DELETE",protocol="http"} 4
+traefik_entrypoint_requests_total{code="502",entrypoint="web",method="GET",protocol="http"} 115170
+traefik_entrypoint_requests_total{code="502",entrypoint="web",method="PATCH",protocol="http"} 2
+traefik_entrypoint_requests_total{code="502",entrypoint="web",method="POST",protocol="http"} 2900
+traefik_entrypoint_requests_total{code="502",entrypoint="web",method="PUT",protocol="http"} 40
+traefik_entrypoint_requests_total{code="503",entrypoint="web",method="GET",protocol="http"} 72538
+traefik_entrypoint_requests_total{code="503",entrypoint="web",method="POST",protocol="http"} 15648
+traefik_entrypoint_requests_total{code="504",entrypoint="web",method="GET",protocol="http"} 8
+traefik_entrypoint_requests_total{code="504",entrypoint="web",method="POST",protocol="http"} 2
+traefik_entrypoint_requests_total{code="504",entrypoint="web",method="PUT",protocol="http"} 107 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/traefik/traefik.go b/src/go/plugin/go.d/modules/traefik/traefik.go
new file mode 100644
index 000000000..e38ff9699
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/traefik.go
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package traefik
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("traefik", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Traefik {
+ return &Traefik{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8082/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+
+ charts: &module.Charts{},
+ checkMetrics: true,
+ cache: &cache{
+ entrypoints: make(map[string]*cacheEntrypoint),
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type (
+ Traefik struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prom prometheus.Prometheus
+
+ checkMetrics bool
+ cache *cache
+ }
+ cache struct {
+ entrypoints map[string]*cacheEntrypoint
+ }
+ cacheEntrypoint struct {
+ name, proto string
+ requests *module.Chart
+ reqDur *module.Chart
+ reqDurData map[string]cacheEntrypointReqDur
+ openConn *module.Chart
+ openConnMethods map[string]bool
+ }
+ cacheEntrypointReqDur struct {
+ prev, cur struct{ reqs, secs float64 }
+ seen bool
+ }
+)
+
+func (t *Traefik) Configuration() any {
+ return t.Config
+}
+
+func (t *Traefik) Init() error {
+ if err := t.validateConfig(); err != nil {
+ t.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prom, err := t.initPrometheusClient()
+ if err != nil {
+ t.Errorf("prometheus client initialization: %v", err)
+ return err
+ }
+ t.prom = prom
+
+ return nil
+}
+
+func (t *Traefik) Check() error {
+ mx, err := t.collect()
+ if err != nil {
+ t.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (t *Traefik) Charts() *module.Charts {
+ return t.charts
+}
+
+func (t *Traefik) Collect() map[string]int64 {
+ mx, err := t.collect()
+ if err != nil {
+ t.Error(err)
+ return nil
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (t *Traefik) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/traefik/traefik_test.go b/src/go/plugin/go.d/modules/traefik/traefik_test.go
new file mode 100644
index 000000000..f3ef024b8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/traefik/traefik_test.go
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package traefik
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer221Metrics, _ = os.ReadFile("testdata/v2.2.1/metrics.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer221Metrics": dataVer221Metrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestTraefik_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Traefik{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestTraefik_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ config: New().Config,
+ },
+ "fails on unset 'url'": {
+ wantFail: true,
+ config: Config{HTTP: web.HTTP{
+ Request: web.Request{},
+ }},
+ },
+ "fails on invalid TLSCA": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
+ },
+ }},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+			tk := New()
+			tk.Config = test.config
+
+			if test.wantFail {
+				assert.Error(t, tk.Init())
+			} else {
+				assert.NoError(t, tk.Init())
+ }
+ })
+ }
+}
+
+func TestTraefik_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestTraefik_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestTraefik_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (tk *Traefik, cleanup func())
+ }{
+		"success on valid response v2.2.1": {
+ wantFail: false,
+ prepare: prepareCaseTraefikV221Metrics,
+ },
+		"fails on response with unexpected metrics (not Traefik)": {
+ wantFail: true,
+ prepare: prepareCaseNotTraefikMetrics,
+ },
+ "fails on 404 response": {
+ wantFail: true,
+ prepare: prepareCase404Response,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tk, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, tk.Check())
+ } else {
+ assert.NoError(t, tk.Check())
+ }
+ })
+ }
+}
+
+func TestTraefik_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (tk *Traefik, cleanup func())
+ wantCollected []map[string]int64
+ }{
+ "success on valid response v2.2.1": {
+ prepare: prepareCaseTraefikV221Metrics,
+ wantCollected: []map[string]int64{
+ {
+ "entrypoint_open_connections_traefik_http_GET": 1,
+ "entrypoint_open_connections_web_http_DELETE": 0,
+ "entrypoint_open_connections_web_http_GET": 0,
+ "entrypoint_open_connections_web_http_HEAD": 0,
+ "entrypoint_open_connections_web_http_OPTIONS": 0,
+ "entrypoint_open_connections_web_http_PATCH": 0,
+ "entrypoint_open_connections_web_http_POST": 4,
+ "entrypoint_open_connections_web_http_PUT": 0,
+ "entrypoint_open_connections_web_websocket_GET": 0,
+ "entrypoint_request_duration_average_traefik_http_1xx": 0,
+ "entrypoint_request_duration_average_traefik_http_2xx": 0,
+ "entrypoint_request_duration_average_traefik_http_3xx": 0,
+ "entrypoint_request_duration_average_traefik_http_4xx": 0,
+ "entrypoint_request_duration_average_traefik_http_5xx": 0,
+ "entrypoint_request_duration_average_web_http_1xx": 0,
+ "entrypoint_request_duration_average_web_http_2xx": 0,
+ "entrypoint_request_duration_average_web_http_3xx": 0,
+ "entrypoint_request_duration_average_web_http_4xx": 0,
+ "entrypoint_request_duration_average_web_http_5xx": 0,
+ "entrypoint_request_duration_average_web_websocket_1xx": 0,
+ "entrypoint_request_duration_average_web_websocket_2xx": 0,
+ "entrypoint_request_duration_average_web_websocket_3xx": 0,
+ "entrypoint_request_duration_average_web_websocket_4xx": 0,
+ "entrypoint_request_duration_average_web_websocket_5xx": 0,
+ "entrypoint_requests_traefik_http_1xx": 0,
+ "entrypoint_requests_traefik_http_2xx": 2840814,
+ "entrypoint_requests_traefik_http_3xx": 0,
+ "entrypoint_requests_traefik_http_4xx": 8,
+ "entrypoint_requests_traefik_http_5xx": 0,
+ "entrypoint_requests_web_http_1xx": 0,
+ "entrypoint_requests_web_http_2xx": 1036208982,
+ "entrypoint_requests_web_http_3xx": 416262,
+ "entrypoint_requests_web_http_4xx": 267591379,
+ "entrypoint_requests_web_http_5xx": 223136,
+ "entrypoint_requests_web_websocket_1xx": 0,
+ "entrypoint_requests_web_websocket_2xx": 0,
+ "entrypoint_requests_web_websocket_3xx": 0,
+ "entrypoint_requests_web_websocket_4xx": 79137,
+ "entrypoint_requests_web_websocket_5xx": 0,
+ },
+ },
+ },
+ "properly calculating entrypoint request duration delta": {
+ prepare: prepareCaseTraefikEntrypointRequestDuration,
+ wantCollected: []map[string]int64{
+ {
+ "entrypoint_request_duration_average_traefik_http_1xx": 0,
+ "entrypoint_request_duration_average_traefik_http_2xx": 0,
+ "entrypoint_request_duration_average_traefik_http_3xx": 0,
+ "entrypoint_request_duration_average_traefik_http_4xx": 0,
+ "entrypoint_request_duration_average_traefik_http_5xx": 0,
+ "entrypoint_request_duration_average_web_websocket_1xx": 0,
+ "entrypoint_request_duration_average_web_websocket_2xx": 0,
+ "entrypoint_request_duration_average_web_websocket_3xx": 0,
+ "entrypoint_request_duration_average_web_websocket_4xx": 0,
+ "entrypoint_request_duration_average_web_websocket_5xx": 0,
+ },
+ {
+ "entrypoint_request_duration_average_traefik_http_1xx": 0,
+ "entrypoint_request_duration_average_traefik_http_2xx": 500,
+ "entrypoint_request_duration_average_traefik_http_3xx": 0,
+ "entrypoint_request_duration_average_traefik_http_4xx": 0,
+ "entrypoint_request_duration_average_traefik_http_5xx": 0,
+ "entrypoint_request_duration_average_web_websocket_1xx": 0,
+ "entrypoint_request_duration_average_web_websocket_2xx": 0,
+ "entrypoint_request_duration_average_web_websocket_3xx": 250,
+ "entrypoint_request_duration_average_web_websocket_4xx": 0,
+ "entrypoint_request_duration_average_web_websocket_5xx": 0,
+ },
+ {
+ "entrypoint_request_duration_average_traefik_http_1xx": 0,
+ "entrypoint_request_duration_average_traefik_http_2xx": 1000,
+ "entrypoint_request_duration_average_traefik_http_3xx": 0,
+ "entrypoint_request_duration_average_traefik_http_4xx": 0,
+ "entrypoint_request_duration_average_traefik_http_5xx": 0,
+ "entrypoint_request_duration_average_web_websocket_1xx": 0,
+ "entrypoint_request_duration_average_web_websocket_2xx": 0,
+ "entrypoint_request_duration_average_web_websocket_3xx": 500,
+ "entrypoint_request_duration_average_web_websocket_4xx": 0,
+ "entrypoint_request_duration_average_web_websocket_5xx": 0,
+ },
+ {
+ "entrypoint_request_duration_average_traefik_http_1xx": 0,
+ "entrypoint_request_duration_average_traefik_http_2xx": 0,
+ "entrypoint_request_duration_average_traefik_http_3xx": 0,
+ "entrypoint_request_duration_average_traefik_http_4xx": 0,
+ "entrypoint_request_duration_average_traefik_http_5xx": 0,
+ "entrypoint_request_duration_average_web_websocket_1xx": 0,
+ "entrypoint_request_duration_average_web_websocket_2xx": 0,
+ "entrypoint_request_duration_average_web_websocket_3xx": 0,
+ "entrypoint_request_duration_average_web_websocket_4xx": 0,
+ "entrypoint_request_duration_average_web_websocket_5xx": 0,
+ },
+ },
+ },
+ "fails on response with unexpected metrics (not Traefik)": {
+ prepare: prepareCaseNotTraefikMetrics,
+ },
+ "fails on 404 response": {
+ prepare: prepareCase404Response,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tk, cleanup := test.prepare(t)
+ defer cleanup()
+
+ var ms map[string]int64
+ for _, want := range test.wantCollected {
+ ms = tk.Collect()
+ assert.Equal(t, want, ms)
+ }
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, tk, ms)
+ }
+ })
+ }
+}
+
+func prepareCaseTraefikV221Metrics(t *testing.T) (*Traefik, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer221Metrics)
+ }))
+ h := New()
+ h.URL = srv.URL
+ require.NoError(t, h.Init())
+
+ return h, srv.Close
+}
+
+func prepareCaseTraefikEntrypointRequestDuration(t *testing.T) (*Traefik, func()) {
+ t.Helper()
+ var num int
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ num++
+ switch num {
+ case 1:
+ _, _ = w.Write([]byte(`
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="traefik",method="GET",protocol="http"} 10.1
+traefik_entrypoint_request_duration_seconds_sum{code="300",entrypoint="web",method="GET",protocol="websocket"} 20.1
+traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="traefik",method="PUT",protocol="http"} 30
+traefik_entrypoint_request_duration_seconds_count{code="300",entrypoint="web",method="PUT",protocol="websocket"} 40
+`))
+ case 2:
+ _, _ = w.Write([]byte(`
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="traefik",method="GET",protocol="http"} 15.1
+traefik_entrypoint_request_duration_seconds_sum{code="300",entrypoint="web",method="GET",protocol="websocket"} 25.1
+traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="traefik",method="PUT",protocol="http"} 40
+traefik_entrypoint_request_duration_seconds_count{code="300",entrypoint="web",method="PUT",protocol="websocket"} 60
+`))
+ default:
+ _, _ = w.Write([]byte(`
+traefik_entrypoint_request_duration_seconds_sum{code="200",entrypoint="traefik",method="GET",protocol="http"} 25.1
+traefik_entrypoint_request_duration_seconds_sum{code="300",entrypoint="web",method="GET",protocol="websocket"} 35.1
+traefik_entrypoint_request_duration_seconds_count{code="200",entrypoint="traefik",method="PUT",protocol="http"} 50
+traefik_entrypoint_request_duration_seconds_count{code="300",entrypoint="web",method="PUT",protocol="websocket"} 80
+`))
+ }
+ }))
+ h := New()
+ h.URL = srv.URL
+ require.NoError(t, h.Init())
+
+ return h, srv.Close
+}
+
+func prepareCaseNotTraefikMetrics(t *testing.T) (*Traefik, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte(`
+# HELP application_backend_http_responses_total Total number of HTTP responses.
+# TYPE application_backend_http_responses_total counter
+application_backend_http_responses_total{proxy="infra-traefik-web",code="1xx"} 0
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="1xx"} 4130401
+application_backend_http_responses_total{proxy="infra-traefik-web",code="2xx"} 21338013
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="2xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="3xx"} 10004
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="3xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="4xx"} 10170758
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="4xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="5xx"} 3075
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="5xx"} 0
+application_backend_http_responses_total{proxy="infra-traefik-web",code="other"} 5657
+application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"} 0
+`))
+ }))
+ h := New()
+ h.URL = srv.URL
+ require.NoError(t, h.Init())
+
+ return h, srv.Close
+}
+
+func prepareCase404Response(t *testing.T) (*Traefik, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+ h := New()
+ h.URL = srv.URL
+ require.NoError(t, h.Init())
+
+ return h, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*Traefik, func()) {
+ t.Helper()
+ h := New()
+ h.URL = "http://127.0.0.1:38001"
+ require.NoError(t, h.Init())
+
+ return h, func() {}
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, tk *Traefik, ms map[string]int64) {
+ for _, chart := range *tk.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := ms[dim.ID]
+			assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", chart.ID, dim.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := ms[v.ID]
+			assert.Truef(t, ok, "chart '%s' var '%s': no var in collected", chart.ID, v.ID)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/unbound/README.md b/src/go/plugin/go.d/modules/unbound/README.md
new file mode 120000
index 000000000..5b0f42b04
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/README.md
@@ -0,0 +1 @@
+integrations/unbound.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/unbound/charts.go b/src/go/plugin/go.d/modules/unbound/charts.go
new file mode 100644
index 000000000..f0ac8b082
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/charts.go
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package unbound
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "golang.org/x/text/cases"
+ "golang.org/x/text/language"
+)
+
+type (
+ // Charts is an alias for module.Charts
+ Charts = module.Charts
+	// Chart is an alias for module.Chart
+ Chart = module.Chart
+ // Dims is an alias for module.Dims
+ Dims = module.Dims
+ // Dim is an alias for module.Dim
+ Dim = module.Dim
+)
+
+const (
+ prioQueries = module.Priority + iota
+ prioIPRateLimitedQueries
+ prioQueryType
+ prioQueryClass
+ prioQueryOpCode
+ prioQueryFlag
+ prioDNSCryptQueries
+
+ prioRecurReplies
+ prioReplyRCode
+
+ prioRecurTime
+
+ prioCache
+ prioCachePercentage
+ prioCachePrefetch
+ prioCacheExpired
+ prioZeroTTL
+ prioCacheCount
+
+ prioReqListUsage
+ prioReqListCurUsage
+ prioReqListJostle
+
+ prioTCPUsage
+
+ prioMemCache
+ prioMemMod
+ prioMemStreamWait
+ prioUptime
+
+ prioThread
+)
+
+func charts(cumulative bool) *Charts {
+ return &Charts{
+ makeIncrIf(queriesChart.Copy(), cumulative),
+ makeIncrIf(ipRateLimitedQueriesChart.Copy(), cumulative),
+ makeIncrIf(cacheChart.Copy(), cumulative),
+ makePercOfIncrIf(cachePercentageChart.Copy(), cumulative),
+ makeIncrIf(prefetchChart.Copy(), cumulative),
+ makeIncrIf(expiredChart.Copy(), cumulative),
+ makeIncrIf(zeroTTLChart.Copy(), cumulative),
+ makeIncrIf(dnsCryptChart.Copy(), cumulative),
+ makeIncrIf(recurRepliesChart.Copy(), cumulative),
+ recurTimeChart.Copy(),
+ reqListUsageChart.Copy(),
+ reqListCurUsageChart.Copy(),
+ makeIncrIf(reqListJostleChart.Copy(), cumulative),
+ tcpUsageChart.Copy(),
+ uptimeChart.Copy(),
+ }
+}
+
+func extendedCharts(cumulative bool) *Charts {
+ return &Charts{
+ memCacheChart.Copy(),
+ memModChart.Copy(),
+ memStreamWaitChart.Copy(),
+ cacheCountChart.Copy(),
+ makeIncrIf(queryTypeChart.Copy(), cumulative),
+ makeIncrIf(queryClassChart.Copy(), cumulative),
+ makeIncrIf(queryOpCodeChart.Copy(), cumulative),
+ makeIncrIf(queryFlagChart.Copy(), cumulative),
+ makeIncrIf(answerRCodeChart.Copy(), cumulative),
+ }
+}
+
+func threadCharts(thread string, cumulative bool) *Charts {
+ charts := charts(cumulative)
+ _ = charts.Remove(uptimeChart.ID)
+
+ for i, chart := range *charts {
+ convertTotalChartToThread(chart, thread, prioThread+i)
+ }
+ return charts
+}
+
+func convertTotalChartToThread(chart *Chart, thread string, priority int) {
+ chart.ID = fmt.Sprintf("%s_%s", thread, chart.ID)
+ chart.Title = fmt.Sprintf("%s %s",
+ cases.Title(language.English, cases.Compact).String(thread),
+ chart.Title,
+ )
+ chart.Fam = thread + "_stats"
+ chart.Ctx = "thread_" + chart.Ctx
+ chart.Priority = priority
+ for _, dim := range chart.Dims {
+ dim.ID = strings.Replace(dim.ID, "total", thread, 1)
+ }
+}
+
+// Common stats charts
+// see https://nlnetlabs.nl/documentation/unbound/unbound-control for the stats provided by unbound-control
+var (
+ queriesChart = Chart{
+ ID: "queries",
+ Title: "Received Queries",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "unbound.queries",
+ Priority: prioQueries,
+ Dims: Dims{
+ {ID: "total.num.queries", Name: "queries"},
+ },
+ }
+ ipRateLimitedQueriesChart = Chart{
+ ID: "queries_ip_ratelimited",
+ Title: "Rate Limited Queries",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "unbound.queries_ip_ratelimited",
+ Priority: prioIPRateLimitedQueries,
+ Dims: Dims{
+ {ID: "total.num.queries_ip_ratelimited", Name: "ratelimited"},
+ },
+ }
+ // ifdef USE_DNSCRYPT
+ dnsCryptChart = Chart{
+ ID: "dnscrypt_queries",
+ Title: "DNSCrypt Queries",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "unbound.dnscrypt_queries",
+ Priority: prioDNSCryptQueries,
+ Dims: Dims{
+ {ID: "total.num.dnscrypt.crypted", Name: "crypted"},
+ {ID: "total.num.dnscrypt.cert", Name: "cert"},
+ {ID: "total.num.dnscrypt.cleartext", Name: "cleartext"},
+ {ID: "total.num.dnscrypt.malformed", Name: "malformed"},
+ },
+ }
+ cacheChart = Chart{
+ ID: "cache",
+ Title: "Cache Statistics",
+ Units: "events",
+ Fam: "cache",
+ Ctx: "unbound.cache",
+ Type: module.Stacked,
+ Priority: prioCache,
+ Dims: Dims{
+ {ID: "total.num.cachehits", Name: "hits"},
+ {ID: "total.num.cachemiss", Name: "miss"},
+ },
+ }
+ cachePercentageChart = Chart{
+ ID: "cache_percentage",
+ Title: "Cache Statistics Percentage",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "unbound.cache_percentage",
+ Type: module.Stacked,
+ Priority: prioCachePercentage,
+ Dims: Dims{
+ {ID: "total.num.cachehits", Name: "hits", Algo: module.PercentOfAbsolute},
+ {ID: "total.num.cachemiss", Name: "miss", Algo: module.PercentOfAbsolute},
+ },
+ }
+ prefetchChart = Chart{
+ ID: "cache_prefetch",
+ Title: "Cache Prefetches",
+ Units: "prefetches",
+ Fam: "cache",
+ Ctx: "unbound.prefetch",
+ Priority: prioCachePrefetch,
+ Dims: Dims{
+ {ID: "total.num.prefetch", Name: "prefetches"},
+ },
+ }
+ expiredChart = Chart{
+ ID: "cache_expired",
+ Title: "Replies Served From Expired Cache",
+ Units: "replies",
+ Fam: "cache",
+ Ctx: "unbound.expired",
+ Priority: prioCacheExpired,
+ Dims: Dims{
+ {ID: "total.num.expired", Name: "expired"},
+ },
+ }
+ zeroTTLChart = Chart{
+ ID: "zero_ttl_replies",
+ Title: "Replies Served From Expired Cache",
+ Units: "replies",
+ Fam: "cache",
+ Ctx: "unbound.zero_ttl_replies",
+ Priority: prioZeroTTL,
+ Dims: Dims{
+ {ID: "total.num.zero_ttl", Name: "zero_ttl"},
+ },
+ }
+ recurRepliesChart = Chart{
+ ID: "recursive_replies",
+ Title: "Replies That Needed Recursive Processing",
+ Units: "replies",
+ Fam: "replies",
+ Ctx: "unbound.recursive_replies",
+ Priority: prioRecurReplies,
+ Dims: Dims{
+ {ID: "total.num.recursivereplies", Name: "recursive"},
+ },
+ }
+ recurTimeChart = Chart{
+ ID: "recursion_time",
+ Title: "Time Spent On Recursive Processing",
+ Units: "milliseconds",
+ Fam: "recursion timings",
+ Ctx: "unbound.recursion_time",
+ Priority: prioRecurTime,
+ Dims: Dims{
+ {ID: "total.recursion.time.avg", Name: "avg"},
+ {ID: "total.recursion.time.median", Name: "median"},
+ },
+ }
+ reqListUsageChart = Chart{
+ ID: "request_list_usage",
+ Title: "Request List Usage",
+ Units: "queries",
+ Fam: "request list",
+ Ctx: "unbound.request_list_usage",
+ Priority: prioReqListUsage,
+ Dims: Dims{
+ {ID: "total.requestlist.avg", Name: "avg", Div: 1000},
+ {ID: "total.requestlist.max", Name: "max"}, // all time max in cumulative mode, never resets
+ },
+ }
+ reqListCurUsageChart = Chart{
+ ID: "current_request_list_usage",
+ Title: "Current Request List Usage",
+ Units: "queries",
+ Fam: "request list",
+ Ctx: "unbound.current_request_list_usage",
+ Type: module.Area,
+ Priority: prioReqListCurUsage,
+ Dims: Dims{
+ {ID: "total.requestlist.current.all", Name: "all"},
+ {ID: "total.requestlist.current.user", Name: "user"},
+ },
+ }
+ reqListJostleChart = Chart{
+ ID: "request_list_jostle_list",
+ Title: "Request List Jostle List Events",
+ Units: "queries",
+ Fam: "request list",
+ Ctx: "unbound.request_list_jostle_list",
+ Priority: prioReqListJostle,
+ Dims: Dims{
+ {ID: "total.requestlist.overwritten", Name: "overwritten"},
+ {ID: "total.requestlist.exceeded", Name: "dropped"},
+ },
+ }
+ tcpUsageChart = Chart{
+ ID: "tcpusage",
+ Title: "TCP Handler Buffers",
+ Units: "buffers",
+ Fam: "tcp buffers",
+ Ctx: "unbound.tcpusage",
+ Priority: prioTCPUsage,
+ Dims: Dims{
+ {ID: "total.tcpusage", Name: "usage"},
+ },
+ }
+ uptimeChart = Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "unbound.uptime",
+ Priority: prioUptime,
+ Dims: Dims{
+ {ID: "time.up", Name: "time"},
+ },
+ }
+)
+
+// Extended stats charts
+var (
+ // TODO: do not add dnscrypt stuff by default?
+ memCacheChart = Chart{
+ ID: "cache_memory",
+ Title: "Cache Memory",
+ Units: "KB",
+ Fam: "mem",
+ Ctx: "unbound.cache_memory",
+ Type: module.Stacked,
+ Priority: prioMemCache,
+ Dims: Dims{
+ {ID: "mem.cache.message", Name: "message", Div: 1024},
+ {ID: "mem.cache.rrset", Name: "rrset", Div: 1024},
+ {ID: "mem.cache.dnscrypt_nonce", Name: "dnscrypt_nonce", Div: 1024}, // ifdef USE_DNSCRYPT
+ {ID: "mem.cache.dnscrypt_shared_secret", Name: "dnscrypt_shared_secret", Div: 1024}, // ifdef USE_DNSCRYPT
+ },
+ }
+ // TODO: do not add subnet and ipsecmod stuff by default?
+ memModChart = Chart{
+ ID: "mod_memory",
+ Title: "Module Memory",
+ Units: "KB",
+ Fam: "mem",
+ Ctx: "unbound.mod_memory",
+ Type: module.Stacked,
+ Priority: prioMemMod,
+ Dims: Dims{
+ {ID: "mem.mod.iterator", Name: "iterator", Div: 1024},
+ {ID: "mem.mod.respip", Name: "respip", Div: 1024},
+ {ID: "mem.mod.validator", Name: "validator", Div: 1024},
+ {ID: "mem.mod.subnet", Name: "subnet", Div: 1024}, // ifdef CLIENT_SUBNET
+ {ID: "mem.mod.ipsecmod", Name: "ipsec", Div: 1024}, // ifdef USE_IPSECMOD
+ },
+ }
+ memStreamWaitChart = Chart{
+ ID: "mem_stream_wait",
+ Title: "TCP and TLS Stream Waif Buffer Memory",
+ Units: "KB",
+ Fam: "mem",
+ Ctx: "unbound.mem_streamwait",
+ Priority: prioMemStreamWait,
+ Dims: Dims{
+ {ID: "mem.streamwait", Name: "streamwait", Div: 1024},
+ },
+ }
+ // NOTE: same family as for cacheChart
+ cacheCountChart = Chart{
+ ID: "cache_count",
+ Title: "Cache Items Count",
+ Units: "items",
+ Fam: "cache",
+ Ctx: "unbound.cache_count",
+ Type: module.Stacked,
+ Priority: prioCacheCount,
+ Dims: Dims{
+ {ID: "infra.cache.count", Name: "infra"},
+ {ID: "key.cache.count", Name: "key"},
+ {ID: "msg.cache.count", Name: "msg"},
+ {ID: "rrset.cache.count", Name: "rrset"},
+ {ID: "dnscrypt_nonce.cache.count", Name: "dnscrypt_nonce"},
+ {ID: "dnscrypt_shared_secret.cache.count", Name: "shared_secret"},
+ },
+ }
+ queryTypeChart = Chart{
+ ID: "queries_by_type",
+ Title: "Queries By Type",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "unbound.type_queries",
+ Type: module.Stacked,
+ Priority: prioQueryType,
+ }
+ queryClassChart = Chart{
+ ID: "queries_by_class",
+ Title: "Queries By Class",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "unbound.class_queries",
+ Type: module.Stacked,
+ Priority: prioQueryClass,
+ }
+ queryOpCodeChart = Chart{
+ ID: "queries_by_opcode",
+ Title: "Queries By OpCode",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "unbound.opcode_queries",
+ Type: module.Stacked,
+ Priority: prioQueryOpCode,
+ }
+ queryFlagChart = Chart{
+ ID: "queries_by_flag",
+ Title: "Queries By Flag",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "unbound.flag_queries",
+ Type: module.Stacked,
+ Priority: prioQueryFlag,
+ Dims: Dims{
+ {ID: "num.query.flags.QR", Name: "QR"},
+ {ID: "num.query.flags.AA", Name: "AA"},
+ {ID: "num.query.flags.TC", Name: "TC"},
+ {ID: "num.query.flags.RD", Name: "RD"},
+ {ID: "num.query.flags.RA", Name: "RA"},
+ {ID: "num.query.flags.Z", Name: "Z"},
+ {ID: "num.query.flags.AD", Name: "AD"},
+ {ID: "num.query.flags.CD", Name: "CD"},
+ },
+ }
+ answerRCodeChart = Chart{
+ ID: "replies_by_rcode",
+ Title: "Replies By RCode",
+ Units: "replies",
+ Fam: "replies",
+ Ctx: "unbound.rcode_answers",
+ Type: module.Stacked,
+ Priority: prioReplyRCode,
+ }
+)
+
+func (u *Unbound) updateCharts() {
+ if len(u.curCache.threads) > 1 {
+ for v := range u.curCache.threads {
+ if !u.cache.threads[v] {
+ u.cache.threads[v] = true
+ u.addThreadCharts(v)
+ }
+ }
+ }
+ // in extended stats mode rcodes 0-6 are always present, so any rcode entry means extended data is available
+ if hasExtendedData := len(u.curCache.answerRCode) > 0; !hasExtendedData {
+ return
+ }
+
+ if !u.extChartsCreated {
+ charts := extendedCharts(u.Cumulative)
+ if err := u.Charts().Add(*charts...); err != nil {
+ u.Warningf("add extended charts: %v", err)
+ }
+ u.extChartsCreated = true
+ }
+
+ for v := range u.curCache.queryType {
+ if !u.cache.queryType[v] {
+ u.cache.queryType[v] = true
+ u.addDimToQueryTypeChart(v)
+ }
+ }
+ for v := range u.curCache.queryClass {
+ if !u.cache.queryClass[v] {
+ u.cache.queryClass[v] = true
+ u.addDimToQueryClassChart(v)
+ }
+ }
+ for v := range u.curCache.queryOpCode {
+ if !u.cache.queryOpCode[v] {
+ u.cache.queryOpCode[v] = true
+ u.addDimToQueryOpCodeChart(v)
+ }
+ }
+ for v := range u.curCache.answerRCode {
+ if !u.cache.answerRCode[v] {
+ u.cache.answerRCode[v] = true
+ u.addDimToAnswerRcodeChart(v)
+ }
+ }
+}
+
+func (u *Unbound) addThreadCharts(thread string) {
+ charts := threadCharts(thread, u.Cumulative)
+ if err := u.Charts().Add(*charts...); err != nil {
+ u.Warningf("add '%s' charts: %v", thread, err)
+ }
+}
+
+func (u *Unbound) addDimToQueryTypeChart(typ string) {
+ u.addDimToChart(queryTypeChart.ID, "num.query.type."+typ, typ)
+}
+func (u *Unbound) addDimToQueryClassChart(class string) {
+ u.addDimToChart(queryClassChart.ID, "num.query.class."+class, class)
+}
+func (u *Unbound) addDimToQueryOpCodeChart(opcode string) {
+ u.addDimToChart(queryOpCodeChart.ID, "num.query.opcode."+opcode, opcode)
+}
+func (u *Unbound) addDimToAnswerRcodeChart(rcode string) {
+ u.addDimToChart(answerRCodeChart.ID, "num.answer.rcode."+rcode, rcode)
+}
+
+func (u *Unbound) addDimToChart(chartID, dimID, dimName string) {
+ chart := u.Charts().Get(chartID)
+ if chart == nil {
+ u.Warningf("add '%s' dim: couldn't find '%s' chart", dimID, chartID)
+ return
+ }
+ dim := &Dim{ID: dimID, Name: dimName}
+ if u.Cumulative {
+ dim.Algo = module.Incremental
+ }
+ if err := chart.AddDim(dim); err != nil {
+ u.Warningf("add '%s' dim: %v", dimID, err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func makeIncrIf(chart *Chart, do bool) *Chart {
+ if !do {
+ return chart
+ }
+ chart.Units += "/s"
+ for _, d := range chart.Dims {
+ d.Algo = module.Incremental
+ }
+ return chart
+}
+
+func makePercOfIncrIf(chart *Chart, do bool) *Chart {
+ if !do {
+ return chart
+ }
+ for _, d := range chart.Dims {
+ d.Algo = module.PercentOfIncremental
+ }
+ return chart
+}
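A minimal sketch (not part of the patch) of what makeIncrIf does to a copied chart when the collector runs against a server with statistics-cumulative enabled:

func exampleCumulativeChart() *Chart {
	// queriesChart counts received queries; in cumulative mode the raw
	// counter only grows, so the dims are switched to module.Incremental
	// and the units gain a "/s" suffix.
	c := makeIncrIf(queriesChart.Copy(), true)
	// c.Units == "queries/s", and every dim in c.Dims has Algo == module.Incremental
	return c
}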
diff --git a/src/go/plugin/go.d/modules/unbound/collect.go b/src/go/plugin/go.d/modules/unbound/collect.go
new file mode 100644
index 000000000..125f206ae
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/collect.go
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package unbound
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// https://github.com/NLnetLabs/unbound/blob/master/daemon/remote.c (do_stats: print_stats, print_thread_stats, print_mem, print_uptime, print_ext)
+// https://github.com/NLnetLabs/unbound/blob/master/libunbound/unbound.h (structs: ub_server_stats, ub_shm_stat_info)
+// https://docs.datadoghq.com/integrations/unbound/#metrics (stats description)
+// https://docs.menandmice.com/display/MM/Unbound+request-list+demystified (request lists explanation)
+
+func (u *Unbound) collect() (map[string]int64, error) {
+ stats, err := u.scrapeUnboundStats()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := u.collectStats(stats)
+ u.updateCharts()
+ return mx, nil
+}
+
+func (u *Unbound) scrapeUnboundStats() ([]entry, error) {
+ var output []string
+ var command = "UBCT1 stats"
+ if u.Cumulative {
+ command = "UBCT1 stats_noreset"
+ }
+
+ if err := u.client.Connect(); err != nil {
+ return nil, fmt.Errorf("failed to connect: %v", err)
+ }
+ defer func() { _ = u.client.Disconnect() }()
+
+ err := u.client.Command(command+"\n", func(bytes []byte) bool {
+ output = append(output, string(bytes))
+ return true
+ })
+ if err != nil {
+ return nil, fmt.Errorf("send command '%s': %w", command, err)
+ }
+
+ switch len(output) {
+ case 0:
+ return nil, fmt.Errorf("command '%s': empty resopnse", command)
+ case 1:
+ // in case of error the first line of the response is: error <descriptive text possible> \n
+ return nil, fmt.Errorf("command '%s': '%s'", command, output[0])
+ }
+ return parseStatsOutput(output)
+}
+
+func (u *Unbound) collectStats(stats []entry) map[string]int64 {
+ if u.Cumulative {
+ return u.collectCumulativeStats(stats)
+ }
+ return u.collectNonCumulativeStats(stats)
+}
+
+func (u *Unbound) collectCumulativeStats(stats []entry) map[string]int64 {
+ mul := float64(1000)
+ // the following stats change only on a cache-miss event in cumulative mode:
+ // - *.requestlist.avg,
+ // - *.recursion.time.avg
+ // - *.recursion.time.median
+ v := findEntry("total.num.cachemiss", stats)
+ if v == u.prevCacheMiss {
+ // so we need to reset them if there is no such event
+ mul = 0
+ }
+ u.prevCacheMiss = v
+ return u.processStats(stats, mul)
+}
+
+func (u *Unbound) collectNonCumulativeStats(stats []entry) map[string]int64 {
+ mul := float64(1000)
+ mx := u.processStats(stats, mul)
+
+ // see 'static int print_ext(RES* ssl, struct ub_stats_info* s)' in
+ // https://github.com/NLnetLabs/unbound/blob/master/daemon/remote.c
+ // - zero-value query types are not included
+ // - zero-value query classes are not included
+ // - zero-value query opcodes are not included
+ // - answer rcodes 0-6 are always included; other zero-value rcodes are not
+ for k := range u.cache.queryType {
+ if _, ok := u.curCache.queryType[k]; !ok {
+ mx["num.query.type."+k] = 0
+ }
+ }
+ for k := range u.cache.queryClass {
+ if _, ok := u.curCache.queryClass[k]; !ok {
+ mx["num.query.class."+k] = 0
+ }
+ }
+ for k := range u.cache.queryOpCode {
+ if _, ok := u.curCache.queryOpCode[k]; !ok {
+ mx["num.query.opcode."+k] = 0
+ }
+ }
+ for k := range u.cache.answerRCode {
+ if _, ok := u.curCache.answerRCode[k]; !ok {
+ mx["num.answer.rcode."+k] = 0
+ }
+ }
+ return mx
+}
+
+func (u *Unbound) processStats(stats []entry, mul float64) map[string]int64 {
+ u.curCache.clear()
+ mx := make(map[string]int64, len(stats))
+ for _, e := range stats {
+ switch {
+ // *.requestlist.avg, *.recursion.time.avg, *.recursion.time.median
+ case e.hasSuffix(".avg"), e.hasSuffix(".median"):
+ e.value *= mul
+ case e.hasPrefix("thread") && e.hasSuffix("num.queries"):
+ v := extractThread(e.key)
+ u.curCache.threads[v] = true
+ case e.hasPrefix("num.query.type"):
+ v := extractQueryType(e.key)
+ u.curCache.queryType[v] = true
+ case e.hasPrefix("num.query.class"):
+ v := extractQueryClass(e.key)
+ u.curCache.queryClass[v] = true
+ case e.hasPrefix("num.query.opcode"):
+ v := extractQueryOpCode(e.key)
+ u.curCache.queryOpCode[v] = true
+ case e.hasPrefix("num.answer.rcode"):
+ v := extractAnswerRCode(e.key)
+ u.curCache.answerRCode[v] = true
+ }
+ mx[e.key] = int64(e.value)
+ }
+ return mx
+}
+
+func extractThread(key string) string { idx := strings.IndexByte(key, '.'); return key[:idx] }
+func extractQueryType(key string) string { i := len("num.query.type."); return key[i:] }
+func extractQueryClass(key string) string { i := len("num.query.class."); return key[i:] }
+func extractQueryOpCode(key string) string { i := len("num.query.opcode."); return key[i:] }
+func extractAnswerRCode(key string) string { i := len("num.answer.rcode."); return key[i:] }
+
+type entry struct {
+ key string
+ value float64
+}
+
+func (e entry) hasPrefix(prefix string) bool { return strings.HasPrefix(e.key, prefix) }
+func (e entry) hasSuffix(suffix string) bool { return strings.HasSuffix(e.key, suffix) }
+
+func findEntry(key string, entries []entry) float64 {
+ for _, e := range entries {
+ if e.key == key {
+ return e.value
+ }
+ }
+ return -1
+}
+
+func parseStatsOutput(output []string) ([]entry, error) {
+ var es []entry
+ for _, v := range output {
+ e, err := parseStatsLine(v)
+ if err != nil {
+ return nil, err
+ }
+ if e.hasPrefix("histogram") {
+ continue
+ }
+ es = append(es, e)
+ }
+ return es, nil
+}
+
+func parseStatsLine(line string) (entry, error) {
+ // 'stats' output is a list of [key]=[value] lines.
+ parts := strings.Split(line, "=")
+ if len(parts) != 2 {
+ return entry{}, fmt.Errorf("bad line syntax: %s", line)
+ }
+ f, err := strconv.ParseFloat(parts[1], 64)
+ return entry{key: parts[0], value: f}, err
+}
+
+func newCollectCache() collectCache {
+ return collectCache{
+ threads: make(map[string]bool),
+ queryType: make(map[string]bool),
+ queryClass: make(map[string]bool),
+ queryOpCode: make(map[string]bool),
+ answerRCode: make(map[string]bool),
+ }
+}
+
+type collectCache struct {
+ threads map[string]bool
+ queryType map[string]bool
+ queryClass map[string]bool
+ queryOpCode map[string]bool
+ answerRCode map[string]bool
+}
+
+func (c *collectCache) clear() {
+ *c = newCollectCache()
+}
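A quick usage sketch (not part of the patch) showing the shape of the data the parser above consumes; the sample lines are illustrative values in the key=value format unbound-control emits:

func exampleParseStats() {
	lines := []string{
		"total.num.queries=120",
		"total.recursion.time.avg=0.021",
		"histogram.000000.000000.to.000000.000001=5", // skipped by parseStatsOutput
	}
	entries, err := parseStatsOutput(lines)
	if err != nil {
		return
	}
	fmt.Println(findEntry("total.num.queries", entries)) // 120
	fmt.Println(len(entries))                            // 2, histogram entries are dropped
}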
diff --git a/src/go/plugin/go.d/modules/unbound/config/config.go b/src/go/plugin/go.d/modules/unbound/config/config.go
new file mode 100644
index 000000000..69dc5c219
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/config.go
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package config
+
+import (
+ "fmt"
+ "strings"
+)
+
+// UnboundConfig represents Unbound configuration file.
+type UnboundConfig struct {
+ cumulative string // statistics-cumulative
+ enable string // control-enable
+ iface string // control-interface
+ port string // control-port
+ useCert string // control-use-cert
+ keyFile string // control-key-file
+ certFile string // control-cert-file
+}
+
+func (c UnboundConfig) String() string {
+ format := strings.Join([]string{
+ "[",
+ `"statistics-cumulative": '%s', `,
+ `"control-enable": '%s', `,
+ `"control-interface": '%s', `,
+ `"control-port": '%s', `,
+ `"control-user-cert": '%s', `,
+ `"control-key-file": '%s', `,
+ `"control-cert-file": '%s'`,
+ "]",
+ }, "")
+ return fmt.Sprintf(format, c.cumulative, c.enable, c.iface, c.port, c.useCert, c.keyFile, c.certFile)
+}
+
+func (c UnboundConfig) Empty() bool { return c == UnboundConfig{} }
+func (c UnboundConfig) Cumulative() (bool, bool) { return c.cumulative == "yes", c.cumulative != "" }
+func (c UnboundConfig) ControlEnabled() (bool, bool) { return c.enable == "yes", c.enable != "" }
+func (c UnboundConfig) ControlInterface() (string, bool) { return c.iface, c.iface != "" }
+func (c UnboundConfig) ControlPort() (string, bool) { return c.port, c.port != "" }
+func (c UnboundConfig) ControlUseCert() (bool, bool) { return c.useCert == "yes", c.useCert != "" }
+func (c UnboundConfig) ControlKeyFile() (string, bool) { return c.keyFile, c.keyFile != "" }
+func (c UnboundConfig) ControlCertFile() (string, bool) { return c.certFile, c.certFile != "" }
+
+func fromOptions(options []option) *UnboundConfig {
+ cfg := &UnboundConfig{}
+ for _, opt := range options {
+ switch opt.name {
+ default:
+ case optInterface:
+ applyControlInterface(cfg, opt.value)
+ case optCumulative:
+ cfg.cumulative = opt.value
+ case optEnable:
+ cfg.enable = opt.value
+ case optPort:
+ cfg.port = opt.value
+ case optUseCert:
+ cfg.useCert = opt.value
+ case optKeyFile:
+ cfg.keyFile = opt.value
+ case optCertFile:
+ cfg.certFile = opt.value
+ }
+ }
+ return cfg
+}
+
+// Unbound doesn't allow querying stats via the unix socket when control-interface is also set to an IP interface.
+func applyControlInterface(cfg *UnboundConfig, value string) {
+ if cfg.iface == "" || !isUnixSocket(value) || isUnixSocket(cfg.iface) {
+ cfg.iface = value
+ }
+}
+
+func isUnixSocket(address string) bool {
+ return strings.HasPrefix(address, "/")
+}
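A short sketch (not part of the patch) of the interface-selection rule implemented by applyControlInterface: once an IP control-interface has been seen, a later unix socket entry is ignored, because stats cannot be queried over the socket in that case:

func exampleInterfacePreference() string {
	cfg := &UnboundConfig{}
	applyControlInterface(cfg, "10.0.0.1")              // first interface wins
	applyControlInterface(cfg, "/var/run/unbound.sock") // ignored: an IP interface is already set
	return cfg.iface // "10.0.0.1"
}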
diff --git a/src/go/plugin/go.d/modules/unbound/config/config_test.go b/src/go/plugin/go.d/modules/unbound/config/config_test.go
new file mode 100644
index 000000000..0375c1368
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/config_test.go
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package config
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestUnboundConfig_Empty(t *testing.T) {
+ assert.True(t, UnboundConfig{}.Empty())
+ assert.False(t, UnboundConfig{enable: "yes"}.Empty())
+}
+
+func TestUnboundConfig_Cumulative(t *testing.T) {
+ tests := []struct {
+ input string
+ wantValue bool
+ wantOK bool
+ }{
+ {input: "yes", wantValue: true, wantOK: true},
+ {input: "no", wantValue: false, wantOK: true},
+ {input: "", wantValue: false, wantOK: false},
+ {input: "some value", wantValue: false, wantOK: true},
+ }
+
+ for _, test := range tests {
+ t.Run(test.input, func(t *testing.T) {
+ cfg := UnboundConfig{cumulative: test.input}
+
+ v, ok := cfg.Cumulative()
+ assert.Equal(t, test.wantValue, v)
+ assert.Equal(t, test.wantOK, ok)
+ })
+ }
+}
+
+func TestUnboundConfig_ControlEnabled(t *testing.T) {
+ tests := []struct {
+ input string
+ wantValue bool
+ wantOK bool
+ }{
+ {input: "yes", wantValue: true, wantOK: true},
+ {input: "no", wantValue: false, wantOK: true},
+ {input: "", wantValue: false, wantOK: false},
+ {input: "some value", wantValue: false, wantOK: true},
+ }
+
+ for _, test := range tests {
+ t.Run(test.input, func(t *testing.T) {
+ cfg := UnboundConfig{enable: test.input}
+
+ v, ok := cfg.ControlEnabled()
+ assert.Equal(t, test.wantValue, v)
+ assert.Equal(t, test.wantOK, ok)
+ })
+ }
+}
+
+func TestUnboundConfig_ControlInterface(t *testing.T) {
+ tests := []struct {
+ input string
+ wantValue string
+ wantOK bool
+ }{
+ {input: "127.0.0.1", wantValue: "127.0.0.1", wantOK: true},
+ {input: "/var/run/unbound.sock", wantValue: "/var/run/unbound.sock", wantOK: true},
+ {input: "", wantValue: "", wantOK: false},
+ {input: "some value", wantValue: "some value", wantOK: true},
+ }
+
+ for _, test := range tests {
+ t.Run(test.input, func(t *testing.T) {
+ cfg := UnboundConfig{iface: test.input}
+
+ v, ok := cfg.ControlInterface()
+ assert.Equal(t, test.wantValue, v)
+ assert.Equal(t, test.wantOK, ok)
+ })
+ }
+}
+
+func TestUnboundConfig_ControlPort(t *testing.T) {
+ tests := []struct {
+ input string
+ wantValue string
+ wantOK bool
+ }{
+ {input: "8953", wantValue: "8953", wantOK: true},
+ {input: "", wantValue: "", wantOK: false},
+ {input: "some value", wantValue: "some value", wantOK: true},
+ }
+
+ for _, test := range tests {
+ t.Run(test.input, func(t *testing.T) {
+ cfg := UnboundConfig{port: test.input}
+
+ v, ok := cfg.ControlPort()
+ assert.Equal(t, test.wantValue, v)
+ assert.Equal(t, test.wantOK, ok)
+ })
+ }
+}
+
+func TestUnboundConfig_ControlUseCert(t *testing.T) {
+ tests := []struct {
+ input string
+ wantValue bool
+ wantOK bool
+ }{
+ {input: "yes", wantValue: true, wantOK: true},
+ {input: "no", wantValue: false, wantOK: true},
+ {input: "", wantValue: false, wantOK: false},
+ {input: "some value", wantValue: false, wantOK: true},
+ }
+
+ for _, test := range tests {
+ t.Run(test.input, func(t *testing.T) {
+ cfg := UnboundConfig{useCert: test.input}
+
+ v, ok := cfg.ControlUseCert()
+ assert.Equal(t, test.wantValue, v)
+ assert.Equal(t, test.wantOK, ok)
+ })
+ }
+}
+
+func TestUnboundConfig_ControlKeyFile(t *testing.T) {
+ tests := []struct {
+ input string
+ wantValue string
+ wantOK bool
+ }{
+ {input: "/etc/unbound/unbound_control.key", wantValue: "/etc/unbound/unbound_control.key", wantOK: true},
+ {input: "", wantValue: "", wantOK: false},
+ {input: "some value", wantValue: "some value", wantOK: true},
+ }
+
+ for _, test := range tests {
+ t.Run(test.input, func(t *testing.T) {
+ cfg := UnboundConfig{keyFile: test.input}
+
+ v, ok := cfg.ControlKeyFile()
+ assert.Equal(t, test.wantValue, v)
+ assert.Equal(t, test.wantOK, ok)
+ })
+ }
+}
+
+func TestUnboundConfig_ControlCertFile(t *testing.T) {
+ tests := []struct {
+ input string
+ wantValue string
+ wantOK bool
+ }{
+ {input: "/etc/unbound/unbound_control.pem", wantValue: "/etc/unbound/unbound_control.pem", wantOK: true},
+ {input: "", wantValue: "", wantOK: false},
+ {input: "some value", wantValue: "some value", wantOK: true},
+ }
+
+ for _, test := range tests {
+ t.Run(test.input, func(t *testing.T) {
+ cfg := UnboundConfig{certFile: test.input}
+
+ v, ok := cfg.ControlCertFile()
+ assert.Equal(t, test.wantValue, v)
+ assert.Equal(t, test.wantOK, ok)
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/modules/unbound/config/parse.go b/src/go/plugin/go.d/modules/unbound/config/parse.go
new file mode 100644
index 000000000..99a632d50
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/parse.go
@@ -0,0 +1,165 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package config
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+)
+
+type option struct{ name, value string }
+
+const (
+ optInclude = "include"
+ optIncludeToplevel = "include-toplevel"
+ optCumulative = "statistics-cumulative"
+ optEnable = "control-enable"
+ optInterface = "control-interface"
+ optPort = "control-port"
+ optUseCert = "control-use-cert"
+ optKeyFile = "control-key-file"
+ optCertFile = "control-cert-file"
+)
+
+func isOptionUsed(opt option) bool {
+ switch opt.name {
+ case optInclude,
+ optIncludeToplevel,
+ optCumulative,
+ optEnable,
+ optInterface,
+ optPort,
+ optUseCert,
+ optKeyFile,
+ optCertFile:
+ return true
+ }
+ return false
+}
+
+// TODO:
+// If chroot is also used, full path names for the included files work; relative path names
+// work if the directory where the daemon is started equals its chroot/working directory
+// or is specified before the include statement with 'directory: dir'.
+
+// Parse parses Unbound configuration files into UnboundConfig.
+// It follows logic described in the 'man unbound.conf':
+// - Files can be included using the 'include:' directive. It can appear anywhere and accepts a single file name as an argument.
+// - Processing continues as if the text from the included file was copied into the config file at that point.
+// - Wildcards can be used to include multiple files.
+//
+// It stops processing on any error: syntax error, recursive include, glob matches directory etc.
+func Parse(entryPath string) (*UnboundConfig, error) {
+ options, err := parse(entryPath, nil)
+ if err != nil {
+ return nil, err
+ }
+ return fromOptions(options), nil
+}
+
+func parse(filename string, visited map[string]bool) ([]option, error) {
+ if visited == nil {
+ visited = make(map[string]bool)
+ }
+ if visited[filename] {
+ return nil, fmt.Errorf("'%s' already visited", filename)
+ }
+ visited[filename] = true
+
+ f, err := open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = f.Close() }()
+
+ var options []option
+ sc := bufio.NewScanner(f)
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ opt, err := parseLine(line)
+ if err != nil {
+ return nil, fmt.Errorf("file '%s', error on parsing line '%s': %v", filename, line, err)
+ }
+
+ if !isOptionUsed(opt) {
+ continue
+ }
+
+ if opt.name != optInclude && opt.name != optIncludeToplevel {
+ options = append(options, opt)
+ continue
+ }
+
+ filenames, err := globInclude(opt.value)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, name := range filenames {
+ opts, err := parse(name, visited)
+ if err != nil {
+ return nil, err
+ }
+ options = append(options, opts...)
+ }
+ }
+ return options, nil
+}
+
+func globInclude(include string) ([]string, error) {
+ if isGlobPattern(include) {
+ return filepath.Glob(include)
+ }
+ return []string{include}, nil
+}
+
+func parseLine(line string) (option, error) {
+ parts := strings.Split(line, ":")
+ if len(parts) < 2 {
+ return option{}, errors.New("bad syntax")
+ }
+ key, value := cleanKeyValue(parts[0], parts[1])
+ return option{name: key, value: value}, nil
+}
+
+func cleanKeyValue(key, value string) (string, string) {
+ if i := strings.IndexByte(value, '#'); i > 0 {
+ value = value[:i] // strip an inline comment; surrounding whitespace is trimmed below
+ }
+ key = strings.TrimSpace(key)
+ value = strings.Trim(strings.TrimSpace(value), "\"'")
+ return key, value
+}
+
+func isGlobPattern(value string) bool {
+ magicChars := `*?[`
+ if runtime.GOOS != "windows" {
+ magicChars = `*?[\`
+ }
+ return strings.ContainsAny(value, magicChars)
+}
+
+func open(filename string) (*os.File, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if !fi.Mode().IsRegular() {
+ return nil, fmt.Errorf("'%s' is not a regular file", filename)
+ }
+ return f, nil
+}
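A minimal usage sketch (not part of the patch); the config path is an assumption, it is whatever file the collector is told to read:

func exampleParse() {
	cfg, err := Parse("/etc/unbound/unbound.conf")
	if err != nil {
		fmt.Println("parse unbound config:", err)
		return
	}
	if iface, ok := cfg.ControlInterface(); ok {
		fmt.Println("control-interface:", iface)
	}
	if cumulative, ok := cfg.Cumulative(); ok {
		fmt.Println("statistics-cumulative:", cumulative)
	}
}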
diff --git a/src/go/plugin/go.d/modules/unbound/config/parse_test.go b/src/go/plugin/go.d/modules/unbound/config/parse_test.go
new file mode 100644
index 000000000..72542a861
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/parse_test.go
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package config
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParse(t *testing.T) {
+ tests := map[string]struct {
+ path string
+ wantCfg UnboundConfig
+ wantErr bool
+ }{
+ "valid include": {
+ path: "testdata/valid_include.conf",
+ wantCfg: UnboundConfig{
+ cumulative: "yes",
+ enable: "yes",
+ iface: "10.0.0.1",
+ port: "8955",
+ useCert: "yes",
+ keyFile: "/etc/unbound/unbound_control_2.key",
+ certFile: "/etc/unbound/unbound_control_2.pem",
+ },
+ },
+ "valid include-toplevel": {
+ path: "testdata/valid_include_toplevel.conf",
+ wantCfg: UnboundConfig{
+ cumulative: "yes",
+ enable: "yes",
+ iface: "10.0.0.1",
+ port: "8955",
+ useCert: "yes",
+ keyFile: "/etc/unbound/unbound_control_2.key",
+ certFile: "/etc/unbound/unbound_control_2.pem",
+ },
+ },
+ "valid glob include": {
+ path: "testdata/valid_glob.conf",
+ wantCfg: UnboundConfig{
+ cumulative: "yes",
+ enable: "yes",
+ iface: "10.0.0.1",
+ port: "8955",
+ useCert: "yes",
+ keyFile: "/etc/unbound/unbound_control_2.key",
+ certFile: "/etc/unbound/unbound_control_2.pem",
+ },
+ },
+ "non existent glob include": {
+ path: "testdata/non_existent_glob_include.conf",
+ wantCfg: UnboundConfig{
+ cumulative: "yes",
+ enable: "yes",
+ iface: "10.0.0.1",
+ port: "8953",
+ useCert: "yes",
+ keyFile: "/etc/unbound/unbound_control.key",
+ certFile: "/etc/unbound/unbound_control.pem",
+ },
+ },
+ "infinite recursion include": {
+ path: "testdata/infinite_rec.conf",
+ wantErr: true,
+ },
+ "non existent include": {
+ path: "testdata/non_existent_include.conf",
+ wantErr: true,
+ },
+ "non existent path": {
+ path: "testdata/non_existent_path.conf",
+ wantErr: true,
+ },
+ }
+
+ for name, test := range tests {
+ name = fmt.Sprintf("%s (%s)", name, test.path)
+ t.Run(name, func(t *testing.T) {
+ cfg, err := Parse(test.path)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, test.wantCfg, *cfg)
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/infinite_rec.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/infinite_rec.conf
new file mode 100644
index 000000000..904f75b30
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/infinite_rec.conf
@@ -0,0 +1,85 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+include: "testdata/infinite_rec.conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+ statistics-cumulative: yes
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+ # extended-statistics: yes
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ control-enable: yes
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.1
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ control-port: 8953
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+ control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ control-key-file: "/etc/unbound/unbound_control.key"
+
+ # unbound-control certificate file.
+ control-cert-file: "/etc/unbound/unbound_control.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/non_existent_glob_include.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/non_existent_glob_include.conf
new file mode 100644
index 000000000..21620f7d5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/non_existent_glob_include.conf
@@ -0,0 +1,85 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+include: "testdata/__non_existent_glob__*.conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+ statistics-cumulative: yes
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+ # extended-statistics: yes
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ control-enable: yes
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.1
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ control-port: 8953
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+ control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ control-key-file: "/etc/unbound/unbound_control.key"
+
+ # unbound-control certificate file.
+ control-cert-file: "/etc/unbound/unbound_control.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/non_existent_include.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/non_existent_include.conf
new file mode 100644
index 000000000..e493e35bb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/non_existent_include.conf
@@ -0,0 +1,85 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+include: "testdata/__non_existent_include__.conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+ statistics-cumulative: yes
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+ # extended-statistics: yes
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ control-enable: yes
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.1
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ control-port: 8953
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+ control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ control-key-file: "/etc/unbound/unbound_control.key"
+
+ # unbound-control certificate file.
+ control-cert-file: "/etc/unbound/unbound_control.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob.conf
new file mode 100644
index 000000000..f020c580a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob.conf
@@ -0,0 +1,82 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+include: "testdata/valid_glob[2-3].conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ statistics-cumulative: yes
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ control-enable: yes
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.1
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ # control-port: 8955
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ # control-key-file: "/etc/unbound/unbound_control_2.key"
+
+ # unbound-control certificate file.
+ # control-cert-file: "/etc/unbound/unbound_control_2.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob2.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob2.conf
new file mode 100644
index 000000000..85bd80e0d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob2.conf
@@ -0,0 +1,80 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ # control-enable: no
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ # control-interface: ::1
+ control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ # control-port: 8955
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ control-key-file: "/etc/unbound/unbound_control_2.key"
+
+ # unbound-control certificate file.
+ control-cert-file: "/etc/unbound/unbound_control_2.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob3.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob3.conf
new file mode 100644
index 000000000..f20eacf1a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_glob3.conf
@@ -0,0 +1,81 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ # control-enable: no
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.3
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ control-port: 8955
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ # control-key-file: "/etc/unbound/unbound_control.key"
+
+ # unbound-control certificate file.
+ # control-cert-file: "/etc/unbound/unbound_control.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include.conf
new file mode 100644
index 000000000..1974f6178
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include.conf
@@ -0,0 +1,82 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+include: "testdata/valid_include2.conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ statistics-cumulative: yes
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ control-enable: yes
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.1
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ # control-port: 8955
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ # control-key-file: "/etc/unbound/unbound_control_2.key"
+
+ # unbound-control certificate file.
+ # control-cert-file: "/etc/unbound/unbound_control_2.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include2.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include2.conf
new file mode 100644
index 000000000..c956d44d5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include2.conf
@@ -0,0 +1,81 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+include: "testdata/valid_include3.conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ # control-enable: no
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ # control-interface: ::1
+ control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ # control-port: 8955
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ control-key-file: "/etc/unbound/unbound_control_2.key"
+
+ # unbound-control certificate file.
+ control-cert-file: "/etc/unbound/unbound_control_2.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include3.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include3.conf
new file mode 100644
index 000000000..f20eacf1a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include3.conf
@@ -0,0 +1,81 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ # control-enable: no
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.3
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ control-port: 8955
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ # control-key-file: "/etc/unbound/unbound_control.key"
+
+ # unbound-control certificate file.
+ # control-cert-file: "/etc/unbound/unbound_control.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel.conf
new file mode 100644
index 000000000..9e5675e10
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel.conf
@@ -0,0 +1,82 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+include-toplevel: "testdata/valid_include_toplevel2.conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ statistics-cumulative: yes
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+# Script file to load
+# python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ control-enable: yes
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.1
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ # control-port: 8955
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ # control-key-file: "/etc/unbound/unbound_control_2.key"
+
+ # unbound-control certificate file.
+ # control-cert-file: "/etc/unbound/unbound_control_2.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel2.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel2.conf
new file mode 100644
index 000000000..f3f69470d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel2.conf
@@ -0,0 +1,81 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+include-toplevel: "testdata/valid_include_toplevel3.conf"
+
+# The server clause sets the main parameters.
+server:
+# whitespace is not necessary, but looks cleaner.
+
+# verbosity number, 0 is least verbose. 1 is default.
+# verbosity: 1
+
+# print statistics to the log (for every thread) every N seconds.
+# Set to "" or 0 to disable. Default is disabled.
+# statistics-interval: 0
+
+# enable shm for stats, default no. if you enable also enable
+# statistics-interval, every time it also writes stats to the
+# shared memory segment keyed with shm-key.
+# shm-enable: no
+
+# shm for stats uses this key, and key+1 for the shared mem segment.
+# shm-key: 11777
+
+# enable cumulative statistics, without clearing them after printing.
+# statistics-cumulative: no
+
+# enable extended statistics (query types, answer codes, status)
+# printed from unbound-control. default off, because of speed.
+# extended-statistics: no
+
+# number of threads to create. 1 disables threading.
+# num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+# Script file to load
+# python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ # control-enable: no
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ # control-interface: ::1
+ control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ # control-port: 8955
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ control-key-file: "/etc/unbound/unbound_control_2.key"
+
+ # unbound-control certificate file.
+ control-cert-file: "/etc/unbound/unbound_control_2.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel3.conf b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel3.conf
new file mode 100644
index 000000000..d30778c01
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config/testdata/valid_include_toplevel3.conf
@@ -0,0 +1,81 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+
+# The server clause sets the main parameters.
+server:
+# whitespace is not necessary, but looks cleaner.
+
+# verbosity number, 0 is least verbose. 1 is default.
+# verbosity: 1
+
+# print statistics to the log (for every thread) every N seconds.
+# Set to "" or 0 to disable. Default is disabled.
+# statistics-interval: 0
+
+# enable shm for stats, default no. if you enable also enable
+# statistics-interval, every time it also writes stats to the
+# shared memory segment keyed with shm-key.
+# shm-enable: no
+
+# shm for stats uses this key, and key+1 for the shared mem segment.
+# shm-key: 11777
+
+# enable cumulative statistics, without clearing them after printing.
+# statistics-cumulative: no
+
+# enable extended statistics (query types, answer codes, status)
+# printed from unbound-control. default off, because of speed.
+# extended-statistics: no
+
+# number of threads to create. 1 disables threading.
+# num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+# Script file to load
+# python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ # control-enable: no
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.3
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ control-port: 8955
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ # control-key-file: "/etc/unbound/unbound_control.key"
+
+ # unbound-control certificate file.
+ # control-cert-file: "/etc/unbound/unbound_control.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/config_schema.json b/src/go/plugin/go.d/modules/unbound/config_schema.json
new file mode 100644
index 000000000..500b60169
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/config_schema.json
@@ -0,0 +1,113 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Unbound collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the Unbound server listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:8953"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout duration, in seconds, for connection, read, write, and SSL handshake operations.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "conf_path": {
+ "title": "Path to unbound.conf",
+ "description": "The absolute path to the Unbound configuration file. Providing this path enables the tool to make adjustments based on the 'remote-control' section.",
+ "type": "string",
+ "default": "/etc/unbound/unbound.conf"
+ },
+ "cumulative_stats": {
+ "title": "Cumulative stats",
+ "description": "Specifies whether statistics collection mode is enabled. Should match the 'statistics-cumulative' parameter in unbound.conf.",
+ "type": "boolean",
+ "default": false
+ },
+ "use_tls": {
+ "title": "Use TLS",
+ "description": "Indicates whether TLS should be used for secure communication.",
+ "type": "boolean",
+ "default": true
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean",
+ "default": true
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "default": "/etc/unbound/unbound_control.pem",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "default": "/etc/unbound/unbound_control.key",
+ "pattern": "^$|^/"
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout",
+ "conf_path",
+ "cumulative_stats"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "use_tls",
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/unbound/init.go b/src/go/plugin/go.d/modules/unbound/init.go
new file mode 100644
index 000000000..88e5e5ab0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/init.go
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package unbound
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/unbound/config"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+)
+
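+// initConfig reads the configuration file pointed to by 'conf_path' (when set) and applies it.
+// It returns false only when the remote control interface is explicitly disabled in that file.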
+func (u *Unbound) initConfig() (enabled bool) {
+ if u.ConfPath == "" {
+ u.Info("'conf_path' not set, skipping parameters auto detection")
+ return true
+ }
+
+ u.Infof("reading '%s'", u.ConfPath)
+ cfg, err := config.Parse(u.ConfPath)
+ if err != nil {
+ u.Warningf("%v, skipping parameters auto detection", err)
+ return true
+ }
+
+ if cfg.Empty() {
+ u.Debug("empty configuration")
+ return true
+ }
+
+ if enabled, ok := cfg.ControlEnabled(); ok && !enabled {
+ u.Info("remote control is disabled in the configuration file")
+ return false
+ }
+
+ u.applyConfig(cfg)
+ return true
+}
+
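+// applyConfig overrides the collector settings (statistics mode, TLS usage, key/cert files, address)
+// with the values found in the parsed unbound.conf.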
+func (u *Unbound) applyConfig(cfg *config.UnboundConfig) {
+ u.Infof("applying configuration: %s", cfg)
+ if cumulative, ok := cfg.Cumulative(); ok && cumulative != u.Cumulative {
+ u.Debugf("changing 'cumulative_stats': %v => %v", u.Cumulative, cumulative)
+ u.Cumulative = cumulative
+ }
+ if useCert, ok := cfg.ControlUseCert(); ok && useCert != u.UseTLS {
+ u.Debugf("changing 'use_tls': %v => %v", u.UseTLS, useCert)
+ u.UseTLS = useCert
+ }
+ if keyFile, ok := cfg.ControlKeyFile(); ok && keyFile != u.TLSKey {
+ u.Debugf("changing 'tls_key': '%s' => '%s'", u.TLSKey, keyFile)
+ u.TLSKey = keyFile
+ }
+ if certFile, ok := cfg.ControlCertFile(); ok && certFile != u.TLSCert {
+ u.Debugf("changing 'tls_cert': '%s' => '%s'", u.TLSCert, certFile)
+ u.TLSCert = certFile
+ }
+ if iface, ok := cfg.ControlInterface(); ok && adjustControlInterface(iface) != u.Address {
+ address := adjustControlInterface(iface)
+ u.Debugf("changing 'address': '%s' => '%s'", u.Address, address)
+ u.Address = address
+ }
+ if port, ok := cfg.ControlPort(); ok && !socket.IsUnixSocket(u.Address) {
+ if host, curPort, err := net.SplitHostPort(u.Address); err == nil && curPort != port {
+ address := net.JoinHostPort(host, port)
+ u.Debugf("changing 'address': '%s' => '%s'", u.Address, address)
+ u.Address = address
+ }
+ }
+}
+
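+// initClient creates the control channel client, enabling TLS for non-unix addresses when 'use_tls' is set.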
+func (u *Unbound) initClient() (err error) {
+ var tlsCfg *tls.Config
+ useTLS := !socket.IsUnixSocket(u.Address) && u.UseTLS
+
+ if useTLS && (u.TLSConfig.TLSCert == "" || u.TLSConfig.TLSKey == "") {
+ return errors.New("'tls_cert' or 'tls_key' is missing")
+ }
+
+ if useTLS {
+ if tlsCfg, err = tlscfg.NewTLSConfig(u.TLSConfig); err != nil {
+ return err
+ }
+ }
+
+ u.client = socket.New(socket.Config{
+ Address: u.Address,
+ ConnectTimeout: u.Timeout.Duration(),
+ ReadTimeout: u.Timeout.Duration(),
+ WriteTimeout: u.Timeout.Duration(),
+ TLSConf: tlsCfg,
+ })
+ return nil
+}
+
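+// adjustControlInterface converts a control-interface value into a dialable address:
+// unix socket paths are returned as-is, 0.0.0.0 is replaced with 127.0.0.1,
+// and the default control port 8953 is appended.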
+func adjustControlInterface(value string) string {
+ if socket.IsUnixSocket(value) {
+ return value
+ }
+ if value == "0.0.0.0" {
+ value = "127.0.0.1"
+ }
+ return net.JoinHostPort(value, "8953")
+}
diff --git a/src/go/plugin/go.d/modules/unbound/integrations/unbound.md b/src/go/plugin/go.d/modules/unbound/integrations/unbound.md
new file mode 100644
index 000000000..df6412270
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/integrations/unbound.md
@@ -0,0 +1,305 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/unbound/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/unbound/metadata.yaml"
+sidebar_label: "Unbound"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Unbound
+
+
+<img src="https://netdata.cloud/img/unbound.png" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: unbound
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Unbound servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Unbound instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| unbound.queries | queries | queries |
+| unbound.queries_ip_ratelimited | ratelimited | queries |
+| unbound.dnscrypt_queries | crypted, cert, cleartext, malformed | queries |
+| unbound.cache | hits, miss | events |
+| unbound.cache_percentage | hits, miss | percentage |
+| unbound.prefetch | prefetches | prefetches |
+| unbound.expired | expired | replies |
+| unbound.zero_ttl_replies | zero_ttl | replies |
+| unbound.recursive_replies | recursive | replies |
+| unbound.recursion_time | avg, median | milliseconds |
+| unbound.request_list_usage | avg, max | queries |
+| unbound.current_request_list_usage | all, users | queries |
+| unbound.request_list_jostle_list | overwritten, dropped | queries |
+| unbound.tcpusage | usage | buffers |
+| unbound.uptime | time | seconds |
+| unbound.cache_memory | message, rrset, dnscrypt_nonce, dnscrypt_shared_secret | KB |
+| unbound.mod_memory | iterator, respip, validator, subnet, ipsec | KB |
+| unbound.mem_streamwait | streamwait | KB |
+| unbound.cache_count | infra, key, msg, rrset, dnscrypt_nonce, shared_secret | items |
+| unbound.type_queries | a dimension per query type | queries |
+| unbound.class_queries | a dimension per query class | queries |
+| unbound.opcode_queries | a dimension per query opcode | queries |
+| unbound.flag_queries | qr, aa, tc, rd, ra, z, ad, cd | queries |
+| unbound.rcode_answers | a dimension per reply rcode | replies |
+
+### Per thread
+
+These metrics refer to threads.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| unbound.thread_queries | queries | queries |
+| unbound.thread_queries_ip_ratelimited | ratelimited | queries |
+| unbound.thread_dnscrypt_queries | crypted, cert, cleartext, malformed | queries |
+| unbound.thread_cache | hits, miss | events |
+| unbound.thread_cache_percentage | hits, miss | percentage |
+| unbound.thread_prefetch | prefetches | prefetches |
+| unbound.thread_expired | expired | replies |
+| unbound.thread_zero_ttl_replies | zero_ttl | replies |
+| unbound.thread_recursive_replies | recursive | replies |
+| unbound.thread_recursion_time | avg, median | milliseconds |
+| unbound.thread_request_list_usage | avg, max | queries |
+| unbound.thread_current_request_list_usage | all, users | queries |
+| unbound.thread_request_list_jostle_list | overwritten, dropped | queries |
+| unbound.thread_tcpusage | usage | buffers |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable remote control interface
+
+Set `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).
+
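+For reference, a minimal sketch of the relevant `remote-control` section (the values shown are the commonly used Unbound defaults, adjust them to your setup):
+
+```text
+remote-control:
+    # enable the control interface used by this collector
+    control-enable: yes
+    # defaults: listen on localhost, port 8953
+    control-interface: 127.0.0.1
+    control-port: 8953
+```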
+
+#### Check permissions and adjust if necessary
+
+If using a Unix socket:
+
+- the socket should be readable and writable by the `netdata` user
+
+If using an IP socket and TLS is disabled:
+
+- the socket should be accessible over the network
+
+If TLS is enabled, in addition:
+
+- `control-key-file` should be readable by the `netdata` user
+- `control-cert-file` should be readable by the `netdata` user
+
+For auto-detection of parameters from `unbound.conf`:
+
+- `unbound.conf` should be readable by the `netdata` user
+- if you use several configuration files (via the include feature), all of them should be readable by the `netdata` user
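+
+As a quick check (a minimal sketch assuming the default paths used elsewhere in this guide), verify access as the `netdata` user:
+
+```bash
+# each command prints "ok" if the netdata user can read the file
+sudo -u netdata test -r /etc/unbound/unbound.conf && echo ok
+sudo -u netdata test -r /etc/unbound/unbound_control.key && echo ok
+sudo -u netdata test -r /etc/unbound/unbound_control.pem && echo ok
+```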
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/unbound.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/unbound.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Server address in IP:PORT format. | 127.0.0.1:8953 | yes |
+| timeout | Connection/read/write/SSL handshake timeout. | 1 | no |
+| conf_path | Absolute path to the unbound configuration file. | /etc/unbound/unbound.conf | no |
+| cumulative_stats | Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file. | no | no |
+| use_tls | Whether to use TLS or not. | yes | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | yes | no |
+| tls_ca | Certificate authority that the client uses when verifying server certificates. | | no |
+| tls_cert | Client TLS certificate. | /etc/unbound/unbound_control.pem | no |
+| tls_key | Client TLS key. | /etc/unbound/unbound_control.key | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:8953
+
+```
+</details>
+
+##### Unix socket
+
+Connecting through Unix socket.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: socket
+ address: /var/run/unbound.sock
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:8953
+
+ - name: remote
+ address: 203.0.113.11:8953
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `unbound` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m unbound
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `unbound` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep unbound
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep unbound /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep unbound
+```
+
+
diff --git a/src/go/plugin/go.d/modules/unbound/metadata.yaml b/src/go/plugin/go.d/modules/unbound/metadata.yaml
new file mode 100644
index 000000000..ec6e6538d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/metadata.yaml
@@ -0,0 +1,431 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-unbound
+ plugin_name: go.d.plugin
+ module_name: unbound
+ monitored_instance:
+ name: Unbound
+ link: https://nlnetlabs.nl/projects/unbound/about/
+ icon_filename: unbound.png
+ categories:
+ - data-collection.dns-and-dhcp-servers
+ keywords:
+ - unbound
+ - dns
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Unbound servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable remote control interface
+ description: |
+ Set `control-enable` to yes in [unbound.conf](https://nlnetlabs.nl/documentation/unbound/unbound.conf).
+ - title: Check permissions and adjust if necessary
+ description: |
+ If using a Unix socket:
+
+ - the socket should be readable and writable by the `netdata` user
+
+ If using an IP socket and TLS is disabled:
+
+ - the socket should be accessible over the network
+
+ If TLS is enabled, in addition:
+
+ - `control-key-file` should be readable by the `netdata` user
+ - `control-cert-file` should be readable by the `netdata` user
+
+ For auto-detection of parameters from `unbound.conf`:
+
+ - `unbound.conf` should be readable by the `netdata` user
+ - if you use several configuration files (via the include feature), all of them should be readable by the `netdata` user
+ configuration:
+ file:
+ name: go.d/unbound.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: Server address in IP:PORT format.
+ default_value: 127.0.0.1:8953
+ required: true
+ - name: timeout
+ description: Connection/read/write/SSL handshake timeout.
+ default_value: 1
+ required: false
+ - name: conf_path
+ description: Absolute path to the unbound configuration file.
+ default_value: /etc/unbound/unbound.conf
+ required: false
+ - name: cumulative_stats
+ description: Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file.
+ default_value: false
+ required: false
+ - name: use_tls
+ description: Whether to use TLS or not.
+ default_value: true
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: true
+ required: false
+ - name: tls_ca
+ description: Certificate authority that the client uses when verifying server certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: /etc/unbound/unbound_control.pem
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: /etc/unbound/unbound_control.key
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:8953
+ - name: Unix socket
+ description: Connecting through Unix socket.
+ config: |
+ jobs:
+ - name: socket
+ address: /var/run/unbound.sock
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:8953
+
+ - name: remote
+ address: 203.0.113.11:8953
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: unbound.queries
+ description: Received Queries
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: unbound.queries_ip_ratelimited
+ description: Rate Limited Queries
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: ratelimited
+ - name: unbound.dnscrypt_queries
+ description: DNSCrypt Queries
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: crypted
+ - name: cert
+ - name: cleartext
+ - name: malformed
+ - name: unbound.cache
+ description: Cache Statistics
+ unit: events
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: miss
+ - name: unbound.cache_percentage
+ description: Cache Statistics Percentage
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: hits
+ - name: miss
+ - name: unbound.prefetch
+ description: Cache Prefetches
+ unit: prefetches
+ chart_type: line
+ dimensions:
+ - name: prefetches
+ - name: unbound.expired
+ description: Replies Served From Expired Cache
+ unit: replies
+ chart_type: line
+ dimensions:
+ - name: expired
+ - name: unbound.zero_ttl_replies
+ description: Replies Served From Expired Cache
+ unit: replies
+ chart_type: line
+ dimensions:
+ - name: zero_ttl
+ - name: unbound.recursive_replies
+ description: Replies That Needed Recursive Processing
+ unit: replies
+ chart_type: line
+ dimensions:
+ - name: recursive
+ - name: unbound.recursion_time
+ description: Time Spent On Recursive Processing
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: avg
+ - name: median
+ - name: unbound.request_list_usage
+ description: Request List Usage
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: avg
+ - name: max
+ - name: unbound.current_request_list_usage
+ description: Current Request List Usage
+ unit: queries
+ chart_type: area
+ dimensions:
+ - name: all
+ - name: users
+ - name: unbound.request_list_jostle_list
+ description: Request List Jostle List Events
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: overwritten
+ - name: dropped
+ - name: unbound.tcpusage
+ description: TCP Handler Buffers
+ unit: buffers
+ chart_type: line
+ dimensions:
+ - name: usage
+ - name: unbound.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: unbound.cache_memory
+ description: Cache Memory
+ unit: KB
+ chart_type: stacked
+ dimensions:
+ - name: message
+ - name: rrset
+ - name: dnscrypt_nonce
+ - name: dnscrypt_shared_secret
+ - name: unbound.mod_memory
+ description: Module Memory
+ unit: KB
+ chart_type: stacked
+ dimensions:
+ - name: iterator
+ - name: respip
+ - name: validator
+ - name: subnet
+ - name: ipsec
+ - name: unbound.mem_streamwait
+ description: TCP and TLS Stream Wait Buffer Memory
+ unit: KB
+ chart_type: line
+ dimensions:
+ - name: streamwait
+ - name: unbound.cache_count
+ description: Cache Items Count
+ unit: items
+ chart_type: stacked
+ dimensions:
+ - name: infra
+ - name: key
+ - name: msg
+ - name: rrset
+ - name: dnscrypt_nonce
+ - name: shared_secret
+ - name: unbound.type_queries
+ description: Queries By Type
+ unit: queries
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per query type
+ - name: unbound.class_queries
+ description: Queries By Class
+ unit: queries
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per query class
+ - name: unbound.opcode_queries
+ description: Queries By OpCode
+ unit: queries
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per query opcode
+ - name: unbound.flag_queries
+ description: Queries By Flag
+ unit: queries
+ chart_type: stacked
+ dimensions:
+ - name: qr
+ - name: aa
+ - name: tc
+ - name: rd
+ - name: ra
+ - name: z
+ - name: ad
+ - name: cd
+ - name: unbound.rcode_answers
+ description: Replies By RCode
+ unit: replies
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reply rcode
+ - name: thread
+ description: These metrics refer to threads.
+ labels: []
+ metrics:
+ - name: unbound.thread_queries
+ description: Thread Received Queries
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: queries
+ - name: unbound.thread_queries_ip_ratelimited
+ description: Thread Rate Limited Queries
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: ratelimited
+ - name: unbound.thread_dnscrypt_queries
+ description: Thread DNSCrypt Queries
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: crypted
+ - name: cert
+ - name: cleartext
+ - name: malformed
+ - name: unbound.thread_cache
+ description: Cache Statistics
+ unit: events
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: miss
+ - name: unbound.thread_cache_percentage
+ description: Cache Statistics Percentage
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: miss
+ - name: unbound.thread_prefetch
+ description: Cache Prefetches
+ unit: prefetches
+ chart_type: line
+ dimensions:
+ - name: prefetches
+ - name: unbound.thread_expired
+ description: Replies Served From Expired Cache
+ unit: replies
+ chart_type: line
+ dimensions:
+ - name: expired
+ - name: unbound.thread_zero_ttl_replies
+ description: Replies Served From Expired Cache
+ unit: replies
+ chart_type: line
+ dimensions:
+ - name: zero_ttl
+ - name: unbound.thread_recursive_replies
+ description: Replies That Needed Recursive Processing
+ unit: replies
+ chart_type: line
+ dimensions:
+ - name: recursive
+ - name: unbound.thread_recursion_time
+ description: Time Spent On Recursive Processing
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: avg
+ - name: median
+ - name: unbound.thread_request_list_usage
+ description: Request List Usage
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: avg
+ - name: max
+ - name: unbound.thread_current_request_list_usage
+ description: Current Request List Usage
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: all
+ - name: users
+ - name: unbound.thread_request_list_jostle_list
+ description: Request List Jostle List Events
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: overwritten
+ - name: dropped
+ - name: unbound.thread_tcpusage
+ description: TCP Handler Buffers
+ unit: buffers
+ chart_type: line
+ dimensions:
+ - name: usage
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/config.json b/src/go/plugin/go.d/modules/unbound/testdata/config.json
new file mode 100644
index 000000000..9874de180
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/config.json
@@ -0,0 +1,12 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "conf_path": "ok",
+ "timeout": 123.123,
+ "cumulative_stats": true,
+ "use_tls": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/config.yaml b/src/go/plugin/go.d/modules/unbound/testdata/config.yaml
new file mode 100644
index 000000000..68326cabc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/config.yaml
@@ -0,0 +1,10 @@
+update_every: 123
+address: "ok"
+conf_path: "ok"
+timeout: 123.123
+cumulative_stats: yes
+use_tls: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/stats/common.txt b/src/go/plugin/go.d/modules/unbound/testdata/stats/common.txt
new file mode 100644
index 000000000..7a1f91a31
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/stats/common.txt
@@ -0,0 +1,66 @@
+thread0.num.queries=28
+thread0.num.queries_ip_ratelimited=0
+thread0.num.cachehits=21
+thread0.num.cachemiss=7
+thread0.num.prefetch=0
+thread0.num.expired=0
+thread0.num.zero_ttl=0
+thread0.num.recursivereplies=7
+thread0.num.dnscrypt.crypted=0
+thread0.num.dnscrypt.cert=0
+thread0.num.dnscrypt.cleartext=0
+thread0.num.dnscrypt.malformed=0
+thread0.requestlist.avg=0.857143
+thread0.requestlist.max=6
+thread0.requestlist.overwritten=0
+thread0.requestlist.exceeded=0
+thread0.requestlist.current.all=0
+thread0.requestlist.current.user=0
+thread0.recursion.time.avg=1.255822
+thread0.recursion.time.median=0.480597
+thread0.tcpusage=0
+thread1.num.queries=16
+thread1.num.queries_ip_ratelimited=0
+thread1.num.cachehits=13
+thread1.num.cachemiss=3
+thread1.num.prefetch=0
+thread1.num.expired=0
+thread1.num.zero_ttl=0
+thread1.num.recursivereplies=3
+thread1.num.dnscrypt.crypted=0
+thread1.num.dnscrypt.cert=0
+thread1.num.dnscrypt.cleartext=0
+thread1.num.dnscrypt.malformed=0
+thread1.requestlist.avg=0
+thread1.requestlist.max=0
+thread1.requestlist.overwritten=0
+thread1.requestlist.exceeded=0
+thread1.requestlist.current.all=0
+thread1.requestlist.current.user=0
+thread1.recursion.time.avg=0.093941
+thread1.recursion.time.median=0
+thread1.tcpusage=0
+total.num.queries=44
+total.num.queries_ip_ratelimited=0
+total.num.cachehits=34
+total.num.cachemiss=10
+total.num.prefetch=0
+total.num.expired=0
+total.num.zero_ttl=0
+total.num.recursivereplies=10
+total.num.dnscrypt.crypted=0
+total.num.dnscrypt.cert=0
+total.num.dnscrypt.cleartext=0
+total.num.dnscrypt.malformed=0
+total.requestlist.avg=0.6
+total.requestlist.max=6
+total.requestlist.overwritten=0
+total.requestlist.exceeded=0
+total.requestlist.current.all=0
+total.requestlist.current.user=0
+total.recursion.time.avg=0.907258
+total.recursion.time.median=0.240299
+total.tcpusage=0
+time.now=1574094836.941149
+time.up=88.434983
+time.elapsed=88.4349831 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/stats/extended.txt b/src/go/plugin/go.d/modules/unbound/testdata/stats/extended.txt
new file mode 100644
index 000000000..578794fad
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/stats/extended.txt
@@ -0,0 +1,162 @@
+thread0.num.queries=28
+thread0.num.queries_ip_ratelimited=0
+thread0.num.cachehits=21
+thread0.num.cachemiss=7
+thread0.num.prefetch=0
+thread0.num.expired=0
+thread0.num.zero_ttl=0
+thread0.num.recursivereplies=7
+thread0.num.dnscrypt.crypted=0
+thread0.num.dnscrypt.cert=0
+thread0.num.dnscrypt.cleartext=0
+thread0.num.dnscrypt.malformed=0
+thread0.requestlist.avg=0.857143
+thread0.requestlist.max=6
+thread0.requestlist.overwritten=0
+thread0.requestlist.exceeded=0
+thread0.requestlist.current.all=0
+thread0.requestlist.current.user=0
+thread0.recursion.time.avg=1.255822
+thread0.recursion.time.median=0.480597
+thread0.tcpusage=0
+thread1.num.queries=16
+thread1.num.queries_ip_ratelimited=0
+thread1.num.cachehits=13
+thread1.num.cachemiss=3
+thread1.num.prefetch=0
+thread1.num.expired=0
+thread1.num.zero_ttl=0
+thread1.num.recursivereplies=3
+thread1.num.dnscrypt.crypted=0
+thread1.num.dnscrypt.cert=0
+thread1.num.dnscrypt.cleartext=0
+thread1.num.dnscrypt.malformed=0
+thread1.requestlist.avg=0
+thread1.requestlist.max=0
+thread1.requestlist.overwritten=0
+thread1.requestlist.exceeded=0
+thread1.requestlist.current.all=0
+thread1.requestlist.current.user=0
+thread1.recursion.time.avg=0.093941
+thread1.recursion.time.median=0
+thread1.tcpusage=0
+total.num.queries=44
+total.num.queries_ip_ratelimited=0
+total.num.cachehits=34
+total.num.cachemiss=10
+total.num.prefetch=0
+total.num.expired=0
+total.num.zero_ttl=0
+total.num.recursivereplies=10
+total.num.dnscrypt.crypted=0
+total.num.dnscrypt.cert=0
+total.num.dnscrypt.cleartext=0
+total.num.dnscrypt.malformed=0
+total.requestlist.avg=0.6
+total.requestlist.max=6
+total.requestlist.overwritten=0
+total.requestlist.exceeded=0
+total.requestlist.current.all=0
+total.requestlist.current.user=0
+total.recursion.time.avg=0.907258
+total.recursion.time.median=0.240299
+total.tcpusage=0
+time.now=1574094836.941149
+time.up=88.434983
+time.elapsed=88.434983
+mem.cache.rrset=178642
+mem.cache.message=90357
+mem.mod.iterator=16588
+mem.mod.validator=81059
+mem.mod.respip=0
+mem.mod.subnet=74504
+mem.cache.dnscrypt_shared_secret=0
+mem.cache.dnscrypt_nonce=0
+mem.streamwait=0
+histogram.000000.000000.to.000000.000001=0
+histogram.000000.000001.to.000000.000002=0
+histogram.000000.000002.to.000000.000004=0
+histogram.000000.000004.to.000000.000008=0
+histogram.000000.000008.to.000000.000016=0
+histogram.000000.000016.to.000000.000032=0
+histogram.000000.000032.to.000000.000064=0
+histogram.000000.000064.to.000000.000128=0
+histogram.000000.000128.to.000000.000256=0
+histogram.000000.000256.to.000000.000512=0
+histogram.000000.000512.to.000000.001024=0
+histogram.000000.001024.to.000000.002048=0
+histogram.000000.002048.to.000000.004096=0
+histogram.000000.004096.to.000000.008192=0
+histogram.000000.008192.to.000000.016384=0
+histogram.000000.016384.to.000000.032768=0
+histogram.000000.032768.to.000000.065536=2
+histogram.000000.065536.to.000000.131072=0
+histogram.000000.131072.to.000000.262144=2
+histogram.000000.262144.to.000000.524288=3
+histogram.000000.524288.to.000001.000000=2
+histogram.000001.000000.to.000002.000000=0
+histogram.000002.000000.to.000004.000000=0
+histogram.000004.000000.to.000008.000000=1
+histogram.000008.000000.to.000016.000000=0
+histogram.000016.000000.to.000032.000000=0
+histogram.000032.000000.to.000064.000000=0
+histogram.000064.000000.to.000128.000000=0
+histogram.000128.000000.to.000256.000000=0
+histogram.000256.000000.to.000512.000000=0
+histogram.000512.000000.to.001024.000000=0
+histogram.001024.000000.to.002048.000000=0
+histogram.002048.000000.to.004096.000000=0
+histogram.004096.000000.to.008192.000000=0
+histogram.008192.000000.to.016384.000000=0
+histogram.016384.000000.to.032768.000000=0
+histogram.032768.000000.to.065536.000000=0
+histogram.065536.000000.to.131072.000000=0
+histogram.131072.000000.to.262144.000000=0
+histogram.262144.000000.to.524288.000000=0
+num.query.type.A=13
+num.query.type.PTR=5
+num.query.type.MX=13
+num.query.type.AAAA=13
+num.query.class.IN=44
+num.query.opcode.QUERY=44
+num.query.tcp=0
+num.query.tcpout=1
+num.query.tls=0
+num.query.tls.resume=0
+num.query.ipv6=39
+num.query.flags.QR=0
+num.query.flags.AA=0
+num.query.flags.TC=0
+num.query.flags.RD=44
+num.query.flags.RA=0
+num.query.flags.Z=0
+num.query.flags.AD=0
+num.query.flags.CD=0
+num.query.edns.present=0
+num.query.edns.DO=0
+num.answer.rcode.NOERROR=40
+num.answer.rcode.FORMERR=0
+num.answer.rcode.SERVFAIL=0
+num.answer.rcode.NXDOMAIN=4
+num.answer.rcode.NOTIMPL=0
+num.answer.rcode.REFUSED=0
+num.query.ratelimited=0
+num.answer.secure=0
+num.answer.bogus=0
+num.rrset.bogus=0
+num.query.aggressive.NOERROR=2
+num.query.aggressive.NXDOMAIN=0
+unwanted.queries=0
+unwanted.replies=0
+msg.cache.count=81
+rrset.cache.count=314
+infra.cache.count=205
+key.cache.count=9
+dnscrypt_shared_secret.cache.count=0
+dnscrypt_nonce.cache.count=0
+num.query.dnscrypt.shared_secret.cachemiss=0
+num.query.dnscrypt.replay=0
+num.query.authzone.up=0
+num.query.authzone.down=0
+num.query.subnet=0
+num.query.subnet_cache=0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended1.txt b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended1.txt
new file mode 100644
index 000000000..53bd7f955
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended1.txt
@@ -0,0 +1,162 @@
+thread0.num.queries=90
+thread0.num.queries_ip_ratelimited=0
+thread0.num.cachehits=80
+thread0.num.cachemiss=10
+thread0.num.prefetch=0
+thread0.num.expired=0
+thread0.num.zero_ttl=0
+thread0.num.recursivereplies=10
+thread0.num.dnscrypt.crypted=0
+thread0.num.dnscrypt.cert=0
+thread0.num.dnscrypt.cleartext=0
+thread0.num.dnscrypt.malformed=0
+thread0.requestlist.avg=0.1
+thread0.requestlist.max=1
+thread0.requestlist.overwritten=0
+thread0.requestlist.exceeded=0
+thread0.requestlist.current.all=0
+thread0.requestlist.current.user=0
+thread0.recursion.time.avg=0.222018
+thread0.recursion.time.median=0.337042
+thread0.tcpusage=0
+thread1.num.queries=110
+thread1.num.queries_ip_ratelimited=0
+thread1.num.cachehits=101
+thread1.num.cachemiss=9
+thread1.num.prefetch=0
+thread1.num.expired=0
+thread1.num.zero_ttl=0
+thread1.num.recursivereplies=9
+thread1.num.dnscrypt.crypted=0
+thread1.num.dnscrypt.cert=0
+thread1.num.dnscrypt.cleartext=0
+thread1.num.dnscrypt.malformed=0
+thread1.requestlist.avg=0.222222
+thread1.requestlist.max=1
+thread1.requestlist.overwritten=0
+thread1.requestlist.exceeded=0
+thread1.requestlist.current.all=0
+thread1.requestlist.current.user=0
+thread1.recursion.time.avg=0.844506
+thread1.recursion.time.median=0.360448
+thread1.tcpusage=0
+total.num.queries=200
+total.num.queries_ip_ratelimited=0
+total.num.cachehits=181
+total.num.cachemiss=19
+total.num.prefetch=0
+total.num.expired=0
+total.num.zero_ttl=0
+total.num.recursivereplies=19
+total.num.dnscrypt.crypted=0
+total.num.dnscrypt.cert=0
+total.num.dnscrypt.cleartext=0
+total.num.dnscrypt.malformed=0
+total.requestlist.avg=0.157895
+total.requestlist.max=1
+total.requestlist.overwritten=0
+total.requestlist.exceeded=0
+total.requestlist.current.all=0
+total.requestlist.current.user=0
+total.recursion.time.avg=0.516881
+total.recursion.time.median=0.348745
+total.tcpusage=0
+time.now=1574103378.552596
+time.up=122.956436
+time.elapsed=122.956436
+mem.cache.rrset=175745
+mem.cache.message=93392
+mem.mod.iterator=16588
+mem.mod.validator=81479
+mem.mod.respip=0
+mem.mod.subnet=74504
+mem.cache.dnscrypt_shared_secret=0
+mem.cache.dnscrypt_nonce=0
+mem.streamwait=0
+histogram.000000.000000.to.000000.000001=0
+histogram.000000.000001.to.000000.000002=0
+histogram.000000.000002.to.000000.000004=0
+histogram.000000.000004.to.000000.000008=0
+histogram.000000.000008.to.000000.000016=0
+histogram.000000.000016.to.000000.000032=0
+histogram.000000.000032.to.000000.000064=0
+histogram.000000.000064.to.000000.000128=0
+histogram.000000.000128.to.000000.000256=0
+histogram.000000.000256.to.000000.000512=0
+histogram.000000.000512.to.000000.001024=0
+histogram.000000.001024.to.000000.002048=0
+histogram.000000.002048.to.000000.004096=0
+histogram.000000.004096.to.000000.008192=0
+histogram.000000.008192.to.000000.016384=2
+histogram.000000.016384.to.000000.032768=1
+histogram.000000.032768.to.000000.065536=3
+histogram.000000.065536.to.000000.131072=0
+histogram.000000.131072.to.000000.262144=0
+histogram.000000.262144.to.000000.524288=11
+histogram.000000.524288.to.000001.000000=0
+histogram.000001.000000.to.000002.000000=1
+histogram.000002.000000.to.000004.000000=0
+histogram.000004.000000.to.000008.000000=1
+histogram.000008.000000.to.000016.000000=0
+histogram.000016.000000.to.000032.000000=0
+histogram.000032.000000.to.000064.000000=0
+histogram.000064.000000.to.000128.000000=0
+histogram.000128.000000.to.000256.000000=0
+histogram.000256.000000.to.000512.000000=0
+histogram.000512.000000.to.001024.000000=0
+histogram.001024.000000.to.002048.000000=0
+histogram.002048.000000.to.004096.000000=0
+histogram.004096.000000.to.008192.000000=0
+histogram.008192.000000.to.016384.000000=0
+histogram.016384.000000.to.032768.000000=0
+histogram.032768.000000.to.065536.000000=0
+histogram.065536.000000.to.131072.000000=0
+histogram.131072.000000.to.262144.000000=0
+histogram.262144.000000.to.524288.000000=0
+num.query.type.A=60
+num.query.type.PTR=20
+num.query.type.MX=60
+num.query.type.AAAA=60
+num.query.class.IN=200
+num.query.opcode.QUERY=200
+num.query.tcp=0
+num.query.tcpout=0
+num.query.tls=0
+num.query.tls.resume=0
+num.query.ipv6=0
+num.query.flags.QR=0
+num.query.flags.AA=0
+num.query.flags.TC=0
+num.query.flags.RD=200
+num.query.flags.RA=0
+num.query.flags.Z=0
+num.query.flags.AD=0
+num.query.flags.CD=0
+num.query.edns.present=0
+num.query.edns.DO=0
+num.answer.rcode.NOERROR=184
+num.answer.rcode.FORMERR=0
+num.answer.rcode.SERVFAIL=0
+num.answer.rcode.NXDOMAIN=16
+num.answer.rcode.NOTIMPL=0
+num.answer.rcode.REFUSED=0
+num.query.ratelimited=0
+num.answer.secure=0
+num.answer.bogus=0
+num.rrset.bogus=0
+num.query.aggressive.NOERROR=1
+num.query.aggressive.NXDOMAIN=0
+unwanted.queries=0
+unwanted.replies=0
+msg.cache.count=94
+rrset.cache.count=304
+infra.cache.count=192
+key.cache.count=11
+dnscrypt_shared_secret.cache.count=0
+dnscrypt_nonce.cache.count=0
+num.query.dnscrypt.shared_secret.cachemiss=0
+num.query.dnscrypt.replay=0
+num.query.authzone.up=0
+num.query.authzone.down=0
+num.query.subnet=0
+num.query.subnet_cache=0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended2.txt b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended2.txt
new file mode 100644
index 000000000..939ba75de
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended2.txt
@@ -0,0 +1,162 @@
+thread0.num.queries=133
+thread0.num.queries_ip_ratelimited=0
+thread0.num.cachehits=123
+thread0.num.cachemiss=10
+thread0.num.prefetch=0
+thread0.num.expired=0
+thread0.num.zero_ttl=0
+thread0.num.recursivereplies=10
+thread0.num.dnscrypt.crypted=0
+thread0.num.dnscrypt.cert=0
+thread0.num.dnscrypt.cleartext=0
+thread0.num.dnscrypt.malformed=0
+thread0.requestlist.avg=0.1
+thread0.requestlist.max=1
+thread0.requestlist.overwritten=0
+thread0.requestlist.exceeded=0
+thread0.requestlist.current.all=0
+thread0.requestlist.current.user=0
+thread0.recursion.time.avg=0.222018
+thread0.recursion.time.median=0.337042
+thread0.tcpusage=0
+thread1.num.queries=157
+thread1.num.queries_ip_ratelimited=0
+thread1.num.cachehits=148
+thread1.num.cachemiss=9
+thread1.num.prefetch=0
+thread1.num.expired=0
+thread1.num.zero_ttl=0
+thread1.num.recursivereplies=9
+thread1.num.dnscrypt.crypted=0
+thread1.num.dnscrypt.cert=0
+thread1.num.dnscrypt.cleartext=0
+thread1.num.dnscrypt.malformed=0
+thread1.requestlist.avg=0.222222
+thread1.requestlist.max=1
+thread1.requestlist.overwritten=0
+thread1.requestlist.exceeded=0
+thread1.requestlist.current.all=0
+thread1.requestlist.current.user=0
+thread1.recursion.time.avg=0.844506
+thread1.recursion.time.median=0.360448
+thread1.tcpusage=0
+total.num.queries=290
+total.num.queries_ip_ratelimited=0
+total.num.cachehits=271
+total.num.cachemiss=19
+total.num.prefetch=0
+total.num.expired=0
+total.num.zero_ttl=0
+total.num.recursivereplies=19
+total.num.dnscrypt.crypted=0
+total.num.dnscrypt.cert=0
+total.num.dnscrypt.cleartext=0
+total.num.dnscrypt.malformed=0
+total.requestlist.avg=0.157895
+total.requestlist.max=1
+total.requestlist.overwritten=0
+total.requestlist.exceeded=0
+total.requestlist.current.all=0
+total.requestlist.current.user=0
+total.recursion.time.avg=0.516881
+total.recursion.time.median=0.348745
+total.tcpusage=0
+time.now=1574103461.161540
+time.up=205.565380
+time.elapsed=82.608944
+mem.cache.rrset=175745
+mem.cache.message=93392
+mem.mod.iterator=16588
+mem.mod.validator=81479
+mem.mod.respip=0
+mem.mod.subnet=74504
+mem.cache.dnscrypt_shared_secret=0
+mem.cache.dnscrypt_nonce=0
+mem.streamwait=0
+histogram.000000.000000.to.000000.000001=0
+histogram.000000.000001.to.000000.000002=0
+histogram.000000.000002.to.000000.000004=0
+histogram.000000.000004.to.000000.000008=0
+histogram.000000.000008.to.000000.000016=0
+histogram.000000.000016.to.000000.000032=0
+histogram.000000.000032.to.000000.000064=0
+histogram.000000.000064.to.000000.000128=0
+histogram.000000.000128.to.000000.000256=0
+histogram.000000.000256.to.000000.000512=0
+histogram.000000.000512.to.000000.001024=0
+histogram.000000.001024.to.000000.002048=0
+histogram.000000.002048.to.000000.004096=0
+histogram.000000.004096.to.000000.008192=0
+histogram.000000.008192.to.000000.016384=2
+histogram.000000.016384.to.000000.032768=1
+histogram.000000.032768.to.000000.065536=3
+histogram.000000.065536.to.000000.131072=0
+histogram.000000.131072.to.000000.262144=0
+histogram.000000.262144.to.000000.524288=11
+histogram.000000.524288.to.000001.000000=0
+histogram.000001.000000.to.000002.000000=1
+histogram.000002.000000.to.000004.000000=0
+histogram.000004.000000.to.000008.000000=1
+histogram.000008.000000.to.000016.000000=0
+histogram.000016.000000.to.000032.000000=0
+histogram.000032.000000.to.000064.000000=0
+histogram.000064.000000.to.000128.000000=0
+histogram.000128.000000.to.000256.000000=0
+histogram.000256.000000.to.000512.000000=0
+histogram.000512.000000.to.001024.000000=0
+histogram.001024.000000.to.002048.000000=0
+histogram.002048.000000.to.004096.000000=0
+histogram.004096.000000.to.008192.000000=0
+histogram.008192.000000.to.016384.000000=0
+histogram.016384.000000.to.032768.000000=0
+histogram.032768.000000.to.065536.000000=0
+histogram.065536.000000.to.131072.000000=0
+histogram.131072.000000.to.262144.000000=0
+histogram.262144.000000.to.524288.000000=0
+num.query.type.A=90
+num.query.type.PTR=20
+num.query.type.MX=90
+num.query.type.AAAA=90
+num.query.class.IN=290
+num.query.opcode.QUERY=290
+num.query.tcp=0
+num.query.tcpout=0
+num.query.tls=0
+num.query.tls.resume=0
+num.query.ipv6=0
+num.query.flags.QR=0
+num.query.flags.AA=0
+num.query.flags.TC=0
+num.query.flags.RD=290
+num.query.flags.RA=0
+num.query.flags.Z=0
+num.query.flags.AD=0
+num.query.flags.CD=0
+num.query.edns.present=0
+num.query.edns.DO=0
+num.answer.rcode.NOERROR=274
+num.answer.rcode.FORMERR=0
+num.answer.rcode.SERVFAIL=0
+num.answer.rcode.NXDOMAIN=16
+num.answer.rcode.NOTIMPL=0
+num.answer.rcode.REFUSED=0
+num.query.ratelimited=0
+num.answer.secure=0
+num.answer.bogus=0
+num.rrset.bogus=0
+num.query.aggressive.NOERROR=1
+num.query.aggressive.NXDOMAIN=0
+unwanted.queries=0
+unwanted.replies=0
+msg.cache.count=94
+rrset.cache.count=304
+infra.cache.count=192
+key.cache.count=11
+dnscrypt_shared_secret.cache.count=0
+dnscrypt_nonce.cache.count=0
+num.query.dnscrypt.shared_secret.cachemiss=0
+num.query.dnscrypt.replay=0
+num.query.authzone.up=0
+num.query.authzone.down=0
+num.query.subnet=0
+num.query.subnet_cache=0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended3.txt b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended3.txt
new file mode 100644
index 000000000..e9448f7d7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/cumulative/extended3.txt
@@ -0,0 +1,163 @@
+thread0.num.queries=165
+thread0.num.queries_ip_ratelimited=0
+thread0.num.cachehits=150
+thread0.num.cachemiss=15
+thread0.num.prefetch=0
+thread0.num.expired=0
+thread0.num.zero_ttl=0
+thread0.num.recursivereplies=15
+thread0.num.dnscrypt.crypted=0
+thread0.num.dnscrypt.cert=0
+thread0.num.dnscrypt.cleartext=0
+thread0.num.dnscrypt.malformed=0
+thread0.requestlist.avg=0.0666667
+thread0.requestlist.max=1
+thread0.requestlist.overwritten=0
+thread0.requestlist.exceeded=0
+thread0.requestlist.current.all=0
+thread0.requestlist.current.user=0
+thread0.recursion.time.avg=0.261497
+thread0.recursion.time.median=0.318318
+thread0.tcpusage=0
+thread1.num.queries=195
+thread1.num.queries_ip_ratelimited=0
+thread1.num.cachehits=184
+thread1.num.cachemiss=11
+thread1.num.prefetch=0
+thread1.num.expired=0
+thread1.num.zero_ttl=0
+thread1.num.recursivereplies=11
+thread1.num.dnscrypt.crypted=0
+thread1.num.dnscrypt.cert=0
+thread1.num.dnscrypt.cleartext=0
+thread1.num.dnscrypt.malformed=0
+thread1.requestlist.avg=0.363636
+thread1.requestlist.max=2
+thread1.requestlist.overwritten=0
+thread1.requestlist.exceeded=0
+thread1.requestlist.current.all=0
+thread1.requestlist.current.user=0
+thread1.recursion.time.avg=0.709047
+thread1.recursion.time.median=0.294912
+thread1.tcpusage=0
+total.num.queries=360
+total.num.queries_ip_ratelimited=0
+total.num.cachehits=334
+total.num.cachemiss=26
+total.num.prefetch=0
+total.num.expired=0
+total.num.zero_ttl=0
+total.num.recursivereplies=26
+total.num.dnscrypt.crypted=0
+total.num.dnscrypt.cert=0
+total.num.dnscrypt.cleartext=0
+total.num.dnscrypt.malformed=0
+total.requestlist.avg=0.192308
+total.requestlist.max=2
+total.requestlist.overwritten=0
+total.requestlist.exceeded=0
+total.requestlist.current.all=0
+total.requestlist.current.user=0
+total.recursion.time.avg=0.450844
+total.recursion.time.median=0.306615
+total.tcpusage=0
+time.now=1574103543.692653
+time.up=288.096493
+time.elapsed=82.531113
+mem.cache.rrset=208839
+mem.cache.message=101198
+mem.mod.iterator=16588
+mem.mod.validator=85725
+mem.mod.respip=0
+mem.mod.subnet=74504
+mem.cache.dnscrypt_shared_secret=0
+mem.cache.dnscrypt_nonce=0
+mem.streamwait=0
+histogram.000000.000000.to.000000.000001=0
+histogram.000000.000001.to.000000.000002=0
+histogram.000000.000002.to.000000.000004=0
+histogram.000000.000004.to.000000.000008=0
+histogram.000000.000008.to.000000.000016=0
+histogram.000000.000016.to.000000.000032=0
+histogram.000000.000032.to.000000.000064=0
+histogram.000000.000064.to.000000.000128=0
+histogram.000000.000128.to.000000.000256=0
+histogram.000000.000256.to.000000.000512=0
+histogram.000000.000512.to.000000.001024=0
+histogram.000000.001024.to.000000.002048=0
+histogram.000000.002048.to.000000.004096=0
+histogram.000000.004096.to.000000.008192=0
+histogram.000000.008192.to.000000.016384=2
+histogram.000000.016384.to.000000.032768=1
+histogram.000000.032768.to.000000.065536=5
+histogram.000000.065536.to.000000.131072=3
+histogram.000000.131072.to.000000.262144=0
+histogram.000000.262144.to.000000.524288=11
+histogram.000000.524288.to.000001.000000=2
+histogram.000001.000000.to.000002.000000=1
+histogram.000002.000000.to.000004.000000=0
+histogram.000004.000000.to.000008.000000=1
+histogram.000008.000000.to.000016.000000=0
+histogram.000016.000000.to.000032.000000=0
+histogram.000032.000000.to.000064.000000=0
+histogram.000064.000000.to.000128.000000=0
+histogram.000128.000000.to.000256.000000=0
+histogram.000256.000000.to.000512.000000=0
+histogram.000512.000000.to.001024.000000=0
+histogram.001024.000000.to.002048.000000=0
+histogram.002048.000000.to.004096.000000=0
+histogram.004096.000000.to.008192.000000=0
+histogram.008192.000000.to.016384.000000=0
+histogram.016384.000000.to.032768.000000=0
+histogram.032768.000000.to.065536.000000=0
+histogram.065536.000000.to.131072.000000=0
+histogram.131072.000000.to.262144.000000=0
+histogram.262144.000000.to.524288.000000=0
+num.query.type.A=120
+num.query.type.PTR=20
+num.query.type.MX=110
+num.query.type.AAAA=110
+num.query.class.IN=360
+num.query.opcode.QUERY=360
+num.query.tcp=0
+num.query.tcpout=0
+num.query.tls=0
+num.query.tls.resume=0
+num.query.ipv6=0
+num.query.flags.QR=0
+num.query.flags.AA=0
+num.query.flags.TC=0
+num.query.flags.RD=360
+num.query.flags.RA=0
+num.query.flags.Z=0
+num.query.flags.AD=0
+num.query.flags.CD=0
+num.query.edns.present=0
+num.query.edns.DO=0
+num.answer.rcode.NOERROR=334
+num.answer.rcode.FORMERR=0
+num.answer.rcode.SERVFAIL=10
+num.answer.rcode.NXDOMAIN=16
+num.answer.rcode.NOTIMPL=0
+num.answer.rcode.REFUSED=0
+num.answer.rcode.nodata=20
+num.query.ratelimited=0
+num.answer.secure=0
+num.answer.bogus=0
+num.rrset.bogus=0
+num.query.aggressive.NOERROR=1
+num.query.aggressive.NXDOMAIN=0
+unwanted.queries=0
+unwanted.replies=0
+msg.cache.count=119
+rrset.cache.count=401
+infra.cache.count=232
+key.cache.count=14
+dnscrypt_shared_secret.cache.count=0
+dnscrypt_nonce.cache.count=0
+num.query.dnscrypt.shared_secret.cachemiss=0
+num.query.dnscrypt.replay=0
+num.query.authzone.up=0
+num.query.authzone.down=0
+num.query.subnet=0
+num.query.subnet_cache=0
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended1.txt b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended1.txt
new file mode 100644
index 000000000..8be40ecb2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended1.txt
@@ -0,0 +1,163 @@
+thread0.num.queries=51
+thread0.num.queries_ip_ratelimited=0
+thread0.num.cachehits=44
+thread0.num.cachemiss=7
+thread0.num.prefetch=0
+thread0.num.expired=0
+thread0.num.zero_ttl=0
+thread0.num.recursivereplies=7
+thread0.num.dnscrypt.crypted=0
+thread0.num.dnscrypt.cert=0
+thread0.num.dnscrypt.cleartext=0
+thread0.num.dnscrypt.malformed=0
+thread0.requestlist.avg=0
+thread0.requestlist.max=0
+thread0.requestlist.overwritten=0
+thread0.requestlist.exceeded=0
+thread0.requestlist.current.all=0
+thread0.requestlist.current.user=0
+thread0.recursion.time.avg=0.365956
+thread0.recursion.time.median=0.057344
+thread0.tcpusage=0
+thread1.num.queries=49
+thread1.num.queries_ip_ratelimited=0
+thread1.num.cachehits=46
+thread1.num.cachemiss=3
+thread1.num.prefetch=0
+thread1.num.expired=0
+thread1.num.zero_ttl=0
+thread1.num.recursivereplies=3
+thread1.num.dnscrypt.crypted=0
+thread1.num.dnscrypt.cert=0
+thread1.num.dnscrypt.cleartext=0
+thread1.num.dnscrypt.malformed=0
+thread1.requestlist.avg=0
+thread1.requestlist.max=0
+thread1.requestlist.overwritten=0
+thread1.requestlist.exceeded=0
+thread1.requestlist.current.all=0
+thread1.requestlist.current.user=0
+thread1.recursion.time.avg=1.582766
+thread1.recursion.time.median=0
+thread1.tcpusage=0
+total.num.queries=100
+total.num.queries_ip_ratelimited=0
+total.num.cachehits=90
+total.num.cachemiss=10
+total.num.prefetch=0
+total.num.expired=0
+total.num.zero_ttl=0
+total.num.recursivereplies=10
+total.num.dnscrypt.crypted=0
+total.num.dnscrypt.cert=0
+total.num.dnscrypt.cleartext=0
+total.num.dnscrypt.malformed=0
+total.requestlist.avg=0
+total.requestlist.max=0
+total.requestlist.overwritten=0
+total.requestlist.exceeded=0
+total.requestlist.current.all=0
+total.requestlist.current.user=0
+total.recursion.time.avg=0.730999
+total.recursion.time.median=0.028672
+total.tcpusage=0
+time.now=1574103644.993894
+time.up=45.285130
+time.elapsed=45.285130
+mem.cache.rrset=172757
+mem.cache.message=86064
+mem.mod.iterator=16588
+mem.mod.validator=79979
+mem.mod.respip=0
+mem.mod.subnet=74504
+mem.cache.dnscrypt_shared_secret=0
+mem.cache.dnscrypt_nonce=0
+mem.streamwait=0
+histogram.000000.000000.to.000000.000001=0
+histogram.000000.000001.to.000000.000002=0
+histogram.000000.000002.to.000000.000004=0
+histogram.000000.000004.to.000000.000008=0
+histogram.000000.000008.to.000000.000016=0
+histogram.000000.000016.to.000000.000032=0
+histogram.000000.000032.to.000000.000064=0
+histogram.000000.000064.to.000000.000128=0
+histogram.000000.000128.to.000000.000256=0
+histogram.000000.000256.to.000000.000512=0
+histogram.000000.000512.to.000000.001024=0
+histogram.000000.001024.to.000000.002048=0
+histogram.000000.002048.to.000000.004096=0
+histogram.000000.004096.to.000000.008192=0
+histogram.000000.008192.to.000000.016384=0
+histogram.000000.016384.to.000000.032768=2
+histogram.000000.032768.to.000000.065536=3
+histogram.000000.065536.to.000000.131072=1
+histogram.000000.131072.to.000000.262144=1
+histogram.000000.262144.to.000000.524288=1
+histogram.000000.524288.to.000001.000000=0
+histogram.000001.000000.to.000002.000000=1
+histogram.000002.000000.to.000004.000000=0
+histogram.000004.000000.to.000008.000000=1
+histogram.000008.000000.to.000016.000000=0
+histogram.000016.000000.to.000032.000000=0
+histogram.000032.000000.to.000064.000000=0
+histogram.000064.000000.to.000128.000000=0
+histogram.000128.000000.to.000256.000000=0
+histogram.000256.000000.to.000512.000000=0
+histogram.000512.000000.to.001024.000000=0
+histogram.001024.000000.to.002048.000000=0
+histogram.002048.000000.to.004096.000000=0
+histogram.004096.000000.to.008192.000000=0
+histogram.008192.000000.to.016384.000000=0
+histogram.016384.000000.to.032768.000000=0
+histogram.032768.000000.to.065536.000000=0
+histogram.065536.000000.to.131072.000000=0
+histogram.131072.000000.to.262144.000000=0
+histogram.262144.000000.to.524288.000000=0
+num.query.type.A=30
+num.query.type.PTR=10
+num.query.type.MX=30
+num.query.type.AAAA=30
+num.query.class.IN=100
+num.query.opcode.QUERY=100
+num.query.tcp=0
+num.query.tcpout=1
+num.query.tls=0
+num.query.tls.resume=0
+num.query.ipv6=0
+num.query.flags.QR=0
+num.query.flags.AA=0
+num.query.flags.TC=0
+num.query.flags.RD=100
+num.query.flags.RA=0
+num.query.flags.Z=0
+num.query.flags.AD=0
+num.query.flags.CD=0
+num.query.edns.present=0
+num.query.edns.DO=0
+num.answer.rcode.NOERROR=90
+num.answer.rcode.FORMERR=0
+num.answer.rcode.SERVFAIL=0
+num.answer.rcode.NXDOMAIN=10
+num.answer.rcode.NOTIMPL=0
+num.answer.rcode.REFUSED=0
+num.answer.rcode.nodata=10
+num.query.ratelimited=0
+num.answer.secure=0
+num.answer.bogus=0
+num.rrset.bogus=0
+num.query.aggressive.NOERROR=2
+num.query.aggressive.NXDOMAIN=0
+unwanted.queries=0
+unwanted.replies=0
+msg.cache.count=67
+rrset.cache.count=303
+infra.cache.count=181
+key.cache.count=10
+dnscrypt_shared_secret.cache.count=0
+dnscrypt_nonce.cache.count=0
+num.query.dnscrypt.shared_secret.cachemiss=0
+num.query.dnscrypt.replay=0
+num.query.authzone.up=0
+num.query.authzone.down=0
+num.query.subnet=0
+num.query.subnet_cache=0
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended2.txt b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended2.txt
new file mode 100644
index 000000000..08ff128b3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended2.txt
@@ -0,0 +1,156 @@
+thread0.num.queries=0
+thread0.num.queries_ip_ratelimited=0
+thread0.num.cachehits=0
+thread0.num.cachemiss=0
+thread0.num.prefetch=0
+thread0.num.expired=0
+thread0.num.zero_ttl=0
+thread0.num.recursivereplies=0
+thread0.num.dnscrypt.crypted=0
+thread0.num.dnscrypt.cert=0
+thread0.num.dnscrypt.cleartext=0
+thread0.num.dnscrypt.malformed=0
+thread0.requestlist.avg=0
+thread0.requestlist.max=0
+thread0.requestlist.overwritten=0
+thread0.requestlist.exceeded=0
+thread0.requestlist.current.all=0
+thread0.requestlist.current.user=0
+thread0.recursion.time.avg=0.000000
+thread0.recursion.time.median=0
+thread0.tcpusage=0
+thread1.num.queries=0
+thread1.num.queries_ip_ratelimited=0
+thread1.num.cachehits=0
+thread1.num.cachemiss=0
+thread1.num.prefetch=0
+thread1.num.expired=0
+thread1.num.zero_ttl=0
+thread1.num.recursivereplies=0
+thread1.num.dnscrypt.crypted=0
+thread1.num.dnscrypt.cert=0
+thread1.num.dnscrypt.cleartext=0
+thread1.num.dnscrypt.malformed=0
+thread1.requestlist.avg=0
+thread1.requestlist.max=0
+thread1.requestlist.overwritten=0
+thread1.requestlist.exceeded=0
+thread1.requestlist.current.all=0
+thread1.requestlist.current.user=0
+thread1.recursion.time.avg=0.000000
+thread1.recursion.time.median=0
+thread1.tcpusage=0
+total.num.queries=0
+total.num.queries_ip_ratelimited=0
+total.num.cachehits=0
+total.num.cachemiss=0
+total.num.prefetch=0
+total.num.expired=0
+total.num.zero_ttl=0
+total.num.recursivereplies=0
+total.num.dnscrypt.crypted=0
+total.num.dnscrypt.cert=0
+total.num.dnscrypt.cleartext=0
+total.num.dnscrypt.malformed=0
+total.requestlist.avg=0
+total.requestlist.max=0
+total.requestlist.overwritten=0
+total.requestlist.exceeded=0
+total.requestlist.current.all=0
+total.requestlist.current.user=0
+total.recursion.time.avg=0.000000
+total.recursion.time.median=0
+total.tcpusage=0
+time.now=1574103671.543847
+time.up=71.835083
+time.elapsed=26.549953
+mem.cache.rrset=172757
+mem.cache.message=86064
+mem.mod.iterator=16588
+mem.mod.validator=79979
+mem.mod.respip=0
+mem.mod.subnet=74504
+mem.cache.dnscrypt_shared_secret=0
+mem.cache.dnscrypt_nonce=0
+mem.streamwait=0
+histogram.000000.000000.to.000000.000001=0
+histogram.000000.000001.to.000000.000002=0
+histogram.000000.000002.to.000000.000004=0
+histogram.000000.000004.to.000000.000008=0
+histogram.000000.000008.to.000000.000016=0
+histogram.000000.000016.to.000000.000032=0
+histogram.000000.000032.to.000000.000064=0
+histogram.000000.000064.to.000000.000128=0
+histogram.000000.000128.to.000000.000256=0
+histogram.000000.000256.to.000000.000512=0
+histogram.000000.000512.to.000000.001024=0
+histogram.000000.001024.to.000000.002048=0
+histogram.000000.002048.to.000000.004096=0
+histogram.000000.004096.to.000000.008192=0
+histogram.000000.008192.to.000000.016384=0
+histogram.000000.016384.to.000000.032768=0
+histogram.000000.032768.to.000000.065536=0
+histogram.000000.065536.to.000000.131072=0
+histogram.000000.131072.to.000000.262144=0
+histogram.000000.262144.to.000000.524288=0
+histogram.000000.524288.to.000001.000000=0
+histogram.000001.000000.to.000002.000000=0
+histogram.000002.000000.to.000004.000000=0
+histogram.000004.000000.to.000008.000000=0
+histogram.000008.000000.to.000016.000000=0
+histogram.000016.000000.to.000032.000000=0
+histogram.000032.000000.to.000064.000000=0
+histogram.000064.000000.to.000128.000000=0
+histogram.000128.000000.to.000256.000000=0
+histogram.000256.000000.to.000512.000000=0
+histogram.000512.000000.to.001024.000000=0
+histogram.001024.000000.to.002048.000000=0
+histogram.002048.000000.to.004096.000000=0
+histogram.004096.000000.to.008192.000000=0
+histogram.008192.000000.to.016384.000000=0
+histogram.016384.000000.to.032768.000000=0
+histogram.032768.000000.to.065536.000000=0
+histogram.065536.000000.to.131072.000000=0
+histogram.131072.000000.to.262144.000000=0
+histogram.262144.000000.to.524288.000000=0
+num.query.tcp=0
+num.query.tcpout=0
+num.query.tls=0
+num.query.tls.resume=0
+num.query.ipv6=0
+num.query.flags.QR=0
+num.query.flags.AA=0
+num.query.flags.TC=0
+num.query.flags.RD=0
+num.query.flags.RA=0
+num.query.flags.Z=0
+num.query.flags.AD=0
+num.query.flags.CD=0
+num.query.edns.present=0
+num.query.edns.DO=0
+num.answer.rcode.NOERROR=0
+num.answer.rcode.FORMERR=0
+num.answer.rcode.SERVFAIL=0
+num.answer.rcode.NXDOMAIN=0
+num.answer.rcode.NOTIMPL=0
+num.answer.rcode.REFUSED=0
+num.query.ratelimited=0
+num.answer.secure=0
+num.answer.bogus=0
+num.rrset.bogus=0
+num.query.aggressive.NOERROR=0
+num.query.aggressive.NXDOMAIN=0
+unwanted.queries=0
+unwanted.replies=0
+msg.cache.count=67
+rrset.cache.count=303
+infra.cache.count=181
+key.cache.count=10
+dnscrypt_shared_secret.cache.count=0
+dnscrypt_nonce.cache.count=0
+num.query.dnscrypt.shared_secret.cachemiss=0
+num.query.dnscrypt.replay=0
+num.query.authzone.up=0
+num.query.authzone.down=0
+num.query.subnet=0
+num.query.subnet_cache=0
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended3.txt b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended3.txt
new file mode 100644
index 000000000..45324bef9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/stats/lifecycle/reset/extended3.txt
@@ -0,0 +1,163 @@
+thread0.num.queries=34
+thread0.num.queries_ip_ratelimited=0
+thread0.num.cachehits=30
+thread0.num.cachemiss=4
+thread0.num.prefetch=0
+thread0.num.expired=0
+thread0.num.zero_ttl=0
+thread0.num.recursivereplies=4
+thread0.num.dnscrypt.crypted=0
+thread0.num.dnscrypt.cert=0
+thread0.num.dnscrypt.cleartext=0
+thread0.num.dnscrypt.malformed=0
+thread0.requestlist.avg=0
+thread0.requestlist.max=0
+thread0.requestlist.overwritten=0
+thread0.requestlist.exceeded=0
+thread0.requestlist.current.all=0
+thread0.requestlist.current.user=0
+thread0.recursion.time.avg=0.541654
+thread0.recursion.time.median=0.098304
+thread0.tcpusage=0
+thread1.num.queries=36
+thread1.num.queries_ip_ratelimited=0
+thread1.num.cachehits=33
+thread1.num.cachemiss=3
+thread1.num.prefetch=0
+thread1.num.expired=0
+thread1.num.zero_ttl=0
+thread1.num.recursivereplies=3
+thread1.num.dnscrypt.crypted=0
+thread1.num.dnscrypt.cert=0
+thread1.num.dnscrypt.cleartext=0
+thread1.num.dnscrypt.malformed=0
+thread1.requestlist.avg=1.66667
+thread1.requestlist.max=5
+thread1.requestlist.overwritten=0
+thread1.requestlist.exceeded=0
+thread1.requestlist.current.all=0
+thread1.requestlist.current.user=0
+thread1.recursion.time.avg=0.062328
+thread1.recursion.time.median=0
+thread1.tcpusage=0
+total.num.queries=70
+total.num.queries_ip_ratelimited=0
+total.num.cachehits=63
+total.num.cachemiss=7
+total.num.prefetch=0
+total.num.expired=0
+total.num.zero_ttl=0
+total.num.recursivereplies=7
+total.num.dnscrypt.crypted=0
+total.num.dnscrypt.cert=0
+total.num.dnscrypt.cleartext=0
+total.num.dnscrypt.malformed=0
+total.requestlist.avg=0.714286
+total.requestlist.max=5
+total.requestlist.overwritten=0
+total.requestlist.exceeded=0
+total.requestlist.current.all=0
+total.requestlist.current.user=0
+total.recursion.time.avg=0.336228
+total.recursion.time.median=0.049152
+total.tcpusage=0
+time.now=1574103731.371896
+time.up=131.663132
+time.elapsed=59.828049
+mem.cache.rrset=235917
+mem.cache.message=105471
+mem.mod.iterator=16588
+mem.mod.validator=87270
+mem.mod.respip=0
+mem.mod.subnet=74504
+mem.cache.dnscrypt_shared_secret=0
+mem.cache.dnscrypt_nonce=0
+mem.streamwait=0
+histogram.000000.000000.to.000000.000001=0
+histogram.000000.000001.to.000000.000002=0
+histogram.000000.000002.to.000000.000004=0
+histogram.000000.000004.to.000000.000008=0
+histogram.000000.000008.to.000000.000016=0
+histogram.000000.000016.to.000000.000032=0
+histogram.000000.000032.to.000000.000064=0
+histogram.000000.000064.to.000000.000128=0
+histogram.000000.000128.to.000000.000256=0
+histogram.000000.000256.to.000000.000512=0
+histogram.000000.000512.to.000000.001024=0
+histogram.000000.001024.to.000000.002048=0
+histogram.000000.002048.to.000000.004096=0
+histogram.000000.004096.to.000000.008192=0
+histogram.000000.008192.to.000000.016384=0
+histogram.000000.016384.to.000000.032768=2
+histogram.000000.032768.to.000000.065536=1
+histogram.000000.065536.to.000000.131072=3
+histogram.000000.131072.to.000000.262144=0
+histogram.000000.262144.to.000000.524288=0
+histogram.000000.524288.to.000001.000000=0
+histogram.000001.000000.to.000002.000000=1
+histogram.000002.000000.to.000004.000000=0
+histogram.000004.000000.to.000008.000000=0
+histogram.000008.000000.to.000016.000000=0
+histogram.000016.000000.to.000032.000000=0
+histogram.000032.000000.to.000064.000000=0
+histogram.000064.000000.to.000128.000000=0
+histogram.000128.000000.to.000256.000000=0
+histogram.000256.000000.to.000512.000000=0
+histogram.000512.000000.to.001024.000000=0
+histogram.001024.000000.to.002048.000000=0
+histogram.002048.000000.to.004096.000000=0
+histogram.004096.000000.to.008192.000000=0
+histogram.008192.000000.to.016384.000000=0
+histogram.016384.000000.to.032768.000000=0
+histogram.032768.000000.to.065536.000000=0
+histogram.065536.000000.to.131072.000000=0
+histogram.131072.000000.to.262144.000000=0
+histogram.262144.000000.to.524288.000000=0
+num.query.type.A=20
+num.query.type.PTR=10
+num.query.type.MX=20
+num.query.type.AAAA=20
+num.query.class.IN=70
+num.query.opcode.QUERY=70
+num.query.tcp=0
+num.query.tcpout=0
+num.query.tls=0
+num.query.tls.resume=0
+num.query.ipv6=0
+num.query.flags.QR=0
+num.query.flags.AA=0
+num.query.flags.TC=0
+num.query.flags.RD=70
+num.query.flags.RA=0
+num.query.flags.Z=0
+num.query.flags.AD=0
+num.query.flags.CD=0
+num.query.edns.present=0
+num.query.edns.DO=0
+num.answer.rcode.NOERROR=60
+num.answer.rcode.FORMERR=0
+num.answer.rcode.SERVFAIL=0
+num.answer.rcode.NXDOMAIN=10
+num.answer.rcode.NOTIMPL=0
+num.answer.rcode.REFUSED=0
+num.answer.rcode.nodata=10
+num.query.ratelimited=0
+num.answer.secure=0
+num.answer.bogus=0
+num.rrset.bogus=0
+num.query.aggressive.NOERROR=2
+num.query.aggressive.NXDOMAIN=0
+unwanted.queries=0
+unwanted.replies=0
+msg.cache.count=127
+rrset.cache.count=501
+infra.cache.count=303
+key.cache.count=15
+dnscrypt_shared_secret.cache.count=0
+dnscrypt_nonce.cache.count=0
+num.query.dnscrypt.shared_secret.cachemiss=0
+num.query.dnscrypt.replay=0
+num.query.authzone.up=0
+num.query.authzone.down=0
+num.query.subnet=0
+num.query.subnet_cache=0
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/unbound.conf b/src/go/plugin/go.d/modules/unbound/testdata/unbound.conf
new file mode 100644
index 000000000..a061a3476
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/unbound.conf
@@ -0,0 +1,85 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+#include: "otherfile.conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+ statistics-cumulative: yes
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+ # extended-statistics: yes
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ control-enable: yes
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 10.0.0.1
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ control-port: 8954
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+ control-use-cert: "no"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ control-key-file: "/etc/unbound/unbound_control_other.key"
+
+ # unbound-control certificate file.
+ control-cert-file: "/etc/unbound/unbound_control_other.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/unbound_disabled.conf b/src/go/plugin/go.d/modules/unbound/testdata/unbound_disabled.conf
new file mode 100644
index 000000000..1cef549f8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/unbound_disabled.conf
@@ -0,0 +1,85 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+#include: "otherfile.conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+ statistics-cumulative: yes
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+ # extended-statistics: yes
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ control-enable: no
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ control-interface: 0.0.0.0
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ control-port: 8953
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+ control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ control-key-file: "/etc/unbound/unbound_control.key"
+
+ # unbound-control certificate file.
+ control-cert-file: "/etc/unbound/unbound_control.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/testdata/unbound_empty.conf b/src/go/plugin/go.d/modules/unbound/testdata/unbound_empty.conf
new file mode 100644
index 000000000..a2d158376
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/testdata/unbound_empty.conf
@@ -0,0 +1,85 @@
+#
+# Example configuration file.
+#
+# See unbound.conf(5) man page, version 1.9.4.
+#
+# this is a comment.
+
+#Use this to include other text into the file.
+#include: "otherfile.conf"
+
+# The server clause sets the main parameters.
+server:
+ # whitespace is not necessary, but looks cleaner.
+
+ # verbosity number, 0 is least verbose. 1 is default.
+ # verbosity: 1
+
+ # print statistics to the log (for every thread) every N seconds.
+ # Set to "" or 0 to disable. Default is disabled.
+ # statistics-interval: 0
+
+ # enable shm for stats, default no. if you enable also enable
+ # statistics-interval, every time it also writes stats to the
+ # shared memory segment keyed with shm-key.
+ # shm-enable: no
+
+ # shm for stats uses this key, and key+1 for the shared mem segment.
+ # shm-key: 11777
+
+ # enable cumulative statistics, without clearing them after printing.
+ # statistics-cumulative: no
+ # statistics-cumulative: yes
+
+ # enable extended statistics (query types, answer codes, status)
+ # printed from unbound-control. default off, because of speed.
+ # extended-statistics: no
+ # extended-statistics: yes
+
+ # number of threads to create. 1 disables threading.
+ # num-threads: 2
+
+# Python config section. To enable:
+# o use --with-pythonmodule to configure before compiling.
+# o list python in the module-config string (above) to enable.
+# It can be at the start, it gets validated results, or just before
+# the iterator and process before DNSSEC validation.
+# o and give a python-script to run.
+python:
+ # Script file to load
+ # python-script: "/etc/unbound/ubmodule-tst.py"
+
+# Remote control config section.
+remote-control:
+ # Enable remote control with unbound-control(8) here.
+ # set up the keys and certificates with unbound-control-setup.
+ # control-enable: no
+
+ # what interfaces are listened to for remote control.
+ # give 0.0.0.0 and ::0 to listen to all interfaces.
+ # set to an absolute path to use a unix local name pipe, certificates
+ # are not used for that, so key and cert files need not be present.
+ # control-interface: 127.0.0.1
+ # control-interface: 0.0.0.0
+ # control-interface: ::1
+ # control-interface: /var/run/test.sock
+
+ # port number for remote control operations.
+ # control-port: 8953
+
+ # for localhost, you can disable use of TLS by setting this to "no"
+ # For local sockets this option is ignored, and TLS is not used.
+ # control-use-cert: "yes"
+ # control-use-cert: "yes"
+
+ # unbound server key file.
+ # server-key-file: "/etc/unbound/unbound_server.key"
+
+ # unbound server certificate file.
+ # server-cert-file: "/etc/unbound/unbound_server.pem"
+
+ # unbound-control key file.
+ # control-key-file: "/etc/unbound/unbound_control.key"
+
+ # unbound-control certificate file.
+ # control-cert-file: "/etc/unbound/unbound_control.pem"
diff --git a/src/go/plugin/go.d/modules/unbound/unbound.go b/src/go/plugin/go.d/modules/unbound/unbound.go
new file mode 100644
index 000000000..fa071bb0f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/unbound.go
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package unbound
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("unbound", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Unbound {
+ return &Unbound{
+ Config: Config{
+ Address: "127.0.0.1:8953",
+ ConfPath: "/etc/unbound/unbound.conf",
+ Timeout: web.Duration(time.Second),
+ Cumulative: false,
+ UseTLS: true,
+ TLSConfig: tlscfg.TLSConfig{
+ TLSCert: "/etc/unbound/unbound_control.pem",
+ TLSKey: "/etc/unbound/unbound_control.key",
+ InsecureSkipVerify: true,
+ },
+ },
+ curCache: newCollectCache(),
+ cache: newCollectCache(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ ConfPath string `yaml:"conf_path,omitempty" json:"conf_path"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ Cumulative bool `yaml:"cumulative_stats" json:"cumulative_stats"`
+ UseTLS bool `yaml:"use_tls,omitempty" json:"use_tls"`
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+}
+
+type Unbound struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ client socket.Client
+
+ cache collectCache
+ curCache collectCache
+ prevCacheMiss float64 // needed for cumulative mode
+ extChartsCreated bool
+}
+
+func (u *Unbound) Configuration() any {
+ return u.Config
+}
+
+func (u *Unbound) Init() error {
+ if enabled := u.initConfig(); !enabled {
+ return errors.New("remote control is disabled in the configuration file")
+ }
+
+ if err := u.initClient(); err != nil {
+ u.Errorf("creating client: %v", err)
+ return err
+ }
+
+ u.charts = charts(u.Cumulative)
+
+ u.Debugf("using address: %s, cumulative: %v, use_tls: %v, timeout: %s", u.Address, u.Cumulative, u.UseTLS, u.Timeout)
+ if u.UseTLS {
+ u.Debugf("using tls_skip_verify: %v, tls_key: %s, tls_cert: %s", u.InsecureSkipVerify, u.TLSKey, u.TLSCert)
+ }
+
+ return nil
+}
+
+func (u *Unbound) Check() error {
+ mx, err := u.collect()
+ if err != nil {
+ u.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (u *Unbound) Charts() *module.Charts {
+ return u.charts
+}
+
+func (u *Unbound) Collect() map[string]int64 {
+ mx, err := u.collect()
+ if err != nil {
+ u.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (u *Unbound) Cleanup() {
+ if u.client != nil {
+ _ = u.client.Disconnect()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/unbound/unbound_test.go b/src/go/plugin/go.d/modules/unbound/unbound_test.go
new file mode 100644
index 000000000..f9ed73afe
--- /dev/null
+++ b/src/go/plugin/go.d/modules/unbound/unbound_test.go
@@ -0,0 +1,1288 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package unbound
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataCommonStats, _ = os.ReadFile("testdata/stats/common.txt")
+ dataExtendedStats, _ = os.ReadFile("testdata/stats/extended.txt")
+ dataLifeCycleCumulative1, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended1.txt")
+ dataLifeCycleCumulative2, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended2.txt")
+ dataLifeCycleCumulative3, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended3.txt")
+ dataLifeCycleReset1, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended1.txt")
+ dataLifeCycleReset2, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended2.txt")
+ dataLifeCycleReset3, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended3.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataCommonStats": dataCommonStats,
+ "dataExtendedStats": dataExtendedStats,
+ "dataLifeCycleCumulative1": dataLifeCycleCumulative1,
+ "dataLifeCycleCumulative2": dataLifeCycleCumulative2,
+ "dataLifeCycleCumulative3": dataLifeCycleCumulative3,
+ "dataLifeCycleReset1": dataLifeCycleReset1,
+ "dataLifeCycleReset2": dataLifeCycleReset2,
+ "dataLifeCycleReset3": dataLifeCycleReset3,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestUnbound_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Unbound{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestUnbound_Init(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+
+ assert.NoError(t, unbound.Init())
+}
+
+func TestUnbound_Init_SetEverythingFromUnboundConf(t *testing.T) {
+ unbound := New()
+ unbound.ConfPath = "testdata/unbound.conf"
+ expectedConfig := Config{
+ Address: "10.0.0.1:8954",
+ ConfPath: unbound.ConfPath,
+ Timeout: unbound.Timeout,
+ Cumulative: true,
+ UseTLS: false,
+ TLSConfig: tlscfg.TLSConfig{
+ TLSCert: "/etc/unbound/unbound_control_other.pem",
+ TLSKey: "/etc/unbound/unbound_control_other.key",
+ InsecureSkipVerify: unbound.TLSConfig.InsecureSkipVerify,
+ },
+ }
+
+ assert.NoError(t, unbound.Init())
+ assert.Equal(t, expectedConfig, unbound.Config)
+}
+
+func TestUnbound_Init_DisabledInUnboundConf(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ unbound.ConfPath = "testdata/unbound_disabled.conf"
+
+ assert.Error(t, unbound.Init())
+}
+
+func TestUnbound_Init_HandleEmptyConfig(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ unbound.ConfPath = "testdata/unbound_empty.conf"
+
+ assert.NoError(t, unbound.Init())
+}
+
+func TestUnbound_Init_HandleNonExistentConfig(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ unbound.ConfPath = "testdata/unbound_non_existent.conf"
+
+ assert.NoError(t, unbound.Init())
+}
+
+func TestUnbound_Check(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{data: dataCommonStats, err: false}
+
+ assert.NoError(t, unbound.Check())
+}
+
+func TestUnbound_Check_ErrorDuringScrapingUnbound(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{err: true}
+
+ assert.Error(t, unbound.Check())
+}
+
+func TestUnbound_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestUnbound_Charts(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+
+ assert.NotNil(t, unbound.Charts())
+}
+
+func TestUnbound_Collect(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{data: dataCommonStats, err: false}
+
+ collected := unbound.Collect()
+ assert.Equal(t, expectedCommon, collected)
+ testCharts(t, unbound, collected)
+}
+
+func TestUnbound_Collect_ExtendedStats(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{data: dataExtendedStats, err: false}
+
+ collected := unbound.Collect()
+ assert.Equal(t, expectedExtended, collected)
+ testCharts(t, unbound, collected)
+}
+
+func TestUnbound_Collect_LifeCycleCumulativeExtendedStats(t *testing.T) {
+ tests := []struct {
+ input []byte
+ expected map[string]int64
+ }{
+ {input: dataLifeCycleCumulative1, expected: expectedCumulative1},
+ {input: dataLifeCycleCumulative2, expected: expectedCumulative2},
+ {input: dataLifeCycleCumulative3, expected: expectedCumulative3},
+ }
+
+ unbound := prepareNonTLSUnbound()
+ unbound.Cumulative = true
+ require.NoError(t, unbound.Init())
+ ubClient := &mockUnboundClient{err: false}
+ unbound.client = ubClient
+
+ var collected map[string]int64
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("run %d", i+1), func(t *testing.T) {
+ ubClient.data = test.input
+ collected = unbound.Collect()
+ assert.Equal(t, test.expected, collected)
+ })
+ }
+
+ testCharts(t, unbound, collected)
+}
+
+func TestUnbound_Collect_LifeCycleResetExtendedStats(t *testing.T) {
+ tests := []struct {
+ input []byte
+ expected map[string]int64
+ }{
+ {input: dataLifeCycleReset1, expected: expectedReset1},
+ {input: dataLifeCycleReset2, expected: expectedReset2},
+ {input: dataLifeCycleReset3, expected: expectedReset3},
+ }
+
+ unbound := prepareNonTLSUnbound()
+ unbound.Cumulative = false
+ require.NoError(t, unbound.Init())
+ ubClient := &mockUnboundClient{err: false}
+ unbound.client = ubClient
+
+ var collected map[string]int64
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("run %d", i+1), func(t *testing.T) {
+ ubClient.data = test.input
+ collected = unbound.Collect()
+ assert.Equal(t, test.expected, collected)
+ })
+ }
+
+ testCharts(t, unbound, collected)
+}
+
+func TestUnbound_Collect_EmptyResponse(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{data: []byte{}, err: false}
+
+ assert.Nil(t, unbound.Collect())
+}
+
+func TestUnbound_Collect_ErrorResponse(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{data: []byte("error unknown command 'unknown'"), err: false}
+
+ assert.Nil(t, unbound.Collect())
+}
+
+func TestUnbound_Collect_ErrorOnSend(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{err: true}
+
+ assert.Nil(t, unbound.Collect())
+}
+
+func TestUnbound_Collect_ErrorOnParseBadSyntax(t *testing.T) {
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ data := strings.Repeat("zk_avg_latency 0\nzk_min_latency 0\nzk_mix_latency 0\n", 10)
+ unbound.client = mockUnboundClient{data: []byte(data), err: false}
+
+ assert.Nil(t, unbound.Collect())
+}
+
+func prepareNonTLSUnbound() *Unbound {
+ unbound := New()
+ unbound.ConfPath = ""
+ unbound.UseTLS = false
+
+ return unbound
+}
+
+type mockUnboundClient struct {
+ data []byte
+ err bool
+}
+
+func (m mockUnboundClient) Connect() error {
+ return nil
+}
+
+func (m mockUnboundClient) Disconnect() error {
+ return nil
+}
+
+func (m mockUnboundClient) Command(_ string, process socket.Processor) error {
+ if m.err {
+ return errors.New("mock send error")
+ }
+ s := bufio.NewScanner(bytes.NewReader(m.data))
+ for s.Scan() {
+ process(s.Bytes())
+ }
+ return nil
+}
+
+func testCharts(t *testing.T, unbound *Unbound, collected map[string]int64) {
+ t.Helper()
+ ensureChartsCreatedForEveryThread(t, unbound)
+ ensureExtendedChartsCreated(t, unbound)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, unbound, collected)
+}
+
+func ensureChartsCreatedForEveryThread(t *testing.T, u *Unbound) {
+ for thread := range u.cache.threads {
+ for _, chart := range *threadCharts(thread, u.Cumulative) {
+ assert.Truef(t, u.Charts().Has(chart.ID), "chart '%s' is not created for '%s' thread", chart.ID, thread)
+ }
+ }
+}
+
+func ensureExtendedChartsCreated(t *testing.T, u *Unbound) {
+ if len(u.cache.answerRCode) == 0 {
+ return
+ }
+ for _, chart := range *extendedCharts(u.Cumulative) {
+ assert.Truef(t, u.Charts().Has(chart.ID), "chart '%s' is not added", chart.ID)
+ }
+
+ if chart := u.Charts().Get(queryTypeChart.ID); chart != nil {
+ for typ := range u.cache.queryType {
+ dimID := "num.query.type." + typ
+ assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' type, expected '%s'", chart.ID, typ, dimID)
+ }
+ }
+ if chart := u.Charts().Get(queryClassChart.ID); chart != nil {
+ for class := range u.cache.queryClass {
+ dimID := "num.query.class." + class
+ assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' class, expected '%s'", chart.ID, class, dimID)
+ }
+ }
+ if chart := u.Charts().Get(queryOpCodeChart.ID); chart != nil {
+ for opcode := range u.cache.queryOpCode {
+ dimID := "num.query.opcode." + opcode
+ assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' opcode, expected '%s'", chart.ID, opcode, dimID)
+ }
+ }
+ if chart := u.Charts().Get(answerRCodeChart.ID); chart != nil {
+ for rcode := range u.cache.answerRCode {
+ dimID := "num.answer.rcode." + rcode
+ assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' rcode, expected '%s'", chart.ID, rcode, dimID)
+ }
+ }
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, u *Unbound, collected map[string]int64) {
+ for _, chart := range *u.Charts() {
+ for _, dim := range chart.Dims {
+ if dim.ID == "mem.mod.ipsecmod" {
+ continue
+ }
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+var (
+ expectedCommon = map[string]int64{
+ "thread0.num.cachehits": 21,
+ "thread0.num.cachemiss": 7,
+ "thread0.num.dnscrypt.cert": 0,
+ "thread0.num.dnscrypt.cleartext": 0,
+ "thread0.num.dnscrypt.crypted": 0,
+ "thread0.num.dnscrypt.malformed": 0,
+ "thread0.num.expired": 0,
+ "thread0.num.prefetch": 0,
+ "thread0.num.queries": 28,
+ "thread0.num.queries_ip_ratelimited": 0,
+ "thread0.num.recursivereplies": 7,
+ "thread0.num.zero_ttl": 0,
+ "thread0.recursion.time.avg": 1255,
+ "thread0.recursion.time.median": 480,
+ "thread0.requestlist.avg": 857,
+ "thread0.requestlist.current.all": 0,
+ "thread0.requestlist.current.user": 0,
+ "thread0.requestlist.exceeded": 0,
+ "thread0.requestlist.max": 6,
+ "thread0.requestlist.overwritten": 0,
+ "thread0.tcpusage": 0,
+ "thread1.num.cachehits": 13,
+ "thread1.num.cachemiss": 3,
+ "thread1.num.dnscrypt.cert": 0,
+ "thread1.num.dnscrypt.cleartext": 0,
+ "thread1.num.dnscrypt.crypted": 0,
+ "thread1.num.dnscrypt.malformed": 0,
+ "thread1.num.prefetch": 0,
+ "thread1.num.expired": 0,
+ "thread1.num.queries": 16,
+ "thread1.num.queries_ip_ratelimited": 0,
+ "thread1.num.recursivereplies": 3,
+ "thread1.num.zero_ttl": 0,
+ "thread1.recursion.time.avg": 93,
+ "thread1.recursion.time.median": 0,
+ "thread1.requestlist.avg": 0,
+ "thread1.requestlist.current.all": 0,
+ "thread1.requestlist.current.user": 0,
+ "thread1.requestlist.exceeded": 0,
+ "thread1.requestlist.max": 0,
+ "thread1.requestlist.overwritten": 0,
+ "thread1.tcpusage": 0,
+ "time.elapsed": 88,
+ "time.now": 1574094836,
+ "time.up": 88,
+ "total.num.cachehits": 34,
+ "total.num.cachemiss": 10,
+ "total.num.dnscrypt.cert": 0,
+ "total.num.dnscrypt.cleartext": 0,
+ "total.num.dnscrypt.crypted": 0,
+ "total.num.dnscrypt.malformed": 0,
+ "total.num.prefetch": 0,
+ "total.num.expired": 0,
+ "total.num.queries": 44,
+ "total.num.queries_ip_ratelimited": 0,
+ "total.num.recursivereplies": 10,
+ "total.num.zero_ttl": 0,
+ "total.recursion.time.avg": 907,
+ "total.recursion.time.median": 240,
+ "total.requestlist.avg": 600,
+ "total.requestlist.current.all": 0,
+ "total.requestlist.current.user": 0,
+ "total.requestlist.exceeded": 0,
+ "total.requestlist.max": 6,
+ "total.requestlist.overwritten": 0,
+ "total.tcpusage": 0,
+ }
+
+ expectedExtended = map[string]int64{
+ "dnscrypt_nonce.cache.count": 0,
+ "dnscrypt_shared_secret.cache.count": 0,
+ "infra.cache.count": 205,
+ "key.cache.count": 9,
+ "mem.cache.dnscrypt_nonce": 0,
+ "mem.cache.dnscrypt_shared_secret": 0,
+ "mem.cache.message": 90357,
+ "mem.cache.rrset": 178642,
+ "mem.mod.iterator": 16588,
+ "mem.mod.respip": 0,
+ "mem.mod.subnet": 74504,
+ "mem.mod.validator": 81059,
+ "mem.streamwait": 0,
+ "msg.cache.count": 81,
+ "num.answer.bogus": 0,
+ "num.answer.rcode.FORMERR": 0,
+ "num.answer.rcode.NOERROR": 40,
+ "num.answer.rcode.NOTIMPL": 0,
+ "num.answer.rcode.NXDOMAIN": 4,
+ "num.answer.rcode.REFUSED": 0,
+ "num.answer.rcode.SERVFAIL": 0,
+ "num.answer.secure": 0,
+ "num.query.aggressive.NOERROR": 2,
+ "num.query.aggressive.NXDOMAIN": 0,
+ "num.query.authzone.down": 0,
+ "num.query.authzone.up": 0,
+ "num.query.class.IN": 44,
+ "num.query.dnscrypt.replay": 0,
+ "num.query.dnscrypt.shared_secret.cachemiss": 0,
+ "num.query.edns.DO": 0,
+ "num.query.edns.present": 0,
+ "num.query.flags.AA": 0,
+ "num.query.flags.AD": 0,
+ "num.query.flags.CD": 0,
+ "num.query.flags.QR": 0,
+ "num.query.flags.RA": 0,
+ "num.query.flags.RD": 44,
+ "num.query.flags.TC": 0,
+ "num.query.flags.Z": 0,
+ "num.query.ipv6": 39,
+ "num.query.opcode.QUERY": 44,
+ "num.query.ratelimited": 0,
+ "num.query.subnet": 0,
+ "num.query.subnet_cache": 0,
+ "num.query.tcp": 0,
+ "num.query.tcpout": 1,
+ "num.query.tls": 0,
+ "num.query.tls.resume": 0,
+ "num.query.type.A": 13,
+ "num.query.type.AAAA": 13,
+ "num.query.type.MX": 13,
+ "num.query.type.PTR": 5,
+ "num.rrset.bogus": 0,
+ "rrset.cache.count": 314,
+ "thread0.num.cachehits": 21,
+ "thread0.num.cachemiss": 7,
+ "thread0.num.dnscrypt.cert": 0,
+ "thread0.num.dnscrypt.cleartext": 0,
+ "thread0.num.dnscrypt.crypted": 0,
+ "thread0.num.dnscrypt.malformed": 0,
+ "thread0.num.expired": 0,
+ "thread0.num.prefetch": 0,
+ "thread0.num.queries": 28,
+ "thread0.num.queries_ip_ratelimited": 0,
+ "thread0.num.recursivereplies": 7,
+ "thread0.num.zero_ttl": 0,
+ "thread0.recursion.time.avg": 1255,
+ "thread0.recursion.time.median": 480,
+ "thread0.requestlist.avg": 857,
+ "thread0.requestlist.current.all": 0,
+ "thread0.requestlist.current.user": 0,
+ "thread0.requestlist.exceeded": 0,
+ "thread0.requestlist.max": 6,
+ "thread0.requestlist.overwritten": 0,
+ "thread0.tcpusage": 0,
+ "thread1.num.cachehits": 13,
+ "thread1.num.cachemiss": 3,
+ "thread1.num.dnscrypt.cert": 0,
+ "thread1.num.dnscrypt.cleartext": 0,
+ "thread1.num.dnscrypt.crypted": 0,
+ "thread1.num.dnscrypt.malformed": 0,
+ "thread1.num.prefetch": 0,
+ "thread1.num.expired": 0,
+ "thread1.num.queries": 16,
+ "thread1.num.queries_ip_ratelimited": 0,
+ "thread1.num.recursivereplies": 3,
+ "thread1.num.zero_ttl": 0,
+ "thread1.recursion.time.avg": 93,
+ "thread1.recursion.time.median": 0,
+ "thread1.requestlist.avg": 0,
+ "thread1.requestlist.current.all": 0,
+ "thread1.requestlist.current.user": 0,
+ "thread1.requestlist.exceeded": 0,
+ "thread1.requestlist.max": 0,
+ "thread1.requestlist.overwritten": 0,
+ "thread1.tcpusage": 0,
+ "time.elapsed": 88,
+ "time.now": 1574094836,
+ "time.up": 88,
+ "total.num.cachehits": 34,
+ "total.num.cachemiss": 10,
+ "total.num.dnscrypt.cert": 0,
+ "total.num.dnscrypt.cleartext": 0,
+ "total.num.dnscrypt.crypted": 0,
+ "total.num.dnscrypt.malformed": 0,
+ "total.num.prefetch": 0,
+ "total.num.expired": 0,
+ "total.num.queries": 44,
+ "total.num.queries_ip_ratelimited": 0,
+ "total.num.recursivereplies": 10,
+ "total.num.zero_ttl": 0,
+ "total.recursion.time.avg": 907,
+ "total.recursion.time.median": 240,
+ "total.requestlist.avg": 600,
+ "total.requestlist.current.all": 0,
+ "total.requestlist.current.user": 0,
+ "total.requestlist.exceeded": 0,
+ "total.requestlist.max": 6,
+ "total.requestlist.overwritten": 0,
+ "total.tcpusage": 0,
+ "unwanted.queries": 0,
+ "unwanted.replies": 0,
+ }
+)
+
+var (
+ expectedCumulative1 = map[string]int64{
+ "dnscrypt_nonce.cache.count": 0,
+ "dnscrypt_shared_secret.cache.count": 0,
+ "infra.cache.count": 192,
+ "key.cache.count": 11,
+ "mem.cache.dnscrypt_nonce": 0,
+ "mem.cache.dnscrypt_shared_secret": 0,
+ "mem.cache.message": 93392,
+ "mem.cache.rrset": 175745,
+ "mem.mod.iterator": 16588,
+ "mem.mod.respip": 0,
+ "mem.mod.subnet": 74504,
+ "mem.mod.validator": 81479,
+ "mem.streamwait": 0,
+ "msg.cache.count": 94,
+ "num.answer.bogus": 0,
+ "num.answer.rcode.FORMERR": 0,
+ "num.answer.rcode.NOERROR": 184,
+ "num.answer.rcode.NOTIMPL": 0,
+ "num.answer.rcode.NXDOMAIN": 16,
+ "num.answer.rcode.REFUSED": 0,
+ "num.answer.rcode.SERVFAIL": 0,
+ "num.answer.secure": 0,
+ "num.query.aggressive.NOERROR": 1,
+ "num.query.aggressive.NXDOMAIN": 0,
+ "num.query.authzone.down": 0,
+ "num.query.authzone.up": 0,
+ "num.query.class.IN": 200,
+ "num.query.dnscrypt.replay": 0,
+ "num.query.dnscrypt.shared_secret.cachemiss": 0,
+ "num.query.edns.DO": 0,
+ "num.query.edns.present": 0,
+ "num.query.flags.AA": 0,
+ "num.query.flags.AD": 0,
+ "num.query.flags.CD": 0,
+ "num.query.flags.QR": 0,
+ "num.query.flags.RA": 0,
+ "num.query.flags.RD": 200,
+ "num.query.flags.TC": 0,
+ "num.query.flags.Z": 0,
+ "num.query.ipv6": 0,
+ "num.query.opcode.QUERY": 200,
+ "num.query.ratelimited": 0,
+ "num.query.subnet": 0,
+ "num.query.subnet_cache": 0,
+ "num.query.tcp": 0,
+ "num.query.tcpout": 0,
+ "num.query.tls": 0,
+ "num.query.tls.resume": 0,
+ "num.query.type.A": 60,
+ "num.query.type.AAAA": 60,
+ "num.query.type.MX": 60,
+ "num.query.type.PTR": 20,
+ "num.rrset.bogus": 0,
+ "rrset.cache.count": 304,
+ "thread0.num.cachehits": 80,
+ "thread0.num.cachemiss": 10,
+ "thread0.num.dnscrypt.cert": 0,
+ "thread0.num.dnscrypt.cleartext": 0,
+ "thread0.num.dnscrypt.crypted": 0,
+ "thread0.num.dnscrypt.malformed": 0,
+ "thread0.num.expired": 0,
+ "thread0.num.prefetch": 0,
+ "thread0.num.queries": 90,
+ "thread0.num.queries_ip_ratelimited": 0,
+ "thread0.num.recursivereplies": 10,
+ "thread0.num.zero_ttl": 0,
+ "thread0.recursion.time.avg": 222,
+ "thread0.recursion.time.median": 337,
+ "thread0.requestlist.avg": 100,
+ "thread0.requestlist.current.all": 0,
+ "thread0.requestlist.current.user": 0,
+ "thread0.requestlist.exceeded": 0,
+ "thread0.requestlist.max": 1,
+ "thread0.requestlist.overwritten": 0,
+ "thread0.tcpusage": 0,
+ "thread1.num.cachehits": 101,
+ "thread1.num.cachemiss": 9,
+ "thread1.num.dnscrypt.cert": 0,
+ "thread1.num.dnscrypt.cleartext": 0,
+ "thread1.num.dnscrypt.crypted": 0,
+ "thread1.num.dnscrypt.malformed": 0,
+ "thread1.num.expired": 0,
+ "thread1.num.prefetch": 0,
+ "thread1.num.queries": 110,
+ "thread1.num.queries_ip_ratelimited": 0,
+ "thread1.num.recursivereplies": 9,
+ "thread1.num.zero_ttl": 0,
+ "thread1.recursion.time.avg": 844,
+ "thread1.recursion.time.median": 360,
+ "thread1.requestlist.avg": 222,
+ "thread1.requestlist.current.all": 0,
+ "thread1.requestlist.current.user": 0,
+ "thread1.requestlist.exceeded": 0,
+ "thread1.requestlist.max": 1,
+ "thread1.requestlist.overwritten": 0,
+ "thread1.tcpusage": 0,
+ "time.elapsed": 122,
+ "time.now": 1574103378,
+ "time.up": 122,
+ "total.num.cachehits": 181,
+ "total.num.cachemiss": 19,
+ "total.num.dnscrypt.cert": 0,
+ "total.num.dnscrypt.cleartext": 0,
+ "total.num.dnscrypt.crypted": 0,
+ "total.num.dnscrypt.malformed": 0,
+ "total.num.expired": 0,
+ "total.num.prefetch": 0,
+ "total.num.queries": 200,
+ "total.num.queries_ip_ratelimited": 0,
+ "total.num.recursivereplies": 19,
+ "total.num.zero_ttl": 0,
+ "total.recursion.time.avg": 516,
+ "total.recursion.time.median": 348,
+ "total.requestlist.avg": 157,
+ "total.requestlist.current.all": 0,
+ "total.requestlist.current.user": 0,
+ "total.requestlist.exceeded": 0,
+ "total.requestlist.max": 1,
+ "total.requestlist.overwritten": 0,
+ "total.tcpusage": 0,
+ "unwanted.queries": 0,
+ "unwanted.replies": 0,
+ }
+
+ expectedCumulative2 = map[string]int64{
+ "dnscrypt_nonce.cache.count": 0,
+ "dnscrypt_shared_secret.cache.count": 0,
+ "infra.cache.count": 192,
+ "key.cache.count": 11,
+ "mem.cache.dnscrypt_nonce": 0,
+ "mem.cache.dnscrypt_shared_secret": 0,
+ "mem.cache.message": 93392,
+ "mem.cache.rrset": 175745,
+ "mem.mod.iterator": 16588,
+ "mem.mod.respip": 0,
+ "mem.mod.subnet": 74504,
+ "mem.mod.validator": 81479,
+ "mem.streamwait": 0,
+ "msg.cache.count": 94,
+ "num.answer.bogus": 0,
+ "num.answer.rcode.FORMERR": 0,
+ "num.answer.rcode.NOERROR": 274,
+ "num.answer.rcode.NOTIMPL": 0,
+ "num.answer.rcode.NXDOMAIN": 16,
+ "num.answer.rcode.REFUSED": 0,
+ "num.answer.rcode.SERVFAIL": 0,
+ "num.answer.secure": 0,
+ "num.query.aggressive.NOERROR": 1,
+ "num.query.aggressive.NXDOMAIN": 0,
+ "num.query.authzone.down": 0,
+ "num.query.authzone.up": 0,
+ "num.query.class.IN": 290,
+ "num.query.dnscrypt.replay": 0,
+ "num.query.dnscrypt.shared_secret.cachemiss": 0,
+ "num.query.edns.DO": 0,
+ "num.query.edns.present": 0,
+ "num.query.flags.AA": 0,
+ "num.query.flags.AD": 0,
+ "num.query.flags.CD": 0,
+ "num.query.flags.QR": 0,
+ "num.query.flags.RA": 0,
+ "num.query.flags.RD": 290,
+ "num.query.flags.TC": 0,
+ "num.query.flags.Z": 0,
+ "num.query.ipv6": 0,
+ "num.query.opcode.QUERY": 290,
+ "num.query.ratelimited": 0,
+ "num.query.subnet": 0,
+ "num.query.subnet_cache": 0,
+ "num.query.tcp": 0,
+ "num.query.tcpout": 0,
+ "num.query.tls": 0,
+ "num.query.tls.resume": 0,
+ "num.query.type.A": 90,
+ "num.query.type.AAAA": 90,
+ "num.query.type.MX": 90,
+ "num.query.type.PTR": 20,
+ "num.rrset.bogus": 0,
+ "rrset.cache.count": 304,
+ "thread0.num.cachehits": 123,
+ "thread0.num.cachemiss": 10,
+ "thread0.num.dnscrypt.cert": 0,
+ "thread0.num.dnscrypt.cleartext": 0,
+ "thread0.num.dnscrypt.crypted": 0,
+ "thread0.num.dnscrypt.malformed": 0,
+ "thread0.num.expired": 0,
+ "thread0.num.prefetch": 0,
+ "thread0.num.queries": 133,
+ "thread0.num.queries_ip_ratelimited": 0,
+ "thread0.num.recursivereplies": 10,
+ "thread0.num.zero_ttl": 0,
+ "thread0.recursion.time.avg": 0,
+ "thread0.recursion.time.median": 0,
+ "thread0.requestlist.avg": 0,
+ "thread0.requestlist.current.all": 0,
+ "thread0.requestlist.current.user": 0,
+ "thread0.requestlist.exceeded": 0,
+ "thread0.requestlist.max": 1,
+ "thread0.requestlist.overwritten": 0,
+ "thread0.tcpusage": 0,
+ "thread1.num.cachehits": 148,
+ "thread1.num.cachemiss": 9,
+ "thread1.num.dnscrypt.cert": 0,
+ "thread1.num.dnscrypt.cleartext": 0,
+ "thread1.num.dnscrypt.crypted": 0,
+ "thread1.num.dnscrypt.malformed": 0,
+ "thread1.num.prefetch": 0,
+ "thread1.num.expired": 0,
+ "thread1.num.queries": 157,
+ "thread1.num.queries_ip_ratelimited": 0,
+ "thread1.num.recursivereplies": 9,
+ "thread1.num.zero_ttl": 0,
+ "thread1.recursion.time.avg": 0,
+ "thread1.recursion.time.median": 0,
+ "thread1.requestlist.avg": 0,
+ "thread1.requestlist.current.all": 0,
+ "thread1.requestlist.current.user": 0,
+ "thread1.requestlist.exceeded": 0,
+ "thread1.requestlist.max": 1,
+ "thread1.requestlist.overwritten": 0,
+ "thread1.tcpusage": 0,
+ "time.elapsed": 82,
+ "time.now": 1574103461,
+ "time.up": 205,
+ "total.num.cachehits": 271,
+ "total.num.cachemiss": 19,
+ "total.num.dnscrypt.cert": 0,
+ "total.num.dnscrypt.cleartext": 0,
+ "total.num.dnscrypt.crypted": 0,
+ "total.num.dnscrypt.malformed": 0,
+ "total.num.prefetch": 0,
+ "total.num.expired": 0,
+ "total.num.queries": 290,
+ "total.num.queries_ip_ratelimited": 0,
+ "total.num.recursivereplies": 19,
+ "total.num.zero_ttl": 0,
+ "total.recursion.time.avg": 0,
+ "total.recursion.time.median": 0,
+ "total.requestlist.avg": 0,
+ "total.requestlist.current.all": 0,
+ "total.requestlist.current.user": 0,
+ "total.requestlist.exceeded": 0,
+ "total.requestlist.max": 1,
+ "total.requestlist.overwritten": 0,
+ "total.tcpusage": 0,
+ "unwanted.queries": 0,
+ "unwanted.replies": 0,
+ }
+
+ expectedCumulative3 = map[string]int64{
+ "dnscrypt_nonce.cache.count": 0,
+ "dnscrypt_shared_secret.cache.count": 0,
+ "infra.cache.count": 232,
+ "key.cache.count": 14,
+ "mem.cache.dnscrypt_nonce": 0,
+ "mem.cache.dnscrypt_shared_secret": 0,
+ "mem.cache.message": 101198,
+ "mem.cache.rrset": 208839,
+ "mem.mod.iterator": 16588,
+ "mem.mod.respip": 0,
+ "mem.mod.subnet": 74504,
+ "mem.mod.validator": 85725,
+ "mem.streamwait": 0,
+ "msg.cache.count": 119,
+ "num.answer.bogus": 0,
+ "num.answer.rcode.FORMERR": 0,
+ "num.answer.rcode.NOERROR": 334,
+ "num.answer.rcode.NOTIMPL": 0,
+ "num.answer.rcode.NXDOMAIN": 16,
+ "num.answer.rcode.REFUSED": 0,
+ "num.answer.rcode.SERVFAIL": 10,
+ "num.answer.rcode.nodata": 20,
+ "num.answer.secure": 0,
+ "num.query.aggressive.NOERROR": 1,
+ "num.query.aggressive.NXDOMAIN": 0,
+ "num.query.authzone.down": 0,
+ "num.query.authzone.up": 0,
+ "num.query.class.IN": 360,
+ "num.query.dnscrypt.replay": 0,
+ "num.query.dnscrypt.shared_secret.cachemiss": 0,
+ "num.query.edns.DO": 0,
+ "num.query.edns.present": 0,
+ "num.query.flags.AA": 0,
+ "num.query.flags.AD": 0,
+ "num.query.flags.CD": 0,
+ "num.query.flags.QR": 0,
+ "num.query.flags.RA": 0,
+ "num.query.flags.RD": 360,
+ "num.query.flags.TC": 0,
+ "num.query.flags.Z": 0,
+ "num.query.ipv6": 0,
+ "num.query.opcode.QUERY": 360,
+ "num.query.ratelimited": 0,
+ "num.query.subnet": 0,
+ "num.query.subnet_cache": 0,
+ "num.query.tcp": 0,
+ "num.query.tcpout": 0,
+ "num.query.tls": 0,
+ "num.query.tls.resume": 0,
+ "num.query.type.A": 120,
+ "num.query.type.AAAA": 110,
+ "num.query.type.MX": 110,
+ "num.query.type.PTR": 20,
+ "num.rrset.bogus": 0,
+ "rrset.cache.count": 401,
+ "thread0.num.cachehits": 150,
+ "thread0.num.cachemiss": 15,
+ "thread0.num.dnscrypt.cert": 0,
+ "thread0.num.dnscrypt.cleartext": 0,
+ "thread0.num.dnscrypt.crypted": 0,
+ "thread0.num.dnscrypt.malformed": 0,
+ "thread0.num.expired": 0,
+ "thread0.num.prefetch": 0,
+ "thread0.num.queries": 165,
+ "thread0.num.queries_ip_ratelimited": 0,
+ "thread0.num.recursivereplies": 15,
+ "thread0.num.zero_ttl": 0,
+ "thread0.recursion.time.avg": 261,
+ "thread0.recursion.time.median": 318,
+ "thread0.requestlist.avg": 66,
+ "thread0.requestlist.current.all": 0,
+ "thread0.requestlist.current.user": 0,
+ "thread0.requestlist.exceeded": 0,
+ "thread0.requestlist.max": 1,
+ "thread0.requestlist.overwritten": 0,
+ "thread0.tcpusage": 0,
+ "thread1.num.cachehits": 184,
+ "thread1.num.cachemiss": 11,
+ "thread1.num.dnscrypt.cert": 0,
+ "thread1.num.dnscrypt.cleartext": 0,
+ "thread1.num.dnscrypt.crypted": 0,
+ "thread1.num.dnscrypt.malformed": 0,
+ "thread1.num.prefetch": 0,
+ "thread1.num.expired": 0,
+ "thread1.num.queries": 195,
+ "thread1.num.queries_ip_ratelimited": 0,
+ "thread1.num.recursivereplies": 11,
+ "thread1.num.zero_ttl": 0,
+ "thread1.recursion.time.avg": 709,
+ "thread1.recursion.time.median": 294,
+ "thread1.requestlist.avg": 363,
+ "thread1.requestlist.current.all": 0,
+ "thread1.requestlist.current.user": 0,
+ "thread1.requestlist.exceeded": 0,
+ "thread1.requestlist.max": 2,
+ "thread1.requestlist.overwritten": 0,
+ "thread1.tcpusage": 0,
+ "time.elapsed": 82,
+ "time.now": 1574103543,
+ "time.up": 288,
+ "total.num.cachehits": 334,
+ "total.num.cachemiss": 26,
+ "total.num.dnscrypt.cert": 0,
+ "total.num.dnscrypt.cleartext": 0,
+ "total.num.dnscrypt.crypted": 0,
+ "total.num.dnscrypt.malformed": 0,
+ "total.num.prefetch": 0,
+ "total.num.expired": 0,
+ "total.num.queries": 360,
+ "total.num.queries_ip_ratelimited": 0,
+ "total.num.recursivereplies": 26,
+ "total.num.zero_ttl": 0,
+ "total.recursion.time.avg": 450,
+ "total.recursion.time.median": 306,
+ "total.requestlist.avg": 192,
+ "total.requestlist.current.all": 0,
+ "total.requestlist.current.user": 0,
+ "total.requestlist.exceeded": 0,
+ "total.requestlist.max": 2,
+ "total.requestlist.overwritten": 0,
+ "total.tcpusage": 0,
+ "unwanted.queries": 0,
+ "unwanted.replies": 0,
+ }
+)
+
+var (
+ expectedReset1 = map[string]int64{
+ "dnscrypt_nonce.cache.count": 0,
+ "dnscrypt_shared_secret.cache.count": 0,
+ "infra.cache.count": 181,
+ "key.cache.count": 10,
+ "mem.cache.dnscrypt_nonce": 0,
+ "mem.cache.dnscrypt_shared_secret": 0,
+ "mem.cache.message": 86064,
+ "mem.cache.rrset": 172757,
+ "mem.mod.iterator": 16588,
+ "mem.mod.respip": 0,
+ "mem.mod.subnet": 74504,
+ "mem.mod.validator": 79979,
+ "mem.streamwait": 0,
+ "msg.cache.count": 67,
+ "num.answer.bogus": 0,
+ "num.answer.rcode.FORMERR": 0,
+ "num.answer.rcode.NOERROR": 90,
+ "num.answer.rcode.NOTIMPL": 0,
+ "num.answer.rcode.NXDOMAIN": 10,
+ "num.answer.rcode.REFUSED": 0,
+ "num.answer.rcode.SERVFAIL": 0,
+ "num.answer.rcode.nodata": 10,
+ "num.answer.secure": 0,
+ "num.query.aggressive.NOERROR": 2,
+ "num.query.aggressive.NXDOMAIN": 0,
+ "num.query.authzone.down": 0,
+ "num.query.authzone.up": 0,
+ "num.query.class.IN": 100,
+ "num.query.dnscrypt.replay": 0,
+ "num.query.dnscrypt.shared_secret.cachemiss": 0,
+ "num.query.edns.DO": 0,
+ "num.query.edns.present": 0,
+ "num.query.flags.AA": 0,
+ "num.query.flags.AD": 0,
+ "num.query.flags.CD": 0,
+ "num.query.flags.QR": 0,
+ "num.query.flags.RA": 0,
+ "num.query.flags.RD": 100,
+ "num.query.flags.TC": 0,
+ "num.query.flags.Z": 0,
+ "num.query.ipv6": 0,
+ "num.query.opcode.QUERY": 100,
+ "num.query.ratelimited": 0,
+ "num.query.subnet": 0,
+ "num.query.subnet_cache": 0,
+ "num.query.tcp": 0,
+ "num.query.tcpout": 1,
+ "num.query.tls": 0,
+ "num.query.tls.resume": 0,
+ "num.query.type.A": 30,
+ "num.query.type.AAAA": 30,
+ "num.query.type.MX": 30,
+ "num.query.type.PTR": 10,
+ "num.rrset.bogus": 0,
+ "rrset.cache.count": 303,
+ "thread0.num.cachehits": 44,
+ "thread0.num.cachemiss": 7,
+ "thread0.num.dnscrypt.cert": 0,
+ "thread0.num.dnscrypt.cleartext": 0,
+ "thread0.num.dnscrypt.crypted": 0,
+ "thread0.num.dnscrypt.malformed": 0,
+ "thread0.num.expired": 0,
+ "thread0.num.prefetch": 0,
+ "thread0.num.queries": 51,
+ "thread0.num.queries_ip_ratelimited": 0,
+ "thread0.num.recursivereplies": 7,
+ "thread0.num.zero_ttl": 0,
+ "thread0.recursion.time.avg": 365,
+ "thread0.recursion.time.median": 57,
+ "thread0.requestlist.avg": 0,
+ "thread0.requestlist.current.all": 0,
+ "thread0.requestlist.current.user": 0,
+ "thread0.requestlist.exceeded": 0,
+ "thread0.requestlist.max": 0,
+ "thread0.requestlist.overwritten": 0,
+ "thread0.tcpusage": 0,
+ "thread1.num.cachehits": 46,
+ "thread1.num.cachemiss": 3,
+ "thread1.num.dnscrypt.cert": 0,
+ "thread1.num.dnscrypt.cleartext": 0,
+ "thread1.num.dnscrypt.crypted": 0,
+ "thread1.num.dnscrypt.malformed": 0,
+ "thread1.num.prefetch": 0,
+ "thread1.num.expired": 0,
+ "thread1.num.queries": 49,
+ "thread1.num.queries_ip_ratelimited": 0,
+ "thread1.num.recursivereplies": 3,
+ "thread1.num.zero_ttl": 0,
+ "thread1.recursion.time.avg": 1582,
+ "thread1.recursion.time.median": 0,
+ "thread1.requestlist.avg": 0,
+ "thread1.requestlist.current.all": 0,
+ "thread1.requestlist.current.user": 0,
+ "thread1.requestlist.exceeded": 0,
+ "thread1.requestlist.max": 0,
+ "thread1.requestlist.overwritten": 0,
+ "thread1.tcpusage": 0,
+ "time.elapsed": 45,
+ "time.now": 1574103644,
+ "time.up": 45,
+ "total.num.cachehits": 90,
+ "total.num.cachemiss": 10,
+ "total.num.dnscrypt.cert": 0,
+ "total.num.dnscrypt.cleartext": 0,
+ "total.num.dnscrypt.crypted": 0,
+ "total.num.dnscrypt.malformed": 0,
+ "total.num.prefetch": 0,
+ "total.num.expired": 0,
+ "total.num.queries": 100,
+ "total.num.queries_ip_ratelimited": 0,
+ "total.num.recursivereplies": 10,
+ "total.num.zero_ttl": 0,
+ "total.recursion.time.avg": 730,
+ "total.recursion.time.median": 28,
+ "total.requestlist.avg": 0,
+ "total.requestlist.current.all": 0,
+ "total.requestlist.current.user": 0,
+ "total.requestlist.exceeded": 0,
+ "total.requestlist.max": 0,
+ "total.requestlist.overwritten": 0,
+ "total.tcpusage": 0,
+ "unwanted.queries": 0,
+ "unwanted.replies": 0,
+ }
+ expectedReset2 = map[string]int64{
+ "dnscrypt_nonce.cache.count": 0,
+ "dnscrypt_shared_secret.cache.count": 0,
+ "infra.cache.count": 181,
+ "key.cache.count": 10,
+ "mem.cache.dnscrypt_nonce": 0,
+ "mem.cache.dnscrypt_shared_secret": 0,
+ "mem.cache.message": 86064,
+ "mem.cache.rrset": 172757,
+ "mem.mod.iterator": 16588,
+ "mem.mod.respip": 0,
+ "mem.mod.subnet": 74504,
+ "mem.mod.validator": 79979,
+ "mem.streamwait": 0,
+ "msg.cache.count": 67,
+ "num.answer.bogus": 0,
+ "num.answer.rcode.FORMERR": 0,
+ "num.answer.rcode.NOERROR": 0,
+ "num.answer.rcode.NOTIMPL": 0,
+ "num.answer.rcode.NXDOMAIN": 0,
+ "num.answer.rcode.REFUSED": 0,
+ "num.answer.rcode.SERVFAIL": 0,
+ "num.answer.rcode.nodata": 0,
+ "num.answer.secure": 0,
+ "num.query.aggressive.NOERROR": 0,
+ "num.query.aggressive.NXDOMAIN": 0,
+ "num.query.authzone.down": 0,
+ "num.query.authzone.up": 0,
+ "num.query.class.IN": 0,
+ "num.query.dnscrypt.replay": 0,
+ "num.query.dnscrypt.shared_secret.cachemiss": 0,
+ "num.query.edns.DO": 0,
+ "num.query.edns.present": 0,
+ "num.query.flags.AA": 0,
+ "num.query.flags.AD": 0,
+ "num.query.flags.CD": 0,
+ "num.query.flags.QR": 0,
+ "num.query.flags.RA": 0,
+ "num.query.flags.RD": 0,
+ "num.query.flags.TC": 0,
+ "num.query.flags.Z": 0,
+ "num.query.ipv6": 0,
+ "num.query.opcode.QUERY": 0,
+ "num.query.ratelimited": 0,
+ "num.query.subnet": 0,
+ "num.query.subnet_cache": 0,
+ "num.query.tcp": 0,
+ "num.query.tcpout": 0,
+ "num.query.tls": 0,
+ "num.query.tls.resume": 0,
+ "num.query.type.A": 0,
+ "num.query.type.AAAA": 0,
+ "num.query.type.MX": 0,
+ "num.query.type.PTR": 0,
+ "num.rrset.bogus": 0,
+ "rrset.cache.count": 303,
+ "thread0.num.cachehits": 0,
+ "thread0.num.cachemiss": 0,
+ "thread0.num.dnscrypt.cert": 0,
+ "thread0.num.dnscrypt.cleartext": 0,
+ "thread0.num.dnscrypt.crypted": 0,
+ "thread0.num.dnscrypt.malformed": 0,
+ "thread0.num.expired": 0,
+ "thread0.num.prefetch": 0,
+ "thread0.num.queries": 0,
+ "thread0.num.queries_ip_ratelimited": 0,
+ "thread0.num.recursivereplies": 0,
+ "thread0.num.zero_ttl": 0,
+ "thread0.recursion.time.avg": 0,
+ "thread0.recursion.time.median": 0,
+ "thread0.requestlist.avg": 0,
+ "thread0.requestlist.current.all": 0,
+ "thread0.requestlist.current.user": 0,
+ "thread0.requestlist.exceeded": 0,
+ "thread0.requestlist.max": 0,
+ "thread0.requestlist.overwritten": 0,
+ "thread0.tcpusage": 0,
+ "thread1.num.cachehits": 0,
+ "thread1.num.cachemiss": 0,
+ "thread1.num.dnscrypt.cert": 0,
+ "thread1.num.dnscrypt.cleartext": 0,
+ "thread1.num.dnscrypt.crypted": 0,
+ "thread1.num.dnscrypt.malformed": 0,
+ "thread1.num.prefetch": 0,
+ "thread1.num.expired": 0,
+ "thread1.num.queries": 0,
+ "thread1.num.queries_ip_ratelimited": 0,
+ "thread1.num.recursivereplies": 0,
+ "thread1.num.zero_ttl": 0,
+ "thread1.recursion.time.avg": 0,
+ "thread1.recursion.time.median": 0,
+ "thread1.requestlist.avg": 0,
+ "thread1.requestlist.current.all": 0,
+ "thread1.requestlist.current.user": 0,
+ "thread1.requestlist.exceeded": 0,
+ "thread1.requestlist.max": 0,
+ "thread1.requestlist.overwritten": 0,
+ "thread1.tcpusage": 0,
+ "time.elapsed": 26,
+ "time.now": 1574103671,
+ "time.up": 71,
+ "total.num.cachehits": 0,
+ "total.num.cachemiss": 0,
+ "total.num.dnscrypt.cert": 0,
+ "total.num.dnscrypt.cleartext": 0,
+ "total.num.dnscrypt.crypted": 0,
+ "total.num.dnscrypt.malformed": 0,
+ "total.num.prefetch": 0,
+ "total.num.expired": 0,
+ "total.num.queries": 0,
+ "total.num.queries_ip_ratelimited": 0,
+ "total.num.recursivereplies": 0,
+ "total.num.zero_ttl": 0,
+ "total.recursion.time.avg": 0,
+ "total.recursion.time.median": 0,
+ "total.requestlist.avg": 0,
+ "total.requestlist.current.all": 0,
+ "total.requestlist.current.user": 0,
+ "total.requestlist.exceeded": 0,
+ "total.requestlist.max": 0,
+ "total.requestlist.overwritten": 0,
+ "total.tcpusage": 0,
+ "unwanted.queries": 0,
+ "unwanted.replies": 0,
+ }
+
+ expectedReset3 = map[string]int64{
+ "dnscrypt_nonce.cache.count": 0,
+ "dnscrypt_shared_secret.cache.count": 0,
+ "infra.cache.count": 303,
+ "key.cache.count": 15,
+ "mem.cache.dnscrypt_nonce": 0,
+ "mem.cache.dnscrypt_shared_secret": 0,
+ "mem.cache.message": 105471,
+ "mem.cache.rrset": 235917,
+ "mem.mod.iterator": 16588,
+ "mem.mod.respip": 0,
+ "mem.mod.subnet": 74504,
+ "mem.mod.validator": 87270,
+ "mem.streamwait": 0,
+ "msg.cache.count": 127,
+ "num.answer.bogus": 0,
+ "num.answer.rcode.FORMERR": 0,
+ "num.answer.rcode.NOERROR": 60,
+ "num.answer.rcode.NOTIMPL": 0,
+ "num.answer.rcode.NXDOMAIN": 10,
+ "num.answer.rcode.REFUSED": 0,
+ "num.answer.rcode.SERVFAIL": 0,
+ "num.answer.rcode.nodata": 10,
+ "num.answer.secure": 0,
+ "num.query.aggressive.NOERROR": 2,
+ "num.query.aggressive.NXDOMAIN": 0,
+ "num.query.authzone.down": 0,
+ "num.query.authzone.up": 0,
+ "num.query.class.IN": 70,
+ "num.query.dnscrypt.replay": 0,
+ "num.query.dnscrypt.shared_secret.cachemiss": 0,
+ "num.query.edns.DO": 0,
+ "num.query.edns.present": 0,
+ "num.query.flags.AA": 0,
+ "num.query.flags.AD": 0,
+ "num.query.flags.CD": 0,
+ "num.query.flags.QR": 0,
+ "num.query.flags.RA": 0,
+ "num.query.flags.RD": 70,
+ "num.query.flags.TC": 0,
+ "num.query.flags.Z": 0,
+ "num.query.ipv6": 0,
+ "num.query.opcode.QUERY": 70,
+ "num.query.ratelimited": 0,
+ "num.query.subnet": 0,
+ "num.query.subnet_cache": 0,
+ "num.query.tcp": 0,
+ "num.query.tcpout": 0,
+ "num.query.tls": 0,
+ "num.query.tls.resume": 0,
+ "num.query.type.A": 20,
+ "num.query.type.AAAA": 20,
+ "num.query.type.MX": 20,
+ "num.query.type.PTR": 10,
+ "num.rrset.bogus": 0,
+ "rrset.cache.count": 501,
+ "thread0.num.cachehits": 30,
+ "thread0.num.cachemiss": 4,
+ "thread0.num.dnscrypt.cert": 0,
+ "thread0.num.dnscrypt.cleartext": 0,
+ "thread0.num.dnscrypt.crypted": 0,
+ "thread0.num.dnscrypt.malformed": 0,
+ "thread0.num.expired": 0,
+ "thread0.num.prefetch": 0,
+ "thread0.num.queries": 34,
+ "thread0.num.queries_ip_ratelimited": 0,
+ "thread0.num.recursivereplies": 4,
+ "thread0.num.zero_ttl": 0,
+ "thread0.recursion.time.avg": 541,
+ "thread0.recursion.time.median": 98,
+ "thread0.requestlist.avg": 0,
+ "thread0.requestlist.current.all": 0,
+ "thread0.requestlist.current.user": 0,
+ "thread0.requestlist.exceeded": 0,
+ "thread0.requestlist.max": 0,
+ "thread0.requestlist.overwritten": 0,
+ "thread0.tcpusage": 0,
+ "thread1.num.cachehits": 33,
+ "thread1.num.cachemiss": 3,
+ "thread1.num.dnscrypt.cert": 0,
+ "thread1.num.dnscrypt.cleartext": 0,
+ "thread1.num.dnscrypt.crypted": 0,
+ "thread1.num.dnscrypt.malformed": 0,
+ "thread1.num.prefetch": 0,
+ "thread1.num.expired": 0,
+ "thread1.num.queries": 36,
+ "thread1.num.queries_ip_ratelimited": 0,
+ "thread1.num.recursivereplies": 3,
+ "thread1.num.zero_ttl": 0,
+ "thread1.recursion.time.avg": 62,
+ "thread1.recursion.time.median": 0,
+ "thread1.requestlist.avg": 1666,
+ "thread1.requestlist.current.all": 0,
+ "thread1.requestlist.current.user": 0,
+ "thread1.requestlist.exceeded": 0,
+ "thread1.requestlist.max": 5,
+ "thread1.requestlist.overwritten": 0,
+ "thread1.tcpusage": 0,
+ "time.elapsed": 59,
+ "time.now": 1574103731,
+ "time.up": 131,
+ "total.num.cachehits": 63,
+ "total.num.cachemiss": 7,
+ "total.num.dnscrypt.cert": 0,
+ "total.num.dnscrypt.cleartext": 0,
+ "total.num.dnscrypt.crypted": 0,
+ "total.num.dnscrypt.malformed": 0,
+ "total.num.prefetch": 0,
+ "total.num.expired": 0,
+ "total.num.queries": 70,
+ "total.num.queries_ip_ratelimited": 0,
+ "total.num.recursivereplies": 7,
+ "total.num.zero_ttl": 0,
+ "total.recursion.time.avg": 336,
+ "total.recursion.time.median": 49,
+ "total.requestlist.avg": 714,
+ "total.requestlist.current.all": 0,
+ "total.requestlist.current.user": 0,
+ "total.requestlist.exceeded": 0,
+ "total.requestlist.max": 5,
+ "total.requestlist.overwritten": 0,
+ "total.tcpusage": 0,
+ "unwanted.queries": 0,
+ "unwanted.replies": 0,
+ }
+)
diff --git a/src/go/plugin/go.d/modules/upsd/README.md b/src/go/plugin/go.d/modules/upsd/README.md
new file mode 120000
index 000000000..8dcef84dd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/README.md
@@ -0,0 +1 @@
+integrations/ups_nut.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/upsd/charts.go b/src/go/plugin/go.d/modules/upsd/charts.go
new file mode 100644
index 000000000..909c111d1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/charts.go
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package upsd
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioUpsLoad = module.Priority + iota
+ prioUpsLoadUsage
+ prioUpsStatus
+ prioUpsTemperature
+
+ prioBatteryCharge
+ prioBatteryEstimatedRuntime
+ prioBatteryVoltage
+ prioBatteryVoltageNominal
+
+ prioInputVoltage
+ prioInputVoltageNominal
+ prioInputCurrent
+ prioInputCurrentNominal
+ prioInputFrequency
+ prioInputFrequencyNominal
+
+ prioOutputVoltage
+ prioOutputVoltageNominal
+ prioOutputCurrent
+ prioOutputCurrentNominal
+ prioOutputFrequency
+ prioOutputFrequencyNominal
+)
+
+var upsChartsTmpl = module.Charts{
+ upsLoadChartTmpl.Copy(),
+ upsLoadUsageChartTmpl.Copy(),
+ upsStatusChartTmpl.Copy(),
+ upsTemperatureChartTmpl.Copy(),
+
+ upsBatteryChargePercentChartTmpl.Copy(),
+ upsBatteryEstimatedRuntimeChartTmpl.Copy(),
+ upsBatteryVoltageChartTmpl.Copy(),
+ upsBatteryVoltageNominalChartTmpl.Copy(),
+
+ upsInputVoltageChartTmpl.Copy(),
+ upsInputVoltageNominalChartTmpl.Copy(),
+ upsInputCurrentChartTmpl.Copy(),
+ upsInputCurrentNominalChartTmpl.Copy(),
+ upsInputFrequencyChartTmpl.Copy(),
+ upsInputFrequencyNominalChartTmpl.Copy(),
+
+ upsOutputVoltageChartTmpl.Copy(),
+ upsOutputVoltageNominalChartTmpl.Copy(),
+ upsOutputCurrentChartTmpl.Copy(),
+ upsOutputCurrentNominalChartTmpl.Copy(),
+ upsOutputFrequencyChartTmpl.Copy(),
+ upsOutputFrequencyNominalChartTmpl.Copy(),
+}
+
+var (
+ upsLoadChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.load_percentage",
+ Title: "UPS load",
+ Units: "percentage",
+ Fam: "ups",
+ Ctx: "upsd.ups_load",
+ Priority: prioUpsLoad,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "ups_%s_ups.load", Name: "load", Div: varPrecision},
+ },
+ }
+ upsLoadUsageChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.load_usage",
+ Title: "UPS load usage (power output)",
+ Units: "Watts",
+ Fam: "ups",
+ Ctx: "upsd.ups_load_usage",
+ Priority: prioUpsLoadUsage,
+ Dims: module.Dims{
+ {ID: "ups_%s_ups.load.usage", Name: "load_usage", Div: varPrecision},
+ },
+ }
+ upsStatusChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.status",
+ Title: "UPS status",
+ Units: "status",
+ Fam: "ups",
+ Ctx: "upsd.ups_status",
+ Priority: prioUpsStatus,
+ Dims: module.Dims{
+ {ID: "ups_%s_ups.status.OL", Name: "on_line"},
+ {ID: "ups_%s_ups.status.OB", Name: "on_battery"},
+ {ID: "ups_%s_ups.status.LB", Name: "low_battery"},
+ {ID: "ups_%s_ups.status.HB", Name: "high_battery"},
+ {ID: "ups_%s_ups.status.RB", Name: "replace_battery"},
+ {ID: "ups_%s_ups.status.CHRG", Name: "charging"},
+ {ID: "ups_%s_ups.status.DISCHRG", Name: "discharging"},
+ {ID: "ups_%s_ups.status.BYPASS", Name: "bypass"},
+ {ID: "ups_%s_ups.status.CAL", Name: "calibration"},
+ {ID: "ups_%s_ups.status.OFF", Name: "offline"},
+ {ID: "ups_%s_ups.status.OVER", Name: "overloaded"},
+ {ID: "ups_%s_ups.status.TRIM", Name: "trim_input_voltage"},
+ {ID: "ups_%s_ups.status.BOOST", Name: "boost_input_voltage"},
+ {ID: "ups_%s_ups.status.FSD", Name: "forced_shutdown"},
+ {ID: "ups_%s_ups.status.other", Name: "other"},
+ },
+ }
+ upsTemperatureChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.temperature",
+ Title: "UPS temperature",
+ Units: "Celsius",
+ Fam: "ups",
+ Ctx: "upsd.ups_temperature",
+ Priority: prioUpsTemperature,
+ Dims: module.Dims{
+ {ID: "ups_%s_ups.temperature", Name: "temperature", Div: varPrecision},
+ },
+ }
+)
+
+var (
+ upsBatteryChargePercentChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.battery_charge_percentage",
+ Title: "UPS Battery charge",
+ Units: "percentage",
+ Fam: "battery",
+ Ctx: "upsd.ups_battery_charge",
+ Priority: prioBatteryCharge,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "ups_%s_battery.charge", Name: "charge", Div: varPrecision},
+ },
+ }
+ upsBatteryEstimatedRuntimeChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.battery_estimated_runtime",
+ Title: "UPS Battery estimated runtime",
+ Units: "seconds",
+ Fam: "battery",
+ Ctx: "upsd.ups_battery_estimated_runtime",
+ Priority: prioBatteryEstimatedRuntime,
+ Dims: module.Dims{
+ {ID: "ups_%s_battery.runtime", Name: "runtime", Div: varPrecision},
+ },
+ }
+ upsBatteryVoltageChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.battery_voltage",
+ Title: "UPS Battery voltage",
+ Units: "Volts",
+ Fam: "battery",
+ Ctx: "upsd.ups_battery_voltage",
+ Priority: prioBatteryVoltage,
+ Dims: module.Dims{
+ {ID: "ups_%s_battery.voltage", Name: "voltage", Div: varPrecision},
+ },
+ }
+ upsBatteryVoltageNominalChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.battery_voltage_nominal",
+ Title: "UPS Battery voltage nominal",
+ Units: "Volts",
+ Fam: "battery",
+ Ctx: "upsd.ups_battery_voltage_nominal",
+ Priority: prioBatteryVoltageNominal,
+ Dims: module.Dims{
+ {ID: "ups_%s_battery.voltage.nominal", Name: "nominal_voltage", Div: varPrecision},
+ },
+ }
+)
+
+var (
+ upsInputVoltageChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.input_voltage",
+ Title: "UPS Input voltage",
+ Units: "Volts",
+ Fam: "input",
+ Ctx: "upsd.ups_input_voltage",
+ Priority: prioInputVoltage,
+ Dims: module.Dims{
+ {ID: "ups_%s_input.voltage", Name: "voltage", Div: varPrecision},
+ },
+ }
+ upsInputVoltageNominalChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.input_voltage_nominal",
+ Title: "UPS Input voltage nominal",
+ Units: "Volts",
+ Fam: "input",
+ Ctx: "upsd.ups_input_voltage_nominal",
+ Priority: prioInputVoltageNominal,
+ Dims: module.Dims{
+ {ID: "ups_%s_input.voltage.nominal", Name: "nominal_voltage", Div: varPrecision},
+ },
+ }
+ upsInputCurrentChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.input_current",
+ Title: "UPS Input current",
+ Units: "Ampere",
+ Fam: "input",
+ Ctx: "upsd.ups_input_current",
+ Priority: prioInputCurrent,
+ Dims: module.Dims{
+ {ID: "ups_%s_input.current", Name: "current", Div: varPrecision},
+ },
+ }
+ upsInputCurrentNominalChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.input_current_nominal",
+ Title: "UPS Input current nominal",
+ Units: "Ampere",
+ Fam: "input",
+ Ctx: "upsd.ups_input_current_nominal",
+ Priority: prioInputCurrentNominal,
+ Dims: module.Dims{
+ {ID: "ups_%s_input.current.nominal", Name: "nominal_current", Div: varPrecision},
+ },
+ }
+ upsInputFrequencyChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.input_frequency",
+ Title: "UPS Input frequency",
+ Units: "Hz",
+ Fam: "input",
+ Ctx: "upsd.ups_input_frequency",
+ Priority: prioInputFrequency,
+ Dims: module.Dims{
+ {ID: "ups_%s_input.frequency", Name: "frequency", Div: varPrecision},
+ },
+ }
+ upsInputFrequencyNominalChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.input_frequency_nominal",
+ Title: "UPS Input frequency nominal",
+ Units: "Hz",
+ Fam: "input",
+ Ctx: "upsd.ups_input_frequency_nominal",
+ Priority: prioInputFrequencyNominal,
+ Dims: module.Dims{
+ {ID: "ups_%s_input.frequency.nominal", Name: "nominal_frequency", Div: varPrecision},
+ },
+ }
+)
+
+var (
+ upsOutputVoltageChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.output_voltage",
+ Title: "UPS Output voltage",
+ Units: "Volts",
+ Fam: "output",
+ Ctx: "upsd.ups_output_voltage",
+ Priority: prioOutputVoltage,
+ Dims: module.Dims{
+ {ID: "ups_%s_output.voltage", Name: "voltage", Div: varPrecision},
+ },
+ }
+ upsOutputVoltageNominalChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.output_voltage_nominal",
+ Title: "UPS Output voltage nominal",
+ Units: "Volts",
+ Fam: "output",
+ Ctx: "upsd.ups_output_voltage_nominal",
+ Priority: prioOutputVoltageNominal,
+ Dims: module.Dims{
+ {ID: "ups_%s_output.voltage.nominal", Name: "nominal_voltage", Div: varPrecision},
+ },
+ }
+ upsOutputCurrentChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.output_current",
+ Title: "UPS Output current",
+ Units: "Ampere",
+ Fam: "output",
+ Ctx: "upsd.ups_output_current",
+ Priority: prioOutputCurrent,
+ Dims: module.Dims{
+ {ID: "ups_%s_output.current", Name: "current", Div: varPrecision},
+ },
+ }
+ upsOutputCurrentNominalChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.output_current_nominal",
+ Title: "UPS Output current nominal",
+ Units: "Ampere",
+ Fam: "output",
+ Ctx: "upsd.ups_output_current_nominal",
+ Priority: prioOutputCurrentNominal,
+ Dims: module.Dims{
+ {ID: "ups_%s_output.current.nominal", Name: "nominal_current", Div: varPrecision},
+ },
+ }
+ upsOutputFrequencyChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.output_frequency",
+ Title: "UPS Output frequency",
+ Units: "Hz",
+ Fam: "output",
+ Ctx: "upsd.ups_output_frequency",
+ Priority: prioOutputFrequency,
+ Dims: module.Dims{
+ {ID: "ups_%s_output.frequency", Name: "frequency", Div: varPrecision},
+ },
+ }
+ upsOutputFrequencyNominalChartTmpl = module.Chart{
+ IDSep: true,
+ ID: "%s.output_frequency_nominal",
+ Title: "UPS Output frequency nominal",
+ Units: "Hz",
+ Fam: "output",
+ Ctx: "upsd.ups_output_frequency_nominal",
+ Priority: prioOutputFrequencyNominal,
+ Dims: module.Dims{
+ {ID: "ups_%s_output.frequency.nominal", Name: "nominal_frequency", Div: varPrecision},
+ },
+ }
+)
+
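+// addUPSCharts instantiates the per-UPS chart templates, dropping charts whose
+// source variables the UPS does not report, and adds the result to the module charts.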
+func (u *Upsd) addUPSCharts(ups upsUnit) {
+ charts := upsChartsTmpl.Copy()
+
+ var removed []string
+ for _, v := range []struct{ v, id string }{
+ {varBatteryVoltage, upsBatteryVoltageChartTmpl.ID},
+ {varBatteryVoltageNominal, upsBatteryVoltageNominalChartTmpl.ID},
+
+ {varUpsTemperature, upsTemperatureChartTmpl.ID},
+
+ {varInputVoltage, upsInputVoltageChartTmpl.ID},
+ {varInputVoltageNominal, upsInputVoltageNominalChartTmpl.ID},
+ {varInputCurrent, upsInputCurrentChartTmpl.ID},
+ {varInputCurrentNominal, upsInputCurrentNominalChartTmpl.ID},
+ {varInputFrequency, upsInputFrequencyChartTmpl.ID},
+ {varInputFrequencyNominal, upsInputFrequencyNominalChartTmpl.ID},
+
+ {varOutputVoltage, upsOutputVoltageChartTmpl.ID},
+ {varOutputVoltageNominal, upsOutputVoltageNominalChartTmpl.ID},
+ {varOutputCurrent, upsOutputCurrentChartTmpl.ID},
+ {varOutputCurrentNominal, upsOutputCurrentNominalChartTmpl.ID},
+ {varOutputFrequency, upsOutputFrequencyChartTmpl.ID},
+ {varOutputFrequencyNominal, upsOutputFrequencyNominalChartTmpl.ID},
+ } {
+ if !hasVar(ups.vars, v.v) {
+ removed = append(removed, v.v)
+ _ = charts.Remove(v.id)
+ }
+ }
+
+	u.Debugf("UPS '%s' has no values for variables: %v", ups.name, removed)
+
+ name := cleanUpsName(ups.name)
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Labels = []module.Label{
+ {Key: "ups_name", Value: ups.name},
+ {Key: "battery_type", Value: ups.vars[varBatteryType]},
+ {Key: "device_model", Value: ups.vars[varDeviceModel]},
+ {Key: "device_serial", Value: ups.vars[varDeviceSerial]},
+ {Key: "device_manufacturer", Value: ups.vars[varDeviceMfr]},
+ {Key: "device_type", Value: ups.vars[varDeviceType]},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, ups.name)
+ }
+ }
+
+ if err := u.Charts().Add(*charts...); err != nil {
+ u.Warning(err)
+ }
+}
+
+func (u *Upsd) removeUPSCharts(name string) {
+ name = cleanUpsName(name)
+ for _, chart := range *u.Charts() {
+ if strings.HasPrefix(chart.ID, name) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
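+// cleanUpsName sanitizes a UPS name for use in chart IDs (spaces and dots become underscores).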
+func cleanUpsName(name string) string {
+ name = strings.ReplaceAll(name, " ", "_")
+ name = strings.ReplaceAll(name, ".", "_")
+ return name
+}
diff --git a/src/go/plugin/go.d/modules/upsd/client.go b/src/go/plugin/go.d/modules/upsd/client.go
new file mode 100644
index 000000000..a708bdcaf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/client.go
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package upsd
+
+import (
+ "encoding/csv"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
+const (
+ commandUsername = "USERNAME %s"
+ commandPassword = "PASSWORD %s"
+ commandListUPS = "LIST UPS"
+ commandListVar = "LIST VAR %s"
+ commandLogout = "LOGOUT"
+)
+
+// https://github.com/networkupstools/nut/blob/81fca30b2998fa73085ce4654f075605ff0b9e01/docs/net-protocol.txt#L647
+var errUpsdCommand = errors.New("upsd command error")
+
+type upsUnit struct {
+ name string
+ vars map[string]string
+}
+
+func newUpsdConn(conf Config) upsdConn {
+ return &upsdClient{conn: socket.New(socket.Config{
+ ConnectTimeout: conf.Timeout.Duration(),
+ ReadTimeout: conf.Timeout.Duration(),
+ WriteTimeout: conf.Timeout.Duration(),
+ Address: conf.Address,
+ })}
+}
+
+type upsdClient struct {
+ conn socket.Client
+}
+
+func (c *upsdClient) connect() error {
+ return c.conn.Connect()
+}
+
+func (c *upsdClient) disconnect() error {
+ _, _ = c.sendCommand(commandLogout)
+ return c.conn.Disconnect()
+}
+
+func (c *upsdClient) authenticate(username, password string) error {
+ cmd := fmt.Sprintf(commandUsername, username)
+ resp, err := c.sendCommand(cmd)
+ if err != nil {
+ return err
+ }
+ if resp[0] != "OK" {
+ return errors.New("authentication failed: invalid username")
+ }
+
+ cmd = fmt.Sprintf(commandPassword, password)
+ resp, err = c.sendCommand(cmd)
+ if err != nil {
+ return err
+ }
+ if resp[0] != "OK" {
+ return errors.New("authentication failed: invalid password")
+ }
+
+ return nil
+}
+
+func (c *upsdClient) upsUnits() ([]upsUnit, error) {
+ resp, err := c.sendCommand(commandListUPS)
+ if err != nil {
+ return nil, err
+ }
+
+ var upsNames []string
+
+ for _, v := range resp {
+ if !strings.HasPrefix(v, "UPS ") {
+ continue
+ }
+ parts := splitLine(v)
+ if len(parts) < 2 {
+ continue
+ }
+ name := parts[1]
+ upsNames = append(upsNames, name)
+ }
+
+ var upsUnits []upsUnit
+
+ for _, name := range upsNames {
+ cmd := fmt.Sprintf(commandListVar, name)
+ resp, err := c.sendCommand(cmd)
+ if err != nil {
+ return nil, err
+ }
+
+ ups := upsUnit{
+ name: name,
+ vars: make(map[string]string),
+ }
+
+ upsUnits = append(upsUnits, ups)
+
+ for _, v := range resp {
+ if !strings.HasPrefix(v, "VAR ") {
+ continue
+ }
+ parts := splitLine(v)
+ if len(parts) < 4 {
+ continue
+ }
+ n, v := parts[2], parts[3]
+ ups.vars[n] = v
+ }
+ }
+
+ return upsUnits, nil
+}
+
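+// sendCommand writes a single command and reads response lines until the
+// command's terminating line or an "ERR ..." line is received.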
+func (c *upsdClient) sendCommand(cmd string) ([]string, error) {
+ var resp []string
+ var errMsg string
+ endLine := getEndLine(cmd)
+
+ err := c.conn.Command(cmd+"\n", func(bytes []byte) bool {
+ line := string(bytes)
+ resp = append(resp, line)
+
+ if strings.HasPrefix(line, "ERR ") {
+ errMsg = strings.TrimPrefix(line, "ERR ")
+ }
+
+ return line != endLine && errMsg == ""
+ })
+ if err != nil {
+ return nil, err
+ }
+ if errMsg != "" {
+ return nil, fmt.Errorf("%w: %s (cmd: '%s')", errUpsdCommand, errMsg, cmd)
+ }
+
+ return resp, nil
+}
+
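+// getEndLine returns the line that terminates the response for a command:
+// USERNAME, PASSWORD and VER reply with a single "OK" line, while LIST
+// responses end with "END <cmd>".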
+func getEndLine(cmd string) string {
+ px, _, _ := strings.Cut(cmd, " ")
+
+ switch px {
+ case "USERNAME", "PASSWORD", "VER":
+ return "OK"
+ }
+ return fmt.Sprintf("END %s", cmd)
+}
+
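+// splitLine splits a response line on spaces while keeping double-quoted values
+// (e.g. `VAR ups battery.charge "100"`) as single fields.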
+func splitLine(s string) []string {
+ r := csv.NewReader(strings.NewReader(s))
+ r.Comma = ' '
+
+ parts, _ := r.Read()
+
+ return parts
+}
diff --git a/src/go/plugin/go.d/modules/upsd/collect.go b/src/go/plugin/go.d/modules/upsd/collect.go
new file mode 100644
index 000000000..39e3d1b55
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/collect.go
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package upsd
+
+import (
+ "errors"
+ "strconv"
+ "strings"
+)
+
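+// collect lazily (re)establishes the connection, queries all UPS units and
+// converts their variables into metrics. The connection is dropped on transport
+// errors but kept when the server replies with a protocol "ERR ..." response.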
+func (u *Upsd) collect() (map[string]int64, error) {
+ if u.conn == nil {
+ conn, err := u.establishConnection()
+ if err != nil {
+ return nil, err
+ }
+ u.conn = conn
+ }
+
+ upsUnits, err := u.conn.upsUnits()
+ if err != nil {
+ if !errors.Is(err, errUpsdCommand) {
+ _ = u.conn.disconnect()
+ u.conn = nil
+ }
+ return nil, err
+ }
+
+ u.Debugf("found %d UPS units", len(upsUnits))
+
+ mx := make(map[string]int64)
+
+ u.collectUPSUnits(mx, upsUnits)
+
+ return mx, nil
+}
+
+func (u *Upsd) establishConnection() (upsdConn, error) {
+ conn := u.newUpsdConn(u.Config)
+
+ if err := conn.connect(); err != nil {
+ return nil, err
+ }
+
+ if u.Username != "" && u.Password != "" {
+ if err := conn.authenticate(u.Username, u.Password); err != nil {
+ _ = conn.disconnect()
+ return nil, err
+ }
+ }
+
+ return conn, nil
+}
+
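+// collectUPSUnits writes metrics for every reported UPS, adds charts for newly
+// seen units and removes charts for units that are no longer reported.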
+func (u *Upsd) collectUPSUnits(mx map[string]int64, upsUnits []upsUnit) {
+ seen := make(map[string]bool)
+
+ for _, ups := range upsUnits {
+ seen[ups.name] = true
+ u.Debugf("collecting metrics UPS '%s'", ups.name)
+
+ if !u.upsUnits[ups.name] {
+ u.upsUnits[ups.name] = true
+ u.addUPSCharts(ups)
+ }
+
+ writeVar(mx, ups, varBatteryCharge)
+ writeVar(mx, ups, varBatteryRuntime)
+ writeVar(mx, ups, varBatteryVoltage)
+ writeVar(mx, ups, varBatteryVoltageNominal)
+
+ writeVar(mx, ups, varInputVoltage)
+ writeVar(mx, ups, varInputVoltageNominal)
+ writeVar(mx, ups, varInputCurrent)
+ writeVar(mx, ups, varInputCurrentNominal)
+ writeVar(mx, ups, varInputFrequency)
+ writeVar(mx, ups, varInputFrequencyNominal)
+
+ writeVar(mx, ups, varOutputVoltage)
+ writeVar(mx, ups, varOutputVoltageNominal)
+ writeVar(mx, ups, varOutputCurrent)
+ writeVar(mx, ups, varOutputCurrentNominal)
+ writeVar(mx, ups, varOutputFrequency)
+ writeVar(mx, ups, varOutputFrequencyNominal)
+
+ writeVar(mx, ups, varUpsLoad)
+ writeVar(mx, ups, varUpsRealPowerNominal)
+ writeVar(mx, ups, varUpsTemperature)
+ writeUpsLoadUsage(mx, ups)
+ writeUpsStatus(mx, ups)
+ }
+
+ for name := range u.upsUnits {
+ if !seen[name] {
+ delete(u.upsUnits, name)
+ u.removeUPSCharts(name)
+ }
+ }
+}
+
+func writeVar(mx map[string]int64, ups upsUnit, v string) {
+ s, ok := ups.vars[v]
+ if !ok {
+ return
+ }
+ n, err := strconv.ParseFloat(s, 64)
+ if err != nil {
+ return
+ }
+ mx[prefix(ups)+v] = int64(n * varPrecision)
+}
+
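+// writeUpsLoadUsage reports power output in Watts: it uses ups.realpower when
+// available, otherwise it estimates the value as ups.load (percent) of ups.realpower.nominal.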
+func writeUpsLoadUsage(mx map[string]int64, ups upsUnit) {
+ if hasVar(ups.vars, varUpsRealPower) {
+ pow, _ := strconv.ParseFloat(ups.vars[varUpsRealPower], 64)
+ mx[prefix(ups)+"ups.load.usage"] = int64(pow * varPrecision)
+ return
+ }
+
+ if !hasVar(ups.vars, varUpsLoad) || !hasVar(ups.vars, varUpsRealPowerNominal) {
+ return
+ }
+ load, err := strconv.ParseFloat(ups.vars[varUpsLoad], 64)
+ if err != nil {
+ return
+ }
+ nomPower, err := strconv.ParseFloat(ups.vars[varUpsRealPowerNominal], 64)
+ if err != nil || nomPower == 0 {
+ return
+ }
+ mx[prefix(ups)+"ups.load.usage"] = int64((load / 100 * nomPower) * varPrecision)
+}
+
+// https://networkupstools.org/docs/developer-guide.chunked/ar01s04.html#_status_data
+var upsStatuses = map[string]bool{
+ "OL": true,
+ "OB": true,
+ "LB": true,
+ "HB": true,
+ "RB": true,
+ "CHRG": true,
+ "DISCHRG": true,
+ "BYPASS": true,
+ "CAL": true,
+ "OFF": true,
+ "OVER": true,
+ "TRIM": true,
+ "BOOST": true,
+ "FSD": true,
+}
+
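+// writeUpsStatus exports every known status flag as a 0/1 dimension; flags not
+// listed in upsStatuses (ups.status may hold several space-separated flags) are
+// counted under "other".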
+func writeUpsStatus(mx map[string]int64, ups upsUnit) {
+ if !hasVar(ups.vars, varUpsStatus) {
+ return
+ }
+
+ px := prefix(ups) + "ups.status."
+
+ for st := range upsStatuses {
+ mx[px+st] = 0
+ }
+ mx[px+"other"] = 0
+
+ for _, st := range strings.Split(ups.vars[varUpsStatus], " ") {
+ if _, ok := upsStatuses[st]; ok {
+ mx[px+st] = 1
+ } else {
+ mx[px+"other"] = 1
+ }
+ }
+}
+
+func hasVar(vars map[string]string, v string) bool {
+ _, ok := vars[v]
+ return ok
+}
+
+func prefix(ups upsUnit) string {
+ return "ups_" + ups.name + "_"
+}
diff --git a/src/go/plugin/go.d/modules/upsd/config_schema.json b/src/go/plugin/go.d/modules/upsd/config_schema.json
new file mode 100644
index 000000000..564c0179c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/config_schema.json
@@ -0,0 +1,85 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "UPSd collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the UPSd daemon listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:3493"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for authentication.",
+ "type": "string"
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for authentication.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ },
+ "dependencies": {
+ "username": [
+ "password"
+ ],
+ "password": [
+ "username"
+ ]
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/upsd/integrations/ups_nut.md b/src/go/plugin/go.d/modules/upsd/integrations/ups_nut.md
new file mode 100644
index 000000000..002617bdf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/integrations/ups_nut.md
@@ -0,0 +1,246 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/upsd/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/upsd/metadata.yaml"
+sidebar_label: "UPS (NUT)"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/UPS"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# UPS (NUT)
+
+
+<img src="https://netdata.cloud/img/plug-circle-bolt.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: upsd
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ups
+
+These metrics refer to the UPS unit.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| ups_name | UPS name. |
+| battery_type | Battery type (chemistry). "battery.type" variable value. |
+| device_model | Device model. "device.model" variable value. |
+| device_serial | Device serial number. "device.serial" variable value. |
+| device_manufacturer | Device manufacturer. "device.mfr" variable value. |
+| device_type | Device type (ups, pdu, scd, psu, ats). "device.type" variable value. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| upsd.ups_load | load | percentage |
+| upsd.ups_load_usage | load_usage | Watts |
+| upsd.ups_status | on_line, on_battery, low_battery, high_battery, replace_battery, charging, discharging, bypass, calibration, offline, overloaded, trim_input_voltage, boost_input_voltage, forced_shutdown, other | status |
+| upsd.ups_temperature | temperature | Celsius |
+| upsd.ups_battery_charge | charge | percentage |
+| upsd.ups_battery_estimated_runtime | runtime | seconds |
+| upsd.ups_battery_voltage | voltage | Volts |
+| upsd.ups_battery_voltage_nominal | nominal_voltage | Volts |
+| upsd.ups_input_voltage | voltage | Volts |
+| upsd.ups_input_voltage_nominal | nominal_voltage | Volts |
+| upsd.ups_input_current | current | Ampere |
+| upsd.ups_input_current_nominal | nominal_current | Ampere |
+| upsd.ups_input_frequency | frequency | Hz |
+| upsd.ups_input_frequency_nominal | nominal_frequency | Hz |
+| upsd.ups_output_voltage | voltage | Volts |
+| upsd.ups_output_voltage_nominal | nominal_voltage | Volts |
+| upsd.ups_output_current | current | Ampere |
+| upsd.ups_output_current_nominal | nominal_current | Ampere |
+| upsd.ups_output_frequency | frequency | Hz |
+| upsd.ups_output_frequency_nominal | nominal_frequency | Hz |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ upsd_10min_ups_load ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} average load over the last 10 minutes |
+| [ upsd_ups_battery_charge ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_battery_charge | UPS ${label:ups_name} average battery charge over the last minute |
+| [ upsd_ups_last_collected_secs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf) | upsd.ups_load | UPS ${label:ups_name} number of seconds since the last successful data collection |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/upsd.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/upsd.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | UPS daemon address in IP:PORT format. | 127.0.0.1:3493 | yes |
+| timeout | Connection/read/write timeout in seconds. The timeout includes name resolution, if required. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:3493
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:3493
+
+ - name: remote
+ address: 203.0.113.0:3493
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `upsd` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m upsd
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `upsd` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep upsd
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep upsd /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep upsd
+```
+
+
diff --git a/src/go/plugin/go.d/modules/upsd/metadata.yaml b/src/go/plugin/go.d/modules/upsd/metadata.yaml
new file mode 100644
index 000000000..070b33852
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/metadata.yaml
@@ -0,0 +1,264 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-upsd
+ plugin_name: go.d.plugin
+ module_name: upsd
+ monitored_instance:
+ name: UPS (NUT)
+ link: ""
+ icon_filename: plug-circle-bolt.svg
+ categories:
+ - data-collection.ups
+ keywords:
+ - ups
+ - nut
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors Uninterruptible Power Supplies by polling the UPS daemon using the NUT network protocol.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/upsd.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: UPS daemon address in IP:PORT format.
+ default_value: 127.0.0.1:3493
+ required: true
+ - name: timeout
+ description: Connection/read/write timeout in seconds. The timeout includes name resolution, if required.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:3493
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:3493
+
+ - name: remote
+ address: 203.0.113.0:3493
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: upsd_10min_ups_load
+ metric: upsd.ups_load
+ info: "UPS ${label:ups_name} average load over the last 10 minutes"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf
+ - name: upsd_ups_battery_charge
+ metric: upsd.ups_battery_charge
+ info: "UPS ${label:ups_name} average battery charge over the last minute"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf
+ - name: upsd_ups_last_collected_secs
+ metric: upsd.ups_load
+ info: "UPS ${label:ups_name} number of seconds since the last successful data collection"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/upsd.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: ups
+ description: These metrics refer to the UPS unit.
+ labels:
+ - name: ups_name
+ description: UPS name.
+ - name: battery_type
+ description: Battery type (chemistry). "battery.type" variable value.
+ - name: device_model
+              description: Device model. "device.model" variable value.
+ - name: device_serial
+ description: Device serial number. "device.serial" variable value.
+ - name: device_manufacturer
+ description: Device manufacturer. "device.mfr" variable value.
+ - name: device_type
+ description: Device type (ups, pdu, scd, psu, ats). "device.type" variable value.
+ metrics:
+ - name: upsd.ups_load
+ description: UPS load
+ unit: percentage
+ chart_type: area
+ dimensions:
+ - name: load
+ - name: upsd.ups_load_usage
+ description: UPS load usage (power output)
+ unit: Watts
+ chart_type: line
+ dimensions:
+ - name: load_usage
+ - name: upsd.ups_status
+ description: UPS status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: on_line
+ - name: on_battery
+ - name: low_battery
+ - name: high_battery
+ - name: replace_battery
+ - name: charging
+ - name: discharging
+ - name: bypass
+ - name: calibration
+ - name: offline
+ - name: overloaded
+ - name: trim_input_voltage
+ - name: boost_input_voltage
+ - name: forced_shutdown
+ - name: other
+ - name: upsd.ups_temperature
+ description: UPS temperature
+ unit: Celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: upsd.ups_battery_charge
+ description: UPS Battery charge
+ unit: percentage
+ chart_type: area
+ dimensions:
+ - name: charge
+ - name: upsd.ups_battery_estimated_runtime
+ description: UPS Battery estimated runtime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: runtime
+ - name: upsd.ups_battery_voltage
+ description: UPS Battery voltage
+ unit: Volts
+ chart_type: line
+ dimensions:
+ - name: voltage
+ - name: upsd.ups_battery_voltage_nominal
+ description: UPS Battery voltage nominal
+ unit: Volts
+ chart_type: line
+ dimensions:
+ - name: nominal_voltage
+ - name: upsd.ups_input_voltage
+ description: UPS Input voltage
+ unit: Volts
+ chart_type: line
+ dimensions:
+ - name: voltage
+ - name: upsd.ups_input_voltage_nominal
+ description: UPS Input voltage nominal
+ unit: Volts
+ chart_type: line
+ dimensions:
+ - name: nominal_voltage
+ - name: upsd.ups_input_current
+ description: UPS Input current
+ unit: Ampere
+ chart_type: line
+ dimensions:
+ - name: current
+ - name: upsd.ups_input_current_nominal
+ description: UPS Input current nominal
+ unit: Ampere
+ chart_type: line
+ dimensions:
+ - name: nominal_current
+ - name: upsd.ups_input_frequency
+ description: UPS Input frequency
+ unit: Hz
+ chart_type: line
+ dimensions:
+ - name: frequency
+ - name: upsd.ups_input_frequency_nominal
+ description: UPS Input frequency nominal
+ unit: Hz
+ chart_type: line
+ dimensions:
+ - name: nominal_frequency
+ - name: upsd.ups_output_voltage
+ description: UPS Output voltage
+ unit: Volts
+ chart_type: line
+ dimensions:
+ - name: voltage
+ - name: upsd.ups_output_voltage_nominal
+ description: UPS Output voltage nominal
+ unit: Volts
+ chart_type: line
+ dimensions:
+ - name: nominal_voltage
+ - name: upsd.ups_output_current
+ description: UPS Output current
+ unit: Ampere
+ chart_type: line
+ dimensions:
+ - name: current
+ - name: upsd.ups_output_current_nominal
+ description: UPS Output current nominal
+ unit: Ampere
+ chart_type: line
+ dimensions:
+ - name: nominal_current
+ - name: upsd.ups_output_frequency
+ description: UPS Output frequency
+ unit: Hz
+ chart_type: line
+ dimensions:
+ - name: frequency
+ - name: upsd.ups_output_frequency_nominal
+ description: UPS Output frequency nominal
+ unit: Hz
+ chart_type: line
+ dimensions:
+ - name: nominal_frequency
diff --git a/src/go/plugin/go.d/modules/upsd/testdata/config.json b/src/go/plugin/go.d/modules/upsd/testdata/config.json
new file mode 100644
index 000000000..ab7a8654c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/testdata/config.json
@@ -0,0 +1,7 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "username": "ok",
+ "password": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/upsd/testdata/config.yaml b/src/go/plugin/go.d/modules/upsd/testdata/config.yaml
new file mode 100644
index 000000000..276370415
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+address: "ok"
+username: "ok"
+password: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/upsd/upsd.go b/src/go/plugin/go.d/modules/upsd/upsd.go
new file mode 100644
index 000000000..752697faa
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/upsd.go
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package upsd
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("upsd", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Upsd {
+ return &Upsd{
+ Config: Config{
+ Address: "127.0.0.1:3493",
+ Timeout: web.Duration(time.Second * 2),
+ },
+ newUpsdConn: newUpsdConn,
+ charts: &module.Charts{},
+ upsUnits: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ Username string `yaml:"username,omitempty" json:"username"`
+ Password string `yaml:"password,omitempty" json:"password"`
+}
+
+type (
+ Upsd struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ conn upsdConn
+ newUpsdConn func(Config) upsdConn
+
+ upsUnits map[string]bool
+ }
+
+ upsdConn interface {
+ connect() error
+ disconnect() error
+ authenticate(string, string) error
+ upsUnits() ([]upsUnit, error)
+ }
+)
+
+func (u *Upsd) Configuration() any {
+ return u.Config
+}
+
+func (u *Upsd) Init() error {
+ if u.Address == "" {
+ u.Error("config: 'address' not set")
+ return errors.New("address not set")
+ }
+
+ return nil
+}
+
+func (u *Upsd) Check() error {
+ mx, err := u.collect()
+ if err != nil {
+ u.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (u *Upsd) Charts() *module.Charts {
+ return u.charts
+}
+
+func (u *Upsd) Collect() map[string]int64 {
+ mx, err := u.collect()
+ if err != nil {
+ u.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (u *Upsd) Cleanup() {
+ if u.conn == nil {
+ return
+ }
+ if err := u.conn.disconnect(); err != nil {
+ u.Warningf("error on disconnect: %v", err)
+ }
+ u.conn = nil
+}
diff --git a/src/go/plugin/go.d/modules/upsd/upsd_test.go b/src/go/plugin/go.d/modules/upsd/upsd_test.go
new file mode 100644
index 000000000..e654aa90e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/upsd_test.go
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package upsd
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestUpsd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Upsd{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestUpsd_Cleanup(t *testing.T) {
+ upsd := New()
+
+ require.NotPanics(t, upsd.Cleanup)
+
+ mock := prepareMockConnOK()
+ upsd.newUpsdConn = func(Config) upsdConn { return mock }
+
+ require.NoError(t, upsd.Init())
+ _ = upsd.Collect()
+ require.NotPanics(t, upsd.Cleanup)
+ assert.True(t, mock.calledDisconnect)
+}
+
+func TestUpsd_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success on default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails when 'address' option not set": {
+ wantFail: true,
+ config: Config{Address: ""},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ upsd := New()
+ upsd.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, upsd.Init())
+ } else {
+ assert.NoError(t, upsd.Init())
+ }
+ })
+ }
+}
+
+func TestUpsd_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareUpsd func() *Upsd
+ prepareMock func() *mockUpsdConn
+ wantFail bool
+ }{
+ "successful data collection": {
+ wantFail: false,
+ prepareUpsd: New,
+ prepareMock: prepareMockConnOK,
+ },
+ "error on connect()": {
+ wantFail: true,
+ prepareUpsd: New,
+ prepareMock: prepareMockConnErrOnConnect,
+ },
+ "error on authenticate()": {
+ wantFail: true,
+ prepareUpsd: func() *Upsd {
+ upsd := New()
+ upsd.Username = "user"
+ upsd.Password = "pass"
+ return upsd
+ },
+ prepareMock: prepareMockConnErrOnAuthenticate,
+ },
+		"error on upsUnits()": {
+ wantFail: true,
+ prepareUpsd: New,
+ prepareMock: prepareMockConnErrOnUpsUnits,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ upsd := test.prepareUpsd()
+ upsd.newUpsdConn = func(Config) upsdConn { return test.prepareMock() }
+
+ require.NoError(t, upsd.Init())
+
+ if test.wantFail {
+ assert.Error(t, upsd.Check())
+ } else {
+ assert.NoError(t, upsd.Check())
+ }
+ })
+ }
+}
+
+func TestUpsd_Charts(t *testing.T) {
+ upsd := New()
+ require.NoError(t, upsd.Init())
+ assert.NotNil(t, upsd.Charts())
+}
+
+func TestUpsd_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareUpsd func() *Upsd
+ prepareMock func() *mockUpsdConn
+ wantCollected map[string]int64
+ wantCharts int
+ wantConnConnect bool
+ wantConnDisconnect bool
+ wantConnAuthenticate bool
+ }{
+ "successful data collection": {
+ prepareUpsd: New,
+ prepareMock: prepareMockConnOK,
+ wantCollected: map[string]int64{
+ "ups_cp1500_battery.charge": 10000,
+ "ups_cp1500_battery.runtime": 489000,
+ "ups_cp1500_battery.voltage": 2400,
+ "ups_cp1500_battery.voltage.nominal": 2400,
+ "ups_cp1500_input.voltage": 22700,
+ "ups_cp1500_input.voltage.nominal": 23000,
+ "ups_cp1500_output.voltage": 26000,
+ "ups_cp1500_ups.load": 800,
+ "ups_cp1500_ups.load.usage": 4300,
+ "ups_cp1500_ups.realpower.nominal": 90000,
+ "ups_cp1500_ups.status.BOOST": 0,
+ "ups_cp1500_ups.status.BYPASS": 0,
+ "ups_cp1500_ups.status.CAL": 0,
+ "ups_cp1500_ups.status.CHRG": 0,
+ "ups_cp1500_ups.status.DISCHRG": 0,
+ "ups_cp1500_ups.status.FSD": 0,
+ "ups_cp1500_ups.status.HB": 0,
+ "ups_cp1500_ups.status.LB": 0,
+ "ups_cp1500_ups.status.OB": 0,
+ "ups_cp1500_ups.status.OFF": 0,
+ "ups_cp1500_ups.status.OL": 1,
+ "ups_cp1500_ups.status.OVER": 0,
+ "ups_cp1500_ups.status.RB": 0,
+ "ups_cp1500_ups.status.TRIM": 0,
+ "ups_cp1500_ups.status.other": 0,
+ "ups_pr3000_battery.charge": 10000,
+ "ups_pr3000_battery.runtime": 110800,
+ "ups_pr3000_battery.voltage": 5990,
+ "ups_pr3000_battery.voltage.nominal": 4800,
+ "ups_pr3000_input.voltage": 22500,
+ "ups_pr3000_input.voltage.nominal": 23000,
+ "ups_pr3000_output.voltage": 22500,
+ "ups_pr3000_ups.load": 2800,
+ "ups_pr3000_ups.load.usage": 84000,
+ "ups_pr3000_ups.realpower.nominal": 300000,
+ "ups_pr3000_ups.status.BOOST": 0,
+ "ups_pr3000_ups.status.BYPASS": 0,
+ "ups_pr3000_ups.status.CAL": 0,
+ "ups_pr3000_ups.status.CHRG": 0,
+ "ups_pr3000_ups.status.DISCHRG": 0,
+ "ups_pr3000_ups.status.FSD": 0,
+ "ups_pr3000_ups.status.HB": 0,
+ "ups_pr3000_ups.status.LB": 0,
+ "ups_pr3000_ups.status.OB": 0,
+ "ups_pr3000_ups.status.OFF": 0,
+ "ups_pr3000_ups.status.OL": 1,
+ "ups_pr3000_ups.status.OVER": 0,
+ "ups_pr3000_ups.status.RB": 0,
+ "ups_pr3000_ups.status.TRIM": 0,
+ "ups_pr3000_ups.status.other": 0,
+ },
+ wantCharts: 20,
+ wantConnConnect: true,
+ wantConnDisconnect: false,
+ wantConnAuthenticate: false,
+ },
+ "error on connect()": {
+ prepareUpsd: New,
+ prepareMock: prepareMockConnErrOnConnect,
+ wantCollected: nil,
+ wantCharts: 0,
+ wantConnConnect: true,
+ wantConnDisconnect: false,
+ wantConnAuthenticate: false,
+ },
+ "error on authenticate()": {
+ prepareUpsd: func() *Upsd {
+ upsd := New()
+ upsd.Username = "user"
+ upsd.Password = "pass"
+ return upsd
+ },
+ prepareMock: prepareMockConnErrOnAuthenticate,
+ wantCollected: nil,
+ wantCharts: 0,
+ wantConnConnect: true,
+ wantConnDisconnect: true,
+ wantConnAuthenticate: true,
+ },
+		"error on upsUnits()": {
+ prepareUpsd: New,
+ prepareMock: prepareMockConnErrOnUpsUnits,
+ wantCollected: nil,
+ wantCharts: 0,
+ wantConnConnect: true,
+ wantConnDisconnect: true,
+ wantConnAuthenticate: false,
+ },
+		"command error on upsUnits() (unknown ups)": {
+ prepareUpsd: New,
+ prepareMock: prepareMockConnCommandErrOnUpsUnits,
+ wantCollected: nil,
+ wantCharts: 0,
+ wantConnConnect: true,
+ wantConnDisconnect: false,
+ wantConnAuthenticate: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ upsd := test.prepareUpsd()
+ require.NoError(t, upsd.Init())
+
+ mock := test.prepareMock()
+ upsd.newUpsdConn = func(Config) upsdConn { return mock }
+
+ mx := upsd.Collect()
+
+ assert.Equal(t, test.wantCollected, mx)
+ assert.Equalf(t, test.wantCharts, len(*upsd.Charts()), "number of charts")
+ if len(test.wantCollected) > 0 {
+ ensureCollectedHasAllChartsDims(t, upsd, mx)
+ }
+ assert.Equalf(t, test.wantConnConnect, mock.calledConnect, "calledConnect")
+ assert.Equalf(t, test.wantConnDisconnect, mock.calledDisconnect, "calledDisconnect")
+ assert.Equal(t, test.wantConnAuthenticate, mock.calledAuthenticate, "calledAuthenticate")
+ })
+ }
+}
+
+func ensureCollectedHasAllChartsDims(t *testing.T, upsd *Upsd, mx map[string]int64) {
+ for _, chart := range *upsd.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareMockConnOK() *mockUpsdConn {
+ return &mockUpsdConn{}
+}
+
+func prepareMockConnErrOnConnect() *mockUpsdConn {
+ return &mockUpsdConn{errOnConnect: true}
+}
+
+func prepareMockConnErrOnAuthenticate() *mockUpsdConn {
+ return &mockUpsdConn{errOnAuthenticate: true}
+}
+
+func prepareMockConnErrOnUpsUnits() *mockUpsdConn {
+ return &mockUpsdConn{errOnUpsUnits: true}
+}
+
+func prepareMockConnCommandErrOnUpsUnits() *mockUpsdConn {
+ return &mockUpsdConn{commandErrOnUpsUnits: true}
+}
+
+type mockUpsdConn struct {
+ errOnConnect bool
+ errOnDisconnect bool
+ errOnAuthenticate bool
+ errOnUpsUnits bool
+ commandErrOnUpsUnits bool
+
+ calledConnect bool
+ calledDisconnect bool
+ calledAuthenticate bool
+}
+
+func (m *mockUpsdConn) connect() error {
+ m.calledConnect = true
+ if m.errOnConnect {
+ return errors.New("mock error on connect()")
+ }
+ return nil
+}
+
+func (m *mockUpsdConn) disconnect() error {
+ m.calledDisconnect = true
+ if m.errOnDisconnect {
+ return errors.New("mock error on disconnect()")
+ }
+ return nil
+}
+
+func (m *mockUpsdConn) authenticate(_, _ string) error {
+ m.calledAuthenticate = true
+ if m.errOnAuthenticate {
+ return errors.New("mock error on authenticate()")
+ }
+ return nil
+}
+
+func (m *mockUpsdConn) upsUnits() ([]upsUnit, error) {
+ if m.errOnUpsUnits {
+ return nil, errors.New("mock error on upsUnits()")
+ }
+ if m.commandErrOnUpsUnits {
+ return nil, fmt.Errorf("%w: mock command error on upsUnits()", errUpsdCommand)
+ }
+
+ upsUnits := []upsUnit{
+ {
+ name: "pr3000",
+ vars: map[string]string{
+ "battery.charge": "100",
+ "battery.charge.warning": "35",
+ "battery.mfr.date": "CPS",
+ "battery.runtime": "1108",
+ "battery.runtime.low": "300",
+ "battery.type": "PbAcid",
+ "battery.voltage": "59.9",
+ "battery.voltage.nominal": "48",
+ "device.mfr": "CPS",
+ "device.model": "PR3000ERT2U",
+ "device.serial": "P11MQ2000041",
+ "device.type": "ups",
+ "driver.name": "usbhid-ups",
+ "driver.parameter.pollfreq": "30",
+ "driver.parameter.pollinterval": "2",
+ "driver.parameter.port": "auto",
+ "driver.parameter.synchronous": "no",
+ "driver.version": "2.7.4",
+ "driver.version.data": "CyberPower HID 0.4",
+ "driver.version.internal": "0.41",
+ "input.voltage": "225.0",
+ "input.voltage.nominal": "230",
+ "output.voltage": "225.0",
+ "ups.beeper.status": "enabled",
+ "ups.delay.shutdown": "20",
+ "ups.delay.start": "30",
+ "ups.load": "28",
+ "ups.mfr": "CPS",
+ "ups.model": "PR3000ERT2U",
+ "ups.productid": "0601",
+ "ups.realpower.nominal": "3000",
+ "ups.serial": "P11MQ2000041",
+ "ups.status": "OL",
+ "ups.test.result": "No test initiated",
+ "ups.timer.shutdown": "0",
+ "ups.timer.start": "0",
+ "ups.vendorid": "0764",
+ },
+ },
+ {
+ name: "cp1500",
+ vars: map[string]string{
+ "battery.charge": "100",
+ "battery.charge.low": "10",
+ "battery.charge.warning": "20",
+ "battery.mfr.date": "CPS",
+ "battery.runtime": "4890",
+ "battery.runtime.low": "300",
+ "battery.type": "PbAcid",
+ "battery.voltage": "24.0",
+ "battery.voltage.nominal": "24",
+ "device.mfr": "CPS",
+ "device.model": "CP1500EPFCLCD",
+ "device.serial": "CRMNO2000312",
+ "device.type": "ups",
+ "driver.name": "usbhid-ups",
+ "driver.parameter.bus": "001",
+ "driver.parameter.pollfreq": "30",
+ "driver.parameter.pollinterval": "2",
+ "driver.parameter.port": "auto",
+ "driver.parameter.product": "CP1500EPFCLCD",
+ "driver.parameter.productid": "0501",
+ "driver.parameter.serial": "CRMNO2000312",
+ "driver.parameter.synchronous": "no",
+ "driver.parameter.vendor": "CPS",
+ "driver.parameter.vendorid": "0764",
+ "driver.version": "2.7.4",
+ "driver.version.data": "CyberPower HID 0.4",
+ "driver.version.internal": "0.41",
+ "input.transfer.high": "260",
+ "input.transfer.low": "170",
+ "input.voltage": "227.0",
+ "input.voltage.nominal": "230",
+ "output.voltage": "260.0",
+ "ups.beeper.status": "enabled",
+ "ups.delay.shutdown": "20",
+ "ups.delay.start": "30",
+ "ups.load": "8",
+ "ups.mfr": "CPS",
+ "ups.model": "CP1500EPFCLCD",
+ "ups.productid": "0501",
+ "ups.realpower": "43",
+ "ups.realpower.nominal": "900",
+ "ups.serial": "CRMNO2000312",
+ "ups.status": "OL",
+ "ups.test.result": "No test initiated",
+ "ups.timer.shutdown": "-60",
+ "ups.timer.start": "-60",
+ "ups.vendorid": "0764",
+ },
+ },
+ }
+
+ return upsUnits, nil
+}
diff --git a/src/go/plugin/go.d/modules/upsd/variables.go b/src/go/plugin/go.d/modules/upsd/variables.go
new file mode 100644
index 000000000..9792e62b9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/upsd/variables.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package upsd
+
+const varPrecision = 100
+
+// https://networkupstools.org/docs/developer-guide.chunked/apas02.html
+const (
+ varBatteryCharge = "battery.charge"
+ varBatteryRuntime = "battery.runtime"
+ varBatteryVoltage = "battery.voltage"
+ varBatteryVoltageNominal = "battery.voltage.nominal"
+ varBatteryType = "battery.type"
+
+ varInputVoltage = "input.voltage"
+ varInputVoltageNominal = "input.voltage.nominal"
+ varInputCurrent = "input.current"
+ varInputCurrentNominal = "input.current.nominal"
+ varInputFrequency = "input.frequency"
+ varInputFrequencyNominal = "input.frequency.nominal"
+
+ varOutputVoltage = "output.voltage"
+ varOutputVoltageNominal = "output.voltage.nominal"
+ varOutputCurrent = "output.current"
+ varOutputCurrentNominal = "output.current.nominal"
+ varOutputFrequency = "output.frequency"
+ varOutputFrequencyNominal = "output.frequency.nominal"
+
+ varUpsLoad = "ups.load"
+ varUpsRealPower = "ups.realpower"
+ varUpsRealPowerNominal = "ups.realpower.nominal"
+ varUpsTemperature = "ups.temperature"
+ varUpsStatus = "ups.status"
+
+ varDeviceModel = "device.model"
+ varDeviceSerial = "device.serial"
+ varDeviceMfr = "device.mfr"
+ varDeviceType = "device.type"
+)
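+
+// Example values as reported by a NUT driver for these variables
+// (illustrative only; compare the mock UPS data in upsd_test.go):
+//
+//	battery.charge: 100
+//	battery.voltage: 24.0
+//	ups.load: 8
+//	ups.status: OL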
diff --git a/src/go/plugin/go.d/modules/uwsgi/README.md b/src/go/plugin/go.d/modules/uwsgi/README.md
new file mode 120000
index 000000000..44b855949
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/README.md
@@ -0,0 +1 @@
+integrations/uwsgi.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/uwsgi/charts.go b/src/go/plugin/go.d/modules/uwsgi/charts.go
new file mode 100644
index 000000000..d79b3938b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/charts.go
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package uwsgi
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioTransmittedData = module.Priority + iota
+ prioRequests
+ prioHarakiris
+ prioExceptions
+ prioRespawns
+
+ prioWorkerTransmittedData
+ prioWorkerRequests
+ prioWorkerDeltaRequests
+ prioWorkerAvgRequestTime
+ prioWorkerHarakiris
+ prioWorkerExceptions
+ prioWorkerStatus
+ prioWorkerRequestHandlingStatus
+ prioWorkerRespawns
+ prioWorkerMemoryRss
+ prioWorkerMemoryVsz
+)
+
+var charts = module.Charts{
+ transmittedDataChart.Copy(),
+ requestsChart.Copy(),
+ harakirisChart.Copy(),
+ exceptionsChart.Copy(),
+ respawnsChart.Copy(),
+}
+
+var (
+ transmittedDataChart = module.Chart{
+ ID: "transmitted_data",
+ Title: "UWSGI Transmitted Data",
+ Units: "bytes/s",
+ Fam: "workers",
+ Ctx: "uwsgi.transmitted_data",
+ Priority: prioTransmittedData,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "workers_tx", Name: "tx", Algo: module.Incremental},
+ },
+ }
+ requestsChart = module.Chart{
+ ID: "requests",
+ Title: "UWSGI Requests",
+ Units: "requests/s",
+ Fam: "workers",
+ Ctx: "uwsgi.requests",
+ Priority: prioRequests,
+ Dims: module.Dims{
+ {ID: "workers_requests", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ harakirisChart = module.Chart{
+ ID: "harakiris",
+ Title: "UWSGI Dropped Requests",
+ Units: "harakiris/s",
+ Fam: "workers",
+ Ctx: "uwsgi.harakiris",
+ Priority: prioHarakiris,
+ Dims: module.Dims{
+ {ID: "workers_harakiris", Name: "harakiris", Algo: module.Incremental},
+ },
+ }
+ exceptionsChart = module.Chart{
+ ID: "exceptions",
+ Title: "UWSGI Raised Exceptions",
+ Units: "exceptions/s",
+ Fam: "workers",
+ Ctx: "uwsgi.exceptions",
+ Priority: prioExceptions,
+ Dims: module.Dims{
+ {ID: "workers_exceptions", Name: "exceptions", Algo: module.Incremental},
+ },
+ }
+ respawnsChart = module.Chart{
+ ID: "respawns",
+ Title: "UWSGI Respawns",
+ Units: "respawns/s",
+ Fam: "workers",
+ Ctx: "uwsgi.respawns",
+ Priority: prioRespawns,
+ Dims: module.Dims{
+ {ID: "workers_respawns", Name: "respawns", Algo: module.Incremental},
+ },
+ }
+)
+
+var workerChartsTmpl = module.Charts{
+ workerTransmittedDataChartTmpl.Copy(),
+ workerRequestsChartTmpl.Copy(),
+ workerDeltaRequestsChartTmpl.Copy(),
+ workerAvgRequestTimeChartTmpl.Copy(),
+ workerHarakirisChartTmpl.Copy(),
+ workerExceptionsChartTmpl.Copy(),
+ workerStatusChartTmpl.Copy(),
+ workerRequestHandlingStatusChartTmpl.Copy(),
+ workerRespawnsChartTmpl.Copy(),
+ workerMemoryRssChartTmpl.Copy(),
+ workerMemoryVszChartTmpl.Copy(),
+}
+
+var (
+ workerTransmittedDataChartTmpl = module.Chart{
+ ID: "worker_%s_transmitted_data",
+ Title: "UWSGI Worker Transmitted Data",
+ Units: "bytes/s",
+ Fam: "wrk transmitted data",
+ Ctx: "uwsgi.worker_transmitted_data",
+ Priority: prioWorkerTransmittedData,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "worker_%s_tx", Name: "tx", Algo: module.Incremental},
+ },
+ }
+ workerRequestsChartTmpl = module.Chart{
+ ID: "worker_%s_requests",
+ Title: "UWSGI Worker Requests",
+ Units: "requests/s",
+ Fam: "wrk requests",
+ Ctx: "uwsgi.worker_requests",
+ Priority: prioWorkerRequests,
+ Dims: module.Dims{
+ {ID: "worker_%s_requests", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ workerDeltaRequestsChartTmpl = module.Chart{
+ ID: "worker_%s_delta_requests",
+ Title: "UWSGI Worker Delta Requests",
+ Units: "requests/s",
+ Fam: "wrk requests",
+ Ctx: "uwsgi.worker_delta_requests",
+ Priority: prioWorkerDeltaRequests,
+ Dims: module.Dims{
+ {ID: "worker_%s_delta_requests", Name: "delta_requests", Algo: module.Incremental},
+ },
+ }
+ workerAvgRequestTimeChartTmpl = module.Chart{
+ ID: "worker_%s_average_request_time",
+ Title: "UWSGI Worker Average Request Time",
+ Units: "milliseconds",
+ Fam: "wrk request time",
+ Ctx: "uwsgi.worker_average_request_time",
+ Priority: prioWorkerAvgRequestTime,
+ Dims: module.Dims{
+ {ID: "worker_%s_average_request_time", Name: "avg"},
+ },
+ }
+ workerHarakirisChartTmpl = module.Chart{
+ ID: "worker_%s_harakiris",
+ Title: "UWSGI Worker Dropped Requests",
+ Units: "harakiris/s",
+ Fam: "wrk harakiris",
+ Ctx: "uwsgi.worker_harakiris",
+ Priority: prioWorkerHarakiris,
+ Dims: module.Dims{
+ {ID: "worker_%s_harakiris", Name: "harakiris", Algo: module.Incremental},
+ },
+ }
+ workerExceptionsChartTmpl = module.Chart{
+ ID: "worker_%s_exceptions",
+ Title: "UWSGI Worker Raised Exceptions",
+ Units: "exceptions/s",
+ Fam: "wrk exceptions",
+ Ctx: "uwsgi.worker_exceptions",
+ Priority: prioWorkerExceptions,
+ Dims: module.Dims{
+ {ID: "worker_%s_exceptions", Name: "exceptions", Algo: module.Incremental},
+ },
+ }
+ workerStatusChartTmpl = module.Chart{
+ ID: "worker_%s_status",
+ Title: "UWSGI Worker Status",
+ Units: "status",
+ Fam: "wrk status",
+		Ctx:      "uwsgi.worker_status",
+ Priority: prioWorkerStatus,
+ Dims: module.Dims{
+ {ID: "worker_%s_status_idle", Name: "idle"},
+ {ID: "worker_%s_status_busy", Name: "busy"},
+ {ID: "worker_%s_status_cheap", Name: "cheap"},
+ {ID: "worker_%s_status_pause", Name: "pause"},
+ {ID: "worker_%s_status_sig", Name: "sig"},
+ },
+ }
+ workerRequestHandlingStatusChartTmpl = module.Chart{
+ ID: "worker_%s_request_handling_status",
+ Title: "UWSGI Worker Request Handling Status",
+ Units: "status",
+ Fam: "wrk status",
+		Ctx:      "uwsgi.worker_request_handling_status",
+ Priority: prioWorkerRequestHandlingStatus,
+ Dims: module.Dims{
+ {ID: "worker_%s_request_handling_status_accepting", Name: "accepting"},
+ {ID: "worker_%s_request_handling_status_not_accepting", Name: "not_accepting"},
+ },
+ }
+ workerRespawnsChartTmpl = module.Chart{
+ ID: "worker_%s_respawns",
+ Title: "UWSGI Worker Respawns",
+ Units: "respawns/s",
+ Fam: "wrk respawns",
+ Ctx: "uwsgi.worker_respawns",
+ Priority: prioWorkerRespawns,
+ Dims: module.Dims{
+ {ID: "worker_%s_respawns", Name: "respawns", Algo: module.Incremental},
+ },
+ }
+ workerMemoryRssChartTmpl = module.Chart{
+ ID: "worker_%s_memory_rss",
+ Title: "UWSGI Worker Memory RSS (Resident Set Size)",
+ Units: "bytes",
+ Fam: "wrk memory",
+ Ctx: "uwsgi.worker_memory_rss",
+ Priority: prioWorkerMemoryRss,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "worker_%s_memory_rss", Name: "rss"},
+ },
+ }
+ workerMemoryVszChartTmpl = module.Chart{
+ ID: "worker_%s_memory_vsz",
+ Title: "UWSGI Worker Memory VSZ (Virtual Memory Size)",
+ Units: "bytes",
+ Fam: "wrk memory",
+ Ctx: "uwsgi.worker_memory_vsz",
+ Priority: prioWorkerMemoryVsz,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "worker_%s_memory_vsz", Name: "vsz"},
+ },
+ }
+)
+
+func (u *Uwsgi) addWorkerCharts(workerID int) {
+ charts := workerChartsTmpl.Copy()
+
+ id := strconv.Itoa(workerID)
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, id)
+ chart.Labels = []module.Label{
+ {Key: "worker_id", Value: id},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, id)
+ }
+ }
+
+ if err := u.Charts().Add(*charts...); err != nil {
+ u.Warning(err)
+ }
+}
+
+func (u *Uwsgi) removeWorkerCharts(workerID int) {
+ px := fmt.Sprintf("worker_%d_", workerID)
+
+ for _, chart := range *u.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/uwsgi/client.go b/src/go/plugin/go.d/modules/uwsgi/client.go
new file mode 100644
index 000000000..403680743
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/client.go
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package uwsgi
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
+type uwsgiConn interface {
+ connect() error
+ disconnect()
+ queryStats() ([]byte, error)
+}
+
+func newUwsgiConn(conf Config) uwsgiConn {
+ return &uwsgiClient{conn: socket.New(socket.Config{
+ Address: conf.Address,
+ ConnectTimeout: conf.Timeout.Duration(),
+ ReadTimeout: conf.Timeout.Duration(),
+ WriteTimeout: conf.Timeout.Duration(),
+ })}
+}
+
+type uwsgiClient struct {
+ conn socket.Client
+}
+
+func (c *uwsgiClient) connect() error {
+ return c.conn.Connect()
+}
+
+func (c *uwsgiClient) disconnect() {
+ _ = c.conn.Disconnect()
+}
+
+func (c *uwsgiClient) queryStats() ([]byte, error) {
+ var b bytes.Buffer
+ var n int64
+ var err error
+ const readLineLimit = 1000 * 10
+
+ clientErr := c.conn.Command("", func(bs []byte) bool {
+ b.Write(bs)
+ b.WriteByte('\n')
+
+ if n++; n >= readLineLimit {
+ err = fmt.Errorf("read line limit exceeded %d", readLineLimit)
+ return false
+ }
+ // The server will close the connection when it has finished sending data.
+ return true
+ })
+ if clientErr != nil {
+ return nil, clientErr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return b.Bytes(), nil
+}
diff --git a/src/go/plugin/go.d/modules/uwsgi/collect.go b/src/go/plugin/go.d/modules/uwsgi/collect.go
new file mode 100644
index 000000000..3f4405354
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/collect.go
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package uwsgi
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type statsResponse struct {
+ Workers []workerStats `json:"workers"`
+}
+
+type workerStats struct {
+ ID int `json:"id"`
+ Accepting int64 `json:"accepting"`
+ Requests int64 `json:"requests"`
+ DeltaRequests int64 `json:"delta_requests"`
+ Exceptions int64 `json:"exceptions"`
+ HarakiriCount int64 `json:"harakiri_count"`
+ Status string `json:"status"`
+ RSS int64 `json:"rss"`
+ VSZ int64 `json:"vsz"`
+ RespawnCount int64 `json:"respawn_count"`
+ TX int64 `json:"tx"`
+ AvgRT int64 `json:"avg_rt"`
+}
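+
+// A minimal example of the stats server payload consumed here (illustrative
+// subset only; see testdata/stats.json for a full response):
+//
+//	{"workers":[{"id":1,"accepting":1,"requests":1,"status":"idle","rss":1,"vsz":1,"respawn_count":1,"tx":1,"avg_rt":1}]}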
+
+func (u *Uwsgi) collect() (map[string]int64, error) {
+ conn, err := u.establishConn()
+ if err != nil {
+ return nil, fmt.Errorf("failed to connect: %v", err)
+ }
+
+ defer conn.disconnect()
+
+ stats, err := conn.queryStats()
+ if err != nil {
+ return nil, fmt.Errorf("failed to query stats: %v", err)
+ }
+
+ mx := make(map[string]int64)
+
+ if err := u.collectStats(mx, stats); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
+func (u *Uwsgi) collectStats(mx map[string]int64, stats []byte) error {
+ var resp statsResponse
+ if err := json.Unmarshal(stats, &resp); err != nil {
+ return fmt.Errorf("failed to json decode stats response: %v", err)
+ }
+
+ // stats server returns an empty array if there are no workers
+ if resp.Workers == nil {
+ return fmt.Errorf("unexpected stats response: no workers found")
+ }
+
+ seen := make(map[int]bool)
+
+ mx["workers_tx"] = 0
+ mx["workers_requests"] = 0
+ mx["workers_harakiris"] = 0
+ mx["workers_exceptions"] = 0
+ mx["workers_respawns"] = 0
+
+ for _, w := range resp.Workers {
+ mx["workers_tx"] += w.TX
+ mx["workers_requests"] += w.Requests
+ mx["workers_harakiris"] += w.HarakiriCount
+ mx["workers_exceptions"] += w.Exceptions
+ mx["workers_respawns"] += w.RespawnCount
+
+ seen[w.ID] = true
+
+ if !u.seenWorkers[w.ID] {
+ u.seenWorkers[w.ID] = true
+ u.addWorkerCharts(w.ID)
+ }
+
+ px := fmt.Sprintf("worker_%d_", w.ID)
+
+ mx[px+"tx"] = w.TX
+ mx[px+"requests"] = w.Requests
+ mx[px+"delta_requests"] = w.DeltaRequests
+ mx[px+"average_request_time"] = w.AvgRT
+ mx[px+"harakiris"] = w.HarakiriCount
+ mx[px+"exceptions"] = w.Exceptions
+ mx[px+"respawns"] = w.RespawnCount
+ mx[px+"memory_rss"] = w.RSS
+ mx[px+"memory_vsz"] = w.VSZ
+
+ for _, v := range []string{"idle", "busy", "cheap", "pause", "sig"} {
+ mx[px+"status_"+v] = boolToInt(w.Status == v)
+ }
+ mx[px+"request_handling_status_accepting"] = boolToInt(w.Accepting == 1)
+ mx[px+"request_handling_status_not_accepting"] = boolToInt(w.Accepting == 0)
+ }
+
+ for id := range u.seenWorkers {
+ if !seen[id] {
+ delete(u.seenWorkers, id)
+ u.removeWorkerCharts(id)
+ }
+ }
+
+ return nil
+}
+
+func (u *Uwsgi) establishConn() (uwsgiConn, error) {
+ conn := u.newConn(u.Config)
+
+ if err := conn.connect(); err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+func boolToInt(b bool) int64 {
+ if b {
+ return 1
+ }
+ return 0
+}
diff --git a/src/go/plugin/go.d/modules/uwsgi/config_schema.json b/src/go/plugin/go.d/modules/uwsgi/config_schema.json
new file mode 100644
index 000000000..14c750432
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/config_schema.json
@@ -0,0 +1,44 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "UWSGI collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the UWSGI [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:1717"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for establishing a connection and communication (reading and writing) in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/uwsgi/init.go b/src/go/plugin/go.d/modules/uwsgi/init.go
new file mode 100644
index 000000000..ab5999708
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/init.go
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package uwsgi
diff --git a/src/go/plugin/go.d/modules/uwsgi/integrations/uwsgi.md b/src/go/plugin/go.d/modules/uwsgi/integrations/uwsgi.md
new file mode 100644
index 000000000..6fe19263e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/integrations/uwsgi.md
@@ -0,0 +1,248 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/uwsgi/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/uwsgi/metadata.yaml"
+sidebar_label: "uWSGI"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# uWSGI
+
+
+<img src="https://netdata.cloud/img/uwsgi.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: uwsgi
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+Monitors UWSGI worker health and performance by collecting metrics like requests, transmitted data, exceptions, and harakiris.
+
+
+It fetches [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) statistics over TCP.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+Automatically discovers and collects UWSGI statistics from the following default locations:
+
+- localhost:1717
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per uWSGI instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| uwsgi.transmitted_data | tx | bytes/s |
+| uwsgi.requests | requests | requests/s |
+| uwsgi.harakiris | harakiris | harakiris/s |
+| uwsgi.exceptions | exceptions | exceptions/s |
+| uwsgi.respawns | respawns | respawns/s |
+
+### Per worker
+
+These metrics refer to the Worker process.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| worker_id | Worker ID. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| uwsgi.worker_transmitted_data | tx | bytes/s |
+| uwsgi.worker_requests | requests | requests/s |
+| uwsgi.worker_delta_requests | delta_requests | requests/s |
+| uwsgi.worker_average_request_time | avg | milliseconds |
+| uwsgi.worker_harakiris | harakiris | harakiris/s |
+| uwsgi.worker_exceptions | exceptions | exceptions/s |
+| uwsgi.worker_status | idle, busy, cheap, pause, sig | status |
+| uwsgi.worker_request_handling_status | accepting, not_accepting | status |
+| uwsgi.worker_respawns | respawns | respawns/s |
+| uwsgi.worker_memory_rss | rss | bytes |
+| uwsgi.worker_memory_vsz | vsz | bytes |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Enable the uWSGI Stats Server
+
+See [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) for details.
+
+
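+For example, the stats server can be enabled directly in the uWSGI configuration so that it listens on the address the collector connects to (illustrative snippet; adjust the address to match the `address` option below):
+
+```ini
+[uwsgi]
+stats = 127.0.0.1:1717
+```
+
+The stats server dumps its JSON report and closes the connection, so you can check that it responds with, for example, `nc 127.0.0.1 1717`.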
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/uwsgi.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/uwsgi.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | The IP address and port where the UWSGI [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) listens for connections. | 127.0.0.1:1717 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:1717
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:1717
+
+ - name: remote
+ address: 203.0.113.0:1717
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `uwsgi` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m uwsgi
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `uwsgi` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep uwsgi
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep uwsgi /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep uwsgi
+```
+
+
diff --git a/src/go/plugin/go.d/modules/uwsgi/metadata.yaml b/src/go/plugin/go.d/modules/uwsgi/metadata.yaml
new file mode 100644
index 000000000..698d6abbf
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/metadata.yaml
@@ -0,0 +1,215 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-uwsgi
+ plugin_name: go.d.plugin
+ module_name: uwsgi
+ monitored_instance:
+ name: uWSGI
+ link: https://uwsgi-docs.readthedocs.io/en/latest/
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: "uwsgi.svg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - application server
+ - python
+ - web applications
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ Monitors UWSGI worker health and performance by collecting metrics like requests, transmitted data, exceptions, and harakiris.
+ method_description: |
+ It fetches [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) statistics over TCP.
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ Automatically discovers and collects UWSGI statistics from the following default locations:
+
+ - localhost:1717
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Enable the uWSGI Stats Server
+ description: |
+ See [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) for details.
+ configuration:
+ file:
+ name: go.d/uwsgi.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: "The IP address and port where the UWSGI [Stats Server](https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html) listens for connections."
+ default_value: 127.0.0.1:1717
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:1717
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:1717
+
+ - name: remote
+ address: 203.0.113.0:1717
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics refer to the entire monitored application."
+ labels: []
+ metrics:
+ - name: uwsgi.transmitted_data
+ description: UWSGI Transmitted Data
+ unit: "bytes/s"
+ chart_type: area
+ dimensions:
+ - name: tx
+ - name: uwsgi.requests
+ description: UWSGI Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: uwsgi.harakiris
+ description: UWSGI Dropped Requests
+ unit: "harakiris/s"
+ chart_type: line
+ dimensions:
+ - name: harakiris
+            - name: uwsgi.exceptions
+              description: UWSGI Raised Exceptions
+              unit: "exceptions/s"
+              chart_type: line
+              dimensions:
+                - name: exceptions
+            - name: uwsgi.respawns
+ description: UWSGI Respawns
+ unit: "respawns/s"
+ chart_type: line
+ dimensions:
+ - name: respawns
+ - name: worker
+ description: "These metrics refer to the Worker process."
+ labels:
+ - name: "worker_id"
+ description: Worker ID.
+ metrics:
+ - name: uwsgi.worker_transmitted_data
+ description: UWSGI Worker Transmitted Data
+ unit: "bytes/s"
+ chart_type: area
+ dimensions:
+ - name: tx
+ - name: uwsgi.worker_requests
+ description: UWSGI Worker Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: uwsgi.worker_delta_requests
+ description: UWSGI Worker Delta Requests
+ unit: "requests/s"
+ chart_type: line
+ dimensions:
+ - name: delta_requests
+ - name: uwsgi.worker_average_request_time
+ description: UWSGI Worker Average Request Time
+ unit: "milliseconds"
+ chart_type: line
+ dimensions:
+ - name: avg
+ - name: uwsgi.worker_harakiris
+ description: UWSGI Worker Dropped Requests
+ unit: "harakiris/s"
+ chart_type: line
+ dimensions:
+ - name: harakiris
+ - name: uwsgi.worker_exceptions
+ description: UWSGI Worker Raised Exceptions
+ unit: "exceptions/s"
+ chart_type: line
+ dimensions:
+ - name: exceptions
+ - name: uwsgi.worker_status
+ description: UWSGI Worker Status
+ unit: "status"
+ chart_type: line
+ dimensions:
+ - name: idle
+ - name: busy
+ - name: cheap
+ - name: pause
+ - name: sig
+ - name: uwsgi.worker_request_handling_status
+ description: UWSGI Worker Request Handling Status
+ unit: "status"
+ chart_type: line
+ dimensions:
+ - name: accepting
+ - name: not_accepting
+ - name: uwsgi.worker_respawns
+ description: UWSGI Worker Respawns
+ unit: "respawns/s"
+ chart_type: line
+ dimensions:
+ - name: respawns
+ - name: uwsgi.worker_memory_rss
+ description: UWSGI Worker Memory RSS (Resident Set Size)
+ unit: "bytes"
+ chart_type: area
+ dimensions:
+ - name: rss
+ - name: uwsgi.worker_memory_vsz
+ description: UWSGI Worker Memory VSZ (Virtual Memory Size)
+ unit: "bytes"
+ chart_type: area
+ dimensions:
+ - name: vsz
diff --git a/src/go/plugin/go.d/modules/uwsgi/testdata/config.json b/src/go/plugin/go.d/modules/uwsgi/testdata/config.json
new file mode 100644
index 000000000..e86834720
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123
+}
diff --git a/src/go/plugin/go.d/modules/uwsgi/testdata/config.yaml b/src/go/plugin/go.d/modules/uwsgi/testdata/config.yaml
new file mode 100644
index 000000000..1b81d09eb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
diff --git a/src/go/plugin/go.d/modules/uwsgi/testdata/stats.json b/src/go/plugin/go.d/modules/uwsgi/testdata/stats.json
new file mode 100644
index 000000000..d00a340ba
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/testdata/stats.json
@@ -0,0 +1,117 @@
+{
+ "version": "2.1.21-debian",
+ "listen_queue": 1,
+ "listen_queue_errors": 1,
+ "signal_queue": 1,
+ "load": 1,
+ "pid": 859919,
+ "uid": 1111,
+ "gid": 1111,
+ "cwd": "/home/ilyam",
+ "locks": [
+ {
+ "user 1": 1
+ },
+ {
+ "signal": 1
+ },
+ {
+ "filemon": 1
+ },
+ {
+ "timer": 1
+ },
+ {
+ "rbtimer": 1
+ },
+ {
+ "cron": 1
+ },
+ {
+ "rpc": 1
+ },
+ {
+ "snmp": 1
+ }
+ ],
+ "sockets": [
+ {
+ "name": ":3131",
+ "proto": "uwsgi",
+ "queue": 1,
+ "max_queue": 111,
+ "shared": 1,
+ "can_offload": 1
+ }
+ ],
+ "workers": [
+ {
+ "id": 1,
+ "pid": 859911,
+ "accepting": 1,
+ "requests": 1,
+ "delta_requests": 1,
+ "exceptions": 1,
+ "harakiri_count": 1,
+ "signals": 1,
+ "signal_queue": 1,
+ "status": "idle",
+ "rss": 1,
+ "vsz": 1,
+ "running_time": 1,
+ "last_spawn": 1723542786,
+ "respawn_count": 1,
+ "tx": 1,
+ "avg_rt": 1,
+ "apps": [],
+ "cores": [
+ {
+ "id": 1,
+ "requests": 1,
+ "static_requests": 1,
+ "routed_requests": 1,
+ "offloaded_requests": 1,
+ "write_errors": 1,
+ "read_errors": 1,
+ "in_request": 1,
+ "vars": [],
+ "req_info": {}
+ }
+ ]
+ },
+ {
+ "id": 2,
+ "pid": 859911,
+ "accepting": 1,
+ "requests": 1,
+ "delta_requests": 1,
+ "exceptions": 1,
+ "harakiri_count": 1,
+ "signals": 1,
+ "signal_queue": 1,
+ "status": "idle",
+ "rss": 1,
+ "vsz": 1,
+ "running_time": 1,
+ "last_spawn": 1723542786,
+ "respawn_count": 1,
+ "tx": 1,
+ "avg_rt": 1,
+ "apps": [],
+ "cores": [
+ {
+ "id": 1,
+ "requests": 1,
+ "static_requests": 1,
+ "routed_requests": 1,
+ "offloaded_requests": 1,
+ "write_errors": 1,
+ "read_errors": 1,
+ "in_request": 1,
+ "vars": [],
+ "req_info": {}
+ }
+ ]
+ }
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/uwsgi/testdata/stats_no_workers.json b/src/go/plugin/go.d/modules/uwsgi/testdata/stats_no_workers.json
new file mode 100644
index 000000000..8b8c782fd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/testdata/stats_no_workers.json
@@ -0,0 +1,49 @@
+{
+ "version": "2.0.21-debian",
+ "listen_queue": 0,
+ "listen_queue_errors": 0,
+ "signal_queue": 0,
+ "load": 0,
+ "pid": 1267323,
+ "uid": 1001,
+ "gid": 1001,
+ "cwd": "/home/ilyam",
+ "locks": [
+ {
+ "user 0": 0
+ },
+ {
+ "signal": 0
+ },
+ {
+ "filemon": 0
+ },
+ {
+ "timer": 0
+ },
+ {
+ "rbtimer": 0
+ },
+ {
+ "cron": 0
+ },
+ {
+ "rpc": 0
+ },
+ {
+ "snmp": 0
+ }
+ ],
+ "sockets": [
+ {
+ "name": ":3031",
+ "proto": "uwsgi",
+ "queue": 0,
+ "max_queue": 100,
+ "shared": 0,
+ "can_offload": 0
+ }
+ ],
+ "workers": [
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/uwsgi/uwsgi.go b/src/go/plugin/go.d/modules/uwsgi/uwsgi.go
new file mode 100644
index 000000000..7fe98503e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/uwsgi.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package uwsgi
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("uwsgi", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Uwsgi {
+ return &Uwsgi{
+ Config: Config{
+ Address: "127.0.0.1:1717",
+ Timeout: web.Duration(time.Second * 1),
+ },
+ newConn: newUwsgiConn,
+ charts: charts.Copy(),
+ seenWorkers: make(map[int]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+}
+
+type Uwsgi struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ newConn func(Config) uwsgiConn
+
+ seenWorkers map[int]bool
+}
+
+func (u *Uwsgi) Configuration() any {
+ return u.Config
+}
+
+func (u *Uwsgi) Init() error {
+ if u.Address == "" {
+ u.Error("config: 'address' not set")
+ return errors.New("address not set")
+ }
+
+ return nil
+}
+
+func (u *Uwsgi) Check() error {
+ mx, err := u.collect()
+ if err != nil {
+ u.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (u *Uwsgi) Charts() *module.Charts {
+ return u.charts
+}
+
+func (u *Uwsgi) Collect() map[string]int64 {
+ mx, err := u.collect()
+ if err != nil {
+ u.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (u *Uwsgi) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/uwsgi/uwsgi_test.go b/src/go/plugin/go.d/modules/uwsgi/uwsgi_test.go
new file mode 100644
index 000000000..900c48538
--- /dev/null
+++ b/src/go/plugin/go.d/modules/uwsgi/uwsgi_test.go
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package uwsgi
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStats, _ = os.ReadFile("testdata/stats.json")
+ dataStatsNoWorkers, _ = os.ReadFile("testdata/stats_no_workers.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStats": dataStats,
+ "dataStatsNoWorkers": dataStatsNoWorkers,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestUwsgi_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Uwsgi{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestUwsgi_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success with default config": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fails if address not set": {
+ wantFail: true,
+ config: func() Config {
+ conf := New().Config
+ conf.Address = ""
+ return conf
+ }(),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ uw := New()
+ uw.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, uw.Init())
+ } else {
+ assert.NoError(t, uw.Init())
+ }
+ })
+ }
+}
+
+func TestUwsgi_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *Uwsgi
+ }{
+ "not initialized": {
+ prepare: func() *Uwsgi {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *Uwsgi {
+ uw := New()
+ uw.newConn = func(config Config) uwsgiConn { return prepareMockOk() }
+ _ = uw.Check()
+ return uw
+ },
+ },
+ "after collect": {
+ prepare: func() *Uwsgi {
+ uw := New()
+ uw.newConn = func(config Config) uwsgiConn { return prepareMockOk() }
+ _ = uw.Collect()
+ return uw
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ uw := test.prepare()
+
+ assert.NotPanics(t, uw.Cleanup)
+ })
+ }
+}
+
+func TestUwsgi_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestUwsgi_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockUwsgiConn
+ wantFail bool
+ }{
+ "success case": {
+ wantFail: false,
+ prepareMock: prepareMockOk,
+ },
+ "success case no workers": {
+ wantFail: false,
+ prepareMock: prepareMockOkNoWorkers,
+ },
+ "err on connect": {
+ wantFail: true,
+ prepareMock: prepareMockErrOnConnect,
+ },
+ "unexpected response": {
+ wantFail: true,
+ prepareMock: prepareMockUnexpectedResponse,
+ },
+ "empty response": {
+ wantFail: true,
+ prepareMock: prepareMockEmptyResponse,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ uw := New()
+ mock := test.prepareMock()
+ uw.newConn = func(config Config) uwsgiConn { return mock }
+
+ if test.wantFail {
+ assert.Error(t, uw.Check())
+ } else {
+ assert.NoError(t, uw.Check())
+ }
+ })
+ }
+}
+
+func TestUwsgi_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockUwsgiConn
+ wantMetrics map[string]int64
+ wantCharts int
+ disconnectBeforeCleanup bool
+ disconnectAfterCleanup bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOk,
+ wantCharts: len(charts) + len(workerChartsTmpl)*2,
+ disconnectBeforeCleanup: true,
+ disconnectAfterCleanup: true,
+ wantMetrics: map[string]int64{
+ "worker_1_average_request_time": 1,
+ "worker_1_delta_requests": 1,
+ "worker_1_exceptions": 1,
+ "worker_1_harakiris": 1,
+ "worker_1_memory_rss": 1,
+ "worker_1_memory_vsz": 1,
+ "worker_1_request_handling_status_accepting": 1,
+ "worker_1_request_handling_status_not_accepting": 0,
+ "worker_1_requests": 1,
+ "worker_1_respawns": 1,
+ "worker_1_status_busy": 0,
+ "worker_1_status_cheap": 0,
+ "worker_1_status_idle": 1,
+ "worker_1_status_pause": 0,
+ "worker_1_status_sig": 0,
+ "worker_1_tx": 1,
+ "worker_2_average_request_time": 1,
+ "worker_2_delta_requests": 1,
+ "worker_2_exceptions": 1,
+ "worker_2_harakiris": 1,
+ "worker_2_memory_rss": 1,
+ "worker_2_memory_vsz": 1,
+ "worker_2_request_handling_status_accepting": 1,
+ "worker_2_request_handling_status_not_accepting": 0,
+ "worker_2_requests": 1,
+ "worker_2_respawns": 1,
+ "worker_2_status_busy": 0,
+ "worker_2_status_cheap": 0,
+ "worker_2_status_idle": 1,
+ "worker_2_status_pause": 0,
+ "worker_2_status_sig": 0,
+ "worker_2_tx": 1,
+ "workers_exceptions": 2,
+ "workers_harakiris": 2,
+ "workers_requests": 2,
+ "workers_respawns": 2,
+ "workers_tx": 2,
+ },
+ },
+ "success case no workers": {
+ prepareMock: prepareMockOkNoWorkers,
+ wantCharts: len(charts),
+ wantMetrics: map[string]int64{
+ "workers_exceptions": 0,
+ "workers_harakiris": 0,
+ "workers_requests": 0,
+ "workers_respawns": 0,
+ "workers_tx": 0,
+ },
+ disconnectBeforeCleanup: true,
+ disconnectAfterCleanup: true,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantCharts: len(charts),
+ disconnectBeforeCleanup: true,
+ disconnectAfterCleanup: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantCharts: len(charts),
+ disconnectBeforeCleanup: true,
+ disconnectAfterCleanup: true,
+ },
+ "err on connect": {
+ prepareMock: prepareMockErrOnConnect,
+ wantCharts: len(charts),
+ disconnectBeforeCleanup: false,
+ disconnectAfterCleanup: false,
+ },
+ "err on query stats": {
+ prepareMock: prepareMockErrOnQueryStats,
+ wantCharts: len(charts),
+ disconnectBeforeCleanup: true,
+ disconnectAfterCleanup: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ uw := New()
+ mock := test.prepareMock()
+ uw.newConn = func(config Config) uwsgiConn { return mock }
+
+ mx := uw.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ module.TestMetricsHasAllChartsDims(t, uw.Charts(), mx)
+ }
+ assert.Equal(t, test.wantCharts, len(*uw.Charts()), "want charts")
+
+ assert.Equal(t, test.disconnectBeforeCleanup, mock.disconnectCalled, "disconnect before cleanup")
+ uw.Cleanup()
+ assert.Equal(t, test.disconnectAfterCleanup, mock.disconnectCalled, "disconnect after cleanup")
+ })
+ }
+}
+
+func prepareMockOk() *mockUwsgiConn {
+ return &mockUwsgiConn{
+ statsResponse: dataStats,
+ }
+}
+
+func prepareMockOkNoWorkers() *mockUwsgiConn {
+ return &mockUwsgiConn{
+ statsResponse: dataStatsNoWorkers,
+ }
+}
+
+func prepareMockErrOnConnect() *mockUwsgiConn {
+ return &mockUwsgiConn{
+ errOnConnect: true,
+ }
+}
+
+func prepareMockErrOnQueryStats() *mockUwsgiConn {
+ return &mockUwsgiConn{
+ errOnQueryStats: true,
+ }
+}
+
+func prepareMockUnexpectedResponse() *mockUwsgiConn {
+ return &mockUwsgiConn{
+ statsResponse: []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit."),
+ }
+}
+
+func prepareMockEmptyResponse() *mockUwsgiConn {
+ return &mockUwsgiConn{}
+}
+
+type mockUwsgiConn struct {
+ errOnConnect bool
+ errOnQueryStats bool
+ statsResponse []byte
+ disconnectCalled bool
+}
+
+func (m *mockUwsgiConn) connect() error {
+ if m.errOnConnect {
+ return errors.New("mock.connect() error")
+ }
+ return nil
+}
+
+func (m *mockUwsgiConn) disconnect() {
+ m.disconnectCalled = true
+}
+
+func (m *mockUwsgiConn) queryStats() ([]byte, error) {
+ if m.errOnQueryStats {
+ return nil, errors.New("mock.queryStats() error")
+ }
+ return m.statsResponse, nil
+}
diff --git a/src/go/plugin/go.d/modules/vcsa/README.md b/src/go/plugin/go.d/modules/vcsa/README.md
new file mode 120000
index 000000000..0d00f4673
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/README.md
@@ -0,0 +1 @@
+integrations/vcenter_server_appliance.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/vcsa/charts.go b/src/go/plugin/go.d/modules/vcsa/charts.go
new file mode 100644
index 000000000..8d4103a10
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/charts.go
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vcsa
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+var (
+ vcsaHealthCharts = module.Charts{
+ systemHealthStatus.Copy(),
+ applMgmtHealthChart.Copy(),
+ loadHealthChart.Copy(),
+ memHealthChart.Copy(),
+ swapHealthChart.Copy(),
+ dbStorageHealthChart.Copy(),
+ storageHealthChart.Copy(),
+ softwarePackagesHealthChart.Copy(),
+ }
+
+ systemHealthStatus = module.Chart{
+ ID: "system_health_status",
+ Title: "VCSA Overall System health status",
+ Units: "status",
+ Fam: "system",
+ Ctx: "vcsa.system_health_status",
+ Dims: module.Dims{
+ {ID: "system_status_green", Name: "green"},
+ {ID: "system_status_red", Name: "red"},
+ {ID: "system_status_yellow", Name: "yellow"},
+ {ID: "system_status_orange", Name: "orange"},
+ {ID: "system_status_gray", Name: "gray"},
+ {ID: "system_status_unknown", Name: "unknown"},
+ },
+ }
+ applMgmtHealthChart = module.Chart{
+ ID: "applmgmt_health_status",
+ Title: "VCSA Appliance Management Service (applmgmt) health status",
+ Units: "status",
+ Fam: "appliance mgmt service",
+ Ctx: "vcsa.applmgmt_health_status",
+ Dims: module.Dims{
+ {ID: "applmgmt_status_green", Name: "green"},
+ {ID: "applmgmt_status_red", Name: "red"},
+ {ID: "applmgmt_status_yellow", Name: "yellow"},
+ {ID: "applmgmt_status_orange", Name: "orange"},
+ {ID: "applmgmt_status_gray", Name: "gray"},
+ {ID: "applmgmt_status_unknown", Name: "unknown"},
+ },
+ }
+ loadHealthChart = module.Chart{
+ ID: "load_health_status",
+ Title: "VCSA Load health status",
+ Units: "status",
+ Fam: "load",
+ Ctx: "vcsa.load_health_status",
+ Dims: module.Dims{
+ {ID: "load_status_green", Name: "green"},
+ {ID: "load_status_red", Name: "red"},
+ {ID: "load_status_yellow", Name: "yellow"},
+ {ID: "load_status_orange", Name: "orange"},
+ {ID: "load_status_gray", Name: "gray"},
+ {ID: "load_status_unknown", Name: "unknown"},
+ },
+ }
+ memHealthChart = module.Chart{
+ ID: "mem_health_status",
+ Title: "VCSA Memory health status",
+ Units: "status",
+ Fam: "mem",
+ Ctx: "vcsa.mem_health_status",
+ Dims: module.Dims{
+ {ID: "mem_status_green", Name: "green"},
+ {ID: "mem_status_red", Name: "red"},
+ {ID: "mem_status_yellow", Name: "yellow"},
+ {ID: "mem_status_orange", Name: "orange"},
+ {ID: "mem_status_gray", Name: "gray"},
+ {ID: "mem_status_unknown", Name: "unknown"},
+ },
+ }
+ swapHealthChart = module.Chart{
+ ID: "swap_health_status",
+ Title: "VCSA Swap health status",
+ Units: "status",
+ Fam: "swap",
+ Ctx: "vcsa.swap_health_status",
+ Dims: module.Dims{
+ {ID: "swap_status_green", Name: "green"},
+ {ID: "swap_status_red", Name: "red"},
+ {ID: "swap_status_yellow", Name: "yellow"},
+ {ID: "swap_status_orange", Name: "orange"},
+ {ID: "swap_status_gray", Name: "gray"},
+ {ID: "swap_status_unknown", Name: "unknown"},
+ },
+ }
+ dbStorageHealthChart = module.Chart{
+ ID: "database_storage_health_status",
+ Title: "VCSA Database Storage health status",
+ Units: "status",
+ Fam: "db storage",
+ Ctx: "vcsa.database_storage_health_status",
+ Dims: module.Dims{
+ {ID: "database_storage_status_green", Name: "green"},
+ {ID: "database_storage_status_red", Name: "red"},
+ {ID: "database_storage_status_yellow", Name: "yellow"},
+ {ID: "database_storage_status_orange", Name: "orange"},
+ {ID: "database_storage_status_gray", Name: "gray"},
+ {ID: "database_storage_status_unknown", Name: "unknown"},
+ },
+ }
+ storageHealthChart = module.Chart{
+ ID: "storage_health_status",
+ Title: "VCSA Storage health status",
+ Units: "status",
+ Fam: "storage",
+ Ctx: "vcsa.storage_health_status",
+ Dims: module.Dims{
+ {ID: "storage_status_green", Name: "green"},
+ {ID: "storage_status_red", Name: "red"},
+ {ID: "storage_status_yellow", Name: "yellow"},
+ {ID: "storage_status_orange", Name: "orange"},
+ {ID: "storage_status_gray", Name: "gray"},
+ {ID: "storage_status_unknown", Name: "unknown"},
+ },
+ }
+ softwarePackagesHealthChart = module.Chart{
+ ID: "software_packages_health_status",
+ Title: "VCSA Software Updates health status",
+ Units: "status",
+ Fam: "software packages",
+ Ctx: "vcsa.software_packages_health_status",
+ Dims: module.Dims{
+ {ID: "software_packages_status_green", Name: "green"},
+ {ID: "software_packages_status_red", Name: "red"},
+ {ID: "software_packages_status_orange", Name: "orange"},
+ {ID: "software_packages_status_gray", Name: "gray"},
+ {ID: "software_packages_status_unknown", Name: "unknown"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/vcsa/client/client.go b/src/go/plugin/go.d/modules/vcsa/client/client.go
new file mode 100644
index 000000000..ea0dd1618
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/client/client.go
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+// Session: https://vmware.github.io/vsphere-automation-sdk-rest/vsphere/index.html#SVC_com.vmware.cis.session
+// Health: https://vmware.github.io/vsphere-automation-sdk-rest/vsphere/index.html#SVC_com.vmware.appliance.health
+
+const (
+ pathCISSession = "/rest/com/vmware/cis/session"
+ pathHealthSystem = "/rest/appliance/health/system"
+ pathHealthSwap = "/rest/appliance/health/swap"
+ pathHealthStorage = "/rest/appliance/health/storage"
+ pathHealthSoftwarePackager = "/rest/appliance/health/software-packages"
+ pathHealthMem = "/rest/appliance/health/mem"
+ pathHealthLoad = "/rest/appliance/health/load"
+ pathHealthDatabaseStorage = "/rest/appliance/health/database-storage"
+ pathHealthApplMgmt = "/rest/appliance/health/applmgmt"
+
+ apiSessIDKey = "vmware-api-session-id"
+)
+
+type sessionToken struct {
+ m *sync.RWMutex
+ id string
+}
+
+func (s *sessionToken) set(id string) {
+ s.m.Lock()
+ defer s.m.Unlock()
+ s.id = id
+}
+
+func (s *sessionToken) get() string {
+ s.m.RLock()
+ defer s.m.RUnlock()
+ return s.id
+}
+
+func New(httpClient *http.Client, url, username, password string) *Client {
+ if httpClient == nil {
+ httpClient = &http.Client{}
+ }
+ return &Client{
+ httpClient: httpClient,
+ url: url,
+ username: username,
+ password: password,
+ token: &sessionToken{m: new(sync.RWMutex)},
+ }
+}
+
+type Client struct {
+ httpClient *http.Client
+
+ url string
+ username string
+ password string
+
+ token *sessionToken
+}
+
+// Login creates a session with the API. This operation exchanges user credentials supplied in the security context
+// for a session identifier that is to be used for authenticating subsequent calls.
+func (c *Client) Login() error {
+ req := web.Request{
+ URL: fmt.Sprintf("%s%s", c.url, pathCISSession),
+ Username: c.username,
+ Password: c.password,
+ Method: http.MethodPost,
+ }
+ s := struct{ Value string }{}
+
+ err := c.doOKWithDecode(req, &s)
+ if err == nil {
+ c.token.set(s.Value)
+ }
+ return err
+}
+
+// Logout terminates the validity of a session token.
+func (c *Client) Logout() error {
+ req := web.Request{
+ URL: fmt.Sprintf("%s%s", c.url, pathCISSession),
+ Method: http.MethodDelete,
+ Headers: map[string]string{apiSessIDKey: c.token.get()},
+ }
+
+ resp, err := c.doOK(req)
+ closeBody(resp)
+ c.token.set("")
+ return err
+}
+
+// Ping sends a request to the VCSA server to ensure the link is operating.
+// In case of a 401 error, Ping tries to re-authenticate.
+func (c *Client) Ping() error {
+ req := web.Request{
+ URL: fmt.Sprintf("%s%s?~action=get", c.url, pathCISSession),
+ Method: http.MethodPost,
+ Headers: map[string]string{apiSessIDKey: c.token.get()},
+ }
+ resp, err := c.doOK(req)
+ defer closeBody(resp)
+ if resp != nil && resp.StatusCode == http.StatusUnauthorized {
+ return c.Login()
+ }
+ return err
+}
+
+func (c *Client) health(urlPath string) (string, error) {
+ req := web.Request{
+ URL: fmt.Sprintf("%s%s", c.url, urlPath),
+ Headers: map[string]string{apiSessIDKey: c.token.get()},
+ }
+ s := struct{ Value string }{}
+ err := c.doOKWithDecode(req, &s)
+ return s.Value, err
+}
+
+// ApplMgmt provides health status of applmgmt services.
+func (c *Client) ApplMgmt() (string, error) {
+ return c.health(pathHealthApplMgmt)
+}
+
+// DatabaseStorage provides health status of database storage.
+func (c *Client) DatabaseStorage() (string, error) {
+ return c.health(pathHealthDatabaseStorage)
+}
+
+// Load provides health status of load.
+func (c *Client) Load() (string, error) {
+ return c.health(pathHealthLoad)
+}
+
+// Mem provides health status of memory.
+func (c *Client) Mem() (string, error) {
+ return c.health(pathHealthMem)
+}
+
+// SoftwarePackages provides information on software updates available in the remote VUM repository.
+// Red indicates that security updates are available.
+// Orange indicates that non-security updates are available.
+// Green indicates that there are no updates available.
+// Gray indicates that there was an error retrieving information on software updates.
+func (c *Client) SoftwarePackages() (string, error) {
+ return c.health(pathHealthSoftwarePackager)
+}
+
+// Storage provides health status of storage.
+func (c *Client) Storage() (string, error) {
+ return c.health(pathHealthStorage)
+}
+
+// Swap provides health status of swap.
+func (c *Client) Swap() (string, error) {
+ return c.health(pathHealthSwap)
+}
+
+// System provides overall health status of the system.
+func (c *Client) System() (string, error) {
+ return c.health(pathHealthSystem)
+}
+
+func (c *Client) do(req web.Request) (*http.Response, error) {
+ httpReq, err := web.NewHTTPRequest(req)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating http request to %s : %v", req.URL, err)
+ }
+ return c.httpClient.Do(httpReq)
+}
+
+func (c *Client) doOK(req web.Request) (*http.Response, error) {
+ resp, err := c.do(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.StatusCode != http.StatusOK {
+ return resp, fmt.Errorf("%s returned %d", req.URL, resp.StatusCode)
+ }
+ return resp, nil
+}
+
+func (c *Client) doOKWithDecode(req web.Request, dst interface{}) error {
+ resp, err := c.doOK(req)
+ defer closeBody(resp)
+ if err != nil {
+ return err
+ }
+
+ err = json.NewDecoder(resp.Body).Decode(dst)
+ if err != nil {
+ return fmt.Errorf("error on decoding response from %s : %v", req.URL, err)
+ }
+ return nil
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
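
A minimal usage sketch of the client package added above (not part of the patch). It shows the intended call order — `Login` to obtain a session id, one of the health helpers, then `Logout` — using the import path from this diff; the URL and credentials are placeholders.

```go
package main

import (
	"fmt"
	"log"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vcsa/client"
)

func main() {
	// Passing nil makes New fall back to a default http.Client.
	// Login stores the returned session id inside the client; it is then
	// attached to every subsequent request via the vmware-api-session-id header.
	cl := client.New(nil, "https://203.0.113.1", "admin@vsphere.local", "password")
	if err := cl.Login(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = cl.Logout() }()

	// Each helper maps to one /rest/appliance/health/* endpoint and returns
	// the reported status string ("green", "yellow", ...).
	status, err := cl.ApplMgmt()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("applmgmt health:", status)
}
```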
diff --git a/src/go/plugin/go.d/modules/vcsa/client/client_test.go b/src/go/plugin/go.d/modules/vcsa/client/client_test.go
new file mode 100644
index 000000000..379644b89
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/client/client_test.go
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ testUser = "user"
+ testPass = "pass"
+ testSessToken = "sessToken"
+ testHealthValue = "green"
+)
+
+func newTestClient(srvURL string) *Client {
+ return New(nil, srvURL, testUser, testPass)
+}
+
+func TestClient_Login(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ assert.NoError(t, cl.Login())
+ assert.Equal(t, testSessToken, cl.token.get())
+}
+
+func TestClient_LoginWrongCredentials(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+ cl.username += "!"
+
+ assert.Error(t, cl.Login())
+}
+
+func TestClient_Logout(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ assert.NoError(t, cl.Login())
+ assert.NoError(t, cl.Logout())
+ assert.Zero(t, cl.token.get())
+}
+
+func TestClient_Ping(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ assert.NoError(t, cl.Ping())
+}
+
+func TestClient_PingWithReAuthentication(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ cl.token.set("")
+ assert.NoError(t, cl.Ping())
+ assert.Equal(t, testSessToken, cl.token.get())
+}
+
+func TestClient_ApplMgmt(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ v, err := cl.ApplMgmt()
+ assert.NoError(t, err)
+ assert.Equal(t, testHealthValue, v)
+}
+
+func TestClient_DatabaseStorage(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ v, err := cl.DatabaseStorage()
+ assert.NoError(t, err)
+ assert.Equal(t, testHealthValue, v)
+}
+
+func TestClient_Load(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ v, err := cl.Load()
+ assert.NoError(t, err)
+ assert.Equal(t, testHealthValue, v)
+}
+
+func TestClient_Mem(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ v, err := cl.Mem()
+ assert.NoError(t, err)
+ assert.Equal(t, testHealthValue, v)
+}
+
+func TestClient_SoftwarePackages(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ v, err := cl.SoftwarePackages()
+ assert.NoError(t, err)
+ assert.Equal(t, testHealthValue, v)
+}
+
+func TestClient_Storage(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ v, err := cl.Storage()
+ assert.NoError(t, err)
+ assert.Equal(t, testHealthValue, v)
+}
+
+func TestClient_Swap(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ v, err := cl.Swap()
+ assert.NoError(t, err)
+ assert.Equal(t, testHealthValue, v)
+}
+
+func TestClient_System(t *testing.T) {
+ ts := newTestHTTPServer()
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ require.NoError(t, cl.Login())
+ v, err := cl.System()
+ assert.NoError(t, err)
+ assert.Equal(t, testHealthValue, v)
+}
+
+func TestClient_InvalidDataOnLogin(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello\n and goodbye!"))
+ }))
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ assert.Error(t, cl.Login())
+}
+
+func TestClient_404OnLogin(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(404)
+ }))
+ defer ts.Close()
+ cl := newTestClient(ts.URL)
+
+ assert.Error(t, cl.Login())
+}
+
+func newTestHTTPServer() *httptest.Server {
+ return httptest.NewServer(&mockVCSAServer{
+ username: testUser,
+ password: testPass,
+ sessionID: testSessToken,
+ })
+}
+
+type mockVCSAServer struct {
+ username string
+ password string
+ sessionID string
+}
+
+func (m mockVCSAServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Path {
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ case pathCISSession:
+ m.handleSession(w, r)
+ case
+ pathHealthApplMgmt,
+ pathHealthDatabaseStorage,
+ pathHealthLoad,
+ pathHealthMem,
+ pathHealthSoftwarePackager,
+ pathHealthStorage,
+ pathHealthSwap,
+ pathHealthSystem:
+ m.handleHealth(w, r)
+ }
+}
+
+func (m mockVCSAServer) handleHealth(w http.ResponseWriter, r *http.Request) {
+ if r.Method != http.MethodGet {
+ w.WriteHeader(http.StatusBadRequest)
+ return
+ }
+
+ if !m.isSessionAuthenticated(r) {
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(http.StatusOK)
+
+ s := struct{ Value string }{Value: testHealthValue}
+ b, _ := json.Marshal(s)
+ _, _ = w.Write(b)
+}
+
+func (m mockVCSAServer) handleSession(w http.ResponseWriter, r *http.Request) {
+ switch r.Method {
+ default:
+ w.WriteHeader(http.StatusBadRequest)
+ case http.MethodDelete:
+ m.handleSessionDelete(w, r)
+ case http.MethodPost:
+ if r.URL.RawQuery == "" {
+ m.handleSessionCreate(w, r)
+ } else {
+ m.handleSessionGet(w, r)
+ }
+ }
+}
+
+func (m mockVCSAServer) handleSessionCreate(w http.ResponseWriter, r *http.Request) {
+ if !m.isReqAuthenticated(r) {
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+ s := struct{ Value string }{Value: m.sessionID}
+ b, _ := json.Marshal(s)
+ _, _ = w.Write(b)
+}
+
+func (m mockVCSAServer) handleSessionGet(w http.ResponseWriter, r *http.Request) {
+ if !m.isSessionAuthenticated(r) {
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+ s := struct{ Value struct{ User string } }{Value: struct{ User string }{User: m.username}}
+ b, _ := json.Marshal(s)
+ _, _ = w.Write(b)
+}
+
+func (m mockVCSAServer) handleSessionDelete(w http.ResponseWriter, r *http.Request) {
+ if !m.isSessionAuthenticated(r) {
+ w.WriteHeader(http.StatusUnauthorized)
+ return
+ }
+ w.WriteHeader(http.StatusOK)
+}
+
+func (m mockVCSAServer) isReqAuthenticated(r *http.Request) bool {
+ u, p, ok := r.BasicAuth()
+ return ok && m.username == u && p == m.password
+}
+
+func (m mockVCSAServer) isSessionAuthenticated(r *http.Request) bool {
+ return r.Header.Get(apiSessIDKey) == m.sessionID
+}
diff --git a/src/go/plugin/go.d/modules/vcsa/collect.go b/src/go/plugin/go.d/modules/vcsa/collect.go
new file mode 100644
index 000000000..8a734d9e8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/collect.go
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vcsa
+
+import (
+ "sync"
+)
+
+var componentHealthStatuses = []string{"green", "red", "yellow", "orange", "gray"}
+var softwareHealthStatuses = []string{"green", "red", "orange", "gray"}
+
+type vcsaHealthStatus struct {
+ System *string
+ ApplMgmt *string
+ Load *string
+ Mem *string
+ Swap *string
+ DatabaseStorage *string
+ Storage *string
+ SoftwarePackages *string
+}
+
+func (vc *VCSA) collect() (map[string]int64, error) {
+ err := vc.client.Ping()
+ if err != nil {
+ return nil, err
+ }
+
+ var status vcsaHealthStatus
+ vc.scrapeHealth(&status)
+
+ mx := make(map[string]int64)
+
+ writeStatus(mx, "system", componentHealthStatuses, status.System)
+ writeStatus(mx, "applmgmt", componentHealthStatuses, status.ApplMgmt)
+ writeStatus(mx, "load", componentHealthStatuses, status.Load)
+ writeStatus(mx, "mem", componentHealthStatuses, status.Mem)
+ writeStatus(mx, "swap", componentHealthStatuses, status.Swap)
+ writeStatus(mx, "database_storage", componentHealthStatuses, status.DatabaseStorage)
+ writeStatus(mx, "storage", componentHealthStatuses, status.Storage)
+ writeStatus(mx, "software_packages", softwareHealthStatuses, status.SoftwarePackages)
+
+ return mx, nil
+}
+
+func (vc *VCSA) scrapeHealth(status *vcsaHealthStatus) {
+ wg := &sync.WaitGroup{}
+
+ scrape := func(fn func() (string, error), value **string) {
+ v, err := fn()
+ if err != nil {
+ vc.Error(err)
+ return
+ }
+ *value = &v
+ }
+
+ for _, fn := range []func(){
+ func() { scrape(vc.client.System, &status.System) },
+ func() { scrape(vc.client.ApplMgmt, &status.ApplMgmt) },
+ func() { scrape(vc.client.Load, &status.Load) },
+ func() { scrape(vc.client.DatabaseStorage, &status.DatabaseStorage) },
+ func() { scrape(vc.client.Storage, &status.Storage) },
+ func() { scrape(vc.client.Mem, &status.Mem) },
+ func() { scrape(vc.client.Swap, &status.Swap) },
+ func() { scrape(vc.client.SoftwarePackages, &status.SoftwarePackages) },
+ } {
+ fn := fn
+
+ wg.Add(1)
+ go func() { defer wg.Done(); fn() }()
+ }
+
+ wg.Wait()
+}
+
+func writeStatus(mx map[string]int64, key string, statuses []string, status *string) {
+ if status == nil {
+ return
+ }
+
+ var found bool
+ for _, s := range statuses {
+ mx[key+"_status_"+s] = boolToInt(s == *status)
+ found = found || s == *status
+ }
+ mx[key+"_status_unknown"] = boolToInt(!found)
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
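
A standalone sketch (not part of the patch) mirroring the `writeStatus` flattening above: for a given component, each known status becomes a 0/1 entry in the metric map, with an extra `unknown` dimension that fires only when the reported value is outside the known list.

```go
package main

import "fmt"

func main() {
	// Known statuses for a regular component (see componentHealthStatuses above).
	statuses := []string{"green", "red", "yellow", "orange", "gray"}
	reported := "yellow" // status string returned by the health endpoint

	mx := make(map[string]int64)
	found := false
	for _, s := range statuses {
		if s == reported {
			mx["mem_status_"+s] = 1
			found = true
		} else {
			mx["mem_status_"+s] = 0
		}
	}
	// Anything outside the known list is reported via the "unknown" dimension.
	if found {
		mx["mem_status_unknown"] = 0
	} else {
		mx["mem_status_unknown"] = 1
	}

	fmt.Println(mx)
	// map[mem_status_gray:0 mem_status_green:0 mem_status_orange:0 mem_status_red:0 mem_status_unknown:0 mem_status_yellow:1]
}
```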
diff --git a/src/go/plugin/go.d/modules/vcsa/config_schema.json b/src/go/plugin/go.d/modules/vcsa/config_schema.json
new file mode 100644
index 000000000..3302794c6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/config_schema.json
@@ -0,0 +1,186 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "vCenter Server Appliance collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the VCSA server.",
+ "type": "string",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "url": {
+ "ui:placeholder": "https://203.0.113.0"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:placeholder": "admin@vsphere.local",
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/vcsa/init.go b/src/go/plugin/go.d/modules/vcsa/init.go
new file mode 100644
index 000000000..20631ab48
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/init.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vcsa
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vcsa/client"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (vc *VCSA) validateConfig() error {
+ if vc.URL == "" {
+ return errors.New("URL not set")
+ }
+ if vc.Username == "" || vc.Password == "" {
+ return errors.New("username or password not set")
+ }
+ return nil
+}
+
+func (vc *VCSA) initHealthClient() (*client.Client, error) {
+ httpClient, err := web.NewHTTPClient(vc.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return client.New(httpClient, vc.URL, vc.Username, vc.Password), nil
+}
diff --git a/src/go/plugin/go.d/modules/vcsa/integrations/vcenter_server_appliance.md b/src/go/plugin/go.d/modules/vcsa/integrations/vcenter_server_appliance.md
new file mode 100644
index 000000000..99517af3e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/integrations/vcenter_server_appliance.md
@@ -0,0 +1,292 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/vcsa/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/vcsa/metadata.yaml"
+sidebar_label: "vCenter Server Appliance"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Containers and VMs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# vCenter Server Appliance
+
+
+<img src="https://netdata.cloud/img/vmware.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: vcsa
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per vCenter Server Appliance instance
+
+These metrics refer to the entire monitored application.
+<details>
+<summary>See health statuses</summary>
+Overall System Health:
+
+| Status | Description |
+|:-------:|:-------------------------------------------------------------------------------------------------------------------------|
+| green | All components in the appliance are healthy. |
+| yellow | One or more components in the appliance might become overloaded soon. |
+| orange | One or more components in the appliance might be degraded. |
+| red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |
+| gray | No health data is available. |
+| unknown | Collector failed to decode status. |
+
+Components Health:
+
+| Status | Description |
+|:-------:|:-------------------------------------------------------------|
+| green | The component is healthy. |
+| yellow | The component is healthy, but may have some problems. |
+| orange | The component is degraded, and may have serious problems. |
+| red | The component is unavailable, or will stop functioning soon. |
+| gray | No health data is available. |
+| unknown | Collector failed to decode status. |
+
+Software Updates Health:
+
+| Status | Description |
+|:-------:|:-----------------------------------------------------|
+| green | No updates available. |
+| orange | Non-security patches might be available. |
+| red | Security patches might be available. |
+| gray | An error retrieving information on software updates. |
+| unknown | Collector failed to decode status. |
+
+</details>
+
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| vcsa.system_health_status | green, red, yellow, orange, gray, unknown | status |
+| vcsa.applmgmt_health_status | green, red, yellow, orange, gray, unknown | status |
+| vcsa.load_health_status | green, red, yellow, orange, gray, unknown | status |
+| vcsa.mem_health_status | green, red, yellow, orange, gray, unknown | status |
+| vcsa.swap_health_status | green, red, yellow, orange, gray, unknown | status |
+| vcsa.database_storage_health_status | green, red, yellow, orange, gray, unknown | status |
+| vcsa.storage_health_status | green, red, yellow, orange, gray, unknown | status |
+| vcsa.software_packages_health_status | green, red, orange, gray, unknown | status |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ vcsa_system_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is orange. One or more components are degraded. |
+| [ vcsa_system_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.system_health_status | VCSA overall system status is red. One or more components are unavailable or will stop functioning soon. |
+| [ vcsa_applmgmt_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems. |
+| [ vcsa_applmgmt_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.applmgmt_health_status | VCSA ApplMgmt component status is red. It is unavailable, or will stop functioning soon. |
+| [ vcsa_load_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is orange. It is degraded, and may have serious problems. |
+| [ vcsa_load_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.load_health_status | VCSA Load component status is red. It is unavailable, or will stop functioning soon. |
+| [ vcsa_mem_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is orange. It is degraded, and may have serious problems. |
+| [ vcsa_mem_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.mem_health_status | VCSA Memory component status is red. It is unavailable, or will stop functioning soon. |
+| [ vcsa_swap_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is orange. It is degraded, and may have serious problems. |
+| [ vcsa_swap_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.swap_health_status | VCSA Swap component status is red. It is unavailable, or will stop functioning soon. |
+| [ vcsa_database_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is orange. It is degraded, and may have serious problems. |
+| [ vcsa_database_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.database_storage_health_status | VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon. |
+| [ vcsa_storage_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is orange. It is degraded, and may have serious problems. |
+| [ vcsa_storage_health_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.storage_health_status | VCSA Storage component status is red. It is unavailable, or will stop functioning soon. |
+| [ vcsa_software_packages_health_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf) | vcsa.software_packages_health_status | VCSA software packages security updates are available. |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/vcsa.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/vcsa.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | yes |
+| password | Password for basic HTTP authentication. | | yes |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | false | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | false | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: vcsa1
+ url: https://203.0.113.1
+ username: admin@vsphere.local
+ password: password
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Two instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: vcsa1
+ url: https://203.0.113.1
+ username: admin@vsphere.local
+ password: password
+
+ - name: vcsa2
+ url: https://203.0.113.10
+ username: admin@vsphere.local
+ password: password
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `vcsa` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m vcsa
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `vcsa` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep vcsa
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep vcsa /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep vcsa
+```
+
+
diff --git a/src/go/plugin/go.d/modules/vcsa/metadata.yaml b/src/go/plugin/go.d/modules/vcsa/metadata.yaml
new file mode 100644
index 000000000..d619f3d96
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/metadata.yaml
@@ -0,0 +1,346 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-vcsa
+ plugin_name: go.d.plugin
+ module_name: vcsa
+ monitored_instance:
+ name: vCenter Server Appliance
+ link: https://docs.vmware.com/en/VMware-vSphere/6.5/com.vmware.vsphere.vcsa.doc/GUID-223C2821-BD98-4C7A-936B-7DBE96291BA4.html
+ icon_filename: vmware.svg
+ categories:
+ - data-collection.containers-and-vms
+ keywords:
+ - vmware
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors [health statistics](https://developer.vmware.com/apis/vsphere-automation/latest/appliance/health/) of vCenter Server Appliance servers.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "go.d/vcsa.conf"
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: "5"
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: "0"
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: ""
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: "1"
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: true
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: true
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: "false"
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: "false"
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: vcsa1
+ url: https://203.0.113.1
+ username: admin@vsphere.local
+ password: password
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Two instances.
+ config: |
+ jobs:
+ - name: vcsa1
+ url: https://203.0.113.1
+ username: admin@vsphere.local
+ password: password
+
+ - name: vcsa2
+ url: https://203.0.113.10
+ username: admin@vsphere.local
+ password: password
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: vcsa_system_health_warn
+ metric: vcsa.system_health_status
+ info: VCSA overall system status is orange. One or more components are degraded.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_system_health_crit
+ metric: vcsa.system_health_status
+ info: VCSA overall system status is red. One or more components are unavailable or will stop functioning soon.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_applmgmt_health_warn
+ metric: vcsa.applmgmt_health_status
+ info: VCSA ApplMgmt component status is orange. It is degraded, and may have serious problems.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_applmgmt_health_crit
+ metric: vcsa.applmgmt_health_status
+ info: VCSA ApplMgmt component status is red. It is unavailable, or will stop functioning soon.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_load_health_warn
+ metric: vcsa.load_health_status
+ info: VCSA Load component status is orange. It is degraded, and may have serious problems.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_load_health_crit
+ metric: vcsa.load_health_status
+ info: VCSA Load component status is red. It is unavailable, or will stop functioning soon.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_mem_health_warn
+ metric: vcsa.mem_health_status
+ info: VCSA Memory component status is orange. It is degraded, and may have serious problems.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_mem_health_crit
+ metric: vcsa.mem_health_status
+ info: VCSA Memory component status is red. It is unavailable, or will stop functioning soon.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_swap_health_warn
+ metric: vcsa.swap_health_status
+ info: VCSA Swap component status is orange. It is degraded, and may have serious problems.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_swap_health_crit
+ metric: vcsa.swap_health_status
+ info: VCSA Swap component status is red. It is unavailable, or will stop functioning soon.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_database_storage_health_warn
+ metric: vcsa.database_storage_health_status
+ info: VCSA Database Storage component status is orange. It is degraded, and may have serious problems.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_database_storage_health_crit
+ metric: vcsa.database_storage_health_status
+ info: VCSA Database Storage component status is red. It is unavailable, or will stop functioning soon.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_storage_health_warn
+ metric: vcsa.storage_health_status
+ info: VCSA Storage component status is orange. It is degraded, and may have serious problems.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_storage_health_crit
+ metric: vcsa.storage_health_status
+ info: VCSA Storage component status is red. It is unavailable, or will stop functioning soon.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ - name: vcsa_software_packages_health_warn
+ metric: vcsa.software_packages_health_status
+ info: VCSA software packages security updates are available.
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vcsa.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: |
+ These metrics refer to the entire monitored application.
+ <details>
+ <summary>See health statuses</summary>
+ Overall System Health:
+
+ | Status | Description |
+ |:-------:|:-------------------------------------------------------------------------------------------------------------------------|
+ | green | All components in the appliance are healthy. |
+ | yellow | One or more components in the appliance might become overloaded soon. |
+ | orange | One or more components in the appliance might be degraded. |
+ | red | One or more components in the appliance might be in an unusable status and the appliance might become unresponsive soon. |
+ | gray | No health data is available. |
+ | unknown | Collector failed to decode status. |
+
+ Components Health:
+
+ | Status | Description |
+ |:-------:|:-------------------------------------------------------------|
+ | green | The component is healthy. |
+ | yellow | The component is healthy, but may have some problems. |
+ | orange | The component is degraded, and may have serious problems. |
+ | red | The component is unavailable, or will stop functioning soon. |
+ | gray | No health data is available. |
+ | unknown | Collector failed to decode status. |
+
+ Software Updates Health:
+
+ | Status | Description |
+ |:-------:|:-----------------------------------------------------|
+ | green | No updates available. |
+ | orange | Non-security patches might be available. |
+ | red | Security patches might be available. |
+ | gray | An error retrieving information on software updates. |
+ | unknown | Collector failed to decode status. |
+
+ </details>
+ labels: []
+ metrics:
+ - name: vcsa.system_health_status
+ description: VCSA Overall System health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: yellow
+ - name: orange
+ - name: gray
+ - name: unknown
+ - name: vcsa.applmgmt_health_status
+ description: VCSA ApplMgmt health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: yellow
+ - name: orange
+ - name: gray
+ - name: unknown
+ - name: vcsa.load_health_status
+ description: VCSA Load health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: yellow
+ - name: orange
+ - name: gray
+ - name: unknown
+ - name: vcsa.mem_health_status
+ description: VCSA Memory health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: yellow
+ - name: orange
+ - name: gray
+ - name: unknown
+ - name: vcsa.swap_health_status
+ description: VCSA Swap health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: yellow
+ - name: orange
+ - name: gray
+ - name: unknown
+ - name: vcsa.database_storage_health_status
+ description: VCSA Database Storage health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: yellow
+ - name: orange
+ - name: gray
+ - name: unknown
+ - name: vcsa.storage_health_status
+ description: VCSA Storage health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: yellow
+ - name: orange
+ - name: gray
+ - name: unknown
+ - name: vcsa.software_packages_health_status
+ description: VCSA Software Updates health status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: orange
+ - name: gray
+ - name: unknown
diff --git a/src/go/plugin/go.d/modules/vcsa/testdata/config.json b/src/go/plugin/go.d/modules/vcsa/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/vcsa/testdata/config.yaml b/src/go/plugin/go.d/modules/vcsa/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/vcsa/vcsa.go b/src/go/plugin/go.d/modules/vcsa/vcsa.go
new file mode 100644
index 000000000..aa12d7c60
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/vcsa.go
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vcsa
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("vcsa", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5, // VCSA health check frequency is 5 seconds.
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *VCSA {
+ return &VCSA{
+ Config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 5),
+ },
+ },
+ },
+ charts: vcsaHealthCharts.Copy(),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type (
+ VCSA struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ client healthClient
+ }
+
+ healthClient interface {
+ Login() error
+ Logout() error
+ Ping() error
+ ApplMgmt() (string, error)
+ DatabaseStorage() (string, error)
+ Load() (string, error)
+ Mem() (string, error)
+ SoftwarePackages() (string, error)
+ Storage() (string, error)
+ Swap() (string, error)
+ System() (string, error)
+ }
+)
+
+func (vc *VCSA) Configuration() any {
+ return vc.Config
+}
+
+func (vc *VCSA) Init() error {
+ if err := vc.validateConfig(); err != nil {
+ vc.Error(err)
+ return err
+ }
+
+ c, err := vc.initHealthClient()
+ if err != nil {
+ vc.Errorf("error on creating health client : %v", err)
+ return err
+ }
+ vc.client = c
+
+ vc.Debugf("using URL %s", vc.URL)
+ vc.Debugf("using timeout: %s", vc.Timeout)
+
+ return nil
+}
+
+func (vc *VCSA) Check() error {
+ err := vc.client.Login()
+ if err != nil {
+ vc.Error(err)
+ return err
+ }
+
+ mx, err := vc.collect()
+ if err != nil {
+ vc.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (vc *VCSA) Charts() *module.Charts {
+ return vc.charts
+}
+
+func (vc *VCSA) Collect() map[string]int64 {
+ mx, err := vc.collect()
+ if err != nil {
+ vc.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (vc *VCSA) Cleanup() {
+ if vc.client == nil {
+ return
+ }
+ err := vc.client.Logout()
+ if err != nil {
+ vc.Errorf("error on logout : %v", err)
+ }
+}
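
A rough lifecycle sketch (not part of the patch) of how the module is driven — it mirrors the pattern used by the tests below, with placeholder URL and credentials; in production the go.d framework performs these calls, not user code.

```go
package main

import (
	"fmt"
	"log"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vcsa"
)

func main() {
	vc := vcsa.New()
	vc.URL = "https://203.0.113.1"
	vc.Username = "admin@vsphere.local"
	vc.Password = "password"

	// Init validates the config and builds the health client;
	// Check logs in and runs a first collection pass.
	if err := vc.Init(); err != nil {
		log.Fatal(err)
	}
	if err := vc.Check(); err != nil {
		log.Fatal(err)
	}

	// Collect returns the flat metric map consumed by the framework.
	fmt.Println(vc.Collect())

	// Cleanup terminates the API session.
	vc.Cleanup()
}
```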
diff --git a/src/go/plugin/go.d/modules/vcsa/vcsa_test.go b/src/go/plugin/go.d/modules/vcsa/vcsa_test.go
new file mode 100644
index 000000000..2c51723d4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vcsa/vcsa_test.go
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vcsa
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestVCSA_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &VCSA{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestVCSA_Init(t *testing.T) {
+ job := prepareVCSA()
+
+ assert.NoError(t, job.Init())
+ assert.NotNil(t, job.client)
+}
+
+func TestVCenter_InitErrorOnValidatingInitParameters(t *testing.T) {
+ job := New()
+
+ assert.Error(t, job.Init())
+}
+
+func TestVCenter_InitErrorOnCreatingClient(t *testing.T) {
+ job := prepareVCSA()
+ job.Client.TLSConfig.TLSCA = "testdata/tls"
+
+ assert.Error(t, job.Init())
+}
+
+func TestVCenter_Check(t *testing.T) {
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
+ job.client = &mockVCenterHealthClient{}
+
+ assert.NoError(t, job.Check())
+}
+
+func TestVCenter_CheckErrorOnLogin(t *testing.T) {
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
+ job.client = &mockVCenterHealthClient{
+ login: func() error { return errors.New("login mock error") },
+ }
+
+ assert.Error(t, job.Check())
+}
+
+func TestVCenter_CheckEnsureLoggedIn(t *testing.T) {
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
+ mock := &mockVCenterHealthClient{}
+ job.client = mock
+
+ assert.NoError(t, job.Check())
+ assert.True(t, mock.loginCalls == 1)
+}
+
+func TestVCenter_Cleanup(t *testing.T) {
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
+ mock := &mockVCenterHealthClient{}
+ job.client = mock
+ job.Cleanup()
+
+ assert.True(t, mock.logoutCalls == 1)
+}
+
+func TestVCenter_CleanupWithNilClient(t *testing.T) {
+ job := prepareVCSA()
+
+ assert.NotPanics(t, job.Cleanup)
+}
+
+func TestVCenter_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestVCenter_Collect(t *testing.T) {
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
+ mock := &mockVCenterHealthClient{}
+ job.client = mock
+
+ expected := map[string]int64{
+ "applmgmt_status_gray": 0,
+ "applmgmt_status_green": 1,
+ "applmgmt_status_orange": 0,
+ "applmgmt_status_red": 0,
+ "applmgmt_status_unknown": 0,
+ "applmgmt_status_yellow": 0,
+ "database_storage_status_gray": 0,
+ "database_storage_status_green": 1,
+ "database_storage_status_orange": 0,
+ "database_storage_status_red": 0,
+ "database_storage_status_unknown": 0,
+ "database_storage_status_yellow": 0,
+ "load_status_gray": 0,
+ "load_status_green": 1,
+ "load_status_orange": 0,
+ "load_status_red": 0,
+ "load_status_unknown": 0,
+ "load_status_yellow": 0,
+ "mem_status_gray": 0,
+ "mem_status_green": 1,
+ "mem_status_orange": 0,
+ "mem_status_red": 0,
+ "mem_status_unknown": 0,
+ "mem_status_yellow": 0,
+ "software_packages_status_gray": 0,
+ "software_packages_status_green": 1,
+ "software_packages_status_orange": 0,
+ "software_packages_status_red": 0,
+ "software_packages_status_unknown": 0,
+ "storage_status_gray": 0,
+ "storage_status_green": 1,
+ "storage_status_orange": 0,
+ "storage_status_red": 0,
+ "storage_status_unknown": 0,
+ "storage_status_yellow": 0,
+ "swap_status_gray": 0,
+ "swap_status_green": 1,
+ "swap_status_orange": 0,
+ "swap_status_red": 0,
+ "swap_status_unknown": 0,
+ "swap_status_yellow": 0,
+ "system_status_gray": 0,
+ "system_status_green": 1,
+ "system_status_orange": 0,
+ "system_status_red": 0,
+ "system_status_unknown": 0,
+ "system_status_yellow": 0,
+ }
+
+ assert.Equal(t, expected, job.Collect())
+}
+
+func TestVCenter_CollectEnsurePingIsCalled(t *testing.T) {
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
+ mock := &mockVCenterHealthClient{}
+ job.client = mock
+ job.Collect()
+
+ assert.True(t, mock.pingCalls == 1)
+}
+
+func TestVCenter_CollectErrorOnPing(t *testing.T) {
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
+ mock := &mockVCenterHealthClient{
+ ping: func() error { return errors.New("ping mock error") },
+ }
+ job.client = mock
+
+ assert.Zero(t, job.Collect())
+}
+
+func TestVCenter_CollectErrorOnHealthCalls(t *testing.T) {
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
+ mock := &mockVCenterHealthClient{
+ applMgmt: func() (string, error) { return "", errors.New("applMgmt mock error") },
+ databaseStorage: func() (string, error) { return "", errors.New("databaseStorage mock error") },
+ load: func() (string, error) { return "", errors.New("load mock error") },
+ mem: func() (string, error) { return "", errors.New("mem mock error") },
+ softwarePackages: func() (string, error) { return "", errors.New("softwarePackages mock error") },
+ storage: func() (string, error) { return "", errors.New("storage mock error") },
+ swap: func() (string, error) { return "", errors.New("swap mock error") },
+ system: func() (string, error) { return "", errors.New("system mock error") },
+ }
+ job.client = mock
+
+ assert.Zero(t, job.Collect())
+}
+
+func prepareVCSA() *VCSA {
+ vc := New()
+ vc.URL = "https://127.0.0.1:38001"
+ vc.Username = "user"
+ vc.Password = "pass"
+
+ return vc
+}
+
+type mockVCenterHealthClient struct {
+ login func() error
+ logout func() error
+ ping func() error
+ applMgmt func() (string, error)
+ databaseStorage func() (string, error)
+ load func() (string, error)
+ mem func() (string, error)
+ softwarePackages func() (string, error)
+ storage func() (string, error)
+ swap func() (string, error)
+ system func() (string, error)
+ loginCalls int
+ logoutCalls int
+ pingCalls int
+}
+
+func (m *mockVCenterHealthClient) Login() error {
+ m.loginCalls += 1
+ if m.login == nil {
+ return nil
+ }
+ return m.login()
+}
+
+func (m *mockVCenterHealthClient) Logout() error {
+ m.logoutCalls += 1
+ if m.logout == nil {
+ return nil
+ }
+ return m.logout()
+}
+
+func (m *mockVCenterHealthClient) Ping() error {
+ m.pingCalls += 1
+ if m.ping == nil {
+ return nil
+ }
+ return m.ping()
+}
+
+func (m *mockVCenterHealthClient) ApplMgmt() (string, error) {
+ if m.applMgmt == nil {
+ return "green", nil
+ }
+ return m.applMgmt()
+}
+
+func (m *mockVCenterHealthClient) DatabaseStorage() (string, error) {
+ if m.databaseStorage == nil {
+ return "green", nil
+ }
+ return m.databaseStorage()
+}
+
+func (m *mockVCenterHealthClient) Load() (string, error) {
+ if m.load == nil {
+ return "green", nil
+ }
+ return m.load()
+}
+
+func (m *mockVCenterHealthClient) Mem() (string, error) {
+ if m.mem == nil {
+ return "green", nil
+ }
+ return m.mem()
+}
+
+func (m *mockVCenterHealthClient) SoftwarePackages() (string, error) {
+ if m.softwarePackages == nil {
+ return "green", nil
+ }
+ return m.softwarePackages()
+}
+
+func (m *mockVCenterHealthClient) Storage() (string, error) {
+ if m.storage == nil {
+ return "green", nil
+ }
+ return m.storage()
+}
+
+func (m *mockVCenterHealthClient) Swap() (string, error) {
+ if m.swap == nil {
+ return "green", nil
+ }
+ return m.swap()
+}
+
+func (m *mockVCenterHealthClient) System() (string, error) {
+ if m.system == nil {
+ return "green", nil
+ }
+ return m.system()
+}
diff --git a/src/go/plugin/go.d/modules/vernemq/README.md b/src/go/plugin/go.d/modules/vernemq/README.md
new file mode 120000
index 000000000..3d984de71
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/README.md
@@ -0,0 +1 @@
+integrations/vernemq.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/vernemq/charts.go b/src/go/plugin/go.d/modules/vernemq/charts.go
new file mode 100644
index 000000000..5d81a26bc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/charts.go
@@ -0,0 +1,911 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vernemq
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ Charts = module.Charts
+ Chart = module.Chart
+ Dims = module.Dims
+ Dim = module.Dim
+)
+
+var charts = Charts{
+ chartOpenSockets.Copy(),
+ chartSocketEvents.Copy(),
+ chartClientKeepaliveExpired.Copy(),
+ chartSocketErrors.Copy(),
+ chartSocketCloseTimeout.Copy(),
+
+ chartQueueProcesses.Copy(),
+ chartQueueProcessesEvents.Copy(),
+ chartQueueProcessesOfflineStorage.Copy(),
+ chartQueueMessages.Copy(),
+ chartQueueUndeliveredMessages.Copy(),
+
+ chartRouterSubscriptions.Copy(),
+ chartRouterMatchedSubscriptions.Copy(),
+ chartRouterMemory.Copy(),
+
+ chartAverageSchedulerUtilization.Copy(),
+ chartSchedulerUtilization.Copy(),
+ chartSystemProcesses.Copy(),
+ chartSystemReductions.Copy(),
+ chartSystemContextSwitches.Copy(),
+ chartSystemIO.Copy(),
+ chartSystemRunQueue.Copy(),
+ chartSystemGCCount.Copy(),
+ chartSystemGCWordsReclaimed.Copy(),
+ chartSystemMemoryAllocated.Copy(),
+
+ chartBandwidth.Copy(),
+
+ chartRetainMessages.Copy(),
+ chartRetainMemoryUsage.Copy(),
+
+ chartClusterCommunicationBandwidth.Copy(),
+ chartClusterCommunicationDropped.Copy(),
+ chartNetSplitUnresolved.Copy(),
+ chartNetSplits.Copy(),
+
+ chartMQTTv5AUTH.Copy(),
+ chartMQTTv5AUTHReceivedReason.Copy(),
+ chartMQTTv5AUTHSentReason.Copy(),
+
+ chartMQTTv3v5CONNECT.Copy(),
+ chartMQTTv3v5CONNACKSentReason.Copy(),
+
+ chartMQTTv3v5DISCONNECT.Copy(),
+ chartMQTTv5DISCONNECTReceivedReason.Copy(),
+ chartMQTTv5DISCONNECTSentReason.Copy(),
+
+ chartMQTTv3v5SUBSCRIBE.Copy(),
+ chartMQTTv3v5SUBSCRIBEError.Copy(),
+ chartMQTTv3v5SUBSCRIBEAuthError.Copy(),
+
+ chartMQTTv3v5UNSUBSCRIBE.Copy(),
+ chartMQTTv3v5UNSUBSCRIBEError.Copy(),
+
+ chartMQTTv3v5PUBLISH.Copy(),
+ chartMQTTv3v5PUBLISHErrors.Copy(),
+ chartMQTTv3v5PUBLISHAuthErrors.Copy(),
+ chartMQTTv3v5PUBACK.Copy(),
+ chartMQTTv5PUBACKReceivedReason.Copy(),
+ chartMQTTv5PUBACKSentReason.Copy(),
+ chartMQTTv3v5PUBACKUnexpected.Copy(),
+ chartMQTTv3v5PUBREC.Copy(),
+ chartMQTTv5PUBRECReceivedReason.Copy(),
+ chartMQTTv5PUBRECSentReason.Copy(),
+ chartMQTTv3PUBRECUnexpected.Copy(),
+ chartMQTTv3v5PUBREL.Copy(),
+ chartMQTTv5PUBRELReceivedReason.Copy(),
+ chartMQTTv5PUBRELSentReason.Copy(),
+ chartMQTTv3v5PUBCOMP.Copy(),
+ chartMQTTv5PUBCOMPReceivedReason.Copy(),
+ chartMQTTv5PUBCOMPSentReason.Copy(),
+ chartMQTTv3v5PUBCOMPUnexpected.Copy(),
+
+ chartMQTTv3v5PING.Copy(),
+
+ chartUptime.Copy(),
+}
+
+// Sockets
+var (
+ chartOpenSockets = Chart{
+ ID: "sockets",
+ Title: "Open Sockets",
+ Units: "sockets",
+ Fam: "sockets",
+ Ctx: "vernemq.sockets",
+ Dims: Dims{
+ {ID: "open_sockets", Name: "open"},
+ },
+ }
+ chartSocketEvents = Chart{
+ ID: "socket_events",
+ Title: "Socket Open and Close Events",
+ Units: "events/s",
+ Fam: "sockets",
+ Ctx: "vernemq.socket_operations",
+ Dims: Dims{
+ {ID: metricSocketOpen, Name: "open", Algo: module.Incremental},
+ {ID: metricSocketClose, Name: "close", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartClientKeepaliveExpired = Chart{
+ ID: "client_keepalive_expired",
+ Title: "Closed Sockets due to Keepalive Time Expired",
+ Units: "sockets/s",
+ Fam: "sockets",
+ Ctx: "vernemq.client_keepalive_expired",
+ Dims: Dims{
+ {ID: metricClientKeepaliveExpired, Name: "closed", Algo: module.Incremental},
+ },
+ }
+ chartSocketCloseTimeout = Chart{
+ ID: "socket_close_timeout",
+ Title: "Closed Sockets due to no CONNECT Frame On Time",
+ Units: "sockets/s",
+ Fam: "sockets",
+ Ctx: "vernemq.socket_close_timeout",
+ Dims: Dims{
+ {ID: metricSocketCloseTimeout, Name: "closed", Algo: module.Incremental},
+ },
+ }
+ chartSocketErrors = Chart{
+ ID: "socket_errors",
+ Title: "Socket Errors",
+ Units: "errors/s",
+ Fam: "sockets",
+ Ctx: "vernemq.socket_errors",
+ Dims: Dims{
+ {ID: metricSocketError, Name: "errors", Algo: module.Incremental},
+ },
+ }
+)
+
+// Queues
+var (
+ chartQueueProcesses = Chart{
+ ID: "queue_processes",
+ Title: "Living Queues in an Online or an Offline State",
+ Units: "queue processes",
+ Fam: "queues",
+ Ctx: "vernemq.queue_processes",
+ Dims: Dims{
+ {ID: metricQueueProcesses, Name: "queue_processes"},
+ },
+ }
+ chartQueueProcessesEvents = Chart{
+ ID: "queue_processes_events",
+ Title: "Queue Processes Setup and Teardown Events",
+ Units: "events/s",
+ Fam: "queues",
+ Ctx: "vernemq.queue_processes_operations",
+ Dims: Dims{
+ {ID: metricQueueSetup, Name: "setup", Algo: module.Incremental},
+ {ID: metricQueueTeardown, Name: "teardown", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartQueueProcessesOfflineStorage = Chart{
+ ID: "queue_process_init_from_storage",
+ Title: "Queue Processes Initialized from Offline Storage",
+ Units: "queue processes/s",
+ Fam: "queues",
+ Ctx: "vernemq.queue_process_init_from_storage",
+ Dims: Dims{
+ {ID: metricQueueInitializedFromStorage, Name: "queue processes", Algo: module.Incremental},
+ },
+ }
+ chartQueueMessages = Chart{
+ ID: "queue_messages",
+ Title: "Received and Sent PUBLISH Messages",
+ Units: "messages/s",
+ Fam: "queues",
+ Ctx: "vernemq.queue_messages",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricQueueMessageIn, Name: "received", Algo: module.Incremental},
+ {ID: metricQueueMessageOut, Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartQueueUndeliveredMessages = Chart{
+ ID: "queue_undelivered_messages",
+ Title: "Undelivered PUBLISH Messages",
+ Units: "messages/s",
+ Fam: "queues",
+ Ctx: "vernemq.queue_undelivered_messages",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricQueueMessageDrop, Name: "dropped", Algo: module.Incremental},
+ {ID: metricQueueMessageExpired, Name: "expired", Algo: module.Incremental},
+ {ID: metricQueueMessageUnhandled, Name: "unhandled", Algo: module.Incremental},
+ },
+ }
+)
+
+// Subscriptions
+var (
+ chartRouterSubscriptions = Chart{
+ ID: "router_subscriptions",
+ Title: "Subscriptions in the Routing Table",
+ Units: "subscriptions",
+ Fam: "subscriptions",
+ Ctx: "vernemq.router_subscriptions",
+ Dims: Dims{
+ {ID: metricRouterSubscriptions, Name: "subscriptions"},
+ },
+ }
+ chartRouterMatchedSubscriptions = Chart{
+ ID: "router_matched_subscriptions",
+ Title: "Matched Subscriptions",
+ Units: "subscriptions/s",
+ Fam: "subscriptions",
+ Ctx: "vernemq.router_matched_subscriptions",
+ Dims: Dims{
+ {ID: metricRouterMatchesLocal, Name: "local", Algo: module.Incremental},
+ {ID: metricRouterMatchesRemote, Name: "remote", Algo: module.Incremental},
+ },
+ }
+ chartRouterMemory = Chart{
+ ID: "router_memory",
+ Title: "Routing Table Memory Usage",
+ Units: "KiB",
+ Fam: "subscriptions",
+ Ctx: "vernemq.router_memory",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricRouterMemory, Name: "used", Div: 1024},
+ },
+ }
+)
+
+// Erlang VM
+var (
+ chartAverageSchedulerUtilization = Chart{
+ ID: "average_scheduler_utilization",
+ Title: "Average Scheduler Utilization",
+ Units: "percentage",
+ Fam: "erlang vm",
+ Ctx: "vernemq.average_scheduler_utilization",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricSystemUtilization, Name: "utilization"},
+ },
+ }
+ chartSchedulerUtilization = Chart{
+ ID: "scheduler_utilization",
+ Title: "Scheduler Utilization",
+ Units: "percentage",
+ Fam: "erlang vm",
+ Type: module.Stacked,
+ Ctx: "vernemq.system_utilization_scheduler",
+ }
+ chartSystemProcesses = Chart{
+ ID: "system_processes",
+ Title: "Erlang Processes",
+ Units: "processes",
+ Fam: "erlang vm",
+ Ctx: "vernemq.system_processes",
+ Dims: Dims{
+ {ID: metricSystemProcessCount, Name: "processes"},
+ },
+ }
+ chartSystemReductions = Chart{
+ ID: "system_reductions",
+ Title: "Reductions",
+ Units: "ops/s",
+ Fam: "erlang vm",
+ Ctx: "vernemq.system_reductions",
+ Dims: Dims{
+ {ID: metricSystemReductions, Name: "reductions", Algo: module.Incremental},
+ },
+ }
+ chartSystemContextSwitches = Chart{
+ ID: "system_context_switches",
+ Title: "Context Switches",
+ Units: "ops/s",
+ Fam: "erlang vm",
+ Ctx: "vernemq.system_context_switches",
+ Dims: Dims{
+ {ID: metricSystemContextSwitches, Name: "context switches", Algo: module.Incremental},
+ },
+ }
+ chartSystemIO = Chart{
+ ID: "system_io",
+ Title: "Received and Sent Traffic through Ports",
+ Units: "kilobits/s",
+ Fam: "erlang vm",
+ Ctx: "vernemq.system_io",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricSystemIOIn, Name: "received", Algo: module.Incremental, Mul: 8, Div: 1024},
+ {ID: metricSystemIOOut, Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1024},
+ },
+ }
+ chartSystemRunQueue = Chart{
+ ID: "system_run_queue",
+ Title: "Processes that are Ready to Run on All Run-Queues",
+ Units: "processes",
+ Fam: "erlang vm",
+ Ctx: "vernemq.system_run_queue",
+ Dims: Dims{
+ {ID: metricSystemRunQueue, Name: "ready"},
+ },
+ }
+ chartSystemGCCount = Chart{
+ ID: "system_gc_count",
+ Title: "GC Count",
+ Units: "ops/s",
+ Fam: "erlang vm",
+ Ctx: "vernemq.system_gc_count",
+ Dims: Dims{
+ {ID: metricSystemGCCount, Name: "gc", Algo: module.Incremental},
+ },
+ }
+ chartSystemGCWordsReclaimed = Chart{
+ ID: "system_gc_words_reclaimed",
+ Title: "GC Words Reclaimed",
+ Units: "ops/s",
+ Fam: "erlang vm",
+ Ctx: "vernemq.system_gc_words_reclaimed",
+ Dims: Dims{
+ {ID: metricSystemWordsReclaimedByGC, Name: "words reclaimed", Algo: module.Incremental},
+ },
+ }
+ chartSystemMemoryAllocated = Chart{
+ ID: "system_allocated_memory",
+ Title: "Memory Allocated by the Erlang Processes and by the Emulator",
+ Units: "KiB",
+ Fam: "erlang vm",
+ Ctx: "vernemq.system_allocated_memory",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricVMMemoryProcesses, Name: "processes", Div: 1024},
+ {ID: metricVMMemorySystem, Name: "system", Div: 1024},
+ },
+ }
+)
+
+// Bandwidth
+var (
+ chartBandwidth = Chart{
+ ID: "bandwidth",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "bandwidth",
+ Ctx: "vernemq.bandwidth",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricBytesReceived, Name: "received", Algo: module.Incremental, Mul: 8, Div: 1024},
+ {ID: metricBytesSent, Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1024},
+ },
+ }
+)
+
+// Retain
+var (
+ chartRetainMessages = Chart{
+ ID: "retain_messages",
+ Title: "Stored Retained Messages",
+ Units: "messages",
+ Fam: "retain",
+ Ctx: "vernemq.retain_messages",
+ Dims: Dims{
+ {ID: metricRetainMessages, Name: "messages"},
+ },
+ }
+ chartRetainMemoryUsage = Chart{
+ ID: "retain_memory",
+ Title: "Stored Retained Messages Memory Usage",
+ Units: "KiB",
+ Fam: "retain",
+ Ctx: "vernemq.retain_memory",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricRetainMemory, Name: "used", Div: 1024},
+ },
+ }
+)
+
+// Cluster
+var (
+ chartClusterCommunicationBandwidth = Chart{
+ ID: "cluster_bandwidth",
+ Title: "Communication with Other Cluster Nodes",
+ Units: "kilobits/s",
+ Fam: "cluster",
+ Ctx: "vernemq.cluster_bandwidth",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricClusterBytesReceived, Name: "received", Algo: module.Incremental, Mul: 8, Div: 1024},
+ {ID: metricClusterBytesSent, Name: "sent", Algo: module.Incremental, Mul: 8, Div: -1024},
+ },
+ }
+ chartClusterCommunicationDropped = Chart{
+ ID: "cluster_dropped",
+ Title: "Traffic Dropped During Communication with Other Cluster Nodes",
+ Units: "kilobits/s",
+ Fam: "cluster",
+ Type: module.Area,
+ Ctx: "vernemq.cluster_dropped",
+ Dims: Dims{
+ {ID: metricClusterBytesDropped, Name: "dropped", Algo: module.Incremental, Mul: 8, Div: 1024},
+ },
+ }
+ chartNetSplitUnresolved = Chart{
+ ID: "netsplit_unresolved",
+ Title: "Unresolved Netsplits",
+ Units: "netsplits",
+ Fam: "cluster",
+ Ctx: "vernemq.netsplit_unresolved",
+ Dims: Dims{
+ {ID: "netsplit_unresolved", Name: "unresolved"},
+ },
+ }
+ chartNetSplits = Chart{
+ ID: "netsplit",
+ Title: "Netsplits",
+ Units: "netsplits/s",
+ Fam: "cluster",
+ Ctx: "vernemq.netsplits",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: metricNetSplitResolved, Name: "resolved", Algo: module.Incremental},
+ {ID: metricNetSplitDetected, Name: "detected", Algo: module.Incremental},
+ },
+ }
+)
+
+// AUTH
+var (
+ chartMQTTv5AUTH = Chart{
+ ID: "mqtt_auth",
+ Title: "v5 AUTH",
+ Units: "packets/s",
+ Fam: "mqtt auth",
+ Ctx: "vernemq.mqtt_auth",
+ Dims: Dims{
+ {ID: metricAUTHReceived, Name: "received", Algo: module.Incremental},
+ {ID: metricAUTHSent, Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv5AUTHReceivedReason = Chart{
+ ID: "mqtt_auth_received_reason",
+ Title: "v5 AUTH Received by Reason",
+ Units: "packets/s",
+ Fam: "mqtt auth",
+ Ctx: "vernemq.mqtt_auth_received_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricAUTHReceived, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv5AUTHSentReason = Chart{
+ ID: "mqtt_auth_sent_reason",
+ Title: "v5 AUTH Sent by Reason",
+ Units: "packets/s",
+ Fam: "mqtt auth",
+ Ctx: "vernemq.mqtt_auth_sent_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricAUTHSent, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+)
+
+// CONNECT
+var (
+ chartMQTTv3v5CONNECT = Chart{
+ ID: "mqtt_connect",
+ Title: "v3/v5 CONNECT and CONNACK",
+ Units: "packets/s",
+ Fam: "mqtt connect",
+ Ctx: "vernemq.mqtt_connect",
+ Dims: Dims{
+ {ID: metricCONNECTReceived, Name: "CONNECT", Algo: module.Incremental},
+ {ID: metricCONNACKSent, Name: "CONNACK", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv3v5CONNACKSentReason = Chart{
+ ID: "mqtt_connack_sent_reason",
+ Title: "v3/v5 CONNACK Sent by Reason",
+ Units: "packets/s",
+ Fam: "mqtt connect",
+ Ctx: "vernemq.mqtt_connack_sent_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricCONNACKSent, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+)
+
+// DISCONNECT
+var (
+ chartMQTTv3v5DISCONNECT = Chart{
+ ID: "mqtt_disconnect",
+ Title: "v3/v5 DISCONNECT",
+ Units: "packets/s",
+ Fam: "mqtt disconnect",
+ Ctx: "vernemq.mqtt_disconnect",
+ Dims: Dims{
+ {ID: metricDISCONNECTReceived, Name: "received", Algo: module.Incremental},
+ {ID: metricDISCONNECTSent, Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv5DISCONNECTReceivedReason = Chart{
+ ID: "mqtt_disconnect_received_reason",
+ Title: "v5 DISCONNECT Received by Reason",
+ Units: "packets/s",
+ Fam: "mqtt disconnect",
+ Ctx: "vernemq.mqtt_disconnect_received_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricDISCONNECTReceived, "normal_disconnect"), Name: "normal_disconnect", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv5DISCONNECTSentReason = Chart{
+ ID: "mqtt_disconnect_sent_reason",
+ Title: "v5 DISCONNECT Sent by Reason",
+ Units: "packets/s",
+ Fam: "mqtt disconnect",
+ Ctx: "vernemq.mqtt_disconnect_sent_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricDISCONNECTSent, "normal_disconnect"), Name: "normal_disconnect", Algo: module.Incremental},
+ },
+ }
+)
+
+// SUBSCRIBE
+var (
+ chartMQTTv3v5SUBSCRIBE = Chart{
+ ID: "mqtt_subscribe",
+ Title: "v3/v5 SUBSCRIBE and SUBACK",
+ Units: "packets/s",
+ Fam: "mqtt subscribe",
+ Ctx: "vernemq.mqtt_subscribe",
+ Dims: Dims{
+ {ID: metricSUBSCRIBEReceived, Name: "SUBSCRIBE", Algo: module.Incremental},
+ {ID: metricSUBACKSent, Name: "SUBACK", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv3v5SUBSCRIBEError = Chart{
+ ID: "mqtt_subscribe_error",
+ Title: "v3/v5 Failed SUBSCRIBE Operations due to a Netsplit",
+ Units: "ops/s",
+ Fam: "mqtt subscribe",
+ Ctx: "vernemq.mqtt_subscribe_error",
+ Dims: Dims{
+ {ID: metricSUBSCRIBEError, Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv3v5SUBSCRIBEAuthError = Chart{
+ ID: "mqtt_subscribe_auth_error",
+ Title: "v3/v5 Unauthorized SUBSCRIBE Attempts",
+ Units: "attempts/s",
+ Fam: "mqtt subscribe",
+ Ctx: "vernemq.mqtt_subscribe_auth_error",
+ Dims: Dims{
+ {ID: metricSUBSCRIBEAuthError, Name: "unauth", Algo: module.Incremental},
+ },
+ }
+)
+
+// UNSUBSCRIBE
+var (
+ chartMQTTv3v5UNSUBSCRIBE = Chart{
+ ID: "mqtt_unsubscribe",
+ Title: "v3/v5 UNSUBSCRIBE and UNSUBACK",
+ Units: "packets/s",
+ Fam: "mqtt unsubscribe",
+ Ctx: "vernemq.mqtt_unsubscribe",
+ Dims: Dims{
+ {ID: metricUNSUBSCRIBEReceived, Name: "UNSUBSCRIBE", Algo: module.Incremental},
+ {ID: metricUNSUBACKSent, Name: "UNSUBACK", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv3v5UNSUBSCRIBEError = Chart{
+ ID: "mqtt_unsubscribe_error",
+ Title: "v3/v5 Failed UNSUBSCRIBE Operations due to a Netsplit",
+ Units: "ops/s",
+ Fam: "mqtt unsubscribe",
+ Ctx: "vernemq.mqtt_unsubscribe_error",
+ Dims: Dims{
+ {ID: metricUNSUBSCRIBEError, Name: "failed", Algo: module.Incremental},
+ },
+ }
+)
+
+// PUBLISH
+var (
+ chartMQTTv3v5PUBLISH = Chart{
+ ID: "mqtt_publish",
+ Title: "v3/v5 QoS 0,1,2 PUBLISH",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_publish",
+ Dims: Dims{
+ {ID: metricPUBSLISHReceived, Name: "received", Algo: module.Incremental},
+ {ID: metricPUBSLIHSent, Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv3v5PUBLISHErrors = Chart{
+ ID: "mqtt_publish_errors",
+ Title: "v3/v5 Failed PUBLISH Operations due to a Netsplit",
+ Units: "ops/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_publish_errors",
+ Dims: Dims{
+ {ID: metricPUBLISHError, Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv3v5PUBLISHAuthErrors = Chart{
+ ID: "mqtt_publish_auth_errors",
+ Title: "v3/v5 Unauthorized PUBLISH Attempts",
+ Units: "attempts/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_publish_auth_errors",
+ Type: module.Area,
+ Dims: Dims{
+ {ID: metricPUBLISHAuthError, Name: "unauth", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv3v5PUBACK = Chart{
+ ID: "mqtt_puback",
+ Title: "v3/v5 QoS 1 PUBACK",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_puback",
+ Dims: Dims{
+ {ID: metricPUBACKReceived, Name: "received", Algo: module.Incremental},
+ {ID: metricPUBACKSent, Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv5PUBACKReceivedReason = Chart{
+ ID: "mqtt_puback_received_reason",
+ Title: "v5 PUBACK QoS 1 Received by Reason",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_puback_received_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricPUBACKReceived, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv5PUBACKSentReason = Chart{
+ ID: "mqtt_puback_sent_reason",
+ Title: "v5 PUBACK QoS 1 Sent by Reason",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_puback_sent_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricPUBACKSent, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv3v5PUBACKUnexpected = Chart{
+ ID: "mqtt_puback_unexpected",
+ Title: "v3/v5 PUBACK QoS 1 Received Unexpected Messages",
+ Units: "messages/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_puback_invalid_error",
+ Dims: Dims{
+ {ID: metricPUBACKInvalid, Name: "unexpected", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv3v5PUBREC = Chart{
+ ID: "mqtt_pubrec",
+ Title: "v3/v5 PUBREC QoS 2",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubrec",
+ Dims: Dims{
+ {ID: metricPUBRECReceived, Name: "received", Algo: module.Incremental},
+ {ID: metricPUBRECSent, Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv5PUBRECReceivedReason = Chart{
+ ID: "mqtt_pubrec_received_reason",
+ Title: "v5 PUBREC QoS 2 Received by Reason",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubrec_received_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricPUBRECReceived, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv5PUBRECSentReason = Chart{
+ ID: "mqtt_pubrec_sent_reason",
+ Title: "v5 PUBREC QoS 2 Sent by Reason",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubrec_sent_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricPUBRECSent, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv3PUBRECUnexpected = Chart{
+ ID: "mqtt_pubrec_unexpected",
+ Title: "v3 PUBREC QoS 2 Received Unexpected Messages",
+ Units: "messages/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubrec_invalid_error",
+ Dims: Dims{
+ {ID: metricPUBRECInvalid, Name: "unexpected", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv3v5PUBREL = Chart{
+ ID: "mqtt_pubrel",
+ Title: "v3/v5 PUBREL QoS 2",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubrel",
+ Dims: Dims{
+ {ID: metricPUBRELReceived, Name: "received", Algo: module.Incremental},
+ {ID: metricPUBRELSent, Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv5PUBRELReceivedReason = Chart{
+ ID: "mqtt_pubrel_received_reason",
+ Title: "v5 PUBREL QoS 2 Received by Reason",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubrel_received_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricPUBRELReceived, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv5PUBRELSentReason = Chart{
+ ID: "mqtt_pubrel_sent_reason",
+ Title: "v5 PUBREL QoS 2 Sent by Reason",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubrel_sent_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricPUBRELSent, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv3v5PUBCOMP = Chart{
+ ID: "mqtt_pubcomp",
+ Title: "v3/v5 PUBCOMP QoS 2",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubcom",
+ Dims: Dims{
+ {ID: metricPUBCOMPReceived, Name: "received", Algo: module.Incremental},
+ {ID: metricPUBCOMPSent, Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ chartMQTTv5PUBCOMPReceivedReason = Chart{
+ ID: "mqtt_pubcomp_received_reason",
+ Title: "v5 PUBCOMP QoS 2 Received by Reason",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubcomp_received_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricPUBCOMPReceived, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv5PUBCOMPSentReason = Chart{
+ ID: "mqtt_pubcomp_sent_reason",
+ Title: "v5 PUBCOMP QoS 2 Sent by Reason",
+ Units: "packets/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubcomp_sent_reason",
+ Type: module.Stacked,
+ Dims: Dims{
+ {ID: join(metricPUBCOMPSent, "success"), Name: "success", Algo: module.Incremental},
+ },
+ }
+ chartMQTTv3v5PUBCOMPUnexpected = Chart{
+ ID: "mqtt_pubcomp_unexpected",
+ Title: "v3/v5 PUBCOMP QoS 2 Received Unexpected Messages",
+ Units: "messages/s",
+ Fam: "mqtt publish",
+ Ctx: "vernemq.mqtt_pubcomp_invalid_error",
+ Dims: Dims{
+ {ID: metricPUNCOMPInvalid, Name: "unexpected", Algo: module.Incremental},
+ },
+ }
+)
+
+// PING
+var (
+ chartMQTTv3v5PING = Chart{
+ ID: "mqtt_ping",
+ Title: "v3/v5 PING",
+ Units: "packets/s",
+ Fam: "mqtt ping",
+ Ctx: "vernemq.mqtt_ping",
+ Dims: Dims{
+ {ID: metricPINGREQReceived, Name: "PINGREQ", Algo: module.Incremental},
+ {ID: metricPINGRESPSent, Name: "PINGRESP", Algo: module.Incremental, Mul: -1},
+ },
+ }
+)
+
+var (
+ chartUptime = Chart{
+ ID: "node_uptime",
+ Title: "Node Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "vernemq.node_uptime",
+ Dims: Dims{
+ {ID: metricSystemWallClock, Name: "time", Div: 1000},
+ },
+ }
+)
+
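+// notifyNewScheduler adds an absolute dimension for a newly seen per-scheduler utilization metric
+// to the scheduler utilization chart; the dimension name is the scheduler number taken from the metric name.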
+func (v *VerneMQ) notifyNewScheduler(name string) {
+ if v.cache[name] {
+ return
+ }
+ v.cache[name] = true
+
+ id := chartSchedulerUtilization.ID
+ num := name[len("system_utilization_scheduler_"):]
+
+ v.addAbsDimToChart(id, name, num)
+}
+
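+// notifyNewReason adds an incremental dimension for a newly seen reason code to the matching
+// "by reason" chart; "success" and "normal_disconnect" are skipped because the base charts already define them.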
+func (v *VerneMQ) notifyNewReason(name, reason string) {
+ if reason == "success" || reason == "normal_disconnect" {
+ return
+ }
+ key := join(name, reason)
+ if v.cache[key] {
+ return
+ }
+ v.cache[key] = true
+
+ var chart Chart
+ switch name {
+ case metricAUTHReceived:
+ chart = chartMQTTv5AUTHReceivedReason
+ case metricAUTHSent:
+ chart = chartMQTTv5AUTHSentReason
+ case metricCONNACKSent:
+ chart = chartMQTTv3v5CONNACKSentReason
+ case metricDISCONNECTReceived:
+ chart = chartMQTTv5DISCONNECTReceivedReason
+ case metricDISCONNECTSent:
+ chart = chartMQTTv5DISCONNECTSentReason
+ case metricPUBACKReceived:
+ chart = chartMQTTv5PUBACKReceivedReason
+ case metricPUBACKSent:
+ chart = chartMQTTv5PUBACKSentReason
+ case metricPUBRECReceived:
+ chart = chartMQTTv5PUBRECReceivedReason
+ case metricPUBRECSent:
+ chart = chartMQTTv5PUBRECSentReason
+ case metricPUBRELReceived:
+ chart = chartMQTTv5PUBRELReceivedReason
+ case metricPUBRELSent:
+ chart = chartMQTTv5PUBRELSentReason
+ case metricPUBCOMPReceived:
+ chart = chartMQTTv5PUBCOMPReceivedReason
+ case metricPUBCOMPSent:
+ chart = chartMQTTv5PUBCOMPSentReason
+ default:
+		v.Warningf("unknown metric name, won't be added to the charts: '%s'", name)
+ return
+ }
+
+ v.addIncDimToChart(chart.ID, key, reason)
+}
+
+func (v *VerneMQ) addAbsDimToChart(chartID, dimID, dimName string) {
+ v.addDimToChart(chartID, dimID, dimName, false)
+}
+
+func (v *VerneMQ) addIncDimToChart(chartID, dimID, dimName string) {
+ v.addDimToChart(chartID, dimID, dimName, true)
+}
+
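+// addDimToChart adds a dimension to the chart with the given ID and marks the chart as not created,
+// so its definition is resent to Netdata with the new dimension.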
+func (v *VerneMQ) addDimToChart(chartID, dimID, dimName string, inc bool) {
+ chart := v.Charts().Get(chartID)
+ if chart == nil {
+ v.Warningf("add '%s' dim: couldn't find '%s' chart", dimID, chartID)
+ return
+ }
+
+ dim := &Dim{ID: dimID, Name: dimName}
+ if inc {
+ dim.Algo = module.Incremental
+ }
+
+ if err := chart.AddDim(dim); err != nil {
+ v.Warningf("add '%s' dim: %v", dimID, err)
+ return
+ }
+ chart.MarkNotCreated()
+}
diff --git a/src/go/plugin/go.d/modules/vernemq/collect.go b/src/go/plugin/go.d/modules/vernemq/collect.go
new file mode 100644
index 000000000..c6fb3ecb9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/collect.go
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vernemq
+
+import (
+ "errors"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
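+// isValidVerneMQMetrics is a sanity check: the scraped series must contain VerneMQ-specific metrics,
+// otherwise the endpoint is most likely not a VerneMQ metrics endpoint.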
+func isValidVerneMQMetrics(pms prometheus.Series) bool {
+ return pms.FindByName(metricPUBLISHError).Len() > 0 && pms.FindByName(metricRouterSubscriptions).Len() > 0
+}
+
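+// collect scrapes the Prometheus endpoint, validates that the series look like VerneMQ metrics,
+// gathers them into a float map and converts it to the int64 map expected by the framework.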
+func (v *VerneMQ) collect() (map[string]int64, error) {
+ pms, err := v.prom.ScrapeSeries()
+ if err != nil {
+ return nil, err
+ }
+
+ if !isValidVerneMQMetrics(pms) {
+ return nil, errors.New("returned metrics aren't VerneMQ metrics")
+ }
+
+ mx := v.collectVerneMQ(pms)
+
+ return stm.ToMap(mx), nil
+}
+
+func (v *VerneMQ) collectVerneMQ(pms prometheus.Series) map[string]float64 {
+ mx := make(map[string]float64)
+ collectSockets(mx, pms)
+ collectQueues(mx, pms)
+ collectSubscriptions(mx, pms)
+ v.collectErlangVM(mx, pms)
+ collectBandwidth(mx, pms)
+ collectRetain(mx, pms)
+ collectCluster(mx, pms)
+ collectUptime(mx, pms)
+
+ v.collectAUTH(mx, pms)
+ v.collectCONNECT(mx, pms)
+ v.collectDISCONNECT(mx, pms)
+ v.collectSUBSCRIBE(mx, pms)
+ v.collectUNSUBSCRIBE(mx, pms)
+ v.collectPUBLISH(mx, pms)
+ v.collectPING(mx, pms)
+ v.collectMQTTInvalidMsgSize(mx, pms)
+ return mx
+}
+
+func (v *VerneMQ) collectCONNECT(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricCONNECTReceived,
+ metricCONNACKSent,
+ )
+ v.collectMQTT(mx, pms)
+}
+
+func (v *VerneMQ) collectDISCONNECT(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricDISCONNECTReceived,
+ metricDISCONNECTSent,
+ )
+ v.collectMQTT(mx, pms)
+}
+
+func (v *VerneMQ) collectPUBLISH(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricPUBACKReceived,
+ metricPUBACKSent,
+ metricPUBACKInvalid,
+
+ metricPUBCOMPReceived,
+ metricPUBCOMPSent,
+ metricPUNCOMPInvalid,
+
+ metricPUBSLISHReceived,
+ metricPUBSLIHSent,
+ metricPUBLISHError,
+ metricPUBLISHAuthError,
+
+ metricPUBRECReceived,
+ metricPUBRECSent,
+ metricPUBRECInvalid,
+
+ metricPUBRELReceived,
+ metricPUBRELSent,
+ )
+ v.collectMQTT(mx, pms)
+}
+
+func (v *VerneMQ) collectSUBSCRIBE(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricSUBSCRIBEReceived,
+ metricSUBACKSent,
+ metricSUBSCRIBEError,
+ metricSUBSCRIBEAuthError,
+ )
+ v.collectMQTT(mx, pms)
+}
+
+func (v *VerneMQ) collectUNSUBSCRIBE(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricUNSUBSCRIBEReceived,
+ metricUNSUBACKSent,
+ metricUNSUBSCRIBEError,
+ )
+ v.collectMQTT(mx, pms)
+}
+
+func (v *VerneMQ) collectPING(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricPINGREQReceived,
+ metricPINGRESPSent,
+ )
+ v.collectMQTT(mx, pms)
+}
+
+func (v *VerneMQ) collectAUTH(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricAUTHReceived,
+ metricAUTHSent,
+ )
+ v.collectMQTT(mx, pms)
+}
+
+func (v *VerneMQ) collectMQTTInvalidMsgSize(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByName(metricMQTTInvalidMsgSizeError)
+ v.collectMQTT(mx, pms)
+}
+
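+// collectSockets collects socket metrics; "open_sockets" is a derived gauge: opened minus closed sockets.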
+func collectSockets(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricSocketClose,
+ metricSocketCloseTimeout,
+ metricSocketError,
+ metricSocketOpen,
+ metricClientKeepaliveExpired,
+ )
+ collectNonMQTT(mx, pms)
+ mx["open_sockets"] = mx[metricSocketOpen] - mx[metricSocketClose]
+}
+
+func collectQueues(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricQueueInitializedFromStorage,
+ metricQueueMessageDrop,
+ metricQueueMessageExpired,
+ metricQueueMessageIn,
+ metricQueueMessageOut,
+ metricQueueMessageUnhandled,
+ metricQueueProcesses,
+ metricQueueSetup,
+ metricQueueTeardown,
+ )
+ collectNonMQTT(mx, pms)
+}
+
+func collectSubscriptions(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricRouterMatchesLocal,
+ metricRouterMatchesRemote,
+ metricRouterMemory,
+ metricRouterSubscriptions,
+ )
+ collectNonMQTT(mx, pms)
+}
+
+func (v *VerneMQ) collectErlangVM(mx map[string]float64, pms prometheus.Series) {
+ v.collectSchedulersUtilization(mx, pms)
+ pms = pms.FindByNames(
+ metricSystemContextSwitches,
+ metricSystemGCCount,
+ metricSystemIOIn,
+ metricSystemIOOut,
+ metricSystemProcessCount,
+ metricSystemReductions,
+ metricSystemRunQueue,
+ metricSystemUtilization,
+ metricSystemWordsReclaimedByGC,
+ metricVMMemoryProcesses,
+ metricVMMemorySystem,
+ )
+ collectNonMQTT(mx, pms)
+}
+
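+// collectSchedulersUtilization collects the per-scheduler utilization series and registers each newly
+// seen scheduler so it gets its own chart dimension.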
+func (v *VerneMQ) collectSchedulersUtilization(mx map[string]float64, pms prometheus.Series) {
+ for _, pm := range pms {
+ if isSchedulerUtilizationMetric(pm) {
+ mx[pm.Name()] += pm.Value
+ v.notifyNewScheduler(pm.Name())
+ }
+ }
+}
+
+func collectBandwidth(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricBytesReceived,
+ metricBytesSent,
+ )
+ collectNonMQTT(mx, pms)
+}
+
+func collectRetain(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricRetainMemory,
+ metricRetainMessages,
+ )
+ collectNonMQTT(mx, pms)
+}
+
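+// collectCluster collects cluster metrics; "netsplit_unresolved" is derived as detected minus resolved netsplits.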
+func collectCluster(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricClusterBytesDropped,
+ metricClusterBytesReceived,
+ metricClusterBytesSent,
+ metricNetSplitDetected,
+ metricNetSplitResolved,
+ )
+ collectNonMQTT(mx, pms)
+ mx["netsplit_unresolved"] = mx[metricNetSplitDetected] - mx[metricNetSplitResolved]
+}
+
+func collectUptime(mx map[string]float64, pms prometheus.Series) {
+ pms = pms.FindByName(metricSystemWallClock)
+ collectNonMQTT(mx, pms)
+}
+
+func collectNonMQTT(mx map[string]float64, pms prometheus.Series) {
+ for _, pm := range pms {
+ mx[pm.Name()] += pm.Value
+ }
+}
+
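+// collectMQTT aggregates only "mqtt_"-prefixed series that carry an "mqtt_version" label: per metric,
+// per version and, when a reason/return code label is present, per reason; new reasons are registered
+// so the corresponding charts gain dimensions dynamically.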
+func (v *VerneMQ) collectMQTT(mx map[string]float64, pms prometheus.Series) {
+ for _, pm := range pms {
+ if !isMQTTMetric(pm) {
+ continue
+ }
+ version := versionLabelValue(pm)
+ if version == "" {
+ continue
+ }
+
+ mx[pm.Name()] += pm.Value
+ mx[join(pm.Name(), "v", version)] += pm.Value
+
+ if reason := reasonCodeLabelValue(pm); reason != "" {
+ mx[join(pm.Name(), reason)] += pm.Value
+ mx[join(pm.Name(), "v", version, reason)] += pm.Value
+
+ v.notifyNewReason(pm.Name(), reason)
+ }
+ }
+}
+
+func isMQTTMetric(pm prometheus.SeriesSample) bool {
+ return strings.HasPrefix(pm.Name(), "mqtt_")
+}
+
+func isSchedulerUtilizationMetric(pm prometheus.SeriesSample) bool {
+ return strings.HasPrefix(pm.Name(), "system_utilization_scheduler_")
+}
+
+func reasonCodeLabelValue(pm prometheus.SeriesSample) string {
+ if v := pm.Labels.Get("reason_code"); v != "" {
+ return v
+ }
+ // "mqtt_connack_sent" v4 has return_code
+ return pm.Labels.Get("return_code")
+}
+
+func versionLabelValue(pm prometheus.SeriesSample) string {
+ return pm.Labels.Get("mqtt_version")
+}
+
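+// join concatenates its arguments with underscores, e.g. join("mqtt_puback_received", "success")
+// yields "mqtt_puback_received_success".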
+func join(a, b string, rest ...string) string {
+ v := a + "_" + b
+ switch len(rest) {
+ case 0:
+ return v
+ default:
+ return join(v, rest[0], rest[1:]...)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/vernemq/config_schema.json b/src/go/plugin/go.d/modules/vernemq/config_schema.json
new file mode 100644
index 000000000..092d7f417
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/config_schema.json
@@ -0,0 +1,183 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "VerneMQ collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the VerneMQ [metrics endpoint](https://docs.vernemq.com/monitoring/prometheus).",
+ "type": "string",
+ "default": "http://127.0.0.1:8888/metrics",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
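+	// dimensions are added at runtime, one per scheduler (see notifyNewScheduler)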
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/vernemq/init.go b/src/go/plugin/go.d/modules/vernemq/init.go
new file mode 100644
index 000000000..64ed3418c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/init.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vernemq
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (v *VerneMQ) validateConfig() error {
+ if v.URL == "" {
+ return errors.New("url is not set")
+ }
+ return nil
+}
+
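+// initPrometheusClient creates an HTTP client from the collector's client settings and wraps it
+// in a Prometheus scraper configured with the collector's request settings.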
+func (v *VerneMQ) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(v.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(client, v.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/vernemq/integrations/vernemq.md b/src/go/plugin/go.d/modules/vernemq/integrations/vernemq.md
new file mode 100644
index 000000000..f3b4c2877
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/integrations/vernemq.md
@@ -0,0 +1,332 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/vernemq/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/vernemq/metadata.yaml"
+sidebar_label: "VerneMQ"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Message Brokers"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# VerneMQ
+
+
+<img src="https://netdata.cloud/img/vernemq.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: vernemq
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors VerneMQ instances.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per VerneMQ instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| vernemq.sockets | open | sockets |
+| vernemq.socket_operations | open, close | sockets/s |
+| vernemq.client_keepalive_expired | closed | sockets/s |
+| vernemq.socket_close_timeout | closed | sockets/s |
+| vernemq.socket_errors | errors | errors/s |
+| vernemq.queue_processes | queue_processes | queue processes |
+| vernemq.queue_processes_operations | setup, teardown | events/s |
+| vernemq.queue_process_init_from_storage | queue_processes | queue processes/s |
+| vernemq.queue_messages | received, sent | messages/s |
+| vernemq.queue_undelivered_messages | dropped, expired, unhandled | messages/s |
+| vernemq.router_subscriptions | subscriptions | subscriptions |
+| vernemq.router_matched_subscriptions | local, remote | subscriptions/s |
+| vernemq.router_memory | used | KiB |
+| vernemq.average_scheduler_utilization | utilization | percentage |
+| vernemq.system_utilization_scheduler | a dimension per scheduler | percentage |
+| vernemq.system_processes | processes | processes |
+| vernemq.system_reductions | reductions | ops/s |
+| vernemq.system_context_switches | context_switches | ops/s |
+| vernemq.system_io | received, sent | kilobits/s |
+| vernemq.system_run_queue | ready | processes |
+| vernemq.system_gc_count | gc | ops/s |
+| vernemq.system_gc_words_reclaimed | words_reclaimed | ops/s |
+| vernemq.system_allocated_memory | processes, system | KiB |
+| vernemq.bandwidth | received, sent | kilobits/s |
+| vernemq.retain_messages | messages | messages |
+| vernemq.retain_memory | used | KiB |
+| vernemq.cluster_bandwidth | received, sent | kilobits/s |
+| vernemq.cluster_dropped | dropped | kilobits/s |
+| vernemq.netsplit_unresolved | unresolved | netsplits |
+| vernemq.netsplits | resolved, detected | netsplits/s |
+| vernemq.mqtt_auth | received, sent | packets/s |
+| vernemq.mqtt_auth_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_auth_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_connect | connect, connack | packets/s |
+| vernemq.mqtt_connack_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_disconnect | received, sent | packets/s |
+| vernemq.mqtt_disconnect_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_disconnect_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_subscribe | subscribe, suback | packets/s |
+| vernemq.mqtt_subscribe_error | failed | ops/s |
+| vernemq.mqtt_subscribe_auth_error | unauth | attempts/s |
+| vernemq.mqtt_unsubscribe | unsubscribe, unsuback | packets/s |
+| vernemq.mqtt_unsubscribe_error | failed | ops/s |
+| vernemq.mqtt_publish | received, sent | packets/s |
+| vernemq.mqtt_publish_errors | failed | ops/s |
+| vernemq.mqtt_publish_auth_errors | unauth | attempts/s |
+| vernemq.mqtt_puback | received, sent | packets/s |
+| vernemq.mqtt_puback_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_puback_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_puback_invalid_error | unexpected | messages/s |
+| vernemq.mqtt_pubrec | received, sent | packets/s |
+| vernemq.mqtt_pubrec_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubrec_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubrec_invalid_error | unexpected | messages/s |
+| vernemq.mqtt_pubrel | received, sent | packets/s |
+| vernemq.mqtt_pubrel_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubrel_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubcom | received, sent | packets/s |
+| vernemq.mqtt_pubcomp_received_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubcomp_sent_reason | a dimension per reason | packets/s |
+| vernemq.mqtt_pubcomp_invalid_error | unexpected | messages/s |
+| vernemq.mqtt_ping | pingreq, pingresp | packets/s |
+| vernemq.node_uptime | time | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ vernemq_socket_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.socket_errors | number of socket errors in the last minute |
+| [ vernemq_queue_message_drop ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of dropped messages due to full queues in the last minute |
+| [ vernemq_queue_message_expired ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of messages which expired before delivery in the last minute |
+| [ vernemq_queue_message_unhandled ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.queue_undelivered_messages | number of unhandled messages (connections with clean session=true) in the last minute |
+| [ vernemq_average_scheduler_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.average_scheduler_utilization | average scheduler utilization over the last 10 minutes |
+| [ vernemq_cluster_dropped ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.cluster_dropped | amount of traffic dropped during communication with the cluster nodes in the last minute |
+| [ vernemq_netsplits ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.netsplits | number of detected netsplits (split brain situation) in the last minute |
+| [ vernemq_mqtt_connack_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_connack_sent_reason | number of sent unsuccessful v3/v5 CONNACK packets in the last minute |
+| [ vernemq_mqtt_disconnect_received_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_received_reason | number of received not normal v5 DISCONNECT packets in the last minute |
+| [ vernemq_mqtt_disconnect_sent_reason_not_normal ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_disconnect_sent_reason | number of sent not normal v5 DISCONNECT packets in the last minute |
+| [ vernemq_mqtt_subscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_error | number of failed v3/v5 SUBSCRIBE operations in the last minute |
+| [ vernemq_mqtt_subscribe_auth_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_subscribe_auth_error | number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute |
+| [ vernemq_mqtt_unsubscribe_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_unsubscribe_error | number of failed v3/v5 UNSUBSCRIBE operations in the last minute |
+| [ vernemq_mqtt_publish_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_errors | number of failed v3/v5 PUBLISH operations in the last minute |
+| [ vernemq_mqtt_publish_auth_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_publish_auth_errors | number of unauthorized v3/v5 PUBLISH attempts in the last minute |
+| [ vernemq_mqtt_puback_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_received_reason | number of received unsuccessful v5 PUBACK packets in the last minute |
+| [ vernemq_mqtt_puback_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_sent_reason | number of sent unsuccessful v5 PUBACK packets in the last minute |
+| [ vernemq_mqtt_puback_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_puback_invalid_error | number of received unexpected v3/v5 PUBACK packets in the last minute |
+| [ vernemq_mqtt_pubrec_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_received_reason | number of received unsuccessful v5 PUBREC packets in the last minute |
+| [ vernemq_mqtt_pubrec_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_sent_reason | number of sent unsuccessful v5 PUBREC packets in the last minute |
+| [ vernemq_mqtt_pubrec_invalid_error ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrec_invalid_error | number of received unexpected v3 PUBREC packets in the last minute |
+| [ vernemq_mqtt_pubrel_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_received_reason | number of received unsuccessful v5 PUBREL packets in the last minute |
+| [ vernemq_mqtt_pubrel_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubrel_sent_reason | number of sent unsuccessful v5 PUBREL packets in the last minute |
+| [ vernemq_mqtt_pubcomp_received_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_received_reason | number of received unsuccessful v5 PUBCOMP packets in the last minute |
+| [ vernemq_mqtt_pubcomp_sent_reason_unsuccessful ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_sent_reason | number of sent unsuccessful v5 PUBCOMP packets in the last minute |
+| [ vernemq_mqtt_pubcomp_unexpected ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf) | vernemq.mqtt_pubcomp_invalid_error | number of received unexpected v3/v5 PUBCOMP packets in the last minute |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/vernemq.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/vernemq.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
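+For instance (a minimal sketch, assuming the standard single-file go.d layout), globally defined options sit at the top level of `go.d/vernemq.conf` and act as defaults for every job:
+
+```yaml
+# go.d/vernemq.conf - top-level options are defaults for all jobs below
+update_every: 5
+autodetection_retry: 60
+
+jobs:
+  - name: local
+    url: http://127.0.0.1:8888/metrics
+```
+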
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8888/metrics | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8888/metrics
+
+```
+</details>
+
+##### HTTP authentication
+
+Local instance with basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8888/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8888/metrics
+
+ - name: remote
+ url: http://203.0.113.10:8888/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `vernemq` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m vernemq
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `vernemq` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep vernemq
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep vernemq /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep vernemq
+```
+
+
diff --git a/src/go/plugin/go.d/modules/vernemq/metadata.yaml b/src/go/plugin/go.d/modules/vernemq/metadata.yaml
new file mode 100644
index 000000000..2ec25fb77
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/metadata.yaml
@@ -0,0 +1,670 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-vernemq
+ plugin_name: go.d.plugin
+ module_name: vernemq
+ monitored_instance:
+ name: VerneMQ
+ link: https://vernemq.com
+ icon_filename: vernemq.svg
+ categories:
+ - data-collection.message-brokers
+ keywords:
+ - vernemq
+ - message brokers
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors VerneMQ instances.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/vernemq.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8888/metrics
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8888/metrics
+ - name: HTTP authentication
+ description: Local instance with basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8888/metrics
+ username: username
+ password: password
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8888/metrics
+
+ - name: remote
+ url: http://203.0.113.10:8888/metrics
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: vernemq_socket_errors
+ metric: vernemq.socket_errors
+ info: number of socket errors in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_queue_message_drop
+ metric: vernemq.queue_undelivered_messages
+ info: number of dropped messages due to full queues in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_queue_message_expired
+ metric: vernemq.queue_undelivered_messages
+ info: number of messages which expired before delivery in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_queue_message_unhandled
+ metric: vernemq.queue_undelivered_messages
+ info: "number of unhandled messages (connections with clean session=true) in the last minute"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_average_scheduler_utilization
+ metric: vernemq.average_scheduler_utilization
+ info: average scheduler utilization over the last 10 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_cluster_dropped
+ metric: vernemq.cluster_dropped
+ info: amount of traffic dropped during communication with the cluster nodes in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_netsplits
+ metric: vernemq.netsplits
+ info: "number of detected netsplits (split brain situation) in the last minute"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_connack_sent_reason_unsuccessful
+ metric: vernemq.mqtt_connack_sent_reason
+ info: number of sent unsuccessful v3/v5 CONNACK packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_disconnect_received_reason_not_normal
+ metric: vernemq.mqtt_disconnect_received_reason
+ info: number of received not normal v5 DISCONNECT packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_disconnect_sent_reason_not_normal
+ metric: vernemq.mqtt_disconnect_sent_reason
+ info: number of sent not normal v5 DISCONNECT packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_subscribe_error
+ metric: vernemq.mqtt_subscribe_error
+ info: number of failed v3/v5 SUBSCRIBE operations in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_subscribe_auth_error
+ metric: vernemq.mqtt_subscribe_auth_error
+ info: number of unauthorized v3/v5 SUBSCRIBE attempts in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_unsubscribe_error
+ metric: vernemq.mqtt_unsubscribe_error
+ info: number of failed v3/v5 UNSUBSCRIBE operations in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_publish_errors
+ metric: vernemq.mqtt_publish_errors
+ info: number of failed v3/v5 PUBLISH operations in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_publish_auth_errors
+ metric: vernemq.mqtt_publish_auth_errors
+ info: number of unauthorized v3/v5 PUBLISH attempts in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_puback_received_reason_unsuccessful
+ metric: vernemq.mqtt_puback_received_reason
+ info: number of received unsuccessful v5 PUBACK packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_puback_sent_reason_unsuccessful
+ metric: vernemq.mqtt_puback_sent_reason
+ info: number of sent unsuccessful v5 PUBACK packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_puback_unexpected
+ metric: vernemq.mqtt_puback_invalid_error
+ info: number of received unexpected v3/v5 PUBACK packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_pubrec_received_reason_unsuccessful
+ metric: vernemq.mqtt_pubrec_received_reason
+ info: number of received unsuccessful v5 PUBREC packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_pubrec_sent_reason_unsuccessful
+ metric: vernemq.mqtt_pubrec_sent_reason
+ info: number of sent unsuccessful v5 PUBREC packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_pubrec_invalid_error
+ metric: vernemq.mqtt_pubrec_invalid_error
+ info: number of received unexpected v3 PUBREC packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_pubrel_received_reason_unsuccessful
+ metric: vernemq.mqtt_pubrel_received_reason
+ info: number of received unsuccessful v5 PUBREL packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_pubrel_sent_reason_unsuccessful
+ metric: vernemq.mqtt_pubrel_sent_reason
+ info: number of sent unsuccessful v5 PUBREL packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_pubcomp_received_reason_unsuccessful
+ metric: vernemq.mqtt_pubcomp_received_reason
+ info: number of received unsuccessful v5 PUBCOMP packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_pubcomp_sent_reason_unsuccessful
+ metric: vernemq.mqtt_pubcomp_sent_reason
+ info: number of sent unsuccessful v5 PUBCOMP packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ - name: vernemq_mqtt_pubcomp_unexpected
+ metric: vernemq.mqtt_pubcomp_invalid_error
+ info: number of received unexpected v3/v5 PUBCOMP packets in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vernemq.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: vernemq.sockets
+ description: Open Sockets
+ unit: sockets
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: vernemq.socket_operations
+ description: Socket Open and Close Events
+ unit: sockets/s
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: close
+ - name: vernemq.client_keepalive_expired
+ description: Closed Sockets due to Keepalive Time Expired
+ unit: sockets/s
+ chart_type: line
+ dimensions:
+ - name: closed
+ - name: vernemq.socket_close_timeout
+ description: Closed Sockets due to no CONNECT Frame On Time
+ unit: sockets/s
+ chart_type: line
+ dimensions:
+ - name: closed
+ - name: vernemq.socket_errors
+ description: Socket Errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: errors
+ - name: vernemq.queue_processes
+ description: Living Queues in an Online or an Offline State
+ unit: queue processes
+ chart_type: line
+ dimensions:
+ - name: queue_processes
+ - name: vernemq.queue_processes_operations
+ description: Queue Processes Setup and Teardown Events
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: setup
+ - name: teardown
+ - name: vernemq.queue_process_init_from_storage
+ description: Queue Processes Initialized from Offline Storage
+ unit: queue processes/s
+ chart_type: line
+ dimensions:
+ - name: queue_processes
+ - name: vernemq.queue_messages
+ description: Received and Sent PUBLISH Messages
+ unit: messages/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.queue_undelivered_messages
+ description: Undelivered PUBLISH Messages
+ unit: messages/s
+ chart_type: stacked
+ dimensions:
+ - name: dropped
+ - name: expired
+ - name: unhandled
+ - name: vernemq.router_subscriptions
+ description: Subscriptions in the Routing Table
+ unit: subscriptions
+ chart_type: line
+ dimensions:
+ - name: subscriptions
+ - name: vernemq.router_matched_subscriptions
+ description: Matched Subscriptions
+ unit: subscriptions/s
+ chart_type: line
+ dimensions:
+ - name: local
+ - name: remote
+ - name: vernemq.router_memory
+ description: Routing Table Memory Usage
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: vernemq.average_scheduler_utilization
+ description: Average Scheduler Utilization
+ unit: percentage
+ chart_type: area
+ dimensions:
+ - name: utilization
+ - name: vernemq.system_utilization_scheduler
+ description: Scheduler Utilization
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per scheduler
+ - name: vernemq.system_processes
+ description: Erlang Processes
+ unit: processes
+ chart_type: line
+ dimensions:
+ - name: processes
+ - name: vernemq.system_reductions
+ description: Reductions
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: reductions
+ - name: vernemq.system_context_switches
+ description: Context Switches
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: context_switches
+ - name: vernemq.system_io
+ description: Received and Sent Traffic through Ports
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.system_run_queue
+ description: Processes that are Ready to Run on All Run-Queues
+ unit: processes
+ chart_type: line
+ dimensions:
+ - name: ready
+ - name: vernemq.system_gc_count
+ description: GC Count
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: gc
+ - name: vernemq.system_gc_words_reclaimed
+ description: GC Words Reclaimed
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: words_reclaimed
+ - name: vernemq.system_allocated_memory
+ description: Memory Allocated by the Erlang Processes and by the Emulator
+ unit: KiB
+ chart_type: stacked
+ dimensions:
+ - name: processes
+ - name: system
+ - name: vernemq.bandwidth
+ description: Bandwidth
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.retain_messages
+ description: Stored Retained Messages
+ unit: messages
+ chart_type: line
+ dimensions:
+ - name: messages
+ - name: vernemq.retain_memory
+ description: Stored Retained Messages Memory Usage
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: vernemq.cluster_bandwidth
+ description: Communication with Other Cluster Nodes
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.cluster_dropped
+ description: Traffic Dropped During Communication with Other Cluster Nodes
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: dropped
+ - name: vernemq.netsplit_unresolved
+ description: Unresolved Netsplits
+ unit: netsplits
+ chart_type: line
+ dimensions:
+ - name: unresolved
+ - name: vernemq.netsplits
+ description: Netsplits
+ unit: netsplits/s
+ chart_type: stacked
+ dimensions:
+ - name: resolved
+ - name: detected
+ - name: vernemq.mqtt_auth
+ description: v5 AUTH
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.mqtt_auth_received_reason
+ description: v5 AUTH Received by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_auth_sent_reason
+ description: v5 AUTH Sent by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_connect
+ description: v3/v5 CONNECT and CONNACK
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: connect
+ - name: connack
+ - name: vernemq.mqtt_connack_sent_reason
+ description: v3/v5 CONNACK Sent by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_disconnect
+ description: v3/v5 DISCONNECT
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.mqtt_disconnect_received_reason
+ description: v5 DISCONNECT Received by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_disconnect_sent_reason
+ description: v5 DISCONNECT Sent by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_subscribe
+ description: v3/v5 SUBSCRIBE and SUBACK
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: subscribe
+ - name: suback
+ - name: vernemq.mqtt_subscribe_error
+ description: v3/v5 Failed SUBSCRIBE Operations due to a Netsplit
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: failed
+ - name: vernemq.mqtt_subscribe_auth_error
+ description: v3/v5 Unauthorized SUBSCRIBE Attempts
+ unit: attempts/s
+ chart_type: line
+ dimensions:
+ - name: unauth
+ - name: vernemq.mqtt_unsubscribe
+ description: v3/v5 UNSUBSCRIBE and UNSUBACK
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: unsubscribe
+ - name: unsuback
+ - name: vernemq.mqtt_unsubscribe_error
+ description: v3/v5 Failed UNSUBSCRIBE Operations due to a Netsplit
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: mqtt_unsubscribe_error
+ - name: vernemq.mqtt_publish
+ description: v3/v5 QoS 0,1,2 PUBLISH
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.mqtt_publish_errors
+ description: v3/v5 Failed PUBLISH Operations due to a Netsplit
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: failed
+ - name: vernemq.mqtt_publish_auth_errors
+ description: v3/v5 Unauthorized PUBLISH Attempts
+ unit: attempts/s
+ chart_type: area
+ dimensions:
+ - name: unauth
+ - name: vernemq.mqtt_puback
+ description: v3/v5 QoS 1 PUBACK
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.mqtt_puback_received_reason
+ description: v5 PUBACK QoS 1 Received by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_puback_sent_reason
+ description: v5 PUBACK QoS 1 Sent by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_puback_invalid_error
+ description: v3/v5 PUBACK QoS 1 Received Unexpected Messages
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: unexpected
+ - name: vernemq.mqtt_pubrec
+ description: v3/v5 PUBREC QoS 2
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.mqtt_pubrec_received_reason
+ description: v5 PUBREC QoS 2 Received by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_pubrec_sent_reason
+ description: v5 PUBREC QoS 2 Sent by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_pubrec_invalid_error
+ description: v3 PUBREC QoS 2 Received Unexpected Messages
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: unexpected
+ - name: vernemq.mqtt_pubrel
+ description: v3/v5 PUBREL QoS 2
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.mqtt_pubrel_received_reason
+ description: v5 PUBREL QoS 2 Received by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_pubrel_sent_reason
+ description: v5 PUBREL QoS 2 Sent by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_pubcomp
+ description: v3/v5 PUBCOMP QoS 2
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vernemq.mqtt_pubcomp_received_reason
+ description: v5 PUBCOMP QoS 2 Received by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_pubcomp_sent_reason
+ description: v5 PUBCOMP QoS 2 Sent by Reason
+ unit: packets/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per reason
+ - name: vernemq.mqtt_pubcomp_invalid_error
+ description: v3/v5 PUBCOMP QoS 2 Received Unexpected Messages
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: unexpected
+ - name: vernemq.mqtt_ping
+ description: v3/v5 PING
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: pingreq
+ - name: pingresp
+ - name: vernemq.node_uptime
+ description: Node Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
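Editor's note: the metadata above documents a single required option, url, defaulting to http://127.0.0.1:8888/metrics. As a quick sanity check outside Netdata, a minimal Go sketch can confirm that the VerneMQ metrics page is reachable and serves Prometheus-format text. The URL below is the documented default and is an assumption; adjust it for your broker.

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// Assumes the documented default endpoint; change as needed.
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get("http://127.0.0.1:8888/metrics")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("status: %s, %d bytes of Prometheus-format metrics\n", resp.Status, len(body))
}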
diff --git a/src/go/plugin/go.d/modules/vernemq/metrics.go b/src/go/plugin/go.d/modules/vernemq/metrics.go
new file mode 100644
index 000000000..863cc6355
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/metrics.go
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vernemq
+
+// Source Code Metrics:
+// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_metrics.erl
+// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_metrics.hrl
+
+// Source Code FSM:
+// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_mqtt_fsm.erl
+// - https://github.com/vernemq/vernemq/blob/master/apps/vmq_server/src/vmq_mqtt5_fsm.erl
+
+// MQTT Packet Types:
+// - v4: http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/errata01/os/mqtt-v3.1.1-errata01-os-complete.html#_Toc442180834
+// - v5: https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901019
+
+// Erlang VM:
+// - http://erlang.org/documentation/doc-5.7.1/erts-5.7.1/doc/html/erlang.html
+
+// Not used metrics (https://docs.vernemq.com/monitoring/introduction):
+// - "mqtt_connack_accepted_sent" // v4, not populated, "mqtt_connack_sent" used instead
+// - "mqtt_connack_unacceptable_protocol_sent" // v4, not populated, "mqtt_connack_sent" used instead
+// - "mqtt_connack_identifier_rejected_sent" // v4, not populated, "mqtt_connack_sent" used instead
+// - "mqtt_connack_server_unavailable_sent" // v4, not populated, "mqtt_connack_sent" used instead
+// - "mqtt_connack_bad_credentials_sent" // v4, not populated, "mqtt_connack_sent" used instead
+// - "mqtt_connack_not_authorized_sent" // v4, not populated, "mqtt_connack_sent" used instead
+// - "system_exact_reductions"
+// - "system_runtime"
+// - "vm_memory_atom"
+// - "vm_memory_atom_used"
+// - "vm_memory_binary"
+// - "vm_memory_code"
+// - "vm_memory_ets"
+// - "vm_memory_processes_used"
+// - "vm_memory_total"
+
+// -----------------------------------------------MQTT------------------------------------------------------------------
+const (
+ // AUTH
+ metricAUTHReceived = "mqtt_auth_received" // v5 has 'reason_code' label
+ metricAUTHSent = "mqtt_auth_sent" // v5 has 'reason_code' label
+
+ // CONNECT
+ metricCONNECTReceived = "mqtt_connect_received" // v4, v5
+ metricCONNACKSent = "mqtt_connack_sent" // v4 has 'return_code' label, v5 has 'reason_code'
+
+ // SUBSCRIBE
+ metricSUBSCRIBEReceived = "mqtt_subscribe_received" // v4, v5
+ metricSUBACKSent = "mqtt_suback_sent" // v4, v5
+ metricSUBSCRIBEError = "mqtt_subscribe_error" // v4, v5
+ metricSUBSCRIBEAuthError = "mqtt_subscribe_auth_error" // v4, v5
+
+ // UNSUBSCRIBE
+ metricUNSUBSCRIBEReceived = "mqtt_unsubscribe_received" // v4, v5
+ metricUNSUBACKSent = "mqtt_unsuback_sent" // v4, v5
+ metricUNSUBSCRIBEError = "mqtt_unsubscribe_error" // v4, v5
+
+ // PUBLISH
+ metricPUBSLISHReceived = "mqtt_publish_received" // v4, v5
+ metricPUBSLIHSent = "mqtt_publish_sent" // v4, v5
+ metricPUBLISHError = "mqtt_publish_error" // v4, v5
+ metricPUBLISHAuthError = "mqtt_publish_auth_error" // v4, v5
+
+ // Publish acknowledgment (QoS 1)
+ metricPUBACKReceived = "mqtt_puback_received" // v4, v5 has 'reason_code' label
+ metricPUBACKSent = "mqtt_puback_sent" // v4, v5 has 'reason_code' label
+ metricPUBACKInvalid = "mqtt_puback_invalid_error" // v4, v5
+
+ // Publish received (QoS 2 delivery part 1)
+ metricPUBRECReceived = "mqtt_pubrec_received" // v4, v5 has 'reason_code' label
+ metricPUBRECSent = "mqtt_pubrec_sent" // v4, v5 has 'reason_code' label
+ metricPUBRECInvalid = "mqtt_pubrec_invalid_error" // v4
+
+ // Publish release (QoS 2 delivery part 2)
+ metricPUBRELReceived = "mqtt_pubrel_received" // v4, v5 has 'reason_code' label
+ metricPUBRELSent = "mqtt_pubrel_sent" // v4, v5 has 'reason_code' label
+
+ // Publish complete (QoS 2 delivery part 3)
+ metricPUBCOMPReceived = "mqtt_pubcomp_received" // v4, v5 has 'reason_code' label
+ metricPUBCOMPSent = "mqtt_pubcomp_sent" // v4, v5 has 'reason_code' label
+ metricPUNCOMPInvalid = "mqtt_pubcomp_invalid_error" // v4, v5
+
+ // PING
+ metricPINGREQReceived = "mqtt_pingreq_received" // v4, v5
+ metricPINGRESPSent = "mqtt_pingresp_sent" // v4, v5
+
+ // DISCONNECT
+ metricDISCONNECTReceived = "mqtt_disconnect_received" // v4, v5 has 'reason_code' label
+ metricDISCONNECTSent = "mqtt_disconnect_sent" // v5 has 'reason_code' label
+
+ // Misc
+ metricMQTTInvalidMsgSizeError = "mqtt_invalid_msg_size_error" // v4, v5
+)
+
+const (
+ // Sockets
+ metricSocketOpen = "socket_open"
+ metricSocketClose = "socket_close"
+ metricSocketError = "socket_error"
+ metricSocketCloseTimeout = "socket_close_timeout"
+ metricClientKeepaliveExpired = "client_keepalive_expired" // v4, v5
+
+ // Queues
+ metricQueueProcesses = "queue_processes"
+ metricQueueSetup = "queue_setup"
+ metricQueueTeardown = "queue_teardown"
+ metricQueueMessageIn = "queue_message_in"
+ metricQueueMessageOut = "queue_message_out"
+ metricQueueMessageDrop = "queue_message_drop"
+ metricQueueMessageExpired = "queue_message_expired"
+ metricQueueMessageUnhandled = "queue_message_unhandled"
+ metricQueueInitializedFromStorage = "queue_initialized_from_storage"
+
+ // Subscriptions
+ metricRouterMatchesLocal = "router_matches_local"
+ metricRouterMatchesRemote = "router_matches_remote"
+ metricRouterMemory = "router_memory"
+ metricRouterSubscriptions = "router_subscriptions"
+
+ // Erlang VM
+ metricSystemUtilization = "system_utilization"
+ metricSystemProcessCount = "system_process_count"
+ metricSystemReductions = "system_reductions"
+ metricSystemContextSwitches = "system_context_switches"
+ metricSystemIOIn = "system_io_in"
+ metricSystemIOOut = "system_io_out"
+ metricSystemRunQueue = "system_run_queue"
+ metricSystemGCCount = "system_gc_count"
+ metricSystemWordsReclaimedByGC = "system_words_reclaimed_by_gc"
+ metricVMMemoryProcesses = "vm_memory_processes"
+ metricVMMemorySystem = "vm_memory_system"
+
+ // Bandwidth
+ metricBytesReceived = "bytes_received"
+ metricBytesSent = "bytes_sent"
+
+ // Retain
+ metricRetainMemory = "retain_memory"
+ metricRetainMessages = "retain_messages"
+
+ // Cluster
+ metricClusterBytesDropped = "cluster_bytes_dropped"
+ metricClusterBytesReceived = "cluster_bytes_received"
+ metricClusterBytesSent = "cluster_bytes_sent"
+ metricNetSplitDetected = "netsplit_detected"
+ metricNetSplitResolved = "netsplit_resolved"
+
+ // Uptime
+ metricSystemWallClock = "system_wallclock"
+)
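Editor's note: the constants above are the raw VerneMQ metric names, and many of the MQTT counters are exported once per protocol version via an mqtt_version label (visible in the testdata that follows). The sketch below shows one simple way to keep the versions apart in a flat map by suffixing the metric name with the version; it is an illustration only, not necessarily the key scheme used by the module's own collect code.

package main

import "fmt"

// versionedKey appends the mqtt_version label value to a metric name,
// so "mqtt_connect_received" with version "5" becomes "mqtt_connect_received_v5".
func versionedKey(name, mqttVersion string) string {
	if mqttVersion == "" {
		return name // node-level metrics such as socket_open carry no version label
	}
	return fmt.Sprintf("%s_v%s", name, mqttVersion)
}

func main() {
	fmt.Println(versionedKey("mqtt_connect_received", "4")) // mqtt_connect_received_v4
	fmt.Println(versionedKey("mqtt_auth_received", "5"))    // mqtt_auth_received_v5
	fmt.Println(versionedKey("socket_open", ""))            // socket_open
}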
diff --git a/src/go/plugin/go.d/modules/vernemq/testdata/config.json b/src/go/plugin/go.d/modules/vernemq/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/vernemq/testdata/config.yaml b/src/go/plugin/go.d/modules/vernemq/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt b/src/go/plugin/go.d/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt
new file mode 100644
index 000000000..2e98a3e94
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/testdata/metrics-v1.10.1-mqtt5.txt
@@ -0,0 +1,416 @@
+# HELP socket_open The number of times an MQTT socket has been opened.
+# TYPE socket_open counter
+socket_open{node="VerneMQ@172.17.0.2"} 338956
+# HELP socket_close The number of times an MQTT socket has been closed.
+# TYPE socket_close counter
+socket_close{node="VerneMQ@172.17.0.2"} 338956
+# HELP socket_close_timeout The number of times VerneMQ closed an MQTT socket due to no CONNECT frame has been received on time.
+# TYPE socket_close_timeout counter
+socket_close_timeout{node="VerneMQ@172.17.0.2"} 0
+# HELP socket_error The total number of socket errors that have occurred.
+# TYPE socket_error counter
+socket_error{node="VerneMQ@172.17.0.2"} 0
+# HELP bytes_received The total number of bytes received.
+# TYPE bytes_received counter
+bytes_received{node="VerneMQ@172.17.0.2"} 36796908
+# HELP bytes_sent The total number of bytes sent.
+# TYPE bytes_sent counter
+bytes_sent{node="VerneMQ@172.17.0.2"} 23361693
+# HELP mqtt_connect_received The number of CONNECT packets received.
+# TYPE mqtt_connect_received counter
+mqtt_connect_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 338956
+# HELP mqtt_publish_received The number of PUBLISH packets received.
+# TYPE mqtt_publish_received counter
+mqtt_publish_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 537088
+# HELP mqtt_puback_received The number of PUBACK packets received.
+# TYPE mqtt_puback_received counter
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 525694
+# HELP mqtt_pubrec_received The number of PUBREC packets received.
+# TYPE mqtt_pubrec_received counter
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubrel_received The number of PUBREL packets received.
+# TYPE mqtt_pubrel_received counter
+mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubcomp_received The number of PUBCOMP packets received.
+# TYPE mqtt_pubcomp_received counter
+mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_subscribe_received The number of SUBSCRIBE packets received.
+# TYPE mqtt_subscribe_received counter
+mqtt_subscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 122
+# HELP mqtt_unsubscribe_received The number of UNSUBSCRIBE packets received.
+# TYPE mqtt_unsubscribe_received counter
+mqtt_unsubscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 108
+# HELP mqtt_pingreq_received The number of PINGREQ packets received.
+# TYPE mqtt_pingreq_received counter
+mqtt_pingreq_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 205
+# HELP mqtt_disconnect_received The number of DISCONNECT packets received.
+# TYPE mqtt_disconnect_received counter
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="4"} 107
+# HELP mqtt_connack_accepted_sent The number of times a connection has been accepted.
+# TYPE mqtt_connack_accepted_sent counter
+mqtt_connack_accepted_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_unacceptable_protocol_sent The number of times the broker is not able to support the requested protocol.
+# TYPE mqtt_connack_unacceptable_protocol_sent counter
+mqtt_connack_unacceptable_protocol_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_identifier_rejected_sent The number of times a client was rejected due to a unacceptable identifier.
+# TYPE mqtt_connack_identifier_rejected_sent counter
+mqtt_connack_identifier_rejected_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_server_unavailable_sent The number of times a client was rejected due the the broker being unavailable.
+# TYPE mqtt_connack_server_unavailable_sent counter
+mqtt_connack_server_unavailable_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_bad_credentials_sent The number of times a client sent bad credentials.
+# TYPE mqtt_connack_bad_credentials_sent counter
+mqtt_connack_bad_credentials_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_connack_not_authorized_sent The number of times a client was rejected due to insufficient authorization.
+# TYPE mqtt_connack_not_authorized_sent counter
+mqtt_connack_not_authorized_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_publish_sent The number of PUBLISH packets sent.
+# TYPE mqtt_publish_sent counter
+mqtt_publish_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 525721
+# HELP mqtt_puback_sent The number of PUBACK packets sent.
+# TYPE mqtt_puback_sent counter
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 537068
+# HELP mqtt_pubrec_sent The number of PUBREC packets sent.
+# TYPE mqtt_pubrec_sent counter
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubrel_sent The number of PUBREL packets sent.
+# TYPE mqtt_pubrel_sent counter
+mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubcomp_sent The number of PUBCOMP packets sent.
+# TYPE mqtt_pubcomp_sent counter
+mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_suback_sent The number of SUBACK packets sent.
+# TYPE mqtt_suback_sent counter
+mqtt_suback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 122
+# HELP mqtt_unsuback_sent The number of UNSUBACK packets sent.
+# TYPE mqtt_unsuback_sent counter
+mqtt_unsuback_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 108
+# HELP mqtt_pingresp_sent The number of PINGRESP packets sent.
+# TYPE mqtt_pingresp_sent counter
+mqtt_pingresp_sent{node="VerneMQ@172.17.0.2",mqtt_version="4"} 205
+# HELP mqtt_publish_auth_error The number of unauthorized publish attempts.
+# TYPE mqtt_publish_auth_error counter
+mqtt_publish_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_subscribe_auth_error The number of unauthorized subscription attempts.
+# TYPE mqtt_subscribe_auth_error counter
+mqtt_subscribe_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_invalid_msg_size_error The number of packages exceeding the maximum allowed size.
+# TYPE mqtt_invalid_msg_size_error counter
+mqtt_invalid_msg_size_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_puback_invalid_error The number of unexpected PUBACK messages received.
+# TYPE mqtt_puback_invalid_error counter
+mqtt_puback_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubrec_invalid_error The number of unexpected PUBREC messages received.
+# TYPE mqtt_pubrec_invalid_error counter
+mqtt_pubrec_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_pubcomp_invalid_error The number of unexpected PUBCOMP messages received.
+# TYPE mqtt_pubcomp_invalid_error counter
+mqtt_pubcomp_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_publish_error The number of times a PUBLISH operation failed due to a netsplit.
+# TYPE mqtt_publish_error counter
+mqtt_publish_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_subscribe_error The number of times a SUBSCRIBE operation failed due to a netsplit.
+# TYPE mqtt_subscribe_error counter
+mqtt_subscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP mqtt_unsubscribe_error The number of times an UNSUBSCRIBE operation failed due to a netsplit.
+# TYPE mqtt_unsubscribe_error counter
+mqtt_unsubscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="4"} 0
+# HELP client_keepalive_expired The number of clients which failed to communicate within the keepalive time period.
+# TYPE client_keepalive_expired counter
+client_keepalive_expired{node="VerneMQ@172.17.0.2",mqtt_version="4"} 1
+mqtt_connect_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_invalid_msg_size_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_pingreq_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_pingresp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_puback_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_pubcomp_invalid_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_publish_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_publish_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_publish_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_publish_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_suback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_subscribe_auth_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_subscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_subscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_unsuback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_unsubscribe_error{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+mqtt_unsubscribe_received{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+client_keepalive_expired{node="VerneMQ@172.17.0.2",mqtt_version="5"} 0
+# HELP queue_setup The number of times a MQTT queue process has been started.
+# TYPE queue_setup counter
+queue_setup{node="VerneMQ@172.17.0.2"} 338948
+# HELP queue_initialized_from_storage The number of times a MQTT queue process has been initialized from offline storage.
+# TYPE queue_initialized_from_storage counter
+queue_initialized_from_storage{node="VerneMQ@172.17.0.2"} 0
+# HELP queue_teardown The number of times a MQTT queue process has been terminated.
+# TYPE queue_teardown counter
+queue_teardown{node="VerneMQ@172.17.0.2"} 338948
+# HELP queue_message_drop The number of messages dropped due to full queues.
+# TYPE queue_message_drop counter
+queue_message_drop{node="VerneMQ@172.17.0.2"} 0
+# HELP queue_message_expired The number of messages which expired before delivery.
+# TYPE queue_message_expired counter
+queue_message_expired{node="VerneMQ@172.17.0.2"} 0
+# HELP queue_message_unhandled The number of unhandled messages when connecting with clean session=true.
+# TYPE queue_message_unhandled counter
+queue_message_unhandled{node="VerneMQ@172.17.0.2"} 1
+# HELP queue_message_in The number of PUBLISH packets received by MQTT queue processes.
+# TYPE queue_message_in counter
+queue_message_in{node="VerneMQ@172.17.0.2"} 525722
+# HELP queue_message_out The number of PUBLISH packets sent from MQTT queue processes.
+# TYPE queue_message_out counter
+queue_message_out{node="VerneMQ@172.17.0.2"} 525721
+# HELP client_expired Not in use (deprecated)
+# TYPE client_expired counter
+client_expired{node="VerneMQ@172.17.0.2"} 0
+# HELP cluster_bytes_received The number of bytes received from other cluster nodes.
+# TYPE cluster_bytes_received counter
+cluster_bytes_received{node="VerneMQ@172.17.0.2"} 0
+# HELP cluster_bytes_sent The number of bytes send to other cluster nodes.
+# TYPE cluster_bytes_sent counter
+cluster_bytes_sent{node="VerneMQ@172.17.0.2"} 0
+# HELP cluster_bytes_dropped The number of bytes dropped while sending data to other cluster nodes.
+# TYPE cluster_bytes_dropped counter
+cluster_bytes_dropped{node="VerneMQ@172.17.0.2"} 0
+# HELP router_matches_local The number of matched local subscriptions.
+# TYPE router_matches_local counter
+router_matches_local{node="VerneMQ@172.17.0.2"} 525722
+# HELP router_matches_remote The number of matched remote subscriptions.
+# TYPE router_matches_remote counter
+router_matches_remote{node="VerneMQ@172.17.0.2"} 0
+# HELP mqtt_connack_sent The number of CONNACK packets sent.
+# TYPE mqtt_connack_sent counter
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="success"} 338948
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="unsupported_protocol_version"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="client_identifier_not_valid"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="server_unavailable"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="bad_username_or_password"} 4
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="4",return_code="not_authorized"} 4
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="normal_disconnect"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="disconnect_with_will_msg"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="receive_max_exceeded"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_alias_invalid"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="message_rate_too_high"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="administrative_action"} 0
+mqtt_disconnect_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+# HELP mqtt_disconnect_sent The number of DISCONNECT packets sent.
+# TYPE mqtt_disconnect_sent counter
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="normal_disconnect"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_busy"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_shutting_down"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="keep_alive_timeout"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="session_taken_over"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_filter_invalid"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="receive_max_exceeded"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_alias_invalid"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="message_rate_too_high"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="administrative_action"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="retain_not_supported"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="qos_not_supported"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="use_another_server"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_moved"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="shared_subs_not_supported"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="connection_rate_exceeded"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="max_connect_time"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="subscription_ids_not_supported"} 0
+mqtt_disconnect_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="wildcard_subs_not_supported"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="malformed_packet"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="protocol_error"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unsupported_protocol_version"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="client_identifier_not_valid"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="bad_username_or_password"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_unavailable"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_busy"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="banned"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="bad_authentication_method"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_too_large"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="retain_not_supported"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="qos_not_supported"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="use_another_server"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="server_moved"} 0
+mqtt_connack_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="connection_rate_exceeded"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_puback_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_puback_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_pubrec_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="no_matching_subscribers"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="unspecified_error"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="impl_specific_error"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="not_authorized"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="topic_name_invalid"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_in_use"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="quota_exceeded"} 0
+mqtt_pubrec_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="payload_format_invalid"} 0
+mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubrel_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
+mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubrel_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
+mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubcomp_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
+mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_pubcomp_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="packet_id_not_found"} 0
+# HELP mqtt_auth_sent The number of AUTH packets sent.
+# TYPE mqtt_auth_sent counter
+mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="continue_authentication"} 0
+mqtt_auth_sent{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="reauthenticate"} 0
+# HELP mqtt_auth_received The number of AUTH packets received.
+# TYPE mqtt_auth_received counter
+mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="success"} 0
+mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="continue_authentication"} 0
+mqtt_auth_received{node="VerneMQ@172.17.0.2",mqtt_version="5",reason_code="reauthenticate"} 0
+# HELP queue_processes The number of MQTT queue processes.
+# TYPE queue_processes gauge
+queue_processes{node="VerneMQ@172.17.0.2"} 0
+# HELP retain_memory The number of bytes used for storing retained messages.
+# TYPE retain_memory gauge
+retain_memory{node="VerneMQ@172.17.0.2"} 11344
+# HELP retain_messages The number of currently stored retained messages.
+# TYPE retain_messages gauge
+retain_messages{node="VerneMQ@172.17.0.2"} 0
+# HELP router_memory The number of bytes used by the routing table.
+# TYPE router_memory gauge
+router_memory{node="VerneMQ@172.17.0.2"} 12752
+# HELP router_subscriptions The number of subscriptions in the routing table.
+# TYPE router_subscriptions gauge
+router_subscriptions{node="VerneMQ@172.17.0.2"} 0
+# HELP netsplit_resolved The number of resolved netsplits.
+# TYPE netsplit_resolved counter
+netsplit_resolved{node="VerneMQ@172.17.0.2"} 0
+# HELP netsplit_detected The number of detected netsplits.
+# TYPE netsplit_detected counter
+netsplit_detected{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_8 Scheduler 8 utilization (percentage)
+# TYPE system_utilization_scheduler_8 gauge
+system_utilization_scheduler_8{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_7 Scheduler 7 utilization (percentage)
+# TYPE system_utilization_scheduler_7 gauge
+system_utilization_scheduler_7{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_6 Scheduler 6 utilization (percentage)
+# TYPE system_utilization_scheduler_6 gauge
+system_utilization_scheduler_6{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_5 Scheduler 5 utilization (percentage)
+# TYPE system_utilization_scheduler_5 gauge
+system_utilization_scheduler_5{node="VerneMQ@172.17.0.2"} 0
+# HELP system_utilization_scheduler_4 Scheduler 4 utilization (percentage)
+# TYPE system_utilization_scheduler_4 gauge
+system_utilization_scheduler_4{node="VerneMQ@172.17.0.2"} 19
+# HELP system_utilization_scheduler_3 Scheduler 3 utilization (percentage)
+# TYPE system_utilization_scheduler_3 gauge
+system_utilization_scheduler_3{node="VerneMQ@172.17.0.2"} 14
+# HELP system_utilization_scheduler_2 Scheduler 2 utilization (percentage)
+# TYPE system_utilization_scheduler_2 gauge
+system_utilization_scheduler_2{node="VerneMQ@172.17.0.2"} 8
+# HELP system_utilization_scheduler_1 Scheduler 1 utilization (percentage)
+# TYPE system_utilization_scheduler_1 gauge
+system_utilization_scheduler_1{node="VerneMQ@172.17.0.2"} 34
+# HELP system_utilization The average system (scheduler) utilization (percentage).
+# TYPE system_utilization gauge
+system_utilization{node="VerneMQ@172.17.0.2"} 9
+# HELP vm_memory_ets The amount of memory allocated for ETS tables.
+# TYPE vm_memory_ets gauge
+vm_memory_ets{node="VerneMQ@172.17.0.2"} 6065944
+# HELP vm_memory_code The amount of memory allocated for code.
+# TYPE vm_memory_code gauge
+vm_memory_code{node="VerneMQ@172.17.0.2"} 11372082
+# HELP vm_memory_binary The amount of memory allocated for binaries.
+# TYPE vm_memory_binary gauge
+vm_memory_binary{node="VerneMQ@172.17.0.2"} 1293672
+# HELP vm_memory_atom_used The amount of memory used by atoms.
+# TYPE vm_memory_atom_used gauge
+vm_memory_atom_used{node="VerneMQ@172.17.0.2"} 755998
+# HELP vm_memory_atom The amount of memory allocated for atoms.
+# TYPE vm_memory_atom gauge
+vm_memory_atom{node="VerneMQ@172.17.0.2"} 768953
+# HELP vm_memory_system The amount of memory allocated for the emulator.
+# TYPE vm_memory_system gauge
+vm_memory_system{node="VerneMQ@172.17.0.2"} 27051848
+# HELP vm_memory_processes_used The amount of memory used by processes.
+# TYPE vm_memory_processes_used gauge
+vm_memory_processes_used{node="VerneMQ@172.17.0.2"} 8671232
+# HELP vm_memory_processes The amount of memory allocated for processes.
+# TYPE vm_memory_processes gauge
+vm_memory_processes{node="VerneMQ@172.17.0.2"} 8673288
+# HELP vm_memory_total The total amount of memory allocated.
+# TYPE vm_memory_total gauge
+vm_memory_total{node="VerneMQ@172.17.0.2"} 35725136
+# HELP system_process_count The number of Erlang processes.
+# TYPE system_process_count gauge
+system_process_count{node="VerneMQ@172.17.0.2"} 329
+# HELP system_wallclock The number of milli-seconds passed since the node was started.
+# TYPE system_wallclock counter
+system_wallclock{node="VerneMQ@172.17.0.2"} 163457858
+# HELP system_runtime The sum of the runtime for all threads in the Erlang runtime system.
+# TYPE system_runtime counter
+system_runtime{node="VerneMQ@172.17.0.2"} 1775355
+# HELP system_run_queue The total number of processes and ports ready to run on all run-queues.
+# TYPE system_run_queue gauge
+system_run_queue{node="VerneMQ@172.17.0.2"} 0
+# HELP system_reductions The number of reductions performed in the VM since the node was started.
+# TYPE system_reductions counter
+system_reductions{node="VerneMQ@172.17.0.2"} 3857458067
+# HELP system_io_out The total number of bytes sent through ports.
+# TYPE system_io_out counter
+system_io_out{node="VerneMQ@172.17.0.2"} 961001488
+# HELP system_io_in The total number of bytes received through ports.
+# TYPE system_io_in counter
+system_io_in{node="VerneMQ@172.17.0.2"} 68998296
+# HELP system_words_reclaimed_by_gc The number of words reclaimed by the garbage collector.
+# TYPE system_words_reclaimed_by_gc counter
+system_words_reclaimed_by_gc{node="VerneMQ@172.17.0.2"} 7158470019
+# HELP system_gc_count The number of garbage collections performed.
+# TYPE system_gc_count counter
+system_gc_count{node="VerneMQ@172.17.0.2"} 12189976
+# HELP system_exact_reductions The exact number of reductions performed.
+# TYPE system_exact_reductions counter
+system_exact_reductions{node="VerneMQ@172.17.0.2"} 3854024620
+# HELP system_context_switches The total number of context switches.
+# TYPE system_context_switches counter
+system_context_switches{node="VerneMQ@172.17.0.2"} 39088198 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/vernemq/testdata/non_vernemq.txt b/src/go/plugin/go.d/modules/vernemq/testdata/non_vernemq.txt
new file mode 100644
index 000000000..f5f0ae082
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/testdata/non_vernemq.txt
@@ -0,0 +1,27 @@
+# HELP wmi_os_process_memory_limix_bytes OperatingSystem.MaxProcessMemorySize
+# TYPE wmi_os_process_memory_limix_bytes gauge
+wmi_os_process_memory_limix_bytes 1.40737488224256e+14
+# HELP wmi_os_processes OperatingSystem.NumberOfProcesses
+# TYPE wmi_os_processes gauge
+wmi_os_processes 124
+# HELP wmi_os_processes_limit OperatingSystem.MaxNumberOfProcesses
+# TYPE wmi_os_processes_limit gauge
+wmi_os_processes_limit 4.294967295e+09
+# HELP wmi_os_time OperatingSystem.LocalDateTime
+# TYPE wmi_os_time gauge
+wmi_os_time 1.57804974e+09
+# HELP wmi_os_timezone OperatingSystem.LocalDateTime
+# TYPE wmi_os_timezone gauge
+wmi_os_timezone{timezone="MSK"} 1
+# HELP wmi_os_users OperatingSystem.NumberOfUsers
+# TYPE wmi_os_users gauge
+wmi_os_users 2
+# HELP wmi_os_virtual_memory_bytes OperatingSystem.TotalVirtualMemorySize
+# TYPE wmi_os_virtual_memory_bytes gauge
+wmi_os_virtual_memory_bytes 5.770891264e+09
+# HELP wmi_os_virtual_memory_free_bytes OperatingSystem.FreeVirtualMemory
+# TYPE wmi_os_virtual_memory_free_bytes gauge
+wmi_os_virtual_memory_free_bytes 3.76489984e+09
+# HELP wmi_os_visible_memory_bytes OperatingSystem.TotalVisibleMemorySize
+# TYPE wmi_os_visible_memory_bytes gauge
+wmi_os_visible_memory_bytes 4.294496256e+09 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/vernemq/vernemq.go b/src/go/plugin/go.d/modules/vernemq/vernemq.go
new file mode 100644
index 000000000..2f1de38ff
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/vernemq.go
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vernemq
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("vernemq", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *VerneMQ {
+ return &VerneMQ{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8888/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: charts.Copy(),
+ cache: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type (
+ VerneMQ struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ prom prometheus.Prometheus
+
+ cache map[string]bool
+ }
+)
+
+func (v *VerneMQ) Configuration() any {
+ return v.Config
+}
+
+func (v *VerneMQ) Init() error {
+ if err := v.validateConfig(); err != nil {
+ v.Errorf("error on validating config: %v", err)
+ return err
+ }
+
+ prom, err := v.initPrometheusClient()
+ if err != nil {
+ v.Error(err)
+ return err
+ }
+ v.prom = prom
+
+ return nil
+}
+
+func (v *VerneMQ) Check() error {
+ mx, err := v.collect()
+ if err != nil {
+ v.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (v *VerneMQ) Charts() *Charts {
+ return v.charts
+}
+
+func (v *VerneMQ) Collect() map[string]int64 {
+ mx, err := v.collect()
+ if err != nil {
+ v.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (v *VerneMQ) Cleanup() {
+ if v.prom != nil && v.prom.HTTPClient() != nil {
+ v.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/vernemq/vernemq_test.go b/src/go/plugin/go.d/modules/vernemq/vernemq_test.go
new file mode 100644
index 000000000..13eb3dceb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vernemq/vernemq_test.go
@@ -0,0 +1,578 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vernemq
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer1101MQTTv5Metrics, _ = os.ReadFile("testdata/metrics-v1.10.1-mqtt5.txt")
+ dataUnexpectedMetrics, _ = os.ReadFile("testdata/non_vernemq.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer1101MQTTv5Metrics": dataVer1101MQTTv5Metrics,
+ "dataUnexpectedMetrics": dataUnexpectedMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestVerneMQ_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &VerneMQ{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestVerneMQ_Init(t *testing.T) {
+ verneMQ := prepareVerneMQ()
+
+ assert.NoError(t, verneMQ.Init())
+}
+
+func TestVerneMQ_Init_ReturnsErrorIfURLIsNotSet(t *testing.T) {
+ verneMQ := prepareVerneMQ()
+ verneMQ.URL = ""
+
+ assert.Error(t, verneMQ.Init())
+}
+
+func TestVerneMQ_Init_ReturnsErrorIfClientWrongTLSCA(t *testing.T) {
+ verneMQ := prepareVerneMQ()
+ verneMQ.Client.TLSConfig.TLSCA = "testdata/tls"
+
+ assert.Error(t, verneMQ.Init())
+}
+
+func TestVerneMQ_Check(t *testing.T) {
+ verneMQ, srv := prepareClientServerV1101(t)
+ defer srv.Close()
+
+ assert.NoError(t, verneMQ.Check())
+}
+
+func TestVerneMQ_Check_ReturnsErrorIfConnectionRefused(t *testing.T) {
+ verneMQ := prepareVerneMQ()
+ require.NoError(t, verneMQ.Init())
+
+ assert.Error(t, verneMQ.Check())
+}
+
+func TestVerneMQ_Check_ReturnsErrorIfMetricsAreNotVerneMQ(t *testing.T) {
+ verneMQ, srv := prepareClientServerNotVerneMQ(t)
+ defer srv.Close()
+ require.NoError(t, verneMQ.Init())
+
+ assert.Error(t, verneMQ.Check())
+}
+
+func TestVerneMQ_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestVerneMQ_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestVerneMQ_Collect(t *testing.T) {
+ verneMQ, srv := prepareClientServerV1101(t)
+ defer srv.Close()
+
+ collected := verneMQ.Collect()
+ assert.Equal(t, v1101ExpectedMetrics, collected)
+ testCharts(t, verneMQ, collected)
+}
+
+func TestVerneMQ_Collect_ReturnsNilIfConnectionRefused(t *testing.T) {
+ verneMQ := prepareVerneMQ()
+ require.NoError(t, verneMQ.Init())
+
+ assert.Nil(t, verneMQ.Collect())
+}
+
+func TestVerneMQ_Collect_ReturnsNilIfMetricsAreNotVerneMQ(t *testing.T) {
+ verneMQ, srv := prepareClientServerNotVerneMQ(t)
+ defer srv.Close()
+
+ assert.Nil(t, verneMQ.Collect())
+}
+
+func TestVerneMQ_Collect_ReturnsNilIfReceiveInvalidResponse(t *testing.T) {
+ verneMQ, ts := prepareClientServerInvalid(t)
+ defer ts.Close()
+
+ assert.Nil(t, verneMQ.Collect())
+}
+
+func TestVerneMQ_Collect_ReturnsNilIfReceiveResponse404(t *testing.T) {
+ verneMQ, ts := prepareClientServerResponse404(t)
+ defer ts.Close()
+
+ assert.Nil(t, verneMQ.Collect())
+}
+
+func testCharts(t *testing.T, verneMQ *VerneMQ, collected map[string]int64) {
+ ensureCollectedHasAllChartsDimsVarsIDs(t, verneMQ, collected)
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, verneMQ *VerneMQ, collected map[string]int64) {
+ for _, chart := range *verneMQ.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareVerneMQ() *VerneMQ {
+ verneMQ := New()
+ verneMQ.URL = "http://127.0.0.1:38001/metrics"
+ return verneMQ
+}
+
+func prepareClientServerV1101(t *testing.T) (*VerneMQ, *httptest.Server) {
+ t.Helper()
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer1101MQTTv5Metrics)
+ }))
+
+ verneMQ := New()
+ verneMQ.URL = ts.URL
+ require.NoError(t, verneMQ.Init())
+
+ return verneMQ, ts
+}
+
+func prepareClientServerNotVerneMQ(t *testing.T) (*VerneMQ, *httptest.Server) {
+ t.Helper()
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataUnexpectedMetrics)
+ }))
+
+ verneMQ := New()
+ verneMQ.URL = ts.URL
+ require.NoError(t, verneMQ.Init())
+
+ return verneMQ, ts
+}
+
+func prepareClientServerInvalid(t *testing.T) (*VerneMQ, *httptest.Server) {
+ t.Helper()
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ verneMQ := New()
+ verneMQ.URL = ts.URL
+ require.NoError(t, verneMQ.Init())
+
+ return verneMQ, ts
+}
+
+func prepareClientServerResponse404(t *testing.T) (*VerneMQ, *httptest.Server) {
+ t.Helper()
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+
+ verneMQ := New()
+ verneMQ.URL = ts.URL
+ require.NoError(t, verneMQ.Init())
+ return verneMQ, ts
+}
+
+var v1101ExpectedMetrics = map[string]int64{
+ "bytes_received": 36796908,
+ "bytes_sent": 23361693,
+ "client_keepalive_expired": 1,
+ "cluster_bytes_dropped": 0,
+ "cluster_bytes_received": 0,
+ "cluster_bytes_sent": 0,
+ "mqtt_auth_received": 0,
+ "mqtt_auth_received_continue_authentication": 0,
+ "mqtt_auth_received_reauthenticate": 0,
+ "mqtt_auth_received_success": 0,
+ "mqtt_auth_received_v_5": 0,
+ "mqtt_auth_received_v_5_continue_authentication": 0,
+ "mqtt_auth_received_v_5_reauthenticate": 0,
+ "mqtt_auth_received_v_5_success": 0,
+ "mqtt_auth_sent": 0,
+ "mqtt_auth_sent_continue_authentication": 0,
+ "mqtt_auth_sent_reauthenticate": 0,
+ "mqtt_auth_sent_success": 0,
+ "mqtt_auth_sent_v_5": 0,
+ "mqtt_auth_sent_v_5_continue_authentication": 0,
+ "mqtt_auth_sent_v_5_reauthenticate": 0,
+ "mqtt_auth_sent_v_5_success": 0,
+ "mqtt_connack_sent": 338956,
+ "mqtt_connack_sent_bad_authentication_method": 0,
+ "mqtt_connack_sent_bad_username_or_password": 4,
+ "mqtt_connack_sent_banned": 0,
+ "mqtt_connack_sent_client_identifier_not_valid": 0,
+ "mqtt_connack_sent_connection_rate_exceeded": 0,
+ "mqtt_connack_sent_impl_specific_error": 0,
+ "mqtt_connack_sent_malformed_packet": 0,
+ "mqtt_connack_sent_not_authorized": 4,
+ "mqtt_connack_sent_packet_too_large": 0,
+ "mqtt_connack_sent_payload_format_invalid": 0,
+ "mqtt_connack_sent_protocol_error": 0,
+ "mqtt_connack_sent_qos_not_supported": 0,
+ "mqtt_connack_sent_quota_exceeded": 0,
+ "mqtt_connack_sent_retain_not_supported": 0,
+ "mqtt_connack_sent_server_busy": 0,
+ "mqtt_connack_sent_server_moved": 0,
+ "mqtt_connack_sent_server_unavailable": 0,
+ "mqtt_connack_sent_success": 338948,
+ "mqtt_connack_sent_topic_name_invalid": 0,
+ "mqtt_connack_sent_unspecified_error": 0,
+ "mqtt_connack_sent_unsupported_protocol_version": 0,
+ "mqtt_connack_sent_use_another_server": 0,
+ "mqtt_connack_sent_v_4": 338956,
+ "mqtt_connack_sent_v_4_bad_username_or_password": 4,
+ "mqtt_connack_sent_v_4_client_identifier_not_valid": 0,
+ "mqtt_connack_sent_v_4_not_authorized": 4,
+ "mqtt_connack_sent_v_4_server_unavailable": 0,
+ "mqtt_connack_sent_v_4_success": 338948,
+ "mqtt_connack_sent_v_4_unsupported_protocol_version": 0,
+ "mqtt_connack_sent_v_5": 0,
+ "mqtt_connack_sent_v_5_bad_authentication_method": 0,
+ "mqtt_connack_sent_v_5_bad_username_or_password": 0,
+ "mqtt_connack_sent_v_5_banned": 0,
+ "mqtt_connack_sent_v_5_client_identifier_not_valid": 0,
+ "mqtt_connack_sent_v_5_connection_rate_exceeded": 0,
+ "mqtt_connack_sent_v_5_impl_specific_error": 0,
+ "mqtt_connack_sent_v_5_malformed_packet": 0,
+ "mqtt_connack_sent_v_5_not_authorized": 0,
+ "mqtt_connack_sent_v_5_packet_too_large": 0,
+ "mqtt_connack_sent_v_5_payload_format_invalid": 0,
+ "mqtt_connack_sent_v_5_protocol_error": 0,
+ "mqtt_connack_sent_v_5_qos_not_supported": 0,
+ "mqtt_connack_sent_v_5_quota_exceeded": 0,
+ "mqtt_connack_sent_v_5_retain_not_supported": 0,
+ "mqtt_connack_sent_v_5_server_busy": 0,
+ "mqtt_connack_sent_v_5_server_moved": 0,
+ "mqtt_connack_sent_v_5_server_unavailable": 0,
+ "mqtt_connack_sent_v_5_success": 0,
+ "mqtt_connack_sent_v_5_topic_name_invalid": 0,
+ "mqtt_connack_sent_v_5_unspecified_error": 0,
+ "mqtt_connack_sent_v_5_unsupported_protocol_version": 0,
+ "mqtt_connack_sent_v_5_use_another_server": 0,
+ "mqtt_connect_received": 338956,
+ "mqtt_connect_received_v_4": 338956,
+ "mqtt_connect_received_v_5": 0,
+ "mqtt_disconnect_received": 107,
+ "mqtt_disconnect_received_administrative_action": 0,
+ "mqtt_disconnect_received_disconnect_with_will_msg": 0,
+ "mqtt_disconnect_received_impl_specific_error": 0,
+ "mqtt_disconnect_received_malformed_packet": 0,
+ "mqtt_disconnect_received_message_rate_too_high": 0,
+ "mqtt_disconnect_received_normal_disconnect": 0,
+ "mqtt_disconnect_received_packet_too_large": 0,
+ "mqtt_disconnect_received_payload_format_invalid": 0,
+ "mqtt_disconnect_received_protocol_error": 0,
+ "mqtt_disconnect_received_quota_exceeded": 0,
+ "mqtt_disconnect_received_receive_max_exceeded": 0,
+ "mqtt_disconnect_received_topic_alias_invalid": 0,
+ "mqtt_disconnect_received_topic_name_invalid": 0,
+ "mqtt_disconnect_received_unspecified_error": 0,
+ "mqtt_disconnect_received_v_4": 107,
+ "mqtt_disconnect_received_v_5": 0,
+ "mqtt_disconnect_received_v_5_administrative_action": 0,
+ "mqtt_disconnect_received_v_5_disconnect_with_will_msg": 0,
+ "mqtt_disconnect_received_v_5_impl_specific_error": 0,
+ "mqtt_disconnect_received_v_5_malformed_packet": 0,
+ "mqtt_disconnect_received_v_5_message_rate_too_high": 0,
+ "mqtt_disconnect_received_v_5_normal_disconnect": 0,
+ "mqtt_disconnect_received_v_5_packet_too_large": 0,
+ "mqtt_disconnect_received_v_5_payload_format_invalid": 0,
+ "mqtt_disconnect_received_v_5_protocol_error": 0,
+ "mqtt_disconnect_received_v_5_quota_exceeded": 0,
+ "mqtt_disconnect_received_v_5_receive_max_exceeded": 0,
+ "mqtt_disconnect_received_v_5_topic_alias_invalid": 0,
+ "mqtt_disconnect_received_v_5_topic_name_invalid": 0,
+ "mqtt_disconnect_received_v_5_unspecified_error": 0,
+ "mqtt_disconnect_sent": 0,
+ "mqtt_disconnect_sent_administrative_action": 0,
+ "mqtt_disconnect_sent_connection_rate_exceeded": 0,
+ "mqtt_disconnect_sent_impl_specific_error": 0,
+ "mqtt_disconnect_sent_keep_alive_timeout": 0,
+ "mqtt_disconnect_sent_malformed_packet": 0,
+ "mqtt_disconnect_sent_max_connect_time": 0,
+ "mqtt_disconnect_sent_message_rate_too_high": 0,
+ "mqtt_disconnect_sent_normal_disconnect": 0,
+ "mqtt_disconnect_sent_not_authorized": 0,
+ "mqtt_disconnect_sent_packet_too_large": 0,
+ "mqtt_disconnect_sent_payload_format_invalid": 0,
+ "mqtt_disconnect_sent_protocol_error": 0,
+ "mqtt_disconnect_sent_qos_not_supported": 0,
+ "mqtt_disconnect_sent_quota_exceeded": 0,
+ "mqtt_disconnect_sent_receive_max_exceeded": 0,
+ "mqtt_disconnect_sent_retain_not_supported": 0,
+ "mqtt_disconnect_sent_server_busy": 0,
+ "mqtt_disconnect_sent_server_moved": 0,
+ "mqtt_disconnect_sent_server_shutting_down": 0,
+ "mqtt_disconnect_sent_session_taken_over": 0,
+ "mqtt_disconnect_sent_shared_subs_not_supported": 0,
+ "mqtt_disconnect_sent_subscription_ids_not_supported": 0,
+ "mqtt_disconnect_sent_topic_alias_invalid": 0,
+ "mqtt_disconnect_sent_topic_filter_invalid": 0,
+ "mqtt_disconnect_sent_topic_name_invalid": 0,
+ "mqtt_disconnect_sent_unspecified_error": 0,
+ "mqtt_disconnect_sent_use_another_server": 0,
+ "mqtt_disconnect_sent_v_5": 0,
+ "mqtt_disconnect_sent_v_5_administrative_action": 0,
+ "mqtt_disconnect_sent_v_5_connection_rate_exceeded": 0,
+ "mqtt_disconnect_sent_v_5_impl_specific_error": 0,
+ "mqtt_disconnect_sent_v_5_keep_alive_timeout": 0,
+ "mqtt_disconnect_sent_v_5_malformed_packet": 0,
+ "mqtt_disconnect_sent_v_5_max_connect_time": 0,
+ "mqtt_disconnect_sent_v_5_message_rate_too_high": 0,
+ "mqtt_disconnect_sent_v_5_normal_disconnect": 0,
+ "mqtt_disconnect_sent_v_5_not_authorized": 0,
+ "mqtt_disconnect_sent_v_5_packet_too_large": 0,
+ "mqtt_disconnect_sent_v_5_payload_format_invalid": 0,
+ "mqtt_disconnect_sent_v_5_protocol_error": 0,
+ "mqtt_disconnect_sent_v_5_qos_not_supported": 0,
+ "mqtt_disconnect_sent_v_5_quota_exceeded": 0,
+ "mqtt_disconnect_sent_v_5_receive_max_exceeded": 0,
+ "mqtt_disconnect_sent_v_5_retain_not_supported": 0,
+ "mqtt_disconnect_sent_v_5_server_busy": 0,
+ "mqtt_disconnect_sent_v_5_server_moved": 0,
+ "mqtt_disconnect_sent_v_5_server_shutting_down": 0,
+ "mqtt_disconnect_sent_v_5_session_taken_over": 0,
+ "mqtt_disconnect_sent_v_5_shared_subs_not_supported": 0,
+ "mqtt_disconnect_sent_v_5_subscription_ids_not_supported": 0,
+ "mqtt_disconnect_sent_v_5_topic_alias_invalid": 0,
+ "mqtt_disconnect_sent_v_5_topic_filter_invalid": 0,
+ "mqtt_disconnect_sent_v_5_topic_name_invalid": 0,
+ "mqtt_disconnect_sent_v_5_unspecified_error": 0,
+ "mqtt_disconnect_sent_v_5_use_another_server": 0,
+ "mqtt_disconnect_sent_v_5_wildcard_subs_not_supported": 0,
+ "mqtt_disconnect_sent_wildcard_subs_not_supported": 0,
+ "mqtt_invalid_msg_size_error": 0,
+ "mqtt_invalid_msg_size_error_v_4": 0,
+ "mqtt_invalid_msg_size_error_v_5": 0,
+ "mqtt_pingreq_received": 205,
+ "mqtt_pingreq_received_v_4": 205,
+ "mqtt_pingreq_received_v_5": 0,
+ "mqtt_pingresp_sent": 205,
+ "mqtt_pingresp_sent_v_4": 205,
+ "mqtt_pingresp_sent_v_5": 0,
+ "mqtt_puback_invalid_error": 0,
+ "mqtt_puback_invalid_error_v_4": 0,
+ "mqtt_puback_invalid_error_v_5": 0,
+ "mqtt_puback_received": 525694,
+ "mqtt_puback_received_impl_specific_error": 0,
+ "mqtt_puback_received_no_matching_subscribers": 0,
+ "mqtt_puback_received_not_authorized": 0,
+ "mqtt_puback_received_packet_id_in_use": 0,
+ "mqtt_puback_received_payload_format_invalid": 0,
+ "mqtt_puback_received_quota_exceeded": 0,
+ "mqtt_puback_received_success": 0,
+ "mqtt_puback_received_topic_name_invalid": 0,
+ "mqtt_puback_received_unspecified_error": 0,
+ "mqtt_puback_received_v_4": 525694,
+ "mqtt_puback_received_v_5": 0,
+ "mqtt_puback_received_v_5_impl_specific_error": 0,
+ "mqtt_puback_received_v_5_no_matching_subscribers": 0,
+ "mqtt_puback_received_v_5_not_authorized": 0,
+ "mqtt_puback_received_v_5_packet_id_in_use": 0,
+ "mqtt_puback_received_v_5_payload_format_invalid": 0,
+ "mqtt_puback_received_v_5_quota_exceeded": 0,
+ "mqtt_puback_received_v_5_success": 0,
+ "mqtt_puback_received_v_5_topic_name_invalid": 0,
+ "mqtt_puback_received_v_5_unspecified_error": 0,
+ "mqtt_puback_sent": 537068,
+ "mqtt_puback_sent_impl_specific_error": 0,
+ "mqtt_puback_sent_no_matching_subscribers": 0,
+ "mqtt_puback_sent_not_authorized": 0,
+ "mqtt_puback_sent_packet_id_in_use": 0,
+ "mqtt_puback_sent_payload_format_invalid": 0,
+ "mqtt_puback_sent_quota_exceeded": 0,
+ "mqtt_puback_sent_success": 0,
+ "mqtt_puback_sent_topic_name_invalid": 0,
+ "mqtt_puback_sent_unspecified_error": 0,
+ "mqtt_puback_sent_v_4": 537068,
+ "mqtt_puback_sent_v_5": 0,
+ "mqtt_puback_sent_v_5_impl_specific_error": 0,
+ "mqtt_puback_sent_v_5_no_matching_subscribers": 0,
+ "mqtt_puback_sent_v_5_not_authorized": 0,
+ "mqtt_puback_sent_v_5_packet_id_in_use": 0,
+ "mqtt_puback_sent_v_5_payload_format_invalid": 0,
+ "mqtt_puback_sent_v_5_quota_exceeded": 0,
+ "mqtt_puback_sent_v_5_success": 0,
+ "mqtt_puback_sent_v_5_topic_name_invalid": 0,
+ "mqtt_puback_sent_v_5_unspecified_error": 0,
+ "mqtt_pubcomp_invalid_error": 0,
+ "mqtt_pubcomp_invalid_error_v_4": 0,
+ "mqtt_pubcomp_invalid_error_v_5": 0,
+ "mqtt_pubcomp_received": 0,
+ "mqtt_pubcomp_received_packet_id_not_found": 0,
+ "mqtt_pubcomp_received_success": 0,
+ "mqtt_pubcomp_received_v_4": 0,
+ "mqtt_pubcomp_received_v_5": 0,
+ "mqtt_pubcomp_received_v_5_packet_id_not_found": 0,
+ "mqtt_pubcomp_received_v_5_success": 0,
+ "mqtt_pubcomp_sent": 0,
+ "mqtt_pubcomp_sent_packet_id_not_found": 0,
+ "mqtt_pubcomp_sent_success": 0,
+ "mqtt_pubcomp_sent_v_4": 0,
+ "mqtt_pubcomp_sent_v_5": 0,
+ "mqtt_pubcomp_sent_v_5_packet_id_not_found": 0,
+ "mqtt_pubcomp_sent_v_5_success": 0,
+ "mqtt_publish_auth_error": 0,
+ "mqtt_publish_auth_error_v_4": 0,
+ "mqtt_publish_auth_error_v_5": 0,
+ "mqtt_publish_error": 0,
+ "mqtt_publish_error_v_4": 0,
+ "mqtt_publish_error_v_5": 0,
+ "mqtt_publish_received": 537088,
+ "mqtt_publish_received_v_4": 537088,
+ "mqtt_publish_received_v_5": 0,
+ "mqtt_publish_sent": 525721,
+ "mqtt_publish_sent_v_4": 525721,
+ "mqtt_publish_sent_v_5": 0,
+ "mqtt_pubrec_invalid_error": 0,
+ "mqtt_pubrec_invalid_error_v_4": 0,
+ "mqtt_pubrec_received": 0,
+ "mqtt_pubrec_received_impl_specific_error": 0,
+ "mqtt_pubrec_received_no_matching_subscribers": 0,
+ "mqtt_pubrec_received_not_authorized": 0,
+ "mqtt_pubrec_received_packet_id_in_use": 0,
+ "mqtt_pubrec_received_payload_format_invalid": 0,
+ "mqtt_pubrec_received_quota_exceeded": 0,
+ "mqtt_pubrec_received_success": 0,
+ "mqtt_pubrec_received_topic_name_invalid": 0,
+ "mqtt_pubrec_received_unspecified_error": 0,
+ "mqtt_pubrec_received_v_4": 0,
+ "mqtt_pubrec_received_v_5": 0,
+ "mqtt_pubrec_received_v_5_impl_specific_error": 0,
+ "mqtt_pubrec_received_v_5_no_matching_subscribers": 0,
+ "mqtt_pubrec_received_v_5_not_authorized": 0,
+ "mqtt_pubrec_received_v_5_packet_id_in_use": 0,
+ "mqtt_pubrec_received_v_5_payload_format_invalid": 0,
+ "mqtt_pubrec_received_v_5_quota_exceeded": 0,
+ "mqtt_pubrec_received_v_5_success": 0,
+ "mqtt_pubrec_received_v_5_topic_name_invalid": 0,
+ "mqtt_pubrec_received_v_5_unspecified_error": 0,
+ "mqtt_pubrec_sent": 0,
+ "mqtt_pubrec_sent_impl_specific_error": 0,
+ "mqtt_pubrec_sent_no_matching_subscribers": 0,
+ "mqtt_pubrec_sent_not_authorized": 0,
+ "mqtt_pubrec_sent_packet_id_in_use": 0,
+ "mqtt_pubrec_sent_payload_format_invalid": 0,
+ "mqtt_pubrec_sent_quota_exceeded": 0,
+ "mqtt_pubrec_sent_success": 0,
+ "mqtt_pubrec_sent_topic_name_invalid": 0,
+ "mqtt_pubrec_sent_unspecified_error": 0,
+ "mqtt_pubrec_sent_v_4": 0,
+ "mqtt_pubrec_sent_v_5": 0,
+ "mqtt_pubrec_sent_v_5_impl_specific_error": 0,
+ "mqtt_pubrec_sent_v_5_no_matching_subscribers": 0,
+ "mqtt_pubrec_sent_v_5_not_authorized": 0,
+ "mqtt_pubrec_sent_v_5_packet_id_in_use": 0,
+ "mqtt_pubrec_sent_v_5_payload_format_invalid": 0,
+ "mqtt_pubrec_sent_v_5_quota_exceeded": 0,
+ "mqtt_pubrec_sent_v_5_success": 0,
+ "mqtt_pubrec_sent_v_5_topic_name_invalid": 0,
+ "mqtt_pubrec_sent_v_5_unspecified_error": 0,
+ "mqtt_pubrel_received": 0,
+ "mqtt_pubrel_received_packet_id_not_found": 0,
+ "mqtt_pubrel_received_success": 0,
+ "mqtt_pubrel_received_v_4": 0,
+ "mqtt_pubrel_received_v_5": 0,
+ "mqtt_pubrel_received_v_5_packet_id_not_found": 0,
+ "mqtt_pubrel_received_v_5_success": 0,
+ "mqtt_pubrel_sent": 0,
+ "mqtt_pubrel_sent_packet_id_not_found": 0,
+ "mqtt_pubrel_sent_success": 0,
+ "mqtt_pubrel_sent_v_4": 0,
+ "mqtt_pubrel_sent_v_5": 0,
+ "mqtt_pubrel_sent_v_5_packet_id_not_found": 0,
+ "mqtt_pubrel_sent_v_5_success": 0,
+ "mqtt_suback_sent": 122,
+ "mqtt_suback_sent_v_4": 122,
+ "mqtt_suback_sent_v_5": 0,
+ "mqtt_subscribe_auth_error": 0,
+ "mqtt_subscribe_auth_error_v_4": 0,
+ "mqtt_subscribe_auth_error_v_5": 0,
+ "mqtt_subscribe_error": 0,
+ "mqtt_subscribe_error_v_4": 0,
+ "mqtt_subscribe_error_v_5": 0,
+ "mqtt_subscribe_received": 122,
+ "mqtt_subscribe_received_v_4": 122,
+ "mqtt_subscribe_received_v_5": 0,
+ "mqtt_unsuback_sent": 108,
+ "mqtt_unsuback_sent_v_4": 108,
+ "mqtt_unsuback_sent_v_5": 0,
+ "mqtt_unsubscribe_error": 0,
+ "mqtt_unsubscribe_error_v_4": 0,
+ "mqtt_unsubscribe_error_v_5": 0,
+ "mqtt_unsubscribe_received": 108,
+ "mqtt_unsubscribe_received_v_4": 108,
+ "mqtt_unsubscribe_received_v_5": 0,
+ "netsplit_detected": 0,
+ "netsplit_resolved": 0,
+ "netsplit_unresolved": 0,
+ "open_sockets": 0,
+ "queue_initialized_from_storage": 0,
+ "queue_message_drop": 0,
+ "queue_message_expired": 0,
+ "queue_message_in": 525722,
+ "queue_message_out": 525721,
+ "queue_message_unhandled": 1,
+ "queue_processes": 0,
+ "queue_setup": 338948,
+ "queue_teardown": 338948,
+ "retain_memory": 11344,
+ "retain_messages": 0,
+ "router_matches_local": 525722,
+ "router_matches_remote": 0,
+ "router_memory": 12752,
+ "router_subscriptions": 0,
+ "socket_close": 338956,
+ "socket_close_timeout": 0,
+ "socket_error": 0,
+ "socket_open": 338956,
+ "system_context_switches": 39088198,
+ "system_gc_count": 12189976,
+ "system_io_in": 68998296,
+ "system_io_out": 961001488,
+ "system_process_count": 329,
+ "system_reductions": 3857458067,
+ "system_run_queue": 0,
+ "system_utilization": 9,
+ "system_utilization_scheduler_1": 34,
+ "system_utilization_scheduler_2": 8,
+ "system_utilization_scheduler_3": 14,
+ "system_utilization_scheduler_4": 19,
+ "system_utilization_scheduler_5": 0,
+ "system_utilization_scheduler_6": 0,
+ "system_utilization_scheduler_7": 0,
+ "system_utilization_scheduler_8": 0,
+ "system_wallclock": 163457858,
+ "system_words_reclaimed_by_gc": 7158470019,
+ "vm_memory_processes": 8673288,
+ "vm_memory_system": 27051848,
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/README.md b/src/go/plugin/go.d/modules/vsphere/README.md
new file mode 120000
index 000000000..0a6b0146e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/README.md
@@ -0,0 +1 @@
+integrations/vmware_vcenter_server.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/vsphere/charts.go b/src/go/plugin/go.d/modules/vsphere/charts.go
new file mode 100644
index 000000000..3cc21bef4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/charts.go
@@ -0,0 +1,506 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vsphere
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+)
+
+const (
+ prioVMCPUUtilization = module.Priority + iota
+ prioVmMemoryUtilization
+ prioVmMemoryUsage
+ prioVmMemorySwapUsage
+ prioVmMemorySwapIO
+ prioVmDiskIO
+ prioVmDiskMaxLatency
+ prioVmNetworkTraffic
+ prioVmNetworkPackets
+ prioVmNetworkDrops
+ prioVmOverallStatus
+ prioVmSystemUptime
+
+ prioHostCPUUtilization
+ prioHostMemoryUtilization
+ prioHostMemoryUsage
+ prioHostMemorySwapIO
+ prioHostDiskIO
+ prioHostDiskMaxLatency
+ prioHostNetworkTraffic
+ prioHostNetworkPackets
+ prioHostNetworkDrops
+ prioHostNetworkErrors
+ prioHostOverallStatus
+ prioHostSystemUptime
+)
+
+var (
+ vmChartsTmpl = module.Charts{
+ vmCPUUtilizationChartTmpl.Copy(),
+
+ vmMemoryUtilizationChartTmpl.Copy(),
+ vmMemoryUsageChartTmpl.Copy(),
+ vmMemorySwapUsageChartTmpl.Copy(),
+ vmMemorySwapIOChartTmpl.Copy(),
+
+ vmDiskIOChartTmpl.Copy(),
+ vmDiskMaxLatencyChartTmpl.Copy(),
+
+ vmNetworkTrafficChartTmpl.Copy(),
+ vmNetworkPacketsChartTmpl.Copy(),
+ vmNetworkDropsChartTmpl.Copy(),
+
+ vmOverallStatusChartTmpl.Copy(),
+
+ vmSystemUptimeChartTmpl.Copy(),
+ }
+
+ vmCPUUtilizationChartTmpl = module.Chart{
+ ID: "%s_cpu_utilization",
+ Title: "Virtual Machine CPU utilization",
+ Units: "percentage",
+ Fam: "vms cpu",
+ Ctx: "vsphere.vm_cpu_utilization",
+ Priority: prioVMCPUUtilization,
+ Dims: module.Dims{
+ {ID: "%s_cpu.usage.average", Name: "used", Div: 100},
+ },
+ }
+
+ // Ref: https://www.vmware.com/support/developer/converter-sdk/conv51_apireference/memory_counters.html
+ vmMemoryUtilizationChartTmpl = module.Chart{
+ ID: "%s_mem_utilization",
+ Title: "Virtual Machine memory utilization",
+ Units: "percentage",
+ Fam: "vms mem",
+ Ctx: "vsphere.vm_mem_utilization",
+ Priority: prioVmMemoryUtilization,
+ Dims: module.Dims{
+ {ID: "%s_mem.usage.average", Name: "used", Div: 100},
+ },
+ }
+ vmMemoryUsageChartTmpl = module.Chart{
+ ID: "%s_mem_usage",
+ Title: "Virtual Machine memory usage",
+ Units: "KiB",
+ Fam: "vms mem",
+ Ctx: "vsphere.vm_mem_usage",
+ Priority: prioVmMemoryUsage,
+ Dims: module.Dims{
+ {ID: "%s_mem.granted.average", Name: "granted"},
+ {ID: "%s_mem.consumed.average", Name: "consumed"},
+ {ID: "%s_mem.active.average", Name: "active"},
+ {ID: "%s_mem.shared.average", Name: "shared"},
+ },
+ }
+ vmMemorySwapUsageChartTmpl = module.Chart{
+ ID: "%s_mem_swap_usage",
+ Title: "Virtual Machine VMKernel memory swap usage",
+ Units: "KiB",
+ Fam: "vms mem",
+ Ctx: "vsphere.vm_mem_swap_usage",
+ Priority: prioVmMemorySwapUsage,
+ Dims: module.Dims{
+ {ID: "%s_mem.swapped.average", Name: "swapped"},
+ },
+ }
+ vmMemorySwapIOChartTmpl = module.Chart{
+ ID: "%s_mem_swap_io_rate",
+ Title: "Virtual Machine VMKernel memory swap IO",
+ Units: "KiB/s",
+ Fam: "vms mem",
+ Ctx: "vsphere.vm_mem_swap_io",
+ Type: module.Area,
+ Priority: prioVmMemorySwapIO,
+ Dims: module.Dims{
+ {ID: "%s_mem.swapinRate.average", Name: "in"},
+ {ID: "%s_mem.swapoutRate.average", Name: "out"},
+ },
+ }
+
+ vmDiskIOChartTmpl = module.Chart{
+ ID: "%s_disk_io",
+ Title: "Virtual Machine disk IO",
+ Units: "KiB/s",
+ Fam: "vms disk",
+ Ctx: "vsphere.vm_disk_io",
+ Type: module.Area,
+ Priority: prioVmDiskIO,
+ Dims: module.Dims{
+ {ID: "%s_disk.read.average", Name: "read"},
+ {ID: "%s_disk.write.average", Name: "write", Mul: -1},
+ },
+ }
+ vmDiskMaxLatencyChartTmpl = module.Chart{
+ ID: "%s_disk_max_latency",
+ Title: "Virtual Machine disk max latency",
+ Units: "milliseconds",
+ Fam: "vms disk",
+ Ctx: "vsphere.vm_disk_max_latency",
+ Priority: prioVmDiskMaxLatency,
+ Dims: module.Dims{
+ {ID: "%s_disk.maxTotalLatency.latest", Name: "latency"},
+ },
+ }
+
+ vmNetworkTrafficChartTmpl = module.Chart{
+ ID: "%s_net_traffic",
+ Title: "Virtual Machine network traffic",
+ Units: "KiB/s",
+ Fam: "vms net",
+ Ctx: "vsphere.vm_net_traffic",
+ Type: module.Area,
+ Priority: prioVmNetworkTraffic,
+ Dims: module.Dims{
+ {ID: "%s_net.bytesRx.average", Name: "received"},
+ {ID: "%s_net.bytesTx.average", Name: "sent", Mul: -1},
+ },
+ }
+ vmNetworkPacketsChartTmpl = module.Chart{
+ ID: "%s_net_packets",
+ Title: "Virtual Machine network packets",
+ Units: "packets",
+ Fam: "vms net",
+ Ctx: "vsphere.vm_net_packets",
+ Priority: prioVmNetworkPackets,
+ Dims: module.Dims{
+ {ID: "%s_net.packetsRx.summation", Name: "received"},
+ {ID: "%s_net.packetsTx.summation", Name: "sent", Mul: -1},
+ },
+ }
+ vmNetworkDropsChartTmpl = module.Chart{
+ ID: "%s_net_drops",
+ Title: "Virtual Machine network dropped packets",
+ Units: "drops",
+ Fam: "vms net",
+ Ctx: "vsphere.vm_net_drops",
+ Priority: prioVmNetworkDrops,
+ Dims: module.Dims{
+ {ID: "%s_net.droppedRx.summation", Name: "received"},
+ {ID: "%s_net.droppedTx.summation", Name: "sent", Mul: -1},
+ },
+ }
+
+ vmOverallStatusChartTmpl = module.Chart{
+ ID: "%s_overall_status",
+ Title: "Virtual Machine overall alarm status",
+ Units: "status",
+ Fam: "vms status",
+ Ctx: "vsphere.vm_overall_status",
+ Priority: prioVmOverallStatus,
+ Dims: module.Dims{
+ {ID: "%s_overall.status.green", Name: "green"},
+ {ID: "%s_overall.status.red", Name: "red"},
+ {ID: "%s_overall.status.yellow", Name: "yellow"},
+ {ID: "%s_overall.status.gray", Name: "gray"},
+ },
+ }
+
+ vmSystemUptimeChartTmpl = module.Chart{
+ ID: "%s_system_uptime",
+ Title: "Virtual Machine system uptime",
+ Units: "seconds",
+ Fam: "vms uptime",
+ Ctx: "vsphere.vm_system_uptime",
+ Priority: prioVmSystemUptime,
+ Dims: module.Dims{
+ {ID: "%s_sys.uptime.latest", Name: "uptime"},
+ },
+ }
+)
+
+var (
+ hostChartsTmpl = module.Charts{
+ hostCPUUtilizationChartTmpl.Copy(),
+
+ hostMemUtilizationChartTmpl.Copy(),
+ hostMemUsageChartTmpl.Copy(),
+ hostMemSwapIOChartTmpl.Copy(),
+
+ hostDiskIOChartTmpl.Copy(),
+ hostDiskMaxLatencyChartTmpl.Copy(),
+
+ hostNetworkTraffic.Copy(),
+ hostNetworkPacketsChartTmpl.Copy(),
+ hostNetworkDropsChartTmpl.Copy(),
+ hostNetworkErrorsChartTmpl.Copy(),
+
+ hostOverallStatusChartTmpl.Copy(),
+
+ hostSystemUptimeChartTmpl.Copy(),
+ }
+ hostCPUUtilizationChartTmpl = module.Chart{
+ ID: "%s_cpu_usage_total",
+ Title: "ESXi Host CPU utilization",
+ Units: "percentage",
+ Fam: "hosts cpu",
+ Ctx: "vsphere.host_cpu_utilization",
+ Priority: prioHostCPUUtilization,
+ Dims: module.Dims{
+ {ID: "%s_cpu.usage.average", Name: "used", Div: 100},
+ },
+ }
+ hostMemUtilizationChartTmpl = module.Chart{
+ ID: "%s_mem_utilization",
+ Title: "ESXi Host memory utilization",
+ Units: "percentage",
+ Fam: "hosts mem",
+ Ctx: "vsphere.host_mem_utilization",
+ Priority: prioHostMemoryUtilization,
+ Dims: module.Dims{
+ {ID: "%s_mem.usage.average", Name: "used", Div: 100},
+ },
+ }
+ hostMemUsageChartTmpl = module.Chart{
+ ID: "%s_mem_usage",
+ Title: "ESXi Host memory usage",
+ Units: "KiB",
+ Fam: "hosts mem",
+ Ctx: "vsphere.host_mem_usage",
+ Priority: prioHostMemoryUsage,
+ Dims: module.Dims{
+ {ID: "%s_mem.granted.average", Name: "granted"},
+ {ID: "%s_mem.consumed.average", Name: "consumed"},
+ {ID: "%s_mem.active.average", Name: "active"},
+ {ID: "%s_mem.shared.average", Name: "shared"},
+ {ID: "%s_mem.sharedcommon.average", Name: "sharedcommon"},
+ },
+ }
+ hostMemSwapIOChartTmpl = module.Chart{
+ ID: "%s_mem_swap_rate",
+ Title: "ESXi Host VMKernel memory swap IO",
+ Units: "KiB/s",
+ Fam: "hosts mem",
+ Ctx: "vsphere.host_mem_swap_io",
+ Type: module.Area,
+ Priority: prioHostMemorySwapIO,
+ Dims: module.Dims{
+ {ID: "%s_mem.swapinRate.average", Name: "in"},
+ {ID: "%s_mem.swapoutRate.average", Name: "out"},
+ },
+ }
+
+ hostDiskIOChartTmpl = module.Chart{
+ ID: "%s_disk_io",
+ Title: "ESXi Host disk IO",
+ Units: "KiB/s",
+ Fam: "hosts disk",
+ Ctx: "vsphere.host_disk_io",
+ Type: module.Area,
+ Priority: prioHostDiskIO,
+ Dims: module.Dims{
+ {ID: "%s_disk.read.average", Name: "read"},
+ {ID: "%s_disk.write.average", Name: "write", Mul: -1},
+ },
+ }
+ hostDiskMaxLatencyChartTmpl = module.Chart{
+ ID: "%s_disk_max_latency",
+ Title: "ESXi Host disk max latency",
+ Units: "milliseconds",
+ Fam: "hosts disk",
+ Ctx: "vsphere.host_disk_max_latency",
+ Priority: prioHostDiskMaxLatency,
+ Dims: module.Dims{
+ {ID: "%s_disk.maxTotalLatency.latest", Name: "latency"},
+ },
+ }
+
+ hostNetworkTraffic = module.Chart{
+ ID: "%s_net_traffic",
+ Title: "ESXi Host network traffic",
+ Units: "KiB/s",
+ Fam: "hosts net",
+ Ctx: "vsphere.host_net_traffic",
+ Type: module.Area,
+ Priority: prioHostNetworkTraffic,
+ Dims: module.Dims{
+ {ID: "%s_net.bytesRx.average", Name: "received"},
+ {ID: "%s_net.bytesTx.average", Name: "sent", Mul: -1},
+ },
+ }
+ hostNetworkPacketsChartTmpl = module.Chart{
+ ID: "%s_net_packets",
+ Title: "ESXi Host network packets",
+ Units: "packets",
+ Fam: "hosts net",
+ Ctx: "vsphere.host_net_packets",
+ Priority: prioHostNetworkPackets,
+ Dims: module.Dims{
+ {ID: "%s_net.packetsRx.summation", Name: "received"},
+ {ID: "%s_net.packetsTx.summation", Name: "sent", Mul: -1},
+ },
+ }
+ hostNetworkDropsChartTmpl = module.Chart{
+ ID: "%s_net_drops_total",
+ Title: "ESXi Host network drops",
+ Units: "drops",
+ Fam: "hosts net",
+ Ctx: "vsphere.host_net_drops",
+ Priority: prioHostNetworkDrops,
+ Dims: module.Dims{
+ {ID: "%s_net.droppedRx.summation", Name: "received"},
+ {ID: "%s_net.droppedTx.summation", Name: "sent", Mul: -1},
+ },
+ }
+ hostNetworkErrorsChartTmpl = module.Chart{
+ ID: "%s_net_errors",
+ Title: "ESXi Host network errors",
+ Units: "errors",
+ Fam: "hosts net",
+ Ctx: "vsphere.host_net_errors",
+ Priority: prioHostNetworkErrors,
+ Dims: module.Dims{
+ {ID: "%s_net.errorsRx.summation", Name: "received"},
+ {ID: "%s_net.errorsTx.summation", Name: "sent", Mul: -1},
+ },
+ }
+
+ hostOverallStatusChartTmpl = module.Chart{
+ ID: "%s_overall_status",
+ Title: "ESXi Host overall alarm status",
+ Units: "status",
+ Fam: "hosts status",
+ Ctx: "vsphere.host_overall_status",
+ Priority: prioHostOverallStatus,
+ Dims: module.Dims{
+ {ID: "%s_overall.status.green", Name: "green"},
+ {ID: "%s_overall.status.red", Name: "red"},
+ {ID: "%s_overall.status.yellow", Name: "yellow"},
+ {ID: "%s_overall.status.gray", Name: "gray"},
+ },
+ }
+ hostSystemUptimeChartTmpl = module.Chart{
+ ID: "%s_system_uptime",
+ Title: "ESXi Host system uptime",
+ Units: "seconds",
+ Fam: "hosts uptime",
+ Ctx: "vsphere.host_system_uptime",
+ Priority: prioHostSystemUptime,
+ Dims: module.Dims{
+ {ID: "%s_sys.uptime.latest", Name: "uptime"},
+ },
+ }
+)
+
+const failedUpdatesLimit = 10
+
+func (vs *VSphere) updateCharts() {
+ for id, fails := range vs.discoveredHosts {
+ if fails >= failedUpdatesLimit {
+ vs.removeFromCharts(id)
+ delete(vs.charted, id)
+ delete(vs.discoveredHosts, id)
+ continue
+ }
+
+ host := vs.resources.Hosts.Get(id)
+ if host == nil || vs.charted[id] || fails != 0 {
+ continue
+ }
+
+ vs.charted[id] = true
+ charts := newHostCharts(host)
+ if err := vs.Charts().Add(*charts...); err != nil {
+ vs.Error(err)
+ }
+ }
+
+ for id, fails := range vs.discoveredVMs {
+ if fails >= failedUpdatesLimit {
+ vs.removeFromCharts(id)
+ delete(vs.charted, id)
+ delete(vs.discoveredVMs, id)
+ continue
+ }
+
+ vm := vs.resources.VMs.Get(id)
+ if vm == nil || vs.charted[id] || fails != 0 {
+ continue
+ }
+
+ vs.charted[id] = true
+ charts := newVMCHarts(vm)
+ if err := vs.Charts().Add(*charts...); err != nil {
+ vs.Error(err)
+ }
+ }
+}
+
+func newVMCHarts(vm *rs.VM) *module.Charts {
+ charts := vmChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, vm.ID)
+ chart.Labels = []module.Label{
+ {Key: "datacenter", Value: vm.Hier.DC.Name},
+ {Key: "cluster", Value: getVMClusterName(vm)},
+ {Key: "host", Value: vm.Hier.Host.Name},
+ {Key: "vm", Value: vm.Name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, vm.ID)
+ }
+ }
+
+ return charts
+}
+
+func getVMClusterName(vm *rs.VM) string {
+ if vm.Hier.Cluster.Name == vm.Hier.Host.Name {
+ return ""
+ }
+ return vm.Hier.Cluster.Name
+}
+
+func newHostCharts(host *rs.Host) *module.Charts {
+ charts := hostChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, host.ID)
+ chart.Labels = []module.Label{
+ {Key: "datacenter", Value: host.Hier.DC.Name},
+ {Key: "cluster", Value: getHostClusterName(host)},
+ {Key: "host", Value: host.Name},
+ }
+
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, host.ID)
+ }
+ }
+
+ return charts
+}
+
+func getHostClusterName(host *rs.Host) string {
+ if host.Hier.Cluster.Name == host.Name {
+ return ""
+ }
+ return host.Hier.Cluster.Name
+}
+
+func (vs *VSphere) removeFromCharts(prefix string) {
+ for _, c := range *vs.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+//func findMetricSeriesByPrefix(ms []performance.MetricSeries, prefix string) []performance.MetricSeries {
+// from := sort.Search(len(ms), func(i int) bool { return ms[i].Name >= prefix })
+//
+// if from == len(ms) || !strings.HasPrefix(ms[from].Name, prefix) {
+// return nil
+// }
+//
+// until := from + 1
+// for until < len(ms) && strings.HasPrefix(ms[until].Name, prefix) {
+// until++
+// }
+// return ms[from:until]
+//}
diff --git a/src/go/plugin/go.d/modules/vsphere/client/client.go b/src/go/plugin/go.d/modules/vsphere/client/client.go
new file mode 100644
index 000000000..ba74eca94
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/client/client.go
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "context"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/vmware/govmomi"
+ "github.com/vmware/govmomi/performance"
+ "github.com/vmware/govmomi/session"
+ "github.com/vmware/govmomi/view"
+ "github.com/vmware/govmomi/vim25"
+ "github.com/vmware/govmomi/vim25/mo"
+ "github.com/vmware/govmomi/vim25/soap"
+ "github.com/vmware/govmomi/vim25/types"
+)
+
+const (
+ datacenter = "Datacenter"
+ folder = "Folder"
+ computeResource = "ComputeResource"
+ hostSystem = "HostSystem"
+ virtualMachine = "VirtualMachine"
+
+ maxIdleConnections = 32
+)
+
+type Config struct {
+ URL string
+ User string
+ Password string
+ tlscfg.TLSConfig
+ Timeout time.Duration
+}
+
+type Client struct {
+ client *govmomi.Client
+ root *view.ContainerView
+ perf *performance.Manager
+}
+
+func newSoapClient(config Config) (*soap.Client, error) {
+ soapURL, err := soap.ParseURL(config.URL)
+ if err != nil || soapURL == nil {
+ return nil, err
+ }
+ soapURL.User = url.UserPassword(config.User, config.Password)
+ soapClient := soap.NewClient(soapURL, config.TLSConfig.InsecureSkipVerify)
+
+ tlsConfig, err := tlscfg.NewTLSConfig(config.TLSConfig)
+ if err != nil {
+ return nil, err
+ }
+ if tlsConfig != nil && len(tlsConfig.Certificates) > 0 {
+ soapClient.SetCertificate(tlsConfig.Certificates[0])
+ }
+ if config.TLSConfig.TLSCA != "" {
+ if err := soapClient.SetRootCAs(config.TLSConfig.TLSCA); err != nil {
+ return nil, err
+ }
+ }
+
+ if t, ok := soapClient.Transport.(*http.Transport); ok {
+ t.MaxIdleConnsPerHost = maxIdleConnections
+ t.TLSHandshakeTimeout = config.Timeout
+ }
+ soapClient.Timeout = config.Timeout
+
+ return soapClient, nil
+}
+
+func newContainerView(ctx context.Context, client *govmomi.Client) (*view.ContainerView, error) {
+ viewManager := view.NewManager(client.Client)
+ return viewManager.CreateContainerView(ctx, client.ServiceContent.RootFolder, []string{}, true)
+}
+
+func newPerformanceManager(client *vim25.Client) *performance.Manager {
+ perfManager := performance.NewManager(client)
+ perfManager.Sort = true
+ return perfManager
+}
+
+func New(config Config) (*Client, error) {
+ ctx := context.Background()
+ soapClient, err := newSoapClient(config)
+ if err != nil {
+ return nil, err
+ }
+
+ vimClient, err := vim25.NewClient(ctx, soapClient)
+ if err != nil {
+ return nil, err
+ }
+
+ vmomiClient := &govmomi.Client{
+ Client: vimClient,
+ SessionManager: session.NewManager(vimClient),
+ }
+
+ userInfo := url.UserPassword(config.User, config.Password)
+ addKeepAlive(vmomiClient, userInfo)
+
+ err = vmomiClient.Login(ctx, userInfo)
+ if err != nil {
+ return nil, err
+ }
+
+ containerView, err := newContainerView(ctx, vmomiClient)
+ if err != nil {
+ return nil, err
+ }
+
+ perfManager := newPerformanceManager(vimClient)
+
+ client := &Client{
+ client: vmomiClient,
+ perf: perfManager,
+ root: containerView,
+ }
+
+ return client, nil
+}
+
+func (c *Client) IsSessionActive() (bool, error) {
+ return c.client.SessionManager.SessionIsActive(context.Background())
+}
+
+func (c *Client) Version() string {
+ return c.client.ServiceContent.About.Version
+}
+
+func (c *Client) Login(userinfo *url.Userinfo) error {
+ return c.client.Login(context.Background(), userinfo)
+}
+
+func (c *Client) Logout() error {
+ return c.client.Logout(context.Background())
+}
+
+func (c *Client) PerformanceMetrics(pqs []types.PerfQuerySpec) ([]performance.EntityMetric, error) {
+ metrics, err := c.perf.Query(context.Background(), pqs)
+ if err != nil {
+ return nil, err
+ }
+ return c.perf.ToMetricSeries(context.Background(), metrics)
+}
+
+func (c *Client) Datacenters(pathSet ...string) (dcs []mo.Datacenter, err error) {
+ err = c.root.Retrieve(context.Background(), []string{datacenter}, pathSet, &dcs)
+ return
+}
+
+func (c *Client) Folders(pathSet ...string) (folders []mo.Folder, err error) {
+ err = c.root.Retrieve(context.Background(), []string{folder}, pathSet, &folders)
+ return
+}
+
+func (c *Client) ComputeResources(pathSet ...string) (computes []mo.ComputeResource, err error) {
+ err = c.root.Retrieve(context.Background(), []string{computeResource}, pathSet, &computes)
+ return
+}
+
+func (c *Client) Hosts(pathSet ...string) (hosts []mo.HostSystem, err error) {
+ err = c.root.Retrieve(context.Background(), []string{hostSystem}, pathSet, &hosts)
+ return
+}
+
+func (c *Client) VirtualMachines(pathSet ...string) (vms []mo.VirtualMachine, err error) {
+ err = c.root.Retrieve(context.Background(), []string{virtualMachine}, pathSet, &vms)
+ return
+}
+
+func (c *Client) CounterInfoByName() (map[string]*types.PerfCounterInfo, error) {
+ return c.perf.CounterInfoByName(context.Background())
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/client/client_test.go b/src/go/plugin/go.d/modules/vsphere/client/client_test.go
new file mode 100644
index 000000000..c82ce1993
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/client/client_test.go
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "crypto/tls"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/vmware/govmomi/simulator"
+ "github.com/vmware/govmomi/vim25/mo"
+ "github.com/vmware/govmomi/vim25/types"
+)
+
+func TestNew(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ v, err := client.IsSessionActive()
+ assert.NoError(t, err)
+ assert.True(t, v)
+}
+
+func TestClient_Version(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ assert.NotEmpty(t, client.Version())
+}
+
+func TestClient_CounterInfoByName(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ v, err := client.CounterInfoByName()
+ assert.NoError(t, err)
+ assert.IsType(t, map[string]*types.PerfCounterInfo{}, v)
+ assert.NotEmpty(t, v)
+}
+
+func TestClient_IsSessionActive(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ v, err := client.IsSessionActive()
+ assert.NoError(t, err)
+ assert.True(t, v)
+}
+
+func TestClient_Login(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ assert.NoError(t, client.Logout())
+
+ err := client.Login(url.UserPassword("admin", "password"))
+ assert.NoError(t, err)
+
+ ok, err := client.IsSessionActive()
+ assert.NoError(t, err)
+ assert.True(t, ok)
+}
+
+func TestClient_Logout(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ assert.NoError(t, client.Logout())
+
+ v, err := client.IsSessionActive()
+ assert.NoError(t, err)
+ assert.False(t, v)
+}
+
+func TestClient_Datacenters(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ dcs, err := client.Datacenters()
+ assert.NoError(t, err)
+ assert.NotEmpty(t, dcs)
+}
+
+func TestClient_Folders(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ folders, err := client.Folders()
+ assert.NoError(t, err)
+ assert.NotEmpty(t, folders)
+}
+
+func TestClient_ComputeResources(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ computes, err := client.ComputeResources()
+ assert.NoError(t, err)
+ assert.NotEmpty(t, computes)
+}
+
+func TestClient_Hosts(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ hosts, err := client.Hosts()
+ assert.NoError(t, err)
+ assert.NotEmpty(t, hosts)
+}
+
+func TestClient_VirtualMachines(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ vms, err := client.VirtualMachines()
+ assert.NoError(t, err)
+ assert.NotEmpty(t, vms)
+}
+
+func TestClient_PerformanceMetrics(t *testing.T) {
+ client, teardown := prepareClient(t)
+ defer teardown()
+
+ hosts, err := client.Hosts()
+ require.NoError(t, err)
+ metrics, err := client.PerformanceMetrics(hostsPerfQuerySpecs(hosts))
+ require.NoError(t, err)
+ assert.True(t, len(metrics) > 0)
+}
+
+func prepareClient(t *testing.T) (client *Client, teardown func()) {
+ model, srv := createSim(t)
+ teardown = func() { model.Remove(); srv.Close() }
+ return newClient(t, srv.URL), teardown
+}
+
+func newClient(t *testing.T, vCenterURL *url.URL) *Client {
+ client, err := New(Config{
+ URL: vCenterURL.String(),
+ User: "admin",
+ Password: "password",
+ Timeout: time.Second * 3,
+ TLSConfig: tlscfg.TLSConfig{InsecureSkipVerify: true},
+ })
+ require.NoError(t, err)
+ return client
+}
+
+func createSim(t *testing.T) (*simulator.Model, *simulator.Server) {
+ model := simulator.VPX()
+ err := model.Create()
+ require.NoError(t, err)
+ model.Service.TLS = new(tls.Config)
+ return model, model.Service.NewServer()
+}
+
+func hostsPerfQuerySpecs(hosts []mo.HostSystem) []types.PerfQuerySpec {
+ var pqs []types.PerfQuerySpec
+ for _, host := range hosts {
+ pq := types.PerfQuerySpec{
+ Entity: host.Reference(),
+ MaxSample: 1,
+ MetricId: []types.PerfMetricId{{CounterId: 32, Instance: ""}},
+ IntervalId: 20,
+ Format: "normal",
+ }
+ pqs = append(pqs, pq)
+ }
+ return pqs
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/client/keepalive.go b/src/go/plugin/go.d/modules/vsphere/client/keepalive.go
new file mode 100644
index 000000000..0ce1ef5c0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/client/keepalive.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package client
+
+import (
+ "context"
+ "net/url"
+ "time"
+
+ "github.com/vmware/govmomi"
+ "github.com/vmware/govmomi/session"
+ "github.com/vmware/govmomi/vim25/methods"
+ "github.com/vmware/govmomi/vim25/soap"
+ "github.com/vmware/govmomi/vim25/types"
+)
+
+const (
+ keepAliveEvery = time.Second * 15
+)
+
+// TODO: survive a vCenter reboot; it looks like we need to call New() again.
+func addKeepAlive(client *govmomi.Client, userinfo *url.Userinfo) {
+ f := func(rt soap.RoundTripper) error {
+ _, err := methods.GetCurrentTime(context.Background(), rt)
+ if err == nil {
+ return nil
+ }
+
+ if !isNotAuthenticated(err) {
+ return nil
+ }
+
+ _ = client.Login(context.Background(), userinfo)
+ return nil
+ }
+ client.Client.RoundTripper = session.KeepAliveHandler(client.Client.RoundTripper, keepAliveEvery, f)
+}
+
+func isNotAuthenticated(err error) bool {
+ if !soap.IsSoapFault(err) {
+ return false
+ }
+ _, ok := soap.ToSoapFault(err).VimFault().(*types.NotAuthenticated)
+ return ok
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/collect.go b/src/go/plugin/go.d/modules/vsphere/collect.go
new file mode 100644
index 000000000..e5672d3fd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/collect.go
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vsphere
+
+import (
+ "errors"
+ "fmt"
+ "time"
+
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+
+ "github.com/vmware/govmomi/performance"
+)
+
+// ManagedEntityStatus
+var overallStatuses = []string{"green", "red", "yellow", "gray"}
+
+func (vs *VSphere) collect() (map[string]int64, error) {
+ vs.collectionLock.Lock()
+ defer vs.collectionLock.Unlock()
+
+ vs.Debug("starting collection process")
+ t := time.Now()
+ mx := make(map[string]int64)
+
+ err := vs.collectHosts(mx)
+ if err != nil {
+ return nil, err
+ }
+
+ err = vs.collectVMs(mx)
+ if err != nil {
+ return nil, err
+ }
+
+ vs.updateCharts()
+
+ vs.Debugf("metrics collected, process took %s", time.Since(t))
+
+ return mx, nil
+}
+
+func (vs *VSphere) collectHosts(mx map[string]int64) error {
+ if len(vs.resources.Hosts) == 0 {
+ return nil
+ }
+ // NOTE: returns unsorted if at least one types.PerfMetricId Instance is not ""
+ metrics := vs.ScrapeHosts(vs.resources.Hosts)
+ if len(metrics) == 0 {
+ return errors.New("failed to scrape host metrics")
+ }
+
+ vs.collectHostsMetrics(mx, metrics)
+
+ return nil
+}
+
+func (vs *VSphere) collectHostsMetrics(mx map[string]int64, metrics []performance.EntityMetric) {
+ for k := range vs.discoveredHosts {
+ vs.discoveredHosts[k]++
+ }
+
+ for _, metric := range metrics {
+ if host := vs.resources.Hosts.Get(metric.Entity.Value); host != nil {
+ vs.discoveredHosts[host.ID] = 0
+ writeHostMetrics(mx, host, metric.Value)
+ }
+ }
+}
+
+func writeHostMetrics(mx map[string]int64, host *rs.Host, metrics []performance.MetricSeries) {
+ for _, metric := range metrics {
+ if len(metric.Value) == 0 || metric.Value[0] == -1 {
+ continue
+ }
+ key := fmt.Sprintf("%s_%s", host.ID, metric.Name)
+ mx[key] = metric.Value[0]
+ }
+ for _, v := range overallStatuses {
+ key := fmt.Sprintf("%s_overall.status.%s", host.ID, v)
+ mx[key] = boolToInt(host.OverallStatus == v)
+ }
+}
+
+func (vs *VSphere) collectVMs(mx map[string]int64) error {
+ if len(vs.resources.VMs) == 0 {
+ return nil
+ }
+ // NOTE: returns unsorted if at least one types.PerfMetricId Instance is not ""
+ ems := vs.ScrapeVMs(vs.resources.VMs)
+ if len(ems) == 0 {
+ return errors.New("failed to scrape VM metrics")
+ }
+
+ vs.collectVMsMetrics(mx, ems)
+
+ return nil
+}
+
+func (vs *VSphere) collectVMsMetrics(mx map[string]int64, metrics []performance.EntityMetric) {
+ for id := range vs.discoveredVMs {
+ vs.discoveredVMs[id]++
+ }
+
+ for _, metric := range metrics {
+ if vm := vs.resources.VMs.Get(metric.Entity.Value); vm != nil {
+ writeVMMetrics(mx, vm, metric.Value)
+ vs.discoveredVMs[vm.ID] = 0
+ }
+ }
+}
+
+func writeVMMetrics(mx map[string]int64, vm *rs.VM, metrics []performance.MetricSeries) {
+ for _, metric := range metrics {
+ if len(metric.Value) == 0 || metric.Value[0] == -1 {
+ continue
+ }
+ key := fmt.Sprintf("%s_%s", vm.ID, metric.Name)
+ mx[key] = metric.Value[0]
+ }
+ for _, v := range overallStatuses {
+ key := fmt.Sprintf("%s_overall.status.%s", vm.ID, v)
+ mx[key] = boolToInt(vm.OverallStatus == v)
+ }
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/config_schema.json b/src/go/plugin/go.d/modules/vsphere/config_schema.json
new file mode 100644
index 000000000..8902e73ed
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/config_schema.json
@@ -0,0 +1,252 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "VMware vCenter Server collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 20
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the VMware vCenter Server.",
+ "type": "string",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 20
+ },
+ "discovery_interval": {
+ "title": "Discovery interval",
+ "description": "Hosts and VMs discovery interval in seconds.",
+ "type": "number",
+ "minimum": 60,
+ "default": 300
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "host_include": {
+ "title": "Host selectors",
+ "description": "Configuration for monitoring specific hosts. The selector format follows the pattern `/Datacenter/Cluster/Host`, where each value can be set using [Netdata simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "items": {
+ "title": "Host selector",
+ "description": "",
+ "type": "string",
+ "default": "/*/*/*",
+ "pattern": "^$|^/"
+ },
+ "default": [
+ "/*"
+ ]
+ },
+ "vm_include": {
+ "title": "Virtual machine selectors",
+ "description": "Configuration for monitoring specific virtual machines. The selector format follows the pattern `/Datacenter/Cluster/Host/VM`, where each value can be set using [Netdata simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme).",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "items": {
+ "title": "VM selector",
+ "description": "",
+ "type": "string",
+ "default": "/*/*/*/*",
+ "pattern": "^$|^/"
+ },
+ "default": [
+ "/*"
+ ]
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url",
+ "username",
+ "password",
+ "host_include",
+ "vm_include"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "discovery_interval",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Hosts & VMs selector",
+ "fields": [
+ "host_include",
+ "vm_include"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "update_every": {
+ "ui:help": "**Important**: vSphere generates real-time statistics every 20 seconds. Setting this value lower won't improve data accuracy. For larger vSphere deployments, consider increasing this value to ensure complete data collection during each cycle. To find the optimal value, run the collector in debug mode and see how long it takes to collect metrics."
+ },
+ "url": {
+ "ui:placeholder": "https://203.0.113.0"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "host_include": {
+ "ui:listFlavour": "list"
+ },
+ "vm_include": {
+ "ui:listFlavour": "list"
+ },
+ "username": {
+ "ui:placeholder": "admin@vsphere.local",
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/discover.go b/src/go/plugin/go.d/modules/vsphere/discover.go
new file mode 100644
index 000000000..1ea0a4d6e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/discover.go
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vsphere
+
+func (vs *VSphere) goDiscovery() {
+ if vs.discoveryTask != nil {
+ vs.discoveryTask.stop()
+ }
+ vs.Infof("starting discovery process, will do discovery every %s", vs.DiscoveryInterval)
+
+ job := func() {
+ err := vs.discoverOnce()
+ if err != nil {
+ vs.Errorf("error on discovering : %v", err)
+ }
+ }
+ vs.discoveryTask = newTask(job, vs.DiscoveryInterval.Duration())
+}
+
+func (vs *VSphere) discoverOnce() error {
+ res, err := vs.Discover()
+ if err != nil {
+ return err
+ }
+
+ vs.collectionLock.Lock()
+ vs.resources = res
+ vs.collectionLock.Unlock()
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/discover/build.go b/src/go/plugin/go.d/modules/vsphere/discover/build.go
new file mode 100644
index 000000000..dbd0baab2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/discover/build.go
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discover
+
+import (
+ "time"
+
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+
+ "github.com/vmware/govmomi/vim25/mo"
+)
+
+func (d Discoverer) build(raw *resources) *rs.Resources {
+ d.Debug("discovering : building : starting building resources process")
+ t := time.Now()
+
+ var res rs.Resources
+ res.DataCenters = d.buildDatacenters(raw.dcs)
+ res.Folders = d.buildFolders(raw.folders)
+ res.Clusters = d.buildClusters(raw.clusters)
+ fixClustersParentID(&res)
+ res.Hosts = d.buildHosts(raw.hosts)
+ res.VMs = d.buildVMs(raw.vms)
+
+ d.Infof("discovering : building : built %d/%d dcs, %d/%d folders, %d/%d clusters, %d/%d hosts, %d/%d vms, process took %s",
+ len(res.DataCenters),
+ len(raw.dcs),
+ len(res.Folders),
+ len(raw.folders),
+ len(res.Clusters),
+ len(raw.clusters),
+ len(res.Hosts),
+ len(raw.hosts),
+ len(res.VMs),
+ len(raw.vms),
+ time.Since(t),
+ )
+ return &res
+}
+
+// cluster parent is folder by default
+// should be called after buildDatacenters, buildFolders and buildClusters
+func fixClustersParentID(res *rs.Resources) {
+ for _, c := range res.Clusters {
+ c.ParentID = findClusterDcID(c.ParentID, res.Folders)
+ }
+}
+
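+// findClusterDcID walks up the folder chain until the parent is no longer a folder;
+// what remains is the datacenter ID.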
+func findClusterDcID(parentID string, folders rs.Folders) string {
+ f := folders.Get(parentID)
+ if f == nil {
+ return parentID
+ }
+ return findClusterDcID(f.ParentID, folders)
+}
+
+func (Discoverer) buildDatacenters(raw []mo.Datacenter) rs.DataCenters {
+ dcs := make(rs.DataCenters)
+ for _, d := range raw {
+ dcs.Put(newDC(d))
+ }
+ return dcs
+}
+
+func newDC(raw mo.Datacenter) *rs.Datacenter {
+ // Datacenter1 datacenter-2 group-h4 group-v3
+ return &rs.Datacenter{
+ Name: raw.Name,
+ ID: raw.Reference().Value,
+ }
+}
+
+func (Discoverer) buildFolders(raw []mo.Folder) rs.Folders {
+ fs := make(rs.Folders)
+ for _, d := range raw {
+ fs.Put(newFolder(d))
+ }
+ return fs
+}
+
+func newFolder(raw mo.Folder) *rs.Folder {
+ // vm group-v55 datacenter-54
+ // host group-h56 datacenter-54
+ // datastore group-s57 datacenter-54
+ // network group-n58 datacenter-54
+ return &rs.Folder{
+ Name: raw.Name,
+ ID: raw.Reference().Value,
+ ParentID: raw.Parent.Value,
+ }
+}
+
+func (Discoverer) buildClusters(raw []mo.ComputeResource) rs.Clusters {
+ clusters := make(rs.Clusters)
+ for _, c := range raw {
+ clusters.Put(newCluster(c))
+ }
+ return clusters
+}
+
+func newCluster(raw mo.ComputeResource) *rs.Cluster {
+ // s - dummy cluster, c - created by user cluster
+ // 192.168.0.201 domain-s61 group-h4
+ // New Cluster1 domain-c52 group-h67
+ return &rs.Cluster{
+ Name: raw.Name,
+ ID: raw.Reference().Value,
+ ParentID: raw.Parent.Value,
+ }
+}
+
+const (
+ poweredOn = "poweredOn"
+)
+
+func (d Discoverer) buildHosts(raw []mo.HostSystem) rs.Hosts {
+ var num int
+ hosts := make(rs.Hosts)
+ for _, h := range raw {
+ // poweredOn | poweredOff | standBy | unknown
+ if h.Runtime.PowerState != poweredOn {
+ num++
+ continue
+ }
+ // connected | notResponding | disconnected
+ //if v.Runtime.ConnectionState == "" {
+ //
+ //}
+ hosts.Put(newHost(h))
+ }
+ if num > 0 {
+ d.Infof("discovering : building : removed %d hosts (not powered on)", num)
+ }
+ return hosts
+}
+
+func newHost(raw mo.HostSystem) *rs.Host {
+ // 192.168.0.201 host-22 domain-s61
+ // 192.168.0.202 host-28 domain-c52
+ // 192.168.0.203 host-33 domain-c52
+ return &rs.Host{
+ Name: raw.Name,
+ ID: raw.Reference().Value,
+ ParentID: raw.Parent.Value,
+ OverallStatus: string(raw.Summary.OverallStatus),
+ Ref: raw.Reference(),
+ }
+}
+
+func (d Discoverer) buildVMs(raw []mo.VirtualMachine) rs.VMs {
+ var num int
+ vms := make(rs.VMs)
+ for _, v := range raw {
+ // poweredOff | poweredOn | suspended
+ if v.Runtime.PowerState != poweredOn {
+ num++
+ continue
+ }
+ // connected | disconnected | orphaned | inaccessible | invalid
+ //if v.Runtime.ConnectionState == "" {
+ //
+ //}
+ vms.Put(newVM(v))
+ }
+ if num > 0 {
+ d.Infof("discovering : building : removed %d vms (not powered on)", num)
+ }
+ return vms
+}
+
+func newVM(raw mo.VirtualMachine) *rs.VM {
+ // deb91 vm-25 group-v3 host-22
+ return &rs.VM{
+ Name: raw.Name,
+ ID: raw.Reference().Value,
+ ParentID: raw.Runtime.Host.Value,
+ OverallStatus: string(raw.Summary.OverallStatus),
+ Ref: raw.Reference(),
+ }
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/discover/discover.go b/src/go/plugin/go.d/modules/vsphere/discover/discover.go
new file mode 100644
index 000000000..f73c58c66
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/discover/discover.go
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discover
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/match"
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/vmware/govmomi/vim25/mo"
+ "github.com/vmware/govmomi/vim25/types"
+)
+
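+// Client is the subset of the vSphere client API used by the discoverer;
+// it is expected to be satisfied by this module's client package.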
+type Client interface {
+ Datacenters(pathSet ...string) ([]mo.Datacenter, error)
+ Folders(pathSet ...string) ([]mo.Folder, error)
+ ComputeResources(pathSet ...string) ([]mo.ComputeResource, error)
+ Hosts(pathSet ...string) ([]mo.HostSystem, error)
+ VirtualMachines(pathSet ...string) ([]mo.VirtualMachine, error)
+
+ CounterInfoByName() (map[string]*types.PerfCounterInfo, error)
+}
+
+func New(client Client) *Discoverer {
+ return &Discoverer{
+ Client: client,
+ }
+}
+
+type Discoverer struct {
+ *logger.Logger
+ Client
+ match.HostMatcher
+ match.VMMatcher
+}
+
+type resources struct {
+ dcs []mo.Datacenter
+ folders []mo.Folder
+ clusters []mo.ComputeResource
+ hosts []mo.HostSystem
+ vms []mo.VirtualMachine
+}
+
+func (d Discoverer) Discover() (*rs.Resources, error) {
+ startTime := time.Now()
+ raw, err := d.discover()
+ if err != nil {
+ return nil, fmt.Errorf("discovering resources : %v", err)
+ }
+
+ res := d.build(raw)
+
+ err = d.setHierarchy(res)
+ if err != nil {
+ // TODO: handle objects w/o hier?
+ d.Error(err)
+ }
+
+ numH := len(res.Hosts)
+ numV := len(res.VMs)
+ removed := d.removeUnmatched(res)
+ if removed == (numH + numV) {
+ return nil, fmt.Errorf("all resources were filtered (%d hosts, %d vms)", numH, numV)
+ }
+
+ err = d.collectMetricLists(res)
+ if err != nil {
+ return nil, fmt.Errorf("collecting metric lists : %v", err)
+ }
+
+ d.Infof("discovering : discovered %d/%d hosts, %d/%d vms, the whole process took %s",
+ len(res.Hosts),
+ len(raw.hosts),
+ len(res.VMs),
+ len(raw.vms),
+ time.Since(startTime))
+
+ return res, nil
+}
+
+var (
+ // properties to set
+ datacenterPathSet = []string{"name", "parent"}
+ folderPathSet = []string{"name", "parent"}
+ clusterPathSet = []string{"name", "parent"}
+ hostPathSet = []string{"name", "parent", "runtime.powerState", "summary.overallStatus"}
+ vmPathSet = []string{"name", "runtime.host", "runtime.powerState", "summary.overallStatus"}
+)
+
+func (d Discoverer) discover() (*resources, error) {
+ d.Debug("discovering : starting resource discovering process")
+
+ start := time.Now()
+ t := start
+ datacenters, err := d.Datacenters(datacenterPathSet...)
+ if err != nil {
+ return nil, err
+ }
+ d.Debugf("discovering : found %d dcs, process took %s", len(datacenters), time.Since(t))
+
+ t = time.Now()
+ folders, err := d.Folders(folderPathSet...)
+ if err != nil {
+ return nil, err
+ }
+ d.Debugf("discovering : found %d folders, process took %s", len(folders), time.Since(t))
+
+ t = time.Now()
+ clusters, err := d.ComputeResources(clusterPathSet...)
+ if err != nil {
+ return nil, err
+ }
+ d.Debugf("discovering : found %d clusters, process took %s", len(clusters), time.Since(t))
+
+ t = time.Now()
+ hosts, err := d.Hosts(hostPathSet...)
+ if err != nil {
+ return nil, err
+ }
+ d.Debugf("discovering : found %d hosts, process took %s", len(hosts), time.Since(t))
+
+ t = time.Now()
+ vms, err := d.VirtualMachines(vmPathSet...)
+ if err != nil {
+ return nil, err
+ }
+ d.Debugf("discovering : found %d vms, process took %s", len(vms), time.Since(t))
+
+ raw := resources{
+ dcs: datacenters,
+ folders: folders,
+ clusters: clusters,
+ hosts: hosts,
+ vms: vms,
+ }
+
+ d.Infof("discovering : found %d dcs, %d folders, %d clusters (%d dummy), %d hosts, %d vms, process took %s",
+ len(raw.dcs),
+ len(raw.folders),
+ len(clusters),
+ numOfDummyClusters(clusters),
+ len(raw.hosts),
+ len(raw.vms),
+ time.Since(start),
+ )
+
+ return &raw, nil
+}
+
+func numOfDummyClusters(clusters []mo.ComputeResource) (num int) {
+ for _, c := range clusters {
+ // domain-s61 | domain-c52
+ if strings.HasPrefix(c.Reference().Value, "domain-s") {
+ num++
+ }
+ }
+ return num
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/discover/discover_test.go b/src/go/plugin/go.d/modules/vsphere/discover/discover_test.go
new file mode 100644
index 000000000..9d0df6077
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/discover/discover_test.go
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discover
+
+import (
+ "crypto/tls"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/client"
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/vmware/govmomi/simulator"
+)
+
+func TestDiscoverer_Discover(t *testing.T) {
+ d, _, teardown := prepareDiscovererSim(t)
+ defer teardown()
+
+ res, err := d.Discover()
+
+ require.NoError(t, err)
+ assert.True(t, len(res.DataCenters) > 0)
+ assert.True(t, len(res.Folders) > 0)
+ assert.True(t, len(res.Clusters) > 0)
+ assert.True(t, len(res.Hosts) > 0)
+ assert.True(t, len(res.VMs) > 0)
+ assert.True(t, isHierarchySet(res))
+ assert.True(t, isMetricListsCollected(res))
+}
+
+func TestDiscoverer_discover(t *testing.T) {
+ d, model, teardown := prepareDiscovererSim(t)
+ defer teardown()
+
+ raw, err := d.discover()
+
+ require.NoError(t, err)
+ count := model.Count()
+ assert.Lenf(t, raw.dcs, count.Datacenter, "datacenters")
+ assert.Lenf(t, raw.folders, count.Folder-1, "folders") // minus root folder
+ dummyClusters := model.Host * count.Datacenter
+ assert.Lenf(t, raw.clusters, count.Cluster+dummyClusters, "clusters")
+ assert.Lenf(t, raw.hosts, count.Host, "hosts")
+ assert.Lenf(t, raw.vms, count.Machine, "vms")
+}
+
+func TestDiscoverer_build(t *testing.T) {
+ d, _, teardown := prepareDiscovererSim(t)
+ defer teardown()
+
+ raw, err := d.discover()
+ require.NoError(t, err)
+
+ res := d.build(raw)
+
+ assert.Lenf(t, res.DataCenters, len(raw.dcs), "datacenters")
+ assert.Lenf(t, res.Folders, len(raw.folders), "folders")
+ assert.Lenf(t, res.Clusters, len(raw.clusters), "clusters")
+ assert.Lenf(t, res.Hosts, len(raw.hosts), "hosts")
+ assert.Lenf(t, res.VMs, len(raw.vms), "vms")
+}
+
+func TestDiscoverer_setHierarchy(t *testing.T) {
+ d, _, teardown := prepareDiscovererSim(t)
+ defer teardown()
+
+ raw, err := d.discover()
+ require.NoError(t, err)
+ res := d.build(raw)
+
+ err = d.setHierarchy(res)
+
+ require.NoError(t, err)
+ assert.True(t, isHierarchySet(res))
+}
+
+func TestDiscoverer_removeUnmatched(t *testing.T) {
+ d, _, teardown := prepareDiscovererSim(t)
+ defer teardown()
+
+ d.HostMatcher = falseHostMatcher{}
+ d.VMMatcher = falseVMMatcher{}
+ raw, err := d.discover()
+ require.NoError(t, err)
+ res := d.build(raw)
+
+ numVMs, numHosts := len(res.VMs), len(res.Hosts)
+ assert.Equal(t, numVMs+numHosts, d.removeUnmatched(res))
+ assert.Lenf(t, res.Hosts, 0, "hosts")
+ assert.Lenf(t, res.VMs, 0, "vms")
+}
+
+func TestDiscoverer_collectMetricLists(t *testing.T) {
+ d, _, teardown := prepareDiscovererSim(t)
+ defer teardown()
+
+ raw, err := d.discover()
+ require.NoError(t, err)
+
+ res := d.build(raw)
+ err = d.collectMetricLists(res)
+
+ require.NoError(t, err)
+ assert.True(t, isMetricListsCollected(res))
+}
+
+func prepareDiscovererSim(t *testing.T) (d *Discoverer, model *simulator.Model, teardown func()) {
+ model, srv := createSim(t)
+ teardown = func() { model.Remove(); srv.Close() }
+ c := newClient(t, srv.URL)
+
+ return New(c), model, teardown
+}
+
+func newClient(t *testing.T, vCenterURL *url.URL) *client.Client {
+ c, err := client.New(client.Config{
+ URL: vCenterURL.String(),
+ User: "admin",
+ Password: "password",
+ Timeout: time.Second * 3,
+ TLSConfig: tlscfg.TLSConfig{InsecureSkipVerify: true},
+ })
+ require.NoError(t, err)
+ return c
+}
+
+func createSim(t *testing.T) (*simulator.Model, *simulator.Server) {
+ model := simulator.VPX()
+ err := model.Create()
+ require.NoError(t, err)
+ model.Service.TLS = new(tls.Config)
+ return model, model.Service.NewServer()
+}
+
+func isHierarchySet(res *rs.Resources) bool {
+ for _, c := range res.Clusters {
+ if !c.Hier.IsSet() {
+ return false
+ }
+ }
+ for _, h := range res.Hosts {
+ if !h.Hier.IsSet() {
+ return false
+ }
+ }
+ for _, v := range res.VMs {
+ if !v.Hier.IsSet() {
+ return false
+ }
+ }
+ return true
+}
+
+func isMetricListsCollected(res *rs.Resources) bool {
+ for _, h := range res.Hosts {
+ if h.MetricList == nil {
+ return false
+ }
+ }
+ for _, v := range res.VMs {
+ if v.MetricList == nil {
+ return false
+ }
+ }
+ return true
+}
+
+type falseHostMatcher struct{}
+
+func (falseHostMatcher) Match(*rs.Host) bool { return false }
+
+type falseVMMatcher struct{}
+
+func (falseVMMatcher) Match(*rs.VM) bool { return false }
diff --git a/src/go/plugin/go.d/modules/vsphere/discover/filter.go b/src/go/plugin/go.d/modules/vsphere/discover/filter.go
new file mode 100644
index 000000000..f9fb5ba95
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/discover/filter.go
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discover
+
+import (
+ "time"
+
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+)
+
+func (d Discoverer) matchHost(host *rs.Host) bool {
+ if d.HostMatcher == nil {
+ return true
+ }
+ return d.HostMatcher.Match(host)
+}
+
+func (d Discoverer) matchVM(vm *rs.VM) bool {
+ if d.VMMatcher == nil {
+ return true
+ }
+ return d.VMMatcher.Match(vm)
+}
+
+func (d Discoverer) removeUnmatched(res *rs.Resources) (removed int) {
+ d.Debug("discovering : filtering : starting filtering resources process")
+ t := time.Now()
+ numH, numV := len(res.Hosts), len(res.VMs)
+ removed += d.removeUnmatchedHosts(res.Hosts)
+ removed += d.removeUnmatchedVMs(res.VMs)
+ d.Infof("discovering : filtering : filtered %d/%d hosts, %d/%d vms, process took %s",
+ numH-len(res.Hosts),
+ numH,
+ numV-len(res.VMs),
+ numV,
+ time.Since(t))
+ return
+}
+
+func (d Discoverer) removeUnmatchedHosts(hosts rs.Hosts) (removed int) {
+ for _, v := range hosts {
+ if !d.matchHost(v) {
+ removed++
+ hosts.Remove(v.ID)
+ }
+ }
+ d.Debugf("discovering : filtering : removed %d unmatched hosts", removed)
+ return removed
+}
+
+func (d Discoverer) removeUnmatchedVMs(vms rs.VMs) (removed int) {
+ for _, v := range vms {
+ if !d.matchVM(v) {
+ removed++
+ vms.Remove(v.ID)
+ }
+ }
+ d.Debugf("discovering : filtering : removed %d unmatched vms", removed)
+ return removed
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/discover/hierarchy.go b/src/go/plugin/go.d/modules/vsphere/discover/hierarchy.go
new file mode 100644
index 000000000..0f84da2df
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/discover/hierarchy.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discover
+
+import (
+ "time"
+
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+)
+
+func (d Discoverer) setHierarchy(res *rs.Resources) error {
+ d.Debug("discovering : hierarchy : start setting resources hierarchy process")
+ t := time.Now()
+
+ c := d.setClustersHierarchy(res)
+ h := d.setHostsHierarchy(res)
+ v := d.setVMsHierarchy(res)
+
+ // notSet := len(res.Clusters) + len(res.Hosts) + len(res.VMs) - (c + h + v)
+ d.Infof("discovering : hierarchy : set %d/%d clusters, %d/%d hosts, %d/%d vms, process took %s",
+ c, len(res.Clusters),
+ h, len(res.Hosts),
+ v, len(res.VMs),
+ time.Since(t),
+ )
+
+ return nil
+}
+
+func (d Discoverer) setClustersHierarchy(res *rs.Resources) (set int) {
+ for _, cluster := range res.Clusters {
+ if setClusterHierarchy(cluster, res) {
+ set++
+ }
+ }
+ return set
+}
+
+func (d Discoverer) setHostsHierarchy(res *rs.Resources) (set int) {
+ for _, host := range res.Hosts {
+ if setHostHierarchy(host, res) {
+ set++
+ }
+ }
+ return set
+}
+
+func (d Discoverer) setVMsHierarchy(res *rs.Resources) (set int) {
+ for _, vm := range res.VMs {
+ if setVMHierarchy(vm, res) {
+ set++
+ }
+ }
+ return set
+}
+
+func setClusterHierarchy(cluster *rs.Cluster, res *rs.Resources) bool {
+ dc := res.DataCenters.Get(cluster.ParentID)
+ if dc == nil {
+ return false
+ }
+ cluster.Hier.DC.Set(dc.ID, dc.Name)
+ return cluster.Hier.IsSet()
+}
+
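+// setHostHierarchy resolves host -> cluster -> datacenter; it returns false if any link in the chain is missing.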
+func setHostHierarchy(host *rs.Host, res *rs.Resources) bool {
+ cr := res.Clusters.Get(host.ParentID)
+ if cr == nil {
+ return false
+ }
+ host.Hier.Cluster.Set(cr.ID, cr.Name)
+
+ dc := res.DataCenters.Get(cr.ParentID)
+ if dc == nil {
+ return false
+ }
+ host.Hier.DC.Set(dc.ID, dc.Name)
+ return host.Hier.IsSet()
+}
+
+func setVMHierarchy(vm *rs.VM, res *rs.Resources) bool {
+ h := res.Hosts.Get(vm.ParentID)
+ if h == nil {
+ return false
+ }
+ vm.Hier.Host.Set(h.ID, h.Name)
+
+ cr := res.Clusters.Get(h.ParentID)
+ if cr == nil {
+ return false
+ }
+ vm.Hier.Cluster.Set(cr.ID, cr.Name)
+
+ dc := res.DataCenters.Get(cr.ParentID)
+ if dc == nil {
+ return false
+ }
+ vm.Hier.DC.Set(dc.ID, dc.Name)
+ return vm.Hier.IsSet()
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/discover/metric_lists.go b/src/go/plugin/go.d/modules/vsphere/discover/metric_lists.go
new file mode 100644
index 000000000..03ae6d53a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/discover/metric_lists.go
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package discover
+
+import (
+ "sort"
+ "time"
+
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+
+ "github.com/vmware/govmomi/performance"
+ "github.com/vmware/govmomi/vim25/types"
+)
+
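+// collectMetricLists resolves the static hostMetrics/vmMetrics counter names into
+// performance.MetricList values and attaches them to every discovered host and VM.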
+func (d Discoverer) collectMetricLists(res *rs.Resources) error {
+ d.Debug("discovering : metric lists : starting resources metric lists collection process")
+ t := time.Now()
+ perfCounters, err := d.CounterInfoByName()
+ if err != nil {
+ return err
+ }
+
+ hostML := simpleHostMetricList(perfCounters)
+ for _, h := range res.Hosts {
+ h.MetricList = hostML
+ }
+ vmML := simpleVMMetricList(perfCounters)
+ for _, v := range res.VMs {
+ v.MetricList = vmML
+ }
+
+ d.Infof("discovering : metric lists : collected metric lists for %d/%d hosts, %d/%d vms, process took %s",
+ len(res.Hosts),
+ len(res.Hosts),
+ len(res.VMs),
+ len(res.VMs),
+ time.Since(t),
+ )
+
+ return nil
+}
+
+func simpleHostMetricList(pci map[string]*types.PerfCounterInfo) performance.MetricList {
+ return simpleMetricList(hostMetrics, pci)
+}
+
+func simpleVMMetricList(pci map[string]*types.PerfCounterInfo) performance.MetricList {
+ return simpleMetricList(vmMetrics, pci)
+}
+
+func simpleMetricList(metrics []string, pci map[string]*types.PerfCounterInfo) performance.MetricList {
+ sort.Strings(metrics)
+
+ var pml performance.MetricList
+ for _, v := range metrics {
+ m, ok := pci[v]
+ if !ok {
+ // TODO: should be logged
+ continue
+ }
+ // TODO: only summary metrics for now
+ // TODO: some metrics only appear if Instance is *, for example
+ // virtualDisk.totalWriteLatency.average.scsi0:0
+ // virtualDisk.numberWriteAveraged.average.scsi0:0
+ // virtualDisk.write.average.scsi0:0
+ // virtualDisk.totalReadLatency.average.scsi0:0
+ // virtualDisk.numberReadAveraged.average.scsi0:0
+ // virtualDisk.read.average.scsi0:0
+ // disk.numberReadAveraged.average
+ // disk.numberWriteAveraged.average
+ // TODO: metrics will be unsorted after if at least one Instance is *
+ pml = append(pml, types.PerfMetricId{CounterId: m.Key, Instance: ""})
+ }
+ return pml
+}
+
+var (
+ vmMetrics = []string{
+ "cpu.usage.average",
+
+ "mem.usage.average",
+ "mem.granted.average",
+ "mem.consumed.average",
+ "mem.active.average",
+ "mem.shared.average",
+ // Refers to VMkernel swapping!
+ "mem.swapinRate.average",
+ "mem.swapoutRate.average",
+ "mem.swapped.average",
+
+ "net.bytesRx.average",
+ "net.bytesTx.average",
+ "net.packetsRx.summation",
+ "net.packetsTx.summation",
+ "net.droppedRx.summation",
+ "net.droppedTx.summation",
+
+ // the only summary disk metrics
+ "disk.read.average",
+ "disk.write.average",
+ "disk.maxTotalLatency.latest",
+
+ "sys.uptime.latest",
+ }
+
+ hostMetrics = []string{
+ "cpu.usage.average",
+
+ "mem.usage.average",
+ "mem.granted.average",
+ "mem.consumed.average",
+ "mem.active.average",
+ "mem.shared.average",
+ "mem.sharedcommon.average",
+ // Refers to VMkernel swapping!
+ "mem.swapinRate.average",
+ "mem.swapoutRate.average",
+
+ "net.bytesRx.average",
+ "net.bytesTx.average",
+ "net.packetsRx.summation",
+ "net.packetsTx.summation",
+ "net.droppedRx.summation",
+ "net.droppedTx.summation",
+ "net.errorsRx.summation",
+ "net.errorsTx.summation",
+
+ // the only summary disk metrics
+ "disk.read.average",
+ "disk.write.average",
+ "disk.maxTotalLatency.latest",
+
+ "sys.uptime.latest",
+ }
+)
diff --git a/src/go/plugin/go.d/modules/vsphere/init.go b/src/go/plugin/go.d/modules/vsphere/init.go
new file mode 100644
index 000000000..e9bfc4e5a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/init.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vsphere
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/client"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/discover"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/scrape"
+)
+
+func (vs *VSphere) validateConfig() error {
+ const minRecommendedUpdateEvery = 20
+
+ if vs.URL == "" {
+ return errors.New("URL is not set")
+ }
+ if vs.Username == "" || vs.Password == "" {
+ return errors.New("username or password not set")
+ }
+ if vs.UpdateEvery < minRecommendedUpdateEvery {
+ vs.Warningf("update_every is too low, minimum recommended is %d", minRecommendedUpdateEvery)
+ }
+ return nil
+}
+
+func (vs *VSphere) initClient() (*client.Client, error) {
+ config := client.Config{
+ URL: vs.URL,
+ User: vs.Username,
+ Password: vs.Password,
+ Timeout: vs.Timeout.Duration(),
+ TLSConfig: vs.Client.TLSConfig,
+ }
+ return client.New(config)
+}
+
+func (vs *VSphere) initDiscoverer(c *client.Client) error {
+ d := discover.New(c)
+ d.Logger = vs.Logger
+
+ hm, err := vs.HostsInclude.Parse()
+ if err != nil {
+ return err
+ }
+ if hm != nil {
+ d.HostMatcher = hm
+ }
+ vmm, err := vs.VMsInclude.Parse()
+ if err != nil {
+ return err
+ }
+ if vmm != nil {
+ d.VMMatcher = vmm
+ }
+
+ vs.discoverer = d
+ return nil
+}
+
+func (vs *VSphere) initScraper(c *client.Client) {
+ ms := scrape.New(c)
+ ms.Logger = vs.Logger
+ vs.scraper = ms
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/integrations/vmware_vcenter_server.md b/src/go/plugin/go.d/modules/vsphere/integrations/vmware_vcenter_server.md
new file mode 100644
index 000000000..3f05eadfd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/integrations/vmware_vcenter_server.md
@@ -0,0 +1,357 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/vsphere/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/vsphere/metadata.yaml"
+sidebar_label: "VMware vCenter Server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Containers and VMs"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# VMware vCenter Server
+
+
+<img src="https://netdata.cloud/img/vmware.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: vsphere
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors host and VM performance statistics from `vCenter` servers.
+
+> **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.
+> go.d.plugin needs to be restarted.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default `update_every` is 20 seconds, and it doesn't make sense to decrease the value:
+**VMware generates real-time statistics at a 20-second granularity**.
+
+For large installations, 20 seconds may not be enough, and the value should be increased.
+
+To find a suitable value, we recommend running the collector in debug mode and checking how long it takes to collect metrics.
+
+<details>
+<summary>Example (unrelated debug lines removed)</summary>
+
+```
+[ilyam@pc]$ ./go.d.plugin -d -m vsphere
+[ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process
+[ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms
+[ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms
+[ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms
+[ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms
+[ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms
+[ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms
+[ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process
+[ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3µs
+[ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process
+[ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522µs
+[ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process
+[ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts
+[ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms
+[ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973µs
+[ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process
+[ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms
+[ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms
+[ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s
+[ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process
+[ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms
+[ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms
+[ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms
+```
+
+</details>
+
+Here you can see that discovery took `525.614041ms` and collecting metrics took `154.77997ms`. Discovery runs in a separate thread, so it doesn't slow down collection.
+The `update_every` and `timeout` parameters should be adjusted based on these numbers.
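+
+For example (illustrative values only, not a recommendation; adjust them to your own measurements), the per-job `update_every` and `timeout` can be tuned like this:
+
+```yaml
+jobs:
+  - name: vcenter1
+    url: https://203.0.113.1
+    username: admin@vsphere.local
+    password: somepassword
+    update_every: 40
+    timeout: 20
+```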
+
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per virtual machine
+
+These metrics refer to the Virtual Machine.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| datacenter | Datacenter name |
+| cluster | Cluster name |
+| host | Host name |
+| vm | Virtual Machine name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| vsphere.vm_cpu_utilization | used | percentage |
+| vsphere.vm_mem_utilization | used | percentage |
+| vsphere.vm_mem_usage | granted, consumed, active, shared | KiB |
+| vsphere.vm_mem_swap_usage | swapped | KiB |
+| vsphere.vm_mem_swap_io | in, out | KiB/s |
+| vsphere.vm_disk_io | read, write | KiB/s |
+| vsphere.vm_disk_max_latency | latency | milliseconds |
+| vsphere.vm_net_traffic | received, sent | KiB/s |
+| vsphere.vm_net_packets | received, sent | packets |
+| vsphere.vm_net_drops | received, sent | packets |
+| vsphere.vm_overall_status | green, red, yellow, gray | status |
+| vsphere.vm_system_uptime | uptime | seconds |
+
+### Per host
+
+These metrics refer to the ESXi host.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| datacenter | Datacenter name |
+| cluster | Cluster name |
+| host | Host name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| vsphere.host_cpu_utilization | used | percentage |
+| vsphere.host_mem_utilization | used | percentage |
+| vsphere.host_mem_usage | granted, consumed, active, shared, sharedcommon | KiB |
+| vsphere.host_mem_swap_io | in, out | KiB/s |
+| vsphere.host_disk_io | read, write | KiB/s |
+| vsphere.host_disk_max_latency | latency | milliseconds |
+| vsphere.host_net_traffic | received, sent | KiB/s |
+| vsphere.host_net_packets | received, sent | packets |
+| vsphere.host_net_drops | received, sent | packets |
+| vsphere.host_net_errors | received, sent | errors |
+| vsphere.host_overall_status | green, red, yellow, gray | status |
+| vsphere.host_system_uptime | uptime | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ vsphere_vm_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_cpu_utilization | Virtual Machine CPU utilization |
+| [ vsphere_vm_mem_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.vm_mem_utilization | Virtual Machine memory utilization |
+| [ vsphere_host_cpu_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_cpu_utilization | ESXi Host CPU utilization |
+| [ vsphere_host_mem_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf) | vsphere.host_mem_utilization | ESXi Host memory utilization |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/vsphere.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/vsphere.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 20 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | vCenter server URL. | | yes |
+| host_include | Hosts selector (filter). | | no |
+| vm_include | Virtual machines selector (filter). | | no |
+| discovery_interval | Hosts and VMs discovery interval. | 300 | no |
+| timeout | HTTP request timeout. | 20 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+##### host_include
+
+Metrics of hosts matching the selector will be collected.
+
+- Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern".
+- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).
+- Syntax:
+
+ ```yaml
+ host_include:
+ - '/DC1/*' # select all hosts from datacenter DC1
+ - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2
+ - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3
+ ```
+
+
+##### vm_include
+
+Metrics of VMs matching the selector will be collected.
+
+- Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern/VM pattern".
+- Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).
+- Syntax:
+
+ ```yaml
+ vm_include:
+ - '/DC1/*' # select all VMs from datacenter DC1
+ - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2
+ - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3
+ ```
+
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name : vcenter1
+ url : https://203.0.113.1
+ username : admin@vsphere.local
+ password : somepassword
+
+```
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name : vcenter1
+ url : https://203.0.113.1
+ username : admin@vsphere.local
+ password : somepassword
+
+ - name : vcenter2
+ url : https://203.0.113.10
+ username : admin@vsphere.local
+ password : somepassword
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `vsphere` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m vsphere
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `vsphere` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep vsphere
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep vsphere /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep vsphere
+```
+
+
diff --git a/src/go/plugin/go.d/modules/vsphere/match/match.go b/src/go/plugin/go.d/modules/vsphere/match/match.go
new file mode 100644
index 000000000..969b5d7c5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/match/match.go
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package match
+
+import (
+ "fmt"
+ "strings"
+
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+type HostMatcher interface {
+ Match(*rs.Host) bool
+}
+
+type VMMatcher interface {
+ Match(*rs.VM) bool
+}
+
+type (
+ hostDCMatcher struct{ m matcher.Matcher }
+ hostClusterMatcher struct{ m matcher.Matcher }
+ hostHostMatcher struct{ m matcher.Matcher }
+ vmDCMatcher struct{ m matcher.Matcher }
+ vmClusterMatcher struct{ m matcher.Matcher }
+ vmHostMatcher struct{ m matcher.Matcher }
+ vmVMMatcher struct{ m matcher.Matcher }
+ orHostMatcher struct{ lhs, rhs HostMatcher }
+ orVMMatcher struct{ lhs, rhs VMMatcher }
+ andHostMatcher struct{ lhs, rhs HostMatcher }
+ andVMMatcher struct{ lhs, rhs VMMatcher }
+)
+
+func (m hostDCMatcher) Match(host *rs.Host) bool { return m.m.MatchString(host.Hier.DC.Name) }
+func (m hostClusterMatcher) Match(host *rs.Host) bool { return m.m.MatchString(host.Hier.Cluster.Name) }
+func (m hostHostMatcher) Match(host *rs.Host) bool { return m.m.MatchString(host.Name) }
+func (m vmDCMatcher) Match(vm *rs.VM) bool { return m.m.MatchString(vm.Hier.DC.Name) }
+func (m vmClusterMatcher) Match(vm *rs.VM) bool { return m.m.MatchString(vm.Hier.Cluster.Name) }
+func (m vmHostMatcher) Match(vm *rs.VM) bool { return m.m.MatchString(vm.Hier.Host.Name) }
+func (m vmVMMatcher) Match(vm *rs.VM) bool { return m.m.MatchString(vm.Name) }
+func (m orHostMatcher) Match(host *rs.Host) bool { return m.lhs.Match(host) || m.rhs.Match(host) }
+func (m orVMMatcher) Match(vm *rs.VM) bool { return m.lhs.Match(vm) || m.rhs.Match(vm) }
+func (m andHostMatcher) Match(host *rs.Host) bool { return m.lhs.Match(host) && m.rhs.Match(host) }
+func (m andVMMatcher) Match(vm *rs.VM) bool { return m.lhs.Match(vm) && m.rhs.Match(vm) }
+
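+// newAndHostMatcher and the sibling constructors below fold two or more matchers into a left-nested AND/OR chain.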
+func newAndHostMatcher(lhs, rhs HostMatcher, others ...HostMatcher) andHostMatcher {
+ m := andHostMatcher{lhs: lhs, rhs: rhs}
+ switch len(others) {
+ case 0:
+ return m
+ default:
+ return newAndHostMatcher(m, others[0], others[1:]...)
+ }
+}
+
+func newAndVMMatcher(lhs, rhs VMMatcher, others ...VMMatcher) andVMMatcher {
+ m := andVMMatcher{lhs: lhs, rhs: rhs}
+ switch len(others) {
+ case 0:
+ return m
+ default:
+ return newAndVMMatcher(m, others[0], others[1:]...)
+ }
+}
+
+func newOrHostMatcher(lhs, rhs HostMatcher, others ...HostMatcher) orHostMatcher {
+ m := orHostMatcher{lhs: lhs, rhs: rhs}
+ switch len(others) {
+ case 0:
+ return m
+ default:
+ return newOrHostMatcher(m, others[0], others[1:]...)
+ }
+}
+
+func newOrVMMatcher(lhs, rhs VMMatcher, others ...VMMatcher) orVMMatcher {
+ m := orVMMatcher{lhs: lhs, rhs: rhs}
+ switch len(others) {
+ case 0:
+ return m
+ default:
+ return newOrVMMatcher(m, others[0], others[1:]...)
+ }
+}
+
+type (
+ VMIncludes []string
+ HostIncludes []string
+)
+
+func (vi VMIncludes) Parse() (VMMatcher, error) {
+ var ms []VMMatcher
+ for _, v := range vi {
+ m, err := parseVMInclude(v)
+ if err != nil {
+ return nil, err
+ }
+ if m == nil {
+ continue
+ }
+ ms = append(ms, m)
+ }
+
+ switch len(ms) {
+ case 0:
+ return nil, nil
+ case 1:
+ return ms[0], nil
+ default:
+ return newOrVMMatcher(ms[0], ms[1], ms[2:]...), nil
+ }
+}
+
+func (hi HostIncludes) Parse() (HostMatcher, error) {
+ var ms []HostMatcher
+ for _, v := range hi {
+ m, err := parseHostInclude(v)
+ if err != nil {
+ return nil, err
+ }
+ if m == nil {
+ continue
+ }
+ ms = append(ms, m)
+ }
+
+ switch len(ms) {
+ case 0:
+ return nil, nil
+ case 1:
+ return ms[0], nil
+ default:
+ return newOrHostMatcher(ms[0], ms[1], ms[2:]...), nil
+ }
+}
+
+const (
+ datacenterIdx = iota
+ clusterIdx
+ hostIdx
+ vmIdx
+)
+
+func cleanInclude(include string) string {
+ return strings.Trim(include, "/")
+}
+
+func parseHostInclude(include string) (HostMatcher, error) {
+ if !isIncludeFormatValid(include) {
+ return nil, fmt.Errorf("bad include format: %s", include)
+ }
+
+ include = cleanInclude(include)
+ parts := strings.Split(include, "/") // /dc/clusterIdx/hostIdx
+ var ms []HostMatcher
+
+ for i, v := range parts {
+ m, err := parseSubInclude(v)
+ if err != nil {
+ return nil, err
+ }
+ switch i {
+ case datacenterIdx:
+ ms = append(ms, hostDCMatcher{m})
+ case clusterIdx:
+ ms = append(ms, hostClusterMatcher{m})
+ case hostIdx:
+ ms = append(ms, hostHostMatcher{m})
+ default:
+ }
+ }
+
+ switch len(ms) {
+ case 0:
+ return nil, nil
+ case 1:
+ return ms[0], nil
+ default:
+ return newAndHostMatcher(ms[0], ms[1], ms[2:]...), nil
+ }
+}
+
+func parseVMInclude(include string) (VMMatcher, error) {
+ if !isIncludeFormatValid(include) {
+ return nil, fmt.Errorf("bad include format: %s", include)
+ }
+
+ include = cleanInclude(include)
+ parts := strings.Split(include, "/") // /dc/clusterIdx/hostIdx/vmIdx
+ var ms []VMMatcher
+
+ for i, v := range parts {
+ m, err := parseSubInclude(v)
+ if err != nil {
+ return nil, err
+ }
+ switch i {
+ case datacenterIdx:
+ ms = append(ms, vmDCMatcher{m})
+ case clusterIdx:
+ ms = append(ms, vmClusterMatcher{m})
+ case hostIdx:
+ ms = append(ms, vmHostMatcher{m})
+ case vmIdx:
+ ms = append(ms, vmVMMatcher{m})
+ }
+ }
+
+ switch len(ms) {
+ case 0:
+ return nil, nil
+ case 1:
+ return ms[0], nil
+ default:
+ return newAndVMMatcher(ms[0], ms[1], ms[2:]...), nil
+ }
+}
+
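+// parseSubInclude maps a single path segment to a matcher: "" and "!*" match nothing,
+// "*" matches everything, anything else is parsed as a simple patterns expression.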
+func parseSubInclude(sub string) (matcher.Matcher, error) {
+ sub = strings.TrimSpace(sub)
+ if sub == "" || sub == "!*" {
+ return matcher.FALSE(), nil
+ }
+ if sub == "*" {
+ return matcher.TRUE(), nil
+ }
+ return matcher.NewSimplePatternsMatcher(sub)
+}
+
+func isIncludeFormatValid(line string) bool {
+ return strings.HasPrefix(line, "/")
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/match/match_test.go b/src/go/plugin/go.d/modules/vsphere/match/match_test.go
new file mode 100644
index 000000000..c11697783
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/match/match_test.go
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package match
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var (
+ trueHostDC = hostDCMatcher{matcher.TRUE()}
+ falseHostDC = hostDCMatcher{matcher.FALSE()}
+ trueVMDC = vmDCMatcher{matcher.TRUE()}
+ falseVMDC = vmDCMatcher{matcher.FALSE()}
+)
+
+func TestOrHostMatcher_Match(t *testing.T) {
+ tests := map[string]struct {
+ expected bool
+ lhs HostMatcher
+ rhs HostMatcher
+ }{
+ "true, true": {expected: true, lhs: trueHostDC, rhs: trueHostDC},
+ "true, false": {expected: true, lhs: trueHostDC, rhs: falseHostDC},
+ "false, true": {expected: true, lhs: falseHostDC, rhs: trueHostDC},
+ "false, false": {expected: false, lhs: falseHostDC, rhs: falseHostDC},
+ }
+
+ var host resources.Host
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ m := newOrHostMatcher(test.lhs, test.rhs)
+ assert.Equal(t, test.expected, m.Match(&host))
+ })
+ }
+}
+
+func TestAndHostMatcher_Match(t *testing.T) {
+ tests := map[string]struct {
+ expected bool
+ lhs HostMatcher
+ rhs HostMatcher
+ }{
+ "true, true": {expected: true, lhs: trueHostDC, rhs: trueHostDC},
+ "true, false": {expected: false, lhs: trueHostDC, rhs: falseHostDC},
+ "false, true": {expected: false, lhs: falseHostDC, rhs: trueHostDC},
+ "false, false": {expected: false, lhs: falseHostDC, rhs: falseHostDC},
+ }
+
+ var host resources.Host
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ m := newAndHostMatcher(test.lhs, test.rhs)
+ assert.Equal(t, test.expected, m.Match(&host))
+ })
+ }
+}
+
+func TestOrVMMatcher_Match(t *testing.T) {
+ tests := map[string]struct {
+ expected bool
+ lhs VMMatcher
+ rhs VMMatcher
+ }{
+ "true, true": {expected: true, lhs: trueVMDC, rhs: trueVMDC},
+ "true, false": {expected: true, lhs: trueVMDC, rhs: falseVMDC},
+ "false, true": {expected: true, lhs: falseVMDC, rhs: trueVMDC},
+ "false, false": {expected: false, lhs: falseVMDC, rhs: falseVMDC},
+ }
+
+ var vm resources.VM
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ m := newOrVMMatcher(test.lhs, test.rhs)
+ assert.Equal(t, test.expected, m.Match(&vm))
+ })
+ }
+}
+
+func TestAndVMMatcher_Match(t *testing.T) {
+ tests := map[string]struct {
+ expected bool
+ lhs VMMatcher
+ rhs VMMatcher
+ }{
+ "true, true": {expected: true, lhs: trueVMDC, rhs: trueVMDC},
+ "true, false": {expected: false, lhs: trueVMDC, rhs: falseVMDC},
+ "false, true": {expected: false, lhs: falseVMDC, rhs: trueVMDC},
+ "false, false": {expected: false, lhs: falseVMDC, rhs: falseVMDC},
+ }
+
+ var vm resources.VM
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ m := newAndVMMatcher(test.lhs, test.rhs)
+ assert.Equal(t, test.expected, m.Match(&vm))
+ })
+ }
+}
+
+func TestHostIncludes_Parse(t *testing.T) {
+ tests := map[string]struct {
+ valid bool
+ expected HostMatcher
+ }{
+ "": {valid: false},
+ "*/C1/H1": {valid: false},
+ "/": {valid: true, expected: falseHostDC},
+ "/*": {valid: true, expected: trueHostDC},
+ "/!*": {valid: true, expected: falseHostDC},
+ "/!*/": {valid: true, expected: falseHostDC},
+ "/!*/ ": {
+ valid: true,
+ expected: andHostMatcher{
+ lhs: falseHostDC,
+ rhs: hostClusterMatcher{matcher.FALSE()},
+ },
+ },
+ "/DC1* DC2* !*/Cluster*": {
+ valid: true,
+ expected: andHostMatcher{
+ lhs: hostDCMatcher{mustSP("DC1* DC2* !*")},
+ rhs: hostClusterMatcher{mustSP("Cluster*")},
+ },
+ },
+ "/*/*/HOST1*": {
+ valid: true,
+ expected: andHostMatcher{
+ lhs: andHostMatcher{
+ lhs: trueHostDC,
+ rhs: hostClusterMatcher{matcher.TRUE()},
+ },
+ rhs: hostHostMatcher{mustSP("HOST1*")},
+ },
+ },
+ "/*/*/HOST1*/*/*": {
+ valid: true,
+ expected: andHostMatcher{
+ lhs: andHostMatcher{
+ lhs: trueHostDC,
+ rhs: hostClusterMatcher{matcher.TRUE()},
+ },
+ rhs: hostHostMatcher{mustSP("HOST1*")},
+ },
+ },
+ "[/DC1*,/DC2*]": {
+ valid: true,
+ expected: orHostMatcher{
+ lhs: hostDCMatcher{mustSP("DC1*")},
+ rhs: hostDCMatcher{mustSP("DC2*")},
+ },
+ },
+ "[/DC1*,/DC2*,/DC3*/Cluster1*/H*]": {
+ valid: true,
+ expected: orHostMatcher{
+ lhs: orHostMatcher{
+ lhs: hostDCMatcher{mustSP("DC1*")},
+ rhs: hostDCMatcher{mustSP("DC2*")},
+ },
+ rhs: andHostMatcher{
+ lhs: andHostMatcher{
+ lhs: hostDCMatcher{mustSP("DC3*")},
+ rhs: hostClusterMatcher{mustSP("Cluster1*")},
+ },
+ rhs: hostHostMatcher{mustSP("H*")},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ includes := prepareIncludes(name)
+ m, err := HostIncludes(includes).Parse()
+
+ if !test.valid {
+ assert.Error(t, err)
+ } else {
+ assert.Equal(t, test.expected, m)
+ }
+ })
+ }
+}
+
+func TestVMIncludes_Parse(t *testing.T) {
+ tests := map[string]struct {
+ valid bool
+ includes []string
+ expected VMMatcher
+ }{
+ "": {valid: false},
+ "*/C1/H1/V1": {valid: false},
+ "/*": {valid: true, expected: trueVMDC},
+ "/!*": {valid: true, expected: falseVMDC},
+ "/!*/": {valid: true, expected: falseVMDC},
+ "/!*/ ": {
+ valid: true,
+ expected: andVMMatcher{
+ lhs: falseVMDC,
+ rhs: vmClusterMatcher{matcher.FALSE()},
+ },
+ },
+ "/DC1* DC2* !*/Cluster*": {
+ valid: true,
+ expected: andVMMatcher{
+ lhs: vmDCMatcher{mustSP("DC1* DC2* !*")},
+ rhs: vmClusterMatcher{mustSP("Cluster*")},
+ },
+ },
+ "/*/*/HOST1": {
+ valid: true,
+ expected: andVMMatcher{
+ lhs: andVMMatcher{
+ lhs: trueVMDC,
+ rhs: vmClusterMatcher{matcher.TRUE()},
+ },
+ rhs: vmHostMatcher{mustSP("HOST1")},
+ },
+ },
+ "/*/*/HOST1*/*/*": {
+ valid: true,
+ expected: andVMMatcher{
+ lhs: andVMMatcher{
+ lhs: andVMMatcher{
+ lhs: trueVMDC,
+ rhs: vmClusterMatcher{matcher.TRUE()},
+ },
+ rhs: vmHostMatcher{mustSP("HOST1*")},
+ },
+ rhs: vmVMMatcher{matcher.TRUE()},
+ },
+ },
+ "[/DC1*,/DC2*]": {
+ valid: true,
+ expected: orVMMatcher{
+ lhs: vmDCMatcher{mustSP("DC1*")},
+ rhs: vmDCMatcher{mustSP("DC2*")},
+ },
+ },
+ "[/DC1*,/DC2*,/DC3*/Cluster1*/H*/VM*]": {
+ valid: true,
+ expected: orVMMatcher{
+ lhs: orVMMatcher{
+ lhs: vmDCMatcher{mustSP("DC1*")},
+ rhs: vmDCMatcher{mustSP("DC2*")},
+ },
+ rhs: andVMMatcher{
+ lhs: andVMMatcher{
+ lhs: andVMMatcher{
+ lhs: vmDCMatcher{mustSP("DC3*")},
+ rhs: vmClusterMatcher{mustSP("Cluster1*")},
+ },
+ rhs: vmHostMatcher{mustSP("H*")},
+ },
+ rhs: vmVMMatcher{mustSP("VM*")},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ includes := prepareIncludes(name)
+ m, err := VMIncludes(includes).Parse()
+
+ if !test.valid {
+ assert.Error(t, err)
+ } else {
+ assert.Equal(t, test.expected, m)
+ }
+ })
+ }
+}
+
+func prepareIncludes(include string) []string {
+ trimmed := strings.Trim(include, "[]")
+ return strings.Split(trimmed, ",")
+}
+
+func mustSP(expr string) matcher.Matcher {
+ return matcher.Must(matcher.NewSimplePatternsMatcher(expr))
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/metadata.yaml b/src/go/plugin/go.d/modules/vsphere/metadata.yaml
new file mode 100644
index 000000000..b40c7af93
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/metadata.yaml
@@ -0,0 +1,439 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-vsphere
+ plugin_name: go.d.plugin
+ module_name: vsphere
+ monitored_instance:
+ name: VMware vCenter Server
+ link: https://www.vmware.com/products/vcenter-server.html
+ icon_filename: vmware.svg
+ categories:
+ - data-collection.containers-and-vms
+ keywords:
+ - vmware
+ - esxi
+ - vcenter
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: true
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors host and VM performance statistics from `vCenter` servers.
+
+ > **Warning**: The `vsphere` collector cannot re-login and continue collecting metrics after a vCenter reboot.
+ > go.d.plugin needs to be restarted.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: |
+ The default `update_every` is 20 seconds, and it doesn't make sense to decrease the value:
+ **VMware generates real-time statistics at a 20-second granularity**.
+
+ For large installations, 20 seconds may not be enough, and the value should be increased.
+
+ To find a suitable value, we recommend running the collector in debug mode and checking how long it takes to collect metrics.
+
+ <details>
+ <summary>Example (unrelated debug lines removed)</summary>
+
+ ```
+ [ilyam@pc]$ ./go.d.plugin -d -m vsphere
+ [ DEBUG ] vsphere[vsphere] discover.go:94 discovering : starting resource discovering process
+ [ DEBUG ] vsphere[vsphere] discover.go:102 discovering : found 3 dcs, process took 49.329656ms
+ [ DEBUG ] vsphere[vsphere] discover.go:109 discovering : found 12 folders, process took 49.538688ms
+ [ DEBUG ] vsphere[vsphere] discover.go:116 discovering : found 3 clusters, process took 47.722692ms
+ [ DEBUG ] vsphere[vsphere] discover.go:123 discovering : found 2 hosts, process took 52.966995ms
+ [ DEBUG ] vsphere[vsphere] discover.go:130 discovering : found 2 vms, process took 49.832979ms
+ [ INFO ] vsphere[vsphere] discover.go:140 discovering : found 3 dcs, 12 folders, 3 clusters (2 dummy), 2 hosts, 3 vms, process took 249.655993ms
+ [ DEBUG ] vsphere[vsphere] build.go:12 discovering : building : starting building resources process
+ [ INFO ] vsphere[vsphere] build.go:23 discovering : building : built 3/3 dcs, 12/12 folders, 3/3 clusters, 2/2 hosts, 3/3 vms, process took 63.3µs
+ [ DEBUG ] vsphere[vsphere] hierarchy.go:10 discovering : hierarchy : start setting resources hierarchy process
+ [ INFO ] vsphere[vsphere] hierarchy.go:18 discovering : hierarchy : set 3/3 clusters, 2/2 hosts, 3/3 vms, process took 6.522µs
+ [ DEBUG ] vsphere[vsphere] filter.go:24 discovering : filtering : starting filtering resources process
+ [ DEBUG ] vsphere[vsphere] filter.go:45 discovering : filtering : removed 0 unmatched hosts
+ [ DEBUG ] vsphere[vsphere] filter.go:56 discovering : filtering : removed 0 unmatched vms
+ [ INFO ] vsphere[vsphere] filter.go:29 discovering : filtering : filtered 0/2 hosts, 0/3 vms, process took 42.973µs
+ [ DEBUG ] vsphere[vsphere] metric_lists.go:14 discovering : metric lists : starting resources metric lists collection process
+ [ INFO ] vsphere[vsphere] metric_lists.go:30 discovering : metric lists : collected metric lists for 2/2 hosts, 3/3 vms, process took 275.60764ms
+ [ INFO ] vsphere[vsphere] discover.go:74 discovering : discovered 2/2 hosts, 3/3 vms, the whole process took 525.614041ms
+ [ INFO ] vsphere[vsphere] discover.go:11 starting discovery process, will do discovery every 5m0s
+ [ DEBUG ] vsphere[vsphere] collect.go:11 starting collection process
+ [ DEBUG ] vsphere[vsphere] scrape.go:48 scraping : scraped metrics for 2/2 hosts, process took 96.257374ms
+ [ DEBUG ] vsphere[vsphere] scrape.go:60 scraping : scraped metrics for 3/3 vms, process took 57.879697ms
+ [ DEBUG ] vsphere[vsphere] collect.go:23 metrics collected, process took 154.77997ms
+ ```
+
+ </details>
+
+ Here you can see that discovery took `525.614041ms` and collecting metrics took `154.77997ms`. Discovery runs in a separate thread, so it doesn't slow down collection.
+ The `update_every` and `timeout` parameters should be adjusted based on these numbers.
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/vsphere.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 20
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: vCenter server URL.
+ default_value: ""
+ required: true
+ - name: host_include
+ description: Hosts selector (filter).
+ default_value: ""
+ required: false
+ detailed_description: |
+ Metrics of hosts matching the selector will be collected.
+
+ - Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern".
+ - Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).
+ - Syntax:
+
+ ```yaml
+ host_include:
+ - '/DC1/*' # select all hosts from datacenter DC1
+ - '/DC2/*/!Host2 *' # select all hosts from datacenter DC2 except Host2
+ - '/DC3/Cluster3/*' # select all hosts from datacenter DC3 cluster Cluster3
+ ```
+ - name: vm_include
+ description: Virtual machines selector (filter).
+ default_value: ""
+ required: false
+ detailed_description: |
+ Metrics of VMs matching the selector will be collected.
+
+ - Include pattern syntax: "/Datacenter pattern/Cluster pattern/Host pattern/VM pattern".
+ - Match pattern syntax: [simple patterns](/src/libnetdata/simple_pattern/README.md#simple-patterns).
+ - Syntax:
+
+ ```yaml
+ vm_include:
+ - '/DC1/*' # select all VMs from datacenter DC1
+ - '/DC2/*/*/!VM2 *' # select all VMs from datacenter DC2 except VM2
+ - '/DC3/Cluster3/*' # select all VMs from datacenter DC3 cluster Cluster3
+ ```
+ - name: discovery_interval
+ description: Hosts and VMs discovery interval.
+ default_value: 300
+ required: false
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 20
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name : vcenter1
+ url : https://203.0.113.1
+ username : admin@vsphere.local
+ password : somepassword
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name : vcenter1
+ url : https://203.0.113.1
+ username : admin@vsphere.local
+ password : somepassword
+
+ - name : vcenter2
+ url : https://203.0.113.10
+ username : admin@vsphere.local
+ password : somepassword
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: vsphere_vm_cpu_utilization
+ metric: vsphere.vm_cpu_utilization
+ info: Virtual Machine CPU utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf
+ - name: vsphere_vm_mem_usage
+ metric: vsphere.vm_mem_utilization
+ info: Virtual Machine memory utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf
+ - name: vsphere_host_cpu_utilization
+ metric: vsphere.host_cpu_utilization
+ info: ESXi Host CPU utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf
+ - name: vsphere_host_mem_utilization
+ metric: vsphere.host_mem_utilization
+ info: ESXi Host memory utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/vsphere.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: virtual machine
+ description: These metrics refer to the Virtual Machine.
+ labels:
+ - name: datacenter
+ description: Datacenter name
+ - name: cluster
+ description: Cluster name
+ - name: host
+ description: Host name
+ - name: vm
+ description: Virtual Machine name
+ metrics:
+ - name: vsphere.vm_cpu_utilization
+ description: Virtual Machine CPU utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: vsphere.vm_mem_utilization
+ description: Virtual Machine memory utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: vsphere.vm_mem_usage
+ description: Virtual Machine memory usage
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: granted
+ - name: consumed
+ - name: active
+ - name: shared
+ - name: vsphere.vm_mem_swap_usage
+ description: Virtual Machine VMKernel memory swap usage
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: swapped
+ - name: vsphere.vm_mem_swap_io
+ description: Virtual Machine VMKernel memory swap IO
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: vsphere.vm_disk_io
+ description: Virtual Machine disk IO
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: vsphere.vm_disk_max_latency
+ description: Virtual Machine disk max latency
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: latency
+ - name: vsphere.vm_net_traffic
+ description: Virtual Machine network traffic
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vsphere.vm_net_packets
+ description: Virtual Machine network packets
+ unit: packets
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vsphere.vm_net_drops
+ description: Virtual Machine network dropped packets
+ unit: packets
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vsphere.vm_overall_status
+ description: Virtual Machine overall alarm status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: yellow
+ - name: gray
+ - name: vsphere.vm_system_uptime
+ description: Virtual Machine system uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: host
+ description: These metrics refer to the ESXi host.
+ labels:
+ - name: datacenter
+ description: Datacenter name
+ - name: cluster
+ description: Cluster name
+ - name: host
+ description: Host name
+ metrics:
+ - name: vsphere.host_cpu_utilization
+ description: ESXi Host CPU utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: vsphere.host_mem_utilization
+ description: ESXi Host memory utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: vsphere.host_mem_usage
+ description: ESXi Host memory usage
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: granted
+ - name: consumed
+ - name: active
+ - name: shared
+ - name: sharedcommon
+ - name: vsphere.host_mem_swap_io
+ description: ESXi Host VMKernel memory swap IO
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: in
+ - name: out
+ - name: vsphere.host_disk_io
+ description: ESXi Host disk IO
+ unit: KiB/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: vsphere.host_disk_max_latency
+ description: ESXi Host disk max latency
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: latency
+ - name: vsphere.host_net_traffic
+ description: ESXi Host network traffic
+ unit: KiB/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vsphere.host_net_packets
+ description: ESXi Host network packets
+ unit: packets
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vsphere.host_net_drops
+ description: ESXi Host network drops
+ unit: packets
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vsphere.host_net_errors
+ description: ESXi Host network errors
+ unit: errors
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: vsphere.host_overall_status
+ description: ESXi Host overall alarm status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: green
+ - name: red
+ - name: yellow
+ - name: gray
+ - name: vsphere.host_system_uptime
+ description: ESXi Host system uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
diff --git a/src/go/plugin/go.d/modules/vsphere/metrics.txt b/src/go/plugin/go.d/modules/vsphere/metrics.txt
new file mode 100644
index 000000000..30c1f55e2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/metrics.txt
@@ -0,0 +1,328 @@
+// [units, statsType, hasInstance]
+
+/*
+ virtualMachine:
+
+ cpu.run.summation [ms, delta, true] [Time the virtual machine is scheduled to run]
+ cpu.ready.summation [ms, delta, true] [Time that the virtual machine was ready, but could not get scheduled to run on the physical CPU during last measurement interval]
+ cpu.usagemhz.average [MHz, rate, true] [CPU usage in megahertz during the interval]
+ cpu.demandEntitlementRatio.latest [%, absolute, false] [CPU resource entitlement to CPU demand ratio (in percents)]
+ cpu.used.summation [ms, delta, true] [Total CPU usage]
+ cpu.idle.summation [ms, delta, true] [Total time that the CPU spent in an idle state]
+ cpu.maxlimited.summation [ms, delta, true] [Time the virtual machine is ready to run, but is not run due to maxing out its CPU limit setting]
+ cpu.overlap.summation [ms, delta, true] [Time the virtual machine was interrupted to perform system services on behalf of itself or other virtual machines]
+ cpu.system.summation [ms, delta, false] [Amount of time spent on system processes on each virtual CPU in the virtual machine]
+ cpu.demand.average [MHz, absolute, false] [The amount of CPU resources a virtual machine would use if there were no CPU contention or CPU limit]
+ cpu.wait.summation [ms, delta, true] [Total CPU time spent in wait state]
+ cpu.latency.average [%, rate, false] [Percent of time the virtual machine is unable to run because it is contending for access to the physical CPU(s)]
+ cpu.costop.summation [ms, delta, true] [Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraints]
+ cpu.entitlement.latest [MHz, absolute, false] [CPU resources devoted by the ESX scheduler]
+ cpu.readiness.average [%, rate, true] [Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU]
+ cpu.swapwait.summation [ms, delta, true] [CPU time spent waiting for swap-in]
+ cpu.usage.average [%, rate, false] [CPU usage as a percentage during the interval]
+
+ datastore.totalReadLatency.average [ms, absolute, true] [The average time a read from the datastore takes]
+ datastore.read.average [KBps, rate, true] [Rate of reading data from the datastore]
+ datastore.write.average [KBps, rate, true] [Rate of writing data to the datastore]
+ datastore.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all datastores used by the host]
+ datastore.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second to the datastore during the collection interval]
+ datastore.totalWriteLatency.average [ms, absolute, true] [The average time a write to the datastore takes]
+ datastore.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second to the datastore during the collection interval]
+
+ disk.read.average [KBps, rate, true] [Average number of kilobytes read from the disk each second during the collection interval]
+ disk.commands.summation [num, delta, true] [Number of SCSI commands issued during the collection interval]
+ disk.commandsAborted.summation [num, delta, true] [Number of SCSI commands aborted during the collection interval]
+ disk.busResets.summation [num, delta, true] [Number of SCSI-bus reset commands issued during the collection interval]
+ disk.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all disks used by the host]
+ disk.write.average [KBps, rate, true] [Average number of kilobytes written to disk each second during the collection interval]
+ disk.numberReadAveraged.average [num, rate, true] [Average number of disk reads per second during the collection interval]
+ disk.usage.average [KBps, rate, false] [Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval.]
+ disk.numberWrite.summation [num, delta, true] [Number of disk writes during the collection interval]
+ disk.commandsAveraged.average [num, rate, true] [Average number of SCSI commands issued per second during the collection interval]
+ disk.numberWriteAveraged.average [num, rate, true] [Average number of disk writes per second during the collection interval]
+ disk.numberRead.summation [num, delta, true] [Number of disk reads during the collection interval]
+
+ mem.vmmemctltarget.average [KB, absolute, false] [Desired amount of guest physical memory the balloon driver needs to reclaim, as determined by ESXi]
+ mem.overhead.average [KB, absolute, false] [host physical memory consumed by ESXi data structures for running the virtual machines]
+ mem.zipSaved.latest [KB, absolute, false] [host physical memory, reclaimed from a virtual machine, by memory compression. This value is less than the value of 'Compressed' memory]
+ mem.overheadMax.average [KB, absolute, false] [host physical memory reserved by ESXi, for its data structures, for running the virtual machine]
+ mem.consumed.average [KB, absolute, false] [Amount of host physical memory consumed for backing up guest physical memory pages]
+ mem.overheadTouched.average [KB, absolute, false] [Estimate of the host physical memory, from Overhead consumed, that is actively read or written to by ESXi]
+ mem.compressionRate.average [KBps, rate, false] [Rate of guest physical memory page compression by ESXi]
+ mem.swapin.average [KB, absolute, false] [Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter]
+ mem.swaptarget.average [KB, absolute, false] [Amount of memory that ESXi needs to reclaim by swapping]
+ mem.activewrite.average [KB, absolute, false] [Amount of guest physical memory that is being actively written by guest. Activeness is estimated by ESXi]
+ mem.decompressionRate.average [KBps, rate, false] [Rate of guest physical memory decompression]
+ mem.entitlement.average [KB, absolute, false] [Amount of host physical memory the virtual machine deserves, as determined by ESXi]
+ mem.swapoutRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped out to the swap space]
+ mem.swapout.average [KB, absolute, false] [Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.]
+ mem.shared.average [KB, absolute, false] [Amount of guest physical memory that is shared within a single virtual machine or across virtual machines]
+ mem.compressed.average [KB, absolute, false] [Guest physical memory pages that have undergone memory compression]
+ mem.llSwapOutRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped out to the host swap cache]
+ mem.latency.average [%, absolute, false] [Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memory]
+ mem.llSwapInRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped in from the host swap cache]
+ mem.zero.average [KB, absolute, false] [Guest physical memory pages whose content is 0x00]
+ mem.swapinRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped in from the swap space]
+ mem.llSwapUsed.average [KB, absolute, false] [Storage space consumed on the host swap cache for storing swapped guest physical memory pages]
+ mem.vmmemctl.average [KB, absolute, false] [Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest]
+ mem.active.average [KB, absolute, false] [Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi]
+ mem.granted.average [KB, absolute, false] [Amount of host physical memory or physical memory that is mapped for a virtual machine or a host]
+ mem.usage.average [%, absolute, false] [Percentage of host physical memory that has been consumed]
+ mem.zipped.latest [KB, absolute, false] [Amount of guest physical memory pages compressed by ESXi]
+ mem.swapped.average [KB, absolute, false] [Amount of guest physical memory that is swapped out to the swap space]
+
+ net.droppedTx.summation [num, delta, true] [Number of transmits dropped]
+ net.bytesTx.average [KBps, rate, true] [Average amount of data transmitted per second]
+ net.transmitted.average [KBps, rate, true] [Average rate at which data was transmitted during the interval]
+ net.droppedRx.summation [num, delta, true] [Number of receives dropped]
+ net.bytesRx.average [KBps, rate, true] [Average amount of data received per second]
+ net.usage.average [KBps, rate, true] [Network utilization (combined transmit-rates and receive-rates) during the interval]
+ net.multicastRx.summation [num, delta, true] [Number of multicast packets received during the sampling interval]
+ net.broadcastTx.summation [num, delta, true] [Number of broadcast packets transmitted during the sampling interval]
+ net.received.average [KBps, rate, true] [Average rate at which data was received during the interval]
+ net.broadcastRx.summation [num, delta, true] [Number of broadcast packets received during the sampling interval]
+ net.pnicBytesRx.average [KBps, rate, true] [pnicBytesRx]
+ net.pnicBytesTx.average [KBps, rate, true] [pnicBytesTx]
+ net.multicastTx.summation [num, delta, true] [Number of multicast packets transmitted during the sampling interval]
+ net.packetsTx.summation [num, delta, true] [Number of packets transmitted during the interval]
+ net.packetsRx.summation [num, delta, true] [Number of packets received during the interval]
+
+ power.energy.summation [J, delta, false] [Total energy used since last stats reset]
+ power.power.average [W, rate, false] [Current power usage]
+
+ rescpu.actpk5.latest [%, absolute, false] [CPU active peak over 5 minutes]
+ rescpu.actpk15.latest [%, absolute, false] [CPU active peak over 15 minutes]
+ rescpu.sampleCount.latest [num, absolute, false] [Group CPU sample count]
+ rescpu.runav15.latest [%, absolute, false] [CPU running average over 15 minutes]
+ rescpu.actav1.latest [%, absolute, false] [CPU active average over 1 minute]
+ rescpu.runpk1.latest [%, absolute, false] [CPU running peak over 1 minute]
+ rescpu.actav5.latest [%, absolute, false] [CPU active average over 5 minutes]
+ rescpu.maxLimited5.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 5 minutes]
+ rescpu.maxLimited1.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 1 minute]
+ rescpu.runav5.latest [%, absolute, false] [CPU running average over 5 minutes]
+ rescpu.samplePeriod.latest [ms, absolute, false] [Group CPU sample period]
+ rescpu.runpk15.latest [%, absolute, false] [CPU running peak over 15 minutes]
+ rescpu.maxLimited15.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 15 minutes]
+ rescpu.actav15.latest [%, absolute, false] [CPU active average over 15 minutes]
+ rescpu.runav1.latest [%, absolute, false] [CPU running average over 1 minute]
+ rescpu.runpk5.latest [%, absolute, false] [CPU running peak over 5 minutes]
+ rescpu.actpk1.latest [%, absolute, false] [CPU active peak over 1 minute]
+
+ sys.uptime.latest [s, absolute, false] [Total time elapsed, in seconds, since last system startup]
+ sys.heartbeat.latest [num, absolute, false] [Number of heartbeats issued per virtual machine during the interval]
+ sys.osUptime.latest [s, absolute, false] [Total time elapsed, in seconds, since last operating system boot-up]
+
+ virtualDisk.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second to the virtual disk during the collection interval]
+ virtualDisk.largeSeeks.latest [num, absolute, true] [Number of seeks during the interval that were greater than 8192 LBNs apart]
+ virtualDisk.readOIO.latest [num, absolute, true] [Average number of outstanding read requests to the virtual disk during the collection interval]
+ virtualDisk.mediumSeeks.latest [num, absolute, true] [Number of seeks during the interval that were between 64 and 8192 LBNs apart]
+ virtualDisk.write.average [KBps, rate, true] [Rate of writing data to the virtual disk]
+ virtualDisk.smallSeeks.latest [num, absolute, true] [Number of seeks during the interval that were less than 64 LBNs apart]
+ virtualDisk.read.average [KBps, rate, true] [Rate of reading data from the virtual disk]
+ virtualDisk.writeLatencyUS.latest [µs, absolute, true] [Write latency in microseconds]
+ virtualDisk.writeOIO.latest [num, absolute, true] [Average number of outstanding write requests to the virtual disk during the collection interval]
+ virtualDisk.totalWriteLatency.average [ms, absolute, true] [The average time a write to the virtual disk takes]
+ virtualDisk.readLoadMetric.latest [num, absolute, true] [Storage DRS virtual disk metric for the read workload model]
+ virtualDisk.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second to the virtual disk during the collection interval]
+ virtualDisk.writeLoadMetric.latest [num, absolute, true] [Storage DRS virtual disk metric for the write workload model]
+ virtualDisk.totalReadLatency.average [ms, absolute, true] [The average time a read from the virtual disk takes]
+ virtualDisk.readIOSize.latest [num, absolute, true] [Average read request size in bytes]
+ virtualDisk.writeIOSize.latest [num, absolute, true] [Average write request size in bytes]
+ virtualDisk.readLatencyUS.latest [µs, absolute, true] [Read latency in microseconds]
+*/
+
+/*
+ HOST:
+
+ cpu.usage.average [%, rate, true] [CPU usage as a percentage during the interval]
+ cpu.wait.summation [ms, delta, false] [Total CPU time spent in wait state]
+ cpu.ready.summation [ms, delta, false] [Time that the virtual machine was ready, but could not get scheduled to run on the physical CPU during last measurement interval]
+ cpu.used.summation [ms, delta, true] [Total CPU usage]
+ cpu.demand.average [MHz, absolute, false] [The amount of CPU resources a virtual machine would use if there were no CPU contention or CPU limit]
+ cpu.idle.summation [ms, delta, true] [Total time that the CPU spent in an idle state]
+ cpu.latency.average [%, rate, false] [Percent of time the virtual machine is unable to run because it is contending for access to the physical CPU(s)]
+ cpu.utilization.average [%, rate, true] [CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)]
+ cpu.coreUtilization.average [%, rate, true] [CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)]
+ cpu.costop.summation [ms, delta, false] [Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraints]
+ cpu.totalCapacity.average [MHz, absolute, false] [Total CPU capacity reserved by and available for virtual machines]
+ cpu.usagemhz.average [MHz, rate, false] [CPU usage in megahertz during the interval]
+ cpu.swapwait.summation [ms, delta, false] [CPU time spent waiting for swap-in]
+ cpu.reservedCapacity.average [MHz, absolute, false] [Total CPU capacity reserved by virtual machines]
+ cpu.readiness.average [%, rate, false] [Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPU]
+
+ datastore.datastoreReadLoadMetric.latest [num, absolute, true] [Storage DRS datastore metric for read workload model]
+ datastore.datastoreNormalReadLatency.latest [num, absolute, true] [Storage DRS datastore normalized read latency]
+ datastore.datastoreWriteLoadMetric.latest [num, absolute, true] [Storage DRS datastore metric for write workload model]
+ datastore.datastoreMaxQueueDepth.latest [num, absolute, true] [Storage I/O Control datastore maximum queue depth]
+ datastore.totalReadLatency.average [ms, absolute, true] [The average time a read from the datastore takes]
+ datastore.datastoreWriteOIO.latest [num, absolute, true] [Storage DRS datastore outstanding write requests]
+ datastore.datastoreReadIops.latest [num, absolute, true] [Storage DRS datastore read I/O rate]
+ datastore.sizeNormalizedDatastoreLatency.average [µs, absolute, true] [Storage I/O Control size-normalized I/O latency]
+ datastore.datastoreIops.average [num, absolute, true] [Storage I/O Control aggregated IOPS]
+ datastore.datastoreVMObservedLatency.latest [µs, absolute, true] [The average datastore latency as seen by virtual machines]
+ datastore.unmapIOs.summation [num, delta, true] [unmapIOs]
+ datastore.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second to the datastore during the collection interval]
+ datastore.datastoreNormalWriteLatency.latest [num, absolute, true] [Storage DRS datastore normalized write latency]
+ datastore.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second to the datastore during the collection interval]
+ datastore.unmapSize.summation [MB, delta, true] [unmapSize]
+ datastore.datastoreReadOIO.latest [num, absolute, true] [Storage DRS datastore outstanding read requests]
+ datastore.write.average [KBps, rate, true] [Rate of writing data to the datastore]
+ datastore.totalWriteLatency.average [ms, absolute, true] [The average time a write to the datastore takes]
+ datastore.datastoreWriteIops.latest [num, absolute, true] [Storage DRS datastore write I/O rate]
+ datastore.datastoreReadBytes.latest [num, absolute, true] [Storage DRS datastore bytes read]
+ datastore.read.average [KBps, rate, true] [Rate of reading data from the datastore]
+ datastore.siocActiveTimePercentage.average [%, absolute, true] [Percentage of time Storage I/O Control actively controlled datastore latency]
+ datastore.datastoreWriteBytes.latest [num, absolute, true] [Storage DRS datastore bytes written]
+ datastore.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all datastores used by the host]
+
+ disk.queueReadLatency.average [ms, absolute, true] [Average amount of time spent in the VMkernel queue, per SCSI read command, during the collection interval]
+ disk.numberReadAveraged.average [num, rate, true] [Average number of disk reads per second during the collection interval]
+ disk.numberRead.summation [num, delta, true] [Number of disk reads during the collection interval]
+ disk.queueWriteLatency.average [ms, absolute, true] [Average amount of time spent in the VMkernel queue, per SCSI write command, during the collection interval]
+ disk.totalWriteLatency.average [ms, absolute, true] [Average amount of time taken during the collection interval to process a SCSI write command issued by the guest OS to the virtual machine]
+ disk.kernelWriteLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, spent by VMkernel to process each SCSI write command]
+ disk.read.average [KBps, rate, true] [Average number of kilobytes read from the disk each second during the collection interval]
+ disk.usage.average [KBps, rate, false] [Aggregated disk I/O rate. For hosts, this metric includes the rates for all virtual machines running on the host during the collection interval.]
+ disk.kernelLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, spent by VMkernel to process each SCSI command]
+ disk.commandsAveraged.average [num, rate, true] [Average number of SCSI commands issued per second during the collection interval]
+ disk.numberWrite.summation [num, delta, true] [Number of disk writes during the collection interval]
+ disk.write.average [KBps, rate, true] [Average number of kilobytes written to disk each second during the collection interval]
+ disk.queueLatency.average [ms, absolute, true] [Average amount of time spent in the VMkernel queue, per SCSI command, during the collection interval]
+ disk.busResets.summation [num, delta, true] [Number of SCSI-bus reset commands issued during the collection interval]
+ disk.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all disks used by the host]
+ disk.kernelReadLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, spent by VMkernel to process each SCSI read command]
+ disk.deviceLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, to complete a SCSI command from the physical device]
+ disk.totalLatency.average [ms, absolute, true] [Average amount of time taken during the collection interval to process a SCSI command issued by the guest OS to the virtual machine]
+ disk.commands.summation [num, delta, true] [Number of SCSI commands issued during the collection interval]
+ disk.numberWriteAveraged.average [num, rate, true] [Average number of disk writes per second during the collection interval]
+ disk.totalReadLatency.average [ms, absolute, true] [Average amount of time taken during the collection interval to process a SCSI read command issued from the guest OS to the virtual machine]
+ disk.maxQueueDepth.average [num, absolute, true] [Maximum queue depth]
+ disk.deviceWriteLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, to write to the physical device]
+ disk.commandsAborted.summation [num, delta, true] [Number of SCSI commands aborted during the collection interval]
+ disk.deviceReadLatency.average [ms, absolute, true] [Average amount of time, in milliseconds, to read from the physical device]
+
+ hbr.hbrNetRx.average [KBps, rate, false] [Average amount of data received per second]
+ hbr.hbrNumVms.average [num, absolute, false] [Current number of replicated virtual machines]
+ hbr.hbrNetTx.average [KBps, rate, false] [Average amount of data transmitted per second]
+
+ mem.reservedCapacity.average [MB, absolute, false] [Memory reservation consumed by powered-on virtual machines]
+ mem.swapinRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped in from the swap space]
+ mem.zero.average [KB, absolute, false] [Guest physical memory pages whose content is 0x00]
+ mem.heapfree.average [KB, absolute, false] [Free address space in the heap of ESXi. This is less than or equal to Heap]
+ mem.sharedcommon.average [KB, absolute, false] [Amount of host physical memory that backs shared guest physical memory (Shared)]
+ mem.swapin.average [KB, absolute, false] [Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counter]
+ mem.unreserved.average [KB, absolute, false] [Amount by which reservation can be raised]
+ mem.lowfreethreshold.average [KB, absolute, false] [Threshold of free host physical memory below which ESXi will begin actively reclaiming memory from virtual machines by swapping, compression and ballooning]
+ mem.state.latest [num, absolute, false] [Current memory availability state of ESXi. Possible values are high, clear, soft, hard, low. The state value determines the techniques used for memory reclamation from virtual machines]
+ mem.decompressionRate.average [KBps, rate, false] [Rate of guest physical memory decompression]
+ mem.swapout.average [KB, absolute, false] [Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.]
+ mem.vmfs.pbc.capMissRatio.latest [%, absolute, false] [Trailing average of the ratio of capacity misses to compulsory misses for the VMFS PB Cache]
+ mem.swapused.average [KB, absolute, false] [Swap storage space consumed]
+ mem.consumed.average [KB, absolute, false] [Amount of host physical memory consumed for backing up guest physical memory pages]
+ mem.llSwapOutRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped out to the host swap cache]
+ mem.llSwapOut.average [KB, absolute, false] [Amount of guest physical memory swapped out to the host swap cache]
+ mem.swapoutRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped out to the swap space]
+ mem.llSwapIn.average [KB, absolute, false] [Amount of guest physical memory swapped in from host cache]
+ mem.active.average [KB, absolute, false] [Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi]
+ mem.latency.average [%, absolute, false] [Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memory]
+ mem.llSwapInRate.average [KBps, rate, false] [Rate at which guest physical memory is swapped in from the host swap cache]
+ mem.vmfs.pbc.sizeMax.latest [MB, absolute, false] [Maximum size the VMFS Pointer Block Cache can grow to]
+ mem.vmmemctl.average [KB, absolute, false] [Amount of guest physical memory reclaimed from the virtual machine by the balloon driver in the guest]
+ mem.vmfs.pbc.size.latest [MB, absolute, false] [Space used for holding VMFS Pointer Blocks in memory]
+ mem.overhead.average [KB, absolute, false] [host physical memory consumed by ESXi data structures for running the virtual machines]
+ mem.vmfs.pbc.workingSet.latest [TB, absolute, false] [Amount of file blocks whose addresses are cached in the VMFS PB Cache]
+ mem.shared.average [KB, absolute, false] [Amount of guest physical memory that is shared within a single virtual machine or across virtual machines]
+ mem.usage.average [%, absolute, false] [Percentage of host physical memory that has been consumed]
+ mem.vmfs.pbc.workingSetMax.latest [TB, absolute, false] [Maximum amount of file blocks whose addresses are cached in the VMFS PB Cache]
+ mem.sysUsage.average [KB, absolute, false] [Amount of host physical memory consumed by VMkernel]
+ mem.compressed.average [KB, absolute, false] [Guest physical memory pages that have undergone memory compression]
+ mem.vmfs.pbc.overhead.latest [KB, absolute, false] [Amount of VMFS heap used by the VMFS PB Cache]
+ mem.totalCapacity.average [MB, absolute, false] [Total reservation, available and consumed, for powered-on virtual machines]
+ mem.activewrite.average [KB, absolute, false] [Amount of guest physical memory that is being actively written by guest. Activeness is estimated by ESXi]
+ mem.granted.average [KB, absolute, false] [Amount of host physical memory or physical memory that is mapped for a virtual machine or a host]
+ mem.compressionRate.average [KBps, rate, false] [Rate of guest physical memory page compression by ESXi]
+ mem.heap.average [KB, absolute, false] [Virtual address space of ESXi that is dedicated to its heap]
+ mem.llSwapUsed.average [KB, absolute, false] [Storage space consumed on the host swap cache for storing swapped guest physical memory pages]
+
+ net.bytesTx.average [KBps, rate, true] [Average amount of data transmitted per second]
+ net.droppedRx.summation [num, delta, true] [Number of receives dropped]
+ net.transmitted.average [KBps, rate, true] [Average rate at which data was transmitted during the interval]
+ net.multicastTx.summation [num, delta, true] [Number of multicast packets transmitted during the sampling interval]
+ net.errorsTx.summation [num, delta, true] [Number of packets with errors transmitted during the sampling interval]
+ net.unknownProtos.summation [num, delta, true] [Number of frames with unknown protocol received during the sampling interval]
+ net.multicastRx.summation [num, delta, true] [Number of multicast packets received during the sampling interval]
+ net.broadcastTx.summation [num, delta, true] [Number of broadcast packets transmitted during the sampling interval]
+ net.received.average [KBps, rate, true] [Average rate at which data was received during the interval]
+ net.droppedTx.summation [num, delta, true] [Number of transmits dropped]
+ net.usage.average [KBps, rate, true] [Network utilization (combined transmit-rates and receive-rates) during the interval]
+ net.broadcastRx.summation [num, delta, true] [Number of broadcast packets received during the sampling interval]
+ net.packetsRx.summation [num, delta, true] [Number of packets received during the interval]
+ net.packetsTx.summation [num, delta, true] [Number of packets transmitted during the interval]
+ net.errorsRx.summation [num, delta, true] [Number of packets with errors received during the sampling interval]
+ net.bytesRx.average [KBps, rate, true] [Average amount of data received per second]
+
+ power.energy.summation [J, delta, false] [Total energy used since last stats reset]
+ power.power.average [W, rate, false] [Current power usage]
+ power.powerCap.average [W, absolute, false] [Maximum allowed power usage]
+
+ rescpu.sampleCount.latest [num, absolute, false] [Group CPU sample count]
+ rescpu.maxLimited5.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 5 minutes]
+ rescpu.runav1.latest [%, absolute, false] [CPU running average over 1 minute]
+ rescpu.actpk5.latest [%, absolute, false] [CPU active peak over 5 minutes]
+ rescpu.runav5.latest [%, absolute, false] [CPU running average over 5 minutes]
+ rescpu.actav1.latest [%, absolute, false] [CPU active average over 1 minute]
+ rescpu.runav15.latest [%, absolute, false] [CPU running average over 15 minutes]
+ rescpu.actav15.latest [%, absolute, false] [CPU active average over 15 minutes]
+ rescpu.actav5.latest [%, absolute, false] [CPU active average over 5 minutes]
+ rescpu.maxLimited15.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 15 minutes]
+ rescpu.actpk1.latest [%, absolute, false] [CPU active peak over 1 minute]
+ rescpu.runpk15.latest [%, absolute, false] [CPU running peak over 15 minutes]
+ rescpu.samplePeriod.latest [ms, absolute, false] [Group CPU sample period]
+ rescpu.actpk15.latest [%, absolute, false] [CPU active peak over 15 minutes]
+ rescpu.runpk5.latest [%, absolute, false] [CPU running peak over 5 minutes]
+ rescpu.runpk1.latest [%, absolute, false] [CPU running peak over 1 minute]
+ rescpu.maxLimited1.latest [%, absolute, false] [Amount of CPU resources over the limit that were refused, average over 1 minute]
+
+ storageAdapter.read.average [KBps, rate, true] [Rate of reading data by the storage adapter]
+ storageAdapter.commandsAveraged.average [num, rate, true] [Average number of commands issued per second by the storage adapter during the collection interval]
+ storageAdapter.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second by the storage adapter during the collection interval]
+ storageAdapter.totalWriteLatency.average [ms, absolute, true] [The average time a write by the storage adapter takes]
+ storageAdapter.totalReadLatency.average [ms, absolute, true] [The average time a read by the storage adapter takes]
+ storageAdapter.write.average [KBps, rate, true] [Rate of writing data by the storage adapter]
+ storageAdapter.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second by the storage adapter during the collection interval]
+ storageAdapter.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all storage adapters used by the host]
+ storagePath.numberWriteAveraged.average [num, rate, true] [Average number of write commands issued per second on the storage path during the collection interval]
+ storagePath.write.average [KBps, rate, true] [Rate of writing data on the storage path]
+ storagePath.maxTotalLatency.latest [ms, absolute, false] [Highest latency value across all storage paths used by the host]
+ storagePath.read.average [KBps, rate, true] [Rate of reading data on the storage path]
+ storagePath.numberReadAveraged.average [num, rate, true] [Average number of read commands issued per second on the storage path during the collection interval]
+ storagePath.totalWriteLatency.average [ms, absolute, true] [The average time a write issued on the storage path takes]
+ storagePath.totalReadLatency.average [ms, absolute, true] [The average time a read issued on the storage path takes]
+ storagePath.commandsAveraged.average [num, rate, true] [Average number of commands issued per second on the storage path during the collection interval]
+
+ sys.resourceMemTouched.latest [KB, absolute, true] [Memory touched by the system resource group]
+ sys.resourceMemSwapped.latest [KB, absolute, true] [Memory swapped out by the system resource group]
+ sys.resourceMemShared.latest [KB, absolute, true] [Memory saved due to sharing by the system resource group]
+ sys.resourceMemZero.latest [KB, absolute, true] [Zero filled memory used by the system resource group]
+ sys.resourceMemMapped.latest [KB, absolute, true] [Memory mapped by the system resource group]
+ sys.resourceCpuAllocShares.latest [num, absolute, true] [CPU allocation shares of the system resource group]
+ sys.resourceFdUsage.latest [num, absolute, true] [Number of file descriptors used by the system resource group]
+ sys.resourceCpuAct5.latest [%, absolute, true] [CPU active average over 5 minutes of the system resource group]
+ sys.resourceCpuAct1.latest [%, absolute, true] [CPU active average over 1 minute of the system resource group]
+ sys.resourceCpuUsage.average [MHz, rate, true] [Amount of CPU used by the Service Console and other applications during the interval]
+ sys.resourceMemOverhead.latest [KB, absolute, true] [Overhead memory consumed by the system resource group]
+ sys.resourceMemCow.latest [KB, absolute, true] [Memory shared by the system resource group]
+ sys.resourceCpuAllocMax.latest [MHz, absolute, true] [CPU allocation limit (in MHz) of the system resource group]
+ sys.resourceMemAllocMax.latest [KB, absolute, true] [Memory allocation limit (in KB) of the system resource group]
+ sys.resourceMemAllocMin.latest [KB, absolute, true] [Memory allocation reservation (in KB) of the system resource group]
+ sys.resourceCpuAllocMin.latest [MHz, absolute, true] [CPU allocation reservation (in MHz) of the system resource group]
+ sys.resourceCpuMaxLimited1.latest [%, absolute, true] [CPU maximum limited over 1 minute of the system resource group]
+ sys.resourceMemAllocShares.latest [num, absolute, true] [Memory allocation shares of the system resource group]
+ sys.resourceMemConsumed.latest [KB, absolute, true] [Memory consumed by the system resource group]
+ sys.uptime.latest [s, absolute, false] [Total time elapsed, in seconds, since last system startup]
+ sys.resourceCpuMaxLimited5.latest [%, absolute, true] [CPU maximum limited over 5 minutes of the system resource group]
+ sys.resourceCpuRun5.latest [%, absolute, true] [CPU running average over 5 minutes of the system resource group]
+ sys.resourceCpuRun1.latest [%, absolute, true] [CPU running average over 1 minute of the system resource group]
+
+ vflashModule.numActiveVMDKs.latest [num, absolute, true] [Number of caches controlled by the virtual flash module]
+*/
diff --git a/src/go/plugin/go.d/modules/vsphere/resources/resources.go b/src/go/plugin/go.d/modules/vsphere/resources/resources.go
new file mode 100644
index 000000000..8f967f16c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/resources/resources.go
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package resources
+
+import (
+ "github.com/vmware/govmomi/performance"
+ "github.com/vmware/govmomi/vim25/types"
+)
+
+/*
+
+```
+Virtual Datacenter Architecture Representation (partial).
+
+<root>
++-DC0 # Virtual datacenter
+ +-datastore # Datastore folder (created by system)
+ | +-Datastore1
+ |
+ +-host # Host folder (created by system)
+ | +-Folder1 # Host and Cluster folder
+ | | +-NestedFolder1
+ | | | +-Cluster1
+ | | | | +-Host1
+ | +-Cluster2
+ | | +-Host2
+ | | | +-VM1
+ | | | +-VM2
+ | | | +-hadoop1
+ | +-Host3 # Dummy folder for non-clustered host (created by system)
+ | | +-Host3
+ | | | +-VM3
+ | | | +-VM4
+ | | |
+ +-vm # VM folder (created by system)
+ | +-VM1
+ | +-VM2
+ | +-Folder2 # VM and Template folder
+ | | +-hadoop1
+ | | +-NestedFolder1
+ | | | +-VM3
+ | | | +-VM4
+```
+*/
+
+type Resources struct {
+ DataCenters DataCenters
+ Folders Folders
+ Clusters Clusters
+ Hosts Hosts
+ VMs VMs
+}
+
+type (
+ Datacenter struct {
+ Name string
+ ID string
+ }
+
+ Folder struct {
+ Name string
+ ID string
+ ParentID string
+ }
+
+ HierarchyValue struct {
+ ID, Name string
+ }
+
+ ClusterHierarchy struct {
+ DC HierarchyValue
+ }
+ Cluster struct {
+ Name string
+ ID string
+ ParentID string
+ Hier ClusterHierarchy
+ }
+
+ HostHierarchy struct {
+ DC HierarchyValue
+ Cluster HierarchyValue
+ }
+ Host struct {
+ Name string
+ ID string
+ ParentID string
+ Hier HostHierarchy
+ OverallStatus string
+ MetricList performance.MetricList
+ Ref types.ManagedObjectReference
+ }
+
+ VMHierarchy struct {
+ DC HierarchyValue
+ Cluster HierarchyValue
+ Host HierarchyValue
+ }
+
+ VM struct {
+ Name string
+ ID string
+ ParentID string
+ Hier VMHierarchy
+ OverallStatus string
+ MetricList performance.MetricList
+ Ref types.ManagedObjectReference
+ }
+)
+
+func (v *HierarchyValue) IsSet() bool { return v.ID != "" && v.Name != "" }
+func (v *HierarchyValue) Set(id, name string) { v.ID = id; v.Name = name }
+
+func (h ClusterHierarchy) IsSet() bool { return h.DC.IsSet() }
+func (h HostHierarchy) IsSet() bool { return h.DC.IsSet() && h.Cluster.IsSet() }
+func (h VMHierarchy) IsSet() bool { return h.DC.IsSet() && h.Cluster.IsSet() && h.Host.IsSet() }
+
+type (
+ DataCenters map[string]*Datacenter
+ Folders map[string]*Folder
+ Clusters map[string]*Cluster
+ Hosts map[string]*Host
+ VMs map[string]*VM
+)
+
+func (dcs DataCenters) Put(dc *Datacenter) { dcs[dc.ID] = dc }
+func (dcs DataCenters) Get(id string) *Datacenter { return dcs[id] }
+func (fs Folders) Put(folder *Folder) { fs[folder.ID] = folder }
+func (fs Folders) Get(id string) *Folder { return fs[id] }
+func (cs Clusters) Put(cluster *Cluster) { cs[cluster.ID] = cluster }
+func (cs Clusters) Get(id string) *Cluster { return cs[id] }
+func (hs Hosts) Put(host *Host) { hs[host.ID] = host }
+func (hs Hosts) Remove(id string) { delete(hs, id) }
+func (hs Hosts) Get(id string) *Host { return hs[id] }
+func (vs VMs) Put(vm *VM) { vs[vm.ID] = vm }
+func (vs VMs) Remove(id string) { delete(vs, id) }
+func (vs VMs) Get(id string) *VM { return vs[id] }
diff --git a/src/go/plugin/go.d/modules/vsphere/scrape/scrape.go b/src/go/plugin/go.d/modules/vsphere/scrape/scrape.go
new file mode 100644
index 000000000..ef882d73e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/scrape/scrape.go
@@ -0,0 +1,159 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scrape
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+ "github.com/vmware/govmomi/performance"
+ "github.com/vmware/govmomi/vim25/types"
+)
+
+type Client interface {
+ Version() string
+ PerformanceMetrics([]types.PerfQuerySpec) ([]performance.EntityMetric, error)
+}
+
+func New(client Client) *Scraper {
+ v := &Scraper{Client: client}
+ v.calcMaxQuery()
+ return v
+}
+
+type Scraper struct {
+ *logger.Logger
+ Client
+ maxQuery int
+}
+
+// The default query limit for vCenter 6.5 and above is 256; prior versions of vCenter have it set to 64.
+func (s *Scraper) calcMaxQuery() {
+ major, minor, err := parseVersion(s.Version())
+ if err != nil || major < 6 || (major == 6 && minor == 0) {
+ s.maxQuery = 64
+ return
+ }
+ s.maxQuery = 256
+}
+
+func (s *Scraper) ScrapeHosts(hosts rs.Hosts) []performance.EntityMetric {
+ t := time.Now()
+ pqs := newHostsPerfQuerySpecs(hosts)
+ ms := s.scrapeMetrics(pqs)
+ s.Debugf("scraping : scraped metrics for %d/%d hosts, process took %s",
+ len(ms),
+ len(hosts),
+ time.Since(t),
+ )
+ return ms
+}
+
+func (s *Scraper) ScrapeVMs(vms rs.VMs) []performance.EntityMetric {
+ t := time.Now()
+ pqs := newVMsPerfQuerySpecs(vms)
+ ms := s.scrapeMetrics(pqs)
+ s.Debugf("scraping : scraped metrics for %d/%d vms, process took %s",
+ len(ms),
+ len(vms),
+ time.Since(t),
+ )
+ return ms
+}
+
+func (s *Scraper) scrapeMetrics(pqs []types.PerfQuerySpec) []performance.EntityMetric {
+ tc := newThrottledCaller(5)
+ var ms []performance.EntityMetric
+ lock := &sync.Mutex{}
+
+ chunks := chunkify(pqs, s.maxQuery)
+ for _, chunk := range chunks {
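+ // rebind the range variable so each job's closure captures its own chunk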
+ pqs := chunk
+ job := func() {
+ s.scrape(&ms, lock, pqs)
+ }
+ tc.call(job)
+ }
+ tc.wait()
+
+ return ms
+}
+
+func (s *Scraper) scrape(metrics *[]performance.EntityMetric, lock *sync.Mutex, pqs []types.PerfQuerySpec) {
+ m, err := s.PerformanceMetrics(pqs)
+ if err != nil {
+ s.Error(err)
+ return
+ }
+
+ lock.Lock()
+ *metrics = append(*metrics, m...)
+ lock.Unlock()
+}
+
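+// chunkify splits the perf query specs into chunks of at most chunkSize elements.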
+func chunkify(pqs []types.PerfQuerySpec, chunkSize int) (chunks [][]types.PerfQuerySpec) {
+ for i := 0; i < len(pqs); i += chunkSize {
+ end := i + chunkSize
+ if end > len(pqs) {
+ end = len(pqs)
+ }
+ chunks = append(chunks, pqs[i:end])
+ }
+ return chunks
+}
+
+const (
+ pqsMaxSample = 1
+ pqsIntervalID = 20 // vSphere real-time performance interval (20-second samples)
+ pqsFormat = "normal"
+)
+
+func newHostsPerfQuerySpecs(hosts rs.Hosts) []types.PerfQuerySpec {
+ pqs := make([]types.PerfQuerySpec, 0, len(hosts))
+ for _, host := range hosts {
+ pq := types.PerfQuerySpec{
+ Entity: host.Ref,
+ MaxSample: pqsMaxSample,
+ MetricId: host.MetricList,
+ IntervalId: pqsIntervalID,
+ Format: pqsFormat,
+ }
+ pqs = append(pqs, pq)
+ }
+ return pqs
+}
+
+func newVMsPerfQuerySpecs(vms rs.VMs) []types.PerfQuerySpec {
+ pqs := make([]types.PerfQuerySpec, 0, len(vms))
+ for _, vm := range vms {
+ pq := types.PerfQuerySpec{
+ Entity: vm.Ref,
+ MaxSample: pqsMaxSample,
+ MetricId: vm.MetricList,
+ IntervalId: pqsIntervalID,
+ Format: pqsFormat,
+ }
+ pqs = append(pqs, pq)
+ }
+ return pqs
+}
+
+func parseVersion(version string) (major, minor int, err error) {
+ parts := strings.Split(version, ".")
+ if len(parts) < 2 {
+ return 0, 0, fmt.Errorf("unparsable version string : %s", version)
+ }
+ if major, err = strconv.Atoi(parts[0]); err != nil {
+ return 0, 0, err
+ }
+ if minor, err = strconv.Atoi(parts[1]); err != nil {
+ return 0, 0, err
+ }
+ return major, minor, nil
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/scrape/scrape_test.go b/src/go/plugin/go.d/modules/vsphere/scrape/scrape_test.go
new file mode 100644
index 000000000..d6232ff66
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/scrape/scrape_test.go
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scrape
+
+import (
+ "crypto/tls"
+ "net/url"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/client"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/discover"
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/vmware/govmomi/simulator"
+)
+
+func TestNew(t *testing.T) {
+}
+
+func TestScraper_ScrapeVMs(t *testing.T) {
+ s, res, teardown := prepareScraper(t)
+ defer teardown()
+
+ metrics := s.ScrapeVMs(res.VMs)
+ assert.Len(t, metrics, len(res.VMs))
+}
+
+func TestScraper_ScrapeHosts(t *testing.T) {
+ s, res, teardown := prepareScraper(t)
+ defer teardown()
+
+ metrics := s.ScrapeHosts(res.Hosts)
+ assert.Len(t, metrics, len(res.Hosts))
+}
+
+func prepareScraper(t *testing.T) (s *Scraper, res *rs.Resources, teardown func()) {
+ model, srv := createSim(t)
+ teardown = func() { model.Remove(); srv.Close() }
+
+ c := newClient(t, srv.URL)
+ d := discover.New(c)
+ res, err := d.Discover()
+ require.NoError(t, err)
+
+ return New(c), res, teardown
+}
+
+func newClient(t *testing.T, vCenterURL *url.URL) *client.Client {
+ c, err := client.New(client.Config{
+ URL: vCenterURL.String(),
+ User: "admin",
+ Password: "password",
+ Timeout: time.Second * 3,
+ TLSConfig: tlscfg.TLSConfig{InsecureSkipVerify: true},
+ })
+ require.NoError(t, err)
+ return c
+}
+
+func createSim(t *testing.T) (*simulator.Model, *simulator.Server) {
+ model := simulator.VPX()
+ err := model.Create()
+ require.NoError(t, err)
+ model.Service.TLS = new(tls.Config)
+ return model, model.Service.NewServer()
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller.go b/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller.go
new file mode 100644
index 000000000..5127c28c1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller.go
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scrape
+
+import "sync"
+
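+// throttledCaller runs jobs in goroutines, using a buffered channel as a semaphore to cap how many run concurrently.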
+type throttledCaller struct {
+ limit chan struct{}
+ wg sync.WaitGroup
+}
+
+func newThrottledCaller(limit int) *throttledCaller {
+ if limit <= 0 {
+ panic("limit must be > 0")
+ }
+ return &throttledCaller{limit: make(chan struct{}, limit)}
+}
+
+func (t *throttledCaller) call(job func()) {
+ t.wg.Add(1)
+ go func() {
+ defer t.wg.Done()
+ t.limit <- struct{}{}
+ defer func() {
+ <-t.limit
+ }()
+ job()
+ }()
+}
+
+func (t *throttledCaller) wait() {
+ t.wg.Wait()
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller_test.go b/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller_test.go
new file mode 100644
index 000000000..545ed1603
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/scrape/throttled_caller_test.go
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package scrape
+
+import (
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_throttledCaller(t *testing.T) {
+ var current int64
+ var max int64
+ var total int64
+ var mux sync.Mutex
+ limit := 5
+ n := 10000
+ tc := newThrottledCaller(limit)
+
+ for i := 0; i < n; i++ {
+ job := func() {
+ atomic.AddInt64(&total, 1)
+ atomic.AddInt64(&current, 1)
+ time.Sleep(100 * time.Microsecond)
+
+ mux.Lock()
+ defer mux.Unlock()
+ if atomic.LoadInt64(&current) > max {
+ max = atomic.LoadInt64(&current)
+ }
+ atomic.AddInt64(&current, -1)
+ }
+ tc.call(job)
+ }
+ tc.wait()
+
+ assert.Equal(t, int64(n), total)
+ assert.Equal(t, max, int64(limit))
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/task.go b/src/go/plugin/go.d/modules/vsphere/task.go
new file mode 100644
index 000000000..103ca1ed6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/task.go
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vsphere
+
+import (
+ "sync"
+ "time"
+)
+
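+// newTask starts a goroutine that runs doWork every doEvery until stop is called.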
+func newTask(doWork func(), doEvery time.Duration) *task {
+ task := task{
+ done: make(chan struct{}),
+ running: make(chan struct{}),
+ }
+
+ go func() {
+ t := time.NewTicker(doEvery)
+ defer func() {
+ t.Stop()
+ close(task.running)
+ }()
+ for {
+ select {
+ case <-task.done:
+ return
+ case <-t.C:
+ doWork()
+ }
+ }
+ }()
+
+ return &task
+}
+
+type task struct {
+ once sync.Once
+ done chan struct{}
+ running chan struct{}
+}
+
+func (t *task) stop() {
+ t.once.Do(func() { close(t.done) })
+}
+
+func (t *task) isStopped() bool {
+ select {
+ case <-t.done:
+ return true
+ default:
+ return false
+ }
+}
+
+func (t *task) isRunning() bool {
+ select {
+ case <-t.running:
+ return false
+ default:
+ return true
+ }
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/task_test.go b/src/go/plugin/go.d/modules/vsphere/task_test.go
new file mode 100644
index 000000000..ed55a28a3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/task_test.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vsphere
+
+import (
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_task(t *testing.T) {
+ var i int64
+ job := func() {
+ atomic.AddInt64(&i, 1)
+ }
+
+ task := newTask(job, time.Millisecond*200)
+ defer task.stop()
+ time.Sleep(time.Second)
+ assert.True(t, atomic.LoadInt64(&i) > 0)
+}
+
+func Test_task_isStopped(t *testing.T) {
+ task := newTask(func() {}, time.Second)
+ assert.False(t, task.isStopped())
+
+ task.stop()
+ time.Sleep(time.Millisecond * 500)
+ assert.True(t, task.isStopped())
+}
+
+func Test_task_isRunning(t *testing.T) {
+ task := newTask(func() {}, time.Second)
+ assert.True(t, task.isRunning())
+
+ task.stop()
+ time.Sleep(time.Millisecond * 500)
+ assert.False(t, task.isRunning())
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/testdata/config.json b/src/go/plugin/go.d/modules/vsphere/testdata/config.json
new file mode 100644
index 000000000..3e4a77396
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/testdata/config.json
@@ -0,0 +1,27 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "discovery_interval": 123.123,
+ "host_include": [
+ "ok"
+ ],
+ "vm_include": [
+ "ok"
+ ]
+}
diff --git a/src/go/plugin/go.d/modules/vsphere/testdata/config.yaml b/src/go/plugin/go.d/modules/vsphere/testdata/config.yaml
new file mode 100644
index 000000000..d15e2346f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/testdata/config.yaml
@@ -0,0 +1,22 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+discovery_interval: 123.123
+host_include:
+ - "ok"
+vm_include:
+ - "ok"
diff --git a/src/go/plugin/go.d/modules/vsphere/vsphere.go b/src/go/plugin/go.d/modules/vsphere/vsphere.go
new file mode 100644
index 000000000..8df3ce6f0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/vsphere.go
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vsphere
+
+import (
+ _ "embed"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/match"
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/vmware/govmomi/performance"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("vsphere", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 20,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *VSphere {
+ return &VSphere{
+ Config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 20),
+ },
+ },
+ DiscoveryInterval: web.Duration(time.Minute * 5),
+ HostsInclude: []string{"/*"},
+ VMsInclude: []string{"/*"},
+ },
+ collectionLock: &sync.RWMutex{},
+ charts: &module.Charts{},
+ discoveredHosts: make(map[string]int),
+ discoveredVMs: make(map[string]int),
+ charted: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ DiscoveryInterval web.Duration `yaml:"discovery_interval,omitempty" json:"discovery_interval"`
+ HostsInclude match.HostIncludes `yaml:"host_include,omitempty" json:"host_include"`
+ VMsInclude match.VMIncludes `yaml:"vm_include,omitempty" json:"vm_include"`
+}
+
+type (
+ VSphere struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ discoverer
+ scraper
+
+ collectionLock *sync.RWMutex
+ resources *rs.Resources
+ discoveryTask *task
+ discoveredHosts map[string]int
+ discoveredVMs map[string]int
+ charted map[string]bool
+ }
+ discoverer interface {
+ Discover() (*rs.Resources, error)
+ }
+ scraper interface {
+ ScrapeHosts(rs.Hosts) []performance.EntityMetric
+ ScrapeVMs(rs.VMs) []performance.EntityMetric
+ }
+)
+
+func (vs *VSphere) Configuration() any {
+ return vs.Config
+}
+
+func (vs *VSphere) Init() error {
+ if err := vs.validateConfig(); err != nil {
+ vs.Errorf("error on validating config: %v", err)
+ return err
+ }
+
+ vsClient, err := vs.initClient()
+ if err != nil {
+ vs.Errorf("error on creating vsphere client: %v", err)
+ return err
+ }
+
+ if err := vs.initDiscoverer(vsClient); err != nil {
+ vs.Errorf("error on creating vsphere discoverer: %v", err)
+ return err
+ }
+
+ vs.initScraper(vsClient)
+
+ if err := vs.discoverOnce(); err != nil {
+ vs.Errorf("error on discovering: %v", err)
+ return err
+ }
+
+ vs.goDiscovery()
+
+ return nil
+}
+
+func (vs *VSphere) Check() error {
+ return nil
+}
+
+func (vs *VSphere) Charts() *module.Charts {
+ return vs.charts
+}
+
+func (vs *VSphere) Collect() map[string]int64 {
+ mx, err := vs.collect()
+ if err != nil {
+ vs.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (vs *VSphere) Cleanup() {
+ if vs.discoveryTask == nil {
+ return
+ }
+ vs.discoveryTask.stop()
+}
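For orientation only, here is a hedged sketch of the module contract that vsphere.go implements: the go.d agent (not shown in this file) is expected to call Init and Check once, Collect on every update interval, and Cleanup when the job is removed. The loop below approximates that contract; it is not the agent's actual scheduler.

package vsphere

import "time"

// driveModuleSketch approximates the agent side of the module lifecycle.
// Illustration only; the real scheduling lives in the agent package.
func driveModuleSketch() {
	vs := New()
	if err := vs.Init(); err != nil {
		return
	}
	if err := vs.Check(); err != nil {
		vs.Cleanup()
		return
	}
	defer vs.Cleanup()

	every := vs.UpdateEvery
	if every <= 0 {
		every = 20 // default registered in init() via module.Defaults
	}
	tick := time.NewTicker(time.Duration(every) * time.Second)
	defer tick.Stop()

	for i := 0; i < 3; i++ { // a few iterations, for illustration
		<-tick.C
		_ = vs.Collect() // metric name -> value; nil when nothing was collected
	}
}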
diff --git a/src/go/plugin/go.d/modules/vsphere/vsphere_test.go b/src/go/plugin/go.d/modules/vsphere/vsphere_test.go
new file mode 100644
index 000000000..c7a91e253
--- /dev/null
+++ b/src/go/plugin/go.d/modules/vsphere/vsphere_test.go
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+package vsphere
+
+import (
+ "crypto/tls"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/discover"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/match"
+ rs "github.com/netdata/netdata/go/plugins/plugin/go.d/modules/vsphere/resources"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/vmware/govmomi/performance"
+ "github.com/vmware/govmomi/simulator"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestVSphere_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &VSphere{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestVSphere_Init(t *testing.T) {
+ vSphere, _, teardown := prepareVSphereSim(t)
+ defer teardown()
+
+ assert.NoError(t, vSphere.Init())
+ assert.NotNil(t, vSphere.discoverer)
+ assert.NotNil(t, vSphere.scraper)
+ assert.NotNil(t, vSphere.resources)
+ assert.NotNil(t, vSphere.discoveryTask)
+ assert.True(t, vSphere.discoveryTask.isRunning())
+}
+
+func TestVSphere_Init_ReturnsFalseIfURLNotSet(t *testing.T) {
+ vSphere, _, teardown := prepareVSphereSim(t)
+ defer teardown()
+ vSphere.URL = ""
+
+ assert.Error(t, vSphere.Init())
+}
+
+func TestVSphere_Init_ReturnsFalseIfUsernameNotSet(t *testing.T) {
+ vSphere, _, teardown := prepareVSphereSim(t)
+ defer teardown()
+ vSphere.Username = ""
+
+ assert.Error(t, vSphere.Init())
+}
+
+func TestVSphere_Init_ReturnsFalseIfPasswordNotSet(t *testing.T) {
+ vSphere, _, teardown := prepareVSphereSim(t)
+ defer teardown()
+ vSphere.Password = ""
+
+ assert.Error(t, vSphere.Init())
+}
+
+func TestVSphere_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) {
+ vSphere, _, teardown := prepareVSphereSim(t)
+ defer teardown()
+ vSphere.Client.TLSConfig.TLSCA = "testdata/tls"
+
+ assert.Error(t, vSphere.Init())
+}
+
+func TestVSphere_Init_ReturnsFalseIfConnectionRefused(t *testing.T) {
+ vSphere, _, teardown := prepareVSphereSim(t)
+ defer teardown()
+ vSphere.URL = "http://127.0.0.1:32001"
+
+ assert.Error(t, vSphere.Init())
+}
+
+func TestVSphere_Init_ReturnsFalseIfInvalidHostVMIncludeFormat(t *testing.T) {
+ vSphere, _, teardown := prepareVSphereSim(t)
+ defer teardown()
+
+ vSphere.HostsInclude = match.HostIncludes{"invalid"}
+ assert.Error(t, vSphere.Init())
+
+ vSphere.HostsInclude = vSphere.HostsInclude[:0]
+
+ vSphere.VMsInclude = match.VMIncludes{"invalid"}
+ assert.Error(t, vSphere.Init())
+}
+
+func TestVSphere_Check(t *testing.T) {
+ assert.NoError(t, New().Check())
+}
+
+func TestVSphere_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestVSphere_Cleanup(t *testing.T) {
+ vSphere, _, teardown := prepareVSphereSim(t)
+ defer teardown()
+
+ require.NoError(t, vSphere.Init())
+
+ vSphere.Cleanup()
+ time.Sleep(time.Second)
+ assert.True(t, vSphere.discoveryTask.isStopped())
+ assert.False(t, vSphere.discoveryTask.isRunning())
+}
+
+func TestVSphere_Cleanup_NotPanicsIfNotInitialized(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestVSphere_Collect(t *testing.T) {
+ vSphere, model, teardown := prepareVSphereSim(t)
+ defer teardown()
+
+ require.NoError(t, vSphere.Init())
+
+ vSphere.scraper = mockScraper{vSphere.scraper}
+
+ expected := map[string]int64{
+ "host-22_cpu.usage.average": 100,
+ "host-22_disk.maxTotalLatency.latest": 100,
+ "host-22_disk.read.average": 100,
+ "host-22_disk.write.average": 100,
+ "host-22_mem.active.average": 100,
+ "host-22_mem.consumed.average": 100,
+ "host-22_mem.granted.average": 100,
+ "host-22_mem.shared.average": 100,
+ "host-22_mem.sharedcommon.average": 100,
+ "host-22_mem.swapinRate.average": 100,
+ "host-22_mem.swapoutRate.average": 100,
+ "host-22_mem.usage.average": 100,
+ "host-22_net.bytesRx.average": 100,
+ "host-22_net.bytesTx.average": 100,
+ "host-22_net.droppedRx.summation": 100,
+ "host-22_net.droppedTx.summation": 100,
+ "host-22_net.errorsRx.summation": 100,
+ "host-22_net.errorsTx.summation": 100,
+ "host-22_net.packetsRx.summation": 100,
+ "host-22_net.packetsTx.summation": 100,
+ "host-22_overall.status.gray": 1,
+ "host-22_overall.status.green": 0,
+ "host-22_overall.status.red": 0,
+ "host-22_overall.status.yellow": 0,
+ "host-22_sys.uptime.latest": 100,
+ "host-38_cpu.usage.average": 100,
+ "host-38_disk.maxTotalLatency.latest": 100,
+ "host-38_disk.read.average": 100,
+ "host-38_disk.write.average": 100,
+ "host-38_mem.active.average": 100,
+ "host-38_mem.consumed.average": 100,
+ "host-38_mem.granted.average": 100,
+ "host-38_mem.shared.average": 100,
+ "host-38_mem.sharedcommon.average": 100,
+ "host-38_mem.swapinRate.average": 100,
+ "host-38_mem.swapoutRate.average": 100,
+ "host-38_mem.usage.average": 100,
+ "host-38_net.bytesRx.average": 100,
+ "host-38_net.bytesTx.average": 100,
+ "host-38_net.droppedRx.summation": 100,
+ "host-38_net.droppedTx.summation": 100,
+ "host-38_net.errorsRx.summation": 100,
+ "host-38_net.errorsTx.summation": 100,
+ "host-38_net.packetsRx.summation": 100,
+ "host-38_net.packetsTx.summation": 100,
+ "host-38_overall.status.gray": 1,
+ "host-38_overall.status.green": 0,
+ "host-38_overall.status.red": 0,
+ "host-38_overall.status.yellow": 0,
+ "host-38_sys.uptime.latest": 100,
+ "host-48_cpu.usage.average": 100,
+ "host-48_disk.maxTotalLatency.latest": 100,
+ "host-48_disk.read.average": 100,
+ "host-48_disk.write.average": 100,
+ "host-48_mem.active.average": 100,
+ "host-48_mem.consumed.average": 100,
+ "host-48_mem.granted.average": 100,
+ "host-48_mem.shared.average": 100,
+ "host-48_mem.sharedcommon.average": 100,
+ "host-48_mem.swapinRate.average": 100,
+ "host-48_mem.swapoutRate.average": 100,
+ "host-48_mem.usage.average": 100,
+ "host-48_net.bytesRx.average": 100,
+ "host-48_net.bytesTx.average": 100,
+ "host-48_net.droppedRx.summation": 100,
+ "host-48_net.droppedTx.summation": 100,
+ "host-48_net.errorsRx.summation": 100,
+ "host-48_net.errorsTx.summation": 100,
+ "host-48_net.packetsRx.summation": 100,
+ "host-48_net.packetsTx.summation": 100,
+ "host-48_overall.status.gray": 1,
+ "host-48_overall.status.green": 0,
+ "host-48_overall.status.red": 0,
+ "host-48_overall.status.yellow": 0,
+ "host-48_sys.uptime.latest": 100,
+ "host-58_cpu.usage.average": 100,
+ "host-58_disk.maxTotalLatency.latest": 100,
+ "host-58_disk.read.average": 100,
+ "host-58_disk.write.average": 100,
+ "host-58_mem.active.average": 100,
+ "host-58_mem.consumed.average": 100,
+ "host-58_mem.granted.average": 100,
+ "host-58_mem.shared.average": 100,
+ "host-58_mem.sharedcommon.average": 100,
+ "host-58_mem.swapinRate.average": 100,
+ "host-58_mem.swapoutRate.average": 100,
+ "host-58_mem.usage.average": 100,
+ "host-58_net.bytesRx.average": 100,
+ "host-58_net.bytesTx.average": 100,
+ "host-58_net.droppedRx.summation": 100,
+ "host-58_net.droppedTx.summation": 100,
+ "host-58_net.errorsRx.summation": 100,
+ "host-58_net.errorsTx.summation": 100,
+ "host-58_net.packetsRx.summation": 100,
+ "host-58_net.packetsTx.summation": 100,
+ "host-58_overall.status.gray": 1,
+ "host-58_overall.status.green": 0,
+ "host-58_overall.status.red": 0,
+ "host-58_overall.status.yellow": 0,
+ "host-58_sys.uptime.latest": 100,
+ "vm-63_cpu.usage.average": 200,
+ "vm-63_disk.maxTotalLatency.latest": 200,
+ "vm-63_disk.read.average": 200,
+ "vm-63_disk.write.average": 200,
+ "vm-63_mem.active.average": 200,
+ "vm-63_mem.consumed.average": 200,
+ "vm-63_mem.granted.average": 200,
+ "vm-63_mem.shared.average": 200,
+ "vm-63_mem.swapinRate.average": 200,
+ "vm-63_mem.swapoutRate.average": 200,
+ "vm-63_mem.swapped.average": 200,
+ "vm-63_mem.usage.average": 200,
+ "vm-63_net.bytesRx.average": 200,
+ "vm-63_net.bytesTx.average": 200,
+ "vm-63_net.droppedRx.summation": 200,
+ "vm-63_net.droppedTx.summation": 200,
+ "vm-63_net.packetsRx.summation": 200,
+ "vm-63_net.packetsTx.summation": 200,
+ "vm-63_overall.status.gray": 0,
+ "vm-63_overall.status.green": 1,
+ "vm-63_overall.status.red": 0,
+ "vm-63_overall.status.yellow": 0,
+ "vm-63_sys.uptime.latest": 200,
+ "vm-66_cpu.usage.average": 200,
+ "vm-66_disk.maxTotalLatency.latest": 200,
+ "vm-66_disk.read.average": 200,
+ "vm-66_disk.write.average": 200,
+ "vm-66_mem.active.average": 200,
+ "vm-66_mem.consumed.average": 200,
+ "vm-66_mem.granted.average": 200,
+ "vm-66_mem.shared.average": 200,
+ "vm-66_mem.swapinRate.average": 200,
+ "vm-66_mem.swapoutRate.average": 200,
+ "vm-66_mem.swapped.average": 200,
+ "vm-66_mem.usage.average": 200,
+ "vm-66_net.bytesRx.average": 200,
+ "vm-66_net.bytesTx.average": 200,
+ "vm-66_net.droppedRx.summation": 200,
+ "vm-66_net.droppedTx.summation": 200,
+ "vm-66_net.packetsRx.summation": 200,
+ "vm-66_net.packetsTx.summation": 200,
+ "vm-66_overall.status.gray": 0,
+ "vm-66_overall.status.green": 1,
+ "vm-66_overall.status.red": 0,
+ "vm-66_overall.status.yellow": 0,
+ "vm-66_sys.uptime.latest": 200,
+ "vm-69_cpu.usage.average": 200,
+ "vm-69_disk.maxTotalLatency.latest": 200,
+ "vm-69_disk.read.average": 200,
+ "vm-69_disk.write.average": 200,
+ "vm-69_mem.active.average": 200,
+ "vm-69_mem.consumed.average": 200,
+ "vm-69_mem.granted.average": 200,
+ "vm-69_mem.shared.average": 200,
+ "vm-69_mem.swapinRate.average": 200,
+ "vm-69_mem.swapoutRate.average": 200,
+ "vm-69_mem.swapped.average": 200,
+ "vm-69_mem.usage.average": 200,
+ "vm-69_net.bytesRx.average": 200,
+ "vm-69_net.bytesTx.average": 200,
+ "vm-69_net.droppedRx.summation": 200,
+ "vm-69_net.droppedTx.summation": 200,
+ "vm-69_net.packetsRx.summation": 200,
+ "vm-69_net.packetsTx.summation": 200,
+ "vm-69_overall.status.gray": 0,
+ "vm-69_overall.status.green": 1,
+ "vm-69_overall.status.red": 0,
+ "vm-69_overall.status.yellow": 0,
+ "vm-69_sys.uptime.latest": 200,
+ "vm-72_cpu.usage.average": 200,
+ "vm-72_disk.maxTotalLatency.latest": 200,
+ "vm-72_disk.read.average": 200,
+ "vm-72_disk.write.average": 200,
+ "vm-72_mem.active.average": 200,
+ "vm-72_mem.consumed.average": 200,
+ "vm-72_mem.granted.average": 200,
+ "vm-72_mem.shared.average": 200,
+ "vm-72_mem.swapinRate.average": 200,
+ "vm-72_mem.swapoutRate.average": 200,
+ "vm-72_mem.swapped.average": 200,
+ "vm-72_mem.usage.average": 200,
+ "vm-72_net.bytesRx.average": 200,
+ "vm-72_net.bytesTx.average": 200,
+ "vm-72_net.droppedRx.summation": 200,
+ "vm-72_net.droppedTx.summation": 200,
+ "vm-72_net.packetsRx.summation": 200,
+ "vm-72_net.packetsTx.summation": 200,
+ "vm-72_overall.status.gray": 0,
+ "vm-72_overall.status.green": 1,
+ "vm-72_overall.status.red": 0,
+ "vm-72_overall.status.yellow": 0,
+ "vm-72_sys.uptime.latest": 200,
+ }
+
+ collected := vSphere.Collect()
+
+ require.Equal(t, expected, collected)
+
+ count := model.Count()
+ assert.Len(t, vSphere.discoveredHosts, count.Host)
+ assert.Len(t, vSphere.discoveredVMs, count.Machine)
+ assert.Len(t, vSphere.charted, count.Host+count.Machine)
+
+ assert.Len(t, *vSphere.Charts(), count.Host*len(hostChartsTmpl)+count.Machine*len(vmChartsTmpl))
+ ensureCollectedHasAllChartsDimsVarsIDs(t, vSphere, collected)
+}
+
+func TestVSphere_Collect_RemoveHostsVMsInRuntime(t *testing.T) {
+ vSphere, _, teardown := prepareVSphereSim(t)
+ defer teardown()
+
+ require.NoError(t, vSphere.Init())
+ require.NoError(t, vSphere.Check())
+
+ okHostID := "host-58"
+ okVMID := "vm-63"
+ vSphere.discoverer.(*discover.Discoverer).HostMatcher = mockHostMatcher{okHostID}
+ vSphere.discoverer.(*discover.Discoverer).VMMatcher = mockVMMatcher{okVMID}
+
+ require.NoError(t, vSphere.discoverOnce())
+
+ numOfRuns := 5
+ for i := 0; i < numOfRuns; i++ {
+ vSphere.Collect()
+ }
+
+ host := vSphere.resources.Hosts.Get(okHostID)
+ for k, v := range vSphere.discoveredHosts {
+ if k == host.ID {
+ assert.Equal(t, 0, v)
+ } else {
+ assert.Equal(t, numOfRuns, v)
+ }
+ }
+
+ vm := vSphere.resources.VMs.Get(okVMID)
+ for id, fails := range vSphere.discoveredVMs {
+ if id == vm.ID {
+ assert.Equal(t, 0, fails)
+ } else {
+ assert.Equal(t, numOfRuns, fails)
+ }
+
+ }
+
+ for i := numOfRuns; i < failedUpdatesLimit; i++ {
+ vSphere.Collect()
+ }
+
+ assert.Len(t, vSphere.discoveredHosts, 1)
+ assert.Len(t, vSphere.discoveredVMs, 1)
+ assert.Len(t, vSphere.charted, 2)
+
+ for _, c := range *vSphere.Charts() {
+ if strings.HasPrefix(c.ID, okHostID) || strings.HasPrefix(c.ID, okVMID) {
+ assert.False(t, c.Obsolete)
+ } else {
+ assert.True(t, c.Obsolete)
+ }
+ }
+}
+
+func TestVSphere_Collect_Run(t *testing.T) {
+ vSphere, model, teardown := prepareVSphereSim(t)
+ defer teardown()
+
+ vSphere.DiscoveryInterval = web.Duration(time.Second * 2)
+ require.NoError(t, vSphere.Init())
+ require.NoError(t, vSphere.Check())
+
+ runs := 20
+ for i := 0; i < runs; i++ {
+ assert.True(t, len(vSphere.Collect()) > 0)
+ if i < 6 {
+ time.Sleep(time.Second)
+ }
+ }
+
+ count := model.Count()
+ assert.Len(t, vSphere.discoveredHosts, count.Host)
+ assert.Len(t, vSphere.discoveredVMs, count.Machine)
+ assert.Len(t, vSphere.charted, count.Host+count.Machine)
+ assert.Len(t, *vSphere.charts, count.Host*len(hostChartsTmpl)+count.Machine*len(vmChartsTmpl))
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, vSphere *VSphere, collected map[string]int64) {
+ for _, chart := range *vSphere.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+func prepareVSphereSim(t *testing.T) (vSphere *VSphere, model *simulator.Model, teardown func()) {
+ model, srv := createSim(t)
+ vSphere = New()
+ teardown = func() { model.Remove(); srv.Close(); vSphere.Cleanup() }
+
+ vSphere.Username = "administrator"
+ vSphere.Password = "password"
+ vSphere.URL = srv.URL.String()
+ vSphere.TLSConfig.InsecureSkipVerify = true
+
+ return vSphere, model, teardown
+}
+
+func createSim(t *testing.T) (*simulator.Model, *simulator.Server) {
+ model := simulator.VPX()
+ err := model.Create()
+ require.NoError(t, err)
+ model.Service.TLS = new(tls.Config)
+ return model, model.Service.NewServer()
+}
+
+type mockScraper struct {
+ scraper
+}
+
+func (s mockScraper) ScrapeHosts(hosts rs.Hosts) []performance.EntityMetric {
+ ms := s.scraper.ScrapeHosts(hosts)
+ return populateMetrics(ms, 100)
+}
+func (s mockScraper) ScrapeVMs(vms rs.VMs) []performance.EntityMetric {
+ ms := s.scraper.ScrapeVMs(vms)
+ return populateMetrics(ms, 200)
+}
+
+func populateMetrics(ms []performance.EntityMetric, value int64) []performance.EntityMetric {
+ for i := range ms {
+ for ii := range ms[i].Value {
+ v := &ms[i].Value[ii].Value
+ if *v == nil {
+ *v = append(*v, value)
+ } else {
+ (*v)[0] = value
+ }
+ }
+ }
+ return ms
+}
+
+type mockHostMatcher struct{ name string }
+type mockVMMatcher struct{ name string }
+
+func (m mockHostMatcher) Match(host *rs.Host) bool { return m.name == host.ID }
+func (m mockVMMatcher) Match(vm *rs.VM) bool { return m.name == vm.ID }
diff --git a/src/go/plugin/go.d/modules/weblog/README.md b/src/go/plugin/go.d/modules/weblog/README.md
new file mode 120000
index 000000000..9da3f21c2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/README.md
@@ -0,0 +1 @@
+integrations/web_server_log_files.md
\ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/weblog/charts.go b/src/go/plugin/go.d/modules/weblog/charts.go
new file mode 100644
index 000000000..c7d5a7673
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/charts.go
@@ -0,0 +1,890 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+type (
+ Charts = module.Charts
+ Chart = module.Chart
+ Dims = module.Dims
+ Dim = module.Dim
+)
+
+const (
+ prioReqTotal = module.Priority + iota
+ prioReqExcluded
+ prioReqType
+
+ prioRespCodesClass
+ prioRespCodes
+ prioRespCodes1xx
+ prioRespCodes2xx
+ prioRespCodes3xx
+ prioRespCodes4xx
+ prioRespCodes5xx
+
+ prioBandwidth
+
+ prioReqProcTime
+ prioRespTimeHist
+ prioUpsRespTime
+ prioUpsRespTimeHist
+
+ prioUniqIP
+
+ prioReqVhost
+ prioReqPort
+ prioReqScheme
+ prioReqMethod
+ prioReqVersion
+ prioReqIPProto
+ prioReqSSLProto
+ prioReqSSLCipherSuite
+
+ prioReqCustomFieldPattern // chart per custom field, alphabetical order
+ prioReqCustomTimeField // chart per custom time field, alphabetical order
+ prioReqCustomTimeFieldHist // histogram chart per custom time field
+ prioReqURLPattern
+ prioURLPatternStats
+
+ prioReqCustomNumericFieldSummary // 3 charts per url pattern, alphabetical order
+)
+
+// NOTE: inconsistency with python web_log
+// TODO: current histogram charts are misleading in netdata
+
+// Requests
+var (
+ reqTotal = Chart{
+ ID: "requests",
+ Title: "Total Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "web_log.requests",
+ Priority: prioReqTotal,
+ Dims: Dims{
+ {ID: "requests", Algo: module.Incremental},
+ },
+ }
+ reqExcluded = Chart{
+ ID: "excluded_requests",
+ Title: "Excluded Requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "web_log.excluded_requests",
+ Type: module.Stacked,
+ Priority: prioReqExcluded,
+ Dims: Dims{
+ {ID: "req_unmatched", Name: "unmatched", Algo: module.Incremental},
+ },
+ }
+ // netdata specific grouping
+ reqTypes = Chart{
+ ID: "requests_by_type",
+ Title: "Requests By Type",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "web_log.type_requests",
+ Type: module.Stacked,
+ Priority: prioReqType,
+ Dims: Dims{
+ {ID: "req_type_success", Name: "success", Algo: module.Incremental},
+ {ID: "req_type_bad", Name: "bad", Algo: module.Incremental},
+ {ID: "req_type_redirect", Name: "redirect", Algo: module.Incremental},
+ {ID: "req_type_error", Name: "error", Algo: module.Incremental},
+ },
+ }
+)
+
+// Responses
+var (
+ respCodeClass = Chart{
+ ID: "responses_by_status_code_class",
+ Title: "Responses By Status Code Class",
+ Units: "responses/s",
+ Fam: "responses",
+ Ctx: "web_log.status_code_class_responses",
+ Type: module.Stacked,
+ Priority: prioRespCodesClass,
+ Dims: Dims{
+ {ID: "resp_2xx", Name: "2xx", Algo: module.Incremental},
+ {ID: "resp_5xx", Name: "5xx", Algo: module.Incremental},
+ {ID: "resp_3xx", Name: "3xx", Algo: module.Incremental},
+ {ID: "resp_4xx", Name: "4xx", Algo: module.Incremental},
+ {ID: "resp_1xx", Name: "1xx", Algo: module.Incremental},
+ },
+ }
+ respCodes = Chart{
+ ID: "responses_by_status_code",
+ Title: "Responses By Status Code",
+ Units: "responses/s",
+ Fam: "responses",
+ Ctx: "web_log.status_code_responses",
+ Type: module.Stacked,
+ Priority: prioRespCodes,
+ }
+ respCodes1xx = Chart{
+ ID: "status_code_class_1xx_responses",
+ Title: "Informational Responses By Status Code",
+ Units: "responses/s",
+ Fam: "responses",
+ Ctx: "web_log.status_code_class_1xx_responses",
+ Type: module.Stacked,
+ Priority: prioRespCodes1xx,
+ }
+ respCodes2xx = Chart{
+ ID: "status_code_class_2xx_responses",
+ Title: "Successful Responses By Status Code",
+ Units: "responses/s",
+ Fam: "responses",
+ Ctx: "web_log.status_code_class_2xx_responses",
+ Type: module.Stacked,
+ Priority: prioRespCodes2xx,
+ }
+ respCodes3xx = Chart{
+ ID: "status_code_class_3xx_responses",
+ Title: "Redirects Responses By Status Code",
+ Units: "responses/s",
+ Fam: "responses",
+ Ctx: "web_log.status_code_class_3xx_responses",
+ Type: module.Stacked,
+ Priority: prioRespCodes3xx,
+ }
+ respCodes4xx = Chart{
+ ID: "status_code_class_4xx_responses",
+ Title: "Client Errors Responses By Status Code",
+ Units: "responses/s",
+ Fam: "responses",
+ Ctx: "web_log.status_code_class_4xx_responses",
+ Type: module.Stacked,
+ Priority: prioRespCodes4xx,
+ }
+ respCodes5xx = Chart{
+ ID: "status_code_class_5xx_responses",
+ Title: "Server Errors Responses By Status Code",
+ Units: "responses/s",
+ Fam: "responses",
+ Ctx: "web_log.status_code_class_5xx_responses",
+ Type: module.Stacked,
+ Priority: prioRespCodes5xx,
+ }
+)
+
+// Bandwidth
+var (
+ bandwidth = Chart{
+ ID: "bandwidth",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "bandwidth",
+ Ctx: "web_log.bandwidth",
+ Type: module.Area,
+ Priority: prioBandwidth,
+ Dims: Dims{
+ {ID: "bytes_received", Name: "received", Algo: module.Incremental, Mul: 8, Div: 1000},
+ {ID: "bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -8, Div: 1000},
+ },
+ }
+)
+
+// Timings
+var (
+ reqProcTime = Chart{
+ ID: "request_processing_time",
+ Title: "Request Processing Time",
+ Units: "milliseconds",
+ Fam: "timings",
+ Ctx: "web_log.request_processing_time",
+ Priority: prioReqProcTime,
+ Dims: Dims{
+ {ID: "req_proc_time_min", Name: "min", Div: 1000},
+ {ID: "req_proc_time_max", Name: "max", Div: 1000},
+ {ID: "req_proc_time_avg", Name: "avg", Div: 1000},
+ },
+ }
+ reqProcTimeHist = Chart{
+ ID: "requests_processing_time_histogram",
+ Title: "Requests Processing Time Histogram",
+ Units: "requests/s",
+ Fam: "timings",
+ Ctx: "web_log.requests_processing_time_histogram",
+ Priority: prioRespTimeHist,
+ }
+)
+
+// Upstream
+var (
+ upsRespTime = Chart{
+ ID: "upstream_response_time",
+ Title: "Upstream Response Time",
+ Units: "milliseconds",
+ Fam: "timings",
+ Ctx: "web_log.upstream_response_time",
+ Priority: prioUpsRespTime,
+ Dims: Dims{
+ {ID: "upstream_resp_time_min", Name: "min", Div: 1000},
+ {ID: "upstream_resp_time_max", Name: "max", Div: 1000},
+ {ID: "upstream_resp_time_avg", Name: "avg", Div: 1000},
+ },
+ }
+ upsRespTimeHist = Chart{
+ ID: "upstream_responses_time_histogram",
+ Title: "Upstream Responses Time Histogram",
+ Units: "responses/s",
+ Fam: "timings",
+ Ctx: "web_log.upstream_responses_time_histogram",
+ Priority: prioUpsRespTimeHist,
+ }
+)
+
+// Clients
+var (
+ uniqIPsCurPoll = Chart{
+ ID: "current_poll_uniq_clients",
+ Title: "Current Poll Unique Clients",
+ Units: "clients",
+ Fam: "client",
+ Ctx: "web_log.current_poll_uniq_clients",
+ Type: module.Stacked,
+ Priority: prioUniqIP,
+ Dims: Dims{
+ {ID: "uniq_ipv4", Name: "ipv4", Algo: module.Absolute},
+ {ID: "uniq_ipv6", Name: "ipv6", Algo: module.Absolute},
+ },
+ }
+)
+
+// Request By N
+var (
+ reqByVhost = Chart{
+ ID: "requests_by_vhost",
+ Title: "Requests By Vhost",
+ Units: "requests/s",
+ Fam: "vhost",
+ Ctx: "web_log.vhost_requests",
+ Type: module.Stacked,
+ Priority: prioReqVhost,
+ }
+ reqByPort = Chart{
+ ID: "requests_by_port",
+ Title: "Requests By Port",
+ Units: "requests/s",
+ Fam: "port",
+ Ctx: "web_log.port_requests",
+ Type: module.Stacked,
+ Priority: prioReqPort,
+ }
+ reqByScheme = Chart{
+ ID: "requests_by_scheme",
+ Title: "Requests By Scheme",
+ Units: "requests/s",
+ Fam: "scheme",
+ Ctx: "web_log.scheme_requests",
+ Type: module.Stacked,
+ Priority: prioReqScheme,
+ Dims: Dims{
+ {ID: "req_http_scheme", Name: "http", Algo: module.Incremental},
+ {ID: "req_https_scheme", Name: "https", Algo: module.Incremental},
+ },
+ }
+ reqByMethod = Chart{
+ ID: "requests_by_http_method",
+ Title: "Requests By HTTP Method",
+ Units: "requests/s",
+ Fam: "http method",
+ Ctx: "web_log.http_method_requests",
+ Type: module.Stacked,
+ Priority: prioReqMethod,
+ }
+ reqByVersion = Chart{
+ ID: "requests_by_http_version",
+ Title: "Requests By HTTP Version",
+ Units: "requests/s",
+ Fam: "http version",
+ Ctx: "web_log.http_version_requests",
+ Type: module.Stacked,
+ Priority: prioReqVersion,
+ }
+ reqByIPProto = Chart{
+ ID: "requests_by_ip_proto",
+ Title: "Requests By IP Protocol",
+ Units: "requests/s",
+ Fam: "ip proto",
+ Ctx: "web_log.ip_proto_requests",
+ Type: module.Stacked,
+ Priority: prioReqIPProto,
+ Dims: Dims{
+ {ID: "req_ipv4", Name: "ipv4", Algo: module.Incremental},
+ {ID: "req_ipv6", Name: "ipv6", Algo: module.Incremental},
+ },
+ }
+ reqBySSLProto = Chart{
+ ID: "requests_by_ssl_proto",
+ Title: "Requests By SSL Connection Protocol",
+ Units: "requests/s",
+ Fam: "ssl conn",
+ Ctx: "web_log.ssl_proto_requests",
+ Type: module.Stacked,
+ Priority: prioReqSSLProto,
+ }
+ reqBySSLCipherSuite = Chart{
+ ID: "requests_by_ssl_cipher_suite",
+ Title: "Requests By SSL Connection Cipher Suite",
+ Units: "requests/s",
+ Fam: "ssl conn",
+ Ctx: "web_log.ssl_cipher_suite_requests",
+ Type: module.Stacked,
+ Priority: prioReqSSLCipherSuite,
+ }
+)
+
+// Request By N Patterns
+var (
+ reqByURLPattern = Chart{
+ ID: "requests_by_url_pattern",
+ Title: "URL Field Requests By Pattern",
+ Units: "requests/s",
+ Fam: "url ptn",
+ Ctx: "web_log.url_pattern_requests",
+ Type: module.Stacked,
+ Priority: prioReqURLPattern,
+ }
+ reqByCustomFieldPattern = Chart{
+ ID: "custom_field_%s_requests_by_pattern",
+ Title: "Custom Field %s Requests By Pattern",
+ Units: "requests/s",
+ Fam: "custom field ptn",
+ Ctx: "web_log.custom_field_pattern_requests",
+ Type: module.Stacked,
+ Priority: prioReqCustomFieldPattern,
+ }
+)
+
+// custom time field
+var (
+ reqByCustomTimeField = Chart{
+ ID: "custom_time_field_%s_summary",
+ Title: `Custom Time Field "%s" Summary`,
+ Units: "milliseconds",
+ Fam: "custom time field",
+ Ctx: "web_log.custom_time_field_summary",
+ Priority: prioReqCustomTimeField,
+ Dims: Dims{
+ {ID: "custom_time_field_%s_time_min", Name: "min", Div: 1000},
+ {ID: "custom_time_field_%s_time_max", Name: "max", Div: 1000},
+ {ID: "custom_time_field_%s_time_avg", Name: "avg", Div: 1000},
+ },
+ }
+ reqByCustomTimeFieldHist = Chart{
+ ID: "custom_time_field_%s_histogram",
+ Title: `Custom Time Field "%s" Histogram`,
+ Units: "observations",
+ Fam: "custom time field",
+ Ctx: "web_log.custom_time_field_histogram",
+ Priority: prioReqCustomTimeFieldHist,
+ }
+)
+
+var (
+ customNumericFieldSummaryChartTmpl = Chart{
+ ID: "custom_numeric_field_%s_summary",
+ Title: "Custom Numeric Field Summary",
+ Units: "",
+ Fam: "custom numeric fields",
+ Ctx: "web_log.custom_numeric_field_%s_summary",
+ Priority: prioReqCustomNumericFieldSummary,
+ Dims: Dims{
+ {ID: "custom_numeric_field_%s_summary_min", Name: "min"},
+ {ID: "custom_numeric_field_%s_summary_max", Name: "max"},
+ {ID: "custom_numeric_field_%s_summary_avg", Name: "avg"},
+ },
+ }
+)
+
+// URL pattern stats
+var (
+ urlPatternRespCodes = Chart{
+ ID: "url_pattern_%s_responses_by_status_code",
+ Title: "Responses By Status Code",
+ Units: "responses/s",
+ Fam: "url ptn %s",
+ Ctx: "web_log.url_pattern_status_code_responses",
+ Type: module.Stacked,
+ Priority: prioURLPatternStats,
+ }
+ urlPatternReqMethods = Chart{
+ ID: "url_pattern_%s_requests_by_http_method",
+ Title: "Requests By HTTP Method",
+ Units: "requests/s",
+ Fam: "url ptn %s",
+ Ctx: "web_log.url_pattern_http_method_requests",
+ Type: module.Stacked,
+ Priority: prioURLPatternStats + 1,
+ }
+ urlPatternBandwidth = Chart{
+ ID: "url_pattern_%s_bandwidth",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "url ptn %s",
+ Ctx: "web_log.url_pattern_bandwidth",
+ Type: module.Area,
+ Priority: prioURLPatternStats + 2,
+ Dims: Dims{
+ {ID: "url_ptn_%s_bytes_received", Name: "received", Algo: module.Incremental, Mul: 8, Div: 1000},
+ {ID: "url_ptn_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -8, Div: 1000},
+ },
+ }
+ urlPatternReqProcTime = Chart{
+ ID: "url_pattern_%s_request_processing_time",
+ Title: "Request Processing Time",
+ Units: "milliseconds",
+ Fam: "url ptn %s",
+ Ctx: "web_log.url_pattern_request_processing_time",
+ Priority: prioURLPatternStats + 3,
+ Dims: Dims{
+ {ID: "url_ptn_%s_req_proc_time_min", Name: "min", Div: 1000},
+ {ID: "url_ptn_%s_req_proc_time_max", Name: "max", Div: 1000},
+ {ID: "url_ptn_%s_req_proc_time_avg", Name: "avg", Div: 1000},
+ },
+ }
+)
+
+func newReqProcTimeHistChart(histogram []float64) (*Chart, error) {
+ chart := reqProcTimeHist.Copy()
+ for i, v := range histogram {
+ dim := &Dim{
+ ID: fmt.Sprintf("req_proc_time_hist_bucket_%d", i+1),
+ Name: fmt.Sprintf("%.3f", v),
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+ }
+ if err := chart.AddDim(&Dim{
+ ID: "req_proc_time_hist_count",
+ Name: "+Inf",
+ Algo: module.Incremental,
+ }); err != nil {
+ return nil, err
+ }
+ return chart, nil
+}
+
+func newUpsRespTimeHistChart(histogram []float64) (*Chart, error) {
+ chart := upsRespTimeHist.Copy()
+ for i, v := range histogram {
+ dim := &Dim{
+ ID: fmt.Sprintf("upstream_resp_time_hist_bucket_%d", i+1),
+ Name: fmt.Sprintf("%.3f", v),
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+ }
+ if err := chart.AddDim(&Dim{
+ ID: "upstream_resp_time_hist_count",
+ Name: "+Inf",
+ Algo: module.Incremental,
+ }); err != nil {
+ return nil, err
+ }
+ return chart, nil
+}
+
+func newURLPatternChart(patterns []userPattern) (*Chart, error) {
+ chart := reqByURLPattern.Copy()
+ for _, p := range patterns {
+ dim := &Dim{
+ ID: "req_url_ptn_" + p.Name,
+ Name: p.Name,
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+ }
+ return chart, nil
+}
+
+func newURLPatternRespCodesChart(name string) *Chart {
+ chart := urlPatternRespCodes.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ return chart
+}
+
+func newURLPatternReqMethodsChart(name string) *Chart {
+ chart := urlPatternReqMethods.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ return chart
+}
+
+func newURLPatternBandwidthChart(name string) *Chart {
+ chart := urlPatternBandwidth.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, name)
+ }
+ return chart
+}
+
+func newURLPatternReqProcTimeChart(name string) *Chart {
+ chart := urlPatternReqProcTime.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, name)
+ }
+ return chart
+}
+
+func newCustomFieldCharts(fields []customField) (Charts, error) {
+ charts := Charts{}
+ for _, f := range fields {
+ chart, err := newCustomFieldChart(f)
+ if err != nil {
+ return nil, err
+ }
+ if err := charts.Add(chart); err != nil {
+ return nil, err
+ }
+ }
+ return charts, nil
+}
+
+func newCustomFieldChart(f customField) (*Chart, error) {
+ chart := reqByCustomFieldPattern.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, f.Name)
+ chart.Title = fmt.Sprintf(chart.Title, f.Name)
+ for _, p := range f.Patterns {
+ dim := &Dim{
+ ID: fmt.Sprintf("custom_field_%s_%s", f.Name, p.Name),
+ Name: p.Name,
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+ }
+ return chart, nil
+}
+
+func newCustomTimeFieldCharts(fields []customTimeField) (Charts, error) {
+ charts := Charts{}
+ for i, f := range fields {
+ chartTime, err := newCustomTimeFieldChart(f)
+ if err != nil {
+ return nil, err
+ }
+ chartTime.Priority += i
+ if err := charts.Add(chartTime); err != nil {
+ return nil, err
+ }
+ if len(f.Histogram) < 1 {
+ continue
+ }
+
+ chartHist, err := newCustomTimeFieldHistChart(f)
+ if err != nil {
+ return nil, err
+ }
+ chartHist.Priority += i
+
+ if err := charts.Add(chartHist); err != nil {
+ return nil, err
+ }
+ }
+ return charts, nil
+}
+
+func newCustomTimeFieldChart(f customTimeField) (*Chart, error) {
+ chart := reqByCustomTimeField.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, f.Name)
+ chart.Title = fmt.Sprintf(chart.Title, f.Name)
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, f.Name)
+ }
+ return chart, nil
+}
+
+func newCustomTimeFieldHistChart(f customTimeField) (*Chart, error) {
+ chart := reqByCustomTimeFieldHist.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, f.Name)
+ chart.Title = fmt.Sprintf(chart.Title, f.Name)
+ for i, v := range f.Histogram {
+ dim := &Dim{
+ ID: fmt.Sprintf("custom_time_field_%s_time_hist_bucket_%d", f.Name, i+1),
+ Name: fmt.Sprintf("%.3f", v),
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+ }
+ if err := chart.AddDim(&Dim{
+ ID: fmt.Sprintf("custom_time_field_%s_time_hist_count", f.Name),
+ Name: "+Inf",
+ Algo: module.Incremental,
+ }); err != nil {
+ return nil, err
+ }
+ return chart, nil
+}
+
+func (w *WebLog) createCharts(line *logLine) error {
+ if line.empty() {
+ return errors.New("empty line")
+ }
+ w.charts = nil
+ // The following charts are created at runtime:
+ // - reqBySSLProto, reqBySSLCipherSuite - the line most likely has no SSL information at this point
+ charts := &Charts{
+ reqTotal.Copy(),
+ reqExcluded.Copy(),
+ }
+ if line.hasVhost() {
+ if err := addVhostCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasPort() {
+ if err := addPortCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasReqScheme() {
+ if err := addSchemeCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasReqClient() {
+ if err := addClientCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasReqMethod() {
+ if err := addMethodCharts(charts, w.URLPatterns); err != nil {
+ return err
+ }
+ }
+ if line.hasReqURL() {
+ if err := addURLCharts(charts, w.URLPatterns); err != nil {
+ return err
+ }
+ }
+ if line.hasReqProto() {
+ if err := addReqProtoCharts(charts); err != nil {
+ return err
+ }
+ }
+ if line.hasRespCode() {
+ if err := addRespCodesCharts(charts, w.GroupRespCodes); err != nil {
+ return err
+ }
+ }
+ if line.hasReqSize() || line.hasRespSize() {
+ if err := addBandwidthCharts(charts, w.URLPatterns); err != nil {
+ return err
+ }
+ }
+ if line.hasReqProcTime() {
+ if err := addReqProcTimeCharts(charts, w.Histogram, w.URLPatterns); err != nil {
+ return err
+ }
+ }
+ if line.hasUpsRespTime() {
+ if err := addUpstreamRespTimeCharts(charts, w.Histogram); err != nil {
+ return err
+ }
+ }
+ if line.hasCustomFields() {
+ if len(w.CustomFields) > 0 {
+ if err := addCustomFieldsCharts(charts, w.CustomFields); err != nil {
+ return err
+ }
+ }
+ if len(w.CustomTimeFields) > 0 {
+ if err := addCustomTimeFieldsCharts(charts, w.CustomTimeFields); err != nil {
+ return err
+ }
+ }
+ if len(w.CustomNumericFields) > 0 {
+ if err := addCustomNumericFieldsCharts(charts, w.CustomNumericFields); err != nil {
+ return err
+ }
+ }
+ }
+
+ w.charts = charts
+
+ return nil
+}
+
+func addVhostCharts(charts *Charts) error {
+ return charts.Add(reqByVhost.Copy())
+}
+
+func addPortCharts(charts *Charts) error {
+ return charts.Add(reqByPort.Copy())
+}
+
+func addSchemeCharts(charts *Charts) error {
+ return charts.Add(reqByScheme.Copy())
+}
+
+func addClientCharts(charts *Charts) error {
+ if err := charts.Add(reqByIPProto.Copy()); err != nil {
+ return err
+ }
+ return charts.Add(uniqIPsCurPoll.Copy())
+}
+
+func addMethodCharts(charts *Charts, patterns []userPattern) error {
+ if err := charts.Add(reqByMethod.Copy()); err != nil {
+ return err
+ }
+
+ for _, p := range patterns {
+ chart := newURLPatternReqMethodsChart(p.Name)
+ if err := charts.Add(chart); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func addURLCharts(charts *Charts, patterns []userPattern) error {
+ if len(patterns) == 0 {
+ return nil
+ }
+ chart, err := newURLPatternChart(patterns)
+ if err != nil {
+ return err
+ }
+ if err := charts.Add(chart); err != nil {
+ return err
+ }
+
+ for _, p := range patterns {
+ chart := newURLPatternRespCodesChart(p.Name)
+ if err := charts.Add(chart); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func addReqProtoCharts(charts *Charts) error {
+ return charts.Add(reqByVersion.Copy())
+}
+
+func addRespCodesCharts(charts *Charts, group bool) error {
+ if err := charts.Add(reqTypes.Copy()); err != nil {
+ return err
+ }
+ if err := charts.Add(respCodeClass.Copy()); err != nil {
+ return err
+ }
+ if !group {
+ return charts.Add(respCodes.Copy())
+ }
+ for _, c := range []Chart{respCodes1xx, respCodes2xx, respCodes3xx, respCodes4xx, respCodes5xx} {
+ if err := charts.Add(c.Copy()); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func addBandwidthCharts(charts *Charts, patterns []userPattern) error {
+ if err := charts.Add(bandwidth.Copy()); err != nil {
+ return err
+ }
+
+ for _, p := range patterns {
+ chart := newURLPatternBandwidthChart(p.Name)
+ if err := charts.Add(chart); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func addReqProcTimeCharts(charts *Charts, histogram []float64, patterns []userPattern) error {
+ if err := charts.Add(reqProcTime.Copy()); err != nil {
+ return err
+ }
+ for _, p := range patterns {
+ chart := newURLPatternReqProcTimeChart(p.Name)
+ if err := charts.Add(chart); err != nil {
+ return err
+ }
+ }
+ if len(histogram) == 0 {
+ return nil
+ }
+ chart, err := newReqProcTimeHistChart(histogram)
+ if err != nil {
+ return err
+ }
+ return charts.Add(chart)
+}
+
+func addUpstreamRespTimeCharts(charts *Charts, histogram []float64) error {
+ if err := charts.Add(upsRespTime.Copy()); err != nil {
+ return err
+ }
+ if len(histogram) == 0 {
+ return nil
+ }
+ chart, err := newUpsRespTimeHistChart(histogram)
+ if err != nil {
+ return err
+ }
+ return charts.Add(chart)
+}
+
+func addCustomFieldsCharts(charts *Charts, fields []customField) error {
+ cs, err := newCustomFieldCharts(fields)
+ if err != nil {
+ return err
+ }
+ return charts.Add(cs...)
+}
+
+func addCustomTimeFieldsCharts(charts *Charts, fields []customTimeField) error {
+ cs, err := newCustomTimeFieldCharts(fields)
+ if err != nil {
+ return err
+ }
+ return charts.Add(cs...)
+}
+
+func addCustomNumericFieldsCharts(charts *module.Charts, fields []customNumericField) error {
+ for _, f := range fields {
+ chart := customNumericFieldSummaryChartTmpl.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, f.Name)
+ chart.Units = f.Units
+ chart.Ctx = fmt.Sprintf(chart.Ctx, f.Name)
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, f.Name)
+ dim.Div = f.Divisor
+ }
+
+ if err := charts.Add(chart); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
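Many of the chart variables above are templates: their ID, Fam, and dimension IDs contain %s placeholders and are instantiated per URL pattern or per custom field via Copy plus fmt.Sprintf. A minimal illustration of that instantiation step follows; "static" is a hypothetical URL pattern name used only for the example.

package weblog

// exampleURLPatternCharts shows how the %s-templated charts become concrete
// per-pattern charts. Illustration only.
func exampleURLPatternCharts() (*Charts, error) {
	name := "static"
	charts := &Charts{}
	if err := charts.Add(
		newURLPatternRespCodesChart(name),   // ID becomes "url_pattern_static_responses_by_status_code"
		newURLPatternReqMethodsChart(name),  // ID becomes "url_pattern_static_requests_by_http_method"
		newURLPatternBandwidthChart(name),   // dim IDs become "url_ptn_static_bytes_received" / "..._sent"
		newURLPatternReqProcTimeChart(name), // dim IDs become "url_ptn_static_req_proc_time_min/max/avg"
	); err != nil {
		return nil, err
	}
	return charts, nil
}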
diff --git a/src/go/plugin/go.d/modules/weblog/collect.go b/src/go/plugin/go.d/modules/weblog/collect.go
new file mode 100644
index 000000000..8f6bceb0f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/collect.go
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "runtime"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (w *WebLog) logPanicStackIfAny() {
+ err := recover()
+ if err == nil {
+ return
+ }
+ w.Errorf("[ERROR] %s\n", err)
+ for depth := 0; ; depth++ {
+ _, file, line, ok := runtime.Caller(depth)
+ if !ok {
+ break
+ }
+ w.Errorf("======> %d: %v:%d", depth, file, line)
+ }
+ panic(err)
+}
+
+func (w *WebLog) collect() (map[string]int64, error) {
+ defer w.logPanicStackIfAny()
+ w.mx.reset()
+
+ var mx map[string]int64
+
+ n, err := w.collectLogLines()
+
+ if n > 0 || err == nil {
+ mx = stm.ToMap(w.mx)
+ }
+ return mx, err
+}
+
+func (w *WebLog) collectLogLines() (int, error) {
+ logOnce := true
+ var n int
+ for {
+ w.line.reset()
+ err := w.parser.ReadLine(w.line)
+ if err != nil {
+ if err == io.EOF {
+ return n, nil
+ }
+ if !logs.IsParseError(err) {
+ return n, err
+ }
+ n++
+ if logOnce {
+ w.Infof("unmatched line: %v (parser: %s)", err, w.parser.Info())
+ logOnce = false
+ }
+ w.collectUnmatched()
+ continue
+ }
+ n++
+ if w.line.empty() {
+ w.collectUnmatched()
+ } else {
+ w.collectLogLine()
+ }
+ }
+}
+
+func (w *WebLog) collectLogLine() {
+ // https://github.com/netdata/netdata/issues/17716
+ if w.line.hasReqProcTime() && w.line.respCode == http.StatusSwitchingProtocols {
+ w.line.reqProcTime = emptyNumber
+ }
+ w.mx.Requests.Inc()
+ w.collectVhost()
+ w.collectPort()
+ w.collectReqScheme()
+ w.collectReqClient()
+ w.collectReqMethod()
+ w.collectReqURL()
+ w.collectReqProto()
+ w.collectRespCode()
+ w.collectReqSize()
+ w.collectRespSize()
+ w.collectReqProcTime()
+ w.collectUpsRespTime()
+ w.collectSSLProto()
+ w.collectSSLCipherSuite()
+ w.collectCustomFields()
+}
+
+func (w *WebLog) collectUnmatched() {
+ w.mx.Requests.Inc()
+ w.mx.ReqUnmatched.Inc()
+}
+
+func (w *WebLog) collectVhost() {
+ if !w.line.hasVhost() {
+ return
+ }
+ c, ok := w.mx.ReqVhost.GetP(w.line.vhost)
+ if !ok {
+ w.addDimToVhostChart(w.line.vhost)
+ }
+ c.Inc()
+}
+
+func (w *WebLog) collectPort() {
+ if !w.line.hasPort() {
+ return
+ }
+ c, ok := w.mx.ReqPort.GetP(w.line.port)
+ if !ok {
+ w.addDimToPortChart(w.line.port)
+ }
+ c.Inc()
+}
+
+func (w *WebLog) collectReqClient() {
+ if !w.line.hasReqClient() {
+ return
+ }
+ if strings.ContainsRune(w.line.reqClient, ':') {
+ w.mx.ReqIPv6.Inc()
+ w.mx.UniqueIPv6.Insert(w.line.reqClient)
+ return
+ }
+ // NOTE: count hostname as IPv4 address
+ w.mx.ReqIPv4.Inc()
+ w.mx.UniqueIPv4.Insert(w.line.reqClient)
+}
+
+func (w *WebLog) collectReqScheme() {
+ if !w.line.hasReqScheme() {
+ return
+ }
+ if w.line.reqScheme == "https" {
+ w.mx.ReqHTTPSScheme.Inc()
+ } else {
+ w.mx.ReqHTTPScheme.Inc()
+ }
+}
+
+func (w *WebLog) collectReqMethod() {
+ if !w.line.hasReqMethod() {
+ return
+ }
+ c, ok := w.mx.ReqMethod.GetP(w.line.reqMethod)
+ if !ok {
+ w.addDimToReqMethodChart(w.line.reqMethod)
+ }
+ c.Inc()
+}
+
+func (w *WebLog) collectReqURL() {
+ if !w.line.hasReqURL() {
+ return
+ }
+ for _, p := range w.urlPatterns {
+ if !p.MatchString(w.line.reqURL) {
+ continue
+ }
+ c, _ := w.mx.ReqURLPattern.GetP(p.name)
+ c.Inc()
+
+ w.collectURLPatternStats(p.name)
+ return
+ }
+}
+
+func (w *WebLog) collectReqProto() {
+ if !w.line.hasReqProto() {
+ return
+ }
+ c, ok := w.mx.ReqVersion.GetP(w.line.reqProto)
+ if !ok {
+ w.addDimToReqVersionChart(w.line.reqProto)
+ }
+ c.Inc()
+}
+
+func (w *WebLog) collectRespCode() {
+ if !w.line.hasRespCode() {
+ return
+ }
+
+ code := w.line.respCode
+ switch {
+ case code >= 100 && code < 300, code == 304, code == 401:
+ w.mx.ReqSuccess.Inc()
+ case code >= 300 && code < 400:
+ w.mx.ReqRedirect.Inc()
+ case code >= 400 && code < 500:
+ w.mx.ReqBad.Inc()
+ case code >= 500 && code < 600:
+ w.mx.ReqError.Inc()
+ }
+
+ switch code / 100 {
+ case 1:
+ w.mx.Resp1xx.Inc()
+ case 2:
+ w.mx.Resp2xx.Inc()
+ case 3:
+ w.mx.Resp3xx.Inc()
+ case 4:
+ w.mx.Resp4xx.Inc()
+ case 5:
+ w.mx.Resp5xx.Inc()
+ }
+
+ codeStr := strconv.Itoa(code)
+ c, ok := w.mx.RespCode.GetP(codeStr)
+ if !ok {
+ w.addDimToRespCodesChart(codeStr)
+ }
+ c.Inc()
+}
+
+func (w *WebLog) collectReqSize() {
+ if !w.line.hasReqSize() {
+ return
+ }
+ w.mx.BytesReceived.Add(float64(w.line.reqSize))
+}
+
+func (w *WebLog) collectRespSize() {
+ if !w.line.hasRespSize() {
+ return
+ }
+ w.mx.BytesSent.Add(float64(w.line.respSize))
+}
+
+func (w *WebLog) collectReqProcTime() {
+ if !w.line.hasReqProcTime() {
+ return
+ }
+ w.mx.ReqProcTime.Observe(w.line.reqProcTime)
+ if w.mx.ReqProcTimeHist == nil {
+ return
+ }
+ w.mx.ReqProcTimeHist.Observe(w.line.reqProcTime)
+}
+
+func (w *WebLog) collectUpsRespTime() {
+ if !w.line.hasUpsRespTime() {
+ return
+ }
+ w.mx.UpsRespTime.Observe(w.line.upsRespTime)
+ if w.mx.UpsRespTimeHist == nil {
+ return
+ }
+ w.mx.UpsRespTimeHist.Observe(w.line.upsRespTime)
+}
+
+func (w *WebLog) collectSSLProto() {
+ if !w.line.hasSSLProto() {
+ return
+ }
+ c, ok := w.mx.ReqSSLProto.GetP(w.line.sslProto)
+ if !ok {
+ w.addDimToSSLProtoChart(w.line.sslProto)
+ }
+ c.Inc()
+}
+
+func (w *WebLog) collectSSLCipherSuite() {
+ if !w.line.hasSSLCipherSuite() {
+ return
+ }
+ c, ok := w.mx.ReqSSLCipherSuite.GetP(w.line.sslCipherSuite)
+ if !ok {
+ w.addDimToSSLCipherSuiteChart(w.line.sslCipherSuite)
+ }
+ c.Inc()
+}
+
+func (w *WebLog) collectURLPatternStats(name string) {
+ v, ok := w.mx.URLPatternStats[name]
+ if !ok {
+ return
+ }
+ if w.line.hasRespCode() {
+ status := strconv.Itoa(w.line.respCode)
+ c, ok := v.RespCode.GetP(status)
+ if !ok {
+ w.addDimToURLPatternRespCodesChart(name, status)
+ }
+ c.Inc()
+ }
+
+ if w.line.hasReqMethod() {
+ c, ok := v.ReqMethod.GetP(w.line.reqMethod)
+ if !ok {
+ w.addDimToURLPatternReqMethodsChart(name, w.line.reqMethod)
+ }
+ c.Inc()
+ }
+
+ if w.line.hasReqSize() {
+ v.BytesReceived.Add(float64(w.line.reqSize))
+ }
+
+ if w.line.hasRespSize() {
+ v.BytesSent.Add(float64(w.line.respSize))
+ }
+ if w.line.hasReqProcTime() {
+ v.ReqProcTime.Observe(w.line.reqProcTime)
+ }
+}
+
+func (w *WebLog) collectCustomFields() {
+ if !w.line.hasCustomFields() {
+ return
+ }
+
+ for _, cv := range w.line.custom.values {
+ _, _ = cv.name, cv.value
+
+ if patterns, ok := w.customFields[cv.name]; ok {
+ for _, pattern := range patterns {
+ if !pattern.MatchString(cv.value) {
+ continue
+ }
+ v, ok := w.mx.ReqCustomField[cv.name]
+ if !ok {
+ break
+ }
+ c, _ := v.GetP(pattern.name)
+ c.Inc()
+ break
+ }
+ } else if histogram, ok := w.customTimeFields[cv.name]; ok {
+ v, ok := w.mx.ReqCustomTimeField[cv.name]
+ if !ok {
+ continue
+ }
+ ctf, err := strconv.ParseFloat(cv.value, 64)
+ if err != nil || !isTimeValid(ctf) {
+ continue
+ }
+ v.Time.Observe(ctf)
+ if histogram != nil {
+ v.TimeHist.Observe(ctf * timeMultiplier(cv.value))
+ }
+ } else if w.customNumericFields[cv.name] {
+ m, ok := w.mx.ReqCustomNumericField[cv.name]
+ if !ok {
+ continue
+ }
+ v, err := strconv.ParseFloat(cv.value, 64)
+ if err != nil {
+ continue
+ }
+ v *= float64(m.multiplier)
+ m.Summary.Observe(v)
+ }
+ }
+}
+
+func (w *WebLog) addDimToVhostChart(vhost string) {
+ chart := w.Charts().Get(reqByVhost.ID)
+ if chart == nil {
+ w.Warningf("add dimension: no '%s' chart", reqByVhost.ID)
+ return
+ }
+ dim := &Dim{
+ ID: "req_vhost_" + vhost,
+ Name: vhost,
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (w *WebLog) addDimToPortChart(port string) {
+ chart := w.Charts().Get(reqByPort.ID)
+ if chart == nil {
+ w.Warningf("add dimension: no '%s' chart", reqByPort.ID)
+ return
+ }
+ dim := &Dim{
+ ID: "req_port_" + port,
+ Name: port,
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (w *WebLog) addDimToReqMethodChart(method string) {
+ chart := w.Charts().Get(reqByMethod.ID)
+ if chart == nil {
+ w.Warningf("add dimension: no '%s' chart", reqByMethod.ID)
+ return
+ }
+ dim := &Dim{
+ ID: "req_method_" + method,
+ Name: method,
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (w *WebLog) addDimToReqVersionChart(version string) {
+ chart := w.Charts().Get(reqByVersion.ID)
+ if chart == nil {
+ w.Warningf("add dimension: no '%s' chart", reqByVersion.ID)
+ return
+ }
+ dim := &Dim{
+ ID: "req_version_" + version,
+ Name: version,
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (w *WebLog) addDimToSSLProtoChart(proto string) {
+ chart := w.Charts().Get(reqBySSLProto.ID)
+ if chart == nil {
+ chart = reqBySSLProto.Copy()
+ if err := w.Charts().Add(chart); err != nil {
+ w.Warning(err)
+ return
+ }
+ }
+ dim := &Dim{
+ ID: "req_ssl_proto_" + proto,
+ Name: proto,
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (w *WebLog) addDimToSSLCipherSuiteChart(cipher string) {
+ chart := w.Charts().Get(reqBySSLCipherSuite.ID)
+ if chart == nil {
+ chart = reqBySSLCipherSuite.Copy()
+ if err := w.Charts().Add(chart); err != nil {
+ w.Warning(err)
+ return
+ }
+ }
+ dim := &Dim{
+ ID: "req_ssl_cipher_suite_" + cipher,
+ Name: cipher,
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (w *WebLog) addDimToRespCodesChart(code string) {
+ chart := w.findRespCodesChart(code)
+ if chart == nil {
+ w.Warning("add dimension: cant find resp codes chart")
+ return
+ }
+ dim := &Dim{
+ ID: "resp_code_" + code,
+ Name: code,
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (w *WebLog) addDimToURLPatternRespCodesChart(name, code string) {
+ id := fmt.Sprintf(urlPatternRespCodes.ID, name)
+ chart := w.Charts().Get(id)
+ if chart == nil {
+ w.Warningf("add dimension: no '%s' chart", id)
+ return
+ }
+ dim := &Dim{
+ ID: fmt.Sprintf("url_ptn_%s_resp_code_%s", name, code),
+ Name: code,
+ Algo: module.Incremental,
+ }
+
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (w *WebLog) addDimToURLPatternReqMethodsChart(name, method string) {
+ id := fmt.Sprintf(urlPatternReqMethods.ID, name)
+ chart := w.Charts().Get(id)
+ if chart == nil {
+ w.Warningf("add dimension: no '%s' chart", id)
+ return
+ }
+ dim := &Dim{
+ ID: fmt.Sprintf("url_ptn_%s_req_method_%s", name, method),
+ Name: method,
+ Algo: module.Incremental,
+ }
+
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ return
+ }
+ chart.MarkNotCreated()
+}
+
+func (w *WebLog) findRespCodesChart(code string) *Chart {
+ if !w.GroupRespCodes {
+ return w.Charts().Get(respCodes.ID)
+ }
+
+ var id string
+ switch class := code[:1]; class {
+ case "1":
+ id = respCodes1xx.ID
+ case "2":
+ id = respCodes2xx.ID
+ case "3":
+ id = respCodes3xx.ID
+ case "4":
+ id = respCodes4xx.ID
+ case "5":
+ id = respCodes5xx.ID
+ default:
+ return nil
+ }
+ return w.Charts().Get(id)
+}
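The collect functions above all follow one idiom: GetP on a counter map returns the counter together with a flag saying whether it already existed, and the first time a value (vhost, port, method, status code, and so on) is seen, a matching chart dimension is added. The sketch below restates that idiom with a hypothetical counter map; it is not the module's actual metrics type.

package weblog

// counterVecSketch is a hypothetical stand-in for the counter maps used by
// collectVhost, collectPort, and friends. Illustration of the GetP idiom only.
type counterVecSketch map[string]*int64

// getP returns the counter for key and reports whether it already existed;
// a missing key is what triggers the chart-dimension add in collect.go.
func (c counterVecSketch) getP(key string) (counter *int64, existed bool) {
	if v, ok := c[key]; ok {
		return v, true
	}
	v := new(int64)
	c[key] = v
	return v, false
}

// exampleCollectVhost mirrors collectVhost: count the vhost and, on first
// sight, add a dimension to the vhosts chart via the supplied callback.
func exampleCollectVhost(vhosts counterVecSketch, vhost string, addDim func(string)) {
	c, ok := vhosts.getP(vhost)
	if !ok {
		addDim(vhost)
	}
	*c++
}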
diff --git a/src/go/plugin/go.d/modules/weblog/config_schema.json b/src/go/plugin/go.d/modules/weblog/config_schema.json
new file mode 100644
index 000000000..61da661a4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/config_schema.json
@@ -0,0 +1,453 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "path": {
+ "title": "Log file",
+ "description": "The file path to the Webserver log file.",
+ "type": "string",
+ "default": "/var/log/nginx/access.log",
+ "pattern": "^$|^/"
+ },
+ "exclude_path": {
+ "title": "Exclude path",
+ "description": "Pattern to exclude log files.",
+ "type": "string",
+ "default": "*.gz"
+ },
+ "histogram": {
+ "title": "Request processing time histogram",
+ "description": "Buckets for the histogram in milliseconds.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "uniqueItems": true
+ },
+ "log_type": {
+ "title": "Log parser",
+ "description": "Type of parser to use for parsing log files.",
+ "type": "string",
+ "enum": [
+ "auto",
+ "csv",
+ "regexp",
+ "json",
+ "ltsv"
+ ],
+ "default": "auto"
+ },
+ "url_patterns": {
+ "title": "URL patterns",
+ "description": "Patterns used to match against the full original request URI. For each pattern, the web log will collect responses by status code, method, bandwidth, and processing time.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Patterns",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "name": {
+ "title": "Dimension",
+ "description": "A unique name used as a dimension name for the pattern.",
+ "type": "string"
+ },
+ "match": {
+ "title": "Pattern",
+ "description": "The [pattern string](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme) used to match against the full original request URI.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "match"
+ ]
+ },
+ "uniqueItems": true
+ },
+ "custom_fields": {
+ "title": "Custom fields",
+ "description": "Configuration for custom fields. Fild value expected to be string. Patterns used to match against the value of the specified field. For each pattern, the web log will collect responses by status code.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "items": {
+ "title": "Field configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "name": {
+ "title": "Field name",
+ "description": "The name of the custom field.",
+ "type": "string"
+ },
+ "patterns": {
+ "title": "Patterns",
+ "description": "",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "User patterns",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "name": {
+ "title": "Dimension",
+ "description": "A unique name used as a dimension name for the pattern.",
+ "type": "string"
+ },
+ "match": {
+ "title": "Pattern",
+ "description": "The [pattern string](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#readme) used to match against the field value.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "match"
+ ]
+ }
+ }
+ },
+ "required": [
+ "name",
+ "patterns"
+ ]
+ }
+ },
+ "custom_time_fields": {
+ "title": "Custom time fields",
+ "description": "Configuration for custom time fields. Field value expected to be numeric and represent time. For each field, the web log will calculate the minimum, average, maximum value, and histogram.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Field configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "name": {
+ "title": "Field mame",
+ "description": "The name of the custom time field.",
+ "type": "string"
+ },
+ "histogram": {
+ "title": "Histogram",
+ "description": "Buckets for the histogram in milliseconds.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "uniqueItems": true,
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "default": [
+ 0.005,
+ 0.01,
+ 0.025,
+ 0.05,
+ 0.1,
+ 0.25,
+ 0.5,
+ 1,
+ 2.5,
+ 5,
+ 10
+ ]
+ }
+ },
+ "required": [
+ "name"
+ ]
+ }
+ },
+ "custom_numeric_fields": {
+ "title": "Custom numeric field",
+ "description": "Configuration for custom numeric fields. Fild value expected to be numeric. For each field, the web log will calculate the minimum, average, maximum value.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Field configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "name": {
+ "title": "Name",
+ "description": "The name of the custom numeric field.",
+ "type": "string"
+ },
+ "units": {
+ "title": "Units",
+ "description": "The unit label for the vertical axis on charts.",
+ "type": "string"
+ },
+ "multiplier": {
+ "title": "Multiplier",
+ "description": "A value to multiply the field value.",
+ "type": "number",
+ "not": {
+ "const": 0
+ },
+ "default": 1
+ },
+ "divisor": {
+ "title": "Divisor",
+ "description": "A value to divide the field value.",
+ "type": "number",
+ "not": {
+ "const": 0
+ },
+ "default": 1
+ }
+ },
+ "required": [
+ "name",
+ "units",
+ "multiplier",
+ "divisor"
+ ]
+ }
+ }
+ },
+ "required": [
+ "path",
+ "log_type"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ },
+ "dependencies": {
+ "log_type": {
+ "oneOf": [
+ {
+ "properties": {
+ "log_type": {
+ "const": "auto"
+ }
+ }
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "csv"
+ },
+ "csv_config": {
+ "title": "CSV parser configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "format": {
+ "title": "Format",
+ "description": "Log format.",
+ "type": "string",
+ "default": "$remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent"
+ },
+ "delimiter": {
+ "title": "Delimiter",
+ "description": "Delimiter used to separate fields in the log file. Default: space (' ').",
+ "type": "string",
+ "default": " "
+ }
+ },
+ "required": [
+ "format",
+ "delimiter"
+ ]
+ }
+ },
+ "required": [
+ "csv_config"
+ ]
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "regexp"
+ },
+ "regexp_config": {
+ "title": "Regular expression parser configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "pattern": {
+ "title": "Pattern with named groups",
+ "description": "Regular expression pattern with named groups. Use named groups for known fields.",
+ "type": "string",
+ "default": ""
+ }
+ },
+ "required": [
+ "pattern"
+ ]
+ }
+ },
+ "required": [
+ "regexp_config"
+ ]
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "json"
+ },
+ "json_config": {
+ "title": "JSON parser configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "mapping": {
+ "title": "Field mapping",
+ "description": "Dictionary mapping fields in logs to known fields.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "ltsv"
+ },
+ "ltsv_config": {
+ "title": "LTSV parser configuration",
+ "type": [
+ "object",
+ "null"
+ ],
+ "properties": {
+ "field_delimiter": {
+ "title": "Field delimiter",
+ "description": "Delimiter used to separate fields in LTSV logs. Default: tab ('\\t').",
+ "type": "string",
+ "default": "\t"
+ },
+ "value_delimiter": {
+ "title": "Value delimiter",
+ "description": "Delimiter used to separate label-value pairs in LTSV logs.",
+ "type": "string",
+ "default": ":"
+ },
+ "mapping": {
+ "title": "Field mapping",
+ "description": "Dictionary mapping fields in logs to known fields.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ }
+ }
+ ]
+ }
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "log_type": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "custom_fields": {
+ "ui:collapsible": true
+ },
+ "custom_time_fields": {
+ "ui:collapsible": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "path",
+ "exclude_path",
+ "histogram"
+ ]
+ },
+ {
+ "title": "Parser",
+ "fields": [
+ "log_type",
+ "csv_config",
+ "ltsv_config",
+ "regexp_config",
+ "json_config"
+ ]
+ },
+ {
+ "title": "URL patterns",
+ "fields": [
+ "url_patterns"
+ ]
+ },
+ {
+ "title": "Custom fields",
+ "fields": [
+ "custom_fields",
+ "custom_time_fields",
+ "custom_numeric_fields"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/weblog/init.go b/src/go/plugin/go.d/modules/weblog/init.go
new file mode 100644
index 000000000..c76e43f30
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/init.go
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+type pattern struct {
+ name string
+ matcher.Matcher
+}
+
+func newPattern(up userPattern) (*pattern, error) {
+ if up.Name == "" || up.Match == "" {
+ return nil, errors.New("empty 'name' or 'match'")
+ }
+
+ m, err := matcher.Parse(up.Match)
+ if err != nil {
+ return nil, err
+ }
+ return &pattern{name: up.Name, Matcher: m}, nil
+}
+
+func (w *WebLog) createURLPatterns() error {
+ if len(w.URLPatterns) == 0 {
+ w.Debug("skipping URL patterns creating, no patterns provided")
+ return nil
+ }
+ w.Debug("starting URL patterns creating")
+ for _, up := range w.URLPatterns {
+ p, err := newPattern(up)
+ if err != nil {
+ return fmt.Errorf("create pattern %+v: %v", up, err)
+ }
+ w.Debugf("created pattern '%s', type '%T', match '%s'", p.name, p.Matcher, up.Match)
+ w.urlPatterns = append(w.urlPatterns, p)
+ }
+ w.Debugf("created %d URL pattern(s)", len(w.URLPatterns))
+ return nil
+}
+
+func (w *WebLog) createCustomFields() error {
+ if len(w.CustomFields) == 0 {
+ w.Debug("skipping custom fields creating, no custom fields provided")
+ return nil
+ }
+
+ w.Debug("starting custom fields creating")
+ w.customFields = make(map[string][]*pattern)
+ for i, cf := range w.CustomFields {
+ if cf.Name == "" {
+ return fmt.Errorf("create custom field: name not set (field %d)", i+1)
+ }
+ for _, up := range cf.Patterns {
+ p, err := newPattern(up)
+ if err != nil {
+ return fmt.Errorf("create field '%s' pattern %+v: %v", cf.Name, up, err)
+ }
+ w.Debugf("created field '%s', pattern '%s', type '%T', match '%s'", cf.Name, p.name, p.Matcher, up.Match)
+ w.customFields[cf.Name] = append(w.customFields[cf.Name], p)
+ }
+ }
+ w.Debugf("created %d custom field(s)", len(w.CustomFields))
+ return nil
+}
+
+func (w *WebLog) createCustomTimeFields() error {
+ if len(w.CustomTimeFields) == 0 {
+ w.Debug("skipping custom time fields creating, no custom time fields provided")
+ return nil
+ }
+
+ w.Debug("starting custom time fields creating")
+ w.customTimeFields = make(map[string][]float64)
+ for i, ctf := range w.CustomTimeFields {
+ if ctf.Name == "" {
+ return fmt.Errorf("create custom field: name not set (field %d)", i+1)
+ }
+ w.customTimeFields[ctf.Name] = ctf.Histogram
+ w.Debugf("created time field '%s', histogram '%v'", ctf.Name, ctf.Histogram)
+ }
+ w.Debugf("created %d custom time field(s)", len(w.CustomTimeFields))
+ return nil
+}
+
+func (w *WebLog) createCustomNumericFields() error {
+ if len(w.CustomNumericFields) == 0 {
+ w.Debug("no custom time fields provided")
+ return nil
+ }
+
+ w.Debugf("creating custom numeric fields for '%+v'", w.CustomNumericFields)
+
+ w.customNumericFields = make(map[string]bool)
+
+ for i := range w.CustomNumericFields {
+ v := w.CustomNumericFields[i]
+ if v.Name == "" {
+ return fmt.Errorf("custom numeric field (%d): 'name' not set", i+1)
+ }
+ if v.Units == "" {
+ return fmt.Errorf("custom numeric field (%s): 'units' not set", v.Name)
+ }
+ if v.Multiplier <= 0 {
+ v.Multiplier = 1
+ }
+ if v.Divisor <= 0 {
+ v.Divisor = 1
+ }
+ w.CustomNumericFields[i] = v
+ w.customNumericFields[v.Name] = true
+ }
+
+ return nil
+}
+
+func (w *WebLog) createLogLine() {
+ w.line = newEmptyLogLine()
+
+ for v := range w.customFields {
+ w.line.custom.fields[v] = struct{}{}
+ }
+ for v := range w.customTimeFields {
+ w.line.custom.fields[v] = struct{}{}
+ }
+ for v := range w.customNumericFields {
+ w.line.custom.fields[v] = struct{}{}
+ }
+}
+
+func (w *WebLog) createLogReader() error {
+ w.Cleanup()
+ w.Debug("starting log reader creating")
+
+ reader, err := logs.Open(w.Path, w.ExcludePath, w.Logger)
+ if err != nil {
+ return fmt.Errorf("creating log reader: %v", err)
+ }
+
+ w.Debugf("created log reader, current file '%s'", reader.CurrentFilename())
+ w.file = reader
+
+ return nil
+}
+
+func (w *WebLog) createParser() error {
+ w.Debug("starting parser creating")
+
+ const readLinesNum = 100
+
+ lines, err := logs.ReadLastLines(w.file.CurrentFilename(), readLinesNum)
+ if err != nil {
+ return fmt.Errorf("failed to read last lines: %v", err)
+ }
+
+ var found bool
+ for _, line := range lines {
+ if line = strings.TrimSpace(line); line == "" {
+ continue
+ }
+ w.Debugf("last line: '%s'", line)
+
+ w.parser, err = w.newParser([]byte(line))
+ if err != nil {
+ w.Debugf("failed to create parser from line: %v", err)
+ continue
+ }
+
+ w.line.reset()
+
+ if err = w.parser.Parse([]byte(line), w.line); err != nil {
+ w.Debugf("failed to parse line: %v", err)
+ continue
+ }
+
+ if err = w.line.verify(); err != nil {
+ w.Debugf("failed to verify line: %v", err)
+ continue
+ }
+
+ found = true
+ break
+ }
+
+ if !found {
+ return fmt.Errorf("failed to create log parser (file '%s')", w.file.CurrentFilename())
+ }
+
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md b/src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md
new file mode 100644
index 000000000..740af5f1d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/integrations/web_server_log_files.md
@@ -0,0 +1,405 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/weblog/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/weblog/metadata.yaml"
+sidebar_label: "Web server log files"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Web server log files
+
+
+<img src="https://netdata.cloud/img/webservers.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: web_log
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors web servers by parsing their log files.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It automatically detects log files of web servers running on localhost.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per Web server log files instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| web_log.requests | requests | requests/s |
+| web_log.excluded_requests | unmatched | requests/s |
+| web_log.type_requests | success, bad, redirect, error | requests/s |
+| web_log.status_code_class_responses | 1xx, 2xx, 3xx, 4xx, 5xx | responses/s |
+| web_log.status_code_class_1xx_responses | a dimension per 1xx code | responses/s |
+| web_log.status_code_class_2xx_responses | a dimension per 2xx code | responses/s |
+| web_log.status_code_class_3xx_responses | a dimension per 3xx code | responses/s |
+| web_log.status_code_class_4xx_responses | a dimension per 4xx code | responses/s |
+| web_log.status_code_class_5xx_responses | a dimension per 5xx code | responses/s |
+| web_log.bandwidth | received, sent | kilobits/s |
+| web_log.request_processing_time | min, max, avg | milliseconds |
+| web_log.requests_processing_time_histogram | a dimension per bucket | requests/s |
+| web_log.upstream_response_time | min, max, avg | milliseconds |
+| web_log.upstream_responses_time_histogram | a dimension per bucket | requests/s |
+| web_log.current_poll_uniq_clients | ipv4, ipv6 | clients |
+| web_log.vhost_requests | a dimension per vhost | requests/s |
+| web_log.port_requests | a dimension per port | requests/s |
+| web_log.scheme_requests | http, https | requests/s |
+| web_log.http_method_requests | a dimension per HTTP method | requests/s |
+| web_log.http_version_requests | a dimension per HTTP version | requests/s |
+| web_log.ip_proto_requests | ipv4, ipv6 | requests/s |
+| web_log.ssl_proto_requests | a dimension per SSL protocol | requests/s |
+| web_log.ssl_cipher_suite_requests | a dimension per SSL cipher suite | requests/s |
+| web_log.url_pattern_requests | a dimension per URL pattern | requests/s |
+| web_log.custom_field_pattern_requests | a dimension per custom field pattern | requests/s |
+
+### Per custom time field
+
+TBD
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| web_log.custom_time_field_summary | min, max, avg | milliseconds |
+| web_log.custom_time_field_histogram | a dimension per bucket | observations |
+
+### Per custom numeric field
+
+TBD
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| web_log.custom_numeric_field_{{field_name}}_summary | min, max, avg | {{units}} |
+
+### Per URL pattern
+
+TBD
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| web_log.url_pattern_status_code_responses | a dimension per pattern | responses/s |
+| web_log.url_pattern_http_method_requests | a dimension per HTTP method | requests/s |
+| web_log.url_pattern_bandwidth | received, sent | kilobits/s |
+| web_log.url_pattern_request_processing_time | min, max, avg | milliseconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ web_log_1m_unmatched ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.excluded_requests | percentage of unparsed log lines over the last minute |
+| [ web_log_1m_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401) |
+| [ web_log_1m_redirects ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of redirection HTTP requests over the last minute (3xx except 304) |
+| [ web_log_1m_bad_requests ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of client error HTTP requests over the last minute (4xx except 401) |
+| [ web_log_1m_internal_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of server error HTTP requests over the last minute (5xx) |
+| [ web_log_web_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.request_processing_time | average HTTP response time over the last 1 minute |
+| [ web_log_5m_requests_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf) | web_log.type_requests | ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/web_log.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/web_log.conf
+```
+#### Options
+
+Weblog is aware of how to parse and interpret the following fields (**known fields**):
+
+> [nginx](https://nginx.org/en/docs/varindex.html)
+>
+> [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)
+
+| nginx | apache | description |
+|-------------------------|----------|------------------------------------------------------------------------------------------|
+| $host ($http_host) | %v | Name of the server which accepted a request. |
+| $server_port | %p | Port of the server which accepted a request. |
+| $scheme | - | Request scheme. "http" or "https". |
+| $remote_addr | %a (%h) | Client address. |
+| $request | %r | Full original request line. The line is "$request_method $request_uri $server_protocol". |
+| $request_method | %m | Request method. Usually "GET" or "POST". |
+| $request_uri | %U | Full original request URI. |
+| $server_protocol | %H | Request protocol. Usually "HTTP/1.0", "HTTP/1.1", or "HTTP/2.0". |
+| $status | %s (%>s) | Response status code. |
+| $request_length | %I | Bytes received from a client, including request and headers. |
+| $bytes_sent | %O | Bytes sent to a client, including request and headers. |
+| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |
+| $request_time | %D | Request processing time. |
+| $upstream_response_time | - | Time spent on receiving the response from the upstream server. |
+| $ssl_protocol | - | Protocol of an established SSL connection. |
+| $ssl_cipher | - | String of ciphers used for an established SSL connection. |
+
+Notes:
+
+- Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either disabling HostnameLookups or using `%a` instead of `%h`.
+- Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.
+- To get `%I` and `%O` working you need to enable `mod_logio` on Apache.
+- NGINX logs the URI with query parameters, Apache doesn't.
+- `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to also include the individual fields.
+- Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| path | Path to the web server log file. | | yes |
+| exclude_path | Path to exclude. | *.gz | no |
+| url_patterns | List of URL patterns. | [] | no |
+| url_patterns.name | Used as a dimension name. | | yes |
+| url_patterns.match | Used to match against the full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format). | | yes |
+| log_type | Log parser type. | auto | no |
+| csv_config | CSV log parser config. | | no |
+| csv_config.delimiter | CSV field delimiter. | space (' ') | no |
+| csv_config.format | CSV log format. | | no |
+| ltsv_config | LTSV log parser config. | | no |
+| ltsv_config.field_delimiter | LTSV field delimiter. | \t | no |
+| ltsv_config.value_delimiter | LTSV value delimiter. | : | no |
+| ltsv_config.mapping | LTSV fields mapping to **known fields**. | | yes |
+| json_config | JSON log parser config. | | no |
+| json_config.mapping | JSON fields mapping to **known fields**. | | yes |
+| regexp_config | RegExp log parser config. | | no |
+| regexp_config.pattern | RegExp pattern with named groups. | | yes |
+
+##### url_patterns
+
+"URL pattern" scope metrics will be collected for each URL pattern.
+
+Option syntax:
+
+```yaml
+url_patterns:
+ - name: name1
+ match: pattern1
+ - name: name2
+ match: pattern2
+```
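+
+For instance, a sketch that groups requests for static assets and an exact admin URI. The names and expressions are illustrative only; see the matcher syntax link above for the full pattern format:
+
+```yaml
+url_patterns:
+  - name: static
+    match: '~ \.(js|css|png|jpe?g|gif|ico)$'
+  - name: admin
+    match: '= /admin'
+```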
+
+
+##### log_type
+
+Weblog supports 5 different log parsers:
+
+| Parser type | Description |
+|-------------|-------------------------------------------|
+| auto | Use CSV and auto-detect format |
+| csv | Comma-separated values |
+| json | [JSON](https://www.json.org/json-en.html) |
+| ltsv | [LTSV](http://ltsv.org/) |
+| regexp | Regular expression with named groups |
+
+Syntax:
+
+```yaml
+log_type: auto
+```
+
+If the `log_type` parameter is set to `auto` (the default), weblog will try to auto-detect the appropriate log parser and log format using the last line of the log file.
+
+- checks if the format is `CSV` (using a regexp).
+- checks if the format is `JSON` (using a regexp).
+- assumes the format is `CSV` and tries to find an appropriate `CSV` log format using a predefined list of formats. It tries to parse the line using each of them in the following order (the first one that matches is used):
+
+ ```sh
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent
+ ```
+
+ If you're using the default Apache/NGINX log format, auto-detect will work for you. If it doesn't work, you need to set the format manually.
+
+
+##### csv_config.format
+
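+The format is a list of **known fields** separated by the `csv_config.delimiter` character (a space by default). A minimal sketch, using the collector's default format from the config schema; adjust the fields to match your actual log format:
+
+```yaml
+log_type: csv
+csv_config:
+  format: '$remote_addr - - [$time_local] "$request" $status $body_bytes_sent'
+```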
+
+
+##### ltsv_config.mapping
+
+The mapping is a dictionary where the key is a field name as it appears in the logs, and the value is the corresponding **known field**.
+
+> **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+```yaml
+log_type: ltsv
+ltsv_config:
+ mapping:
+ label1: field1
+ label2: field2
+```
+
+
+##### json_config.mapping
+
+The mapping is a dictionary where the key is a field name as it appears in the logs, and the value is the corresponding **known field**.
+
+> **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+```yaml
+log_type: json
+json_config:
+ mapping:
+ label1: field1
+ label2: field2
+```
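+
+For example, a sketch mapping hypothetical JSON log keys (`vhost`, `client`, `code`, `bytes_out` are illustrative names, not required ones) to **known fields**:
+
+```yaml
+log_type: json
+json_config:
+  mapping:
+    vhost: host
+    client: remote_addr
+    code: status
+    bytes_out: body_bytes_sent
+```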
+
+
+##### regexp_config.pattern
+
+Use a pattern with named subexpressions (named groups). These names should be **known fields**.
+
+> **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+Syntax:
+
+```yaml
+log_type: regexp
+regexp_config:
+ pattern: PATTERN
+```
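+
+A sketch for a common NGINX/Apache-style access log line; the group names are **known fields**, while the expression itself is illustrative and must be adapted to your log format:
+
+```yaml
+log_type: regexp
+regexp_config:
+  pattern: '^(?P<remote_addr>[^ ]+) - - \[[^\]]+\] "(?P<request>[^"]+)" (?P<status>\d+) (?P<body_bytes_sent>\d+)'
+```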
+
+
+</details>
+
+#### Examples
+There are no configuration examples.
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `web_log` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m web_log
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `web_log` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep web_log
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep web_log /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep web_log
+```
+
+
diff --git a/src/go/plugin/go.d/modules/weblog/logline.go b/src/go/plugin/go.d/modules/weblog/logline.go
new file mode 100644
index 000000000..5a69593b9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/logline.go
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// TODO: it is not clear how to handle "-", current handling is not good
+// In general it is:
+// - If a field is unused in a particular entry dash "-" marks the omitted field.
+// In addition to that "-" is used as zero value in:
+// - apache: %b '-' when no bytes are sent.
+//
+// Log Format:
+// - CLF: https://www.w3.org/Daemon/User/Config/Logging.html#common-logfile-format
+// - ELF: https://www.w3.org/TR/WD-logfile.html
+// - Apache CLF: https://httpd.apache.org/docs/trunk/logs.html#common
+
+// Variables:
+// - nginx: http://nginx.org/en/docs/varindex.html
+// - apache: http://httpd.apache.org/docs/current/mod/mod_log_config.html#logformat
+// - IIS: https://learn.microsoft.com/en-us/windows/win32/http/w3c-logging
+
+/*
+| nginx | apache | description |
+|-------------------------|-----------|-----------------------------------------------|
+| $host ($http_host) | %v | Name of the server which accepted a request.
+| $server_port | %p | Port of the server which accepted a request.
+| $scheme | - | Request scheme. "http" or "https".
+| $remote_addr | %a (%h) | Client address.
+| $request | %r | Full original request line. The line is "$request_method $request_uri $server_protocol".
+| $request_method | %m | Request method. Usually "GET" or "POST".
+| $request_uri | %U | Full original request URI.
+| $server_protocol | %H | Request protocol. Usually "HTTP/1.0", "HTTP/1.1", or "HTTP/2.0".
+| $status | %s (%>s) | Response status code.
+| $request_length | %I | Bytes received from a client, including request and headers.
+| $bytes_sent | %O | Bytes sent to a client, including request and headers.
+| $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header.
+| $request_time | %D | Request processing time.
+| $upstream_response_time | - | Time spent on receiving the response from the upstream server.
+| $ssl_protocol | - | Protocol of an established SSL connection.
+| $ssl_cipher | - | String of ciphers used for an established SSL connection.
+*/
+
+var (
+ errEmptyLine = errors.New("empty line")
+ errBadVhost = errors.New("bad vhost")
+ errBadVhostPort = errors.New("bad vhost with port")
+ errBadPort = errors.New("bad port")
+ errBadReqScheme = errors.New("bad req scheme")
+ errBadReqClient = errors.New("bad req client")
+ errBadRequest = errors.New("bad request")
+ errBadReqMethod = errors.New("bad req method")
+ errBadReqURL = errors.New("bad req url")
+ errBadReqProto = errors.New("bad req protocol")
+ errBadReqSize = errors.New("bad req size")
+ errBadRespCode = errors.New("bad resp status code")
+ errBadRespSize = errors.New("bad resp size")
+ errBadReqProcTime = errors.New("bad req processing time")
+ errBadUpsRespTime = errors.New("bad upstream resp time")
+ errBadSSLProto = errors.New("bad ssl protocol")
+ errBadSSLCipherSuite = errors.New("bad ssl cipher suite")
+)
+
+func newEmptyLogLine() *logLine {
+ var l logLine
+ l.custom.fields = make(map[string]struct{})
+ l.custom.values = make([]customValue, 0, 20)
+ l.reset()
+ return &l
+}
+
+type (
+ logLine struct {
+ web
+ custom custom
+ }
+ web struct {
+ vhost string
+ port string
+ reqScheme string
+ reqClient string
+ reqMethod string
+ reqURL string
+ reqProto string
+ reqSize int
+ reqProcTime float64
+ respCode int
+ respSize int
+ upsRespTime float64
+ sslProto string
+ sslCipherSuite string
+ }
+ custom struct {
+ fields map[string]struct{}
+ values []customValue
+ }
+ customValue struct {
+ name string
+ value string
+ }
+)
+
+func (l *logLine) Assign(field string, value string) (err error) {
+ if value == "" {
+ return
+ }
+
+ switch field {
+ case "host", "http_host", "v":
+ err = l.assignVhost(value)
+ case "server_port", "p":
+ err = l.assignPort(value)
+ case "host:$server_port", "v:%p":
+ err = l.assignVhostWithPort(value)
+ case "scheme":
+ err = l.assignReqScheme(value)
+ case "remote_addr", "a", "h":
+ err = l.assignReqClient(value)
+ case "request", "r":
+ err = l.assignRequest(value)
+ case "request_method", "m":
+ err = l.assignReqMethod(value)
+ case "request_uri", "U":
+ err = l.assignReqURL(value)
+ case "server_protocol", "H":
+ err = l.assignReqProto(value)
+ case "status", "s", ">s":
+ err = l.assignRespCode(value)
+ case "request_length", "I":
+ err = l.assignReqSize(value)
+ case "bytes_sent", "body_bytes_sent", "b", "O", "B":
+ err = l.assignRespSize(value)
+ case "request_time", "D":
+ err = l.assignReqProcTime(value)
+ case "upstream_response_time":
+ err = l.assignUpsRespTime(value)
+ case "ssl_protocol":
+ err = l.assignSSLProto(value)
+ case "ssl_cipher":
+ err = l.assignSSLCipherSuite(value)
+ default:
+ err = l.assignCustom(field, value)
+ }
+ if err != nil {
+ err = fmt.Errorf("assign '%s': %w", field, err)
+ }
+ return err
+}
+
+const hyphen = "-"
+
+func (l *logLine) assignVhost(vhost string) error {
+ if vhost == hyphen {
+ return nil
+ }
+ // nginx $host and $http_host return IPv6 addresses in [], apache does not
+ if idx := strings.IndexByte(vhost, ']'); idx > 0 {
+ vhost = vhost[1:idx]
+ }
+ l.vhost = vhost
+ return nil
+}
+
+func (l *logLine) assignPort(port string) error {
+ if port == hyphen {
+ return nil
+ }
+ if !isPortValid(port) {
+ return fmt.Errorf("assign '%s' : %w", port, errBadPort)
+ }
+ l.port = port
+ return nil
+}
+
+func (l *logLine) assignVhostWithPort(vhostPort string) error {
+ if vhostPort == hyphen {
+ return nil
+ }
+ idx := strings.LastIndexByte(vhostPort, ':')
+ if idx == -1 {
+ return fmt.Errorf("assign '%s' : %w", vhostPort, errBadVhostPort)
+ }
+ if err := l.assignPort(vhostPort[idx+1:]); err != nil {
+ return fmt.Errorf("assign '%s' : %w", vhostPort, errBadVhostPort)
+ }
+ if err := l.assignVhost(vhostPort[0:idx]); err != nil {
+ return fmt.Errorf("assign '%s' : %w", vhostPort, errBadVhostPort)
+ }
+ return nil
+}
+
+func (l *logLine) assignReqScheme(scheme string) error {
+ if scheme == hyphen {
+ return nil
+ }
+ if !isSchemeValid(scheme) {
+ return fmt.Errorf("assign '%s' : %w", scheme, errBadReqScheme)
+ }
+ l.reqScheme = scheme
+ return nil
+}
+
+func (l *logLine) assignReqClient(client string) error {
+ if client == hyphen {
+ return nil
+ }
+ l.reqClient = client
+ return nil
+}
+
+func (l *logLine) assignRequest(request string) error {
+ if request == hyphen {
+ return nil
+ }
+ var first, last int
+ if first = strings.IndexByte(request, ' '); first < 0 {
+ return fmt.Errorf("assign '%s': %w", request, errBadRequest)
+ }
+ if last = strings.LastIndexByte(request, ' '); first == last {
+ return fmt.Errorf("assign '%s': %w", request, errBadRequest)
+ }
+ proto := request[last+1:]
+ url := request[first+1 : last]
+ method := request[0:first]
+ if err := l.assignReqMethod(method); err != nil {
+ return err
+ }
+ if err := l.assignReqURL(url); err != nil {
+ return err
+ }
+ return l.assignReqProto(proto)
+}
+
+func (l *logLine) assignReqMethod(method string) error {
+ if method == hyphen {
+ return nil
+ }
+ if !isReqMethodValid(method) {
+ return fmt.Errorf("assign '%s' : %w", method, errBadReqMethod)
+ }
+ l.reqMethod = method
+ return nil
+}
+
+func (l *logLine) assignReqURL(url string) error {
+ if url == hyphen {
+ return nil
+ }
+ if isEmptyString(url) {
+ return fmt.Errorf("assign '%s' : %w", url, errBadReqURL)
+ }
+ l.reqURL = url
+ return nil
+}
+
+func (l *logLine) assignReqProto(proto string) error {
+ if proto == hyphen {
+ return nil
+ }
+ if !isReqProtoValid(proto) {
+ return fmt.Errorf("assign '%s': %w", proto, errBadReqProto)
+ }
+ l.reqProto = proto[5:]
+ return nil
+}
+
+func (l *logLine) assignRespCode(status string) error {
+ if status == hyphen {
+ return nil
+ }
+ v, err := strconv.Atoi(status)
+ if err != nil || !isRespCodeValid(v) {
+ return fmt.Errorf("assign '%s': %w", status, errBadRespCode)
+ }
+ l.respCode = v
+ return nil
+}
+
+func (l *logLine) assignReqSize(size string) error {
+ // apache: can be "-" according web_log py regexp.
+ if size == hyphen {
+ l.reqSize = 0
+ return nil
+ }
+ v, err := strconv.Atoi(size)
+ if err != nil || !isSizeValid(v) {
+ return fmt.Errorf("assign '%s': %w", size, errBadReqSize)
+ }
+ l.reqSize = v
+ return nil
+}
+
+func (l *logLine) assignRespSize(size string) error {
+ // apache: %b. In CLF format, i.e. a '-' rather than a 0 when no bytes are sent.
+ if size == hyphen {
+ l.respSize = 0
+ return nil
+ }
+ v, err := strconv.Atoi(size)
+ if err != nil || !isSizeValid(v) {
+ return fmt.Errorf("assign '%s': %w", size, errBadRespSize)
+ }
+ l.respSize = v
+ return nil
+}
+
+func (l *logLine) assignReqProcTime(time string) error {
+ if time == hyphen {
+ return nil
+ }
+ if time == "0.000" {
+ l.reqProcTime = 0
+ return nil
+ }
+ v, err := strconv.ParseFloat(time, 64)
+ if err != nil || !isTimeValid(v) {
+ return fmt.Errorf("assign '%s': %w", time, errBadReqProcTime)
+ }
+ l.reqProcTime = v * timeMultiplier(time)
+ return nil
+}
+
+func isUpstreamTimeSeparator(r rune) bool { return r == ',' || r == ':' }
+
+func (l *logLine) assignUpsRespTime(time string) error {
+ if time == hyphen {
+ return nil
+ }
+
+ // the upstream response time string can contain multiple values, separated
+ // by commas (in case the request was handled by multiple servers), or colons
+ // (in case the request passed between multiple server groups via an internal redirect)
+ // the individual values should be summed up to obtain the correct amount of time
+ // the request spent in upstream
+ var sum float64
+ for _, val := range strings.FieldsFunc(time, isUpstreamTimeSeparator) {
+ val = strings.TrimSpace(val)
+ v, err := strconv.ParseFloat(val, 64)
+ if err != nil || !isTimeValid(v) {
+ return fmt.Errorf("assign '%s': %w", time, errBadUpsRespTime)
+ }
+
+ sum += v
+ }
+
+ l.upsRespTime = sum * timeMultiplier(time)
+ return nil
+}
+
+func (l *logLine) assignSSLProto(proto string) error {
+ if proto == hyphen {
+ return nil
+ }
+ if !isSSLProtoValid(proto) {
+ return fmt.Errorf("assign '%s': %w", proto, errBadSSLProto)
+ }
+ l.sslProto = proto
+ return nil
+}
+
+func (l *logLine) assignSSLCipherSuite(cipher string) error {
+ if cipher == hyphen {
+ return nil
+ }
+ if strings.IndexByte(cipher, '-') <= 0 && strings.IndexByte(cipher, '_') <= 0 {
+ return fmt.Errorf("assign '%s': %w", cipher, errBadSSLCipherSuite)
+ }
+ l.sslCipherSuite = cipher
+ return nil
+}
+
+func (l *logLine) assignCustom(field, value string) error {
+ if len(l.custom.fields) == 0 || value == hyphen {
+ return nil
+ }
+ if _, ok := l.custom.fields[field]; ok {
+ l.custom.values = append(l.custom.values, customValue{name: field, value: value})
+ }
+ return nil
+}
+
+func (l *logLine) verify() error {
+ if l.empty() {
+ return fmt.Errorf("verify: %w", errEmptyLine)
+ }
+ if l.hasRespCode() && !l.isRespCodeValid() {
+ return fmt.Errorf("verify '%d': %w", l.respCode, errBadRespCode)
+ }
+ if l.hasVhost() && !l.isVhostValid() {
+ return fmt.Errorf("verify '%s': %w", l.vhost, errBadVhost)
+ }
+ if l.hasPort() && !l.isPortValid() {
+ return fmt.Errorf("verify '%s': %w", l.port, errBadPort)
+ }
+ if l.hasReqScheme() && !l.isSchemeValid() {
+ return fmt.Errorf("verify '%s': %w", l.reqScheme, errBadReqScheme)
+ }
+ if l.hasReqClient() && !l.isClientValid() {
+ return fmt.Errorf("verify '%s': %w", l.reqClient, errBadReqClient)
+ }
+ if l.hasReqMethod() && !l.isMethodValid() {
+ return fmt.Errorf("verify '%s': %w", l.reqMethod, errBadReqMethod)
+ }
+ if l.hasReqURL() && !l.isURLValid() {
+ return fmt.Errorf("verify '%s': %w", l.reqURL, errBadReqURL)
+ }
+ if l.hasReqProto() && !l.isProtoValid() {
+ return fmt.Errorf("verify '%s': %w", l.reqProto, errBadReqProto)
+ }
+ if l.hasReqSize() && !l.isReqSizeValid() {
+ return fmt.Errorf("verify '%d': %w", l.reqSize, errBadReqSize)
+ }
+ if l.hasRespSize() && !l.isRespSizeValid() {
+ return fmt.Errorf("verify '%d': %w", l.respSize, errBadRespSize)
+ }
+ if l.hasReqProcTime() && !l.isReqProcTimeValid() {
+ return fmt.Errorf("verify '%f': %w", l.reqProcTime, errBadReqProcTime)
+ }
+ if l.hasUpsRespTime() && !l.isUpsRespTimeValid() {
+ return fmt.Errorf("verify '%f': %w", l.upsRespTime, errBadUpsRespTime)
+ }
+ if l.hasSSLProto() && !l.isSSLProtoValid() {
+ return fmt.Errorf("verify '%s': %w", l.sslProto, errBadSSLProto)
+ }
+ if l.hasSSLCipherSuite() && !l.isSSLCipherSuiteValid() {
+ return fmt.Errorf("verify '%s': %w", l.sslCipherSuite, errBadSSLCipherSuite)
+ }
+ return nil
+}
+
+func (l *logLine) empty() bool { return !l.hasWebFields() && !l.hasCustomFields() }
+func (l *logLine) hasCustomFields() bool { return len(l.custom.values) > 0 }
+func (l *logLine) hasWebFields() bool { return l.web != emptyWebFields }
+func (l *logLine) hasVhost() bool { return !isEmptyString(l.vhost) }
+func (l *logLine) hasPort() bool { return !isEmptyString(l.port) }
+func (l *logLine) hasReqScheme() bool { return !isEmptyString(l.reqScheme) }
+func (l *logLine) hasReqClient() bool { return !isEmptyString(l.reqClient) }
+func (l *logLine) hasReqMethod() bool { return !isEmptyString(l.reqMethod) }
+func (l *logLine) hasReqURL() bool { return !isEmptyString(l.reqURL) }
+func (l *logLine) hasReqProto() bool { return !isEmptyString(l.reqProto) }
+func (l *logLine) hasRespCode() bool { return !isEmptyNumber(l.respCode) }
+func (l *logLine) hasReqSize() bool { return !isEmptyNumber(l.reqSize) }
+func (l *logLine) hasRespSize() bool { return !isEmptyNumber(l.respSize) }
+func (l *logLine) hasReqProcTime() bool { return !isEmptyNumber(int(l.reqProcTime)) }
+func (l *logLine) hasUpsRespTime() bool { return !isEmptyNumber(int(l.upsRespTime)) }
+func (l *logLine) hasSSLProto() bool { return !isEmptyString(l.sslProto) }
+func (l *logLine) hasSSLCipherSuite() bool { return !isEmptyString(l.sslCipherSuite) }
+func (l *logLine) isVhostValid() bool { return reVhost.MatchString(l.vhost) }
+func (l *logLine) isPortValid() bool { return isPortValid(l.port) }
+func (l *logLine) isSchemeValid() bool { return isSchemeValid(l.reqScheme) }
+func (l *logLine) isClientValid() bool { return reClient.MatchString(l.reqClient) }
+func (l *logLine) isMethodValid() bool { return isReqMethodValid(l.reqMethod) }
+func (l *logLine) isURLValid() bool { return !isEmptyString(l.reqURL) }
+func (l *logLine) isProtoValid() bool { return isReqProtoVerValid(l.reqProto) }
+func (l *logLine) isRespCodeValid() bool { return isRespCodeValid(l.respCode) }
+func (l *logLine) isReqSizeValid() bool { return isSizeValid(l.reqSize) }
+func (l *logLine) isRespSizeValid() bool { return isSizeValid(l.respSize) }
+func (l *logLine) isReqProcTimeValid() bool { return isTimeValid(l.reqProcTime) }
+func (l *logLine) isUpsRespTimeValid() bool { return isTimeValid(l.upsRespTime) }
+func (l *logLine) isSSLProtoValid() bool { return isSSLProtoValid(l.sslProto) }
+func (l *logLine) isSSLCipherSuiteValid() bool { return reCipherSuite.MatchString(l.sslCipherSuite) }
+
+func (l *logLine) reset() {
+ l.web = emptyWebFields
+ l.custom.values = l.custom.values[:0]
+}
+
+var (
+ // TODO: reClient doesn't work with %h when HostnameLookups is On.
+ reVhost = regexp.MustCompile(`^[a-zA-Z0-9-:.]+$`)
+ reClient = regexp.MustCompile(`^([\da-f:.]+|localhost)$`)
+ reCipherSuite = regexp.MustCompile(`^[A-Z0-9-_]+$`) // openssl -v
+)
+
+var emptyWebFields = web{
+ vhost: emptyString,
+ port: emptyString,
+ reqScheme: emptyString,
+ reqClient: emptyString,
+ reqMethod: emptyString,
+ reqURL: emptyString,
+ reqProto: emptyString,
+ reqSize: emptyNumber,
+ reqProcTime: emptyNumber,
+ respCode: emptyNumber,
+ respSize: emptyNumber,
+ upsRespTime: emptyNumber,
+ sslProto: emptyString,
+ sslCipherSuite: emptyString,
+}
+
+const (
+ emptyString = "__empty_string__"
+ emptyNumber = -9999
+)
+
+func isEmptyString(s string) bool {
+ return s == emptyString || s == ""
+}
+
+func isEmptyNumber(n int) bool {
+ return n == emptyNumber
+}
+
+func isReqMethodValid(method string) bool {
+ // https://www.iana.org/assignments/http-methods/http-methods.xhtml
+ switch method {
+ case "GET",
+ "ACL",
+ "BASELINE-CONTROL",
+ "BIND",
+ "CHECKIN",
+ "CHECKOUT",
+ "CONNECT",
+ "COPY",
+ "DELETE",
+ "HEAD",
+ "LABEL",
+ "LINK",
+ "LOCK",
+ "MERGE",
+ "MKACTIVITY",
+ "MKCALENDAR",
+ "MKCOL",
+ "MKREDIRECTREF",
+ "MKWORKSPACE",
+ "MOVE",
+ "OPTIONS",
+ "ORDERPATCH",
+ "PATCH",
+ "POST",
+ "PRI",
+ "PROPFIND",
+ "PROPPATCH",
+ "PURGE", // not a standardized HTTP method
+ "PUT",
+ "REBIND",
+ "REPORT",
+ "SEARCH",
+ "TRACE",
+ "UNBIND",
+ "UNCHECKOUT",
+ "UNLINK",
+ "UNLOCK",
+ "UPDATE",
+ "UPDATEREDIRECTREF":
+ return true
+ }
+ return false
+}
+
+func isReqProtoValid(proto string) bool {
+ return len(proto) >= 6 && proto[:5] == "HTTP/" && isReqProtoVerValid(proto[5:])
+}
+
+func isReqProtoVerValid(version string) bool {
+ switch version {
+ case "1.1", "1", "1.0", "2", "2.0", "3", "3.0":
+ return true
+ }
+ return false
+}
+
+func isPortValid(port string) bool {
+ v, err := strconv.Atoi(port)
+ return err == nil && v >= 80 && v <= 49151
+}
+
+func isSchemeValid(scheme string) bool {
+ return scheme == "http" || scheme == "https"
+}
+
+func isRespCodeValid(code int) bool {
+ // rfc7231
+ // Informational responses (100–199),
+ // Successful responses (200–299),
+ // Redirects (300–399),
+ // Client errors (400–499),
+ // Server errors (500–599).
+ return code >= 100 && code <= 600
+}
+
+func isSizeValid(size int) bool {
+ return size >= 0
+}
+
+func isTimeValid(time float64) bool {
+ return time >= 0
+}
+
+func isSSLProtoValid(proto string) bool {
+ switch proto {
+ case "TLSv1.2", "TLSv1.3", "SSLv2", "SSLv3", "TLSv1", "TLSv1.1":
+ return true
+ }
+ return false
+}
+
+func timeMultiplier(time string) float64 {
+ // TODO: Change code to detect and modify properly IIS time (in milliseconds)
+ // Convert to microseconds:
+ // - nginx time is in seconds with millisecond resolution.
+ if strings.IndexByte(time, '.') > 0 {
+ return 1e6
+ }
+ // - apache time is in microseconds.
+ return 1
+}
diff --git a/src/go/plugin/go.d/modules/weblog/logline_test.go b/src/go/plugin/go.d/modules/weblog/logline_test.go
new file mode 100644
index 000000000..d3055863a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/logline_test.go
@@ -0,0 +1,669 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ emptyStr = ""
+)
+
+var emptyLogLine = *newEmptyLogLine()
+
+func TestLogLine_Assign(t *testing.T) {
+ type subTest struct {
+ input string
+ wantLine logLine
+ wantErr error
+ }
+ type test struct {
+ name string
+ fields []string
+ cases []subTest
+ }
+ tests := []test{
+ {
+ name: "Vhost",
+ fields: []string{
+ "host",
+ "http_host",
+ "v",
+ },
+ cases: []subTest{
+ {input: "1.1.1.1", wantLine: logLine{web: web{vhost: "1.1.1.1"}}},
+ {input: "::1", wantLine: logLine{web: web{vhost: "::1"}}},
+ {input: "[::1]", wantLine: logLine{web: web{vhost: "::1"}}},
+ {input: "1ce:1ce::babe", wantLine: logLine{web: web{vhost: "1ce:1ce::babe"}}},
+ {input: "[1ce:1ce::babe]", wantLine: logLine{web: web{vhost: "1ce:1ce::babe"}}},
+ {input: "localhost", wantLine: logLine{web: web{vhost: "localhost"}}},
+ {input: "debian10.debian", wantLine: logLine{web: web{vhost: "debian10.debian"}}},
+ {input: "my_vhost", wantLine: logLine{web: web{vhost: "my_vhost"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ },
+ },
+ {
+ name: "Server Port",
+ fields: []string{
+ "server_port",
+ "p",
+ },
+ cases: []subTest{
+ {input: "80", wantLine: logLine{web: web{port: "80"}}},
+ {input: "8081", wantLine: logLine{web: web{port: "8081"}}},
+ {input: "30000", wantLine: logLine{web: web{port: "30000"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "-1", wantLine: emptyLogLine, wantErr: errBadPort},
+ {input: "0", wantLine: emptyLogLine, wantErr: errBadPort},
+ {input: "50000", wantLine: emptyLogLine, wantErr: errBadPort},
+ },
+ },
+ {
+ name: "Vhost With Port",
+ fields: []string{
+ "host:$server_port",
+ "v:%p",
+ },
+ cases: []subTest{
+ {input: "1.1.1.1:80", wantLine: logLine{web: web{vhost: "1.1.1.1", port: "80"}}},
+ {input: "::1:80", wantLine: logLine{web: web{vhost: "::1", port: "80"}}},
+ {input: "[::1]:80", wantLine: logLine{web: web{vhost: "::1", port: "80"}}},
+ {input: "1ce:1ce::babe:80", wantLine: logLine{web: web{vhost: "1ce:1ce::babe", port: "80"}}},
+ {input: "debian10.debian:81", wantLine: logLine{web: web{vhost: "debian10.debian", port: "81"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "1.1.1.1", wantLine: emptyLogLine, wantErr: errBadVhostPort},
+ {input: "1.1.1.1:", wantLine: emptyLogLine, wantErr: errBadVhostPort},
+ {input: "1.1.1.1 80", wantLine: emptyLogLine, wantErr: errBadVhostPort},
+ {input: "1.1.1.1:20", wantLine: emptyLogLine, wantErr: errBadVhostPort},
+ {input: "1.1.1.1:50000", wantLine: emptyLogLine, wantErr: errBadVhostPort},
+ },
+ },
+ {
+ name: "Scheme",
+ fields: []string{
+ "scheme",
+ },
+ cases: []subTest{
+ {input: "http", wantLine: logLine{web: web{reqScheme: "http"}}},
+ {input: "https", wantLine: logLine{web: web{reqScheme: "https"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "HTTP", wantLine: emptyLogLine, wantErr: errBadReqScheme},
+ {input: "HTTPS", wantLine: emptyLogLine, wantErr: errBadReqScheme},
+ },
+ },
+ {
+ name: "Client",
+ fields: []string{
+ "remote_addr",
+ "a",
+ "h",
+ },
+ cases: []subTest{
+ {input: "1.1.1.1", wantLine: logLine{web: web{reqClient: "1.1.1.1"}}},
+ {input: "debian10", wantLine: logLine{web: web{reqClient: "debian10"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ },
+ },
+ {
+ name: "Request",
+ fields: []string{
+ "request",
+ "r",
+ },
+ cases: []subTest{
+ {input: "GET / HTTP/1.0", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/", reqProto: "1.0"}}},
+ {input: "HEAD /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "HEAD", reqURL: "/ihs.gif", reqProto: "1.0"}}},
+ {input: "POST /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "POST", reqURL: "/ihs.gif", reqProto: "1.0"}}},
+ {input: "PUT /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "PUT", reqURL: "/ihs.gif", reqProto: "1.0"}}},
+ {input: "PATCH /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "PATCH", reqURL: "/ihs.gif", reqProto: "1.0"}}},
+ {input: "DELETE /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "DELETE", reqURL: "/ihs.gif", reqProto: "1.0"}}},
+ {input: "OPTIONS /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "OPTIONS", reqURL: "/ihs.gif", reqProto: "1.0"}}},
+ {input: "TRACE /ihs.gif HTTP/1.0", wantLine: logLine{web: web{reqMethod: "TRACE", reqURL: "/ihs.gif", reqProto: "1.0"}}},
+ {input: "CONNECT ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "CONNECT", reqURL: "ip.cn:443", reqProto: "1.1"}}},
+ {input: "MKCOL ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "MKCOL", reqURL: "ip.cn:443", reqProto: "1.1"}}},
+ {input: "PROPFIND ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "PROPFIND", reqURL: "ip.cn:443", reqProto: "1.1"}}},
+ {input: "MOVE ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "MOVE", reqURL: "ip.cn:443", reqProto: "1.1"}}},
+ {input: "SEARCH ip.cn:443 HTTP/1.1", wantLine: logLine{web: web{reqMethod: "SEARCH", reqURL: "ip.cn:443", reqProto: "1.1"}}},
+ {input: "GET / HTTP/1.1", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/", reqProto: "1.1"}}},
+ {input: "GET / HTTP/2", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/", reqProto: "2"}}},
+ {input: "GET / HTTP/2.0", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/", reqProto: "2.0"}}},
+ {input: "GET /invalid_version http/1.1", wantLine: logLine{web: web{reqMethod: "GET", reqURL: "/invalid_version", reqProto: emptyString}}, wantErr: errBadReqProto},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "GET no_version", wantLine: emptyLogLine, wantErr: errBadRequest},
+ {input: "GOT / HTTP/2", wantLine: emptyLogLine, wantErr: errBadReqMethod},
+ {input: "get / HTTP/2", wantLine: emptyLogLine, wantErr: errBadReqMethod},
+ {input: "x04\x01\x00P$3\xFE\xEA\x00", wantLine: emptyLogLine, wantErr: errBadRequest},
+ },
+ },
+ {
+ name: "Request HTTP Method",
+ fields: []string{
+ "request_method",
+ "m",
+ },
+ cases: []subTest{
+ {input: "GET", wantLine: logLine{web: web{reqMethod: "GET"}}},
+ {input: "HEAD", wantLine: logLine{web: web{reqMethod: "HEAD"}}},
+ {input: "POST", wantLine: logLine{web: web{reqMethod: "POST"}}},
+ {input: "PUT", wantLine: logLine{web: web{reqMethod: "PUT"}}},
+ {input: "PATCH", wantLine: logLine{web: web{reqMethod: "PATCH"}}},
+ {input: "DELETE", wantLine: logLine{web: web{reqMethod: "DELETE"}}},
+ {input: "OPTIONS", wantLine: logLine{web: web{reqMethod: "OPTIONS"}}},
+ {input: "TRACE", wantLine: logLine{web: web{reqMethod: "TRACE"}}},
+ {input: "CONNECT", wantLine: logLine{web: web{reqMethod: "CONNECT"}}},
+ {input: "MKCOL", wantLine: logLine{web: web{reqMethod: "MKCOL"}}},
+ {input: "PROPFIND", wantLine: logLine{web: web{reqMethod: "PROPFIND"}}},
+ {input: "MOVE", wantLine: logLine{web: web{reqMethod: "MOVE"}}},
+ {input: "SEARCH", wantLine: logLine{web: web{reqMethod: "SEARCH"}}},
+ {input: "PURGE", wantLine: logLine{web: web{reqMethod: "PURGE"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "GET no_version", wantLine: emptyLogLine, wantErr: errBadReqMethod},
+ {input: "GOT / HTTP/2", wantLine: emptyLogLine, wantErr: errBadReqMethod},
+ {input: "get / HTTP/2", wantLine: emptyLogLine, wantErr: errBadReqMethod},
+ },
+ },
+ {
+ name: "Request URL",
+ fields: []string{
+ "request_uri",
+ "U",
+ },
+ cases: []subTest{
+ {input: "/server-status?auto", wantLine: logLine{web: web{reqURL: "/server-status?auto"}}},
+ {input: "/default.html", wantLine: logLine{web: web{reqURL: "/default.html"}}},
+ {input: "10.0.0.1:3128", wantLine: logLine{web: web{reqURL: "10.0.0.1:3128"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ },
+ },
+ {
+ name: "Request HTTP Protocol",
+ fields: []string{
+ "server_protocol",
+ "H",
+ },
+ cases: []subTest{
+ {input: "HTTP/1.0", wantLine: logLine{web: web{reqProto: "1.0"}}},
+ {input: "HTTP/1.1", wantLine: logLine{web: web{reqProto: "1.1"}}},
+ {input: "HTTP/2", wantLine: logLine{web: web{reqProto: "2"}}},
+ {input: "HTTP/2.0", wantLine: logLine{web: web{reqProto: "2.0"}}},
+ {input: "HTTP/3", wantLine: logLine{web: web{reqProto: "3"}}},
+ {input: "HTTP/3.0", wantLine: logLine{web: web{reqProto: "3.0"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "1.1", wantLine: emptyLogLine, wantErr: errBadReqProto},
+ {input: "http/1.1", wantLine: emptyLogLine, wantErr: errBadReqProto},
+ },
+ },
+ {
+ name: "Response Status Code",
+ fields: []string{
+ "status",
+ "s",
+ ">s",
+ },
+ cases: []subTest{
+ {input: "100", wantLine: logLine{web: web{respCode: 100}}},
+ {input: "200", wantLine: logLine{web: web{respCode: 200}}},
+ {input: "300", wantLine: logLine{web: web{respCode: 300}}},
+ {input: "400", wantLine: logLine{web: web{respCode: 400}}},
+ {input: "500", wantLine: logLine{web: web{respCode: 500}}},
+ {input: "600", wantLine: logLine{web: web{respCode: 600}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "99", wantLine: emptyLogLine, wantErr: errBadRespCode},
+ {input: "601", wantLine: emptyLogLine, wantErr: errBadRespCode},
+ {input: "200 ", wantLine: emptyLogLine, wantErr: errBadRespCode},
+ {input: "0.222", wantLine: emptyLogLine, wantErr: errBadRespCode},
+ {input: "localhost", wantLine: emptyLogLine, wantErr: errBadRespCode},
+ },
+ },
+ {
+ name: "Request Size",
+ fields: []string{
+ "request_length",
+ "I",
+ },
+ cases: []subTest{
+ {input: "15", wantLine: logLine{web: web{reqSize: 15}}},
+ {input: "1000000", wantLine: logLine{web: web{reqSize: 1000000}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: logLine{web: web{reqSize: 0}}},
+ {input: "-1", wantLine: emptyLogLine, wantErr: errBadReqSize},
+ {input: "100.222", wantLine: emptyLogLine, wantErr: errBadReqSize},
+ {input: "invalid", wantLine: emptyLogLine, wantErr: errBadReqSize},
+ },
+ },
+ {
+ name: "Response Size",
+ fields: []string{
+ "bytes_sent",
+ "body_bytes_sent",
+ "O",
+ "B",
+ "b",
+ },
+ cases: []subTest{
+ {input: "15", wantLine: logLine{web: web{respSize: 15}}},
+ {input: "1000000", wantLine: logLine{web: web{respSize: 1000000}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: logLine{web: web{respSize: 0}}},
+ {input: "-1", wantLine: emptyLogLine, wantErr: errBadRespSize},
+ {input: "100.222", wantLine: emptyLogLine, wantErr: errBadRespSize},
+ {input: "invalid", wantLine: emptyLogLine, wantErr: errBadRespSize},
+ },
+ },
+ {
+ name: "Request Processing Time",
+ fields: []string{
+ "request_time",
+ "D",
+ },
+ cases: []subTest{
+ {input: "100222", wantLine: logLine{web: web{reqProcTime: 100222}}},
+ {input: "100.222", wantLine: logLine{web: web{reqProcTime: 100222000}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "-1", wantLine: emptyLogLine, wantErr: errBadReqProcTime},
+ {input: "0.333,0.444,0.555", wantLine: emptyLogLine, wantErr: errBadReqProcTime},
+ {input: "number", wantLine: emptyLogLine, wantErr: errBadReqProcTime},
+ },
+ },
+ {
+ name: "Upstream Response Time",
+ fields: []string{
+ "upstream_response_time",
+ },
+ cases: []subTest{
+ {input: "100222", wantLine: logLine{web: web{upsRespTime: 100222}}},
+ {input: "100.222", wantLine: logLine{web: web{upsRespTime: 100222000}}},
+ {input: "0.100 , 0.400 : 0.200 ", wantLine: logLine{web: web{upsRespTime: 700000}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "-1", wantLine: emptyLogLine, wantErr: errBadUpsRespTime},
+ {input: "number", wantLine: emptyLogLine, wantErr: errBadUpsRespTime},
+ },
+ },
+ {
+ name: "SSL Protocol",
+ fields: []string{
+ "ssl_protocol",
+ },
+ cases: []subTest{
+ {input: "SSLv3", wantLine: logLine{web: web{sslProto: "SSLv3"}}},
+ {input: "SSLv2", wantLine: logLine{web: web{sslProto: "SSLv2"}}},
+ {input: "TLSv1", wantLine: logLine{web: web{sslProto: "TLSv1"}}},
+ {input: "TLSv1.1", wantLine: logLine{web: web{sslProto: "TLSv1.1"}}},
+ {input: "TLSv1.2", wantLine: logLine{web: web{sslProto: "TLSv1.2"}}},
+ {input: "TLSv1.3", wantLine: logLine{web: web{sslProto: "TLSv1.3"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "-1", wantLine: emptyLogLine, wantErr: errBadSSLProto},
+ {input: "invalid", wantLine: emptyLogLine, wantErr: errBadSSLProto},
+ },
+ },
+ {
+ name: "SSL Cipher Suite",
+ fields: []string{
+ "ssl_cipher",
+ },
+ cases: []subTest{
+ {input: "ECDHE-RSA-AES256-SHA", wantLine: logLine{web: web{sslCipherSuite: "ECDHE-RSA-AES256-SHA"}}},
+ {input: "DHE-RSA-AES256-SHA", wantLine: logLine{web: web{sslCipherSuite: "DHE-RSA-AES256-SHA"}}},
+ {input: "AES256-SHA", wantLine: logLine{web: web{sslCipherSuite: "AES256-SHA"}}},
+ {input: "PSK-RC4-SHA", wantLine: logLine{web: web{sslCipherSuite: "PSK-RC4-SHA"}}},
+ {input: "TLS_AES_256_GCM_SHA384", wantLine: logLine{web: web{sslCipherSuite: "TLS_AES_256_GCM_SHA384"}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ {input: "-1", wantLine: emptyLogLine, wantErr: errBadSSLCipherSuite},
+ {input: "invalid", wantLine: emptyLogLine, wantErr: errBadSSLCipherSuite},
+ },
+ },
+ {
+ name: "Custom Fields",
+ fields: []string{
+ "custom",
+ },
+ cases: []subTest{
+ {input: "POST", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "POST"}}}}},
+ {input: "/example.com", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "/example.com"}}}}},
+ {input: "HTTP/1.1", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "HTTP/1.1"}}}}},
+ {input: "0.333,0.444,0.555", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "0.333,0.444,0.555"}}}}},
+ {input: "-1", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "-1"}}}}},
+ {input: "invalid", wantLine: logLine{custom: custom{values: []customValue{{name: "custom", value: "invalid"}}}}},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ },
+ },
+ {
+ name: "Custom Fields Not Exist",
+ fields: []string{
+ "custom_field_not_exist",
+ },
+ cases: []subTest{
+ {input: "POST", wantLine: emptyLogLine},
+ {input: "/example.com", wantLine: emptyLogLine},
+ {input: "HTTP/1.1", wantLine: emptyLogLine},
+ {input: "0.333,0.444,0.555", wantLine: emptyLogLine},
+ {input: "-1", wantLine: emptyLogLine},
+ {input: "invalid", wantLine: emptyLogLine},
+ {input: emptyStr, wantLine: emptyLogLine},
+ {input: hyphen, wantLine: emptyLogLine},
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ for _, field := range tt.fields {
+ for i, tc := range tt.cases {
+ name := fmt.Sprintf("[%s:%d]field='%s'|line='%s'", tt.name, i+1, field, tc.input)
+ t.Run(name, func(t *testing.T) {
+
+ line := newEmptyLogLineWithFields()
+ err := line.Assign(field, tc.input)
+
+ if tc.wantErr != nil {
+ require.Error(t, err)
+ assert.Truef(t, errors.Is(err, tc.wantErr), "expected '%v' error, got '%v'", tc.wantErr, err)
+ } else {
+ require.NoError(t, err)
+ }
+
+ expected := prepareLogLine(field, tc.wantLine)
+ assert.Equal(t, expected, *line)
+ })
+ }
+ }
+ }
+}
+
+func TestLogLine_verify(t *testing.T) {
+ type subTest struct {
+ line logLine
+ wantErr error
+ }
+ tests := []struct {
+ name string
+ field string
+ cases []subTest
+ }{
+ {
+ name: "Vhost",
+ field: "host",
+ cases: []subTest{
+ {line: logLine{web: web{vhost: "192.168.0.1"}}},
+ {line: logLine{web: web{vhost: "debian10.debian"}}},
+ {line: logLine{web: web{vhost: "1ce:1ce::babe"}}},
+ {line: logLine{web: web{vhost: "localhost"}}},
+ {line: logLine{web: web{vhost: "invalid_vhost"}}, wantErr: errBadVhost},
+ {line: logLine{web: web{vhost: "http://192.168.0.1/"}}, wantErr: errBadVhost},
+ },
+ },
+ {
+ name: "Server Port",
+ field: "server_port",
+ cases: []subTest{
+ {line: logLine{web: web{port: "80"}}},
+ {line: logLine{web: web{port: "8081"}}},
+ {line: logLine{web: web{port: "79"}}, wantErr: errBadPort},
+ {line: logLine{web: web{port: "50000"}}, wantErr: errBadPort},
+ {line: logLine{web: web{port: "0.0.0.0"}}, wantErr: errBadPort},
+ },
+ },
+ {
+ name: "Scheme",
+ field: "scheme",
+ cases: []subTest{
+ {line: logLine{web: web{reqScheme: "http"}}},
+ {line: logLine{web: web{reqScheme: "https"}}},
+ {line: logLine{web: web{reqScheme: "not_https"}}, wantErr: errBadReqScheme},
+ {line: logLine{web: web{reqScheme: "HTTP"}}, wantErr: errBadReqScheme},
+ {line: logLine{web: web{reqScheme: "HTTPS"}}, wantErr: errBadReqScheme},
+ {line: logLine{web: web{reqScheme: "10"}}, wantErr: errBadReqScheme},
+ },
+ },
+ {
+ name: "Client",
+ field: "remote_addr",
+ cases: []subTest{
+ {line: logLine{web: web{reqClient: "1.1.1.1"}}},
+ {line: logLine{web: web{reqClient: "::1"}}},
+ {line: logLine{web: web{reqClient: "1ce:1ce::babe"}}},
+ {line: logLine{web: web{reqClient: "localhost"}}},
+ {line: logLine{web: web{reqClient: "debian10.debian"}}, wantErr: errBadReqClient},
+ {line: logLine{web: web{reqClient: "invalid"}}, wantErr: errBadReqClient},
+ },
+ },
+ {
+ name: "Request HTTP Method",
+ field: "request_method",
+ cases: []subTest{
+ {line: logLine{web: web{reqMethod: "GET"}}},
+ {line: logLine{web: web{reqMethod: "POST"}}},
+ {line: logLine{web: web{reqMethod: "TRACE"}}},
+ {line: logLine{web: web{reqMethod: "OPTIONS"}}},
+ {line: logLine{web: web{reqMethod: "CONNECT"}}},
+ {line: logLine{web: web{reqMethod: "DELETE"}}},
+ {line: logLine{web: web{reqMethod: "PUT"}}},
+ {line: logLine{web: web{reqMethod: "PATCH"}}},
+ {line: logLine{web: web{reqMethod: "HEAD"}}},
+ {line: logLine{web: web{reqMethod: "MKCOL"}}},
+ {line: logLine{web: web{reqMethod: "PROPFIND"}}},
+ {line: logLine{web: web{reqMethod: "MOVE"}}},
+ {line: logLine{web: web{reqMethod: "SEARCH"}}},
+ {line: logLine{web: web{reqMethod: "Get"}}, wantErr: errBadReqMethod},
+ {line: logLine{web: web{reqMethod: "get"}}, wantErr: errBadReqMethod},
+ },
+ },
+ {
+ name: "Request URL",
+ field: "request_uri",
+ cases: []subTest{
+ {line: logLine{web: web{reqURL: "/"}}},
+ {line: logLine{web: web{reqURL: "/status?full&json"}}},
+ {line: logLine{web: web{reqURL: "/icons/openlogo-75.png"}}},
+ {line: logLine{web: web{reqURL: "status?full&json"}}},
+ {line: logLine{web: web{reqURL: "\"req_url=/ \""}}},
+ {line: logLine{web: web{reqURL: "http://192.168.0.1/"}}},
+ {line: logLine{web: web{reqURL: ""}}},
+ },
+ },
+ {
+ name: "Request HTTP Protocol",
+ field: "server_protocol",
+ cases: []subTest{
+ {line: logLine{web: web{reqProto: "1"}}},
+ {line: logLine{web: web{reqProto: "1.0"}}},
+ {line: logLine{web: web{reqProto: "1.1"}}},
+ {line: logLine{web: web{reqProto: "2.0"}}},
+ {line: logLine{web: web{reqProto: "2"}}},
+ {line: logLine{web: web{reqProto: "0.9"}}, wantErr: errBadReqProto},
+ {line: logLine{web: web{reqProto: "1.1.1"}}, wantErr: errBadReqProto},
+ {line: logLine{web: web{reqProto: "2.2"}}, wantErr: errBadReqProto},
+ {line: logLine{web: web{reqProto: "localhost"}}, wantErr: errBadReqProto},
+ },
+ },
+ {
+ name: "Response Status Code",
+ field: "status",
+ cases: []subTest{
+ {line: logLine{web: web{respCode: 100}}},
+ {line: logLine{web: web{respCode: 200}}},
+ {line: logLine{web: web{respCode: 300}}},
+ {line: logLine{web: web{respCode: 400}}},
+ {line: logLine{web: web{respCode: 500}}},
+ {line: logLine{web: web{respCode: 600}}},
+ {line: logLine{web: web{respCode: -1}}, wantErr: errBadRespCode},
+ {line: logLine{web: web{respCode: 99}}, wantErr: errBadRespCode},
+ {line: logLine{web: web{respCode: 601}}, wantErr: errBadRespCode},
+ },
+ },
+ {
+ name: "Request size",
+ field: "request_length",
+ cases: []subTest{
+ {line: logLine{web: web{reqSize: 0}}},
+ {line: logLine{web: web{reqSize: 100}}},
+ {line: logLine{web: web{reqSize: 1000000}}},
+ {line: logLine{web: web{reqSize: -1}}, wantErr: errBadReqSize},
+ },
+ },
+ {
+ name: "Response size",
+ field: "bytes_sent",
+ cases: []subTest{
+ {line: logLine{web: web{respSize: 0}}},
+ {line: logLine{web: web{respSize: 100}}},
+ {line: logLine{web: web{respSize: 1000000}}},
+ {line: logLine{web: web{respSize: -1}}, wantErr: errBadRespSize},
+ },
+ },
+ {
+ name: "Request Processing Time",
+ field: "request_time",
+ cases: []subTest{
+ {line: logLine{web: web{reqProcTime: 0}}},
+ {line: logLine{web: web{reqProcTime: 100}}},
+ {line: logLine{web: web{reqProcTime: 1000.123}}},
+ {line: logLine{web: web{reqProcTime: -1}}, wantErr: errBadReqProcTime},
+ },
+ },
+ {
+ name: "Upstream Response Time",
+ field: "upstream_response_time",
+ cases: []subTest{
+ {line: logLine{web: web{upsRespTime: 0}}},
+ {line: logLine{web: web{upsRespTime: 100}}},
+ {line: logLine{web: web{upsRespTime: 1000.123}}},
+ {line: logLine{web: web{upsRespTime: -1}}, wantErr: errBadUpsRespTime},
+ },
+ },
+ {
+ name: "SSL Protocol",
+ field: "ssl_protocol",
+ cases: []subTest{
+ {line: logLine{web: web{sslProto: "SSLv3"}}},
+ {line: logLine{web: web{sslProto: "SSLv2"}}},
+ {line: logLine{web: web{sslProto: "TLSv1"}}},
+ {line: logLine{web: web{sslProto: "TLSv1.1"}}},
+ {line: logLine{web: web{sslProto: "TLSv1.2"}}},
+ {line: logLine{web: web{sslProto: "TLSv1.3"}}},
+ {line: logLine{web: web{sslProto: "invalid"}}, wantErr: errBadSSLProto},
+ },
+ },
+ {
+ name: "SSL Cipher Suite",
+ field: "ssl_cipher",
+ cases: []subTest{
+ {line: logLine{web: web{sslCipherSuite: "ECDHE-RSA-AES256-SHA"}}},
+ {line: logLine{web: web{sslCipherSuite: "DHE-RSA-AES256-SHA"}}},
+ {line: logLine{web: web{sslCipherSuite: "AES256-SHA"}}},
+ {line: logLine{web: web{sslCipherSuite: "TLS_AES_256_GCM_SHA384"}}},
+ {line: logLine{web: web{sslCipherSuite: "invalid"}}, wantErr: errBadSSLCipherSuite},
+ },
+ },
+ {
+ name: "Custom Fields",
+ field: "custom",
+ cases: []subTest{
+ {line: logLine{custom: custom{values: []customValue{{name: "custom", value: "POST"}}}}},
+ {line: logLine{custom: custom{values: []customValue{{name: "custom", value: "/example.com"}}}}},
+ {line: logLine{custom: custom{values: []customValue{{name: "custom", value: "0.333,0.444,0.555"}}}}},
+ },
+ },
+ {
+ name: "Empty Line",
+ cases: []subTest{
+ {line: emptyLogLine, wantErr: errEmptyLine},
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ for i, tc := range tt.cases {
+ name := fmt.Sprintf("[%s:%d]field='%s'", tt.name, i+1, tt.field)
+
+ t.Run(name, func(t *testing.T) {
+ line := prepareLogLine(tt.field, tc.line)
+
+ err := line.verify()
+
+ if tc.wantErr != nil {
+ require.Error(t, err)
+ assert.Truef(t, errors.Is(err, tc.wantErr), "expected '%v' error, got '%v'", tc.wantErr, err)
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+ }
+}
+
+func prepareLogLine(field string, template logLine) logLine {
+ if template.empty() {
+ return *newEmptyLogLineWithFields()
+ }
+
+ line := newEmptyLogLineWithFields()
+ line.reset()
+
+ switch field {
+ case "host", "http_host", "v":
+ line.vhost = template.vhost
+ case "server_port", "p":
+ line.port = template.port
+ case "host:$server_port", "v:%p":
+ line.vhost = template.vhost
+ line.port = template.port
+ case "scheme":
+ line.reqScheme = template.reqScheme
+ case "remote_addr", "a", "h":
+ line.reqClient = template.reqClient
+ case "request", "r":
+ line.reqMethod = template.reqMethod
+ line.reqURL = template.reqURL
+ line.reqProto = template.reqProto
+ case "request_method", "m":
+ line.reqMethod = template.reqMethod
+ case "request_uri", "U":
+ line.reqURL = template.reqURL
+ case "server_protocol", "H":
+ line.reqProto = template.reqProto
+ case "status", "s", ">s":
+ line.respCode = template.respCode
+ case "request_length", "I":
+ line.reqSize = template.reqSize
+ case "bytes_sent", "body_bytes_sent", "b", "O", "B":
+ line.respSize = template.respSize
+ case "request_time", "D":
+ line.reqProcTime = template.reqProcTime
+ case "upstream_response_time":
+ line.upsRespTime = template.upsRespTime
+ case "ssl_protocol":
+ line.sslProto = template.sslProto
+ case "ssl_cipher":
+ line.sslCipherSuite = template.sslCipherSuite
+ default:
+ line.custom.values = template.custom.values
+ }
+ return *line
+}
+
+func newEmptyLogLineWithFields() *logLine {
+ l := newEmptyLogLine()
+ l.custom.fields = map[string]struct{}{"custom": {}}
+ return l
+}
diff --git a/src/go/plugin/go.d/modules/weblog/metadata.yaml b/src/go/plugin/go.d/modules/weblog/metadata.yaml
new file mode 100644
index 000000000..7608b936c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/metadata.yaml
@@ -0,0 +1,525 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-web_log
+ plugin_name: go.d.plugin
+ module_name: web_log
+ monitored_instance:
+ name: Web server log files
+ link: ""
+ categories:
+ - data-collection.web-servers-and-web-proxies
+ icon_filename: webservers.svg
+ keywords:
+ - webserver
+ - apache
+ - httpd
+ - nginx
+ - lighttpd
+ - logs
+ most_popular: false
+ info_provided_to_referring_integrations:
+ description: ""
+ related_resources:
+ integrations:
+ list: []
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors web servers by parsing their log files.
+ method_description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ It automatically detects log files of web servers running on localhost.
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/web_log.conf
+ options:
+ description: |
+ Weblog is aware of how to parse and interpret the following fields (**known fields**):
+
+ > [nginx](https://nginx.org/en/docs/varindex.html)
+ >
+ > [apache](https://httpd.apache.org/docs/current/mod/mod_log_config.html)
+
+ | nginx | apache | description |
+ |-------------------------|----------|------------------------------------------------------------------------------------------|
+ | $host ($http_host) | %v | Name of the server which accepted a request. |
+ | $server_port | %p | Port of the server which accepted a request. |
+ | $scheme | - | Request scheme. "http" or "https". |
+ | $remote_addr | %a (%h) | Client address. |
+ | $request | %r | Full original request line. The line is "$request_method $request_uri $server_protocol". |
+ | $request_method | %m | Request method. Usually "GET" or "POST". |
+ | $request_uri | %U | Full original request URI. |
+ | $server_protocol | %H | Request protocol. Usually "HTTP/1.0", "HTTP/1.1", or "HTTP/2.0". |
+ | $status | %s (%>s) | Response status code. |
+ | $request_length | %I | Bytes received from a client, including request and headers. |
+ | $bytes_sent | %O | Bytes sent to a client, including request and headers. |
+ | $body_bytes_sent | %B (%b) | Bytes sent to a client, not counting the response header. |
+ | $request_time | %D | Request processing time. |
+ | $upstream_response_time | - | Time spent on receiving the response from the upstream server. |
+ | $ssl_protocol | - | Protocol of an established SSL connection. |
+ | $ssl_cipher | - | String of ciphers used for an established SSL connection. |
+
+ Notes:
+
+          - Apache `%h` logs the IP address if [HostnameLookups](https://httpd.apache.org/docs/2.4/mod/core.html#hostnamelookups) is Off. The web log collector counts hostnames as IPv4 addresses. We recommend either disabling HostnameLookups or using `%a` instead of `%h`.
+ - Since httpd 2.0, unlike 1.3, the `%b` and `%B` format strings do not represent the number of bytes sent to the client, but simply the size in bytes of the HTTP response. It will differ, for instance, if the connection is aborted, or if SSL is used. The `%O` format provided by [`mod_logio`](https://httpd.apache.org/docs/2.4/mod/mod_logio.html) will log the actual number of bytes sent over the network.
+ - To get `%I` and `%O` working you need to enable `mod_logio` on Apache.
+          - NGINX logs the URI with query parameters; Apache doesn't.
+          - `$request` is parsed into `$request_method`, `$request_uri` and `$server_protocol`. If you have `$request` in your log format, there is no need to include the others.
+ - Don't use both `$bytes_sent` and `$body_bytes_sent` (`%O` and `%B` or `%b`). The module does not distinguish between these parameters.
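+
+        As a purely illustrative sketch (the path and format below are hypothetical), a CSV job for an Apache access log might look like this:
+
+        ```yaml
+        path: /var/log/apache2/access.log
+        log_type: csv
+        csv_config:
+          format: '%h %l %u %t "%r" %>s %B %I %D'
+        ```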
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: path
+ description: Path to the web server log file.
+ default_value: ""
+ required: true
+ - name: exclude_path
+ description: Path to exclude.
+ default_value: "*.gz"
+ required: false
+ - name: url_patterns
+ description: List of URL patterns.
+ default_value: "[]"
+ required: false
+ detailed_description: |
+ "URL pattern" scope metrics will be collected for each URL pattern.
+
+ Option syntax:
+
+ ```yaml
+ url_patterns:
+ - name: name1
+ pattern: pattern1
+ - name: name2
+ pattern: pattern2
+ ```
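+
+          For example (the names and patterns below are purely illustrative; pattern syntax is documented in the matcher package linked under `url_patterns.pattern`):
+
+          ```yaml
+          url_patterns:
+            - name: api
+              pattern: '~ ^/api/'
+            - name: static
+              pattern: '~ \.(js|css|png)$'
+          ```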
+ - name: url_patterns.name
+ description: Used as a dimension name.
+ default_value: ""
+ required: true
+ - name: url_patterns.pattern
+ description: Used to match against full original request URI. Pattern syntax in [matcher](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/matcher#supported-format).
+ default_value: ""
+ required: true
+ - name: log_type
+ description: Log parser type.
+ default_value: auto
+ required: false
+ detailed_description: |
+ Weblog supports 5 different log parsers:
+
+ | Parser type | Description |
+ |-------------|-------------------------------------------|
+ | auto | Use CSV and auto-detect format |
+          | csv         | Comma-separated values                    |
+ | json | [JSON](https://www.json.org/json-en.html) |
+ | ltsv | [LTSV](http://ltsv.org/) |
+ | regexp | Regular expression with named groups |
+
+ Syntax:
+
+ ```yaml
+ log_type: auto
+ ```
+
+          If the `log_type` parameter is set to `auto` (the default), weblog will try to auto-detect the appropriate log parser and log format using the last line of the log file:
+
+          - checks if the format is `LTSV` (using a regexp).
+          - checks if the format is `JSON` (using a regexp).
+          - assumes the format is `CSV` and tries to find the appropriate `CSV` log format using a predefined list of formats. It tries to parse the line with each of them in the following order (the first one that matches is used):
+
+ ```sh
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time
+ $host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time
+ $remote_addr - - [$time_local] "$request" $status $body_bytes_sent
+ ```
+
+          If you're using the default Apache/NGINX log format, auto-detection will work for you. If it doesn't work, you need to set the format manually.
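+
+          For instance, a minimal sketch of doing that (the format below is just the "common" one from the list above):
+
+          ```yaml
+          log_type: csv
+          csv_config:
+            format: '$remote_addr - - [$time_local] "$request" $status $body_bytes_sent'
+          ```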
+ - name: csv_config
+ description: CSV log parser config.
+ default_value: ""
+ required: false
+ - name: csv_config.delimiter
+ description: CSV field delimiter.
+ default_value: ","
+ required: false
+ - name: csv_config.format
+ description: CSV log format.
+ default_value: ""
+ required: false
+ detailed_description: ""
+ - name: ltsv_config
+ description: LTSV log parser config.
+ default_value: ""
+ required: false
+ - name: ltsv_config.field_delimiter
+ description: LTSV field delimiter.
+ default_value: "\\t"
+ required: false
+ - name: ltsv_config.value_delimiter
+ description: LTSV value delimiter.
+ default_value: ":"
+ required: false
+ - name: ltsv_config.mapping
+ description: LTSV fields mapping to **known fields**.
+ default_value: ""
+ required: true
+ detailed_description: |
+          The mapping is a dictionary where the key is a field as it appears in the logs, and the value is the corresponding **known field**.
+
+ > **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+ ```yaml
+ log_type: ltsv
+ ltsv_config:
+ mapping:
+ label1: field1
+ label2: field2
+ ```
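+
+          A more concrete, purely illustrative mapping (the left-hand labels are whatever your LTSV log actually uses):
+
+          ```yaml
+          log_type: ltsv
+          ltsv_config:
+            mapping:
+              addr: remote_addr
+              req: request
+              size: bytes_sent
+          ```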
+ - name: json_config
+ description: JSON log parser config.
+ default_value: ""
+ required: false
+ - name: json_config.mapping
+ description: JSON fields mapping to **known fields**.
+ default_value: ""
+ required: true
+ detailed_description: |
+          The mapping is a dictionary where the key is a field as it appears in the logs, and the value is the corresponding **known field**.
+
+ > **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+ ```yaml
+ log_type: json
+ json_config:
+ mapping:
+ label1: field1
+ label2: field2
+ ```
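+
+          For instance (illustrative only), if the JSON log uses key names that differ from the known fields:
+
+          ```yaml
+          log_type: json
+          json_config:
+            mapping:
+              vhost: host
+              client: remote_addr
+              resp_time: request_time
+          ```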
+ - name: regexp_config
+ description: RegExp log parser config.
+ default_value: ""
+ required: false
+ - name: regexp_config.pattern
+ description: RegExp pattern with named groups.
+ default_value: ""
+ required: true
+ detailed_description: |
+          Use a pattern with named subexpressions. These names must be **known fields**.
+
+ > **Note**: don't use `$` and `%` prefixes for mapped field names.
+
+ Syntax:
+
+ ```yaml
+ log_type: regexp
+ regexp_config:
+ pattern: PATTERN
+ ```
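+
+          A hedged example for the "common" log format (the group names must be known fields; the expression itself is illustrative, not the module's built-in):
+
+          ```yaml
+          log_type: regexp
+          regexp_config:
+            pattern: '(?P<remote_addr>[^ ]+) - - \[.+\] "(?P<request>[^"]+)" (?P<status>[0-9]+) (?P<body_bytes_sent>[0-9]+)'
+          ```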
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: web_log_1m_unmatched
+ metric: web_log.excluded_requests
+ info: percentage of unparsed log lines over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf
+ - name: web_log_1m_requests
+ metric: web_log.type_requests
+ info: "ratio of successful HTTP requests over the last minute (1xx, 2xx, 304, 401)"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf
+ - name: web_log_1m_redirects
+ metric: web_log.type_requests
+ info: "ratio of redirection HTTP requests over the last minute (3xx except 304)"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf
+ - name: web_log_1m_bad_requests
+ metric: web_log.type_requests
+ info: "ratio of client error HTTP requests over the last minute (4xx except 401)"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf
+ - name: web_log_1m_internal_errors
+ metric: web_log.type_requests
+ info: "ratio of server error HTTP requests over the last minute (5xx)"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf
+ - name: web_log_web_slow
+ metric: web_log.request_processing_time
+ info: average HTTP response time over the last 1 minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf
+ - name: web_log_5m_requests_ratio
+ metric: web_log.type_requests
+      info: ratio of successful HTTP requests over the last 5 minutes, compared with the previous 5 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/web_log.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: web_log.requests
+ description: Total Requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: web_log.excluded_requests
+ description: Excluded Requests
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: unmatched
+ - name: web_log.type_requests
+ description: Requests By Type
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: success
+ - name: bad
+ - name: redirect
+ - name: error
+ - name: web_log.status_code_class_responses
+ description: Responses By Status Code Class
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: 1xx
+ - name: 2xx
+ - name: 3xx
+ - name: 4xx
+ - name: 5xx
+ - name: web_log.status_code_class_1xx_responses
+ description: Informational Responses By Status Code
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per 1xx code
+ - name: web_log.status_code_class_2xx_responses
+ description: Successful Responses By Status Code
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per 2xx code
+ - name: web_log.status_code_class_3xx_responses
+          description: Redirect Responses By Status Code
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per 3xx code
+ - name: web_log.status_code_class_4xx_responses
+          description: Client Error Responses By Status Code
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per 4xx code
+ - name: web_log.status_code_class_5xx_responses
+          description: Server Error Responses By Status Code
+ unit: responses/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per 5xx code
+ - name: web_log.bandwidth
+ description: Bandwidth
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: web_log.request_processing_time
+ description: Request Processing Time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: web_log.requests_processing_time_histogram
+ description: Requests Processing Time Histogram
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: a dimension per bucket
+ - name: web_log.upstream_response_time
+ description: Upstream Response Time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: web_log.upstream_responses_time_histogram
+ description: Upstream Responses Time Histogram
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: a dimension per bucket
+ - name: web_log.current_poll_uniq_clients
+ description: Current Poll Unique Clients
+ unit: clients
+ chart_type: stacked
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: web_log.vhost_requests
+ description: Requests By Vhost
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per vhost
+ - name: web_log.port_requests
+ description: Requests By Port
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per port
+ - name: web_log.scheme_requests
+ description: Requests By Scheme
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: http
+ - name: https
+ - name: web_log.http_method_requests
+ description: Requests By HTTP Method
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per HTTP method
+ - name: web_log.http_version_requests
+ description: Requests By HTTP Version
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per HTTP version
+ - name: web_log.ip_proto_requests
+ description: Requests By IP Protocol
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: web_log.ssl_proto_requests
+ description: Requests By SSL Connection Protocol
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per SSL protocol
+ - name: web_log.ssl_cipher_suite_requests
+ description: Requests By SSL Connection Cipher Suite
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per SSL cipher suite
+ - name: web_log.url_pattern_requests
+ description: URL Field Requests By Pattern
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per URL pattern
+ - name: web_log.custom_field_pattern_requests
+ description: Custom Field Requests By Pattern
+ unit: requests/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per custom field pattern
+ - name: custom time field
+        description: These metrics refer to the custom time field.
+ labels: []
+ metrics:
+ - name: web_log.custom_time_field_summary
+ description: Custom Time Field Summary
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: web_log.custom_time_field_histogram
+ description: Custom Time Field Histogram
+ unit: observations
+ chart_type: line
+ dimensions:
+ - name: a dimension per bucket
+ - name: custom numeric field
+        description: These metrics refer to the custom numeric field.
+ labels: []
+ metrics:
+ - name: web_log.custom_numeric_field_{{field_name}}_summary
+ description: Custom Numeric Field Summary
+ unit: '{{units}}'
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
+ - name: URL pattern
+        description: These metrics refer to the URL pattern.
+ labels: []
+ metrics:
+ - name: web_log.url_pattern_status_code_responses
+ description: Responses By Status Code
+ unit: responses/s
+ chart_type: line
+ dimensions:
+ - name: a dimension per pattern
+ - name: web_log.url_pattern_http_method_requests
+ description: Requests By HTTP Method
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: a dimension per HTTP method
+ - name: web_log.url_pattern_bandwidth
+ description: Bandwidth
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: web_log.url_pattern_request_processing_time
+ description: Request Processing Time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: max
+ - name: avg
diff --git a/src/go/plugin/go.d/modules/weblog/metrics.go b/src/go/plugin/go.d/modules/weblog/metrics.go
new file mode 100644
index 000000000..30618df8a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/metrics.go
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+)
+
+func newWebLogSummary() metrics.Summary {
+ return &weblogSummary{metrics.NewSummary()}
+}
+
+type weblogSummary struct {
+ metrics.Summary
+}
+
+// WriteTo redefines metrics.Summary.WriteTo
+// TODO: temporary workaround?
+func (s weblogSummary) WriteTo(rv map[string]int64, key string, mul, div int) {
+ s.Summary.WriteTo(rv, key, mul, div)
+ if _, ok := rv[key+"_min"]; !ok {
+ rv[key+"_min"] = 0
+ rv[key+"_max"] = 0
+ rv[key+"_avg"] = 0
+ }
+}
+
+type (
+ metricsData struct {
+ Requests metrics.Counter `stm:"requests"`
+ ReqUnmatched metrics.Counter `stm:"req_unmatched"`
+
+ RespCode metrics.CounterVec `stm:"resp_code"`
+ Resp1xx metrics.Counter `stm:"resp_1xx"`
+ Resp2xx metrics.Counter `stm:"resp_2xx"`
+ Resp3xx metrics.Counter `stm:"resp_3xx"`
+ Resp4xx metrics.Counter `stm:"resp_4xx"`
+ Resp5xx metrics.Counter `stm:"resp_5xx"`
+
+ ReqSuccess metrics.Counter `stm:"req_type_success"`
+ ReqRedirect metrics.Counter `stm:"req_type_redirect"`
+ ReqBad metrics.Counter `stm:"req_type_bad"`
+ ReqError metrics.Counter `stm:"req_type_error"`
+
+ UniqueIPv4 metrics.UniqueCounter `stm:"uniq_ipv4"`
+ UniqueIPv6 metrics.UniqueCounter `stm:"uniq_ipv6"`
+ BytesSent metrics.Counter `stm:"bytes_sent"`
+ BytesReceived metrics.Counter `stm:"bytes_received"`
+ ReqProcTime metrics.Summary `stm:"req_proc_time"`
+ ReqProcTimeHist metrics.Histogram `stm:"req_proc_time_hist"`
+ UpsRespTime metrics.Summary `stm:"upstream_resp_time"`
+ UpsRespTimeHist metrics.Histogram `stm:"upstream_resp_time_hist"`
+
+ ReqVhost metrics.CounterVec `stm:"req_vhost"`
+ ReqPort metrics.CounterVec `stm:"req_port"`
+ ReqMethod metrics.CounterVec `stm:"req_method"`
+ ReqURLPattern metrics.CounterVec `stm:"req_url_ptn"`
+ ReqVersion metrics.CounterVec `stm:"req_version"`
+ ReqSSLProto metrics.CounterVec `stm:"req_ssl_proto"`
+ ReqSSLCipherSuite metrics.CounterVec `stm:"req_ssl_cipher_suite"`
+ ReqHTTPScheme metrics.Counter `stm:"req_http_scheme"`
+ ReqHTTPSScheme metrics.Counter `stm:"req_https_scheme"`
+ ReqIPv4 metrics.Counter `stm:"req_ipv4"`
+ ReqIPv6 metrics.Counter `stm:"req_ipv6"`
+
+ ReqCustomField map[string]metrics.CounterVec `stm:"custom_field"`
+ URLPatternStats map[string]*patternMetrics `stm:"url_ptn"`
+
+ ReqCustomTimeField map[string]*customTimeFieldMetrics `stm:"custom_time_field"`
+ ReqCustomNumericField map[string]*customNumericFieldMetrics `stm:"custom_numeric_field"`
+ }
+ customTimeFieldMetrics struct {
+ Time metrics.Summary `stm:"time"`
+ TimeHist metrics.Histogram `stm:"time_hist"`
+ }
+ customNumericFieldMetrics struct {
+ Summary metrics.Summary `stm:"summary"`
+
+ multiplier int
+ divisor int
+ }
+ patternMetrics struct {
+ RespCode metrics.CounterVec `stm:"resp_code"`
+ ReqMethod metrics.CounterVec `stm:"req_method"`
+ BytesSent metrics.Counter `stm:"bytes_sent"`
+ BytesReceived metrics.Counter `stm:"bytes_received"`
+ ReqProcTime metrics.Summary `stm:"req_proc_time"`
+ }
+)
+
+func newMetricsData(config Config) *metricsData {
+ return &metricsData{
+ ReqVhost: metrics.NewCounterVec(),
+ ReqPort: metrics.NewCounterVec(),
+ ReqMethod: metrics.NewCounterVec(),
+ ReqVersion: metrics.NewCounterVec(),
+ RespCode: metrics.NewCounterVec(),
+ ReqSSLProto: metrics.NewCounterVec(),
+ ReqSSLCipherSuite: metrics.NewCounterVec(),
+ ReqProcTime: newWebLogSummary(),
+ ReqProcTimeHist: metrics.NewHistogram(convHistOptionsToMicroseconds(config.Histogram)),
+ UpsRespTime: newWebLogSummary(),
+ UpsRespTimeHist: metrics.NewHistogram(convHistOptionsToMicroseconds(config.Histogram)),
+ UniqueIPv4: metrics.NewUniqueCounter(true),
+ UniqueIPv6: metrics.NewUniqueCounter(true),
+ ReqURLPattern: newCounterVecFromPatterns(config.URLPatterns),
+ ReqCustomField: newReqCustomField(config.CustomFields),
+ URLPatternStats: newURLPatternStats(config.URLPatterns),
+ ReqCustomTimeField: newReqCustomTimeField(config.CustomTimeFields),
+ ReqCustomNumericField: newReqCustomNumericField(config.CustomNumericFields),
+ }
+}
+
+func (m *metricsData) reset() {
+ m.UniqueIPv4.Reset()
+ m.UniqueIPv6.Reset()
+ m.ReqProcTime.Reset()
+ m.UpsRespTime.Reset()
+ for _, v := range m.URLPatternStats {
+ v.ReqProcTime.Reset()
+ }
+ for _, v := range m.ReqCustomTimeField {
+ v.Time.Reset()
+ }
+ for _, v := range m.ReqCustomNumericField {
+ v.Summary.Reset()
+ }
+}
+
+func newCounterVecFromPatterns(patterns []userPattern) metrics.CounterVec {
+ c := metrics.NewCounterVec()
+ for _, p := range patterns {
+ _, _ = c.GetP(p.Name)
+ }
+ return c
+}
+
+func newURLPatternStats(patterns []userPattern) map[string]*patternMetrics {
+ stats := make(map[string]*patternMetrics)
+ for _, p := range patterns {
+ stats[p.Name] = &patternMetrics{
+ RespCode: metrics.NewCounterVec(),
+ ReqMethod: metrics.NewCounterVec(),
+ ReqProcTime: newWebLogSummary(),
+ }
+ }
+ return stats
+}
+
+func newReqCustomField(fields []customField) map[string]metrics.CounterVec {
+ cf := make(map[string]metrics.CounterVec)
+ for _, f := range fields {
+ cf[f.Name] = newCounterVecFromPatterns(f.Patterns)
+ }
+ return cf
+}
+
+func newReqCustomTimeField(fields []customTimeField) map[string]*customTimeFieldMetrics {
+ cf := make(map[string]*customTimeFieldMetrics)
+ for _, f := range fields {
+ cf[f.Name] = &customTimeFieldMetrics{
+ Time: newWebLogSummary(),
+ TimeHist: metrics.NewHistogram(convHistOptionsToMicroseconds(f.Histogram)),
+ }
+ }
+ return cf
+}
+
+func newReqCustomNumericField(fields []customNumericField) map[string]*customNumericFieldMetrics {
+ rv := make(map[string]*customNumericFieldMetrics)
+ for _, f := range fields {
+ rv[f.Name] = &customNumericFieldMetrics{
+ Summary: newWebLogSummary(),
+ multiplier: f.Multiplier,
+ divisor: f.Divisor,
+ }
+ }
+ return rv
+}
+
+// convert histogram options to microseconds (second => us)
+func convHistOptionsToMicroseconds(histogram []float64) []float64 {
+ var buckets []float64
+ for _, value := range histogram {
+ buckets = append(buckets, value*1e6)
+ }
+ return buckets
+}
diff --git a/src/go/plugin/go.d/modules/weblog/parser.go b/src/go/plugin/go.d/modules/weblog/parser.go
new file mode 100644
index 000000000..f765b1e03
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/parser.go
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+)
+
+/*
+Default apache log format:
+ - "%v:%p %h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" vhost_combined
+ - "%h %l %u %t \"%r\" %>s %O \"%{Referer}i\" \"%{User-Agent}i\"" combined
+ - "%h %l %u %t \"%r\" %>s %O" common
+ - "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-agent}i\" %I %O" Combined I/O (https://httpd.apache.org/docs/2.4/mod/mod_logio.html)
+
+Default nginx log format:
+ - '$remote_addr - $remote_user [$time_local] '
+ '"$request" $status $body_bytes_sent '
+ '"$http_referer" "$http_user_agent"' combined
+
+Netdata recommends:
+ Nginx:
+ - '$remote_addr - $remote_user [$time_local] '
+ '"$request" $status $body_bytes_sent '
+ '$request_length $request_time $upstream_response_time '
+ '"$http_referer" "$http_user_agent"'
+
+ Apache:
+ - "%h %l %u %t \"%r\" %>s %B %I %D \"%{Referer}i\" \"%{User-Agent}i\""
+*/
+
+var (
+ csvCommon = ` $remote_addr - - [$time_local] "$request" $status $body_bytes_sent`
+ csvCustom1 = ` $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time`
+ csvCustom2 = ` $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time`
+ csvCustom3 = ` $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time`
+ csvCustom4 = ` $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time`
+ csvVhostCommon = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent`
+ csvVhostCustom1 = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time`
+ csvVhostCustom2 = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent $request_length $request_time $upstream_response_time`
+ csvVhostCustom3 = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time`
+ csvVhostCustom4 = `$host:$server_port $remote_addr - - [$time_local] "$request" $status $body_bytes_sent - - $request_length $request_time $upstream_response_time`
+
+ guessOrder = []string{
+ csvVhostCustom4,
+ csvVhostCustom3,
+ csvVhostCustom2,
+ csvVhostCustom1,
+ csvVhostCommon,
+ csvCustom4,
+ csvCustom3,
+ csvCustom2,
+ csvCustom1,
+ csvCommon,
+ }
+)
+
+func cleanCSVFormat(format string) string { return strings.Join(strings.Fields(format), " ") }
+func cleanApacheLogFormat(format string) string { return strings.ReplaceAll(format, `\`, "") }
+
+const (
+ typeAuto = "auto"
+)
+
+var (
+ reLTSV = regexp.MustCompile(`^[a-zA-Z0-9]+:[^\t]*(\t[a-zA-Z0-9]+:[^\t]*)*$`)
+ reJSON = regexp.MustCompile(`^[[:space:]]*{.*}[[:space:]]*$`)
+)
+
+func (w *WebLog) newParser(record []byte) (logs.Parser, error) {
+ if w.ParserConfig.LogType == typeAuto {
+ w.Debugf("log_type is %s, will try format auto-detection", typeAuto)
+ if len(record) == 0 {
+ return nil, fmt.Errorf("empty line, can't auto-detect format (%s)", w.file.CurrentFilename())
+ }
+ return w.guessParser(record)
+ }
+
+ w.ParserConfig.CSV.Format = cleanApacheLogFormat(w.ParserConfig.CSV.Format)
+ w.Debugf("log_type is %s, skipping auto-detection", w.ParserConfig.LogType)
+ switch w.ParserConfig.LogType {
+ case logs.TypeCSV:
+ w.Debugf("config: %+v", w.ParserConfig.CSV)
+ case logs.TypeLTSV:
+		w.Debugf("config: %+v", w.ParserConfig.LTSV)
+ case logs.TypeRegExp:
+ w.Debugf("config: %+v", w.ParserConfig.RegExp)
+ case logs.TypeJSON:
+ w.Debugf("config: %+v", w.ParserConfig.JSON)
+ }
+ return logs.NewParser(w.ParserConfig, w.file)
+}
+
+func (w *WebLog) guessParser(record []byte) (logs.Parser, error) {
+ w.Debug("starting log type auto-detection")
+ if reLTSV.Match(record) {
+ w.Debug("log type is LTSV")
+ return logs.NewLTSVParser(w.ParserConfig.LTSV, w.file)
+ }
+ if reJSON.Match(record) {
+ w.Debug("log type is JSON")
+ return logs.NewJSONParser(w.ParserConfig.JSON, w.file)
+ }
+ w.Debug("log type is CSV")
+ return w.guessCSVParser(record)
+}
+
+func (w *WebLog) guessCSVParser(record []byte) (logs.Parser, error) {
+ w.Debug("starting csv log format auto-detection")
+ w.Debugf("config: %+v", w.ParserConfig.CSV)
+ for _, format := range guessOrder {
+ format = cleanCSVFormat(format)
+ cfg := w.ParserConfig.CSV
+ cfg.Format = format
+
+ w.Debugf("trying format: '%s'", format)
+ parser, err := logs.NewCSVParser(cfg, w.file)
+ if err != nil {
+ return nil, err
+ }
+
+ line := newEmptyLogLine()
+ if err := parser.Parse(record, line); err != nil {
+ w.Debug("parse: ", err)
+ continue
+ }
+
+ if err = line.verify(); err != nil {
+ w.Debug("verify: ", err)
+ continue
+ }
+ return parser, nil
+ }
+ return nil, errors.New("cannot auto-detect log format, use custom log format")
+}
+
+func checkCSVFormatField(field string) (newName string, offset int, valid bool) {
+ if isTimeField(field) {
+ return "", 1, false
+ }
+ if !isFieldValid(field) {
+ return "", 0, false
+ }
+ // remove `$` and `%` to have same field names with regexp parser,
+ // these symbols aren't allowed in sub exp names
+ return field[1:], 0, true
+}
+
+func isTimeField(field string) bool {
+ return field == "[$time_local]" || field == "$time_local" || field == "%t"
+}
+
+func isFieldValid(field string) bool {
+ return len(field) > 1 && (isNginxField(field) || isApacheField(field))
+}
+func isNginxField(field string) bool {
+ return strings.HasPrefix(field, "$")
+}
+
+func isApacheField(field string) bool {
+ return strings.HasPrefix(field, "%")
+}
diff --git a/src/go/plugin/go.d/modules/weblog/parser_test.go b/src/go/plugin/go.d/modules/weblog/parser_test.go
new file mode 100644
index 000000000..1ccbc020d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/parser_test.go
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestWebLog_guessParser(t *testing.T) {
+ type test = struct {
+ name string
+ inputs []string
+ wantParserType string
+ wantErr bool
+ }
+ tests := []test{
+ {
+ name: "guessed csv",
+ wantParserType: logs.TypeCSV,
+ inputs: []string{
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123 0.123,0.321`,
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123`,
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123 0.123,0.321`,
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123`,
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`,
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123 0.123,0.321`,
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123`,
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123 0.123,0.321`,
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123`,
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`,
+ },
+ },
+ {
+ name: "guessed ltsv",
+ wantParserType: logs.TypeLTSV,
+ inputs: []string{
+ `field1:test.example.com:80 field2:88.191.254.20 field3:"GET / HTTP/1.0" 200 8674 field4:8674 field5:0.123`,
+ },
+ },
+ {
+ name: "guessed json",
+ wantParserType: logs.TypeJSON,
+ inputs: []string{
+ `{}`,
+ ` {}`,
+ ` {} `,
+ `{"host": "example.com"}`,
+ `{"host": "example.com","time": "2020-08-04T20:23:27+03:00", "upstream_response_time": "0.776", "remote_addr": "1.2.3.4"}`,
+ ` {"host": "example.com","time": "2020-08-04T20:23:27+03:00", "upstream_response_time": "0.776", "remote_addr": "1.2.3.4"} `,
+ },
+ },
+ {
+ name: "unknown",
+ wantErr: true,
+ inputs: []string{
+ `test.example.com 80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`,
+ `test.example.com 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`,
+ },
+ },
+ }
+
+ weblog := prepareWebLog()
+
+ for _, tc := range tests {
+ for i, input := range tc.inputs {
+ name := fmt.Sprintf("name=%s,input_num=%d", tc.name, i+1)
+
+ t.Run(name, func(t *testing.T) {
+ p, err := weblog.newParser([]byte(input))
+
+ if tc.wantErr {
+ assert.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ switch tc.wantParserType {
+ default:
+ t.Errorf("unknown parser type: %s", tc.wantParserType)
+ case logs.TypeLTSV:
+ assert.IsType(t, (*logs.LTSVParser)(nil), p)
+ case logs.TypeCSV:
+ require.IsType(t, (*logs.CSVParser)(nil), p)
+ case logs.TypeJSON:
+ require.IsType(t, (*logs.JSONParser)(nil), p)
+ }
+ }
+ })
+ }
+ }
+}
+
+func TestWebLog_guessCSVParser(t *testing.T) {
+ type test = struct {
+ name string
+ inputs []string
+ wantCSVFormat string
+ wantErr bool
+ }
+ tests := []test{
+ {
+ name: "guessed vhost custom4",
+ wantCSVFormat: csvVhostCustom4,
+ inputs: []string{
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123 0.123,0.321`,
+ },
+ },
+ {
+ name: "guessed vhost custom3",
+ wantCSVFormat: csvVhostCustom3,
+ inputs: []string{
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123`,
+ },
+ },
+ {
+ name: "guessed vhost custom2",
+ wantCSVFormat: csvVhostCustom2,
+ inputs: []string{
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123 0.123,0.321`,
+ },
+ },
+ {
+ name: "guessed vhost custom1",
+ wantCSVFormat: csvVhostCustom1,
+ inputs: []string{
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123`,
+ },
+ },
+ {
+ name: "guessed vhost common",
+ wantCSVFormat: csvVhostCommon,
+ inputs: []string{
+ `test.example.com:80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`,
+ },
+ },
+ {
+ name: "guessed custom4",
+ wantCSVFormat: csvCustom4,
+ inputs: []string{
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123 0.123,0.321`,
+ },
+ },
+ {
+ name: "guessed custom3",
+ wantCSVFormat: csvCustom3,
+ inputs: []string{
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 "-" "-" 8674 0.123`,
+ },
+ },
+ {
+ name: "guessed custom2",
+ wantCSVFormat: csvCustom2,
+ inputs: []string{
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123 0.123,0.321`,
+ },
+ },
+ {
+ name: "guessed custom1",
+ wantCSVFormat: csvCustom1,
+ inputs: []string{
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674 8674 0.123`,
+ },
+ },
+ {
+ name: "guessed common",
+ wantCSVFormat: csvCommon,
+ inputs: []string{
+ `88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`,
+ },
+ },
+ {
+ name: "unknown",
+ wantErr: true,
+ inputs: []string{
+ `test.example.com 80 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`,
+ `test.example.com 88.191.254.20 - - [22/Mar/2009:09:30:31 +0100] "GET / HTTP/1.0" 200 8674`,
+ },
+ },
+ }
+
+ weblog := prepareWebLog()
+
+	for _, tc := range tests {
+		for i, input := range tc.inputs {
+ name := fmt.Sprintf("name=%s,input_num=%d", tc.name, i+1)
+
+ t.Run(name, func(t *testing.T) {
+ p, err := weblog.guessCSVParser([]byte(input))
+
+ if tc.wantErr {
+ assert.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ assert.Equal(t, cleanCSVFormat(tc.wantCSVFormat), p.(*logs.CSVParser).Config.Format)
+ }
+ })
+ }
+ }
+}
+
+func prepareWebLog() *WebLog {
+ cfg := logs.ParserConfig{
+ LogType: typeAuto,
+ CSV: logs.CSVConfig{
+ Delimiter: " ",
+ CheckField: checkCSVFormatField,
+ },
+ LTSV: logs.LTSVConfig{
+ FieldDelimiter: "\t",
+ ValueDelimiter: ":",
+ },
+ }
+
+ return &WebLog{
+ Config: Config{
+ GroupRespCodes: false,
+ ParserConfig: cfg,
+ },
+ }
+}
diff --git a/src/go/plugin/go.d/modules/weblog/testdata/common.log b/src/go/plugin/go.d/modules/weblog/testdata/common.log
new file mode 100644
index 000000000..6860d13e8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/testdata/common.log
@@ -0,0 +1,500 @@
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 100 3441
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 100 4065
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 300 3258
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 201 3189
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 100 3852
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 101 4710
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 400 4091
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 400 4142
+Unmatched! The rat the cat the dog chased killed ate the malt!
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 201 4480
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 100 2554
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 300 2698
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 300 2048
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 101 4678
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 301 1077
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 300 4949
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 300 4170
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 400 3962
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 201 2109
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 400 4028
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 301 2446
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 100 1748
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 301 4185
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 200 2775
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 401 4280
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 401 1592
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 401 2005
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 101 1867
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 401 4866
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 201 4371
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 400 1395
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 300 3549
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 101 2857
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 300 3548
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 301 4773
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 301 4825
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 400 1039
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 101 3619
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 100 3919
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 101 3136
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 400 2415
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 200 4448
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 101 2639
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 200 3251
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 400 4026
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 100 4450
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 100 2267
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 101 4747
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 100 4046
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 400 4818
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 101 3944
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 100 4152
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 101 3407
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 300 4683
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 400 1284
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 401 1221
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 400 2922
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 101 4388
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 401 1636
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 100 3518
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 300 2637
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 100 3566
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 100 3088
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 301 3379
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 400 3304
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 201 2772
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 200 4284
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 401 4486
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 201 2768
+Unmatched! The rat the cat the dog chased killed ate the malt!
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 300 3414
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 401 3377
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 400 3646
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 201 1290
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 300 2500
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 300 4473
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 101 1985
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 400 2607
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 400 1468
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 100 1584
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 400 4366
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 201 3121
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 201 4888
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 100 1723
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 300 3593
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 301 3139
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 301 1915
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 400 1381
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 300 3801
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 301 4757
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 400 2553
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 200 1241
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 100 3723
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 400 2236
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 100 3375
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 100 2941
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 201 3199
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 300 3117
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 400 4041
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 100 1962
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 100 4868
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 101 2810
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 300 2858
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 301 1398
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 200 4304
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 100 3121
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 100 3621
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 201 1922
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 101 1857
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 101 4671
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 301 4404
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 400 1552
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 300 1506
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 401 4942
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 1569
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 101 4946
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 101 2884
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 301 1487
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 100 1488
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 300 2931
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 100 4186
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 100 2110
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 200 1802
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 201 3690
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 300 4811
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 300 2055
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 300 3964
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 201 4282
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 400 4813
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 401 1438
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 100 2254
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 200 4812
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 401 1735
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 301 1363
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 101 3294
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 401 4179
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 401 1844
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 200 3677
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 201 2056
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 200 4041
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 101 3850
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 301 1990
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 200 1729
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 301 4426
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 101 1615
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 200 2683
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 301 3379
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 300 3702
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 301 2462
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 100 4250
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 301 1470
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 200 4572
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 300 4562
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 4339
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 301 1565
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 100 3779
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 100 1372
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 201 2457
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 201 1455
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 100 3573
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 400 2048
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 300 1723
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 301 3720
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 400 4014
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 100 3846
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 400 1773
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 2261
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 300 1630
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 300 3378
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 301 1974
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 101 3055
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 301 1350
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 300 2210
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 100 2339
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 400 2380
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 201 3880
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 100 1334
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 300 3683
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 200 4519
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 300 1549
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 301 1371
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 401 1601
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 301 3826
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 101 2260
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 200 2497
+Unmatched! The rat the cat the dog chased killed ate the malt!
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 100 3076
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 200 3126
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 100 2180
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 400 3291
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 100 1268
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 400 1836
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 101 2953
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 201 4018
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 301 3686
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 401 3320
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 300 1473
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 101 3257
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 300 3530
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 201 3109
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 400 4815
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 100 4414
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 401 4290
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 100 2060
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 100 4651
+Unmatched! The rat the cat the dog chased killed ate the malt!
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 200 1378
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 401 2666
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 400 3376
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 200 4009
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 400 2307
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 100 2928
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 400 4048
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 400 3902
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 201 1512
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 100 4776
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 201 4791
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 201 3219
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 401 3020
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 101 4867
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 401 1276
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 201 3313
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 200 1350
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 101 3561
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 201 4382
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 401 4487
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 401 4595
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 301 1727
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 301 4103
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 100 1454
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 4990
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 300 3753
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 401 3445
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 101 1295
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 301 3430
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 201 3746
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 400 3578
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 401 1389
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 200 1889
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 200 3680
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 300 4623
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 300 1016
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 300 4078
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 100 1023
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 400 4407
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 100 4704
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 401 3575
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 300 1013
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 400 4512
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 100 2563
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 101 2379
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 200 3616
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 401 2782
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 201 3324
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 300 3157
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 301 1299
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 201 3768
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 201 1550
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 200 4683
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 401 4689
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 300 1400
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 300 1234
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 101 4018
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 1981
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 201 4646
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 201 4767
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 101 4446
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 101 1829
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 401 3967
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 300 4347
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 400 1753
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 201 3592
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 401 3249
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 101 1917
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 101 3295
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 201 2958
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 300 1445
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 301 1025
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 201 2088
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 300 2029
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 401 1157
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 4675
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 100 4606
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 201 1227
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 300 1869
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 200 1614
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 201 4878
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 100 1813
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 100 1643
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 100 3488
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 300 1844
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 300 3527
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 100 4655
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 401 2628
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 300 2380
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 200 1059
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 400 4336
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 100 3951
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 200 4708
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 300 3364
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 101 2704
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 4399
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 4365
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 201 3905
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 300 3544
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 101 2718
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 100 1165
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 100 4053
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 300 1351
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 101 2537
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 100 2934
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 201 3186
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 301 4225
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 200 3432
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 101 2079
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 400 1823
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 101 3692
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 200 2169
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 300 4244
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 200 2605
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 300 2472
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 301 1415
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 101 3667
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 301 3214
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 201 1689
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 201 2180
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 300 1237
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 100 4821
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 201 3739
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 100 4644
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 100 1926
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 400 3835
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 401 2216
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 101 4270
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 300 4876
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 101 2917
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 201 1429
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 400 3952
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 100 1688
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 201 2935
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 300 1968
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 100 2139
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 400 2399
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 201 3705
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 100 1810
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 300 2679
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 301 3638
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 200 1078
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 401 1648
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 100 4064
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 300 4981
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 200 3685
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 201 1145
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 300 1766
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 401 4867
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 101 2972
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 101 3389
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 300 1911
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 4083
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 100 1841
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 301 3929
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 100 2529
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 301 4904
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 401 3593
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 300 3434
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 201 2610
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 301 3577
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 301 1099
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 401 1355
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 1913
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 3582
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 401 1974
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 2248
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 401 4714
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 200 4414
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 400 4661
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 200 2206
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 301 4863
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 100 2792
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 3458
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 401 3559
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 200 3430
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 301 3977
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 400 1199
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 100 3822
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 300 1481
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 100 4760
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 101 1228
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 401 3825
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 400 2678
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 201 1750
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 100 2791
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 100 2895
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 401 4285
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 300 1756
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 200 3869
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 300 4503
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 401 2535
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 301 1316
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 400 2593
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 301 4991
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 101 3336
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 400 2385
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 400 2640
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 401 3748
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 401 1633
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 201 2563
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 400 4912
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 300 4293
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 201 1866
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 200 3271
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 201 4323
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 400 4882
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 300 2762
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 101 1540
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 400 3108
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 301 1775
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 101 2246
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 200 2510
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 300 4898
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 401 3470
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 100 2392
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 400 1805
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 100 2343
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 201 3486
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 200 4805
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 401 1072
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 101 1301
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 300 3148
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 301 3699
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 200 1926
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 100 2011
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 300 2200
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 401 4598
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 201 2969
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 100 3458
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 400 3912
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 301 1370
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 401 2694
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 200 4528
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 301 3490
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 100 2722
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 300 4815
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 300 3511
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 201 1496
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 100 4312
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 100 3768
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 101 3636
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 401 3300
+2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 301 3662
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 400 3264
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 201 3647
+203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 300 1024
+Unmatched! The rat the cat the dog chased killed ate the malt!
+203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 101 1470
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 200 1720
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 301 1130
+2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 401 4736
+localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 200 1955
+localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 401 4246
+localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 200 3138 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/weblog/testdata/config.json b/src/go/plugin/go.d/modules/weblog/testdata/config.json
new file mode 100644
index 000000000..80b51736d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/testdata/config.json
@@ -0,0 +1,64 @@
+{
+ "update_every": 123,
+ "path": "ok",
+ "exclude_path": "ok",
+ "log_type": "ok",
+ "csv_config": {
+ "fields_per_record": 123,
+ "delimiter": "ok",
+ "trim_leading_space": true,
+ "format": "ok"
+ },
+ "ltsv_config": {
+ "field_delimiter": "ok",
+ "value_delimiter": "ok",
+ "mapping": {
+ "ok": "ok"
+ }
+ },
+ "regexp_config": {
+ "pattern": "ok"
+ },
+ "json_config": {
+ "mapping": {
+ "ok": "ok"
+ }
+ },
+ "url_patterns": [
+ {
+ "name": "ok",
+ "match": "ok"
+ }
+ ],
+ "custom_fields": [
+ {
+ "name": "ok",
+ "patterns": [
+ {
+ "name": "ok",
+ "match": "ok"
+ }
+ ]
+ }
+ ],
+ "custom_time_fields": [
+ {
+ "name": "ok",
+ "histogram": [
+ 123.123
+ ]
+ }
+ ],
+ "custom_numeric_fields": [
+ {
+ "name": "ok",
+ "units": "ok",
+ "multiplier": 123,
+ "divisor": 123
+ }
+ ],
+ "histogram": [
+ 123.123
+ ],
+ "group_response_codes": true
+}
diff --git a/src/go/plugin/go.d/modules/weblog/testdata/config.yaml b/src/go/plugin/go.d/modules/weblog/testdata/config.yaml
new file mode 100644
index 000000000..64f60763a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/testdata/config.yaml
@@ -0,0 +1,39 @@
+update_every: 123
+path: "ok"
+exclude_path: "ok"
+log_type: "ok"
+csv_config:
+ fields_per_record: 123
+ delimiter: "ok"
+ trim_leading_space: yes
+ format: "ok"
+ltsv_config:
+ field_delimiter: "ok"
+ value_delimiter: "ok"
+ mapping:
+ ok: "ok"
+regexp_config:
+ pattern: "ok"
+json_config:
+ mapping:
+ ok: "ok"
+url_patterns:
+ - name: "ok"
+ match: "ok"
+custom_fields:
+ - name: "ok"
+ patterns:
+ - name: "ok"
+ match: "ok"
+custom_time_fields:
+ - name: "ok"
+ histogram:
+ - 123.123
+custom_numeric_fields:
+ - name: "ok"
+ units: "ok"
+ multiplier: 123
+ divisor: 123
+histogram:
+ - 123.123
+group_response_codes: yes
diff --git a/src/go/plugin/go.d/modules/weblog/testdata/custom.log b/src/go/plugin/go.d/modules/weblog/testdata/custom.log
new file mode 100644
index 000000000..f2ea80bdb
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/testdata/custom.log
@@ -0,0 +1,100 @@
+dark beer
+dark beer
+light wine
+light beer
+dark wine
+dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+light wine
+dark beer
+light wine
+light wine
+dark beer
+dark wine
+dark wine
+light wine
+light beer
+light wine
+light beer
+light beer
+light beer
+dark beer
+light wine
+dark beer
+light beer
+light wine
+dark wine
+dark wine
+light wine
+light beer
+light wine
+dark wine
+light wine
+light wine
+dark beer
+light wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+light beer
+dark beer
+dark beer
+light beer
+dark beer
+dark wine
+light beer
+light wine
+light beer
+light wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+dark wine
+dark beer
+light beer
+light wine
+dark beer
+light wine
+dark wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+light beer
+dark wine
+dark wine
+Unmatched! The rat the cat the dog chased killed ate the malt!
+dark beer
+light wine
+dark wine
+dark wine
+light beer
+dark wine
+dark beer
+light beer
+light wine
+dark beer
+dark beer
+dark beer
+dark beer
+light wine
+light beer
+dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+dark beer
+light beer
+dark wine
+dark beer
+dark beer
+dark beer
+light wine
+light beer
+light beer
+dark beer
+dark beer
+light beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+light wine
+dark beer
+light wine
+dark beer
+light wine
+light beer
+dark wine
+dark beer
+Unmatched! The rat the cat the dog chased killed ate the malt!
+dark beer
+light beer \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/weblog/testdata/custom_time_fields.log b/src/go/plugin/go.d/modules/weblog/testdata/custom_time_fields.log
new file mode 100644
index 000000000..9d01fb9bc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/testdata/custom_time_fields.log
@@ -0,0 +1,72 @@
+121 321
+431 123
+121 321
+121 321
+121 321
+431 123
+121 321
+121 321
+431 123
+121 321
+121 321
+431 123
+121 321
+431 123
+121 321
+431 123
+121 321
+121 321
+121 321
+431 123
+121 321
+431 123
+121 321
+121 321
+431 123
+121 321
+121 321
+121 321
+431 123
+121 321
+121 321
+431 123
+121 321
+121 321
+431 123
+121 321
+431 123
+121 321
+431 123
+121 321
+121 321
+121 321
+431 123
+121 321
+431 123
+121 321
+121 321
+121 321
+431 123
+121 321
+121 321
+121 321
+431 123
+121 321
+121 321
+431 123
+121 321
+121 321
+431 123
+121 321
+431 123
+121 321
+431 123
+121 321
+121 321
+121 321
+431 123
+121 321
+431 123
+121 321
+121 321
+121 321
diff --git a/src/go/plugin/go.d/modules/weblog/testdata/full.log b/src/go/plugin/go.d/modules/weblog/testdata/full.log
new file mode 100644
index 000000000..460e62127
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/testdata/full.log
@@ -0,0 +1,500 @@
+198.51.100.1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 301 4715 4113 174 465 https TLSv1.2 ECDHE-RSA-AES256-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 1130 1202 409 450 https TLSv1 DHE-RSA-AES256-SHA light beer 230
+198.51.100.1:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 201 4020 1217 492 135 https TLSv1.2 PSK-RC4-SHA light wine 230
+test.example.org:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 401 3784 2349 266 63 http TLSv1 ECDHE-RSA-AES256-SHA dark wine 230
+localhost:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 201 2149 3834 178 197 https TLSv1.1 AES256-SHA dark wine 230
+198.51.100.1:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 200 1442 4125 23 197 https TLSv1.3 DHE-RSA-AES256-SHA light wine 230
+test.example.com:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 300 4134 3965 259 296 https TLSv1.3 PSK-RC4-SHA dark wine 230
+test.example.com:84 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 401 1224 3352 135 468 http SSLv2 PSK-RC4-SHA light wine 230
+localhost:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 200 2504 4754 58 371 http TLSv1.1 DHE-RSA-AES256-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 200 4898 2787 398 476 http SSLv2 DHE-RSA-AES256-SHA dark beer 230
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 100 4957 1848 324 158 https TLSv1.2 AES256-SHA dark wine 230
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 301 1752 1717 75 317 https SSLv3 PSK-RC4-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 301 3799 4120 71 17 http TLSv1.3 ECDHE-RSA-AES256-SHA dark beer 230
+198.51.100.1:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 101 1870 3945 392 323 http TLSv1.1 PSK-RC4-SHA light beer 230
+test.example.com:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 200 1261 3535 52 271 https TLSv1.1 DHE-RSA-AES256-SHA dark wine 230
+test.example.com:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 101 3228 3545 476 168 http TLSv1.1 AES256-SHA light beer 230
+test.example.com:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 300 4731 1574 362 184 https SSLv2 ECDHE-RSA-AES256-SHA light wine 230
+198.51.100.1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 300 4868 1803 23 388 https TLSv1.3 DHE-RSA-AES256-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 100 3744 3546 296 437 http SSLv2 DHE-RSA-AES256-SHA light beer 230
+test.example.org:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 401 4858 1493 151 240 http SSLv2 AES256-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 300 1367 4284 45 443 https TLSv1.1 AES256-SHA light beer 230
+localhost:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 100 4392 4982 143 110 http SSLv3 AES256-SHA light beer 230
+2001:db8:1ce::1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 101 4606 3311 410 273 https TLSv1 PSK-RC4-SHA dark beer 230
+198.51.100.1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 100 1163 1526 10 186 https SSLv2 AES256-SHA light beer 230
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 301 3262 3789 144 124 https TLSv1.3 DHE-RSA-AES256-SHA light wine 230
+198.51.100.1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 400 1365 1447 325 186 http TLSv1.2 PSK-RC4-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 4546 4409 295 153 http SSLv3 ECDHE-RSA-AES256-SHA light beer 230
+localhost:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 300 2297 3318 139 227 https TLSv1 ECDHE-RSA-AES256-SHA dark wine 230
+localhost:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 4671 4285 371 7 https SSLv3 ECDHE-RSA-AES256-SHA dark beer 230
+test.example.org:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 400 3651 1135 172 159 https TLSv1.1 DHE-RSA-AES256-SHA light beer 230
+localhost:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 101 3958 3959 350 121 https SSLv2 DHE-RSA-AES256-SHA dark beer 230
+localhost:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 200 1652 3813 190 11 https SSLv3 AES256-SHA dark wine 230
+test.example.org:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 101 1228 2344 251 366 https TLSv1 ECDHE-RSA-AES256-SHA light beer 230
+test.example.org:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 200 1860 3118 187 419 https TLSv1 PSK-RC4-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost:82 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 401 4518 3837 18 219 http TLSv1.3 DHE-RSA-AES256-SHA dark beer 230
+localhost:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 201 2108 2472 257 470 http TLSv1.1 PSK-RC4-SHA dark beer 230
+2001:db8:1ce::1:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 101 2020 1076 262 106 https TLSv1.3 PSK-RC4-SHA light wine 230
+localhost:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 100 4815 3052 49 322 https TLSv1.3 DHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 300 1642 4001 421 194 https TLSv1 PSK-RC4-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 201 3805 2597 25 187 http TLSv1.1 AES256-SHA dark wine 230
+2001:db8:1ce::1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 301 3435 1760 474 318 https TLSv1.2 ECDHE-RSA-AES256-SHA light wine 230
+localhost:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 101 1911 4082 356 301 https TLSv1 DHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 100 2536 1664 115 474 http SSLv3 PSK-RC4-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 401 3757 3987 441 469 http SSLv2 ECDHE-RSA-AES256-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 400 1221 4244 232 421 https TLSv1.1 ECDHE-RSA-AES256-SHA dark wine 230
+localhost:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 101 2001 2405 6 140 http TLSv1 DHE-RSA-AES256-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+198.51.100.1:81 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 400 4442 4396 64 49 https TLSv1.1 AES256-SHA light beer 230
+2001:db8:1ce::1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 401 1461 4623 46 47 https TLSv1.3 ECDHE-RSA-AES256-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 101 4709 2156 249 137 https TLSv1.3 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 201 2332 3311 172 266 https TLSv1.1 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 301 3571 3672 188 389 https SSLv2 AES256-SHA light wine 230
+localhost:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 100 1739 3940 403 399 https SSLv3 DHE-RSA-AES256-SHA dark wine 230
+test.example.org:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 300 2332 3788 473 372 http SSLv3 DHE-RSA-AES256-SHA dark wine 230
+test.example.org:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 201 4476 1339 420 120 https TLSv1.3 ECDHE-RSA-AES256-SHA light beer 230
+test.example.org:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 101 1040 4417 294 81 http SSLv2 PSK-RC4-SHA dark beer 230
+test.example.org:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 200 1908 1611 265 324 http TLSv1 ECDHE-RSA-AES256-SHA light wine 230
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 300 4725 3638 328 442 https SSLv3 DHE-RSA-AES256-SHA dark wine 230
+198.51.100.1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 100 3943 3001 163 391 http TLSv1.1 AES256-SHA light beer 230
+198.51.100.1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 101 3635 4361 30 431 https SSLv2 DHE-RSA-AES256-SHA light beer 230
+test.example.com:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 201 3348 2997 321 462 http TLSv1 PSK-RC4-SHA dark beer 230
+localhost:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 101 3213 3414 218 267 http TLSv1.3 PSK-RC4-SHA dark wine 230
+localhost:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 400 2845 2448 46 165 https TLSv1 AES256-SHA light beer 230
+test.example.org:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 301 2789 1791 227 314 http SSLv3 ECDHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 301 2283 4644 304 402 http TLSv1.1 PSK-RC4-SHA light wine 230
+localhost:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 201 4748 3274 80 481 http SSLv2 AES256-SHA light beer 230
+localhost:81 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 300 2327 1772 328 174 http TLSv1 ECDHE-RSA-AES256-SHA light beer 230
+test.example.org:81 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 401 1180 3482 115 138 http SSLv2 DHE-RSA-AES256-SHA dark beer 230
+198.51.100.1:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 300 2758 1482 432 426 http TLSv1.1 PSK-RC4-SHA dark wine 230
+2001:db8:1ce::1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 200 4793 3549 258 490 https SSLv3 AES256-SHA light wine 230
+198.51.100.1:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 200 4211 3691 49 241 http TLSv1.2 PSK-RC4-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 201 4853 1043 361 46 http SSLv3 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 400 1025 3378 28 134 https TLSv1.2 DHE-RSA-AES256-SHA light wine 230
+198.51.100.1:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 400 2124 1528 147 144 http TLSv1.1 DHE-RSA-AES256-SHA dark wine 230
+test.example.com:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 201 4910 1613 194 385 https TLSv1.1 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 100 2792 3271 491 104 https SSLv3 DHE-RSA-AES256-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 300 4722 4182 344 237 https TLSv1 DHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 201 3945 3511 153 388 https TLSv1 PSK-RC4-SHA light beer 230
+test.example.com:81 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 100 1456 4467 418 70 http TLSv1 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:80 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 401 1307 1537 422 379 http TLSv1 PSK-RC4-SHA light wine 230
+2001:db8:1ce::1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 400 4768 2420 95 366 http TLSv1.3 DHE-RSA-AES256-SHA light beer 230
+localhost:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 100 4274 4529 296 270 http SSLv3 PSK-RC4-SHA dark beer 230
+test.example.org:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 101 1181 3640 182 479 https TLSv1.3 AES256-SHA light wine 230
+198.51.100.1:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 400 2101 2029 377 210 http TLSv1.3 AES256-SHA dark wine 230
+test.example.com:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 400 2373 1785 157 373 https SSLv2 DHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 400 4812 4212 89 36 https TLSv1.2 AES256-SHA light wine 230
+198.51.100.1:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 201 1421 4737 194 483 https TLSv1.1 AES256-SHA light wine 230
+2001:db8:1ce::1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 200 3485 1976 369 77 https SSLv2 AES256-SHA dark wine 230
+2001:db8:1ce::1:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 100 4414 4356 317 178 http SSLv3 DHE-RSA-AES256-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+198.51.100.1:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 200 1151 2186 490 362 https TLSv1.3 DHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:81 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 400 2991 3256 184 166 https TLSv1.3 PSK-RC4-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 300 3872 2708 139 378 http TLSv1.3 PSK-RC4-SHA dark beer 230
+localhost:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 201 2991 3430 178 104 http TLSv1.2 ECDHE-RSA-AES256-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 400 2825 4431 30 249 http TLSv1.3 ECDHE-RSA-AES256-SHA light wine 230
+test.example.org:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 100 1319 4859 435 44 http TLSv1.2 ECDHE-RSA-AES256-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 300 3962 1663 23 264 https TLSv1.2 DHE-RSA-AES256-SHA dark wine 230
+localhost:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 201 4465 2310 493 99 https TLSv1.1 AES256-SHA dark beer 230
+test.example.com:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 100 2942 4946 119 27 https TLSv1.1 PSK-RC4-SHA dark wine 230
+test.example.org:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 201 3243 2992 432 260 http TLSv1 AES256-SHA light wine 230
+2001:db8:1ce::1:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 301 2312 3695 112 330 http SSLv2 ECDHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 400 3118 3248 347 114 https TLSv1 ECDHE-RSA-AES256-SHA light wine 230
+test.example.com:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 100 3126 4402 19 375 https SSLv3 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 201 2671 2153 195 310 https SSLv2 ECDHE-RSA-AES256-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 401 1582 3558 292 394 http TLSv1.3 PSK-RC4-SHA light wine 230
+test.example.com:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 201 4969 4169 281 71 http TLSv1.2 PSK-RC4-SHA dark beer 230
+test.example.org:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 200 4531 3111 272 437 https TLSv1.2 DHE-RSA-AES256-SHA dark wine 230
+198.51.100.1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 401 1746 4177 224 89 https TLSv1.3 AES256-SHA dark beer 230
+localhost:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 200 4147 4505 454 65 https TLSv1.1 ECDHE-RSA-AES256-SHA light beer 230
+localhost:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 300 2235 3397 290 243 https TLSv1.3 DHE-RSA-AES256-SHA dark beer 230
+localhost:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 201 1633 3774 146 394 https TLSv1.2 AES256-SHA light wine 230
+2001:db8:1ce::1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 100 4580 2717 219 305 https TLSv1.3 PSK-RC4-SHA dark beer 230
+test.example.com:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 401 1395 3562 303 392 http SSLv2 DHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 4827 1947 419 323 https TLSv1.2 DHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 301 1116 4737 55 448 http TLSv1.2 ECDHE-RSA-AES256-SHA light beer 230
+test.example.org:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 401 3130 4303 71 401 https TLSv1.1 DHE-RSA-AES256-SHA light wine 230
+localhost:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 200 4968 4988 75 411 http TLSv1 AES256-SHA dark wine 230
+198.51.100.1:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 401 1586 4626 58 248 http TLSv1.2 ECDHE-RSA-AES256-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+198.51.100.1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 300 2652 2273 379 240 https TLSv1.2 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.org:81 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 101 2696 1585 383 365 http SSLv2 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 200 4278 2629 350 109 http TLSv1.3 ECDHE-RSA-AES256-SHA light wine 230
+localhost:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 301 3012 3094 37 44 http SSLv2 PSK-RC4-SHA light beer 230
+localhost:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 100 3197 1038 391 416 https TLSv1.2 AES256-SHA dark beer 230
+test.example.org:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 100 1842 1947 402 267 https SSLv3 PSK-RC4-SHA light wine 230
+198.51.100.1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 200 3365 4296 23 143 https TLSv1.2 AES256-SHA dark beer 230
+2001:db8:1ce::1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 301 3630 4425 343 460 http TLSv1.3 ECDHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:81 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 101 3175 2967 441 86 http TLSv1.1 ECDHE-RSA-AES256-SHA light wine 230
+198.51.100.1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 100 4423 2052 251 81 https TLSv1.3 DHE-RSA-AES256-SHA dark wine 230
+test.example.com:83 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 400 3440 4089 408 442 https SSLv3 PSK-RC4-SHA dark beer 230
+test.example.org:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 100 3827 3457 288 305 http TLSv1 PSK-RC4-SHA dark beer 230
+198.51.100.1:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 101 1292 2131 382 334 http TLSv1 ECDHE-RSA-AES256-SHA light wine 230
+198.51.100.1:83 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 400 2026 1831 417 123 http TLSv1.1 ECDHE-RSA-AES256-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 401 4300 3883 270 160 https TLSv1 PSK-RC4-SHA light wine 230
+localhost:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 300 1360 1687 49 356 https SSLv3 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:81 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 201 2871 3581 214 269 https TLSv1.1 AES256-SHA dark wine 230
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 201 4426 4191 74 358 http TLSv1.1 PSK-RC4-SHA light beer 230
+198.51.100.1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 200 3533 2075 370 403 https TLSv1.2 DHE-RSA-AES256-SHA dark wine 230
+test.example.com:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 100 3660 3471 272 136 http TLSv1 AES256-SHA light beer 230
+test.example.org:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 200 1999 3259 277 254 https TLSv1.3 AES256-SHA dark wine 230
+198.51.100.1:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 300 3103 2906 200 141 http TLSv1.2 DHE-RSA-AES256-SHA light wine 230
+198.51.100.1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 400 4197 4507 159 311 https SSLv3 AES256-SHA dark wine 230
+test.example.com:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 300 1049 4682 464 353 http TLSv1.2 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.com:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 200 2163 2112 266 133 https TLSv1.1 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:82 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 400 4310 2281 107 217 https SSLv2 AES256-SHA light beer 230
+198.51.100.1:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 400 4215 2676 425 244 https SSLv3 PSK-RC4-SHA dark beer 230
+2001:db8:1ce::1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 200 3707 1631 300 224 http TLSv1 PSK-RC4-SHA light wine 230
+test.example.com:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 201 2082 4603 150 200 http TLSv1.2 DHE-RSA-AES256-SHA dark wine 230
+test.example.com:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 301 3547 4120 146 234 https TLSv1.1 PSK-RC4-SHA dark beer 230
+test.example.org:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 101 1999 2794 47 420 https TLSv1.1 DHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 100 2648 4958 389 16 https SSLv3 AES256-SHA light beer 230
+localhost:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 201 1202 2909 26 340 http TLSv1 DHE-RSA-AES256-SHA light wine 230
+localhost:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 201 1393 3045 248 421 https TLSv1.1 ECDHE-RSA-AES256-SHA light wine 230
+localhost:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 101 2739 4561 61 257 http SSLv3 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 301 4127 4190 374 278 https TLSv1 AES256-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 201 3442 1472 366 373 https SSLv2 ECDHE-RSA-AES256-SHA light wine 230
+test.example.org:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 301 1745 1279 207 55 https SSLv3 DHE-RSA-AES256-SHA light beer 230
+test.example.com:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 400 1462 2721 168 385 https TLSv1.1 DHE-RSA-AES256-SHA light beer 230
+198.51.100.1:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 100 1680 2358 342 237 https TLSv1.2 PSK-RC4-SHA light wine 230
+2001:db8:1ce::1:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 101 1242 3123 296 479 https SSLv2 DHE-RSA-AES256-SHA light wine 230
+test.example.com:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 200 1525 4029 39 30 https TLSv1.1 AES256-SHA dark wine 230
+localhost:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 301 4348 4902 121 103 https TLSv1.3 ECDHE-RSA-AES256-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 201 4992 1046 5 408 https TLSv1.3 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.com:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 401 1331 2834 232 212 https TLSv1.1 DHE-RSA-AES256-SHA dark wine 230
+test.example.com:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 100 1281 3004 261 61 https TLSv1.1 DHE-RSA-AES256-SHA dark beer 230
+localhost:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 300 3985 2627 249 397 https SSLv2 PSK-RC4-SHA dark beer 230
+2001:db8:1ce::1:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 201 2835 3195 194 308 http TLSv1.2 ECDHE-RSA-AES256-SHA light beer 230
+198.51.100.1:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 101 4413 2887 257 108 https TLSv1 PSK-RC4-SHA light beer 230
+198.51.100.1:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 300 2514 2890 186 53 https TLSv1.3 ECDHE-RSA-AES256-SHA dark beer 230
+198.51.100.1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 200 2396 3424 101 295 http SSLv2 PSK-RC4-SHA light wine 230
+2001:db8:1ce::1:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 201 4849 3176 453 302 http TLSv1.1 AES256-SHA dark beer 230
+2001:db8:1ce::1:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 200 4191 2809 300 205 https TLSv1 ECDHE-RSA-AES256-SHA dark wine 230
+localhost:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 300 2920 1745 421 80 http TLSv1.1 AES256-SHA dark wine 230
+localhost:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 100 3313 1900 226 163 http TLSv1.3 PSK-RC4-SHA light wine 230
+localhost:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 401 2298 1179 181 229 https TLSv1 PSK-RC4-SHA dark beer 230
+2001:db8:1ce::1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 401 4604 4392 239 20 http SSLv2 ECDHE-RSA-AES256-SHA dark beer 230
+test.example.org:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 401 2077 2339 132 433 https TLSv1.2 ECDHE-RSA-AES256-SHA dark beer 230
+198.51.100.1:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 201 4448 2085 496 68 https SSLv3 AES256-SHA light wine 230
+localhost:80 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 201 3219 2834 226 50 https SSLv3 PSK-RC4-SHA light wine 230
+test.example.org:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 101 2908 3137 50 236 http TLSv1 DHE-RSA-AES256-SHA light beer 230
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 301 4350 1578 469 206 http TLSv1.2 ECDHE-RSA-AES256-SHA light beer 230
+localhost:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 301 3255 1349 245 492 http TLSv1.3 AES256-SHA dark wine 230
+test.example.com:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 3960 2563 455 228 http SSLv3 DHE-RSA-AES256-SHA light beer 230
+test.example.org:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 301 3302 1004 184 392 https TLSv1 ECDHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 401 1565 4150 93 130 https TLSv1 AES256-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 401 2251 2071 373 471 http TLSv1.2 AES256-SHA light wine 230
+198.51.100.1:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 200 1589 2077 159 389 http TLSv1.1 ECDHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 401 1081 2154 103 244 https TLSv1.1 AES256-SHA light wine 230
+198.51.100.1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 101 3824 4262 478 439 https TLSv1 DHE-RSA-AES256-SHA light beer 230
+test.example.org:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 200 2123 3904 183 420 https TLSv1.1 DHE-RSA-AES256-SHA light wine 230
+localhost:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 100 4324 4867 411 30 https TLSv1.1 ECDHE-RSA-AES256-SHA light beer 230
+198.51.100.1:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 300 2462 3054 286 47 http SSLv3 ECDHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 101 3389 4012 81 113 https TLSv1.2 DHE-RSA-AES256-SHA dark beer 230
+test.example.org:82 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 301 1469 3001 134 460 http TLSv1.1 AES256-SHA dark wine 230
+test.example.com:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 201 1962 1869 269 191 https SSLv3 ECDHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 400 1807 3457 477 77 https TLSv1 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 201 2041 2072 464 193 http SSLv3 ECDHE-RSA-AES256-SHA light wine 230
+198.51.100.1:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 301 2731 1114 92 45 http TLSv1.1 DHE-RSA-AES256-SHA light beer 230
+localhost:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 300 4016 4766 425 405 https TLSv1.1 PSK-RC4-SHA light beer 230
+test.example.com:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 201 3480 3735 420 338 https TLSv1.1 ECDHE-RSA-AES256-SHA light beer 230
+198.51.100.1:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 301 4654 2443 495 322 https TLSv1.3 PSK-RC4-SHA light beer 230
+2001:db8:1ce::1:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 301 1575 1083 214 55 http TLSv1.2 AES256-SHA dark wine 230
+localhost:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 101 3791 3173 436 449 http TLSv1 AES256-SHA dark beer 230
+test.example.org:82 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 301 4446 4004 298 459 https TLSv1.1 ECDHE-RSA-AES256-SHA dark beer 230
+test.example.com:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 300 3414 4751 49 391 http TLSv1.2 ECDHE-RSA-AES256-SHA light wine 230
+test.example.org:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 401 2058 2053 250 290 http TLSv1 AES256-SHA dark beer 230
+test.example.com:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 200 2115 4533 461 278 https SSLv3 AES256-SHA dark wine 230
+test.example.com:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 300 3872 1292 172 275 https TLSv1.1 AES256-SHA light wine 230
+localhost:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 201 4947 4545 50 414 http SSLv3 AES256-SHA dark wine 230
+2001:db8:1ce::1:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 400 1012 3777 305 193 https TLSv1.3 DHE-RSA-AES256-SHA light beer 230
+localhost:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 201 1862 1381 420 109 http TLSv1.2 ECDHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 300 3579 3376 434 67 https TLSv1 PSK-RC4-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 401 4937 1232 470 280 http TLSv1 DHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 100 4926 4244 82 284 http SSLv2 ECDHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 100 4783 4925 497 340 http TLSv1 ECDHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 3308 1377 208 232 http SSLv2 PSK-RC4-SHA light wine 230
+localhost:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 101 4285 4695 426 481 https TLSv1.3 DHE-RSA-AES256-SHA dark wine 230
+localhost:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 100 1953 2196 101 129 https SSLv3 PSK-RC4-SHA light beer 230
+localhost:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 200 2169 4267 65 181 http TLSv1.3 AES256-SHA light wine 230
+test.example.org:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 301 1698 1366 116 101 http TLSv1.3 DHE-RSA-AES256-SHA dark beer 230
+test.example.com:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 200 3534 4390 114 479 https TLSv1.3 ECDHE-RSA-AES256-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 101 3583 1060 400 28 http TLSv1.3 PSK-RC4-SHA light wine 230
+test.example.com:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 100 3078 4116 60 444 http TLSv1.3 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 401 3975 2201 438 419 http SSLv2 AES256-SHA dark beer 230
+test.example.org:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 100 3756 2827 424 411 https TLSv1.3 DHE-RSA-AES256-SHA dark beer 230
+test.example.com:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 400 2898 3218 258 198 http SSLv2 PSK-RC4-SHA dark wine 230
+localhost:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 201 2076 3000 320 196 http SSLv2 PSK-RC4-SHA light beer 230
+198.51.100.1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 201 1439 4814 47 360 http TLSv1 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.org:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 301 2871 2870 491 411 https SSLv2 ECDHE-RSA-AES256-SHA dark beer 230
+test.example.com:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 200 2744 3085 11 151 http SSLv3 ECDHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 101 1241 1752 324 154 https TLSv1.2 DHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 301 3834 4235 270 331 https TLSv1.2 PSK-RC4-SHA dark beer 230
+test.example.com:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 200 2431 3778 103 78 http TLSv1.2 PSK-RC4-SHA light beer 230
+198.51.100.1:81 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 200 2250 1787 340 132 http TLSv1.3 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:81 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 200 4838 1201 79 10 http SSLv2 AES256-SHA dark beer 230
+2001:db8:1ce::1:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 400 2953 1165 492 245 http TLSv1.2 PSK-RC4-SHA light wine 230
+198.51.100.1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 200 2540 3818 490 295 http TLSv1.3 DHE-RSA-AES256-SHA dark beer 230
+test.example.com:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 400 4469 3199 203 107 https TLSv1.2 ECDHE-RSA-AES256-SHA light wine 230
+test.example.com:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 201 3270 3948 223 443 http TLSv1 ECDHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 300 4902 1169 359 328 http TLSv1.3 PSK-RC4-SHA dark wine 230
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 400 1788 4502 355 220 http TLSv1 PSK-RC4-SHA dark beer 230
+198.51.100.1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 100 1565 2909 127 435 http TLSv1.3 PSK-RC4-SHA light wine 230
+test.example.com:83 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 101 4507 2396 259 100 https SSLv3 PSK-RC4-SHA light beer 230
+198.51.100.1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 3119 2306 387 395 http TLSv1.1 DHE-RSA-AES256-SHA light beer 230
+localhost:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 100 1473 4928 364 371 https SSLv3 ECDHE-RSA-AES256-SHA light wine 230
+198.51.100.1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 100 1449 3719 390 401 http TLSv1.2 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.com:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 301 1897 1428 438 210 http TLSv1.1 ECDHE-RSA-AES256-SHA light wine 230
+localhost:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 300 1381 1043 367 453 http TLSv1.2 DHE-RSA-AES256-SHA light wine 230
+localhost:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 400 3495 2740 375 378 http TLSv1 DHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 301 4754 4667 293 56 https SSLv2 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.org:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 200 3447 3853 454 348 http TLSv1 PSK-RC4-SHA light beer 230
+198.51.100.1:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 401 4669 2808 89 235 https TLSv1.3 PSK-RC4-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 401 3134 1040 401 33 https SSLv3 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:80 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 200 3823 3615 48 110 https SSLv3 PSK-RC4-SHA light beer 230
+test.example.org:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 301 2971 3712 2 325 https TLSv1.1 AES256-SHA dark beer 230
+2001:db8:1ce::1:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 301 2932 2388 482 302 http SSLv2 AES256-SHA light wine 230
+198.51.100.1:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 400 2009 3888 347 59 http TLSv1 AES256-SHA light wine 230
+2001:db8:1ce::1:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 401 4252 3808 285 384 https TLSv1.1 ECDHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 301 2664 1505 455 419 https TLSv1.1 DHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 400 2474 2102 40 377 http TLSv1.3 PSK-RC4-SHA light beer 230
+test.example.com:81 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 301 4478 4105 239 420 https TLSv1 DHE-RSA-AES256-SHA light wine 230
+test.example.org:81 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 201 4461 1737 416 129 https TLSv1.3 PSK-RC4-SHA dark wine 230
+198.51.100.1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 100 2381 2018 34 247 http TLSv1.3 DHE-RSA-AES256-SHA light beer 230
+test.example.com:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 101 3138 3141 178 333 http TLSv1.2 DHE-RSA-AES256-SHA dark wine 230
+test.example.com:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 300 2203 4463 450 497 http TLSv1.3 PSK-RC4-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:81 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 400 3937 4320 30 151 http TLSv1.2 PSK-RC4-SHA dark beer 230
+test.example.com:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 100 2858 4431 92 38 http SSLv2 PSK-RC4-SHA light beer 230
+2001:db8:1ce::1:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 301 3339 1333 291 479 http TLSv1.2 PSK-RC4-SHA dark wine 230
+test.example.org:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 201 1799 1725 184 24 http TLSv1 AES256-SHA light wine 230
+localhost:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 400 4743 1337 381 494 http SSLv2 DHE-RSA-AES256-SHA dark beer 230
+198.51.100.1:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 300 4542 4411 280 383 http TLSv1 AES256-SHA dark wine 230
+198.51.100.1:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 101 3600 2913 361 411 https TLSv1.2 AES256-SHA light wine 230
+198.51.100.1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 100 2860 4491 431 82 https TLSv1.3 DHE-RSA-AES256-SHA light wine 230
+localhost:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 201 4544 1146 86 146 http TLSv1.2 PSK-RC4-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+198.51.100.1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 401 1412 3023 474 170 https TLSv1.3 DHE-RSA-AES256-SHA dark wine 230
+198.51.100.1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 201 2870 4503 86 428 https TLSv1.2 ECDHE-RSA-AES256-SHA light beer 230
+198.51.100.1:83 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 200 2250 1801 236 283 https TLSv1.2 PSK-RC4-SHA dark beer 230
+test.example.org:81 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 100 3859 2489 455 150 http SSLv3 PSK-RC4-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:81 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 400 4322 3740 68 383 http TLSv1 AES256-SHA light wine 230
+localhost:83 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 400 1369 3435 223 363 http TLSv1 AES256-SHA dark beer 230
+test.example.org:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 101 1863 1538 81 9 https TLSv1 DHE-RSA-AES256-SHA light beer 230
+localhost:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 100 4390 2872 173 68 https TLSv1.3 DHE-RSA-AES256-SHA dark beer 230
+test.example.com:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 300 2549 4334 353 127 http TLSv1 AES256-SHA light beer 230
+test.example.com:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 300 2314 3541 376 69 https TLSv1.3 ECDHE-RSA-AES256-SHA light beer 230
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 301 2883 3804 95 80 https TLSv1 PSK-RC4-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 201 3245 4083 153 481 https TLSv1 PSK-RC4-SHA dark beer 230
+localhost:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 401 4633 2483 350 196 https TLSv1.3 ECDHE-RSA-AES256-SHA dark beer 230
+198.51.100.1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 201 1944 2389 217 413 http TLSv1.2 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 401 4159 4546 294 252 http TLSv1.2 DHE-RSA-AES256-SHA dark wine 230
+test.example.org:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 200 2100 1268 115 431 http SSLv2 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.com:83 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 301 4386 3222 41 383 http TLSv1.1 AES256-SHA light wine 230
+test.example.org:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 200 4859 2780 28 16 https SSLv2 DHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 301 1541 2755 114 194 https TLSv1 ECDHE-RSA-AES256-SHA light wine 230
+198.51.100.1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 300 2058 3951 312 428 http TLSv1.3 PSK-RC4-SHA dark beer 230
+198.51.100.1:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 400 3076 4237 341 115 http TLSv1.3 AES256-SHA dark wine 230
+test.example.com:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 300 3384 2583 2 348 http TLSv1.3 ECDHE-RSA-AES256-SHA dark wine 230
+localhost:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 200 1283 4090 311 39 https SSLv3 DHE-RSA-AES256-SHA dark wine 230
+localhost:81 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 100 1620 3450 491 119 http TLSv1.1 PSK-RC4-SHA dark beer 230
+test.example.com:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 200 3572 3267 95 80 http TLSv1.2 PSK-RC4-SHA dark beer 230
+localhost:80 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 300 2628 2670 52 307 http TLSv1.1 AES256-SHA dark beer 230
+2001:db8:1ce::1:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 101 3332 4865 246 348 https TLSv1.2 ECDHE-RSA-AES256-SHA light beer 230
+test.example.com:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 3766 1704 147 217 https SSLv2 DHE-RSA-AES256-SHA dark beer 230
+test.example.com:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 200 3763 3904 305 366 http TLSv1.3 DHE-RSA-AES256-SHA dark beer 230
+198.51.100.1:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 201 4205 4011 38 144 http SSLv3 DHE-RSA-AES256-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 101 4573 3168 317 94 https TLSv1 PSK-RC4-SHA light beer 230
+localhost:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 201 1481 1798 190 170 http TLSv1.1 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.com:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 201 1603 1276 51 465 https SSLv2 PSK-RC4-SHA dark wine 230
+test.example.org:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 101 2050 2654 283 287 https TLSv1.1 PSK-RC4-SHA dark wine 230
+198.51.100.1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 201 4943 2143 43 167 http TLSv1.1 ECDHE-RSA-AES256-SHA light beer 230
+198.51.100.1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 401 3854 4082 318 477 https TLSv1.1 ECDHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 100 3235 4635 377 206 http TLSv1.1 AES256-SHA light beer 230
+2001:db8:1ce::1:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 401 4508 2872 185 243 https TLSv1.3 AES256-SHA light beer 230
+test.example.com:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 401 4943 3560 48 473 http TLSv1.1 AES256-SHA light beer 230
+test.example.com:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 300 2693 3536 157 430 https SSLv3 DHE-RSA-AES256-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 300 4321 4966 420 264 http TLSv1.2 DHE-RSA-AES256-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 101 1470 1279 423 248 https TLSv1 DHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 100 2306 4406 237 51 http SSLv2 PSK-RC4-SHA light beer 230
+2001:db8:1ce::1:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 301 1766 2834 429 428 https TLSv1.3 DHE-RSA-AES256-SHA dark wine 230
+test.example.org:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 300 2997 2317 288 312 http SSLv3 PSK-RC4-SHA dark beer 230
+localhost:84 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 301 2968 1042 124 330 https TLSv1.3 AES256-SHA dark wine 230
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 301 1458 4510 268 136 http SSLv2 ECDHE-RSA-AES256-SHA light wine 230
+test.example.org:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 301 4830 2063 255 352 http SSLv2 ECDHE-RSA-AES256-SHA dark beer 230
+test.example.org:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 200 1490 2187 282 484 http TLSv1.3 PSK-RC4-SHA dark wine 230
+localhost:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 100 1015 2608 460 331 http TLSv1.1 AES256-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+localhost:81 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 200 4831 1333 57 68 https TLSv1 ECDHE-RSA-AES256-SHA dark wine 230
+localhost:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 101 2554 1624 18 215 http TLSv1.1 PSK-RC4-SHA dark wine 230
+198.51.100.1:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 401 1579 3208 463 31 https TLSv1.1 DHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 2239 1301 165 27 https SSLv3 ECDHE-RSA-AES256-SHA dark beer 230
+test.example.org:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 200 3874 1581 257 203 http TLSv1.3 AES256-SHA dark beer 230
+localhost:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 400 2498 2533 317 269 https TLSv1.1 ECDHE-RSA-AES256-SHA light beer 230
+198.51.100.1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 101 2898 1790 277 180 https TLSv1 DHE-RSA-AES256-SHA light wine 230
+198.51.100.1:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 200 2899 2599 70 323 http TLSv1.1 PSK-RC4-SHA dark wine 230
+test.example.org:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 300 4546 4841 112 34 https SSLv3 ECDHE-RSA-AES256-SHA dark wine 230
+localhost:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2" 401 4016 3596 394 463 https TLSv1.1 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:83 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 200 1946 2492 32 123 http SSLv3 DHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 201 2296 3174 55 473 http TLSv1.3 ECDHE-RSA-AES256-SHA light wine 230
+198.51.100.1:82 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 201 3037 3632 472 280 https TLSv1.1 DHE-RSA-AES256-SHA light wine 230
+198.51.100.1:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 200 1721 1520 211 157 http TLSv1.2 DHE-RSA-AES256-SHA light beer 230
+localhost:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 401 4044 3518 390 146 http TLSv1.1 DHE-RSA-AES256-SHA light beer 230
+test.example.com:84 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 101 4527 3718 95 207 https TLSv1.3 AES256-SHA dark beer 230
+2001:db8:1ce::1:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 400 2309 4551 423 304 https TLSv1.1 AES256-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 401 3864 2883 115 211 https TLSv1.3 PSK-RC4-SHA light wine 230
+test.example.com:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2" 201 3417 3422 340 242 https SSLv2 PSK-RC4-SHA dark wine 230
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 201 4012 2880 45 302 http SSLv2 AES256-SHA light wine 230
+test.example.com:82 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 300 2834 2781 282 213 http SSLv2 PSK-RC4-SHA dark wine 230
+test.example.org:83 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 300 3421 1800 422 72 http TLSv1 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 300 3052 3602 153 320 https SSLv3 ECDHE-RSA-AES256-SHA light beer 230
+localhost:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 400 1578 4720 230 458 https SSLv3 AES256-SHA light wine 230
+test.example.org:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 1998 3117 220 166 http SSLv2 DHE-RSA-AES256-SHA dark beer 230
+test.example.com:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 100 2041 4031 295 66 http TLSv1.2 DHE-RSA-AES256-SHA light beer 230
+test.example.org:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 401 4941 3742 174 434 https TLSv1.3 PSK-RC4-SHA dark beer 230
+localhost:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 100 1153 2169 24 196 https SSLv2 PSK-RC4-SHA dark wine 230
+test.example.com:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 400 1289 2496 189 98 https SSLv2 PSK-RC4-SHA dark beer 230
+localhost:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 401 4343 2877 90 314 http SSLv2 AES256-SHA light wine 230
+localhost:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 200 1203 2163 465 460 https TLSv1.1 PSK-RC4-SHA dark wine 230
+test.example.org:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2.0" 300 2301 3063 36 178 https TLSv1.1 DHE-RSA-AES256-SHA dark beer 230
+test.example.org:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 201 4306 1154 408 297 https TLSv1 AES256-SHA light beer 230
+198.51.100.1:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 300 1178 3204 79 101 http SSLv2 DHE-RSA-AES256-SHA dark beer 230
+localhost:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 200 4431 4442 348 155 http TLSv1.2 DHE-RSA-AES256-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 400 3897 3618 199 149 https TLSv1.3 ECDHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 300 2221 4730 324 338 https SSLv3 AES256-SHA dark beer 230
+198.51.100.1:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 100 2030 4453 152 414 https TLSv1 ECDHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:81 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 301 4937 1625 213 265 https TLSv1.2 PSK-RC4-SHA light beer 230
+localhost:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 400 1503 3735 466 485 https TLSv1.2 PSK-RC4-SHA light beer 230
+2001:db8:1ce::1:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 200 3255 2804 105 111 http SSLv3 AES256-SHA dark wine 230
+test.example.com:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 300 2376 2896 82 287 https SSLv2 AES256-SHA dark wine 230
+198.51.100.1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 100 3525 3376 192 247 https SSLv2 PSK-RC4-SHA light wine 230
+localhost:83 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 300 2813 1800 365 231 https TLSv1.1 PSK-RC4-SHA light wine 230
+localhost:81 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 201 3589 2334 317 406 https TLSv1.2 PSK-RC4-SHA dark wine 230
+test.example.org:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 100 3216 3159 17 344 http TLSv1.3 PSK-RC4-SHA light beer 230
+test.example.org:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 300 4047 2788 196 105 http TLSv1.2 DHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 301 4253 1092 219 172 https SSLv2 PSK-RC4-SHA light beer 230
+test.example.com:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 300 2612 4876 113 492 http TLSv1 PSK-RC4-SHA dark beer 230
+test.example.org:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 400 1039 4957 283 391 https SSLv2 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:80 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 300 2175 1025 349 62 https TLSv1.2 PSK-RC4-SHA light wine 230
+198.51.100.1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 201 2512 4199 87 90 https TLSv1.2 AES256-SHA dark beer 230
+test.example.com:80 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 300 3685 3490 288 456 http TLSv1.3 ECDHE-RSA-AES256-SHA light beer 230
+test.example.com:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 201 4163 2730 115 186 http TLSv1.3 PSK-RC4-SHA light beer 230
+test.example.org:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 201 4000 1751 482 232 https TLSv1.1 AES256-SHA dark beer 230
+test.example.com:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2" 301 4544 1246 191 426 http TLSv1.3 AES256-SHA light beer 230
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 201 2202 1079 44 93 http TLSv1.1 ECDHE-RSA-AES256-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+198.51.100.1:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 301 2329 3996 388 386 https SSLv2 DHE-RSA-AES256-SHA dark wine 230
+test.example.org:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 101 3564 2870 499 23 https SSLv2 DHE-RSA-AES256-SHA light wine 230
+localhost:84 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 401 3729 1376 161 313 http TLSv1 DHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:81 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 300 4158 3864 444 149 https TLSv1.2 ECDHE-RSA-AES256-SHA light wine 230
+test.example.com:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2" 301 1809 4286 447 418 http TLSv1 AES256-SHA light beer 230
+198.51.100.1:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 100 1942 2004 497 427 https SSLv2 DHE-RSA-AES256-SHA dark wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 300 4471 3841 438 176 https TLSv1.3 PSK-RC4-SHA dark beer 230
+test.example.com:82 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 400 1613 3836 362 432 http SSLv2 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.org:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 300 4394 2628 344 69 http TLSv1.2 AES256-SHA light wine 230
+localhost:84 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 4269 4494 178 149 http SSLv2 ECDHE-RSA-AES256-SHA light wine 230
+198.51.100.1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 100 3413 1039 317 109 http TLSv1.1 DHE-RSA-AES256-SHA light wine 230
+test.example.org:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 201 1110 1662 194 353 https TLSv1.1 AES256-SHA dark wine 230
+2001:db8:1ce::1:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/1.1" 301 3742 1514 220 406 http TLSv1 DHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2" 200 2060 4756 406 119 http SSLv3 DHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:82 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 301 3663 1293 377 420 http TLSv1.2 ECDHE-RSA-AES256-SHA light wine 230
+test.example.com:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2.0" 301 3708 2360 98 293 https TLSv1.2 DHE-RSA-AES256-SHA dark wine 230
+198.51.100.1:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 400 4376 4393 488 173 https SSLv3 PSK-RC4-SHA dark wine 230
+localhost:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/1.1" 100 1129 2917 122 93 http SSLv3 DHE-RSA-AES256-SHA light beer 230
+localhost:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 200 4769 2155 492 41 https TLSv1 AES256-SHA dark beer 230
+2001:db8:1ce::1:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 201 4710 3030 349 392 http TLSv1 AES256-SHA light wine 230
+198.51.100.1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2" 100 2642 2759 363 112 http TLSv1.1 DHE-RSA-AES256-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.org:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 401 3964 1986 204 377 https SSLv2 DHE-RSA-AES256-SHA light beer 230
+198.51.100.1:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 401 1053 3953 284 13 http TLSv1.2 ECDHE-RSA-AES256-SHA light wine 230
+test.example.com:80 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 301 4436 4981 79 323 https SSLv2 ECDHE-RSA-AES256-SHA dark wine 230
+test.example.com:83 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 101 3207 2032 206 398 https SSLv3 PSK-RC4-SHA light wine 230
+2001:db8:1ce::1:80 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 200 3938 1928 216 31 https SSLv2 ECDHE-RSA-AES256-SHA light wine 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+Unmatched! The rat the cat the dog chased killed ate the malt!
+198.51.100.1:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2.0" 100 2193 1470 144 245 https TLSv1.3 ECDHE-RSA-AES256-SHA light wine 230
+2001:db8:1ce::1:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 400 1646 3973 373 78 https TLSv1 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 301 3038 3256 361 321 https TLSv1.2 PSK-RC4-SHA dark wine 230
+198.51.100.1:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2" 401 4535 2424 44 158 http TLSv1.1 ECDHE-RSA-AES256-SHA light wine 230
+localhost:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 301 1366 3163 63 236 http TLSv1 ECDHE-RSA-AES256-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 200 4332 3413 59 412 http TLSv1.1 DHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 200 3347 4042 218 143 https TLSv1.2 DHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.other HTTP/2" 101 2549 3079 207 113 https TLSv1.3 AES256-SHA light wine 230
+test.example.com:81 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 101 4605 2701 285 224 http SSLv3 AES256-SHA dark wine 230
+test.example.com:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/1.1" 400 4963 2096 449 476 https SSLv3 AES256-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+Unmatched! The rat the cat the dog chased killed ate the malt!
+2001:db8:1ce::1:82 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/1.1" 101 4345 2389 145 446 https TLSv1 PSK-RC4-SHA light wine 230
+198.51.100.1:84 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 1050 4840 351 106 https TLSv1.2 AES256-SHA dark beer 230
+localhost:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/1.1" 300 4089 4457 160 277 http TLSv1.3 PSK-RC4-SHA dark wine 230
+localhost:80 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.org HTTP/2.0" 100 1766 3641 395 336 http SSLv2 ECDHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 101 1412 1768 434 79 http SSLv2 ECDHE-RSA-AES256-SHA light wine 230
+localhost:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 300 1912 3209 86 370 https SSLv2 PSK-RC4-SHA dark beer 230
+localhost:84 localhost - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 200 4033 1579 355 409 http TLSv1.3 AES256-SHA dark wine 230
+198.51.100.1:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2.0" 201 1671 3585 339 63 http TLSv1.3 DHE-RSA-AES256-SHA dark wine 230
+localhost:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/1.1" 400 4248 1510 425 430 https TLSv1.3 ECDHE-RSA-AES256-SHA light wine 230
+test.example.org:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 100 4498 1403 239 96 https TLSv1 DHE-RSA-AES256-SHA dark wine 230
+198.51.100.1:83 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 401 2126 4588 167 138 https TLSv1.3 PSK-RC4-SHA light beer 230
+localhost:83 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 101 1279 4755 490 108 http TLSv1.1 ECDHE-RSA-AES256-SHA dark beer 230
+test.example.org:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 301 1536 2798 241 305 http SSLv2 AES256-SHA light beer 230
+test.example.com:80 localhost - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/1.1" 400 2593 3461 118 347 https TLSv1 DHE-RSA-AES256-SHA light wine 230
+test.example.com:83 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 201 2867 3625 418 496 http SSLv2 PSK-RC4-SHA light wine 230
+198.51.100.1:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/1.1" 401 4317 1085 443 410 http SSLv2 AES256-SHA dark wine 230
+2001:db8:1ce::1:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.net HTTP/2.0" 301 1813 4623 250 246 http TLSv1 ECDHE-RSA-AES256-SHA light wine 230
+test.example.com:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.org HTTP/2.0" 301 4548 1008 387 9 https SSLv3 AES256-SHA light wine 230
+localhost:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/2.0" 101 4678 4085 210 103 https TLSv1 AES256-SHA light beer 230
+test.example.com:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 401 4897 3938 74 116 http TLSv1.2 PSK-RC4-SHA light beer 230
+test.example.com:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/1.1" 200 3022 1961 203 393 http TLSv1.3 ECDHE-RSA-AES256-SHA dark beer 230
+2001:db8:1ce::1:83 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 301 1574 3104 364 165 https TLSv1.2 AES256-SHA light beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:82 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/1.1" 301 2944 3376 68 384 http TLSv1.3 PSK-RC4-SHA light wine 230
+localhost:84 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 101 4616 4363 17 28 https SSLv2 ECDHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:83 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2.0" 200 2308 4193 20 257 http SSLv2 PSK-RC4-SHA dark wine 230
+test.example.org:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.other HTTP/2" 300 3503 4056 336 375 https TLSv1.2 DHE-RSA-AES256-SHA dark beer 230
+localhost:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "POST /example.com HTTP/2" 101 4109 2823 250 369 https TLSv1.1 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:81 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/2.0" 300 2069 3457 174 159 http TLSv1.1 ECDHE-RSA-AES256-SHA dark beer 230
+localhost:80 localhost - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 101 2781 3947 414 406 https TLSv1.1 DHE-RSA-AES256-SHA dark beer 230
+198.51.100.1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 1390 1379 214 31 http TLSv1.3 DHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:82 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.com HTTP/1.1" 201 1546 1014 44 351 http TLSv1.1 AES256-SHA light beer 230
+2001:db8:1ce::1:81 2001:db8:2ce:1 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.org HTTP/2.0" 400 1600 4635 219 104 http TLSv1.1 DHE-RSA-AES256-SHA light beer 230
+2001:db8:1ce::1:80 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 201 3604 4845 378 237 http TLSv1.3 ECDHE-RSA-AES256-SHA dark beer 230
+test.example.org:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "HEAD /example.net HTTP/2" 400 1409 3810 180 163 https TLSv1.1 ECDHE-RSA-AES256-SHA dark wine 230
+2001:db8:1ce::1:84 2001:db8:2ce:2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.com HTTP/2" 201 1673 1858 43 405 https SSLv2 AES256-SHA light wine 230
+198.51.100.1:82 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 301 4846 3590 105 492 http SSLv2 PSK-RC4-SHA dark beer 230
+test.example.org:81 203.0.113.1 - - [22/Mar/2009:09:30:31 +0100] "GET /example.net HTTP/1.1" 100 4818 2058 362 393 http TLSv1.2 AES256-SHA dark beer 230
+Unmatched! The rat the cat the dog chased killed ate the malt!
+test.example.com:81 203.0.113.2 - - [22/Mar/2009:09:30:31 +0100] "GET /example.other HTTP/2.0" 101 4719 4878 382 257 https TLSv1.1 AES256-SHA light wine 230
diff --git a/src/go/plugin/go.d/modules/weblog/testdata/u_ex221107.log b/src/go/plugin/go.d/modules/weblog/testdata/u_ex221107.log
new file mode 100644
index 000000000..38fa91cdc
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/testdata/u_ex221107.log
@@ -0,0 +1,168 @@
+#Software: Microsoft Internet Information Services 10.0
+#Version: 1.0
+#Date: 2022-11-07 14:29:06
+#Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Referer) sc-status sc-substatus sc-win32-status time-taken
+2022-11-07 14:29:06 127.0.0.1 GET /us - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 130
+2022-11-07 14:29:06 127.0.0.1 GET /us - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 1
+2022-11-07 14:29:08 127.0.0.1 GET /status full&json 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:29:08 127.0.0.1 GET /status full&json 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:29:08 ::1 GET /status full&json 80 - ::1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:29:09 127.0.0.1 GET /server-status auto 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:29:09 127.0.0.1 GET /server-status auto 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+#Software: Microsoft Internet Information Services 10.0
+#Version: 1.0
+#Date: 2022-11-07 14:55:17
+#Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Referer) sc-status sc-substatus sc-win32-status time-taken
+2022-11-07 14:55:17 127.0.0.1 GET /us - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 187
+2022-11-07 14:55:17 127.0.0.1 GET /us - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:17 127.0.0.1 GET /server-status format=plain 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:17 127.0.0.1 GET /server-status format=plain 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 1
+2022-11-07 14:55:18 127.0.0.1 GET /basic_status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 1
+2022-11-07 14:55:18 127.0.0.1 GET /stub_status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:18 127.0.0.1 GET /stub_status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 1
+2022-11-07 14:55:18 127.0.0.1 GET /nginx_status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:18 127.0.0.1 GET /status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:18 127.0.0.1 GET /status/format/json - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:20 127.0.0.1 GET /admin/api.php version=true 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:20 127.0.0.1 GET /admin/api.php version=true 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:20 127.0.0.1 GET /server-status auto 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:20 127.0.0.1 GET /server-status auto 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:24 127.0.0.1 GET /status full&json 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:24 127.0.0.1 GET /status full&json 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 14:55:24 ::1 GET /status full&json 80 - ::1 Go-http-client/1.1 - 404 0 2 0
+#Software: Microsoft Internet Information Services 10.0
+#Version: 1.0
+#Date: 2022-11-07 15:42:39
+#Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Referer) sc-status sc-substatus sc-win32-status time-taken
+2022-11-07 15:42:39 127.0.0.1 GET /server-status format=plain 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 149
+2022-11-07 15:42:39 127.0.0.1 GET /server-status format=plain 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:39 127.0.0.1 GET /server-status auto 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:39 127.0.0.1 GET /server-status auto 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:39 127.0.0.1 GET /status/format/json - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:41 127.0.0.1 GET /basic_status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:41 127.0.0.1 GET /stub_status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:41 127.0.0.1 GET /stub_status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:41 127.0.0.1 GET /nginx_status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:41 127.0.0.1 GET /status - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:41 127.0.0.1 GET /admin/api.php version=true 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:41 127.0.0.1 GET /admin/api.php version=true 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:41 127.0.0.1 GET /us - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:41 127.0.0.1 GET /us - 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:46 127.0.0.1 GET /status full&json 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:46 127.0.0.1 GET /status full&json 80 - 127.0.0.1 Go-http-client/1.1 - 404 0 2 0
+2022-11-07 15:42:46 ::1 GET /status full&json 80 - ::1 Go-http-client/1.1 - 404 0 2 0
+#Software: Microsoft Internet Information Services 10.0
+#Version: 1.0
+#Date: 2022-11-07 16:47:25
+#Fields: date time s-ip cs-method cs-uri-stem cs-uri-query s-port cs-username c-ip cs(User-Agent) cs(Referer) sc-status sc-substatus sc-win32-status time-taken
+2022-11-07 16:47:25 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 256
+2022-11-07 16:47:25 ::1 GET /iisstart.png - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 http://localhost/ 304 0 0 2
+2022-11-07 16:47:25 ::1 GET /favicon.ico - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 http://localhost/ 404 0 2 16
+2022-11-07 16:48:07 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 0
+2022-11-07 16:48:08 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 1
+2022-11-07 16:48:08 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 0
+2022-11-07 16:48:08 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 0
+2022-11-07 16:48:08 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 0
+2022-11-07 16:48:08 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 0
+2022-11-07 16:48:08 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 0
+2022-11-07 16:48:09 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 0
+2022-11-07 16:48:09 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT+10.0;+Win64;+x64)+AppleWebKit/537.36+(KHTML,+like+Gecko)+Chrome/107.0.0.0+Safari/537.36+Edg/107.0.1418.35 - 304 0 0 0
+2022-11-07 16:49:05 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:05 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:06 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:06 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:06 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 3
+2022-11-07 16:49:06 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:06 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:06 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:07 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:07 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:07 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:07 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:07 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:07 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:09 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:09 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:09 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:09 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:09 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:09 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:10 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:10 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:10 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:10 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:10 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 4
+2022-11-07 16:49:10 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:11 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:11 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:11 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:11 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:11 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:11 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:12 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:12 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:12 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:12 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:12 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:12 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:13 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:13 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:13 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 2
+2022-11-07 16:49:13 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:13 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:13 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:14 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:14 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:14 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:14 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 2
+2022-11-07 16:49:14 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:14 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:15 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:15 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:15 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:15 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:15 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:15 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:16 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:16 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:16 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:16 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:16 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:16 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:17 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:17 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:17 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 2
+2022-11-07 16:49:17 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:17 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:17 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:18 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:18 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 2
+2022-11-07 16:49:18 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:18 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 31
+2022-11-07 16:49:18 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:18 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:19 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 8
+2022-11-07 16:49:19 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:19 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:19 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:19 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:19 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:20 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:20 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:20 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:20 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:20 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:20 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:21 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:21 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:21 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:21 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:21 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:21 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:23 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:23 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:23 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:23 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:23 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:23 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
+2022-11-07 16:49:24 ::1 GET / - 80 - ::1 Mozilla/5.0+(Windows+NT;+Windows+NT+10.0;+en-US)+WindowsPowerShell/5.1.20348.859 - 200 0 0 0
diff --git a/src/go/plugin/go.d/modules/weblog/weblog.go b/src/go/plugin/go.d/modules/weblog/weblog.go
new file mode 100644
index 000000000..242999e68
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/weblog.go
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ _ "embed"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("web_log", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
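+// New returns a WebLog collector with its default configuration: automatic log format detection, "*.gz" paths excluded, and response codes grouped by class.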
+func New() *WebLog {
+ return &WebLog{
+ Config: Config{
+ ExcludePath: "*.gz",
+ GroupRespCodes: true,
+ ParserConfig: logs.ParserConfig{
+ LogType: typeAuto,
+ CSV: logs.CSVConfig{
+ FieldsPerRecord: -1,
+ Delimiter: " ",
+ TrimLeadingSpace: false,
+ CheckField: checkCSVFormatField,
+ },
+ LTSV: logs.LTSVConfig{
+ FieldDelimiter: "\t",
+ ValueDelimiter: ":",
+ },
+ RegExp: logs.RegExpConfig{},
+ JSON: logs.JSONConfig{},
+ },
+ },
+ }
+}
+
+type (
+ Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Path string `yaml:"path" json:"path"`
+ ExcludePath string `yaml:"exclude_path,omitempty" json:"exclude_path"`
+ logs.ParserConfig `yaml:",inline" json:""`
+ URLPatterns []userPattern `yaml:"url_patterns,omitempty" json:"url_patterns"`
+ CustomFields []customField `yaml:"custom_fields,omitempty" json:"custom_fields"`
+ CustomTimeFields []customTimeField `yaml:"custom_time_fields,omitempty" json:"custom_time_fields"`
+ CustomNumericFields []customNumericField `yaml:"custom_numeric_fields,omitempty" json:"custom_numeric_fields"`
+ Histogram []float64 `yaml:"histogram,omitempty" json:"histogram"`
+ GroupRespCodes bool `yaml:"group_response_codes" json:"group_response_codes"`
+ }
+ userPattern struct {
+ Name string `yaml:"name" json:"name"`
+ Match string `yaml:"match" json:"match"`
+ }
+ customField struct {
+ Name string `yaml:"name" json:"name"`
+ Patterns []userPattern `yaml:"patterns" json:"patterns"`
+ }
+ customTimeField struct {
+ Name string `yaml:"name" json:"name"`
+ Histogram []float64 `yaml:"histogram" json:"histogram"`
+ }
+ customNumericField struct {
+ Name string `yaml:"name" json:"name"`
+ Units string `yaml:"units" json:"units"`
+ Multiplier int `yaml:"multiplier,omitempty" json:"multiplier"`
+ Divisor int `yaml:"divisor,omitempty" json:"divisor"`
+ }
+)
+
+type WebLog struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ file *logs.Reader
+ parser logs.Parser
+ line *logLine
+
+ urlPatterns []*pattern
+ customFields map[string][]*pattern
+ customTimeFields map[string][]float64
+ customNumericFields map[string]bool
+
+ mx *metricsData
+}
+
+func (w *WebLog) Configuration() any {
+ return w.Config
+}
+
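+// Init compiles the configured URL patterns and the custom field, time field, and numeric field definitions, and prepares the metrics state; the log file itself is not opened until Check.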
+func (w *WebLog) Init() error {
+ if err := w.createURLPatterns(); err != nil {
+ w.Errorf("init failed: %v", err)
+ return err
+ }
+
+ if err := w.createCustomFields(); err != nil {
+ w.Errorf("init failed: %v", err)
+ return err
+ }
+
+ if err := w.createCustomTimeFields(); err != nil {
+ w.Errorf("init failed: %v", err)
+ return err
+ }
+
+ if err := w.createCustomNumericFields(); err != nil {
+ w.Errorf("init failed: %v", err)
+ return err
+ }
+
+ w.createLogLine()
+ w.mx = newMetricsData(w.Config)
+
+ return nil
+}
+
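+// Check opens the log file, creates the parser, and builds the charts, returning an error if any of these steps fails.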
+func (w *WebLog) Check() error {
+ // Note: these steps are done here (rather than in Init) so that log format auto-detection can be retried on every check.
+ if err := w.createLogReader(); err != nil {
+ w.Warning("check failed: ", err)
+ return err
+ }
+
+ if err := w.createParser(); err != nil {
+ w.Warning("check failed: ", err)
+ return err
+ }
+
+ if err := w.createCharts(w.line); err != nil {
+ w.Warning("check failed: ", err)
+ return err
+ }
+
+ return nil
+}
+
+func (w *WebLog) Charts() *module.Charts {
+ return w.charts
+}
+
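+// Collect runs a collection cycle and returns the gathered metrics; it returns nil when there is nothing to report.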
+func (w *WebLog) Collect() map[string]int64 {
+ mx, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
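+// Cleanup closes the log file if it was opened.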
+func (w *WebLog) Cleanup() {
+ if w.file != nil {
+ _ = w.file.Close()
+ }
+}
diff --git a/src/go/plugin/go.d/modules/weblog/weblog_test.go b/src/go/plugin/go.d/modules/weblog/weblog_test.go
new file mode 100644
index 000000000..1e36bbf68
--- /dev/null
+++ b/src/go/plugin/go.d/modules/weblog/weblog_test.go
@@ -0,0 +1,1502 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package weblog
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/logs"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataCommonLog, _ = os.ReadFile("testdata/common.log")
+ dataFullLog, _ = os.ReadFile("testdata/full.log")
+ dataCustomLog, _ = os.ReadFile("testdata/custom.log")
+ dataCustomTimeFieldLog, _ = os.ReadFile("testdata/custom_time_fields.log")
+ dataIISLog, _ = os.ReadFile("testdata/u_ex221107.log")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataCommonLog": dataCommonLog,
+ "dataFullLog": dataFullLog,
+ "dataCustomLog": dataCustomLog,
+ "dataCustomTimeFieldLog": dataCustomTimeFieldLog,
+ "dataIISLog": dataIISLog,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestWebLog_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &WebLog{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestWebLog_Init(t *testing.T) {
+ weblog := New()
+
+ assert.NoError(t, weblog.Init())
+}
+
+func TestWebLog_Init_ErrorOnCreatingURLPatterns(t *testing.T) {
+ weblog := New()
+ weblog.URLPatterns = []userPattern{{Match: "* !*"}}
+
+ assert.Error(t, weblog.Init())
+}
+
+func TestWebLog_Init_ErrorOnCreatingCustomFields(t *testing.T) {
+ weblog := New()
+ weblog.CustomFields = []customField{{Patterns: []userPattern{{Name: "p1", Match: "* !*"}}}}
+
+ assert.Error(t, weblog.Init())
+}
+
+func TestWebLog_Check(t *testing.T) {
+ weblog := New()
+ defer weblog.Cleanup()
+ weblog.Path = "testdata/common.log"
+ require.NoError(t, weblog.Init())
+
+ assert.NoError(t, weblog.Check())
+}
+
+func TestWebLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) {
+ weblog := New()
+ defer weblog.Cleanup()
+ weblog.Path = "testdata/not_exists.log"
+ require.NoError(t, weblog.Init())
+
+ assert.Error(t, weblog.Check())
+}
+
+func TestWebLog_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) {
+ weblog := New()
+ defer weblog.Cleanup()
+ weblog.Path = "testdata/custom.log"
+ require.NoError(t, weblog.Init())
+
+ assert.Error(t, weblog.Check())
+}
+
+func TestWebLog_Check_ErrorOnCreatingParserEmptyLine(t *testing.T) {
+ weblog := New()
+ defer weblog.Cleanup()
+ weblog.Path = "testdata/custom.log"
+ weblog.ParserConfig.LogType = logs.TypeCSV
+ weblog.ParserConfig.CSV.Format = "$one $two"
+ require.NoError(t, weblog.Init())
+
+ assert.Error(t, weblog.Check())
+}
+
+func TestWebLog_Charts(t *testing.T) {
+ weblog := New()
+ defer weblog.Cleanup()
+ weblog.Path = "testdata/common.log"
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
+
+ assert.NotNil(t, weblog.Charts())
+}
+
+func TestWebLog_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestWebLog_Collect(t *testing.T) {
+ weblog := prepareWebLogCollectFull(t)
+
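+ // The commented-out block below prints the collected metrics sorted by key; it is handy for regenerating the expected map.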
+ //m := weblog.Collect()
+ //l := make([]string, 0)
+ //for k := range m {
+ // l = append(l, k)
+ //}
+ //sort.Strings(l)
+ //for _, value := range l {
+ // fmt.Println(fmt.Sprintf("\"%s\": %d,", value, m[value]))
+ //}
+
+ expected := map[string]int64{
+ "bytes_received": 1374096,
+ "bytes_sent": 1373185,
+ "custom_field_drink_beer": 221,
+ "custom_field_drink_wine": 231,
+ "custom_field_side_dark": 231,
+ "custom_field_side_light": 221,
+ "custom_time_field_random_time_field_time_avg": 230,
+ "custom_time_field_random_time_field_time_count": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_1": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_10": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_11": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_2": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_3": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_4": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_5": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_6": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_7": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_8": 452,
+ "custom_time_field_random_time_field_time_hist_bucket_9": 452,
+ "custom_time_field_random_time_field_time_hist_count": 452,
+ "custom_time_field_random_time_field_time_hist_sum": 103960,
+ "custom_time_field_random_time_field_time_max": 230,
+ "custom_time_field_random_time_field_time_min": 230,
+ "custom_time_field_random_time_field_time_sum": 103960,
+ "req_http_scheme": 218,
+ "req_https_scheme": 234,
+ "req_ipv4": 275,
+ "req_ipv6": 177,
+ "req_method_GET": 156,
+ "req_method_HEAD": 150,
+ "req_method_POST": 146,
+ "req_port_80": 96,
+ "req_port_81": 100,
+ "req_port_82": 84,
+ "req_port_83": 85,
+ "req_port_84": 87,
+ "req_proc_time_avg": 244,
+ "req_proc_time_count": 402,
+ "req_proc_time_hist_bucket_1": 402,
+ "req_proc_time_hist_bucket_10": 402,
+ "req_proc_time_hist_bucket_11": 402,
+ "req_proc_time_hist_bucket_2": 402,
+ "req_proc_time_hist_bucket_3": 402,
+ "req_proc_time_hist_bucket_4": 402,
+ "req_proc_time_hist_bucket_5": 402,
+ "req_proc_time_hist_bucket_6": 402,
+ "req_proc_time_hist_bucket_7": 402,
+ "req_proc_time_hist_bucket_8": 402,
+ "req_proc_time_hist_bucket_9": 402,
+ "req_proc_time_hist_count": 402,
+ "req_proc_time_hist_sum": 98312,
+ "req_proc_time_max": 497,
+ "req_proc_time_min": 2,
+ "req_proc_time_sum": 98312,
+ "req_ssl_cipher_suite_AES256-SHA": 101,
+ "req_ssl_cipher_suite_DHE-RSA-AES256-SHA": 111,
+ "req_ssl_cipher_suite_ECDHE-RSA-AES256-SHA": 127,
+ "req_ssl_cipher_suite_PSK-RC4-SHA": 113,
+ "req_ssl_proto_SSLv2": 74,
+ "req_ssl_proto_SSLv3": 57,
+ "req_ssl_proto_TLSv1": 76,
+ "req_ssl_proto_TLSv1.1": 87,
+ "req_ssl_proto_TLSv1.2": 73,
+ "req_ssl_proto_TLSv1.3": 85,
+ "req_type_bad": 49,
+ "req_type_error": 0,
+ "req_type_redirect": 119,
+ "req_type_success": 284,
+ "req_unmatched": 48,
+ "req_url_ptn_com": 120,
+ "req_url_ptn_net": 116,
+ "req_url_ptn_not_match": 0,
+ "req_url_ptn_org": 113,
+ "req_version_1.1": 168,
+ "req_version_2": 143,
+ "req_version_2.0": 141,
+ "req_vhost_198.51.100.1": 81,
+ "req_vhost_2001:db8:1ce::1": 100,
+ "req_vhost_localhost": 102,
+ "req_vhost_test.example.com": 87,
+ "req_vhost_test.example.org": 82,
+ "requests": 500,
+ "resp_1xx": 110,
+ "resp_2xx": 128,
+ "resp_3xx": 119,
+ "resp_4xx": 95,
+ "resp_5xx": 0,
+ "resp_code_100": 60,
+ "resp_code_101": 50,
+ "resp_code_200": 58,
+ "resp_code_201": 70,
+ "resp_code_300": 58,
+ "resp_code_301": 61,
+ "resp_code_400": 49,
+ "resp_code_401": 46,
+ "uniq_ipv4": 3,
+ "uniq_ipv6": 2,
+ "upstream_resp_time_avg": 255,
+ "upstream_resp_time_count": 452,
+ "upstream_resp_time_hist_bucket_1": 452,
+ "upstream_resp_time_hist_bucket_10": 452,
+ "upstream_resp_time_hist_bucket_11": 452,
+ "upstream_resp_time_hist_bucket_2": 452,
+ "upstream_resp_time_hist_bucket_3": 452,
+ "upstream_resp_time_hist_bucket_4": 452,
+ "upstream_resp_time_hist_bucket_5": 452,
+ "upstream_resp_time_hist_bucket_6": 452,
+ "upstream_resp_time_hist_bucket_7": 452,
+ "upstream_resp_time_hist_bucket_8": 452,
+ "upstream_resp_time_hist_bucket_9": 452,
+ "upstream_resp_time_hist_count": 452,
+ "upstream_resp_time_hist_sum": 115615,
+ "upstream_resp_time_max": 497,
+ "upstream_resp_time_min": 7,
+ "upstream_resp_time_sum": 115615,
+ "url_ptn_com_bytes_received": 379864,
+ "url_ptn_com_bytes_sent": 372669,
+ "url_ptn_com_req_method_GET": 38,
+ "url_ptn_com_req_method_HEAD": 39,
+ "url_ptn_com_req_method_POST": 43,
+ "url_ptn_com_req_proc_time_avg": 209,
+ "url_ptn_com_req_proc_time_count": 105,
+ "url_ptn_com_req_proc_time_max": 495,
+ "url_ptn_com_req_proc_time_min": 5,
+ "url_ptn_com_req_proc_time_sum": 22010,
+ "url_ptn_com_resp_code_100": 12,
+ "url_ptn_com_resp_code_101": 15,
+ "url_ptn_com_resp_code_200": 13,
+ "url_ptn_com_resp_code_201": 26,
+ "url_ptn_com_resp_code_300": 16,
+ "url_ptn_com_resp_code_301": 12,
+ "url_ptn_com_resp_code_400": 13,
+ "url_ptn_com_resp_code_401": 13,
+ "url_ptn_net_bytes_received": 349988,
+ "url_ptn_net_bytes_sent": 339867,
+ "url_ptn_net_req_method_GET": 51,
+ "url_ptn_net_req_method_HEAD": 33,
+ "url_ptn_net_req_method_POST": 32,
+ "url_ptn_net_req_proc_time_avg": 254,
+ "url_ptn_net_req_proc_time_count": 104,
+ "url_ptn_net_req_proc_time_max": 497,
+ "url_ptn_net_req_proc_time_min": 10,
+ "url_ptn_net_req_proc_time_sum": 26510,
+ "url_ptn_net_resp_code_100": 16,
+ "url_ptn_net_resp_code_101": 12,
+ "url_ptn_net_resp_code_200": 16,
+ "url_ptn_net_resp_code_201": 14,
+ "url_ptn_net_resp_code_300": 14,
+ "url_ptn_net_resp_code_301": 17,
+ "url_ptn_net_resp_code_400": 14,
+ "url_ptn_net_resp_code_401": 13,
+ "url_ptn_not_match_bytes_received": 0,
+ "url_ptn_not_match_bytes_sent": 0,
+ "url_ptn_not_match_req_proc_time_avg": 0,
+ "url_ptn_not_match_req_proc_time_count": 0,
+ "url_ptn_not_match_req_proc_time_max": 0,
+ "url_ptn_not_match_req_proc_time_min": 0,
+ "url_ptn_not_match_req_proc_time_sum": 0,
+ "url_ptn_org_bytes_received": 331836,
+ "url_ptn_org_bytes_sent": 340095,
+ "url_ptn_org_req_method_GET": 29,
+ "url_ptn_org_req_method_HEAD": 46,
+ "url_ptn_org_req_method_POST": 38,
+ "url_ptn_org_req_proc_time_avg": 260,
+ "url_ptn_org_req_proc_time_count": 102,
+ "url_ptn_org_req_proc_time_max": 497,
+ "url_ptn_org_req_proc_time_min": 2,
+ "url_ptn_org_req_proc_time_sum": 26599,
+ "url_ptn_org_resp_code_100": 15,
+ "url_ptn_org_resp_code_101": 11,
+ "url_ptn_org_resp_code_200": 20,
+ "url_ptn_org_resp_code_201": 16,
+ "url_ptn_org_resp_code_300": 10,
+ "url_ptn_org_resp_code_301": 19,
+ "url_ptn_org_resp_code_400": 13,
+ "url_ptn_org_resp_code_401": 9,
+ }
+
+ mx := weblog.Collect()
+ assert.Equal(t, expected, mx)
+ testCharts(t, weblog, mx)
+}
+
+func TestWebLog_Collect_CommonLogFormat(t *testing.T) {
+ weblog := prepareWebLogCollectCommon(t)
+
+ expected := map[string]int64{
+ "bytes_received": 0,
+ "bytes_sent": 1388056,
+ "req_http_scheme": 0,
+ "req_https_scheme": 0,
+ "req_ipv4": 283,
+ "req_ipv6": 173,
+ "req_method_GET": 159,
+ "req_method_HEAD": 143,
+ "req_method_POST": 154,
+ "req_proc_time_avg": 0,
+ "req_proc_time_count": 0,
+ "req_proc_time_hist_bucket_1": 0,
+ "req_proc_time_hist_bucket_10": 0,
+ "req_proc_time_hist_bucket_11": 0,
+ "req_proc_time_hist_bucket_2": 0,
+ "req_proc_time_hist_bucket_3": 0,
+ "req_proc_time_hist_bucket_4": 0,
+ "req_proc_time_hist_bucket_5": 0,
+ "req_proc_time_hist_bucket_6": 0,
+ "req_proc_time_hist_bucket_7": 0,
+ "req_proc_time_hist_bucket_8": 0,
+ "req_proc_time_hist_bucket_9": 0,
+ "req_proc_time_hist_count": 0,
+ "req_proc_time_hist_sum": 0,
+ "req_proc_time_max": 0,
+ "req_proc_time_min": 0,
+ "req_proc_time_sum": 0,
+ "req_type_bad": 54,
+ "req_type_error": 0,
+ "req_type_redirect": 122,
+ "req_type_success": 280,
+ "req_unmatched": 44,
+ "req_version_1.1": 155,
+ "req_version_2": 147,
+ "req_version_2.0": 154,
+ "requests": 500,
+ "resp_1xx": 130,
+ "resp_2xx": 100,
+ "resp_3xx": 122,
+ "resp_4xx": 104,
+ "resp_5xx": 0,
+ "resp_code_100": 80,
+ "resp_code_101": 50,
+ "resp_code_200": 43,
+ "resp_code_201": 57,
+ "resp_code_300": 70,
+ "resp_code_301": 52,
+ "resp_code_400": 54,
+ "resp_code_401": 50,
+ "uniq_ipv4": 3,
+ "uniq_ipv6": 2,
+ "upstream_resp_time_avg": 0,
+ "upstream_resp_time_count": 0,
+ "upstream_resp_time_hist_bucket_1": 0,
+ "upstream_resp_time_hist_bucket_10": 0,
+ "upstream_resp_time_hist_bucket_11": 0,
+ "upstream_resp_time_hist_bucket_2": 0,
+ "upstream_resp_time_hist_bucket_3": 0,
+ "upstream_resp_time_hist_bucket_4": 0,
+ "upstream_resp_time_hist_bucket_5": 0,
+ "upstream_resp_time_hist_bucket_6": 0,
+ "upstream_resp_time_hist_bucket_7": 0,
+ "upstream_resp_time_hist_bucket_8": 0,
+ "upstream_resp_time_hist_bucket_9": 0,
+ "upstream_resp_time_hist_count": 0,
+ "upstream_resp_time_hist_sum": 0,
+ "upstream_resp_time_max": 0,
+ "upstream_resp_time_min": 0,
+ "upstream_resp_time_sum": 0,
+ }
+
+ mx := weblog.Collect()
+ assert.Equal(t, expected, mx)
+ testCharts(t, weblog, mx)
+}
+
+func TestWebLog_Collect_CustomLogs(t *testing.T) {
+ weblog := prepareWebLogCollectCustom(t)
+
+ expected := map[string]int64{
+ "bytes_received": 0,
+ "bytes_sent": 0,
+ "custom_field_drink_beer": 52,
+ "custom_field_drink_wine": 40,
+ "custom_field_side_dark": 46,
+ "custom_field_side_light": 46,
+ "req_http_scheme": 0,
+ "req_https_scheme": 0,
+ "req_ipv4": 0,
+ "req_ipv6": 0,
+ "req_proc_time_avg": 0,
+ "req_proc_time_count": 0,
+ "req_proc_time_hist_bucket_1": 0,
+ "req_proc_time_hist_bucket_10": 0,
+ "req_proc_time_hist_bucket_11": 0,
+ "req_proc_time_hist_bucket_2": 0,
+ "req_proc_time_hist_bucket_3": 0,
+ "req_proc_time_hist_bucket_4": 0,
+ "req_proc_time_hist_bucket_5": 0,
+ "req_proc_time_hist_bucket_6": 0,
+ "req_proc_time_hist_bucket_7": 0,
+ "req_proc_time_hist_bucket_8": 0,
+ "req_proc_time_hist_bucket_9": 0,
+ "req_proc_time_hist_count": 0,
+ "req_proc_time_hist_sum": 0,
+ "req_proc_time_max": 0,
+ "req_proc_time_min": 0,
+ "req_proc_time_sum": 0,
+ "req_type_bad": 0,
+ "req_type_error": 0,
+ "req_type_redirect": 0,
+ "req_type_success": 0,
+ "req_unmatched": 8,
+ "requests": 100,
+ "resp_1xx": 0,
+ "resp_2xx": 0,
+ "resp_3xx": 0,
+ "resp_4xx": 0,
+ "resp_5xx": 0,
+ "uniq_ipv4": 0,
+ "uniq_ipv6": 0,
+ "upstream_resp_time_avg": 0,
+ "upstream_resp_time_count": 0,
+ "upstream_resp_time_hist_bucket_1": 0,
+ "upstream_resp_time_hist_bucket_10": 0,
+ "upstream_resp_time_hist_bucket_11": 0,
+ "upstream_resp_time_hist_bucket_2": 0,
+ "upstream_resp_time_hist_bucket_3": 0,
+ "upstream_resp_time_hist_bucket_4": 0,
+ "upstream_resp_time_hist_bucket_5": 0,
+ "upstream_resp_time_hist_bucket_6": 0,
+ "upstream_resp_time_hist_bucket_7": 0,
+ "upstream_resp_time_hist_bucket_8": 0,
+ "upstream_resp_time_hist_bucket_9": 0,
+ "upstream_resp_time_hist_count": 0,
+ "upstream_resp_time_hist_sum": 0,
+ "upstream_resp_time_max": 0,
+ "upstream_resp_time_min": 0,
+ "upstream_resp_time_sum": 0,
+ }
+
+ mx := weblog.Collect()
+ assert.Equal(t, expected, mx)
+ testCharts(t, weblog, mx)
+}
+
+func TestWebLog_Collect_CustomTimeFieldsLogs(t *testing.T) {
+ weblog := prepareWebLogCollectCustomTimeFields(t)
+
+ expected := map[string]int64{
+ "bytes_received": 0,
+ "bytes_sent": 0,
+ "custom_time_field_time1_time_avg": 224,
+ "custom_time_field_time1_time_count": 72,
+ "custom_time_field_time1_time_hist_bucket_1": 72,
+ "custom_time_field_time1_time_hist_bucket_10": 72,
+ "custom_time_field_time1_time_hist_bucket_11": 72,
+ "custom_time_field_time1_time_hist_bucket_2": 72,
+ "custom_time_field_time1_time_hist_bucket_3": 72,
+ "custom_time_field_time1_time_hist_bucket_4": 72,
+ "custom_time_field_time1_time_hist_bucket_5": 72,
+ "custom_time_field_time1_time_hist_bucket_6": 72,
+ "custom_time_field_time1_time_hist_bucket_7": 72,
+ "custom_time_field_time1_time_hist_bucket_8": 72,
+ "custom_time_field_time1_time_hist_bucket_9": 72,
+ "custom_time_field_time1_time_hist_count": 72,
+ "custom_time_field_time1_time_hist_sum": 16152,
+ "custom_time_field_time1_time_max": 431,
+ "custom_time_field_time1_time_min": 121,
+ "custom_time_field_time1_time_sum": 16152,
+ "custom_time_field_time2_time_avg": 255,
+ "custom_time_field_time2_time_count": 72,
+ "custom_time_field_time2_time_hist_bucket_1": 72,
+ "custom_time_field_time2_time_hist_bucket_10": 72,
+ "custom_time_field_time2_time_hist_bucket_11": 72,
+ "custom_time_field_time2_time_hist_bucket_2": 72,
+ "custom_time_field_time2_time_hist_bucket_3": 72,
+ "custom_time_field_time2_time_hist_bucket_4": 72,
+ "custom_time_field_time2_time_hist_bucket_5": 72,
+ "custom_time_field_time2_time_hist_bucket_6": 72,
+ "custom_time_field_time2_time_hist_bucket_7": 72,
+ "custom_time_field_time2_time_hist_bucket_8": 72,
+ "custom_time_field_time2_time_hist_bucket_9": 72,
+ "custom_time_field_time2_time_hist_count": 72,
+ "custom_time_field_time2_time_hist_sum": 18360,
+ "custom_time_field_time2_time_max": 321,
+ "custom_time_field_time2_time_min": 123,
+ "custom_time_field_time2_time_sum": 18360,
+ "req_http_scheme": 0,
+ "req_https_scheme": 0,
+ "req_ipv4": 0,
+ "req_ipv6": 0,
+ "req_proc_time_avg": 0,
+ "req_proc_time_count": 0,
+ "req_proc_time_hist_bucket_1": 0,
+ "req_proc_time_hist_bucket_10": 0,
+ "req_proc_time_hist_bucket_11": 0,
+ "req_proc_time_hist_bucket_2": 0,
+ "req_proc_time_hist_bucket_3": 0,
+ "req_proc_time_hist_bucket_4": 0,
+ "req_proc_time_hist_bucket_5": 0,
+ "req_proc_time_hist_bucket_6": 0,
+ "req_proc_time_hist_bucket_7": 0,
+ "req_proc_time_hist_bucket_8": 0,
+ "req_proc_time_hist_bucket_9": 0,
+ "req_proc_time_hist_count": 0,
+ "req_proc_time_hist_sum": 0,
+ "req_proc_time_max": 0,
+ "req_proc_time_min": 0,
+ "req_proc_time_sum": 0,
+ "req_type_bad": 0,
+ "req_type_error": 0,
+ "req_type_redirect": 0,
+ "req_type_success": 0,
+ "req_unmatched": 0,
+ "requests": 72,
+ "resp_1xx": 0,
+ "resp_2xx": 0,
+ "resp_3xx": 0,
+ "resp_4xx": 0,
+ "resp_5xx": 0,
+ "uniq_ipv4": 0,
+ "uniq_ipv6": 0,
+ "upstream_resp_time_avg": 0,
+ "upstream_resp_time_count": 0,
+ "upstream_resp_time_hist_bucket_1": 0,
+ "upstream_resp_time_hist_bucket_10": 0,
+ "upstream_resp_time_hist_bucket_11": 0,
+ "upstream_resp_time_hist_bucket_2": 0,
+ "upstream_resp_time_hist_bucket_3": 0,
+ "upstream_resp_time_hist_bucket_4": 0,
+ "upstream_resp_time_hist_bucket_5": 0,
+ "upstream_resp_time_hist_bucket_6": 0,
+ "upstream_resp_time_hist_bucket_7": 0,
+ "upstream_resp_time_hist_bucket_8": 0,
+ "upstream_resp_time_hist_bucket_9": 0,
+ "upstream_resp_time_hist_count": 0,
+ "upstream_resp_time_hist_sum": 0,
+ "upstream_resp_time_max": 0,
+ "upstream_resp_time_min": 0,
+ "upstream_resp_time_sum": 0,
+ }
+
+ mx := weblog.Collect()
+ assert.Equal(t, expected, mx)
+ testCharts(t, weblog, mx)
+}
+
+func TestWebLog_Collect_CustomNumericFieldsLogs(t *testing.T) {
+ weblog := prepareWebLogCollectCustomNumericFields(t)
+
+ expected := map[string]int64{
+ "bytes_received": 0,
+ "bytes_sent": 0,
+ "custom_numeric_field_numeric1_summary_avg": 224,
+ "custom_numeric_field_numeric1_summary_count": 72,
+ "custom_numeric_field_numeric1_summary_max": 431,
+ "custom_numeric_field_numeric1_summary_min": 121,
+ "custom_numeric_field_numeric1_summary_sum": 16152,
+ "custom_numeric_field_numeric2_summary_avg": 255,
+ "custom_numeric_field_numeric2_summary_count": 72,
+ "custom_numeric_field_numeric2_summary_max": 321,
+ "custom_numeric_field_numeric2_summary_min": 123,
+ "custom_numeric_field_numeric2_summary_sum": 18360,
+ "req_http_scheme": 0,
+ "req_https_scheme": 0,
+ "req_ipv4": 0,
+ "req_ipv6": 0,
+ "req_proc_time_avg": 0,
+ "req_proc_time_count": 0,
+ "req_proc_time_hist_bucket_1": 0,
+ "req_proc_time_hist_bucket_10": 0,
+ "req_proc_time_hist_bucket_11": 0,
+ "req_proc_time_hist_bucket_2": 0,
+ "req_proc_time_hist_bucket_3": 0,
+ "req_proc_time_hist_bucket_4": 0,
+ "req_proc_time_hist_bucket_5": 0,
+ "req_proc_time_hist_bucket_6": 0,
+ "req_proc_time_hist_bucket_7": 0,
+ "req_proc_time_hist_bucket_8": 0,
+ "req_proc_time_hist_bucket_9": 0,
+ "req_proc_time_hist_count": 0,
+ "req_proc_time_hist_sum": 0,
+ "req_proc_time_max": 0,
+ "req_proc_time_min": 0,
+ "req_proc_time_sum": 0,
+ "req_type_bad": 0,
+ "req_type_error": 0,
+ "req_type_redirect": 0,
+ "req_type_success": 0,
+ "req_unmatched": 0,
+ "requests": 72,
+ "resp_1xx": 0,
+ "resp_2xx": 0,
+ "resp_3xx": 0,
+ "resp_4xx": 0,
+ "resp_5xx": 0,
+ "uniq_ipv4": 0,
+ "uniq_ipv6": 0,
+ "upstream_resp_time_avg": 0,
+ "upstream_resp_time_count": 0,
+ "upstream_resp_time_hist_bucket_1": 0,
+ "upstream_resp_time_hist_bucket_10": 0,
+ "upstream_resp_time_hist_bucket_11": 0,
+ "upstream_resp_time_hist_bucket_2": 0,
+ "upstream_resp_time_hist_bucket_3": 0,
+ "upstream_resp_time_hist_bucket_4": 0,
+ "upstream_resp_time_hist_bucket_5": 0,
+ "upstream_resp_time_hist_bucket_6": 0,
+ "upstream_resp_time_hist_bucket_7": 0,
+ "upstream_resp_time_hist_bucket_8": 0,
+ "upstream_resp_time_hist_bucket_9": 0,
+ "upstream_resp_time_hist_count": 0,
+ "upstream_resp_time_hist_sum": 0,
+ "upstream_resp_time_max": 0,
+ "upstream_resp_time_min": 0,
+ "upstream_resp_time_sum": 0,
+ }
+
+ mx := weblog.Collect()
+
+ assert.Equal(t, expected, mx)
+ testCharts(t, weblog, mx)
+}
+
+func TestWebLog_IISLogs(t *testing.T) {
+ weblog := prepareWebLogCollectIISFields(t)
+
+ expected := map[string]int64{
+ "bytes_received": 0,
+ "bytes_sent": 0,
+ "req_http_scheme": 0,
+ "req_https_scheme": 0,
+ "req_ipv4": 38,
+ "req_ipv6": 114,
+ "req_method_GET": 152,
+ "req_port_80": 152,
+ "req_proc_time_avg": 5,
+ "req_proc_time_count": 152,
+ "req_proc_time_hist_bucket_1": 133,
+ "req_proc_time_hist_bucket_10": 145,
+ "req_proc_time_hist_bucket_11": 146,
+ "req_proc_time_hist_bucket_2": 133,
+ "req_proc_time_hist_bucket_3": 133,
+ "req_proc_time_hist_bucket_4": 133,
+ "req_proc_time_hist_bucket_5": 133,
+ "req_proc_time_hist_bucket_6": 133,
+ "req_proc_time_hist_bucket_7": 133,
+ "req_proc_time_hist_bucket_8": 138,
+ "req_proc_time_hist_bucket_9": 143,
+ "req_proc_time_hist_count": 152,
+ "req_proc_time_hist_sum": 799,
+ "req_proc_time_max": 256,
+ "req_proc_time_min": 0,
+ "req_proc_time_sum": 799,
+ "req_type_bad": 42,
+ "req_type_error": 0,
+ "req_type_redirect": 0,
+ "req_type_success": 110,
+ "req_unmatched": 16,
+ "req_vhost_127.0.0.1": 38,
+ "req_vhost_::1": 114,
+ "requests": 168,
+ "resp_1xx": 0,
+ "resp_2xx": 99,
+ "resp_3xx": 11,
+ "resp_4xx": 42,
+ "resp_5xx": 0,
+ "resp_code_200": 99,
+ "resp_code_304": 11,
+ "resp_code_404": 42,
+ "uniq_ipv4": 1,
+ "uniq_ipv6": 1,
+ "upstream_resp_time_avg": 0,
+ "upstream_resp_time_count": 0,
+ "upstream_resp_time_hist_bucket_1": 0,
+ "upstream_resp_time_hist_bucket_10": 0,
+ "upstream_resp_time_hist_bucket_11": 0,
+ "upstream_resp_time_hist_bucket_2": 0,
+ "upstream_resp_time_hist_bucket_3": 0,
+ "upstream_resp_time_hist_bucket_4": 0,
+ "upstream_resp_time_hist_bucket_5": 0,
+ "upstream_resp_time_hist_bucket_6": 0,
+ "upstream_resp_time_hist_bucket_7": 0,
+ "upstream_resp_time_hist_bucket_8": 0,
+ "upstream_resp_time_hist_bucket_9": 0,
+ "upstream_resp_time_hist_count": 0,
+ "upstream_resp_time_hist_sum": 0,
+ "upstream_resp_time_max": 0,
+ "upstream_resp_time_min": 0,
+ "upstream_resp_time_sum": 0,
+ }
+
+ mx := weblog.Collect()
+ assert.Equal(t, expected, mx)
+}
+
+func testCharts(t *testing.T, w *WebLog, mx map[string]int64) {
+ testVhostChart(t, w)
+ testPortChart(t, w)
+ testSchemeChart(t, w)
+ testClientCharts(t, w)
+ testHTTPMethodChart(t, w)
+ testURLPatternChart(t, w)
+ testHTTPVersionChart(t, w)
+ testRespCodeCharts(t, w)
+ testBandwidthChart(t, w)
+ testReqProcTimeCharts(t, w)
+ testUpsRespTimeCharts(t, w)
+ testSSLProtoChart(t, w)
+ testSSLCipherSuiteChart(t, w)
+ testURLPatternStatsCharts(t, w)
+ testCustomFieldCharts(t, w)
+ testCustomTimeFieldCharts(t, w)
+ testCustomNumericFieldCharts(t, w)
+
+ testChartsDimIDs(t, w, mx)
+}
+
+func testChartsDimIDs(t *testing.T, w *WebLog, mx map[string]int64) {
+ for _, chart := range *w.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics have no data for dim '%s' of chart '%s'", dim.ID, chart.ID)
+ }
+ }
+}
+
+func testVhostChart(t *testing.T, w *WebLog) {
+ if len(w.mx.ReqVhost) == 0 {
+ assert.Falsef(t, w.Charts().Has(reqByVhost.ID), "chart '%s' is created", reqByVhost.ID)
+ return
+ }
+
+ chart := w.Charts().Get(reqByVhost.ID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", reqByVhost.ID)
+ if chart == nil {
+ return
+ }
+ for v := range w.mx.ReqVhost {
+ id := "req_vhost_" + v
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' vhost, expected '%s'", chart.ID, v, id)
+ }
+}
+
+func testPortChart(t *testing.T, w *WebLog) {
+ if len(w.mx.ReqPort) == 0 {
+ assert.Falsef(t, w.Charts().Has(reqByPort.ID), "chart '%s' is created", reqByPort.ID)
+ return
+ }
+
+ chart := w.Charts().Get(reqByPort.ID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", reqByPort.ID)
+ if chart == nil {
+ return
+ }
+ for v := range w.mx.ReqPort {
+ id := "req_port_" + v
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' port, expected '%s'", chart.ID, v, id)
+ }
+}
+
+func testSchemeChart(t *testing.T, w *WebLog) {
+ if w.mx.ReqHTTPScheme.Value() == 0 && w.mx.ReqHTTPSScheme.Value() == 0 {
+ assert.Falsef(t, w.Charts().Has(reqByScheme.ID), "chart '%s' is created", reqByScheme.ID)
+ } else {
+ assert.Truef(t, w.Charts().Has(reqByScheme.ID), "chart '%s' is not created", reqByScheme.ID)
+ }
+}
+
+func testClientCharts(t *testing.T, w *WebLog) {
+ if w.mx.ReqIPv4.Value() == 0 && w.mx.ReqIPv6.Value() == 0 {
+ assert.Falsef(t, w.Charts().Has(reqByIPProto.ID), "chart '%s' is created", reqByIPProto.ID)
+ } else {
+ assert.Truef(t, w.Charts().Has(reqByIPProto.ID), "chart '%s' is not created", reqByIPProto.ID)
+ }
+
+ if w.mx.UniqueIPv4.Value() == 0 && w.mx.UniqueIPv6.Value() == 0 {
+ assert.Falsef(t, w.Charts().Has(uniqIPsCurPoll.ID), "chart '%s' is created", uniqIPsCurPoll.ID)
+ } else {
+ assert.Truef(t, w.Charts().Has(uniqIPsCurPoll.ID), "chart '%s' is not created", uniqIPsCurPoll.ID)
+ }
+}
+
+func testHTTPMethodChart(t *testing.T, w *WebLog) {
+ if len(w.mx.ReqMethod) == 0 {
+ assert.Falsef(t, w.Charts().Has(reqByMethod.ID), "chart '%s' is created", reqByMethod.ID)
+ return
+ }
+
+ chart := w.Charts().Get(reqByMethod.ID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", reqByMethod.ID)
+ if chart == nil {
+ return
+ }
+ for v := range w.mx.ReqMethod {
+ id := "req_method_" + v
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' method, expected '%s'", chart.ID, v, id)
+ }
+}
+
+func testURLPatternChart(t *testing.T, w *WebLog) {
+ if isEmptyCounterVec(w.mx.ReqURLPattern) {
+ assert.Falsef(t, w.Charts().Has(reqByURLPattern.ID), "chart '%s' is created", reqByURLPattern.ID)
+ return
+ }
+
+ chart := w.Charts().Get(reqByURLPattern.ID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", reqByURLPattern.ID)
+ if chart == nil {
+ return
+ }
+ for v := range w.mx.ReqURLPattern {
+ id := "req_url_ptn_" + v
+ assert.True(t, chart.HasDim(id), "chart '%s' has no dim for '%s' pattern, expected '%s'", chart.ID, v, id)
+ }
+}
+
+func testHTTPVersionChart(t *testing.T, w *WebLog) {
+ if len(w.mx.ReqVersion) == 0 {
+ assert.Falsef(t, w.Charts().Has(reqByVersion.ID), "chart '%s' is created", reqByVersion.ID)
+ return
+ }
+
+ chart := w.Charts().Get(reqByVersion.ID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", reqByVersion.ID)
+ if chart == nil {
+ return
+ }
+ for v := range w.mx.ReqVersion {
+ id := "req_version_" + v
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' version, expected '%s'", chart.ID, v, id)
+ }
+}
+
+func testRespCodeCharts(t *testing.T, w *WebLog) {
+ if isEmptyCounterVec(w.mx.RespCode) {
+ for _, id := range []string{
+ respCodes.ID,
+ respCodes1xx.ID,
+ respCodes2xx.ID,
+ respCodes3xx.ID,
+ respCodes4xx.ID,
+ respCodes5xx.ID,
+ } {
+ assert.Falsef(t, w.Charts().Has(id), "chart '%s' is created", id)
+ }
+ return
+ }
+
+ if !w.GroupRespCodes {
+ chart := w.Charts().Get(respCodes.ID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", respCodes.ID)
+ if chart == nil {
+ return
+ }
+ for v := range w.mx.RespCode {
+ id := "resp_code_" + v
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' code, expected '%s'", chart.ID, v, id)
+ }
+ return
+ }
+
+ findCodes := func(class string) (codes []string) {
+ for v := range w.mx.RespCode {
+ if v[:1] == class {
+ codes = append(codes, v)
+ }
+ }
+ return codes
+ }
+
+ var n int
+ ids := []string{
+ respCodes1xx.ID,
+ respCodes2xx.ID,
+ respCodes3xx.ID,
+ respCodes4xx.ID,
+ respCodes5xx.ID,
+ }
+ for i, chartID := range ids {
+ class := strconv.Itoa(i + 1)
+ codes := findCodes(class)
+ n += len(codes)
+ chart := w.Charts().Get(chartID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", chartID)
+ if chart == nil {
+ return
+ }
+ for _, v := range codes {
+ id := "resp_code_" + v
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' code, expected '%s'", chartID, v, id)
+ }
+ }
+ assert.Equal(t, len(w.mx.RespCode), n)
+}
+
+func testBandwidthChart(t *testing.T, w *WebLog) {
+ if w.mx.BytesSent.Value() == 0 && w.mx.BytesReceived.Value() == 0 {
+ assert.Falsef(t, w.Charts().Has(bandwidth.ID), "chart '%s' is created", bandwidth.ID)
+ } else {
+ assert.Truef(t, w.Charts().Has(bandwidth.ID), "chart '%s' is not created", bandwidth.ID)
+ }
+}
+
+func testReqProcTimeCharts(t *testing.T, w *WebLog) {
+ if isEmptySummary(w.mx.ReqProcTime) {
+ assert.Falsef(t, w.Charts().Has(reqProcTime.ID), "chart '%s' is created", reqProcTime.ID)
+ } else {
+ assert.Truef(t, w.Charts().Has(reqProcTime.ID), "chart '%s' is not created", reqProcTime.ID)
+ }
+
+ if isEmptyHistogram(w.mx.ReqProcTimeHist) {
+ assert.Falsef(t, w.Charts().Has(reqProcTimeHist.ID), "chart '%s' is created", reqProcTimeHist.ID)
+ } else {
+ assert.Truef(t, w.Charts().Has(reqProcTimeHist.ID), "chart '%s' is not created", reqProcTimeHist.ID)
+ }
+}
+
+func testUpsRespTimeCharts(t *testing.T, w *WebLog) {
+ if isEmptySummary(w.mx.UpsRespTime) {
+ assert.Falsef(t, w.Charts().Has(upsRespTime.ID), "chart '%s' is created", upsRespTime.ID)
+ } else {
+ assert.Truef(t, w.Charts().Has(upsRespTime.ID), "chart '%s' is not created", upsRespTime.ID)
+ }
+
+ if isEmptyHistogram(w.mx.UpsRespTimeHist) {
+ assert.Falsef(t, w.Charts().Has(upsRespTimeHist.ID), "chart '%s' is created", upsRespTimeHist.ID)
+ } else {
+ assert.Truef(t, w.Charts().Has(upsRespTimeHist.ID), "chart '%s' is not created", upsRespTimeHist.ID)
+ }
+}
+
+func testSSLProtoChart(t *testing.T, w *WebLog) {
+ if len(w.mx.ReqSSLProto) == 0 {
+ assert.Falsef(t, w.Charts().Has(reqBySSLProto.ID), "chart '%s' is created", reqBySSLProto.ID)
+ return
+ }
+
+ chart := w.Charts().Get(reqBySSLProto.ID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", reqBySSLProto.ID)
+ if chart == nil {
+ return
+ }
+ for v := range w.mx.ReqSSLProto {
+ id := "req_ssl_proto_" + v
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' ssl proto, expected '%s'", chart.ID, v, id)
+ }
+}
+
+func testSSLCipherSuiteChart(t *testing.T, w *WebLog) {
+ if len(w.mx.ReqSSLCipherSuite) == 0 {
+ assert.Falsef(t, w.Charts().Has(reqBySSLCipherSuite.ID), "chart '%s' is created", reqBySSLCipherSuite.ID)
+ return
+ }
+
+ chart := w.Charts().Get(reqBySSLCipherSuite.ID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", reqBySSLCipherSuite.ID)
+ if chart == nil {
+ return
+ }
+ for v := range w.mx.ReqSSLCipherSuite {
+ id := "req_ssl_cipher_suite_" + v
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' ssl cipher suite, expected '%s'", chart.ID, v, id)
+ }
+}
+
+func testURLPatternStatsCharts(t *testing.T, w *WebLog) {
+ for _, p := range w.URLPatterns {
+ chartID := fmt.Sprintf(urlPatternRespCodes.ID, p.Name)
+
+ if isEmptyCounterVec(w.mx.RespCode) {
+ assert.Falsef(t, w.Charts().Has(chartID), "chart '%s' is created", chartID)
+ continue
+ }
+
+ chart := w.Charts().Get(chartID)
+ assert.NotNilf(t, chart, "chart '%s' is not created", chartID)
+ if chart == nil {
+ continue
+ }
+
+ stats, ok := w.mx.URLPatternStats[p.Name]
+ assert.Truef(t, ok, "url pattern '%s' has no metric in w.mx.URLPatternStats", p.Name)
+ if !ok {
+ continue
+ }
+ for v := range stats.RespCode {
+ id := fmt.Sprintf("url_ptn_%s_resp_code_%s", p.Name, v)
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' code, expected '%s'", chartID, v, id)
+ }
+ }
+
+ for _, p := range w.URLPatterns {
+ id := fmt.Sprintf(urlPatternReqMethods.ID, p.Name)
+ if isEmptyCounterVec(w.mx.ReqMethod) {
+ assert.Falsef(t, w.Charts().Has(id), "chart '%s' is created", id)
+ continue
+ }
+
+ chart := w.Charts().Get(id)
+ assert.NotNilf(t, chart, "chart '%s' is not created", id)
+ if chart == nil {
+ continue
+ }
+
+ stats, ok := w.mx.URLPatternStats[p.Name]
+ assert.Truef(t, ok, "url pattern '%s' has no metric in w.mx.URLPatternStats", p.Name)
+ if !ok {
+ continue
+ }
+ for v := range stats.ReqMethod {
+ dimID := fmt.Sprintf("url_ptn_%s_req_method_%s", p.Name, v)
+ assert.Truef(t, chart.HasDim(dimID), "chart '%s' has no dim for '%s' method, expected '%s'", id, v, dimID)
+ }
+ }
+
+ for _, p := range w.URLPatterns {
+ id := fmt.Sprintf(urlPatternBandwidth.ID, p.Name)
+ if w.mx.BytesSent.Value() == 0 && w.mx.BytesReceived.Value() == 0 {
+ assert.Falsef(t, w.Charts().Has(id), "chart '%s' is created", id)
+ } else {
+ assert.Truef(t, w.Charts().Has(id), "chart '%s' is not created", id)
+ }
+ }
+
+ for _, p := range w.URLPatterns {
+ id := fmt.Sprintf(urlPatternReqProcTime.ID, p.Name)
+ if isEmptySummary(w.mx.ReqProcTime) {
+ assert.Falsef(t, w.Charts().Has(id), "chart '%s' is created", id)
+ } else {
+ assert.Truef(t, w.Charts().Has(id), "chart '%s' is not created", id)
+ }
+ }
+}
+
+func testCustomFieldCharts(t *testing.T, w *WebLog) {
+ for _, cf := range w.CustomFields {
+ id := fmt.Sprintf(reqByCustomFieldPattern.ID, cf.Name)
+ chart := w.Charts().Get(id)
+ assert.NotNilf(t, chart, "chart '%s' is not created", id)
+ if chart == nil {
+ continue
+ }
+
+ for _, p := range cf.Patterns {
+ id := fmt.Sprintf("custom_field_%s_%s", cf.Name, p.Name)
+ assert.Truef(t, chart.HasDim(id), "chart '%s' has no dim for '%s' pattern, expected '%s'", chart.ID, p.Name, id)
+ }
+ }
+}
+
+func testCustomTimeFieldCharts(t *testing.T, w *WebLog) {
+ for _, cf := range w.CustomTimeFields {
+ id := fmt.Sprintf(reqByCustomTimeField.ID, cf.Name)
+ chart := w.Charts().Get(id)
+ assert.NotNilf(t, chart, "chart '%s' is not created", id)
+ if chart == nil {
+ continue
+ }
+ dimMinID := fmt.Sprintf("custom_time_field_%s_time_min", cf.Name)
+ assert.True(t, chart.HasDim(dimMinID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimMinID)
+
+ dimMaxID := fmt.Sprintf("custom_time_field_%s_time_min", cf.Name)
+ assert.True(t, chart.HasDim(dimMaxID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimMaxID)
+
+ dimAveID := fmt.Sprintf("custom_time_field_%s_time_min", cf.Name)
+ assert.True(t, chart.HasDim(dimAveID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimAveID)
+ }
+}
+
+func testCustomNumericFieldCharts(t *testing.T, w *WebLog) {
+ for _, cf := range w.CustomNumericFields {
+ id := fmt.Sprintf(customNumericFieldSummaryChartTmpl.ID, cf.Name)
+ chart := w.Charts().Get(id)
+ assert.NotNilf(t, chart, "chart '%s' is not created", id)
+ if chart == nil {
+ continue
+ }
+ dimMinID := fmt.Sprintf("custom_numeric_field_%s_summary_min", cf.Name)
+ assert.True(t, chart.HasDim(dimMinID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimMinID)
+
+ dimMaxID := fmt.Sprintf("custom_numeric_field_%s_summary_min", cf.Name)
+ assert.True(t, chart.HasDim(dimMaxID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimMaxID)
+
+ dimAveID := fmt.Sprintf("custom_numeric_field_%s_summary_min", cf.Name)
+ assert.True(t, chart.HasDim(dimAveID), "chart '%s' has no dim for '%s' name, expected '%s'", chart.ID, cf.Name, dimAveID)
+ }
+}
+
+var (
+ emptySummary = newWebLogSummary()
+ emptyHistogram = metrics.NewHistogram(metrics.DefBuckets)
+)
+
+func isEmptySummary(s metrics.Summary) bool { return reflect.DeepEqual(s, emptySummary) }
+func isEmptyHistogram(h metrics.Histogram) bool { return reflect.DeepEqual(h, emptyHistogram) }
+
+func isEmptyCounterVec(cv metrics.CounterVec) bool {
+ for _, c := range cv {
+ if c.Value() > 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func prepareWebLogCollectFull(t *testing.T) *WebLog {
+ t.Helper()
+ format := strings.Join([]string{
+ "$host:$server_port",
+ "$remote_addr",
+ "-",
+ "-",
+ "$time_local",
+ `"$request"`,
+ "$status",
+ "$body_bytes_sent",
+ "$request_length",
+ "$request_time",
+ "$upstream_response_time",
+ "$scheme",
+ "$ssl_protocol",
+ "$ssl_cipher",
+ "$side",
+ "$drink",
+ "$random_time_field",
+ }, " ")
+
+ cfg := Config{
+ ParserConfig: logs.ParserConfig{
+ LogType: logs.TypeCSV,
+ CSV: logs.CSVConfig{
+ FieldsPerRecord: -1,
+ Delimiter: " ",
+ TrimLeadingSpace: false,
+ Format: format,
+ CheckField: checkCSVFormatField,
+ },
+ },
+ Path: "testdata/full.log",
+ ExcludePath: "",
+ URLPatterns: []userPattern{
+ {Name: "com", Match: "~ com$"},
+ {Name: "org", Match: "~ org$"},
+ {Name: "net", Match: "~ net$"},
+ {Name: "not_match", Match: "* !*"},
+ },
+ CustomFields: []customField{
+ {
+ Name: "side",
+ Patterns: []userPattern{
+ {Name: "dark", Match: "= dark"},
+ {Name: "light", Match: "= light"},
+ },
+ },
+ {
+ Name: "drink",
+ Patterns: []userPattern{
+ {Name: "beer", Match: "= beer"},
+ {Name: "wine", Match: "= wine"},
+ },
+ },
+ },
+ CustomTimeFields: []customTimeField{
+ {
+ Name: "random_time_field",
+ Histogram: metrics.DefBuckets,
+ },
+ },
+ Histogram: metrics.DefBuckets,
+ GroupRespCodes: true,
+ }
+ weblog := New()
+ weblog.Config = cfg
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
+ defer weblog.Cleanup()
+
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataFullLog))
+ require.NoError(t, err)
+ weblog.parser = p
+ return weblog
+}
+
+func prepareWebLogCollectCommon(t *testing.T) *WebLog {
+ t.Helper()
+ format := strings.Join([]string{
+ "$remote_addr",
+ "-",
+ "-",
+ "$time_local",
+ `"$request"`,
+ "$status",
+ "$body_bytes_sent",
+ }, " ")
+
+ cfg := Config{
+ ParserConfig: logs.ParserConfig{
+ LogType: logs.TypeCSV,
+ CSV: logs.CSVConfig{
+ FieldsPerRecord: -1,
+ Delimiter: " ",
+ TrimLeadingSpace: false,
+ Format: format,
+ CheckField: checkCSVFormatField,
+ },
+ },
+ Path: "testdata/common.log",
+ ExcludePath: "",
+ URLPatterns: nil,
+ CustomFields: nil,
+ Histogram: nil,
+ GroupRespCodes: false,
+ }
+
+ weblog := New()
+ weblog.Config = cfg
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
+ defer weblog.Cleanup()
+
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataCommonLog))
+ require.NoError(t, err)
+ weblog.parser = p
+ return weblog
+}
+
+func prepareWebLogCollectCustom(t *testing.T) *WebLog {
+ t.Helper()
+ format := strings.Join([]string{
+ "$side",
+ "$drink",
+ }, " ")
+
+ cfg := Config{
+ ParserConfig: logs.ParserConfig{
+ LogType: logs.TypeCSV,
+ CSV: logs.CSVConfig{
+ FieldsPerRecord: 2,
+ Delimiter: " ",
+ TrimLeadingSpace: false,
+ Format: format,
+ CheckField: checkCSVFormatField,
+ },
+ },
+ CustomFields: []customField{
+ {
+ Name: "side",
+ Patterns: []userPattern{
+ {Name: "dark", Match: "= dark"},
+ {Name: "light", Match: "= light"},
+ },
+ },
+ {
+ Name: "drink",
+ Patterns: []userPattern{
+ {Name: "beer", Match: "= beer"},
+ {Name: "wine", Match: "= wine"},
+ },
+ },
+ },
+ Path: "testdata/custom.log",
+ ExcludePath: "",
+ URLPatterns: nil,
+ Histogram: nil,
+ GroupRespCodes: false,
+ }
+ weblog := New()
+ weblog.Config = cfg
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
+ defer weblog.Cleanup()
+
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataCustomLog))
+ require.NoError(t, err)
+ weblog.parser = p
+ return weblog
+}
+
+func prepareWebLogCollectCustomTimeFields(t *testing.T) *WebLog {
+ t.Helper()
+ format := strings.Join([]string{
+ "$time1",
+ "$time2",
+ }, " ")
+
+ cfg := Config{
+ ParserConfig: logs.ParserConfig{
+ LogType: logs.TypeCSV,
+ CSV: logs.CSVConfig{
+ FieldsPerRecord: 2,
+ Delimiter: " ",
+ TrimLeadingSpace: false,
+ Format: format,
+ CheckField: checkCSVFormatField,
+ },
+ },
+ CustomTimeFields: []customTimeField{
+ {
+ Name: "time1",
+ Histogram: metrics.DefBuckets,
+ },
+ {
+ Name: "time2",
+ Histogram: metrics.DefBuckets,
+ },
+ },
+ Path: "testdata/custom_time_fields.log",
+ ExcludePath: "",
+ URLPatterns: nil,
+ Histogram: nil,
+ GroupRespCodes: false,
+ }
+ weblog := New()
+ weblog.Config = cfg
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
+ defer weblog.Cleanup()
+
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataCustomTimeFieldLog))
+ require.NoError(t, err)
+ weblog.parser = p
+ return weblog
+}
+
+func prepareWebLogCollectCustomNumericFields(t *testing.T) *WebLog {
+ t.Helper()
+ format := strings.Join([]string{
+ "$numeric1",
+ "$numeric2",
+ }, " ")
+
+ cfg := Config{
+ ParserConfig: logs.ParserConfig{
+ LogType: logs.TypeCSV,
+ CSV: logs.CSVConfig{
+ FieldsPerRecord: 2,
+ Delimiter: " ",
+ TrimLeadingSpace: false,
+ Format: format,
+ CheckField: checkCSVFormatField,
+ },
+ },
+ CustomNumericFields: []customNumericField{
+ {
+ Name: "numeric1",
+ Units: "bytes",
+ },
+ {
+ Name: "numeric2",
+ Units: "requests",
+ },
+ },
+ Path: "testdata/custom_time_fields.log",
+ ExcludePath: "",
+ URLPatterns: nil,
+ Histogram: nil,
+ GroupRespCodes: false,
+ }
+ weblog := New()
+ weblog.Config = cfg
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
+ defer weblog.Cleanup()
+
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataCustomTimeFieldLog))
+ require.NoError(t, err)
+ weblog.parser = p
+ return weblog
+}
+
+func prepareWebLogCollectIISFields(t *testing.T) *WebLog {
+ t.Helper()
+ format := strings.Join([]string{
+ "-", // date
+ "-", // time
+ "$host", // s-ip
+ "$request_method", // cs-method
+ "$request_uri", // cs-uri-stem
+ "-", // cs-uri-query
+ "$server_port", // s-port
+ "-", // cs-username
+ "$remote_addr", // c-ip
+ "-", // cs(User-Agent)
+ "-", // cs(Referer)
+ "$status", // sc-status
+ "-", // sc-substatus
+ "-", // sc-win32-status
+ "$request_time", // time-taken
+ }, " ")
+ cfg := Config{
+ ParserConfig: logs.ParserConfig{
+ LogType: logs.TypeCSV,
+ CSV: logs.CSVConfig{
+ // -1 allows records to have a variable number of fields
+ FieldsPerRecord: -1,
+ Delimiter: " ",
+ TrimLeadingSpace: false,
+ Format: format,
+ CheckField: checkCSVFormatField,
+ },
+ },
+ Path: "testdata/u_ex221107.log",
+ ExcludePath: "",
+ URLPatterns: nil,
+ Histogram: nil,
+ GroupRespCodes: false,
+ }
+
+ weblog := New()
+ weblog.Config = cfg
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
+ defer weblog.Cleanup()
+
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataIISLog))
+ require.NoError(t, err)
+ weblog.parser = p
+ return weblog
+}
+
+// generateLogs is used to populate 'testdata/full.log'
+//func generateLogs(w io.Writer, num int) error {
+// var (
+// vhost = []string{"localhost", "test.example.com", "test.example.org", "198.51.100.1", "2001:db8:1ce::1"}
+// scheme = []string{"http", "https"}
+// client = []string{"localhost", "203.0.113.1", "203.0.113.2", "2001:db8:2ce:1", "2001:db8:2ce:2"}
+// method = []string{"GET", "HEAD", "POST"}
+// url = []string{"example.other", "example.com", "example.org", "example.net"}
+// version = []string{"1.1", "2", "2.0"}
+// status = []int{100, 101, 200, 201, 300, 301, 400, 401} // no 5xx on purpose
+// sslProto = []string{"TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3", "SSLv2", "SSLv3"}
+// sslCipher = []string{"ECDHE-RSA-AES256-SHA", "DHE-RSA-AES256-SHA", "AES256-SHA", "PSK-RC4-SHA"}
+//
+// customField1 = []string{"dark", "light"}
+// customField2 = []string{"beer", "wine"}
+// )
+//
+// var line string
+// for i := 0; i < num; i++ {
+// unmatched := randInt(1, 100) > 90
+// if unmatched {
+// line = "Unmatched! The rat the cat the dog chased killed ate the malt!\n"
+// } else {
+// // test.example.com:80 203.0.113.1 - - "GET / HTTP/1.1" 200 1674 2674 3674 4674 http TLSv1 AES256-SHA dark beer
+// line = fmt.Sprintf(
+// "%s:%d %s - - [22/Mar/2009:09:30:31 +0100] \"%s /%s HTTP/%s\" %d %d %d %d %d %s %s %s %s %s\n",
+// randFromString(vhost),
+// randInt(80, 85),
+// randFromString(client),
+// randFromString(method),
+// randFromString(url),
+// randFromString(version),
+// randFromInt(status),
+// randInt(1000, 5000),
+// randInt(1000, 5000),
+// randInt(1, 500),
+// randInt(1, 500),
+// randFromString(scheme),
+// randFromString(sslProto),
+// randFromString(sslCipher),
+// randFromString(customField1),
+// randFromString(customField2),
+// )
+// }
+// _, err := fmt.Fprint(w, line)
+// if err != nil {
+// return err
+// }
+// }
+// return nil
+//}
+//
+//var r = rand.New(rand.NewSource(time.Now().UnixNano()))
+//
+//func randFromString(s []string) string { return s[r.Intn(len(s))] }
+//func randFromInt(s []int) int { return s[r.Intn(len(s))] }
+//func randInt(min, max int) int { return r.Intn(max-min) + min }
diff --git a/src/go/plugin/go.d/modules/whoisquery/README.md b/src/go/plugin/go.d/modules/whoisquery/README.md
new file mode 120000
index 000000000..8661481d1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/README.md
@@ -0,0 +1 @@
+integrations/domain_expiration_date.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/whoisquery/charts.go b/src/go/plugin/go.d/modules/whoisquery/charts.go
new file mode 100644
index 000000000..c234fcc56
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/charts.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+var baseCharts = module.Charts{
+ {
+ ID: "time_until_expiration",
+ Title: "Time Until Domain Expiration",
+ Units: "seconds",
+ Fam: "expiration time",
+ Ctx: "whoisquery.time_until_expiration",
+ Opts: module.Opts{StoreFirst: true},
+ Dims: module.Dims{
+ {ID: "expiry"},
+ },
+ Vars: module.Vars{
+ {ID: "days_until_expiration_warning"},
+ {ID: "days_until_expiration_critical"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/whoisquery/collect.go b/src/go/plugin/go.d/modules/whoisquery/collect.go
new file mode 100644
index 000000000..7bd8ed70f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/collect.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import "fmt"
+
+func (w *WhoisQuery) collect() (map[string]int64, error) {
+ remainingTime, err := w.prov.remainingTime()
+ if err != nil {
+ return nil, fmt.Errorf("%v (source: %s)", err, w.Source)
+ }
+
+ mx := make(map[string]int64)
+ w.collectExpiration(mx, remainingTime)
+
+ return mx, nil
+}
+
+func (w *WhoisQuery) collectExpiration(mx map[string]int64, remainingTime float64) {
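+ // remainingTime comes from the provider as seconds until the expiration date;
+ // the warning/critical thresholds are exported as chart variables for the health alert.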
+ mx["expiry"] = int64(remainingTime)
+ mx["days_until_expiration_warning"] = w.DaysUntilWarn
+ mx["days_until_expiration_critical"] = w.DaysUntilCrit
+}
diff --git a/src/go/plugin/go.d/modules/whoisquery/config_schema.json b/src/go/plugin/go.d/modules/whoisquery/config_schema.json
new file mode 100644
index 000000000..fd3ef4955
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/config_schema.json
@@ -0,0 +1,60 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "WHOIS query collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 60
+ },
+ "source": {
+ "title": "Domain",
+ "description": "The domain for which WHOIS queries will be performed.",
+ "type": "string"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the WHOIS query.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 5
+ },
+ "days_until_expiration_warning": {
+ "title": "Days until warning",
+ "description": "Number of days before the alarm status is set to warning.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 30
+ },
+ "days_until_expiration_critical": {
+ "title": "Days until critical",
+ "description": "Number of days before the alarm status is set to critical.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 15
+ }
+ },
+ "required": [
+ "source"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "source": {
+ "ui:placeholder": "example.com"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/whoisquery/init.go b/src/go/plugin/go.d/modules/whoisquery/init.go
new file mode 100644
index 000000000..a0560b73d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/init.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (w *WhoisQuery) validateConfig() error {
+ if w.Source == "" {
+ return errors.New("source is not set")
+ }
+ return nil
+}
+
+func (w *WhoisQuery) initProvider() (provider, error) {
+ return newProvider(w.Config)
+}
+
+func (w *WhoisQuery) initCharts() *module.Charts {
+ charts := baseCharts.Copy()
+
+ for _, chart := range *charts {
+ chart.Labels = []module.Label{
+ {Key: "domain", Value: w.Source},
+ }
+ }
+
+ return charts
+}
diff --git a/src/go/plugin/go.d/modules/whoisquery/integrations/domain_expiration_date.md b/src/go/plugin/go.d/modules/whoisquery/integrations/domain_expiration_date.md
new file mode 100644
index 000000000..78508e960
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/integrations/domain_expiration_date.md
@@ -0,0 +1,222 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/whoisquery/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/whoisquery/metadata.yaml"
+sidebar_label: "Domain expiration date"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Domain expiration date
+
+
+<img src="https://netdata.cloud/img/globe.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: whoisquery
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the remaining time before the domain expires.
+
+
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per domain
+
+These metrics refer to the configured source.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| domain | Configured source |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| whoisquery.time_until_expiration | expiry | seconds |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ whoisquery_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf) | whoisquery.time_until_expiration | time until the domain name registration expires |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/whoisquery.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/whoisquery.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 60 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| source | Domain address. | | yes |
+| days_until_expiration_warning | Number of days before the alarm status is warning. | 30 | no |
+| days_until_expiration_critical | Number of days before the alarm status is critical. | 15 | no |
+| timeout | The query timeout in seconds. | 5 | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+Basic configuration example
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: my_site
+ source: my_site.com
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define more than one job, their names must be unique.
+
+Check the expiration status of multiple domains.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: my_site1
+ source: my_site1.com
+
+ - name: my_site2
+ source: my_site2.com
+
+```
+</details>
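+
+##### Job options (illustrative)
+
+A minimal sketch that overrides the per-job options documented in the table above; the values are illustrative, not recommendations.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: my_site
+ source: my_site.com
+ timeout: 10
+ days_until_expiration_warning: 60
+ days_until_expiration_critical: 30
+
+```
+</details>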
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `whoisquery` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m whoisquery
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `whoisquery` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep whoisquery
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep whoisquery /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep whoisquery
+```
+
+
diff --git a/src/go/plugin/go.d/modules/whoisquery/metadata.yaml b/src/go/plugin/go.d/modules/whoisquery/metadata.yaml
new file mode 100644
index 000000000..eb826ebde
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/metadata.yaml
@@ -0,0 +1,125 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-whoisquery
+ plugin_name: go.d.plugin
+ module_name: whoisquery
+ monitored_instance:
+ name: Domain expiration date
+ link: ""
+ icon_filename: globe.svg
+ categories:
+ - data-collection.synthetic-checks
+ keywords:
+ - whois
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors the remaining time before the domain expires.
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/whoisquery.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 60
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: source
+ description: Domain address.
+ default_value: ""
+ required: true
+ - name: days_until_expiration_warning
+ description: Number of days before the alarm status is warning.
+ default_value: 30
+ required: false
+ - name: days_until_expiration_critical
+ description: Number of days before the alarm status is critical.
+ default_value: 15
+ required: false
+ - name: timeout
+ description: The query timeout in seconds.
+ default_value: 5
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: Basic configuration example
+ config: |
+ jobs:
+ - name: my_site
+ source: my_site.com
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define more than one job, their names must be unique.
+
+ Check the expiration status of multiple domains.
+ config: |
+ jobs:
+ - name: my_site1
+ source: my_site1.com
+
+ - name: my_site2
+ source: my_site2.com
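+ - name: Job options (illustrative)
+ description: |
+ A minimal sketch that overrides the per-job options documented above; values are illustrative, not recommendations.
+ config: |
+ jobs:
+ - name: my_site
+ source: my_site.com
+ timeout: 10
+ days_until_expiration_warning: 60
+ days_until_expiration_critical: 30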
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: whoisquery_days_until_expiration
+ metric: whoisquery.time_until_expiration
+ info: time until the domain name registration expires
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/whoisquery.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: domain
+ description: These metrics refer to the configured source.
+ labels:
+ - name: domain
+ description: Configured source
+ metrics:
+ - name: whoisquery.time_until_expiration
+ description: Time Until Domain Expiration
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: expiry
diff --git a/src/go/plugin/go.d/modules/whoisquery/provider.go b/src/go/plugin/go.d/modules/whoisquery/provider.go
new file mode 100644
index 000000000..f6164da7c
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/provider.go
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/araddon/dateparse"
+ "github.com/likexian/whois"
+ whoisparser "github.com/likexian/whois-parser"
+)
+
+type provider interface {
+ remainingTime() (float64, error)
+}
+
+type whoisClient struct {
+ domainAddress string
+ client *whois.Client
+}
+
+func newProvider(config Config) (provider, error) {
+ domain := config.Source
+ client := whois.NewClient()
+ client.SetTimeout(config.Timeout.Duration())
+
+ return &whoisClient{
+ domainAddress: domain,
+ client: client,
+ }, nil
+}
+
+func (c *whoisClient) remainingTime() (float64, error) {
+ info, err := c.queryWhoisInfo()
+ if err != nil {
+ return 0, err
+ }
+
+ if info.Domain.ExpirationDate == "" {
+ if !strings.HasPrefix(c.domainAddress, "=") {
+ // some servers support requesting extended data
+ // https://github.com/netdata/netdata/issues/17907#issuecomment-2171758380
+ c.domainAddress = fmt.Sprintf("= %s", c.domainAddress)
+ return c.remainingTime()
+ }
+ }
+
+ return parseWhoisInfoExpirationDate(info)
+}
+
+func (c *whoisClient) queryWhoisInfo() (*whoisparser.WhoisInfo, error) {
+ resp, err := c.client.Whois(c.domainAddress)
+ if err != nil {
+ return nil, err
+ }
+
+ info, err := whoisparser.Parse(resp)
+ if err != nil {
+ return nil, err
+ }
+
+ return &info, nil
+}
+
+func parseWhoisInfoExpirationDate(info *whoisparser.WhoisInfo) (float64, error) {
+ if info == nil || info.Domain == nil {
+ return 0, errors.New("nil Whois Info")
+ }
+
+ if info.Domain.ExpirationDateInTime != nil {
+ return time.Until(*info.Domain.ExpirationDateInTime).Seconds(), nil
+ }
+
+ date := info.Domain.ExpirationDate
+ if date == "" {
+ return 0, errors.New("no expiration date")
+ }
+
+ if strings.Contains(date, " ") {
+ // https://community.netdata.cloud/t/whois-query-monitor-cannot-parse-expiration-time/3485
+ if v, err := time.Parse("2006.01.02 15:04:05", date); err == nil {
+ return time.Until(v).Seconds(), nil
+ }
+ }
+
+ expire, err := dateparse.ParseAny(date)
+ if err != nil {
+ return 0, err
+ }
+
+ return time.Until(expire).Seconds(), nil
+}
diff --git a/src/go/plugin/go.d/modules/whoisquery/testdata/config.json b/src/go/plugin/go.d/modules/whoisquery/testdata/config.json
new file mode 100644
index 000000000..e633bd4ed
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/testdata/config.json
@@ -0,0 +1,7 @@
+{
+ "update_every": 123,
+ "source": "ok",
+ "timeout": 123.123,
+ "days_until_expiration_warning": 123,
+ "days_until_expiration_critical": 123
+}
diff --git a/src/go/plugin/go.d/modules/whoisquery/testdata/config.yaml b/src/go/plugin/go.d/modules/whoisquery/testdata/config.yaml
new file mode 100644
index 000000000..ad4c501c0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+source: "ok"
+timeout: 123.123
+days_until_expiration_warning: 123
+days_until_expiration_critical: 123
diff --git a/src/go/plugin/go.d/modules/whoisquery/whoisquery.go b/src/go/plugin/go.d/modules/whoisquery/whoisquery.go
new file mode 100644
index 000000000..1f59779b3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/whoisquery.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("whoisquery", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 60,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *WhoisQuery {
+ return &WhoisQuery{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 5),
+ DaysUntilWarn: 30,
+ DaysUntilCrit: 15,
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Source string `yaml:"source" json:"source"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ DaysUntilWarn int64 `yaml:"days_until_expiration_warning,omitempty" json:"days_until_expiration_warning"`
+ DaysUntilCrit int64 `yaml:"days_until_expiration_critical,omitempty" json:"days_until_expiration_critical"`
+}
+
+type WhoisQuery struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prov provider
+}
+
+func (w *WhoisQuery) Configuration() any {
+ return w.Config
+}
+
+func (w *WhoisQuery) Init() error {
+ if err := w.validateConfig(); err != nil {
+ w.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prov, err := w.initProvider()
+ if err != nil {
+ w.Errorf("init whois provider: %v", err)
+ return err
+ }
+ w.prov = prov
+
+ w.charts = w.initCharts()
+
+ return nil
+}
+
+func (w *WhoisQuery) Check() error {
+ mx, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (w *WhoisQuery) Charts() *module.Charts {
+ return w.charts
+}
+
+func (w *WhoisQuery) Collect() map[string]int64 {
+ mx, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (w *WhoisQuery) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/whoisquery/whoisquery_test.go b/src/go/plugin/go.d/modules/whoisquery/whoisquery_test.go
new file mode 100644
index 000000000..4979c7f57
--- /dev/null
+++ b/src/go/plugin/go.d/modules/whoisquery/whoisquery_test.go
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package whoisquery
+
+import (
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestWhoisQuery_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &WhoisQuery{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestWhoisQuery_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestWhoisQuery_Charts(t *testing.T) {
+ whoisquery := New()
+ whoisquery.Source = "example.com"
+ require.NoError(t, whoisquery.Init())
+
+ assert.NotNil(t, whoisquery.Charts())
+}
+
+func TestWhoisQuery_Init(t *testing.T) {
+ const net = iota
+ tests := map[string]struct {
+ config Config
+ providerType int
+ err bool
+ }{
+ "ok from net": {
+ config: Config{Source: "example.org"},
+ providerType: net,
+ },
+ "empty source": {
+ config: Config{Source: ""},
+ err: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ whoisquery := New()
+ whoisquery.Config = test.config
+
+ if test.err {
+ assert.Error(t, whoisquery.Init())
+ } else {
+ require.NoError(t, whoisquery.Init())
+
+ var typeOK bool
+ if test.providerType == net {
+ _, typeOK = whoisquery.prov.(*whoisClient)
+ }
+
+ assert.True(t, typeOK)
+ }
+ })
+ }
+}
+
+func TestWhoisQuery_Check(t *testing.T) {
+ whoisquery := New()
+ whoisquery.prov = &mockProvider{remTime: 12345.678}
+
+ assert.NoError(t, whoisquery.Check())
+}
+
+func TestWhoisQuery_Check_ReturnsFalseOnProviderError(t *testing.T) {
+ whoisquery := New()
+ whoisquery.prov = &mockProvider{err: true}
+
+ assert.Error(t, whoisquery.Check())
+}
+
+func TestWhoisQuery_Collect(t *testing.T) {
+ whoisquery := New()
+ whoisquery.Source = "example.com"
+ require.NoError(t, whoisquery.Init())
+ whoisquery.prov = &mockProvider{remTime: 12345}
+
+ collected := whoisquery.Collect()
+
+ expected := map[string]int64{
+ "expiry": 12345,
+ "days_until_expiration_warning": 30,
+ "days_until_expiration_critical": 15,
+ }
+
+ assert.NotZero(t, collected)
+ assert.Equal(t, expected, collected)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, whoisquery, collected)
+}
+
+func TestWhoisQuery_Collect_ReturnsNilOnProviderError(t *testing.T) {
+ whoisquery := New()
+ whoisquery.Source = "example.com"
+ require.NoError(t, whoisquery.Init())
+ whoisquery.prov = &mockProvider{err: true}
+
+ assert.Nil(t, whoisquery.Collect())
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, whoisquery *WhoisQuery, collected map[string]int64) {
+ for _, chart := range *whoisquery.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+type mockProvider struct {
+ remTime float64
+ err bool
+}
+
+func (m mockProvider) remainingTime() (float64, error) {
+ if m.err {
+ return 0, errors.New("mock remaining time error")
+ }
+ return m.remTime, nil
+}
diff --git a/src/go/plugin/go.d/modules/windows/README.md b/src/go/plugin/go.d/modules/windows/README.md
new file mode 120000
index 000000000..802d61bd1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/README.md
@@ -0,0 +1 @@
+integrations/windows.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/windows/charts.go b/src/go/plugin/go.d/modules/windows/charts.go
new file mode 100644
index 000000000..cedc33fa7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/charts.go
@@ -0,0 +1,4933 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioCPUUtil = module.Priority + iota
+ prioCPUCoreUtil
+ prioCPUInterrupts
+ prioCPUDPCs
+ prioCPUCoreCState
+
+ prioMemUtil
+ prioMemPageFaults
+ prioMemSwapUtil
+ prioMemSwapOperations
+ prioMemSwapPages
+ prioMemCache
+ prioMemCacheFaults
+ prioMemSystemPool
+
+ prioDiskSpaceUsage
+ prioDiskBandwidth
+ prioDiskOperations
+ prioDiskAvgLatency
+
+ prioNICBandwidth
+ prioNICPackets
+ prioNICErrors
+ prioNICDiscards
+
+ prioTCPConnsEstablished
+ prioTCPConnsActive
+ prioTCPConnsPassive
+ prioTCPConnsFailure
+ prioTCPConnsReset
+ prioTCPSegmentsReceived
+ prioTCPSegmentsSent
+ prioTCPSegmentsRetransmitted
+
+ prioOSProcesses
+ prioOSUsers
+ prioOSVisibleMemoryUsage
+ prioOSPagingUsage
+
+ prioSystemThreads
+ prioSystemUptime
+
+ prioLogonSessions
+
+ prioThermalzoneTemperature
+
+ prioProcessesCPUUtilization
+ prioProcessesMemoryUsage
+ prioProcessesIOBytes
+ prioProcessesIOOperations
+ prioProcessesPageFaults
+ prioProcessesPageFileBytes
+ prioProcessesThreads
+ prioProcessesHandles
+
+ prioIISWebsiteTraffic
+ prioIISWebsiteFTPFileTransferRate
+ prioIISWebsiteActiveConnectionsCount
+ prioIISWebsiteRequestsRate
+ prioIISWebsiteConnectionAttemptsRate
+ prioIISWebsiteUsersCount
+ prioIISWebsiteISAPIExtRequestsCount
+ prioIISWebsiteISAPIExtRequestsRate
+ prioIISWebsiteErrorsRate
+ prioIISWebsiteLogonAttemptsRate
+ prioIISWebsiteUptime
+
+ // Connections
+ prioMSSQLUserConnections
+
+ // Transactions
+ prioMSSQLDatabaseTransactions
+ prioMSSQLDatabaseActiveTransactions
+ prioMSSQLDatabaseWriteTransactions
+ prioMSSQLDatabaseBackupRestoreOperations
+ prioMSSQLDatabaseLogFlushes
+ prioMSSQLDatabaseLogFlushed
+
+ // Size
+ prioMSSQLDatabaseDataFileSize
+
+ // SQL activity
+ prioMSSQLStatsBatchRequests
+ prioMSSQLStatsCompilations
+ prioMSSQLStatsRecompilations
+ prioMSSQLStatsAutoParameterization
+ prioMSSQLStatsSafeAutoParameterization
+
+ // Processes
+ prioMSSQLBlockedProcess
+
+ // Buffer Cache
+ prioMSSQLCacheHitRatio
+ prioMSSQLBufManIOPS
+ prioMSSQLBufferCheckpointPages
+ prioMSSQLAccessMethodPageSplits
+ prioMSSQLBufferPageLifeExpectancy
+
+ // Memory
+ prioMSSQLMemmgrConnectionMemoryBytes
+ prioMSSQLMemTotalServer
+ prioMSSQLMemmgrExternalBenefitOfMemory
+ prioMSSQLMemmgrPendingMemoryGrants
+
+ // Locks
+ prioMSSQLLocksLockWait
+ prioMSSQLLocksDeadLocks
+
+ // Error
+ prioMSSQLSqlErrorsTotal
+
+ // NET Framework
+ // Exceptions
+ prioNETFrameworkCLRExceptionsThrown
+ prioNETFrameworkCLRExceptionsFilters
+ prioNETFrameworkCLRExceptionsFinallys
+ prioNETFrameworkCLRExceptionsThrowToCatchDepth
+
+ // InterOP
+ prioNETFrameworkCLRInteropCOMCallableWrappers
+ prioNETFrameworkCLRInteropMarshalling
+ prioNETFrameworkCLRInteropStubsCreated
+ prioNETFrameworkCLRJITMethods
+
+ // JIT
+ prioNETFrameworkCLRJITTime
+ prioNETFrameworkCLRJITStandardFailures
+ prioNETFrameworkCLRJITILBytes
+
+ // Loading
+ prioNETFrameworkCLRLoadingLoaderHeapSize
+ prioNETFrameworkCLRLoadingAppDomainsLoaded
+ prioNETFrameworkCLRLoadingAppDomainsUnloaded
+ prioNETFrameworkCLRLoadingAssembliesLoaded
+ prioNETFrameworkCLRLoadingClassesLoaded
+ prioNETFrameworkCLRLoadingClassLoadFailure
+
+ // Locks and threads
+ prioNETFrameworkCLRLocksAndThreadsQueueLength
+ prioNETFrameworkCLRLocksAndThreadsCurrentLogicalThreads
+ prioNETFrameworkCLRLocksAndThreadsCurrentPhysicalThreads
+ prioNETFrameworkCLRLocksAndThreadsRecognizedThreads
+ prioNETFrameworkCLRLocksAndThreadsContentions
+
+ // Memory
+ prioNETFrameworkCLRMemoryAllocatedBytes
+ prioNETFrameworkCLRMemoryFinalizationSurvivors
+ prioNETFrameworkCLRMemoryHeapSize
+ prioNETFrameworkCLRMemoryPromoted
+ prioNETFrameworkCLRMemoryNumberGCHandles
+ prioNETFrameworkCLRMemoryCollections
+ prioNETFrameworkCLRMemoryInducedGC
+ prioNETFrameworkCLRMemoryNumberPinnedObjects
+ prioNETFrameworkCLRMemoryNumberSinkBlocksInUse
+ prioNETFrameworkCLRMemoryCommitted
+ prioNETFrameworkCLRMemoryReserved
+ prioNETFrameworkCLRMemoryGCTime
+
+ // Remoting
+ prioNETFrameworkCLRRemotingChannels
+ prioNETFrameworkCLRRemotingContextBoundClassesLoaded
+ prioNETFrameworkCLRRemotingContextBoundObjects
+ prioNETFrameworkCLRRemotingContextProxies
+ prioNETFrameworkCLRRemotingContexts
+ prioNETFrameworkCLRRemotingRemoteCalls
+
+ // Security
+ prioNETFrameworkCLRSecurityLinkTimeChecks
+ prioNETFrameworkCLRSecurityRTChecksTime
+ prioNETFrameworkCLRSecurityStackWalkDepth
+ prioNETFrameworkCLRSecurityRuntimeChecks
+
+ prioServiceState
+ prioServiceStatus
+
+ // Database
+ prioADDatabaseOperations
+ prioADDirectoryOperations
+ prioADNameCacheLookups
+ prioADCacheHits
+
+ // Replication
+ prioADDRAReplicationIntersiteCompressedTraffic
+ prioADDRAReplicationIntrasiteCompressedTraffic
+ prioADDRAReplicationSyncObjectsRemaining
+ prioADDRAReplicationPropertiesUpdated
+ prioADDRAReplicationPropertiesFiltered
+ prioADDRAReplicationObjectsFiltered
+ prioADReplicationPendingSyncs
+ prioADDRASyncRequests
+ prioADDirectoryServiceThreadsInUse
+
+ // Bind
+ prioADLDAPBindTime
+ prioADBindsTotal
+
+ // LDAP
+ prioADLDAPSearchesTotal
+
+ // Thread Queue
+ prioADATQAverageRequestLatency
+ prioADATQOutstandingRequests
+
+ // Requests
+ prioADCSCertTemplateRequests
+ prioADCSCertTemplateRequestProcessingTime
+ prioADCSCertTemplateRetrievals
+ prioADCSCertTemplateFailedRequests
+ prioADCSCertTemplateIssuesRequests
+ prioADCSCertTemplatePendingRequests
+
+ // Response
+ prioADCSCertTemplateChallengeResponses
+
+ // Retrieval
+ prioADCSCertTemplateRetrievalProcessingTime
+
+ // Timing
+ prioADCSCertTemplateRequestCryptoSigningTime
+ prioADCSCertTemplateRequestPolicyModuleProcessingTime
+ prioADCSCertTemplateChallengeResponseProcessingTime
+ prioADCSCertTemplateSignedCertificateTimestampLists
+ prioADCSCertTemplateSignedCertificateTimestampListProcessingTime
+
+ // ADFS
+ // AD
+ prioADFSADLoginConnectionFailures
+
+ // DB Artifacts
+ prioADFSDBArtifactFailures
+ prioADFSDBArtifactQueryTimeSeconds
+
+ // DB Config
+ prioADFSDBConfigFailures
+ prioADFSDBConfigQueryTimeSeconds
+
+ // Auth
+ prioADFSDeviceAuthentications
+ prioADFSExternalAuthentications
+ prioADFSOauthAuthorizationRequests
+ prioADFSCertificateAuthentications
+ prioADFSOauthClientAuthentications
+ prioADFSPassportAuthentications
+ prioADFSSSOAuthentications
+ prioADFSUserPasswordAuthentications
+ prioADFSWindowsIntegratedAuthentications
+
+ // OAuth
+ prioADFSOauthClientCredentials
+ prioADFSOauthClientPrivkeyJwtAuthentication
+ prioADFSOauthClientSecretBasicAuthentications
+ prioADFSOauthClientSecretPostAuthentications
+ prioADFSOauthClientWindowsAuthentications
+ prioADFSOauthLogonCertificateRequests
+ prioADFSOauthPasswordGrantRequests
+ prioADFSOauthTokenRequestsSuccess
+ prioADFSFederatedAuthentications
+
+ // Requests
+ prioADFSFederationMetadataRequests
+ prioADFSPassiveRequests
+ prioADFSPasswordChangeRequests
+ prioADFSSAMLPTokenRequests
+ prioADFSWSTrustTokenRequestsSuccess
+ prioADFSTokenRequests
+ prioADFSWSFedTokenRequestsSuccess
+
+ // Exchange
+ // Transport Queue
+ prioExchangeTransportQueuesActiveMailboxDelivery
+ prioExchangeTransportQueuesExternalActiveRemoteDelivery
+ prioExchangeTransportQueuesExternalLargestDelivery
+ prioExchangeTransportQueuesInternalActiveRemoteDeliery
+ prioExchangeTransportQueuesInternalLargestDelivery
+ prioExchangeTransportQueuesRetryMailboxDelivery
+ prioExchangeTransportQueuesUnreachable
+ prioExchangeTransportQueuesPoison
+
+ // LDAP
+ prioExchangeLDAPLongRunningOPS
+ prioExchangeLDAPReadTime
+ prioExchangeLDAPSearchTime
+ prioExchangeLDAPWriteTime
+ prioExchangeLDAPTimeoutErrors
+
+ // OWA
+ prioExchangeOWACurrentUniqueUsers
+ prioExchangeOWARequestsTotal
+
+ // Sync
+ prioExchangeActiveSyncPingCMDsPending
+ prioExchangeActiveSyncRequests
+ prioExchangeActiveSyncSyncCMDs
+
+ // RPC
+ prioExchangeRPCActiveUserCount
+ prioExchangeRPCAvgLatency
+ prioExchangeRPCConnectionCount
+ prioExchangeRPCOperationsTotal
+ prioExchangeRPCRequests
+ prioExchangeRpcUserCount
+
+ // Workload
+ prioExchangeWorkloadActiveTasks
+ prioExchangeWorkloadCompleteTasks
+ prioExchangeWorkloadQueueTasks
+ prioExchangeWorkloadYieldedTasks
+ prioExchangeWorkloadActivityStatus
+
+ // HTTP Proxy
+ prioExchangeHTTPProxyAVGAuthLatency
+ prioExchangeHTTPProxyAVGCASProcessingLatency
+ prioExchangeHTTPProxyMailboxProxyFailureRate
+ prioExchangeHTTPProxyServerLocatorAvgLatency
+ prioExchangeHTTPProxyOutstandingProxyRequests
+ prioExchangeHTTPProxyRequestsTotal
+
+ // Request
+ prioExchangeAutoDiscoverRequests
+ prioExchangeAvailServiceRequests
+
+ // Hyperv Health
+ prioHypervVMHealth
+
+ // Hyperv Partition
+ prioHypervRootPartitionDeviceSpacePages
+ prioHypervRootPartitionGPASpacePages
+ prioHypervRootPartitionGPASpaceModifications
+ prioHypervRootPartitionAttachedDevices
+ prioHypervRootPartitionDepositedPages
+ prioHypervRootPartitionSkippedInterrupts
+ prioHypervRootPartitionDeviceDMAErrors
+ prioHypervRootPartitionDeviceInterruptErrors
+ prioHypervRootPartitionDeviceInterruptThrottleEvents
+ prioHypervRootPartitionIOTlbFlush
+ prioHypervRootPartitionAddressSpace
+ prioHypervRootPartitionVirtualTlbFlushEntires
+ prioHypervRootPartitionVirtualTlbPages
+
+ // Hyperv VM (Memory)
+ prioHypervVMCPUUsage
+ prioHypervVMMemoryPhysical
+ prioHypervVMMemoryPhysicalGuestVisible
+ prioHypervVMMemoryPressureCurrent
+ prioHypervVIDPhysicalPagesAllocated
+ prioHypervVIDRemotePhysicalPages
+
+ // Hyperv Device
+ prioHypervVMDeviceBytes
+ prioHypervVMDeviceOperations
+ prioHypervVMDeviceErrors
+
+ // Hyperv Interface
+ prioHypervVMInterfaceBytes
+ prioHypervVMInterfacePacketsDropped
+ prioHypervVMInterfacePackets
+
+ // Hyperv Vswitch
+ prioHypervVswitchTrafficTotal
+ prioHypervVswitchPackets
+ prioHypervVswitchDirectedPackets
+ prioHypervVswitchBroadcastPackets
+ prioHypervVswitchMulticastPackets
+ prioHypervVswitchDroppedPackets
+ prioHypervVswitchExtensionsDroppedPackets
+ prioHypervVswitchPacketsFlooded
+ prioHypervVswitchLearnedMACAddresses
+ prioHypervVswitchPurgeMACAddress
+
+ prioCollectorDuration
+ prioCollectorStatus
+)
+
+// CPU
+var (
+ cpuCharts = module.Charts{
+ cpuUtilChart.Copy(),
+ }
+ cpuUtilChart = module.Chart{
+ ID: "cpu_utilization_total",
+ Title: "Total CPU Utilization (all cores)",
+ Units: "percentage",
+ Fam: "cpu",
+ Ctx: "windows.cpu_utilization_total",
+ Type: module.Stacked,
+ Priority: prioCPUUtil,
+ Dims: module.Dims{
+ {ID: "cpu_idle_time", Name: "idle", Algo: module.PercentOfIncremental, Div: 1000, DimOpts: module.DimOpts{Hidden: true}},
+ {ID: "cpu_dpc_time", Name: "dpc", Algo: module.PercentOfIncremental, Div: 1000},
+ {ID: "cpu_user_time", Name: "user", Algo: module.PercentOfIncremental, Div: 1000},
+ {ID: "cpu_privileged_time", Name: "privileged", Algo: module.PercentOfIncremental, Div: 1000},
+ {ID: "cpu_interrupt_time", Name: "interrupt", Algo: module.PercentOfIncremental, Div: 1000},
+ },
+ }
+)
+
+// CPU core
+var (
+ cpuCoreChartsTmpl = module.Charts{
+ cpuCoreUtilChartTmpl.Copy(),
+ cpuCoreInterruptsChartTmpl.Copy(),
+ cpuDPCsChartTmpl.Copy(),
+ cpuCoreCStateChartTmpl.Copy(),
+ }
+ cpuCoreUtilChartTmpl = module.Chart{
+ ID: "core_%s_cpu_utilization",
+ Title: "Core CPU Utilization",
+ Units: "percentage",
+ Fam: "cpu",
+ Ctx: "windows.cpu_core_utilization",
+ Type: module.Stacked,
+ Priority: prioCPUCoreUtil,
+ Dims: module.Dims{
+ {ID: "cpu_core_%s_idle_time", Name: "idle", Algo: module.PercentOfIncremental, Div: precision, DimOpts: module.DimOpts{Hidden: true}},
+ {ID: "cpu_core_%s_dpc_time", Name: "dpc", Algo: module.PercentOfIncremental, Div: precision},
+ {ID: "cpu_core_%s_user_time", Name: "user", Algo: module.PercentOfIncremental, Div: precision},
+ {ID: "cpu_core_%s_privileged_time", Name: "privileged", Algo: module.PercentOfIncremental, Div: precision},
+ {ID: "cpu_core_%s_interrupt_time", Name: "interrupt", Algo: module.PercentOfIncremental, Div: precision},
+ },
+ }
+ cpuCoreInterruptsChartTmpl = module.Chart{
+ ID: "cpu_core_%s_interrupts",
+ Title: "Received and Serviced Hardware Interrupts",
+ Units: "interrupts/s",
+ Fam: "cpu",
+ Ctx: "windows.cpu_core_interrupts",
+ Priority: prioCPUInterrupts,
+ Dims: module.Dims{
+ {ID: "cpu_core_%s_interrupts", Name: "interrupts", Algo: module.Incremental},
+ },
+ }
+ cpuDPCsChartTmpl = module.Chart{
+ ID: "cpu_core_%s_dpcs",
+ Title: "Received and Serviced Deferred Procedure Calls (DPC)",
+ Units: "dpc/s",
+ Fam: "cpu",
+ Ctx: "windows.cpu_core_dpcs",
+ Priority: prioCPUDPCs,
+ Dims: module.Dims{
+ {ID: "cpu_core_%s_dpcs", Name: "dpcs", Algo: module.Incremental},
+ },
+ }
+ cpuCoreCStateChartTmpl = module.Chart{
+ ID: "cpu_core_%s_cpu_cstate",
+ Title: "Core Time Spent in Low-Power Idle State",
+ Units: "percentage",
+ Fam: "cpu",
+ Ctx: "windows.cpu_core_cstate",
+ Type: module.Stacked,
+ Priority: prioCPUCoreCState,
+ Dims: module.Dims{
+ {ID: "cpu_core_%s_cstate_c1", Name: "c1", Algo: module.PercentOfIncremental, Div: precision},
+ {ID: "cpu_core_%s_cstate_c2", Name: "c2", Algo: module.PercentOfIncremental, Div: precision},
+ {ID: "cpu_core_%s_cstate_c3", Name: "c3", Algo: module.PercentOfIncremental, Div: precision},
+ },
+ }
+)
+
+// Memory
+var (
+ memCharts = module.Charts{
+ memUtilChart.Copy(),
+ memPageFaultsChart.Copy(),
+ memSwapUtilChart.Copy(),
+ memSwapOperationsChart.Copy(),
+ memSwapPagesChart.Copy(),
+ memCacheChart.Copy(),
+ memCacheFaultsChart.Copy(),
+ memSystemPoolChart.Copy(),
+ }
+ memUtilChart = module.Chart{
+ ID: "memory_utilization",
+ Title: "Memory Utilization",
+ Units: "bytes",
+ Fam: "mem",
+ Ctx: "windows.memory_utilization",
+ Type: module.Stacked,
+ Priority: prioMemUtil,
+ Dims: module.Dims{
+ {ID: "memory_available_bytes", Name: "available"},
+ {ID: "memory_used_bytes", Name: "used"},
+ },
+ }
+ memPageFaultsChart = module.Chart{
+ ID: "memory_page_faults",
+ Title: "Memory Page Faults",
+ Units: "pgfaults/s",
+ Fam: "mem",
+ Ctx: "windows.memory_page_faults",
+ Priority: prioMemPageFaults,
+ Dims: module.Dims{
+ {ID: "memory_page_faults_total", Name: "page_faults", Algo: module.Incremental},
+ },
+ }
+ memSwapUtilChart = module.Chart{
+ ID: "memory_swap_utilization",
+ Title: "Swap Utilization",
+ Units: "bytes",
+ Fam: "mem",
+ Ctx: "windows.memory_swap_utilization",
+ Type: module.Stacked,
+ Priority: prioMemSwapUtil,
+ Dims: module.Dims{
+ {ID: "memory_not_committed_bytes", Name: "available"},
+ {ID: "memory_committed_bytes", Name: "used"},
+ },
+ Vars: module.Vars{
+ {ID: "memory_commit_limit"},
+ },
+ }
+ memSwapOperationsChart = module.Chart{
+ ID: "memory_swap_operations",
+ Title: "Swap Operations",
+ Units: "operations/s",
+ Fam: "mem",
+ Ctx: "windows.memory_swap_operations",
+ Type: module.Area,
+ Priority: prioMemSwapOperations,
+ Dims: module.Dims{
+ {ID: "memory_swap_page_reads_total", Name: "read", Algo: module.Incremental},
+ {ID: "memory_swap_page_writes_total", Name: "write", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ memSwapPagesChart = module.Chart{
+ ID: "memory_swap_pages",
+ Title: "Swap Pages",
+ Units: "pages/s",
+ Fam: "mem",
+ Ctx: "windows.memory_swap_pages",
+ Priority: prioMemSwapPages,
+ Dims: module.Dims{
+ {ID: "memory_swap_pages_read_total", Name: "read", Algo: module.Incremental},
+ {ID: "memory_swap_pages_written_total", Name: "written", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ memCacheChart = module.Chart{
+ ID: "memory_cached",
+ Title: "Cached",
+ Units: "bytes",
+ Fam: "mem",
+ Ctx: "windows.memory_cached",
+ Type: module.Area,
+ Priority: prioMemCache,
+ Dims: module.Dims{
+ {ID: "memory_cache_total", Name: "cached"},
+ },
+ }
+ memCacheFaultsChart = module.Chart{
+ ID: "memory_cache_faults",
+ Title: "Cache Faults",
+ Units: "faults/s",
+ Fam: "mem",
+ Ctx: "windows.memory_cache_faults",
+ Priority: prioMemCacheFaults,
+ Dims: module.Dims{
+ {ID: "memory_cache_faults_total", Name: "cache_faults", Algo: module.Incremental},
+ },
+ }
+ memSystemPoolChart = module.Chart{
+ ID: "memory_system_pool",
+ Title: "System Memory Pool",
+ Units: "bytes",
+ Fam: "mem",
+ Ctx: "windows.memory_system_pool",
+ Type: module.Stacked,
+ Priority: prioMemSystemPool,
+ Dims: module.Dims{
+ {ID: "memory_pool_paged_bytes", Name: "paged"},
+ {ID: "memory_pool_nonpaged_bytes_total", Name: "non-paged"},
+ },
+ }
+)
+
+// Logical Disks
+var (
+ diskChartsTmpl = module.Charts{
+ diskSpaceUsageChartTmpl.Copy(),
+ diskBandwidthChartTmpl.Copy(),
+ diskOperationsChartTmpl.Copy(),
+ diskAvgLatencyChartTmpl.Copy(),
+ }
+ diskSpaceUsageChartTmpl = module.Chart{
+ ID: "logical_disk_%s_space_usage",
+ Title: "Space usage",
+ Units: "bytes",
+ Fam: "disk",
+ Ctx: "windows.logical_disk_space_usage",
+ Type: module.Stacked,
+ Priority: prioDiskSpaceUsage,
+ Dims: module.Dims{
+ {ID: "logical_disk_%s_free_space", Name: "free"},
+ {ID: "logical_disk_%s_used_space", Name: "used"},
+ },
+ }
+ diskBandwidthChartTmpl = module.Chart{
+ ID: "logical_disk_%s_bandwidth",
+ Title: "Bandwidth",
+ Units: "bytes/s",
+ Fam: "disk",
+ Ctx: "windows.logical_disk_bandwidth",
+ Type: module.Area,
+ Priority: prioDiskBandwidth,
+ Dims: module.Dims{
+ {ID: "logical_disk_%s_read_bytes_total", Name: "read", Algo: module.Incremental},
+ {ID: "logical_disk_%s_write_bytes_total", Name: "write", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ diskOperationsChartTmpl = module.Chart{
+ ID: "logical_disk_%s_operations",
+ Title: "Operations",
+ Units: "operations/s",
+ Fam: "disk",
+ Ctx: "windows.logical_disk_operations",
+ Priority: prioDiskOperations,
+ Dims: module.Dims{
+ {ID: "logical_disk_%s_reads_total", Name: "reads", Algo: module.Incremental},
+ {ID: "logical_disk_%s_writes_total", Name: "writes", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ diskAvgLatencyChartTmpl = module.Chart{
+ ID: "logical_disk_%s_latency",
+ Title: "Average Read/Write Latency",
+ Units: "seconds",
+ Fam: "disk",
+ Ctx: "windows.logical_disk_latency",
+ Priority: prioDiskAvgLatency,
+ Dims: module.Dims{
+ {ID: "logical_disk_%s_read_latency", Name: "read", Algo: module.Incremental, Div: precision},
+ {ID: "logical_disk_%s_write_latency", Name: "write", Algo: module.Incremental, Div: precision},
+ },
+ }
+)
+
+// Network interfaces
+var (
+ nicChartsTmpl = module.Charts{
+ nicBandwidthChartTmpl.Copy(),
+ nicPacketsChartTmpl.Copy(),
+ nicErrorsChartTmpl.Copy(),
+ nicDiscardsChartTmpl.Copy(),
+ }
+ nicBandwidthChartTmpl = module.Chart{
+ ID: "nic_%s_bandwidth",
+ Title: "Bandwidth",
+ Units: "kilobits/s",
+ Fam: "net",
+ Ctx: "windows.net_nic_bandwidth",
+ Type: module.Area,
+ Priority: prioNICBandwidth,
+ Dims: module.Dims{
+ {ID: "net_nic_%s_bytes_received", Name: "received", Algo: module.Incremental, Div: 1000},
+ {ID: "net_nic_%s_bytes_sent", Name: "sent", Algo: module.Incremental, Mul: -1, Div: 1000},
+ },
+ }
+ nicPacketsChartTmpl = module.Chart{
+ ID: "nic_%s_packets",
+ Title: "Packets",
+ Units: "packets/s",
+ Fam: "net",
+ Ctx: "windows.net_nic_packets",
+ Priority: prioNICPackets,
+ Dims: module.Dims{
+ {ID: "net_nic_%s_packets_received_total", Name: "received", Algo: module.Incremental},
+ {ID: "net_nic_%s_packets_sent_total", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ nicErrorsChartTmpl = module.Chart{
+ ID: "nic_%s_errors",
+ Title: "Errors",
+ Units: "errors/s",
+ Fam: "net",
+ Ctx: "windows.net_nic_errors",
+ Priority: prioNICErrors,
+ Dims: module.Dims{
+ {ID: "net_nic_%s_packets_received_errors", Name: "inbound", Algo: module.Incremental},
+ {ID: "net_nic_%s_packets_outbound_errors", Name: "outbound", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ nicDiscardsChartTmpl = module.Chart{
+ ID: "nic_%s_discarded",
+ Title: "Discards",
+ Units: "discards/s",
+ Fam: "net",
+ Ctx: "windows.net_nic_discarded",
+ Priority: prioNICDiscards,
+ Dims: module.Dims{
+ {ID: "net_nic_%s_packets_received_discarded", Name: "inbound", Algo: module.Incremental},
+ {ID: "net_nic_%s_packets_outbound_discarded", Name: "outbound", Algo: module.Incremental, Mul: -1},
+ },
+ }
+)
+
+// TCP
+var (
+ tcpCharts = module.Charts{
+ tcpConnsActiveChart.Copy(),
+ tcpConnsEstablishedChart.Copy(),
+ tcpConnsFailuresChart.Copy(),
+ tcpConnsPassiveChart.Copy(),
+ tcpConnsResetsChart.Copy(),
+ tcpSegmentsReceivedChart.Copy(),
+ tcpSegmentsRetransmittedChart.Copy(),
+ tcpSegmentsSentChart.Copy(),
+ }
+ tcpConnsEstablishedChart = module.Chart{
+ ID: "tcp_conns_established",
+ Title: "TCP established connections",
+ Units: "connections",
+ Fam: "tcp",
+ Ctx: "windows.tcp_conns_established",
+ Priority: prioTCPConnsEstablished,
+ Dims: module.Dims{
+ {ID: "tcp_ipv4_conns_established", Name: "ipv4"},
+ {ID: "tcp_ipv6_conns_established", Name: "ipv6"},
+ },
+ }
+ tcpConnsActiveChart = module.Chart{
+ ID: "tcp_conns_active",
+ Title: "TCP active connections",
+ Units: "connections/s",
+ Fam: "tcp",
+ Ctx: "windows.tcp_conns_active",
+ Priority: prioTCPConnsActive,
+ Dims: module.Dims{
+ {ID: "tcp_ipv4_conns_active", Name: "ipv4", Algo: module.Incremental},
+ {ID: "tcp_ipv6_conns_active", Name: "ipv6", Algo: module.Incremental},
+ },
+ }
+ tcpConnsPassiveChart = module.Chart{
+ ID: "tcp_conns_passive",
+ Title: "TCP passive connections",
+ Units: "connections/s",
+ Fam: "tcp",
+ Ctx: "windows.tcp_conns_passive",
+ Priority: prioTCPConnsPassive,
+ Dims: module.Dims{
+ {ID: "tcp_ipv4_conns_passive", Name: "ipv4", Algo: module.Incremental},
+ {ID: "tcp_ipv6_conns_passive", Name: "ipv6", Algo: module.Incremental},
+ },
+ }
+ tcpConnsFailuresChart = module.Chart{
+ ID: "tcp_conns_failures",
+ Title: "TCP connection failures",
+ Units: "failures/s",
+ Fam: "tcp",
+ Ctx: "windows.tcp_conns_failures",
+ Priority: prioTCPConnsFailure,
+ Dims: module.Dims{
+ {ID: "tcp_ipv4_conns_failures", Name: "ipv4", Algo: module.Incremental},
+ {ID: "tcp_ipv6_conns_failures", Name: "ipv6", Algo: module.Incremental},
+ },
+ }
+ tcpConnsResetsChart = module.Chart{
+ ID: "tcp_conns_resets",
+ Title: "TCP connections resets",
+ Units: "resets/s",
+ Fam: "tcp",
+ Ctx: "windows.tcp_conns_resets",
+ Priority: prioTCPConnsReset,
+ Dims: module.Dims{
+ {ID: "tcp_ipv4_conns_resets", Name: "ipv4", Algo: module.Incremental},
+ {ID: "tcp_ipv6_conns_resets", Name: "ipv6", Algo: module.Incremental},
+ },
+ }
+ tcpSegmentsReceivedChart = module.Chart{
+ ID: "tcp_segments_received",
+ Title: "Number of TCP segments received",
+ Units: "segments/s",
+ Fam: "tcp",
+ Ctx: "windows.tcp_segments_received",
+ Priority: prioTCPSegmentsReceived,
+ Dims: module.Dims{
+ {ID: "tcp_ipv4_segments_received", Name: "ipv4", Algo: module.Incremental},
+ {ID: "tcp_ipv6_segments_received", Name: "ipv6", Algo: module.Incremental},
+ },
+ }
+ tcpSegmentsSentChart = module.Chart{
+ ID: "tcp_segments_sent",
+ Title: "Number of TCP segments sent",
+ Units: "segments/s",
+ Fam: "tcp",
+ Ctx: "windows.tcp_segments_sent",
+ Priority: prioTCPSegmentsSent,
+ Dims: module.Dims{
+ {ID: "tcp_ipv4_segments_sent", Name: "ipv4", Algo: module.Incremental},
+ {ID: "tcp_ipv6_segments_sent", Name: "ipv6", Algo: module.Incremental},
+ },
+ }
+ tcpSegmentsRetransmittedChart = module.Chart{
+ ID: "tcp_segments_retransmitted",
+ Title: "Number of TCP segments retransmitted",
+ Units: "segments/s",
+ Fam: "tcp",
+ Ctx: "windows.tcp_segments_retransmitted",
+ Priority: prioTCPSegmentsRetransmitted,
+ Dims: module.Dims{
+ {ID: "tcp_ipv4_segments_retransmitted", Name: "ipv4", Algo: module.Incremental},
+ {ID: "tcp_ipv6_segments_retransmitted", Name: "ipv6", Algo: module.Incremental},
+ },
+ }
+)
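+
+// All TCP rate dimensions use Algo: module.Incremental: the collected values
+// are treated as monotonically increasing counters and charted as the
+// per-second rate between consecutive collections, which is why the units
+// above are expressed per second, while the established-connections chart (a
+// gauge) has no algorithm set.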
+
+// OS
+var (
+ osCharts = module.Charts{
+ osProcessesChart.Copy(),
+ osUsersChart.Copy(),
+ osMemoryUsage.Copy(),
+ osPagingFilesUsageChart.Copy(),
+ }
+ osProcessesChart = module.Chart{
+ ID: "os_processes",
+ Title: "Processes",
+ Units: "number",
+ Fam: "os",
+ Ctx: "windows.os_processes",
+ Priority: prioOSProcesses,
+ Dims: module.Dims{
+ {ID: "os_processes", Name: "processes"},
+ },
+ Vars: module.Vars{
+ {ID: "os_processes_limit"},
+ },
+ }
+ osUsersChart = module.Chart{
+ ID: "os_users",
+ Title: "Number of Users",
+ Units: "users",
+ Fam: "os",
+ Ctx: "windows.os_users",
+ Priority: prioOSUsers,
+ Dims: module.Dims{
+ {ID: "os_users", Name: "users"},
+ },
+ }
+ osMemoryUsage = module.Chart{
+ ID: "os_visible_memory_usage",
+ Title: "Visible Memory Usage",
+ Units: "bytes",
+ Fam: "os",
+ Ctx: "windows.os_visible_memory_usage",
+ Type: module.Stacked,
+ Priority: prioOSVisibleMemoryUsage,
+ Dims: module.Dims{
+ {ID: "os_physical_memory_free_bytes", Name: "free"},
+ {ID: "os_visible_memory_used_bytes", Name: "used"},
+ },
+ Vars: module.Vars{
+ {ID: "os_visible_memory_bytes"},
+ },
+ }
+ osPagingFilesUsageChart = module.Chart{
+ ID: "os_paging_files_usage",
+ Title: "Paging Files Usage",
+ Units: "bytes",
+ Fam: "os",
+ Ctx: "windows.os_paging_files_usage",
+ Type: module.Stacked,
+ Priority: prioOSPagingUsage,
+ Dims: module.Dims{
+ {ID: "os_paging_free_bytes", Name: "free"},
+ {ID: "os_paging_used_bytes", Name: "used"},
+ },
+ Vars: module.Vars{
+ {ID: "os_paging_limit_bytes"},
+ },
+ }
+)
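+
+// The Vars entries above ("os_processes_limit", "os_visible_memory_bytes",
+// "os_paging_limit_bytes") are presumably exported as chart variables for use
+// in health/alert expressions rather than drawn as dimensions.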
+
+// System
+var (
+ systemCharts = module.Charts{
+ systemThreadsChart.Copy(),
+ systemUptimeChart.Copy(),
+ }
+ systemThreadsChart = module.Chart{
+ ID: "system_threads",
+ Title: "Threads",
+ Units: "number",
+ Fam: "system",
+ Ctx: "windows.system_threads",
+ Priority: prioSystemThreads,
+ Dims: module.Dims{
+ {ID: "system_threads", Name: "threads"},
+ },
+ }
+ systemUptimeChart = module.Chart{
+ ID: "system_uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "system",
+ Ctx: "windows.system_uptime",
+ Priority: prioSystemUptime,
+ Dims: module.Dims{
+ {ID: "system_up_time", Name: "time"},
+ },
+ }
+)
+
+// IIS
+var (
+ iisWebsiteChartsTmpl = module.Charts{
+ iisWebsiteTrafficChartTempl.Copy(),
+ iisWebsiteRequestsRateChartTmpl.Copy(),
+ iisWebsiteActiveConnectionsCountChartTmpl.Copy(),
+ iisWebsiteUsersCountChartTmpl.Copy(),
+ iisWebsiteConnectionAttemptsRate.Copy(),
+ iisWebsiteISAPIExtRequestsCountChartTmpl.Copy(),
+ iisWebsiteISAPIExtRequestsRateChartTmpl.Copy(),
+ iisWebsiteFTPFileTransferRateChartTempl.Copy(),
+ iisWebsiteLogonAttemptsRateChartTmpl.Copy(),
+ iisWebsiteErrorsRateChart.Copy(),
+ iisWebsiteUptimeChartTmpl.Copy(),
+ }
+ iisWebsiteTrafficChartTempl = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_traffic",
+ Title: "Website traffic",
+ Units: "bytes/s",
+ Fam: "traffic",
+ Ctx: "iis.website_traffic",
+ Type: module.Area,
+ Priority: prioIISWebsiteTraffic,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_received_bytes_total", Name: "received", Algo: module.Incremental},
+ {ID: "iis_website_%s_sent_bytes_total", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ iisWebsiteFTPFileTransferRateChartTempl = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_ftp_file_transfer_rate",
+ Title: "Website FTP file transfer rate",
+ Units: "files/s",
+ Fam: "traffic",
+ Ctx: "iis.website_ftp_file_transfer_rate",
+ Priority: prioIISWebsiteFTPFileTransferRate,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_files_received_total", Name: "received", Algo: module.Incremental},
+ {ID: "iis_website_%s_files_sent_total", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ iisWebsiteActiveConnectionsCountChartTmpl = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_active_connections_count",
+ Title: "Website active connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "iis.website_active_connections_count",
+ Priority: prioIISWebsiteActiveConnectionsCount,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_current_connections", Name: "active"},
+ },
+ }
+ iisWebsiteConnectionAttemptsRate = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_connection_attempts_rate",
+ Title: "Website connections attempts",
+ Units: "attempts/s",
+ Fam: "connections",
+ Ctx: "iis.website_connection_attempts_rate",
+ Priority: prioIISWebsiteConnectionAttemptsRate,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_connection_attempts_all_instances_total", Name: "connection", Algo: module.Incremental},
+ },
+ }
+ iisWebsiteRequestsRateChartTmpl = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_requests_rate",
+ Title: "Website requests rate",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "iis.website_requests_rate",
+ Priority: prioIISWebsiteRequestsRate,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_requests_total", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ iisWebsiteUsersCountChartTmpl = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_users_count",
+ Title: "Website users with pending requests",
+ Units: "users",
+ Fam: "requests",
+ Ctx: "iis.website_users_count",
+ Type: module.Stacked,
+ Priority: prioIISWebsiteUsersCount,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_current_anonymous_users", Name: "anonymous"},
+ {ID: "iis_website_%s_current_non_anonymous_users", Name: "non_anonymous"},
+ },
+ }
+ iisWebsiteISAPIExtRequestsCountChartTmpl = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_isapi_extension_requests_count",
+ Title: "ISAPI extension requests",
+ Units: "requests",
+ Fam: "requests",
+ Ctx: "iis.website_isapi_extension_requests_count",
+ Priority: prioIISWebsiteISAPIExtRequestsCount,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_current_isapi_extension_requests", Name: "isapi"},
+ },
+ }
+ iisWebsiteISAPIExtRequestsRateChartTmpl = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_isapi_extension_requests_rate",
+ Title: "Website extensions request",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "iis.website_isapi_extension_requests_rate",
+ Priority: prioIISWebsiteISAPIExtRequestsRate,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_isapi_extension_requests_total", Name: "isapi", Algo: module.Incremental},
+ },
+ }
+ iisWebsiteErrorsRateChart = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_errors_rate",
+ Title: "Website errors",
+ Units: "errors/s",
+ Fam: "requests",
+ Ctx: "iis.website_errors_rate",
+ Type: module.Stacked,
+ Priority: prioIISWebsiteErrorsRate,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_locked_errors_total", Name: "document_locked", Algo: module.Incremental},
+ {ID: "iis_website_%s_not_found_errors_total", Name: "document_not_found", Algo: module.Incremental},
+ },
+ }
+ iisWebsiteLogonAttemptsRateChartTmpl = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_logon_attempts_rate",
+ Title: "Website logon attempts",
+ Units: "attempts/s",
+ Fam: "logon",
+ Ctx: "iis.website_logon_attempts_rate",
+ Priority: prioIISWebsiteLogonAttemptsRate,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_logon_attempts_total", Name: "logon", Algo: module.Incremental},
+ },
+ }
+ iisWebsiteUptimeChartTmpl = module.Chart{
+ OverModule: "iis",
+ ID: "iis_website_%s_uptime",
+ Title: "Website uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "iis.website_uptime",
+ Priority: prioIISWebsiteUptime,
+ Dims: module.Dims{
+ {ID: "iis_website_%s_service_uptime", Name: "uptime"},
+ },
+ }
+)
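+
+// OverModule: "iis" presumably attributes these charts to the iis module in
+// dashboards and metadata even though they are produced by this windows
+// collector; the same pattern is used below for the mssql, ad, adcs, adfs and
+// exchange charts.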
+
+// MS-SQL
+var (
+ mssqlInstanceChartsTmpl = module.Charts{
+ mssqlAccessMethodPageSplitsChart.Copy(),
+ mssqlCacheHitRatioChart.Copy(),
+ mssqlBufferCheckpointPageChart.Copy(),
+ mssqlBufferPageLifeExpectancyChart.Copy(),
+ mssqlBufManIOPSChart.Copy(),
+ mssqlBlockedProcessChart.Copy(),
+ mssqlLocksWaitChart.Copy(),
+ mssqlDeadLocksChart.Copy(),
+ mssqlMemmgrConnectionMemoryBytesChart.Copy(),
+ mssqlMemmgrExternalBenefitOfMemoryChart.Copy(),
+ mssqlMemmgrPendingMemoryChart.Copy(),
+ mssqlMemmgrTotalServerChart.Copy(),
+ mssqlSQLErrorsTotalChart.Copy(),
+ mssqlStatsAutoParamChart.Copy(),
+ mssqlStatsBatchRequestsChart.Copy(),
+ mssqlStatsSafeAutoChart.Copy(),
+ mssqlStatsCompilationChart.Copy(),
+ mssqlStatsRecompilationChart.Copy(),
+ mssqlUserConnectionChart.Copy(),
+ }
+ mssqlDatabaseChartsTmpl = module.Charts{
+ mssqlDatabaseActiveTransactionsChart.Copy(),
+ mssqlDatabaseBackupRestoreOperationsChart.Copy(),
+ mssqlDatabaseSizeChart.Copy(),
+ mssqlDatabaseLogFlushedChart.Copy(),
+ mssqlDatabaseLogFlushesChart.Copy(),
+ mssqlDatabaseTransactionsChart.Copy(),
+ mssqlDatabaseWriteTransactionsChart.Copy(),
+ }
+ // Access Methods
+ // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-access-methods-object?view=sql-server-ver16
+ mssqlAccessMethodPageSplitsChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_accessmethods_page_splits",
+ Title: "Page splits",
+ Units: "splits/s",
+ Fam: "buffer cache",
+ Ctx: "mssql.instance_accessmethods_page_splits",
+ Priority: prioMSSQLAccessMethodPageSplits,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_accessmethods_page_splits", Name: "page", Algo: module.Incremental},
+ },
+ }
+ // Buffer Management
+ // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-buffer-manager-object?view=sql-server-ver16
+ mssqlCacheHitRatioChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_cache_hit_ratio",
+ Title: "Buffer Cache hit ratio",
+ Units: "percentage",
+ Fam: "buffer cache",
+ Ctx: "mssql.instance_cache_hit_ratio",
+ Priority: prioMSSQLCacheHitRatio,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_cache_hit_ratio", Name: "hit_ratio"},
+ },
+ }
+ mssqlBufferCheckpointPageChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_bufman_checkpoint_pages",
+ Title: "Flushed pages",
+ Units: "pages/s",
+ Fam: "buffer cache",
+ Ctx: "mssql.instance_bufman_checkpoint_pages",
+ Priority: prioMSSQLBufferCheckpointPages,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_bufman_checkpoint_pages", Name: "flushed", Algo: module.Incremental},
+ },
+ }
+ mssqlBufferPageLifeExpectancyChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_bufman_page_life_expectancy",
+ Title: "Page life expectancy",
+ Units: "seconds",
+ Fam: "buffer cache",
+ Ctx: "mssql.instance_bufman_page_life_expectancy",
+ Priority: prioMSSQLBufferPageLifeExpectancy,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_bufman_page_life_expectancy_seconds", Name: "life_expectancy"},
+ },
+ }
+ mssqlBufManIOPSChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_bufman_iops",
+ Title: "Number of pages input and output",
+ Units: "pages/s",
+ Fam: "buffer cache",
+ Ctx: "mssql.instance_bufman_iops",
+ Priority: prioMSSQLBufManIOPS,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_bufman_page_reads", Name: "read", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_bufman_page_writes", Name: "written", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ // General Statistics
+ // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-general-statistics-object?view=sql-server-ver16
+ mssqlBlockedProcessChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_blocked_process",
+ Title: "Blocked processes",
+ Units: "process",
+ Fam: "processes",
+ Ctx: "mssql.instance_blocked_processes",
+ Priority: prioMSSQLBlockedProcess,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_genstats_blocked_processes", Name: "blocked"},
+ },
+ }
+ mssqlUserConnectionChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_user_connection",
+ Title: "User connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "mssql.instance_user_connection",
+ Priority: prioMSSQLUserConnections,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_genstats_user_connections", Name: "user"},
+ },
+ }
+ // Lock Wait
+ // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-locks-object?view=sql-server-ver16
+ mssqlLocksWaitChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_locks_lock_wait",
+ Title: "Lock requests that required the caller to wait",
+ Units: "locks/s",
+ Fam: "locks",
+ Ctx: "mssql.instance_locks_lock_wait",
+ Priority: prioMSSQLLocksLockWait,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_resource_AllocUnit_locks_lock_wait_seconds", Name: "alloc_unit", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Application_locks_lock_wait_seconds", Name: "application", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Database_locks_lock_wait_seconds", Name: "database", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Extent_locks_lock_wait_seconds", Name: "extent", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_File_locks_lock_wait_seconds", Name: "file", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_HoBT_locks_lock_wait_seconds", Name: "hobt", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Key_locks_lock_wait_seconds", Name: "key", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Metadata_locks_lock_wait_seconds", Name: "metadata", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_OIB_locks_lock_wait_seconds", Name: "oib", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Object_locks_lock_wait_seconds", Name: "object", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Page_locks_lock_wait_seconds", Name: "page", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_RID_locks_lock_wait_seconds", Name: "rid", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_RowGroup_locks_lock_wait_seconds", Name: "row_group", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Xact_locks_lock_wait_seconds", Name: "xact", Algo: module.Incremental},
+ },
+ }
+ mssqlDeadLocksChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_locks_deadlocks",
+ Title: "Lock requests that resulted in deadlock",
+ Units: "locks/s",
+ Fam: "locks",
+ Ctx: "mssql.instance_locks_deadlocks",
+ Priority: prioMSSQLLocksDeadLocks,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_resource_AllocUnit_locks_deadlocks", Name: "alloc_unit", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Application_locks_deadlocks", Name: "application", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Database_locks_deadlocks", Name: "database", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Extent_locks_deadlocks", Name: "extent", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_File_locks_deadlocks", Name: "file", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_HoBT_locks_deadlocks", Name: "hobt", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Key_locks_deadlocks", Name: "key", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Metadata_locks_deadlocks", Name: "metadata", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_OIB_locks_deadlocks", Name: "oib", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Object_locks_deadlocks", Name: "object", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Page_locks_deadlocks", Name: "page", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_RID_locks_deadlocks", Name: "rid", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_RowGroup_locks_deadlocks", Name: "row_group", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_resource_Xact_locks_deadlocks", Name: "xact", Algo: module.Incremental},
+ },
+ }
+
+ // Memory Manager
+ // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-memory-manager-object?view=sql-server-ver16
+ mssqlMemmgrConnectionMemoryBytesChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_memmgr_connection_memory_bytes",
+ Title: "Amount of dynamic memory to maintain connections",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "mssql.instance_memmgr_connection_memory_bytes",
+ Priority: prioMSSQLMemmgrConnectionMemoryBytes,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_memmgr_connection_memory_bytes", Name: "memory", Algo: module.Incremental},
+ },
+ }
+ mssqlMemmgrExternalBenefitOfMemoryChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_memmgr_external_benefit_of_memory",
+ Title: "Performance benefit from adding memory to a specific cache",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "mssql.instance_memmgr_external_benefit_of_memory",
+ Priority: prioMSSQLMemmgrExternalBenefitOfMemory,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_memmgr_external_benefit_of_memory", Name: "benefit", Algo: module.Incremental},
+ },
+ }
+ mssqlMemmgrPendingMemoryChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_memmgr_pending_memory_grants",
+ Title: "Process waiting for memory grant",
+ Units: "process",
+ Fam: "memory",
+ Ctx: "mssql.instance_memmgr_pending_memory_grants",
+ Priority: prioMSSQLMemmgrPendingMemoryGrants,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_memmgr_pending_memory_grants", Name: "pending"},
+ },
+ }
+ mssqlMemmgrTotalServerChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_memmgr_server_memory",
+ Title: "Memory committed",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "mssql.instance_memmgr_server_memory",
+ Priority: prioMSSQLMemTotalServer,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_memmgr_total_server_memory_bytes", Name: "memory"},
+ },
+ }
+
+ // SQL errors
+ // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-errors-object?view=sql-server-ver16
+ mssqlSQLErrorsTotalChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_sql_errors_total",
+ Title: "Errors",
+ Units: "errors/s",
+ Fam: "errors",
+ Ctx: "mssql.instance_sql_errors",
+ Priority: prioMSSQLSqlErrorsTotal,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_sql_errors_total_db_offline_errors", Name: "db_offline", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_sql_errors_total_info_errors", Name: "info", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_sql_errors_total_kill_connection_errors", Name: "kill_connection", Algo: module.Incremental},
+ {ID: "mssql_instance_%s_sql_errors_total_user_errors", Name: "user", Algo: module.Incremental},
+ },
+ }
+
+ // SQL Statistics
+ // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-sql-statistics-object?view=sql-server-ver16
+ mssqlStatsAutoParamChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_sqlstats_auto_parameterization_attempts",
+ Title: "Failed auto-parameterization attempts",
+ Units: "attempts/s",
+ Fam: "sql activity",
+ Ctx: "mssql.instance_sqlstats_auto_parameterization_attempts",
+ Priority: prioMSSQLStatsAutoParameterization,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_sqlstats_auto_parameterization_attempts", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ mssqlStatsBatchRequestsChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_sqlstats_batch_requests",
+ Title: "Total of batches requests",
+ Units: "requests/s",
+ Fam: "sql activity",
+ Ctx: "mssql.instance_sqlstats_batch_requests",
+ Priority: prioMSSQLStatsBatchRequests,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_sqlstats_batch_requests", Name: "batch", Algo: module.Incremental},
+ },
+ }
+ mssqlStatsSafeAutoChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_sqlstats_safe_auto_parameterization_attempts",
+ Title: "Safe auto-parameterization attempts",
+ Units: "attempts/s",
+ Fam: "sql activity",
+ Ctx: "mssql.instance_sqlstats_safe_auto_parameterization_attempts",
+ Priority: prioMSSQLStatsSafeAutoParameterization,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_sqlstats_safe_auto_parameterization_attempts", Name: "safe", Algo: module.Incremental},
+ },
+ }
+ mssqlStatsCompilationChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_sqlstats_sql_compilations",
+ Title: "SQL compilations",
+ Units: "compilations/s",
+ Fam: "sql activity",
+ Ctx: "mssql.instance_sqlstats_sql_compilations",
+ Priority: prioMSSQLStatsCompilations,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_sqlstats_sql_compilations", Name: "compilations", Algo: module.Incremental},
+ },
+ }
+ mssqlStatsRecompilationChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_instance_%s_sqlstats_sql_recompilations",
+ Title: "SQL re-compilations",
+ Units: "recompiles/s",
+ Fam: "sql activity",
+ Ctx: "mssql.instance_sqlstats_sql_recompilations",
+ Priority: prioMSSQLStatsRecompilations,
+ Dims: module.Dims{
+ {ID: "mssql_instance_%s_sqlstats_sql_recompilations", Name: "recompiles", Algo: module.Incremental},
+ },
+ }
+
+ // Database
+ // Source: https://learn.microsoft.com/en-us/sql/relational-databases/performance-monitor/sql-server-databases-object?view=sql-server-2017
+ mssqlDatabaseActiveTransactionsChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_db_%s_instance_%s_active_transactions",
+ Title: "Active transactions per database",
+ Units: "transactions",
+ Fam: "transactions",
+ Ctx: "mssql.database_active_transactions",
+ Priority: prioMSSQLDatabaseActiveTransactions,
+ Dims: module.Dims{
+ {ID: "mssql_db_%s_instance_%s_active_transactions", Name: "active"},
+ },
+ }
+ mssqlDatabaseBackupRestoreOperationsChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_db_%s_instance_%s_backup_restore_operations",
+ Title: "Backup IO per database",
+ Units: "operations/s",
+ Fam: "transactions",
+ Ctx: "mssql.database_backup_restore_operations",
+ Priority: prioMSSQLDatabaseBackupRestoreOperations,
+ Dims: module.Dims{
+ {ID: "mssql_db_%s_instance_%s_backup_restore_operations", Name: "backup", Algo: module.Incremental},
+ },
+ }
+ mssqlDatabaseSizeChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_db_%s_instance_%s_data_files_size",
+ Title: "Current database size",
+ Units: "bytes",
+ Fam: "size",
+ Ctx: "mssql.database_data_files_size",
+ Priority: prioMSSQLDatabaseDataFileSize,
+ Dims: module.Dims{
+ {ID: "mssql_db_%s_instance_%s_data_files_size_bytes", Name: "size"},
+ },
+ }
+ mssqlDatabaseLogFlushedChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_db_%s_instance_%s_log_flushed",
+ Title: "Log flushed",
+ Units: "bytes/s",
+ Fam: "transactions",
+ Ctx: "mssql.database_log_flushed",
+ Priority: prioMSSQLDatabaseLogFlushed,
+ Dims: module.Dims{
+ {ID: "mssql_db_%s_instance_%s_log_flushed_bytes", Name: "flushed", Algo: module.Incremental},
+ },
+ }
+ mssqlDatabaseLogFlushesChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_db_%s_instance_%s_log_flushes",
+ Title: "Log flushes",
+ Units: "flushes/s",
+ Fam: "transactions",
+ Ctx: "mssql.database_log_flushes",
+ Priority: prioMSSQLDatabaseLogFlushes,
+ Dims: module.Dims{
+ {ID: "mssql_db_%s_instance_%s_log_flushes", Name: "log", Algo: module.Incremental},
+ },
+ }
+ mssqlDatabaseTransactionsChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_db_%s_instance_%s_transactions",
+ Title: "Transactions",
+ Units: "transactions/s",
+ Fam: "transactions",
+ Ctx: "mssql.database_transactions",
+ Priority: prioMSSQLDatabaseTransactions,
+ Dims: module.Dims{
+ {ID: "mssql_db_%s_instance_%s_transactions", Name: "transactions", Algo: module.Incremental},
+ },
+ }
+ mssqlDatabaseWriteTransactionsChart = module.Chart{
+ OverModule: "mssql",
+ ID: "mssql_db_%s_instance_%s_write_transactions",
+ Title: "Write transactions",
+ Units: "transactions/s",
+ Fam: "transactions",
+ Ctx: "mssql.database_write_transactions",
+ Priority: prioMSSQLDatabaseWriteTransactions,
+ Dims: module.Dims{
+ {ID: "mssql_db_%s_instance_%s_write_transactions", Name: "write", Algo: module.Incremental},
+ },
+ }
+)
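+
+// The database chart templates above carry two "%s" placeholders (database
+// name and instance name), so they are presumably expanded with two
+// arguments; a sketch with illustrative names only:
+//
+//	chart.ID = fmt.Sprintf(chart.ID, "master", "SQLEXPRESS")
+//	for _, dim := range chart.Dims {
+//		dim.ID = fmt.Sprintf(dim.ID, "master", "SQLEXPRESS")
+//	}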
+
+// AD
+var (
+ adCharts = module.Charts{
+ adDatabaseOperationsChart.Copy(),
+ adDirectoryOperationsChart.Copy(),
+ adNameCacheLookupsChart.Copy(),
+ adNameCacheHitsChart.Copy(),
+ adDRAReplicationIntersiteCompressedTrafficChart.Copy(),
+ adDRAReplicationIntrasiteCompressedTrafficChart.Copy(),
+ adDRAReplicationSyncObjectRemainingChart.Copy(),
+ adDRAReplicationObjectsFilteredChart.Copy(),
+ adDRAReplicationPropertiesUpdatedChart.Copy(),
+ adDRAReplicationPropertiesFilteredChart.Copy(),
+ adDRAReplicationPendingSyncsChart.Copy(),
+ adDRAReplicationSyncRequestsChart.Copy(),
+ adDirectoryServiceThreadsChart.Copy(),
+ adLDAPLastBindTimeChart.Copy(),
+ adBindsTotalChart.Copy(),
+ adLDAPSearchesChart.Copy(),
+ adATQAverageRequestLatencyChart.Copy(),
+ adATQOutstandingRequestsChart.Copy(),
+ }
+ adDatabaseOperationsChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_database_operations",
+ Title: "AD database operations",
+ Units: "operations/s",
+ Fam: "database",
+ Ctx: "ad.database_operations",
+ Priority: prioADDatabaseOperations,
+ Dims: module.Dims{
+ {ID: "ad_database_operations_total_add", Name: "add", Algo: module.Incremental},
+ {ID: "ad_database_operations_total_delete", Name: "delete", Algo: module.Incremental},
+ {ID: "ad_database_operations_total_modify", Name: "modify", Algo: module.Incremental},
+ {ID: "ad_database_operations_total_recycle", Name: "recycle", Algo: module.Incremental},
+ },
+ }
+ adDirectoryOperationsChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_directory_operations_read",
+ Title: "AD directory operations",
+ Units: "operations/s",
+ Fam: "database",
+ Ctx: "ad.directory_operations",
+ Priority: prioADDirectoryOperations,
+ Dims: module.Dims{
+ {ID: "ad_directory_operations_total_read", Name: "read", Algo: module.Incremental},
+ {ID: "ad_directory_operations_total_write", Name: "write", Algo: module.Incremental},
+ {ID: "ad_directory_operations_total_search", Name: "search", Algo: module.Incremental},
+ },
+ }
+ adNameCacheLookupsChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_name_cache_lookups",
+ Title: "Name cache lookups",
+ Units: "lookups/s",
+ Fam: "database",
+ Ctx: "ad.name_cache_lookups",
+ Priority: prioADNameCacheLookups,
+ Dims: module.Dims{
+ {ID: "ad_name_cache_lookups_total", Name: "lookups", Algo: module.Incremental},
+ },
+ }
+ adNameCacheHitsChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_name_cache_hits",
+ Title: "Name cache hits",
+ Units: "hits/s",
+ Fam: "database",
+ Ctx: "ad.name_cache_hits",
+ Priority: prioADCacheHits,
+ Dims: module.Dims{
+ {ID: "ad_name_cache_hits_total", Name: "hits", Algo: module.Incremental},
+ },
+ }
+ adDRAReplicationIntersiteCompressedTrafficChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_dra_replication_intersite_compressed_traffic",
+ Title: "DRA replication compressed traffic withing site",
+ Units: "bytes/s",
+ Fam: "replication",
+ Ctx: "ad.dra_replication_intersite_compressed_traffic",
+ Priority: prioADDRAReplicationIntersiteCompressedTraffic,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "ad_replication_data_intersite_bytes_total_inbound", Name: "inbound", Algo: module.Incremental},
+ {ID: "ad_replication_data_intersite_bytes_total_outbound", Name: "outbound", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ adDRAReplicationIntrasiteCompressedTrafficChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_dra_replication_intrasite_compressed_traffic",
+ Title: "DRA replication compressed traffic between sites",
+ Units: "bytes/s",
+ Fam: "replication",
+ Ctx: "ad.dra_replication_intrasite_compressed_traffic",
+ Priority: prioADDRAReplicationIntrasiteCompressedTraffic,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "ad_replication_data_intrasite_bytes_total_inbound", Name: "inbound", Algo: module.Incremental},
+ {ID: "ad_replication_data_intrasite_bytes_total_outbound", Name: "outbound", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ adDRAReplicationSyncObjectRemainingChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_dra_replication_sync_objects_remaining",
+ Title: "DRA replication full sync objects remaining",
+ Units: "objects",
+ Fam: "replication",
+ Ctx: "ad.dra_replication_sync_objects_remaining",
+ Priority: prioADDRAReplicationSyncObjectsRemaining,
+ Dims: module.Dims{
+ {ID: "ad_replication_inbound_sync_objects_remaining", Name: "inbound"},
+ },
+ }
+ adDRAReplicationObjectsFilteredChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_dra_replication_objects_filtered",
+ Title: "DRA replication objects filtered",
+ Units: "objects/s",
+ Fam: "replication",
+ Ctx: "ad.dra_replication_objects_filtered",
+ Priority: prioADDRAReplicationObjectsFiltered,
+ Dims: module.Dims{
+ {ID: "ad_replication_inbound_objects_filtered_total", Name: "inbound", Algo: module.Incremental},
+ },
+ }
+ adDRAReplicationPropertiesUpdatedChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_dra_replication_properties_updated",
+ Title: "DRA replication properties updated",
+ Units: "properties/s",
+ Fam: "replication",
+ Ctx: "ad.dra_replication_properties_updated",
+ Priority: prioADDRAReplicationPropertiesUpdated,
+ Dims: module.Dims{
+ {ID: "ad_replication_inbound_properties_updated_total", Name: "inbound", Algo: module.Incremental},
+ },
+ }
+ adDRAReplicationPropertiesFilteredChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_dra_replication_properties_filtered",
+ Title: "DRA replication properties filtered",
+ Units: "properties/s",
+ Fam: "replication",
+ Ctx: "ad.dra_replication_properties_filtered",
+ Priority: prioADDRAReplicationPropertiesFiltered,
+ Dims: module.Dims{
+ {ID: "ad_replication_inbound_properties_filtered_total", Name: "inbound", Algo: module.Incremental},
+ },
+ }
+ adDRAReplicationPendingSyncsChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_dra_replication_pending_syncs",
+ Title: "DRA replication pending syncs",
+ Units: "syncs",
+ Fam: "replication",
+ Ctx: "ad.dra_replication_pending_syncs",
+ Priority: prioADReplicationPendingSyncs,
+ Dims: module.Dims{
+ {ID: "ad_replication_pending_synchronizations", Name: "pending"},
+ },
+ }
+ adDRAReplicationSyncRequestsChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_dra_replication_sync_requests",
+ Title: "DRA replication sync requests",
+ Units: "requests/s",
+ Fam: "replication",
+ Ctx: "ad.dra_replication_sync_requests",
+ Priority: prioADDRASyncRequests,
+ Dims: module.Dims{
+ {ID: "ad_replication_sync_requests_total", Name: "request", Algo: module.Incremental},
+ },
+ }
+ adDirectoryServiceThreadsChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_ds_threads",
+ Title: "Directory Service threads",
+ Units: "threads",
+ Fam: "replication",
+ Ctx: "ad.ds_threads",
+ Priority: prioADDirectoryServiceThreadsInUse,
+ Dims: module.Dims{
+ {ID: "ad_directory_service_threads", Name: "in_use"},
+ },
+ }
+ adLDAPLastBindTimeChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_ldap_last_bind_time",
+ Title: "LDAP last successful bind time",
+ Units: "seconds",
+ Fam: "bind",
+ Ctx: "ad.ldap_last_bind_time",
+ Priority: prioADLDAPBindTime,
+ Dims: module.Dims{
+ {ID: "ad_ldap_last_bind_time_seconds", Name: "last_bind"},
+ },
+ }
+ adBindsTotalChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_binds",
+ Title: "Successful binds",
+ Units: "bind/s",
+ Fam: "bind",
+ Ctx: "ad.binds",
+ Priority: prioADBindsTotal,
+ Dims: module.Dims{
+ {ID: "ad_binds_total", Name: "binds", Algo: module.Incremental},
+ },
+ }
+ adLDAPSearchesChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_ldap_searches",
+ Title: "LDAP client search operations",
+ Units: "searches/s",
+ Fam: "ldap",
+ Ctx: "ad.ldap_searches",
+ Priority: prioADLDAPSearchesTotal,
+ Dims: module.Dims{
+ {ID: "ad_ldap_searches_total", Name: "searches", Algo: module.Incremental},
+ },
+ }
+ // https://techcommunity.microsoft.com/t5/ask-the-directory-services-team/understanding-atq-performance-counters-yet-another-twist-in-the/ba-p/400293
+ adATQAverageRequestLatencyChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_atq_average_request_latency",
+ Title: "Average request processing time",
+ Units: "seconds",
+ Fam: "queue",
+ Ctx: "ad.atq_average_request_latency",
+ Priority: prioADATQAverageRequestLatency,
+ Dims: module.Dims{
+ {ID: "ad_atq_average_request_latency", Name: "time", Div: precision},
+ },
+ }
+ adATQOutstandingRequestsChart = module.Chart{
+ OverModule: "ad",
+ ID: "ad_atq_outstanding_requests",
+ Title: "Outstanding requests",
+ Units: "requests",
+ Fam: "queue",
+ Ctx: "ad.atq_outstanding_requests",
+ Priority: prioADATQOutstandingRequests,
+ Dims: module.Dims{
+ {ID: "ad_atq_outstanding_requests", Name: "outstanding"},
+ },
+ }
+)
+
+// AD CS
+var (
+ adcsCertTemplateChartsTmpl = module.Charts{
+ adcsCertTemplateRequestsChartTmpl.Copy(),
+ adcsCertTemplateFailedRequestsChartTmpl.Copy(),
+ adcsCertTemplateIssuedRequestsChartTmpl.Copy(),
+ adcsCertTemplatePendingRequestsChartTmpl.Copy(),
+ adcsCertTemplateRequestProcessingTimeChartTmpl.Copy(),
+
+ adcsCertTemplateRetrievalsChartTmpl.Copy(),
+ adcsCertificateRetrievalsTimeChartTmpl.Copy(),
+ adcsCertTemplateRequestCryptoSigningTimeChartTmpl.Copy(),
+ adcsCertTemplateRequestPolicyModuleProcessingTimeChartTmpl.Copy(),
+ adcsCertTemplateChallengeResponseChartTmpl.Copy(),
+ adcsCertTemplateChallengeResponseProcessingTimeChartTmpl.Copy(),
+ adcsCertTemplateSignedCertificateTimestampListsChartTmpl.Copy(),
+ adcsCertTemplateSignedCertificateTimestampListProcessingTimeChartTmpl.Copy(),
+ }
+ adcsCertTemplateRequestsChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template%s_requests",
+ Title: "Certificate requests processed",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adcs.cert_template_requests",
+ Priority: prioADCSCertTemplateRequests,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_requests_total", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ adcsCertTemplateFailedRequestsChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_failed_requests",
+ Title: "Certificate failed requests processed",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adcs.cert_template_failed_requests",
+ Priority: prioADCSCertTemplateFailedRequests,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_failed_requests_total", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ adcsCertTemplateIssuedRequestsChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_issued_requests",
+ Title: "Certificate issued requests processed",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adcs.cert_template_issued_requests",
+ Priority: prioADCSCertTemplateIssuesRequests,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_issued_requests_total", Name: "issued", Algo: module.Incremental},
+ },
+ }
+ adcsCertTemplatePendingRequestsChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_pending_requests",
+ Title: "Certificate pending requests processed",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adcs.cert_template_pending_requests",
+ Priority: prioADCSCertTemplatePendingRequests,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_pending_requests_total", Name: "pending", Algo: module.Incremental},
+ },
+ }
+ adcsCertTemplateRequestProcessingTimeChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_request_processing_time",
+ Title: "Certificate last request processing time",
+ Units: "seconds",
+ Fam: "requests",
+ Ctx: "adcs.cert_template_request_processing_time",
+ Priority: prioADCSCertTemplateRequestProcessingTime,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_request_processing_time_seconds", Name: "processing_time", Div: precision},
+ },
+ }
+ adcsCertTemplateChallengeResponseChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_challenge_responses",
+ Title: "Certificate challenge responses",
+ Units: "responses/s",
+ Fam: "responses",
+ Ctx: "adcs.cert_template_challenge_responses",
+ Priority: prioADCSCertTemplateChallengeResponses,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_challenge_responses_total", Name: "challenge", Algo: module.Incremental},
+ },
+ }
+ adcsCertTemplateRetrievalsChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_retrievals",
+ Title: "Total of certificate retrievals",
+ Units: "retrievals/s",
+ Fam: "retrievals",
+ Ctx: "adcs.cert_template_retrievals",
+ Priority: prioADCSCertTemplateRetrievals,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_retrievals_total", Name: "retrievals", Algo: module.Incremental},
+ },
+ }
+ adcsCertificateRetrievalsTimeChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_retrievals_processing_time",
+ Title: "Certificate last retrieval processing time",
+ Units: "seconds",
+ Fam: "retrievals",
+ Ctx: "adcs.cert_template_retrieval_processing_time",
+ Priority: prioADCSCertTemplateRetrievalProcessingTime,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_retrievals_processing_time_seconds", Name: "processing_time", Div: precision},
+ },
+ }
+ adcsCertTemplateRequestCryptoSigningTimeChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_request_cryptographic_signing_time",
+ Title: "Certificate last signing operation request time",
+ Units: "seconds",
+ Fam: "timings",
+ Ctx: "adcs.cert_template_request_cryptographic_signing_time",
+ Priority: prioADCSCertTemplateRequestCryptoSigningTime,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_request_cryptographic_signing_time_seconds", Name: "singing_time", Div: precision},
+ },
+ }
+ adcsCertTemplateRequestPolicyModuleProcessingTimeChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_request_policy_module_processing_time",
+ Title: "Certificate last policy module processing request time",
+ Units: "seconds",
+ Fam: "timings",
+ Ctx: "adcs.cert_template_request_policy_module_processing",
+ Priority: prioADCSCertTemplateRequestPolicyModuleProcessingTime,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_request_policy_module_processing_time_seconds", Name: "processing_time", Div: precision},
+ },
+ }
+ adcsCertTemplateChallengeResponseProcessingTimeChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_challenge_response_processing_time",
+ Title: "Certificate last challenge response time",
+ Units: "seconds",
+ Fam: "timings",
+ Ctx: "adcs.cert_template_challenge_response_processing_time",
+ Priority: prioADCSCertTemplateChallengeResponseProcessingTime,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_challenge_response_processing_time_seconds", Name: "processing_time", Div: precision},
+ },
+ }
+ adcsCertTemplateSignedCertificateTimestampListsChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_signed_certificate_timestamp_lists",
+ Title: "Certificate Signed Certificate Timestamp Lists processed",
+ Units: "lists/s",
+ Fam: "timings",
+ Ctx: "adcs.cert_template_signed_certificate_timestamp_lists",
+ Priority: prioADCSCertTemplateSignedCertificateTimestampLists,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_signed_certificate_timestamp_lists_total", Name: "processed", Algo: module.Incremental},
+ },
+ }
+ adcsCertTemplateSignedCertificateTimestampListProcessingTimeChartTmpl = module.Chart{
+ OverModule: "adcs",
+ ID: "adcs_cert_template_%s_signed_certificate_timestamp_list_processing_time",
+ Title: "Certificate last Signed Certificate Timestamp List process time",
+ Units: "seconds",
+ Fam: "timings",
+ Ctx: "adcs.cert_template_signed_certificate_timestamp_list_processing_time",
+ Priority: prioADCSCertTemplateSignedCertificateTimestampListProcessingTime,
+ Dims: module.Dims{
+ {ID: "adcs_cert_template_%s_signed_certificate_timestamp_list_processing_time_seconds", Name: "processing_time", Div: precision},
+ },
+ }
+)
+
+// AD FS
+var (
+ adfsCharts = module.Charts{
+ adfsADLoginConnectionFailuresChart.Copy(),
+ adfsCertificateAuthenticationsChart.Copy(),
+ adfsDBArtifactFailuresChart.Copy(),
+ adfsDBArtifactQueryTimeSecondsChart.Copy(),
+ adfsDBConfigFailuresChart.Copy(),
+ adfsDBConfigQueryTimeSecondsChart.Copy(),
+ adfsDeviceAuthenticationsChart.Copy(),
+ adfsExternalAuthenticationsChart.Copy(),
+ adfsFederatedAuthenticationsChart.Copy(),
+ adfsFederationMetadataRequestsChart.Copy(),
+
+ adfsOAuthAuthorizationRequestsChart.Copy(),
+ adfsOAuthClientAuthenticationsChart.Copy(),
+ adfsOAuthClientCredentialRequestsChart.Copy(),
+ adfsOAuthClientPrivKeyJwtAuthenticationsChart.Copy(),
+ adfsOAuthClientSecretBasicAuthenticationsChart.Copy(),
+ adfsOAuthClientSecretPostAuthenticationsChart.Copy(),
+ adfsOAuthClientWindowsAuthenticationsChart.Copy(),
+ adfsOAuthLogonCertificateRequestsChart.Copy(),
+ adfsOAuthPasswordGrantRequestsChart.Copy(),
+ adfsOAuthTokenRequestsChart.Copy(),
+
+ adfsPassiveRequestsChart.Copy(),
+ adfsPassportAuthenticationsChart.Copy(),
+ adfsPasswordChangeChart.Copy(),
+ adfsSAMLPTokenRequestsChart.Copy(),
+ adfsSSOAuthenticationsChart.Copy(),
+ adfsTokenRequestsChart.Copy(),
+ adfsUserPasswordAuthenticationsChart.Copy(),
+ adfsWindowsIntegratedAuthenticationsChart.Copy(),
+ adfsWSFedTokenRequestsSuccessChart.Copy(),
+ adfsWSTrustTokenRequestsSuccessChart.Copy(),
+ }
+
+ adfsADLoginConnectionFailuresChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_ad_login_connection_failures",
+ Title: "Connection failures",
+ Units: "failures/s",
+ Fam: "ad",
+ Ctx: "adfs.ad_login_connection_failures",
+ Priority: prioADFSADLoginConnectionFailures,
+ Dims: module.Dims{
+ {ID: "adfs_ad_login_connection_failures_total", Name: "connection", Algo: module.Incremental},
+ },
+ }
+ adfsCertificateAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_certificate_authentications",
+ Title: "User Certificate authentications",
+ Units: "authentications/s",
+ Fam: "auth",
+ Ctx: "adfs.certificate_authentications",
+ Priority: prioADFSCertificateAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_certificate_authentications_total", Name: "authentications", Algo: module.Incremental},
+ },
+ }
+
+ adfsDBArtifactFailuresChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_db_artifact_failures",
+ Title: "Connection failures to the artifact database",
+ Units: "failures/s",
+ Fam: "db artifact",
+ Ctx: "adfs.db_artifact_failures",
+ Priority: prioADFSDBArtifactFailures,
+ Dims: module.Dims{
+ {ID: "adfs_db_artifact_failure_total", Name: "connection", Algo: module.Incremental},
+ },
+ }
+ adfsDBArtifactQueryTimeSecondsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_db_artifact_query_time_seconds",
+ Title: "Time taken for an artifact database query",
+ Units: "seconds/s",
+ Fam: "db artifact",
+ Ctx: "adfs.db_artifact_query_time_seconds",
+ Priority: prioADFSDBArtifactQueryTimeSeconds,
+ Dims: module.Dims{
+ {ID: "adfs_db_artifact_query_time_seconds_total", Name: "query_time", Algo: module.Incremental, Div: precision},
+ },
+ }
+ adfsDBConfigFailuresChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_db_config_failures",
+ Title: "Connection failures to the configuration database",
+ Units: "failures/s",
+ Fam: "db config",
+ Ctx: "adfs.db_config_failures",
+ Priority: prioADFSDBConfigFailures,
+ Dims: module.Dims{
+ {ID: "adfs_db_config_failure_total", Name: "connection", Algo: module.Incremental},
+ },
+ }
+ adfsDBConfigQueryTimeSecondsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_db_config_query_time_seconds",
+ Title: "Time taken for a configuration database query",
+ Units: "seconds/s",
+ Fam: "db config",
+ Ctx: "adfs.db_config_query_time_seconds",
+ Priority: prioADFSDBConfigQueryTimeSeconds,
+ Dims: module.Dims{
+ {ID: "adfs_db_config_query_time_seconds_total", Name: "query_time", Algo: module.Incremental, Div: precision},
+ },
+ }
+ adfsDeviceAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_device_authentications",
+ Title: "Device authentications",
+ Units: "authentications/s",
+ Fam: "auth",
+ Ctx: "adfs.device_authentications",
+ Priority: prioADFSDeviceAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_device_authentications_total", Name: "authentications", Algo: module.Incremental},
+ },
+ }
+ adfsExternalAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_external_authentications",
+ Title: "Authentications from external MFA providers",
+ Units: "authentications/s",
+ Fam: "auth",
+ Ctx: "adfs.external_authentications",
+ Priority: prioADFSExternalAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_external_authentications_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_external_authentications_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsFederatedAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_federated_authentications",
+ Title: "Authentications from Federated Sources",
+ Units: "authentications/s",
+ Fam: "auth",
+ Ctx: "adfs.federated_authentications",
+ Priority: prioADFSFederatedAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_federated_authentications_total", Name: "authentications", Algo: module.Incremental},
+ },
+ }
+ adfsFederationMetadataRequestsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_federation_metadata_requests",
+ Title: "Federation Metadata requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adfs.federation_metadata_requests",
+ Priority: prioADFSFederationMetadataRequests,
+ Dims: module.Dims{
+ {ID: "adfs_federation_metadata_requests_total", Name: "requests", Algo: module.Incremental},
+ },
+ }
+
+ adfsOAuthAuthorizationRequestsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_authorization_requests",
+ Title: "Incoming requests to the OAuth Authorization endpoint",
+ Units: "requests/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_authorization_requests",
+ Priority: prioADFSOauthAuthorizationRequests,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_authorization_requests_total", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ adfsOAuthClientAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_client_authentications",
+ Title: "OAuth client authentications",
+ Units: "authentications/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_client_authentications",
+ Priority: prioADFSOauthClientAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_client_authentication_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_oauth_client_authentication_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsOAuthClientCredentialRequestsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_client_credentials_requests",
+ Title: "OAuth client credentials requests",
+ Units: "requests/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_client_credentials_requests",
+ Priority: prioADFSOauthClientCredentials,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_client_credentials_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_oauth_client_credentials_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsOAuthClientPrivKeyJwtAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_client_privkey_jwt_authentications",
+ Title: "OAuth client private key JWT authentications",
+ Units: "authentications/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_client_privkey_jwt_authentications",
+ Priority: prioADFSOauthClientPrivkeyJwtAuthentication,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_client_privkey_jwt_authentications_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_oauth_client_privkey_jtw_authentication_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsOAuthClientSecretBasicAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_client_secret_basic_authentications",
+ Title: "OAuth client secret basic authentications",
+ Units: "authentications/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_client_secret_basic_authentications",
+ Priority: prioADFSOauthClientSecretBasicAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_client_secret_basic_authentications_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_oauth_client_secret_basic_authentications_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsOAuthClientSecretPostAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_client_secret_post_authentications",
+ Title: "OAuth client secret post authentications",
+ Units: "authentications/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_client_secret_post_authentications",
+ Priority: prioADFSOauthClientSecretPostAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_client_secret_post_authentications_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_oauth_client_secret_post_authentications_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsOAuthClientWindowsAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_client_windows_authentications",
+ Title: "OAuth client windows integrated authentications",
+ Units: "authentications/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_client_windows_authentications",
+ Priority: prioADFSOauthClientWindowsAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_client_windows_authentications_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_oauth_client_windows_authentications_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsOAuthLogonCertificateRequestsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_logon_certificate_requests",
+ Title: "OAuth logon certificate requests",
+ Units: "requests/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_logon_certificate_requests",
+ Priority: prioADFSOauthLogonCertificateRequests,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_logon_certificate_token_requests_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_oauth_logon_certificate_requests_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsOAuthPasswordGrantRequestsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_password_grant_requests",
+ Title: "OAuth password grant requests",
+ Units: "requests/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_password_grant_requests",
+ Priority: prioADFSOauthPasswordGrantRequests,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_password_grant_requests_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_oauth_password_grant_requests_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsOAuthTokenRequestsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_oauth_token_requests_success",
+ Title: "Successful RP token requests over OAuth protocol",
+ Units: "requests/s",
+ Fam: "oauth",
+ Ctx: "adfs.oauth_token_requests_success",
+ Priority: prioADFSOauthTokenRequestsSuccess,
+ Dims: module.Dims{
+ {ID: "adfs_oauth_token_requests_success_total", Name: "success", Algo: module.Incremental},
+ },
+ }
+
+ adfsPassiveRequestsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_passive_requests",
+ Title: "Passive requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adfs.passive_requests",
+ Priority: prioADFSPassiveRequests,
+ Dims: module.Dims{
+ {ID: "adfs_passive_requests_total", Name: "passive", Algo: module.Incremental},
+ },
+ }
+ adfsPassportAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_passport_authentications",
+ Title: "Microsoft Passport SSO authentications",
+ Units: "authentications/s",
+ Fam: "auth",
+ Ctx: "adfs.passport_authentications",
+ Priority: prioADFSPassportAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_passport_authentications_total", Name: "passport", Algo: module.Incremental},
+ },
+ }
+ adfsPasswordChangeChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_password_change_requests",
+ Title: "Password change requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adfs.password_change_requests",
+ Priority: prioADFSPasswordChangeRequests,
+ Dims: module.Dims{
+ {ID: "adfs_password_change_succeeded_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_password_change_failed_total", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ adfsSAMLPTokenRequestsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_samlp_token_requests_success",
+ Title: "Successful RP token requests over SAML-P protocol",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adfs.samlp_token_requests_success",
+ Priority: prioADFSSAMLPTokenRequests,
+ Dims: module.Dims{
+ {ID: "adfs_samlp_token_requests_success_total", Name: "success", Algo: module.Incremental},
+ },
+ }
+ adfsWSTrustTokenRequestsSuccessChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_wstrust_token_requests_success",
+ Title: "Successful RP token requests over WS-Trust protocol",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adfs.wstrust_token_requests_success",
+ Priority: prioADFSWSTrustTokenRequestsSuccess,
+ Dims: module.Dims{
+ {ID: "adfs_wstrust_token_requests_success_total", Name: "success", Algo: module.Incremental},
+ },
+ }
+ adfsSSOAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_sso_authentications",
+ Title: "SSO authentications",
+ Units: "authentications/s",
+ Fam: "auth",
+ Ctx: "adfs.sso_authentications",
+ Priority: prioADFSSSOAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_sso_authentications_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_sso_authentications_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsTokenRequestsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_token_requests",
+ Title: "Token access requests",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adfs.token_requests",
+ Priority: prioADFSTokenRequests,
+ Dims: module.Dims{
+ {ID: "adfs_token_requests_total", Name: "requests", Algo: module.Incremental},
+ },
+ }
+ adfsUserPasswordAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_userpassword_authentications",
+ Title: "AD U/P authentications",
+ Units: "authentications/s",
+ Fam: "auth",
+ Ctx: "adfs.userpassword_authentications",
+ Priority: prioADFSUserPasswordAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_sso_authentications_success_total", Name: "success", Algo: module.Incremental},
+ {ID: "adfs_sso_authentications_failure_total", Name: "failure", Algo: module.Incremental},
+ },
+ }
+ adfsWindowsIntegratedAuthenticationsChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_windows_integrated_authentications",
+ Title: "Windows integrated authentications using Kerberos or NTLM",
+ Units: "authentications/s",
+ Fam: "auth",
+ Ctx: "adfs.windows_integrated_authentications",
+ Priority: prioADFSWindowsIntegratedAuthentications,
+ Dims: module.Dims{
+ {ID: "adfs_windows_integrated_authentications_total", Name: "authentications", Algo: module.Incremental},
+ },
+ }
+ adfsWSFedTokenRequestsSuccessChart = module.Chart{
+ OverModule: "adfs",
+ ID: "adfs_wsfed_token_requests_success",
+ Title: "Successful RP token requests over WS-Fed protocol",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "adfs.wsfed_token_requests_success",
+ Priority: prioADFSWSFedTokenRequestsSuccess,
+ Dims: module.Dims{
+ {ID: "adfs_wsfed_token_requests_success_total", Name: "success", Algo: module.Incremental},
+ },
+ }
+)
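+
+// Note on the Div field used by several dimensions below (e.g. Div: precision):
+// dimension values are collected as integers, so fractional metrics (seconds,
+// percentages) are assumed to be multiplied by `precision` on the collection
+// side and divided back by the same constant when the chart is rendered.
+// A minimal sketch of that assumption, with an assumed precision of 1000
+// (mx and latencySeconds are placeholder names):
+//
+//	const precision = 1000
+//	// collection side: store a float as a scaled integer
+//	mx["exchange_rpc_avg_latency_sec"] = int64(latencySeconds * precision)
+//	// chart side: {ID: "exchange_rpc_avg_latency_sec", Name: "latency", Div: precision}
+//	// displayed value = collected value / precision = latencySeconds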
+
+// Exchange
+var (
+ exchangeCharts = module.Charts{
+ exchangeActiveSyncPingCMDsPendingChart.Copy(),
+ exchangeActiveSyncRequestsChart.Copy(),
+ exchangeActiveSyncCMDsChart.Copy(),
+ exchangeAutoDiscoverRequestsChart.Copy(),
+ exchangeAvailableServiceRequestsChart.Copy(),
+ exchangeOWACurrentUniqueUsersChart.Copy(),
+ exchangeOWARequestsChart.Copy(),
+ exchangeRPCActiveUsersCountChart.Copy(),
+ exchangeRPCAvgLatencyChart.Copy(),
+ exchangeRPCConnectionChart.Copy(),
+ exchangeRPCOperationsChart.Copy(),
+ exchangeRPCRequestsChart.Copy(),
+ exchangeRPCUserChart.Copy(),
+ exchangeTransportQueuesActiveMailBoxDelivery.Copy(),
+ exchangeTransportQueuesExternalActiveRemoteDelivery.Copy(),
+ exchangeTransportQueuesExternalLargestDelivery.Copy(),
+ exchangeTransportQueuesInternalActiveRemoteDelivery.Copy(),
+ exchangeTransportQueuesInternalLargestDelivery.Copy(),
+ exchangeTransportQueuesRetryMailboxDelivery.Copy(),
+ exchangeTransportQueuesUnreachable.Copy(),
+ exchangeTransportQueuesPoison.Copy(),
+ }
+ exchangeWorkloadChartsTmpl = module.Charts{
+ exchangeWorkloadActiveTasks.Copy(),
+ exchangeWorkloadCompletedTasks.Copy(),
+ exchangeWorkloadQueuedTasks.Copy(),
+ exchangeWorkloadYieldedTasks.Copy(),
+
+ exchangeWorkloadActivityStatus.Copy(),
+ }
+ exchangeLDAPChartsTmpl = module.Charts{
+ exchangeLDAPLongRunningOPS.Copy(),
+ exchangeLDAPReadTime.Copy(),
+ exchangeLDAPSearchTime.Copy(),
+ exchangeLDAPTimeoutErrors.Copy(),
+ exchangeLDAPWriteTime.Copy(),
+ }
+ exchangeHTTPProxyChartsTmpl = module.Charts{
+ exchangeProxyAvgAuthLatency.Copy(),
+ exchangeProxyAvgCasProcessingLatencySec.Copy(),
+ exchangeProxyMailboxProxyFailureRate.Copy(),
+ exchangeProxyMailboxServerLocatorAvgLatencySec.Copy(),
+ exchangeProxyOutstandingProxyRequests.Copy(),
+ exchangeProxyRequestsTotal.Copy(),
+ }
+
+ exchangeActiveSyncPingCMDsPendingChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_activesync_ping_cmds_pending",
+ Title: "Ping commands pending in queue",
+ Units: "commands",
+ Fam: "sync",
+ Ctx: "exchange.activesync_ping_cmds_pending",
+ Priority: prioExchangeActiveSyncPingCMDsPending,
+ Dims: module.Dims{
+ {ID: "exchange_activesync_ping_cmds_pending", Name: "pending"},
+ },
+ }
+ exchangeActiveSyncRequestsChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_activesync_requests",
+ Title: "HTTP requests received from ASP.NET",
+ Units: "requests/s",
+ Fam: "sync",
+ Ctx: "exchange.activesync_requests",
+ Priority: prioExchangeActiveSyncRequests,
+ Dims: module.Dims{
+ {ID: "exchange_activesync_requests_total", Name: "received", Algo: module.Incremental},
+ },
+ }
+ exchangeActiveSyncCMDsChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_activesync_sync_cmds",
+ Title: "Sync commands processed",
+ Units: "commands/s",
+ Fam: "sync",
+ Ctx: "exchange.activesync_sync_cmds",
+ Priority: prioExchangeActiveSyncSyncCMDs,
+ Dims: module.Dims{
+ {ID: "exchange_activesync_sync_cmds_total", Name: "processed", Algo: module.Incremental},
+ },
+ }
+ exchangeAutoDiscoverRequestsChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_autodiscover_requests",
+ Title: "Autodiscover service requests processed",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "exchange.autodiscover_requests",
+ Priority: prioExchangeAutoDiscoverRequests,
+ Dims: module.Dims{
+ {ID: "exchange_autodiscover_requests_total", Name: "processed", Algo: module.Incremental},
+ },
+ }
+ exchangeAvailableServiceRequestsChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_avail_service_requests",
+ Title: "Requests serviced",
+ Units: "requests/s",
+ Fam: "requests",
+ Ctx: "exchange.avail_service_requests",
+ Priority: prioExchangeAvailServiceRequests,
+ Dims: module.Dims{
+ {ID: "exchange_avail_service_requests_per_sec", Name: "serviced", Algo: module.Incremental},
+ },
+ }
+ exchangeOWACurrentUniqueUsersChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_owa_current_unique_users",
+ Title: "Unique users currently logged on to Outlook Web App",
+ Units: "users",
+ Fam: "owa",
+ Ctx: "exchange.owa_current_unique_users",
+ Priority: prioExchangeOWACurrentUniqueUsers,
+ Dims: module.Dims{
+ {ID: "exchange_owa_current_unique_users", Name: "logged-in"},
+ },
+ }
+ exchangeOWARequestsChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_owa_requests_total",
+ Title: "Requests handled by Outlook Web App",
+ Units: "requests/s",
+ Fam: "owa",
+ Ctx: "exchange.owa_requests_total",
+ Priority: prioExchangeOWARequestsTotal,
+ Dims: module.Dims{
+ {ID: "exchange_owa_requests_total", Name: "handled", Algo: module.Incremental},
+ },
+ }
+ exchangeRPCActiveUsersCountChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_rpc_active_user",
+ Title: "Active unique users in the last 2 minutes",
+ Units: "users",
+ Fam: "rpc",
+ Ctx: "exchange.rpc_active_user_count",
+ Priority: prioExchangeRPCActiveUserCount,
+ Dims: module.Dims{
+ {ID: "exchange_rpc_active_user_count", Name: "active"},
+ },
+ }
+ exchangeRPCAvgLatencyChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_rpc_avg_latency",
+ Title: "Average latency",
+ Units: "seconds",
+ Fam: "rpc",
+ Ctx: "exchange.rpc_avg_latency",
+ Priority: prioExchangeRPCAvgLatency,
+ Dims: module.Dims{
+ {ID: "exchange_rpc_avg_latency_sec", Name: "latency", Div: precision},
+ },
+ }
+ exchangeRPCConnectionChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_rpc_connection",
+ Title: "Client connections",
+ Units: "connections",
+ Fam: "rpc",
+ Ctx: "exchange.rpc_connection_count",
+ Priority: prioExchangeRPCConnectionCount,
+ Dims: module.Dims{
+ {ID: "exchange_rpc_connection_count", Name: "connections"},
+ },
+ }
+ exchangeRPCOperationsChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_rpc_operations",
+ Title: "RPC operations",
+ Units: "operations/s",
+ Fam: "rpc",
+ Ctx: "exchange.rpc_operations",
+ Priority: prioExchangeRPCOperationsTotal,
+ Dims: module.Dims{
+ {ID: "exchange_rpc_operations_total", Name: "operations", Algo: module.Incremental},
+ },
+ }
+ exchangeRPCRequestsChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_rpc_requests_total",
+ Title: "Clients requests currently being processed",
+ Units: "requests",
+ Fam: "rpc",
+ Ctx: "exchange.rpc_requests",
+ Priority: prioExchangeRPCRequests,
+ Dims: module.Dims{
+ {ID: "exchange_rpc_requests", Name: "processed"},
+ },
+ }
+ exchangeRPCUserChart = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_rpc_user",
+ Title: "RPC users",
+ Units: "users",
+ Fam: "rpc",
+ Ctx: "exchange.rpc_user_count",
+ Priority: prioExchangeRpcUserCount,
+ Dims: module.Dims{
+ {ID: "exchange_rpc_user_count", Name: "users"},
+ },
+ }
+
+ // Source: https://learn.microsoft.com/en-us/exchange/mail-flow/queues/queues?view=exchserver-2019
+ exchangeTransportQueuesActiveMailBoxDelivery = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_transport_queues_active_mailbox_delivery",
+ Title: "Active Mailbox Delivery Queue length",
+ Units: "messages",
+ Fam: "queue",
+ Ctx: "exchange.transport_queues_active_mail_box_delivery",
+ Priority: prioExchangeTransportQueuesActiveMailboxDelivery,
+ Dims: module.Dims{
+ {ID: "exchange_transport_queues_active_mailbox_delivery_low_priority", Name: "low"},
+ {ID: "exchange_transport_queues_active_mailbox_delivery_high_priority", Name: "high"},
+ {ID: "exchange_transport_queues_active_mailbox_delivery_none_priority", Name: "none"},
+ {ID: "exchange_transport_queues_active_mailbox_delivery_normal_priority", Name: "normal"},
+ },
+ }
+ exchangeTransportQueuesExternalActiveRemoteDelivery = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_transport_queues_external_active_remote_delivery",
+ Title: "External Active Remote Delivery Queue length",
+ Units: "messages",
+ Fam: "queue",
+ Ctx: "exchange.transport_queues_external_active_remote_delivery",
+ Priority: prioExchangeTransportQueuesExternalActiveRemoteDelivery,
+ Dims: module.Dims{
+ {ID: "exchange_transport_queues_external_active_remote_delivery_low_priority", Name: "low"},
+ {ID: "exchange_transport_queues_external_active_remote_delivery_high_priority", Name: "high"},
+ {ID: "exchange_transport_queues_external_active_remote_delivery_none_priority", Name: "none"},
+ {ID: "exchange_transport_queues_external_active_remote_delivery_normal_priority", Name: "normal"},
+ },
+ }
+ exchangeTransportQueuesExternalLargestDelivery = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_transport_queues_external_largest_delivery",
+ Title: "External Largest Delivery Queue length",
+ Units: "messages",
+ Fam: "queue",
+ Ctx: "exchange.transport_queues_external_largest_delivery",
+ Priority: prioExchangeTransportQueuesExternalLargestDelivery,
+ Dims: module.Dims{
+ {ID: "exchange_transport_queues_external_largest_delivery_low_priority", Name: "low"},
+ {ID: "exchange_transport_queues_external_largest_delivery_high_priority", Name: "high"},
+ {ID: "exchange_transport_queues_external_largest_delivery_none_priority", Name: "none"},
+ {ID: "exchange_transport_queues_external_largest_delivery_normal_priority", Name: "normal"},
+ },
+ }
+ exchangeTransportQueuesInternalActiveRemoteDelivery = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_transport_queues_internal_active_remote_delivery",
+ Title: "Internal Active Remote Delivery Queue length",
+ Units: "messages",
+ Fam: "queue",
+ Ctx: "exchange.transport_queues_internal_active_remote_delivery",
+ Priority: prioExchangeTransportQueuesInternalActiveRemoteDeliery,
+ Dims: module.Dims{
+ {ID: "exchange_transport_queues_internal_active_remote_delivery_low_priority", Name: "low"},
+ {ID: "exchange_transport_queues_internal_active_remote_delivery_high_priority", Name: "high"},
+ {ID: "exchange_transport_queues_internal_active_remote_delivery_none_priority", Name: "none"},
+ {ID: "exchange_transport_queues_internal_active_remote_delivery_normal_priority", Name: "normal"},
+ },
+ }
+ exchangeTransportQueuesInternalLargestDelivery = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_transport_queues_internal_largest_delivery",
+ Title: "Internal Largest Delivery Queue length",
+ Units: "messages",
+ Fam: "queue",
+ Ctx: "exchange.transport_queues_internal_largest_delivery",
+ Priority: prioExchangeTransportQueuesInternalLargestDelivery,
+ Dims: module.Dims{
+ {ID: "exchange_transport_queues_internal_largest_delivery_low_priority", Name: "low"},
+ {ID: "exchange_transport_queues_internal_largest_delivery_high_priority", Name: "high"},
+ {ID: "exchange_transport_queues_internal_largest_delivery_none_priority", Name: "none"},
+ {ID: "exchange_transport_queues_internal_largest_delivery_normal_priority", Name: "normal"},
+ },
+ }
+ exchangeTransportQueuesRetryMailboxDelivery = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_transport_queues_retry_mailbox_delivery",
+ Title: "Internal Active Remote Delivery Queue length",
+ Units: "messages",
+ Fam: "queue",
+ Ctx: "exchange.transport_queues_retry_mailbox_delivery",
+ Priority: prioExchangeTransportQueuesRetryMailboxDelivery,
+ Dims: module.Dims{
+ {ID: "exchange_transport_queues_retry_mailbox_delivery_low_priority", Name: "low"},
+ {ID: "exchange_transport_queues_retry_mailbox_delivery_high_priority", Name: "high"},
+ {ID: "exchange_transport_queues_retry_mailbox_delivery_none_priority", Name: "none"},
+ {ID: "exchange_transport_queues_retry_mailbox_delivery_normal_priority", Name: "normal"},
+ },
+ }
+ exchangeTransportQueuesUnreachable = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_transport_queues_unreachable",
+ Title: "Unreachable Queue length",
+ Units: "messages",
+ Fam: "queue",
+ Ctx: "exchange.transport_queues_unreachable",
+ Priority: prioExchangeTransportQueuesUnreachable,
+ Dims: module.Dims{
+ {ID: "exchange_transport_queues_unreachable_low_priority", Name: "low"},
+ {ID: "exchange_transport_queues_unreachable_high_priority", Name: "high"},
+ {ID: "exchange_transport_queues_unreachable_none_priority", Name: "none"},
+ {ID: "exchange_transport_queues_unreachable_normal_priority", Name: "normal"},
+ },
+ }
+ exchangeTransportQueuesPoison = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_transport_queues_poison",
+ Title: "Poison Queue Length",
+ Units: "messages/s",
+ Fam: "queue",
+ Ctx: "exchange.transport_queues_poison",
+ Priority: prioExchangeTransportQueuesPoison,
+ Dims: module.Dims{
+ {ID: "exchange_transport_queues_poison_high_priority", Name: "high"},
+ {ID: "exchange_transport_queues_poison_low_priority", Name: "low"},
+ {ID: "exchange_transport_queues_poison_none_priority", Name: "none"},
+ {ID: "exchange_transport_queues_poison_normal_priority", Name: "normal"},
+ },
+ }
+
+ exchangeWorkloadActiveTasks = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_workload_%s_tasks",
+ Title: "Workload active tasks",
+ Units: "tasks",
+ Fam: "workload",
+ Ctx: "exchange.workload_active_tasks",
+ Priority: prioExchangeWorkloadActiveTasks,
+ Dims: module.Dims{
+ {ID: "exchange_workload_%s_active_tasks", Name: "active"},
+ },
+ }
+ exchangeWorkloadCompletedTasks = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_workload_%s_completed_tasks",
+ Title: "Workload completed tasks",
+ Units: "tasks/s",
+ Fam: "workload",
+ Ctx: "exchange.workload_completed_tasks",
+ Priority: prioExchangeWorkloadCompleteTasks,
+ Dims: module.Dims{
+ {ID: "exchange_workload_%s_completed_tasks", Name: "completed", Algo: module.Incremental},
+ },
+ }
+ exchangeWorkloadQueuedTasks = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_workload_%s_queued_tasks",
+ Title: "Workload queued tasks",
+ Units: "tasks/s",
+ Fam: "workload",
+ Ctx: "exchange.workload_queued_tasks",
+ Priority: prioExchangeWorkloadQueueTasks,
+ Dims: module.Dims{
+ {ID: "exchange_workload_%s_queued_tasks", Name: "queued", Algo: module.Incremental},
+ },
+ }
+ exchangeWorkloadYieldedTasks = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_workload_%s_yielded_tasks",
+ Title: "Workload yielded tasks",
+ Units: "tasks/s",
+ Fam: "workload",
+ Ctx: "exchange.workload_yielded_tasks",
+ Priority: prioExchangeWorkloadYieldedTasks,
+ Dims: module.Dims{
+ {ID: "exchange_workload_%s_yielded_tasks", Name: "yielded", Algo: module.Incremental},
+ },
+ }
+ exchangeWorkloadActivityStatus = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_workload_%s_activity_status",
+ Title: "Workload activity status",
+ Units: "status",
+ Fam: "workload",
+ Ctx: "exchange.workload_activity_status",
+ Priority: prioExchangeWorkloadActivityStatus,
+ Dims: module.Dims{
+ {ID: "exchange_workload_%s_is_active", Name: "active"},
+ {ID: "exchange_workload_%s_is_paused", Name: "paused"},
+ },
+ }
+
+ exchangeLDAPLongRunningOPS = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_ldap_%s_long_running_ops",
+ Title: "Long Running LDAP operations",
+ Units: "operations/s",
+ Fam: "ldap",
+ Ctx: "exchange.ldap_long_running_ops_per_sec",
+ Priority: prioExchangeLDAPLongRunningOPS,
+ Dims: module.Dims{
+ {ID: "exchange_ldap_%s_long_running_ops_per_sec", Name: "long-running", Algo: module.Incremental},
+ },
+ }
+ exchangeLDAPReadTime = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_ldap_%s_read_time",
+ Title: "Time to send an LDAP read request and receive a response",
+ Units: "seconds",
+ Fam: "ldap",
+ Ctx: "exchange.ldap_read_time",
+ Priority: prioExchangeLDAPReadTime,
+ Dims: module.Dims{
+ {ID: "exchange_ldap_%s_read_time_sec", Name: "read", Algo: module.Incremental, Div: precision},
+ },
+ }
+ exchangeLDAPSearchTime = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_ldap_%s_search_time",
+ Title: "Time to send an LDAP search request and receive a response",
+ Units: "seconds",
+ Fam: "ldap",
+ Ctx: "exchange.ldap_search_time",
+ Priority: prioExchangeLDAPSearchTime,
+ Dims: module.Dims{
+ {ID: "exchange_ldap_%s_search_time_sec", Name: "search", Algo: module.Incremental, Div: precision},
+ },
+ }
+ exchangeLDAPWriteTime = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_ldap_%s_write_time",
+ Title: "Time to send an LDAP search request and receive a response",
+ Units: "second",
+ Fam: "ldap",
+ Ctx: "exchange.ldap_write_time",
+ Priority: prioExchangeLDAPWriteTime,
+ Dims: module.Dims{
+ {ID: "exchange_ldap_%s_write_time_sec", Name: "write", Algo: module.Incremental, Div: precision},
+ },
+ }
+ exchangeLDAPTimeoutErrors = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_ldap_%s_timeout_errors",
+ Title: "LDAP timeout errors",
+ Units: "errors/s",
+ Fam: "ldap",
+ Ctx: "exchange.ldap_timeout_errors",
+ Priority: prioExchangeLDAPTimeoutErrors,
+ Dims: module.Dims{
+ {ID: "exchange_ldap_%s_timeout_errors_total", Name: "timeout", Algo: module.Incremental},
+ },
+ }
+
+ exchangeProxyAvgAuthLatency = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_proxy_%s_avg_auth_latency",
+ Title: "Average time spent authenticating CAS",
+ Units: "seconds",
+ Fam: "proxy",
+ Ctx: "exchange.http_proxy_avg_auth_latency",
+ Priority: prioExchangeHTTPProxyAVGAuthLatency,
+ Dims: module.Dims{
+ {ID: "exchange_http_proxy_%s_avg_auth_latency", Name: "latency"},
+ },
+ }
+ exchangeProxyAvgCasProcessingLatencySec = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_proxy_%s_avg_cas_processing_latency_sec",
+ Title: "Average time spent authenticating CAS",
+ Units: "seconds",
+ Fam: "proxy",
+ Ctx: "exchange.http_proxy_avg_cas_processing_latency_sec",
+ Priority: prioExchangeHTTPProxyAVGCASProcessingLatency,
+ Dims: module.Dims{
+ {ID: "exchange_http_proxy_%s_avg_cas_proccessing_latency_sec", Name: "latency"},
+ },
+ }
+ exchangeProxyMailboxProxyFailureRate = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_proxy_%s_mailbox_proxy_failure_rate",
+ Title: "Percentage of failures between this CAS and MBX servers",
+ Units: "percentage",
+ Fam: "proxy",
+ Ctx: "exchange.http_proxy_mailbox_proxy_failure_rate",
+ Priority: prioExchangeHTTPProxyMailboxProxyFailureRate,
+ Dims: module.Dims{
+ {ID: "exchange_http_proxy_%s_mailbox_proxy_failure_rate", Name: "failures", Div: precision},
+ },
+ }
+ exchangeProxyMailboxServerLocatorAvgLatencySec = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_proxy_%s_mailbox_server_locator_avg_latency_sec",
+ Title: "Average latency of MailboxServerLocator web service calls",
+ Units: "seconds",
+ Fam: "proxy",
+ Ctx: "exchange.http_proxy_mailbox_server_locator_avg_latency_sec",
+ Priority: prioExchangeHTTPProxyServerLocatorAvgLatency,
+ Dims: module.Dims{
+ {ID: "exchange_http_proxy_%s_mailbox_server_locator_avg_latency_sec", Name: "latency", Div: precision},
+ },
+ }
+ exchangeProxyOutstandingProxyRequests = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_proxy_%s_outstanding_proxy_requests",
+ Title: "Concurrent outstanding proxy requests",
+ Units: "requests",
+ Fam: "proxy",
+ Ctx: "exchange.http_proxy_outstanding_proxy_requests",
+ Priority: prioExchangeHTTPProxyOutstandingProxyRequests,
+ Dims: module.Dims{
+ {ID: "exchange_http_proxy_%s_outstanding_proxy_requests", Name: "outstanding"},
+ },
+ }
+ exchangeProxyRequestsTotal = module.Chart{
+ OverModule: "exchange",
+ ID: "exchange_proxy_%s_requests_total",
+ Title: "Number of proxy requests processed each second",
+ Units: "requests/s",
+ Fam: "proxy",
+ Ctx: "exchange.http_proxy_requests",
+ Priority: prioExchangeHTTPProxyRequestsTotal,
+ Dims: module.Dims{
+ {ID: "exchange_http_proxy_%s_requests_total", Name: "processed", Algo: module.Incremental},
+ },
+ }
+)
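+
+// The templated charts above (IDs and dimension IDs containing "%s") are kept
+// as prototypes. A minimal sketch, assuming the usual go.d pattern, of how they
+// would be materialized for one discovered instance; the helper name and the
+// "workload" label are illustrative assumptions, not part of this file:
+//
+//	func newExchangeWorkloadCharts(workload string) *module.Charts {
+//		charts := exchangeWorkloadChartsTmpl.Copy()
+//		for _, chart := range *charts {
+//			chart.ID = fmt.Sprintf(chart.ID, workload)
+//			chart.Labels = []module.Label{{Key: "workload", Value: workload}}
+//			for _, dim := range chart.Dims {
+//				dim.ID = fmt.Sprintf(dim.ID, workload)
+//			}
+//		}
+//		return charts
+//	}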
+
+// Logon
+var (
+ logonCharts = module.Charts{
+ logonSessionsChart.Copy(),
+ }
+ logonSessionsChart = module.Chart{
+ ID: "logon_active_sessions_by_type",
+ Title: "Active User Logon Sessions By Type",
+ Units: "sessions",
+ Fam: "logon",
+ Ctx: "windows.logon_type_sessions",
+ Type: module.Stacked,
+ Priority: prioLogonSessions,
+ Dims: module.Dims{
+ {ID: "logon_type_system_sessions", Name: "system"},
+ {ID: "logon_type_proxy_sessions", Name: "proxy"},
+ {ID: "logon_type_network_sessions", Name: "network"},
+ {ID: "logon_type_interactive_sessions", Name: "interactive"},
+ {ID: "logon_type_batch_sessions", Name: "batch"},
+ {ID: "logon_type_service_sessions", Name: "service"},
+ {ID: "logon_type_unlock_sessions", Name: "unlock"},
+ {ID: "logon_type_network_clear_text_sessions", Name: "network_clear_text"},
+ {ID: "logon_type_new_credentials_sessions", Name: "new_credentials"},
+ {ID: "logon_type_remote_interactive_sessions", Name: "remote_interactive"},
+ {ID: "logon_type_cached_interactive_sessions", Name: "cached_interactive"},
+ {ID: "logon_type_cached_remote_interactive_sessions", Name: "cached_remote_interactive"},
+ {ID: "logon_type_cached_unlock_sessions", Name: "cached_unlock"},
+ },
+ }
+)
+
+// Thermal zone
+var (
+ thermalzoneChartsTmpl = module.Charts{
+ thermalzoneTemperatureChartTmpl.Copy(),
+ }
+ thermalzoneTemperatureChartTmpl = module.Chart{
+ ID: "thermalzone_%s_temperature",
+ Title: "Thermal zone temperature",
+ Units: "Celsius",
+ Fam: "thermalzone",
+ Ctx: "windows.thermalzone_temperature",
+ Priority: prioThermalzoneTemperature,
+ Dims: module.Dims{
+ {ID: "thermalzone_%s_temperature", Name: "temperature"},
+ },
+ }
+)
+
+// Processes
+var (
+ processesCharts = module.Charts{
+ processesCPUUtilizationTotalChart.Copy(),
+ processesMemoryUsageChart.Copy(),
+ processesHandlesChart.Copy(),
+ processesIOBytesChart.Copy(),
+ processesIOOperationsChart.Copy(),
+ processesPageFaultsChart.Copy(),
+ processesPageFileBytes.Copy(),
+ processesThreads.Copy(),
+ }
+ processesCPUUtilizationTotalChart = module.Chart{
+ ID: "processes_cpu_utilization",
+ Title: "CPU usage (100% = 1 core)",
+ Units: "percentage",
+ Fam: "processes",
+ Ctx: "windows.processes_cpu_utilization",
+ Type: module.Stacked,
+ Priority: prioProcessesCPUUtilization,
+ }
+ processesMemoryUsageChart = module.Chart{
+ ID: "processes_memory_usage",
+ Title: "Memory usage",
+ Units: "bytes",
+ Fam: "processes",
+ Ctx: "windows.processes_memory_usage",
+ Type: module.Stacked,
+ Priority: prioProcessesMemoryUsage,
+ }
+ processesIOBytesChart = module.Chart{
+ ID: "processes_io_bytes",
+ Title: "Total of IO bytes (read, write, other)",
+ Units: "bytes/s",
+ Fam: "processes",
+ Ctx: "windows.processes_io_bytes",
+ Type: module.Stacked,
+ Priority: prioProcessesIOBytes,
+ }
+ processesIOOperationsChart = module.Chart{
+ ID: "processes_io_operations",
+ Title: "Total of IO events (read, write, other)",
+ Units: "operations/s",
+ Fam: "processes",
+ Ctx: "windows.processes_io_operations",
+ Type: module.Stacked,
+ Priority: prioProcessesIOOperations,
+ }
+ processesPageFaultsChart = module.Chart{
+ ID: "processes_page_faults",
+ Title: "Number of page faults",
+ Units: "pgfaults/s",
+ Fam: "processes",
+ Ctx: "windows.processes_page_faults",
+ Type: module.Stacked,
+ Priority: prioProcessesPageFaults,
+ }
+ processesPageFileBytes = module.Chart{
+ ID: "processes_page_file_bytes",
+ Title: "Bytes used in page file(s)",
+ Units: "bytes",
+ Fam: "processes",
+ Ctx: "windows.processes_file_bytes",
+ Type: module.Stacked,
+ Priority: prioProcessesPageFileBytes,
+ }
+ processesThreads = module.Chart{
+ ID: "processes_threads",
+ Title: "Active threads",
+ Units: "threads",
+ Fam: "processes",
+ Ctx: "windows.processes_threads",
+ Type: module.Stacked,
+ Priority: prioProcessesThreads,
+ }
+ processesHandlesChart = module.Chart{
+ ID: "processes_handles",
+ Title: "Number of handles open",
+ Units: "handles",
+ Fam: "processes",
+ Ctx: "windows.processes_handles",
+ Type: module.Stacked,
+ Priority: prioProcessesHandles,
+ }
+)
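+
+// The processes charts above define no static Dims: dimensions are expected to
+// be added per discovered process at collection time, a common go.d pattern.
+// A minimal sketch of that pattern, assuming a hypothetical process name and
+// metric key:
+//
+//	if chart := charts.Get("processes_cpu_utilization"); chart != nil {
+//		_ = chart.AddDim(&module.Dim{
+//			ID:   "process_" + procName + "_cpu_time", // assumed metric key
+//			Name: procName,
+//			Algo: module.Incremental,
+//		})
+//	}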
+
+// .NET
+var (
+ netFrameworkCLRExceptionsChartsTmpl = module.Charts{
+ netFrameworkCLRExceptionsThrown.Copy(),
+ netFrameworkCLRExceptionsFilters.Copy(),
+ netFrameworkCLRExceptionsFinallys.Copy(),
+ netFrameworkCLRExceptionsThrowToCatchDepth.Copy(),
+ }
+
+ netFrameworkCLRInteropChartsTmpl = module.Charts{
+ netFrameworkCLRInteropCOMCallableWrapper.Copy(),
+ netFrameworkCLRInteropMarshalling.Copy(),
+ netFrameworkCLRInteropStubsCreated.Copy(),
+ }
+
+ netFrameworkCLRJITChartsTmpl = module.Charts{
+ netFrameworkCLRJITMethods.Copy(),
+ netFrameworkCLRJITTime.Copy(),
+ netFrameworkCLRJITStandardFailures.Copy(),
+ netFrameworkCLRJITILBytes.Copy(),
+ }
+
+ netFrameworkCLRLoadingChartsTmpl = module.Charts{
+ netFrameworkCLRLoadingLoaderHeapSize.Copy(),
+ netFrameworkCLRLoadingAppDomainsLoaded.Copy(),
+ netFrameworkCLRLoadingAppDomainsUnloaded.Copy(),
+ netFrameworkCLRLoadingAssembliesLoaded.Copy(),
+ netFrameworkCLRLoadingClassesLoaded.Copy(),
+ netFrameworkCLRLoadingClassLoadFailure.Copy(),
+ }
+
+ netFrameworkCLRLocksAndThreadsChartsTmpl = module.Charts{
+ netFrameworkCLRLockAndThreadsQueueLength.Copy(),
+ netFrameworkCLRLockAndThreadsCurrentLogicalThreads.Copy(),
+ netFrameworkCLRLockAndThreadsCurrentPhysicalThreads.Copy(),
+ netFrameworkCLRLockAndThreadsRecognizedThreads.Copy(),
+ netFrameworkCLRLockAndThreadsContentions.Copy(),
+ }
+
+ netFrameworkCLRMemoryChartsTmpl = module.Charts{
+ netFrameworkCLRMemoryAllocatedBytes.Copy(),
+ netFrameworkCLRMemoryFinalizationSurvivors.Copy(),
+ netFrameworkCLRMemoryHeapSize.Copy(),
+ netFrameworkCLRMemoryPromoted.Copy(),
+ netFrameworkCLRMemoryNumberGCHandles.Copy(),
+ netFrameworkCLRMemoryCollections.Copy(),
+ netFrameworkCLRMemoryInducedGC.Copy(),
+ netFrameworkCLRMemoryNumberPinnedObjects.Copy(),
+ netFrameworkCLRMemoryNumberSinkBlocksInUse.Copy(),
+ netFrameworkCLRMemoryCommitted.Copy(),
+ netFrameworkCLRMemoryReserved.Copy(),
+ netFrameworkCLRMemoryGCTime.Copy(),
+ }
+
+ netFrameworkCLRRemotingChartsTmpl = module.Charts{
+ netFrameworkCLRRemotingChannels.Copy(),
+ netFrameworkCLRRemotingContextBoundClassesLoaded.Copy(),
+ netFrameworkCLRRemotingContextBoundObjects.Copy(),
+ netFrameworkCLRRemotingContextProxies.Copy(),
+ netFrameworkCLRRemotingContexts.Copy(),
+ netFrameworkCLRRemotingCalls.Copy(),
+ }
+
+ netFrameworkCLRSecurityChartsTmpl = module.Charts{
+ netFrameworkCLRSecurityLinkTimeChecks.Copy(),
+ netFrameworkCLRSecurityChecksTime.Copy(),
+ netFrameworkCLRSecurityStackWalkDepth.Copy(),
+ netFrameworkCLRSecurityRuntimeChecks.Copy(),
+ }
+
+ // Exceptions
+ netFrameworkCLRExceptionsThrown = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrexception_thrown",
+ Title: "Thrown exceptions",
+ Units: "exceptions/s",
+ Fam: "exceptions",
+ Ctx: "netframework.clrexception_thrown",
+ Priority: prioNETFrameworkCLRExceptionsThrown,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrexception_thrown_total", Name: "exceptions", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRExceptionsFilters = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrexception_filters",
+ Title: "Executed exception filters",
+ Units: "filters/s",
+ Fam: "exceptions",
+ Ctx: "netframework.clrexception_filters",
+ Priority: prioNETFrameworkCLRExceptionsFilters,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrexception_filters_total", Name: "filters", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRExceptionsFinallys = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrexception_finallys",
+ Title: "Executed finally blocks",
+ Units: "finallys/s",
+ Fam: "exceptions",
+ Ctx: "netframework.clrexception_finallys",
+ Priority: prioNETFrameworkCLRExceptionsFinallys,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrexception_finallys_total", Name: "finallys", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRExceptionsThrowToCatchDepth = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrexception_throw_to_catch_depth",
+ Title: "Traversed stack frames",
+ Units: "stack_frames/s",
+ Fam: "exceptions",
+ Ctx: "netframework.clrexception_throw_to_catch_depth",
+ Priority: prioNETFrameworkCLRExceptionsThrowToCatchDepth,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrexception_throw_to_catch_depth_total", Name: "traversed", Algo: module.Incremental},
+ },
+ }
+
+ // Interop
+ netFrameworkCLRInteropCOMCallableWrapper = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrinterop_com_callable_wrappers",
+ Title: "COM callable wrappers (CCW)",
+ Units: "ccw/s",
+ Fam: "interop",
+ Ctx: "netframework.clrinterop_com_callable_wrappers",
+ Priority: prioNETFrameworkCLRInteropCOMCallableWrappers,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrinterop_com_callable_wrappers_total", Name: "com_callable_wrappers", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRInteropMarshalling = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrinterop_interop_marshalling",
+ Title: "Arguments and return values marshallings",
+ Units: "marshalling/s",
+ Fam: "interop",
+ Ctx: "netframework.clrinterop_interop_marshallings",
+ Priority: prioNETFrameworkCLRInteropMarshalling,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrinterop_interop_marshalling_total", Name: "marshallings", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRInteropStubsCreated = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrinterop_interop_stubs_created",
+ Title: "Created stubs",
+ Units: "stubs/s",
+ Fam: "interop",
+ Ctx: "netframework.clrinterop_interop_stubs_created",
+ Priority: prioNETFrameworkCLRInteropStubsCreated,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrinterop_interop_stubs_created_total", Name: "created", Algo: module.Incremental},
+ },
+ }
+
+ // JIT
+ netFrameworkCLRJITMethods = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrjit_methods",
+ Title: "JIT-compiled methods",
+ Units: "methods/s",
+ Fam: "jit",
+ Ctx: "netframework.clrjit_methods",
+ Priority: prioNETFrameworkCLRJITMethods,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrjit_methods_total", Name: "jit-compiled", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRJITTime = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrjit_time",
+ Title: "Time spent in JIT compilation",
+ Units: "percentage",
+ Fam: "jit",
+ Ctx: "netframework.clrjit_time",
+ Priority: prioNETFrameworkCLRJITTime,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrjit_time_percent", Name: "time"},
+ },
+ }
+ netFrameworkCLRJITStandardFailures = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrjit_standard_failures",
+ Title: "JIT compiler failures",
+ Units: "failures/s",
+ Fam: "jit",
+ Ctx: "netframework.clrjit_standard_failures",
+ Priority: prioNETFrameworkCLRJITStandardFailures,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrjit_standard_failures_total", Name: "failures", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRJITILBytes = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrjit_il_bytes",
+ Title: "Compiled Microsoft intermediate language (MSIL) bytes",
+ Units: "bytes/s",
+ Fam: "jit",
+ Ctx: "netframework.clrjit_il_bytes",
+ Priority: prioNETFrameworkCLRJITILBytes,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrjit_il_bytes_total", Name: "compiled_msil", Algo: module.Incremental},
+ },
+ }
+
+ // Loading
+ netFrameworkCLRLoadingLoaderHeapSize = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrloading_loader_heap_size",
+ Title: "Memory committed by class loader",
+ Units: "bytes",
+ Fam: "loading",
+ Ctx: "netframework.clrloading_loader_heap_size",
+ Priority: prioNETFrameworkCLRLoadingLoaderHeapSize,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrloading_loader_heap_size_bytes", Name: "committed"},
+ },
+ }
+ netFrameworkCLRLoadingAppDomainsLoaded = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrloading_appdomains_loaded",
+ Title: "Loaded application domains",
+ Units: "domain/s",
+ Fam: "loading",
+ Ctx: "netframework.clrloading_appdomains_loaded",
+ Priority: prioNETFrameworkCLRLoadingAppDomainsLoaded,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrloading_appdomains_loaded_total", Name: "loaded", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRLoadingAppDomainsUnloaded = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrloading_appdomains_unloaded",
+ Title: "Unloaded application domains",
+ Units: "domain/s",
+ Fam: "loading",
+ Ctx: "netframework.clrloading_appdomains_unloaded",
+ Priority: prioNETFrameworkCLRLoadingAppDomainsUnloaded,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrloading_appdomains_unloaded_total", Name: "unloaded", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRLoadingAssembliesLoaded = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrloading_assemblies_loaded",
+ Title: "Loaded assemblies",
+ Units: "assemblies/s",
+ Fam: "loading",
+ Ctx: "netframework.clrloading_assemblies_loaded",
+ Priority: prioNETFrameworkCLRLoadingAssembliesLoaded,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrloading_assemblies_loaded_total", Name: "loaded", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRLoadingClassesLoaded = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrloading_classes_loaded",
+ Title: "Loaded classes in all assemblies",
+ Units: "classes/s",
+ Fam: "loading",
+ Ctx: "netframework.clrloading_classes_loaded",
+ Priority: prioNETFrameworkCLRLoadingClassesLoaded,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrloading_classes_loaded_total", Name: "loaded", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRLoadingClassLoadFailure = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrloading_class_load_failure",
+ Title: "Class load failures",
+ Units: "failures/s",
+ Fam: "loading",
+ Ctx: "netframework.clrloading_class_load_failures",
+ Priority: prioNETFrameworkCLRLoadingClassLoadFailure,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrloading_class_load_failures_total", Name: "class_load", Algo: module.Incremental},
+ },
+ }
+
+ // Lock and Threads
+ netFrameworkCLRLockAndThreadsQueueLength = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrlocksandthreads_queue_length",
+ Title: "Threads waited to acquire a managed lock",
+ Units: "threads/s",
+ Fam: "locks threads",
+ Ctx: "netframework.clrlocksandthreads_queue_length",
+ Priority: prioNETFrameworkCLRLocksAndThreadsQueueLength,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrlocksandthreads_queue_length_total", Name: "threads", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRLockAndThreadsCurrentLogicalThreads = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrlocksandthreads_current_logical_threads",
+ Title: "Logical threads",
+ Units: "threads",
+ Fam: "locks threads",
+ Ctx: "netframework.clrlocksandthreads_current_logical_threads",
+ Priority: prioNETFrameworkCLRLocksAndThreadsCurrentLogicalThreads,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrlocksandthreads_current_logical_threads", Name: "logical"},
+ },
+ }
+ netFrameworkCLRLockAndThreadsCurrentPhysicalThreads = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrlocksandthreads_current_physical_threads",
+ Title: "Physical threads",
+ Units: "threads",
+ Fam: "locks threads",
+ Ctx: "netframework.clrlocksandthreads_current_physical_threads",
+ Priority: prioNETFrameworkCLRLocksAndThreadsCurrentPhysicalThreads,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrlocksandthreads_physical_threads_current", Name: "physical"},
+ },
+ }
+ netFrameworkCLRLockAndThreadsRecognizedThreads = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrlocksandthreads_recognized_threads",
+ Title: "Threads recognized by the runtime",
+ Units: "threads/s",
+ Fam: "locks threads",
+ Ctx: "netframework.clrlocksandthreads_recognized_threads",
+ Priority: prioNETFrameworkCLRLocksAndThreadsRecognizedThreads,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrlocksandthreads_recognized_threads_total", Name: "threads", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRLockAndThreadsContentions = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrlocksandthreads_contentions",
+ Title: "Fails to acquire a managed lock",
+ Units: "contentions/s",
+ Fam: "locks threads",
+ Ctx: "netframework.clrlocksandthreads_contentions",
+ Priority: prioNETFrameworkCLRLocksAndThreadsContentions,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrlocksandthreads_contentions_total", Name: "contentions", Algo: module.Incremental},
+ },
+ }
+
+ // Memory
+ netFrameworkCLRMemoryAllocatedBytes = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_allocated_bytes",
+ Title: "Memory allocated on the garbage collection heap",
+ Units: "bytes/s",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_allocated_bytes",
+ Priority: prioNETFrameworkCLRMemoryAllocatedBytes,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_allocated_bytes_total", Name: "allocated", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRMemoryFinalizationSurvivors = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_finalization_survivors",
+ Title: "Objects that survived garbage-collection",
+ Units: "objects",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_finalization_survivors",
+ Priority: prioNETFrameworkCLRMemoryFinalizationSurvivors,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_finalization_survivors", Name: "survived"},
+ },
+ }
+ netFrameworkCLRMemoryHeapSize = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_heap_size",
+ Title: "Maximum bytes that can be allocated",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_heap_size",
+ Priority: prioNETFrameworkCLRMemoryHeapSize,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_heap_size_bytes", Name: "heap"},
+ },
+ }
+ netFrameworkCLRMemoryPromoted = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_promoted",
+ Title: "Memory promoted to the next generation",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_promoted",
+ Priority: prioNETFrameworkCLRMemoryPromoted,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_promoted_bytes", Name: "promoted"},
+ },
+ }
+ netFrameworkCLRMemoryNumberGCHandles = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_number_gc_handles",
+ Title: "Garbage collection handles",
+ Units: "handles",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_number_gc_handles",
+ Priority: prioNETFrameworkCLRMemoryNumberGCHandles,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_number_gc_handles", Name: "used"},
+ },
+ }
+ netFrameworkCLRMemoryCollections = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_collections",
+ Title: "Garbage collections",
+ Units: "gc/s",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_collections",
+ Priority: prioNETFrameworkCLRMemoryCollections,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_collections_total", Name: "gc", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRMemoryInducedGC = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_induced_gc",
+ Title: "Garbage collections induced",
+ Units: "gc/s",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_induced_gc",
+ Priority: prioNETFrameworkCLRMemoryInducedGC,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_induced_gc_total", Name: "gc", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRMemoryNumberPinnedObjects = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_number_pinned_objects",
+ Title: "Pinned objects encountered",
+ Units: "objects",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_number_pinned_objects",
+ Priority: prioNETFrameworkCLRMemoryNumberPinnedObjects,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_number_pinned_objects", Name: "pinned"},
+ },
+ }
+ netFrameworkCLRMemoryNumberSinkBlocksInUse = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_number_sink_blocks_in_use",
+ Title: "Synchronization blocks in use",
+ Units: "blocks",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_number_sink_blocks_in_use",
+ Priority: prioNETFrameworkCLRMemoryNumberSinkBlocksInUse,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_number_sink_blocksinuse", Name: "used"},
+ },
+ }
+ netFrameworkCLRMemoryCommitted = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_committed",
+ Title: "Virtual memory committed by GC",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_committed",
+ Priority: prioNETFrameworkCLRMemoryCommitted,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_committed_bytes", Name: "committed"},
+ },
+ }
+ netFrameworkCLRMemoryReserved = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_reserved",
+ Title: "Virtual memory reserved by GC",
+ Units: "bytes",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_reserved",
+ Priority: prioNETFrameworkCLRMemoryReserved,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_reserved_bytes", Name: "reserved"},
+ },
+ }
+ netFrameworkCLRMemoryGCTime = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrmemory_gc_time",
+ Title: "Time spent on GC",
+ Units: "percentage",
+ Fam: "memory",
+ Ctx: "netframework.clrmemory_gc_time",
+ Priority: prioNETFrameworkCLRMemoryGCTime,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrmemory_gc_time_percent", Name: "time"},
+ },
+ }
+
+ // Remoting
+ netFrameworkCLRRemotingChannels = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrremoting_channels",
+ Title: "Registered channels",
+ Units: "channels/s",
+ Fam: "remoting",
+ Ctx: "netframework.clrremoting_channels",
+ Priority: prioNETFrameworkCLRRemotingChannels,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrremoting_channels_total", Name: "registered", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRRemotingContextBoundClassesLoaded = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrremoting_context_bound_classes_loaded",
+ Title: "Loaded context-bound classes",
+ Units: "classes",
+ Fam: "remoting",
+ Ctx: "netframework.clrremoting_context_bound_classes_loaded",
+ Priority: prioNETFrameworkCLRRemotingContextBoundClassesLoaded,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrremoting_context_bound_classes_loaded", Name: "loaded"},
+ },
+ }
+ netFrameworkCLRRemotingContextBoundObjects = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrremoting_context_bound_objects",
+ Title: "Allocated context-bound objects",
+ Units: "objects/s",
+ Fam: "remoting",
+ Ctx: "netframework.clrremoting_context_bound_objects",
+ Priority: prioNETFrameworkCLRRemotingContextBoundObjects,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrremoting_context_bound_objects_total", Name: "allocated", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRRemotingContextProxies = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrremoting_context_proxies",
+ Title: "Remoting proxy objects",
+ Units: "objects/s",
+ Fam: "remoting",
+ Ctx: "netframework.clrremoting_context_proxies",
+ Priority: prioNETFrameworkCLRRemotingContextProxies,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrremoting_context_proxies_total", Name: "objects", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRRemotingContexts = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrremoting_contexts",
+ Title: "Total of remoting contexts",
+ Units: "contexts",
+ Fam: "remoting",
+ Ctx: "netframework.clrremoting_contexts",
+ Priority: prioNETFrameworkCLRRemotingContexts,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrremoting_contexts", Name: "contexts"},
+ },
+ }
+ netFrameworkCLRRemotingCalls = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrremoting_calls",
+ Title: "Remote Procedure Calls (RPC) invoked",
+ Units: "calls/s",
+ Fam: "remoting",
+ Ctx: "netframework.clrremoting_remote_calls",
+ Priority: prioNETFrameworkCLRRemotingRemoteCalls,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrremoting_remote_calls_total", Name: "rpc", Algo: module.Incremental},
+ },
+ }
+
+ // Security
+ netFrameworkCLRSecurityLinkTimeChecks = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrsecurity_link_time_checks",
+ Title: "Link-time code access security checks",
+ Units: "checks/s",
+ Fam: "security",
+ Ctx: "netframework.clrsecurity_link_time_checks",
+ Priority: prioNETFrameworkCLRSecurityLinkTimeChecks,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrsecurity_link_time_checks_total", Name: "linktime", Algo: module.Incremental},
+ },
+ }
+ netFrameworkCLRSecurityChecksTime = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrsecurity_checks_time",
+ Title: "Time spent performing runtime code access security checks",
+ Units: "percentage",
+ Fam: "security",
+ Ctx: "netframework.clrsecurity_checks_time",
+ Priority: prioNETFrameworkCLRSecurityRTChecksTime,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrsecurity_checks_time_percent", Name: "time"},
+ },
+ }
+ netFrameworkCLRSecurityStackWalkDepth = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrsecurity_stack_walk_depth",
+ Title: "Depth of the stack",
+ Units: "depth",
+ Fam: "security",
+ Ctx: "netframework.clrsecurity_stack_walk_depth",
+ Priority: prioNETFrameworkCLRSecurityStackWalkDepth,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrsecurity_stack_walk_depth", Name: "stack"},
+ },
+ }
+ netFrameworkCLRSecurityRuntimeChecks = module.Chart{
+ OverModule: "netframework",
+ ID: "netframework_%s_clrsecurity_runtime_checks",
+ Title: "Runtime code access security checks performed",
+ Units: "checks/s",
+ Fam: "security",
+ Ctx: "netframework.clrsecurity_runtime_checks",
+ Priority: prioNETFrameworkCLRSecurityRuntimeChecks,
+ Dims: module.Dims{
+ {ID: "netframework_%s_clrsecurity_runtime_checks_total", Name: "runtime", Algo: module.Incremental},
+ },
+ }
+)
+
+// Service
+var (
+ serviceChartsTmpl = module.Charts{
+ serviceStateChartTmpl.Copy(),
+ serviceStatusChartTmpl.Copy(),
+ }
+ serviceStateChartTmpl = module.Chart{
+ ID: "service_%s_state",
+ Title: "Service state",
+ Units: "state",
+ Fam: "services",
+ Ctx: "windows.service_state",
+ Priority: prioServiceState,
+ Dims: module.Dims{
+ {ID: "service_%s_state_running", Name: "running"},
+ {ID: "service_%s_state_stopped", Name: "stopped"},
+ {ID: "service_%s_state_start_pending", Name: "start_pending"},
+ {ID: "service_%s_state_stop_pending", Name: "stop_pending"},
+ {ID: "service_%s_state_continue_pending", Name: "continue_pending"},
+ {ID: "service_%s_state_pause_pending", Name: "pause_pending"},
+ {ID: "service_%s_state_paused", Name: "paused"},
+ {ID: "service_%s_state_unknown", Name: "unknown"},
+ },
+ }
+ serviceStatusChartTmpl = module.Chart{
+ ID: "service_%s_status",
+ Title: "Service status",
+ Units: "status",
+ Fam: "services",
+ Ctx: "windows.service_status",
+ Priority: prioServiceStatus,
+ Dims: module.Dims{
+ {ID: "service_%s_status_ok", Name: "ok"},
+ {ID: "service_%s_status_error", Name: "error"},
+ {ID: "service_%s_status_unknown", Name: "unknown"},
+ {ID: "service_%s_status_degraded", Name: "degraded"},
+ {ID: "service_%s_status_pred_fail", Name: "pred_fail"},
+ {ID: "service_%s_status_starting", Name: "starting"},
+ {ID: "service_%s_status_stopping", Name: "stopping"},
+ {ID: "service_%s_status_service", Name: "service"},
+ {ID: "service_%s_status_stressed", Name: "stressed"},
+ {ID: "service_%s_status_nonrecover", Name: "nonrecover"},
+ {ID: "service_%s_status_no_contact", Name: "no_contact"},
+ {ID: "service_%s_status_lost_comm", Name: "lost_comm"},
+ },
+ }
+)
+
+// HyperV
+var (
+ hypervChartsTmpl = module.Charts{
+ hypervVirtualMachinesHealthChart.Copy(),
+ hypervRootPartitionDeviceSpacePagesChart.Copy(),
+ hypervRootPartitionGPASpacePagesChart.Copy(),
+ hypervRootPartitionGPASpaceModificationsChart.Copy(),
+ hypervRootPartitionAttachedDevicesChart.Copy(),
+ hypervRootPartitionDepositedPagesChart.Copy(),
+ hypervRootPartitionSkippedInterrupts.Copy(),
+ hypervRootPartitionDeviceDMAErrorsChart.Copy(),
+ hypervRootPartitionDeviceInterruptErrorsChart.Copy(),
+ hypervRootPartitionDeviceInterruptThrottledEventsChart.Copy(),
+ hypervRootPartitionIOTlbFlushChart.Copy(),
+ hypervRootPartitionAddressSpaceChart.Copy(),
+ hypervRootPartitionVirtualTlbFlushEntries.Copy(),
+ hypervRootPartitionVirtualTlbPages.Copy(),
+ }
+ hypervVirtualMachinesHealthChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "health_vm",
+ Title: "Virtual machines health status",
+ Units: "vms",
+ Fam: "vms health",
+ Ctx: "hyperv.vms_health",
+ Priority: prioHypervVMHealth,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "hyperv_health_ok", Name: "ok"},
+ {ID: "hyperv_health_critical", Name: "critical"},
+ },
+ }
+ hypervRootPartitionDeviceSpacePagesChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_device_space_pages",
+ Title: "Root partition pages in the device space",
+ Units: "pages",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_device_space_pages",
+ Priority: prioHypervRootPartitionDeviceSpacePages,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_4K_device_pages", Name: "4K"},
+ {ID: "hyperv_root_partition_2M_device_pages", Name: "2M"},
+ {ID: "hyperv_root_partition_1G_device_pages", Name: "1G"},
+ },
+ }
+ hypervRootPartitionGPASpacePagesChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_gpa_space_pages",
+ Title: "Root partition pages in the GPA space",
+ Units: "pages",
+ Fam: "root partition",
+ Ctx: "windows.hyperv_root_partition_gpa_space_pages",
+ Priority: prioHypervRootPartitionGPASpacePages,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_4K_gpa_pages", Name: "4K"},
+ {ID: "hyperv_root_partition_2M_gpa_pages", Name: "2M"},
+ {ID: "hyperv_root_partition_1G_gpa_pages", Name: "1G"},
+ },
+ }
+ hypervRootPartitionGPASpaceModificationsChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_gpa_space_modifications",
+ Title: "Root partition GPA space modifications",
+ Units: "modifications/s",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_gpa_space_modifications",
+ Priority: prioHypervRootPartitionGPASpaceModifications,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_gpa_space_modifications", Name: "gpa", Algo: module.Incremental},
+ },
+ }
+ hypervRootPartitionAttachedDevicesChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_attached_devices",
+ Title: "Root partition attached devices",
+ Units: "devices",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_attached_devices",
+ Priority: prioHypervRootPartitionAttachedDevices,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_attached_devices", Name: "attached"},
+ },
+ }
+ hypervRootPartitionDepositedPagesChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_deposited_pages",
+ Title: "Root partition deposited pages",
+ Units: "pages",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_deposited_pages",
+ Priority: prioHypervRootPartitionDepositedPages,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_deposited_pages", Name: "deposited"},
+ },
+ }
+ hypervRootPartitionSkippedInterrupts = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_skipped_interrupts",
+ Title: "Root partition skipped interrupts",
+ Units: "interrupts",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_skipped_interrupts",
+ Priority: prioHypervRootPartitionSkippedInterrupts,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_physical_pages_allocated", Name: "skipped"},
+ },
+ }
+ hypervRootPartitionDeviceDMAErrorsChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_device_dma_errors",
+ Title: "Root partition illegal DMA requests",
+ Units: "requests",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_device_dma_errors",
+ Priority: prioHypervRootPartitionDeviceDMAErrors,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_deposited_pages", Name: "illegal_dma"},
+ },
+ }
+ hypervRootPartitionDeviceInterruptErrorsChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "partition_device_interrupt_errors",
+ Title: "Root partition illegal interrupt requests",
+ Units: "requests",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_device_interrupt_errors",
+ Priority: prioHypervRootPartitionDeviceInterruptErrors,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_device_interrupt_errors", Name: "illegal_interrupt"},
+ },
+ }
+ hypervRootPartitionDeviceInterruptThrottledEventsChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_device_interrupt_throttle_events",
+ Title: "Root partition throttled interrupts",
+ Units: "events",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_device_interrupt_throttle_events",
+ Priority: prioHypervRootPartitionDeviceInterruptThrottleEvents,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_device_interrupt_throttle_events", Name: "throttling"},
+ },
+ }
+ hypervRootPartitionIOTlbFlushChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_io_tbl_flush",
+ Title: "Root partition flushes of I/O TLBs",
+ Units: "flushes/s",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_io_tlb_flush",
+ Priority: prioHypervRootPartitionIOTlbFlush,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_io_tlb_flush", Name: "flushes", Algo: module.Incremental},
+ },
+ }
+ hypervRootPartitionAddressSpaceChart = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_address_space",
+ Title: "Root partition address spaces in the virtual TLB",
+ Units: "address spaces",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_address_space",
+ Priority: prioHypervRootPartitionAddressSpace,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_address_spaces", Name: "address_spaces"},
+ },
+ }
+ hypervRootPartitionVirtualTlbFlushEntries = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_virtual_tbl_flush_entries",
+ Title: "Root partition flushes of the entire virtual TLB",
+ Units: "flushes/s",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_virtual_tlb_flush_entries",
+ Priority: prioHypervRootPartitionVirtualTlbFlushEntires,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_virtual_tlb_flush_entires", Name: "flushes", Algo: module.Incremental},
+ },
+ }
+ hypervRootPartitionVirtualTlbPages = module.Chart{
+ OverModule: "hyperv",
+ ID: "root_partition_virtual_tlb_pages",
+ Title: "Root partition pages used by the virtual TLB",
+ Units: "pages",
+ Fam: "root partition",
+ Ctx: "hyperv.root_partition_virtual_tlb_pages",
+ Priority: prioHypervRootPartitionVirtualTlbPages,
+ Dims: module.Dims{
+ {ID: "hyperv_root_partition_virtual_tlb_pages", Name: "used"},
+ },
+ }
+)
+
+// HyperV VM Memory
+var (
+ hypervVMChartsTemplate = module.Charts{
+ hypervHypervVMCPUUsageChartTmpl.Copy(),
+ hypervHypervVMMemoryPhysicalChartTmpl.Copy(),
+ hypervHypervVMMemoryPhysicalGuestVisibleChartTmpl.Copy(),
+ hypervHypervVMMemoryPressureCurrentChartTmpl.Copy(),
+ hypervVIDPhysicalPagesAllocatedChartTmpl.Copy(),
+ hypervVIDRemotePhysicalPagesChartTmpl.Copy(),
+ }
+ hypervHypervVMCPUUsageChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_%s_cpu_usage",
+ Title: "VM CPU usage (100% = 1 core)",
+ Units: "percentage",
+ Fam: "vm cpu",
+ Ctx: "hyperv.vm_cpu_usage",
+ Priority: prioHypervVMCPUUsage,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_%s_cpu_guest_run_time", Name: "guest", Div: 1e5, Algo: module.Incremental},
+ {ID: "hyperv_vm_%s_cpu_hypervisor_run_time", Name: "hypervisor", Div: 1e5, Algo: module.Incremental},
+ {ID: "hyperv_vm_%s_cpu_remote_run_time", Name: "remote", Div: 1e5, Algo: module.Incremental},
+ },
+ }
+ hypervHypervVMMemoryPhysicalChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_%s_memory_physical",
+ Title: "VM assigned memory",
+ Units: "MiB",
+ Fam: "vm mem",
+ Ctx: "hyperv.vm_memory_physical",
+ Priority: prioHypervVMMemoryPhysical,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_%s_memory_physical", Name: "assigned_memory"},
+ },
+ }
+ hypervHypervVMMemoryPhysicalGuestVisibleChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_%s_memory_physical_guest_visible",
+ Title: "VM guest visible memory",
+ Units: "MiB",
+ Fam: "vm mem",
+ Ctx: "hyperv.vm_memory_physical_guest_visible",
+ Priority: prioHypervVMMemoryPhysicalGuestVisible,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_%s_memory_physical_guest_visible", Name: "visible_memory"},
+ },
+ }
+ hypervHypervVMMemoryPressureCurrentChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_%s_memory_pressure_current",
+ Title: "VM current pressure",
+ Units: "percentage",
+ Fam: "vm mem",
+ Ctx: "hyperv.vm_memory_pressure_current",
+ Priority: prioHypervVMMemoryPressureCurrent,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_%s_memory_pressure_current", Name: "pressure"},
+ },
+ }
+ hypervVIDPhysicalPagesAllocatedChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_%s_vid_physical_pages_allocated",
+ Title: "VM physical pages allocated",
+ Units: "pages",
+ Fam: "vm mem",
+ Ctx: "hyperv.vm_vid_physical_pages_allocated",
+ Priority: prioHypervVIDPhysicalPagesAllocated,
+ Dims: module.Dims{
+ {ID: "hyperv_vid_%s_physical_pages_allocated", Name: "allocated"},
+ },
+ }
+ hypervVIDRemotePhysicalPagesChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_%s_remote_physical_pages",
+ Title: "VM physical pages not allocated from the preferred NUMA node",
+ Units: "pages",
+ Fam: "vm mem",
+ Ctx: "hyperv.vm_vid_remote_physical_pages",
+ Priority: prioHypervVIDRemotePhysicalPages,
+ Dims: module.Dims{
+ {ID: "hyperv_vid_%s_remote_physical_pages", Name: "remote_physical"},
+ },
+ }
+)
+
+// HyperV VM storage device
+var (
+ hypervVMDeviceChartsTemplate = module.Charts{
+ hypervVMDeviceIOChartTmpl.Copy(),
+ hypervVMDeviceIOPSChartTmpl.Copy(),
+ hypervVMDeviceErrorCountChartTmpl.Copy(),
+ }
+ hypervVMDeviceIOChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_device_%s_bytes_read",
+ Title: "VM storage device IO",
+ Units: "bytes/s",
+ Fam: "vm disk",
+ Ctx: "hyperv.vm_device_bytes",
+ Priority: prioHypervVMDeviceBytes,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_device_%s_bytes_read", Name: "read", Algo: module.Incremental},
+ {ID: "hyperv_vm_device_%s_bytes_written", Name: "written", Algo: module.Incremental},
+ },
+ }
+ hypervVMDeviceIOPSChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_device_%s_operation_read",
+ Title: "VM storage device IOPS",
+ Units: "operations/s",
+ Fam: "vm disk",
+ Ctx: "hyperv.vm_device_operations",
+ Priority: prioHypervVMDeviceOperations,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_device_%s_operations_read", Name: "read", Algo: module.Incremental},
+ {ID: "hyperv_vm_device_%s_operations_written", Name: "write", Algo: module.Incremental},
+ },
+ }
+ hypervVMDeviceErrorCountChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_device_%s_error_count",
+ Title: "VM storage device errors",
+ Units: "errors/s",
+ Fam: "vm disk",
+ Ctx: "hyperv.vm_device_errors",
+ Priority: prioHypervVMDeviceErrors,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_device_%s_error_count", Name: "errors", Algo: module.Incremental},
+ },
+ }
+)
+
+// HyperV VM network interface
+var (
+ hypervVMInterfaceChartsTemplate = module.Charts{
+ hypervVMInterfaceTrafficChartTmpl.Copy(),
+ hypervVMInterfacePacketsChartTmpl.Copy(),
+ hypervVMInterfacePacketsDroppedChartTmpl.Copy(),
+ }
+
+ hypervVMInterfaceTrafficChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_interface_%s_bytes",
+ Title: "VM interface traffic",
+ Units: "bytes/s",
+ Fam: "vm net",
+ Ctx: "hyperv.vm_interface_bytes",
+ Priority: prioHypervVMInterfaceBytes,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_interface_%s_bytes_received", Name: "received", Algo: module.Incremental},
+ {ID: "hyperv_vm_interface_%s_bytes_sent", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ hypervVMInterfacePacketsChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_interface_%s_packets",
+ Title: "VM interface packets",
+ Units: "packets/s",
+ Fam: "vm net",
+ Ctx: "hyperv.vm_interface_packets",
+ Priority: prioHypervVMInterfacePackets,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_interface_%s_packets_received", Name: "received", Algo: module.Incremental},
+ {ID: "hyperv_vm_interface_%s_packets_sent", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ hypervVMInterfacePacketsDroppedChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vm_interface_%s_packets_dropped",
+ Title: "VM interface packets dropped",
+ Units: "drops/s",
+ Fam: "vm net",
+ Ctx: "hyperv.vm_interface_packets_dropped",
+ Priority: prioHypervVMInterfacePacketsDropped,
+ Dims: module.Dims{
+ {ID: "hyperv_vm_interface_%s_packets_incoming_dropped", Name: "incoming", Algo: module.Incremental},
+ {ID: "hyperv_vm_interface_%s_packets_outgoing_dropped", Name: "outgoing", Algo: module.Incremental},
+ },
+ }
+)
+
+// HyperV Virtual Switch
+var (
+ hypervVswitchChartsTemplate = module.Charts{
+ hypervVswitchTrafficChartTmpl.Copy(),
+ hypervVswitchPacketsChartTmpl.Copy(),
+ hypervVswitchDirectedPacketsChartTmpl.Copy(),
+ hypervVswitchBroadcastPacketsChartTmpl.Copy(),
+ hypervVswitchMulticastPacketsChartTmpl.Copy(),
+ hypervVswitchDroppedPacketsChartTmpl.Copy(),
+ hypervVswitchExtensionDroppedPacketsChartTmpl.Copy(),
+ hypervVswitchPacketsFloodedTotalChartTmpl.Copy(),
+ hypervVswitchLearnedMACAddressChartTmpl.Copy(),
+ hypervVswitchPurgedMACAddressChartTmpl.Copy(),
+ }
+
+ hypervVswitchTrafficChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_traffic",
+ Title: "Virtual switch traffic",
+ Units: "bytes/s",
+ Fam: "vswitch traffic",
+ Ctx: "hyperv.vswitch_bytes",
+ Priority: prioHypervVswitchTrafficTotal,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_bytes_received_total", Name: "received", Algo: module.Incremental},
+ {ID: "hyperv_vswitch_%s_bytes_sent_total", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ hypervVswitchPacketsChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_packets",
+ Title: "Virtual switch packets",
+ Units: "packets/s",
+ Fam: "vswitch packets",
+ Ctx: "hyperv.vswitch_packets",
+ Priority: prioHypervVswitchPackets,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_packets_received_total", Name: "received", Algo: module.Incremental},
+ // FIXME: https://github.com/prometheus-community/windows_exporter/pull/1201
+ //{ID: "hyperv_vswitch_%s_packets_sent_total", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ hypervVswitchDirectedPacketsChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_directed_packets",
+ Title: "Virtual switch directed packets",
+ Units: "packets/s",
+ Fam: "vswitch packets",
+ Ctx: "hyperv.vswitch_directed_packets",
+ Priority: prioHypervVswitchDirectedPackets,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_directed_packets_received_total", Name: "received", Algo: module.Incremental},
+ {ID: "hyperv_vswitch_%s_directed_packets_send_total", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ hypervVswitchBroadcastPacketsChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_broadcast_packets",
+ Title: "Virtual switch broadcast packets",
+ Units: "packets/s",
+ Fam: "vswitch packets",
+ Ctx: "hyperv.vswitch_broadcast_packets",
+ Priority: prioHypervVswitchBroadcastPackets,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_broadcast_packets_received_total", Name: "received", Algo: module.Incremental},
+ {ID: "hyperv_vswitch_%s_broadcast_packets_sent_total", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ hypervVswitchMulticastPacketsChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_multicast_packets",
+ Title: "Virtual switch multicast packets",
+ Units: "packets/s",
+ Fam: "vswitch packets",
+ Ctx: "hyperv.vswitch_multicast_packets",
+ Priority: prioHypervVswitchMulticastPackets,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_multicast_packets_received_total", Name: "received", Algo: module.Incremental},
+ {ID: "hyperv_vswitch_%s_multicast_packets_sent_total", Name: "sent", Algo: module.Incremental},
+ },
+ }
+ hypervVswitchDroppedPacketsChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_dropped_packets",
+ Title: "Virtual switch dropped packets",
+ Units: "drops/s",
+ Fam: "vswitch drops",
+ Ctx: "hyperv.vswitch_dropped_packets",
+ Priority: prioHypervVswitchDroppedPackets,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_dropped_packets_incoming_total", Name: "incoming", Algo: module.Incremental},
+ {ID: "hyperv_vswitch_%s_dropped_packets_outcoming_total", Name: "outgoing", Algo: module.Incremental},
+ },
+ }
+ hypervVswitchExtensionDroppedPacketsChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_extensions_dropped_packets_incoming",
+ Title: "Virtual switch extensions dropped packets",
+ Units: "drops/s",
+ Fam: "vswitch drops",
+ Ctx: "hyperv.vswitch_extensions_dropped_packets",
+ Priority: prioHypervVswitchExtensionsDroppedPackets,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_extensions_dropped_packets_incoming_total", Name: "incoming", Algo: module.Incremental},
+ {ID: "hyperv_vswitch_%s_extensions_dropped_packets_outcoming_total", Name: "outgoing", Algo: module.Incremental},
+ },
+ }
+ hypervVswitchPacketsFloodedTotalChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_packets_flooded",
+ Title: "Virtual switch flooded packets",
+ Units: "packets/s",
+ Fam: "vswitch flood",
+ Ctx: "hyperv.vswitch_packets_flooded",
+ Priority: prioHypervVswitchPacketsFlooded,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_packets_flooded_total", Name: "flooded", Algo: module.Incremental},
+ },
+ }
+ hypervVswitchLearnedMACAddressChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_learned_mac_addresses",
+ Title: "Virtual switch learned MAC addresses",
+ Units: "mac addresses/s",
+ Fam: "vswitch mac addresses",
+ Ctx: "hyperv.vswitch_learned_mac_addresses",
+ Priority: prioHypervVswitchLearnedMACAddresses,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_learned_mac_addresses_total", Name: "learned", Algo: module.Incremental},
+ },
+ }
+ hypervVswitchPurgedMACAddressChartTmpl = module.Chart{
+ OverModule: "hyperv",
+ ID: "vswitch_%s_purged_mac_addresses",
+ Title: "Virtual switch purged MAC addresses",
+ Units: "mac addresses/s",
+ Fam: "vswitch mac addresses",
+ Ctx: "hyperv.vswitch_purged_mac_addresses",
+ Priority: prioHypervVswitchPurgeMACAddress,
+ Dims: module.Dims{
+ {ID: "hyperv_vswitch_%s_purged_mac_addresses_total", Name: "purged", Algo: module.Incremental},
+ },
+ }
+)
+
+// Collectors
+var (
+ collectorChartsTmpl = module.Charts{
+ collectorDurationChartTmpl.Copy(),
+ collectorStatusChartTmpl.Copy(),
+ }
+ collectorDurationChartTmpl = module.Chart{
+ ID: "collector_%s_duration",
+ Title: "Duration of a data collection",
+ Units: "seconds",
+ Fam: "collection",
+ Ctx: "windows.collector_duration",
+ Priority: prioCollectorDuration,
+ Dims: module.Dims{
+ {ID: "collector_%s_duration", Name: "duration", Div: precision},
+ },
+ }
+ collectorStatusChartTmpl = module.Chart{
+ ID: "collector_%s_status",
+ Title: "Status of a data collection",
+ Units: "status",
+ Fam: "collection",
+ Ctx: "windows.collector_status",
+ Priority: prioCollectorStatus,
+ Dims: module.Dims{
+ {ID: "collector_%s_status_success", Name: "success"},
+ {ID: "collector_%s_status_fail", Name: "fail"},
+ },
+ }
+)
+
+func (w *Windows) addCPUCharts() {
+ charts := cpuCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
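+// Per-instance chart templates keep "%s" placeholders in chart and dimension IDs.
+// The add*Charts helpers below fill them in with fmt.Sprintf, attach an identifying
+// label, and register the charts; the matching remove*Charts helpers later retire
+// them by ID prefix.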
+func (w *Windows) addCPUCoreCharts(core string) {
+ charts := cpuCoreChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, core)
+ chart.Labels = []module.Label{
+ {Key: "core", Value: core},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, core)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeCPUCoreCharts(core string) {
+ px := fmt.Sprintf("cpu_core_%s", core)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addMemoryCharts() {
+ charts := memCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addDiskCharts(disk string) {
+ charts := diskChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, disk)
+ chart.Labels = []module.Label{
+ {Key: "disk", Value: disk},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, disk)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeDiskCharts(disk string) {
+ px := fmt.Sprintf("logical_disk_%s", disk)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addNICCharts(nic string) {
+ charts := nicChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, nic)
+ chart.Labels = []module.Label{
+ {Key: "nic", Value: nic},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, nic)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeNICCharts(nic string) {
+ px := fmt.Sprintf("nic_%s", nic)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addTCPCharts() {
+ charts := tcpCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addOSCharts() {
+ charts := osCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addSystemCharts() {
+ charts := systemCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addLogonCharts() {
+ charts := logonCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addADFSCharts() {
+ charts := adfsCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addExchangeCharts() {
+ charts := exchangeCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addExchangeWorkloadCharts(name string) {
+ charts := exchangeWorkloadChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Labels = []module.Label{
+ {Key: "workload", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeExchangeWorkloadCharts(name string) {
+ px := fmt.Sprintf("exchange_workload_%s", name)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addExchangeLDAPCharts(name string) {
+ charts := exchangeLDAPChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Labels = []module.Label{
+ {Key: "ldap_process", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeExchangeLDAPCharts(name string) {
+ px := fmt.Sprintf("exchange_ldap_%s", name)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addExchangeHTTPProxyCharts(name string) {
+ charts := exchangeHTTPProxyChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Labels = []module.Label{
+ {Key: "http_proxy", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeExchangeHTTPProxyCharts(name string) {
+ px := fmt.Sprintf("exchange_http_proxy_%s", name)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addThermalZoneCharts(zone string) {
+ charts := thermalzoneChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, zone)
+ chart.Labels = []module.Label{
+ {Key: "thermalzone", Value: zone},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, zone)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeThermalZoneCharts(zone string) {
+ px := fmt.Sprintf("thermalzone_%s", zone)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addIISWebsiteCharts(website string) {
+ charts := iisWebsiteChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, website)
+ chart.Labels = []module.Label{
+ {Key: "website", Value: website},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, website)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeIIWebsiteSCharts(website string) {
+ px := fmt.Sprintf("iis_website_%s", website)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addMSSQLDBCharts(instance string, dbname string) {
+ charts := mssqlDatabaseChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, dbname, instance)
+ chart.Labels = []module.Label{
+ {Key: "mssql_instance", Value: instance},
+ {Key: "database", Value: dbname},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, dbname, instance)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeMSSQLDBCharts(instance string, dbname string) {
+ px := fmt.Sprintf("mssql_db_%s_instance_%s", dbname, instance)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addMSSQLInstanceCharts(instance string) {
+ charts := mssqlInstanceChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, instance)
+ chart.Labels = []module.Label{
+ {Key: "mssql_instance", Value: instance},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, instance)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeMSSQLInstanceCharts(instance string) {
+ px := fmt.Sprintf("mssql_instance_%s", instance)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addProcessesCharts() {
+ charts := processesCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addADCharts() {
+ charts := adCharts.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addCertificateTemplateCharts(template string) {
+ charts := adcsCertTemplateChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, template)
+ chart.Labels = []module.Label{
+ {Key: "cert_template", Value: template},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, template)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeCertificateTemplateCharts(template string) {
+ px := fmt.Sprintf("adcs_cert_template_%s", template)
+ w.removeCharts(px)
+}
+
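+// Processes do not get their own chart instances: each discovered process is added
+// as a dimension to the shared per-metric charts, and the CPU dimension of the Idle
+// process is hidden by default.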
+func (w *Windows) addProcessToCharts(procID string) {
+ for _, chart := range *w.Charts() {
+ var dim *module.Dim
+ switch chart.ID {
+ case processesCPUUtilizationTotalChart.ID:
+ id := fmt.Sprintf("process_%s_cpu_time", procID)
+ dim = &module.Dim{ID: id, Name: procID, Algo: module.Incremental, Div: 1000, Mul: 100}
+ if procID == "Idle" {
+ dim.Hidden = true
+ }
+ case processesMemoryUsageChart.ID:
+ id := fmt.Sprintf("process_%s_working_set_private_bytes", procID)
+ dim = &module.Dim{ID: id, Name: procID}
+ case processesIOBytesChart.ID:
+ id := fmt.Sprintf("process_%s_io_bytes", procID)
+ dim = &module.Dim{ID: id, Name: procID, Algo: module.Incremental}
+ case processesIOOperationsChart.ID:
+ id := fmt.Sprintf("process_%s_io_operations", procID)
+ dim = &module.Dim{ID: id, Name: procID, Algo: module.Incremental}
+ case processesPageFaultsChart.ID:
+ id := fmt.Sprintf("process_%s_page_faults", procID)
+ dim = &module.Dim{ID: id, Name: procID, Algo: module.Incremental}
+ case processesPageFileBytes.ID:
+ id := fmt.Sprintf("process_%s_page_file_bytes", procID)
+ dim = &module.Dim{ID: id, Name: procID}
+ case processesThreads.ID:
+ id := fmt.Sprintf("process_%s_threads", procID)
+ dim = &module.Dim{ID: id, Name: procID}
+ case processesHandlesChart.ID:
+ id := fmt.Sprintf("process_%s_handles", procID)
+ dim = &module.Dim{ID: id, Name: procID}
+ default:
+ continue
+ }
+
+ if err := chart.AddDim(dim); err != nil {
+ w.Warning(err)
+ continue
+ }
+ chart.MarkNotCreated()
+ }
+}
+
+func (w *Windows) removeProcessFromCharts(procID string) {
+ for _, chart := range *w.Charts() {
+ var id string
+ switch chart.ID {
+ case processesCPUUtilizationTotalChart.ID:
+ id = fmt.Sprintf("process_%s_cpu_time", procID)
+ case processesMemoryUsageChart.ID:
+ id = fmt.Sprintf("process_%s_working_set_private_bytes", procID)
+ case processesIOBytesChart.ID:
+ id = fmt.Sprintf("process_%s_io_bytes", procID)
+ case processesIOOperationsChart.ID:
+ id = fmt.Sprintf("process_%s_io_operations", procID)
+ case processesPageFaultsChart.ID:
+ id = fmt.Sprintf("process_%s_page_faults", procID)
+ case processesPageFileBytes.ID:
+ id = fmt.Sprintf("process_%s_page_file_bytes", procID)
+ case processesThreads.ID:
+ id = fmt.Sprintf("process_%s_threads", procID)
+ case processesHandlesChart.ID:
+ id = fmt.Sprintf("process_%s_handles", procID)
+ default:
+ continue
+ }
+
+ if err := chart.MarkDimRemove(id, false); err != nil {
+ w.Warning(err)
+ continue
+ }
+ chart.MarkNotCreated()
+ }
+}
+
+func (w *Windows) addProcessNetFrameworkExceptionsCharts(procName string) {
+ charts := netFrameworkCLRExceptionsChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName))
+ chart.Labels = []module.Label{
+ {Key: "process", Value: procName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, procName)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeProcessFromNetFrameworkExceptionsCharts(procName string) {
+ px := fmt.Sprintf("netframework_%s_clrexception", strings.ToLower(procName))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addProcessNetFrameworkInteropCharts(procName string) {
+ charts := netFrameworkCLRInteropChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName))
+ chart.Labels = []module.Label{
+ {Key: "process", Value: procName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, procName)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeProcessNetFrameworkInteropCharts(procName string) {
+ px := fmt.Sprintf("netframework_%s_clrinterop", strings.ToLower(procName))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addProcessNetFrameworkJITCharts(procName string) {
+ charts := netFrameworkCLRJITChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName))
+ chart.Labels = []module.Label{
+ {Key: "process", Value: procName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, procName)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeProcessNetFrameworkJITCharts(procName string) {
+ px := fmt.Sprintf("netframework_%s_clrjit", strings.ToLower(procName))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addProcessNetFrameworkLoadingCharts(procName string) {
+ charts := netFrameworkCLRLoadingChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName))
+ chart.Labels = []module.Label{
+ {Key: "process", Value: procName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, procName)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeProcessNetFrameworkLoadingCharts(procName string) {
+ px := fmt.Sprintf("netframework_%s_clrloading", strings.ToLower(procName))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addProcessNetFrameworkLocksAndThreadsCharts(procName string) {
+ charts := netFrameworkCLRLocksAndThreadsChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName))
+ chart.Labels = []module.Label{
+ {Key: "process", Value: procName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, procName)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeProcessNetFrameworkLocksAndThreadsCharts(procName string) {
+ px := fmt.Sprintf("netframework_%s_clrlocksandthreads", strings.ToLower(procName))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addProcessNetFrameworkMemoryCharts(procName string) {
+ charts := netFrameworkCLRMemoryChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName))
+ chart.Labels = []module.Label{
+ {Key: "process", Value: procName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, procName)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeProcessNetFrameworkMemoryCharts(procName string) {
+ px := fmt.Sprintf("netframework_%s_clrmemory", strings.ToLower(procName))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addProcessNetFrameworkRemotingCharts(procName string) {
+ charts := netFrameworkCLRRemotingChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName))
+ chart.Labels = []module.Label{
+ {Key: "process", Value: procName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, procName)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeProcessNetFrameworkRemotingCharts(procName string) {
+ px := fmt.Sprintf("netframework_%s_clrremoting", strings.ToLower(procName))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addProcessNetFrameworkSecurityCharts(procName string) {
+ charts := netFrameworkCLRSecurityChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, strings.ToLower(procName))
+ chart.Labels = []module.Label{
+ {Key: "process", Value: procName},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, procName)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeProcessNetFrameworkSecurityCharts(procName string) {
+ px := fmt.Sprintf("netframework_%s_clrsecurity", strings.ToLower(procName))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addServiceCharts(svc string) {
+ charts := serviceChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, svc)
+ chart.Labels = []module.Label{
+ {Key: "service", Value: svc},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, svc)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeServiceCharts(svc string) {
+ px := fmt.Sprintf("service_%s", svc)
+ w.removeCharts(px)
+}
+
+func (w *Windows) addCollectorCharts(name string) {
+ charts := collectorChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Labels = []module.Label{
+ {Key: "collector", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) addHypervCharts() {
+ charts := hypervChartsTmpl.Copy()
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
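+// Hyper-V object names are passed through hypervCleanName before being embedded in
+// chart and dimension IDs; the original name is preserved as the label value.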
+func (w *Windows) addHypervVMCharts(vm string) {
+ charts := hypervVMChartsTemplate.Copy()
+ n := hypervCleanName(vm)
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, n)
+ chart.Labels = []module.Label{
+ {Key: "vm_name", Value: vm},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, n)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeHypervVMCharts(vm string) {
+ px := fmt.Sprintf("vm_%s", hypervCleanName(vm))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addHypervVMDeviceCharts(device string) {
+ charts := hypervVMDeviceChartsTemplate.Copy()
+ n := hypervCleanName(device)
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, n)
+ chart.Labels = []module.Label{
+ {Key: "vm_device", Value: device},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, n)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeHypervVMDeviceCharts(device string) {
+ px := fmt.Sprintf("vm_device_%s", hypervCleanName(device))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addHypervVMInterfaceCharts(iface string) {
+ charts := hypervVMInterfaceChartsTemplate.Copy()
+ n := hypervCleanName(iface)
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, n)
+ chart.Labels = []module.Label{
+ {Key: "vm_interface", Value: iface},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, n)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeHypervVMInterfaceCharts(iface string) {
+ px := fmt.Sprintf("vm_interface_%s", hypervCleanName(iface))
+ w.removeCharts(px)
+}
+
+func (w *Windows) addHypervVSwitchCharts(vswitch string) {
+ charts := hypervVswitchChartsTemplate.Copy()
+ n := hypervCleanName(vswitch)
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, n)
+ chart.Labels = []module.Label{
+ {Key: "vswitch", Value: vswitch},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, n)
+ }
+ }
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *Windows) removeHypervVSwitchCharts(vswitch string) {
+ px := fmt.Sprintf("vswitch_%s", hypervCleanName(vswitch))
+ w.removeCharts(px)
+}
+
+func (w *Windows) removeCollectorCharts(name string) {
+ px := fmt.Sprintf("collector_%s", name)
+ w.removeCharts(px)
+}
+
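+// removeCharts marks every chart whose ID starts with the given prefix for removal,
+// so the framework retires it on the next update.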
+func (w *Windows) removeCharts(prefix string) {
+ for _, chart := range *w.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect.go b/src/go/plugin/go.d/modules/windows/collect.go
new file mode 100644
index 000000000..22421e221
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect.go
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const precision = 1000
+
+const (
+ collectorAD = "ad"
+ collectorADCS = "adcs"
+ collectorADFS = "adfs"
+ collectorCPU = "cpu"
+ collectorMemory = "memory"
+ collectorNet = "net"
+ collectorLogicalDisk = "logical_disk"
+ collectorOS = "os"
+ collectorSystem = "system"
+ collectorLogon = "logon"
+ collectorThermalZone = "thermalzone"
+ collectorTCP = "tcp"
+ collectorIIS = "iis"
+ collectorMSSQL = "mssql"
+ collectorProcess = "process"
+ collectorService = "service"
+ collectorNetFrameworkCLRExceptions = "netframework_clrexceptions"
+ collectorNetFrameworkCLRInterop = "netframework_clrinterop"
+ collectorNetFrameworkCLRJIT = "netframework_clrjit"
+ collectorNetFrameworkCLRLoading = "netframework_clrloading"
+ collectorNetFrameworkCLRLocksAndThreads = "netframework_clrlocksandthreads"
+ collectorNetFrameworkCLRMemory = "netframework_clrmemory"
+ collectorNetFrameworkCLRRemoting = "netframework_clrremoting"
+ collectorNetFrameworkCLRSecurity = "netframework_clrsecurity"
+ collectorExchange = "exchange"
+ collectorHyperv = "hyperv"
+)
+
+func (w *Windows) collect() (map[string]int64, error) {
+ pms, err := w.prom.ScrapeSeries()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+ w.collectMetrics(mx, pms)
+
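+ // Derived metrics computed from the scraped values; hasKey ensures all inputs
+ // are present before combining them.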
+ if hasKey(mx, "os_visible_memory_bytes", "memory_available_bytes") {
+ mx["memory_used_bytes"] = 0 +
+ mx["os_visible_memory_bytes"] -
+ mx["memory_available_bytes"]
+ }
+ if hasKey(mx, "os_paging_limit_bytes", "os_paging_free_bytes") {
+ mx["os_paging_used_bytes"] = 0 +
+ mx["os_paging_limit_bytes"] -
+ mx["os_paging_free_bytes"]
+ }
+ if hasKey(mx, "os_visible_memory_bytes", "os_physical_memory_free_bytes") {
+ mx["os_visible_memory_used_bytes"] = 0 +
+ mx["os_visible_memory_bytes"] -
+ mx["os_physical_memory_free_bytes"]
+ }
+ if hasKey(mx, "memory_commit_limit", "memory_committed_bytes") {
+ mx["memory_not_committed_bytes"] = 0 +
+ mx["memory_commit_limit"] -
+ mx["memory_committed_bytes"]
+ }
+ if hasKey(mx, "memory_standby_cache_reserve_bytes", "memory_standby_cache_normal_priority_bytes", "memory_standby_cache_core_bytes") {
+ mx["memory_standby_cache_total"] = 0 +
+ mx["memory_standby_cache_reserve_bytes"] +
+ mx["memory_standby_cache_normal_priority_bytes"] +
+ mx["memory_standby_cache_core_bytes"]
+ }
+ if hasKey(mx, "memory_standby_cache_total", "memory_modified_page_list_bytes") {
+ mx["memory_cache_total"] = 0 +
+ mx["memory_standby_cache_total"] +
+ mx["memory_modified_page_list_bytes"]
+ }
+
+ return mx, nil
+}
+
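+// collectMetrics always gathers the exporter's own collector status, then dispatches
+// to the per-collector parsers only for collectors whose
+// windows_exporter_collector_success value is non-zero; output from failed collectors
+// is ignored.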
+func (w *Windows) collectMetrics(mx map[string]int64, pms prometheus.Series) {
+ w.collectCollector(mx, pms)
+ for _, pm := range pms.FindByName(metricCollectorSuccess) {
+ if pm.Value == 0 {
+ continue
+ }
+
+ switch pm.Labels.Get("collector") {
+ case collectorCPU:
+ w.collectCPU(mx, pms)
+ case collectorMemory:
+ w.collectMemory(mx, pms)
+ case collectorNet:
+ w.collectNet(mx, pms)
+ case collectorLogicalDisk:
+ w.collectLogicalDisk(mx, pms)
+ case collectorOS:
+ w.collectOS(mx, pms)
+ case collectorSystem:
+ w.collectSystem(mx, pms)
+ case collectorLogon:
+ w.collectLogon(mx, pms)
+ case collectorThermalZone:
+ w.collectThermalzone(mx, pms)
+ case collectorTCP:
+ w.collectTCP(mx, pms)
+ case collectorProcess:
+ w.collectProcess(mx, pms)
+ case collectorService:
+ w.collectService(mx, pms)
+ case collectorIIS:
+ w.collectIIS(mx, pms)
+ case collectorMSSQL:
+ w.collectMSSQL(mx, pms)
+ case collectorAD:
+ w.collectAD(mx, pms)
+ case collectorADCS:
+ w.collectADCS(mx, pms)
+ case collectorADFS:
+ w.collectADFS(mx, pms)
+ case collectorNetFrameworkCLRExceptions:
+ w.collectNetFrameworkCLRExceptions(mx, pms)
+ case collectorNetFrameworkCLRInterop:
+ w.collectNetFrameworkCLRInterop(mx, pms)
+ case collectorNetFrameworkCLRJIT:
+ w.collectNetFrameworkCLRJIT(mx, pms)
+ case collectorNetFrameworkCLRLoading:
+ w.collectNetFrameworkCLRLoading(mx, pms)
+ case collectorNetFrameworkCLRLocksAndThreads:
+ w.collectNetFrameworkCLRLocksAndThreads(mx, pms)
+ case collectorNetFrameworkCLRMemory:
+ w.collectNetFrameworkCLRMemory(mx, pms)
+ case collectorNetFrameworkCLRRemoting:
+ w.collectNetFrameworkCLRRemoting(mx, pms)
+ case collectorNetFrameworkCLRSecurity:
+ w.collectNetFrameworkCLRSecurity(mx, pms)
+ case collectorExchange:
+ w.collectExchange(mx, pms)
+ case collectorHyperv:
+ w.collectHyperv(mx, pms)
+ }
+ }
+}
+
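+// hasKey reports whether every listed key is present in mx, recursing over the variadic tail.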
+func hasKey(mx map[string]int64, key string, keys ...string) bool {
+ _, ok := mx[key]
+ switch len(keys) {
+ case 0:
+ return ok
+ default:
+ return ok && hasKey(mx, keys[0], keys[1:]...)
+ }
+}
+
+func boolToInt(v bool) int64 {
+ if v {
+ return 1
+ }
+ return 0
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_ad.go b/src/go/plugin/go.d/modules/windows/collect_ad.go
new file mode 100644
index 000000000..5a99ce5c8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_ad.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+
+// Windows exporter:
+// https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md
+// Microsoft:
+// https://learn.microsoft.com/en-us/previous-versions/ms803980(v=msdn.10)
+const (
+ metricADATQAverageRequestLatency = "windows_ad_atq_average_request_latency"
+ metricADATQOutstandingRequests = "windows_ad_atq_outstanding_requests"
+ metricADDatabaseOperationsTotal = "windows_ad_database_operations_total"
+ metricADDirectoryOperationsTotal = "windows_ad_directory_operations_total"
+ metricADReplicationInboundObjectsFilteringTotal = "windows_ad_replication_inbound_objects_filtered_total"
+ metricADReplicationInboundPropertiesFilteredTotal = "windows_ad_replication_inbound_properties_filtered_total"
+ metricADReplicationInboundPropertiesUpdatedTotal = "windows_ad_replication_inbound_properties_updated_total"
+ metricADReplicationInboundSyncObjectsRemaining = "windows_ad_replication_inbound_sync_objects_remaining"
+ metricADReplicationDataInterSiteBytesTotal = "windows_ad_replication_data_intersite_bytes_total"
+ metricADReplicationDataIntraSiteBytesTotal = "windows_ad_replication_data_intrasite_bytes_total"
+ metricADReplicationPendingSyncs = "windows_ad_replication_pending_synchronizations"
+ metricADReplicationSyncRequestsTotal = "windows_ad_replication_sync_requests_total"
+ metricADDirectoryServiceThreads = "windows_ad_directory_service_threads"
+ metricADLDAPLastBindTimeSecondsTotal = "windows_ad_ldap_last_bind_time_seconds"
+ metricADBindsTotal = "windows_ad_binds_total"
+ metricADLDAPSearchesTotal = "windows_ad_ldap_searches_total"
+ metricADNameCacheLookupsTotal = "windows_ad_name_cache_lookups_total"
+ metricADNameCacheHitsTotal = "windows_ad_name_cache_hits_total"
+)
+
+func (w *Windows) collectAD(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorAD] {
+ w.cache.collection[collectorAD] = true
+ w.addADCharts()
+ }
+
+ if pm := pms.FindByName(metricADATQAverageRequestLatency); pm.Len() > 0 {
+ mx["ad_atq_average_request_latency"] = int64(pm.Max() * precision)
+ }
+ if pm := pms.FindByName(metricADATQOutstandingRequests); pm.Len() > 0 {
+ mx["ad_atq_outstanding_requests"] = int64(pm.Max())
+ }
+ for _, pm := range pms.FindByName(metricADDatabaseOperationsTotal) {
+ if op := pm.Labels.Get("operation"); op != "" {
+ mx["ad_database_operations_total_"+op] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricADDirectoryOperationsTotal) {
+ if op := pm.Labels.Get("operation"); op != "" {
+ mx["ad_directory_operations_total_"+op] += int64(pm.Value) // sum "origin"
+ }
+ }
+ if pm := pms.FindByName(metricADReplicationInboundObjectsFilteringTotal); pm.Len() > 0 {
+ mx["ad_replication_inbound_objects_filtered_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricADReplicationInboundPropertiesFilteredTotal); pm.Len() > 0 {
+ mx["ad_replication_inbound_properties_filtered_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricADReplicationInboundPropertiesUpdatedTotal); pm.Len() > 0 {
+ mx["ad_replication_inbound_properties_updated_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricADReplicationInboundSyncObjectsRemaining); pm.Len() > 0 {
+ mx["ad_replication_inbound_sync_objects_remaining"] = int64(pm.Max())
+ }
+ for _, pm := range pms.FindByName(metricADReplicationDataInterSiteBytesTotal) {
+ if name := pm.Labels.Get("direction"); name != "" {
+ mx["ad_replication_data_intersite_bytes_total_"+name] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricADReplicationDataIntraSiteBytesTotal) {
+ if name := pm.Labels.Get("direction"); name != "" {
+ mx["ad_replication_data_intrasite_bytes_total_"+name] = int64(pm.Value)
+ }
+ }
+ if pm := pms.FindByName(metricADReplicationPendingSyncs); pm.Len() > 0 {
+ mx["ad_replication_pending_synchronizations"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricADReplicationSyncRequestsTotal); pm.Len() > 0 {
+ mx["ad_replication_sync_requests_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricADDirectoryServiceThreads); pm.Len() > 0 {
+ mx["ad_directory_service_threads"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricADLDAPLastBindTimeSecondsTotal); pm.Len() > 0 {
+ mx["ad_ldap_last_bind_time_seconds"] = int64(pm.Max())
+ }
+ for _, pm := range pms.FindByName(metricADBindsTotal) {
+ mx["ad_binds_total"] += int64(pm.Value) // sum "bind_method"'s
+ }
+ if pm := pms.FindByName(metricADLDAPSearchesTotal); pm.Len() > 0 {
+ mx["ad_ldap_searches_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricADNameCacheLookupsTotal); pm.Len() > 0 {
+ mx["ad_name_cache_lookups_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricADNameCacheHitsTotal); pm.Len() > 0 {
+ mx["ad_name_cache_hits_total"] = int64(pm.Max())
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_adcs.go b/src/go/plugin/go.d/modules/windows/collect_adcs.go
new file mode 100644
index 000000000..115eddee5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_adcs.go
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricADCSRequestsTotal = "windows_adcs_requests_total"
+ metricADCSFailedRequestsTotal = "windows_adcs_failed_requests_total"
+ metricADCSIssuedRequestsTotal = "windows_adcs_issued_requests_total"
+ metricADCSPendingRequestsTotal = "windows_adcs_pending_requests_total"
+ metricADCSRequestProcessingTime = "windows_adcs_request_processing_time_seconds"
+ metricADCSRetrievalsTotal = "windows_adcs_retrievals_total"
+ metricADCSRetrievalsProcessingTime = "windows_adcs_retrievals_processing_time_seconds"
+ metricADCSRequestCryptoSigningTime = "windows_adcs_request_cryptographic_signing_time_seconds"
+ metricADCSRequestPolicyModuleProcessingTime = "windows_adcs_request_policy_module_processing_time_seconds"
+ metricADCSChallengeResponseResponsesTotal = "windows_adcs_challenge_responses_total"
+ metricADCSChallengeResponseProcessingTime = "windows_adcs_challenge_response_processing_time_seconds"
+ metricADCSSignedCertTimestampListsTotal = "windows_adcs_signed_certificate_timestamp_lists_total"
+ metricADCSSignedCertTimestampListProcessingTime = "windows_adcs_signed_certificate_timestamp_list_processing_time_seconds"
+)
+
+func (w *Windows) collectADCS(mx map[string]int64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricADCSRequestsTotal,
+ metricADCSFailedRequestsTotal,
+ metricADCSIssuedRequestsTotal,
+ metricADCSPendingRequestsTotal,
+ metricADCSRequestProcessingTime,
+ metricADCSRetrievalsTotal,
+ metricADCSRetrievalsProcessingTime,
+ metricADCSRequestCryptoSigningTime,
+ metricADCSRequestPolicyModuleProcessingTime,
+ metricADCSChallengeResponseResponsesTotal,
+ metricADCSChallengeResponseProcessingTime,
+ metricADCSSignedCertTimestampListsTotal,
+ metricADCSSignedCertTimestampListProcessingTime,
+ )
+
+ seen := make(map[string]bool)
+
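+ // Values reported in seconds are scaled by precision (1000) before the int64
+ // conversion to retain sub-second resolution.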
+ for _, pm := range pms {
+ if tmpl := pm.Labels.Get("cert_template"); tmpl != "" && tmpl != "_Total" {
+ seen[tmpl] = true
+ name := strings.TrimPrefix(pm.Name(), "windows_adcs_")
+ v := pm.Value
+ if strings.HasSuffix(pm.Name(), "_seconds") {
+ v *= precision
+ }
+ mx["adcs_cert_template_"+tmpl+"_"+name] += int64(v)
+ }
+ }
+
+ for template := range seen {
+ if !w.cache.adcs[template] {
+ w.cache.adcs[template] = true
+ w.addCertificateTemplateCharts(template)
+ }
+ }
+ for template := range w.cache.adcs {
+ if !seen[template] {
+ delete(w.cache.adcs, template)
+ w.removeCertificateTemplateCharts(template)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_adfs.go b/src/go/plugin/go.d/modules/windows/collect_adfs.go
new file mode 100644
index 000000000..1802a609a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_adfs.go
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricADFSADLoginConnectionFailuresTotal = "windows_adfs_ad_login_connection_failures_total"
+ metricADFSCertificateAuthenticationsTotal = "windows_adfs_certificate_authentications_total"
+ metricADFSDBArtifactFailureTotal = "windows_adfs_db_artifact_failure_total"
+ metricADFSDBArtifactQueryTimeSeconds = "windows_adfs_db_artifact_query_time_seconds_total"
+ metricADFSDBConfigFailureTotal = "windows_adfs_db_config_failure_total"
+ metricADFSDBQueryTimeSecondsTotal = "windows_adfs_db_config_query_time_seconds_total"
+ metricADFSDeviceAuthenticationsTotal = "windows_adfs_device_authentications_total"
+ metricADFSExternalAuthenticationsFailureTotal = "windows_adfs_external_authentications_failure_total"
+ metricADFSExternalAuthenticationsSuccessTotal = "windows_adfs_external_authentications_success_total"
+ metricADFSExtranetAccountLockoutsTotal = "windows_adfs_extranet_account_lockouts_total"
+ metricADFSFederatedAuthenticationsTotal = "windows_adfs_federated_authentications_total"
+ metricADFSFederationMetadataRequestsTotal = "windows_adfs_federation_metadata_requests_total"
+
+ metricADFSOauthAuthorizationRequestsTotal = "windows_adfs_oauth_authorization_requests_total"
+ metricADFSOauthClientAuthenticationFailureTotal = "windows_adfs_oauth_client_authentication_failure_total"
+ metricADFSOauthClientAuthenticationSuccessTotal = "windows_adfs_oauth_client_authentication_success_total"
+ metricADFSOauthClientCredentialsFailureTotal = "windows_adfs_oauth_client_credentials_failure_total"
+ metricADFSOauthClientCredentialsSuccessTotal = "windows_adfs_oauth_client_credentials_success_total"
+ metricADFSOauthClientPrivKeyJTWAuthenticationFailureTotal = "windows_adfs_oauth_client_privkey_jtw_authentication_failure_total"
+ metricADFSOauthClientPrivKeyJWTAuthenticationSuccessTotal = "windows_adfs_oauth_client_privkey_jwt_authentications_success_total"
+ metricADFSOauthClientSecretBasicAuthenticationsFailureTotal = "windows_adfs_oauth_client_secret_basic_authentications_failure_total"
+ metricADFSADFSOauthClientSecretBasicAuthenticationsSuccessTotal = "windows_adfs_oauth_client_secret_basic_authentications_success_total"
+ metricADFSOauthClientSecretPostAuthenticationsFailureTotal = "windows_adfs_oauth_client_secret_post_authentications_failure_total"
+ metricADFSOauthClientSecretPostAuthenticationsSuccessTotal = "windows_adfs_oauth_client_secret_post_authentications_success_total"
+ metricADFSOauthClientWindowsAuthenticationsFailureTotal = "windows_adfs_oauth_client_windows_authentications_failure_total"
+ metricADFSOauthClientWindowsAuthenticationsSuccessTotal = "windows_adfs_oauth_client_windows_authentications_success_total"
+ metricADFSOauthLogonCertificateRequestsFailureTotal = "windows_adfs_oauth_logon_certificate_requests_failure_total"
+ metricADFSOauthLogonCertificateTokenRequestsSuccessTotal = "windows_adfs_oauth_logon_certificate_token_requests_success_total"
+ metricADFSOauthPasswordGrantRequestsFailureTotal = "windows_adfs_oauth_password_grant_requests_failure_total"
+ metricADFSOauthPasswordGrantRequestsSuccessTotal = "windows_adfs_oauth_password_grant_requests_success_total"
+ metricADFSOauthTokenRequestsSuccessTotal = "windows_adfs_oauth_token_requests_success_total"
+
+ metricADFSPassiveRequestsTotal = "windows_adfs_passive_requests_total"
+ metricADFSPassportAuthenticationsTotal = "windows_adfs_passport_authentications_total"
+ metricADFSPasswordChangeFailedTotal = "windows_adfs_password_change_failed_total"
+ metricADFSPasswordChangeSucceededTotal = "windows_adfs_password_change_succeeded_total"
+ metricADFSSamlpTokenRequestsSuccessTotal = "windows_adfs_samlp_token_requests_success_total"
+ metricADFSSSOAuthenticationsFailureTotal = "windows_adfs_sso_authentications_failure_total"
+ metricADFSSSOAuthenticationsSuccessTotal = "windows_adfs_sso_authentications_success_total"
+ metricADFSTokenRequestsTotal = "windows_adfs_token_requests_total"
+ metricADFSUserPasswordAuthenticationsFailureTotal = "windows_adfs_userpassword_authentications_failure_total"
+ metricADFSUserPasswordAuthenticationsSuccessTotal = "windows_adfs_userpassword_authentications_success_total"
+ metricADFSWindowsIntegratedAuthenticationsTotal = "windows_adfs_windows_integrated_authentications_total"
+ metricADFSWSFedTokenRequestsSuccessTotal = "windows_adfs_wsfed_token_requests_success_total"
+ metricADFSWSTrustTokenRequestsSuccessTotal = "windows_adfs_wstrust_token_requests_success_total"
+)
+
+var adfsMetrics = []string{
+ metricADFSADLoginConnectionFailuresTotal,
+ metricADFSCertificateAuthenticationsTotal,
+ metricADFSDBArtifactFailureTotal,
+ metricADFSDBArtifactQueryTimeSeconds,
+ metricADFSDBConfigFailureTotal,
+ metricADFSDBQueryTimeSecondsTotal,
+ metricADFSDeviceAuthenticationsTotal,
+ metricADFSExternalAuthenticationsFailureTotal,
+ metricADFSExternalAuthenticationsSuccessTotal,
+ metricADFSExtranetAccountLockoutsTotal,
+ metricADFSFederatedAuthenticationsTotal,
+ metricADFSFederationMetadataRequestsTotal,
+ metricADFSOauthAuthorizationRequestsTotal,
+ metricADFSOauthClientAuthenticationFailureTotal,
+ metricADFSOauthClientAuthenticationSuccessTotal,
+ metricADFSOauthClientCredentialsFailureTotal,
+ metricADFSOauthClientCredentialsSuccessTotal,
+ metricADFSOauthClientPrivKeyJTWAuthenticationFailureTotal,
+ metricADFSOauthClientPrivKeyJWTAuthenticationSuccessTotal,
+ metricADFSOauthClientSecretBasicAuthenticationsFailureTotal,
+ metricADFSADFSOauthClientSecretBasicAuthenticationsSuccessTotal,
+ metricADFSOauthClientSecretPostAuthenticationsFailureTotal,
+ metricADFSOauthClientSecretPostAuthenticationsSuccessTotal,
+ metricADFSOauthClientWindowsAuthenticationsFailureTotal,
+ metricADFSOauthClientWindowsAuthenticationsSuccessTotal,
+ metricADFSOauthLogonCertificateRequestsFailureTotal,
+ metricADFSOauthLogonCertificateTokenRequestsSuccessTotal,
+ metricADFSOauthPasswordGrantRequestsFailureTotal,
+ metricADFSOauthPasswordGrantRequestsSuccessTotal,
+ metricADFSOauthTokenRequestsSuccessTotal,
+ metricADFSPassiveRequestsTotal,
+ metricADFSPassportAuthenticationsTotal,
+ metricADFSPasswordChangeFailedTotal,
+ metricADFSPasswordChangeSucceededTotal,
+ metricADFSSamlpTokenRequestsSuccessTotal,
+ metricADFSSSOAuthenticationsFailureTotal,
+ metricADFSSSOAuthenticationsSuccessTotal,
+ metricADFSTokenRequestsTotal,
+ metricADFSUserPasswordAuthenticationsFailureTotal,
+ metricADFSUserPasswordAuthenticationsSuccessTotal,
+ metricADFSWindowsIntegratedAuthenticationsTotal,
+ metricADFSWSFedTokenRequestsSuccessTotal,
+ metricADFSWSTrustTokenRequestsSuccessTotal,
+}
+
+func (w *Windows) collectADFS(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorADFS] {
+ w.cache.collection[collectorADFS] = true
+ w.addADFSCharts()
+ }
+
+ for _, pm := range pms.FindByNames(adfsMetrics...) {
+ name := strings.TrimPrefix(pm.Name(), "windows_")
+ v := pm.Value
+ if strings.HasSuffix(name, "_seconds_total") {
+ v *= precision
+ }
+ mx[name] = int64(v)
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_collector.go b/src/go/plugin/go.d/modules/windows/collect_collector.go
new file mode 100644
index 000000000..f182b9af5
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_collector.go
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricCollectorDuration = "windows_exporter_collector_duration_seconds"
+ metricCollectorSuccess = "windows_exporter_collector_success"
+)
+
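+// collectCollector exposes each exporter collector's scrape duration and success/fail
+// status, and keeps a seen set so collector charts are added and removed as collectors
+// appear or disappear.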
+func (w *Windows) collectCollector(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ px := "collector_"
+ for _, pm := range pms.FindByName(metricCollectorDuration) {
+ if name := pm.Labels.Get("collector"); name != "" {
+ seen[name] = true
+ mx[px+name+"_duration"] = int64(pm.Value * precision)
+ }
+ }
+ for _, pm := range pms.FindByName(metricCollectorSuccess) {
+ if name := pm.Labels.Get("collector"); name != "" {
+ seen[name] = true
+ if pm.Value == 1 {
+ mx[px+name+"_status_success"], mx[px+name+"_status_fail"] = 1, 0
+ } else {
+ mx[px+name+"_status_success"], mx[px+name+"_status_fail"] = 0, 1
+ }
+ }
+ }
+
+ for name := range seen {
+ if !w.cache.collectors[name] {
+ w.cache.collectors[name] = true
+ w.addCollectorCharts(name)
+ }
+ }
+ for name := range w.cache.collectors {
+ if !seen[name] {
+ delete(w.cache.collectors, name)
+ w.removeCollectorCharts(name)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_cpu.go b/src/go/plugin/go.d/modules/windows/collect_cpu.go
new file mode 100644
index 000000000..6a324e5ef
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_cpu.go
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricCPUTimeTotal = "windows_cpu_time_total"
+ metricCPUInterruptsTotal = "windows_cpu_interrupts_total"
+ metricCPUDPCsTotal = "windows_cpu_dpcs_total"
+ metricCPUCStateTotal = "windows_cpu_cstate_seconds_total"
+)
+
+func (w *Windows) collectCPU(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorCPU] {
+ w.cache.collection[collectorCPU] = true
+ w.addCPUCharts()
+ }
+
+ seen := make(map[string]bool)
+ for _, pm := range pms.FindByName(metricCPUTimeTotal) {
+ core := pm.Labels.Get("core")
+ mode := pm.Labels.Get("mode")
+ if core == "" || mode == "" {
+ continue
+ }
+
+ seen[core] = true
+ mx["cpu_"+mode+"_time"] += int64(pm.Value * precision)
+ mx["cpu_core_"+core+"_"+mode+"_time"] += int64(pm.Value * precision)
+ }
+
+ for _, pm := range pms.FindByName(metricCPUInterruptsTotal) {
+ core := pm.Labels.Get("core")
+ if core == "" {
+ continue
+ }
+
+ seen[core] = true
+ mx["cpu_core_"+core+"_interrupts"] += int64(pm.Value)
+ }
+
+ for _, pm := range pms.FindByName(metricCPUDPCsTotal) {
+ core := pm.Labels.Get("core")
+ if core == "" {
+ continue
+ }
+
+ seen[core] = true
+ mx["cpu_core_"+core+"_dpcs"] += int64(pm.Value)
+ }
+
+ for _, pm := range pms.FindByName(metricCPUCStateTotal) {
+ core := pm.Labels.Get("core")
+ state := pm.Labels.Get("state")
+ if core == "" || state == "" {
+ continue
+ }
+
+ seen[core] = true
+ mx["cpu_core_"+core+"_cstate_"+state] += int64(pm.Value * precision)
+ }
+
+ for core := range seen {
+ if !w.cache.cores[core] {
+ w.cache.cores[core] = true
+ w.addCPUCoreCharts(core)
+ }
+ }
+ for core := range w.cache.cores {
+ if !seen[core] {
+ delete(w.cache.cores, core)
+ w.removeCPUCoreCharts(core)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_exchange.go b/src/go/plugin/go.d/modules/windows/collect_exchange.go
new file mode 100644
index 000000000..bbbbfd533
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_exchange.go
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricExchangeActiveSyncPingCmdsPending = "windows_exchange_activesync_ping_cmds_pending"
+ metricExchangeActiveSyncRequestsTotal = "windows_exchange_activesync_requests_total"
+ metricExchangeActiveSyncCMDsTotal = "windows_exchange_activesync_sync_cmds_total"
+ metricExchangeAutoDiscoverRequestsTotal = "windows_exchange_autodiscover_requests_total"
+ metricExchangeAvailServiceRequestsPerSec = "windows_exchange_avail_service_requests_per_sec"
+ metricExchangeOWACurrentUniqueUsers = "windows_exchange_owa_current_unique_users"
+ metricExchangeOWARequestsTotal = "windows_exchange_owa_requests_total"
+ metricExchangeRPCActiveUserCount = "windows_exchange_rpc_active_user_count"
+ metricExchangeRPCAvgLatencySec = "windows_exchange_rpc_avg_latency_sec"
+ metricExchangeRPCConnectionCount = "windows_exchange_rpc_connection_count"
+ metricExchangeRPCOperationsTotal = "windows_exchange_rpc_operations_total"
+ metricExchangeRPCRequests = "windows_exchange_rpc_requests"
+ metricExchangeRPCUserCount = "windows_exchange_rpc_user_count"
+
+ metricExchangeTransportQueuesActiveMailboxDelivery = "windows_exchange_transport_queues_active_mailbox_delivery"
+ metricExchangeTransportQueuesExternalActiveRemoteDelivery = "windows_exchange_transport_queues_external_active_remote_delivery"
+ metricExchangeTransportQueuesExternalLargestDelivery = "windows_exchange_transport_queues_external_largest_delivery"
+ metricExchangeTransportQueuesInternalActiveRemoteDelivery = "windows_exchange_transport_queues_internal_active_remote_delivery"
+ metricExchangeTransportQueuesInternalLargestDelivery = "windows_exchange_transport_queues_internal_largest_delivery"
+ metricExchangeTransportQueuesPoison = "windows_exchange_transport_queues_poison"
+ metricExchangeTransportQueuesRetryMailboxDelivery = "windows_exchange_transport_queues_retry_mailbox_delivery"
+ metricExchangeTransportQueuesUnreachable = "windows_exchange_transport_queues_unreachable"
+
+ metricExchangeWorkloadActiveTasks = "windows_exchange_workload_active_tasks"
+ metricExchangeWorkloadCompletedTasks = "windows_exchange_workload_completed_tasks"
+ metricExchangeWorkloadQueuedTasks = "windows_exchange_workload_queued_tasks"
+ metricExchangeWorkloadYieldedTasks = "windows_exchange_workload_yielded_tasks"
+ metricExchangeWorkloadIsActive = "windows_exchange_workload_is_active"
+
+ metricExchangeLDAPLongRunningOPSPerSec = "windows_exchange_ldap_long_running_ops_per_sec"
+ metricExchangeLDAPReadTimeSec = "windows_exchange_ldap_read_time_sec"
+	metricExchangeLDAPSearchTimeSec = "windows_exchange_ldap_search_time_sec"
+ metricExchangeLDAPWriteTimeSec = "windows_exchange_ldap_write_time_sec"
+ metricExchangeLDAPTimeoutErrorsTotal = "windows_exchange_ldap_timeout_errors_total"
+
+ metricExchangeHTTPProxyAvgAuthLatency = "windows_exchange_http_proxy_avg_auth_latency"
+ metricExchangeHTTPProxyAvgCASProcessingLatencySec = "windows_exchange_http_proxy_avg_cas_proccessing_latency_sec"
+ metricExchangeHTTPProxyMailboxProxyFailureRate = "windows_exchange_http_proxy_mailbox_proxy_failure_rate"
+ metricExchangeHTTPProxyMailboxServerLocatorAvgLatencySec = "windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec"
+ metricExchangeHTTPProxyOutstandingProxyRequests = "windows_exchange_http_proxy_outstanding_proxy_requests"
+ metricExchangeHTTPProxyRequestsTotal = "windows_exchange_http_proxy_requests_total"
+)
+
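+// collectExchange gathers MS Exchange metrics. Scalar metrics are read with Max()
+// and stored directly; per-instance metrics are handled by the transport queue,
+// workload, LDAP and HTTP proxy helpers below.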
+func (w *Windows) collectExchange(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorExchange] {
+ w.cache.collection[collectorExchange] = true
+ w.addExchangeCharts()
+ }
+
+ if pm := pms.FindByName(metricExchangeActiveSyncPingCmdsPending); pm.Len() > 0 {
+ mx["exchange_activesync_ping_cmds_pending"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeActiveSyncRequestsTotal); pm.Len() > 0 {
+ mx["exchange_activesync_requests_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeActiveSyncCMDsTotal); pm.Len() > 0 {
+ mx["exchange_activesync_sync_cmds_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeAutoDiscoverRequestsTotal); pm.Len() > 0 {
+ mx["exchange_autodiscover_requests_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeAvailServiceRequestsPerSec); pm.Len() > 0 {
+ mx["exchange_avail_service_requests_per_sec"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeOWACurrentUniqueUsers); pm.Len() > 0 {
+ mx["exchange_owa_current_unique_users"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeOWARequestsTotal); pm.Len() > 0 {
+ mx["exchange_owa_requests_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeRPCActiveUserCount); pm.Len() > 0 {
+ mx["exchange_rpc_active_user_count"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeRPCAvgLatencySec); pm.Len() > 0 {
+ mx["exchange_rpc_avg_latency_sec"] = int64(pm.Max() * precision)
+ }
+ if pm := pms.FindByName(metricExchangeRPCConnectionCount); pm.Len() > 0 {
+ mx["exchange_rpc_connection_count"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeRPCOperationsTotal); pm.Len() > 0 {
+ mx["exchange_rpc_operations_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeRPCRequests); pm.Len() > 0 {
+ mx["exchange_rpc_requests"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricExchangeRPCUserCount); pm.Len() > 0 {
+ mx["exchange_rpc_user_count"] = int64(pm.Max())
+ }
+
+ w.collectExchangeAddTransportQueueMetric(mx, pms)
+ w.collectExchangeAddWorkloadMetric(mx, pms)
+ w.collectExchangeAddLDAPMetric(mx, pms)
+ w.collectExchangeAddHTTPProxyMetric(mx, pms)
+}
+
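+// collectExchangeAddTransportQueueMetric sums transport queue lengths per queue
+// name, skipping the aggregate "total_excluding_priority_none" series.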
+func (w *Windows) collectExchangeAddTransportQueueMetric(mx map[string]int64, pms prometheus.Series) {
+ pms = pms.FindByNames(
+ metricExchangeTransportQueuesActiveMailboxDelivery,
+ metricExchangeTransportQueuesExternalActiveRemoteDelivery,
+ metricExchangeTransportQueuesExternalLargestDelivery,
+ metricExchangeTransportQueuesInternalActiveRemoteDelivery,
+ metricExchangeTransportQueuesInternalLargestDelivery,
+ metricExchangeTransportQueuesPoison,
+ metricExchangeTransportQueuesRetryMailboxDelivery,
+ metricExchangeTransportQueuesUnreachable,
+ )
+
+ for _, pm := range pms {
+ if name := pm.Labels.Get("name"); name != "" && name != "total_excluding_priority_none" {
+ metric := strings.TrimPrefix(pm.Name(), "windows_")
+ mx[metric+"_"+name] += int64(pm.Value)
+ }
+ }
+}
+
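+// collectExchangeAddWorkloadMetric collects per-workload task counters and the
+// active/paused state, then reconciles the workload chart cache.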
+func (w *Windows) collectExchangeAddWorkloadMetric(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByNames(
+ metricExchangeWorkloadActiveTasks,
+ metricExchangeWorkloadCompletedTasks,
+ metricExchangeWorkloadQueuedTasks,
+ metricExchangeWorkloadYieldedTasks,
+ ) {
+ if name := pm.Labels.Get("name"); name != "" {
+ seen[name] = true
+ metric := strings.TrimPrefix(pm.Name(), "windows_exchange_workload_")
+ mx["exchange_workload_"+name+"_"+metric] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricExchangeWorkloadIsActive) {
+ if name := pm.Labels.Get("name"); name != "" {
+ seen[name] = true
+ mx["exchange_workload_"+name+"_is_active"] += boolToInt(pm.Value == 1)
+ mx["exchange_workload_"+name+"_is_paused"] += boolToInt(pm.Value == 0)
+ }
+ }
+
+ for name := range seen {
+ if !w.cache.exchangeWorkload[name] {
+ w.cache.exchangeWorkload[name] = true
+ w.addExchangeWorkloadCharts(name)
+ }
+ }
+ for name := range w.cache.exchangeWorkload {
+ if !seen[name] {
+ delete(w.cache.exchangeWorkload, name)
+ w.removeExchangeWorkloadCharts(name)
+ }
+ }
+}
+
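+// collectExchangeAddLDAPMetric collects per-process LDAP counters; read/search/write
+// time metrics are scaled by precision to keep fractional seconds as integers.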
+func (w *Windows) collectExchangeAddLDAPMetric(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByNames(
+ metricExchangeLDAPLongRunningOPSPerSec,
+ metricExchangeLDAPTimeoutErrorsTotal,
+ ) {
+ if name := pm.Labels.Get("name"); name != "" {
+ seen[name] = true
+ metric := strings.TrimPrefix(pm.Name(), "windows_exchange_ldap_")
+ mx["exchange_ldap_"+name+"_"+metric] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByNames(
+ metricExchangeLDAPReadTimeSec,
+		metricExchangeLDAPSearchTimeSec,
+ metricExchangeLDAPWriteTimeSec,
+ ) {
+ if name := pm.Labels.Get("name"); name != "" {
+ seen[name] = true
+ metric := strings.TrimPrefix(pm.Name(), "windows_exchange_ldap_")
+ mx["exchange_ldap_"+name+"_"+metric] += int64(pm.Value * precision)
+ }
+ }
+
+ for name := range seen {
+ if !w.cache.exchangeLDAP[name] {
+ w.cache.exchangeLDAP[name] = true
+ w.addExchangeLDAPCharts(name)
+ }
+ }
+ for name := range w.cache.exchangeLDAP {
+ if !seen[name] {
+ delete(w.cache.exchangeLDAP, name)
+ w.removeExchangeLDAPCharts(name)
+ }
+ }
+}
+
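+// collectExchangeAddHTTPProxyMetric collects per-proxy ("name" label) HTTP proxy
+// counters; fractional latency and failure-rate metrics are scaled by precision.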
+func (w *Windows) collectExchangeAddHTTPProxyMetric(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByNames(
+ metricExchangeHTTPProxyAvgAuthLatency,
+ metricExchangeHTTPProxyOutstandingProxyRequests,
+ metricExchangeHTTPProxyRequestsTotal,
+ ) {
+ if name := pm.Labels.Get("name"); name != "" {
+ seen[name] = true
+ metric := strings.TrimPrefix(pm.Name(), "windows_exchange_http_proxy_")
+ mx["exchange_http_proxy_"+name+"_"+metric] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByNames(
+ metricExchangeHTTPProxyAvgCASProcessingLatencySec,
+ metricExchangeHTTPProxyMailboxProxyFailureRate,
+ metricExchangeHTTPProxyMailboxServerLocatorAvgLatencySec,
+ ) {
+ if name := pm.Labels.Get("name"); name != "" {
+ seen[name] = true
+ metric := strings.TrimPrefix(pm.Name(), "windows_exchange_http_proxy_")
+ mx["exchange_http_proxy_"+name+"_"+metric] += int64(pm.Value * precision)
+ }
+ }
+
+ for name := range seen {
+ if !w.cache.exchangeHTTPProxy[name] {
+ w.cache.exchangeHTTPProxy[name] = true
+ w.addExchangeHTTPProxyCharts(name)
+ }
+ }
+ for name := range w.cache.exchangeHTTPProxy {
+ if !seen[name] {
+ delete(w.cache.exchangeHTTPProxy, name)
+ w.removeExchangeHTTPProxyCharts(name)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_hyperv.go b/src/go/plugin/go.d/modules/windows/collect_hyperv.go
new file mode 100644
index 000000000..f7cf2c60a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_hyperv.go
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricHypervHealthCritical = "windows_hyperv_health_critical"
+ metricHypervHealthOK = "windows_hyperv_health_ok"
+
+ metricHypervRootPartition4KGPAPages = "windows_hyperv_root_partition_4K_gpa_pages"
+ metricHypervRootPartition2MGPAPages = "windows_hyperv_root_partition_2M_gpa_pages"
+ metricHypervRootPartition1GGPAPages = "windows_hyperv_root_partition_1G_gpa_pages"
+ metricHypervRootPartition4KDevicePages = "windows_hyperv_root_partition_4K_device_pages"
+ metricHypervRootPartition2MDevicePages = "windows_hyperv_root_partition_2M_device_pages"
+ metricHypervRootPartition1GDevicePages = "windows_hyperv_root_partition_1G_device_pages"
+ metricHypervRootPartitionGPASpaceModifications = "windows_hyperv_root_partition_gpa_space_modifications"
+ metricHypervRootPartitionAttachedDevices = "windows_hyperv_root_partition_attached_devices"
+ metricHypervRootPartitionDepositedPages = "windows_hyperv_root_partition_deposited_pages"
+ metricHypervRootPartitionPhysicalPagesAllocated = "windows_hyperv_root_partition_physical_pages_allocated" // SkippedTimerTicks
+ metricHypervRootPartitionDeviceDMAErrors = "windows_hyperv_root_partition_device_dma_errors"
+ metricHypervRootPartitionDeviceInterruptErrors = "windows_hyperv_root_partition_device_interrupt_errors"
+ metricHypervRootPartitionDeviceInterruptThrottleEvents = "windows_hyperv_root_partition_device_interrupt_throttle_events"
+ metricHypervRootPartitionIOTLBFlush = "windows_hyperv_root_partition_io_tlb_flush"
+ metricHypervRootPartitionAddressSpace = "windows_hyperv_root_partition_address_spaces"
+ metricHypervRootPartitionVirtualTLBPages = "windows_hyperv_root_partition_virtual_tlb_pages"
+ metricHypervRootPartitionVirtualTLBFlushEntries = "windows_hyperv_root_partition_virtual_tlb_flush_entires"
+
+ metricsHypervVMCPUGuestRunTime = "windows_hyperv_vm_cpu_guest_run_time"
+ metricsHypervVMCPUHypervisorRunTime = "windows_hyperv_vm_cpu_hypervisor_run_time"
+ metricsHypervVMCPURemoteRunTime = "windows_hyperv_vm_cpu_remote_run_time"
+ metricsHypervVMCPUTotalRunTime = "windows_hyperv_vm_cpu_total_run_time"
+
+ metricHypervVMMemoryPhysical = "windows_hyperv_vm_memory_physical"
+ metricHypervVMMemoryPhysicalGuestVisible = "windows_hyperv_vm_memory_physical_guest_visible"
+ metricHypervVMMemoryPressureCurrent = "windows_hyperv_vm_memory_pressure_current"
+ metricHyperVVIDPhysicalPagesAllocated = "windows_hyperv_vid_physical_pages_allocated"
+ metricHyperVVIDRemotePhysicalPages = "windows_hyperv_vid_remote_physical_pages"
+
+ metricHypervVMDeviceBytesRead = "windows_hyperv_vm_device_bytes_read"
+ metricHypervVMDeviceBytesWritten = "windows_hyperv_vm_device_bytes_written"
+ metricHypervVMDeviceOperationsRead = "windows_hyperv_vm_device_operations_read"
+ metricHypervVMDeviceOperationsWritten = "windows_hyperv_vm_device_operations_written"
+ metricHypervVMDeviceErrorCount = "windows_hyperv_vm_device_error_count"
+
+ metricHypervVMInterfaceBytesReceived = "windows_hyperv_vm_interface_bytes_received"
+ metricHypervVMInterfaceBytesSent = "windows_hyperv_vm_interface_bytes_sent"
+ metricHypervVMInterfacePacketsIncomingDropped = "windows_hyperv_vm_interface_packets_incoming_dropped"
+ metricHypervVMInterfacePacketsOutgoingDropped = "windows_hyperv_vm_interface_packets_outgoing_dropped"
+ metricHypervVMInterfacePacketsReceived = "windows_hyperv_vm_interface_packets_received"
+ metricHypervVMInterfacePacketsSent = "windows_hyperv_vm_interface_packets_sent"
+
+ metricHypervVSwitchBroadcastPacketsReceivedTotal = "windows_hyperv_vswitch_broadcast_packets_received_total"
+ metricHypervVSwitchBroadcastPacketsSentTotal = "windows_hyperv_vswitch_broadcast_packets_sent_total"
+ metricHypervVSwitchBytesReceivedTotal = "windows_hyperv_vswitch_bytes_received_total"
+ metricHypervVSwitchBytesSentTotal = "windows_hyperv_vswitch_bytes_sent_total"
+ metricHypervVSwitchPacketsReceivedTotal = "windows_hyperv_vswitch_packets_received_total"
+ metricHypervVSwitchPacketsSentTotal = "windows_hyperv_vswitch_packets_sent_total"
+ metricHypervVSwitchDirectedPacketsReceivedTotal = "windows_hyperv_vswitch_directed_packets_received_total"
+ metricHypervVSwitchDirectedPacketsSendTotal = "windows_hyperv_vswitch_directed_packets_send_total"
+ metricHypervVSwitchDroppedPacketsIncomingTotal = "windows_hyperv_vswitch_dropped_packets_incoming_total"
+ metricHypervVSwitchDroppedPacketsOutcomingTotal = "windows_hyperv_vswitch_dropped_packets_outcoming_total"
+	metricHypervVSwitchExtensionDroppedPacketsIncomingTotal = "windows_hyperv_vswitch_extensions_dropped_packets_incoming_total"
+ metricHypervVSwitchExtensionDroppedPacketsOutcomingTotal = "windows_hyperv_vswitch_extensions_dropped_packets_outcoming_total"
+ metricHypervVSwitchLearnedMACAddressTotal = "windows_hyperv_vswitch_learned_mac_addresses_total"
+ metricHypervVSwitchMulticastPacketsReceivedTotal = "windows_hyperv_vswitch_multicast_packets_received_total"
+ metricHypervVSwitchMulticastPacketsSentTotal = "windows_hyperv_vswitch_multicast_packets_sent_total"
+ metricHypervVSwitchNumberOfSendChannelMovesTotal = "windows_hyperv_vswitch_number_of_send_channel_moves_total"
+ metricHypervVSwitchNumberOfVMQMovesTotal = "windows_hyperv_vswitch_number_of_vmq_moves_total"
+ metricHypervVSwitchPacketsFloodedTotal = "windows_hyperv_vswitch_packets_flooded_total"
+ metricHypervVSwitchPurgedMACAddresses = "windows_hyperv_vswitch_purged_mac_addresses_total"
+)
+
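+// collectHyperv gathers Hyper-V health and root partition metrics (stored under the
+// metric name with the "windows_" prefix stripped), then collects per-VM, per-device,
+// per-interface and per-vswitch metrics.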
+func (w *Windows) collectHyperv(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorHyperv] {
+ w.cache.collection[collectorHyperv] = true
+ w.addHypervCharts()
+ }
+
+ for _, v := range []string{
+ metricHypervHealthOK,
+ metricHypervHealthCritical,
+ metricHypervRootPartition4KGPAPages,
+ metricHypervRootPartition2MGPAPages,
+ metricHypervRootPartition1GGPAPages,
+ metricHypervRootPartition4KDevicePages,
+ metricHypervRootPartition2MDevicePages,
+ metricHypervRootPartition1GDevicePages,
+ metricHypervRootPartitionGPASpaceModifications,
+ metricHypervRootPartitionAddressSpace,
+ metricHypervRootPartitionAttachedDevices,
+ metricHypervRootPartitionDepositedPages,
+ metricHypervRootPartitionPhysicalPagesAllocated,
+ metricHypervRootPartitionDeviceDMAErrors,
+ metricHypervRootPartitionDeviceInterruptErrors,
+ metricHypervRootPartitionDeviceInterruptThrottleEvents,
+ metricHypervRootPartitionIOTLBFlush,
+ metricHypervRootPartitionVirtualTLBPages,
+ metricHypervRootPartitionVirtualTLBFlushEntries,
+ } {
+ for _, pm := range pms.FindByName(v) {
+ name := strings.TrimPrefix(pm.Name(), "windows_")
+ mx[name] = int64(pm.Value)
+ }
+ }
+
+ w.collectHypervVM(mx, pms)
+ w.collectHypervVMDevices(mx, pms)
+ w.collectHypervVMInterface(mx, pms)
+ w.collectHypervVSwitch(mx, pms)
+}
+
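+// collectHypervVM collects per-VM memory, CPU run time and VID page metrics, keyed
+// by the sanitized "vm" label, and reconciles the VM chart cache.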
+func (w *Windows) collectHypervVM(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ px := "hyperv_vm_"
+
+ for _, v := range []string{
+ metricHypervVMMemoryPhysical,
+ metricHypervVMMemoryPhysicalGuestVisible,
+ metricHypervVMMemoryPressureCurrent,
+ metricsHypervVMCPUGuestRunTime,
+ metricsHypervVMCPUHypervisorRunTime,
+ metricsHypervVMCPURemoteRunTime,
+ } {
+ for _, pm := range pms.FindByName(v) {
+ if vm := pm.Labels.Get("vm"); vm != "" {
+ name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vm")
+ seen[vm] = true
+ mx[px+hypervCleanName(vm)+name] += int64(pm.Value)
+ }
+ }
+ }
+
+ px = "hyperv_vid_"
+ for _, v := range []string{
+ metricHyperVVIDPhysicalPagesAllocated,
+ metricHyperVVIDRemotePhysicalPages,
+ } {
+ for _, pm := range pms.FindByName(v) {
+ if vm := pm.Labels.Get("vm"); vm != "" {
+ name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vid")
+ seen[vm] = true
+ mx[px+hypervCleanName(vm)+name] = int64(pm.Value)
+ }
+ }
+ }
+
+ for v := range seen {
+ if !w.cache.hypervVMMem[v] {
+ w.cache.hypervVMMem[v] = true
+ w.addHypervVMCharts(v)
+ }
+ }
+ for v := range w.cache.hypervVMMem {
+ if !seen[v] {
+ delete(w.cache.hypervVMMem, v)
+ w.removeHypervVMCharts(v)
+ }
+ }
+}
+
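+// collectHypervVMDevices collects per-device ("vm_device" label) I/O and error
+// counters and reconciles the device chart cache.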
+func (w *Windows) collectHypervVMDevices(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ px := "hyperv_vm_device_"
+
+ for _, v := range []string{
+ metricHypervVMDeviceBytesRead,
+ metricHypervVMDeviceBytesWritten,
+ metricHypervVMDeviceOperationsRead,
+ metricHypervVMDeviceOperationsWritten,
+ metricHypervVMDeviceErrorCount,
+ } {
+ for _, pm := range pms.FindByName(v) {
+ if device := pm.Labels.Get("vm_device"); device != "" {
+ name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vm_device")
+ seen[device] = true
+ mx[px+hypervCleanName(device)+name] = int64(pm.Value)
+ }
+ }
+ }
+
+ for v := range seen {
+ if !w.cache.hypervVMDevices[v] {
+ w.cache.hypervVMDevices[v] = true
+ w.addHypervVMDeviceCharts(v)
+ }
+ }
+ for v := range w.cache.hypervVMDevices {
+ if !seen[v] {
+ delete(w.cache.hypervVMDevices, v)
+ w.removeHypervVMDeviceCharts(v)
+ }
+ }
+}
+
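+// collectHypervVMInterface collects per-interface ("vm_interface" label) traffic
+// counters and reconciles the interface chart cache.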
+func (w *Windows) collectHypervVMInterface(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ px := "hyperv_vm_interface_"
+
+ for _, v := range []string{
+ metricHypervVMInterfaceBytesReceived,
+ metricHypervVMInterfaceBytesSent,
+ metricHypervVMInterfacePacketsIncomingDropped,
+ metricHypervVMInterfacePacketsOutgoingDropped,
+ metricHypervVMInterfacePacketsReceived,
+ metricHypervVMInterfacePacketsSent,
+ } {
+ for _, pm := range pms.FindByName(v) {
+ if iface := pm.Labels.Get("vm_interface"); iface != "" {
+ name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vm_interface")
+ seen[iface] = true
+ mx[px+hypervCleanName(iface)+name] = int64(pm.Value)
+ }
+ }
+ }
+
+ for v := range seen {
+ if !w.cache.hypervVMInterfaces[v] {
+ w.cache.hypervVMInterfaces[v] = true
+ w.addHypervVMInterfaceCharts(v)
+ }
+ }
+ for v := range w.cache.hypervVMInterfaces {
+ if !seen[v] {
+ delete(w.cache.hypervVMInterfaces, v)
+ w.removeHypervVMInterfaceCharts(v)
+ }
+ }
+}
+
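+// collectHypervVSwitch collects per-vswitch traffic, drop and MAC address counters
+// and reconciles the vswitch chart cache.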
+func (w *Windows) collectHypervVSwitch(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ px := "hyperv_vswitch_"
+
+ for _, v := range []string{
+ metricHypervVSwitchBytesReceivedTotal,
+ metricHypervVSwitchBytesSentTotal,
+ metricHypervVSwitchPacketsReceivedTotal,
+ metricHypervVSwitchPacketsSentTotal,
+ metricHypervVSwitchDirectedPacketsReceivedTotal,
+ metricHypervVSwitchDirectedPacketsSendTotal,
+ metricHypervVSwitchBroadcastPacketsReceivedTotal,
+ metricHypervVSwitchBroadcastPacketsSentTotal,
+ metricHypervVSwitchMulticastPacketsReceivedTotal,
+ metricHypervVSwitchMulticastPacketsSentTotal,
+ metricHypervVSwitchDroppedPacketsIncomingTotal,
+ metricHypervVSwitchDroppedPacketsOutcomingTotal,
+		metricHypervVSwitchExtensionDroppedPacketsIncomingTotal,
+ metricHypervVSwitchExtensionDroppedPacketsOutcomingTotal,
+ metricHypervVSwitchPacketsFloodedTotal,
+ metricHypervVSwitchLearnedMACAddressTotal,
+ metricHypervVSwitchPurgedMACAddresses,
+ metricHypervVSwitchNumberOfSendChannelMovesTotal,
+ metricHypervVSwitchNumberOfVMQMovesTotal,
+ } {
+ for _, pm := range pms.FindByName(v) {
+ if vswitch := pm.Labels.Get("vswitch"); vswitch != "" {
+ name := strings.TrimPrefix(pm.Name(), "windows_hyperv_vswitch")
+ seen[vswitch] = true
+ mx[px+hypervCleanName(vswitch)+name] = int64(pm.Value)
+ }
+ }
+ }
+
+ for v := range seen {
+ if !w.cache.hypervVswitch[v] {
+ w.cache.hypervVswitch[v] = true
+ w.addHypervVSwitchCharts(v)
+ }
+ }
+ for v := range w.cache.hypervVswitch {
+ if !seen[v] {
+ delete(w.cache.hypervVswitch, v)
+ w.removeHypervVSwitchCharts(v)
+ }
+ }
+}
+
+var hypervNameReplacer = strings.NewReplacer(" ", "_", "?", "_", ":", "_", ".", "_")
+
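+// hypervCleanName lowercases a Hyper-V object name and replaces characters that are
+// unsafe in dimension IDs.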
+func hypervCleanName(name string) string {
+ name = hypervNameReplacer.Replace(name)
+ return strings.ToLower(name)
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_iis.go b/src/go/plugin/go.d/modules/windows/collect_iis.go
new file mode 100644
index 000000000..5218e64e1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_iis.go
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricIISCurrentAnonymousUsers = "windows_iis_current_anonymous_users"
+ metricIISCurrentNonAnonymousUsers = "windows_iis_current_non_anonymous_users"
+ metricIISCurrentConnections = "windows_iis_current_connections"
+	metricIISCurrentISAPIExtRequests = "windows_iis_current_isapi_extension_requests"
+ metricIISUptime = "windows_iis_service_uptime"
+
+ metricIISReceivedBytesTotal = "windows_iis_received_bytes_total"
+ metricIISSentBytesTotal = "windows_iis_sent_bytes_total"
+ metricIISRequestsTotal = "windows_iis_requests_total"
+ metricIISIPAPIExtRequestsTotal = "windows_iis_ipapi_extension_requests_total"
+ metricIISConnAttemptsAllInstancesTotal = "windows_iis_connection_attempts_all_instances_total"
+ metricIISFilesReceivedTotal = "windows_iis_files_received_total"
+ metricIISFilesSentTotal = "windows_iis_files_sent_total"
+ metricIISLogonAttemptsTotal = "windows_iis_logon_attempts_total"
+ metricIISLockedErrorsTotal = "windows_iis_locked_errors_total"
+ metricIISNotFoundErrorsTotal = "windows_iis_not_found_errors_total"
+)
+
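+// collectIIS collects per-website ("site" label) connection, traffic, request and
+// error counters, and reconciles the website chart cache.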
+func (w *Windows) collectIIS(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ px := "iis_website_"
+ for _, pm := range pms.FindByName(metricIISCurrentAnonymousUsers) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_current_anonymous_users"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISCurrentNonAnonymousUsers) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_current_non_anonymous_users"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISCurrentConnections) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_current_connections"] += int64(pm.Value)
+ }
+ }
+	for _, pm := range pms.FindByName(metricIISCurrentISAPIExtRequests) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_current_isapi_extension_requests"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISUptime) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_service_uptime"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISReceivedBytesTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_received_bytes_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISSentBytesTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_sent_bytes_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISRequestsTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_requests_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISConnAttemptsAllInstancesTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_connection_attempts_all_instances_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISFilesReceivedTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_files_received_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISFilesSentTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_files_sent_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISIPAPIExtRequestsTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_isapi_extension_requests_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISLogonAttemptsTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_logon_attempts_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISLockedErrorsTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_locked_errors_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricIISNotFoundErrorsTotal) {
+ if name := cleanWebsiteName(pm.Labels.Get("site")); name != "" {
+ seen[name] = true
+ mx[px+name+"_not_found_errors_total"] += int64(pm.Value)
+ }
+ }
+
+ for site := range seen {
+ if !w.cache.iis[site] {
+ w.cache.iis[site] = true
+ w.addIISWebsiteCharts(site)
+ }
+ }
+ for site := range w.cache.iis {
+ if !seen[site] {
+ delete(w.cache.iis, site)
+ w.removeIIWebsiteSCharts(site)
+ }
+ }
+}
+
+func cleanWebsiteName(name string) string {
+ return strings.ReplaceAll(name, " ", "_")
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_logical_disk.go b/src/go/plugin/go.d/modules/windows/collect_logical_disk.go
new file mode 100644
index 000000000..0db52f8cd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_logical_disk.go
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricLDReadBytesTotal = "windows_logical_disk_read_bytes_total"
+ metricLDWriteBytesTotal = "windows_logical_disk_write_bytes_total"
+ metricLDReadsTotal = "windows_logical_disk_reads_total"
+ metricLDWritesTotal = "windows_logical_disk_writes_total"
+ metricLDSizeBytes = "windows_logical_disk_size_bytes"
+ metricLDFreeBytes = "windows_logical_disk_free_bytes"
+ metricLDReadLatencyTotal = "windows_logical_disk_read_latency_seconds_total"
+ metricLDWriteLatencyTotal = "windows_logical_disk_write_latency_seconds_total"
+)
+
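+// collectLogicalDisk collects per-volume I/O, latency and space metrics, skipping
+// volumes named "HarddiskVolume*" (no drive letter), and derives used space from
+// total and free space.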
+func (w *Windows) collectLogicalDisk(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ px := "logical_disk_"
+ for _, pm := range pms.FindByName(metricLDReadBytesTotal) {
+ vol := pm.Labels.Get("volume")
+ if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") {
+ seen[vol] = true
+ mx[px+vol+"_read_bytes_total"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricLDWriteBytesTotal) {
+ vol := pm.Labels.Get("volume")
+ if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") {
+ seen[vol] = true
+ mx[px+vol+"_write_bytes_total"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricLDReadsTotal) {
+ vol := pm.Labels.Get("volume")
+ if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") {
+ seen[vol] = true
+ mx[px+vol+"_reads_total"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricLDWritesTotal) {
+ vol := pm.Labels.Get("volume")
+ if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") {
+ seen[vol] = true
+ mx[px+vol+"_writes_total"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricLDSizeBytes) {
+ vol := pm.Labels.Get("volume")
+ if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") {
+ seen[vol] = true
+ mx[px+vol+"_total_space"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricLDFreeBytes) {
+ vol := pm.Labels.Get("volume")
+ if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") {
+ seen[vol] = true
+ mx[px+vol+"_free_space"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricLDReadLatencyTotal) {
+ vol := pm.Labels.Get("volume")
+ if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") {
+ seen[vol] = true
+ mx[px+vol+"_read_latency"] = int64(pm.Value * precision)
+ }
+ }
+ for _, pm := range pms.FindByName(metricLDWriteLatencyTotal) {
+ vol := pm.Labels.Get("volume")
+ if vol != "" && !strings.HasPrefix(vol, "HarddiskVolume") {
+ seen[vol] = true
+ mx[px+vol+"_write_latency"] = int64(pm.Value * precision)
+ }
+ }
+
+ for disk := range seen {
+ if !w.cache.volumes[disk] {
+ w.cache.volumes[disk] = true
+ w.addDiskCharts(disk)
+ }
+ mx[px+disk+"_used_space"] = mx[px+disk+"_total_space"] - mx[px+disk+"_free_space"]
+ }
+ for disk := range w.cache.volumes {
+ if !seen[disk] {
+ delete(w.cache.volumes, disk)
+ w.removeDiskCharts(disk)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_logon.go b/src/go/plugin/go.d/modules/windows/collect_logon.go
new file mode 100644
index 000000000..7db0024ca
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_logon.go
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricLogonType = "windows_logon_logon_type"
+)
+
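+// collectLogon exposes the number of logon sessions per logon type ("status" label).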
+func (w *Windows) collectLogon(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorLogon] {
+ w.cache.collection[collectorLogon] = true
+ w.addLogonCharts()
+ }
+
+ for _, pm := range pms.FindByName(metricLogonType) {
+ if v := pm.Labels.Get("status"); v != "" {
+ mx["logon_type_"+v+"_sessions"] = int64(pm.Value)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_memory.go b/src/go/plugin/go.d/modules/windows/collect_memory.go
new file mode 100644
index 000000000..36123e4dd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_memory.go
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricMemAvailBytes = "windows_memory_available_bytes"
+ metricMemCacheFaultsTotal = "windows_memory_cache_faults_total"
+ metricMemCommitLimit = "windows_memory_commit_limit"
+ metricMemCommittedBytes = "windows_memory_committed_bytes"
+ metricMemModifiedPageListBytes = "windows_memory_modified_page_list_bytes"
+ metricMemPageFaultsTotal = "windows_memory_page_faults_total"
+ metricMemSwapPageReadsTotal = "windows_memory_swap_page_reads_total"
+ metricMemSwapPagesReadTotal = "windows_memory_swap_pages_read_total"
+ metricMemSwapPagesWrittenTotal = "windows_memory_swap_pages_written_total"
+ metricMemSwapPageWritesTotal = "windows_memory_swap_page_writes_total"
+ metricMemPoolNonPagedBytesTotal = "windows_memory_pool_nonpaged_bytes"
+ metricMemPoolPagedBytes = "windows_memory_pool_paged_bytes"
+ metricMemStandbyCacheCoreBytes = "windows_memory_standby_cache_core_bytes"
+ metricMemStandbyCacheNormalPriorityBytes = "windows_memory_standby_cache_normal_priority_bytes"
+ metricMemStandbyCacheReserveBytes = "windows_memory_standby_cache_reserve_bytes"
+)
+
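+// collectMemory reads system-wide memory gauges and counters; each value is taken
+// with Max() over the matching series.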
+func (w *Windows) collectMemory(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorMemory] {
+ w.cache.collection[collectorMemory] = true
+ w.addMemoryCharts()
+ }
+
+ if pm := pms.FindByName(metricMemAvailBytes); pm.Len() > 0 {
+ mx["memory_available_bytes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemCacheFaultsTotal); pm.Len() > 0 {
+ mx["memory_cache_faults_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemCommitLimit); pm.Len() > 0 {
+ mx["memory_commit_limit"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemCommittedBytes); pm.Len() > 0 {
+ mx["memory_committed_bytes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemModifiedPageListBytes); pm.Len() > 0 {
+ mx["memory_modified_page_list_bytes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemPageFaultsTotal); pm.Len() > 0 {
+ mx["memory_page_faults_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemSwapPageReadsTotal); pm.Len() > 0 {
+ mx["memory_swap_page_reads_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemSwapPagesReadTotal); pm.Len() > 0 {
+ mx["memory_swap_pages_read_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemSwapPagesWrittenTotal); pm.Len() > 0 {
+ mx["memory_swap_pages_written_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemSwapPageWritesTotal); pm.Len() > 0 {
+ mx["memory_swap_page_writes_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemPoolNonPagedBytesTotal); pm.Len() > 0 {
+ mx["memory_pool_nonpaged_bytes_total"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemPoolPagedBytes); pm.Len() > 0 {
+ mx["memory_pool_paged_bytes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemStandbyCacheCoreBytes); pm.Len() > 0 {
+ mx["memory_standby_cache_core_bytes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemStandbyCacheNormalPriorityBytes); pm.Len() > 0 {
+ mx["memory_standby_cache_normal_priority_bytes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricMemStandbyCacheReserveBytes); pm.Len() > 0 {
+ mx["memory_standby_cache_reserve_bytes"] = int64(pm.Max())
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_mssql.go b/src/go/plugin/go.d/modules/windows/collect_mssql.go
new file mode 100644
index 000000000..2a6078f28
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_mssql.go
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricMSSQLAccessMethodPageSplits = "windows_mssql_accessmethods_page_splits"
+ metricMSSQLBufferCacheHits = "windows_mssql_bufman_buffer_cache_hits"
+ metricMSSQLBufferCacheLookups = "windows_mssql_bufman_buffer_cache_lookups"
+ metricMSSQLBufferCheckpointPages = "windows_mssql_bufman_checkpoint_pages"
+ metricMSSQLBufferPageLifeExpectancy = "windows_mssql_bufman_page_life_expectancy_seconds"
+ metricMSSQLBufferPageReads = "windows_mssql_bufman_page_reads"
+ metricMSSQLBufferPageWrites = "windows_mssql_bufman_page_writes"
+ metricMSSQLBlockedProcesses = "windows_mssql_genstats_blocked_processes"
+ metricMSSQLUserConnections = "windows_mssql_genstats_user_connections"
+ metricMSSQLLockWait = "windows_mssql_locks_lock_wait_seconds"
+ metricMSSQLDeadlocks = "windows_mssql_locks_deadlocks"
+ metricMSSQLConnectionMemoryBytes = "windows_mssql_memmgr_connection_memory_bytes"
+ metricMSSQLExternalBenefitOfMemory = "windows_mssql_memmgr_external_benefit_of_memory"
+ metricMSSQLPendingMemoryGrants = "windows_mssql_memmgr_pending_memory_grants"
+ metricMSSQLSQLErrorsTotal = "windows_mssql_sql_errors_total"
+ metricMSSQLTotalServerMemory = "windows_mssql_memmgr_total_server_memory_bytes"
+ metricMSSQLStatsAutoParameterization = "windows_mssql_sqlstats_auto_parameterization_attempts"
+ metricMSSQLStatsBatchRequests = "windows_mssql_sqlstats_batch_requests"
+ metricMSSQLStatSafeAutoParameterization = "windows_mssql_sqlstats_safe_auto_parameterization_attempts"
+ metricMSSQLCompilations = "windows_mssql_sqlstats_sql_compilations"
+ metricMSSQLRecompilations = "windows_mssql_sqlstats_sql_recompilations"
+
+ metricMSSQLDatabaseActiveTransactions = "windows_mssql_databases_active_transactions"
+ metricMSSQLDatabaseBackupRestoreOperations = "windows_mssql_databases_backup_restore_operations"
+ metricMSSQLDatabaseDataFileSize = "windows_mssql_databases_data_files_size_bytes"
+ metricMSSQLDatabaseLogFlushed = "windows_mssql_databases_log_flushed_bytes"
+ metricMSSQLDatabaseLogFlushes = "windows_mssql_databases_log_flushes"
+ metricMSSQLDatabaseTransactions = "windows_mssql_databases_transactions"
+ metricMSSQLDatabaseWriteTransactions = "windows_mssql_databases_write_transactions"
+)
+
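+// collectMSSQL collects per-instance ("mssql_instance" label) and per-database
+// ("database" label) SQL Server metrics; database cache keys are "instance:database"
+// pairs.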
+func (w *Windows) collectMSSQL(mx map[string]int64, pms prometheus.Series) {
+ instances := make(map[string]bool)
+ dbs := make(map[string]bool)
+ px := "mssql_instance_"
+ for _, pm := range pms.FindByName(metricMSSQLAccessMethodPageSplits) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_accessmethods_page_splits"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLBufferCacheHits) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_bufman_buffer_cache_hits"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLBufferCacheLookups) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" && pm.Value > 0 {
+ instances[name] = true
+ mx[px+name+"_cache_hit_ratio"] = int64(float64(mx[px+name+"_bufman_buffer_cache_hits"]) / pm.Value * 100)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLBufferCheckpointPages) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_bufman_checkpoint_pages"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLBufferPageLifeExpectancy) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_bufman_page_life_expectancy_seconds"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLBufferPageReads) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_bufman_page_reads"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLBufferPageWrites) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_bufman_page_writes"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLBlockedProcesses) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_genstats_blocked_processes"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLUserConnections) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_genstats_user_connections"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLLockWait) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ if res := pm.Labels.Get("resource"); res != "" {
+ mx[px+name+"_resource_"+res+"_locks_lock_wait_seconds"] = int64(pm.Value)
+ }
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLDeadlocks) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ if res := pm.Labels.Get("resource"); res != "" {
+ mx[px+name+"_resource_"+res+"_locks_deadlocks"] = int64(pm.Value)
+ }
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLConnectionMemoryBytes) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_memmgr_connection_memory_bytes"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLExternalBenefitOfMemory) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_memmgr_external_benefit_of_memory"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLPendingMemoryGrants) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_memmgr_pending_memory_grants"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLSQLErrorsTotal) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ if res := pm.Labels.Get("resource"); res != "" && res != "_Total" {
+ dim := mssqlParseResource(res)
+ mx[px+name+"_sql_errors_total_"+dim] = int64(pm.Value)
+ }
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLTotalServerMemory) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_memmgr_total_server_memory_bytes"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLStatsAutoParameterization) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_sqlstats_auto_parameterization_attempts"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLStatsBatchRequests) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_sqlstats_batch_requests"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLStatSafeAutoParameterization) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_sqlstats_safe_auto_parameterization_attempts"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLCompilations) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_sqlstats_sql_compilations"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLRecompilations) {
+ if name := pm.Labels.Get("mssql_instance"); name != "" {
+ instances[name] = true
+ mx[px+name+"_sqlstats_sql_recompilations"] = int64(pm.Value)
+ }
+ }
+
+ px = "mssql_db_"
+ for _, pm := range pms.FindByName(metricMSSQLDatabaseActiveTransactions) {
+ if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" {
+ instances[name], dbs[name+":"+db] = true, true
+ mx[px+db+"_instance_"+name+"_active_transactions"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLDatabaseBackupRestoreOperations) {
+ if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" {
+ instances[name], dbs[name+":"+db] = true, true
+ mx[px+db+"_instance_"+name+"_backup_restore_operations"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLDatabaseDataFileSize) {
+ if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" {
+ instances[name], dbs[name+":"+db] = true, true
+ mx[px+db+"_instance_"+name+"_data_files_size_bytes"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLDatabaseLogFlushed) {
+ if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" {
+ instances[name], dbs[name+":"+db] = true, true
+ mx[px+db+"_instance_"+name+"_log_flushed_bytes"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLDatabaseLogFlushes) {
+ if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" {
+ instances[name], dbs[name+":"+db] = true, true
+ mx[px+db+"_instance_"+name+"_log_flushes"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLDatabaseTransactions) {
+ if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" {
+ instances[name], dbs[name+":"+db] = true, true
+ mx[px+db+"_instance_"+name+"_transactions"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricMSSQLDatabaseWriteTransactions) {
+ if name, db := pm.Labels.Get("mssql_instance"), pm.Labels.Get("database"); name != "" && db != "" {
+ instances[name], dbs[name+":"+db] = true, true
+ mx[px+db+"_instance_"+name+"_write_transactions"] = int64(pm.Value)
+ }
+ }
+
+ for v := range instances {
+ if !w.cache.mssqlInstances[v] {
+ w.cache.mssqlInstances[v] = true
+ w.addMSSQLInstanceCharts(v)
+ }
+ }
+ for v := range w.cache.mssqlInstances {
+ if !instances[v] {
+ delete(w.cache.mssqlInstances, v)
+ w.removeMSSQLInstanceCharts(v)
+ }
+ }
+
+ for v := range dbs {
+ if !w.cache.mssqlDBs[v] {
+ w.cache.mssqlDBs[v] = true
+ if s := strings.Split(v, ":"); len(s) == 2 {
+ w.addMSSQLDBCharts(s[0], s[1])
+ }
+ }
+ }
+ for v := range w.cache.mssqlDBs {
+ if !dbs[v] {
+ delete(w.cache.mssqlDBs, v)
+ if s := strings.Split(v, ":"); len(s) == 2 {
+ w.removeMSSQLDBCharts(s[0], s[1])
+ }
+ }
+ }
+}
+
+func mssqlParseResource(name string) string {
+ name = strings.ReplaceAll(name, " ", "_")
+ return strings.ToLower(name)
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_net.go b/src/go/plugin/go.d/modules/windows/collect_net.go
new file mode 100644
index 000000000..4fe5dd7d6
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_net.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricNetBytesReceivedTotal = "windows_net_bytes_received_total"
+ metricNetBytesSentTotal = "windows_net_bytes_sent_total"
+ metricNetPacketsReceivedTotal = "windows_net_packets_received_total"
+ metricNetPacketsSentTotal = "windows_net_packets_sent_total"
+ metricNetPacketsReceivedDiscardedTotal = "windows_net_packets_received_discarded_total"
+ metricNetPacketsOutboundDiscardedTotal = "windows_net_packets_outbound_discarded_total"
+ metricNetPacketsReceivedErrorsTotal = "windows_net_packets_received_errors_total"
+ metricNetPacketsOutboundErrorsTotal = "windows_net_packets_outbound_errors_total"
+)
+
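+// collectNet collects per-NIC traffic, packet and error counters; byte counters are
+// converted to bits (multiplied by 8) to match the chart units.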
+func (w *Windows) collectNet(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ px := "net_nic_"
+ for _, pm := range pms.FindByName(metricNetBytesReceivedTotal) {
+ if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" {
+ seen[nic] = true
+ mx[px+nic+"_bytes_received"] += int64(pm.Value * 8)
+ }
+ }
+ for _, pm := range pms.FindByName(metricNetBytesSentTotal) {
+ if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" {
+ seen[nic] = true
+ mx[px+nic+"_bytes_sent"] += int64(pm.Value * 8)
+ }
+ }
+ for _, pm := range pms.FindByName(metricNetPacketsReceivedTotal) {
+ if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" {
+ seen[nic] = true
+ mx[px+nic+"_packets_received_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricNetPacketsSentTotal) {
+ if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" {
+ seen[nic] = true
+ mx[px+nic+"_packets_sent_total"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricNetPacketsReceivedDiscardedTotal) {
+ if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" {
+ seen[nic] = true
+ mx[px+nic+"_packets_received_discarded"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricNetPacketsOutboundDiscardedTotal) {
+ if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" {
+ seen[nic] = true
+ mx[px+nic+"_packets_outbound_discarded"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricNetPacketsReceivedErrorsTotal) {
+ if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" {
+ seen[nic] = true
+ mx[px+nic+"_packets_received_errors"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricNetPacketsOutboundErrorsTotal) {
+ if nic := cleanNICID(pm.Labels.Get("nic")); nic != "" {
+ seen[nic] = true
+ mx[px+nic+"_packets_outbound_errors"] += int64(pm.Value)
+ }
+ }
+
+ for nic := range seen {
+ if !w.cache.nics[nic] {
+ w.cache.nics[nic] = true
+ w.addNICCharts(nic)
+ }
+ }
+ for nic := range w.cache.nics {
+ if !seen[nic] {
+ delete(w.cache.nics, nic)
+ w.removeNICCharts(nic)
+ }
+ }
+}
+
+func cleanNICID(id string) string {
+	return strings.ReplaceAll(id, "__", "_")
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_netframework.go b/src/go/plugin/go.d/modules/windows/collect_netframework.go
new file mode 100644
index 000000000..aab9364d3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_netframework.go
@@ -0,0 +1,531 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ netframeworkPrefix = "netframework_"
+)
+
+const (
+ metricNetFrameworkCLRExceptionsThrownTotal = "windows_netframework_clrexceptions_exceptions_thrown_total"
+ metricNetFrameworkCLRExceptionsFiltersTotal = "windows_netframework_clrexceptions_exceptions_filters_total"
+ metricNetFrameworkCLRExceptionsFinallysTotal = "windows_netframework_clrexceptions_exceptions_finallys_total"
+ metricNetFrameworkCLRExceptionsThrowCatchDepthTotal = "windows_netframework_clrexceptions_throw_to_catch_depth_total"
+)
+
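+// collectNetFrameworkCLRExceptions collects per-process CLR exception counters and
+// reconciles the exceptions chart cache.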
+func (w *Windows) collectNetFrameworkCLRExceptions(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRExceptionsThrownTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrexception_thrown_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRExceptionsFiltersTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrexception_filters_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRExceptionsFinallysTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrexception_finallys_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRExceptionsThrowCatchDepthTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrexception_throw_to_catch_depth_total"] += int64(pm.Value)
+ }
+ }
+
+ for proc := range seen {
+ if !w.cache.netFrameworkCLRExceptions[proc] {
+ w.cache.netFrameworkCLRExceptions[proc] = true
+ w.addProcessNetFrameworkExceptionsCharts(proc)
+ }
+ }
+
+ for proc := range w.cache.netFrameworkCLRExceptions {
+ if !seen[proc] {
+ delete(w.cache.netFrameworkCLRExceptions, proc)
+ w.removeProcessFromNetFrameworkExceptionsCharts(proc)
+ }
+ }
+}
+
+const (
+ metricNetFrameworkCLRInteropComCallableWrappersTotal = "windows_netframework_clrinterop_com_callable_wrappers_total"
+ metricNetFrameworkCLRInteropMarshallingTotal = "windows_netframework_clrinterop_interop_marshalling_total"
+ metricNetFrameworkCLRInteropStubsCreatedTotal = "windows_netframework_clrinterop_interop_stubs_created_total"
+)
+
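+// collectNetFrameworkCLRInterop collects per-process COM callable wrapper, interop
+// marshalling and stub creation counters and reconciles the interop chart cache.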
+func (w *Windows) collectNetFrameworkCLRInterop(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRInteropComCallableWrappersTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrinterop_com_callable_wrappers_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRInteropMarshallingTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrinterop_interop_marshalling_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRInteropStubsCreatedTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrinterop_interop_stubs_created_total"] += int64(pm.Value)
+ }
+ }
+
+ for proc := range seen {
+ if !w.cache.netFrameworkCLRInterops[proc] {
+ w.cache.netFrameworkCLRInterops[proc] = true
+ w.addProcessNetFrameworkInteropCharts(proc)
+ }
+ }
+
+ for proc := range w.cache.netFrameworkCLRInterops {
+ if !seen[proc] {
+ delete(w.cache.netFrameworkCLRInterops, proc)
+ w.removeProcessNetFrameworkInteropCharts(proc)
+ }
+ }
+}
+
+const (
+ metricNetFrameworkCLRJITMethodsTotal = "windows_netframework_clrjit_jit_methods_total"
+ metricNetFrameworkCLRJITTimePercent = "windows_netframework_clrjit_jit_time_percent"
+ metricNetFrameworkCLRJITStandardFailuresTotal = "windows_netframework_clrjit_jit_standard_failures_total"
+ metricNetFrameworkCLRJITILBytesTotal = "windows_netframework_clrjit_jit_il_bytes_total"
+)
+
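+// collectNetFrameworkCLRJIT collects per-process JIT method, failure and IL bytes
+// counters plus JIT time percentage, and reconciles the JIT chart cache.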
+func (w *Windows) collectNetFrameworkCLRJIT(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRJITMethodsTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrjit_methods_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRJITStandardFailuresTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrjit_standard_failures_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRJITTimePercent) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrjit_time_percent"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRJITILBytesTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrjit_il_bytes_total"] += int64(pm.Value)
+ }
+ }
+
+ for proc := range seen {
+ if !w.cache.netFrameworkCLRJIT[proc] {
+ w.cache.netFrameworkCLRJIT[proc] = true
+ w.addProcessNetFrameworkJITCharts(proc)
+ }
+ }
+
+ for proc := range w.cache.netFrameworkCLRJIT {
+ if !seen[proc] {
+ delete(w.cache.netFrameworkCLRJIT, proc)
+ w.removeProcessNetFrameworkJITCharts(proc)
+ }
+ }
+}
+
+const (
+ metricNetFrameworkCLRLoadingLoaderHeapSizeBytes = "windows_netframework_clrloading_loader_heap_size_bytes"
+ metricNetFrameworkCLRLoadingAppDomainLoadedTotal = "windows_netframework_clrloading_appdomains_loaded_total"
+ metricNetFrameworkCLRLoadingAppDomainUnloadedTotal = "windows_netframework_clrloading_appdomains_unloaded_total"
+ metricNetFrameworkCLRLoadingAssembliesLoadedTotal = "windows_netframework_clrloading_assemblies_loaded_total"
+ metricNetFrameworkCLRLoadingClassesLoadedTotal = "windows_netframework_clrloading_classes_loaded_total"
+ metricNetFrameworkCLRLoadingClassLoadFailuresTotal = "windows_netframework_clrloading_class_load_failures_total"
+)
+
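+// collectNetFrameworkCLRLoading collects per-process loader heap size and
+// appdomain/assembly/class load counters, and reconciles the loading chart cache.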
+func (w *Windows) collectNetFrameworkCLRLoading(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingLoaderHeapSizeBytes) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrloading_loader_heap_size_bytes"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingAppDomainLoadedTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrloading_appdomains_loaded_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingAppDomainUnloadedTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrloading_appdomains_unloaded_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingAssembliesLoadedTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrloading_assemblies_loaded_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingClassesLoadedTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrloading_classes_loaded_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLoadingClassLoadFailuresTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrloading_class_load_failures_total"] += int64(pm.Value)
+ }
+ }
+
+ for proc := range seen {
+ if !w.cache.netFrameworkCLRLoading[proc] {
+ w.cache.netFrameworkCLRLoading[proc] = true
+ w.addProcessNetFrameworkLoadingCharts(proc)
+ }
+ }
+
+ for proc := range w.cache.netFrameworkCLRLoading {
+ if !seen[proc] {
+ delete(w.cache.netFrameworkCLRLoading, proc)
+ w.removeProcessNetFrameworkLoadingCharts(proc)
+ }
+ }
+}
+
+const (
+ metricNetFrameworkCLRLocksAndThreadsQueueLengthTotal = "windows_netframework_clrlocksandthreads_queue_length_total"
+ metricNetFrameworkCLRLocksAndThreadsCurrentLogicalThreads = "windows_netframework_clrlocksandthreads_current_logical_threads"
+ metricNetFrameworkCLRLocksAndThreadsPhysicalThreadsCurrent = "windows_netframework_clrlocksandthreads_physical_threads_current"
+ metricNetFrameworkCLRLocksAndThreadsRecognizedThreadsTotal = "windows_netframework_clrlocksandthreads_recognized_threads_total"
+ metricNetFrameworkCLRLocksAndThreadsContentionsTotal = "windows_netframework_clrlocksandthreads_contentions_total"
+)
+
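+// collectNetFrameworkCLRLocksAndThreads collects per-process thread counts, queue
+// length and lock contention counters, and reconciles the locks/threads chart cache.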
+func (w *Windows) collectNetFrameworkCLRLocksAndThreads(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsQueueLengthTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrlocksandthreads_queue_length_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsCurrentLogicalThreads) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrlocksandthreads_current_logical_threads"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsPhysicalThreadsCurrent) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrlocksandthreads_physical_threads_current"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsRecognizedThreadsTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrlocksandthreads_recognized_threads_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRLocksAndThreadsContentionsTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrlocksandthreads_contentions_total"] += int64(pm.Value)
+ }
+ }
+
+ for proc := range seen {
+ if !w.cache.netFrameworkCLRLocksThreads[proc] {
+ w.cache.netFrameworkCLRLocksThreads[proc] = true
+ w.addProcessNetFrameworkLocksAndThreadsCharts(proc)
+ }
+ }
+
+ for proc := range w.cache.netFrameworkCLRLocksThreads {
+ if !seen[proc] {
+ delete(w.cache.netFrameworkCLRLocksThreads, proc)
+ w.removeProcessNetFrameworkLocksAndThreadsCharts(proc)
+ }
+ }
+}
+
+const (
+ metricNetFrameworkCLRMemoryAllocatedBytesTotal = "windows_netframework_clrmemory_allocated_bytes_total"
+ metricNetFrameworkCLRMemoryFinalizationSurvivors = "windows_netframework_clrmemory_finalization_survivors"
+ metricNetFrameworkCLRMemoryHeapSizeBytes = "windows_netframework_clrmemory_heap_size_bytes"
+ metricNetFrameworkCLRMemoryPromotedBytes = "windows_netframework_clrmemory_promoted_bytes"
+ metricNetFrameworkCLRMemoryNumberGCHandles = "windows_netframework_clrmemory_number_gc_handles"
+ metricNetFrameworkCLRMemoryCollectionsTotal = "windows_netframework_clrmemory_collections_total"
+ metricNetFrameworkCLRMemoryInducedGCTotal = "windows_netframework_clrmemory_induced_gc_total"
+ metricNetFrameworkCLRMemoryNumberPinnedObjects = "windows_netframework_clrmemory_number_pinned_objects"
+ metricNetFrameworkCLRMemoryNumberSinkBlockInUse = "windows_netframework_clrmemory_number_sink_blocksinuse"
+ metricNetFrameworkCLRMemoryCommittedBytes = "windows_netframework_clrmemory_committed_bytes"
+ metricNetFrameworkCLRMemoryReservedBytes = "windows_netframework_clrmemory_reserved_bytes"
+ metricNetFrameworkCLRMemoryGCTimePercent = "windows_netframework_clrmemory_gc_time_percent"
+)
+
+func (w *Windows) collectNetFrameworkCLRMemory(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryAllocatedBytesTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_allocated_bytes_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryFinalizationSurvivors) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_finalization_survivors"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryHeapSizeBytes) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_heap_size_bytes"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryPromotedBytes) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_promoted_bytes"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryNumberGCHandles) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_number_gc_handles"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryCollectionsTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_collections_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryInducedGCTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_induced_gc_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryNumberPinnedObjects) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_number_pinned_objects"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryNumberSinkBlockInUse) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_number_sink_blocksinuse"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryCommittedBytes) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_committed_bytes"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryReservedBytes) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_reserved_bytes"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRMemoryGCTimePercent) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrmemory_gc_time_percent"] += int64(pm.Value)
+ }
+ }
+
+ for proc := range seen {
+ if !w.cache.netFrameworkCLRMemory[proc] {
+ w.cache.netFrameworkCLRMemory[proc] = true
+ w.addProcessNetFrameworkMemoryCharts(proc)
+ }
+ }
+
+ for proc := range w.cache.netFrameworkCLRMemory {
+ if !seen[proc] {
+ delete(w.cache.netFrameworkCLRMemory, proc)
+ w.removeProcessNetFrameworkMemoryCharts(proc)
+ }
+ }
+}
+
+const (
+ metricNetFrameworkCLRRemotingChannelsTotal = "windows_netframework_clrremoting_channels_total"
+ metricNetFrameworkCLRRemotingContextBoundClassesLoaded = "windows_netframework_clrremoting_context_bound_classes_loaded"
+ metricNetFrameworkCLRRemotingContextBoundObjectsTotal = "windows_netframework_clrremoting_context_bound_objects_total"
+ metricNetFrameworkCLRRemotingContextProxiesTotal = "windows_netframework_clrremoting_context_proxies_total"
+ metricNetFrameworkCLRRemotingContexts = "windows_netframework_clrremoting_contexts"
+ metricNetFrameworkCLRRemotingRemoteCallsTotal = "windows_netframework_clrremoting_remote_calls_total"
+)
+
+func (w *Windows) collectNetFrameworkCLRRemoting(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingChannelsTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrremoting_channels_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingContextBoundClassesLoaded) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrremoting_context_bound_classes_loaded"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingContextBoundObjectsTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrremoting_context_bound_objects_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingContextProxiesTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrremoting_context_proxies_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingContexts) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrremoting_contexts"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRRemotingRemoteCallsTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrremoting_remote_calls_total"] += int64(pm.Value)
+ }
+ }
+
+ for proc := range seen {
+ if !w.cache.netFrameworkCLRRemoting[proc] {
+ w.cache.netFrameworkCLRRemoting[proc] = true
+ w.addProcessNetFrameworkRemotingCharts(proc)
+ }
+ }
+
+ for proc := range w.cache.netFrameworkCLRRemoting {
+ if !seen[proc] {
+ delete(w.cache.netFrameworkCLRRemoting, proc)
+ w.removeProcessNetFrameworkRemotingCharts(proc)
+ }
+ }
+}
+
+const (
+ metricNetFrameworkCLRSecurityLinkTimeChecksTotal = "windows_netframework_clrsecurity_link_time_checks_total"
+ metricNetFrameworkCLRSecurityRTChecksTimePercent = "windows_netframework_clrsecurity_rt_checks_time_percent"
+ metricNetFrameworkCLRSecurityStackWalkDepth = "windows_netframework_clrsecurity_stack_walk_depth"
+ metricNetFrameworkCLRSecurityRuntimeChecksTotal = "windows_netframework_clrsecurity_runtime_checks_total"
+)
+
+func (w *Windows) collectNetFrameworkCLRSecurity(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRSecurityLinkTimeChecksTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrsecurity_link_time_checks_total"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRSecurityRTChecksTimePercent) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrsecurity_checks_time_percent"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRSecurityStackWalkDepth) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrsecurity_stack_walk_depth"] += int64(pm.Value)
+ }
+ }
+
+ for _, pm := range pms.FindByName(metricNetFrameworkCLRSecurityRuntimeChecksTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[netframeworkPrefix+name+"_clrsecurity_runtime_checks_total"] += int64(pm.Value)
+ }
+ }
+
+ for proc := range seen {
+ if !w.cache.netFrameworkCLRSecurity[proc] {
+ w.cache.netFrameworkCLRSecurity[proc] = true
+ w.addProcessNetFrameworkSecurityCharts(proc)
+ }
+ }
+
+ for proc := range w.cache.netFrameworkCLRSecurity {
+ if !seen[proc] {
+ delete(w.cache.netFrameworkCLRSecurity, proc)
+ w.removeProcessNetFrameworkSecurityCharts(proc)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_os.go b/src/go/plugin/go.d/modules/windows/collect_os.go
new file mode 100644
index 000000000..99113e973
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_os.go
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricOSPhysicalMemoryFreeBytes = "windows_os_physical_memory_free_bytes"
+ metricOSPagingFreeBytes = "windows_os_paging_free_bytes"
+ metricOSProcessesLimit = "windows_os_processes_limit"
+ metricOSProcesses = "windows_os_processes"
+ metricOSUsers = "windows_os_users"
+ metricOSPagingLimitBytes = "windows_os_paging_limit_bytes"
+ metricOSVisibleMemoryBytes = "windows_os_visible_memory_bytes"
+)
+
+func (w *Windows) collectOS(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorOS] {
+ w.cache.collection[collectorOS] = true
+ w.addOSCharts()
+ }
+
+ px := "os_"
+ if pm := pms.FindByName(metricOSPhysicalMemoryFreeBytes); pm.Len() > 0 {
+ mx[px+"physical_memory_free_bytes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricOSPagingFreeBytes); pm.Len() > 0 {
+ mx[px+"paging_free_bytes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricOSProcessesLimit); pm.Len() > 0 {
+ mx[px+"processes_limit"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricOSProcesses); pm.Len() > 0 {
+ mx[px+"processes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricOSUsers); pm.Len() > 0 {
+ mx[px+"users"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricOSPagingLimitBytes); pm.Len() > 0 {
+ mx[px+"paging_limit_bytes"] = int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricOSVisibleMemoryBytes); pm.Len() > 0 {
+ mx[px+"visible_memory_bytes"] = int64(pm.Max())
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_process.go b/src/go/plugin/go.d/modules/windows/collect_process.go
new file mode 100644
index 000000000..373db6c08
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_process.go
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricProcessCPUTimeTotal = "windows_process_cpu_time_total"
+ metricProcessWorkingSetBytes = "windows_process_working_set_private_bytes"
+ metricProcessIOBytes = "windows_process_io_bytes_total"
+ metricProcessIOOperations = "windows_process_io_operations_total"
+ metricProcessPageFaults = "windows_process_page_faults_total"
+ metricProcessPageFileBytes = "windows_process_page_file_bytes"
+ metricProcessThreads = "windows_process_threads"
+ metricProcessCPUHandles = "windows_process_handles"
+)
+
+func (w *Windows) collectProcess(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorProcess] {
+ w.cache.collection[collectorProcess] = true
+ w.addProcessesCharts()
+ }
+
+ seen := make(map[string]bool)
+ px := "process_"
+ for _, pm := range pms.FindByName(metricProcessCPUTimeTotal) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[px+name+"_cpu_time"] += int64(pm.Value * 1000)
+ }
+ }
+ for _, pm := range pms.FindByName(metricProcessWorkingSetBytes) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[px+name+"_working_set_private_bytes"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricProcessIOBytes) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[px+name+"_io_bytes"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricProcessIOOperations) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[px+name+"_io_operations"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricProcessPageFaults) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[px+name+"_page_faults"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricProcessPageFileBytes) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[px+name+"_page_file_bytes"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricProcessThreads) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[px+name+"_threads"] += int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricProcessCPUHandles) {
+ if name := cleanProcessName(pm.Labels.Get("process")); name != "" {
+ seen[name] = true
+ mx[px+name+"_handles"] += int64(pm.Value)
+ }
+ }
+
+ for proc := range seen {
+ if !w.cache.processes[proc] {
+ w.cache.processes[proc] = true
+ w.addProcessToCharts(proc)
+ }
+ }
+ for proc := range w.cache.processes {
+ if !seen[proc] {
+ delete(w.cache.processes, proc)
+ w.removeProcessFromCharts(proc)
+ }
+ }
+}
+
+func cleanProcessName(name string) string {
+ return strings.ReplaceAll(name, " ", "_")
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_service.go b/src/go/plugin/go.d/modules/windows/collect_service.go
new file mode 100644
index 000000000..c6d77c99e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_service.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricServiceState = "windows_service_state"
+ metricServiceStatus = "windows_service_status"
+)
+
+func (w *Windows) collectService(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ px := "service_"
+ for _, pm := range pms.FindByName(metricServiceState) {
+ name := cleanService(pm.Labels.Get("name"))
+ state := cleanService(pm.Labels.Get("state"))
+ if name == "" || state == "" {
+ continue
+ }
+
+ seen[name] = true
+ mx[px+name+"_state_"+state] = int64(pm.Value)
+ }
+ for _, pm := range pms.FindByName(metricServiceStatus) {
+ name := cleanService(pm.Labels.Get("name"))
+ status := cleanService(pm.Labels.Get("status"))
+ if name == "" || status == "" {
+ continue
+ }
+
+ seen[name] = true
+ mx[px+name+"_status_"+status] = int64(pm.Value)
+ }
+
+ for svc := range seen {
+ if !w.cache.services[svc] {
+ w.cache.services[svc] = true
+ w.addServiceCharts(svc)
+ }
+ }
+ for svc := range w.cache.services {
+ if !seen[svc] {
+ delete(w.cache.services, svc)
+ w.removeServiceCharts(svc)
+ }
+ }
+}
+
+func cleanService(name string) string {
+ return strings.ReplaceAll(name, " ", "_")
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_system.go b/src/go/plugin/go.d/modules/windows/collect_system.go
new file mode 100644
index 000000000..8758e8476
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_system.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricSysSystemUpTime = "windows_system_system_up_time"
+ metricSysThreads = "windows_system_threads"
+)
+
+func (w *Windows) collectSystem(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorSystem] {
+ w.cache.collection[collectorSystem] = true
+ w.addSystemCharts()
+ }
+
+ px := "system_"
+ if pm := pms.FindByName(metricSysSystemUpTime); pm.Len() > 0 {
+ mx[px+"up_time"] = time.Now().Unix() - int64(pm.Max())
+ }
+ if pm := pms.FindByName(metricSysThreads); pm.Len() > 0 {
+ mx[px+"threads"] = int64(pm.Max())
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_tcp.go b/src/go/plugin/go.d/modules/windows/collect_tcp.go
new file mode 100644
index 000000000..7b4621835
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_tcp.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+
+const (
+ metricTCPConnectionFailure = "windows_tcp_connection_failures_total"
+ metricTCPConnectionActive = "windows_tcp_connections_active_total"
+ metricTCPConnectionEstablished = "windows_tcp_connections_established"
+ metricTCPConnectionPassive = "windows_tcp_connections_passive_total"
+ metricTCPConnectionReset = "windows_tcp_connections_reset_total"
+ metricTCPConnectionSegmentsReceived = "windows_tcp_segments_received_total"
+ metricTCPConnectionSegmentsRetransmitted = "windows_tcp_segments_retransmitted_total"
+ metricTCPConnectionSegmentsSent = "windows_tcp_segments_sent_total"
+)
+
+func (w *Windows) collectTCP(mx map[string]int64, pms prometheus.Series) {
+ if !w.cache.collection[collectorTCP] {
+ w.cache.collection[collectorTCP] = true
+ w.addTCPCharts()
+ }
+
+ px := "tcp_"
+ for _, pm := range pms.FindByName(metricTCPConnectionFailure) {
+ if af := pm.Labels.Get("af"); af != "" {
+ mx[px+af+"_conns_failures"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricTCPConnectionActive) {
+ if af := pm.Labels.Get("af"); af != "" {
+ mx[px+af+"_conns_active"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricTCPConnectionEstablished) {
+ if af := pm.Labels.Get("af"); af != "" {
+ mx[px+af+"_conns_established"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricTCPConnectionPassive) {
+ if af := pm.Labels.Get("af"); af != "" {
+ mx[px+af+"_conns_passive"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricTCPConnectionReset) {
+ if af := pm.Labels.Get("af"); af != "" {
+ mx[px+af+"_conns_resets"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricTCPConnectionSegmentsReceived) {
+ if af := pm.Labels.Get("af"); af != "" {
+ mx[px+af+"_segments_received"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricTCPConnectionSegmentsRetransmitted) {
+ if af := pm.Labels.Get("af"); af != "" {
+ mx[px+af+"_segments_retransmitted"] = int64(pm.Value)
+ }
+ }
+ for _, pm := range pms.FindByName(metricTCPConnectionSegmentsSent) {
+ if af := pm.Labels.Get("af"); af != "" {
+ mx[px+af+"_segments_sent"] = int64(pm.Value)
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/collect_thermalzone.go b/src/go/plugin/go.d/modules/windows/collect_thermalzone.go
new file mode 100644
index 000000000..6dccb9fed
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/collect_thermalzone.go
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+)
+
+const (
+ metricThermalzoneTemperatureCelsius = "windows_thermalzone_temperature_celsius"
+)
+
+func (w *Windows) collectThermalzone(mx map[string]int64, pms prometheus.Series) {
+ seen := make(map[string]bool)
+ for _, pm := range pms.FindByName(metricThermalzoneTemperatureCelsius) {
+ if name := cleanZoneName(pm.Labels.Get("name")); name != "" {
+ seen[name] = true
+ mx["thermalzone_"+name+"_temperature"] = int64(pm.Value)
+ }
+ }
+
+ for zone := range seen {
+ if !w.cache.thermalZones[zone] {
+ w.cache.thermalZones[zone] = true
+ w.addThermalZoneCharts(zone)
+ }
+ }
+ for zone := range w.cache.thermalZones {
+ if !seen[zone] {
+ delete(w.cache.thermalZones, zone)
+ w.removeThermalZoneCharts(zone)
+ }
+ }
+}
+
+func cleanZoneName(name string) string {
+ // "\\_TZ.TZ10", "\\_TZ.X570" => TZ10, X570
+ i := strings.Index(name, ".")
+ if i == -1 || len(name) == i+1 {
+ return ""
+ }
+ return name[i+1:]
+}
diff --git a/src/go/plugin/go.d/modules/windows/config_schema.json b/src/go/plugin/go.d/modules/windows/config_schema.json
new file mode 100644
index 000000000..e1011e876
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/config_schema.json
@@ -0,0 +1,190 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Windows collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Windows exporter metrics endpoint.",
+ "type": "string"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 5
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "vnode": {
+ "title": "Vnode",
+ "description": "The hostname of the [virtual node](https://github.com/netdata/netdata/blob/master/docs/guides/using-host-labels.md#virtual-nodes) defined in the vnodes.conf configuration file.",
+ "type": "string"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "vnode"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ },
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "url": {
+ "ui:placeholder": "http://203.0.113.0:9182/metrics"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "username": {
+ "ui:widget": "password"
+ },
+ "proxy_username": {
+ "ui:widget": "password"
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/windows/init.go b/src/go/plugin/go.d/modules/windows/init.go
new file mode 100644
index 000000000..87faf40bd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/init.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+func (w *Windows) validateConfig() error {
+ if w.URL == "" {
+ return errors.New("'url' is not set")
+ }
+ return nil
+}
+
+func (w *Windows) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(w.Client)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.New(client, w.Request), nil
+}
diff --git a/src/go/plugin/go.d/modules/windows/integrations/active_directory.md b/src/go/plugin/go.d/modules/windows/integrations/active_directory.md
new file mode 100644
index 000000000..6d255aba8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/integrations/active_directory.md
@@ -0,0 +1,843 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/integrations/active_directory.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/metadata.yaml"
+sidebar_label: "Active Directory"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Active Directory
+
+
+<img src="https://netdata.cloud/img/windows.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: windows
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance of Windows machines, collecting both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).
+
+
+It collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+
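+As a quick sanity check that the endpoint the collector scrapes is reachable, you can query it manually. A hedged example, assuming the exporter listens on its default port 9182 (replace the address with your Windows host):
+
+```bash
+# Illustrative check of the windows_exporter metrics endpoint.
+curl -s http://192.0.2.1:9182/metrics | head -n 20
+```
+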
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It detects Windows exporter instances running on localhost (this requires the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
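+One practical way to reduce that impact is to enable only the collectors you actually need on the exporter side. A sketch, assuming the `--collectors.enabled` flag documented by windows_exporter (verify the exact flag for your exporter version):
+
+```bash
+# Hypothetical example: start windows_exporter with a reduced collector set.
+windows_exporter.exe --collectors.enabled="cpu,memory,net,logical_disk,os,system,tcp"
+```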
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+Supported collectors:
+
+- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)
+- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)
+- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)
+- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)
+- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)
+- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)
+- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)
+- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)
+- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)
+- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)
+- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)
+- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)
+- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)
+- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)
+- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)
+- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)
+- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)
+- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)
+- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)
+- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)
+- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)
+- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)
+- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)
+- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)
+- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)
+
+
+### Per Active Directory instance
+
+These metrics refer to the entire monitored host.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |
+| windows.memory_utilization | available, used | bytes |
+| windows.memory_page_faults | page_faults | events/s |
+| windows.memory_swap_utilization | available, used | bytes |
+| windows.memory_swap_operations | read, write | operations/s |
+| windows.memory_swap_pages | read, written | pages/s |
+| windows.memory_cached | cached | KiB |
+| windows.memory_cache_faults | cache_faults | events/s |
+| windows.memory_system_pool | paged, non-paged | bytes |
+| windows.tcp_conns_established | ipv4, ipv6 | connections |
+| windows.tcp_conns_active | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |
+| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |
+| windows.tcp_segments_received | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |
+| windows.os_processes | processes | number |
+| windows.os_users | users | users |
+| windows.os_visible_memory_usage | free, used | bytes |
+| windows.os_paging_files_usage | free, used | bytes |
+| windows.system_threads | threads | number |
+| windows.system_uptime | time | seconds |
+| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |
+| windows.processes_cpu_utilization | a dimension per process | percentage |
+| windows.processes_handles | a dimension per process | handles |
+| windows.processes_io_bytes | a dimension per process | bytes/s |
+| windows.processes_io_operations | a dimension per process | operations/s |
+| windows.processes_page_faults | a dimension per process | pgfaults/s |
+| windows.processes_page_file_bytes | a dimension per process | bytes |
+| windows.processes_pool_bytes | a dimension per process | bytes |
+| windows.processes_threads | a dimension per process | threads |
+| ad.database_operations | add, delete, modify, recycle | operations/s |
+| ad.directory_operations | read, write, search | operations/s |
+| ad.name_cache_lookups | lookups | lookups/s |
+| ad.name_cache_hits | hits | hits/s |
+| ad.atq_average_request_latency | time | seconds |
+| ad.atq_outstanding_requests | outstanding | requests |
+| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |
+| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |
+| ad.dra_replication_properties_updated | inbound, outbound | properties/s |
+| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |
+| ad.dra_replication_pending_syncs | pending | syncs |
+| ad.dra_replication_sync_requests | requests | requests/s |
+| ad.ds_threads | in_use | threads |
+| ad.ldap_last_bind_time | last_bind | seconds |
+| ad.binds | binds | binds/s |
+| ad.ldap_searches | searches | searches/s |
+| adfs.ad_login_connection_failures | connection | failures/s |
+| adfs.certificate_authentications | authentications | authentications/s |
+| adfs.db_artifact_failures | connection | failures/s |
+| adfs.db_artifact_query_time_seconds | query_time | seconds/s |
+| adfs.db_config_failures | connection | failures/s |
+| adfs.db_config_query_time_seconds | query_time | seconds/s |
+| adfs.device_authentications | authentications | authentications/s |
+| adfs.external_authentications | success, failure | authentications/s |
+| adfs.federated_authentications | authentications | authentications/s |
+| adfs.federation_metadata_requests | requests | requests/s |
+| adfs.oauth_authorization_requests | requests | requests/s |
+| adfs.oauth_client_authentications | success, failure | authentications/s |
+| adfs.oauth_client_credentials_requests | success, failure | requests/s |
+| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |
+| adfs.oauth_client_windows_authentications | success, failure | authentications/s |
+| adfs.oauth_logon_certificate_requests | success, failure | requests/s |
+| adfs.oauth_password_grant_requests | success, failure | requests/s |
+| adfs.oauth_token_requests_success | success | requests/s |
+| adfs.passive_requests | passive | requests/s |
+| adfs.passport_authentications | passport | authentications/s |
+| adfs.password_change_requests | success, failure | requests/s |
+| adfs.samlp_token_requests_success | success | requests/s |
+| adfs.sso_authentications | success, failure | authentications/s |
+| adfs.token_requests | requests | requests/s |
+| adfs.userpassword_authentications | success, failure | authentications/s |
+| adfs.windows_integrated_authentications | authentications | authentications/s |
+| adfs.wsfed_token_requests_success | success | requests/s |
+| adfs.wstrust_token_requests_success | success | requests/s |
+| exchange.activesync_ping_cmds_pending | pending | commands |
+| exchange.activesync_requests | received | requests/s |
+| exchange.activesync_sync_cmds | processed | commands/s |
+| exchange.autodiscover_requests | processed | requests/s |
+| exchange.avail_service_requests | serviced | requests/s |
+| exchange.owa_current_unique_users | logged-in | users |
+| exchange.owa_requests_total | handled | requests/s |
+| exchange.rpc_active_user_count | active | users |
+| exchange.rpc_avg_latency | latency | seconds |
+| exchange.rpc_connection_count | connections | connections |
+| exchange.rpc_operations | operations | operations/s |
+| exchange.rpc_requests | processed | requests |
+| exchange.rpc_user_count | users | users |
+| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_poison | low, high, none, normal | messages/s |
+| hyperv.vms_health | ok, critical | vms |
+| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |
+| hyperv.root_partition_attached_devices | attached | devices |
+| hyperv.root_partition_deposited_pages | deposited | pages |
+| hyperv.root_partition_skipped_interrupts | skipped | interrupts |
+| hyperv.root_partition_device_dma_errors | illegal_dma | requests |
+| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |
+| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |
+| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |
+| hyperv.root_partition_address_space | address_spaces | address spaces |
+| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |
+| hyperv.root_partition_virtual_tlb_pages | used | pages |
+
+### Per cpu core
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| core | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |
+| windows.cpu_core_interrupts | interrupts | interrupts/s |
+| windows.cpu_core_dpcs | dpcs | dpcs/s |
+| windows.cpu_core_cstate | c1, c2, c3 | percentage |
+
+### Per logical disk
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| disk | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.logical_disk_utilization | free, used | bytes |
+| windows.logical_disk_bandwidth | read, write | bytes/s |
+| windows.logical_disk_operations | reads, writes | operations/s |
+| windows.logical_disk_latency | read, write | seconds |
+
+### Per network device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| nic | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.net_nic_bandwidth | received, sent | kilobits/s |
+| windows.net_nic_packets | received, sent | packets/s |
+| windows.net_nic_errors | inbound, outbound | errors/s |
+| windows.net_nic_discarded | inbound, outbound | discards/s |
+
+### Per thermalzone
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| thermalzone | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.thermalzone_temperature | temperature | celsius |
+
+### Per service
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| service | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |
+| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |
+
+### Per website
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| website | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| iis.website_traffic | received, sent | bytes/s |
+| iis.website_requests_rate | requests | requests/s |
+| iis.website_active_connections_count | active | connections |
+| iis.website_users_count | anonymous, non_anonymous | users |
+| iis.website_connection_attempts_rate | connection | attempts/s |
+| iis.website_isapi_extension_requests_count | isapi | requests |
+| iis.website_isapi_extension_requests_rate | isapi | requests/s |
+| iis.website_ftp_file_transfer_rate | received, sent | files/s |
+| iis.website_logon_attempts_rate | logon | attempts/s |
+| iis.website_errors_rate | document_locked, document_not_found | errors/s |
+| iis.website_uptime | document_locked, document_not_found | seconds |
+
+### Per mssql instance
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.instance_accessmethods_page_splits | page | splits/s |
+| mssql.instance_cache_hit_ratio | hit_ratio | percentage |
+| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |
+| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |
+| mssql.instance_bufman_iops | read, written | iops |
+| mssql.instance_blocked_processes | blocked | processes |
+| mssql.instance_user_connection | user | connections |
+| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |
+| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |
+| mssql.instance_memmgr_pending_memory_grants | pending | processes |
+| mssql.instance_memmgr_server_memory | memory | bytes |
+| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |
+| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |
+| mssql.instance_sqlstats_batch_requests | batch | requests/s |
+| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |
+| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |
+| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |
+
+### Per database
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+| database | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.database_active_transactions | active | transactions |
+| mssql.database_backup_restore_operations | backup | operations/s |
+| mssql.database_data_files_size | size | bytes |
+| mssql.database_log_flushed | flushed | bytes/s |
+| mssql.database_log_flushes | log | flushes/s |
+| mssql.database_transactions | transactions | transactions/s |
+| mssql.database_write_transactions | write | transactions/s |
+
+### Per certificate template
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cert_template | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| adcs.cert_template_requests | requests | requests/s |
+| adcs.cert_template_failed_requests | failed | requests/s |
+| adcs.cert_template_issued_requests | issued | requests/s |
+| adcs.cert_template_pending_requests | pending | requests/s |
+| adcs.cert_template_request_processing_time | processing_time | seconds |
+| adcs.cert_template_retrievals | retrievals | retrievals/s |
+| adcs.cert_template_retrieval_processing_time | processing_time | seconds |
+| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |
+| adcs.cert_template_request_policy_module_processing | processing_time | seconds |
+| adcs.cert_template_challenge_responses | challenge | responses/s |
+| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |
+| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |
+| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |
+
+### Per process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| process | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| netframework.clrexception_thrown | exceptions | exceptions/s |
+| netframework.clrexception_filters | filters | filters/s |
+| netframework.clrexception_finallys | finallys | finallys/s |
+| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |
+| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |
+| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |
+| netframework.clrinterop_interop_stubs_created | created | stubs/s |
+| netframework.clrjit_methods | jit-compiled | methods/s |
+| netframework.clrjit_time | time | percentage |
+| netframework.clrjit_standard_failures | failures | failures/s |
+| netframework.clrjit_il_bytes | compiled_msil | bytes/s |
+| netframework.clrloading_loader_heap_size | committed | bytes |
+| netframework.clrloading_appdomains_loaded | loaded | domain/s |
+| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |
+| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |
+| netframework.clrloading_classes_loaded | loaded | classes/s |
+| netframework.clrloading_class_load_failures | class_load | failures/s |
+| netframework.clrlocksandthreads_queue_length | threads | threads/s |
+| netframework.clrlocksandthreads_current_logical_threads | logical | threads |
+| netframework.clrlocksandthreads_current_physical_threads | physical | threads |
+| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |
+| netframework.clrlocksandthreads_contentions | contentions | contentions/s |
+| netframework.clrmemory_allocated_bytes | allocated | bytes/s |
+| netframework.clrmemory_finalization_survivors | survived | objects |
+| netframework.clrmemory_heap_size | heap | bytes |
+| netframework.clrmemory_promoted | promoted | bytes |
+| netframework.clrmemory_number_gc_handles | used | handles |
+| netframework.clrmemory_collections | gc | gc/s |
+| netframework.clrmemory_induced_gc | gc | gc/s |
+| netframework.clrmemory_number_pinned_objects | pinned | objects |
+| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |
+| netframework.clrmemory_committed | committed | bytes |
+| netframework.clrmemory_reserved | reserved | bytes |
+| netframework.clrmemory_gc_time | time | percentage |
+| netframework.clrremoting_channels | registered | channels/s |
+| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |
+| netframework.clrremoting_context_bound_objects | allocated | objects/s |
+| netframework.clrremoting_context_proxies | objects | objects/s |
+| netframework.clrremoting_contexts | contexts | contexts |
+| netframework.clrremoting_remote_calls | rpc | calls/s |
+| netframework.clrsecurity_link_time_checks | linktime | checks/s |
+| netframework.clrsecurity_checks_time | time | percentage |
+| netframework.clrsecurity_stack_walk_depth | stack | depth |
+| netframework.clrsecurity_runtime_checks | runtime | checks/s |
+
+### Per exchange workload
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.workload_active_tasks | active | tasks |
+| exchange.workload_completed_tasks | completed | tasks/s |
+| exchange.workload_queued_tasks | queued | tasks/s |
+| exchange.workload_yielded_tasks | yielded | tasks/s |
+| exchange.workload_activity_status | active, paused | status |
+
+### Per ldap process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |
+| exchange.ldap_read_time | read | seconds |
+| exchange.ldap_search_time | search | seconds |
+| exchange.ldap_write_time | write | seconds |
+| exchange.ldap_timeout_errors | timeout | errors/s |
+
+### Per http proxy
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.http_proxy_avg_auth_latency | latency | seconds |
+| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |
+| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |
+| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |
+| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |
+| exchange.http_proxy_requests | processed | requests/s |
+
+### Per vm
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_name | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |
+| hyperv.vm_memory_physical | assigned_memory | MiB |
+| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |
+| hyperv.vm_memory_pressure_current | pressure | percentage |
+| hyperv.vm_vid_physical_pages_allocated | allocated | pages |
+| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |
+
+### Per vm device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_device_bytes | read, written | bytes/s |
+| hyperv.vm_device_operations | read, write | operations/s |
+| hyperv.vm_device_errors | errors | errors/s |
+
+### Per vm interface
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_interface | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_interface_bytes | received, sent | bytes/s |
+| hyperv.vm_interface_packets | received, sent | packets/s |
+| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |
+
+### Per vswitch
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vswitch | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vswitch_bytes | received, sent | bytes/s |
+| hyperv.vswitch_packets | received, sent | packets/s |
+| hyperv.vswitch_directed_packets | received, sent | packets/s |
+| hyperv.vswitch_broadcast_packets | received, sent | packets/s |
+| hyperv.vswitch_multicast_packets | received, sent | packets/s |
+| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_packets_flooded | flooded | packets/s |
+| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |
+| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |
+| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |
+| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |
+| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Windows exporter
+
+To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/windows.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/windows.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
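+A hedged sketch of how such global options sit above the `jobs` section in `go.d/windows.conf` (values are illustrative):
+
+```yaml
+# Applied to every job below unless a job overrides them.
+update_every: 5
+autodetection_retry: 0
+
+jobs:
+  - name: win_server
+    url: http://192.0.2.1:9182/metrics
+```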
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: https://192.0.2.1:9182/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Virtual Node
+
+The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.
+You can create a virtual node for all your Windows machines and control them as separate entities.
+
+To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:
+
+> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.
+
+```yaml
+# /etc/netdata/vnodes/vnodes.conf
+- hostname: win_server
+ guid: <value>
+```
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ vnode: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from multiple remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server1
+ url: http://192.0.2.1:9182/metrics
+
+ - name: win_server2
+ url: http://192.0.2.2:9182/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m windows
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep windows
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep windows /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep windows
+```
+
+
diff --git a/src/go/plugin/go.d/modules/windows/integrations/hyperv.md b/src/go/plugin/go.d/modules/windows/integrations/hyperv.md
new file mode 100644
index 000000000..42e4f308d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/integrations/hyperv.md
@@ -0,0 +1,843 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/integrations/hyperv.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/metadata.yaml"
+sidebar_label: "HyperV"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# HyperV
+
+
+<img src="https://netdata.cloud/img/windows.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: windows
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).
+
+
+It collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+Supported collectors:
+
+- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)
+- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)
+- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)
+- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)
+- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)
+- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)
+- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)
+- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)
+- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)
+- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)
+- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)
+- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)
+- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)
+- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)
+- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)
+- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)
+- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)
+- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)
+- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)
+- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)
+- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)
+- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)
+- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)
+- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)
+- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)
+
+
+### Per Active Directory instance
+
+These metrics refer to the entire monitored host.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |
+| windows.memory_utilization | available, used | bytes |
+| windows.memory_page_faults | page_faults | events/s |
+| windows.memory_swap_utilization | available, used | bytes |
+| windows.memory_swap_operations | read, write | operations/s |
+| windows.memory_swap_pages | read, written | pages/s |
+| windows.memory_cached | cached | KiB |
+| windows.memory_cache_faults | cache_faults | events/s |
+| windows.memory_system_pool | paged, non-paged | bytes |
+| windows.tcp_conns_established | ipv4, ipv6 | connections |
+| windows.tcp_conns_active | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |
+| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |
+| windows.tcp_segments_received | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |
+| windows.os_processes | processes | number |
+| windows.os_users | users | users |
+| windows.os_visible_memory_usage | free, used | bytes |
+| windows.os_paging_files_usage | free, used | bytes |
+| windows.system_threads | threads | number |
+| windows.system_uptime | time | seconds |
+| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |
+| windows.processes_cpu_utilization | a dimension per process | percentage |
+| windows.processes_handles | a dimension per process | handles |
+| windows.processes_io_bytes | a dimension per process | bytes/s |
+| windows.processes_io_operations | a dimension per process | operations/s |
+| windows.processes_page_faults | a dimension per process | pgfaults/s |
+| windows.processes_page_file_bytes | a dimension per process | bytes |
+| windows.processes_pool_bytes | a dimension per process | bytes |
+| windows.processes_threads | a dimension per process | threads |
+| ad.database_operations | add, delete, modify, recycle | operations/s |
+| ad.directory_operations | read, write, search | operations/s |
+| ad.name_cache_lookups | lookups | lookups/s |
+| ad.name_cache_hits | hits | hits/s |
+| ad.atq_average_request_latency | time | seconds |
+| ad.atq_outstanding_requests | outstanding | requests |
+| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |
+| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |
+| ad.dra_replication_properties_updated | inbound, outbound | properties/s |
+| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |
+| ad.dra_replication_pending_syncs | pending | syncs |
+| ad.dra_replication_sync_requests | requests | requests/s |
+| ad.ds_threads | in_use | threads |
+| ad.ldap_last_bind_time | last_bind | seconds |
+| ad.binds | binds | binds/s |
+| ad.ldap_searches | searches | searches/s |
+| adfs.ad_login_connection_failures | connection | failures/s |
+| adfs.certificate_authentications | authentications | authentications/s |
+| adfs.db_artifact_failures | connection | failures/s |
+| adfs.db_artifact_query_time_seconds | query_time | seconds/s |
+| adfs.db_config_failures | connection | failures/s |
+| adfs.db_config_query_time_seconds | query_time | seconds/s |
+| adfs.device_authentications | authentications | authentications/s |
+| adfs.external_authentications | success, failure | authentications/s |
+| adfs.federated_authentications | authentications | authentications/s |
+| adfs.federation_metadata_requests | requests | requests/s |
+| adfs.oauth_authorization_requests | requests | requests/s |
+| adfs.oauth_client_authentications | success, failure | authentications/s |
+| adfs.oauth_client_credentials_requests | success, failure | requests/s |
+| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |
+| adfs.oauth_client_windows_authentications | success, failure | authentications/s |
+| adfs.oauth_logon_certificate_requests | success, failure | requests/s |
+| adfs.oauth_password_grant_requests | success, failure | requests/s |
+| adfs.oauth_token_requests_success | success | requests/s |
+| adfs.passive_requests | passive | requests/s |
+| adfs.passport_authentications | passport | authentications/s |
+| adfs.password_change_requests | success, failure | requests/s |
+| adfs.samlp_token_requests_success | success | requests/s |
+| adfs.sso_authentications | success, failure | authentications/s |
+| adfs.token_requests | requests | requests/s |
+| adfs.userpassword_authentications | success, failure | authentications/s |
+| adfs.windows_integrated_authentications | authentications | authentications/s |
+| adfs.wsfed_token_requests_success | success | requests/s |
+| adfs.wstrust_token_requests_success | success | requests/s |
+| exchange.activesync_ping_cmds_pending | pending | commands |
+| exchange.activesync_requests | received | requests/s |
+| exchange.activesync_sync_cmds | processed | commands/s |
+| exchange.autodiscover_requests | processed | requests/s |
+| exchange.avail_service_requests | serviced | requests/s |
+| exchange.owa_current_unique_users | logged-in | users |
+| exchange.owa_requests_total | handled | requests/s |
+| exchange.rpc_active_user_count | active | users |
+| exchange.rpc_avg_latency | latency | seconds |
+| exchange.rpc_connection_count | connections | connections |
+| exchange.rpc_operations | operations | operations/s |
+| exchange.rpc_requests | processed | requests |
+| exchange.rpc_user_count | users | users |
+| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_poison | low, high, none, normal | messages/s |
+| hyperv.vms_health | ok, critical | vms |
+| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |
+| hyperv.root_partition_attached_devices | attached | devices |
+| hyperv.root_partition_deposited_pages | deposited | pages |
+| hyperv.root_partition_skipped_interrupts | skipped | interrupts |
+| hyperv.root_partition_device_dma_errors | illegal_dma | requests |
+| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |
+| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |
+| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |
+| hyperv.root_partition_address_space | address_spaces | address spaces |
+| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |
+| hyperv.root_partition_virtual_tlb_pages | used | pages |
+
+### Per cpu core
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| core | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |
+| windows.cpu_core_interrupts | interrupts | interrupts/s |
+| windows.cpu_core_dpcs | dpcs | dpcs/s |
+| windows.cpu_core_cstate | c1, c2, c3 | percentage |
+
+### Per logical disk
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| disk | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.logical_disk_utilization | free, used | bytes |
+| windows.logical_disk_bandwidth | read, write | bytes/s |
+| windows.logical_disk_operations | reads, writes | operations/s |
+| windows.logical_disk_latency | read, write | seconds |
+
+### Per network device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| nic | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.net_nic_bandwidth | received, sent | kilobits/s |
+| windows.net_nic_packets | received, sent | packets/s |
+| windows.net_nic_errors | inbound, outbound | errors/s |
+| windows.net_nic_discarded | inbound, outbound | discards/s |
+
+### Per thermalzone
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| thermalzone | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.thermalzone_temperature | temperature | celsius |
+
+### Per service
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| service | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |
+| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |
+
+### Per website
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| website | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| iis.website_traffic | received, sent | bytes/s |
+| iis.website_requests_rate | requests | requests/s |
+| iis.website_active_connections_count | active | connections |
+| iis.website_users_count | anonymous, non_anonymous | users |
+| iis.website_connection_attempts_rate | connection | attempts/s |
+| iis.website_isapi_extension_requests_count | isapi | requests |
+| iis.website_isapi_extension_requests_rate | isapi | requests/s |
+| iis.website_ftp_file_transfer_rate | received, sent | files/s |
+| iis.website_logon_attempts_rate | logon | attempts/s |
+| iis.website_errors_rate | document_locked, document_not_found | errors/s |
+| iis.website_uptime | document_locked, document_not_found | seconds |
+
+### Per mssql instance
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.instance_accessmethods_page_splits | page | splits/s |
+| mssql.instance_cache_hit_ratio | hit_ratio | percentage |
+| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |
+| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |
+| mssql.instance_bufman_iops | read, written | iops |
+| mssql.instance_blocked_processes | blocked | processes |
+| mssql.instance_user_connection | user | connections |
+| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |
+| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |
+| mssql.instance_memmgr_pending_memory_grants | pending | processes |
+| mssql.instance_memmgr_server_memory | memory | bytes |
+| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |
+| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |
+| mssql.instance_sqlstats_batch_requests | batch | requests/s |
+| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |
+| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |
+| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |
+
+### Per database
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+| database | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.database_active_transactions | active | transactions |
+| mssql.database_backup_restore_operations | backup | operations/s |
+| mssql.database_data_files_size | size | bytes |
+| mssql.database_log_flushed | flushed | bytes/s |
+| mssql.database_log_flushes | log | flushes/s |
+| mssql.database_transactions | transactions | transactions/s |
+| mssql.database_write_transactions | write | transactions/s |
+
+### Per certificate template
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cert_template | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| adcs.cert_template_requests | requests | requests/s |
+| adcs.cert_template_failed_requests | failed | requests/s |
+| adcs.cert_template_issued_requests | issued | requests/s |
+| adcs.cert_template_pending_requests | pending | requests/s |
+| adcs.cert_template_request_processing_time | processing_time | seconds |
+| adcs.cert_template_retrievals | retrievals | retrievals/s |
+| adcs.cert_template_retrieval_processing_time | processing_time | seconds |
+| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |
+| adcs.cert_template_request_policy_module_processing | processing_time | seconds |
+| adcs.cert_template_challenge_responses | challenge | responses/s |
+| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |
+| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |
+| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |
+
+### Per process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| process | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| netframework.clrexception_thrown | exceptions | exceptions/s |
+| netframework.clrexception_filters | filters | filters/s |
+| netframework.clrexception_finallys | finallys | finallys/s |
+| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |
+| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |
+| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |
+| netframework.clrinterop_interop_stubs_created | created | stubs/s |
+| netframework.clrjit_methods | jit-compiled | methods/s |
+| netframework.clrjit_time | time | percentage |
+| netframework.clrjit_standard_failures | failures | failures/s |
+| netframework.clrjit_il_bytes | compiled_msil | bytes/s |
+| netframework.clrloading_loader_heap_size | committed | bytes |
+| netframework.clrloading_appdomains_loaded | loaded | domain/s |
+| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |
+| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |
+| netframework.clrloading_classes_loaded | loaded | classes/s |
+| netframework.clrloading_class_load_failures | class_load | failures/s |
+| netframework.clrlocksandthreads_queue_length | threads | threads/s |
+| netframework.clrlocksandthreads_current_logical_threads | logical | threads |
+| netframework.clrlocksandthreads_current_physical_threads | physical | threads |
+| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |
+| netframework.clrlocksandthreads_contentions | contentions | contentions/s |
+| netframework.clrmemory_allocated_bytes | allocated | bytes/s |
+| netframework.clrmemory_finalization_survivors | survived | objects |
+| netframework.clrmemory_heap_size | heap | bytes |
+| netframework.clrmemory_promoted | promoted | bytes |
+| netframework.clrmemory_number_gc_handles | used | handles |
+| netframework.clrmemory_collections | gc | gc/s |
+| netframework.clrmemory_induced_gc | gc | gc/s |
+| netframework.clrmemory_number_pinned_objects | pinned | objects |
+| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |
+| netframework.clrmemory_committed | committed | bytes |
+| netframework.clrmemory_reserved | reserved | bytes |
+| netframework.clrmemory_gc_time | time | percentage |
+| netframework.clrremoting_channels | registered | channels/s |
+| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |
+| netframework.clrremoting_context_bound_objects | allocated | objects/s |
+| netframework.clrremoting_context_proxies | objects | objects/s |
+| netframework.clrremoting_contexts | contexts | contexts |
+| netframework.clrremoting_remote_calls | rpc | calls/s |
+| netframework.clrsecurity_link_time_checks | linktime | checks/s |
+| netframework.clrsecurity_checks_time | time | percentage |
+| netframework.clrsecurity_stack_walk_depth | stack | depth |
+| netframework.clrsecurity_runtime_checks | runtime | checks/s |
+
+### Per exchange workload
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.workload_active_tasks | active | tasks |
+| exchange.workload_completed_tasks | completed | tasks/s |
+| exchange.workload_queued_tasks | queued | tasks/s |
+| exchange.workload_yielded_tasks | yielded | tasks/s |
+| exchange.workload_activity_status | active, paused | status |
+
+### Per ldap process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |
+| exchange.ldap_read_time | read | seconds |
+| exchange.ldap_search_time | search | seconds |
+| exchange.ldap_write_time | write | seconds |
+| exchange.ldap_timeout_errors | timeout | errors/s |
+
+### Per http proxy
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.http_proxy_avg_auth_latency | latency | seconds |
+| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |
+| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |
+| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |
+| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |
+| exchange.http_proxy_requests | processed | requests/s |
+
+### Per vm
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_name | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |
+| hyperv.vm_memory_physical | assigned_memory | MiB |
+| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |
+| hyperv.vm_memory_pressure_current | pressure | percentage |
+| hyperv.vm_vid_physical_pages_allocated | allocated | pages |
+| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |
+
+### Per vm device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_device_bytes | read, written | bytes/s |
+| hyperv.vm_device_operations | read, write | operations/s |
+| hyperv.vm_device_errors | errors | errors/s |
+
+### Per vm interface
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_interface | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_interface_bytes | received, sent | bytes/s |
+| hyperv.vm_interface_packets | received, sent | packets/s |
+| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |
+
+### Per vswitch
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vswitch | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vswitch_bytes | received, sent | bytes/s |
+| hyperv.vswitch_packets | received, sent | packets/s |
+| hyperv.vswitch_directed_packets | received, sent | packets/s |
+| hyperv.vswitch_broadcast_packets | received, sent | packets/s |
+| hyperv.vswitch_multicast_packets | received, sent | packets/s |
+| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_packets_flooded | flooded | packets/s |
+| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |
+| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |
+| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |
+| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |
+| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Windows exporter
+
+To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).
+
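+To confirm that Hyper-V metrics are being exposed, the exporter's `hyperv` collector must be enabled. You can then query the endpoint from the host running Netdata. A minimal check, assuming the default port `9182` and that Hyper-V metrics are exported with the `windows_hyperv_` prefix:
+
+```bash
+# Count Hyper-V metric lines; a count of zero suggests the hyperv collector is not enabled.
+curl -s http://192.0.2.1:9182/metrics | grep -c '^windows_hyperv_'
+```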
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/windows.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/windows.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
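+As an illustrative sketch, these can be set at the top level of `go.d/windows.conf` and overridden per job:
+
+```yaml
+# go.d/windows.conf (illustrative values)
+update_every: 5
+
+jobs:
+  - name: hyperv_host
+    url: http://192.0.2.1:9182/metrics
+    update_every: 10  # per-job override of the global value
+```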
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: https://192.0.2.1:9182/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Virtual Node
+
+The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.
+You can create a virtual node for all your Windows machines and control them as separate entities.
+
+To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:
+
+> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.
+
+```yaml
+# /etc/netdata/vnodes/vnodes.conf
+- hostname: win_server
+ guid: <value>
+```
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ vnode: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from multiple remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server1
+ url: http://192.0.2.1:9182/metrics
+
+ - name: win_server2
+ url: http://192.0.2.2:9182/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m windows
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep windows
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep windows /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep windows
+```
+
+
diff --git a/src/go/plugin/go.d/modules/windows/integrations/ms_exchange.md b/src/go/plugin/go.d/modules/windows/integrations/ms_exchange.md
new file mode 100644
index 000000000..24d416021
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/integrations/ms_exchange.md
@@ -0,0 +1,843 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/integrations/ms_exchange.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/metadata.yaml"
+sidebar_label: "MS Exchange"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MS Exchange
+
+
+<img src="https://netdata.cloud/img/exchange.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: windows
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).
+
+
+It collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+Supported collectors:
+
+- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)
+- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)
+- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)
+- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)
+- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)
+- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)
+- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)
+- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)
+- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)
+- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)
+- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)
+- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)
+- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)
+- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)
+- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)
+- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)
+- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)
+- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)
+- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)
+- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)
+- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)
+- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)
+- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)
+- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)
+- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)
+
+
+### Per Active Directory instance
+
+These metrics refer to the entire monitored host.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |
+| windows.memory_utilization | available, used | bytes |
+| windows.memory_page_faults | page_faults | events/s |
+| windows.memory_swap_utilization | available, used | bytes |
+| windows.memory_swap_operations | read, write | operations/s |
+| windows.memory_swap_pages | read, written | pages/s |
+| windows.memory_cached | cached | KiB |
+| windows.memory_cache_faults | cache_faults | events/s |
+| windows.memory_system_pool | paged, non-paged | bytes |
+| windows.tcp_conns_established | ipv4, ipv6 | connections |
+| windows.tcp_conns_active | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |
+| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |
+| windows.tcp_segments_received | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |
+| windows.os_processes | processes | number |
+| windows.os_users | users | users |
+| windows.os_visible_memory_usage | free, used | bytes |
+| windows.os_paging_files_usage | free, used | bytes |
+| windows.system_threads | threads | number |
+| windows.system_uptime | time | seconds |
+| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |
+| windows.processes_cpu_utilization | a dimension per process | percentage |
+| windows.processes_handles | a dimension per process | handles |
+| windows.processes_io_bytes | a dimension per process | bytes/s |
+| windows.processes_io_operations | a dimension per process | operations/s |
+| windows.processes_page_faults | a dimension per process | pgfaults/s |
+| windows.processes_page_file_bytes | a dimension per process | bytes |
+| windows.processes_pool_bytes | a dimension per process | bytes |
+| windows.processes_threads | a dimension per process | threads |
+| ad.database_operations | add, delete, modify, recycle | operations/s |
+| ad.directory_operations | read, write, search | operations/s |
+| ad.name_cache_lookups | lookups | lookups/s |
+| ad.name_cache_hits | hits | hits/s |
+| ad.atq_average_request_latency | time | seconds |
+| ad.atq_outstanding_requests | outstanding | requests |
+| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |
+| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |
+| ad.dra_replication_properties_updated | inbound, outbound | properties/s |
+| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |
+| ad.dra_replication_pending_syncs | pending | syncs |
+| ad.dra_replication_sync_requests | requests | requests/s |
+| ad.ds_threads | in_use | threads |
+| ad.ldap_last_bind_time | last_bind | seconds |
+| ad.binds | binds | binds/s |
+| ad.ldap_searches | searches | searches/s |
+| adfs.ad_login_connection_failures | connection | failures/s |
+| adfs.certificate_authentications | authentications | authentications/s |
+| adfs.db_artifact_failures | connection | failures/s |
+| adfs.db_artifact_query_time_seconds | query_time | seconds/s |
+| adfs.db_config_failures | connection | failures/s |
+| adfs.db_config_query_time_seconds | query_time | seconds/s |
+| adfs.device_authentications | authentications | authentications/s |
+| adfs.external_authentications | success, failure | authentications/s |
+| adfs.federated_authentications | authentications | authentications/s |
+| adfs.federation_metadata_requests | requests | requests/s |
+| adfs.oauth_authorization_requests | requests | requests/s |
+| adfs.oauth_client_authentications | success, failure | authentications/s |
+| adfs.oauth_client_credentials_requests | success, failure | requests/s |
+| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |
+| adfs.oauth_client_windows_authentications | success, failure | authentications/s |
+| adfs.oauth_logon_certificate_requests | success, failure | requests/s |
+| adfs.oauth_password_grant_requests | success, failure | requests/s |
+| adfs.oauth_token_requests_success | success | requests/s |
+| adfs.passive_requests | passive | requests/s |
+| adfs.passport_authentications | passport | authentications/s |
+| adfs.password_change_requests | success, failure | requests/s |
+| adfs.samlp_token_requests_success | success | requests/s |
+| adfs.sso_authentications | success, failure | authentications/s |
+| adfs.token_requests | requests | requests/s |
+| adfs.userpassword_authentications | success, failure | authentications/s |
+| adfs.windows_integrated_authentications | authentications | authentications/s |
+| adfs.wsfed_token_requests_success | success | requests/s |
+| adfs.wstrust_token_requests_success | success | requests/s |
+| exchange.activesync_ping_cmds_pending | pending | commands |
+| exchange.activesync_requests | received | requests/s |
+| exchange.activesync_sync_cmds | processed | commands/s |
+| exchange.autodiscover_requests | processed | requests/s |
+| exchange.avail_service_requests | serviced | requests/s |
+| exchange.owa_current_unique_users | logged-in | users |
+| exchange.owa_requests_total | handled | requests/s |
+| exchange.rpc_active_user_count | active | users |
+| exchange.rpc_avg_latency | latency | seconds |
+| exchange.rpc_connection_count | connections | connections |
+| exchange.rpc_operations | operations | operations/s |
+| exchange.rpc_requests | processed | requests |
+| exchange.rpc_user_count | users | users |
+| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_poison | low, high, none, normal | messages/s |
+| hyperv.vms_health | ok, critical | vms |
+| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |
+| hyperv.root_partition_attached_devices | attached | devices |
+| hyperv.root_partition_deposited_pages | deposited | pages |
+| hyperv.root_partition_skipped_interrupts | skipped | interrupts |
+| hyperv.root_partition_device_dma_errors | illegal_dma | requests |
+| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |
+| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |
+| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |
+| hyperv.root_partition_address_space | address_spaces | address spaces |
+| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |
+| hyperv.root_partition_virtual_tlb_pages | used | pages |
+
+### Per cpu core
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| core | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |
+| windows.cpu_core_interrupts | interrupts | interrupts/s |
+| windows.cpu_core_dpcs | dpcs | dpcs/s |
+| windows.cpu_core_cstate | c1, c2, c3 | percentage |
+
+### Per logical disk
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| disk | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.logical_disk_utilization | free, used | bytes |
+| windows.logical_disk_bandwidth | read, write | bytes/s |
+| windows.logical_disk_operations | reads, writes | operations/s |
+| windows.logical_disk_latency | read, write | seconds |
+
+### Per network device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| nic | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.net_nic_bandwidth | received, sent | kilobits/s |
+| windows.net_nic_packets | received, sent | packets/s |
+| windows.net_nic_errors | inbound, outbound | errors/s |
+| windows.net_nic_discarded | inbound, outbound | discards/s |
+
+### Per thermalzone
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| thermalzone | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.thermalzone_temperature | temperature | celsius |
+
+### Per service
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| service | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |
+| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |
+
+### Per website
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| website | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| iis.website_traffic | received, sent | bytes/s |
+| iis.website_requests_rate | requests | requests/s |
+| iis.website_active_connections_count | active | connections |
+| iis.website_users_count | anonymous, non_anonymous | users |
+| iis.website_connection_attempts_rate | connection | attempts/s |
+| iis.website_isapi_extension_requests_count | isapi | requests |
+| iis.website_isapi_extension_requests_rate | isapi | requests/s |
+| iis.website_ftp_file_transfer_rate | received, sent | files/s |
+| iis.website_logon_attempts_rate | logon | attempts/s |
+| iis.website_errors_rate | document_locked, document_not_found | errors/s |
+| iis.website_uptime | document_locked, document_not_found | seconds |
+
+### Per mssql instance
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.instance_accessmethods_page_splits | page | splits/s |
+| mssql.instance_cache_hit_ratio | hit_ratio | percentage |
+| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |
+| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |
+| mssql.instance_bufman_iops | read, written | iops |
+| mssql.instance_blocked_processes | blocked | processes |
+| mssql.instance_user_connection | user | connections |
+| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |
+| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |
+| mssql.instance_memmgr_pending_memory_grants | pending | processes |
+| mssql.instance_memmgr_server_memory | memory | bytes |
+| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |
+| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |
+| mssql.instance_sqlstats_batch_requests | batch | requests/s |
+| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |
+| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |
+| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |
+
+### Per database
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+| database | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.database_active_transactions | active | transactions |
+| mssql.database_backup_restore_operations | backup | operations/s |
+| mssql.database_data_files_size | size | bytes |
+| mssql.database_log_flushed | flushed | bytes/s |
+| mssql.database_log_flushes | log | flushes/s |
+| mssql.database_transactions | transactions | transactions/s |
+| mssql.database_write_transactions | write | transactions/s |
+
+### Per certificate template
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cert_template | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| adcs.cert_template_requests | requests | requests/s |
+| adcs.cert_template_failed_requests | failed | requests/s |
+| adcs.cert_template_issued_requests | issued | requests/s |
+| adcs.cert_template_pending_requests | pending | requests/s |
+| adcs.cert_template_request_processing_time | processing_time | seconds |
+| adcs.cert_template_retrievals | retrievals | retrievals/s |
+| adcs.cert_template_retrieval_processing_time | processing_time | seconds |
+| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |
+| adcs.cert_template_request_policy_module_processing | processing_time | seconds |
+| adcs.cert_template_challenge_responses | challenge | responses/s |
+| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |
+| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |
+| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |
+
+### Per process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| process | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| netframework.clrexception_thrown | exceptions | exceptions/s |
+| netframework.clrexception_filters | filters | filters/s |
+| netframework.clrexception_finallys | finallys | finallys/s |
+| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |
+| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |
+| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |
+| netframework.clrinterop_interop_stubs_created | created | stubs/s |
+| netframework.clrjit_methods | jit-compiled | methods/s |
+| netframework.clrjit_time | time | percentage |
+| netframework.clrjit_standard_failures | failures | failures/s |
+| netframework.clrjit_il_bytes | compiled_msil | bytes/s |
+| netframework.clrloading_loader_heap_size | committed | bytes |
+| netframework.clrloading_appdomains_loaded | loaded | domain/s |
+| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |
+| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |
+| netframework.clrloading_classes_loaded | loaded | classes/s |
+| netframework.clrloading_class_load_failures | class_load | failures/s |
+| netframework.clrlocksandthreads_queue_length | threads | threads/s |
+| netframework.clrlocksandthreads_current_logical_threads | logical | threads |
+| netframework.clrlocksandthreads_current_physical_threads | physical | threads |
+| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |
+| netframework.clrlocksandthreads_contentions | contentions | contentions/s |
+| netframework.clrmemory_allocated_bytes | allocated | bytes/s |
+| netframework.clrmemory_finalization_survivors | survived | objects |
+| netframework.clrmemory_heap_size | heap | bytes |
+| netframework.clrmemory_promoted | promoted | bytes |
+| netframework.clrmemory_number_gc_handles | used | handles |
+| netframework.clrmemory_collections | gc | gc/s |
+| netframework.clrmemory_induced_gc | gc | gc/s |
+| netframework.clrmemory_number_pinned_objects | pinned | objects |
+| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |
+| netframework.clrmemory_committed | committed | bytes |
+| netframework.clrmemory_reserved | reserved | bytes |
+| netframework.clrmemory_gc_time | time | percentage |
+| netframework.clrremoting_channels | registered | channels/s |
+| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |
+| netframework.clrremoting_context_bound_objects | allocated | objects/s |
+| netframework.clrremoting_context_proxies | objects | objects/s |
+| netframework.clrremoting_contexts | contexts | contexts |
+| netframework.clrremoting_remote_calls | rpc | calls/s |
+| netframework.clrsecurity_link_time_checks | linktime | checks/s |
+| netframework.clrsecurity_checks_time | time | percentage |
+| netframework.clrsecurity_stack_walk_depth | stack | depth |
+| netframework.clrsecurity_runtime_checks | runtime | checks/s |
+
+### Per exchange workload
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.workload_active_tasks | active | tasks |
+| exchange.workload_completed_tasks | completed | tasks/s |
+| exchange.workload_queued_tasks | queued | tasks/s |
+| exchange.workload_yielded_tasks | yielded | tasks/s |
+| exchange.workload_activity_status | active, paused | status |
+
+### Per ldap process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |
+| exchange.ldap_read_time | read | seconds |
+| exchange.ldap_search_time | search | seconds |
+| exchange.ldap_write_time | write | seconds |
+| exchange.ldap_timeout_errors | timeout | errors/s |
+
+### Per http proxy
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.http_proxy_avg_auth_latency | latency | seconds |
+| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |
+| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |
+| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |
+| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |
+| exchange.http_proxy_requests | processed | requests/s |
+
+### Per vm
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_name | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |
+| hyperv.vm_memory_physical | assigned_memory | MiB |
+| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |
+| hyperv.vm_memory_pressure_current | pressure | percentage |
+| hyperv.vm_vid_physical_pages_allocated | allocated | pages |
+| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |
+
+### Per vm device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_device_bytes | read, written | bytes/s |
+| hyperv.vm_device_operations | read, write | operations/s |
+| hyperv.vm_device_errors | errors | errors/s |
+
+### Per vm interface
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_interface | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_interface_bytes | received, sent | bytes/s |
+| hyperv.vm_interface_packets | received, sent | packets/s |
+| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |
+
+### Per vswitch
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vswitch | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vswitch_bytes | received, sent | bytes/s |
+| hyperv.vswitch_packets | received, sent | packets/s |
+| hyperv.vswitch_directed_packets | received, sent | packets/s |
+| hyperv.vswitch_broadcast_packets | received, sent | packets/s |
+| hyperv.vswitch_multicast_packets | received, sent | packets/s |
+| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_packets_flooded | flooded | packets/s |
+| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |
+| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |
+| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |
+| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |
+| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Windows exporter
+
+To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).
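+
+If you prefer an unattended install, a minimal sketch using the exporter's MSI package could look like the following. The file name, collector list, and port are illustrative assumptions rather than values taken from the guide; `ENABLED_COLLECTORS` and `LISTEN_PORT` are MSI properties documented by the windows_exporter project.
+
+```powershell
+# Hypothetical unattended install of the Windows exporter MSI (run from an elevated PowerShell).
+# Adjust the MSI file name to the release you actually downloaded.
+msiexec /i windows_exporter-0.25.1-amd64.msi `
+  ENABLED_COLLECTORS="cpu,memory,net,logical_disk,os,system,logon,tcp,process,service" `
+  LISTEN_PORT=9182 /qn
+```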
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/windows.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/windows.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: https://192.0.2.1:9182/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Virtual Node
+
+The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.
+You can create a virtual node for all your Windows machines and control them as separate entities.
+
+To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:
+
+> **Note**: To create a valid GUID, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.
+
+```yaml
+# /etc/netdata/vnodes/vnodes.conf
+- hostname: win_server
+ guid: <value>
+```
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ vnode: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from multiple remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server1
+ url: http://192.0.2.1:9182/metrics
+
+ - name: win_server2
+ url: http://192.0.2.2:9182/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m windows
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep windows
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep windows /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep windows
+```
+
+
diff --git a/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md b/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md
new file mode 100644
index 000000000..1dd59c30e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md
@@ -0,0 +1,843 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/integrations/ms_sql_server.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/metadata.yaml"
+sidebar_label: "MS SQL Server"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# MS SQL Server
+
+
+<img src="https://netdata.cloud/img/mssql.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: windows
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).
+
+
+It collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+Supported collectors:
+
+- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)
+- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)
+- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)
+- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)
+- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)
+- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)
+- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)
+- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)
+- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)
+- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)
+- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)
+- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)
+- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)
+- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)
+- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)
+- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)
+- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)
+- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)
+- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)
+- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)
+- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)
+- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)
+- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)
+- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)
+- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)
+
+
+### Per Active Directory instance
+
+These metrics refer to the entire monitored host.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |
+| windows.memory_utilization | available, used | bytes |
+| windows.memory_page_faults | page_faults | events/s |
+| windows.memory_swap_utilization | available, used | bytes |
+| windows.memory_swap_operations | read, write | operations/s |
+| windows.memory_swap_pages | read, written | pages/s |
+| windows.memory_cached | cached | KiB |
+| windows.memory_cache_faults | cache_faults | events/s |
+| windows.memory_system_pool | paged, non-paged | bytes |
+| windows.tcp_conns_established | ipv4, ipv6 | connections |
+| windows.tcp_conns_active | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |
+| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |
+| windows.tcp_segments_received | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |
+| windows.os_processes | processes | number |
+| windows.os_users | users | users |
+| windows.os_visible_memory_usage | free, used | bytes |
+| windows.os_paging_files_usage | free, used | bytes |
+| windows.system_threads | threads | number |
+| windows.system_uptime | time | seconds |
+| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |
+| windows.processes_cpu_utilization | a dimension per process | percentage |
+| windows.processes_handles | a dimension per process | handles |
+| windows.processes_io_bytes | a dimension per process | bytes/s |
+| windows.processes_io_operations | a dimension per process | operations/s |
+| windows.processes_page_faults | a dimension per process | pgfaults/s |
+| windows.processes_page_file_bytes | a dimension per process | bytes |
+| windows.processes_pool_bytes | a dimension per process | bytes |
+| windows.processes_threads | a dimension per process | threads |
+| ad.database_operations | add, delete, modify, recycle | operations/s |
+| ad.directory_operations | read, write, search | operations/s |
+| ad.name_cache_lookups | lookups | lookups/s |
+| ad.name_cache_hits | hits | hits/s |
+| ad.atq_average_request_latency | time | seconds |
+| ad.atq_outstanding_requests | outstanding | requests |
+| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |
+| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |
+| ad.dra_replication_properties_updated | inbound, outbound | properties/s |
+| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |
+| ad.dra_replication_pending_syncs | pending | syncs |
+| ad.dra_replication_sync_requests | requests | requests/s |
+| ad.ds_threads | in_use | threads |
+| ad.ldap_last_bind_time | last_bind | seconds |
+| ad.binds | binds | binds/s |
+| ad.ldap_searches | searches | searches/s |
+| adfs.ad_login_connection_failures | connection | failures/s |
+| adfs.certificate_authentications | authentications | authentications/s |
+| adfs.db_artifact_failures | connection | failures/s |
+| adfs.db_artifact_query_time_seconds | query_time | seconds/s |
+| adfs.db_config_failures | connection | failures/s |
+| adfs.db_config_query_time_seconds | query_time | seconds/s |
+| adfs.device_authentications | authentications | authentications/s |
+| adfs.external_authentications | success, failure | authentications/s |
+| adfs.federated_authentications | authentications | authentications/s |
+| adfs.federation_metadata_requests | requests | requests/s |
+| adfs.oauth_authorization_requests | requests | requests/s |
+| adfs.oauth_client_authentications | success, failure | authentications/s |
+| adfs.oauth_client_credentials_requests | success, failure | requests/s |
+| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |
+| adfs.oauth_client_windows_authentications | success, failure | authentications/s |
+| adfs.oauth_logon_certificate_requests | success, failure | requests/s |
+| adfs.oauth_password_grant_requests | success, failure | requests/s |
+| adfs.oauth_token_requests_success | success | requests/s |
+| adfs.passive_requests | passive | requests/s |
+| adfs.passport_authentications | passport | authentications/s |
+| adfs.password_change_requests | success, failure | requests/s |
+| adfs.samlp_token_requests_success | success | requests/s |
+| adfs.sso_authentications | success, failure | authentications/s |
+| adfs.token_requests | requests | requests/s |
+| adfs.userpassword_authentications | success, failure | authentications/s |
+| adfs.windows_integrated_authentications | authentications | authentications/s |
+| adfs.wsfed_token_requests_success | success | requests/s |
+| adfs.wstrust_token_requests_success | success | requests/s |
+| exchange.activesync_ping_cmds_pending | pending | commands |
+| exchange.activesync_requests | received | requests/s |
+| exchange.activesync_sync_cmds | processed | commands/s |
+| exchange.autodiscover_requests | processed | requests/s |
+| exchange.avail_service_requests | serviced | requests/s |
+| exchange.owa_current_unique_users | logged-in | users |
+| exchange.owa_requests_total | handled | requests/s |
+| exchange.rpc_active_user_count | active | users |
+| exchange.rpc_avg_latency | latency | seconds |
+| exchange.rpc_connection_count | connections | connections |
+| exchange.rpc_operations | operations | operations/s |
+| exchange.rpc_requests | processed | requests |
+| exchange.rpc_user_count | users | users |
+| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_poison | low, high, none, normal | messages/s |
+| hyperv.vms_health | ok, critical | vms |
+| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |
+| hyperv.root_partition_attached_devices | attached | devices |
+| hyperv.root_partition_deposited_pages | deposited | pages |
+| hyperv.root_partition_skipped_interrupts | skipped | interrupts |
+| hyperv.root_partition_device_dma_errors | illegal_dma | requests |
+| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |
+| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |
+| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |
+| hyperv.root_partition_address_space | address_spaces | address spaces |
+| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |
+| hyperv.root_partition_virtual_tlb_pages | used | pages |
+
+### Per cpu core
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| core | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |
+| windows.cpu_core_interrupts | interrupts | interrupts/s |
+| windows.cpu_core_dpcs | dpcs | dpcs/s |
+| windows.cpu_core_cstate | c1, c2, c3 | percentage |
+
+### Per logical disk
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| disk | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.logical_disk_utilization | free, used | bytes |
+| windows.logical_disk_bandwidth | read, write | bytes/s |
+| windows.logical_disk_operations | reads, writes | operations/s |
+| windows.logical_disk_latency | read, write | seconds |
+
+### Per network device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| nic | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.net_nic_bandwidth | received, sent | kilobits/s |
+| windows.net_nic_packets | received, sent | packets/s |
+| windows.net_nic_errors | inbound, outbound | errors/s |
+| windows.net_nic_discarded | inbound, outbound | discards/s |
+
+### Per thermalzone
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| thermalzone | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.thermalzone_temperature | temperature | celsius |
+
+### Per service
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| service | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |
+| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |
+
+### Per website
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| website | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| iis.website_traffic | received, sent | bytes/s |
+| iis.website_requests_rate | requests | requests/s |
+| iis.website_active_connections_count | active | connections |
+| iis.website_users_count | anonymous, non_anonymous | users |
+| iis.website_connection_attempts_rate | connection | attempts/s |
+| iis.website_isapi_extension_requests_count | isapi | requests |
+| iis.website_isapi_extension_requests_rate | isapi | requests/s |
+| iis.website_ftp_file_transfer_rate | received, sent | files/s |
+| iis.website_logon_attempts_rate | logon | attempts/s |
+| iis.website_errors_rate | document_locked, document_not_found | errors/s |
+| iis.website_uptime | document_locked, document_not_found | seconds |
+
+### Per mssql instance
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.instance_accessmethods_page_splits | page | splits/s |
+| mssql.instance_cache_hit_ratio | hit_ratio | percentage |
+| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |
+| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |
+| mssql.instance_bufman_iops | read, written | iops |
+| mssql.instance_blocked_processes | blocked | processes |
+| mssql.instance_user_connection | user | connections |
+| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |
+| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |
+| mssql.instance_memmgr_pending_memory_grants | pending | processes |
+| mssql.instance_memmgr_server_memory | memory | bytes |
+| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |
+| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |
+| mssql.instance_sqlstats_batch_requests | batch | requests/s |
+| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |
+| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |
+| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |
+
+### Per database
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+| database | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.database_active_transactions | active | transactions |
+| mssql.database_backup_restore_operations | backup | operations/s |
+| mssql.database_data_files_size | size | bytes |
+| mssql.database_log_flushed | flushed | bytes/s |
+| mssql.database_log_flushes | log | flushes/s |
+| mssql.database_transactions | transactions | transactions/s |
+| mssql.database_write_transactions | write | transactions/s |
+
+### Per certificate template
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cert_template | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| adcs.cert_template_requests | requests | requests/s |
+| adcs.cert_template_failed_requests | failed | requests/s |
+| adcs.cert_template_issued_requests | issued | requests/s |
+| adcs.cert_template_pending_requests | pending | requests/s |
+| adcs.cert_template_request_processing_time | processing_time | seconds |
+| adcs.cert_template_retrievals | retrievals | retrievals/s |
+| adcs.cert_template_retrieval_processing_time | processing_time | seconds |
+| adcs.cert_template_request_cryptographic_signing_time | singing_time | seconds |
+| adcs.cert_template_request_policy_module_processing | processing_time | seconds |
+| adcs.cert_template_challenge_responses | challenge | responses/s |
+| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |
+| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |
+| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |
+
+### Per process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| process | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| netframework.clrexception_thrown | exceptions | exceptions/s |
+| netframework.clrexception_filters | filters | filters/s |
+| netframework.clrexception_finallys | finallys | finallys/s |
+| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |
+| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |
+| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |
+| netframework.clrinterop_interop_stubs_created | created | stubs/s |
+| netframework.clrjit_methods | jit-compiled | methods/s |
+| netframework.clrjit_time | time | percentage |
+| netframework.clrjit_standard_failures | failures | failures/s |
+| netframework.clrjit_il_bytes | compiled_msil | bytes/s |
+| netframework.clrloading_loader_heap_size | committed | bytes |
+| netframework.clrloading_appdomains_loaded | loaded | domain/s |
+| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |
+| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |
+| netframework.clrloading_classes_loaded | loaded | classes/s |
+| netframework.clrloading_class_load_failures | class_load | failures/s |
+| netframework.clrlocksandthreads_queue_length | threads | threads/s |
+| netframework.clrlocksandthreads_current_logical_threads | logical | threads |
+| netframework.clrlocksandthreads_current_physical_threads | physical | threads |
+| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |
+| netframework.clrlocksandthreads_contentions | contentions | contentions/s |
+| netframework.clrmemory_allocated_bytes | allocated | bytes/s |
+| netframework.clrmemory_finalization_survivors | survived | objects |
+| netframework.clrmemory_heap_size | heap | bytes |
+| netframework.clrmemory_promoted | promoted | bytes |
+| netframework.clrmemory_number_gc_handles | used | handles |
+| netframework.clrmemory_collections | gc | gc/s |
+| netframework.clrmemory_induced_gc | gc | gc/s |
+| netframework.clrmemory_number_pinned_objects | pinned | objects |
+| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |
+| netframework.clrmemory_committed | committed | bytes |
+| netframework.clrmemory_reserved | reserved | bytes |
+| netframework.clrmemory_gc_time | time | percentage |
+| netframework.clrremoting_channels | registered | channels/s |
+| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |
+| netframework.clrremoting_context_bound_objects | allocated | objects/s |
+| netframework.clrremoting_context_proxies | objects | objects/s |
+| netframework.clrremoting_contexts | contexts | contexts |
+| netframework.clrremoting_remote_calls | rpc | calls/s |
+| netframework.clrsecurity_link_time_checks | linktime | checks/s |
+| netframework.clrsecurity_checks_time | time | percentage |
+| netframework.clrsecurity_stack_walk_depth | stack | depth |
+| netframework.clrsecurity_runtime_checks | runtime | checks/s |
+
+### Per exchange workload
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.workload_active_tasks | active | tasks |
+| exchange.workload_completed_tasks | completed | tasks/s |
+| exchange.workload_queued_tasks | queued | tasks/s |
+| exchange.workload_yielded_tasks | yielded | tasks/s |
+| exchange.workload_activity_status | active, paused | status |
+
+### Per ldap process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |
+| exchange.ldap_read_time | read | seconds |
+| exchange.ldap_search_time | search | seconds |
+| exchange.ldap_write_time | write | seconds |
+| exchange.ldap_timeout_errors | timeout | errors/s |
+
+### Per http proxy
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.http_proxy_avg_auth_latency | latency | seconds |
+| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |
+| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |
+| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |
+| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |
+| exchange.http_proxy_requests | processed | requests/s |
+
+### Per vm
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_name | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_cpu_usage | gues, hypervisor, remote | percentage |
+| hyperv.vm_memory_physical | assigned_memory | MiB |
+| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |
+| hyperv.vm_memory_pressure_current | pressure | percentage |
+| hyperv.vm_vid_physical_pages_allocated | allocated | pages |
+| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |
+
+### Per vm device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_device_bytes | read, written | bytes/s |
+| hyperv.vm_device_operations | read, write | operations/s |
+| hyperv.vm_device_errors | errors | errors/s |
+
+### Per vm interface
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_interface | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_interface_bytes | received, sent | bytes/s |
+| hyperv.vm_interface_packets | received, sent | packets/s |
+| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |
+
+### Per vswitch
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vswitch | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vswitch_bytes | received, sent | bytes/s |
+| hyperv.vswitch_packets | received, sent | packets/s |
+| hyperv.vswitch_directed_packets | received, sent | packets/s |
+| hyperv.vswitch_broadcast_packets | received, sent | packets/s |
+| hyperv.vswitch_multicast_packets | received, sent | packets/s |
+| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_packets_flooded | flooded | packets/s |
+| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |
+| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |
+| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |
+| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |
+| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Windows exporter
+
+To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).
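+
+After installation, you can sanity-check that the exporter endpoint responds before pointing Netdata at it. A small sketch, assuming the exporter's default port 9182:
+
+```powershell
+# Quick check that the Windows exporter is serving Prometheus metrics.
+# Replace localhost with the server's address when checking from another machine.
+$resp = Invoke-WebRequest -UseBasicParsing -Uri "http://localhost:9182/metrics"
+$resp.StatusCode                                      # expect 200
+($resp.Content -split "`n") | Select-Object -First 5  # first few metric lines
+```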
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/windows.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/windows.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: https://192.0.2.1:9182/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Virtual Node
+
+The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.
+You can create a virtual node for all your Windows machines and control them as separate entities.
+
+To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:
+
+> **Note**: To create a valid GUID, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.
+
+```yaml
+# /etc/netdata/vnodes/vnodes.conf
+- hostname: win_server
+ guid: <value>
+```
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ vnode: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from multiple remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server1
+ url: http://192.0.2.1:9182/metrics
+
+ - name: win_server2
+ url: http://192.0.2.2:9182/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m windows
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep windows
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep windows /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep windows
+```
+
+
diff --git a/src/go/plugin/go.d/modules/windows/integrations/net_framework.md b/src/go/plugin/go.d/modules/windows/integrations/net_framework.md
new file mode 100644
index 000000000..01879ddea
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/integrations/net_framework.md
@@ -0,0 +1,843 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/integrations/net_framework.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/metadata.yaml"
+sidebar_label: "NET Framework"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Windows Systems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# NET Framework
+
+
+<img src="https://netdata.cloud/img/dotnet.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: windows
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).
+
+
+It collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It detects Windows exporter instances running on localhost (requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+Supported collectors:
+
+- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)
+- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)
+- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)
+- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)
+- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)
+- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)
+- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)
+- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)
+- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)
+- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)
+- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)
+- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)
+- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)
+- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)
+- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)
+- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)
+- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)
+- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)
+- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)
+- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)
+- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)
+- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)
+- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)
+- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)
+- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)
+
+
+### Per Active Directory instance
+
+These metrics refer to the entire monitored host.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |
+| windows.memory_utilization | available, used | bytes |
+| windows.memory_page_faults | page_faults | events/s |
+| windows.memory_swap_utilization | available, used | bytes |
+| windows.memory_swap_operations | read, write | operations/s |
+| windows.memory_swap_pages | read, written | pages/s |
+| windows.memory_cached | cached | KiB |
+| windows.memory_cache_faults | cache_faults | events/s |
+| windows.memory_system_pool | paged, non-paged | bytes |
+| windows.tcp_conns_established | ipv4, ipv6 | connections |
+| windows.tcp_conns_active | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |
+| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |
+| windows.tcp_segments_received | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |
+| windows.os_processes | processes | number |
+| windows.os_users | users | users |
+| windows.os_visible_memory_usage | free, used | bytes |
+| windows.os_paging_files_usage | free, used | bytes |
+| windows.system_threads | threads | number |
+| windows.system_uptime | time | seconds |
+| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |
+| windows.processes_cpu_utilization | a dimension per process | percentage |
+| windows.processes_handles | a dimension per process | handles |
+| windows.processes_io_bytes | a dimension per process | bytes/s |
+| windows.processes_io_operations | a dimension per process | operations/s |
+| windows.processes_page_faults | a dimension per process | pgfaults/s |
+| windows.processes_page_file_bytes | a dimension per process | bytes |
+| windows.processes_pool_bytes | a dimension per process | bytes |
+| windows.processes_threads | a dimension per process | threads |
+| ad.database_operations | add, delete, modify, recycle | operations/s |
+| ad.directory_operations | read, write, search | operations/s |
+| ad.name_cache_lookups | lookups | lookups/s |
+| ad.name_cache_hits | hits | hits/s |
+| ad.atq_average_request_latency | time | seconds |
+| ad.atq_outstanding_requests | outstanding | requests |
+| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |
+| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |
+| ad.dra_replication_properties_updated | inbound, outbound | properties/s |
+| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |
+| ad.dra_replication_pending_syncs | pending | syncs |
+| ad.dra_replication_sync_requests | requests | requests/s |
+| ad.ds_threads | in_use | threads |
+| ad.ldap_last_bind_time | last_bind | seconds |
+| ad.binds | binds | binds/s |
+| ad.ldap_searches | searches | searches/s |
+| adfs.ad_login_connection_failures | connection | failures/s |
+| adfs.certificate_authentications | authentications | authentications/s |
+| adfs.db_artifact_failures | connection | failures/s |
+| adfs.db_artifact_query_time_seconds | query_time | seconds/s |
+| adfs.db_config_failures | connection | failures/s |
+| adfs.db_config_query_time_seconds | query_time | seconds/s |
+| adfs.device_authentications | authentications | authentications/s |
+| adfs.external_authentications | success, failure | authentications/s |
+| adfs.federated_authentications | authentications | authentications/s |
+| adfs.federation_metadata_requests | requests | requests/s |
+| adfs.oauth_authorization_requests | requests | requests/s |
+| adfs.oauth_client_authentications | success, failure | authentications/s |
+| adfs.oauth_client_credentials_requests | success, failure | requests/s |
+| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |
+| adfs.oauth_client_windows_authentications | success, failure | authentications/s |
+| adfs.oauth_logon_certificate_requests | success, failure | requests/s |
+| adfs.oauth_password_grant_requests | success, failure | requests/s |
+| adfs.oauth_token_requests_success | success | requests/s |
+| adfs.passive_requests | passive | requests/s |
+| adfs.passport_authentications | passport | authentications/s |
+| adfs.password_change_requests | success, failure | requests/s |
+| adfs.samlp_token_requests_success | success | requests/s |
+| adfs.sso_authentications | success, failure | authentications/s |
+| adfs.token_requests | requests | requests/s |
+| adfs.userpassword_authentications | success, failure | authentications/s |
+| adfs.windows_integrated_authentications | authentications | authentications/s |
+| adfs.wsfed_token_requests_success | success | requests/s |
+| adfs.wstrust_token_requests_success | success | requests/s |
+| exchange.activesync_ping_cmds_pending | pending | commands |
+| exchange.activesync_requests | received | requests/s |
+| exchange.activesync_sync_cmds | processed | commands/s |
+| exchange.autodiscover_requests | processed | requests/s |
+| exchange.avail_service_requests | serviced | requests/s |
+| exchange.owa_current_unique_users | logged-in | users |
+| exchange.owa_requests_total | handled | requests/s |
+| exchange.rpc_active_user_count | active | users |
+| exchange.rpc_avg_latency | latency | seconds |
+| exchange.rpc_connection_count | connections | connections |
+| exchange.rpc_operations | operations | operations/s |
+| exchange.rpc_requests | processed | requests |
+| exchange.rpc_user_count | users | users |
+| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_poison | low, high, none, normal | messages/s |
+| hyperv.vms_health | ok, critical | vms |
+| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |
+| hyperv.root_partition_attached_devices | attached | devices |
+| hyperv.root_partition_deposited_pages | deposited | pages |
+| hyperv.root_partition_skipped_interrupts | skipped | interrupts |
+| hyperv.root_partition_device_dma_errors | illegal_dma | requests |
+| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |
+| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |
+| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |
+| hyperv.root_partition_address_space | address_spaces | address spaces |
+| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |
+| hyperv.root_partition_virtual_tlb_pages | used | pages |
+
+### Per cpu core
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| core | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |
+| windows.cpu_core_interrupts | interrupts | interrupts/s |
+| windows.cpu_core_dpcs | dpcs | dpcs/s |
+| windows.cpu_core_cstate | c1, c2, c3 | percentage |
+
+### Per logical disk
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| disk | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.logical_disk_utilization | free, used | bytes |
+| windows.logical_disk_bandwidth | read, write | bytes/s |
+| windows.logical_disk_operations | reads, writes | operations/s |
+| windows.logical_disk_latency | read, write | seconds |
+
+### Per network device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| nic | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.net_nic_bandwidth | received, sent | kilobits/s |
+| windows.net_nic_packets | received, sent | packets/s |
+| windows.net_nic_errors | inbound, outbound | errors/s |
+| windows.net_nic_discarded | inbound, outbound | discards/s |
+
+### Per thermalzone
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| thermalzone | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.thermalzone_temperature | temperature | celsius |
+
+### Per service
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| service | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |
+| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |
+
+### Per website
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| website | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| iis.website_traffic | received, sent | bytes/s |
+| iis.website_requests_rate | requests | requests/s |
+| iis.website_active_connections_count | active | connections |
+| iis.website_users_count | anonymous, non_anonymous | users |
+| iis.website_connection_attempts_rate | connection | attempts/s |
+| iis.website_isapi_extension_requests_count | isapi | requests |
+| iis.website_isapi_extension_requests_rate | isapi | requests/s |
+| iis.website_ftp_file_transfer_rate | received, sent | files/s |
+| iis.website_logon_attempts_rate | logon | attempts/s |
+| iis.website_errors_rate | document_locked, document_not_found | errors/s |
+| iis.website_uptime | document_locked, document_not_found | seconds |
+
+### Per mssql instance
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.instance_accessmethods_page_splits | page | splits/s |
+| mssql.instance_cache_hit_ratio | hit_ratio | percentage |
+| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |
+| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |
+| mssql.instance_bufman_iops | read, written | iops |
+| mssql.instance_blocked_processes | blocked | processes |
+| mssql.instance_user_connection | user | connections |
+| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |
+| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |
+| mssql.instance_memmgr_pending_memory_grants | pending | processes |
+| mssql.instance_memmgr_server_memory | memory | bytes |
+| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |
+| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |
+| mssql.instance_sqlstats_batch_requests | batch | requests/s |
+| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |
+| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |
+| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |
+
+### Per database
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+| database | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.database_active_transactions | active | transactions |
+| mssql.database_backup_restore_operations | backup | operations/s |
+| mssql.database_data_files_size | size | bytes |
+| mssql.database_log_flushed | flushed | bytes/s |
+| mssql.database_log_flushes | log | flushes/s |
+| mssql.database_transactions | transactions | transactions/s |
+| mssql.database_write_transactions | write | transactions/s |
+
+### Per certificate template
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cert_template | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| adcs.cert_template_requests | requests | requests/s |
+| adcs.cert_template_failed_requests | failed | requests/s |
+| adcs.cert_template_issued_requests | issued | requests/s |
+| adcs.cert_template_pending_requests | pending | requests/s |
+| adcs.cert_template_request_processing_time | processing_time | seconds |
+| adcs.cert_template_retrievals | retrievals | retrievals/s |
+| adcs.cert_template_retrieval_processing_time | processing_time | seconds |
+| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |
+| adcs.cert_template_request_policy_module_processing | processing_time | seconds |
+| adcs.cert_template_challenge_responses | challenge | responses/s |
+| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |
+| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |
+| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |
+
+### Per process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| process | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| netframework.clrexception_thrown | exceptions | exceptions/s |
+| netframework.clrexception_filters | filters | filters/s |
+| netframework.clrexception_finallys | finallys | finallys/s |
+| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |
+| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |
+| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |
+| netframework.clrinterop_interop_stubs_created | created | stubs/s |
+| netframework.clrjit_methods | jit-compiled | methods/s |
+| netframework.clrjit_time | time | percentage |
+| netframework.clrjit_standard_failures | failures | failures/s |
+| netframework.clrjit_il_bytes | compiled_msil | bytes/s |
+| netframework.clrloading_loader_heap_size | committed | bytes |
+| netframework.clrloading_appdomains_loaded | loaded | domain/s |
+| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |
+| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |
+| netframework.clrloading_classes_loaded | loaded | classes/s |
+| netframework.clrloading_class_load_failures | class_load | failures/s |
+| netframework.clrlocksandthreads_queue_length | threads | threads/s |
+| netframework.clrlocksandthreads_current_logical_threads | logical | threads |
+| netframework.clrlocksandthreads_current_physical_threads | physical | threads |
+| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |
+| netframework.clrlocksandthreads_contentions | contentions | contentions/s |
+| netframework.clrmemory_allocated_bytes | allocated | bytes/s |
+| netframework.clrmemory_finalization_survivors | survived | objects |
+| netframework.clrmemory_heap_size | heap | bytes |
+| netframework.clrmemory_promoted | promoted | bytes |
+| netframework.clrmemory_number_gc_handles | used | handles |
+| netframework.clrmemory_collections | gc | gc/s |
+| netframework.clrmemory_induced_gc | gc | gc/s |
+| netframework.clrmemory_number_pinned_objects | pinned | objects |
+| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |
+| netframework.clrmemory_committed | committed | bytes |
+| netframework.clrmemory_reserved | reserved | bytes |
+| netframework.clrmemory_gc_time | time | percentage |
+| netframework.clrremoting_channels | registered | channels/s |
+| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |
+| netframework.clrremoting_context_bound_objects | allocated | objects/s |
+| netframework.clrremoting_context_proxies | objects | objects/s |
+| netframework.clrremoting_contexts | contexts | contexts |
+| netframework.clrremoting_remote_calls | rpc | calls/s |
+| netframework.clrsecurity_link_time_checks | linktime | checks/s |
+| netframework.clrsecurity_checks_time | time | percentage |
+| netframework.clrsecurity_stack_walk_depth | stack | depth |
+| netframework.clrsecurity_runtime_checks | runtime | checks/s |
+
+### Per exchange workload
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.workload_active_tasks | active | tasks |
+| exchange.workload_completed_tasks | completed | tasks/s |
+| exchange.workload_queued_tasks | queued | tasks/s |
+| exchange.workload_yielded_tasks | yielded | tasks/s |
+| exchange.workload_activity_status | active, paused | status |
+
+### Per ldap process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |
+| exchange.ldap_read_time | read | seconds |
+| exchange.ldap_search_time | search | seconds |
+| exchange.ldap_write_time | write | seconds |
+| exchange.ldap_timeout_errors | timeout | errors/s |
+
+### Per http proxy
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.http_proxy_avg_auth_latency | latency | seconds |
+| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |
+| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |
+| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |
+| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |
+| exchange.http_proxy_requests | processed | requests/s |
+
+### Per vm
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_name | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |
+| hyperv.vm_memory_physical | assigned_memory | MiB |
+| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |
+| hyperv.vm_memory_pressure_current | pressure | percentage |
+| hyperv.vm_vid_physical_pages_allocated | allocated | pages |
+| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |
+
+### Per vm device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_device_bytes | read, written | bytes/s |
+| hyperv.vm_device_operations | read, write | operations/s |
+| hyperv.vm_device_errors | errors | errors/s |
+
+### Per vm interface
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_interface | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_interface_bytes | received, sent | bytes/s |
+| hyperv.vm_interface_packets | received, sent | packets/s |
+| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |
+
+### Per vswitch
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vswitch | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vswitch_bytes | received, sent | bytes/s |
+| hyperv.vswitch_packets | received, sent | packets/s |
+| hyperv.vswitch_directed_packets | received, sent | packets/s |
+| hyperv.vswitch_broadcast_packets | received, sent | packets/s |
+| hyperv.vswitch_multicast_packets | received, sent | packets/s |
+| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_packets_flooded | flooded | packets/s |
+| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |
+| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |
+| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |
+| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |
+| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Windows exporter
+
+To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).
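+
+After the exporter is installed, you may want to confirm it is reachable from the machine that will collect the metrics. A minimal check, assuming the exporter's default port `9182` and using `192.0.2.1` as a placeholder for your Windows host:
+
+```bash
+# Print the first lines of the exporter's metrics endpoint.
+# Replace 192.0.2.1 with the address of your Windows machine.
+curl -s http://192.0.2.1:9182/metrics | head
+```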
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/windows.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/windows.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
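+As an illustration, a minimal sketch of `go.d/windows.conf` with these options set globally at the top level (the job URL is a placeholder):
+
+```yaml
+# Global defaults applied to every job in this file.
+update_every: 5          # collect every 5 seconds
+autodetection_retry: 60  # re-check unreachable jobs every 60 seconds
+
+jobs:
+  - name: win_server
+    url: http://192.0.2.1:9182/metrics
+```
+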
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+  - name: win_server
+    url: http://192.0.2.1:9182/metrics
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: win_server
+    url: http://192.0.2.1:9182/metrics
+    username: username
+    password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: win_server
+    url: https://192.0.2.1:9182/metrics
+    tls_skip_verify: yes
+
+```
+</details>
+
+##### Virtual Node
+
+The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.
+You can create a virtual node for all your Windows machines and control them as separate entities.
+
+To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:
+
+> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.
+
+```yaml
+# /etc/netdata/vnodes/vnodes.conf
+- hostname: win_server
+  guid: <value>
+```
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: win_server
+    vnode: win_server
+    url: http://192.0.2.1:9182/metrics
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from multiple remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: win_server1
+    url: http://192.0.2.1:9182/metrics
+
+  - name: win_server2
+    url: http://192.0.2.2:9182/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m windows
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep windows
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep windows /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata instance runs in a Docker container named "netdata" (replace the name if yours is different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep windows
+```
+
+
diff --git a/src/go/plugin/go.d/modules/windows/integrations/windows.md b/src/go/plugin/go.d/modules/windows/integrations/windows.md
new file mode 100644
index 000000000..60a3b7f30
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/integrations/windows.md
@@ -0,0 +1,843 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/integrations/windows.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/windows/metadata.yaml"
+sidebar_label: "Windows"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Windows Systems"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# Windows
+
+
+<img src="https://netdata.cloud/img/windows.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: windows
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g., Active Directory, MSSQL).
+
+
+It collects metrics by periodically sending HTTP requests to the [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It detects Windows exporter instances running on localhost (this requires using the [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
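+
+If the impact on a particular host is a concern, one lever on the Netdata side is the collection frequency. A small sketch (placeholder address) that polls a host less often than the default:
+
+```yaml
+jobs:
+  - name: win_server
+    url: http://192.0.2.1:9182/metrics
+    update_every: 5   # poll every 5 seconds instead of the default 1
+```
+
+Trimming the set of enabled collectors on the exporter side (see the windows_exporter documentation linked above) reduces the cost further.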
+
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+Supported collectors:
+
+- [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)
+- [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)
+- [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)
+- [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)
+- [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)
+- [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)
+- [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)
+- [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)
+- [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)
+- [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)
+- [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)
+- [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)
+- [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)
+- [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)
+- [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)
+- [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)
+- [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)
+- [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)
+- [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)
+- [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)
+- [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)
+- [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)
+- [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)
+- [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)
+- [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)
+
+
+### Per Active Directory instance
+
+These metrics refer to the entire monitored host.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_utilization_total | dpc, user, privileged, interrupt | percentage |
+| windows.memory_utilization | available, used | bytes |
+| windows.memory_page_faults | page_faults | events/s |
+| windows.memory_swap_utilization | available, used | bytes |
+| windows.memory_swap_operations | read, write | operations/s |
+| windows.memory_swap_pages | read, written | pages/s |
+| windows.memory_cached | cached | KiB |
+| windows.memory_cache_faults | cache_faults | events/s |
+| windows.memory_system_pool | paged, non-paged | bytes |
+| windows.tcp_conns_established | ipv4, ipv6 | connections |
+| windows.tcp_conns_active | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_passive | ipv4, ipv6 | connections/s |
+| windows.tcp_conns_failures | ipv4, ipv6 | failures/s |
+| windows.tcp_conns_resets | ipv4, ipv6 | resets/s |
+| windows.tcp_segments_received | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_sent | ipv4, ipv6 | segments/s |
+| windows.tcp_segments_retransmitted | ipv4, ipv6 | segments/s |
+| windows.os_processes | processes | number |
+| windows.os_users | users | users |
+| windows.os_visible_memory_usage | free, used | bytes |
+| windows.os_paging_files_usage | free, used | bytes |
+| windows.system_threads | threads | number |
+| windows.system_uptime | time | seconds |
+| windows.logon_type_sessions | system, interactive, network, batch, service, proxy, unlock, network_clear_text, new_credentials, remote_interactive, cached_interactive, cached_remote_interactive, cached_unlock | seconds |
+| windows.processes_cpu_utilization | a dimension per process | percentage |
+| windows.processes_handles | a dimension per process | handles |
+| windows.processes_io_bytes | a dimension per process | bytes/s |
+| windows.processes_io_operations | a dimension per process | operations/s |
+| windows.processes_page_faults | a dimension per process | pgfaults/s |
+| windows.processes_page_file_bytes | a dimension per process | bytes |
+| windows.processes_pool_bytes | a dimension per process | bytes |
+| windows.processes_threads | a dimension per process | threads |
+| ad.database_operations | add, delete, modify, recycle | operations/s |
+| ad.directory_operations | read, write, search | operations/s |
+| ad.name_cache_lookups | lookups | lookups/s |
+| ad.name_cache_hits | hits | hits/s |
+| ad.atq_average_request_latency | time | seconds |
+| ad.atq_outstanding_requests | outstanding | requests |
+| ad.dra_replication_intersite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_intrasite_compressed_traffic | inbound, outbound | bytes/s |
+| ad.dra_replication_sync_objects_remaining | inbound, outbound | objects |
+| ad.dra_replication_objects_filtered | inbound, outbound | objects/s |
+| ad.dra_replication_properties_updated | inbound, outbound | properties/s |
+| ad.dra_replication_properties_filtered | inbound, outbound | properties/s |
+| ad.dra_replication_pending_syncs | pending | syncs |
+| ad.dra_replication_sync_requests | requests | requests/s |
+| ad.ds_threads | in_use | threads |
+| ad.ldap_last_bind_time | last_bind | seconds |
+| ad.binds | binds | binds/s |
+| ad.ldap_searches | searches | searches/s |
+| adfs.ad_login_connection_failures | connection | failures/s |
+| adfs.certificate_authentications | authentications | authentications/s |
+| adfs.db_artifact_failures | connection | failures/s |
+| adfs.db_artifact_query_time_seconds | query_time | seconds/s |
+| adfs.db_config_failures | connection | failures/s |
+| adfs.db_config_query_time_seconds | query_time | seconds/s |
+| adfs.device_authentications | authentications | authentications/s |
+| adfs.external_authentications | success, failure | authentications/s |
+| adfs.federated_authentications | authentications | authentications/s |
+| adfs.federation_metadata_requests | requests | requests/s |
+| adfs.oauth_authorization_requests | requests | requests/s |
+| adfs.oauth_client_authentications | success, failure | authentications/s |
+| adfs.oauth_client_credentials_requests | success, failure | requests/s |
+| adfs.oauth_client_privkey_jwt_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_basic_authentications | success, failure | authentications/s |
+| adfs.oauth_client_secret_post_authentications | success, failure | authentications/s |
+| adfs.oauth_client_windows_authentications | success, failure | authentications/s |
+| adfs.oauth_logon_certificate_requests | success, failure | requests/s |
+| adfs.oauth_password_grant_requests | success, failure | requests/s |
+| adfs.oauth_token_requests_success | success | requests/s |
+| adfs.passive_requests | passive | requests/s |
+| adfs.passport_authentications | passport | authentications/s |
+| adfs.password_change_requests | success, failure | requests/s |
+| adfs.samlp_token_requests_success | success | requests/s |
+| adfs.sso_authentications | success, failure | authentications/s |
+| adfs.token_requests | requests | requests/s |
+| adfs.userpassword_authentications | success, failure | authentications/s |
+| adfs.windows_integrated_authentications | authentications | authentications/s |
+| adfs.wsfed_token_requests_success | success | requests/s |
+| adfs.wstrust_token_requests_success | success | requests/s |
+| exchange.activesync_ping_cmds_pending | pending | commands |
+| exchange.activesync_requests | received | requests/s |
+| exchange.activesync_sync_cmds | processed | commands/s |
+| exchange.autodiscover_requests | processed | requests/s |
+| exchange.avail_service_requests | serviced | requests/s |
+| exchange.owa_current_unique_users | logged-in | users |
+| exchange.owa_requests_total | handled | requests/s |
+| exchange.rpc_active_user_count | active | users |
+| exchange.rpc_avg_latency | latency | seconds |
+| exchange.rpc_connection_count | connections | connections |
+| exchange.rpc_operations | operations | operations/s |
+| exchange.rpc_requests | processed | requests |
+| exchange.rpc_user_count | users | users |
+| exchange.transport_queues_active_mail_box_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_external_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_active_remote_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_internal_largest_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_retry_mailbox_delivery | low, high, none, normal | messages/s |
+| exchange.transport_queues_poison | low, high, none, normal | messages/s |
+| hyperv.vms_health | ok, critical | vms |
+| hyperv.root_partition_device_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_pages | 4K, 2M, 1G | pages |
+| hyperv.root_partition_gpa_space_modifications | gpa | modifications/s |
+| hyperv.root_partition_attached_devices | attached | devices |
+| hyperv.root_partition_deposited_pages | deposited | pages |
+| hyperv.root_partition_skipped_interrupts | skipped | interrupts |
+| hyperv.root_partition_device_dma_errors | illegal_dma | requests |
+| hyperv.root_partition_device_interrupt_errors | illegal_interrupt | requests |
+| hyperv.root_partition_device_interrupt_throttle_events | throttling | events |
+| hyperv.root_partition_io_tlb_flush | flushes | flushes/s |
+| hyperv.root_partition_address_space | address_spaces | address spaces |
+| hyperv.root_partition_virtual_tlb_flush_entries | flushes | flushes/s |
+| hyperv.root_partition_virtual_tlb_pages | used | pages |
+
+### Per cpu core
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| core | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.cpu_core_utilization | dpc, user, privileged, interrupt | percentage |
+| windows.cpu_core_interrupts | interrupts | interrupts/s |
+| windows.cpu_core_dpcs | dpcs | dpcs/s |
+| windows.cpu_core_cstate | c1, c2, c3 | percentage |
+
+### Per logical disk
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| disk | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.logical_disk_utilization | free, used | bytes |
+| windows.logical_disk_bandwidth | read, write | bytes/s |
+| windows.logical_disk_operations | reads, writes | operations/s |
+| windows.logical_disk_latency | read, write | seconds |
+
+### Per network device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| nic | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.net_nic_bandwidth | received, sent | kilobits/s |
+| windows.net_nic_packets | received, sent | packets/s |
+| windows.net_nic_errors | inbound, outbound | errors/s |
+| windows.net_nic_discarded | inbound, outbound | discards/s |
+
+### Per thermalzone
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| thermalzone | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.thermalzone_temperature | temperature | celsius |
+
+### Per service
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| service | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| windows.service_state | running, stopped, start_pending, stop_pending, continue_pending, pause_pending, paused, unknown | state |
+| windows.service_status | ok, error, unknown, degraded, pred_fail, starting, stopping, service, stressed, nonrecover, no_contact, lost_comm | status |
+
+### Per website
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| website | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| iis.website_traffic | received, sent | bytes/s |
+| iis.website_requests_rate | requests | requests/s |
+| iis.website_active_connections_count | active | connections |
+| iis.website_users_count | anonymous, non_anonymous | users |
+| iis.website_connection_attempts_rate | connection | attempts/s |
+| iis.website_isapi_extension_requests_count | isapi | requests |
+| iis.website_isapi_extension_requests_rate | isapi | requests/s |
+| iis.website_ftp_file_transfer_rate | received, sent | files/s |
+| iis.website_logon_attempts_rate | logon | attempts/s |
+| iis.website_errors_rate | document_locked, document_not_found | errors/s |
+| iis.website_uptime | document_locked, document_not_found | seconds |
+
+### Per mssql instance
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.instance_accessmethods_page_splits | page | splits/s |
+| mssql.instance_cache_hit_ratio | hit_ratio | percentage |
+| mssql.instance_bufman_checkpoint_pages | flushed | pages/s |
+| mssql.instance_bufman_page_life_expectancy | life_expectancy | seconds |
+| mssql.instance_bufman_iops | read, written | iops |
+| mssql.instance_blocked_processes | blocked | processes |
+| mssql.instance_user_connection | user | connections |
+| mssql.instance_locks_lock_wait | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_locks_deadlocks | alloc_unit, application, database, extent, file, hobt, key, metadata, oib, object, page, rid, row_group, xact | locks/s |
+| mssql.instance_memmgr_connection_memory_bytes | memory | bytes |
+| mssql.instance_memmgr_external_benefit_of_memory | benefit | bytes |
+| mssql.instance_memmgr_pending_memory_grants | pending | processes |
+| mssql.instance_memmgr_server_memory | memory | bytes |
+| mssql.instance_sql_errors | db_offline, info, kill_connection, user | errors |
+| mssql.instance_sqlstats_auto_parameterization_attempts | failed | attempts/s |
+| mssql.instance_sqlstats_batch_requests | batch | requests/s |
+| mssql.instance_sqlstats_safe_auto_parameterization_attempts | safe | attempts/s |
+| mssql.instance_sqlstats_sql_compilations | compilations | compilations/s |
+| mssql.instance_sqlstats_sql_recompilations | recompiles | recompiles/s |
+
+### Per database
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| mssql_instance | TBD |
+| database | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| mssql.database_active_transactions | active | transactions |
+| mssql.database_backup_restore_operations | backup | operations/s |
+| mssql.database_data_files_size | size | bytes |
+| mssql.database_log_flushed | flushed | bytes/s |
+| mssql.database_log_flushes | log | flushes/s |
+| mssql.database_transactions | transactions | transactions/s |
+| mssql.database_write_transactions | write | transactions/s |
+
+### Per certificate template
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| cert_template | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| adcs.cert_template_requests | requests | requests/s |
+| adcs.cert_template_failed_requests | failed | requests/s |
+| adcs.cert_template_issued_requests | issued | requests/s |
+| adcs.cert_template_pending_requests | pending | requests/s |
+| adcs.cert_template_request_processing_time | processing_time | seconds |
+| adcs.cert_template_retrievals | retrievals | retrievals/s |
+| adcs.cert_template_retrieval_processing_time | processing_time | seconds |
+| adcs.cert_template_request_cryptographic_signing_time | signing_time | seconds |
+| adcs.cert_template_request_policy_module_processing | processing_time | seconds |
+| adcs.cert_template_challenge_responses | challenge | responses/s |
+| adcs.cert_template_challenge_response_processing_time | processing_time | seconds |
+| adcs.cert_template_signed_certificate_timestamp_lists | processed | lists/s |
+| adcs.cert_template_signed_certificate_timestamp_list_processing_time | processing_time | seconds |
+
+### Per process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| process | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| netframework.clrexception_thrown | exceptions | exceptions/s |
+| netframework.clrexception_filters | filters | filters/s |
+| netframework.clrexception_finallys | finallys | finallys/s |
+| netframework.clrexception_throw_to_catch_depth | traversed | stack_frames/s |
+| netframework.clrinterop_com_callable_wrappers | com_callable_wrappers | ccw/s |
+| netframework.clrinterop_interop_marshallings | marshallings | marshallings/s |
+| netframework.clrinterop_interop_stubs_created | created | stubs/s |
+| netframework.clrjit_methods | jit-compiled | methods/s |
+| netframework.clrjit_time | time | percentage |
+| netframework.clrjit_standard_failures | failures | failures/s |
+| netframework.clrjit_il_bytes | compiled_msil | bytes/s |
+| netframework.clrloading_loader_heap_size | committed | bytes |
+| netframework.clrloading_appdomains_loaded | loaded | domain/s |
+| netframework.clrloading_appdomains_unloaded | unloaded | domain/s |
+| netframework.clrloading_assemblies_loaded | loaded | assemblies/s |
+| netframework.clrloading_classes_loaded | loaded | classes/s |
+| netframework.clrloading_class_load_failures | class_load | failures/s |
+| netframework.clrlocksandthreads_queue_length | threads | threads/s |
+| netframework.clrlocksandthreads_current_logical_threads | logical | threads |
+| netframework.clrlocksandthreads_current_physical_threads | physical | threads |
+| netframework.clrlocksandthreads_recognized_threads | threads | threads/s |
+| netframework.clrlocksandthreads_contentions | contentions | contentions/s |
+| netframework.clrmemory_allocated_bytes | allocated | bytes/s |
+| netframework.clrmemory_finalization_survivors | survived | objects |
+| netframework.clrmemory_heap_size | heap | bytes |
+| netframework.clrmemory_promoted | promoted | bytes |
+| netframework.clrmemory_number_gc_handles | used | handles |
+| netframework.clrmemory_collections | gc | gc/s |
+| netframework.clrmemory_induced_gc | gc | gc/s |
+| netframework.clrmemory_number_pinned_objects | pinned | objects |
+| netframework.clrmemory_number_sink_blocks_in_use | used | blocks |
+| netframework.clrmemory_committed | committed | bytes |
+| netframework.clrmemory_reserved | reserved | bytes |
+| netframework.clrmemory_gc_time | time | percentage |
+| netframework.clrremoting_channels | registered | channels/s |
+| netframework.clrremoting_context_bound_classes_loaded | loaded | classes |
+| netframework.clrremoting_context_bound_objects | allocated | objects/s |
+| netframework.clrremoting_context_proxies | objects | objects/s |
+| netframework.clrremoting_contexts | contexts | contexts |
+| netframework.clrremoting_remote_calls | rpc | calls/s |
+| netframework.clrsecurity_link_time_checks | linktime | checks/s |
+| netframework.clrsecurity_checks_time | time | percentage |
+| netframework.clrsecurity_stack_walk_depth | stack | depth |
+| netframework.clrsecurity_runtime_checks | runtime | checks/s |
+
+### Per exchange workload
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.workload_active_tasks | active | tasks |
+| exchange.workload_completed_tasks | completed | tasks/s |
+| exchange.workload_queued_tasks | queued | tasks/s |
+| exchange.workload_yielded_tasks | yielded | tasks/s |
+| exchange.workload_activity_status | active, paused | status |
+
+### Per ldap process
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.ldap_long_running_ops_per_sec | long-running | operations/s |
+| exchange.ldap_read_time | read | seconds |
+| exchange.ldap_search_time | search | seconds |
+| exchange.ldap_write_time | write | seconds |
+| exchange.ldap_timeout_errors | timeout | errors/s |
+
+### Per http proxy
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| workload | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| exchange.http_proxy_avg_auth_latency | latency | seconds |
+| exchange.http_proxy_avg_cas_processing_latency_sec | latency | seconds |
+| exchange.http_proxy_mailbox_proxy_failure_rate | failures | percentage |
+| exchange.http_proxy_mailbox_server_locator_avg_latency_sec | latency | seconds |
+| exchange.http_proxy_outstanding_proxy_requests | outstanding | requests |
+| exchange.http_proxy_requests | processed | requests/s |
+
+### Per vm
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_name | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_cpu_usage | guest, hypervisor, remote | percentage |
+| hyperv.vm_memory_physical | assigned_memory | MiB |
+| hyperv.vm_memory_physical_guest_visible | visible_memory | MiB |
+| hyperv.vm_memory_pressure_current | pressure | percentage |
+| hyperv.vm_vid_physical_pages_allocated | allocated | pages |
+| hyperv.vm_vid_remote_physical_pages | remote_physical | pages |
+
+### Per vm device
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_device | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_device_bytes | read, written | bytes/s |
+| hyperv.vm_device_operations | read, write | operations/s |
+| hyperv.vm_device_errors | errors | errors/s |
+
+### Per vm interface
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vm_interface | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vm_interface_bytes | received, sent | bytes/s |
+| hyperv.vm_interface_packets | received, sent | packets/s |
+| hyperv.vm_interface_packets_dropped | incoming, outgoing | drops/s |
+
+### Per vswitch
+
+TBD
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| vswitch | TBD |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| hyperv.vswitch_bytes | received, sent | bytes/s |
+| hyperv.vswitch_packets | received, sent | packets/s |
+| hyperv.vswitch_directed_packets | received, sent | packets/s |
+| hyperv.vswitch_broadcast_packets | received, sent | packets/s |
+| hyperv.vswitch_multicast_packets | received, sent | packets/s |
+| hyperv.vswitch_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_extensions_dropped_packets | incoming, outgoing | drops/s |
+| hyperv.vswitch_packets_flooded | flooded | packets/s |
+| hyperv.vswitch_learned_mac_addresses | learned | mac addresses/s |
+| hyperv.vswitch_purged_mac_addresses | purged | mac addresses/s |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ windows_10min_cpu_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.cpu_utilization_total | average CPU utilization over the last 10 minutes |
+| [ windows_ram_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.memory_utilization | memory utilization |
+| [ windows_inbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of inbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_discarded ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_discarded | number of outbound discarded packets for the network interface in the last 10 minutes |
+| [ windows_inbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of inbound errors for the network interface in the last 10 minutes |
+| [ windows_outbound_packets_errors ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.net_nic_errors | number of outbound errors for the network interface in the last 10 minutes |
+| [ windows_disk_in_use ](https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf) | windows.logical_disk_space_usage | disk space utilization |
+
+
+## Setup
+
+### Prerequisites
+
+#### Install Windows exporter
+
+To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/windows.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/windows.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
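+
+For example, the two global options can be set once at the top of `go.d/windows.conf`, where they apply to every job defined in the file (a minimal sketch; the values are illustrative):
+
+```yaml
+# go.d/windows.conf
+update_every: 5
+autodetection_retry: 60
+
+jobs:
+  - name: win_server
+    url: http://192.0.2.1:9182/metrics
+```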
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+Do not validate server certificate chain and hostname.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ url: https://192.0.2.1:9182/metrics
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Virtual Node
+
+The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.
+You can create a virtual node for all your Windows machines and control them as separate entities.
+
+To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:
+
+> **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.
+
+```yaml
+# /etc/netdata/vnodes/vnodes.conf
+- hostname: win_server
+ guid: <value>
+```
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server
+ vnode: win_server
+ url: http://192.0.2.1:9182/metrics
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from multiple remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: win_server1
+ url: http://192.0.2.1:9182/metrics
+
+ - name: win_server2
+ url: http://192.0.2.2:9182/metrics
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `windows` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m windows
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `windows` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep windows
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep windows /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep windows
+```
+
+
diff --git a/src/go/plugin/go.d/modules/windows/metadata.yaml b/src/go/plugin/go.d/modules/windows/metadata.yaml
new file mode 100644
index 000000000..87ac4cf63
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/metadata.yaml
@@ -0,0 +1,2172 @@
+plugin_name: go.d.plugin
+modules:
+ - &module
+ meta: &meta
+ id: collector-go.d.plugin-windows
+ plugin_name: go.d.plugin
+ module_name: windows
+ monitored_instance:
+ name: Windows
+ link: https://www.microsoft.com/en-us/windows
+ categories:
+ - data-collection.windows-systems
+ icon_filename: windows.svg
+ keywords:
+ - windows
+ - microsoft
+ most_popular: true
+ info_provided_to_referring_integrations:
+ description: ""
+ related_resources:
+ integrations:
+ list: []
+ overview:
+ data_collection:
+ metrics_description: |
+          This collector monitors the performance of Windows machines and collects both host metrics and metrics from various Windows applications (e.g. Active Directory, MSSQL).
+ method_description: |
+          It collects metrics by periodically sending HTTP requests to [Prometheus exporter for Windows machines](https://github.com/prometheus-community/windows_exporter), a native Windows agent running on each host.
+ default_behavior:
+ auto_detection:
+ description: |
+ It detects Windows exporter instances running on localhost (requires using [Netdata MSI installer](https://github.com/netdata/msi-installer#instructions)).
+
+ Using the Netdata MSI installer is recommended for testing purposes only. For production use, you need to install Netdata on a Linux host and configure it to collect metrics remotely.
+ limits:
+ description: ""
+ performance_impact:
+ description: |
+ Data collection affects the CPU usage of the Windows host. CPU usage depends on the frequency of data collection and the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list:
+ - title: Install Windows exporter
+ description: |
+ To install the Windows exporter, follow the [official installation guide](https://github.com/prometheus-community/windows_exporter#installation).
+ configuration:
+ file:
+ name: go.d/windows.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: ""
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: win_server
+ url: http://192.0.2.1:9182/metrics
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: Do not validate server certificate chain and hostname.
+ config: |
+ jobs:
+ - name: win_server
+ url: https://192.0.2.1:9182/metrics
+ tls_skip_verify: yes
+ - name: Virtual Node
+ description: |
+ The Virtual Node functionality allows you to define nodes in configuration files and treat them as ordinary nodes in all interfaces, panels, tabs, filters, etc.
+ You can create a virtual node for all your Windows machines and control them as separate entities.
+
+ To make your Windows server a virtual node, you need to define virtual nodes in `/etc/netdata/vnodes/vnodes.conf`:
+
+ > **Note**: To create a valid guid, you can use the `uuidgen` command on Linux, or the `[guid]::NewGuid()` command in PowerShell on Windows.
+
+ ```yaml
+ # /etc/netdata/vnodes/vnodes.conf
+ - hostname: win_server
+ guid: <value>
+ ```
+ config: |
+ jobs:
+ - name: win_server
+ vnode: win_server
+ url: http://192.0.2.1:9182/metrics
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from multiple remote instances.
+ config: |
+ jobs:
+ - name: win_server1
+ url: http://192.0.2.1:9182/metrics
+
+ - name: win_server2
+ url: http://192.0.2.2:9182/metrics
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: windows_10min_cpu_usage
+ metric: windows.cpu_utilization_total
+ info: average CPU utilization over the last 10 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf
+ - name: windows_ram_in_use
+ metric: windows.memory_utilization
+ info: memory utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf
+ - name: windows_inbound_packets_discarded
+ metric: windows.net_nic_discarded
+ info: number of inbound discarded packets for the network interface in the last 10 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf
+ - name: windows_outbound_packets_discarded
+ metric: windows.net_nic_discarded
+ info: number of outbound discarded packets for the network interface in the last 10 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf
+ - name: windows_inbound_packets_errors
+ metric: windows.net_nic_errors
+ info: number of inbound errors for the network interface in the last 10 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf
+ - name: windows_outbound_packets_errors
+ metric: windows.net_nic_errors
+ info: number of outbound errors for the network interface in the last 10 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf
+ - name: windows_disk_in_use
+ metric: windows.logical_disk_space_usage
+ info: disk space utilization
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/windows.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: |
+ The collected set of metrics depends on the [enabled collectors](https://github.com/prometheus-community/windows_exporter#collectors).
+
+ Supported collectors:
+
+ - [cpu](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.cpu.md)
+ - [iis](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.iis.md)
+ - [memory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.memory.md)
+ - [net](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.net.md)
+ - [logical_disk](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logical_disk.md)
+ - [os](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.os.md)
+ - [system](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.system.md)
+ - [logon](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.logon.md)
+ - [tcp](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.tcp.md)
+ - [thermalzone](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.thermalzone.md)
+ - [process](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.process.md)
+ - [service](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.service.md)
+ - [mssql](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.mssql.md)
+ - [ad](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.ad.md)
+ - [adcs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adcs.md)
+ - [adfs](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.adfs.md)
+ - [netframework_clrexceptions](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrexceptions.md)
+ - [netframework_clrinterop](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrinterop.md)
+ - [netframework_clrjit](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrjit.md)
+ - [netframework_clrloading](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrloading.md)
+ - [netframework_clrlocksandthreads](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrlocksandthreads.md)
+ - [netframework_clrmemory](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrmemory.md)
+ - [netframework_clrremoting](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.netframework_clrremoting.md)
+ - [exchange](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.exchange.md)
+ - [hyperv](https://github.com/prometheus-community/windows_exporter/blob/master/docs/collector.hyperv.md)
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored host.
+ labels: []
+ metrics:
+ - name: windows.cpu_utilization_total
+ description: Total CPU Utilization (all cores)
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: dpc
+ - name: user
+ - name: privileged
+ - name: interrupt
+ - name: windows.memory_utilization
+ description: Memory Utilization
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: windows.memory_page_faults
+ description: Memory Page Faults
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: page_faults
+ - name: windows.memory_swap_utilization
+ description: Swap Utilization
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: windows.memory_swap_operations
+ description: Swap Operations
+ unit: operations/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: windows.memory_swap_pages
+ description: Swap Pages
+ unit: pages/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: written
+ - name: windows.memory_cached
+ description: Cached
+ unit: KiB
+ chart_type: area
+ dimensions:
+ - name: cached
+ - name: windows.memory_cache_faults
+ description: Cache Faults
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: cache_faults
+ - name: windows.memory_system_pool
+ description: System Memory Pool
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: paged
+ - name: non-paged
+ - name: windows.tcp_conns_established
+ description: TCP established connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: windows.tcp_conns_active
+ description: TCP active connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: windows.tcp_conns_passive
+ description: TCP passive connections
+ unit: connections/s
+ chart_type: line
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: windows.tcp_conns_failures
+ description: TCP connection failures
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: windows.tcp_conns_resets
+ description: TCP connections resets
+ unit: resets/s
+ chart_type: line
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: windows.tcp_segments_received
+ description: Number of TCP segments received
+ unit: segments/s
+ chart_type: line
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: windows.tcp_segments_sent
+ description: Number of TCP segments sent
+ unit: segments/s
+ chart_type: line
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: windows.tcp_segments_retransmitted
+ description: Number of TCP segments retransmitted
+ unit: segments/s
+ chart_type: line
+ dimensions:
+ - name: ipv4
+ - name: ipv6
+ - name: windows.os_processes
+ description: Processes
+ unit: number
+ chart_type: line
+ dimensions:
+ - name: processes
+ - name: windows.os_users
+ description: Number of Users
+ unit: users
+ chart_type: line
+ dimensions:
+ - name: users
+ - name: windows.os_visible_memory_usage
+ description: Visible Memory Usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: windows.os_paging_files_usage
+ description: Paging Files Usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: windows.system_threads
+ description: Threads
+ unit: number
+ chart_type: line
+ dimensions:
+ - name: threads
+ - name: windows.system_uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: windows.logon_type_sessions
+ description: Active User Logon Sessions By Type
+              unit: sessions
+ chart_type: stacked
+ dimensions:
+ - name: system
+ - name: interactive
+ - name: network
+ - name: batch
+ - name: service
+ - name: proxy
+ - name: unlock
+ - name: network_clear_text
+ - name: new_credentials
+ - name: remote_interactive
+ - name: cached_interactive
+ - name: cached_remote_interactive
+ - name: cached_unlock
+ - name: windows.processes_cpu_utilization
+ description: CPU usage (100% = 1 core)
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per process
+ - name: windows.processes_handles
+              description: Number of handles open
+ unit: handles
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per process
+ - name: windows.processes_io_bytes
+ description: Total of IO bytes (read, write, other)
+ unit: bytes/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per process
+ - name: windows.processes_io_operations
+ description: Total of IO events (read, write, other)
+ unit: operations/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per process
+ - name: windows.processes_page_faults
+ description: Number of page faults
+ unit: pgfaults/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per process
+ - name: windows.processes_page_file_bytes
+ description: Bytes used in page file(s)
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per process
+ - name: windows.processes_pool_bytes
+              description: Memory usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per process
+ - name: windows.processes_threads
+              description: Active threads
+ unit: threads
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per process
+ - name: ad.database_operations
+ description: AD database operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: add
+ - name: delete
+ - name: modify
+ - name: recycle
+ - name: ad.directory_operations
+ description: AD directory operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: search
+ - name: ad.name_cache_lookups
+ description: Name cache lookups
+ unit: lookups/s
+ chart_type: line
+ dimensions:
+ - name: lookups
+ - name: ad.name_cache_hits
+ description: Name cache hits
+ unit: hits/s
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: ad.atq_average_request_latency
+ description: Average request processing time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: ad.atq_outstanding_requests
+ description: Outstanding requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: outstanding
+ - name: ad.dra_replication_intersite_compressed_traffic
+              description: DRA replication compressed traffic between sites
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: ad.dra_replication_intrasite_compressed_traffic
+              description: DRA replication compressed traffic within site
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: ad.dra_replication_sync_objects_remaining
+ description: DRA replication full sync objects remaining
+ unit: objects
+ chart_type: line
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: ad.dra_replication_objects_filtered
+ description: DRA replication objects filtered
+ unit: objects/s
+ chart_type: line
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: ad.dra_replication_properties_updated
+ description: DRA replication properties updated
+ unit: properties/s
+ chart_type: line
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: ad.dra_replication_properties_filtered
+ description: DRA replication properties filtered
+ unit: properties/s
+ chart_type: line
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: ad.dra_replication_pending_syncs
+ description: DRA replication pending syncs
+ unit: syncs
+ chart_type: line
+ dimensions:
+ - name: pending
+ - name: ad.dra_replication_sync_requests
+ description: DRA replication sync requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: ad.ds_threads
+ description: Directory Service threads
+ unit: threads
+ chart_type: line
+ dimensions:
+ - name: in_use
+ - name: ad.ldap_last_bind_time
+ description: LDAP last successful bind time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: last_bind
+ - name: ad.binds
+ description: Successful binds
+ unit: binds/s
+ chart_type: line
+ dimensions:
+ - name: binds
+ - name: ad.ldap_searches
+ description: LDAP client search operations
+ unit: searches/s
+ chart_type: line
+ dimensions:
+ - name: searches
+ - name: adfs.ad_login_connection_failures
+ description: Connection failures
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: connection
+ - name: adfs.certificate_authentications
+ description: User Certificate authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: authentications
+ - name: adfs.db_artifact_failures
+ description: Connection failures to the artifact database
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: connection
+ - name: adfs.db_artifact_query_time_seconds
+ description: Time taken for an artifact database query
+ unit: seconds/s
+ chart_type: line
+ dimensions:
+ - name: query_time
+ - name: adfs.db_config_failures
+ description: Connection failures to the configuration database
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: connection
+ - name: adfs.db_config_query_time_seconds
+ description: Time taken for a configuration database query
+ unit: seconds/s
+ chart_type: line
+ dimensions:
+ - name: query_time
+ - name: adfs.device_authentications
+ description: Device authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: authentications
+ - name: adfs.external_authentications
+ description: Authentications from external MFA providers
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.federated_authentications
+ description: Authentications from Federated Sources
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: authentications
+ - name: adfs.federation_metadata_requests
+ description: Federation Metadata requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: adfs.oauth_authorization_requests
+ description: Incoming requests to the OAuth Authorization endpoint
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: adfs.oauth_client_authentications
+ description: OAuth client authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.oauth_client_credentials_requests
+ description: OAuth client credentials requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.oauth_client_privkey_jwt_authentications
+ description: OAuth client private key JWT authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.oauth_client_secret_basic_authentications
+ description: OAuth client secret basic authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.oauth_client_secret_post_authentications
+ description: OAuth client secret post authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.oauth_client_windows_authentications
+ description: OAuth client windows integrated authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.oauth_logon_certificate_requests
+ description: OAuth logon certificate requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.oauth_password_grant_requests
+ description: OAuth password grant requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.oauth_token_requests_success
+ description: Successful RP token requests over OAuth protocol
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: adfs.passive_requests
+ description: Passive requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: passive
+ - name: adfs.passport_authentications
+ description: Microsoft Passport SSO authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: passport
+ - name: adfs.password_change_requests
+ description: Password change requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.samlp_token_requests_success
+ description: Successful RP token requests over SAML-P protocol
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: adfs.sso_authentications
+ description: SSO authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.token_requests
+ description: Token access requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: adfs.userpassword_authentications
+ description: AD U/P authentications
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: failure
+ - name: adfs.windows_integrated_authentications
+ description: Windows integrated authentications using Kerberos or NTLM
+ unit: authentications/s
+ chart_type: line
+ dimensions:
+ - name: authentications
+ - name: adfs.wsfed_token_requests_success
+ description: Successful RP token requests over WS-Fed protocol
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: adfs.wstrust_token_requests_success
+ description: Successful RP token requests over WS-Trust protocol
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: success
+ - name: exchange.activesync_ping_cmds_pending
+ description: Ping commands pending in queue
+ unit: commands
+ chart_type: line
+ dimensions:
+ - name: pending
+ - name: exchange.activesync_requests
+ description: HTTP requests received from ASP.NET
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: exchange.activesync_sync_cmds
+ description: Sync commands processed
+ unit: commands/s
+ chart_type: line
+ dimensions:
+ - name: processed
+ - name: exchange.autodiscover_requests
+ description: Autodiscover service requests processed
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: processed
+ - name: exchange.avail_service_requests
+ description: Requests serviced
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: serviced
+ - name: exchange.owa_current_unique_users
+ description: Unique users currently logged on to Outlook Web App
+ unit: users
+ chart_type: line
+ dimensions:
+ - name: logged-in
+ - name: exchange.owa_requests_total
+ description: Requests handled by Outlook Web App
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: handled
+ - name: exchange.rpc_active_user_count
+ description: Active unique users in the last 2 minutes
+ unit: users
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: exchange.rpc_avg_latency
+ description: Average latency
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: latency
+ - name: exchange.rpc_connection_count
+ description: Client connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: exchange.rpc_operations
+ description: RPC operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: operations
+ - name: exchange.rpc_requests
+              description: Client requests currently being processed
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: processed
+ - name: exchange.rpc_user_count
+ description: RPC users
+ unit: users
+ chart_type: line
+ dimensions:
+ - name: users
+ - name: exchange.transport_queues_active_mail_box_delivery
+ description: Active Mailbox Delivery Queue length
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: low
+ - name: high
+ - name: none
+ - name: normal
+ - name: exchange.transport_queues_external_active_remote_delivery
+ description: External Active Remote Delivery Queue length
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: low
+ - name: high
+ - name: none
+ - name: normal
+ - name: exchange.transport_queues_external_largest_delivery
+ description: External Largest Delivery Queue length
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: low
+ - name: high
+ - name: none
+ - name: normal
+ - name: exchange.transport_queues_internal_active_remote_delivery
+ description: Internal Active Remote Delivery Queue length
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: low
+ - name: high
+ - name: none
+ - name: normal
+ - name: exchange.transport_queues_internal_largest_delivery
+ description: Internal Largest Delivery Queue length
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: low
+ - name: high
+ - name: none
+ - name: normal
+ - name: exchange.transport_queues_retry_mailbox_delivery
+              description: Retry Mailbox Delivery Queue length
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: low
+ - name: high
+ - name: none
+ - name: normal
+ - name: exchange.transport_queues_poison
+ description: Poison Queue Length
+ unit: messages/s
+ chart_type: line
+ dimensions:
+ - name: low
+ - name: high
+ - name: none
+ - name: normal
+ - name: hyperv.vms_health
+ description: Virtual machines health status
+ unit: vms
+ chart_type: stacked
+ dimensions:
+ - name: ok
+ - name: critical
+ - name: hyperv.root_partition_device_space_pages
+ description: Root partition pages in the device space
+ unit: pages
+ chart_type: line
+ dimensions:
+ - name: 4K
+ - name: 2M
+ - name: 1G
+ - name: hyperv.root_partition_gpa_space_pages
+ description: Root partition pages in the GPA space
+ unit: pages
+ chart_type: line
+ dimensions:
+ - name: 4K
+ - name: 2M
+ - name: 1G
+ - name: hyperv.root_partition_gpa_space_modifications
+ description: Root partition GPA space modifications
+ unit: modifications/s
+ chart_type: line
+ dimensions:
+ - name: gpa
+ - name: hyperv.root_partition_attached_devices
+ description: Root partition attached devices
+ unit: devices
+ chart_type: line
+ dimensions:
+ - name: attached
+ - name: hyperv.root_partition_deposited_pages
+ description: Root partition deposited pages
+ unit: pages
+ chart_type: line
+ dimensions:
+ - name: deposited
+ - name: hyperv.root_partition_skipped_interrupts
+ description: Root partition skipped interrupts
+ unit: interrupts
+ chart_type: line
+ dimensions:
+ - name: skipped
+ - name: hyperv.root_partition_device_dma_errors
+ description: Root partition illegal DMA requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: illegal_dma
+ - name: hyperv.root_partition_device_interrupt_errors
+ description: Root partition illegal interrupt requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: illegal_interrupt
+ - name: hyperv.root_partition_device_interrupt_throttle_events
+ description: Root partition throttled interrupts
+ unit: events
+ chart_type: line
+ dimensions:
+ - name: throttling
+ - name: hyperv.root_partition_io_tlb_flush
+ description: Root partition flushes of I/O TLBs
+ unit: flushes/s
+ chart_type: line
+ dimensions:
+ - name: flushes
+ - name: hyperv.root_partition_address_space
+ description: Root partition address spaces in the virtual TLB
+ unit: address spaces
+ chart_type: line
+ dimensions:
+ - name: address_spaces
+ - name: hyperv.root_partition_virtual_tlb_flush_entries
+ description: Root partition flushes of the entire virtual TLB
+ unit: flushes/s
+ chart_type: line
+ dimensions:
+ - name: flushes
+ - name: hyperv.root_partition_virtual_tlb_pages
+ description: Root partition pages used by the virtual TLB
+ unit: pages
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: cpu core
+ description: TBD
+ labels:
+ - name: core
+ description: TBD
+ metrics:
+ - name: windows.cpu_core_utilization
+ description: Core CPU Utilization
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: dpc
+ - name: user
+ - name: privileged
+ - name: interrupt
+ - name: windows.cpu_core_interrupts
+ description: Received and Serviced Hardware Interrupts
+ unit: interrupts/s
+ chart_type: line
+ dimensions:
+ - name: interrupts
+ - name: windows.cpu_core_dpcs
+ description: Received and Serviced Deferred Procedure Calls (DPC)
+ unit: dpcs/s
+ chart_type: line
+ dimensions:
+ - name: dpcs
+ - name: windows.cpu_core_cstate
+ description: Core Time Spent in Low-Power Idle State
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: c1
+ - name: c2
+ - name: c3
+ - name: logical disk
+ description: TBD
+ labels:
+ - name: disk
+ description: TBD
+ metrics:
+ - name: windows.logical_disk_utilization
+ description: Space usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: windows.logical_disk_bandwidth
+ description: Bandwidth
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: write
+ - name: windows.logical_disk_operations
+ description: Operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: windows.logical_disk_latency
+ description: Average Read/Write Latency
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: network device
+ description: TBD
+ labels:
+ - name: nic
+ description: TBD
+ metrics:
+ - name: windows.net_nic_bandwidth
+ description: Bandwidth
+ unit: kilobits/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: windows.net_nic_packets
+ description: Packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: windows.net_nic_errors
+ description: Errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: windows.net_nic_discarded
+ description: Discards
+ unit: discards/s
+ chart_type: line
+ dimensions:
+ - name: inbound
+ - name: outbound
+ - name: thermalzone
+ description: TBD
+ labels:
+ - name: thermalzone
+ description: TBD
+ metrics:
+ - name: windows.thermalzone_temperature
+ description: Thermal zone temperature
+ unit: celsius
+ chart_type: line
+ dimensions:
+ - name: temperature
+ - name: service
+ description: TBD
+ labels:
+ - name: service
+ description: TBD
+ metrics:
+ - name: windows.service_state
+ description: Service state
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: running
+ - name: stopped
+ - name: start_pending
+ - name: stop_pending
+ - name: continue_pending
+ - name: pause_pending
+ - name: paused
+ - name: unknown
+ - name: windows.service_status
+ description: Service status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: ok
+ - name: error
+ - name: unknown
+ - name: degraded
+ - name: pred_fail
+ - name: starting
+ - name: stopping
+ - name: service
+ - name: stressed
+ - name: nonrecover
+ - name: no_contact
+ - name: lost_comm
+ - name: website
+ description: TBD
+ labels:
+ - name: website
+ description: TBD
+ metrics:
+ - name: iis.website_traffic
+ description: Website traffic
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: iis.website_requests_rate
+ description: Website requests rate
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: iis.website_active_connections_count
+ description: Website active connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: iis.website_users_count
+ description: Website users with pending requests
+ unit: users
+ chart_type: stacked
+ dimensions:
+ - name: anonymous
+ - name: non_anonymous
+ - name: iis.website_connection_attempts_rate
+              description: Website connection attempts
+ unit: attempts/s
+ chart_type: line
+ dimensions:
+ - name: connection
+ - name: iis.website_isapi_extension_requests_count
+ description: ISAPI extension requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: isapi
+ - name: iis.website_isapi_extension_requests_rate
+              description: Website extension requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: isapi
+ - name: iis.website_ftp_file_transfer_rate
+ description: Website FTP file transfer rate
+ unit: files/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: iis.website_logon_attempts_rate
+ description: Website logon attempts
+ unit: attempts/s
+ chart_type: line
+ dimensions:
+ - name: logon
+ - name: iis.website_errors_rate
+ description: Website errors
+ unit: errors/s
+ chart_type: stacked
+ dimensions:
+ - name: document_locked
+ - name: document_not_found
+ - name: iis.website_uptime
+ description: Website uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+                - name: uptime
+ - name: mssql instance
+ description: TBD
+ labels:
+ - name: mssql_instance
+ description: TBD
+ metrics:
+ - name: mssql.instance_accessmethods_page_splits
+ description: Page splits
+ unit: splits/s
+ chart_type: line
+ dimensions:
+ - name: page
+ - name: mssql.instance_cache_hit_ratio
+ description: Buffer Cache hit ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: hit_ratio
+ - name: mssql.instance_bufman_checkpoint_pages
+ description: Flushed pages
+ unit: pages/s
+ chart_type: line
+ dimensions:
+ - name: flushed
+ - name: mssql.instance_bufman_page_life_expectancy
+ description: Page life expectancy
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: life_expectancy
+ - name: mssql.instance_bufman_iops
+ description: Number of pages input and output
+ unit: iops
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: written
+ - name: mssql.instance_blocked_processes
+ description: Blocked processes
+ unit: processes
+ chart_type: line
+ dimensions:
+ - name: blocked
+ - name: mssql.instance_user_connection
+ description: User connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: user
+ - name: mssql.instance_locks_lock_wait
+ description: Lock requests that required the caller to wait
+ unit: locks/s
+ chart_type: line
+ dimensions:
+ - name: alloc_unit
+ - name: application
+ - name: database
+ - name: extent
+ - name: file
+ - name: hobt
+ - name: key
+ - name: metadata
+ - name: oib
+ - name: object
+ - name: page
+ - name: rid
+ - name: row_group
+ - name: xact
+ - name: mssql.instance_locks_deadlocks
+ description: Lock requests that resulted in deadlock
+ unit: locks/s
+ chart_type: line
+ dimensions:
+ - name: alloc_unit
+ - name: application
+ - name: database
+ - name: extent
+ - name: file
+ - name: hobt
+ - name: key
+ - name: metadata
+ - name: oib
+ - name: object
+ - name: page
+ - name: rid
+ - name: row_group
+ - name: xact
+ - name: mssql.instance_memmgr_connection_memory_bytes
+ description: Amount of dynamic memory to maintain connections
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: mssql.instance_memmgr_external_benefit_of_memory
+ description: Performance benefit from adding memory to a specific cache
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: benefit
+ - name: mssql.instance_memmgr_pending_memory_grants
+              description: Processes waiting for memory grant
+ unit: processes
+ chart_type: line
+ dimensions:
+ - name: pending
+ - name: mssql.instance_memmgr_server_memory
+ description: Memory committed
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: mssql.instance_sql_errors
+ description: Errors
+ unit: errors
+ chart_type: line
+ dimensions:
+ - name: db_offline
+ - name: info
+ - name: kill_connection
+ - name: user
+ - name: mssql.instance_sqlstats_auto_parameterization_attempts
+ description: Failed auto-parameterization attempts
+ unit: attempts/s
+ chart_type: line
+ dimensions:
+ - name: failed
+ - name: mssql.instance_sqlstats_batch_requests
+              description: Total of batch requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: batch
+ - name: mssql.instance_sqlstats_safe_auto_parameterization_attempts
+ description: Safe auto-parameterization attempts
+ unit: attempts/s
+ chart_type: line
+ dimensions:
+ - name: safe
+ - name: mssql.instance_sqlstats_sql_compilations
+ description: SQL compilations
+ unit: compilations/s
+ chart_type: line
+ dimensions:
+ - name: compilations
+ - name: mssql.instance_sqlstats_sql_recompilations
+ description: SQL re-compilations
+ unit: recompiles/s
+ chart_type: line
+ dimensions:
+ - name: recompiles
+ - name: database
+ description: TBD
+ labels:
+ - name: mssql_instance
+ description: TBD
+ - name: database
+ description: TBD
+ metrics:
+ - name: mssql.database_active_transactions
+ description: Active transactions per database
+ unit: transactions
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: mssql.database_backup_restore_operations
+ description: Backup IO per database
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: backup
+ - name: mssql.database_data_files_size
+ description: Current database size
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: mssql.database_log_flushed
+ description: Log flushed
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: flushed
+ - name: mssql.database_log_flushes
+ description: Log flushes
+ unit: flushes/s
+ chart_type: line
+ dimensions:
+ - name: log
+ - name: mssql.database_transactions
+ description: Transactions
+ unit: transactions/s
+ chart_type: line
+ dimensions:
+ - name: transactions
+ - name: mssql.database_write_transactions
+ description: Write transactions
+ unit: transactions/s
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: certificate template
+ description: TBD
+ labels:
+ - name: cert_template
+ description: TBD
+ metrics:
+ - name: adcs.cert_template_requests
+ description: Certificate requests processed
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: requests
+ - name: adcs.cert_template_failed_requests
+ description: Certificate failed requests processed
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: failed
+ - name: adcs.cert_template_issued_requests
+ description: Certificate issued requests processed
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: issued
+ - name: adcs.cert_template_pending_requests
+ description: Certificate pending requests processed
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: pending
+ - name: adcs.cert_template_request_processing_time
+ description: Certificate last request processing time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: processing_time
+ - name: adcs.cert_template_retrievals
+ description: Total of certificate retrievals
+ unit: retrievals/s
+ chart_type: line
+ dimensions:
+ - name: retrievals
+ - name: adcs.cert_template_retrieval_processing_time
+ description: Certificate last retrieval processing time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: processing_time
+ - name: adcs.cert_template_request_cryptographic_signing_time
+ description: Certificate last signing operation request time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: singing_time
+ - name: adcs.cert_template_request_policy_module_processing
+ description: Certificate last policy module processing request time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: processing_time
+ - name: adcs.cert_template_challenge_responses
+ description: Certificate challenge responses
+ unit: responses/s
+ chart_type: line
+ dimensions:
+ - name: challenge
+ - name: adcs.cert_template_challenge_response_processing_time
+ description: Certificate last challenge response time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: processing_time
+ - name: adcs.cert_template_signed_certificate_timestamp_lists
+ description: Certificate Signed Certificate Timestamp Lists processed
+ unit: lists/s
+ chart_type: line
+ dimensions:
+ - name: processed
+ - name: adcs.cert_template_signed_certificate_timestamp_list_processing_time
+ description: Certificate last Signed Certificate Timestamp List process time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: processing_time
+ - name: process
+ description: TBD
+ labels:
+ - name: process
+ description: TBD
+ metrics:
+ - name: netframework.clrexception_thrown
+ description: Thrown exceptions
+ unit: exceptions/s
+ chart_type: line
+ dimensions:
+ - name: exceptions
+ - name: netframework.clrexception_filters
+ description: Executed exception filters
+ unit: filters/s
+ chart_type: line
+ dimensions:
+ - name: filters
+ - name: netframework.clrexception_finallys
+ description: Executed finally blocks
+ unit: finallys/s
+ chart_type: line
+ dimensions:
+ - name: finallys
+ - name: netframework.clrexception_throw_to_catch_depth
+ description: Traversed stack frames
+ unit: stack_frames/s
+ chart_type: line
+ dimensions:
+ - name: traversed
+ - name: netframework.clrinterop_com_callable_wrappers
+ description: COM callable wrappers (CCW)
+ unit: ccw/s
+ chart_type: line
+ dimensions:
+ - name: com_callable_wrappers
+ - name: netframework.clrinterop_interop_marshallings
+ description: Arguments and return values marshallings
+ unit: marshallings/s
+ chart_type: line
+ dimensions:
+ - name: marshallings
+ - name: netframework.clrinterop_interop_stubs_created
+ description: Created stubs
+ unit: stubs/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: netframework.clrjit_methods
+ description: JIT-compiled methods
+ unit: methods/s
+ chart_type: line
+ dimensions:
+ - name: jit-compiled
+ - name: netframework.clrjit_time
+ description: Time spent in JIT compilation
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: netframework.clrjit_standard_failures
+ description: JIT compiler failures
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: failures
+ - name: netframework.clrjit_il_bytes
+ description: Compiled Microsoft intermediate language (MSIL) bytes
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: compiled_msil
+ - name: netframework.clrloading_loader_heap_size
+ description: Memory committed by class loader
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: committed
+ - name: netframework.clrloading_appdomains_loaded
+ description: Loaded application domains
+ unit: domain/s
+ chart_type: line
+ dimensions:
+ - name: loaded
+ - name: netframework.clrloading_appdomains_unloaded
+ description: Unloaded application domains
+ unit: domain/s
+ chart_type: line
+ dimensions:
+ - name: unloaded
+ - name: netframework.clrloading_assemblies_loaded
+ description: Loaded assemblies
+ unit: assemblies/s
+ chart_type: line
+ dimensions:
+ - name: loaded
+ - name: netframework.clrloading_classes_loaded
+ description: Loaded classes in all assemblies
+ unit: classes/s
+ chart_type: line
+ dimensions:
+ - name: loaded
+ - name: netframework.clrloading_class_load_failures
+ description: Class load failures
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: class_load
+ - name: netframework.clrlocksandthreads_queue_length
+              description: Threads that waited to acquire a managed lock
+ unit: threads/s
+ chart_type: line
+ dimensions:
+ - name: threads
+ - name: netframework.clrlocksandthreads_current_logical_threads
+ description: Logical threads
+ unit: threads
+ chart_type: line
+ dimensions:
+ - name: logical
+ - name: netframework.clrlocksandthreads_current_physical_threads
+ description: Physical threads
+ unit: threads
+ chart_type: line
+ dimensions:
+ - name: physical
+ - name: netframework.clrlocksandthreads_recognized_threads
+ description: Threads recognized by the runtime
+ unit: threads/s
+ chart_type: line
+ dimensions:
+ - name: threads
+ - name: netframework.clrlocksandthreads_contentions
+              description: Failed attempts to acquire a managed lock
+ unit: contentions/s
+ chart_type: line
+ dimensions:
+ - name: contentions
+ - name: netframework.clrmemory_allocated_bytes
+ description: Memory allocated on the garbage collection heap
+ unit: bytes/s
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: netframework.clrmemory_finalization_survivors
+ description: Objects that survived garbage-collection
+ unit: objects
+ chart_type: line
+ dimensions:
+ - name: survived
+ - name: netframework.clrmemory_heap_size
+ description: Maximum bytes that can be allocated
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: heap
+ - name: netframework.clrmemory_promoted
+ description: Memory promoted to the next generation
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: promoted
+ - name: netframework.clrmemory_number_gc_handles
+ description: Garbage collection handles
+ unit: handles
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: netframework.clrmemory_collections
+ description: Garbage collections
+ unit: gc/s
+ chart_type: line
+ dimensions:
+ - name: gc
+ - name: netframework.clrmemory_induced_gc
+ description: Garbage collections induced
+ unit: gc/s
+ chart_type: line
+ dimensions:
+ - name: gc
+ - name: netframework.clrmemory_number_pinned_objects
+ description: Pinned objects encountered
+ unit: objects
+ chart_type: line
+ dimensions:
+ - name: pinned
+ - name: netframework.clrmemory_number_sink_blocks_in_use
+ description: Synchronization blocks in use
+ unit: blocks
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: netframework.clrmemory_committed
+ description: Virtual memory committed by GC
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: committed
+ - name: netframework.clrmemory_reserved
+ description: Virtual memory reserved by GC
+ unit: bytes
+ chart_type: line
+ dimensions:
+ - name: reserved
+ - name: netframework.clrmemory_gc_time
+ description: Time spent on GC
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: netframework.clrremoting_channels
+ description: Registered channels
+ unit: channels/s
+ chart_type: line
+ dimensions:
+ - name: registered
+ - name: netframework.clrremoting_context_bound_classes_loaded
+ description: Loaded context-bound classes
+ unit: classes
+ chart_type: line
+ dimensions:
+ - name: loaded
+ - name: netframework.clrremoting_context_bound_objects
+ description: Allocated context-bound objects
+ unit: objects/s
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: netframework.clrremoting_context_proxies
+ description: Remoting proxy objects
+ unit: objects/s
+ chart_type: line
+ dimensions:
+ - name: objects
+ - name: netframework.clrremoting_contexts
+ description: Total of remoting contexts
+ unit: contexts
+ chart_type: line
+ dimensions:
+ - name: contexts
+ - name: netframework.clrremoting_remote_calls
+ description: Remote Procedure Calls (RPC) invoked
+ unit: calls/s
+ chart_type: line
+ dimensions:
+ - name: rpc
+ - name: netframework.clrsecurity_link_time_checks
+ description: Link-time code access security checks
+ unit: checks/s
+ chart_type: line
+ dimensions:
+ - name: linktime
+ - name: netframework.clrsecurity_checks_time
+ description: Time spent performing runtime code access security checks
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: netframework.clrsecurity_stack_walk_depth
+ description: Depth of the stack
+ unit: depth
+ chart_type: line
+ dimensions:
+ - name: stack
+ - name: netframework.clrsecurity_runtime_checks
+ description: Runtime code access security checks performed
+ unit: checks/s
+ chart_type: line
+ dimensions:
+ - name: runtime
+ - name: exchange workload
+ description: TBD
+ labels:
+ - name: workload
+ description: TBD
+ metrics:
+ - name: exchange.workload_active_tasks
+ description: Workload active tasks
+ unit: tasks
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: exchange.workload_completed_tasks
+ description: Workload completed tasks
+ unit: tasks/s
+ chart_type: line
+ dimensions:
+ - name: completed
+ - name: exchange.workload_queued_tasks
+ description: Workload queued tasks
+ unit: tasks/s
+ chart_type: line
+ dimensions:
+ - name: queued
+ - name: exchange.workload_yielded_tasks
+ description: Workload yielded tasks
+ unit: tasks/s
+ chart_type: line
+ dimensions:
+ - name: yielded
+ - name: exchange.workload_activity_status
+ description: Workload activity status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: active
+ - name: paused
+ - name: ldap process
+ description: TBD
+ labels:
+ - name: workload
+ description: TBD
+ metrics:
+ - name: exchange.ldap_long_running_ops_per_sec
+ description: Long Running LDAP operations
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: long-running
+ - name: exchange.ldap_read_time
+ description: Time to send an LDAP read request and receive a response
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: exchange.ldap_search_time
+ description: Time to send an LDAP search request and receive a response
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: search
+ - name: exchange.ldap_write_time
+              description: Time to send an LDAP write request and receive a response
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: exchange.ldap_timeout_errors
+ description: LDAP timeout errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: timeout
+ - name: http proxy
+ description: TBD
+ labels:
+ - name: workload
+ description: TBD
+ metrics:
+ - name: exchange.http_proxy_avg_auth_latency
+ description: Average time spent authenticating CAS
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: latency
+ - name: exchange.http_proxy_avg_cas_processing_latency_sec
+ description: Average CAS processing latency
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: latency
+ - name: exchange.http_proxy_mailbox_proxy_failure_rate
+ description: Percentage of failures between this CAS and MBX servers
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: failures
+ - name: exchange.http_proxy_mailbox_server_locator_avg_latency_sec
+ description: Average latency of MailboxServerLocator web service calls
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: latency
+ - name: exchange.http_proxy_outstanding_proxy_requests
+ description: Concurrent outstanding proxy requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: outstanding
+ - name: exchange.http_proxy_requests
+ description: Number of proxy requests processed each second
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: processed
+ - name: vm
+ description: TBD
+ labels:
+ - name: vm_name
+ description: TBD
+ metrics:
+ - name: hyperv.vm_cpu_usage
+ description: VM CPU usage (100% = 1 core)
+ unit: percentage
+ chart_type: stacked
+ dimensions:
+ - name: guest
+ - name: hypervisor
+ - name: remote
+ - name: hyperv.vm_memory_physical
+ description: VM assigned memory
+ unit: MiB
+ chart_type: line
+ dimensions:
+ - name: assigned_memory
+ - name: hyperv.vm_memory_physical_guest_visible
+ description: VM guest visible memory
+ unit: MiB
+ chart_type: line
+ dimensions:
+ - name: visible_memory
+ - name: hyperv.vm_memory_pressure_current
+ description: VM current pressure
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: pressure
+ - name: hyperv.vm_vid_physical_pages_allocated
+ description: VM physical pages allocated
+ unit: pages
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: hyperv.vm_vid_remote_physical_pages
+ description: VM physical pages not allocated from the preferred NUMA node
+ unit: pages
+ chart_type: line
+ dimensions:
+ - name: remote_physical
+ - name: vm device
+ description: TBD
+ labels:
+ - name: vm_device
+ description: TBD
+ metrics:
+ - name: hyperv.vm_device_bytes
+ description: VM storage device IO
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: read
+ - name: written
+ - name: hyperv.vm_device_operations
+ description: VM storage device IOPS
+ unit: operations/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: hyperv.vm_device_errors
+ description: VM storage device errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: errors
+ - name: vm interface
+ description: TBD
+ labels:
+ - name: vm_interface
+ description: TBD
+ metrics:
+ - name: hyperv.vm_interface_bytes
+ description: VM interface traffic
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: hyperv.vm_interface_packets
+ description: VM interface packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: hyperv.vm_interface_packets_dropped
+ description: VM interface packets dropped
+ unit: drops/s
+ chart_type: line
+ dimensions:
+ - name: incoming
+ - name: outgoing
+ - name: vswitch
+ description: TBD
+ labels:
+ - name: vswitch
+ description: TBD
+ metrics:
+ - name: hyperv.vswitch_bytes
+ description: Virtual switch traffic
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: received
+ - name: sent
+ - name: hyperv.vswitch_packets
+ description: Virtual switch packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: hyperv.vswitch_directed_packets
+ description: Virtual switch directed packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: hyperv.vswitch_broadcast_packets
+ description: Virtual switch broadcast packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: hyperv.vswitch_multicast_packets
+ description: Virtual switch multicast packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: hyperv.vswitch_dropped_packets
+ description: Virtual switch dropped packets
+ unit: drops/s
+ chart_type: line
+ dimensions:
+ - name: incoming
+ - name: outgoing
+ - name: hyperv.vswitch_extensions_dropped_packets
+ description: Virtual switch extensions dropped packets
+ unit: drops/s
+ chart_type: line
+ dimensions:
+ - name: incoming
+ - name: outgoing
+ - name: hyperv.vswitch_packets_flooded
+ description: Virtual switch flooded packets
+ unit: packets/s
+ chart_type: line
+ dimensions:
+ - name: flooded
+ - name: hyperv.vswitch_learned_mac_addresses
+ description: Virtual switch learned MAC addresses
+ unit: mac addresses/s
+ chart_type: line
+ dimensions:
+ - name: learned
+ - name: hyperv.vswitch_purged_mac_addresses
+ description: Virtual switch purged MAC addresses
+ unit: mac addresses/s
+ chart_type: line
+ dimensions:
+ - name: purged
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-windows-mssql
+ most_popular: false
+ keywords:
+ - windows
+ - microsoft
+ - mssql
+ - database
+ - db
+ monitored_instance:
+ name: MS SQL Server
+ link: https://www.microsoft.com/en-us/sql-server/
+ icon_filename: mssql.svg
+ categories:
+ - data-collection.windows-systems
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-windows-hyperv
+ most_popular: false
+ keywords:
+ - windows
+ - microsoft
+ - hyperv
+ - virtualization
+ - vm
+ monitored_instance:
+ name: HyperV
+ link: https://learn.microsoft.com/en-us/windows-server/virtualization/hyper-v/hyper-v-technology-overview
+ icon_filename: windows.svg
+ categories:
+ - data-collection.windows-systems
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-windows-msexchange
+ most_popular: false
+ keywords:
+ - windows
+ - microsoft
+ - mail
+ monitored_instance:
+ name: MS Exchange
+ link: https://www.microsoft.com/en-us/microsoft-365/exchange/email
+ icon_filename: exchange.svg
+ categories:
+ - data-collection.windows-systems
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-windows-ad
+ most_popular: false
+ keywords:
+ - windows
+ - microsoft
+ - active directory
+ - ad
+ - adcs
+ - adfs
+ monitored_instance:
+ name: Active Directory
+ link: https://learn.microsoft.com/en-us/windows-server/identity/ad-ds/get-started/virtual-dc/active-directory-domain-services-overview
+ icon_filename: windows.svg
+ categories:
+ - data-collection.windows-systems
+ - <<: *module
+ meta:
+ <<: *meta
+ id: collector-go.d.plugin-windows-dotnet
+ most_popular: false
+ keywords:
+ - windows
+ - microsoft
+ - dotnet
+ monitored_instance:
+ name: NET Framework
+ link: https://dotnet.microsoft.com/en-us/download/dotnet-framework
+ icon_filename: dotnet.svg
+ categories:
+ - data-collection.windows-systems
diff --git a/src/go/plugin/go.d/modules/windows/testdata/config.json b/src/go/plugin/go.d/modules/windows/testdata/config.json
new file mode 100644
index 000000000..6f8c1084e
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "vnode": "ok",
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/windows/testdata/config.yaml b/src/go/plugin/go.d/modules/windows/testdata/config.yaml
new file mode 100644
index 000000000..4bbb7474d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+vnode: "ok"
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/windows/testdata/v0.20.0/metrics.txt b/src/go/plugin/go.d/modules/windows/testdata/v0.20.0/metrics.txt
new file mode 100644
index 000000000..02b68c3f8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/testdata/v0.20.0/metrics.txt
@@ -0,0 +1,3129 @@
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 0
+go_gc_duration_seconds{quantile="0.25"} 0
+go_gc_duration_seconds{quantile="0.5"} 0
+go_gc_duration_seconds{quantile="0.75"} 0
+go_gc_duration_seconds{quantile="1"} 0.0023911
+go_gc_duration_seconds_sum 0.0044814
+go_gc_duration_seconds_count 23
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 10
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.19.1"} 1
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 8.035808e+06
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 5.9966872e+07
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.462168e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 111234
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 7.78308e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 8.035808e+06
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 5.767168e+06
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 1.0551296e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 34382
+# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes gauge
+go_memstats_heap_released_bytes 5.496832e+06
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 1.6318464e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.6675087416268353e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 0
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 145616
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 4672
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16352
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 102272
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 114240
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 1.0613856e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 908248
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 458752
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 458752
+# HELP go_memstats_sys_bytes Number of bytes obtained from system.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 2.7061304e+07
+# HELP go_threads Number of OS threads created.
+# TYPE go_threads gauge
+go_threads 10
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 0.609375
+# HELP process_max_fds Maximum number of open file descriptors.
+# TYPE process_max_fds gauge
+process_max_fds 1.6777216e+07
+# HELP process_open_fds Number of open file descriptors.
+# TYPE process_open_fds gauge
+process_open_fds 352
+# HELP process_resident_memory_bytes Resident memory size in bytes.
+# TYPE process_resident_memory_bytes gauge
+process_resident_memory_bytes 3.229696e+07
+# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
+# TYPE process_start_time_seconds gauge
+process_start_time_seconds 1.667508736e+09
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 3.5569664e+07
+# HELP windows_adcs_challenge_response_processing_time_seconds Last time elapsed for challenge response
+# TYPE windows_adcs_challenge_response_processing_time_seconds gauge
+windows_adcs_challenge_response_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_challenge_response_processing_time_seconds{cert_template="DomainController"} 0
+# HELP windows_adcs_challenge_responses_total Total certificate challenge responses processed
+# TYPE windows_adcs_challenge_responses_total counter
+windows_adcs_challenge_responses_total{cert_template="Administrator"} 0
+windows_adcs_challenge_responses_total{cert_template="DomainController"} 0
+# HELP windows_adcs_failed_requests_total Total failed certificate requests processed
+# TYPE windows_adcs_failed_requests_total counter
+windows_adcs_failed_requests_total{cert_template="Administrator"} 0
+windows_adcs_failed_requests_total{cert_template="DomainController"} 0
+# HELP windows_adcs_issued_requests_total Total issued certificate requests processed
+# TYPE windows_adcs_issued_requests_total counter
+windows_adcs_issued_requests_total{cert_template="Administrator"} 0
+windows_adcs_issued_requests_total{cert_template="DomainController"} 1
+# HELP windows_adcs_pending_requests_total Total pending certificate requests processed
+# TYPE windows_adcs_pending_requests_total counter
+windows_adcs_pending_requests_total{cert_template="Administrator"} 0
+windows_adcs_pending_requests_total{cert_template="DomainController"} 0
+# HELP windows_adcs_request_cryptographic_signing_time_seconds Last time elapsed for signing operation request
+# TYPE windows_adcs_request_cryptographic_signing_time_seconds gauge
+windows_adcs_request_cryptographic_signing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_request_cryptographic_signing_time_seconds{cert_template="DomainController"} 0
+# HELP windows_adcs_request_policy_module_processing_time_seconds Last time elapsed for policy module processing request
+# TYPE windows_adcs_request_policy_module_processing_time_seconds gauge
+windows_adcs_request_policy_module_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_request_policy_module_processing_time_seconds{cert_template="DomainController"} 0.016
+# HELP windows_adcs_request_processing_time_seconds Last time elapsed for certificate requests
+# TYPE windows_adcs_request_processing_time_seconds gauge
+windows_adcs_request_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_request_processing_time_seconds{cert_template="DomainController"} 0.063
+# HELP windows_adcs_requests_total Total certificate requests processed
+# TYPE windows_adcs_requests_total counter
+windows_adcs_requests_total{cert_template="Administrator"} 0
+windows_adcs_requests_total{cert_template="DomainController"} 1
+# HELP windows_adcs_retrievals_processing_time_seconds Last time elapsed for certificate retrieval request
+# TYPE windows_adcs_retrievals_processing_time_seconds gauge
+windows_adcs_retrievals_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_retrievals_processing_time_seconds{cert_template="DomainController"} 0
+# HELP windows_adcs_retrievals_total Total certificate retrieval requests processed
+# TYPE windows_adcs_retrievals_total counter
+windows_adcs_retrievals_total{cert_template="Administrator"} 0
+windows_adcs_retrievals_total{cert_template="DomainController"} 0
+# HELP windows_adcs_signed_certificate_timestamp_list_processing_time_seconds Last time elapsed for Signed Certificate Timestamp List
+# TYPE windows_adcs_signed_certificate_timestamp_list_processing_time_seconds gauge
+windows_adcs_signed_certificate_timestamp_list_processing_time_seconds{cert_template="Administrator"} 0
+windows_adcs_signed_certificate_timestamp_list_processing_time_seconds{cert_template="DomainController"} 0
+# HELP windows_adcs_signed_certificate_timestamp_lists_total Total Signed Certificate Timestamp Lists processed
+# TYPE windows_adcs_signed_certificate_timestamp_lists_total counter
+windows_adcs_signed_certificate_timestamp_lists_total{cert_template="Administrator"} 0
+windows_adcs_signed_certificate_timestamp_lists_total{cert_template="DomainController"} 0
+# HELP windows_adfs_ad_login_connection_failures_total Total number of connection failures to an Active Directory domain controller
+# TYPE windows_adfs_ad_login_connection_failures_total counter
+windows_adfs_ad_login_connection_failures_total 0
+# HELP windows_adfs_certificate_authentications_total Total number of User Certificate authentications
+# TYPE windows_adfs_certificate_authentications_total counter
+windows_adfs_certificate_authentications_total 0
+# HELP windows_adfs_db_artifact_failure_total Total number of failures connecting to the artifact database
+# TYPE windows_adfs_db_artifact_failure_total counter
+windows_adfs_db_artifact_failure_total 0
+# HELP windows_adfs_db_artifact_query_time_seconds_total Accumulator of time taken for an artifact database query
+# TYPE windows_adfs_db_artifact_query_time_seconds_total counter
+windows_adfs_db_artifact_query_time_seconds_total 0
+# HELP windows_adfs_db_config_failure_total Total number of failures connecting to the configuration database
+# TYPE windows_adfs_db_config_failure_total counter
+windows_adfs_db_config_failure_total 0
+# HELP windows_adfs_db_config_query_time_seconds_total Accumulator of time taken for a configuration database query
+# TYPE windows_adfs_db_config_query_time_seconds_total counter
+windows_adfs_db_config_query_time_seconds_total 0.10111504
+# HELP windows_adfs_device_authentications_total Total number of Device authentications
+# TYPE windows_adfs_device_authentications_total counter
+windows_adfs_device_authentications_total 0
+# HELP windows_adfs_external_authentications_failure_total Total number of failed authentications from external MFA providers
+# TYPE windows_adfs_external_authentications_failure_total counter
+windows_adfs_external_authentications_failure_total 0
+# HELP windows_adfs_external_authentications_success_total Total number of successful authentications from external MFA providers
+# TYPE windows_adfs_external_authentications_success_total counter
+windows_adfs_external_authentications_success_total 0
+# HELP windows_adfs_extranet_account_lockouts_total Total number of Extranet Account Lockouts
+# TYPE windows_adfs_extranet_account_lockouts_total counter
+windows_adfs_extranet_account_lockouts_total 0
+# HELP windows_adfs_federated_authentications_total Total number of authentications from a federated source
+# TYPE windows_adfs_federated_authentications_total counter
+windows_adfs_federated_authentications_total 0
+# HELP windows_adfs_federation_metadata_requests_total Total number of Federation Metadata requests
+# TYPE windows_adfs_federation_metadata_requests_total counter
+windows_adfs_federation_metadata_requests_total 1
+# HELP windows_adfs_oauth_authorization_requests_total Total number of incoming requests to the OAuth Authorization endpoint
+# TYPE windows_adfs_oauth_authorization_requests_total counter
+windows_adfs_oauth_authorization_requests_total 0
+# HELP windows_adfs_oauth_client_authentication_failure_total Total number of failed OAuth client Authentications
+# TYPE windows_adfs_oauth_client_authentication_failure_total counter
+windows_adfs_oauth_client_authentication_failure_total 0
+# HELP windows_adfs_oauth_client_authentication_success_total Total number of successful OAuth client Authentications
+# TYPE windows_adfs_oauth_client_authentication_success_total counter
+windows_adfs_oauth_client_authentication_success_total 0
+# HELP windows_adfs_oauth_client_credentials_failure_total Total number of failed OAuth Client Credentials Requests
+# TYPE windows_adfs_oauth_client_credentials_failure_total counter
+windows_adfs_oauth_client_credentials_failure_total 0
+# HELP windows_adfs_oauth_client_credentials_success_total Total number of successful RP tokens issued for OAuth Client Credentials Requests
+# TYPE windows_adfs_oauth_client_credentials_success_total counter
+windows_adfs_oauth_client_credentials_success_total 0
+# HELP windows_adfs_oauth_client_privkey_jtw_authentication_failure_total Total number of failed OAuth Client Private Key Jwt Authentications
+# TYPE windows_adfs_oauth_client_privkey_jtw_authentication_failure_total counter
+windows_adfs_oauth_client_privkey_jtw_authentication_failure_total 0
+# HELP windows_adfs_oauth_client_privkey_jwt_authentications_success_total Total number of successful OAuth Client Private Key Jwt Authentications
+# TYPE windows_adfs_oauth_client_privkey_jwt_authentications_success_total counter
+windows_adfs_oauth_client_privkey_jwt_authentications_success_total 0
+# HELP windows_adfs_oauth_client_secret_basic_authentications_failure_total Total number of failed OAuth Client Secret Basic Authentications
+# TYPE windows_adfs_oauth_client_secret_basic_authentications_failure_total counter
+windows_adfs_oauth_client_secret_basic_authentications_failure_total 0
+# HELP windows_adfs_oauth_client_secret_basic_authentications_success_total Total number of successful OAuth Client Secret Basic Authentications
+# TYPE windows_adfs_oauth_client_secret_basic_authentications_success_total counter
+windows_adfs_oauth_client_secret_basic_authentications_success_total 0
+# HELP windows_adfs_oauth_client_secret_post_authentications_failure_total Total number of failed OAuth Client Secret Post Authentications
+# TYPE windows_adfs_oauth_client_secret_post_authentications_failure_total counter
+windows_adfs_oauth_client_secret_post_authentications_failure_total 0
+# HELP windows_adfs_oauth_client_secret_post_authentications_success_total Total number of successful OAuth Client Secret Post Authentications
+# TYPE windows_adfs_oauth_client_secret_post_authentications_success_total counter
+windows_adfs_oauth_client_secret_post_authentications_success_total 0
+# HELP windows_adfs_oauth_client_windows_authentications_failure_total Total number of failed OAuth Client Windows Integrated Authentications
+# TYPE windows_adfs_oauth_client_windows_authentications_failure_total counter
+windows_adfs_oauth_client_windows_authentications_failure_total 0
+# HELP windows_adfs_oauth_client_windows_authentications_success_total Total number of successful OAuth Client Windows Integrated Authentications
+# TYPE windows_adfs_oauth_client_windows_authentications_success_total counter
+windows_adfs_oauth_client_windows_authentications_success_total 0
+# HELP windows_adfs_oauth_logon_certificate_requests_failure_total Total number of failed OAuth Logon Certificate Requests
+# TYPE windows_adfs_oauth_logon_certificate_requests_failure_total counter
+windows_adfs_oauth_logon_certificate_requests_failure_total 0
+# HELP windows_adfs_oauth_logon_certificate_token_requests_success_total Total number of successful RP tokens issued for OAuth Logon Certificate Requests
+# TYPE windows_adfs_oauth_logon_certificate_token_requests_success_total counter
+windows_adfs_oauth_logon_certificate_token_requests_success_total 0
+# HELP windows_adfs_oauth_password_grant_requests_failure_total Total number of failed OAuth Password Grant Requests
+# TYPE windows_adfs_oauth_password_grant_requests_failure_total counter
+windows_adfs_oauth_password_grant_requests_failure_total 0
+# HELP windows_adfs_oauth_password_grant_requests_success_total Total number of successful OAuth Password Grant Requests
+# TYPE windows_adfs_oauth_password_grant_requests_success_total counter
+windows_adfs_oauth_password_grant_requests_success_total 0
+# HELP windows_adfs_oauth_token_requests_success_total Total number of successful RP tokens issued over OAuth protocol
+# TYPE windows_adfs_oauth_token_requests_success_total counter
+windows_adfs_oauth_token_requests_success_total 0
+# HELP windows_adfs_passive_requests_total Total number of passive (browser-based) requests
+# TYPE windows_adfs_passive_requests_total counter
+windows_adfs_passive_requests_total 0
+# HELP windows_adfs_passport_authentications_total Total number of Microsoft Passport SSO authentications
+# TYPE windows_adfs_passport_authentications_total counter
+windows_adfs_passport_authentications_total 0
+# HELP windows_adfs_password_change_failed_total Total number of failed password changes
+# TYPE windows_adfs_password_change_failed_total counter
+windows_adfs_password_change_failed_total 0
+# HELP windows_adfs_password_change_succeeded_total Total number of successful password changes
+# TYPE windows_adfs_password_change_succeeded_total counter
+windows_adfs_password_change_succeeded_total 0
+# HELP windows_adfs_samlp_token_requests_success_total Total number of successful RP tokens issued over SAML-P protocol
+# TYPE windows_adfs_samlp_token_requests_success_total counter
+windows_adfs_samlp_token_requests_success_total 0
+# HELP windows_adfs_sso_authentications_failure_total Total number of failed SSO authentications
+# TYPE windows_adfs_sso_authentications_failure_total counter
+windows_adfs_sso_authentications_failure_total 0
+# HELP windows_adfs_sso_authentications_success_total Total number of successful SSO authentications
+# TYPE windows_adfs_sso_authentications_success_total counter
+windows_adfs_sso_authentications_success_total 0
+# HELP windows_adfs_token_requests_total Total number of token requests
+# TYPE windows_adfs_token_requests_total counter
+windows_adfs_token_requests_total 0
+# HELP windows_adfs_userpassword_authentications_failure_total Total number of failed AD U/P authentications
+# TYPE windows_adfs_userpassword_authentications_failure_total counter
+windows_adfs_userpassword_authentications_failure_total 0
+# HELP windows_adfs_userpassword_authentications_success_total Total number of successful AD U/P authentications
+# TYPE windows_adfs_userpassword_authentications_success_total counter
+windows_adfs_userpassword_authentications_success_total 0
+# HELP windows_adfs_windows_integrated_authentications_total Total number of Windows integrated authentications (Kerberos/NTLM)
+# TYPE windows_adfs_windows_integrated_authentications_total counter
+windows_adfs_windows_integrated_authentications_total 0
+# HELP windows_adfs_wsfed_token_requests_success_total Total number of successful RP tokens issued over WS-Fed protocol
+# TYPE windows_adfs_wsfed_token_requests_success_total counter
+windows_adfs_wsfed_token_requests_success_total 0
+# HELP windows_adfs_wstrust_token_requests_success_total Total number of successful RP tokens issued over WS-Trust protocol
+# TYPE windows_adfs_wstrust_token_requests_success_total counter
+windows_adfs_wstrust_token_requests_success_total 0
+# HELP windows_ad_atq_average_request_latency
+# TYPE windows_ad_atq_average_request_latency gauge
+windows_ad_atq_average_request_latency 0
+# HELP windows_ad_atq_outstanding_requests
+# TYPE windows_ad_atq_outstanding_requests gauge
+windows_ad_atq_outstanding_requests 0
+# HELP windows_ad_database_operations_total
+# TYPE windows_ad_database_operations_total counter
+windows_ad_database_operations_total{operation="add"} 1
+windows_ad_database_operations_total{operation="delete"} 0
+windows_ad_database_operations_total{operation="modify"} 30
+windows_ad_database_operations_total{operation="recycle"} 0
+# HELP windows_ad_directory_operations_total
+# TYPE windows_ad_directory_operations_total counter
+windows_ad_directory_operations_total{operation="read",origin="directory_service_api"} 0
+windows_ad_directory_operations_total{operation="read",origin="knowledge_consistency_checker"} 60
+windows_ad_directory_operations_total{operation="read",origin="local_security_authority"} 20
+windows_ad_directory_operations_total{operation="read",origin="name_service_provider_interface"} 0
+windows_ad_directory_operations_total{operation="read",origin="other"} 50
+windows_ad_directory_operations_total{operation="read",origin="replication_agent"} 0
+windows_ad_directory_operations_total{operation="read",origin="security_account_manager"} 596
+windows_ad_directory_operations_total{operation="search",origin="directory_service_api"} 101
+windows_ad_directory_operations_total{operation="search",origin="knowledge_consistency_checker"} 21
+windows_ad_directory_operations_total{operation="search",origin="ldap"} 606
+windows_ad_directory_operations_total{operation="search",origin="local_security_authority"} 9
+windows_ad_directory_operations_total{operation="search",origin="name_service_provider_interface"} 0
+windows_ad_directory_operations_total{operation="search",origin="other"} 56
+windows_ad_directory_operations_total{operation="search",origin="replication_agent"} 0
+windows_ad_directory_operations_total{operation="search",origin="security_account_manager"} 38
+windows_ad_directory_operations_total{operation="write",origin="directory_service_api"} 3
+windows_ad_directory_operations_total{operation="write",origin="knowledge_consistency_checker"} 0
+windows_ad_directory_operations_total{operation="write",origin="ldap"} 1
+windows_ad_directory_operations_total{operation="write",origin="local_security_authority"} 0
+windows_ad_directory_operations_total{operation="write",origin="name_service_provider_interface"} 0
+windows_ad_directory_operations_total{operation="write",origin="other"} 1
+windows_ad_directory_operations_total{operation="write",origin="replication_agent"} 0
+windows_ad_directory_operations_total{operation="write",origin="security_account_manager"} 26
+# HELP windows_ad_name_cache_lookups_total
+# TYPE windows_ad_name_cache_lookups_total counter
+windows_ad_name_cache_lookups_total 53046
+# HELP windows_ad_name_cache_hits_total
+# TYPE windows_ad_name_cache_hits_total counter
+windows_ad_name_cache_hits_total 41161
+# HELP windows_ad_replication_inbound_objects_filtered_total
+# TYPE windows_ad_replication_inbound_objects_filtered_total counter
+windows_ad_replication_inbound_objects_filtered_total 0
+# HELP windows_ad_replication_inbound_properties_filtered_total
+# TYPE windows_ad_replication_inbound_properties_filtered_total counter
+windows_ad_replication_inbound_properties_filtered_total 0
+# HELP windows_ad_replication_inbound_properties_updated_total
+# TYPE windows_ad_replication_inbound_properties_updated_total counter
+windows_ad_replication_inbound_properties_updated_total 0
+# HELP windows_ad_replication_inbound_objects_updated_total
+# TYPE windows_ad_replication_inbound_objects_updated_total counter
+windows_ad_replication_inbound_objects_updated_total 0
+# HELP windows_ad_replication_inbound_sync_objects_remaining
+# TYPE windows_ad_replication_inbound_sync_objects_remaining gauge
+windows_ad_replication_inbound_sync_objects_remaining 0
+# HELP windows_ad_replication_data_intersite_bytes_total
+# TYPE windows_ad_replication_data_intersite_bytes_total counter
+windows_ad_replication_data_intersite_bytes_total{direction="inbound"} 0
+windows_ad_replication_data_intersite_bytes_total{direction="outbound"} 0
+# HELP windows_ad_replication_data_intrasite_bytes_total
+# TYPE windows_ad_replication_data_intrasite_bytes_total counter
+windows_ad_replication_data_intrasite_bytes_total{direction="inbound"} 0
+windows_ad_replication_data_intrasite_bytes_total{direction="outbound"} 0
+# HELP windows_ad_replication_pending_synchronizations
+# TYPE windows_ad_replication_pending_synchronizations gauge
+windows_ad_replication_pending_synchronizations 0
+# HELP windows_ad_replication_sync_requests_total
+# TYPE windows_ad_replication_sync_requests_total counter
+windows_ad_replication_sync_requests_total 0
+# HELP windows_ad_directory_service_threads
+# TYPE windows_ad_directory_service_threads gauge
+windows_ad_directory_service_threads 0
+# HELP windows_ad_ldap_last_bind_time_seconds
+# TYPE windows_ad_ldap_last_bind_time_seconds gauge
+windows_ad_ldap_last_bind_time_seconds 0
+# HELP windows_ad_binds_total
+# TYPE windows_ad_binds_total counter
+windows_ad_binds_total{bind_method="ldap"} 184
+# HELP windows_ad_ldap_searches_total
+# TYPE windows_ad_ldap_searches_total counter
+windows_ad_ldap_searches_total 1382
+# HELP windows_cpu_clock_interrupts_total Total number of received and serviced clock tick interrupts
+# TYPE windows_cpu_clock_interrupts_total counter
+windows_cpu_clock_interrupts_total{core="0,0"} 9.1949524e+07
+windows_cpu_clock_interrupts_total{core="0,1"} 1.0416934e+07
+windows_cpu_clock_interrupts_total{core="0,2"} 1.0417092e+07
+windows_cpu_clock_interrupts_total{core="0,3"} 1.0416548e+07
+# HELP windows_cpu_core_frequency_mhz Core frequency in megahertz
+# TYPE windows_cpu_core_frequency_mhz gauge
+windows_cpu_core_frequency_mhz{core="0,0"} 3187
+windows_cpu_core_frequency_mhz{core="0,1"} 3187
+windows_cpu_core_frequency_mhz{core="0,2"} 3187
+windows_cpu_core_frequency_mhz{core="0,3"} 3187
+# HELP windows_cpu_cstate_seconds_total Time spent in low-power idle state
+# TYPE windows_cpu_cstate_seconds_total counter
+windows_cpu_cstate_seconds_total{core="0,0",state="c1"} 160233.4270483
+windows_cpu_cstate_seconds_total{core="0,0",state="c2"} 0
+windows_cpu_cstate_seconds_total{core="0,0",state="c3"} 0
+windows_cpu_cstate_seconds_total{core="0,1",state="c1"} 159528.0543212
+windows_cpu_cstate_seconds_total{core="0,1",state="c2"} 0
+windows_cpu_cstate_seconds_total{core="0,1",state="c3"} 0
+windows_cpu_cstate_seconds_total{core="0,2",state="c1"} 159891.7232105
+windows_cpu_cstate_seconds_total{core="0,2",state="c2"} 0
+windows_cpu_cstate_seconds_total{core="0,2",state="c3"} 0
+windows_cpu_cstate_seconds_total{core="0,3",state="c1"} 159544.11780809998
+windows_cpu_cstate_seconds_total{core="0,3",state="c2"} 0
+windows_cpu_cstate_seconds_total{core="0,3",state="c3"} 0
+# HELP windows_cpu_dpcs_total Total number of received and serviced deferred procedure calls (DPCs)
+# TYPE windows_cpu_dpcs_total counter
+windows_cpu_dpcs_total{core="0,0"} 4.8719e+06
+windows_cpu_dpcs_total{core="0,1"} 1.650552e+06
+windows_cpu_dpcs_total{core="0,2"} 2.236469e+06
+windows_cpu_dpcs_total{core="0,3"} 1.185046e+06
+# HELP windows_cpu_idle_break_events_total Total number of time processor was woken from idle
+# TYPE windows_cpu_idle_break_events_total counter
+windows_cpu_idle_break_events_total{core="0,0"} 1.40806638e+08
+windows_cpu_idle_break_events_total{core="0,1"} 7.069832e+07
+windows_cpu_idle_break_events_total{core="0,2"} 6.0430118e+07
+windows_cpu_idle_break_events_total{core="0,3"} 5.5224469e+07
+# HELP windows_cpu_interrupts_total Total number of received and serviced hardware interrupts
+# TYPE windows_cpu_interrupts_total counter
+windows_cpu_interrupts_total{core="0,0"} 1.55194331e+08
+windows_cpu_interrupts_total{core="0,1"} 7.9325847e+07
+windows_cpu_interrupts_total{core="0,2"} 6.7305419e+07
+windows_cpu_interrupts_total{core="0,3"} 6.0766938e+07
+# HELP windows_cpu_parking_status Parking Status represents whether a processor is parked or not
+# TYPE windows_cpu_parking_status gauge
+windows_cpu_parking_status{core="0,0"} 0
+windows_cpu_parking_status{core="0,1"} 0
+windows_cpu_parking_status{core="0,2"} 0
+windows_cpu_parking_status{core="0,3"} 0
+# HELP windows_cpu_processor_performance Processor Performance is the average performance of the processor while it is executing instructions, as a percentage of the nominal performance of the processor. On some processors, Processor Performance may exceed 100%
+# TYPE windows_cpu_processor_performance gauge
+windows_cpu_processor_performance{core="0,0"} 2.79873813368e+11
+windows_cpu_processor_performance{core="0,1"} 3.239596095e+11
+windows_cpu_processor_performance{core="0,2"} 3.01145132737e+11
+windows_cpu_processor_performance{core="0,3"} 3.22955641675e+11
+# HELP windows_cpu_time_total Time that processor spent in different modes (dpc, idle, interrupt, privileged, user)
+# TYPE windows_cpu_time_total counter
+windows_cpu_time_total{core="0,0",mode="dpc"} 67.109375
+windows_cpu_time_total{core="0,0",mode="idle"} 162455.59375
+windows_cpu_time_total{core="0,0",mode="interrupt"} 77.28125
+windows_cpu_time_total{core="0,0",mode="privileged"} 1182.109375
+windows_cpu_time_total{core="0,0",mode="user"} 1073.671875
+windows_cpu_time_total{core="0,1",mode="dpc"} 11.09375
+windows_cpu_time_total{core="0,1",mode="idle"} 159478.125
+windows_cpu_time_total{core="0,1",mode="interrupt"} 58.09375
+windows_cpu_time_total{core="0,1",mode="privileged"} 1801.234375
+windows_cpu_time_total{core="0,1",mode="user"} 3432
+windows_cpu_time_total{core="0,2",mode="dpc"} 16.0625
+windows_cpu_time_total{core="0,2",mode="idle"} 159848.4375
+windows_cpu_time_total{core="0,2",mode="interrupt"} 53.515625
+windows_cpu_time_total{core="0,2",mode="privileged"} 1812.546875
+windows_cpu_time_total{core="0,2",mode="user"} 3050.25
+windows_cpu_time_total{core="0,3",mode="dpc"} 8.140625
+windows_cpu_time_total{core="0,3",mode="idle"} 159527.546875
+windows_cpu_time_total{core="0,3",mode="interrupt"} 44.484375
+windows_cpu_time_total{core="0,3",mode="privileged"} 1760.828125
+windows_cpu_time_total{core="0,3",mode="user"} 3422.875
+# HELP windows_exporter_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which windows_exporter was built.
+# TYPE windows_exporter_build_info gauge
+windows_exporter_build_info{branch="heads/tags/v0.20.0",goversion="go1.19.1",revision="677a7c8d67deb99b92f4f24b8c890e0a4c152b0c",version="0.20.0"} 1
+# HELP windows_exporter_collector_duration_seconds windows_exporter: Duration of a collection.
+# TYPE windows_exporter_collector_duration_seconds gauge
+windows_exporter_collector_duration_seconds{collector="ad"} 0.7690505
+windows_exporter_collector_duration_seconds{collector="adcs"} 0.0006833
+windows_exporter_collector_duration_seconds{collector="adfs"} 0.0031012
+windows_exporter_collector_duration_seconds{collector="cpu"} 0.00052
+windows_exporter_collector_duration_seconds{collector="exchange"} 0.0334467
+windows_exporter_collector_duration_seconds{collector="hyperv"} 0.9003895
+windows_exporter_collector_duration_seconds{collector="iis"} 0
+windows_exporter_collector_duration_seconds{collector="logical_disk"} 0
+windows_exporter_collector_duration_seconds{collector="logon"} 0.1139134
+windows_exporter_collector_duration_seconds{collector="memory"} 0.00052
+windows_exporter_collector_duration_seconds{collector="mssql"} 0.003369
+windows_exporter_collector_duration_seconds{collector="netframework_clrexceptions"} 1.437537
+windows_exporter_collector_duration_seconds{collector="netframework_clrinterop"} 1.4911402
+windows_exporter_collector_duration_seconds{collector="netframework_clrjit"} 1.2789005
+windows_exporter_collector_duration_seconds{collector="netframework_clrloading"} 1.3232636
+windows_exporter_collector_duration_seconds{collector="netframework_clrlocksandthreads"} 1.3578413999999999
+windows_exporter_collector_duration_seconds{collector="netframework_clrmemory"} 1.4066725
+windows_exporter_collector_duration_seconds{collector="netframework_clrremoting"} 1.5191553
+windows_exporter_collector_duration_seconds{collector="netframework_clrsecurity"} 1.4670829
+windows_exporter_collector_duration_seconds{collector="net"} 0
+windows_exporter_collector_duration_seconds{collector="os"} 0.0023497
+windows_exporter_collector_duration_seconds{collector="process"} 0.1154812
+windows_exporter_collector_duration_seconds{collector="service"} 0.1016404
+windows_exporter_collector_duration_seconds{collector="system"} 0.0006105
+windows_exporter_collector_duration_seconds{collector="tcp"} 0
+# HELP windows_exporter_collector_success windows_exporter: Whether the collector was successful.
+# TYPE windows_exporter_collector_success gauge
+windows_exporter_collector_success{collector="ad"} 1
+windows_exporter_collector_success{collector="adcs"} 1
+windows_exporter_collector_success{collector="adfs"} 1
+windows_exporter_collector_success{collector="cpu"} 1
+windows_exporter_collector_success{collector="exchange"} 1
+windows_exporter_collector_success{collector="hyperv"} 1
+windows_exporter_collector_success{collector="iis"} 1
+windows_exporter_collector_success{collector="logical_disk"} 1
+windows_exporter_collector_success{collector="logon"} 1
+windows_exporter_collector_success{collector="memory"} 1
+windows_exporter_collector_success{collector="mssql"} 1
+windows_exporter_collector_success{collector="netframework_clrexceptions"} 1
+windows_exporter_collector_success{collector="netframework_clrinterop"} 1
+windows_exporter_collector_success{collector="netframework_clrjit"} 1
+windows_exporter_collector_success{collector="netframework_clrloading"} 1
+windows_exporter_collector_success{collector="netframework_clrlocksandthreads"} 1
+windows_exporter_collector_success{collector="netframework_clrmemory"} 1
+windows_exporter_collector_success{collector="netframework_clrremoting"} 1
+windows_exporter_collector_success{collector="netframework_clrsecurity"} 1
+windows_exporter_collector_success{collector="net"} 1
+windows_exporter_collector_success{collector="os"} 1
+windows_exporter_collector_success{collector="process"} 1
+windows_exporter_collector_success{collector="service"} 1
+windows_exporter_collector_success{collector="system"} 1
+windows_exporter_collector_success{collector="tcp"} 1
+# HELP windows_exporter_collector_timeout windows_exporter: Whether the collector timed out.
+# TYPE windows_exporter_collector_timeout gauge
+windows_exporter_collector_timeout{collector="ad"} 0
+windows_exporter_collector_timeout{collector="adcs"} 0
+windows_exporter_collector_timeout{collector="adfs"} 0
+windows_exporter_collector_timeout{collector="cpu"} 0
+windows_exporter_collector_timeout{collector="exchange"} 0
+windows_exporter_collector_timeout{collector="hyperv"} 0
+windows_exporter_collector_timeout{collector="iis"} 0
+windows_exporter_collector_timeout{collector="logical_disk"} 0
+windows_exporter_collector_timeout{collector="logon"} 0
+windows_exporter_collector_timeout{collector="memory"} 0
+windows_exporter_collector_timeout{collector="mssql"} 0
+windows_exporter_collector_timeout{collector="netframework_clrexceptions"} 0
+windows_exporter_collector_timeout{collector="netframework_clrinterop"} 0
+windows_exporter_collector_timeout{collector="netframework_clrjit"} 0
+windows_exporter_collector_timeout{collector="netframework_clrloading"} 0
+windows_exporter_collector_timeout{collector="netframework_clrlocksandthreads"} 0
+windows_exporter_collector_timeout{collector="netframework_clrmemory"} 0
+windows_exporter_collector_timeout{collector="netframework_clrremoting"} 0
+windows_exporter_collector_timeout{collector="netframework_clrsecurity"} 0
+windows_exporter_collector_timeout{collector="net"} 0
+windows_exporter_collector_timeout{collector="os"} 0
+windows_exporter_collector_timeout{collector="process"} 0
+windows_exporter_collector_timeout{collector="service"} 0
+windows_exporter_collector_timeout{collector="system"} 0
+windows_exporter_collector_timeout{collector="tcp"} 0
+# HELP windows_exchange_http_proxy_avg_auth_latency Average time spent authenticating CAS requests over the last 200 samples
+# TYPE windows_exchange_http_proxy_avg_auth_latency gauge
+windows_exchange_http_proxy_avg_auth_latency{name="autodiscover"} 1
+windows_exchange_http_proxy_avg_auth_latency{name="eas"} 0
+# HELP windows_exchange_http_proxy_avg_cas_proccessing_latency_sec Average latency (sec) of CAS processing time over the last 200 reqs
+# TYPE windows_exchange_http_proxy_avg_cas_proccessing_latency_sec gauge
+windows_exchange_http_proxy_avg_cas_proccessing_latency_sec{name="autodiscover"} 0.003
+windows_exchange_http_proxy_avg_cas_proccessing_latency_sec{name="eas"} 0.003
+# HELP windows_exchange_http_proxy_mailbox_proxy_failure_rate % of failures between this CAS and MBX servers over the last 200 samples
+# TYPE windows_exchange_http_proxy_mailbox_proxy_failure_rate gauge
+windows_exchange_http_proxy_mailbox_proxy_failure_rate{name="autodiscover"} 0
+windows_exchange_http_proxy_mailbox_proxy_failure_rate{name="eas"} 0
+# HELP windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec Average latency (sec) of MailboxServerLocator web service calls
+# TYPE windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec gauge
+windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec{name="autodiscover"} 0.008
+windows_exchange_http_proxy_mailbox_server_locator_avg_latency_sec{name="eas"} 0.008
+# HELP windows_exchange_http_proxy_outstanding_proxy_requests Number of concurrent outstanding proxy requests
+# TYPE windows_exchange_http_proxy_outstanding_proxy_requests gauge
+windows_exchange_http_proxy_outstanding_proxy_requests{name="autodiscover"} 0
+windows_exchange_http_proxy_outstanding_proxy_requests{name="eas"} 0
+# HELP windows_exchange_http_proxy_requests_total Number of proxy requests processed each second
+# TYPE windows_exchange_http_proxy_requests_total counter
+windows_exchange_http_proxy_requests_total{name="autodiscover"} 27122
+windows_exchange_http_proxy_requests_total{name="eas"} 32519
+# HELP windows_exchange_ldap_long_running_ops_per_sec Long Running LDAP operations per second
+# TYPE windows_exchange_ldap_long_running_ops_per_sec counter
+windows_exchange_ldap_long_running_ops_per_sec{name="complianceauditservice"} 0
+windows_exchange_ldap_long_running_ops_per_sec{name="complianceauditservice_10"} 0
+# HELP windows_exchange_ldap_read_time_sec Time (sec) to send an LDAP read request and receive a response
+# TYPE windows_exchange_ldap_read_time_sec counter
+windows_exchange_ldap_read_time_sec{name="complianceauditservice"} 0.008
+windows_exchange_ldap_read_time_sec{name="complianceauditservice_10"} 0.018
+# HELP windows_exchange_ldap_search_time_sec Time (sec) to send an LDAP search request and receive a response
+# TYPE windows_exchange_ldap_search_time_sec counter
+windows_exchange_ldap_search_time_sec{name="complianceauditservice"} 0.046
+windows_exchange_ldap_search_time_sec{name="complianceauditservice_10"} 0.058
+# TYPE windows_exchange_ldap_timeout_errors_total counter
+windows_exchange_ldap_timeout_errors_total{name="complianceauditservice"} 0
+windows_exchange_ldap_timeout_errors_total{name="complianceauditservice_10"} 0
+# HELP windows_exchange_ldap_write_time_sec Time (sec) to send an LDAP Add/Modify/Delete request and receive a response
+# TYPE windows_exchange_ldap_write_time_sec counter
+windows_exchange_ldap_write_time_sec{name="complianceauditservice"} 0
+windows_exchange_ldap_write_time_sec{name="complianceauditservice_10"} 0
+# HELP windows_exporter_perflib_snapshot_duration_seconds Duration of perflib snapshot capture
+# TYPE windows_exporter_perflib_snapshot_duration_seconds gauge
+windows_exporter_perflib_snapshot_duration_seconds 0.0054258
+# HELP windows_exchange_activesync_ping_cmds_pending Number of ping commands currently pending in the queue
+# TYPE windows_exchange_activesync_ping_cmds_pending gauge
+windows_exchange_activesync_ping_cmds_pending 0
+# HELP windows_exchange_activesync_requests_total Num HTTP requests received from the client via ASP.NET per sec. Shows Current user load
+# TYPE windows_exchange_activesync_requests_total counter
+windows_exchange_activesync_requests_total 14
+# HELP windows_exchange_activesync_sync_cmds_total Number of sync commands processed per second. Clients use this command to synchronize items within a folder
+# TYPE windows_exchange_activesync_sync_cmds_total counter
+windows_exchange_activesync_sync_cmds_total 0
+# HELP windows_exchange_autodiscover_requests_total Number of autodiscover service requests processed each second
+# TYPE windows_exchange_autodiscover_requests_total counter
+windows_exchange_autodiscover_requests_total 1
+# HELP windows_exchange_avail_service_requests_per_sec Number of requests serviced per second
+# TYPE windows_exchange_avail_service_requests_per_sec counter
+windows_exchange_avail_service_requests_per_sec 0
+# HELP windows_exchange_owa_current_unique_users Number of unique users currently logged on to Outlook Web App
+# TYPE windows_exchange_owa_current_unique_users gauge
+windows_exchange_owa_current_unique_users 0
+# HELP windows_exchange_owa_requests_total Number of requests handled by Outlook Web App per second
+# TYPE windows_exchange_owa_requests_total counter
+windows_exchange_owa_requests_total 0
+# HELP windows_exchange_rpc_active_user_count Number of unique users that have shown some kind of activity in the last 2 minutes
+# TYPE windows_exchange_rpc_active_user_count gauge
+windows_exchange_rpc_active_user_count 0
+# HELP windows_exchange_rpc_avg_latency_sec The latency (sec), averaged for the past 1024 packets
+# TYPE windows_exchange_rpc_avg_latency_sec gauge
+windows_exchange_rpc_avg_latency_sec 0.001
+# HELP windows_exchange_rpc_connection_count Total number of client connections maintained
+# TYPE windows_exchange_rpc_connection_count gauge
+windows_exchange_rpc_connection_count 0
+# HELP windows_exchange_rpc_operations_total The rate at which RPC operations occur
+# TYPE windows_exchange_rpc_operations_total counter
+windows_exchange_rpc_operations_total 9
+# HELP windows_exchange_rpc_requests Number of client requests currently being processed by the RPC Client Access service
+# TYPE windows_exchange_rpc_requests gauge
+windows_exchange_rpc_requests 0
+# HELP windows_exchange_rpc_user_count Number of users
+# TYPE windows_exchange_rpc_user_count gauge
+windows_exchange_rpc_user_count 0
+# HELP windows_exchange_transport_queues_active_mailbox_delivery Active Mailbox Delivery Queue length
+# TYPE windows_exchange_transport_queues_active_mailbox_delivery gauge
+windows_exchange_transport_queues_active_mailbox_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_active_mailbox_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_active_mailbox_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_active_mailbox_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_active_mailbox_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_external_active_remote_delivery External Active Remote Delivery Queue length
+# TYPE windows_exchange_transport_queues_external_active_remote_delivery gauge
+windows_exchange_transport_queues_external_active_remote_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_external_active_remote_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_external_active_remote_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_external_active_remote_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_external_active_remote_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_external_largest_delivery External Largest Delivery Queue length
+# TYPE windows_exchange_transport_queues_external_largest_delivery gauge
+windows_exchange_transport_queues_external_largest_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_external_largest_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_external_largest_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_external_largest_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_external_largest_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_internal_active_remote_delivery Internal Active Remote Delivery Queue length
+# TYPE windows_exchange_transport_queues_internal_active_remote_delivery gauge
+windows_exchange_transport_queues_internal_active_remote_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_internal_active_remote_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_internal_active_remote_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_internal_active_remote_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_internal_active_remote_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_internal_largest_delivery Internal Largest Delivery Queue length
+# TYPE windows_exchange_transport_queues_internal_largest_delivery gauge
+windows_exchange_transport_queues_internal_largest_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_internal_largest_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_internal_largest_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_internal_largest_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_internal_largest_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_poison Poison Queue length
+# TYPE windows_exchange_transport_queues_poison gauge
+windows_exchange_transport_queues_poison{name="high_priority"} 0
+windows_exchange_transport_queues_poison{name="low_priority"} 0
+windows_exchange_transport_queues_poison{name="none_priority"} 0
+windows_exchange_transport_queues_poison{name="normal_priority"} 0
+windows_exchange_transport_queues_poison{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_retry_mailbox_delivery Retry Mailbox Delivery Queue length
+# TYPE windows_exchange_transport_queues_retry_mailbox_delivery gauge
+windows_exchange_transport_queues_retry_mailbox_delivery{name="high_priority"} 0
+windows_exchange_transport_queues_retry_mailbox_delivery{name="low_priority"} 0
+windows_exchange_transport_queues_retry_mailbox_delivery{name="none_priority"} 0
+windows_exchange_transport_queues_retry_mailbox_delivery{name="normal_priority"} 0
+windows_exchange_transport_queues_retry_mailbox_delivery{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_transport_queues_unreachable Unreachable Queue length
+# TYPE windows_exchange_transport_queues_unreachable gauge
+windows_exchange_transport_queues_unreachable{name="high_priority"} 0
+windows_exchange_transport_queues_unreachable{name="low_priority"} 0
+windows_exchange_transport_queues_unreachable{name="none_priority"} 0
+windows_exchange_transport_queues_unreachable{name="normal_priority"} 0
+windows_exchange_transport_queues_unreachable{name="total_excluding_priority_none"} 0
+# HELP windows_exchange_workload_active_tasks Number of active tasks currently running in the background for workload management
+# TYPE windows_exchange_workload_active_tasks gauge
+windows_exchange_workload_active_tasks{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 0
+windows_exchange_workload_active_tasks{name="microsoft_exchange_servicehost_darruntime"} 0
+# HELP windows_exchange_workload_completed_tasks Number of workload management tasks that have been completed
+# TYPE windows_exchange_workload_completed_tasks counter
+windows_exchange_workload_completed_tasks{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 0
+windows_exchange_workload_completed_tasks{name="microsoft_exchange_servicehost_darruntime"} 0
+# HELP windows_exchange_workload_is_active Active indicates whether the workload is in an active (1) or paused (0) state
+# TYPE windows_exchange_workload_is_active gauge
+windows_exchange_workload_is_active{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 1
+windows_exchange_workload_is_active{name="microsoft_exchange_servicehost_darruntime"} 1
+# HELP windows_exchange_workload_queued_tasks Number of workload management tasks that are currently queued up waiting to be processed
+# TYPE windows_exchange_workload_queued_tasks counter
+windows_exchange_workload_queued_tasks{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 0
+windows_exchange_workload_queued_tasks{name="microsoft_exchange_servicehost_darruntime"} 0
+# HELP windows_exchange_workload_yielded_tasks The total number of tasks that have been yielded by a workload
+# TYPE windows_exchange_workload_yielded_tasks counter
+windows_exchange_workload_yielded_tasks{name="complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager"} 0
+windows_exchange_workload_yielded_tasks{name="microsoft_exchange_servicehost_darruntime"} 0
+# HELP windows_hyperv_health_critical This counter represents the number of virtual machines with critical health
+# TYPE windows_hyperv_health_critical gauge
+windows_hyperv_health_critical 0
+# HELP windows_hyperv_health_ok This counter represents the number of virtual machines with ok health
+# TYPE windows_hyperv_health_ok gauge
+windows_hyperv_health_ok 1
+# HELP windows_hyperv_host_cpu_guest_run_time The time spent by the virtual processor in guest code
+# TYPE windows_hyperv_host_cpu_guest_run_time gauge
+windows_hyperv_host_cpu_guest_run_time{core="0"} 2.44871648e+09
+# HELP windows_hyperv_host_cpu_hypervisor_run_time The time spent by the virtual processor in hypervisor code
+# TYPE windows_hyperv_host_cpu_hypervisor_run_time gauge
+windows_hyperv_host_cpu_hypervisor_run_time{core="0"} 2.79010764e+08
+# HELP windows_hyperv_host_cpu_remote_run_time The time spent by the virtual processor running on a remote node
+# TYPE windows_hyperv_host_cpu_remote_run_time gauge
+windows_hyperv_host_cpu_remote_run_time{core="0"} 0
+# HELP windows_hyperv_host_cpu_total_run_time The time spent by the virtual processor in guest and hypervisor code
+# TYPE windows_hyperv_host_cpu_total_run_time gauge
+windows_hyperv_host_cpu_total_run_time{core="0"} 2.727727244e+09
+# HELP windows_hyperv_host_lp_guest_run_time_percent The percentage of time spent by the processor in guest code
+# TYPE windows_hyperv_host_lp_guest_run_time_percent gauge
+windows_hyperv_host_lp_guest_run_time_percent{core="0"} 2.476081579e+09
+# HELP windows_hyperv_host_lp_hypervisor_run_time_percent The percentage of time spent by the processor in hypervisor code
+# TYPE windows_hyperv_host_lp_hypervisor_run_time_percent gauge
+windows_hyperv_host_lp_hypervisor_run_time_percent{core="0"} 3.52733652e+08
+# HELP windows_hyperv_host_lp_total_run_time_percent The percentage of time spent by the processor in guest and hypervisor code
+# TYPE windows_hyperv_host_lp_total_run_time_percent gauge
+windows_hyperv_host_lp_total_run_time_percent{core="0"} 2.828815231e+09
+# HELP windows_hyperv_hypervisor_logical_processors The number of logical processors present in the system
+# TYPE windows_hyperv_hypervisor_logical_processors gauge
+windows_hyperv_hypervisor_logical_processors 16
+# HELP windows_hyperv_hypervisor_virtual_processors The number of virtual processors present in the system
+# TYPE windows_hyperv_hypervisor_virtual_processors gauge
+windows_hyperv_hypervisor_virtual_processors 24
+# HELP windows_hyperv_root_partition_1G_device_pages The number of 1G pages present in the device space of the partition
+# TYPE windows_hyperv_root_partition_1G_device_pages gauge
+windows_hyperv_root_partition_1G_device_pages 0
+# HELP windows_hyperv_root_partition_1G_gpa_pages The number of 1G pages present in the GPA space of the partition
+# TYPE windows_hyperv_root_partition_1G_gpa_pages gauge
+windows_hyperv_root_partition_1G_gpa_pages 6
+# HELP windows_hyperv_root_partition_2M_device_pages The number of 2M pages present in the device space of the partition
+# TYPE windows_hyperv_root_partition_2M_device_pages gauge
+windows_hyperv_root_partition_2M_device_pages 0
+# HELP windows_hyperv_root_partition_2M_gpa_pages The number of 2M pages present in the GPA space of the partition
+# TYPE windows_hyperv_root_partition_2M_gpa_pages gauge
+windows_hyperv_root_partition_2M_gpa_pages 5255
+# HELP windows_hyperv_root_partition_4K_device_pages The number of 4K pages present in the device space of the partition
+# TYPE windows_hyperv_root_partition_4K_device_pages gauge
+windows_hyperv_root_partition_4K_device_pages 0
+# HELP windows_hyperv_root_partition_4K_gpa_pages The number of 4K pages present in the GPA space of the partition
+# TYPE windows_hyperv_root_partition_4K_gpa_pages gauge
+windows_hyperv_root_partition_4K_gpa_pages 58880
+# HELP windows_hyperv_root_partition_address_spaces The number of address spaces in the virtual TLB of the partition
+# TYPE windows_hyperv_root_partition_address_spaces gauge
+windows_hyperv_root_partition_address_spaces 0
+# HELP windows_hyperv_root_partition_attached_devices The number of devices attached to the partition
+# TYPE windows_hyperv_root_partition_attached_devices gauge
+windows_hyperv_root_partition_attached_devices 1
+# HELP windows_hyperv_root_partition_deposited_pages The number of pages deposited into the partition
+# TYPE windows_hyperv_root_partition_deposited_pages gauge
+windows_hyperv_root_partition_deposited_pages 31732
+# HELP windows_hyperv_root_partition_device_dma_errors An indicator of illegal DMA requests generated by all devices assigned to the partition
+# TYPE windows_hyperv_root_partition_device_dma_errors gauge
+windows_hyperv_root_partition_device_dma_errors 0
+# HELP windows_hyperv_root_partition_device_interrupt_errors An indicator of illegal interrupt requests generated by all devices assigned to the partition
+# TYPE windows_hyperv_root_partition_device_interrupt_errors gauge
+windows_hyperv_root_partition_device_interrupt_errors 0
+# HELP windows_hyperv_root_partition_device_interrupt_throttle_events The number of times an interrupt from a device assigned to the partition was temporarily throttled because the device was generating too many interrupts
+# TYPE windows_hyperv_root_partition_device_interrupt_throttle_events gauge
+windows_hyperv_root_partition_device_interrupt_throttle_events 0
+# HELP windows_hyperv_root_partition_gpa_space_modifications The rate of modifications to the GPA space of the partition
+# TYPE windows_hyperv_root_partition_gpa_space_modifications counter
+windows_hyperv_root_partition_gpa_space_modifications 0
+# HELP windows_hyperv_root_partition_io_tlb_flush The rate of flushes of I/O TLBs of the partition
+# TYPE windows_hyperv_root_partition_io_tlb_flush counter
+windows_hyperv_root_partition_io_tlb_flush 23901
+# HELP windows_hyperv_root_partition_io_tlb_flush_cost The average time (in nanoseconds) spent processing an I/O TLB flush
+# TYPE windows_hyperv_root_partition_io_tlb_flush_cost gauge
+windows_hyperv_root_partition_io_tlb_flush_cost 312574
+# HELP windows_hyperv_root_partition_physical_pages_allocated The number of timer interrupts skipped for the partition
+# TYPE windows_hyperv_root_partition_physical_pages_allocated gauge
+windows_hyperv_root_partition_physical_pages_allocated 0
+# HELP windows_hyperv_root_partition_preferred_numa_node_index The number of pages present in the GPA space of the partition (zero for root partition)
+# TYPE windows_hyperv_root_partition_preferred_numa_node_index gauge
+windows_hyperv_root_partition_preferred_numa_node_index 0
+# HELP windows_hyperv_root_partition_recommended_virtual_tlb_size The recommended number of pages to be deposited for the virtual TLB
+# TYPE windows_hyperv_root_partition_recommended_virtual_tlb_size gauge
+windows_hyperv_root_partition_recommended_virtual_tlb_size 64
+# HELP windows_hyperv_root_partition_virtual_tlb_flush_entires The rate of flushes of the entire virtual TLB
+# TYPE windows_hyperv_root_partition_virtual_tlb_flush_entires counter
+windows_hyperv_root_partition_virtual_tlb_flush_entires 15234
+# HELP windows_hyperv_root_partition_virtual_tlb_pages The number of pages used by the virtual TLB of the partition
+# TYPE windows_hyperv_root_partition_virtual_tlb_pages gauge
+windows_hyperv_root_partition_virtual_tlb_pages 64
+# HELP windows_hyperv_vid_physical_pages_allocated The number of physical pages allocated
+# TYPE windows_hyperv_vid_physical_pages_allocated gauge
+windows_hyperv_vid_physical_pages_allocated{vm="Ubuntu 22.04 LTS"} 745472
+# HELP windows_hyperv_vid_preferred_numa_node_index The preferred NUMA node index associated with this partition
+# TYPE windows_hyperv_vid_preferred_numa_node_index gauge
+windows_hyperv_vid_preferred_numa_node_index{vm="Ubuntu 22.04 LTS"} 0
+# HELP windows_hyperv_vid_remote_physical_pages The number of physical pages not allocated from the preferred NUMA node
+# TYPE windows_hyperv_vid_remote_physical_pages gauge
+windows_hyperv_vid_remote_physical_pages{vm="Ubuntu 22.04 LTS"} 0
+# HELP windows_hyperv_vm_cpu_guest_run_time The time spent by the virtual processor in guest code
+# TYPE windows_hyperv_vm_cpu_guest_run_time gauge
+windows_hyperv_vm_cpu_guest_run_time{core="0",vm="Ubuntu 22.04 LTS"} 6.2534217e+07
+# HELP windows_hyperv_vm_cpu_hypervisor_run_time The time spent by the virtual processor in hypervisor code
+# TYPE windows_hyperv_vm_cpu_hypervisor_run_time gauge
+windows_hyperv_vm_cpu_hypervisor_run_time{core="0",vm="Ubuntu 22.04 LTS"} 4.457712e+06
+# HELP windows_hyperv_vm_cpu_remote_run_time The time spent by the virtual processor running on a remote node
+# TYPE windows_hyperv_vm_cpu_remote_run_time gauge
+windows_hyperv_vm_cpu_remote_run_time{core="0",vm="Ubuntu 22.04 LTS"} 0
+# HELP windows_hyperv_vm_cpu_total_run_time The time spent by the virtual processor in guest and hypervisor code
+# TYPE windows_hyperv_vm_cpu_total_run_time gauge
+windows_hyperv_vm_cpu_total_run_time{core="0",vm="Ubuntu 22.04 LTS"} 6.6991929e+07
+# HELP windows_hyperv_vm_device_bytes_read This counter represents the total number of bytes that have been read per second on this virtual device
+# TYPE windows_hyperv_vm_device_bytes_read counter
+windows_hyperv_vm_device_bytes_read{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 83456
+windows_hyperv_vm_device_bytes_read{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 5.3118464e+08
+# HELP windows_hyperv_vm_device_bytes_written This counter represents the total number of bytes that have been written per second on this virtual device
+# TYPE windows_hyperv_vm_device_bytes_written counter
+windows_hyperv_vm_device_bytes_written{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 1.148928e+06
+windows_hyperv_vm_device_bytes_written{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 4.25905152e+08
+# HELP windows_hyperv_vm_device_error_count This counter represents the total number of errors that have occurred on this virtual device
+# TYPE windows_hyperv_vm_device_error_count counter
+windows_hyperv_vm_device_error_count{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 0
+windows_hyperv_vm_device_error_count{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 3
+# HELP windows_hyperv_vm_device_operations_read This counter represents the number of read operations that have occurred per second on this virtual device
+# TYPE windows_hyperv_vm_device_operations_read counter
+windows_hyperv_vm_device_operations_read{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 6
+windows_hyperv_vm_device_operations_read{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 13196
+# HELP windows_hyperv_vm_device_operations_written This counter represents the number of write operations that have occurred per second on this virtual device
+# TYPE windows_hyperv_vm_device_operations_written counter
+windows_hyperv_vm_device_operations_written{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 34
+windows_hyperv_vm_device_operations_written{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 3866
+# HELP windows_hyperv_vm_device_queue_length This counter represents the current queue length on this virtual device
+# TYPE windows_hyperv_vm_device_queue_length counter
+windows_hyperv_vm_device_queue_length{vm_device="--?-D:-Ana-VM-hyperv-Virtual Machines-3AA8D474-2365-4041-A7CB-2A78287D6FE0.vmgs"} 1.104182e+06
+windows_hyperv_vm_device_queue_length{vm_device="D:-Ana-VM-hyperv-vhd-Ubuntu 22.04 LTS_838D93A1-7D30-43CD-9F69-F336829C0934.avhdx"} 3.269422187e+09
+# HELP windows_hyperv_vm_interface_bytes_received This counter represents the total number of bytes received per second by the network adapter
+# TYPE windows_hyperv_vm_interface_bytes_received counter
+windows_hyperv_vm_interface_bytes_received{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 473654
+windows_hyperv_vm_interface_bytes_received{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 4.3509444e+07
+# HELP windows_hyperv_vm_interface_bytes_sent This counter represents the total number of bytes sent per second by the network adapter
+# TYPE windows_hyperv_vm_interface_bytes_sent counter
+windows_hyperv_vm_interface_bytes_sent{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 4.3550457e+07
+windows_hyperv_vm_interface_bytes_sent{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 473654
+# HELP windows_hyperv_vm_interface_packets_incoming_dropped This counter represents the total number of dropped packets per second in the incoming direction of the network adapter
+# TYPE windows_hyperv_vm_interface_packets_incoming_dropped counter
+windows_hyperv_vm_interface_packets_incoming_dropped{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 0
+windows_hyperv_vm_interface_packets_incoming_dropped{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 0
+# HELP windows_hyperv_vm_interface_packets_outgoing_dropped This counter represents the total number of dropped packets per second in the outgoing direction of the network adapter
+# TYPE windows_hyperv_vm_interface_packets_outgoing_dropped counter
+windows_hyperv_vm_interface_packets_outgoing_dropped{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 284
+windows_hyperv_vm_interface_packets_outgoing_dropped{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 0
+# HELP windows_hyperv_vm_interface_packets_received This counter represents the total number of packets received per second by the network adapter
+# TYPE windows_hyperv_vm_interface_packets_received counter
+windows_hyperv_vm_interface_packets_received{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 6137
+windows_hyperv_vm_interface_packets_received{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 8621
+# HELP windows_hyperv_vm_interface_packets_sent This counter represents the total number of packets sent per second by the network adapter
+# TYPE windows_hyperv_vm_interface_packets_sent counter
+windows_hyperv_vm_interface_packets_sent{vm_interface="Default Switch_312FF9C7-1F07-4EBA-81FE-F5B4F445B810"} 8905
+windows_hyperv_vm_interface_packets_sent{vm_interface="Ubuntu 22.04 LTS_Adaptador de Rede_3AA8D474-2365-4041-A7CB-2A78287D6FE0--98F1DBEE-505C-4086-B80E-87A27FAECBD4"} 6137
+# HELP windows_hyperv_vm_memory_add_operations_total This counter represents the number of operations adding memory to the VM.
+# TYPE windows_hyperv_vm_memory_add_operations_total counter
+windows_hyperv_vm_memory_add_operations_total{vm="Ubuntu 22.04 LTS"} 3
+# HELP windows_hyperv_vm_memory_added_total This counter represents memory in MB added to the VM
+# TYPE windows_hyperv_vm_memory_added_total counter
+windows_hyperv_vm_memory_added_total{vm="Ubuntu 22.04 LTS"} 856
+# HELP windows_hyperv_vm_memory_physical This gauge represents the current amount of memory in MB assigned to the VM.
+# TYPE windows_hyperv_vm_memory_physical gauge
+windows_hyperv_vm_memory_physical{vm="Ubuntu 22.04 LTS"} 2628
+# HELP windows_hyperv_vm_memory_physical_guest_visible 'This gauge represents the amount of memory in MB visible to the VM guest.'
+# TYPE windows_hyperv_vm_memory_physical_guest_visible gauge
+windows_hyperv_vm_memory_physical_guest_visible{vm="Ubuntu 22.04 LTS"} 2904
+# HELP windows_hyperv_vm_memory_pressure_average This gauge represents the average pressure in the VM.
+# TYPE windows_hyperv_vm_memory_pressure_average gauge
+windows_hyperv_vm_memory_pressure_average{vm="Ubuntu 22.04 LTS"} 83
+# HELP windows_hyperv_vm_memory_pressure_current This gauge represents the current pressure in the VM.
+# TYPE windows_hyperv_vm_memory_pressure_current gauge
+windows_hyperv_vm_memory_pressure_current{vm="Ubuntu 22.04 LTS"} 83
+# HELP windows_hyperv_vm_memory_pressure_maximum This gauge represents the maximum pressure band in the VM.
+# TYPE windows_hyperv_vm_memory_pressure_maximum gauge
+windows_hyperv_vm_memory_pressure_maximum{vm="Ubuntu 22.04 LTS"} 85
+# HELP windows_hyperv_vm_memory_pressure_minimum This gauge represents the minimum pressure band in the VM.
+# TYPE windows_hyperv_vm_memory_pressure_minimum gauge
+windows_hyperv_vm_memory_pressure_minimum{vm="Ubuntu 22.04 LTS"} 81
+# HELP windows_hyperv_vm_memory_remove_operations_total This counter represents the number of operations removing memory from the VM.
+# TYPE windows_hyperv_vm_memory_remove_operations_total counter
+windows_hyperv_vm_memory_remove_operations_total{vm="Ubuntu 22.04 LTS"} 1
+# HELP windows_hyperv_vm_memory_removed_total This counter represents memory in MB removed from the VM
+# TYPE windows_hyperv_vm_memory_removed_total counter
+windows_hyperv_vm_memory_removed_total{vm="Ubuntu 22.04 LTS"} 276
+# HELP windows_hyperv_vswitch_broadcast_packets_received_total This represents the total number of broadcast packets received per second by the virtual switch
+# TYPE windows_hyperv_vswitch_broadcast_packets_received_total counter
+windows_hyperv_vswitch_broadcast_packets_received_total{vswitch="Default Switch"} 51
+# HELP windows_hyperv_vswitch_broadcast_packets_sent_total This represents the total number of broadcast packets sent per second by the virtual switch
+# TYPE windows_hyperv_vswitch_broadcast_packets_sent_total counter
+windows_hyperv_vswitch_broadcast_packets_sent_total{vswitch="Default Switch"} 18
+# HELP windows_hyperv_vswitch_bytes_received_total This represents the total number of bytes received per second by the virtual switch
+# TYPE windows_hyperv_vswitch_bytes_received_total counter
+windows_hyperv_vswitch_bytes_received_total{vswitch="Default Switch"} 4.4024111e+07
+# HELP windows_hyperv_vswitch_bytes_sent_total This represents the total number of bytes sent per second by the virtual switch
+# TYPE windows_hyperv_vswitch_bytes_sent_total counter
+windows_hyperv_vswitch_bytes_sent_total{vswitch="Default Switch"} 4.3983098e+07
+# HELP windows_hyperv_vswitch_bytes_total This represents the total number of bytes per second traversing the virtual switch
+# TYPE windows_hyperv_vswitch_bytes_total counter
+windows_hyperv_vswitch_bytes_total{vswitch="Default Switch"} 8.8007209e+07
+# HELP windows_hyperv_vswitch_directed_packets_received_total This represents the total number of directed packets received per second by the virtual switch
+# TYPE windows_hyperv_vswitch_directed_packets_received_total counter
+windows_hyperv_vswitch_directed_packets_received_total{vswitch="Default Switch"} 14603
+# HELP windows_hyperv_vswitch_directed_packets_send_total This represents the total number of directed packets sent per second by the virtual switch
+# TYPE windows_hyperv_vswitch_directed_packets_send_total counter
+windows_hyperv_vswitch_directed_packets_send_total{vswitch="Default Switch"} 14603
+# HELP windows_hyperv_vswitch_dropped_packets_incoming_total This represents the total number of packet dropped per second by the virtual switch in the incoming direction
+# TYPE windows_hyperv_vswitch_dropped_packets_incoming_total counter
+windows_hyperv_vswitch_dropped_packets_incoming_total{vswitch="Default Switch"} 284
+# HELP windows_hyperv_vswitch_dropped_packets_outcoming_total This represents the total number of packet dropped per second by the virtual switch in the outgoing direction
+# TYPE windows_hyperv_vswitch_dropped_packets_outcoming_total counter
+windows_hyperv_vswitch_dropped_packets_outcoming_total{vswitch="Default Switch"} 0
+# HELP windows_hyperv_vswitch_extensions_dropped_packets_incoming_total This represents the total number of packet dropped per second by the virtual switch extensions in the incoming direction
+# TYPE windows_hyperv_vswitch_extensions_dropped_packets_incoming_total counter
+windows_hyperv_vswitch_extensions_dropped_packets_incoming_total{vswitch="Default Switch"} 0
+# HELP windows_hyperv_vswitch_extensions_dropped_packets_outcoming_total This represents the total number of packet dropped per second by the virtual switch extensions in the outgoing direction
+# TYPE windows_hyperv_vswitch_extensions_dropped_packets_outcoming_total counter
+windows_hyperv_vswitch_extensions_dropped_packets_outcoming_total{vswitch="Default Switch"} 0
+# HELP windows_hyperv_vswitch_learned_mac_addresses_total This counter represents the total number of learned MAC addresses of the virtual switch
+# TYPE windows_hyperv_vswitch_learned_mac_addresses_total counter
+windows_hyperv_vswitch_learned_mac_addresses_total{vswitch="Default Switch"} 2
+# HELP windows_hyperv_vswitch_multicast_packets_received_total This represents the total number of multicast packets received per second by the virtual switch
+# TYPE windows_hyperv_vswitch_multicast_packets_received_total counter
+windows_hyperv_vswitch_multicast_packets_received_total{vswitch="Default Switch"} 388
+# HELP windows_hyperv_vswitch_multicast_packets_sent_total This represents the total number of multicast packets sent per second by the virtual switch
+# TYPE windows_hyperv_vswitch_multicast_packets_sent_total counter
+windows_hyperv_vswitch_multicast_packets_sent_total{vswitch="Default Switch"} 137
+# HELP windows_hyperv_vswitch_number_of_send_channel_moves_total This represents the total number of send channel moves per second on this virtual switch
+# TYPE windows_hyperv_vswitch_number_of_send_channel_moves_total counter
+windows_hyperv_vswitch_number_of_send_channel_moves_total{vswitch="Default Switch"} 0
+# HELP windows_hyperv_vswitch_number_of_vmq_moves_total This represents the total number of VMQ moves per second on this virtual switch
+# TYPE windows_hyperv_vswitch_number_of_vmq_moves_total counter
+windows_hyperv_vswitch_number_of_vmq_moves_total{vswitch="Default Switch"} 0
+# HELP windows_hyperv_vswitch_packets_flooded_total This counter represents the total number of packets flooded by the virtual switch
+# TYPE windows_hyperv_vswitch_packets_flooded_total counter
+windows_hyperv_vswitch_packets_flooded_total{vswitch="Default Switch"} 0
+# HELP windows_hyperv_vswitch_packets_received_total This represents the total number of packets received per second by the virtual switch
+# TYPE windows_hyperv_vswitch_packets_received_total counter
+windows_hyperv_vswitch_packets_received_total{vswitch="Default Switch"} 15042
+# HELP windows_hyperv_vswitch_packets_total This represents the total number of packets per second traversing the virtual switch
+# TYPE windows_hyperv_vswitch_packets_total counter
+windows_hyperv_vswitch_packets_total{vswitch="Default Switch"} 29800
+# HELP windows_hyperv_vswitch_purged_mac_addresses_total This counter represents the total number of purged MAC addresses of the virtual switch
+# TYPE windows_hyperv_vswitch_purged_mac_addresses_total counter
+windows_hyperv_vswitch_purged_mac_addresses_total{vswitch="Default Switch"} 0
+# HELP windows_iis_anonymous_users_total Total number of users who established an anonymous connection with the Web service (WebService.TotalAnonymousUsers)
+# TYPE windows_iis_anonymous_users_total counter
+windows_iis_anonymous_users_total{site="Default Web Site"} 3
+# HELP windows_iis_blocked_async_io_requests_total Total requests temporarily blocked due to bandwidth throttling settings (WebService.TotalBlockedAsyncIORequests)
+# TYPE windows_iis_blocked_async_io_requests_total counter
+windows_iis_blocked_async_io_requests_total{site="Default Web Site"} 0
+# HELP windows_iis_cgi_requests_total Total CGI requests is the total number of CGI requests (WebService.TotalCGIRequests)
+# TYPE windows_iis_cgi_requests_total counter
+windows_iis_cgi_requests_total{site="Default Web Site"} 0
+# HELP windows_iis_connection_attempts_all_instances_total Number of connections that have been attempted using the Web service (WebService.TotalConnectionAttemptsAllInstances)
+# TYPE windows_iis_connection_attempts_all_instances_total counter
+windows_iis_connection_attempts_all_instances_total{site="Default Web Site"} 1
+# HELP windows_iis_current_anonymous_users Number of users who currently have an anonymous connection using the Web service (WebService.CurrentAnonymousUsers)
+# TYPE windows_iis_current_anonymous_users gauge
+windows_iis_current_anonymous_users{site="Default Web Site"} 0
+# HELP windows_iis_current_application_pool_start_time The unix timestamp for the application pool start time (CurrentApplicationPoolUptime)
+# TYPE windows_iis_current_application_pool_start_time gauge
+windows_iis_current_application_pool_start_time{app="DefaultAppPool"} 1.6672399883854828e+09
+# HELP windows_iis_current_application_pool_state The current status of the application pool (1 - Uninitialized, 2 - Initialized, 3 - Running, 4 - Disabling, 5 - Disabled, 6 - Shutdown Pending, 7 - Delete Pending) (CurrentApplicationPoolState)
+# TYPE windows_iis_current_application_pool_state gauge
+windows_iis_current_application_pool_state{app="DefaultAppPool",state="Delete Pending"} 0
+windows_iis_current_application_pool_state{app="DefaultAppPool",state="Disabled"} 0
+windows_iis_current_application_pool_state{app="DefaultAppPool",state="Disabling"} 0
+windows_iis_current_application_pool_state{app="DefaultAppPool",state="Initialized"} 0
+windows_iis_current_application_pool_state{app="DefaultAppPool",state="Running"} 1
+windows_iis_current_application_pool_state{app="DefaultAppPool",state="Shutdown Pending"} 0
+windows_iis_current_application_pool_state{app="DefaultAppPool",state="Uninitialized"} 0
+# HELP windows_iis_current_blocked_async_io_requests Current requests temporarily blocked due to bandwidth throttling settings (WebService.CurrentBlockedAsyncIORequests)
+# TYPE windows_iis_current_blocked_async_io_requests gauge
+windows_iis_current_blocked_async_io_requests{site="Default Web Site"} 0
+# HELP windows_iis_current_cgi_requests Current number of CGI requests being simultaneously processed by the Web service (WebService.CurrentCGIRequests)
+# TYPE windows_iis_current_cgi_requests gauge
+windows_iis_current_cgi_requests{site="Default Web Site"} 0
+# HELP windows_iis_current_connections Current number of connections established with the Web service (WebService.CurrentConnections)
+# TYPE windows_iis_current_connections gauge
+windows_iis_current_connections{site="Default Web Site"} 0
+# HELP windows_iis_current_isapi_extension_requests Current number of ISAPI requests being simultaneously processed by the Web service (WebService.CurrentISAPIExtensionRequests)
+# TYPE windows_iis_current_isapi_extension_requests gauge
+windows_iis_current_isapi_extension_requests{site="Default Web Site"} 0
+# HELP windows_iis_current_non_anonymous_users Number of users who currently have a non-anonymous connection using the Web service (WebService.CurrentNonAnonymousUsers)
+# TYPE windows_iis_current_non_anonymous_users gauge
+windows_iis_current_non_anonymous_users{site="Default Web Site"} 0
+# HELP windows_iis_current_worker_processes The current number of worker processes that are running in the application pool (CurrentWorkerProcesses)
+# TYPE windows_iis_current_worker_processes gauge
+windows_iis_current_worker_processes{app="DefaultAppPool"} 1
+# HELP windows_iis_files_received_total Number of files received by the Web service (WebService.TotalFilesReceived)
+# TYPE windows_iis_files_received_total counter
+windows_iis_files_received_total{site="Default Web Site"} 0
+# HELP windows_iis_files_sent_total Number of files sent by the Web service (WebService.TotalFilesSent)
+# TYPE windows_iis_files_sent_total counter
+windows_iis_files_sent_total{site="Default Web Site"} 2
+# HELP windows_iis_ipapi_extension_requests_total ISAPI Extension Requests received (WebService.TotalISAPIExtensionRequests)
+# TYPE windows_iis_ipapi_extension_requests_total counter
+windows_iis_ipapi_extension_requests_total{site="Default Web Site"} 0
+# HELP windows_iis_locked_errors_total Number of requests that couldn't be satisfied by the server because the requested resource was locked (WebService.TotalLockedErrors)
+# TYPE windows_iis_locked_errors_total counter
+windows_iis_locked_errors_total{site="Default Web Site"} 0
+# HELP windows_iis_logon_attempts_total Number of logons attempts to the Web Service (WebService.TotalLogonAttempts)
+# TYPE windows_iis_logon_attempts_total counter
+windows_iis_logon_attempts_total{site="Default Web Site"} 4
+# HELP windows_iis_maximum_worker_processes The maximum number of worker processes that have been created for the application pool since Windows Process Activation Service (WAS) started (MaximumWorkerProcesses)
+# TYPE windows_iis_maximum_worker_processes gauge
+windows_iis_maximum_worker_processes{app="DefaultAppPool"} 1
+# HELP windows_iis_non_anonymous_users_total Number of users who established a non-anonymous connection with the Web service (WebService.TotalNonAnonymousUsers)
+# TYPE windows_iis_non_anonymous_users_total counter
+windows_iis_non_anonymous_users_total{site="Default Web Site"} 0
+# HELP windows_iis_not_found_errors_total Number of requests that couldn't be satisfied by the server because the requested document could not be found (WebService.TotalNotFoundErrors)
+# TYPE windows_iis_not_found_errors_total counter
+windows_iis_not_found_errors_total{site="Default Web Site"} 1
+# HELP windows_iis_received_bytes_total Number of data bytes that have been received by the Web service (WebService.TotalBytesReceived)
+# TYPE windows_iis_received_bytes_total counter
+windows_iis_received_bytes_total{site="Default Web Site"} 10289
+# HELP windows_iis_recent_worker_process_failures The number of times that worker processes for the application pool failed during the rapid-fail protection interval (RecentWorkerProcessFailures)
+# TYPE windows_iis_recent_worker_process_failures gauge
+windows_iis_recent_worker_process_failures{app="DefaultAppPool"} 0
+# HELP windows_iis_rejected_async_io_requests_total Requests rejected due to bandwidth throttling settings (WebService.TotalRejectedAsyncIORequests)
+# TYPE windows_iis_rejected_async_io_requests_total counter
+windows_iis_rejected_async_io_requests_total{site="Default Web Site"} 0
+# HELP windows_iis_requests_total Number of HTTP requests (WebService.TotalRequests)
+# TYPE windows_iis_requests_total counter
+windows_iis_requests_total{method="COPY",site="Default Web Site"} 0
+windows_iis_requests_total{method="DELETE",site="Default Web Site"} 0
+windows_iis_requests_total{method="GET",site="Default Web Site"} 3
+windows_iis_requests_total{method="HEAD",site="Default Web Site"} 0
+windows_iis_requests_total{method="LOCK",site="Default Web Site"} 0
+windows_iis_requests_total{method="MKCOL",site="Default Web Site"} 0
+windows_iis_requests_total{method="MOVE",site="Default Web Site"} 0
+windows_iis_requests_total{method="OPTIONS",site="Default Web Site"} 0
+windows_iis_requests_total{method="POST",site="Default Web Site"} 0
+windows_iis_requests_total{method="PROPFIND",site="Default Web Site"} 0
+windows_iis_requests_total{method="PROPPATCH",site="Default Web Site"} 0
+windows_iis_requests_total{method="PUT",site="Default Web Site"} 0
+windows_iis_requests_total{method="SEARCH",site="Default Web Site"} 0
+windows_iis_requests_total{method="TRACE",site="Default Web Site"} 0
+windows_iis_requests_total{method="UNLOCK",site="Default Web Site"} 0
+windows_iis_requests_total{method="other",site="Default Web Site"} 0
+# HELP windows_iis_sent_bytes_total Number of data bytes that have been sent by the Web service (WebService.TotalBytesSent)
+# TYPE windows_iis_sent_bytes_total counter
+windows_iis_sent_bytes_total{site="Default Web Site"} 105882
+# HELP windows_iis_server_cache_active_flushed_entries Number of file handles cached that will be closed when all current transfers complete.
+# TYPE windows_iis_server_cache_active_flushed_entries gauge
+windows_iis_server_cache_active_flushed_entries 0
+# HELP windows_iis_server_file_cache_flushes_total Total number of file cache flushes (since service startup)
+# TYPE windows_iis_server_file_cache_flushes_total counter
+windows_iis_server_file_cache_flushes_total 7
+# HELP windows_iis_server_file_cache_hits_total Total number of successful lookups in the user-mode file cache
+# TYPE windows_iis_server_file_cache_hits_total counter
+windows_iis_server_file_cache_hits_total 1
+# HELP windows_iis_server_file_cache_items Current number of files whose contents are present in cache
+# TYPE windows_iis_server_file_cache_items gauge
+windows_iis_server_file_cache_items 1
+# HELP windows_iis_server_file_cache_items_flushed_total Total number of file handles that have been removed from the cache (since service startup)
+# TYPE windows_iis_server_file_cache_items_flushed_total counter
+windows_iis_server_file_cache_items_flushed_total 0
+# HELP windows_iis_server_file_cache_items_total Total number of files whose contents were ever added to the cache (since service startup)
+# TYPE windows_iis_server_file_cache_items_total counter
+windows_iis_server_file_cache_items_total 1
+# HELP windows_iis_server_file_cache_max_memory_bytes Maximum number of bytes used by file cache
+# TYPE windows_iis_server_file_cache_max_memory_bytes counter
+windows_iis_server_file_cache_max_memory_bytes 703
+# HELP windows_iis_server_file_cache_memory_bytes Current number of bytes used by file cache
+# TYPE windows_iis_server_file_cache_memory_bytes gauge
+windows_iis_server_file_cache_memory_bytes 703
+# HELP windows_iis_server_file_cache_queries_total Total number of file cache queries (hits + misses)
+# TYPE windows_iis_server_file_cache_queries_total counter
+windows_iis_server_file_cache_queries_total 9
+# HELP windows_iis_server_metadata_cache_flushes_total Total number of metadata cache flushes (since service startup)
+# TYPE windows_iis_server_metadata_cache_flushes_total counter
+windows_iis_server_metadata_cache_flushes_total 0
+# HELP windows_iis_server_metadata_cache_hits_total Total number of successful lookups in the metadata cache (since service startup)
+# TYPE windows_iis_server_metadata_cache_hits_total counter
+windows_iis_server_metadata_cache_hits_total 3
+# HELP windows_iis_server_metadata_cache_items Number of metadata information blocks currently present in cache
+# TYPE windows_iis_server_metadata_cache_items gauge
+windows_iis_server_metadata_cache_items 1
+# HELP windows_iis_server_metadata_cache_items_cached_total Total number of metadata information blocks added to the cache (since service startup)
+# TYPE windows_iis_server_metadata_cache_items_cached_total counter
+windows_iis_server_metadata_cache_items_cached_total 1
+# HELP windows_iis_server_metadata_cache_items_flushed_total Total number of metadata information blocks removed from the cache (since service startup)
+# TYPE windows_iis_server_metadata_cache_items_flushed_total counter
+windows_iis_server_metadata_cache_items_flushed_total 0
+# HELP windows_iis_server_metadata_cache_queries_total Total metadata cache queries (hits + misses)
+# TYPE windows_iis_server_metadata_cache_queries_total counter
+windows_iis_server_metadata_cache_queries_total 4
+# HELP windows_iis_server_output_cache_active_flushed_items
+# TYPE windows_iis_server_output_cache_active_flushed_items counter
+windows_iis_server_output_cache_active_flushed_items 0
+# HELP windows_iis_server_output_cache_flushes_total Total number of flushes of output cache (since service startup)
+# TYPE windows_iis_server_output_cache_flushes_total counter
+windows_iis_server_output_cache_flushes_total 0
+# HELP windows_iis_server_output_cache_hits_total Total number of successful lookups in output cache (since service startup)
+# TYPE windows_iis_server_output_cache_hits_total counter
+windows_iis_server_output_cache_hits_total 0
+# HELP windows_iis_server_output_cache_items Number of items current present in output cache
+# TYPE windows_iis_server_output_cache_items counter
+windows_iis_server_output_cache_items 0
+# HELP windows_iis_server_output_cache_items_flushed_total Total number of items flushed from output cache (since service startup)
+# TYPE windows_iis_server_output_cache_items_flushed_total counter
+windows_iis_server_output_cache_items_flushed_total 0
+# HELP windows_iis_server_output_cache_memory_bytes Current number of bytes used by output cache
+# TYPE windows_iis_server_output_cache_memory_bytes counter
+windows_iis_server_output_cache_memory_bytes 0
+# HELP windows_iis_server_output_cache_queries_total Total output cache queries (hits + misses)
+# TYPE windows_iis_server_output_cache_queries_total counter
+windows_iis_server_output_cache_queries_total 4
+# HELP windows_iis_server_uri_cache_flushes_total Total number of URI cache flushes (since service startup)
+# TYPE windows_iis_server_uri_cache_flushes_total counter
+windows_iis_server_uri_cache_flushes_total{mode="kernel"} 0
+windows_iis_server_uri_cache_flushes_total{mode="user"} 0
+# HELP windows_iis_server_uri_cache_hits_total Total number of successful lookups in the URI cache (since service startup)
+# TYPE windows_iis_server_uri_cache_hits_total counter
+windows_iis_server_uri_cache_hits_total{mode="kernel"} 0
+windows_iis_server_uri_cache_hits_total{mode="user"} 0
+# HELP windows_iis_server_uri_cache_items Number of URI information blocks currently in the cache
+# TYPE windows_iis_server_uri_cache_items gauge
+windows_iis_server_uri_cache_items{mode="kernel"} 0
+windows_iis_server_uri_cache_items{mode="user"} 0
+# HELP windows_iis_server_uri_cache_items_flushed_total The number of URI information blocks that have been removed from the cache (since service startup)
+# TYPE windows_iis_server_uri_cache_items_flushed_total counter
+windows_iis_server_uri_cache_items_flushed_total{mode="kernel"} 0
+windows_iis_server_uri_cache_items_flushed_total{mode="user"} 0
+# HELP windows_iis_server_uri_cache_items_total Total number of URI information blocks added to the cache (since service startup)
+# TYPE windows_iis_server_uri_cache_items_total counter
+windows_iis_server_uri_cache_items_total{mode="kernel"} 0
+windows_iis_server_uri_cache_items_total{mode="user"} 0
+# HELP windows_iis_server_uri_cache_queries_total Total number of uri cache queries (hits + misses)
+# TYPE windows_iis_server_uri_cache_queries_total counter
+windows_iis_server_uri_cache_queries_total{mode="kernel"} 47
+windows_iis_server_uri_cache_queries_total{mode="user"} 4
+# HELP windows_iis_service_uptime Number of seconds the WebService is up (WebService.ServiceUptime)
+# TYPE windows_iis_service_uptime gauge
+windows_iis_service_uptime{site="Default Web Site"} 258633
+# HELP windows_iis_time_since_last_worker_process_failure The length of time, in seconds, since the last worker process failure occurred for the application pool (TimeSinceLastWorkerProcessFailure)
+# TYPE windows_iis_time_since_last_worker_process_failure gauge
+windows_iis_time_since_last_worker_process_failure{app="DefaultAppPool"} 1.6672399883854828e+09
+# HELP windows_iis_total_application_pool_recycles The number of times that the application pool has been recycled since Windows Process Activation Service (WAS) started (TotalApplicationPoolRecycles)
+# TYPE windows_iis_total_application_pool_recycles counter
+windows_iis_total_application_pool_recycles{app="DefaultAppPool"} 0
+# HELP windows_iis_total_application_pool_start_time The unix timestamp for the application pool of when the Windows Process Activation Service (WAS) started (TotalApplicationPoolUptime)
+# TYPE windows_iis_total_application_pool_start_time counter
+windows_iis_total_application_pool_start_time{app="DefaultAppPool"} 1.6672399883854828e+09
+# HELP windows_iis_total_worker_process_failures The number of times that worker processes have crashed since the application pool was started (TotalWorkerProcessFailures)
+# TYPE windows_iis_total_worker_process_failures counter
+windows_iis_total_worker_process_failures{app="DefaultAppPool"} 0
+# HELP windows_iis_total_worker_process_ping_failures The number of times that Windows Process Activation Service (WAS) did not receive a response to ping messages sent to a worker process (TotalWorkerProcessPingFailures)
+# TYPE windows_iis_total_worker_process_ping_failures counter
+windows_iis_total_worker_process_ping_failures{app="DefaultAppPool"} 0
+# HELP windows_iis_total_worker_process_shutdown_failures The number of times that Windows Process Activation Service (WAS) failed to shut down a worker process (TotalWorkerProcessShutdownFailures)
+# TYPE windows_iis_total_worker_process_shutdown_failures counter
+windows_iis_total_worker_process_shutdown_failures{app="DefaultAppPool"} 0
+# HELP windows_iis_total_worker_process_startup_failures The number of times that Windows Process Activation Service (WAS) failed to start a worker process (TotalWorkerProcessStartupFailures)
+# TYPE windows_iis_total_worker_process_startup_failures counter
+windows_iis_total_worker_process_startup_failures{app="DefaultAppPool"} 0
+# HELP windows_iis_total_worker_processes_created The number of worker processes created for the application pool since Windows Process Activation Service (WAS) started (TotalWorkerProcessesCreated)
+# TYPE windows_iis_total_worker_processes_created counter
+windows_iis_total_worker_processes_created{app="DefaultAppPool"} 1
+# HELP windows_iis_worker_cache_active_flushed_entries Number of file handles cached in user-mode that will be closed when all current transfers complete.
+# TYPE windows_iis_worker_cache_active_flushed_entries gauge
+windows_iis_worker_cache_active_flushed_entries{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_current_requests Current number of requests being processed by the worker process
+# TYPE windows_iis_worker_current_requests counter
+windows_iis_worker_current_requests{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_current_websocket_requests
+# TYPE windows_iis_worker_current_websocket_requests counter
+windows_iis_worker_current_websocket_requests{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_file_cache_flushes_total Total number of files removed from the user-mode cache
+# TYPE windows_iis_worker_file_cache_flushes_total counter
+windows_iis_worker_file_cache_flushes_total{app="DefaultAppPool",pid="880"} 7
+# HELP windows_iis_worker_file_cache_hits_total Total number of successful lookups in the user-mode file cache
+# TYPE windows_iis_worker_file_cache_hits_total counter
+windows_iis_worker_file_cache_hits_total{app="DefaultAppPool",pid="880"} 1
+# HELP windows_iis_worker_file_cache_items Current number of files whose contents are present in user-mode cache
+# TYPE windows_iis_worker_file_cache_items gauge
+windows_iis_worker_file_cache_items{app="DefaultAppPool",pid="880"} 1
+# HELP windows_iis_worker_file_cache_items_flushed_total Total number of file handles that have been removed from the user-mode cache (since service startup)
+# TYPE windows_iis_worker_file_cache_items_flushed_total counter
+windows_iis_worker_file_cache_items_flushed_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_file_cache_items_total Total number of files whose contents were ever added to the user-mode cache (since service startup)
+# TYPE windows_iis_worker_file_cache_items_total counter
+windows_iis_worker_file_cache_items_total{app="DefaultAppPool",pid="880"} 1
+# HELP windows_iis_worker_file_cache_max_memory_bytes Maximum number of bytes used by user-mode file cache
+# TYPE windows_iis_worker_file_cache_max_memory_bytes counter
+windows_iis_worker_file_cache_max_memory_bytes{app="DefaultAppPool",pid="880"} 703
+# HELP windows_iis_worker_file_cache_memory_bytes Current number of bytes used by user-mode file cache
+# TYPE windows_iis_worker_file_cache_memory_bytes gauge
+windows_iis_worker_file_cache_memory_bytes{app="DefaultAppPool",pid="880"} 703
+# HELP windows_iis_worker_file_cache_queries_total Total file cache queries (hits + misses)
+# TYPE windows_iis_worker_file_cache_queries_total counter
+windows_iis_worker_file_cache_queries_total{app="DefaultAppPool",pid="880"} 9
+# HELP windows_iis_worker_max_threads Maximum number of threads to which the thread pool can grow as needed
+# TYPE windows_iis_worker_max_threads counter
+windows_iis_worker_max_threads{app="DefaultAppPool",pid="880"} 256
+# HELP windows_iis_worker_metadata_cache_flushes_total Total number of user-mode metadata cache flushes (since service startup)
+# TYPE windows_iis_worker_metadata_cache_flushes_total counter
+windows_iis_worker_metadata_cache_flushes_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_metadata_cache_hits_total Total number of successful lookups in the user-mode metadata cache (since service startup)
+# TYPE windows_iis_worker_metadata_cache_hits_total counter
+windows_iis_worker_metadata_cache_hits_total{app="DefaultAppPool",pid="880"} 3
+# HELP windows_iis_worker_metadata_cache_items Number of metadata information blocks currently present in user-mode cache
+# TYPE windows_iis_worker_metadata_cache_items gauge
+windows_iis_worker_metadata_cache_items{app="DefaultAppPool",pid="880"} 1
+# HELP windows_iis_worker_metadata_cache_items_cached_total Total number of metadata information blocks added to the user-mode cache (since service startup)
+# TYPE windows_iis_worker_metadata_cache_items_cached_total counter
+windows_iis_worker_metadata_cache_items_cached_total{app="DefaultAppPool",pid="880"} 1
+# HELP windows_iis_worker_metadata_cache_items_flushed_total Total number of metadata information blocks removed from the user-mode cache (since service startup)
+# TYPE windows_iis_worker_metadata_cache_items_flushed_total counter
+windows_iis_worker_metadata_cache_items_flushed_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_metadata_cache_queries_total Total metadata cache queries (hits + misses)
+# TYPE windows_iis_worker_metadata_cache_queries_total counter
+windows_iis_worker_metadata_cache_queries_total{app="DefaultAppPool",pid="880"} 4
+# HELP windows_iis_worker_output_cache_active_flushed_items
+# TYPE windows_iis_worker_output_cache_active_flushed_items counter
+windows_iis_worker_output_cache_active_flushed_items{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_output_cache_flushes_total Total number of flushes of output cache (since service startup)
+# TYPE windows_iis_worker_output_cache_flushes_total counter
+windows_iis_worker_output_cache_flushes_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_output_cache_hits_total Total number of successful lookups in output cache (since service startup)
+# TYPE windows_iis_worker_output_cache_hits_total counter
+windows_iis_worker_output_cache_hits_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_output_cache_items Number of items current present in output cache
+# TYPE windows_iis_worker_output_cache_items counter
+windows_iis_worker_output_cache_items{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_output_cache_items_flushed_total Total number of items flushed from output cache (since service startup)
+# TYPE windows_iis_worker_output_cache_items_flushed_total counter
+windows_iis_worker_output_cache_items_flushed_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_output_cache_memory_bytes Current number of bytes used by output cache
+# TYPE windows_iis_worker_output_cache_memory_bytes counter
+windows_iis_worker_output_cache_memory_bytes{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_output_queries_total Total number of output cache queries (hits + misses)
+# TYPE windows_iis_worker_output_queries_total counter
+windows_iis_worker_output_queries_total{app="DefaultAppPool",pid="880"} 4
+# HELP windows_iis_worker_request_errors_total Total number of requests that returned an error
+# TYPE windows_iis_worker_request_errors_total counter
+windows_iis_worker_request_errors_total{app="DefaultAppPool",pid="880",status_code="401"} 0
+windows_iis_worker_request_errors_total{app="DefaultAppPool",pid="880",status_code="403"} 0
+windows_iis_worker_request_errors_total{app="DefaultAppPool",pid="880",status_code="404"} 1
+windows_iis_worker_request_errors_total{app="DefaultAppPool",pid="880",status_code="500"} 0
+# HELP windows_iis_worker_requests_total Total number of HTTP requests served by the worker process
+# TYPE windows_iis_worker_requests_total counter
+windows_iis_worker_requests_total{app="DefaultAppPool",pid="880"} 3
+# HELP windows_iis_worker_threads Number of threads actively processing requests in the worker process
+# TYPE windows_iis_worker_threads gauge
+windows_iis_worker_threads{app="DefaultAppPool",pid="880",state="busy"} 0
+windows_iis_worker_threads{app="DefaultAppPool",pid="880",state="idle"} 0
+# HELP windows_iis_worker_uri_cache_flushes_total Total number of URI cache flushes (since service startup)
+# TYPE windows_iis_worker_uri_cache_flushes_total counter
+windows_iis_worker_uri_cache_flushes_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_uri_cache_hits_total Total number of successful lookups in the user-mode URI cache (since service startup)
+# TYPE windows_iis_worker_uri_cache_hits_total counter
+windows_iis_worker_uri_cache_hits_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_uri_cache_items Number of URI information blocks currently in the user-mode cache
+# TYPE windows_iis_worker_uri_cache_items gauge
+windows_iis_worker_uri_cache_items{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_uri_cache_items_flushed_total The number of URI information blocks that have been removed from the user-mode cache (since service startup)
+# TYPE windows_iis_worker_uri_cache_items_flushed_total counter
+windows_iis_worker_uri_cache_items_flushed_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_uri_cache_items_total Total number of URI information blocks added to the user-mode cache (since service startup)
+# TYPE windows_iis_worker_uri_cache_items_total counter
+windows_iis_worker_uri_cache_items_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_uri_cache_queries_total Total number of uri cache queries (hits + misses)
+# TYPE windows_iis_worker_uri_cache_queries_total counter
+windows_iis_worker_uri_cache_queries_total{app="DefaultAppPool",pid="880"} 4
+# HELP windows_iis_worker_websocket_connection_accepted_total
+# TYPE windows_iis_worker_websocket_connection_accepted_total counter
+windows_iis_worker_websocket_connection_accepted_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_websocket_connection_attempts_total
+# TYPE windows_iis_worker_websocket_connection_attempts_total counter
+windows_iis_worker_websocket_connection_attempts_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_iis_worker_websocket_connection_rejected_total
+# TYPE windows_iis_worker_websocket_connection_rejected_total counter
+windows_iis_worker_websocket_connection_rejected_total{app="DefaultAppPool",pid="880"} 0
+# HELP windows_logical_disk_free_bytes Free space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace)
+# TYPE windows_logical_disk_free_bytes gauge
+windows_logical_disk_free_bytes{volume="C:"} 4.363649024e+10
+windows_logical_disk_free_bytes{volume="HarddiskVolume4"} 8.5983232e+07
+# HELP windows_logical_disk_idle_seconds_total Seconds that the disk was idle (LogicalDisk.PercentIdleTime)
+# TYPE windows_logical_disk_idle_seconds_total counter
+windows_logical_disk_idle_seconds_total{volume="C:"} 164591.55536549998
+windows_logical_disk_idle_seconds_total{volume="HarddiskVolume4"} 164707.1418503
+# HELP windows_logical_disk_read_bytes_total The number of bytes transferred from the disk during read operations (LogicalDisk.DiskReadBytesPerSec)
+# TYPE windows_logical_disk_read_bytes_total counter
+windows_logical_disk_read_bytes_total{volume="C:"} 1.7676328448e+10
+windows_logical_disk_read_bytes_total{volume="HarddiskVolume4"} 24576
+# HELP windows_logical_disk_read_latency_seconds_total Shows the average time, in seconds, of a read operation from the disk (LogicalDisk.AvgDiskSecPerRead)
+# TYPE windows_logical_disk_read_latency_seconds_total counter
+windows_logical_disk_read_latency_seconds_total{volume="C:"} 97.42094709999999
+windows_logical_disk_read_latency_seconds_total{volume="HarddiskVolume4"} 0.0008895999999999999
+# HELP windows_logical_disk_read_seconds_total Seconds that the disk was busy servicing read requests (LogicalDisk.PercentDiskReadTime)
+# TYPE windows_logical_disk_read_seconds_total counter
+windows_logical_disk_read_seconds_total{volume="C:"} 97.42094709999999
+windows_logical_disk_read_seconds_total{volume="HarddiskVolume4"} 0.0008895999999999999
+# HELP windows_logical_disk_read_write_latency_seconds_total Shows the time, in seconds, of the average disk transfer (LogicalDisk.AvgDiskSecPerTransfer)
+# TYPE windows_logical_disk_read_write_latency_seconds_total counter
+windows_logical_disk_read_write_latency_seconds_total{volume="C:"} 221.3335836
+windows_logical_disk_read_write_latency_seconds_total{volume="HarddiskVolume4"} 0.0031135
+# HELP windows_logical_disk_reads_total The number of read operations on the disk (LogicalDisk.DiskReadsPerSec)
+# TYPE windows_logical_disk_reads_total counter
+windows_logical_disk_reads_total{volume="C:"} 350593
+windows_logical_disk_reads_total{volume="HarddiskVolume4"} 6
+# HELP windows_logical_disk_requests_queued The number of requests queued to the disk (LogicalDisk.CurrentDiskQueueLength)
+# TYPE windows_logical_disk_requests_queued gauge
+windows_logical_disk_requests_queued{volume="C:"} 0
+windows_logical_disk_requests_queued{volume="HarddiskVolume4"} 0
+# HELP windows_logical_disk_size_bytes Total space in bytes, updates every 10-15 min (LogicalDisk.PercentFreeSpace_Base)
+# TYPE windows_logical_disk_size_bytes gauge
+windows_logical_disk_size_bytes{volume="C:"} 6.7938287616e+10
+windows_logical_disk_size_bytes{volume="HarddiskVolume4"} 6.54311424e+08
+# HELP windows_logical_disk_split_ios_total The number of I/Os to the disk were split into multiple I/Os (LogicalDisk.SplitIOPerSec)
+# TYPE windows_logical_disk_split_ios_total counter
+windows_logical_disk_split_ios_total{volume="C:"} 37836
+windows_logical_disk_split_ios_total{volume="HarddiskVolume4"} 0
+# HELP windows_logical_disk_write_bytes_total The number of bytes transferred to the disk during write operations (LogicalDisk.DiskWriteBytesPerSec)
+# TYPE windows_logical_disk_write_bytes_total counter
+windows_logical_disk_write_bytes_total{volume="C:"} 9.135282688e+09
+windows_logical_disk_write_bytes_total{volume="HarddiskVolume4"} 53248
+# HELP windows_logical_disk_write_latency_seconds_total Shows the average time, in seconds, of a write operation to the disk (LogicalDisk.AvgDiskSecPerWrite)
+# TYPE windows_logical_disk_write_latency_seconds_total counter
+windows_logical_disk_write_latency_seconds_total{volume="C:"} 123.91263649999999
+windows_logical_disk_write_latency_seconds_total{volume="HarddiskVolume4"} 0.0022239
+# HELP windows_logical_disk_write_seconds_total Seconds that the disk was busy servicing write requests (LogicalDisk.PercentDiskWriteTime)
+# TYPE windows_logical_disk_write_seconds_total counter
+windows_logical_disk_write_seconds_total{volume="C:"} 123.91263649999999
+windows_logical_disk_write_seconds_total{volume="HarddiskVolume4"} 0.0022239
+# HELP windows_logical_disk_writes_total The number of write operations on the disk (LogicalDisk.DiskWritesPerSec)
+# TYPE windows_logical_disk_writes_total counter
+windows_logical_disk_writes_total{volume="C:"} 450705
+windows_logical_disk_writes_total{volume="HarddiskVolume4"} 11
+# HELP windows_logon_logon_type Number of active logon sessions (LogonSession.LogonType)
+# TYPE windows_logon_logon_type gauge
+windows_logon_logon_type{status="batch"} 0
+windows_logon_logon_type{status="cached_interactive"} 0
+windows_logon_logon_type{status="cached_remote_interactive"} 0
+windows_logon_logon_type{status="cached_unlock"} 0
+windows_logon_logon_type{status="interactive"} 2
+windows_logon_logon_type{status="network"} 0
+windows_logon_logon_type{status="network_clear_text"} 0
+windows_logon_logon_type{status="new_credentials"} 0
+windows_logon_logon_type{status="proxy"} 0
+windows_logon_logon_type{status="remote_interactive"} 0
+windows_logon_logon_type{status="service"} 0
+windows_logon_logon_type{status="system"} 0
+windows_logon_logon_type{status="unlock"} 0
+# HELP windows_memory_available_bytes The amount of physical memory immediately available for allocation to a process or for system use. It is equal to the sum of memory assigned to the standby (cached), free and zero page lists (AvailableBytes)
+# TYPE windows_memory_available_bytes gauge
+windows_memory_available_bytes 1.3799424e+09
+# HELP windows_memory_cache_bytes (CacheBytes)
+# TYPE windows_memory_cache_bytes gauge
+windows_memory_cache_bytes 1.70774528e+08
+# HELP windows_memory_cache_bytes_peak (CacheBytesPeak)
+# TYPE windows_memory_cache_bytes_peak gauge
+windows_memory_cache_bytes_peak 2.08621568e+08
+# HELP windows_memory_cache_faults_total Number of faults which occur when a page sought in the file system cache is not found there and must be retrieved from elsewhere in memory (soft fault) or from disk (hard fault) (Cache Faults/sec)
+# TYPE windows_memory_cache_faults_total counter
+windows_memory_cache_faults_total 8.009603e+06
+# HELP windows_memory_commit_limit (CommitLimit)
+# TYPE windows_memory_commit_limit gauge
+windows_memory_commit_limit 5.733113856e+09
+# HELP windows_memory_committed_bytes (CommittedBytes)
+# TYPE windows_memory_committed_bytes gauge
+windows_memory_committed_bytes 3.44743936e+09
+# HELP windows_memory_demand_zero_faults_total The number of zeroed pages required to satisfy faults. Zeroed pages, pages emptied of previously stored data and filled with zeros, are a security feature of Windows that prevent processes from seeing data stored by earlier processes that used the memory space (Demand Zero Faults/sec)
+# TYPE windows_memory_demand_zero_faults_total counter
+windows_memory_demand_zero_faults_total 1.02505136e+08
+# HELP windows_memory_free_and_zero_page_list_bytes The amount of physical memory, in bytes, that is assigned to the free and zero page lists. This memory does not contain cached data. It is immediately available for allocation to a process or for system use (FreeAndZeroPageListBytes)
+# TYPE windows_memory_free_and_zero_page_list_bytes gauge
+windows_memory_free_and_zero_page_list_bytes 2.0410368e+07
+# HELP windows_memory_free_system_page_table_entries (FreeSystemPageTableEntries)
+# TYPE windows_memory_free_system_page_table_entries gauge
+windows_memory_free_system_page_table_entries 1.6722559e+07
+# HELP windows_memory_modified_page_list_bytes The amount of physical memory, in bytes, that is assigned to the modified page list. This memory contains cached data and code that is not actively in use by processes, the system and the system cache (ModifiedPageListBytes)
+# TYPE windows_memory_modified_page_list_bytes gauge
+windows_memory_modified_page_list_bytes 3.2653312e+07
+# HELP windows_memory_page_faults_total Overall rate at which faulted pages are handled by the processor (Page Faults/sec)
+# TYPE windows_memory_page_faults_total counter
+windows_memory_page_faults_total 1.19093924e+08
+# HELP windows_memory_pool_nonpaged_allocs_total The number of calls to allocate space in the nonpaged pool. The nonpaged pool is an area of system memory area for objects that cannot be written to disk, and must remain in physical memory as long as they are allocated (PoolNonpagedAllocs)
+# TYPE windows_memory_pool_nonpaged_allocs_total gauge
+windows_memory_pool_nonpaged_allocs_total 0
+# HELP windows_memory_pool_nonpaged_bytes Number of bytes in the non-paged pool, an area of the system virtual memory that is used for objects that cannot be written to disk, but must remain in physical memory as long as they are allocated (PoolNonpagedBytes)
+# TYPE windows_memory_pool_nonpaged_bytes gauge
+windows_memory_pool_nonpaged_bytes 1.26865408e+08
+# HELP windows_memory_pool_paged_allocs_total Number of calls to allocate space in the paged pool, regardless of the amount of space allocated in each call (PoolPagedAllocs)
+# TYPE windows_memory_pool_paged_allocs_total counter
+windows_memory_pool_paged_allocs_total 0
+# HELP windows_memory_pool_paged_bytes (PoolPagedBytes)
+# TYPE windows_memory_pool_paged_bytes gauge
+windows_memory_pool_paged_bytes 3.03906816e+08
+# HELP windows_memory_pool_paged_resident_bytes The size, in bytes, of the portion of the paged pool that is currently resident and active in physical memory. The paged pool is an area of the system virtual memory that is used for objects that can be written to disk when they are not being used (PoolPagedResidentBytes)
+# TYPE windows_memory_pool_paged_resident_bytes gauge
+windows_memory_pool_paged_resident_bytes 2.94293504e+08
+# HELP windows_memory_standby_cache_core_bytes The amount of physical memory, in bytes, that is assigned to the core standby cache page lists. This memory contains cached data and code that is not actively in use by processes, the system and the system cache (StandbyCacheCoreBytes)
+# TYPE windows_memory_standby_cache_core_bytes gauge
+windows_memory_standby_cache_core_bytes 1.0737664e+08
+# HELP windows_memory_standby_cache_normal_priority_bytes The amount of physical memory, in bytes, that is assigned to the normal priority standby cache page lists. This memory contains cached data and code that is not actively in use by processes, the system and the system cache (StandbyCacheNormalPriorityBytes)
+# TYPE windows_memory_standby_cache_normal_priority_bytes gauge
+windows_memory_standby_cache_normal_priority_bytes 1.019121664e+09
+# HELP windows_memory_standby_cache_reserve_bytes The amount of physical memory, in bytes, that is assigned to the reserve standby cache page lists. This memory contains cached data and code that is not actively in use by processes, the system and the system cache (StandbyCacheReserveBytes)
+# TYPE windows_memory_standby_cache_reserve_bytes gauge
+windows_memory_standby_cache_reserve_bytes 2.33033728e+08
+# HELP windows_memory_swap_page_operations_total Total number of swap page read and writes (PagesPersec)
+# TYPE windows_memory_swap_page_operations_total counter
+windows_memory_swap_page_operations_total 4.956175e+06
+# HELP windows_memory_swap_page_reads_total Number of disk page reads (a single read operation reading several pages is still only counted once) (PageReadsPersec)
+# TYPE windows_memory_swap_page_reads_total counter
+windows_memory_swap_page_reads_total 402087
+# HELP windows_memory_swap_page_writes_total Number of disk page writes (a single write operation writing several pages is still only counted once) (PageWritesPersec)
+# TYPE windows_memory_swap_page_writes_total counter
+windows_memory_swap_page_writes_total 7012
+# HELP windows_memory_swap_pages_read_total Number of pages read across all page reads (ie counting all pages read even if they are read in a single operation) (PagesInputPersec)
+# TYPE windows_memory_swap_pages_read_total counter
+windows_memory_swap_pages_read_total 4.643279e+06
+# HELP windows_memory_swap_pages_written_total Number of pages written across all page writes (ie counting all pages written even if they are written in a single operation) (PagesOutputPersec)
+# TYPE windows_memory_swap_pages_written_total counter
+windows_memory_swap_pages_written_total 312896
+# HELP windows_memory_system_cache_resident_bytes The size, in bytes, of the portion of the system file cache which is currently resident and active in physical memory (SystemCacheResidentBytes)
+# TYPE windows_memory_system_cache_resident_bytes gauge
+windows_memory_system_cache_resident_bytes 1.70774528e+08
+# HELP windows_memory_system_code_resident_bytes The size, in bytes, of the pageable operating system code that is currently resident and active in physical memory (SystemCodeResidentBytes)
+# TYPE windows_memory_system_code_resident_bytes gauge
+windows_memory_system_code_resident_bytes 1.71008e+07
+# HELP windows_memory_system_code_total_bytes The size, in bytes, of the pageable operating system code currently mapped into the system virtual address space (SystemCodeTotalBytes)
+# TYPE windows_memory_system_code_total_bytes gauge
+windows_memory_system_code_total_bytes 8192
+# HELP windows_memory_system_driver_resident_bytes The size, in bytes, of the pageable physical memory being used by device drivers. It is the working set (physical memory area) of the drivers (SystemDriverResidentBytes)
+# TYPE windows_memory_system_driver_resident_bytes gauge
+windows_memory_system_driver_resident_bytes 4.6092288e+07
+# HELP windows_memory_system_driver_total_bytes The size, in bytes, of the pageable virtual memory currently being used by device drivers. Pageable memory can be written to disk when it is not being used (SystemDriverTotalBytes)
+# TYPE windows_memory_system_driver_total_bytes gauge
+windows_memory_system_driver_total_bytes 1.8731008e+07
+# HELP windows_memory_transition_faults_total Number of faults rate at which page faults are resolved by recovering pages that were being used by another process sharing the page, or were on the modified page list or the standby list, or were being written to disk at the time of the page fault (TransitionFaultsPersec)
+# TYPE windows_memory_transition_faults_total counter
+windows_memory_transition_faults_total 2.7183527e+07
+# HELP windows_memory_transition_pages_repurposed_total Transition Pages RePurposed is the rate at which the number of transition cache pages were reused for a different purpose (TransitionPagesRePurposedPersec)
+# TYPE windows_memory_transition_pages_repurposed_total counter
+windows_memory_transition_pages_repurposed_total 2.856471e+06
+# HELP windows_memory_write_copies_total The number of page faults caused by attempting to write that were satisfied by copying the page from elsewhere in physical memory (WriteCopiesPersec)
+# TYPE windows_memory_write_copies_total counter
+windows_memory_write_copies_total 1.194039e+06
+# HELP windows_mssql_accessmethods_au_batch_cleanup_failures (AccessMethods.FailedAUcleanupbatches)
+# TYPE windows_mssql_accessmethods_au_batch_cleanup_failures counter
+windows_mssql_accessmethods_au_batch_cleanup_failures{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_au_batch_cleanups (AccessMethods.AUcleanupbatches)
+# TYPE windows_mssql_accessmethods_au_batch_cleanups counter
+windows_mssql_accessmethods_au_batch_cleanups{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_au_cleanups (AccessMethods.AUcleanups)
+# TYPE windows_mssql_accessmethods_au_cleanups counter
+windows_mssql_accessmethods_au_cleanups{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_by_reference_lob_creates (AccessMethods.ByreferenceLobCreateCount)
+# TYPE windows_mssql_accessmethods_by_reference_lob_creates counter
+windows_mssql_accessmethods_by_reference_lob_creates{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_by_reference_lob_uses (AccessMethods.ByreferenceLobUseCount)
+# TYPE windows_mssql_accessmethods_by_reference_lob_uses counter
+windows_mssql_accessmethods_by_reference_lob_uses{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_column_value_pulls (AccessMethods.CountPullInRow)
+# TYPE windows_mssql_accessmethods_column_value_pulls counter
+windows_mssql_accessmethods_column_value_pulls{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_column_value_pushes (AccessMethods.CountPushOffRow)
+# TYPE windows_mssql_accessmethods_column_value_pushes counter
+windows_mssql_accessmethods_column_value_pushes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_deferred_dropped_aus (AccessMethods.DeferreddroppedAUs)
+# TYPE windows_mssql_accessmethods_deferred_dropped_aus gauge
+windows_mssql_accessmethods_deferred_dropped_aus{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_deferred_dropped_rowsets (AccessMethods.DeferredDroppedrowsets)
+# TYPE windows_mssql_accessmethods_deferred_dropped_rowsets gauge
+windows_mssql_accessmethods_deferred_dropped_rowsets{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_dropped_rowset_cleanups (AccessMethods.Droppedrowsetcleanups)
+# TYPE windows_mssql_accessmethods_dropped_rowset_cleanups counter
+windows_mssql_accessmethods_dropped_rowset_cleanups{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_dropped_rowset_skips (AccessMethods.Droppedrowsetsskipped)
+# TYPE windows_mssql_accessmethods_dropped_rowset_skips counter
+windows_mssql_accessmethods_dropped_rowset_skips{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_extent_allocations (AccessMethods.ExtentsAllocated)
+# TYPE windows_mssql_accessmethods_extent_allocations counter
+windows_mssql_accessmethods_extent_allocations{mssql_instance="SQLEXPRESS"} 16
+# HELP windows_mssql_accessmethods_extent_deallocations (AccessMethods.ExtentDeallocations)
+# TYPE windows_mssql_accessmethods_extent_deallocations counter
+windows_mssql_accessmethods_extent_deallocations{mssql_instance="SQLEXPRESS"} 3
+# HELP windows_mssql_accessmethods_forwarded_records (AccessMethods.ForwardedRecords)
+# TYPE windows_mssql_accessmethods_forwarded_records counter
+windows_mssql_accessmethods_forwarded_records{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_free_space_page_fetches (AccessMethods.FreeSpacePageFetches)
+# TYPE windows_mssql_accessmethods_free_space_page_fetches counter
+windows_mssql_accessmethods_free_space_page_fetches{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_free_space_scans (AccessMethods.FreeSpaceScans)
+# TYPE windows_mssql_accessmethods_free_space_scans counter
+windows_mssql_accessmethods_free_space_scans{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_full_scans (AccessMethods.FullScans)
+# TYPE windows_mssql_accessmethods_full_scans counter
+windows_mssql_accessmethods_full_scans{mssql_instance="SQLEXPRESS"} 8743
+# HELP windows_mssql_accessmethods_ghost_record_skips (AccessMethods.SkippedGhostedRecordsPersec)
+# TYPE windows_mssql_accessmethods_ghost_record_skips counter
+windows_mssql_accessmethods_ghost_record_skips{mssql_instance="SQLEXPRESS"} 20
+# HELP windows_mssql_accessmethods_index_searches (AccessMethods.IndexSearches)
+# TYPE windows_mssql_accessmethods_index_searches counter
+windows_mssql_accessmethods_index_searches{mssql_instance="SQLEXPRESS"} 843808
+# HELP windows_mssql_accessmethods_insysxact_waits (AccessMethods.InSysXactwaits)
+# TYPE windows_mssql_accessmethods_insysxact_waits counter
+windows_mssql_accessmethods_insysxact_waits{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_leaf_page_cookie_failures (AccessMethods.Failedleafpagecookie)
+# TYPE windows_mssql_accessmethods_leaf_page_cookie_failures counter
+windows_mssql_accessmethods_leaf_page_cookie_failures{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_leaf_page_cookie_uses (AccessMethods.Usedleafpagecookie)
+# TYPE windows_mssql_accessmethods_leaf_page_cookie_uses counter
+windows_mssql_accessmethods_leaf_page_cookie_uses{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_handle_creates (AccessMethods.LobHandleCreateCount)
+# TYPE windows_mssql_accessmethods_lob_handle_creates counter
+windows_mssql_accessmethods_lob_handle_creates{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_handle_destroys (AccessMethods.LobHandleDestroyCount)
+# TYPE windows_mssql_accessmethods_lob_handle_destroys counter
+windows_mssql_accessmethods_lob_handle_destroys{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_read_aheads (AccessMethods.CountLobReadahead)
+# TYPE windows_mssql_accessmethods_lob_read_aheads counter
+windows_mssql_accessmethods_lob_read_aheads{mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_accessmethods_lob_ss_provider_creates (AccessMethods.LobSSProviderCreateCount)
+# TYPE windows_mssql_accessmethods_lob_ss_provider_creates counter
+windows_mssql_accessmethods_lob_ss_provider_creates{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_ss_provider_destroys (AccessMethods.LobSSProviderDestroyCount)
+# TYPE windows_mssql_accessmethods_lob_ss_provider_destroys counter
+windows_mssql_accessmethods_lob_ss_provider_destroys{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_lob_ss_provider_truncations (AccessMethods.LobSSProviderTruncationCount)
+# TYPE windows_mssql_accessmethods_lob_ss_provider_truncations counter
+windows_mssql_accessmethods_lob_ss_provider_truncations{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_mixed_page_allocations (AccessMethods.MixedpageallocationsPersec)
+# TYPE windows_mssql_accessmethods_mixed_page_allocations counter
+windows_mssql_accessmethods_mixed_page_allocations{mssql_instance="SQLEXPRESS"} 66
+# HELP windows_mssql_accessmethods_page_allocations (AccessMethods.PagesAllocatedPersec)
+# TYPE windows_mssql_accessmethods_page_allocations counter
+windows_mssql_accessmethods_page_allocations{mssql_instance="SQLEXPRESS"} 83
+# HELP windows_mssql_accessmethods_page_compression_attempts (AccessMethods.PagecompressionattemptsPersec)
+# TYPE windows_mssql_accessmethods_page_compression_attempts counter
+windows_mssql_accessmethods_page_compression_attempts{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_page_compressions (AccessMethods.PagescompressedPersec)
+# TYPE windows_mssql_accessmethods_page_compressions counter
+windows_mssql_accessmethods_page_compressions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_page_deallocations (AccessMethods.PageDeallocationsPersec)
+# TYPE windows_mssql_accessmethods_page_deallocations counter
+windows_mssql_accessmethods_page_deallocations{mssql_instance="SQLEXPRESS"} 60
+# HELP windows_mssql_accessmethods_page_splits (AccessMethods.PageSplitsPersec)
+# TYPE windows_mssql_accessmethods_page_splits counter
+windows_mssql_accessmethods_page_splits{mssql_instance="SQLEXPRESS"} 429
+# HELP windows_mssql_accessmethods_probe_scans (AccessMethods.ProbeScansPersec)
+# TYPE windows_mssql_accessmethods_probe_scans counter
+windows_mssql_accessmethods_probe_scans{mssql_instance="SQLEXPRESS"} 217563
+# HELP windows_mssql_accessmethods_range_scans (AccessMethods.RangeScansPersec)
+# TYPE windows_mssql_accessmethods_range_scans counter
+windows_mssql_accessmethods_range_scans{mssql_instance="SQLEXPRESS"} 590779
+# HELP windows_mssql_accessmethods_scan_point_revalidations (AccessMethods.ScanPointRevalidationsPersec)
+# TYPE windows_mssql_accessmethods_scan_point_revalidations counter
+windows_mssql_accessmethods_scan_point_revalidations{mssql_instance="SQLEXPRESS"} 5
+# HELP windows_mssql_accessmethods_table_lock_escalations (AccessMethods.TableLockEscalationsPersec)
+# TYPE windows_mssql_accessmethods_table_lock_escalations counter
+windows_mssql_accessmethods_table_lock_escalations{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_tree_page_cookie_failures (AccessMethods.Failedtreepagecookie)
+# TYPE windows_mssql_accessmethods_tree_page_cookie_failures counter
+windows_mssql_accessmethods_tree_page_cookie_failures{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_tree_page_cookie_uses (AccessMethods.Usedtreepagecookie)
+# TYPE windows_mssql_accessmethods_tree_page_cookie_uses counter
+windows_mssql_accessmethods_tree_page_cookie_uses{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_accessmethods_workfile_creates (AccessMethods.WorkfilesCreatedPersec)
+# TYPE windows_mssql_accessmethods_workfile_creates counter
+windows_mssql_accessmethods_workfile_creates{mssql_instance="SQLEXPRESS"} 96
+# HELP windows_mssql_accessmethods_worktables_creates (AccessMethods.WorktablesCreatedPersec)
+# TYPE windows_mssql_accessmethods_worktables_creates counter
+windows_mssql_accessmethods_worktables_creates{mssql_instance="SQLEXPRESS"} 557
+# HELP windows_mssql_accessmethods_worktables_from_cache_hits (AccessMethods.WorktablesFromCacheRatio)
+# TYPE windows_mssql_accessmethods_worktables_from_cache_hits counter
+windows_mssql_accessmethods_worktables_from_cache_hits{mssql_instance="SQLEXPRESS"} 357
+# HELP windows_mssql_accessmethods_worktables_from_cache_lookups (AccessMethods.WorktablesFromCacheRatio_Base)
+# TYPE windows_mssql_accessmethods_worktables_from_cache_lookups counter
+windows_mssql_accessmethods_worktables_from_cache_lookups{mssql_instance="SQLEXPRESS"} 364
+# HELP windows_mssql_bufman_background_writer_pages (BufferManager.Backgroundwriterpages)
+# TYPE windows_mssql_bufman_background_writer_pages counter
+windows_mssql_bufman_background_writer_pages{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_buffer_cache_hits (BufferManager.Buffercachehitratio)
+# TYPE windows_mssql_bufman_buffer_cache_hits gauge
+windows_mssql_bufman_buffer_cache_hits{mssql_instance="SQLEXPRESS"} 86
+# HELP windows_mssql_bufman_buffer_cache_lookups (BufferManager.Buffercachehitratio_Base)
+# TYPE windows_mssql_bufman_buffer_cache_lookups gauge
+windows_mssql_bufman_buffer_cache_lookups{mssql_instance="SQLEXPRESS"} 86
+# HELP windows_mssql_bufman_checkpoint_pages (BufferManager.Checkpointpages)
+# TYPE windows_mssql_bufman_checkpoint_pages counter
+windows_mssql_bufman_checkpoint_pages{mssql_instance="SQLEXPRESS"} 82
+# HELP windows_mssql_bufman_database_pages (BufferManager.Databasepages)
+# TYPE windows_mssql_bufman_database_pages gauge
+windows_mssql_bufman_database_pages{mssql_instance="SQLEXPRESS"} 829
+# HELP windows_mssql_bufman_extension_allocated_pages (BufferManager.Extensionallocatedpages)
+# TYPE windows_mssql_bufman_extension_allocated_pages gauge
+windows_mssql_bufman_extension_allocated_pages{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_free_pages (BufferManager.Extensionfreepages)
+# TYPE windows_mssql_bufman_extension_free_pages gauge
+windows_mssql_bufman_extension_free_pages{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_in_use_as_percentage (BufferManager.Extensioninuseaspercentage)
+# TYPE windows_mssql_bufman_extension_in_use_as_percentage gauge
+windows_mssql_bufman_extension_in_use_as_percentage{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_outstanding_io (BufferManager.ExtensionoutstandingIOcounter)
+# TYPE windows_mssql_bufman_extension_outstanding_io gauge
+windows_mssql_bufman_extension_outstanding_io{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_page_evictions (BufferManager.Extensionpageevictions)
+# TYPE windows_mssql_bufman_extension_page_evictions counter
+windows_mssql_bufman_extension_page_evictions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_page_reads (BufferManager.Extensionpagereads)
+# TYPE windows_mssql_bufman_extension_page_reads counter
+windows_mssql_bufman_extension_page_reads{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_page_unreferenced_seconds (BufferManager.Extensionpageunreferencedtime)
+# TYPE windows_mssql_bufman_extension_page_unreferenced_seconds gauge
+windows_mssql_bufman_extension_page_unreferenced_seconds{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_extension_page_writes (BufferManager.Extensionpagewrites)
+# TYPE windows_mssql_bufman_extension_page_writes counter
+windows_mssql_bufman_extension_page_writes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_free_list_stalls (BufferManager.Freeliststalls)
+# TYPE windows_mssql_bufman_free_list_stalls counter
+windows_mssql_bufman_free_list_stalls{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_integral_controller_slope (BufferManager.IntegralControllerSlope)
+# TYPE windows_mssql_bufman_integral_controller_slope gauge
+windows_mssql_bufman_integral_controller_slope{mssql_instance="SQLEXPRESS"} 10
+# HELP windows_mssql_bufman_lazywrites (BufferManager.Lazywrites)
+# TYPE windows_mssql_bufman_lazywrites counter
+windows_mssql_bufman_lazywrites{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_bufman_page_life_expectancy_seconds (BufferManager.Pagelifeexpectancy)
+# TYPE windows_mssql_bufman_page_life_expectancy_seconds gauge
+windows_mssql_bufman_page_life_expectancy_seconds{mssql_instance="SQLEXPRESS"} 191350
+# HELP windows_mssql_bufman_page_lookups (BufferManager.Pagelookups)
+# TYPE windows_mssql_bufman_page_lookups counter
+windows_mssql_bufman_page_lookups{mssql_instance="SQLEXPRESS"} 1.699668e+06
+# HELP windows_mssql_bufman_page_reads (BufferManager.Pagereads)
+# TYPE windows_mssql_bufman_page_reads counter
+windows_mssql_bufman_page_reads{mssql_instance="SQLEXPRESS"} 797
+# HELP windows_mssql_bufman_page_writes (BufferManager.Pagewrites)
+# TYPE windows_mssql_bufman_page_writes counter
+windows_mssql_bufman_page_writes{mssql_instance="SQLEXPRESS"} 92
+# HELP windows_mssql_bufman_read_ahead_issuing_seconds (BufferManager.Readaheadtime)
+# TYPE windows_mssql_bufman_read_ahead_issuing_seconds counter
+windows_mssql_bufman_read_ahead_issuing_seconds{mssql_instance="SQLEXPRESS"} 1292
+# HELP windows_mssql_bufman_read_ahead_pages (BufferManager.Readaheadpages)
+# TYPE windows_mssql_bufman_read_ahead_pages counter
+windows_mssql_bufman_read_ahead_pages{mssql_instance="SQLEXPRESS"} 94
+# HELP windows_mssql_bufman_target_pages (BufferManager.Targetpages)
+# TYPE windows_mssql_bufman_target_pages gauge
+windows_mssql_bufman_target_pages{mssql_instance="SQLEXPRESS"} 180480
+# HELP windows_mssql_collector_duration_seconds windows_exporter: Duration of an mssql child collection.
+# TYPE windows_mssql_collector_duration_seconds gauge
+windows_mssql_collector_duration_seconds{collector="accessmethods",mssql_instance="SQLEXPRESS"} 0.0009723
+windows_mssql_collector_duration_seconds{collector="availreplica",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="bufman",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="databases",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="dbreplica",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="genstats",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="locks",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="memmgr",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="sqlerrors",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="sqlstats",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="transactions",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_collector_duration_seconds{collector="waitstats",mssql_instance="SQLEXPRESS"} 0.0012212
+# HELP windows_mssql_collector_success windows_exporter: Whether a mssql child collector was successful.
+# TYPE windows_mssql_collector_success gauge
+windows_mssql_collector_success{collector="accessmethods",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="availreplica",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="bufman",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="databases",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="dbreplica",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="genstats",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="locks",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="memmgr",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="sqlerrors",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="sqlstats",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="transactions",mssql_instance="SQLEXPRESS"} 1
+windows_mssql_collector_success{collector="waitstats",mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_databases_active_parallel_redo_threads (Databases.ActiveParallelredothreads)
+# TYPE windows_mssql_databases_active_parallel_redo_threads gauge
+windows_mssql_databases_active_parallel_redo_threads{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_parallel_redo_threads{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_parallel_redo_threads{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_parallel_redo_threads{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_parallel_redo_threads{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_active_transactions (Databases.ActiveTransactions)
+# TYPE windows_mssql_databases_active_transactions gauge
+windows_mssql_databases_active_transactions{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_active_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_backup_restore_operations (Databases.BackupPerRestoreThroughput)
+# TYPE windows_mssql_databases_backup_restore_operations counter
+windows_mssql_databases_backup_restore_operations{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_backup_restore_operations{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_backup_restore_operations{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_backup_restore_operations{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_backup_restore_operations{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_bulk_copy_bytes (Databases.BulkCopyThroughput)
+# TYPE windows_mssql_databases_bulk_copy_bytes counter
+windows_mssql_databases_bulk_copy_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_bulk_copy_rows (Databases.BulkCopyRows)
+# TYPE windows_mssql_databases_bulk_copy_rows counter
+windows_mssql_databases_bulk_copy_rows{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_rows{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_rows{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_rows{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_bulk_copy_rows{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_commit_table_entries (Databases.Committableentries)
+# TYPE windows_mssql_databases_commit_table_entries gauge
+windows_mssql_databases_commit_table_entries{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_commit_table_entries{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_commit_table_entries{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_commit_table_entries{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_commit_table_entries{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_data_files_size_bytes (Databases.DataFilesSizeKB)
+# TYPE windows_mssql_databases_data_files_size_bytes gauge
+windows_mssql_databases_data_files_size_bytes{database="master",mssql_instance="SQLEXPRESS"} 4.653056e+06
+windows_mssql_databases_data_files_size_bytes{database="model",mssql_instance="SQLEXPRESS"} 8.388608e+06
+windows_mssql_databases_data_files_size_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 1.5466496e+07
+windows_mssql_databases_data_files_size_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 4.194304e+07
+windows_mssql_databases_data_files_size_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 8.388608e+06
+# HELP windows_mssql_databases_dbcc_logical_scan_bytes (Databases.DBCCLogicalScanBytes)
+# TYPE windows_mssql_databases_dbcc_logical_scan_bytes counter
+windows_mssql_databases_dbcc_logical_scan_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_dbcc_logical_scan_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_dbcc_logical_scan_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_dbcc_logical_scan_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_dbcc_logical_scan_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_group_commit_stall_seconds (Databases.GroupCommitTime)
+# TYPE windows_mssql_databases_group_commit_stall_seconds counter
+windows_mssql_databases_group_commit_stall_seconds{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_group_commit_stall_seconds{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_group_commit_stall_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_group_commit_stall_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_group_commit_stall_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_cache_hits (Databases.LogCacheHitRatio)
+# TYPE windows_mssql_databases_log_cache_hits gauge
+windows_mssql_databases_log_cache_hits{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_hits{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_hits{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_hits{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_hits{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_cache_lookups (Databases.LogCacheHitRatio_Base)
+# TYPE windows_mssql_databases_log_cache_lookups gauge
+windows_mssql_databases_log_cache_lookups{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_lookups{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_lookups{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_lookups{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_lookups{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_cache_reads (Databases.LogCacheReads)
+# TYPE windows_mssql_databases_log_cache_reads counter
+windows_mssql_databases_log_cache_reads{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_reads{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_reads{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_reads{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_cache_reads{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_files_size_bytes (Databases.LogFilesSizeKB)
+# TYPE windows_mssql_databases_log_files_size_bytes gauge
+windows_mssql_databases_log_files_size_bytes{database="master",mssql_instance="SQLEXPRESS"} 2.08896e+06
+windows_mssql_databases_log_files_size_bytes{database="model",mssql_instance="SQLEXPRESS"} 8.380416e+06
+windows_mssql_databases_log_files_size_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 778240
+windows_mssql_databases_log_files_size_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 1.302528e+06
+windows_mssql_databases_log_files_size_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 8.380416e+06
+# HELP windows_mssql_databases_log_files_used_size_bytes (Databases.LogFilesUsedSizeKB)
+# TYPE windows_mssql_databases_log_files_used_size_bytes gauge
+windows_mssql_databases_log_files_used_size_bytes{database="master",mssql_instance="SQLEXPRESS"} 1.210368e+06
+windows_mssql_databases_log_files_used_size_bytes{database="model",mssql_instance="SQLEXPRESS"} 585728
+windows_mssql_databases_log_files_used_size_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 532480
+windows_mssql_databases_log_files_used_size_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 637952
+windows_mssql_databases_log_files_used_size_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 565248
+# HELP windows_mssql_databases_log_flush_wait_seconds (Databases.LogFlushWaitTime)
+# TYPE windows_mssql_databases_log_flush_wait_seconds gauge
+windows_mssql_databases_log_flush_wait_seconds{database="master",mssql_instance="SQLEXPRESS"} 0.226
+windows_mssql_databases_log_flush_wait_seconds{database="model",mssql_instance="SQLEXPRESS"} 0.002
+windows_mssql_databases_log_flush_wait_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_wait_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_wait_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_flush_waits (Databases.LogFlushWaits)
+# TYPE windows_mssql_databases_log_flush_waits counter
+windows_mssql_databases_log_flush_waits{database="master",mssql_instance="SQLEXPRESS"} 245
+windows_mssql_databases_log_flush_waits{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_flush_waits{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_waits{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_waits{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_flush_write_seconds (Databases.LogFlushWriteTimems)
+# TYPE windows_mssql_databases_log_flush_write_seconds gauge
+windows_mssql_databases_log_flush_write_seconds{database="master",mssql_instance="SQLEXPRESS"} 0.164
+windows_mssql_databases_log_flush_write_seconds{database="model",mssql_instance="SQLEXPRESS"} 0.002
+windows_mssql_databases_log_flush_write_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_write_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flush_write_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0.002
+# HELP windows_mssql_databases_log_flushed_bytes (Databases.LogBytesFlushed)
+# TYPE windows_mssql_databases_log_flushed_bytes counter
+windows_mssql_databases_log_flushed_bytes{database="master",mssql_instance="SQLEXPRESS"} 3.702784e+06
+windows_mssql_databases_log_flushed_bytes{database="model",mssql_instance="SQLEXPRESS"} 12288
+windows_mssql_databases_log_flushed_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flushed_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flushed_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 118784
+# HELP windows_mssql_databases_log_flushes (Databases.LogFlushes)
+# TYPE windows_mssql_databases_log_flushes counter
+windows_mssql_databases_log_flushes{database="master",mssql_instance="SQLEXPRESS"} 252
+windows_mssql_databases_log_flushes{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_flushes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_flushes{database="tempdb",mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_databases_log_growths (Databases.LogGrowths)
+# TYPE windows_mssql_databases_log_growths gauge
+windows_mssql_databases_log_growths{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_growths{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_growths{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_growths{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_growths{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_cache_misses (Databases.LogPoolCacheMisses)
+# TYPE windows_mssql_databases_log_pool_cache_misses counter
+windows_mssql_databases_log_pool_cache_misses{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_cache_misses{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_cache_misses{database="msdb",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_cache_misses{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_cache_misses{database="tempdb",mssql_instance="SQLEXPRESS"} 3
+# HELP windows_mssql_databases_log_pool_disk_reads (Databases.LogPoolDiskReads)
+# TYPE windows_mssql_databases_log_pool_disk_reads counter
+windows_mssql_databases_log_pool_disk_reads{database="master",mssql_instance="SQLEXPRESS"} 2
+windows_mssql_databases_log_pool_disk_reads{database="model",mssql_instance="SQLEXPRESS"} 2
+windows_mssql_databases_log_pool_disk_reads{database="msdb",mssql_instance="SQLEXPRESS"} 2
+windows_mssql_databases_log_pool_disk_reads{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_disk_reads{database="tempdb",mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_databases_log_pool_empty_free_pool_pushes (Databases.LogPoolPushEmptyFreePool)
+# TYPE windows_mssql_databases_log_pool_empty_free_pool_pushes counter
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_empty_free_pool_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_databases_log_pool_hash_deletes (Databases.LogPoolHashDeletes)
+# TYPE windows_mssql_databases_log_pool_hash_deletes counter
+windows_mssql_databases_log_pool_hash_deletes{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_deletes{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_deletes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_hash_deletes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_hash_deletes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_hash_inserts (Databases.LogPoolHashInserts)
+# TYPE windows_mssql_databases_log_pool_hash_inserts counter
+windows_mssql_databases_log_pool_hash_inserts{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_inserts{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_inserts{database="msdb",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_inserts{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_hash_inserts{database="tempdb",mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_databases_log_pool_invalid_hash_entries (Databases.LogPoolInvalidHashEntry)
+# TYPE windows_mssql_databases_log_pool_invalid_hash_entries counter
+windows_mssql_databases_log_pool_invalid_hash_entries{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_invalid_hash_entries{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_invalid_hash_entries{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_invalid_hash_entries{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_invalid_hash_entries{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_log_scan_pushes (Databases.LogPoolLogScanPushes)
+# TYPE windows_mssql_databases_log_pool_log_scan_pushes counter
+windows_mssql_databases_log_pool_log_scan_pushes{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_log_scan_pushes{database="model",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_log_scan_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_log_scan_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_pool_log_scan_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_databases_log_pool_log_writer_pushes (Databases.LogPoolLogWriterPushes)
+# TYPE windows_mssql_databases_log_pool_log_writer_pushes counter
+windows_mssql_databases_log_pool_log_writer_pushes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_log_writer_pushes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_log_writer_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_log_writer_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_log_writer_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_low_memory_pushes (Databases.LogPoolPushLowMemory)
+# TYPE windows_mssql_databases_log_pool_low_memory_pushes counter
+windows_mssql_databases_log_pool_low_memory_pushes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_low_memory_pushes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_low_memory_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_low_memory_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_low_memory_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_no_free_buffer_pushes (Databases.LogPoolPushNoFreeBuffer)
+# TYPE windows_mssql_databases_log_pool_no_free_buffer_pushes counter
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_no_free_buffer_pushes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_req_behind_trunc (Databases.LogPoolReqBehindTrunc)
+# TYPE windows_mssql_databases_log_pool_req_behind_trunc counter
+windows_mssql_databases_log_pool_req_behind_trunc{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_req_behind_trunc{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_req_behind_trunc{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_req_behind_trunc{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_req_behind_trunc{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_pool_requests (Databases.LogPoolRequests)
+# TYPE windows_mssql_databases_log_pool_requests counter
+windows_mssql_databases_log_pool_requests{database="master",mssql_instance="SQLEXPRESS"} 8
+windows_mssql_databases_log_pool_requests{database="model",mssql_instance="SQLEXPRESS"} 8
+windows_mssql_databases_log_pool_requests{database="msdb",mssql_instance="SQLEXPRESS"} 8
+windows_mssql_databases_log_pool_requests{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 8
+windows_mssql_databases_log_pool_requests{database="tempdb",mssql_instance="SQLEXPRESS"} 4
+# HELP windows_mssql_databases_log_pool_requests_old_vlf (Databases.LogPoolRequestsOldVLF)
+# TYPE windows_mssql_databases_log_pool_requests_old_vlf counter
+windows_mssql_databases_log_pool_requests_old_vlf{database="master",mssql_instance="SQLEXPRESS"} 4
+windows_mssql_databases_log_pool_requests_old_vlf{database="model",mssql_instance="SQLEXPRESS"} 4
+windows_mssql_databases_log_pool_requests_old_vlf{database="msdb",mssql_instance="SQLEXPRESS"} 4
+windows_mssql_databases_log_pool_requests_old_vlf{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 4
+windows_mssql_databases_log_pool_requests_old_vlf{database="tempdb",mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_databases_log_pool_total_active_log_bytes (Databases.LogPoolTotalActiveLogSize)
+# TYPE windows_mssql_databases_log_pool_total_active_log_bytes gauge
+windows_mssql_databases_log_pool_total_active_log_bytes{database="master",mssql_instance="SQLEXPRESS"} 806912
+windows_mssql_databases_log_pool_total_active_log_bytes{database="model",mssql_instance="SQLEXPRESS"} 1.855488e+06
+windows_mssql_databases_log_pool_total_active_log_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 118784
+windows_mssql_databases_log_pool_total_active_log_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 107008
+windows_mssql_databases_log_pool_total_active_log_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 2.142208e+06
+# HELP windows_mssql_databases_log_pool_total_shared_pool_bytes (Databases.LogPoolTotalSharedPoolSize)
+# TYPE windows_mssql_databases_log_pool_total_shared_pool_bytes gauge
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 16384
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 67584
+windows_mssql_databases_log_pool_total_shared_pool_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 4096
+# HELP windows_mssql_databases_log_shrinks (Databases.LogShrinks)
+# TYPE windows_mssql_databases_log_shrinks gauge
+windows_mssql_databases_log_shrinks{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_shrinks{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_shrinks{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_shrinks{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_shrinks{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_truncations (Databases.LogTruncations)
+# TYPE windows_mssql_databases_log_truncations gauge
+windows_mssql_databases_log_truncations{database="master",mssql_instance="SQLEXPRESS"} 3
+windows_mssql_databases_log_truncations{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_truncations{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_truncations{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_log_truncations{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_log_used_percent (Databases.PercentLogUsed)
+# TYPE windows_mssql_databases_log_used_percent gauge
+windows_mssql_databases_log_used_percent{database="master",mssql_instance="SQLEXPRESS"} 57
+windows_mssql_databases_log_used_percent{database="model",mssql_instance="SQLEXPRESS"} 6
+windows_mssql_databases_log_used_percent{database="msdb",mssql_instance="SQLEXPRESS"} 68
+windows_mssql_databases_log_used_percent{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 49
+windows_mssql_databases_log_used_percent{database="tempdb",mssql_instance="SQLEXPRESS"} 6
+# HELP windows_mssql_databases_pending_repl_transactions (Databases.ReplPendingTransactions)
+# TYPE windows_mssql_databases_pending_repl_transactions gauge
+windows_mssql_databases_pending_repl_transactions{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_pending_repl_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_pending_repl_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_pending_repl_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_pending_repl_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_repl_transactions (Databases.ReplTranactions)
+# TYPE windows_mssql_databases_repl_transactions counter
+windows_mssql_databases_repl_transactions{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_repl_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_repl_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_repl_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_repl_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_shrink_data_movement_bytes (Databases.ShrinkDataMovementBytes)
+# TYPE windows_mssql_databases_shrink_data_movement_bytes counter
+windows_mssql_databases_shrink_data_movement_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_shrink_data_movement_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_shrink_data_movement_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_shrink_data_movement_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_shrink_data_movement_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_tracked_transactions (Databases.Trackedtransactions)
+# TYPE windows_mssql_databases_tracked_transactions counter
+windows_mssql_databases_tracked_transactions{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_tracked_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_tracked_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_tracked_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_tracked_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_transactions (Databases.Transactions)
+# TYPE windows_mssql_databases_transactions counter
+windows_mssql_databases_transactions{database="master",mssql_instance="SQLEXPRESS"} 2183
+windows_mssql_databases_transactions{database="model",mssql_instance="SQLEXPRESS"} 4467
+windows_mssql_databases_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 4582
+windows_mssql_databases_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 2
+windows_mssql_databases_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 1558
+# HELP windows_mssql_databases_write_transactions (Databases.WriteTransactions)
+# TYPE windows_mssql_databases_write_transactions counter
+windows_mssql_databases_write_transactions{database="master",mssql_instance="SQLEXPRESS"} 236
+windows_mssql_databases_write_transactions{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_write_transactions{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_write_transactions{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_write_transactions{database="tempdb",mssql_instance="SQLEXPRESS"} 29
+# HELP windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds (Databases.XTPControllerDLCLatencyPerFetch)
+# TYPE windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds gauge
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_fetch_latency_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds (Databases.XTPControllerDLCPeakLatency)
+# TYPE windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds gauge
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_dlc_peak_latency_seconds{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_xtp_controller_log_processed_bytes (Databases.XTPControllerLogProcessed)
+# TYPE windows_mssql_databases_xtp_controller_log_processed_bytes counter
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_controller_log_processed_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_databases_xtp_memory_used_bytes (Databases.XTPMemoryUsedKB)
+# TYPE windows_mssql_databases_xtp_memory_used_bytes gauge
+windows_mssql_databases_xtp_memory_used_bytes{database="master",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_memory_used_bytes{database="model",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_memory_used_bytes{database="msdb",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_memory_used_bytes{database="mssqlsystemresource",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_databases_xtp_memory_used_bytes{database="tempdb",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_active_temp_tables (GeneralStatistics.ActiveTempTables)
+# TYPE windows_mssql_genstats_active_temp_tables gauge
+windows_mssql_genstats_active_temp_tables{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_blocked_processes (GeneralStatistics.Processesblocked)
+# TYPE windows_mssql_genstats_blocked_processes gauge
+windows_mssql_genstats_blocked_processes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_connection_resets (GeneralStatistics.ConnectionReset)
+# TYPE windows_mssql_genstats_connection_resets counter
+windows_mssql_genstats_connection_resets{mssql_instance="SQLEXPRESS"} 1108
+# HELP windows_mssql_genstats_event_notifications_delayed_drop (GeneralStatistics.EventNotificationsDelayedDrop)
+# TYPE windows_mssql_genstats_event_notifications_delayed_drop gauge
+windows_mssql_genstats_event_notifications_delayed_drop{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_http_authenticated_requests (GeneralStatistics.HTTPAuthenticatedRequests)
+# TYPE windows_mssql_genstats_http_authenticated_requests gauge
+windows_mssql_genstats_http_authenticated_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_logical_connections (GeneralStatistics.LogicalConnections)
+# TYPE windows_mssql_genstats_logical_connections gauge
+windows_mssql_genstats_logical_connections{mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_genstats_logins (GeneralStatistics.Logins)
+# TYPE windows_mssql_genstats_logins counter
+windows_mssql_genstats_logins{mssql_instance="SQLEXPRESS"} 378
+# HELP windows_mssql_genstats_logouts (GeneralStatistics.Logouts)
+# TYPE windows_mssql_genstats_logouts counter
+windows_mssql_genstats_logouts{mssql_instance="SQLEXPRESS"} 377
+# HELP windows_mssql_genstats_mars_deadlocks (GeneralStatistics.MarsDeadlocks)
+# TYPE windows_mssql_genstats_mars_deadlocks gauge
+windows_mssql_genstats_mars_deadlocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_non_atomic_yields (GeneralStatistics.Nonatomicyields)
+# TYPE windows_mssql_genstats_non_atomic_yields counter
+windows_mssql_genstats_non_atomic_yields{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soap_empty_requests (GeneralStatistics.SOAPEmptyRequests)
+# TYPE windows_mssql_genstats_soap_empty_requests gauge
+windows_mssql_genstats_soap_empty_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soap_method_invocations (GeneralStatistics.SOAPMethodInvocations)
+# TYPE windows_mssql_genstats_soap_method_invocations gauge
+windows_mssql_genstats_soap_method_invocations{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soap_session_initiate_requests (GeneralStatistics.SOAPSessionInitiateRequests)
+# TYPE windows_mssql_genstats_soap_session_initiate_requests gauge
+windows_mssql_genstats_soap_session_initiate_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soap_session_terminate_requests (GeneralStatistics.SOAPSessionTerminateRequests)
+# TYPE windows_mssql_genstats_soap_session_terminate_requests gauge
+windows_mssql_genstats_soap_session_terminate_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soapsql_requests (GeneralStatistics.SOAPSQLRequests)
+# TYPE windows_mssql_genstats_soapsql_requests gauge
+windows_mssql_genstats_soapsql_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_soapwsdl_requests (GeneralStatistics.SOAPWSDLRequests)
+# TYPE windows_mssql_genstats_soapwsdl_requests gauge
+windows_mssql_genstats_soapwsdl_requests{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_sql_trace_io_provider_lock_waits (GeneralStatistics.SQLTraceIOProviderLockWaits)
+# TYPE windows_mssql_genstats_sql_trace_io_provider_lock_waits gauge
+windows_mssql_genstats_sql_trace_io_provider_lock_waits{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_temp_tables_awaiting_destruction (GeneralStatistics.TempTablesForDestruction)
+# TYPE windows_mssql_genstats_temp_tables_awaiting_destruction gauge
+windows_mssql_genstats_temp_tables_awaiting_destruction{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_temp_tables_creations (GeneralStatistics.TempTablesCreations)
+# TYPE windows_mssql_genstats_temp_tables_creations counter
+windows_mssql_genstats_temp_tables_creations{mssql_instance="SQLEXPRESS"} 4
+# HELP windows_mssql_genstats_tempdb_recovery_unit_ids_generated (GeneralStatistics.Tempdbrecoveryunitid)
+# TYPE windows_mssql_genstats_tempdb_recovery_unit_ids_generated gauge
+windows_mssql_genstats_tempdb_recovery_unit_ids_generated{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_tempdb_rowset_ids_generated (GeneralStatistics.Tempdbrowsetid)
+# TYPE windows_mssql_genstats_tempdb_rowset_ids_generated gauge
+windows_mssql_genstats_tempdb_rowset_ids_generated{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_trace_event_notification_queue_size (GeneralStatistics.TraceEventNotificationQueue)
+# TYPE windows_mssql_genstats_trace_event_notification_queue_size gauge
+windows_mssql_genstats_trace_event_notification_queue_size{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_transactions (GeneralStatistics.Transactions)
+# TYPE windows_mssql_genstats_transactions gauge
+windows_mssql_genstats_transactions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_genstats_user_connections (GeneralStatistics.UserConnections)
+# TYPE windows_mssql_genstats_user_connections gauge
+windows_mssql_genstats_user_connections{mssql_instance="SQLEXPRESS"} 1
+# HELP windows_mssql_locks_count (Locks.AverageWaitTimems_Base count of how often requests have run into locks)
+# TYPE windows_mssql_locks_count gauge
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Database"} 0.002
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Metadata"} 0.001
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_count{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_deadlocks (Locks.NumberofDeadlocks)
+# TYPE windows_mssql_locks_deadlocks counter
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Database"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Metadata"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_deadlocks{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_requests (Locks.LockRequests)
+# TYPE windows_mssql_locks_lock_requests counter
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Database"} 204467
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Extent"} 402
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="File"} 19
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="HoBT"} 28
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Key"} 1.681875e+06
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Metadata"} 25785
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Object"} 760875
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Page"} 757
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="RID"} 123
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_requests{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_timeouts (Locks.LockTimeouts)
+# TYPE windows_mssql_locks_lock_timeouts counter
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Database"} 4
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Key"} 216
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Metadata"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_timeouts{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_timeouts_excluding_NOWAIT (Locks.LockTimeoutstimeout0)
+# TYPE windows_mssql_locks_lock_timeouts_excluding_NOWAIT counter
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Database"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Metadata"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_timeouts_excluding_NOWAIT{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_wait_seconds (Locks.LockWaitTimems)
+# TYPE windows_mssql_locks_lock_wait_seconds gauge
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Database"} 0.391
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Metadata"} 0.015
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_wait_seconds{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_lock_waits (Locks.LockWaits)
+# TYPE windows_mssql_locks_lock_waits counter
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Database"} 2
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Metadata"} 1
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_lock_waits{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_locks_wait_time_seconds (Locks.AverageWaitTimems Total time in seconds which locks have been holding resources)
+# TYPE windows_mssql_locks_wait_time_seconds gauge
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="AllocUnit"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Application"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Database"} 0.391
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Extent"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="File"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="HoBT"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Key"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Metadata"} 0.015
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="OIB"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Object"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Page"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="RID"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="RowGroup"} 0
+windows_mssql_locks_wait_time_seconds{mssql_instance="SQLEXPRESS",resource="Xact"} 0
+# HELP windows_mssql_memmgr_allocated_lock_blocks (MemoryManager.LockBlocksAllocated)
+# TYPE windows_mssql_memmgr_allocated_lock_blocks gauge
+windows_mssql_memmgr_allocated_lock_blocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_allocated_lock_owner_blocks (MemoryManager.LockOwnerBlocksAllocated)
+# TYPE windows_mssql_memmgr_allocated_lock_owner_blocks gauge
+windows_mssql_memmgr_allocated_lock_owner_blocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_connection_memory_bytes (MemoryManager.ConnectionMemoryKB)
+# TYPE windows_mssql_memmgr_connection_memory_bytes gauge
+windows_mssql_memmgr_connection_memory_bytes{mssql_instance="SQLEXPRESS"} 1.015808e+06
+# HELP windows_mssql_memmgr_database_cache_memory_bytes (MemoryManager.DatabaseCacheMemoryKB)
+# TYPE windows_mssql_memmgr_database_cache_memory_bytes gauge
+windows_mssql_memmgr_database_cache_memory_bytes{mssql_instance="SQLEXPRESS"} 6.791168e+06
+# HELP windows_mssql_memmgr_external_benefit_of_memory (MemoryManager.Externalbenefitofmemory)
+# TYPE windows_mssql_memmgr_external_benefit_of_memory gauge
+windows_mssql_memmgr_external_benefit_of_memory{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_free_memory_bytes (MemoryManager.FreeMemoryKB)
+# TYPE windows_mssql_memmgr_free_memory_bytes gauge
+windows_mssql_memmgr_free_memory_bytes{mssql_instance="SQLEXPRESS"} 1.9234816e+07
+# HELP windows_mssql_memmgr_granted_workspace_memory_bytes (MemoryManager.GrantedWorkspaceMemoryKB)
+# TYPE windows_mssql_memmgr_granted_workspace_memory_bytes gauge
+windows_mssql_memmgr_granted_workspace_memory_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_lock_blocks (MemoryManager.LockBlocks)
+# TYPE windows_mssql_memmgr_lock_blocks gauge
+windows_mssql_memmgr_lock_blocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_lock_memory_bytes (MemoryManager.LockMemoryKB)
+# TYPE windows_mssql_memmgr_lock_memory_bytes gauge
+windows_mssql_memmgr_lock_memory_bytes{mssql_instance="SQLEXPRESS"} 663552
+# HELP windows_mssql_memmgr_lock_owner_blocks (MemoryManager.LockOwnerBlocks)
+# TYPE windows_mssql_memmgr_lock_owner_blocks gauge
+windows_mssql_memmgr_lock_owner_blocks{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_log_pool_memory_bytes (MemoryManager.LogPoolMemoryKB)
+# TYPE windows_mssql_memmgr_log_pool_memory_bytes gauge
+windows_mssql_memmgr_log_pool_memory_bytes{mssql_instance="SQLEXPRESS"} 2.834432e+06
+# HELP windows_mssql_memmgr_maximum_workspace_memory_bytes (MemoryManager.MaximumWorkspaceMemoryKB)
+# TYPE windows_mssql_memmgr_maximum_workspace_memory_bytes gauge
+windows_mssql_memmgr_maximum_workspace_memory_bytes{mssql_instance="SQLEXPRESS"} 1.36482816e+09
+# HELP windows_mssql_memmgr_optimizer_memory_bytes (MemoryManager.OptimizerMemoryKB)
+# TYPE windows_mssql_memmgr_optimizer_memory_bytes gauge
+windows_mssql_memmgr_optimizer_memory_bytes{mssql_instance="SQLEXPRESS"} 1.007616e+06
+# HELP windows_mssql_memmgr_outstanding_memory_grants (MemoryManager.MemoryGrantsOutstanding)
+# TYPE windows_mssql_memmgr_outstanding_memory_grants gauge
+windows_mssql_memmgr_outstanding_memory_grants{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_pending_memory_grants (MemoryManager.MemoryGrantsPending)
+# TYPE windows_mssql_memmgr_pending_memory_grants gauge
+windows_mssql_memmgr_pending_memory_grants{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_reserved_server_memory_bytes (MemoryManager.ReservedServerMemoryKB)
+# TYPE windows_mssql_memmgr_reserved_server_memory_bytes gauge
+windows_mssql_memmgr_reserved_server_memory_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_memmgr_sql_cache_memory_bytes (MemoryManager.SQLCacheMemoryKB)
+# TYPE windows_mssql_memmgr_sql_cache_memory_bytes gauge
+windows_mssql_memmgr_sql_cache_memory_bytes{mssql_instance="SQLEXPRESS"} 1.728512e+06
+# HELP windows_mssql_memmgr_stolen_server_memory_bytes (MemoryManager.StolenServerMemoryKB)
+# TYPE windows_mssql_memmgr_stolen_server_memory_bytes gauge
+windows_mssql_memmgr_stolen_server_memory_bytes{mssql_instance="SQLEXPRESS"} 1.7281024e+08
+# HELP windows_mssql_memmgr_target_server_memory_bytes (MemoryManager.TargetServerMemoryKB)
+# TYPE windows_mssql_memmgr_target_server_memory_bytes gauge
+windows_mssql_memmgr_target_server_memory_bytes{mssql_instance="SQLEXPRESS"} 1.816387584e+09
+# HELP windows_mssql_memmgr_total_server_memory_bytes (MemoryManager.TotalServerMemoryKB)
+# TYPE windows_mssql_memmgr_total_server_memory_bytes gauge
+windows_mssql_memmgr_total_server_memory_bytes{mssql_instance="SQLEXPRESS"} 1.98836224e+08
+# HELP windows_mssql_sql_errors_total (SQLErrors.Total)
+# TYPE windows_mssql_sql_errors_total counter
+windows_mssql_sql_errors_total{mssql_instance="SQLEXPRESS",resource="DB Offline Errors"} 0
+windows_mssql_sql_errors_total{mssql_instance="SQLEXPRESS",resource="Info Errors"} 766
+windows_mssql_sql_errors_total{mssql_instance="SQLEXPRESS",resource="Kill Connection Errors"} 0
+windows_mssql_sql_errors_total{mssql_instance="SQLEXPRESS",resource="User Errors"} 29
+# HELP windows_mssql_sqlstats_auto_parameterization_attempts (SQLStatistics.AutoParamAttempts)
+# TYPE windows_mssql_sqlstats_auto_parameterization_attempts counter
+windows_mssql_sqlstats_auto_parameterization_attempts{mssql_instance="SQLEXPRESS"} 37
+# HELP windows_mssql_sqlstats_batch_requests (SQLStatistics.BatchRequests)
+# TYPE windows_mssql_sqlstats_batch_requests counter
+windows_mssql_sqlstats_batch_requests{mssql_instance="SQLEXPRESS"} 2972
+# HELP windows_mssql_sqlstats_failed_auto_parameterization_attempts (SQLStatistics.FailedAutoParams)
+# TYPE windows_mssql_sqlstats_failed_auto_parameterization_attempts counter
+windows_mssql_sqlstats_failed_auto_parameterization_attempts{mssql_instance="SQLEXPRESS"} 29
+# HELP windows_mssql_sqlstats_forced_parameterizations (SQLStatistics.ForcedParameterizations)
+# TYPE windows_mssql_sqlstats_forced_parameterizations counter
+windows_mssql_sqlstats_forced_parameterizations{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_sqlstats_guided_plan_executions (SQLStatistics.Guidedplanexecutions)
+# TYPE windows_mssql_sqlstats_guided_plan_executions counter
+windows_mssql_sqlstats_guided_plan_executions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_sqlstats_misguided_plan_executions (SQLStatistics.Misguidedplanexecutions)
+# TYPE windows_mssql_sqlstats_misguided_plan_executions counter
+windows_mssql_sqlstats_misguided_plan_executions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_sqlstats_safe_auto_parameterization_attempts (SQLStatistics.SafeAutoParams)
+# TYPE windows_mssql_sqlstats_safe_auto_parameterization_attempts counter
+windows_mssql_sqlstats_safe_auto_parameterization_attempts{mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_sqlstats_sql_attentions (SQLStatistics.SQLAttentions)
+# TYPE windows_mssql_sqlstats_sql_attentions counter
+windows_mssql_sqlstats_sql_attentions{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_sqlstats_sql_compilations (SQLStatistics.SQLCompilations)
+# TYPE windows_mssql_sqlstats_sql_compilations counter
+windows_mssql_sqlstats_sql_compilations{mssql_instance="SQLEXPRESS"} 376
+# HELP windows_mssql_sqlstats_sql_recompilations (SQLStatistics.SQLReCompilations)
+# TYPE windows_mssql_sqlstats_sql_recompilations counter
+windows_mssql_sqlstats_sql_recompilations{mssql_instance="SQLEXPRESS"} 8
+# HELP windows_mssql_sqlstats_unsafe_auto_parameterization_attempts (SQLStatistics.UnsafeAutoParams)
+# TYPE windows_mssql_sqlstats_unsafe_auto_parameterization_attempts counter
+windows_mssql_sqlstats_unsafe_auto_parameterization_attempts{mssql_instance="SQLEXPRESS"} 6
+# HELP windows_mssql_transactions_active (Transactions.Transactions)
+# TYPE windows_mssql_transactions_active gauge
+windows_mssql_transactions_active{mssql_instance="SQLEXPRESS"} 6
+# HELP windows_mssql_transactions_longest_transaction_running_seconds (Transactions.LongestTransactionRunningTime)
+# TYPE windows_mssql_transactions_longest_transaction_running_seconds gauge
+windows_mssql_transactions_longest_transaction_running_seconds{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_nonsnapshot_version_active_total (Transactions.NonSnapshotVersionTransactions)
+# TYPE windows_mssql_transactions_nonsnapshot_version_active_total counter
+windows_mssql_transactions_nonsnapshot_version_active_total{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_snapshot_active_total (Transactions.SnapshotTransactions)
+# TYPE windows_mssql_transactions_snapshot_active_total counter
+windows_mssql_transactions_snapshot_active_total{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_tempdb_free_space_bytes (Transactions.FreeSpaceInTempDbKB)
+# TYPE windows_mssql_transactions_tempdb_free_space_bytes gauge
+windows_mssql_transactions_tempdb_free_space_bytes{mssql_instance="SQLEXPRESS"} 5.046272e+06
+# HELP windows_mssql_transactions_update_conflicts_total (Transactions.UpdateConflictRatio)
+# TYPE windows_mssql_transactions_update_conflicts_total counter
+windows_mssql_transactions_update_conflicts_total{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_update_snapshot_active_total (Transactions.UpdateSnapshotTransactions)
+# TYPE windows_mssql_transactions_update_snapshot_active_total counter
+windows_mssql_transactions_update_snapshot_active_total{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_cleanup_rate_bytes (Transactions.VersionCleanupRateKBs)
+# TYPE windows_mssql_transactions_version_cleanup_rate_bytes gauge
+windows_mssql_transactions_version_cleanup_rate_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_generation_rate_bytes (Transactions.VersionGenerationRateKBs)
+# TYPE windows_mssql_transactions_version_generation_rate_bytes gauge
+windows_mssql_transactions_version_generation_rate_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_store_creation_units (Transactions.VersionStoreUnitCreation)
+# TYPE windows_mssql_transactions_version_store_creation_units counter
+windows_mssql_transactions_version_store_creation_units{mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_transactions_version_store_size_bytes (Transactions.VersionStoreSizeKB)
+# TYPE windows_mssql_transactions_version_store_size_bytes gauge
+windows_mssql_transactions_version_store_size_bytes{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_store_truncation_units (Transactions.VersionStoreUnitTruncation)
+# TYPE windows_mssql_transactions_version_store_truncation_units counter
+windows_mssql_transactions_version_store_truncation_units{mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_transactions_version_store_units (Transactions.VersionStoreUnitCount)
+# TYPE windows_mssql_transactions_version_store_units counter
+windows_mssql_transactions_version_store_units{mssql_instance="SQLEXPRESS"} 2
+# HELP windows_mssql_waitstats_lock_waits (WaitStats.LockWaits)
+# TYPE windows_mssql_waitstats_lock_waits counter
+windows_mssql_waitstats_lock_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_lock_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_lock_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_lock_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_log_buffer_waits (WaitStats.LogBufferWaits)
+# TYPE windows_mssql_waitstats_log_buffer_waits counter
+windows_mssql_waitstats_log_buffer_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_buffer_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_buffer_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_buffer_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_log_write_waits (WaitStats.LogWriteWaits)
+# TYPE windows_mssql_waitstats_log_write_waits counter
+windows_mssql_waitstats_log_write_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_write_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_write_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_log_write_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_memory_grant_queue_waits (WaitStats.MemoryGrantQueueWaits)
+# TYPE windows_mssql_waitstats_memory_grant_queue_waits counter
+windows_mssql_waitstats_memory_grant_queue_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_memory_grant_queue_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_memory_grant_queue_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_memory_grant_queue_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_network_io_waits (WaitStats.NetworkIOWaits)
+# TYPE windows_mssql_waitstats_network_io_waits counter
+windows_mssql_waitstats_network_io_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_network_io_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_network_io_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_network_io_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_nonpage_latch_waits (WaitStats.NonpageLatchWaits)
+# TYPE windows_mssql_waitstats_nonpage_latch_waits counter
+windows_mssql_waitstats_nonpage_latch_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_nonpage_latch_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_nonpage_latch_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_nonpage_latch_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_page_io_latch_waits (WaitStats.PageIOLatchWaits)
+# TYPE windows_mssql_waitstats_page_io_latch_waits counter
+windows_mssql_waitstats_page_io_latch_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_io_latch_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_io_latch_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_io_latch_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_page_latch_waits (WaitStats.PageLatchWaits)
+# TYPE windows_mssql_waitstats_page_latch_waits counter
+windows_mssql_waitstats_page_latch_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_latch_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_latch_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_page_latch_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_thread_safe_memory_objects_waits (WaitStats.ThreadSafeMemoryObjectsWaits)
+# TYPE windows_mssql_waitstats_thread_safe_memory_objects_waits counter
+windows_mssql_waitstats_thread_safe_memory_objects_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_thread_safe_memory_objects_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_thread_safe_memory_objects_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_thread_safe_memory_objects_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_transaction_ownership_waits (WaitStats.TransactionOwnershipWaits)
+# TYPE windows_mssql_waitstats_transaction_ownership_waits counter
+windows_mssql_waitstats_transaction_ownership_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_transaction_ownership_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_transaction_ownership_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_transaction_ownership_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_wait_for_the_worker_waits (WaitStats.WaitForTheWorkerWaits)
+# TYPE windows_mssql_waitstats_wait_for_the_worker_waits counter
+windows_mssql_waitstats_wait_for_the_worker_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_wait_for_the_worker_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_wait_for_the_worker_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_wait_for_the_worker_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_mssql_waitstats_workspace_synchronization_waits (WaitStats.WorkspaceSynchronizationWaits)
+# TYPE windows_mssql_waitstats_workspace_synchronization_waits counter
+windows_mssql_waitstats_workspace_synchronization_waits{item="Average wait time (ms)",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_workspace_synchronization_waits{item="Cumulative wait time (ms) per second",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_workspace_synchronization_waits{item="Waits in progress",mssql_instance="SQLEXPRESS"} 0
+windows_mssql_waitstats_workspace_synchronization_waits{item="Waits started per second",mssql_instance="SQLEXPRESS"} 0
+# HELP windows_net_bytes_received_total (Network.BytesReceivedPerSec)
+# TYPE windows_net_bytes_received_total counter
+windows_net_bytes_received_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 4.786344482e+09
+# HELP windows_net_bytes_sent_total (Network.BytesSentPerSec)
+# TYPE windows_net_bytes_sent_total counter
+windows_net_bytes_sent_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 1.026395688e+09
+# HELP windows_net_bytes_total (Network.BytesTotalPerSec)
+# TYPE windows_net_bytes_total counter
+windows_net_bytes_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 5.81274017e+09
+# HELP windows_net_current_bandwidth_bytes (Network.CurrentBandwidth)
+# TYPE windows_net_current_bandwidth_bytes gauge
+windows_net_current_bandwidth_bytes{nic="Intel_R__PRO_1000_MT_Network_Connection"} 1.25e+08
+# HELP windows_net_packets_outbound_discarded_total (Network.PacketsOutboundDiscarded)
+# TYPE windows_net_packets_outbound_discarded_total counter
+windows_net_packets_outbound_discarded_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_outbound_errors_total (Network.PacketsOutboundErrors)
+# TYPE windows_net_packets_outbound_errors_total counter
+windows_net_packets_outbound_errors_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_received_discarded_total (Network.PacketsReceivedDiscarded)
+# TYPE windows_net_packets_received_discarded_total counter
+windows_net_packets_received_discarded_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_received_errors_total (Network.PacketsReceivedErrors)
+# TYPE windows_net_packets_received_errors_total counter
+windows_net_packets_received_errors_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_received_total (Network.PacketsReceivedPerSec)
+# TYPE windows_net_packets_received_total counter
+windows_net_packets_received_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 4.120869e+06
+# HELP windows_net_packets_received_unknown_total (Network.PacketsReceivedUnknown)
+# TYPE windows_net_packets_received_unknown_total counter
+windows_net_packets_received_unknown_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 0
+# HELP windows_net_packets_sent_total (Network.PacketsSentPerSec)
+# TYPE windows_net_packets_sent_total counter
+windows_net_packets_sent_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 1.332466e+06
+# HELP windows_net_packets_total (Network.PacketsPerSec)
+# TYPE windows_net_packets_total counter
+windows_net_packets_total{nic="Intel_R__PRO_1000_MT_Network_Connection"} 5.453335e+06
+# HELP windows_netframework_clrexceptions_exceptions_filters_total Displays the total number of .NET exception filters executed. An exception filter evaluates regardless of whether an exception is handled.
+# TYPE windows_netframework_clrexceptions_exceptions_filters_total counter
+windows_netframework_clrexceptions_exceptions_filters_total{process="WMSvc"} 0
+windows_netframework_clrexceptions_exceptions_filters_total{process="powershell"} 0
+# HELP windows_netframework_clrexceptions_exceptions_finallys_total Displays the total number of finally blocks executed. Only the finally blocks executed for an exception are counted; finally blocks on normal code paths are not counted by this counter.
+# TYPE windows_netframework_clrexceptions_exceptions_finallys_total counter
+windows_netframework_clrexceptions_exceptions_finallys_total{process="WMSvc"} 0
+windows_netframework_clrexceptions_exceptions_finallys_total{process="powershell"} 56
+# HELP windows_netframework_clrexceptions_exceptions_thrown_total Displays the total number of exceptions thrown since the application started. This includes both .NET exceptions and unmanaged exceptions that are converted into .NET exceptions.
+# TYPE windows_netframework_clrexceptions_exceptions_thrown_total counter
+windows_netframework_clrexceptions_exceptions_thrown_total{process="WMSvc"} 0
+windows_netframework_clrexceptions_exceptions_thrown_total{process="powershell"} 37
+# HELP windows_netframework_clrexceptions_throw_to_catch_depth_total Displays the total number of stack frames traversed, from the frame that threw the exception to the frame that handled the exception.
+# TYPE windows_netframework_clrexceptions_throw_to_catch_depth_total counter
+windows_netframework_clrexceptions_throw_to_catch_depth_total{process="WMSvc"} 0
+windows_netframework_clrexceptions_throw_to_catch_depth_total{process="powershell"} 140
+# HELP windows_netframework_clrinterop_com_callable_wrappers_total Displays the current number of COM callable wrappers (CCWs). A CCW is a proxy for a managed object being referenced from an unmanaged COM client.
+# TYPE windows_netframework_clrinterop_com_callable_wrappers_total counter
+windows_netframework_clrinterop_com_callable_wrappers_total{process="WMSvc"} 2
+windows_netframework_clrinterop_com_callable_wrappers_total{process="powershell"} 5
+# HELP windows_netframework_clrinterop_interop_marshalling_total Displays the total number of times arguments and return values have been marshaled from managed to unmanaged code, and vice versa, since the application started.
+# TYPE windows_netframework_clrinterop_interop_marshalling_total counter
+windows_netframework_clrinterop_interop_marshalling_total{process="WMSvc"} 0
+windows_netframework_clrinterop_interop_marshalling_total{process="powershell"} 0
+# HELP windows_netframework_clrinterop_interop_stubs_created_total Displays the current number of stubs created by the common language runtime. Stubs are responsible for marshaling arguments and return values from managed to unmanaged code, and vice versa, during a COM interop call or a platform invoke call.
+# TYPE windows_netframework_clrinterop_interop_stubs_created_total counter
+windows_netframework_clrinterop_interop_stubs_created_total{process="WMSvc"} 29
+windows_netframework_clrinterop_interop_stubs_created_total{process="powershell"} 345
+# HELP windows_netframework_clrjit_jit_il_bytes_total Displays the total number of Microsoft intermediate language (MSIL) bytes compiled by the just-in-time (JIT) compiler since the application started
+# TYPE windows_netframework_clrjit_jit_il_bytes_total counter
+windows_netframework_clrjit_jit_il_bytes_total{process="WMSvc"} 4007
+windows_netframework_clrjit_jit_il_bytes_total{process="powershell"} 47021
+# HELP windows_netframework_clrjit_jit_methods_total Displays the total number of methods JIT-compiled since the application started. This counter does not include pre-JIT-compiled methods.
+# TYPE windows_netframework_clrjit_jit_methods_total counter
+windows_netframework_clrjit_jit_methods_total{process="WMSvc"} 27
+windows_netframework_clrjit_jit_methods_total{process="powershell"} 344
+# HELP windows_netframework_clrjit_jit_standard_failures_total Displays the peak number of methods the JIT compiler has failed to compile since the application started. This failure can occur if the MSIL cannot be verified or if there is an internal error in the JIT compiler.
+# TYPE windows_netframework_clrjit_jit_standard_failures_total gauge
+windows_netframework_clrjit_jit_standard_failures_total{process="WMSvc"} 0
+windows_netframework_clrjit_jit_standard_failures_total{process="powershell"} 0
+# HELP windows_netframework_clrjit_jit_time_percent Displays the percentage of time spent in JIT compilation. This counter is updated at the end of every JIT compilation phase. A JIT compilation phase occurs when a method and its dependencies are compiled.
+# TYPE windows_netframework_clrjit_jit_time_percent gauge
+windows_netframework_clrjit_jit_time_percent{process="WMSvc"} 0
+windows_netframework_clrjit_jit_time_percent{process="powershell"} 0
+# HELP windows_netframework_clrloading_appdomains_loaded_current Displays the current number of application domains loaded in this application.
+# TYPE windows_netframework_clrloading_appdomains_loaded_current gauge
+windows_netframework_clrloading_appdomains_loaded_current{process="WMSvc"} 1
+windows_netframework_clrloading_appdomains_loaded_current{process="powershell"} 1
+# HELP windows_netframework_clrloading_appdomains_loaded_total Displays the peak number of application domains loaded since the application started.
+# TYPE windows_netframework_clrloading_appdomains_loaded_total counter
+windows_netframework_clrloading_appdomains_loaded_total{process="WMSvc"} 1
+windows_netframework_clrloading_appdomains_loaded_total{process="powershell"} 1
+# HELP windows_netframework_clrloading_appdomains_unloaded_total Displays the total number of application domains unloaded since the application started. If an application domain is loaded and unloaded multiple times, this counter increments each time the application domain is unloaded.
+# TYPE windows_netframework_clrloading_appdomains_unloaded_total counter
+windows_netframework_clrloading_appdomains_unloaded_total{process="WMSvc"} 0
+windows_netframework_clrloading_appdomains_unloaded_total{process="powershell"} 0
+# HELP windows_netframework_clrloading_assemblies_loaded_current Displays the current number of assemblies loaded across all application domains in the currently running application. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.
+# TYPE windows_netframework_clrloading_assemblies_loaded_current gauge
+windows_netframework_clrloading_assemblies_loaded_current{process="WMSvc"} 5
+windows_netframework_clrloading_assemblies_loaded_current{process="powershell"} 20
+# HELP windows_netframework_clrloading_assemblies_loaded_total Displays the total number of assemblies loaded since the application started. If the assembly is loaded as domain-neutral from multiple application domains, this counter is incremented only once.
+# TYPE windows_netframework_clrloading_assemblies_loaded_total counter
+windows_netframework_clrloading_assemblies_loaded_total{process="WMSvc"} 5
+windows_netframework_clrloading_assemblies_loaded_total{process="powershell"} 20
+# HELP windows_netframework_clrloading_class_load_failures_total Displays the peak number of classes that have failed to load since the application started.
+# TYPE windows_netframework_clrloading_class_load_failures_total counter
+windows_netframework_clrloading_class_load_failures_total{process="WMSvc"} 0
+windows_netframework_clrloading_class_load_failures_total{process="powershell"} 1
+# HELP windows_netframework_clrloading_classes_loaded_current Displays the current number of classes loaded in all assemblies.
+# TYPE windows_netframework_clrloading_classes_loaded_current gauge
+windows_netframework_clrloading_classes_loaded_current{process="WMSvc"} 18
+windows_netframework_clrloading_classes_loaded_current{process="powershell"} 477
+# HELP windows_netframework_clrloading_classes_loaded_total Displays the cumulative number of classes loaded in all assemblies since the application started.
+# TYPE windows_netframework_clrloading_classes_loaded_total counter
+windows_netframework_clrloading_classes_loaded_total{process="WMSvc"} 18
+windows_netframework_clrloading_classes_loaded_total{process="powershell"} 477
+# HELP windows_netframework_clrloading_loader_heap_size_bytes Displays the current size, in bytes, of the memory committed by the class loader across all application domains. Committed memory is the physical space reserved in the disk paging file.
+# TYPE windows_netframework_clrloading_loader_heap_size_bytes gauge
+windows_netframework_clrloading_loader_heap_size_bytes{process="WMSvc"} 270336
+windows_netframework_clrloading_loader_heap_size_bytes{process="powershell"} 2.285568e+06
+# HELP windows_netframework_clrlocksandthreads_contentions_total Displays the total number of times that threads in the runtime have attempted to acquire a managed lock unsuccessfully.
+# TYPE windows_netframework_clrlocksandthreads_contentions_total counter
+windows_netframework_clrlocksandthreads_contentions_total{process="WMSvc"} 0
+windows_netframework_clrlocksandthreads_contentions_total{process="powershell"} 10
+# HELP windows_netframework_clrlocksandthreads_current_logical_threads Displays the number of current managed thread objects in the application. This counter maintains the count of both running and stopped threads.
+# TYPE windows_netframework_clrlocksandthreads_current_logical_threads gauge
+windows_netframework_clrlocksandthreads_current_logical_threads{process="WMSvc"} 2
+windows_netframework_clrlocksandthreads_current_logical_threads{process="powershell"} 16
+# HELP windows_netframework_clrlocksandthreads_current_queue_length Displays the total number of threads that are currently waiting to acquire a managed lock in the application.
+# TYPE windows_netframework_clrlocksandthreads_current_queue_length gauge
+windows_netframework_clrlocksandthreads_current_queue_length{process="WMSvc"} 0
+windows_netframework_clrlocksandthreads_current_queue_length{process="powershell"} 0
+# HELP windows_netframework_clrlocksandthreads_physical_threads_current Displays the number of native operating system threads created and owned by the common language runtime to act as underlying threads for managed thread objects. This counter's value does not include the threads used by the runtime in its internal operations; it is a subset of the threads in the operating system process.
+# TYPE windows_netframework_clrlocksandthreads_physical_threads_current gauge
+windows_netframework_clrlocksandthreads_physical_threads_current{process="WMSvc"} 1
+windows_netframework_clrlocksandthreads_physical_threads_current{process="powershell"} 13
+# HELP windows_netframework_clrlocksandthreads_queue_length_total Displays the total number of threads that waited to acquire a managed lock since the application started.
+# TYPE windows_netframework_clrlocksandthreads_queue_length_total counter
+windows_netframework_clrlocksandthreads_queue_length_total{process="WMSvc"} 0
+windows_netframework_clrlocksandthreads_queue_length_total{process="powershell"} 3
+# HELP windows_netframework_clrlocksandthreads_recognized_threads_current Displays the number of threads that are currently recognized by the runtime. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.
+# TYPE windows_netframework_clrlocksandthreads_recognized_threads_current gauge
+windows_netframework_clrlocksandthreads_recognized_threads_current{process="WMSvc"} 1
+windows_netframework_clrlocksandthreads_recognized_threads_current{process="powershell"} 3
+# HELP windows_netframework_clrlocksandthreads_recognized_threads_total Displays the total number of threads that have been recognized by the runtime since the application started. These threads are associated with a corresponding managed thread object. The runtime does not create these threads, but they have run inside the runtime at least once.
+# TYPE windows_netframework_clrlocksandthreads_recognized_threads_total counter
+windows_netframework_clrlocksandthreads_recognized_threads_total{process="WMSvc"} 1
+windows_netframework_clrlocksandthreads_recognized_threads_total{process="powershell"} 6
+# HELP windows_netframework_clrmemory_allocated_bytes_total Displays the total number of bytes allocated on the garbage collection heap.
+# TYPE windows_netframework_clrmemory_allocated_bytes_total counter
+windows_netframework_clrmemory_allocated_bytes_total{process="WMSvc"} 227792
+windows_netframework_clrmemory_allocated_bytes_total{process="powershell"} 4.63338e+07
+# HELP windows_netframework_clrmemory_collections_total Displays the number of times the generation objects are garbage collected since the application started.
+# TYPE windows_netframework_clrmemory_collections_total counter
+windows_netframework_clrmemory_collections_total{area="Gen0",process="WMSvc"} 1
+windows_netframework_clrmemory_collections_total{area="Gen0",process="powershell"} 7
+windows_netframework_clrmemory_collections_total{area="Gen1",process="WMSvc"} 1
+windows_netframework_clrmemory_collections_total{area="Gen1",process="powershell"} 3
+windows_netframework_clrmemory_collections_total{area="Gen2",process="WMSvc"} 0
+windows_netframework_clrmemory_collections_total{area="Gen2",process="powershell"} 1
+# HELP windows_netframework_clrmemory_committed_bytes Displays the amount of virtual memory, in bytes, currently committed by the garbage collector. Committed memory is the physical memory for which space has been reserved in the disk paging file.
+# TYPE windows_netframework_clrmemory_committed_bytes gauge
+windows_netframework_clrmemory_committed_bytes{process="WMSvc"} 270336
+windows_netframework_clrmemory_committed_bytes{process="powershell"} 2.0475904e+07
+# HELP windows_netframework_clrmemory_finalization_survivors Displays the number of garbage-collected objects that survive a collection because they are waiting to be finalized.
+# TYPE windows_netframework_clrmemory_finalization_survivors gauge
+windows_netframework_clrmemory_finalization_survivors{process="WMSvc"} 7
+windows_netframework_clrmemory_finalization_survivors{process="powershell"} 244
+# HELP windows_netframework_clrmemory_gc_time_percent Displays the percentage of time that was spent performing a garbage collection in the last sample.
+# TYPE windows_netframework_clrmemory_gc_time_percent gauge
+windows_netframework_clrmemory_gc_time_percent{process="WMSvc"} 0
+windows_netframework_clrmemory_gc_time_percent{process="powershell"} 0.00027784979937050934
+# HELP windows_netframework_clrmemory_heap_size_bytes Displays the maximum bytes that can be allocated; it does not indicate the current number of bytes allocated.
+# TYPE windows_netframework_clrmemory_heap_size_bytes gauge
+windows_netframework_clrmemory_heap_size_bytes{area="Gen0",process="WMSvc"} 4.194304e+06
+windows_netframework_clrmemory_heap_size_bytes{area="Gen0",process="powershell"} 2.6417392e+07
+windows_netframework_clrmemory_heap_size_bytes{area="Gen1",process="WMSvc"} 50200
+windows_netframework_clrmemory_heap_size_bytes{area="Gen1",process="powershell"} 122776
+windows_netframework_clrmemory_heap_size_bytes{area="Gen2",process="WMSvc"} 24
+windows_netframework_clrmemory_heap_size_bytes{area="Gen2",process="powershell"} 6.71388e+06
+windows_netframework_clrmemory_heap_size_bytes{area="LOH",process="WMSvc"} 68168
+windows_netframework_clrmemory_heap_size_bytes{area="LOH",process="powershell"} 1.457824e+06
+# HELP windows_netframework_clrmemory_induced_gc_total Displays the peak number of times garbage collection was performed because of an explicit call to GC.Collect.
+# TYPE windows_netframework_clrmemory_induced_gc_total counter
+windows_netframework_clrmemory_induced_gc_total{process="WMSvc"} 0
+windows_netframework_clrmemory_induced_gc_total{process="powershell"} 0
+# HELP windows_netframework_clrmemory_number_gc_handles Displays the current number of garbage collection handles in use. Garbage collection handles are handles to resources external to the common language runtime and the managed environment.
+# TYPE windows_netframework_clrmemory_number_gc_handles gauge
+windows_netframework_clrmemory_number_gc_handles{process="WMSvc"} 24
+windows_netframework_clrmemory_number_gc_handles{process="powershell"} 834
+# HELP windows_netframework_clrmemory_number_pinned_objects Displays the number of pinned objects encountered in the last garbage collection.
+# TYPE windows_netframework_clrmemory_number_pinned_objects gauge
+windows_netframework_clrmemory_number_pinned_objects{process="WMSvc"} 1
+windows_netframework_clrmemory_number_pinned_objects{process="powershell"} 0
+# HELP windows_netframework_clrmemory_number_sink_blocksinuse Displays the current number of synchronization blocks in use. Synchronization blocks are per-object data structures allocated for storing synchronization information. They hold weak references to managed objects and must be scanned by the garbage collector.
+# TYPE windows_netframework_clrmemory_number_sink_blocksinuse gauge
+windows_netframework_clrmemory_number_sink_blocksinuse{process="WMSvc"} 1
+windows_netframework_clrmemory_number_sink_blocksinuse{process="powershell"} 42
+# HELP windows_netframework_clrmemory_promoted_bytes Displays the bytes that were promoted from the generation to the next one during the last GC. Memory is promoted when it survives a garbage collection.
+# TYPE windows_netframework_clrmemory_promoted_bytes gauge
+windows_netframework_clrmemory_promoted_bytes{area="Gen0",process="WMSvc"} 49720
+windows_netframework_clrmemory_promoted_bytes{area="Gen0",process="powershell"} 107352
+windows_netframework_clrmemory_promoted_bytes{area="Gen1",process="WMSvc"} 0
+windows_netframework_clrmemory_promoted_bytes{area="Gen1",process="powershell"} 0
+# HELP windows_netframework_clrmemory_reserved_bytes Displays the amount of virtual memory, in bytes, currently reserved by the garbage collector. Reserved memory is the virtual memory space reserved for the application when no disk or main memory pages have been used.
+# TYPE windows_netframework_clrmemory_reserved_bytes gauge
+windows_netframework_clrmemory_reserved_bytes{process="WMSvc"} 4.02644992e+08
+windows_netframework_clrmemory_reserved_bytes{process="powershell"} 4.02644992e+08
+# HELP windows_netframework_clrremoting_channels_total Displays the total number of remoting channels registered across all application domains since application started.
+# TYPE windows_netframework_clrremoting_channels_total counter
+windows_netframework_clrremoting_channels_total{process="WMSvc"} 0
+windows_netframework_clrremoting_channels_total{process="powershell"} 0
+# HELP windows_netframework_clrremoting_context_bound_classes_loaded Displays the current number of context-bound classes that are loaded.
+# TYPE windows_netframework_clrremoting_context_bound_classes_loaded gauge
+windows_netframework_clrremoting_context_bound_classes_loaded{process="WMSvc"} 0
+windows_netframework_clrremoting_context_bound_classes_loaded{process="powershell"} 0
+# HELP windows_netframework_clrremoting_context_bound_objects_total Displays the total number of context-bound objects allocated.
+# TYPE windows_netframework_clrremoting_context_bound_objects_total counter
+windows_netframework_clrremoting_context_bound_objects_total{process="WMSvc"} 0
+windows_netframework_clrremoting_context_bound_objects_total{process="powershell"} 0
+# HELP windows_netframework_clrremoting_context_proxies_total Displays the total number of remoting proxy objects in this process since it started.
+# TYPE windows_netframework_clrremoting_context_proxies_total counter
+windows_netframework_clrremoting_context_proxies_total{process="WMSvc"} 0
+windows_netframework_clrremoting_context_proxies_total{process="powershell"} 0
+# HELP windows_netframework_clrremoting_contexts Displays the current number of remoting contexts in the application.
+# TYPE windows_netframework_clrremoting_contexts gauge
+windows_netframework_clrremoting_contexts{process="WMSvc"} 1
+windows_netframework_clrremoting_contexts{process="powershell"} 1
+# HELP windows_netframework_clrremoting_remote_calls_total Displays the total number of remote procedure calls invoked since the application started.
+# TYPE windows_netframework_clrremoting_remote_calls_total counter
+windows_netframework_clrremoting_remote_calls_total{process="WMSvc"} 0
+windows_netframework_clrremoting_remote_calls_total{process="powershell"} 0
+# HELP windows_netframework_clrsecurity_link_time_checks_total Displays the total number of link-time code access security checks since the application started.
+# TYPE windows_netframework_clrsecurity_link_time_checks_total counter
+windows_netframework_clrsecurity_link_time_checks_total{process="WMSvc"} 0
+windows_netframework_clrsecurity_link_time_checks_total{process="powershell"} 0
+# HELP windows_netframework_clrsecurity_rt_checks_time_percent Displays the percentage of time spent performing runtime code access security checks in the last sample.
+# TYPE windows_netframework_clrsecurity_rt_checks_time_percent gauge
+windows_netframework_clrsecurity_rt_checks_time_percent{process="WMSvc"} 0
+windows_netframework_clrsecurity_rt_checks_time_percent{process="powershell"} 0
+# HELP windows_netframework_clrsecurity_runtime_checks_total Displays the total number of runtime code access security checks performed since the application started.
+# TYPE windows_netframework_clrsecurity_runtime_checks_total counter
+windows_netframework_clrsecurity_runtime_checks_total{process="WMSvc"} 3
+windows_netframework_clrsecurity_runtime_checks_total{process="powershell"} 4386
+# HELP windows_netframework_clrsecurity_stack_walk_depth Displays the depth of the stack during that last runtime code access security check.
+# TYPE windows_netframework_clrsecurity_stack_walk_depth gauge
+windows_netframework_clrsecurity_stack_walk_depth{process="WMSvc"} 1
+windows_netframework_clrsecurity_stack_walk_depth{process="powershell"} 1
+# HELP windows_os_info OperatingSystem.Caption, OperatingSystem.Version
+# TYPE windows_os_info gauge
+windows_os_info{build_number="22621",major_version="10",minor_version="0",product="Microsoft Windows 10 Pro",version="10.0.22621"} 1
+# HELP windows_os_paging_free_bytes OperatingSystem.FreeSpaceInPagingFiles
+# TYPE windows_os_paging_free_bytes gauge
+windows_os_paging_free_bytes 1.414107136e+09
+# HELP windows_os_paging_limit_bytes OperatingSystem.SizeStoredInPagingFiles
+# TYPE windows_os_paging_limit_bytes gauge
+windows_os_paging_limit_bytes 1.476395008e+09
+# HELP windows_os_physical_memory_free_bytes OperatingSystem.FreePhysicalMemory
+# TYPE windows_os_physical_memory_free_bytes gauge
+windows_os_physical_memory_free_bytes 1.379946496e+09
+# HELP windows_os_process_memory_limit_bytes OperatingSystem.MaxProcessMemorySize
+# TYPE windows_os_process_memory_limit_bytes gauge
+windows_os_process_memory_limit_bytes 1.40737488224256e+14
+# HELP windows_os_processes OperatingSystem.NumberOfProcesses
+# TYPE windows_os_processes gauge
+windows_os_processes 152
+# HELP windows_os_processes_limit OperatingSystem.MaxNumberOfProcesses
+# TYPE windows_os_processes_limit gauge
+windows_os_processes_limit 4.294967295e+09
+# HELP windows_os_time OperatingSystem.LocalDateTime
+# TYPE windows_os_time gauge
+windows_os_time 1.667508748e+09
+# HELP windows_os_timezone OperatingSystem.LocalDateTime
+# TYPE windows_os_timezone gauge
+windows_os_timezone{timezone="EET"} 1
+# HELP windows_os_users OperatingSystem.NumberOfUsers
+# TYPE windows_os_users gauge
+windows_os_users 2
+# HELP windows_os_virtual_memory_bytes OperatingSystem.TotalVirtualMemorySize
+# TYPE windows_os_virtual_memory_bytes gauge
+windows_os_virtual_memory_bytes 5.733113856e+09
+# HELP windows_os_virtual_memory_free_bytes OperatingSystem.FreeVirtualMemory
+# TYPE windows_os_virtual_memory_free_bytes gauge
+windows_os_virtual_memory_free_bytes 2.285674496e+09
+# HELP windows_os_visible_memory_bytes OperatingSystem.TotalVisibleMemorySize
+# TYPE windows_os_visible_memory_bytes gauge
+windows_os_visible_memory_bytes 4.256718848e+09
+# HELP windows_process_cpu_time_total Returns elapsed time that all of the threads of this process used the processor to execute instructions by mode (privileged, user).
+# TYPE windows_process_cpu_time_total counter
+windows_process_cpu_time_total{creating_process_id="4300",mode="privileged",process="msedge",process_id="6032"} 21.78125
+windows_process_cpu_time_total{creating_process_id="4300",mode="user",process="msedge",process_id="6032"} 31.46875
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="1204"} 0.09375
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="2296"} 0.203125
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="3044"} 0.15625
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="3728"} 0.28125
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="5060"} 110.171875
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="5904"} 0.359375
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="5936"} 37.40625
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="7800"} 0.03125
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="844"} 1.765625
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="8512"} 0.40625
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="8736"} 47.796875
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="896"} 69.1875
+windows_process_cpu_time_total{creating_process_id="6032",mode="privileged",process="msedge",process_id="900"} 0.265625
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="1204"} 0.171875
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="2296"} 0.28125
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="3044"} 0.734375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="3728"} 0.734375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="5060"} 1281.59375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="5904"} 0.84375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="5936"} 52.515625
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="7800"} 0.015625
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="844"} 10.109375
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="8512"} 1.203125
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="8736"} 85.71875
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="896"} 163.78125
+windows_process_cpu_time_total{creating_process_id="6032",mode="user",process="msedge",process_id="900"} 0.828125
+# HELP windows_process_handles Total number of handles the process has open. This number is the sum of the handles currently open by each thread in the process.
+# TYPE windows_process_handles gauge
+windows_process_handles{creating_process_id="4300",process="msedge",process_id="6032"} 1868
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="1204"} 227
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="2296"} 254
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="3044"} 285
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="3728"} 220
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="5060"} 443
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="5904"} 271
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="5936"} 298
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="7800"} 204
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="844"} 379
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="8512"} 274
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="8736"} 245
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="896"} 488
+windows_process_handles{creating_process_id="6032",process="msedge",process_id="900"} 323
+# HELP windows_process_io_bytes_total Bytes issued to I/O operations in different modes (read, write, other).
+# TYPE windows_process_io_bytes_total counter
+windows_process_io_bytes_total{creating_process_id="4300",mode="other",process="msedge",process_id="6032"} 4.348941e+06
+windows_process_io_bytes_total{creating_process_id="4300",mode="read",process="msedge",process_id="6032"} 3.30817247e+08
+windows_process_io_bytes_total{creating_process_id="4300",mode="write",process="msedge",process_id="6032"} 4.71331306e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="1204"} 26082
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="2296"} 26144
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="3044"} 26078
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="3728"} 23912
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="5060"} 26596
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="5904"} 30800
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="5936"} 1.83334e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="7800"} 5128
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="844"} 26598
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="8512"} 26174
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="8736"} 26268
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="896"} 188254
+windows_process_io_bytes_total{creating_process_id="6032",mode="other",process="msedge",process_id="900"} 26142
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="1204"} 68868
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="2296"} 261004
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="3044"} 400260
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="3728"} 734626
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="5060"} 7.35770137e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="5904"} 45529
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="5936"} 2.72541538e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="7800"} 8804
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="844"} 2.4573337e+07
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="8512"} 1.0120572e+07
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="8736"} 7.202112e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="896"} 5.49114536e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="read",process="msedge",process_id="900"} 656823
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="1204"} 249336
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="2296"} 576080
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="3044"} 1.7264e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="3728"} 1.257063e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="5060"} 7.54045349e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="5904"} 217248
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="5936"} 4.55388644e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="7800"} 1128
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="844"} 1.5475693e+07
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="8512"} 3.635552e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="8736"} 7.987096e+06
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="896"} 3.26369864e+08
+windows_process_io_bytes_total{creating_process_id="6032",mode="write",process="msedge",process_id="900"} 1.010769e+06
+# HELP windows_process_io_operations_total I/O operations issued in different modes (read, write, other).
+# TYPE windows_process_io_operations_total counter
+windows_process_io_operations_total{creating_process_id="4300",mode="other",process="msedge",process_id="6032"} 113456
+windows_process_io_operations_total{creating_process_id="4300",mode="read",process="msedge",process_id="6032"} 294229
+windows_process_io_operations_total{creating_process_id="4300",mode="write",process="msedge",process_id="6032"} 200349
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="1204"} 331
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="2296"} 335
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="3044"} 349
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="3728"} 327
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="5060"} 399
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="5904"} 395
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="5936"} 78519
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="7800"} 673
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="844"} 359
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="8512"} 340
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="8736"} 394
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="896"} 4069
+windows_process_io_operations_total{creating_process_id="6032",mode="other",process="msedge",process_id="900"} 337
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="1204"} 74
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="2296"} 732
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="3044"} 950
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="3728"} 1447
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="5060"} 3.995322e+06
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="5904"} 124
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="5936"} 1.571962e+06
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="7800"} 102
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="844"} 20686
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="8512"} 6686
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="8736"} 1.788249e+06
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="896"} 537551
+windows_process_io_operations_total{creating_process_id="6032",mode="read",process="msedge",process_id="900"} 1519
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="1204"} 114
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="2296"} 437
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="3044"} 1405
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="3728"} 3705
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="5060"} 3.848906e+06
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="5904"} 118
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="5936"} 1.701602e+06
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="7800"} 94
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="844"} 24678
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="8512"} 9689
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="8736"} 1.790946e+06
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="896"} 734759
+windows_process_io_operations_total{creating_process_id="6032",mode="write",process="msedge",process_id="900"} 1924
+# HELP windows_process_page_faults_total Page faults by the threads executing in this process.
+# TYPE windows_process_page_faults_total counter
+windows_process_page_faults_total{creating_process_id="4300",process="msedge",process_id="6032"} 296027
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="1204"} 7965
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="2296"} 11749
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="3044"} 41335
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="3728"} 9529
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="5060"} 3.750099e+06
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="5904"} 8101
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="5936"} 533380
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="7800"} 2636
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="844"} 402098
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="8512"} 35487
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="8736"} 9427
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="896"} 205035
+windows_process_page_faults_total{creating_process_id="6032",process="msedge",process_id="900"} 43073
+# HELP windows_process_page_file_bytes Current number of bytes this process has used in the paging file(s).
+# TYPE windows_process_page_file_bytes gauge
+windows_process_page_file_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 7.041024e+07
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 1.3561856e+07
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 1.5511552e+07
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 3.0756864e+07
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 8.298496e+06
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 3.32230656e+08
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 8.97024e+06
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 1.3877248e+07
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 2.060288e+06
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="844"} 9.2012544e+07
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 2.0672512e+07
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 8.126464e+06
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="896"} 4.1484288e+07
+windows_process_page_file_bytes{creating_process_id="6032",process="msedge",process_id="900"} 2.3629824e+07
+# HELP windows_process_pool_bytes Pool Bytes is the last observed number of bytes in the paged or nonpaged pool.
+# TYPE windows_process_pool_bytes gauge
+windows_process_pool_bytes{creating_process_id="4300",pool="nonpaged",process="msedge",process_id="6032"} 72072
+windows_process_pool_bytes{creating_process_id="4300",pool="paged",process="msedge",process_id="6032"} 1.262872e+06
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="1204"} 15544
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="2296"} 16024
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="3044"} 17816
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="3728"} 14544
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="5060"} 24600
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="5904"} 16992
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="5936"} 19088
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="7800"} 9920
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="844"} 18472
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="8512"} 18536
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="8736"} 15944
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="896"} 34464
+windows_process_pool_bytes{creating_process_id="6032",pool="nonpaged",process="msedge",process_id="900"} 17040
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="1204"} 651472
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="2296"} 665496
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="3044"} 674248
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="3728"} 656216
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="5060"} 849040
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="5904"} 722296
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="5936"} 705232
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="7800"} 140256
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="844"} 680896
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="8512"} 679648
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="8736"} 677152
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="896"} 839128
+windows_process_pool_bytes{creating_process_id="6032",pool="paged",process="msedge",process_id="900"} 682408
+# HELP windows_process_priority_base Current base priority of this process. Threads within a process can raise and lower their own base priority relative to the process base priority of the process.
+# TYPE windows_process_priority_base gauge
+windows_process_priority_base{creating_process_id="4300",process="msedge",process_id="6032"} 8
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="1204"} 4
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="2296"} 4
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="3044"} 8
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="3728"} 8
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="5060"} 8
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="5904"} 8
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="5936"} 8
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="7800"} 8
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="844"} 4
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="8512"} 8
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="8736"} 8
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="896"} 10
+windows_process_priority_base{creating_process_id="6032",process="msedge",process_id="900"} 4
+# HELP windows_process_private_bytes Current number of bytes this process has allocated that cannot be shared with other processes.
+# TYPE windows_process_private_bytes gauge
+windows_process_private_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 7.041024e+07
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 1.3561856e+07
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 1.5511552e+07
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 3.0756864e+07
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 8.298496e+06
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 3.32230656e+08
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 8.97024e+06
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 1.3877248e+07
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 2.060288e+06
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="844"} 9.2012544e+07
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 2.0672512e+07
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 8.126464e+06
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="896"} 4.1484288e+07
+windows_process_private_bytes{creating_process_id="6032",process="msedge",process_id="900"} 2.3629824e+07
+# HELP windows_process_start_time Time of process start.
+# TYPE windows_process_start_time gauge
+windows_process_start_time{creating_process_id="4300",process="msedge",process_id="6032"} 1.6674729863403437e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="1204"} 1.667489261506441e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="2296"} 1.6674729883723967e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="3044"} 1.6674892546961231e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="3728"} 1.667472986486918e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="5060"} 1.6674729865421767e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="5904"} 1.6674730465087523e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="5936"} 1.6674729864704254e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="7800"} 1.667472986365871e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="844"} 1.6674729865463045e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="8512"} 1.6674729970112965e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="8736"} 1.667472989342484e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="896"} 1.667472986462684e+09
+windows_process_start_time{creating_process_id="6032",process="msedge",process_id="900"} 1.667472995850073e+09
+# HELP windows_process_threads Number of threads currently active in this process.
+# TYPE windows_process_threads gauge
+windows_process_threads{creating_process_id="4300",process="msedge",process_id="6032"} 38
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="1204"} 12
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="2296"} 15
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="3044"} 15
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="3728"} 9
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="5060"} 21
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="5904"} 9
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="5936"} 12
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="7800"} 7
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="844"} 17
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="8512"} 15
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="8736"} 9
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="896"} 19
+windows_process_threads{creating_process_id="6032",process="msedge",process_id="900"} 15
+# HELP windows_process_virtual_bytes Current size, in bytes, of the virtual address space that the process is using.
+# TYPE windows_process_virtual_bytes gauge
+windows_process_virtual_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 2.341704609792e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 3.48529324032e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 3.485321392128e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 3.48532901888e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 2.306839302144e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 3.485494009856e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 2.306863792128e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 2.30688589824e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 2.272204521472e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="844"} 3.486428184576e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 3.485333880832e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 2.306843000832e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="896"} 2.307077632e+12
+windows_process_virtual_bytes{creating_process_id="6032",process="msedge",process_id="900"} 3.485325856768e+12
+# HELP windows_process_working_set_bytes Maximum number of bytes in the working set of this process at any point in time. The working set is the set of memory pages touched recently by the threads in the process.
+# TYPE windows_process_working_set_bytes gauge
+windows_process_working_set_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 1.59309824e+08
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 2.7205632e+07
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 3.65568e+07
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 7.5198464e+07
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 1.7866752e+07
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 3.79973632e+08
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 2.3228416e+07
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 3.6646912e+07
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 6.950912e+06
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="844"} 1.32747264e+08
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 5.5025664e+07
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 1.9361792e+07
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="896"} 5.873664e+07
+windows_process_working_set_bytes{creating_process_id="6032",process="msedge",process_id="900"} 5.6283136e+07
+# HELP windows_process_working_set_peak_bytes Maximum size, in bytes, of the Working Set of this process at any point in time. The Working Set is the set of memory pages touched recently by the threads in the process.
+# TYPE windows_process_working_set_peak_bytes gauge
+windows_process_working_set_peak_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 1.73211648e+08
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 2.7205632e+07
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 4.1439232e+07
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 9.2250112e+07
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 1.9263488e+07
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 4.54914048e+08
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 2.4363008e+07
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 4.2278912e+07
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 7.626752e+06
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="844"} 2.28954112e+08
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 5.9830272e+07
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 2.0250624e+07
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="896"} 7.835648e+07
+windows_process_working_set_peak_bytes{creating_process_id="6032",process="msedge",process_id="900"} 5.943296e+07
+# HELP windows_process_working_set_private_bytes Size of the working set, in bytes, that is use for this process only and not shared nor shareable by other processes.
+# TYPE windows_process_working_set_private_bytes gauge
+windows_process_working_set_private_bytes{creating_process_id="4300",process="msedge",process_id="6032"} 3.6057088e+07
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="1204"} 5.373952e+06
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="2296"} 2.072576e+06
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="3044"} 1.9554304e+07
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="3728"} 1.691648e+06
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="5060"} 2.96091648e+08
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="5904"} 1.654784e+06
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="5936"} 6.49216e+06
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="7800"} 421888
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="844"} 6.250496e+07
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="8512"} 7.59808e+06
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="8736"} 1.449984e+06
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="896"} 8.429568e+06
+windows_process_working_set_private_bytes{creating_process_id="6032",process="msedge",process_id="900"} 1.1952128e+07
+# HELP windows_service_info A metric with a constant '1' value labeled with service information
+# TYPE windows_service_info gauge
+windows_service_info{display_name="DHCP Client",name="dhcp",process_id="1908",run_as="NT Authority\\LocalService"} 1
+# HELP windows_service_start_mode The start mode of the service (StartMode)
+# TYPE windows_service_start_mode gauge
+windows_service_start_mode{name="dhcp",start_mode="auto"} 1
+windows_service_start_mode{name="dhcp",start_mode="boot"} 0
+windows_service_start_mode{name="dhcp",start_mode="disabled"} 0
+windows_service_start_mode{name="dhcp",start_mode="manual"} 0
+windows_service_start_mode{name="dhcp",start_mode="system"} 0
+# HELP windows_service_state The state of the service (State)
+# TYPE windows_service_state gauge
+windows_service_state{name="dhcp",state="continue pending"} 0
+windows_service_state{name="dhcp",state="pause pending"} 0
+windows_service_state{name="dhcp",state="paused"} 0
+windows_service_state{name="dhcp",state="running"} 1
+windows_service_state{name="dhcp",state="start pending"} 0
+windows_service_state{name="dhcp",state="stop pending"} 0
+windows_service_state{name="dhcp",state="stopped"} 0
+windows_service_state{name="dhcp",state="unknown"} 0
+# HELP windows_service_status The status of the service (Status)
+# TYPE windows_service_status gauge
+windows_service_status{name="dhcp",status="degraded"} 0
+windows_service_status{name="dhcp",status="error"} 0
+windows_service_status{name="dhcp",status="lost comm"} 0
+windows_service_status{name="dhcp",status="no contact"} 0
+windows_service_status{name="dhcp",status="nonrecover"} 0
+windows_service_status{name="dhcp",status="ok"} 1
+windows_service_status{name="dhcp",status="pred fail"} 0
+windows_service_status{name="dhcp",status="service"} 0
+windows_service_status{name="dhcp",status="starting"} 0
+windows_service_status{name="dhcp",status="stopping"} 0
+windows_service_status{name="dhcp",status="stressed"} 0
+windows_service_status{name="dhcp",status="unknown"} 0
+# HELP windows_system_context_switches_total Total number of context switches (WMI source: PerfOS_System.ContextSwitchesPersec)
+# TYPE windows_system_context_switches_total counter
+windows_system_context_switches_total 4.8655033e+08
+# HELP windows_system_exception_dispatches_total Total number of exceptions dispatched (WMI source: PerfOS_System.ExceptionDispatchesPersec)
+# TYPE windows_system_exception_dispatches_total counter
+windows_system_exception_dispatches_total 160348
+# HELP windows_system_processor_queue_length Length of processor queue (WMI source: PerfOS_System.ProcessorQueueLength)
+# TYPE windows_system_processor_queue_length gauge
+windows_system_processor_queue_length 0
+# HELP windows_system_system_calls_total Total number of system calls (WMI source: PerfOS_System.SystemCallsPersec)
+# TYPE windows_system_system_calls_total counter
+windows_system_system_calls_total 1.886567439e+09
+# HELP windows_system_system_up_time System boot time (WMI source: PerfOS_System.SystemUpTime)
+# TYPE windows_system_system_up_time gauge
+windows_system_system_up_time 1.6673440377290363e+09
+# HELP windows_system_threads Current number of threads (WMI source: PerfOS_System.Threads)
+# TYPE windows_system_threads gauge
+windows_system_threads 1559
+# HELP windows_tcp_connection_failures_total (TCP.ConnectionFailures)
+# TYPE windows_tcp_connection_failures_total counter
+windows_tcp_connection_failures_total{af="ipv4"} 137
+windows_tcp_connection_failures_total{af="ipv6"} 214
+# HELP windows_tcp_connections_active_total (TCP.ConnectionsActive)
+# TYPE windows_tcp_connections_active_total counter
+windows_tcp_connections_active_total{af="ipv4"} 4301
+windows_tcp_connections_active_total{af="ipv6"} 214
+# HELP windows_tcp_connections_established (TCP.ConnectionsEstablished)
+# TYPE windows_tcp_connections_established gauge
+windows_tcp_connections_established{af="ipv4"} 7
+windows_tcp_connections_established{af="ipv6"} 0
+# HELP windows_tcp_connections_passive_total (TCP.ConnectionsPassive)
+# TYPE windows_tcp_connections_passive_total counter
+windows_tcp_connections_passive_total{af="ipv4"} 501
+windows_tcp_connections_passive_total{af="ipv6"} 0
+# HELP windows_tcp_connections_reset_total (TCP.ConnectionsReset)
+# TYPE windows_tcp_connections_reset_total counter
+windows_tcp_connections_reset_total{af="ipv4"} 1282
+windows_tcp_connections_reset_total{af="ipv6"} 0
+# HELP windows_tcp_segments_received_total (TCP.SegmentsReceivedTotal)
+# TYPE windows_tcp_segments_received_total counter
+windows_tcp_segments_received_total{af="ipv4"} 676388
+windows_tcp_segments_received_total{af="ipv6"} 1284
+# HELP windows_tcp_segments_retransmitted_total (TCP.SegmentsRetransmittedTotal)
+# TYPE windows_tcp_segments_retransmitted_total counter
+windows_tcp_segments_retransmitted_total{af="ipv4"} 2120
+windows_tcp_segments_retransmitted_total{af="ipv6"} 428
+# HELP windows_tcp_segments_sent_total (TCP.SegmentsSentTotal)
+# TYPE windows_tcp_segments_sent_total counter
+windows_tcp_segments_sent_total{af="ipv4"} 871379
+windows_tcp_segments_sent_total{af="ipv6"} 856
+# HELP windows_tcp_segments_total (TCP.SegmentsTotal)
+# TYPE windows_tcp_segments_total counter
+windows_tcp_segments_total{af="ipv4"} 1.547767e+06
+windows_tcp_segments_total{af="ipv6"} 2140
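The file above is a captured windows_exporter v0.20.0 scrape in the Prometheus text exposition format, added as a test fixture under testdata/v0.20.0/metrics.txt. As a minimal sketch (not part of the diff), the fixture can be served from a throwaway local endpoint with net/http/httptest, which mirrors how the unit tests in windows_test.go below exercise the collector; the helper name here is hypothetical.

// Illustrative sketch only: serve the testdata fixture over HTTP so the
// collector can scrape it locally. Helper name and usage are hypothetical.
package windows

import (
	"net/http"
	"net/http/httptest"
	"os"
)

func newFixtureServer() (*httptest.Server, error) {
	// metrics.txt holds the Prometheus exposition shown above.
	data, err := os.ReadFile("testdata/v0.20.0/metrics.txt")
	if err != nil {
		return nil, err
	}
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write(data)
	}))
	return srv, nil // srv.URL can then be used as the module's scrape URL
}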
diff --git a/src/go/plugin/go.d/modules/windows/windows.go b/src/go/plugin/go.d/modules/windows/windows.go
new file mode 100644
index 000000000..555990784
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/windows.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("windows", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 5,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Windows {
+ return &Windows{
+ Config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 5),
+ },
+ },
+ },
+ cache: cache{
+ collection: make(map[string]bool),
+ collectors: make(map[string]bool),
+ cores: make(map[string]bool),
+ nics: make(map[string]bool),
+ volumes: make(map[string]bool),
+ thermalZones: make(map[string]bool),
+ processes: make(map[string]bool),
+ iis: make(map[string]bool),
+ adcs: make(map[string]bool),
+ services: make(map[string]bool),
+ netFrameworkCLRExceptions: make(map[string]bool),
+ netFrameworkCLRInterops: make(map[string]bool),
+ netFrameworkCLRJIT: make(map[string]bool),
+ netFrameworkCLRLoading: make(map[string]bool),
+ netFrameworkCLRLocksThreads: make(map[string]bool),
+ netFrameworkCLRMemory: make(map[string]bool),
+ netFrameworkCLRRemoting: make(map[string]bool),
+ netFrameworkCLRSecurity: make(map[string]bool),
+ mssqlInstances: make(map[string]bool),
+ mssqlDBs: make(map[string]bool),
+ exchangeWorkload: make(map[string]bool),
+ exchangeLDAP: make(map[string]bool),
+ exchangeHTTPProxy: make(map[string]bool),
+ hypervVMMem: make(map[string]bool),
+ hypervVMDevices: make(map[string]bool),
+ hypervVMInterfaces: make(map[string]bool),
+ hypervVswitch: make(map[string]bool),
+ },
+ charts: &module.Charts{},
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+ Vnode string `yaml:"vnode,omitempty" json:"vnode"`
+}
+
+type (
+ Windows struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prom prometheus.Prometheus
+
+ cache cache
+ }
+ cache struct {
+ cores map[string]bool
+ volumes map[string]bool
+ nics map[string]bool
+ thermalZones map[string]bool
+ processes map[string]bool
+ iis map[string]bool
+ adcs map[string]bool
+ mssqlInstances map[string]bool
+ mssqlDBs map[string]bool
+ services map[string]bool
+ netFrameworkCLRExceptions map[string]bool
+ netFrameworkCLRInterops map[string]bool
+ netFrameworkCLRJIT map[string]bool
+ netFrameworkCLRLoading map[string]bool
+ netFrameworkCLRLocksThreads map[string]bool
+ netFrameworkCLRMemory map[string]bool
+ netFrameworkCLRRemoting map[string]bool
+ netFrameworkCLRSecurity map[string]bool
+ collectors map[string]bool
+ collection map[string]bool
+ exchangeWorkload map[string]bool
+ exchangeLDAP map[string]bool
+ exchangeHTTPProxy map[string]bool
+ hypervVMMem map[string]bool
+ hypervVMDevices map[string]bool
+ hypervVMInterfaces map[string]bool
+ hypervVswitch map[string]bool
+ }
+)
+
+func (w *Windows) Configuration() any {
+ return w.Config
+}
+
+func (w *Windows) Init() error {
+ if err := w.validateConfig(); err != nil {
+ w.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prom, err := w.initPrometheusClient()
+ if err != nil {
+ w.Errorf("init prometheus clients: %v", err)
+ return err
+ }
+ w.prom = prom
+
+ return nil
+}
+
+func (w *Windows) Check() error {
+ mx, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (w *Windows) Charts() *module.Charts {
+ return w.charts
+}
+
+func (w *Windows) Collect() map[string]int64 {
+ ms, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ }
+
+ if len(ms) == 0 {
+ return nil
+ }
+ return ms
+}
+
+func (w *Windows) Cleanup() {
+ if w.prom != nil && w.prom.HTTPClient() != nil {
+ w.prom.HTTPClient().CloseIdleConnections()
+ }
+}
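windows.go above implements the standard go.d module interface (Init, Check, Charts, Collect, Cleanup). The following is a rough, illustrative sketch of the order in which the agent drives that lifecycle; it is not agent code, and the endpoint URL is a placeholder (windows_exporter's conventional port).

// Illustrative sketch only: the sequence in which the go.d agent calls the
// module's methods. The URL is a placeholder, not a required value.
package windows

import (
	"fmt"
	"time"
)

func exampleLifecycle() error {
	w := New()
	w.Config.HTTP.Request.URL = "http://127.0.0.1:9182/metrics" // hypothetical endpoint

	if err := w.Init(); err != nil { // validates config, builds the prometheus client
		return err
	}
	defer w.Cleanup() // closes idle HTTP connections

	if err := w.Check(); err != nil { // a probe collection must return at least one metric
		return err
	}

	for i := 0; i < 3; i++ { // the agent repeats this every update_every seconds (5 by default here)
		mx := w.Collect()
		fmt.Printf("collected %d metrics\n", len(mx))
		time.Sleep(5 * time.Second)
	}
	return nil
}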
diff --git a/src/go/plugin/go.d/modules/windows/windows_test.go b/src/go/plugin/go.d/modules/windows/windows_test.go
new file mode 100644
index 000000000..052950248
--- /dev/null
+++ b/src/go/plugin/go.d/modules/windows/windows_test.go
@@ -0,0 +1,1100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package windows
+
+import (
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer0200Metrics, _ = os.ReadFile("testdata/v0.20.0/metrics.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer0200Metrics": dataVer0200Metrics,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestWindows_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Windows{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestNew(t *testing.T) {
+ assert.IsType(t, (*Windows)(nil), New())
+}
+
+func TestWindows_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "success if 'url' is set": {
+ config: Config{
+ HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:9182/metrics"}}},
+ },
+ "fails on default config": {
+ wantFail: true,
+ config: New().Config,
+ },
+ "fails if 'url' is unset": {
+ wantFail: true,
+ config: Config{HTTP: web.HTTP{Request: web.Request{URL: ""}}},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ win := New()
+ win.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, win.Init())
+ } else {
+ assert.NoError(t, win.Init())
+ }
+ })
+ }
+}
+
+func TestWindows_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (win *Windows, cleanup func())
+ wantFail bool
+ }{
+ "success on valid response v0.20.0": {
+ prepare: prepareWindowsV0200,
+ },
+ "fails if endpoint returns invalid data": {
+ wantFail: true,
+ prepare: prepareWindowsReturnsInvalidData,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareWindowsConnectionRefused,
+ },
+ "fails on 404 response": {
+ wantFail: true,
+ prepare: prepareWindowsResponse404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ win, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, win.Init())
+
+ if test.wantFail {
+ assert.Error(t, win.Check())
+ } else {
+ assert.NoError(t, win.Check())
+ }
+ })
+ }
+}
+
+func TestWindows_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestWindows_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestWindows_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() (win *Windows, cleanup func())
+ wantCollected map[string]int64
+ }{
+ "success on valid response v0.20.0": {
+ prepare: prepareWindowsV0200,
+ wantCollected: map[string]int64{
+ "ad_atq_average_request_latency": 0,
+ "ad_atq_outstanding_requests": 0,
+ "ad_binds_total": 184,
+ "ad_database_operations_total_add": 1,
+ "ad_database_operations_total_delete": 0,
+ "ad_database_operations_total_modify": 30,
+ "ad_database_operations_total_recycle": 0,
+ "ad_directory_operations_total_read": 726,
+ "ad_directory_operations_total_search": 831,
+ "ad_directory_operations_total_write": 31,
+ "ad_directory_service_threads": 0,
+ "ad_ldap_last_bind_time_seconds": 0,
+ "ad_ldap_searches_total": 1382,
+ "ad_name_cache_hits_total": 41161,
+ "ad_name_cache_lookups_total": 53046,
+ "ad_replication_data_intersite_bytes_total_inbound": 0,
+ "ad_replication_data_intersite_bytes_total_outbound": 0,
+ "ad_replication_data_intrasite_bytes_total_inbound": 0,
+ "ad_replication_data_intrasite_bytes_total_outbound": 0,
+ "ad_replication_inbound_objects_filtered_total": 0,
+ "ad_replication_inbound_properties_filtered_total": 0,
+ "ad_replication_inbound_properties_updated_total": 0,
+ "ad_replication_inbound_sync_objects_remaining": 0,
+ "ad_replication_pending_synchronizations": 0,
+ "ad_replication_sync_requests_total": 0,
+ "adcs_cert_template_Administrator_challenge_response_processing_time_seconds": 0,
+ "adcs_cert_template_Administrator_challenge_responses_total": 0,
+ "adcs_cert_template_Administrator_failed_requests_total": 0,
+ "adcs_cert_template_Administrator_issued_requests_total": 0,
+ "adcs_cert_template_Administrator_pending_requests_total": 0,
+ "adcs_cert_template_Administrator_request_cryptographic_signing_time_seconds": 0,
+ "adcs_cert_template_Administrator_request_policy_module_processing_time_seconds": 0,
+ "adcs_cert_template_Administrator_request_processing_time_seconds": 0,
+ "adcs_cert_template_Administrator_requests_total": 0,
+ "adcs_cert_template_Administrator_retrievals_processing_time_seconds": 0,
+ "adcs_cert_template_Administrator_retrievals_total": 0,
+ "adcs_cert_template_Administrator_signed_certificate_timestamp_list_processing_time_seconds": 0,
+ "adcs_cert_template_Administrator_signed_certificate_timestamp_lists_total": 0,
+ "adcs_cert_template_DomainController_challenge_response_processing_time_seconds": 0,
+ "adcs_cert_template_DomainController_challenge_responses_total": 0,
+ "adcs_cert_template_DomainController_failed_requests_total": 0,
+ "adcs_cert_template_DomainController_issued_requests_total": 1,
+ "adcs_cert_template_DomainController_pending_requests_total": 0,
+ "adcs_cert_template_DomainController_request_cryptographic_signing_time_seconds": 0,
+ "adcs_cert_template_DomainController_request_policy_module_processing_time_seconds": 16,
+ "adcs_cert_template_DomainController_request_processing_time_seconds": 63,
+ "adcs_cert_template_DomainController_requests_total": 1,
+ "adcs_cert_template_DomainController_retrievals_processing_time_seconds": 0,
+ "adcs_cert_template_DomainController_retrievals_total": 0,
+ "adcs_cert_template_DomainController_signed_certificate_timestamp_list_processing_time_seconds": 0,
+ "adcs_cert_template_DomainController_signed_certificate_timestamp_lists_total": 0,
+ "adfs_ad_login_connection_failures_total": 0,
+ "adfs_certificate_authentications_total": 0,
+ "adfs_db_artifact_failure_total": 0,
+ "adfs_db_artifact_query_time_seconds_total": 0,
+ "adfs_db_config_failure_total": 0,
+ "adfs_db_config_query_time_seconds_total": 101,
+ "adfs_device_authentications_total": 0,
+ "adfs_external_authentications_failure_total": 0,
+ "adfs_external_authentications_success_total": 0,
+ "adfs_extranet_account_lockouts_total": 0,
+ "adfs_federated_authentications_total": 0,
+ "adfs_federation_metadata_requests_total": 1,
+ "adfs_oauth_authorization_requests_total": 0,
+ "adfs_oauth_client_authentication_failure_total": 0,
+ "adfs_oauth_client_authentication_success_total": 0,
+ "adfs_oauth_client_credentials_failure_total": 0,
+ "adfs_oauth_client_credentials_success_total": 0,
+ "adfs_oauth_client_privkey_jtw_authentication_failure_total": 0,
+ "adfs_oauth_client_privkey_jwt_authentications_success_total": 0,
+ "adfs_oauth_client_secret_basic_authentications_failure_total": 0,
+ "adfs_oauth_client_secret_basic_authentications_success_total": 0,
+ "adfs_oauth_client_secret_post_authentications_failure_total": 0,
+ "adfs_oauth_client_secret_post_authentications_success_total": 0,
+ "adfs_oauth_client_windows_authentications_failure_total": 0,
+ "adfs_oauth_client_windows_authentications_success_total": 0,
+ "adfs_oauth_logon_certificate_requests_failure_total": 0,
+ "adfs_oauth_logon_certificate_token_requests_success_total": 0,
+ "adfs_oauth_password_grant_requests_failure_total": 0,
+ "adfs_oauth_password_grant_requests_success_total": 0,
+ "adfs_oauth_token_requests_success_total": 0,
+ "adfs_passive_requests_total": 0,
+ "adfs_passport_authentications_total": 0,
+ "adfs_password_change_failed_total": 0,
+ "adfs_password_change_succeeded_total": 0,
+ "adfs_samlp_token_requests_success_total": 0,
+ "adfs_sso_authentications_failure_total": 0,
+ "adfs_sso_authentications_success_total": 0,
+ "adfs_token_requests_total": 0,
+ "adfs_userpassword_authentications_failure_total": 0,
+ "adfs_userpassword_authentications_success_total": 0,
+ "adfs_windows_integrated_authentications_total": 0,
+ "adfs_wsfed_token_requests_success_total": 0,
+ "adfs_wstrust_token_requests_success_total": 0,
+ "collector_ad_duration": 769,
+ "collector_ad_status_fail": 0,
+ "collector_ad_status_success": 1,
+ "collector_adcs_duration": 0,
+ "collector_adcs_status_fail": 0,
+ "collector_adcs_status_success": 1,
+ "collector_adfs_duration": 3,
+ "collector_adfs_status_fail": 0,
+ "collector_adfs_status_success": 1,
+ "collector_cpu_duration": 0,
+ "collector_cpu_status_fail": 0,
+ "collector_cpu_status_success": 1,
+ "collector_exchange_duration": 33,
+ "collector_exchange_status_fail": 0,
+ "collector_exchange_status_success": 1,
+ "collector_hyperv_duration": 900,
+ "collector_hyperv_status_fail": 0,
+ "collector_hyperv_status_success": 1,
+ "collector_iis_duration": 0,
+ "collector_iis_status_fail": 0,
+ "collector_iis_status_success": 1,
+ "collector_logical_disk_duration": 0,
+ "collector_logical_disk_status_fail": 0,
+ "collector_logical_disk_status_success": 1,
+ "collector_logon_duration": 113,
+ "collector_logon_status_fail": 0,
+ "collector_logon_status_success": 1,
+ "collector_memory_duration": 0,
+ "collector_memory_status_fail": 0,
+ "collector_memory_status_success": 1,
+ "collector_mssql_duration": 3,
+ "collector_mssql_status_fail": 0,
+ "collector_mssql_status_success": 1,
+ "collector_net_duration": 0,
+ "collector_net_status_fail": 0,
+ "collector_net_status_success": 1,
+ "collector_netframework_clrexceptions_duration": 1437,
+ "collector_netframework_clrexceptions_status_fail": 0,
+ "collector_netframework_clrexceptions_status_success": 1,
+ "collector_netframework_clrinterop_duration": 1491,
+ "collector_netframework_clrinterop_status_fail": 0,
+ "collector_netframework_clrinterop_status_success": 1,
+ "collector_netframework_clrjit_duration": 1278,
+ "collector_netframework_clrjit_status_fail": 0,
+ "collector_netframework_clrjit_status_success": 1,
+ "collector_netframework_clrloading_duration": 1323,
+ "collector_netframework_clrloading_status_fail": 0,
+ "collector_netframework_clrloading_status_success": 1,
+ "collector_netframework_clrlocksandthreads_duration": 1357,
+ "collector_netframework_clrlocksandthreads_status_fail": 0,
+ "collector_netframework_clrlocksandthreads_status_success": 1,
+ "collector_netframework_clrmemory_duration": 1406,
+ "collector_netframework_clrmemory_status_fail": 0,
+ "collector_netframework_clrmemory_status_success": 1,
+ "collector_netframework_clrremoting_duration": 1519,
+ "collector_netframework_clrremoting_status_fail": 0,
+ "collector_netframework_clrremoting_status_success": 1,
+ "collector_netframework_clrsecurity_duration": 1467,
+ "collector_netframework_clrsecurity_status_fail": 0,
+ "collector_netframework_clrsecurity_status_success": 1,
+ "collector_os_duration": 2,
+ "collector_os_status_fail": 0,
+ "collector_os_status_success": 1,
+ "collector_process_duration": 115,
+ "collector_process_status_fail": 0,
+ "collector_process_status_success": 1,
+ "collector_service_duration": 101,
+ "collector_service_status_fail": 0,
+ "collector_service_status_success": 1,
+ "collector_system_duration": 0,
+ "collector_system_status_fail": 0,
+ "collector_system_status_success": 1,
+ "collector_tcp_duration": 0,
+ "collector_tcp_status_fail": 0,
+ "collector_tcp_status_success": 1,
+ "cpu_core_0,0_cstate_c1": 160233427,
+ "cpu_core_0,0_cstate_c2": 0,
+ "cpu_core_0,0_cstate_c3": 0,
+ "cpu_core_0,0_dpc_time": 67109,
+ "cpu_core_0,0_dpcs": 4871900,
+ "cpu_core_0,0_idle_time": 162455593,
+ "cpu_core_0,0_interrupt_time": 77281,
+ "cpu_core_0,0_interrupts": 155194331,
+ "cpu_core_0,0_privileged_time": 1182109,
+ "cpu_core_0,0_user_time": 1073671,
+ "cpu_core_0,1_cstate_c1": 159528054,
+ "cpu_core_0,1_cstate_c2": 0,
+ "cpu_core_0,1_cstate_c3": 0,
+ "cpu_core_0,1_dpc_time": 11093,
+ "cpu_core_0,1_dpcs": 1650552,
+ "cpu_core_0,1_idle_time": 159478125,
+ "cpu_core_0,1_interrupt_time": 58093,
+ "cpu_core_0,1_interrupts": 79325847,
+ "cpu_core_0,1_privileged_time": 1801234,
+ "cpu_core_0,1_user_time": 3432000,
+ "cpu_core_0,2_cstate_c1": 159891723,
+ "cpu_core_0,2_cstate_c2": 0,
+ "cpu_core_0,2_cstate_c3": 0,
+ "cpu_core_0,2_dpc_time": 16062,
+ "cpu_core_0,2_dpcs": 2236469,
+ "cpu_core_0,2_idle_time": 159848437,
+ "cpu_core_0,2_interrupt_time": 53515,
+ "cpu_core_0,2_interrupts": 67305419,
+ "cpu_core_0,2_privileged_time": 1812546,
+ "cpu_core_0,2_user_time": 3050250,
+ "cpu_core_0,3_cstate_c1": 159544117,
+ "cpu_core_0,3_cstate_c2": 0,
+ "cpu_core_0,3_cstate_c3": 0,
+ "cpu_core_0,3_dpc_time": 8140,
+ "cpu_core_0,3_dpcs": 1185046,
+ "cpu_core_0,3_idle_time": 159527546,
+ "cpu_core_0,3_interrupt_time": 44484,
+ "cpu_core_0,3_interrupts": 60766938,
+ "cpu_core_0,3_privileged_time": 1760828,
+ "cpu_core_0,3_user_time": 3422875,
+ "cpu_dpc_time": 102404,
+ "cpu_idle_time": 641309701,
+ "cpu_interrupt_time": 233373,
+ "cpu_privileged_time": 6556717,
+ "cpu_user_time": 10978796,
+ "exchange_activesync_ping_cmds_pending": 0,
+ "exchange_activesync_requests_total": 14,
+ "exchange_activesync_sync_cmds_total": 0,
+ "exchange_autodiscover_requests_total": 1,
+ "exchange_avail_service_requests_per_sec": 0,
+ "exchange_http_proxy_autodiscover_avg_auth_latency": 1,
+ "exchange_http_proxy_autodiscover_avg_cas_proccessing_latency_sec": 3,
+ "exchange_http_proxy_autodiscover_mailbox_proxy_failure_rate": 0,
+ "exchange_http_proxy_autodiscover_mailbox_server_locator_avg_latency_sec": 8,
+ "exchange_http_proxy_autodiscover_outstanding_proxy_requests": 0,
+ "exchange_http_proxy_autodiscover_requests_total": 27122,
+ "exchange_http_proxy_eas_avg_auth_latency": 0,
+ "exchange_http_proxy_eas_avg_cas_proccessing_latency_sec": 3,
+ "exchange_http_proxy_eas_mailbox_proxy_failure_rate": 0,
+ "exchange_http_proxy_eas_mailbox_server_locator_avg_latency_sec": 8,
+ "exchange_http_proxy_eas_outstanding_proxy_requests": 0,
+ "exchange_http_proxy_eas_requests_total": 32519,
+ "exchange_ldap_complianceauditservice_10_long_running_ops_per_sec": 0,
+ "exchange_ldap_complianceauditservice_10_read_time_sec": 18,
+ "exchange_ldap_complianceauditservice_10_search_time_sec": 58,
+ "exchange_ldap_complianceauditservice_10_timeout_errors_total": 0,
+ "exchange_ldap_complianceauditservice_10_write_time_sec": 0,
+ "exchange_ldap_complianceauditservice_long_running_ops_per_sec": 0,
+ "exchange_ldap_complianceauditservice_read_time_sec": 8,
+ "exchange_ldap_complianceauditservice_search_time_sec": 46,
+ "exchange_ldap_complianceauditservice_timeout_errors_total": 0,
+ "exchange_ldap_complianceauditservice_write_time_sec": 0,
+ "exchange_owa_current_unique_users": 0,
+ "exchange_owa_requests_total": 0,
+ "exchange_rpc_active_user_count": 0,
+ "exchange_rpc_avg_latency_sec": 1,
+ "exchange_rpc_connection_count": 0,
+ "exchange_rpc_operations_total": 9,
+ "exchange_rpc_requests": 0,
+ "exchange_rpc_user_count": 0,
+ "exchange_transport_queues_active_mailbox_delivery_high_priority": 0,
+ "exchange_transport_queues_active_mailbox_delivery_low_priority": 0,
+ "exchange_transport_queues_active_mailbox_delivery_none_priority": 0,
+ "exchange_transport_queues_active_mailbox_delivery_normal_priority": 0,
+ "exchange_transport_queues_external_active_remote_delivery_high_priority": 0,
+ "exchange_transport_queues_external_active_remote_delivery_low_priority": 0,
+ "exchange_transport_queues_external_active_remote_delivery_none_priority": 0,
+ "exchange_transport_queues_external_active_remote_delivery_normal_priority": 0,
+ "exchange_transport_queues_external_largest_delivery_high_priority": 0,
+ "exchange_transport_queues_external_largest_delivery_low_priority": 0,
+ "exchange_transport_queues_external_largest_delivery_none_priority": 0,
+ "exchange_transport_queues_external_largest_delivery_normal_priority": 0,
+ "exchange_transport_queues_internal_active_remote_delivery_high_priority": 0,
+ "exchange_transport_queues_internal_active_remote_delivery_low_priority": 0,
+ "exchange_transport_queues_internal_active_remote_delivery_none_priority": 0,
+ "exchange_transport_queues_internal_active_remote_delivery_normal_priority": 0,
+ "exchange_transport_queues_internal_largest_delivery_high_priority": 0,
+ "exchange_transport_queues_internal_largest_delivery_low_priority": 0,
+ "exchange_transport_queues_internal_largest_delivery_none_priority": 0,
+ "exchange_transport_queues_internal_largest_delivery_normal_priority": 0,
+ "exchange_transport_queues_poison_high_priority": 0,
+ "exchange_transport_queues_poison_low_priority": 0,
+ "exchange_transport_queues_poison_none_priority": 0,
+ "exchange_transport_queues_poison_normal_priority": 0,
+ "exchange_transport_queues_retry_mailbox_delivery_high_priority": 0,
+ "exchange_transport_queues_retry_mailbox_delivery_low_priority": 0,
+ "exchange_transport_queues_retry_mailbox_delivery_none_priority": 0,
+ "exchange_transport_queues_retry_mailbox_delivery_normal_priority": 0,
+ "exchange_transport_queues_unreachable_high_priority": 0,
+ "exchange_transport_queues_unreachable_low_priority": 0,
+ "exchange_transport_queues_unreachable_none_priority": 0,
+ "exchange_transport_queues_unreachable_normal_priority": 0,
+ "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_active_tasks": 0,
+ "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_completed_tasks": 0,
+ "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_is_active": 1,
+ "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_is_paused": 0,
+ "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_queued_tasks": 0,
+ "exchange_workload_complianceauditservice_auditcomplianceserviceprioritized_audit_task_execution_manager_yielded_tasks": 0,
+ "exchange_workload_microsoft_exchange_servicehost_darruntime_active_tasks": 0,
+ "exchange_workload_microsoft_exchange_servicehost_darruntime_completed_tasks": 0,
+ "exchange_workload_microsoft_exchange_servicehost_darruntime_is_active": 1,
+ "exchange_workload_microsoft_exchange_servicehost_darruntime_is_paused": 0,
+ "exchange_workload_microsoft_exchange_servicehost_darruntime_queued_tasks": 0,
+ "exchange_workload_microsoft_exchange_servicehost_darruntime_yielded_tasks": 0,
+ "hyperv_health_critical": 0,
+ "hyperv_health_ok": 1,
+ "hyperv_root_partition_1G_device_pages": 0,
+ "hyperv_root_partition_1G_gpa_pages": 6,
+ "hyperv_root_partition_2M_device_pages": 0,
+ "hyperv_root_partition_2M_gpa_pages": 5255,
+ "hyperv_root_partition_4K_device_pages": 0,
+ "hyperv_root_partition_4K_gpa_pages": 58880,
+ "hyperv_root_partition_address_spaces": 0,
+ "hyperv_root_partition_attached_devices": 1,
+ "hyperv_root_partition_deposited_pages": 31732,
+ "hyperv_root_partition_device_dma_errors": 0,
+ "hyperv_root_partition_device_interrupt_errors": 0,
+ "hyperv_root_partition_device_interrupt_throttle_events": 0,
+ "hyperv_root_partition_gpa_space_modifications": 0,
+ "hyperv_root_partition_io_tlb_flush": 23901,
+ "hyperv_root_partition_physical_pages_allocated": 0,
+ "hyperv_root_partition_virtual_tlb_flush_entires": 15234,
+ "hyperv_root_partition_virtual_tlb_pages": 64,
+ "hyperv_vid_ubuntu_22_04_lts_physical_pages_allocated": 745472,
+ "hyperv_vid_ubuntu_22_04_lts_remote_physical_pages": 0,
+ "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_bytes_read": 83456,
+ "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_bytes_written": 1148928,
+ "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_error_count": 0,
+ "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_operations_read": 6,
+ "hyperv_vm_device_--_-d_-ana-vm-hyperv-virtual_machines-3aa8d474-2365-4041-a7cb-2a78287d6fe0_vmgs_operations_written": 34,
+ "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_bytes_read": 531184640,
+ "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_bytes_written": 425905152,
+ "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_error_count": 3,
+ "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_operations_read": 13196,
+ "hyperv_vm_device_d_-ana-vm-hyperv-vhd-ubuntu_22_04_lts_838d93a1-7d30-43cd-9f69-f336829c0934_avhdx_operations_written": 3866,
+ "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_bytes_received": 473654,
+ "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_bytes_sent": 43550457,
+ "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_packets_incoming_dropped": 0,
+ "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_packets_outgoing_dropped": 284,
+ "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_packets_received": 6137,
+ "hyperv_vm_interface_default_switch_312ff9c7-1f07-4eba-81fe-f5b4f445b810_packets_sent": 8905,
+ "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_bytes_received": 43509444,
+ "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_bytes_sent": 473654,
+ "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_packets_incoming_dropped": 0,
+ "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_packets_outgoing_dropped": 0,
+ "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_packets_received": 8621,
+ "hyperv_vm_interface_ubuntu_22_04_lts_adaptador_de_rede_3aa8d474-2365-4041-a7cb-2a78287d6fe0--98f1dbee-505c-4086-b80e-87a27faecbd4_packets_sent": 6137,
+ "hyperv_vm_ubuntu_22_04_lts_cpu_guest_run_time": 62534217,
+ "hyperv_vm_ubuntu_22_04_lts_cpu_hypervisor_run_time": 4457712,
+ "hyperv_vm_ubuntu_22_04_lts_cpu_remote_run_time": 0,
+ "hyperv_vm_ubuntu_22_04_lts_memory_physical": 2628,
+ "hyperv_vm_ubuntu_22_04_lts_memory_physical_guest_visible": 2904,
+ "hyperv_vm_ubuntu_22_04_lts_memory_pressure_current": 83,
+ "hyperv_vswitch_default_switch_broadcast_packets_received_total": 51,
+ "hyperv_vswitch_default_switch_broadcast_packets_sent_total": 18,
+ "hyperv_vswitch_default_switch_bytes_received_total": 44024111,
+ "hyperv_vswitch_default_switch_bytes_sent_total": 43983098,
+ "hyperv_vswitch_default_switch_directed_packets_received_total": 14603,
+ "hyperv_vswitch_default_switch_directed_packets_send_total": 14603,
+ "hyperv_vswitch_default_switch_dropped_packets_incoming_total": 284,
+ "hyperv_vswitch_default_switch_dropped_packets_outcoming_total": 0,
+ "hyperv_vswitch_default_switch_extensions_dropped_packets_incoming_total": 0,
+ "hyperv_vswitch_default_switch_extensions_dropped_packets_outcoming_total": 0,
+ "hyperv_vswitch_default_switch_learned_mac_addresses_total": 2,
+ "hyperv_vswitch_default_switch_multicast_packets_received_total": 388,
+ "hyperv_vswitch_default_switch_multicast_packets_sent_total": 137,
+ "hyperv_vswitch_default_switch_number_of_send_channel_moves_total": 0,
+ "hyperv_vswitch_default_switch_number_of_vmq_moves_total": 0,
+ "hyperv_vswitch_default_switch_packets_flooded_total": 0,
+ "hyperv_vswitch_default_switch_packets_received_total": 15042,
+ "hyperv_vswitch_default_switch_purged_mac_addresses_total": 0,
+ "iis_website_Default_Web_Site_connection_attempts_all_instances_total": 1,
+ "iis_website_Default_Web_Site_current_anonymous_users": 0,
+ "iis_website_Default_Web_Site_current_connections": 0,
+ "iis_website_Default_Web_Site_current_isapi_extension_requests": 0,
+ "iis_website_Default_Web_Site_current_non_anonymous_users": 0,
+ "iis_website_Default_Web_Site_files_received_total": 0,
+ "iis_website_Default_Web_Site_files_sent_total": 2,
+ "iis_website_Default_Web_Site_isapi_extension_requests_total": 0,
+ "iis_website_Default_Web_Site_locked_errors_total": 0,
+ "iis_website_Default_Web_Site_logon_attempts_total": 4,
+ "iis_website_Default_Web_Site_not_found_errors_total": 1,
+ "iis_website_Default_Web_Site_received_bytes_total": 10289,
+ "iis_website_Default_Web_Site_requests_total": 3,
+ "iis_website_Default_Web_Site_sent_bytes_total": 105882,
+ "iis_website_Default_Web_Site_service_uptime": 258633,
+ "logical_disk_C:_free_space": 43636490240,
+ "logical_disk_C:_read_bytes_total": 17676328448,
+ "logical_disk_C:_read_latency": 97420,
+ "logical_disk_C:_reads_total": 350593,
+ "logical_disk_C:_total_space": 67938287616,
+ "logical_disk_C:_used_space": 24301797376,
+ "logical_disk_C:_write_bytes_total": 9135282688,
+ "logical_disk_C:_write_latency": 123912,
+ "logical_disk_C:_writes_total": 450705,
+ "logon_type_batch_sessions": 0,
+ "logon_type_cached_interactive_sessions": 0,
+ "logon_type_cached_remote_interactive_sessions": 0,
+ "logon_type_cached_unlock_sessions": 0,
+ "logon_type_interactive_sessions": 2,
+ "logon_type_network_clear_text_sessions": 0,
+ "logon_type_network_sessions": 0,
+ "logon_type_new_credentials_sessions": 0,
+ "logon_type_proxy_sessions": 0,
+ "logon_type_remote_interactive_sessions": 0,
+ "logon_type_service_sessions": 0,
+ "logon_type_system_sessions": 0,
+ "logon_type_unlock_sessions": 0,
+ "memory_available_bytes": 1379942400,
+ "memory_cache_faults_total": 8009603,
+ "memory_cache_total": 1392185344,
+ "memory_commit_limit": 5733113856,
+ "memory_committed_bytes": 3447439360,
+ "memory_modified_page_list_bytes": 32653312,
+ "memory_not_committed_bytes": 2285674496,
+ "memory_page_faults_total": 119093924,
+ "memory_pool_nonpaged_bytes_total": 126865408,
+ "memory_pool_paged_bytes": 303906816,
+ "memory_standby_cache_core_bytes": 107376640,
+ "memory_standby_cache_normal_priority_bytes": 1019121664,
+ "memory_standby_cache_reserve_bytes": 233033728,
+ "memory_standby_cache_total": 1359532032,
+ "memory_swap_page_reads_total": 402087,
+ "memory_swap_page_writes_total": 7012,
+ "memory_swap_pages_read_total": 4643279,
+ "memory_swap_pages_written_total": 312896,
+ "memory_used_bytes": 2876776448,
+ "mssql_db_master_instance_SQLEXPRESS_active_transactions": 0,
+ "mssql_db_master_instance_SQLEXPRESS_backup_restore_operations": 0,
+ "mssql_db_master_instance_SQLEXPRESS_data_files_size_bytes": 4653056,
+ "mssql_db_master_instance_SQLEXPRESS_log_flushed_bytes": 3702784,
+ "mssql_db_master_instance_SQLEXPRESS_log_flushes": 252,
+ "mssql_db_master_instance_SQLEXPRESS_transactions": 2183,
+ "mssql_db_master_instance_SQLEXPRESS_write_transactions": 236,
+ "mssql_db_model_instance_SQLEXPRESS_active_transactions": 0,
+ "mssql_db_model_instance_SQLEXPRESS_backup_restore_operations": 0,
+ "mssql_db_model_instance_SQLEXPRESS_data_files_size_bytes": 8388608,
+ "mssql_db_model_instance_SQLEXPRESS_log_flushed_bytes": 12288,
+ "mssql_db_model_instance_SQLEXPRESS_log_flushes": 3,
+ "mssql_db_model_instance_SQLEXPRESS_transactions": 4467,
+ "mssql_db_model_instance_SQLEXPRESS_write_transactions": 0,
+ "mssql_db_msdb_instance_SQLEXPRESS_active_transactions": 0,
+ "mssql_db_msdb_instance_SQLEXPRESS_backup_restore_operations": 0,
+ "mssql_db_msdb_instance_SQLEXPRESS_data_files_size_bytes": 15466496,
+ "mssql_db_msdb_instance_SQLEXPRESS_log_flushed_bytes": 0,
+ "mssql_db_msdb_instance_SQLEXPRESS_log_flushes": 0,
+ "mssql_db_msdb_instance_SQLEXPRESS_transactions": 4582,
+ "mssql_db_msdb_instance_SQLEXPRESS_write_transactions": 0,
+ "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_active_transactions": 0,
+ "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_backup_restore_operations": 0,
+ "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_data_files_size_bytes": 41943040,
+ "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_log_flushed_bytes": 0,
+ "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_log_flushes": 0,
+ "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_transactions": 2,
+ "mssql_db_mssqlsystemresource_instance_SQLEXPRESS_write_transactions": 0,
+ "mssql_db_tempdb_instance_SQLEXPRESS_active_transactions": 0,
+ "mssql_db_tempdb_instance_SQLEXPRESS_backup_restore_operations": 0,
+ "mssql_db_tempdb_instance_SQLEXPRESS_data_files_size_bytes": 8388608,
+ "mssql_db_tempdb_instance_SQLEXPRESS_log_flushed_bytes": 118784,
+ "mssql_db_tempdb_instance_SQLEXPRESS_log_flushes": 2,
+ "mssql_db_tempdb_instance_SQLEXPRESS_transactions": 1558,
+ "mssql_db_tempdb_instance_SQLEXPRESS_write_transactions": 29,
+ "mssql_instance_SQLEXPRESS_accessmethods_page_splits": 429,
+ "mssql_instance_SQLEXPRESS_bufman_buffer_cache_hits": 86,
+ "mssql_instance_SQLEXPRESS_bufman_checkpoint_pages": 82,
+ "mssql_instance_SQLEXPRESS_bufman_page_life_expectancy_seconds": 191350,
+ "mssql_instance_SQLEXPRESS_bufman_page_reads": 797,
+ "mssql_instance_SQLEXPRESS_bufman_page_writes": 92,
+ "mssql_instance_SQLEXPRESS_cache_hit_ratio": 100,
+ "mssql_instance_SQLEXPRESS_genstats_blocked_processes": 0,
+ "mssql_instance_SQLEXPRESS_genstats_user_connections": 1,
+ "mssql_instance_SQLEXPRESS_memmgr_connection_memory_bytes": 1015808,
+ "mssql_instance_SQLEXPRESS_memmgr_external_benefit_of_memory": 0,
+ "mssql_instance_SQLEXPRESS_memmgr_pending_memory_grants": 0,
+ "mssql_instance_SQLEXPRESS_memmgr_total_server_memory_bytes": 198836224,
+ "mssql_instance_SQLEXPRESS_resource_AllocUnit_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_AllocUnit_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_Application_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_Application_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_Database_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_Database_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_Extent_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_Extent_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_File_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_File_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_HoBT_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_HoBT_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_Key_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_Key_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_Metadata_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_Metadata_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_OIB_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_OIB_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_Object_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_Object_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_Page_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_Page_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_RID_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_RID_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_RowGroup_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_RowGroup_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_resource_Xact_locks_deadlocks": 0,
+ "mssql_instance_SQLEXPRESS_resource_Xact_locks_lock_wait_seconds": 0,
+ "mssql_instance_SQLEXPRESS_sql_errors_total_db_offline_errors": 0,
+ "mssql_instance_SQLEXPRESS_sql_errors_total_info_errors": 766,
+ "mssql_instance_SQLEXPRESS_sql_errors_total_kill_connection_errors": 0,
+ "mssql_instance_SQLEXPRESS_sql_errors_total_user_errors": 29,
+ "mssql_instance_SQLEXPRESS_sqlstats_auto_parameterization_attempts": 37,
+ "mssql_instance_SQLEXPRESS_sqlstats_batch_requests": 2972,
+ "mssql_instance_SQLEXPRESS_sqlstats_safe_auto_parameterization_attempts": 2,
+ "mssql_instance_SQLEXPRESS_sqlstats_sql_compilations": 376,
+ "mssql_instance_SQLEXPRESS_sqlstats_sql_recompilations": 8,
+ "net_nic_Intel_R_PRO_1000_MT_Network_Connection_bytes_received": 38290755856,
+ "net_nic_Intel_R_PRO_1000_MT_Network_Connection_bytes_sent": 8211165504,
+ "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_outbound_discarded": 0,
+ "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_outbound_errors": 0,
+ "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_received_discarded": 0,
+ "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_received_errors": 0,
+ "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_received_total": 4120869,
+ "net_nic_Intel_R_PRO_1000_MT_Network_Connection_packets_sent_total": 1332466,
+ "netframework_WMSvc_clrexception_filters_total": 0,
+ "netframework_WMSvc_clrexception_finallys_total": 0,
+ "netframework_WMSvc_clrexception_throw_to_catch_depth_total": 0,
+ "netframework_WMSvc_clrexception_thrown_total": 0,
+ "netframework_WMSvc_clrinterop_com_callable_wrappers_total": 2,
+ "netframework_WMSvc_clrinterop_interop_marshalling_total": 0,
+ "netframework_WMSvc_clrinterop_interop_stubs_created_total": 29,
+ "netframework_WMSvc_clrjit_il_bytes_total": 4007,
+ "netframework_WMSvc_clrjit_methods_total": 27,
+ "netframework_WMSvc_clrjit_standard_failures_total": 0,
+ "netframework_WMSvc_clrjit_time_percent": 0,
+ "netframework_WMSvc_clrloading_appdomains_loaded_total": 1,
+ "netframework_WMSvc_clrloading_appdomains_unloaded_total": 0,
+ "netframework_WMSvc_clrloading_assemblies_loaded_total": 5,
+ "netframework_WMSvc_clrloading_class_load_failures_total": 0,
+ "netframework_WMSvc_clrloading_classes_loaded_total": 18,
+ "netframework_WMSvc_clrloading_loader_heap_size_bytes": 270336,
+ "netframework_WMSvc_clrlocksandthreads_contentions_total": 0,
+ "netframework_WMSvc_clrlocksandthreads_current_logical_threads": 2,
+ "netframework_WMSvc_clrlocksandthreads_physical_threads_current": 1,
+ "netframework_WMSvc_clrlocksandthreads_queue_length_total": 0,
+ "netframework_WMSvc_clrlocksandthreads_recognized_threads_total": 1,
+ "netframework_WMSvc_clrmemory_allocated_bytes_total": 227792,
+ "netframework_WMSvc_clrmemory_collections_total": 2,
+ "netframework_WMSvc_clrmemory_committed_bytes": 270336,
+ "netframework_WMSvc_clrmemory_finalization_survivors": 7,
+ "netframework_WMSvc_clrmemory_gc_time_percent": 0,
+ "netframework_WMSvc_clrmemory_heap_size_bytes": 4312696,
+ "netframework_WMSvc_clrmemory_induced_gc_total": 0,
+ "netframework_WMSvc_clrmemory_number_gc_handles": 24,
+ "netframework_WMSvc_clrmemory_number_pinned_objects": 1,
+ "netframework_WMSvc_clrmemory_number_sink_blocksinuse": 1,
+ "netframework_WMSvc_clrmemory_promoted_bytes": 49720,
+ "netframework_WMSvc_clrmemory_reserved_bytes": 402644992,
+ "netframework_WMSvc_clrremoting_channels_total": 0,
+ "netframework_WMSvc_clrremoting_context_bound_classes_loaded": 0,
+ "netframework_WMSvc_clrremoting_context_bound_objects_total": 0,
+ "netframework_WMSvc_clrremoting_context_proxies_total": 0,
+ "netframework_WMSvc_clrremoting_contexts": 1,
+ "netframework_WMSvc_clrremoting_remote_calls_total": 0,
+ "netframework_WMSvc_clrsecurity_checks_time_percent": 0,
+ "netframework_WMSvc_clrsecurity_link_time_checks_total": 0,
+ "netframework_WMSvc_clrsecurity_runtime_checks_total": 3,
+ "netframework_WMSvc_clrsecurity_stack_walk_depth": 1,
+ "netframework_powershell_clrexception_filters_total": 0,
+ "netframework_powershell_clrexception_finallys_total": 56,
+ "netframework_powershell_clrexception_throw_to_catch_depth_total": 140,
+ "netframework_powershell_clrexception_thrown_total": 37,
+ "netframework_powershell_clrinterop_com_callable_wrappers_total": 5,
+ "netframework_powershell_clrinterop_interop_marshalling_total": 0,
+ "netframework_powershell_clrinterop_interop_stubs_created_total": 345,
+ "netframework_powershell_clrjit_il_bytes_total": 47021,
+ "netframework_powershell_clrjit_methods_total": 344,
+ "netframework_powershell_clrjit_standard_failures_total": 0,
+ "netframework_powershell_clrjit_time_percent": 0,
+ "netframework_powershell_clrloading_appdomains_loaded_total": 1,
+ "netframework_powershell_clrloading_appdomains_unloaded_total": 0,
+ "netframework_powershell_clrloading_assemblies_loaded_total": 20,
+ "netframework_powershell_clrloading_class_load_failures_total": 1,
+ "netframework_powershell_clrloading_classes_loaded_total": 477,
+ "netframework_powershell_clrloading_loader_heap_size_bytes": 2285568,
+ "netframework_powershell_clrlocksandthreads_contentions_total": 10,
+ "netframework_powershell_clrlocksandthreads_current_logical_threads": 16,
+ "netframework_powershell_clrlocksandthreads_physical_threads_current": 13,
+ "netframework_powershell_clrlocksandthreads_queue_length_total": 3,
+ "netframework_powershell_clrlocksandthreads_recognized_threads_total": 6,
+ "netframework_powershell_clrmemory_allocated_bytes_total": 46333800,
+ "netframework_powershell_clrmemory_collections_total": 11,
+ "netframework_powershell_clrmemory_committed_bytes": 20475904,
+ "netframework_powershell_clrmemory_finalization_survivors": 244,
+ "netframework_powershell_clrmemory_gc_time_percent": 0,
+ "netframework_powershell_clrmemory_heap_size_bytes": 34711872,
+ "netframework_powershell_clrmemory_induced_gc_total": 0,
+ "netframework_powershell_clrmemory_number_gc_handles": 834,
+ "netframework_powershell_clrmemory_number_pinned_objects": 0,
+ "netframework_powershell_clrmemory_number_sink_blocksinuse": 42,
+ "netframework_powershell_clrmemory_promoted_bytes": 107352,
+ "netframework_powershell_clrmemory_reserved_bytes": 402644992,
+ "netframework_powershell_clrremoting_channels_total": 0,
+ "netframework_powershell_clrremoting_context_bound_classes_loaded": 0,
+ "netframework_powershell_clrremoting_context_bound_objects_total": 0,
+ "netframework_powershell_clrremoting_context_proxies_total": 0,
+ "netframework_powershell_clrremoting_contexts": 1,
+ "netframework_powershell_clrremoting_remote_calls_total": 0,
+ "netframework_powershell_clrsecurity_checks_time_percent": 0,
+ "netframework_powershell_clrsecurity_link_time_checks_total": 0,
+ "netframework_powershell_clrsecurity_runtime_checks_total": 4386,
+ "netframework_powershell_clrsecurity_stack_walk_depth": 1,
+ "os_paging_free_bytes": 1414107136,
+ "os_paging_limit_bytes": 1476395008,
+ "os_paging_used_bytes": 62287872,
+ "os_physical_memory_free_bytes": 1379946496,
+ "os_processes": 152,
+ "os_processes_limit": 4294967295,
+ "os_users": 2,
+ "os_visible_memory_bytes": 4256718848,
+ "os_visible_memory_used_bytes": 2876772352,
+ "process_msedge_cpu_time": 1919893,
+ "process_msedge_handles": 5779,
+ "process_msedge_io_bytes": 3978227378,
+ "process_msedge_io_operations": 16738642,
+ "process_msedge_page_faults": 5355941,
+ "process_msedge_page_file_bytes": 681603072,
+ "process_msedge_threads": 213,
+ "process_msedge_working_set_private_bytes": 461344768,
+ "service_dhcp_state_continue_pending": 0,
+ "service_dhcp_state_pause_pending": 0,
+ "service_dhcp_state_paused": 0,
+ "service_dhcp_state_running": 1,
+ "service_dhcp_state_start_pending": 0,
+ "service_dhcp_state_stop_pending": 0,
+ "service_dhcp_state_stopped": 0,
+ "service_dhcp_state_unknown": 0,
+ "service_dhcp_status_degraded": 0,
+ "service_dhcp_status_error": 0,
+ "service_dhcp_status_lost_comm": 0,
+ "service_dhcp_status_no_contact": 0,
+ "service_dhcp_status_nonrecover": 0,
+ "service_dhcp_status_ok": 1,
+ "service_dhcp_status_pred_fail": 0,
+ "service_dhcp_status_service": 0,
+ "service_dhcp_status_starting": 0,
+ "service_dhcp_status_stopping": 0,
+ "service_dhcp_status_stressed": 0,
+ "service_dhcp_status_unknown": 0,
+ "system_threads": 1559,
+ "system_up_time": 16208210,
+ "tcp_ipv4_conns_active": 4301,
+ "tcp_ipv4_conns_established": 7,
+ "tcp_ipv4_conns_failures": 137,
+ "tcp_ipv4_conns_passive": 501,
+ "tcp_ipv4_conns_resets": 1282,
+ "tcp_ipv4_segments_received": 676388,
+ "tcp_ipv4_segments_retransmitted": 2120,
+ "tcp_ipv4_segments_sent": 871379,
+ "tcp_ipv6_conns_active": 214,
+ "tcp_ipv6_conns_established": 0,
+ "tcp_ipv6_conns_failures": 214,
+ "tcp_ipv6_conns_passive": 0,
+ "tcp_ipv6_conns_resets": 0,
+ "tcp_ipv6_segments_received": 1284,
+ "tcp_ipv6_segments_retransmitted": 428,
+ "tcp_ipv6_segments_sent": 856,
+ },
+ },
+ "fails if endpoint returns invalid data": {
+ prepare: prepareWindowsReturnsInvalidData,
+ },
+ "fails on connection refused": {
+ prepare: prepareWindowsConnectionRefused,
+ },
+ "fails on 404 response": {
+ prepare: prepareWindowsResponse404,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ win, cleanup := test.prepare()
+ defer cleanup()
+
+ require.NoError(t, win.Init())
+
+ mx := win.Collect()
+
+ if mx != nil && test.wantCollected != nil {
+ mx["system_up_time"] = test.wantCollected["system_up_time"]
+ }
+
+ assert.Equal(t, test.wantCollected, mx)
+ if len(test.wantCollected) > 0 {
+ testCharts(t, win, mx)
+ }
+ })
+ }
+}
+
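+// testCharts runs chart and dimension consistency checks against the collected metrics.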
+func testCharts(t *testing.T, win *Windows, mx map[string]int64) {
+ ensureChartsDimsCreated(t, win)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, win, mx)
+}
+
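+// ensureChartsDimsCreated asserts that per-collector charts exist only for collectors seen in the
+// scraped data, and that per-instance charts exist for every cached core, disk, nic, thermal zone,
+// service, IIS website, MSSQL instance/database, AD CS template, Hyper-V object and collector.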
+func ensureChartsDimsCreated(t *testing.T, w *Windows) {
+ for _, chart := range cpuCharts {
+ if w.cache.collection[collectorCPU] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range memCharts {
+ if w.cache.collection[collectorMemory] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range tcpCharts {
+ if w.cache.collection[collectorTCP] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range osCharts {
+ if w.cache.collection[collectorOS] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range systemCharts {
+ if w.cache.collection[collectorSystem] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range logonCharts {
+ if w.cache.collection[collectorLogon] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range processesCharts {
+ if w.cache.collection[collectorProcess] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range netFrameworkCLRExceptionsChartsTmpl {
+ if w.cache.collection[collectorNetFrameworkCLRExceptions] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range netFrameworkCLRInteropChartsTmpl {
+ if w.cache.collection[collectorNetFrameworkCLRInterop] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range netFrameworkCLRJITChartsTmpl {
+ if w.cache.collection[collectorNetFrameworkCLRJIT] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range netFrameworkCLRLoadingChartsTmpl {
+ if w.cache.collection[collectorNetFrameworkCLRLoading] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range netFrameworkCLRLocksAndThreadsChartsTmpl {
+ if w.cache.collection[collectorNetFrameworkCLRLocksAndThreads] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range netFrameworkCLRMemoryChartsTmpl {
+ if w.cache.collection[collectorNetFrameworkCLRMemory] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range netFrameworkCLRRemotingChartsTmpl {
+ if w.cache.collection[collectorNetFrameworkCLRRemoting] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for _, chart := range netFrameworkCLRSecurityChartsTmpl {
+ if w.cache.collection[collectorNetFrameworkCLRSecurity] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+
+ for core := range w.cache.cores {
+ for _, chart := range cpuCoreChartsTmpl {
+ id := fmt.Sprintf(chart.ID, core)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' core", id, core)
+ }
+ }
+ for disk := range w.cache.volumes {
+ for _, chart := range diskChartsTmpl {
+ id := fmt.Sprintf(chart.ID, disk)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' disk", id, disk)
+ }
+ }
+ for nic := range w.cache.nics {
+ for _, chart := range nicChartsTmpl {
+ id := fmt.Sprintf(chart.ID, nic)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' nic", id, nic)
+ }
+ }
+ for zone := range w.cache.thermalZones {
+ for _, chart := range thermalzoneChartsTmpl {
+ id := fmt.Sprintf(chart.ID, zone)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' thermalzone", id, zone)
+ }
+ }
+ for svc := range w.cache.services {
+ for _, chart := range serviceChartsTmpl {
+ id := fmt.Sprintf(chart.ID, svc)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' service", id, svc)
+ }
+ }
+ for website := range w.cache.iis {
+ for _, chart := range iisWebsiteChartsTmpl {
+ id := fmt.Sprintf(chart.ID, website)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' website", id, website)
+ }
+ }
+ for instance := range w.cache.mssqlInstances {
+ for _, chart := range mssqlInstanceChartsTmpl {
+ id := fmt.Sprintf(chart.ID, instance)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' instance", id, instance)
+ }
+ }
+ for instanceDB := range w.cache.mssqlDBs {
+ s := strings.Split(instanceDB, ":")
+ if assert.Lenf(t, s, 2, "cannot extract instance/database from cache.mssqlDBs") {
+ instance, db := s[0], s[1]
+ for _, chart := range mssqlDatabaseChartsTmpl {
+ id := fmt.Sprintf(chart.ID, db, instance)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' instance", id, instance)
+ }
+ }
+ }
+ for _, chart := range adCharts {
+ if w.cache.collection[collectorAD] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for template := range w.cache.adcs {
+ for _, chart := range adcsCertTemplateChartsTmpl {
+ id := fmt.Sprintf(chart.ID, template)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' template certificate", id, template)
+ }
+ }
+ for name := range w.cache.collectors {
+ for _, chart := range collectorChartsTmpl {
+ id := fmt.Sprintf(chart.ID, name)
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' collector", id, name)
+ }
+ }
+
+ for _, chart := range processesCharts {
+ if chart = w.Charts().Get(chart.ID); chart == nil {
+ continue
+ }
+ for proc := range w.cache.processes {
+ var found bool
+ for _, dim := range chart.Dims {
+ if found = strings.HasPrefix(dim.ID, "process_"+proc); found {
+ break
+ }
+ }
+ assert.Truef(t, found, "chart '%s' has no dim for '%s' process", chart.ID, proc)
+ }
+ }
+
+ for _, chart := range hypervChartsTmpl {
+ if w.cache.collection[collectorHyperv] {
+ assert.Truef(t, w.Charts().Has(chart.ID), "chart '%s' not created", chart.ID)
+ } else {
+ assert.Falsef(t, w.Charts().Has(chart.ID), "chart '%s' created", chart.ID)
+ }
+ }
+ for vm := range w.cache.hypervVMMem {
+ for _, chart := range hypervVMChartsTemplate {
+ id := fmt.Sprintf(chart.ID, hypervCleanName(vm))
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' virtual machine", id, vm)
+ }
+ }
+ for device := range w.cache.hypervVMDevices {
+ for _, chart := range hypervVMDeviceChartsTemplate {
+ id := fmt.Sprintf(chart.ID, hypervCleanName(device))
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' vm storage device", id, device)
+ }
+ }
+ for iface := range w.cache.hypervVMInterfaces {
+ for _, chart := range hypervVMInterfaceChartsTemplate {
+ id := fmt.Sprintf(chart.ID, hypervCleanName(iface))
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' vm network interface", id, iface)
+ }
+ }
+ for vswitch := range w.cache.hypervVswitch {
+ for _, chart := range hypervVswitchChartsTemplate {
+ id := fmt.Sprintf(chart.ID, hypervCleanName(vswitch))
+ assert.Truef(t, w.Charts().Has(id), "charts has no '%s' chart for '%s' virtual switch", id, vswitch)
+ }
+ }
+}
+
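+// ensureCollectedHasAllChartsDimsVarsIDs asserts that every chart dimension and variable ID
+// has a corresponding entry in the collected metrics map.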
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, w *Windows, mx map[string]int64) {
+ for _, chart := range *w.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := mx[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
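+// prepareWindowsV0200 starts a test HTTP server serving the v0.20.0 testdata metrics
+// and returns a collector configured to scrape it.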
+func prepareWindowsV0200() (win *Windows, cleanup func()) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(dataVer0200Metrics)
+ }))
+
+ win = New()
+ win.URL = ts.URL
+ return win, ts.Close
+}
+
+func prepareWindowsReturnsInvalidData() (win *Windows, cleanup func()) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ win = New()
+ win.URL = ts.URL
+ return win, ts.Close
+}
+
+func prepareWindowsConnectionRefused() (win *Windows, cleanup func()) {
+ win = New()
+ win.URL = "http://127.0.0.1:38001"
+ return win, func() {}
+}
+
+func prepareWindowsResponse404() (win *Windows, cleanup func()) {
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusNotFound)
+ }))
+
+ win = New()
+ win.URL = ts.URL
+ return win, ts.Close
+}
diff --git a/src/go/plugin/go.d/modules/wireguard/README.md b/src/go/plugin/go.d/modules/wireguard/README.md
new file mode 120000
index 000000000..389e494d7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/README.md
@@ -0,0 +1 @@
+integrations/wireguard.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/wireguard/charts.go b/src/go/plugin/go.d/modules/wireguard/charts.go
new file mode 100644
index 000000000..c2defa9b3
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/charts.go
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package wireguard
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioDeviceNetworkIO = module.Priority + iota
+ prioDevicePeers
+ prioPeerNetworkIO
+ prioPeerLatestHandShake
+)
+
+var (
+ deviceChartsTmpl = module.Charts{
+ deviceNetworkIOChartTmpl.Copy(),
+ devicePeersChartTmpl.Copy(),
+ }
+
+ deviceNetworkIOChartTmpl = module.Chart{
+ ID: "device_%s_network_io",
+ Title: "Device traffic",
+ Units: "B/s",
+ Fam: "device traffic",
+ Ctx: "wireguard.device_network_io",
+ Type: module.Area,
+ Priority: prioDeviceNetworkIO,
+ Dims: module.Dims{
+ {ID: "device_%s_receive", Name: "receive", Algo: module.Incremental},
+ {ID: "device_%s_transmit", Name: "transmit", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ devicePeersChartTmpl = module.Chart{
+ ID: "device_%s_peers",
+ Title: "Device peers",
+ Units: "peers",
+ Fam: "device peers",
+ Ctx: "wireguard.device_peers",
+ Priority: prioDevicePeers,
+ Dims: module.Dims{
+ {ID: "device_%s_peers", Name: "peers"},
+ },
+ }
+)
+
+var (
+ peerChartsTmpl = module.Charts{
+ peerNetworkIOChartTmpl.Copy(),
+ peerLatestHandShakeChartTmpl.Copy(),
+ }
+
+ peerNetworkIOChartTmpl = module.Chart{
+ ID: "peer_%s_network_io",
+ Title: "Peer traffic",
+ Units: "B/s",
+ Fam: "peer traffic",
+ Ctx: "wireguard.peer_network_io",
+ Type: module.Area,
+ Priority: prioPeerNetworkIO,
+ Dims: module.Dims{
+ {ID: "peer_%s_receive", Name: "receive", Algo: module.Incremental},
+ {ID: "peer_%s_transmit", Name: "transmit", Algo: module.Incremental, Mul: -1},
+ },
+ }
+ peerLatestHandShakeChartTmpl = module.Chart{
+ ID: "peer_%s_latest_handshake_ago",
+ Title: "Peer time elapsed since the latest handshake",
+ Units: "seconds",
+ Fam: "peer latest handshake",
+ Ctx: "wireguard.peer_latest_handshake_ago",
+ Priority: prioPeerLatestHandShake,
+ Dims: module.Dims{
+ {ID: "peer_%s_latest_handshake_ago", Name: "time"},
+ },
+ }
+)
+
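+// newDeviceCharts instantiates the device chart templates for the given device,
+// filling in chart/dim IDs and the device label.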
+func newDeviceCharts(device string) *module.Charts {
+ charts := deviceChartsTmpl.Copy()
+
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, device)
+ c.Labels = []module.Label{
+ {Key: "device", Value: device},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, device)
+ }
+ }
+
+ return charts
+}
+
+func (w *WireGuard) addNewDeviceCharts(device string) {
+ charts := newDeviceCharts(device)
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *WireGuard) removeDeviceCharts(device string) {
+ prefix := fmt.Sprintf("device_%s", device)
+
+ for _, c := range *w.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+func newPeerCharts(id, device, pubKey string) *module.Charts {
+ charts := peerChartsTmpl.Copy()
+
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, id)
+ c.Labels = []module.Label{
+ {Key: "device", Value: device},
+ {Key: "public_key", Value: pubKey},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, id)
+ }
+ }
+
+ return charts
+}
+
+func (w *WireGuard) addNewPeerCharts(id, device, pubKey string) {
+ charts := newPeerCharts(id, device, pubKey)
+
+ if err := w.Charts().Add(*charts...); err != nil {
+ w.Warning(err)
+ }
+}
+
+func (w *WireGuard) removePeerCharts(id string) {
+ prefix := fmt.Sprintf("peer_%s", id)
+
+ for _, c := range *w.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/wireguard/collect.go b/src/go/plugin/go.d/modules/wireguard/collect.go
new file mode 100644
index 000000000..cbcc180ec
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/collect.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package wireguard
+
+import (
+ "fmt"
+ "time"
+
+ "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
+)
+
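+// collect queries all WireGuard devices via the wgctrl client, adds charts for newly seen
+// devices and peers, and periodically removes charts for those that have disappeared.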
+func (w *WireGuard) collect() (map[string]int64, error) {
+ if w.client == nil {
+ client, err := w.newWGClient()
+ if err != nil {
+ return nil, fmt.Errorf("creating WireGuard client: %v", err)
+ }
+ w.client = client
+ }
+
+ // TODO: we probably need to get the list of interfaces and query each one using client.Device()
+ // https://github.com/WireGuard/wgctrl-go/blob/3d4a969bb56bb6931f6661af606bc9c4195b4249/internal/wglinux/client_linux.go#L79-L80
+ devices, err := w.client.Devices()
+ if err != nil {
+ return nil, fmt.Errorf("retrieving WireGuard devices: %v", err)
+ }
+
+ if len(devices) == 0 {
+ w.Info("no WireGuard devices found on the host system")
+ }
+
+ now := time.Now()
+ if w.cleanupLastTime.IsZero() {
+ w.cleanupLastTime = now
+ }
+
+ mx := make(map[string]int64)
+
+ w.collectDevicesPeers(mx, devices, now)
+
+ if now.Sub(w.cleanupLastTime) > w.cleanupEvery {
+ w.cleanupLastTime = now
+ w.cleanupDevicesPeers(devices)
+ }
+
+ return mx, nil
+}
+
+func (w *WireGuard) collectDevicesPeers(mx map[string]int64, devices []*wgtypes.Device, now time.Time) {
+ for _, d := range devices {
+ if !w.devices[d.Name] {
+ w.devices[d.Name] = true
+ w.addNewDeviceCharts(d.Name)
+ }
+
+ mx["device_"+d.Name+"_peers"] = int64(len(d.Peers))
+ if len(d.Peers) == 0 {
+ mx["device_"+d.Name+"_receive"] = 0
+ mx["device_"+d.Name+"_transmit"] = 0
+ continue
+ }
+
+ for _, p := range d.Peers {
+ if p.LastHandshakeTime.IsZero() {
+ continue
+ }
+
+ pubKey := p.PublicKey.String()
+ id := peerID(d.Name, pubKey)
+
+ if !w.peers[id] {
+ w.peers[id] = true
+ w.addNewPeerCharts(id, d.Name, pubKey)
+ }
+
+ mx["device_"+d.Name+"_receive"] += p.ReceiveBytes
+ mx["device_"+d.Name+"_transmit"] += p.TransmitBytes
+ mx["peer_"+id+"_receive"] = p.ReceiveBytes
+ mx["peer_"+id+"_transmit"] = p.TransmitBytes
+ mx["peer_"+id+"_latest_handshake_ago"] = int64(now.Sub(p.LastHandshakeTime).Seconds())
+ }
+ }
+}
+
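+// cleanupDevicesPeers drops cached devices and peers that are no longer reported and removes their charts.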
+func (w *WireGuard) cleanupDevicesPeers(devices []*wgtypes.Device) {
+ seenDevices, seenPeers := make(map[string]bool), make(map[string]bool)
+ for _, d := range devices {
+ seenDevices[d.Name] = true
+ for _, p := range d.Peers {
+ seenPeers[peerID(d.Name, p.PublicKey.String())] = true
+ }
+ }
+ for d := range w.devices {
+ if !seenDevices[d] {
+ delete(w.devices, d)
+ w.removeDeviceCharts(d)
+ }
+ }
+ for p := range w.peers {
+ if !seenPeers[p] {
+ delete(w.peers, p)
+ w.removePeerCharts(p)
+ }
+ }
+}
+
+func peerID(device, peerPublicKey string) string {
+ return device + "_" + peerPublicKey
+}
diff --git a/src/go/plugin/go.d/modules/wireguard/config_schema.json b/src/go/plugin/go.d/modules/wireguard/config_schema.json
new file mode 100644
index 000000000..5ff8ff717
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/config_schema.json
@@ -0,0 +1,25 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "WireGuard collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/wireguard/integrations/wireguard.md b/src/go/plugin/go.d/modules/wireguard/integrations/wireguard.md
new file mode 100644
index 000000000..2460cc839
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/integrations/wireguard.md
@@ -0,0 +1,204 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/wireguard/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/wireguard/metadata.yaml"
+sidebar_label: "WireGuard"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/VPNs"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# WireGuard
+
+
+<img src="https://netdata.cloud/img/wireguard.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: wireguard
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors WireGuard VPN devices and their peers' traffic.
+
+
+It connects to the local WireGuard instance using the [wgctrl-go](https://github.com/WireGuard/wgctrl-go) client library.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+This collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+It automatically detects instances running on localhost.
+
+
+#### Limits
+
+Doesn't work if Netdata or WireGuard is installed in a container.
+
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per device
+
+These metrics refer to the VPN network interface.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | VPN network interface |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| wireguard.device_network_io | receive, transmit | B/s |
+| wireguard.device_peers | peers | peers |
+
+### Per peer
+
+These metrics refer to the VPN peer.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| device | VPN network interface |
+| public_key | Public key of a peer |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| wireguard.peer_network_io | receive, transmit | B/s |
+| wireguard.peer_latest_handshake_ago | time | seconds |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/wireguard.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/wireguard.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+
+</details>
+
+#### Examples
+There are no configuration examples.
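+
+As a purely illustrative sketch, a job that overrides the global `update_every` could look like this (the job name is arbitrary):
+
+```yaml
+jobs:
+  - name: wireguard
+    update_every: 5
+```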
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `wireguard` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m wireguard
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `wireguard` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep wireguard
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep wireguard /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep wireguard
+```
+
+
diff --git a/src/go/plugin/go.d/modules/wireguard/metadata.yaml b/src/go/plugin/go.d/modules/wireguard/metadata.yaml
new file mode 100644
index 000000000..0ac680d58
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/metadata.yaml
@@ -0,0 +1,121 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-wireguard
+ plugin_name: go.d.plugin
+ module_name: wireguard
+ monitored_instance:
+ name: WireGuard
+ link: https://www.wireguard.com/
+ categories:
+ - data-collection.vpns
+ icon_filename: wireguard.svg
+ keywords:
+ - wireguard
+ - vpn
+ - security
+ most_popular: false
+ info_provided_to_referring_integrations:
+ description: ""
+ related_resources:
+ integrations:
+ list: []
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector monitors WireGuard VPN devices and their peers' traffic.
+ method_description: |
+ It connects to the local WireGuard instance using the [wgctrl-go](https://github.com/WireGuard/wgctrl-go) client library.
+ default_behavior:
+ auto_detection:
+ description: |
+ It automatically detects instances running on localhost.
+ limits:
+ description: |
+ Doesn't work if Netdata or WireGuard is installed in a container.
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: |
+ This collector requires the CAP_NET_ADMIN capability, but it is set automatically during installation, so no manual configuration is needed.
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/wireguard.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: device
+ description: These metrics refer to the VPN network interface.
+ labels:
+ - name: device
+ description: VPN network interface
+ metrics:
+ - name: wireguard.device_network_io
+ description: Device traffic
+ unit: B/s
+ chart_type: area
+ dimensions:
+ - name: receive
+ - name: transmit
+ - name: wireguard.device_peers
+ description: Device peers
+ unit: peers
+ chart_type: line
+ dimensions:
+ - name: peers
+ - name: peer
+ description: These metrics refer to the VPN peer.
+ labels:
+ - name: device
+ description: VPN network interface
+ - name: public_key
+ description: Public key of a peer
+ metrics:
+ - name: wireguard.peer_network_io
+ description: Peer traffic
+ unit: B/s
+ chart_type: area
+ dimensions:
+ - name: receive
+ - name: transmit
+ - name: wireguard.peer_latest_handshake_ago
+ description: Peer time elapsed since the latest handshake
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
diff --git a/src/go/plugin/go.d/modules/wireguard/testdata/config.json b/src/go/plugin/go.d/modules/wireguard/testdata/config.json
new file mode 100644
index 000000000..0e3f7c403
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/testdata/config.json
@@ -0,0 +1,3 @@
+{
+ "update_every": 123
+}
diff --git a/src/go/plugin/go.d/modules/wireguard/testdata/config.yaml b/src/go/plugin/go.d/modules/wireguard/testdata/config.yaml
new file mode 100644
index 000000000..f21a3a7a0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/testdata/config.yaml
@@ -0,0 +1 @@
+update_every: 123
diff --git a/src/go/plugin/go.d/modules/wireguard/wireguard.go b/src/go/plugin/go.d/modules/wireguard/wireguard.go
new file mode 100644
index 000000000..fdd42e193
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/wireguard.go
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package wireguard
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "golang.zx2c4.com/wireguard/wgctrl"
+ "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("wireguard", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *WireGuard {
+ return &WireGuard{
+ newWGClient: func() (wgClient, error) { return wgctrl.New() },
+ charts: &module.Charts{},
+ devices: make(map[string]bool),
+ peers: make(map[string]bool),
+ cleanupEvery: time.Minute,
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+}
+
+type (
+ WireGuard struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ client wgClient
+ newWGClient func() (wgClient, error)
+
+ cleanupLastTime time.Time
+ cleanupEvery time.Duration
+ devices map[string]bool
+ peers map[string]bool
+ }
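+ // wgClient is the minimal subset of the wgctrl client API used by the collector; it is an interface so tests can substitute a mock.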
+ wgClient interface {
+ Devices() ([]*wgtypes.Device, error)
+ Close() error
+ }
+)
+
+func (w *WireGuard) Configuration() any {
+ return w.Config
+}
+
+func (w *WireGuard) Init() error {
+ return nil
+}
+
+func (w *WireGuard) Check() error {
+ mx, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (w *WireGuard) Charts() *module.Charts {
+ return w.charts
+}
+
+func (w *WireGuard) Collect() map[string]int64 {
+ mx, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (w *WireGuard) Cleanup() {
+ if w.client == nil {
+ return
+ }
+ if err := w.client.Close(); err != nil {
+ w.Warningf("cleanup: error on closing connection: %v", err)
+ }
+ w.client = nil
+}
diff --git a/src/go/plugin/go.d/modules/wireguard/wireguard_test.go b/src/go/plugin/go.d/modules/wireguard/wireguard_test.go
new file mode 100644
index 000000000..c9d27cbd0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/wireguard/wireguard_test.go
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package wireguard
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestWireGuard_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &WireGuard{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestWireGuard_Init(t *testing.T) {
+ assert.NoError(t, New().Init())
+}
+
+func TestWireGuard_Charts(t *testing.T) {
+ assert.Len(t, *New().Charts(), 0)
+}
+
+func TestWireGuard_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(w *WireGuard)
+ wantClose bool
+ }{
+ "after New": {
+ wantClose: false,
+ prepare: func(w *WireGuard) {},
+ },
+ "after Init": {
+ wantClose: false,
+ prepare: func(w *WireGuard) { _ = w.Init() },
+ },
+ "after Check": {
+ wantClose: true,
+ prepare: func(w *WireGuard) { _ = w.Init(); _ = w.Check() },
+ },
+ "after Collect": {
+ wantClose: true,
+ prepare: func(w *WireGuard) { _ = w.Init(); _ = w.Collect() },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ w := New()
+ m := &mockClient{}
+ w.newWGClient = func() (wgClient, error) { return m, nil }
+
+ test.prepare(w)
+
+ require.NotPanics(t, w.Cleanup)
+
+ if test.wantClose {
+ assert.True(t, m.closeCalled)
+ } else {
+ assert.False(t, m.closeCalled)
+ }
+ })
+ }
+}
+
+func TestWireGuard_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(w *WireGuard)
+ }{
+ "success when devices and peers found": {
+ wantFail: false,
+ prepare: func(w *WireGuard) {
+ m := &mockClient{}
+ d1 := prepareDevice(1)
+ d1.Peers = append(d1.Peers, preparePeer("11"))
+ d1.Peers = append(d1.Peers, preparePeer("12"))
+ m.devices = append(m.devices, d1)
+ w.client = m
+ },
+ },
+ "success when devices and no peers found": {
+ wantFail: false,
+ prepare: func(w *WireGuard) {
+ m := &mockClient{}
+ m.devices = append(m.devices, prepareDevice(1))
+ w.client = m
+ },
+ },
+ "fail when no devices and no peers found": {
+ wantFail: true,
+ prepare: func(w *WireGuard) {
+ w.client = &mockClient{}
+ },
+ },
+ "fail when error on retrieving devices": {
+ wantFail: true,
+ prepare: func(w *WireGuard) {
+ w.client = &mockClient{errOnDevices: true}
+ },
+ },
+ "fail when error on creating client": {
+ wantFail: true,
+ prepare: func(w *WireGuard) {
+ w.newWGClient = func() (wgClient, error) { return nil, errors.New("mock.newWGClient() error") }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ w := New()
+ require.NoError(t, w.Init())
+ test.prepare(w)
+
+ if test.wantFail {
+ assert.Error(t, w.Check())
+ } else {
+ assert.NoError(t, w.Check())
+ }
+ })
+ }
+}
+
+func TestWireGuard_Collect(t *testing.T) {
+ type testCaseStep struct {
+ prepareMock func(m *mockClient)
+ check func(t *testing.T, w *WireGuard)
+ }
+ tests := map[string][]testCaseStep{
+ "several devices no peers": {
+ {
+ prepareMock: func(m *mockClient) {
+ m.devices = append(m.devices, prepareDevice(1))
+ m.devices = append(m.devices, prepareDevice(2))
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ mx := w.Collect()
+
+ expected := map[string]int64{
+ "device_wg1_peers": 0,
+ "device_wg1_receive": 0,
+ "device_wg1_transmit": 0,
+ "device_wg2_peers": 0,
+ "device_wg2_receive": 0,
+ "device_wg2_transmit": 0,
+ }
+
+ copyLatestHandshake(mx, expected)
+ assert.Equal(t, expected, mx)
+ assert.Equal(t, len(deviceChartsTmpl)*2, len(*w.Charts()))
+ },
+ },
+ },
+ "several devices several peers each": {
+ {
+ prepareMock: func(m *mockClient) {
+ d1 := prepareDevice(1)
+ d1.Peers = append(d1.Peers, preparePeer("11"))
+ d1.Peers = append(d1.Peers, preparePeer("12"))
+ m.devices = append(m.devices, d1)
+
+ d2 := prepareDevice(2)
+ d2.Peers = append(d2.Peers, preparePeer("21"))
+ d2.Peers = append(d2.Peers, preparePeer("22"))
+ m.devices = append(m.devices, d2)
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ mx := w.Collect()
+
+ expected := map[string]int64{
+ "device_wg1_peers": 2,
+ "device_wg1_receive": 0,
+ "device_wg1_transmit": 0,
+ "device_wg2_peers": 2,
+ "device_wg2_receive": 0,
+ "device_wg2_transmit": 0,
+ "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60,
+ "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0,
+ "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0,
+ "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60,
+ "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0,
+ "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0,
+ "peer_wg2_cGVlcjIxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60,
+ "peer_wg2_cGVlcjIxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0,
+ "peer_wg2_cGVlcjIxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0,
+ "peer_wg2_cGVlcjIyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60,
+ "peer_wg2_cGVlcjIyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0,
+ "peer_wg2_cGVlcjIyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0,
+ }
+
+ copyLatestHandshake(mx, expected)
+ assert.Equal(t, expected, mx)
+ assert.Equal(t, len(deviceChartsTmpl)*2+len(peerChartsTmpl)*4, len(*w.Charts()))
+ },
+ },
+ },
+ "peers without last handshake time": {
+ {
+ prepareMock: func(m *mockClient) {
+ d1 := prepareDevice(1)
+ d1.Peers = append(d1.Peers, preparePeer("11"))
+ d1.Peers = append(d1.Peers, preparePeer("12"))
+ d1.Peers = append(d1.Peers, prepareNoLastHandshakePeer("13"))
+ d1.Peers = append(d1.Peers, prepareNoLastHandshakePeer("14"))
+ m.devices = append(m.devices, d1)
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ mx := w.Collect()
+
+ expected := map[string]int64{
+ "device_wg1_peers": 4,
+ "device_wg1_receive": 0,
+ "device_wg1_transmit": 0,
+ "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60,
+ "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0,
+ "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0,
+ "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60,
+ "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0,
+ "peer_wg1_cGVlcjEyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0,
+ }
+
+ copyLatestHandshake(mx, expected)
+ assert.Equal(t, expected, mx)
+ assert.Equal(t, len(deviceChartsTmpl)+len(peerChartsTmpl)*2, len(*w.Charts()))
+ },
+ },
+ },
+ "device added at runtime": {
+ {
+ prepareMock: func(m *mockClient) {
+ m.devices = append(m.devices, prepareDevice(1))
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ _ = w.Collect()
+ assert.Equal(t, len(deviceChartsTmpl)*1, len(*w.Charts()))
+ },
+ },
+ {
+ prepareMock: func(m *mockClient) {
+ m.devices = append(m.devices, prepareDevice(2))
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ mx := w.Collect()
+
+ expected := map[string]int64{
+ "device_wg1_peers": 0,
+ "device_wg1_receive": 0,
+ "device_wg1_transmit": 0,
+ "device_wg2_peers": 0,
+ "device_wg2_receive": 0,
+ "device_wg2_transmit": 0,
+ }
+ copyLatestHandshake(mx, expected)
+ assert.Equal(t, expected, mx)
+ assert.Equal(t, len(deviceChartsTmpl)*2, len(*w.Charts()))
+
+ },
+ },
+ },
+ "device removed at run time, no cleanup occurred": {
+ {
+ prepareMock: func(m *mockClient) {
+ m.devices = append(m.devices, prepareDevice(1))
+ m.devices = append(m.devices, prepareDevice(2))
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ _ = w.Collect()
+ },
+ },
+ {
+ prepareMock: func(m *mockClient) {
+ m.devices = m.devices[:len(m.devices)-1]
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ _ = w.Collect()
+ assert.Equal(t, len(deviceChartsTmpl)*2, len(*w.Charts()))
+ assert.Equal(t, 0, calcObsoleteCharts(w.Charts()))
+ },
+ },
+ },
+ "device removed at run time, cleanup occurred": {
+ {
+ prepareMock: func(m *mockClient) {
+ m.devices = append(m.devices, prepareDevice(1))
+ m.devices = append(m.devices, prepareDevice(2))
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ _ = w.Collect()
+ },
+ },
+ {
+ prepareMock: func(m *mockClient) {
+ m.devices = m.devices[:len(m.devices)-1]
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ w.cleanupEvery = time.Second
+ time.Sleep(time.Second)
+ _ = w.Collect()
+ assert.Equal(t, len(deviceChartsTmpl)*2, len(*w.Charts()))
+ assert.Equal(t, len(deviceChartsTmpl)*1, calcObsoleteCharts(w.Charts()))
+ },
+ },
+ },
+ "peer added at runtime": {
+ {
+ prepareMock: func(m *mockClient) {
+ m.devices = append(m.devices, prepareDevice(1))
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ _ = w.Collect()
+ assert.Equal(t, len(deviceChartsTmpl)*1, len(*w.Charts()))
+ },
+ },
+ {
+ prepareMock: func(m *mockClient) {
+ d1 := m.devices[0]
+ d1.Peers = append(d1.Peers, preparePeer("11"))
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ mx := w.Collect()
+
+ expected := map[string]int64{
+ "device_wg1_peers": 1,
+ "device_wg1_receive": 0,
+ "device_wg1_transmit": 0,
+ "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_latest_handshake_ago": 60,
+ "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_receive": 0,
+ "peer_wg1_cGVlcjExAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=_transmit": 0,
+ }
+ copyLatestHandshake(mx, expected)
+ assert.Equal(t, expected, mx)
+ assert.Equal(t, len(deviceChartsTmpl)*1+len(peerChartsTmpl)*1, len(*w.Charts()))
+ },
+ },
+ },
+ "peer removed at run time, no cleanup occurred": {
+ {
+ prepareMock: func(m *mockClient) {
+ d1 := prepareDevice(1)
+ d1.Peers = append(d1.Peers, preparePeer("11"))
+ d1.Peers = append(d1.Peers, preparePeer("12"))
+ m.devices = append(m.devices, d1)
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ _ = w.Collect()
+ },
+ },
+ {
+ prepareMock: func(m *mockClient) {
+ d1 := m.devices[0]
+ d1.Peers = d1.Peers[:len(d1.Peers)-1]
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ _ = w.Collect()
+ assert.Equal(t, len(deviceChartsTmpl)*1+len(peerChartsTmpl)*2, len(*w.Charts()))
+ assert.Equal(t, 0, calcObsoleteCharts(w.Charts()))
+ },
+ },
+ },
+ "peer removed at run time, cleanup occurred": {
+ {
+ prepareMock: func(m *mockClient) {
+ d1 := prepareDevice(1)
+ d1.Peers = append(d1.Peers, preparePeer("11"))
+ d1.Peers = append(d1.Peers, preparePeer("12"))
+ m.devices = append(m.devices, d1)
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ _ = w.Collect()
+ },
+ },
+ {
+ prepareMock: func(m *mockClient) {
+ d1 := m.devices[0]
+ d1.Peers = d1.Peers[:len(d1.Peers)-1]
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ w.cleanupEvery = time.Second
+ time.Sleep(time.Second)
+ _ = w.Collect()
+ assert.Equal(t, len(deviceChartsTmpl)*1+len(peerChartsTmpl)*2, len(*w.Charts()))
+ assert.Equal(t, len(peerChartsTmpl)*1, calcObsoleteCharts(w.Charts()))
+ },
+ },
+ },
+ "fails if no devices found": {
+ {
+ prepareMock: func(m *mockClient) {},
+ check: func(t *testing.T, w *WireGuard) {
+ assert.Equal(t, map[string]int64(nil), w.Collect())
+ },
+ },
+ },
+ "fails if error on getting devices list": {
+ {
+ prepareMock: func(m *mockClient) {
+ m.errOnDevices = true
+ },
+ check: func(t *testing.T, w *WireGuard) {
+ assert.Equal(t, map[string]int64(nil), w.Collect())
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ w := New()
+ require.NoError(t, w.Init())
+ m := &mockClient{}
+ w.client = m
+
+ for i, step := range test {
+ t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
+ step.prepareMock(m)
+ step.check(t, w)
+ })
+ }
+ })
+ }
+}
+
+type mockClient struct {
+ devices []*wgtypes.Device
+ errOnDevices bool
+ closeCalled bool
+}
+
+func (m *mockClient) Devices() ([]*wgtypes.Device, error) {
+ if m.errOnDevices {
+ return nil, errors.New("mock.Devices() error")
+ }
+ return m.devices, nil
+}
+
+func (m *mockClient) Close() error {
+ m.closeCalled = true
+ return nil
+}
+
+func prepareDevice(num uint8) *wgtypes.Device {
+ return &wgtypes.Device{
+ Name: fmt.Sprintf("wg%d", num),
+ }
+}
+
+func preparePeer(s string) wgtypes.Peer {
+ b := make([]byte, 32)
+ b = append(b[:0], fmt.Sprintf("peer%s", s)...)
+ k, _ := wgtypes.NewKey(b[:32])
+
+ return wgtypes.Peer{
+ PublicKey: k,
+ LastHandshakeTime: time.Now().Add(-time.Minute),
+ ReceiveBytes: 0,
+ TransmitBytes: 0,
+ }
+}
+
+func prepareNoLastHandshakePeer(s string) wgtypes.Peer {
+ p := preparePeer(s)
+ var lh time.Time
+ p.LastHandshakeTime = lh
+ return p
+}
+
+func copyLatestHandshake(dst, src map[string]int64) {
+ for k, v := range src {
+ if strings.HasSuffix(k, "latest_handshake_ago") {
+ if _, ok := dst[k]; ok {
+ dst[k] = v
+ }
+ }
+ }
+}
+
+func calcObsoleteCharts(charts *module.Charts) int {
+ var num int
+ for _, c := range *charts {
+ if c.Obsolete {
+ num++
+ }
+ }
+ return num
+}
diff --git a/src/go/plugin/go.d/modules/x509check/README.md b/src/go/plugin/go.d/modules/x509check/README.md
new file mode 120000
index 000000000..28978ccf7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/README.md
@@ -0,0 +1 @@
+integrations/x.509_certificate.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/x509check/charts.go b/src/go/plugin/go.d/modules/x509check/charts.go
new file mode 100644
index 000000000..5105c6d17
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/charts.go
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package x509check
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+var (
+ baseCharts = module.Charts{
+ timeUntilExpirationChart.Copy(),
+ }
+ withRevocationCharts = module.Charts{
+ timeUntilExpirationChart.Copy(),
+ revocationStatusChart.Copy(),
+ }
+
+ timeUntilExpirationChart = module.Chart{
+ ID: "time_until_expiration",
+ Title: "Time Until Certificate Expiration",
+ Units: "seconds",
+ Fam: "expiration time",
+ Ctx: "x509check.time_until_expiration",
+ Opts: module.Opts{StoreFirst: true},
+ Dims: module.Dims{
+ {ID: "expiry"},
+ },
+ Vars: module.Vars{
+ {ID: "days_until_expiration_warning"},
+ {ID: "days_until_expiration_critical"},
+ },
+ }
+ revocationStatusChart = module.Chart{
+ ID: "revocation_status",
+ Title: "Revocation Status",
+ Units: "boolean",
+ Fam: "revocation",
+ Ctx: "x509check.revocation_status",
+ Opts: module.Opts{StoreFirst: true},
+ Dims: module.Dims{
+ {ID: "not_revoked"},
+ {ID: "revoked"},
+ },
+ }
+)
diff --git a/src/go/plugin/go.d/modules/x509check/collect.go b/src/go/plugin/go.d/modules/x509check/collect.go
new file mode 100644
index 000000000..fc98e3a26
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/collect.go
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package x509check
+
+import (
+ "crypto/x509"
+ "fmt"
+ "time"
+
+ "github.com/cloudflare/cfssl/revoke"
+)
+
+func (x *X509Check) collect() (map[string]int64, error) {
+ certs, err := x.prov.certificates()
+ if err != nil {
+ return nil, err
+ }
+
+ if len(certs) == 0 {
+ return nil, fmt.Errorf("no certificate was provided by '%s'", x.Config.Source)
+ }
+
+ mx := make(map[string]int64)
+
+ x.collectExpiration(mx, certs)
+ if x.CheckRevocation {
+ x.collectRevocation(mx, certs)
+ }
+
+ return mx, nil
+}
+
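+// collectExpiration exposes the seconds remaining until the leaf certificate expires, plus the configured warning/critical thresholds used as chart variables.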
+func (x *X509Check) collectExpiration(mx map[string]int64, certs []*x509.Certificate) {
+ expiry := time.Until(certs[0].NotAfter).Seconds()
+ mx["expiry"] = int64(expiry)
+ mx["days_until_expiration_warning"] = x.DaysUntilWarn
+ mx["days_until_expiration_critical"] = x.DaysUntilCritical
+}
+
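+// collectRevocation checks the leaf certificate's revocation status using cfssl's revoke package; if the check cannot be completed, the error is logged at debug level and no status is reported.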
+func (x *X509Check) collectRevocation(mx map[string]int64, certs []*x509.Certificate) {
+ rev, ok, err := revoke.VerifyCertificateError(certs[0])
+ if err != nil {
+ x.Debug(err)
+ }
+ if !ok {
+ return
+ }
+
+ mx["revoked"] = 0
+ mx["not_revoked"] = 0
+
+ if rev {
+ mx["revoked"] = 1
+ } else {
+ mx["not_revoked"] = 1
+ }
+}
diff --git a/src/go/plugin/go.d/modules/x509check/config_schema.json b/src/go/plugin/go.d/modules/x509check/config_schema.json
new file mode 100644
index 000000000..7246cfa7a
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/config_schema.json
@@ -0,0 +1,114 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "X509Check collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "source": {
+ "title": "Certificate source",
+ "description": "The source of the certificate. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file, smtp.",
+ "type": "string"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the certificate retrieval.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "check_revocation_status": {
+ "title": "Revocation status check",
+ "description": "Whether to check the revocation status of the certificate.",
+ "type": "boolean"
+ },
+ "days_until_expiration_warning": {
+ "title": "Days until warning",
+ "description": "Number of days before the alarm status is set to warning.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 14
+ },
+ "days_until_expiration_critical": {
+ "title": "Days until critical",
+ "description": "Number of days before the alarm status is set to critical.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 7
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ }
+ },
+ "required": [
+ "source"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "source": {
+ "ui:placeholder": "https://example.com:443",
+ "ui:help": " Website: `https://domainName:443`. Local file: `file:///path/to/cert.pem`. SMTP: `smtp://smtp.example.com:587`."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "source",
+ "timeout",
+ "check_revocation_status",
+ "days_until_expiration_warning",
+ "days_until_expiration_critical"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/x509check/init.go b/src/go/plugin/go.d/modules/x509check/init.go
new file mode 100644
index 000000000..8d6dece2f
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/init.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package x509check
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+func (x *X509Check) validateConfig() error {
+ if x.Source == "" {
+ return errors.New("source is not set")
+ }
+ return nil
+}
+
+func (x *X509Check) initProvider() (provider, error) {
+ return newProvider(x.Config)
+}
+
+func (x *X509Check) initCharts() *module.Charts {
+ var charts *module.Charts
+ if x.CheckRevocation {
+ charts = withRevocationCharts.Copy()
+ } else {
+ charts = baseCharts.Copy()
+ }
+
+ for _, chart := range *charts {
+ chart.Labels = []module.Label{
+ {Key: "source", Value: x.Source},
+ }
+ }
+
+ return charts
+}
diff --git a/src/go/plugin/go.d/modules/x509check/integrations/x.509_certificate.md b/src/go/plugin/go.d/modules/x509check/integrations/x.509_certificate.md
new file mode 100644
index 000000000..ccbe12948
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/integrations/x.509_certificate.md
@@ -0,0 +1,260 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/x509check/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/x509check/metadata.yaml"
+sidebar_label: "X.509 certificate"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Synthetic Checks"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# X.509 certificate
+
+
+<img src="https://netdata.cloud/img/lock.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: x509check
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+
+
+This collector monitors x509 certificates' expiration time and revocation status.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per source
+
+These metrics refer to the configured source.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| source | Configured source. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| x509check.time_until_expiration | expiry | seconds |
+| x509check.revocation_status | not_revoked, revoked | boolean |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ x509check_days_until_expiration ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.time_until_expiration | Time until x509 certificate expires for ${label:source} |
+| [ x509check_revocation_status ](https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf) | x509check.revocation_status | x509 certificate revocation status for ${label:source} |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/x509check.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/x509check.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| source | Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file, smtp. | | no |
+| days_until_expiration_warning | Number of days before the alarm status is warning. | 14 | no |
+| days_until_expiration_critical | Number of days before the alarm status is critical. | 7 | no |
+| check_revocation_status | Whether to check the revocation status of the certificate. | no | no |
+| timeout | SSL connection timeout. | 2 | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Website certificate
+
+Website certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: my_site_cert
+ source: https://my_site.org:443
+
+```
+</details>
+
+##### Local file certificate
+
+Local file certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: my_file_cert
+ source: file:///home/me/cert.pem
+
+```
+</details>
+
+##### SMTP certificate
+
+SMTP certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: my_smtp_cert
+ source: smtp://smtp.my_mail.org:587
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define more than one job, their names must be unique.
+
+Check the expiration status of multiple websites' certificates.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: my_site_cert1
+ source: https://my_site1.org:443
+
+ - name: my_site_cert2
+ source: https://my_site2.org:443
+
+ - name: my_site_cert3
+ source: https://my_site3.org:443
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `x509check` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m x509check
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `x509check` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep x509check
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep x509check /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep x509check
+```
+
+
diff --git a/src/go/plugin/go.d/modules/x509check/metadata.yaml b/src/go/plugin/go.d/modules/x509check/metadata.yaml
new file mode 100644
index 000000000..e373f33d7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/metadata.yaml
@@ -0,0 +1,172 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-x509check
+ plugin_name: go.d.plugin
+ module_name: x509check
+ monitored_instance:
+ name: X.509 certificate
+ link: ""
+ categories:
+ - data-collection.synthetic-checks
+ icon_filename: lock.svg
+ keywords:
+ - x509
+ - certificate
+ most_popular: false
+ info_provided_to_referring_integrations:
+ description: ""
+ related_resources:
+ integrations:
+ list: []
+ overview:
+ data_collection:
+ metrics_description: ""
+ method_description: |
+ This collector monitors x509 certificates' expiration time and revocation status.
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: "go.d/x509check.conf"
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: source
+ description: "Certificate source. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file, smtp."
+ default_value: ""
+ required: false
+ - name: days_until_expiration_warning
+ description: Number of days before the alarm status is warning.
+ default_value: 14
+ required: false
+ - name: days_until_expiration_critical
+ description: Number of days before the alarm status is critical.
+ default_value: 7
+ required: false
+ - name: check_revocation_status
+ description: Whether to check the revocation status of the certificate.
+ default_value: false
+ required: false
+ - name: timeout
+ description: SSL connection timeout.
+ default_value: 2
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Website certificate
+ description: Website certificate.
+ config: |
+ jobs:
+ - name: my_site_cert
+ source: https://my_site.org:443
+ - name: Local file certificate
+ description: Local file certificate.
+ config: |
+ jobs:
+ - name: my_file_cert
+ source: file:///home/me/cert.pem
+ - name: SMTP certificate
+ description: SMTP certificate.
+ config: |
+ jobs:
+ - name: my_smtp_cert
+ source: smtp://smtp.my_mail.org:587
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define more than one job, their names must be unique.
+
+ Check the expiration status of multiple websites' certificates.
+ config: |
+ jobs:
+ - name: my_site_cert1
+ source: https://my_site1.org:443
+
+ - name: my_site_cert2
+ source: https://my_site2.org:443
+
+ - name: my_site_cert3
+ source: https://my_site3.org:443
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: x509check_days_until_expiration
+ metric: x509check.time_until_expiration
+ info: "Time until x509 certificate expires for ${label:source}"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf
+ - name: x509check_revocation_status
+ metric: x509check.revocation_status
+ info: "x509 certificate revocation status for ${label:source}"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/x509check.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: source
+ description: These metrics refer to the configured source.
+ labels:
+ - name: source
+ description: Configured source.
+ metrics:
+ - name: x509check.time_until_expiration
+ description: Time Until Certificate Expiration
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: expiry
+ - name: x509check.revocation_status
+ description: Revocation Status
+ unit: boolean
+ chart_type: line
+ dimensions:
+ - name: not_revoked
+ - name: revoked
diff --git a/src/go/plugin/go.d/modules/x509check/provider.go b/src/go/plugin/go.d/modules/x509check/provider.go
new file mode 100644
index 000000000..4a0635704
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/provider.go
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package x509check
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "encoding/pem"
+ "fmt"
+ "net"
+ "net/smtp"
+ "net/url"
+ "os"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+)
+
+type provider interface {
+ certificates() ([]*x509.Certificate, error)
+}
+
+type fromFile struct {
+ path string
+}
+
+type fromNet struct {
+ url *url.URL
+ tlsConfig *tls.Config
+ timeout time.Duration
+}
+
+type fromSMTP struct {
+ url *url.URL
+ tlsConfig *tls.Config
+ timeout time.Duration
+}
+
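+// newProvider selects a certificate source implementation based on the URL scheme: a local PEM file, a plain TLS endpoint (https/tcp/udp), or an SMTP server queried via STARTTLS.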
+func newProvider(config Config) (provider, error) {
+ sourceURL, err := url.Parse(config.Source)
+ if err != nil {
+ return nil, fmt.Errorf("source parse: %v", err)
+ }
+
+ tlsCfg, err := tlscfg.NewTLSConfig(config.TLSConfig)
+ if err != nil {
+ return nil, fmt.Errorf("create tls config: %v", err)
+ }
+
+ if tlsCfg == nil {
+ tlsCfg = &tls.Config{}
+ }
+ tlsCfg.ServerName = sourceURL.Hostname()
+
+ switch sourceURL.Scheme {
+ case "file":
+ return &fromFile{path: sourceURL.Path}, nil
+ case "https", "udp", "udp4", "udp6", "tcp", "tcp4", "tcp6":
+ if sourceURL.Scheme == "https" {
+ sourceURL.Scheme = "tcp"
+ }
+ return &fromNet{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration()}, nil
+ case "smtp":
+ sourceURL.Scheme = "tcp"
+ return &fromSMTP{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration()}, nil
+ default:
+ return nil, fmt.Errorf("unsupported scheme '%s'", sourceURL)
+ }
+}
+
+func (f fromFile) certificates() ([]*x509.Certificate, error) {
+ content, err := os.ReadFile(f.path)
+ if err != nil {
+ return nil, fmt.Errorf("error on reading '%s': %v", f.path, err)
+ }
+
+ block, _ := pem.Decode(content)
+ if block == nil {
+ return nil, fmt.Errorf("error on decoding '%s': %v", f.path, err)
+ }
+
+ cert, err := x509.ParseCertificate(block.Bytes)
+ if err != nil {
+ return nil, fmt.Errorf("error on parsing certificate '%s': %v", f.path, err)
+ }
+
+ return []*x509.Certificate{cert}, nil
+}
+
+func (f fromNet) certificates() ([]*x509.Certificate, error) {
+ ipConn, err := net.DialTimeout(f.url.Scheme, f.url.Host, f.timeout)
+ if err != nil {
+ return nil, fmt.Errorf("error on dial to '%s': %v", f.url, err)
+ }
+ defer func() { _ = ipConn.Close() }()
+
+ conn := tls.Client(ipConn, f.tlsConfig.Clone())
+ defer func() { _ = conn.Close() }()
+ if err := conn.Handshake(); err != nil {
+ return nil, fmt.Errorf("error on SSL handshake with '%s': %v", f.url, err)
+ }
+
+ certs := conn.ConnectionState().PeerCertificates
+ return certs, nil
+}
+
+func (f fromSMTP) certificates() ([]*x509.Certificate, error) {
+ ipConn, err := net.DialTimeout(f.url.Scheme, f.url.Host, f.timeout)
+ if err != nil {
+ return nil, fmt.Errorf("error on dial to '%s': %v", f.url, err)
+ }
+ defer func() { _ = ipConn.Close() }()
+
+ host, _, _ := net.SplitHostPort(f.url.Host)
+ smtpClient, err := smtp.NewClient(ipConn, host)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating SMTP client: %v", err)
+ }
+ defer func() { _ = smtpClient.Quit() }()
+
+ err = smtpClient.StartTLS(f.tlsConfig.Clone())
+ if err != nil {
+ return nil, fmt.Errorf("error on startTLS with '%s': %v", f.url, err)
+ }
+
+ conn, ok := smtpClient.TLSConnectionState()
+ if !ok {
+ return nil, fmt.Errorf("startTLS didn't succeed")
+ }
+ return conn.PeerCertificates, nil
+}
diff --git a/src/go/plugin/go.d/modules/x509check/testdata/config.json b/src/go/plugin/go.d/modules/x509check/testdata/config.json
new file mode 100644
index 000000000..9bb2dade4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/testdata/config.json
@@ -0,0 +1,12 @@
+{
+ "update_every": 123,
+ "source": "ok",
+ "timeout": 123.123,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "days_until_expiration_warning": 123,
+ "days_until_expiration_critical": 123,
+ "check_revocation_status": true
+}
diff --git a/src/go/plugin/go.d/modules/x509check/testdata/config.yaml b/src/go/plugin/go.d/modules/x509check/testdata/config.yaml
new file mode 100644
index 000000000..e1f273f56
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/testdata/config.yaml
@@ -0,0 +1,10 @@
+update_every: 123
+source: "ok"
+timeout: 123.123
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+days_until_expiration_warning: 123
+days_until_expiration_critical: 123
+check_revocation_status: yes
diff --git a/src/go/plugin/go.d/modules/x509check/x509check.go b/src/go/plugin/go.d/modules/x509check/x509check.go
new file mode 100644
index 000000000..c4fa70eac
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/x509check.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package x509check
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ cfssllog "github.com/cloudflare/cfssl/log"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ cfssllog.Level = cfssllog.LevelFatal
+ module.Register("x509check", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 60,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *X509Check {
+ return &X509Check{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ DaysUntilWarn: 14,
+ DaysUntilCritical: 7,
+ },
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Source string `yaml:"source" json:"source"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ DaysUntilWarn int64 `yaml:"days_until_expiration_warning,omitempty" json:"days_until_expiration_warning"`
+ DaysUntilCritical int64 `yaml:"days_until_expiration_critical,omitempty" json:"days_until_expiration_critical"`
+ CheckRevocation bool `yaml:"check_revocation_status" json:"check_revocation_status"`
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+}
+
+type X509Check struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prov provider
+}
+
+func (x *X509Check) Configuration() any {
+ return x.Config
+}
+
+func (x *X509Check) Init() error {
+ if err := x.validateConfig(); err != nil {
+ x.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prov, err := x.initProvider()
+ if err != nil {
+ x.Errorf("certificate provider init: %v", err)
+ return err
+ }
+ x.prov = prov
+
+ x.charts = x.initCharts()
+
+ return nil
+}
+
+func (x *X509Check) Check() error {
+ mx, err := x.collect()
+ if err != nil {
+ x.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (x *X509Check) Charts() *module.Charts {
+ return x.charts
+}
+
+func (x *X509Check) Collect() map[string]int64 {
+ mx, err := x.collect()
+ if err != nil {
+ x.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (x *X509Check) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/x509check/x509check_test.go b/src/go/plugin/go.d/modules/x509check/x509check_test.go
new file mode 100644
index 000000000..e0b287251
--- /dev/null
+++ b/src/go/plugin/go.d/modules/x509check/x509check_test.go
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package x509check
+
+import (
+ "crypto/x509"
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestX509Check_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &X509Check{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestX509Check_Cleanup(t *testing.T) {
+ assert.NotPanics(t, New().Cleanup)
+}
+
+func TestX509Check_Charts(t *testing.T) {
+ x509Check := New()
+ x509Check.Source = "https://example.com"
+ require.NoError(t, x509Check.Init())
+ assert.NotNil(t, x509Check.Charts())
+}
+
+func TestX509Check_Init(t *testing.T) {
+ const (
+ file = iota
+ net
+ smtp
+ )
+ tests := map[string]struct {
+ config Config
+ providerType int
+ err bool
+ }{
+ "ok from net https": {
+ config: Config{Source: "https://example.org"},
+ providerType: net,
+ },
+ "ok from net tcp": {
+ config: Config{Source: "tcp://example.org"},
+ providerType: net,
+ },
+ "ok from file": {
+ config: Config{Source: "file:///home/me/cert.pem"},
+ providerType: file,
+ },
+ "ok from smtp": {
+ config: Config{Source: "smtp://smtp.my_mail.org:587"},
+ providerType: smtp,
+ },
+ "empty source": {
+ config: Config{Source: ""},
+ err: true},
+ "unknown provider": {
+ config: Config{Source: "http://example.org"},
+ err: true,
+ },
+ "nonexistent TLSCA": {
+ config: Config{Source: "https://example.org", TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}},
+ err: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ x509Check := New()
+ x509Check.Config = test.config
+
+ if test.err {
+ assert.Error(t, x509Check.Init())
+ } else {
+ require.NoError(t, x509Check.Init())
+
+ var typeOK bool
+ switch test.providerType {
+ case file:
+ _, typeOK = x509Check.prov.(*fromFile)
+ case net:
+ _, typeOK = x509Check.prov.(*fromNet)
+ case smtp:
+ _, typeOK = x509Check.prov.(*fromSMTP)
+ }
+
+ assert.True(t, typeOK)
+ }
+ })
+ }
+}
+
+func TestX509Check_Check(t *testing.T) {
+ x509Check := New()
+ x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}}
+
+ assert.NoError(t, x509Check.Check())
+}
+
+func TestX509Check_Check_ReturnsFalseOnProviderError(t *testing.T) {
+ x509Check := New()
+ x509Check.prov = &mockProvider{err: true}
+
+ assert.Error(t, x509Check.Check())
+}
+
+func TestX509Check_Collect(t *testing.T) {
+ x509Check := New()
+ x509Check.Source = "https://example.com"
+ require.NoError(t, x509Check.Init())
+ x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}}
+
+ collected := x509Check.Collect()
+
+ assert.NotZero(t, collected)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, x509Check, collected)
+}
+
+func TestX509Check_Collect_ReturnsNilOnProviderError(t *testing.T) {
+ x509Check := New()
+ x509Check.prov = &mockProvider{err: true}
+
+ assert.Nil(t, x509Check.Collect())
+}
+
+func TestX509Check_Collect_ReturnsNilOnZeroCertificates(t *testing.T) {
+ x509Check := New()
+ x509Check.prov = &mockProvider{certs: []*x509.Certificate{}}
+ mx := x509Check.Collect()
+
+ assert.Nil(t, mx)
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, x509Check *X509Check, collected map[string]int64) {
+ for _, chart := range *x509Check.Charts() {
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+type mockProvider struct {
+ certs []*x509.Certificate
+ err bool
+}
+
+func (m mockProvider) certificates() ([]*x509.Certificate, error) {
+ if m.err {
+ return nil, errors.New("mock certificates error")
+ }
+ return m.certs, nil
+}
diff --git a/src/go/plugin/go.d/modules/zfspool/README.md b/src/go/plugin/go.d/modules/zfspool/README.md
new file mode 120000
index 000000000..8a292336d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/README.md
@@ -0,0 +1 @@
+integrations/zfs_pools.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/zfspool/charts.go b/src/go/plugin/go.d/modules/zfspool/charts.go
new file mode 100644
index 000000000..92a7d53bd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/charts.go
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zfspool
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+)
+
+const (
+ prioZpoolHealthState = 2820 + iota
+ prioVdevHealthState
+
+ prioZpoolSpaceUtilization
+ prioZpoolSpaceUsage
+
+ prioZpoolFragmentation
+)
+
+var zpoolChartsTmpl = module.Charts{
+ zpoolHealthStateChartTmpl.Copy(),
+
+ zpoolSpaceUtilizationChartTmpl.Copy(),
+ zpoolSpaceUsageChartTmpl.Copy(),
+
+ zpoolFragmentationChartTmpl.Copy(),
+}
+
+var (
+ zpoolHealthStateChartTmpl = module.Chart{
+ ID: "zfspool_%s_health_state",
+ Title: "Zpool health state",
+ Units: "state",
+ Fam: "health",
+ Ctx: "zfspool.pool_health_state",
+ Type: module.Line,
+ Priority: prioZpoolHealthState,
+ Dims: module.Dims{
+ {ID: "zpool_%s_health_state_online", Name: "online"},
+ {ID: "zpool_%s_health_state_degraded", Name: "degraded"},
+ {ID: "zpool_%s_health_state_faulted", Name: "faulted"},
+ {ID: "zpool_%s_health_state_offline", Name: "offline"},
+ {ID: "zpool_%s_health_state_unavail", Name: "unavail"},
+ {ID: "zpool_%s_health_state_removed", Name: "removed"},
+ {ID: "zpool_%s_health_state_suspended", Name: "suspended"},
+ },
+ }
+
+ zpoolSpaceUtilizationChartTmpl = module.Chart{
+ ID: "zfspool_%s_space_utilization",
+ Title: "Zpool space utilization",
+ Units: "percentage",
+ Fam: "space usage",
+ Ctx: "zfspool.pool_space_utilization",
+ Type: module.Area,
+ Priority: prioZpoolSpaceUtilization,
+ Dims: module.Dims{
+ {ID: "zpool_%s_cap", Name: "utilization"},
+ },
+ }
+ zpoolSpaceUsageChartTmpl = module.Chart{
+ ID: "zfspool_%s_space_usage",
+ Title: "Zpool space usage",
+ Units: "bytes",
+ Fam: "space usage",
+ Ctx: "zfspool.pool_space_usage",
+ Type: module.Stacked,
+ Priority: prioZpoolSpaceUsage,
+ Dims: module.Dims{
+ {ID: "zpool_%s_free", Name: "free"},
+ {ID: "zpool_%s_alloc", Name: "used"},
+ },
+ }
+
+ zpoolFragmentationChartTmpl = module.Chart{
+ ID: "zfspool_%s_fragmentation",
+ Title: "Zpool fragmentation",
+ Units: "percentage",
+ Fam: "fragmentation",
+ Ctx: "zfspool.pool_fragmentation",
+ Type: module.Line,
+ Priority: prioZpoolFragmentation,
+ Dims: module.Dims{
+ {ID: "zpool_%s_frag", Name: "fragmentation"},
+ },
+ }
+)
+
+var vdevChartsTmpl = module.Charts{
+ vdevHealthStateChartTmpl.Copy(),
+}
+
+var (
+ vdevHealthStateChartTmpl = module.Chart{
+ ID: "vdev_%s_health_state",
+ Title: "Zpool Vdev health state",
+ Units: "state",
+ Fam: "health",
+ Ctx: "zfspool.vdev_health_state",
+ Type: module.Line,
+ Priority: prioVdevHealthState,
+ Dims: module.Dims{
+ {ID: "vdev_%s_health_state_online", Name: "online"},
+ {ID: "vdev_%s_health_state_degraded", Name: "degraded"},
+ {ID: "vdev_%s_health_state_faulted", Name: "faulted"},
+ {ID: "vdev_%s_health_state_offline", Name: "offline"},
+ {ID: "vdev_%s_health_state_unavail", Name: "unavail"},
+ {ID: "vdev_%s_health_state_removed", Name: "removed"},
+ {ID: "vdev_%s_health_state_suspended", Name: "suspended"},
+ },
+ }
+)
+
+func (z *ZFSPool) addZpoolCharts(name string) {
+ charts := zpoolChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, name)
+ chart.Labels = []module.Label{
+ {Key: "pool", Value: name},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ if err := z.Charts().Add(*charts...); err != nil {
+ z.Warning(err)
+ }
+}
+
+func (z *ZFSPool) removeZpoolCharts(name string) {
+ px := fmt.Sprintf("zfspool_%s_", name)
+ z.removeCharts(px)
+}
+
+func (z *ZFSPool) addVdevCharts(pool, vdev string) {
+ charts := vdevChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, cleanVdev(vdev))
+ chart.Labels = []module.Label{
+ {Key: "pool", Value: pool},
+ {Key: "vdev", Value: vdev},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, vdev)
+ }
+ }
+
+ if err := z.Charts().Add(*charts...); err != nil {
+ z.Warning(err)
+ }
+}
+
+func (z *ZFSPool) removeVdevCharts(vdev string) {
+ px := fmt.Sprintf("vdev_%s_", cleanVdev(vdev))
+ z.removeCharts(px)
+}
+
+func (z *ZFSPool) removeCharts(px string) {
+ for _, chart := range *z.Charts() {
+ if strings.HasPrefix(chart.ID, px) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
+
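+// cleanVdev sanitizes the vdev path for use in chart IDs (dots are replaced with underscores).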
+func cleanVdev(vdev string) string {
+ r := strings.NewReplacer(".", "_")
+ return r.Replace(vdev)
+}
diff --git a/src/go/plugin/go.d/modules/zfspool/collect.go b/src/go/plugin/go.d/modules/zfspool/collect.go
new file mode 100644
index 000000000..b9b29058b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/collect.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zfspool
+
+var zpoolHealthStates = []string{
+ "online",
+ "degraded",
+ "faulted",
+ "offline",
+ "removed",
+ "unavail",
+ "suspended",
+}
+
+func (z *ZFSPool) collect() (map[string]int64, error) {
+
+ mx := make(map[string]int64)
+
+ if err := z.collectZpoolList(mx); err != nil {
+ return nil, err
+ }
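+ // A vdev listing failure still returns the pool metrics gathered so far.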
+ if err := z.collectZpoolListVdev(mx); err != nil {
+ return mx, err
+ }
+
+ return mx, nil
+}
diff --git a/src/go/plugin/go.d/modules/zfspool/collect_zpool_list.go b/src/go/plugin/go.d/modules/zfspool/collect_zpool_list.go
new file mode 100644
index 000000000..f5e1c0812
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/collect_zpool_list.go
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zfspool
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type zpoolEntry struct {
+ name string
+ sizeBytes string
+ allocBytes string
+ freeBytes string
+ fragPerc string
+ capPerc string
+ dedupRatio string
+ health string
+}
+
+func (z *ZFSPool) collectZpoolList(mx map[string]int64) error {
+ bs, err := z.exec.list()
+ if err != nil {
+ return err
+ }
+
+ zpools, err := parseZpoolListOutput(bs)
+ if err != nil {
+ return fmt.Errorf("bad zpool list output: %v", err)
+ }
+
+ seen := make(map[string]bool)
+
+ for _, zpool := range zpools {
+ seen[zpool.name] = true
+
+ if !z.seenZpools[zpool.name] {
+ z.addZpoolCharts(zpool.name)
+ z.seenZpools[zpool.name] = true
+ }
+
+ px := "zpool_" + zpool.name + "_"
+
+ if v, ok := parseInt(zpool.sizeBytes); ok {
+ mx[px+"size"] = v
+ }
+ if v, ok := parseInt(zpool.freeBytes); ok {
+ mx[px+"free"] = v
+ }
+ if v, ok := parseInt(zpool.allocBytes); ok {
+ mx[px+"alloc"] = v
+ }
+ if v, ok := parseFloat(zpool.capPerc); ok {
+ mx[px+"cap"] = int64(v)
+ }
+ if v, ok := parseFloat(zpool.fragPerc); ok {
+ mx[px+"frag"] = int64(v)
+ }
+ for _, s := range zpoolHealthStates {
+ mx[px+"health_state_"+s] = 0
+ }
+ mx[px+"health_state_"+zpool.health] = 1
+ }
+
+ for name := range z.seenZpools {
+ if !seen[name] {
+ z.removeZpoolCharts(name)
+ delete(z.seenZpools, name)
+ }
+ }
+
+ return nil
+}
+
+func parseZpoolListOutput(bs []byte) ([]zpoolEntry, error) {
+ /*
+ # zpool list -p
+ NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
+ rpool 21367462298 9051643576 12240656794 - 33 42 1.00 ONLINE -
+ zion - - - - - - - FAULTED -
+ */
+
+ var headers []string
+ var zpools []zpoolEntry
+ sc := bufio.NewScanner(bytes.NewReader(bs))
+
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+ if line == "" {
+ continue
+ }
+
+ if len(headers) == 0 {
+ if !strings.HasPrefix(line, "NAME") {
+ return nil, fmt.Errorf("missing headers (line '%s')", line)
+ }
+ headers = strings.Fields(line)
+ continue
+ }
+
+ values := strings.Fields(line)
+ if len(values) != len(headers) {
+ return nil, fmt.Errorf("unequal columns: headers(%d) != values(%d)", len(headers), len(values))
+ }
+
+ var zpool zpoolEntry
+
+ for i, v := range values {
+ v = strings.TrimSpace(v)
+ switch strings.ToLower(headers[i]) {
+ case "name":
+ zpool.name = v
+ case "size":
+ zpool.sizeBytes = v
+ case "alloc":
+ zpool.allocBytes = v
+ case "free":
+ zpool.freeBytes = v
+ case "frag":
+ zpool.fragPerc = v
+ case "cap":
+ zpool.capPerc = v
+ case "dedup":
+ zpool.dedupRatio = v
+ case "health":
+ zpool.health = strings.ToLower(v)
+ }
+ }
+
+ if zpool.name != "" && zpool.health != "" {
+ zpools = append(zpools, zpool)
+ }
+ }
+
+ if len(zpools) == 0 {
+ return nil, errors.New("no pools found")
+ }
+
+ return zpools, nil
+}
+
+func parseInt(s string) (int64, bool) {
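+ // zpool prints "-" for values that are not available.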
+ if s == "-" {
+ return 0, false
+ }
+ v, err := strconv.ParseInt(s, 10, 64)
+ return v, err == nil
+}
+
+func parseFloat(s string) (float64, bool) {
+ if s == "-" {
+ return 0, false
+ }
+ v, err := strconv.ParseFloat(s, 64)
+ return v, err == nil
+}
diff --git a/src/go/plugin/go.d/modules/zfspool/collect_zpool_list_vdev.go b/src/go/plugin/go.d/modules/zfspool/collect_zpool_list_vdev.go
new file mode 100644
index 000000000..30e1fe4e1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/collect_zpool_list_vdev.go
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zfspool
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+type vdevEntry struct {
+ name string
+ vdev string // The full path of the vdev within the zpool hierarchy.
+ health string
+
+ // Represents the nesting level of the vdev within the zpool hierarchy, based on indentation.
+ // A level of -1 indicates the root vdev (the pool itself).
+ level int
+}
+
+func (z *ZFSPool) collectZpoolListVdev(mx map[string]int64) error {
+ seen := make(map[string]bool)
+
+ for pool := range z.seenZpools {
+ bs, err := z.exec.listWithVdev(pool)
+ if err != nil {
+ return err
+ }
+
+ vdevs, err := parseZpoolListVdevOutput(bs)
+ if err != nil {
+ return fmt.Errorf("bad zpool list vdev output (pool '%s'): %v", pool, err)
+ }
+
+ for _, vdev := range vdevs {
+ if vdev.health == "" || vdev.health == "-" {
+ continue
+ }
+
+ seen[vdev.vdev] = true
+ if !z.seenVdevs[vdev.vdev] {
+ z.seenVdevs[vdev.vdev] = true
+ z.addVdevCharts(pool, vdev.vdev)
+ }
+
+ px := fmt.Sprintf("vdev_%s_", vdev.vdev)
+
+ for _, s := range zpoolHealthStates {
+ mx[px+"health_state_"+s] = 0
+ }
+ mx[px+"health_state_"+vdev.health] = 1
+ }
+ }
+
+ for name := range z.seenVdevs {
+ if !seen[name] {
+ z.removeVdevCharts(name)
+ delete(z.seenVdevs, name)
+ }
+ }
+
+ return nil
+}
+
+func parseZpoolListVdevOutput(bs []byte) ([]vdevEntry, error) {
+ var headers []string
+ var vdevs []vdevEntry
+ sc := bufio.NewScanner(bytes.NewReader(bs))
+
+ for sc.Scan() {
+ line := sc.Text()
+ if line == "" {
+ continue
+ }
+
+ if len(headers) == 0 {
+ if !strings.HasPrefix(line, "NAME") {
+ return nil, fmt.Errorf("missing headers (line '%s')", line)
+ }
+ headers = strings.Fields(line)
+ continue
+ }
+
+ values := strings.Fields(line)
+ if len(values) == 0 || len(values) > len(headers) {
+ return nil, fmt.Errorf("unexpected columns: headers(%d) values(%d) (line '%s')", len(headers), len(values), line)
+ }
+
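+ // The indentation depth (number of leading spaces) encodes how deep the vdev sits in the pool hierarchy.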
+ vdev := vdevEntry{
+ level: len(line) - len(strings.TrimLeft(line, " ")),
+ }
+
+ for i, v := range values {
+ switch strings.ToLower(headers[i]) {
+ case "name":
+ vdev.name = v
+ case "health":
+ vdev.health = strings.ToLower(v)
+ }
+ }
+
+ if vdev.name != "" {
+ if len(vdevs) == 0 {
+ vdev.level = -1 // Pool
+ }
+ vdevs = append(vdevs, vdev)
+ }
+ }
+
+ // set parent/child relationships
+ for i := range vdevs {
+ v := &vdevs[i]
+
+ switch i {
+ case 0:
+ v.vdev = v.name
+ default:
+ // find parent with a lower level
+ for j := i - 1; j >= 0; j-- {
+ if vdevs[j].level < v.level {
+ v.vdev = fmt.Sprintf("%s/%s", vdevs[j].vdev, v.name)
+ break
+ }
+ }
+ if v.vdev == "" {
+ return nil, fmt.Errorf("no parent for vdev '%s'", v.name)
+ }
+ }
+ }
+
+ // the first entry is the pool itself; require at least one vdev after it
+ if len(vdevs) < 2 {
+ return nil, fmt.Errorf("no vdevs found")
+ }
+
+ return vdevs[1:], nil
+}
diff --git a/src/go/plugin/go.d/modules/zfspool/config_schema.json b/src/go/plugin/go.d/modules/zfspool/config_schema.json
new file mode 100644
index 000000000..fcfcff1d4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/config_schema.json
@@ -0,0 +1,47 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ZFS Pools collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "binary_path": {
+ "title": "Binary path",
+ "description": "Path to the `zpool` binary.",
+ "type": "string",
+ "default": "/usr/bin/zpool"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ }
+ },
+ "required": [
+ "binary_path"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "binary_path": {
+ "ui:help": "If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable."
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/zfspool/exec.go b/src/go/plugin/go.d/modules/zfspool/exec.go
new file mode 100644
index 000000000..1a2bcf203
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/exec.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zfspool
+
+import (
+ "context"
+ "fmt"
+ "os/exec"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+func newZpoolCLIExec(binPath string, timeout time.Duration) *zpoolCLIExec {
+ return &zpoolCLIExec{
+ binPath: binPath,
+ timeout: timeout,
+ }
+}
+
+type zpoolCLIExec struct {
+ *logger.Logger
+
+ binPath string
+ timeout time.Duration
+}
+
+func (e *zpoolCLIExec) list() ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
+ cmd := exec.CommandContext(ctx, e.binPath, "list", "-p")
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
+
+func (e *zpoolCLIExec) listWithVdev(pool string) ([]byte, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), e.timeout)
+ defer cancel()
+
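+ // -p: parsable (exact) values, -v: include vdevs, -L: display real device paths (resolve symlinks).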
+ cmd := exec.CommandContext(ctx, e.binPath, "list", "-p", "-v", "-L", pool)
+ e.Debugf("executing '%s'", cmd)
+
+ bs, err := cmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ }
+
+ return bs, nil
+}
diff --git a/src/go/plugin/go.d/modules/zfspool/init.go b/src/go/plugin/go.d/modules/zfspool/init.go
new file mode 100644
index 000000000..f640801dd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/init.go
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zfspool
+
+import (
+ "errors"
+ "os"
+ "os/exec"
+ "strings"
+)
+
+func (z *ZFSPool) validateConfig() error {
+ if z.BinaryPath == "" {
+ return errors.New("no zpool binary path specified")
+ }
+ return nil
+}
+
+func (z *ZFSPool) initZPoolCLIExec() (zpoolCLI, error) {
+ binPath := z.BinaryPath
+
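+ // A bare command name is resolved via $PATH; an absolute path is used as-is.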
+ if !strings.HasPrefix(binPath, "/") {
+ path, err := exec.LookPath(binPath)
+ if err != nil {
+ return nil, err
+ }
+ binPath = path
+ }
+
+ if _, err := os.Stat(binPath); err != nil {
+ return nil, err
+ }
+
+ zpoolExec := newZpoolCLIExec(binPath, z.Timeout.Duration())
+ zpoolExec.Logger = z.Logger
+
+ return zpoolExec, nil
+}
diff --git a/src/go/plugin/go.d/modules/zfspool/integrations/zfs_pools.md b/src/go/plugin/go.d/modules/zfspool/integrations/zfs_pools.md
new file mode 100644
index 000000000..060e4fb71
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/integrations/zfs_pools.md
@@ -0,0 +1,222 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/zfspool/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/zfspool/metadata.yaml"
+sidebar_label: "ZFS Pools"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ZFS Pools
+
+
+<img src="https://netdata.cloud/img/filesystem.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: zfspool
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the health and space usage of ZFS pools using the command line tool [zpool](https://openzfs.github.io/openzfs-docs/man/master/8/zpool-list.8.html).
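+
+Under the hood, the collector periodically executes `zpool list -p` for pool-level data and `zpool list -p -v -L <pool>` for per-vdev health. You can run the same commands manually to preview the data it parses (replace `rpool` with one of your pool names):
+
+```bash
+# pool-level capacity, fragmentation and health
+zpool list -p
+
+# per-vdev health for a single pool
+zpool list -p -v -L rpool
+```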
+
+
+
+
+This collector is supported on all platforms.
+
+This collector only supports collecting metrics from a single instance of this integration.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+This integration doesn't support auto-detection.
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per zfs pool
+
+These metrics refer to the ZFS pool.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| pool | Zpool name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| zfspool.pool_space_utilization | utilization | % |
+| zfspool.pool_space_usage | free, used | bytes |
+| zfspool.pool_fragmentation | fragmentation | % |
+| zfspool.pool_health_state | online, degraded, faulted, offline, unavail, removed, suspended | state |
+
+### Per zfs pool vdev
+
+These metrics refer to the ZFS pool virtual device.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| pool | Zpool name |
+| vdev | Unique identifier for a virtual device (vdev) within a ZFS pool. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| zfspool.vdev_health_state | online, degraded, faulted, offline, unavail, removed, suspended | state |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ zfs_pool_space_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_space_utilization | ZFS pool ${label:pool} is nearing capacity. Current space usage is above the threshold. |
+| [ zfs_pool_health_state_warn ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is degraded |
+| [ zfs_pool_health_state_crit ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.pool_health_state | ZFS pool ${label:pool} state is faulted or unavail |
+| [ zfs_vdev_health_state ](https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf) | zfspool.vdev_health_state | ZFS vdev ${label:vdev} state is faulted or degraded |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/zfspool.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/zfspool.conf
+```
+#### Options
+
+The following options can be defined globally: update_every.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 10 | no |
+| binary_path | Path to the `zpool` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable. | /usr/bin/zpool | yes |
+| timeout | Timeout for executing the binary, specified in seconds. | 2 | no |
+
+</details>
+
+#### Examples
+
+##### Custom binary path
+
+The executable is not in the directories specified in the PATH environment variable.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: zfspool
+ binary_path: /usr/local/sbin/zpool
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `zfspool` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m zfspool
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `zfspool` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep zfspool
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep zfspool /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep zfspool
+```
+
+
diff --git a/src/go/plugin/go.d/modules/zfspool/metadata.yaml b/src/go/plugin/go.d/modules/zfspool/metadata.yaml
new file mode 100644
index 000000000..21cc307ca
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/metadata.yaml
@@ -0,0 +1,162 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-zfspool
+ plugin_name: go.d.plugin
+ module_name: zfspool
+ monitored_instance:
+ name: ZFS Pools
+ link: ""
+ icon_filename: filesystem.svg
+ categories:
+ - data-collection.storage-mount-points-and-filesystems
+ keywords:
+ - zfs pools
+ - pools
+ - zfs
+ - filesystem
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: >
+ This collector monitors the health and space usage of ZFS pools using the command line
+ tool [zpool](https://openzfs.github.io/openzfs-docs/man/master/8/zpool-list.8.html).
+ method_description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: false
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/zfspool.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 10
+ required: false
+ - name: binary_path
+ description: Path to the `zpool` binary. If an absolute path is provided, the collector will use it directly; otherwise, it will search for the binary in directories specified in the PATH environment variable.
+ default_value: /usr/bin/zpool
+ required: true
+ - name: timeout
+ description: Timeout for executing the binary, specified in seconds.
+ default_value: 2
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Custom binary path
+ description: The executable is not in the directories specified in the PATH environment variable.
+ config: |
+ jobs:
+ - name: zfspool
+ binary_path: /usr/local/sbin/zpool
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: zfs_pool_space_utilization
+ metric: zfspool.pool_space_utilization
+ info: "ZFS pool ${label:pool} is nearing capacity. Current space usage is above the threshold."
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf
+ - name: zfs_pool_health_state_warn
+ metric: zfspool.pool_health_state
+ info: "ZFS pool ${label:pool} state is degraded"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf
+ - name: zfs_pool_health_state_crit
+ metric: zfspool.pool_health_state
+ info: "ZFS pool ${label:pool} state is faulted or unavail"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf
+ - name: zfs_vdev_health_state
+ metric: zfspool.vdev_health_state
+ info: "ZFS vdev ${label:vdev} state is faulted or degraded"
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/zfs.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: zfs pool
+ description: These metrics refer to the ZFS pool.
+ labels:
+ - name: pool
+ description: Zpool name
+ metrics:
+ - name: zfspool.pool_space_utilization
+ description: Zpool space utilization
+ unit: '%'
+ chart_type: area
+ dimensions:
+ - name: utilization
+ - name: zfspool.pool_space_usage
+ description: Zpool space usage
+ unit: 'bytes'
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: zfspool.pool_fragmentation
+ description: Zpool fragmentation
+ unit: '%'
+ chart_type: line
+ dimensions:
+ - name: fragmentation
+ - name: zfspool.pool_health_state
+ description: Zpool health state
+ unit: 'state'
+ chart_type: line
+ dimensions:
+ - name: online
+ - name: degraded
+ - name: faulted
+ - name: offline
+ - name: unavail
+ - name: removed
+ - name: suspended
+ - name: zfs pool vdev
+ description: These metrics refer to the ZFS pool virtual device.
+ labels:
+ - name: pool
+ description: Zpool name
+ - name: vdev
+ description: Unique identifier for a virtual device (vdev) within a ZFS pool.
+ metrics:
+ - name: zfspool.vdev_health_state
+ description: Zpool Vdev health state
+ unit: 'state'
+ chart_type: line
+ dimensions:
+ - name: online
+ - name: degraded
+ - name: faulted
+ - name: offline
+ - name: unavail
+ - name: removed
+ - name: suspended
diff --git a/src/go/plugin/go.d/modules/zfspool/testdata/config.json b/src/go/plugin/go.d/modules/zfspool/testdata/config.json
new file mode 100644
index 000000000..095713193
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "binary_path": "ok"
+}
diff --git a/src/go/plugin/go.d/modules/zfspool/testdata/config.yaml b/src/go/plugin/go.d/modules/zfspool/testdata/config.yaml
new file mode 100644
index 000000000..baf3bcd0b
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+timeout: 123.123
+binary_path: "ok"
diff --git a/src/go/plugin/go.d/modules/zfspool/testdata/zpool-list-vdev-logs-cache.txt b/src/go/plugin/go.d/modules/zfspool/testdata/zpool-list-vdev-logs-cache.txt
new file mode 100644
index 000000000..061ca6ccd
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/testdata/zpool-list-vdev-logs-cache.txt
@@ -0,0 +1,12 @@
+NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
+rpool 9981503995904 3046188658688 6935315337216 - - 9 30 1.00 DEGRADED -
+ mirror-0 9981503995904 3046188658688 6935315337216 - - 9 30 - ONLINE
+ sdc2 9998683602944 - - - - - - - ONLINE
+ sdd2 9998683602944 - - - - - - - ONLINE
+logs - - - - - - - - -
+ mirror-1 17716740096 393216 17716346880 - - 0 0 - DEGRADED
+ sdb1 17951621120 - - - - - - - ONLINE
+ 14807975228228307538 - - - - - - - - UNAVAIL
+cache - - - - - - - - -
+ sdb2 99000254464 98755866624 239665152 - - 0 99 - ONLINE
+ wwn-0x500151795954c095-part2 - - - - - - - - UNAVAIL
diff --git a/src/go/plugin/go.d/modules/zfspool/testdata/zpool-list-vdev.txt b/src/go/plugin/go.d/modules/zfspool/testdata/zpool-list-vdev.txt
new file mode 100644
index 000000000..ff78f8df0
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/testdata/zpool-list-vdev.txt
@@ -0,0 +1,5 @@
+NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
+rpool 3985729650688 1647130456064 2338599194624 - - 55 41 1.00 ONLINE -
+ mirror-0 3985729650688 1647130456064 2338599194624 - - 55 41 - ONLINE
+ nvme2n1p3 4000249020416 - - - - - - - ONLINE
+ nvme0n1p3 4000249020416 - - - - - - - ONLINE
diff --git a/src/go/plugin/go.d/modules/zfspool/testdata/zpool-list.txt b/src/go/plugin/go.d/modules/zfspool/testdata/zpool-list.txt
new file mode 100644
index 000000000..06d9915c2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/testdata/zpool-list.txt
@@ -0,0 +1,3 @@
+NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
+rpool 21367462298 9051643576 12240656794 - 33 42 1.00 ONLINE -
+zion - - - - - - - FAULTED -
diff --git a/src/go/plugin/go.d/modules/zfspool/zfspool.go b/src/go/plugin/go.d/modules/zfspool/zfspool.go
new file mode 100644
index 000000000..02f1f7ce9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/zfspool.go
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zfspool
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("zfspool", module.Creator{
+ JobConfigSchema: configSchema,
+ Defaults: module.Defaults{
+ UpdateEvery: 10,
+ },
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *ZFSPool {
+ return &ZFSPool{
+ Config: Config{
+ BinaryPath: "/usr/bin/zpool",
+ Timeout: web.Duration(time.Second * 2),
+ },
+ charts: &module.Charts{},
+ seenZpools: make(map[string]bool),
+ seenVdevs: make(map[string]bool),
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ BinaryPath string `yaml:"binary_path,omitempty" json:"binary_path"`
+}
+
+type (
+ ZFSPool struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ exec zpoolCLI
+
+ seenZpools map[string]bool
+ seenVdevs map[string]bool
+ }
+ zpoolCLI interface {
+ list() ([]byte, error)
+ listWithVdev(pool string) ([]byte, error)
+ }
+)
+
+func (z *ZFSPool) Configuration() any {
+ return z.Config
+}
+
+func (z *ZFSPool) Init() error {
+ if err := z.validateConfig(); err != nil {
+ z.Errorf("config validation: %s", err)
+ return err
+ }
+
+ zpoolExec, err := z.initZPoolCLIExec()
+ if err != nil {
+ z.Errorf("zpool exec initialization: %v", err)
+ return err
+ }
+ z.exec = zpoolExec
+
+ return nil
+}
+
+func (z *ZFSPool) Check() error {
+ mx, err := z.collect()
+ if err != nil {
+ z.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (z *ZFSPool) Charts() *module.Charts {
+ return z.charts
+}
+
+func (z *ZFSPool) Collect() map[string]int64 {
+ mx, err := z.collect()
+ if err != nil {
+ z.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (z *ZFSPool) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/zfspool/zfspool_test.go b/src/go/plugin/go.d/modules/zfspool/zfspool_test.go
new file mode 100644
index 000000000..bf64d1713
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zfspool/zfspool_test.go
@@ -0,0 +1,546 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zfspool
+
+import (
+ "errors"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataZpoolList, _ = os.ReadFile("testdata/zpool-list.txt")
+ dataZpoolListWithVdev, _ = os.ReadFile("testdata/zpool-list-vdev.txt")
+ dataZpoolListWithVdevLogsCache, _ = os.ReadFile("testdata/zpool-list-vdev-logs-cache.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+
+ "dataZpoolList": dataZpoolList,
+ "dataZpoolListWithVdev": dataZpoolListWithVdev,
+ "dataZpoolListWithVdevLogsCache": dataZpoolListWithVdevLogsCache,
+ } {
+ require.NotNil(t, data, name)
+
+ }
+}
+
+func TestZFSPool_Configuration(t *testing.T) {
+ module.TestConfigurationSerialize(t, &ZFSPool{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestZFSPool_Init(t *testing.T) {
+ tests := map[string]struct {
+ config Config
+ wantFail bool
+ }{
+ "fails if 'binary_path' is not set": {
+ wantFail: true,
+ config: Config{
+ BinaryPath: "",
+ },
+ },
+ "fails if failed to find binary": {
+ wantFail: true,
+ config: Config{
+ BinaryPath: "zpool!!!",
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ zp := New()
+ zp.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, zp.Init())
+ } else {
+ assert.NoError(t, zp.Init())
+ }
+ })
+ }
+}
+
+func TestZFSPool_Cleanup(t *testing.T) {
+ tests := map[string]struct {
+ prepare func() *ZFSPool
+ }{
+ "not initialized exec": {
+ prepare: func() *ZFSPool {
+ return New()
+ },
+ },
+ "after check": {
+ prepare: func() *ZFSPool {
+ zp := New()
+ zp.exec = prepareMockOk()
+ _ = zp.Check()
+ return zp
+ },
+ },
+ "after collect": {
+ prepare: func() *ZFSPool {
+ zp := New()
+ zp.exec = prepareMockOk()
+ _ = zp.Collect()
+ return zp
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ zp := test.prepare()
+
+ assert.NotPanics(t, zp.Cleanup)
+ })
+ }
+}
+
+func TestZFSPool_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestZFSPool_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockZpoolCLIExec
+ wantFail bool
+ }{
+ "success case": {
+ prepareMock: prepareMockOk,
+ wantFail: false,
+ },
+ "error on list call": {
+ prepareMock: prepareMockErrOnList,
+ wantFail: true,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantFail: true,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ zp := New()
+ mock := test.prepareMock()
+ zp.exec = mock
+
+ if test.wantFail {
+ assert.Error(t, zp.Check())
+ } else {
+ assert.NoError(t, zp.Check())
+ }
+ })
+ }
+}
+
+func TestZFSPool_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func() *mockZpoolCLIExec
+ wantMetrics map[string]int64
+ }{
+ "success case": {
+ prepareMock: prepareMockOk,
+ wantMetrics: map[string]int64{
+ "vdev_rpool/mirror-0/nvme0n1p3_health_state_degraded": 0,
+ "vdev_rpool/mirror-0/nvme0n1p3_health_state_faulted": 0,
+ "vdev_rpool/mirror-0/nvme0n1p3_health_state_offline": 0,
+ "vdev_rpool/mirror-0/nvme0n1p3_health_state_online": 1,
+ "vdev_rpool/mirror-0/nvme0n1p3_health_state_removed": 0,
+ "vdev_rpool/mirror-0/nvme0n1p3_health_state_suspended": 0,
+ "vdev_rpool/mirror-0/nvme0n1p3_health_state_unavail": 0,
+ "vdev_rpool/mirror-0/nvme2n1p3_health_state_degraded": 0,
+ "vdev_rpool/mirror-0/nvme2n1p3_health_state_faulted": 0,
+ "vdev_rpool/mirror-0/nvme2n1p3_health_state_offline": 0,
+ "vdev_rpool/mirror-0/nvme2n1p3_health_state_online": 1,
+ "vdev_rpool/mirror-0/nvme2n1p3_health_state_removed": 0,
+ "vdev_rpool/mirror-0/nvme2n1p3_health_state_suspended": 0,
+ "vdev_rpool/mirror-0/nvme2n1p3_health_state_unavail": 0,
+ "vdev_rpool/mirror-0_health_state_degraded": 0,
+ "vdev_rpool/mirror-0_health_state_faulted": 0,
+ "vdev_rpool/mirror-0_health_state_offline": 0,
+ "vdev_rpool/mirror-0_health_state_online": 1,
+ "vdev_rpool/mirror-0_health_state_removed": 0,
+ "vdev_rpool/mirror-0_health_state_suspended": 0,
+ "vdev_rpool/mirror-0_health_state_unavail": 0,
+ "vdev_zion/mirror-0/nvme0n1p3_health_state_degraded": 0,
+ "vdev_zion/mirror-0/nvme0n1p3_health_state_faulted": 0,
+ "vdev_zion/mirror-0/nvme0n1p3_health_state_offline": 0,
+ "vdev_zion/mirror-0/nvme0n1p3_health_state_online": 1,
+ "vdev_zion/mirror-0/nvme0n1p3_health_state_removed": 0,
+ "vdev_zion/mirror-0/nvme0n1p3_health_state_suspended": 0,
+ "vdev_zion/mirror-0/nvme0n1p3_health_state_unavail": 0,
+ "vdev_zion/mirror-0/nvme2n1p3_health_state_degraded": 0,
+ "vdev_zion/mirror-0/nvme2n1p3_health_state_faulted": 0,
+ "vdev_zion/mirror-0/nvme2n1p3_health_state_offline": 0,
+ "vdev_zion/mirror-0/nvme2n1p3_health_state_online": 1,
+ "vdev_zion/mirror-0/nvme2n1p3_health_state_removed": 0,
+ "vdev_zion/mirror-0/nvme2n1p3_health_state_suspended": 0,
+ "vdev_zion/mirror-0/nvme2n1p3_health_state_unavail": 0,
+ "vdev_zion/mirror-0_health_state_degraded": 0,
+ "vdev_zion/mirror-0_health_state_faulted": 0,
+ "vdev_zion/mirror-0_health_state_offline": 0,
+ "vdev_zion/mirror-0_health_state_online": 1,
+ "vdev_zion/mirror-0_health_state_removed": 0,
+ "vdev_zion/mirror-0_health_state_suspended": 0,
+ "vdev_zion/mirror-0_health_state_unavail": 0,
+ "zpool_rpool_alloc": 9051643576,
+ "zpool_rpool_cap": 42,
+ "zpool_rpool_frag": 33,
+ "zpool_rpool_free": 12240656794,
+ "zpool_rpool_health_state_degraded": 0,
+ "zpool_rpool_health_state_faulted": 0,
+ "zpool_rpool_health_state_offline": 0,
+ "zpool_rpool_health_state_online": 1,
+ "zpool_rpool_health_state_removed": 0,
+ "zpool_rpool_health_state_suspended": 0,
+ "zpool_rpool_health_state_unavail": 0,
+ "zpool_rpool_size": 21367462298,
+ "zpool_zion_health_state_degraded": 0,
+ "zpool_zion_health_state_faulted": 1,
+ "zpool_zion_health_state_offline": 0,
+ "zpool_zion_health_state_online": 0,
+ "zpool_zion_health_state_removed": 0,
+ "zpool_zion_health_state_suspended": 0,
+ "zpool_zion_health_state_unavail": 0,
+ },
+ },
+ "success case vdev logs and cache": {
+ prepareMock: prepareMockOkVdevLogsCache,
+ wantMetrics: map[string]int64{
+ "vdev_rpool/cache/sdb2_health_state_degraded": 0,
+ "vdev_rpool/cache/sdb2_health_state_faulted": 0,
+ "vdev_rpool/cache/sdb2_health_state_offline": 0,
+ "vdev_rpool/cache/sdb2_health_state_online": 1,
+ "vdev_rpool/cache/sdb2_health_state_removed": 0,
+ "vdev_rpool/cache/sdb2_health_state_suspended": 0,
+ "vdev_rpool/cache/sdb2_health_state_unavail": 0,
+ "vdev_rpool/cache/wwn-0x500151795954c095-part2_health_state_degraded": 0,
+ "vdev_rpool/cache/wwn-0x500151795954c095-part2_health_state_faulted": 0,
+ "vdev_rpool/cache/wwn-0x500151795954c095-part2_health_state_offline": 0,
+ "vdev_rpool/cache/wwn-0x500151795954c095-part2_health_state_online": 0,
+ "vdev_rpool/cache/wwn-0x500151795954c095-part2_health_state_removed": 0,
+ "vdev_rpool/cache/wwn-0x500151795954c095-part2_health_state_suspended": 0,
+ "vdev_rpool/cache/wwn-0x500151795954c095-part2_health_state_unavail": 1,
+ "vdev_rpool/logs/mirror-1/14807975228228307538_health_state_degraded": 0,
+ "vdev_rpool/logs/mirror-1/14807975228228307538_health_state_faulted": 0,
+ "vdev_rpool/logs/mirror-1/14807975228228307538_health_state_offline": 0,
+ "vdev_rpool/logs/mirror-1/14807975228228307538_health_state_online": 0,
+ "vdev_rpool/logs/mirror-1/14807975228228307538_health_state_removed": 0,
+ "vdev_rpool/logs/mirror-1/14807975228228307538_health_state_suspended": 0,
+ "vdev_rpool/logs/mirror-1/14807975228228307538_health_state_unavail": 1,
+ "vdev_rpool/logs/mirror-1/sdb1_health_state_degraded": 0,
+ "vdev_rpool/logs/mirror-1/sdb1_health_state_faulted": 0,
+ "vdev_rpool/logs/mirror-1/sdb1_health_state_offline": 0,
+ "vdev_rpool/logs/mirror-1/sdb1_health_state_online": 1,
+ "vdev_rpool/logs/mirror-1/sdb1_health_state_removed": 0,
+ "vdev_rpool/logs/mirror-1/sdb1_health_state_suspended": 0,
+ "vdev_rpool/logs/mirror-1/sdb1_health_state_unavail": 0,
+ "vdev_rpool/logs/mirror-1_health_state_degraded": 1,
+ "vdev_rpool/logs/mirror-1_health_state_faulted": 0,
+ "vdev_rpool/logs/mirror-1_health_state_offline": 0,
+ "vdev_rpool/logs/mirror-1_health_state_online": 0,
+ "vdev_rpool/logs/mirror-1_health_state_removed": 0,
+ "vdev_rpool/logs/mirror-1_health_state_suspended": 0,
+ "vdev_rpool/logs/mirror-1_health_state_unavail": 0,
+ "vdev_rpool/mirror-0/sdc2_health_state_degraded": 0,
+ "vdev_rpool/mirror-0/sdc2_health_state_faulted": 0,
+ "vdev_rpool/mirror-0/sdc2_health_state_offline": 0,
+ "vdev_rpool/mirror-0/sdc2_health_state_online": 1,
+ "vdev_rpool/mirror-0/sdc2_health_state_removed": 0,
+ "vdev_rpool/mirror-0/sdc2_health_state_suspended": 0,
+ "vdev_rpool/mirror-0/sdc2_health_state_unavail": 0,
+ "vdev_rpool/mirror-0/sdd2_health_state_degraded": 0,
+ "vdev_rpool/mirror-0/sdd2_health_state_faulted": 0,
+ "vdev_rpool/mirror-0/sdd2_health_state_offline": 0,
+ "vdev_rpool/mirror-0/sdd2_health_state_online": 1,
+ "vdev_rpool/mirror-0/sdd2_health_state_removed": 0,
+ "vdev_rpool/mirror-0/sdd2_health_state_suspended": 0,
+ "vdev_rpool/mirror-0/sdd2_health_state_unavail": 0,
+ "vdev_rpool/mirror-0_health_state_degraded": 0,
+ "vdev_rpool/mirror-0_health_state_faulted": 0,
+ "vdev_rpool/mirror-0_health_state_offline": 0,
+ "vdev_rpool/mirror-0_health_state_online": 1,
+ "vdev_rpool/mirror-0_health_state_removed": 0,
+ "vdev_rpool/mirror-0_health_state_suspended": 0,
+ "vdev_rpool/mirror-0_health_state_unavail": 0,
+ "vdev_zion/cache/sdb2_health_state_degraded": 0,
+ "vdev_zion/cache/sdb2_health_state_faulted": 0,
+ "vdev_zion/cache/sdb2_health_state_offline": 0,
+ "vdev_zion/cache/sdb2_health_state_online": 1,
+ "vdev_zion/cache/sdb2_health_state_removed": 0,
+ "vdev_zion/cache/sdb2_health_state_suspended": 0,
+ "vdev_zion/cache/sdb2_health_state_unavail": 0,
+ "vdev_zion/cache/wwn-0x500151795954c095-part2_health_state_degraded": 0,
+ "vdev_zion/cache/wwn-0x500151795954c095-part2_health_state_faulted": 0,
+ "vdev_zion/cache/wwn-0x500151795954c095-part2_health_state_offline": 0,
+ "vdev_zion/cache/wwn-0x500151795954c095-part2_health_state_online": 0,
+ "vdev_zion/cache/wwn-0x500151795954c095-part2_health_state_removed": 0,
+ "vdev_zion/cache/wwn-0x500151795954c095-part2_health_state_suspended": 0,
+ "vdev_zion/cache/wwn-0x500151795954c095-part2_health_state_unavail": 1,
+ "vdev_zion/logs/mirror-1/14807975228228307538_health_state_degraded": 0,
+ "vdev_zion/logs/mirror-1/14807975228228307538_health_state_faulted": 0,
+ "vdev_zion/logs/mirror-1/14807975228228307538_health_state_offline": 0,
+ "vdev_zion/logs/mirror-1/14807975228228307538_health_state_online": 0,
+ "vdev_zion/logs/mirror-1/14807975228228307538_health_state_removed": 0,
+ "vdev_zion/logs/mirror-1/14807975228228307538_health_state_suspended": 0,
+ "vdev_zion/logs/mirror-1/14807975228228307538_health_state_unavail": 1,
+ "vdev_zion/logs/mirror-1/sdb1_health_state_degraded": 0,
+ "vdev_zion/logs/mirror-1/sdb1_health_state_faulted": 0,
+ "vdev_zion/logs/mirror-1/sdb1_health_state_offline": 0,
+ "vdev_zion/logs/mirror-1/sdb1_health_state_online": 1,
+ "vdev_zion/logs/mirror-1/sdb1_health_state_removed": 0,
+ "vdev_zion/logs/mirror-1/sdb1_health_state_suspended": 0,
+ "vdev_zion/logs/mirror-1/sdb1_health_state_unavail": 0,
+ "vdev_zion/logs/mirror-1_health_state_degraded": 1,
+ "vdev_zion/logs/mirror-1_health_state_faulted": 0,
+ "vdev_zion/logs/mirror-1_health_state_offline": 0,
+ "vdev_zion/logs/mirror-1_health_state_online": 0,
+ "vdev_zion/logs/mirror-1_health_state_removed": 0,
+ "vdev_zion/logs/mirror-1_health_state_suspended": 0,
+ "vdev_zion/logs/mirror-1_health_state_unavail": 0,
+ "vdev_zion/mirror-0/sdc2_health_state_degraded": 0,
+ "vdev_zion/mirror-0/sdc2_health_state_faulted": 0,
+ "vdev_zion/mirror-0/sdc2_health_state_offline": 0,
+ "vdev_zion/mirror-0/sdc2_health_state_online": 1,
+ "vdev_zion/mirror-0/sdc2_health_state_removed": 0,
+ "vdev_zion/mirror-0/sdc2_health_state_suspended": 0,
+ "vdev_zion/mirror-0/sdc2_health_state_unavail": 0,
+ "vdev_zion/mirror-0/sdd2_health_state_degraded": 0,
+ "vdev_zion/mirror-0/sdd2_health_state_faulted": 0,
+ "vdev_zion/mirror-0/sdd2_health_state_offline": 0,
+ "vdev_zion/mirror-0/sdd2_health_state_online": 1,
+ "vdev_zion/mirror-0/sdd2_health_state_removed": 0,
+ "vdev_zion/mirror-0/sdd2_health_state_suspended": 0,
+ "vdev_zion/mirror-0/sdd2_health_state_unavail": 0,
+ "vdev_zion/mirror-0_health_state_degraded": 0,
+ "vdev_zion/mirror-0_health_state_faulted": 0,
+ "vdev_zion/mirror-0_health_state_offline": 0,
+ "vdev_zion/mirror-0_health_state_online": 1,
+ "vdev_zion/mirror-0_health_state_removed": 0,
+ "vdev_zion/mirror-0_health_state_suspended": 0,
+ "vdev_zion/mirror-0_health_state_unavail": 0,
+ "zpool_rpool_alloc": 9051643576,
+ "zpool_rpool_cap": 42,
+ "zpool_rpool_frag": 33,
+ "zpool_rpool_free": 12240656794,
+ "zpool_rpool_health_state_degraded": 0,
+ "zpool_rpool_health_state_faulted": 0,
+ "zpool_rpool_health_state_offline": 0,
+ "zpool_rpool_health_state_online": 1,
+ "zpool_rpool_health_state_removed": 0,
+ "zpool_rpool_health_state_suspended": 0,
+ "zpool_rpool_health_state_unavail": 0,
+ "zpool_rpool_size": 21367462298,
+ "zpool_zion_health_state_degraded": 0,
+ "zpool_zion_health_state_faulted": 1,
+ "zpool_zion_health_state_offline": 0,
+ "zpool_zion_health_state_online": 0,
+ "zpool_zion_health_state_removed": 0,
+ "zpool_zion_health_state_suspended": 0,
+ "zpool_zion_health_state_unavail": 0,
+ },
+ },
+ "error on list call": {
+ prepareMock: prepareMockErrOnList,
+ wantMetrics: nil,
+ },
+ "empty response": {
+ prepareMock: prepareMockEmptyResponse,
+ wantMetrics: nil,
+ },
+ "unexpected response": {
+ prepareMock: prepareMockUnexpectedResponse,
+ wantMetrics: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ zp := New()
+ mock := test.prepareMock()
+ zp.exec = mock
+
+ mx := zp.Collect()
+
+ assert.Equal(t, test.wantMetrics, mx)
+
+ if len(test.wantMetrics) > 0 {
+ want := len(zpoolChartsTmpl)*len(zp.seenZpools) + len(vdevChartsTmpl)*len(zp.seenVdevs)
+
+ assert.Len(t, *zp.Charts(), want, "want charts")
+
+ module.TestMetricsHasAllChartsDimsSkip(t, zp.Charts(), mx, func(chart *module.Chart) bool {
+ return strings.HasPrefix(chart.ID, "zfspool_zion") && !strings.HasSuffix(chart.ID, "health_state")
+ })
+ }
+ })
+ }
+}
+
+func TestZFSPool_parseZpoolListDevOutput(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ want []vdevEntry
+ }{
+ "": {
+ input: `
+NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
+store 9981503995904 3046188658688 6935315337216 - - 9 30 1.00 DEGRADED -
+ mirror-0 9981503995904 3046188658688 6935315337216 - - 9 30 - ONLINE
+ sdc2 9998683602944 - - - - - - - ONLINE
+ sdd2 9998683602944 - - - - - - - ONLINE
+logs - - - - - - - - -
+ mirror-1 17716740096 393216 17716346880 - - 0 0 - DEGRADED
+ sdb1 17951621120 - - - - - - - ONLINE
+ 14807975228228307538 - - - - - - - - UNAVAIL
+cache - - - - - - - - -
+ sdb2 99000254464 98755866624 239665152 - - 0 99 - ONLINE
+ wwn-0x500151795954c095-part2 - - - - - - - - UNAVAIL
+`,
+ want: []vdevEntry{
+ {
+ name: "mirror-0",
+ health: "online",
+ vdev: "store/mirror-0",
+ level: 2,
+ },
+ {
+ name: "sdc2",
+ health: "online",
+ vdev: "store/mirror-0/sdc2",
+ level: 4,
+ },
+ {
+ name: "sdd2",
+ health: "online",
+ vdev: "store/mirror-0/sdd2",
+ level: 4,
+ },
+ {
+ name: "logs",
+ health: "-",
+ vdev: "store/logs",
+ level: 0,
+ },
+ {
+ name: "mirror-1",
+ health: "degraded",
+ vdev: "store/logs/mirror-1",
+ level: 2,
+ },
+ {
+ name: "sdb1",
+ health: "online",
+ vdev: "store/logs/mirror-1/sdb1",
+ level: 4,
+ },
+ {
+ name: "14807975228228307538",
+ health: "unavail",
+ vdev: "store/logs/mirror-1/14807975228228307538",
+ level: 4,
+ },
+ {
+ name: "cache",
+ health: "-",
+ vdev: "store/cache",
+ level: 0,
+ },
+ {
+ name: "sdb2",
+ health: "online",
+ vdev: "store/cache/sdb2",
+ level: 2,
+ },
+ {
+ name: "wwn-0x500151795954c095-part2",
+ health: "unavail",
+ vdev: "store/cache/wwn-0x500151795954c095-part2",
+ level: 2,
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ v, err := parseZpoolListVdevOutput([]byte(test.input))
+ require.NoError(t, err)
+ assert.Equal(t, test.want, v)
+ })
+ }
+}
+
+func prepareMockOk() *mockZpoolCLIExec {
+ return &mockZpoolCLIExec{
+ listData: dataZpoolList,
+ listWithVdevData: dataZpoolListWithVdev,
+ }
+}
+
+func prepareMockOkVdevLogsCache() *mockZpoolCLIExec {
+ return &mockZpoolCLIExec{
+ listData: dataZpoolList,
+ listWithVdevData: dataZpoolListWithVdevLogsCache,
+ }
+}
+
+func prepareMockErrOnList() *mockZpoolCLIExec {
+ return &mockZpoolCLIExec{
+ errOnList: true,
+ }
+}
+
+func prepareMockEmptyResponse() *mockZpoolCLIExec {
+ return &mockZpoolCLIExec{}
+}
+
+func prepareMockUnexpectedResponse() *mockZpoolCLIExec {
+ return &mockZpoolCLIExec{
+ listData: []byte(`
+Lorem ipsum dolor sit amet, consectetur adipiscing elit.
+Nulla malesuada erat id magna mattis, eu viverra tellus rhoncus.
+Fusce et felis pulvinar, posuere sem non, porttitor eros.
+`),
+ }
+}
+
+type mockZpoolCLIExec struct {
+ errOnList bool
+ listData []byte
+ listWithVdevData []byte
+}
+
+func (m *mockZpoolCLIExec) list() ([]byte, error) {
+ if m.errOnList {
+ return nil, errors.New("mock.list() error")
+ }
+
+ return m.listData, nil
+}
+
+func (m *mockZpoolCLIExec) listWithVdev(pool string) ([]byte, error) {
+ s := string(m.listWithVdevData)
+ s = strings.Replace(s, "rpool", pool, 1)
+
+ return []byte(s), nil
+}
diff --git a/src/go/plugin/go.d/modules/zookeeper/README.md b/src/go/plugin/go.d/modules/zookeeper/README.md
new file mode 120000
index 000000000..ae81b3714
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/README.md
@@ -0,0 +1 @@
+integrations/zookeeper.md \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/zookeeper/charts.go b/src/go/plugin/go.d/modules/zookeeper/charts.go
new file mode 100644
index 000000000..9f081a9c2
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/charts.go
@@ -0,0 +1,111 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zookeeper
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+type (
+ Charts = module.Charts
+ Dims = module.Dims
+ Vars = module.Vars
+)
+
+var charts = Charts{
+ {
+ ID: "requests",
+ Title: "Outstanding Requests",
+ Units: "requests",
+ Fam: "requests",
+ Ctx: "zookeeper.requests",
+ Dims: Dims{
+ {ID: "outstanding_requests", Name: "outstanding"},
+ },
+ },
+ {
+ ID: "requests_latency",
+ Title: "Requests Latency",
+ Units: "ms",
+ Fam: "requests",
+ Ctx: "zookeeper.requests_latency",
+ Dims: Dims{
+ {ID: "min_latency", Name: "min", Div: 1000},
+ {ID: "avg_latency", Name: "avg", Div: 1000},
+ {ID: "max_latency", Name: "max", Div: 1000},
+ },
+ },
+ {
+ ID: "connections",
+ Title: "Alive Connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "zookeeper.connections",
+ Dims: Dims{
+ {ID: "num_alive_connections", Name: "alive"},
+ },
+ },
+ {
+ ID: "packets",
+ Title: "Packets",
+ Units: "pps",
+ Fam: "net",
+ Ctx: "zookeeper.packets",
+ Dims: Dims{
+ {ID: "packets_received", Name: "received", Algo: module.Incremental},
+ {ID: "packets_sent", Name: "sent", Algo: module.Incremental, Mul: -1},
+ },
+ },
+ {
+ ID: "file_descriptor",
+ Title: "Open File Descriptors",
+ Units: "file descriptors",
+ Fam: "file descriptors",
+ Ctx: "zookeeper.file_descriptor",
+ Dims: Dims{
+ {ID: "open_file_descriptor_count", Name: "open"},
+ },
+ Vars: Vars{
+ {ID: "max_file_descriptor_count"},
+ },
+ },
+ {
+ ID: "nodes",
+ Title: "Number of Nodes",
+ Units: "nodes",
+ Fam: "data tree",
+ Ctx: "zookeeper.nodes",
+ Dims: Dims{
+ {ID: "znode_count", Name: "znode"},
+ {ID: "ephemerals_count", Name: "ephemerals"},
+ },
+ },
+ {
+ ID: "watches",
+ Title: "Number of Watches",
+ Units: "watches",
+ Fam: "data tree",
+ Ctx: "zookeeper.watches",
+ Dims: Dims{
+ {ID: "watch_count", Name: "watches"},
+ },
+ },
+ {
+ ID: "approximate_data_size",
+ Title: "Approximate Data Tree Size",
+ Units: "KiB",
+ Fam: "data tree",
+ Ctx: "zookeeper.approximate_data_size",
+ Dims: Dims{
+ {ID: "approximate_data_size", Name: "size", Div: 1024},
+ },
+ },
+ {
+ ID: "server_state",
+ Title: "Server State",
+ Units: "state",
+ Fam: "server state",
+ Ctx: "zookeeper.server_state",
+ Dims: Dims{
+ {ID: "server_state", Name: "state"},
+ },
+ },
+}
diff --git a/src/go/plugin/go.d/modules/zookeeper/collect.go b/src/go/plugin/go.d/modules/zookeeper/collect.go
new file mode 100644
index 000000000..86491e1b1
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/collect.go
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zookeeper
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func (z *Zookeeper) collect() (map[string]int64, error) {
+ return z.collectMntr()
+}
+
+func (z *Zookeeper) collectMntr() (map[string]int64, error) {
+ const command = "mntr"
+
+ lines, err := z.fetch(command)
+ if err != nil {
+ return nil, err
+ }
+
+ switch len(lines) {
+ case 0:
+ return nil, fmt.Errorf("'%s' command returned empty response", command)
+ case 1:
+ // a single-line response usually means mntr is not in the whitelist (4lw.commands.whitelist).
+ return nil, fmt.Errorf("'%s' command returned bad response: %s", command, lines[0])
+ }
+
+ mx := make(map[string]int64)
+
+ for _, line := range lines {
+ parts := strings.Fields(line)
+ if len(parts) != 2 || !strings.HasPrefix(parts[0], "zk_") {
+ continue
+ }
+
+ key, value := strings.TrimPrefix(parts[0], "zk_"), parts[1]
+ switch key {
+ case "version":
+ case "server_state":
+ mx[key] = convertServerState(value)
+ case "min_latency", "avg_latency", "max_latency":
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ continue
+ }
+ mx[key] = int64(v * 1000)
+ default:
+ v, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ continue
+ }
+ mx[key] = int64(v)
+ }
+ }
+
+ if len(mx) == 0 {
+ return nil, fmt.Errorf("'%s' command: failed to parse response", command)
+ }
+
+ return mx, nil
+}
+
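+// convertServerState maps the textual zk_server_state value to a stable numeric code (0 means unknown).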
+func convertServerState(state string) int64 {
+ switch state {
+ default:
+ return 0
+ case "leader":
+ return 1
+ case "follower":
+ return 2
+ case "observer":
+ return 3
+ case "standalone":
+ return 4
+ }
+}
diff --git a/src/go/plugin/go.d/modules/zookeeper/config_schema.json b/src/go/plugin/go.d/modules/zookeeper/config_schema.json
new file mode 100644
index 000000000..e07a27c29
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/config_schema.json
@@ -0,0 +1,95 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Zookeeper collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the Zookeeper server listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:2181"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout, in seconds, for connection, read, write, and SSL handshake operations.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "use_tls": {
+ "title": "Use TLS",
+ "description": "Indicates whether TLS should be used for secure communication.",
+ "type": "boolean"
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ }
+ },
+ "required": [
+ "address"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "use_tls",
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/plugin/go.d/modules/zookeeper/fetcher.go b/src/go/plugin/go.d/modules/zookeeper/fetcher.go
new file mode 100644
index 000000000..a6611b506
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/fetcher.go
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zookeeper
+
+import (
+ "bytes"
+ "fmt"
+ "unsafe"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+)
+
+const limitReadLines = 2000
+
+type zookeeperFetcher struct {
+ socket.Client
+}
+
+func (c *zookeeperFetcher) fetch(command string) (rows []string, err error) {
+ if err = c.Connect(); err != nil {
+ return nil, err
+ }
+ defer func() { _ = c.Disconnect() }()
+
+ var num int
+ clientErr := c.Command(command, func(b []byte) bool {
+ if !isZKLine(b) || isMntrLineOK(b) {
+ rows = append(rows, string(b))
+ }
+ if num += 1; num >= limitReadLines {
+ err = fmt.Errorf("read line limit exceeded (%d)", limitReadLines)
+ return false
+ }
+ return true
+ })
+ if clientErr != nil {
+ return nil, clientErr
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ return rows, nil
+}
+
+func isZKLine(line []byte) bool {
+ return bytes.HasPrefix(line, []byte("zk_"))
+}
+
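+// isMntrLineOK reports whether the line's key (everything before the last tab) is one of the collected mntr keys.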
+func isMntrLineOK(line []byte) bool {
+ idx := bytes.LastIndexByte(line, '\t')
+ return idx > 0 && collectedZKKeys[unsafeString(line)[:idx]]
+}
+
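+// unsafeString converts b to a string without copying; the result is valid only while b is unmodified.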
+func unsafeString(b []byte) string {
+ return *((*string)(unsafe.Pointer(&b)))
+}
+
+var collectedZKKeys = map[string]bool{
+ "zk_num_alive_connections": true,
+ "zk_outstanding_requests": true,
+ "zk_min_latency": true,
+ "zk_avg_latency": true,
+ "zk_max_latency": true,
+ "zk_packets_received": true,
+ "zk_packets_sent": true,
+ "zk_open_file_descriptor_count": true,
+ "zk_max_file_descriptor_count": true,
+ "zk_znode_count": true,
+ "zk_ephemerals_count": true,
+ "zk_watch_count": true,
+ "zk_approximate_data_size": true,
+ "zk_server_state": true,
+}
diff --git a/src/go/plugin/go.d/modules/zookeeper/fetcher_test.go b/src/go/plugin/go.d/modules/zookeeper/fetcher_test.go
new file mode 100644
index 000000000..d0931abb9
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/fetcher_test.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zookeeper
+
+import (
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_clientFetch(t *testing.T) {
+ c := &zookeeperFetcher{Client: &mockSocket{rowsNumResp: 10}}
+
+ rows, err := c.fetch("whatever\n")
+ assert.NoError(t, err)
+ assert.Len(t, rows, 10)
+
+ rows, err = c.fetch("whatever\n")
+ assert.NoError(t, err)
+ assert.Len(t, rows, 10)
+}
+
+func Test_clientFetchReadLineLimitExceeded(t *testing.T) {
+ c := &zookeeperFetcher{Client: &mockSocket{rowsNumResp: limitReadLines + 1}}
+
+ rows, err := c.fetch("whatever\n")
+ assert.Error(t, err)
+ assert.Len(t, rows, 0)
+}
+
+type mockSocket struct {
+ rowsNumResp int
+}
+
+func (m *mockSocket) Connect() error {
+ return nil
+}
+
+func (m *mockSocket) Disconnect() error {
+ return nil
+}
+
+func (m *mockSocket) Command(command string, process socket.Processor) error {
+ for i := 0; i < m.rowsNumResp; i++ {
+ process([]byte(command))
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/modules/zookeeper/init.go b/src/go/plugin/go.d/modules/zookeeper/init.go
new file mode 100644
index 000000000..380f4bb33
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/init.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zookeeper
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/socket"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+)
+
+func (z *Zookeeper) verifyConfig() error {
+ if z.Address == "" {
+ return errors.New("address not set")
+ }
+ return nil
+}
+
+func (z *Zookeeper) initZookeeperFetcher() (fetcher, error) {
+ var tlsConf *tls.Config
+ var err error
+
+ if z.UseTLS {
+ tlsConf, err = tlscfg.NewTLSConfig(z.TLSConfig)
+ if err != nil {
+ return nil, fmt.Errorf("creating tls config : %v", err)
+ }
+ }
+
+ sock := socket.New(socket.Config{
+ Address: z.Address,
+ ConnectTimeout: z.Timeout.Duration(),
+ ReadTimeout: z.Timeout.Duration(),
+ WriteTimeout: z.Timeout.Duration(),
+ TLSConf: tlsConf,
+ })
+
+ return &zookeeperFetcher{Client: sock}, nil
+}
diff --git a/src/go/plugin/go.d/modules/zookeeper/integrations/zookeeper.md b/src/go/plugin/go.d/modules/zookeeper/integrations/zookeeper.md
new file mode 100644
index 000000000..8481ff8c8
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/integrations/zookeeper.md
@@ -0,0 +1,250 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/zookeeper/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/zookeeper/metadata.yaml"
+sidebar_label: "ZooKeeper"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Service Discovery / Registry"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ZooKeeper
+
+
+<img src="https://netdata.cloud/img/zookeeper.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: zookeeper
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+
+
+It connects to the Zookeeper instance over TCP and executes the following commands:
+
+- [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).
+
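+For example, you can check that the server responds to `mntr` manually (assuming `nc` is available and the server listens on the default client port):
+
+```bash
+echo mntr | nc 127.0.0.1 2181
+```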
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:
+
+- 127.0.0.1:2181
+- 127.0.0.1:2182
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ZooKeeper instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| zookeeper.requests | outstanding | requests |
+| zookeeper.requests_latency | min, avg, max | ms |
+| zookeeper.connections | alive | connections |
+| zookeeper.packets | received, sent | pps |
+| zookeeper.file_descriptor | open | file descriptors |
+| zookeeper.nodes | znode, ephemerals | nodes |
+| zookeeper.watches | watches | watches |
+| zookeeper.approximate_data_size | size | KiB |
+| zookeeper.server_state | state | state |
+
+
+
+## Alerts
+
+There are no alerts configured by default for this integration.
+
+
+## Setup
+
+### Prerequisites
+
+#### Whitelist `mntr` command
+
+Add `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).
+
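+For example, add it to the server's `zoo.cfg` (its location depends on your installation), keeping any commands already in the list:
+
+```text
+4lw.commands.whitelist=mntr
+```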
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/zookeeper.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/zookeeper.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | Server address. The format is IP:PORT. | 127.0.0.1:2181 | yes |
+| timeout | Connection, read, write, and SSL handshake timeout. | 1 | no |
+| use_tls | Whether to use TLS or not. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+Local server.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:2181
+
+```
+</details>
+
+##### TLS with self-signed certificate
+
+Zookeeper with TLS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:2181
+ use_tls: yes
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ address: 127.0.0.1:2181
+
+ - name: remote
+ address: 192.0.2.1:2181
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `zookeeper` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m zookeeper
+ ```
+
+### Getting Logs
+
+If you're encountering problems with the `zookeeper` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep zookeeper
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep zookeeper /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep zookeeper
+```
+
+
diff --git a/src/go/plugin/go.d/modules/zookeeper/metadata.yaml b/src/go/plugin/go.d/modules/zookeeper/metadata.yaml
new file mode 100644
index 000000000..527a55fb4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/metadata.yaml
@@ -0,0 +1,202 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-zookeeper
+ plugin_name: go.d.plugin
+ module_name: zookeeper
+ monitored_instance:
+ name: ZooKeeper
+ link: https://zookeeper.apache.org/
+ categories:
+ - data-collection.service-discovery-registry
+ icon_filename: zookeeper.svg
+ keywords:
+ - zookeeper
+ most_popular: false
+ info_provided_to_referring_integrations:
+ description: ""
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ overview:
+ data_collection:
+ metrics_description: ""
+ method_description: |
+          It connects to the Zookeeper instance over TCP and executes the following commands:
+
+ - [mntr](https://zookeeper.apache.org/doc/r3.4.8/zookeeperAdmin.html#sc_zkCommands).
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects instances running on localhost by attempting to connect using known ZooKeeper TCP sockets:
+
+ - 127.0.0.1:2181
+ - 127.0.0.1:2182
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list:
+ - title: Whitelist `mntr` command
+ description: |
+ Add `mntr` to Zookeeper's [4lw.commands.whitelist](https://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_4lw).
+ configuration:
+ file:
+ name: "go.d/zookeeper.conf"
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: address
+ description: Server address. The format is IP:PORT.
+ default_value: 127.0.0.1:2181
+ required: true
+ - name: timeout
+ description: Connection/read/write/ssl handshake timeout.
+ default_value: 1
+ required: false
+ - name: use_tls
+ description: Whether to use TLS or not.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ description: Local server.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:2181
+ - name: TLS with self-signed certificate
+ description: Zookeeper with TLS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:2181
+ use_tls: yes
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ address: 127.0.0.1:2181
+
+ - name: remote
+ address: 192.0.2.1:2181
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: zookeeper.requests
+ description: Outstanding Requests
+ unit: requests
+ chart_type: line
+ dimensions:
+ - name: outstanding
+ - name: zookeeper.requests_latency
+ description: Requests Latency
+ unit: ms
+ chart_type: line
+ dimensions:
+ - name: min
+ - name: avg
+ - name: max
+ - name: zookeeper.connections
+ description: Alive Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: alive
+ - name: zookeeper.packets
+ description: Packets
+ unit: pps
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: sent
+ - name: zookeeper.file_descriptor
+ description: Open File Descriptors
+ unit: file descriptors
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: zookeeper.nodes
+ description: Number of Nodes
+ unit: nodes
+ chart_type: line
+ dimensions:
+ - name: znode
+ - name: ephemerals
+ - name: zookeeper.watches
+ description: Number of Watches
+ unit: watches
+ chart_type: line
+ dimensions:
+ - name: watches
+ - name: zookeeper.approximate_data_size
+ description: Approximate Data Tree Size
+ unit: KiB
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: zookeeper.server_state
+ description: Server State
+ unit: state
+ chart_type: line
+ dimensions:
+ - name: state
diff --git a/src/go/plugin/go.d/modules/zookeeper/testdata/config.json b/src/go/plugin/go.d/modules/zookeeper/testdata/config.json
new file mode 100644
index 000000000..0cf6c4727
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/testdata/config.json
@@ -0,0 +1,10 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "use_tls": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/plugin/go.d/modules/zookeeper/testdata/config.yaml b/src/go/plugin/go.d/modules/zookeeper/testdata/config.yaml
new file mode 100644
index 000000000..54456cc80
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/testdata/config.yaml
@@ -0,0 +1,8 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+use_tls: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/plugin/go.d/modules/zookeeper/testdata/mntr.txt b/src/go/plugin/go.d/modules/zookeeper/testdata/mntr.txt
new file mode 100644
index 000000000..8e10c287d
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/testdata/mntr.txt
@@ -0,0 +1,416 @@
+zk_version 3.6.1--104dcb3e3fb464b30c5186d229e00af9f332524b, built on 04/21/2020 15:01 GMT
+zk_server_state standalone
+zk_ephemerals_count 0
+zk_min_latency 0.1
+zk_avg_latency 0.1
+zk_num_alive_connections 1
+zk_max_file_descriptor_count 1048576
+zk_outstanding_requests 0
+zk_approximate_data_size 44
+zk_znode_count 5
+zk_open_file_descriptor_count 63
+zk_global_sessions 0
+zk_local_sessions 0
+zk_uptime 27595191
+zk_last_client_response_size -1
+zk_max_latency 0.1
+zk_packets_sent 182
+zk_outstanding_tls_handshake 0
+zk_packets_received 92
+zk_max_client_response_size -1
+zk_connection_drop_probability 0.0
+zk_watch_count 0
+zk_min_client_response_size -1
+zk_proposal_count 0
+zk_outstanding_changes_removed 0
+zk_stale_requests_dropped 0
+zk_large_requests_rejected 0
+zk_connection_rejected 0
+zk_sessionless_connections_expired 0
+zk_looking_count 0
+zk_dead_watchers_queued 0
+zk_stale_requests 0
+zk_connection_drop_count 0
+zk_learner_proposal_received_count 0
+zk_digest_mismatches_count 0
+zk_dead_watchers_cleared 0
+zk_response_packet_cache_hits 0
+zk_bytes_received_count 368
+zk_add_dead_watcher_stall_time 0
+zk_request_throttle_wait_count 0
+zk_response_packet_cache_misses 0
+zk_ensemble_auth_success 0
+zk_prep_processor_request_queued 0
+zk_learner_commit_received_count 0
+zk_stale_replies 0
+zk_connection_request_count 0
+zk_ensemble_auth_fail 0
+zk_diff_count 0
+zk_response_packet_get_children_cache_misses 0
+zk_connection_revalidate_count 0
+zk_quit_leading_due_to_disloyal_voter 0
+zk_snap_count 0
+zk_unrecoverable_error_count 0
+zk_commit_count 0
+zk_stale_sessions_expired 0
+zk_response_packet_get_children_cache_hits 0
+zk_sync_processor_request_queued 0
+zk_outstanding_changes_queued 0
+zk_request_commit_queued 0
+zk_ensemble_auth_skip 0
+zk_tls_handshake_exceeded 0
+zk_revalidate_count 0
+zk_avg_node_created_watch_count 0.0
+zk_min_node_created_watch_count 0
+zk_max_node_created_watch_count 0
+zk_cnt_node_created_watch_count 0
+zk_sum_node_created_watch_count 0
+zk_avg_session_queues_drained 0.0
+zk_min_session_queues_drained 0
+zk_max_session_queues_drained 0
+zk_cnt_session_queues_drained 0
+zk_sum_session_queues_drained 0
+zk_avg_write_commit_proc_req_queued 0.0
+zk_min_write_commit_proc_req_queued 0
+zk_max_write_commit_proc_req_queued 0
+zk_cnt_write_commit_proc_req_queued 0
+zk_sum_write_commit_proc_req_queued 0
+zk_avg_connection_token_deficit 0.0
+zk_min_connection_token_deficit 0
+zk_max_connection_token_deficit 0
+zk_cnt_connection_token_deficit 0
+zk_sum_connection_token_deficit 0
+zk_avg_read_commit_proc_req_queued 0.0
+zk_min_read_commit_proc_req_queued 0
+zk_max_read_commit_proc_req_queued 0
+zk_cnt_read_commit_proc_req_queued 0
+zk_sum_read_commit_proc_req_queued 0
+zk_avg_node_deleted_watch_count 0.0
+zk_min_node_deleted_watch_count 0
+zk_max_node_deleted_watch_count 0
+zk_cnt_node_deleted_watch_count 0
+zk_sum_node_deleted_watch_count 0
+zk_avg_startup_txns_load_time 0.0
+zk_min_startup_txns_load_time 0
+zk_max_startup_txns_load_time 0
+zk_cnt_startup_txns_load_time 0
+zk_sum_startup_txns_load_time 0
+zk_avg_sync_processor_queue_size 0.0
+zk_min_sync_processor_queue_size 0
+zk_max_sync_processor_queue_size 0
+zk_cnt_sync_processor_queue_size 1
+zk_sum_sync_processor_queue_size 0
+zk_avg_follower_sync_time 0.0
+zk_min_follower_sync_time 0
+zk_max_follower_sync_time 0
+zk_cnt_follower_sync_time 0
+zk_sum_follower_sync_time 0
+zk_avg_prep_processor_queue_size 0.0
+zk_min_prep_processor_queue_size 0
+zk_max_prep_processor_queue_size 0
+zk_cnt_prep_processor_queue_size 1
+zk_sum_prep_processor_queue_size 0
+zk_avg_fsynctime 0.0
+zk_min_fsynctime 0
+zk_max_fsynctime 0
+zk_cnt_fsynctime 0
+zk_sum_fsynctime 0
+zk_avg_reads_issued_from_session_queue 0.0
+zk_min_reads_issued_from_session_queue 0
+zk_max_reads_issued_from_session_queue 0
+zk_cnt_reads_issued_from_session_queue 0
+zk_sum_reads_issued_from_session_queue 0
+zk_avg_snapshottime 0.0
+zk_min_snapshottime 0
+zk_max_snapshottime 0
+zk_cnt_snapshottime 1
+zk_sum_snapshottime 0
+zk_avg_startup_txns_loaded 0.0
+zk_min_startup_txns_loaded 0
+zk_max_startup_txns_loaded 0
+zk_cnt_startup_txns_loaded 0
+zk_sum_startup_txns_loaded 0
+zk_avg_reads_after_write_in_session_queue 0.0
+zk_min_reads_after_write_in_session_queue 0
+zk_max_reads_after_write_in_session_queue 0
+zk_cnt_reads_after_write_in_session_queue 0
+zk_sum_reads_after_write_in_session_queue 0
+zk_avg_requests_in_session_queue 0.0
+zk_min_requests_in_session_queue 0
+zk_max_requests_in_session_queue 0
+zk_cnt_requests_in_session_queue 0
+zk_sum_requests_in_session_queue 0
+zk_avg_write_commit_proc_issued 0.0
+zk_min_write_commit_proc_issued 0
+zk_max_write_commit_proc_issued 0
+zk_cnt_write_commit_proc_issued 0
+zk_sum_write_commit_proc_issued 0
+zk_avg_prep_process_time 0.0
+zk_min_prep_process_time 0
+zk_max_prep_process_time 0
+zk_cnt_prep_process_time 0
+zk_sum_prep_process_time 0
+zk_avg_pending_session_queue_size 0.0
+zk_min_pending_session_queue_size 0
+zk_max_pending_session_queue_size 0
+zk_cnt_pending_session_queue_size 0
+zk_sum_pending_session_queue_size 0
+zk_avg_time_waiting_empty_pool_in_commit_processor_read_ms 0.0
+zk_min_time_waiting_empty_pool_in_commit_processor_read_ms 0
+zk_max_time_waiting_empty_pool_in_commit_processor_read_ms 0
+zk_cnt_time_waiting_empty_pool_in_commit_processor_read_ms 0
+zk_sum_time_waiting_empty_pool_in_commit_processor_read_ms 0
+zk_avg_commit_process_time 0.0
+zk_min_commit_process_time 0
+zk_max_commit_process_time 0
+zk_cnt_commit_process_time 0
+zk_sum_commit_process_time 0
+zk_avg_dbinittime 6.0
+zk_min_dbinittime 6
+zk_max_dbinittime 6
+zk_cnt_dbinittime 1
+zk_sum_dbinittime 6
+zk_avg_netty_queued_buffer_capacity 0.0
+zk_min_netty_queued_buffer_capacity 0
+zk_max_netty_queued_buffer_capacity 0
+zk_cnt_netty_queued_buffer_capacity 0
+zk_sum_netty_queued_buffer_capacity 0
+zk_avg_election_time 0.0
+zk_min_election_time 0
+zk_max_election_time 0
+zk_cnt_election_time 0
+zk_sum_election_time 0
+zk_avg_commit_commit_proc_req_queued 0.0
+zk_min_commit_commit_proc_req_queued 0
+zk_max_commit_commit_proc_req_queued 0
+zk_cnt_commit_commit_proc_req_queued 0
+zk_sum_commit_commit_proc_req_queued 0
+zk_avg_sync_processor_batch_size 0.0
+zk_min_sync_processor_batch_size 0
+zk_max_sync_processor_batch_size 0
+zk_cnt_sync_processor_batch_size 0
+zk_sum_sync_processor_batch_size 0
+zk_avg_node_children_watch_count 0.0
+zk_min_node_children_watch_count 0
+zk_max_node_children_watch_count 0
+zk_cnt_node_children_watch_count 0
+zk_sum_node_children_watch_count 0
+zk_avg_write_batch_time_in_commit_processor 0.0
+zk_min_write_batch_time_in_commit_processor 0
+zk_max_write_batch_time_in_commit_processor 0
+zk_cnt_write_batch_time_in_commit_processor 0
+zk_sum_write_batch_time_in_commit_processor 0
+zk_avg_read_commit_proc_issued 0.0
+zk_min_read_commit_proc_issued 0
+zk_max_read_commit_proc_issued 0
+zk_cnt_read_commit_proc_issued 0
+zk_sum_read_commit_proc_issued 0
+zk_avg_concurrent_request_processing_in_commit_processor 0.0
+zk_min_concurrent_request_processing_in_commit_processor 0
+zk_max_concurrent_request_processing_in_commit_processor 0
+zk_cnt_concurrent_request_processing_in_commit_processor 0
+zk_sum_concurrent_request_processing_in_commit_processor 0
+zk_avg_node_changed_watch_count 0.0
+zk_min_node_changed_watch_count 0
+zk_max_node_changed_watch_count 0
+zk_cnt_node_changed_watch_count 0
+zk_sum_node_changed_watch_count 0
+zk_avg_sync_process_time 0.0
+zk_min_sync_process_time 0
+zk_max_sync_process_time 0
+zk_cnt_sync_process_time 0
+zk_sum_sync_process_time 0
+zk_avg_startup_snap_load_time 5.0
+zk_min_startup_snap_load_time 5
+zk_max_startup_snap_load_time 5
+zk_cnt_startup_snap_load_time 1
+zk_sum_startup_snap_load_time 5
+zk_avg_prep_processor_queue_time_ms 0.0
+zk_min_prep_processor_queue_time_ms 0
+zk_max_prep_processor_queue_time_ms 0
+zk_cnt_prep_processor_queue_time_ms 0
+zk_sum_prep_processor_queue_time_ms 0
+zk_p50_prep_processor_queue_time_ms 0
+zk_p95_prep_processor_queue_time_ms 0
+zk_p99_prep_processor_queue_time_ms 0
+zk_p999_prep_processor_queue_time_ms 0
+zk_avg_close_session_prep_time 0.0
+zk_min_close_session_prep_time 0
+zk_max_close_session_prep_time 0
+zk_cnt_close_session_prep_time 0
+zk_sum_close_session_prep_time 0
+zk_p50_close_session_prep_time 0
+zk_p95_close_session_prep_time 0
+zk_p99_close_session_prep_time 0
+zk_p999_close_session_prep_time 0
+zk_avg_read_commitproc_time_ms 0.0
+zk_min_read_commitproc_time_ms 0
+zk_max_read_commitproc_time_ms 0
+zk_cnt_read_commitproc_time_ms 0
+zk_sum_read_commitproc_time_ms 0
+zk_p50_read_commitproc_time_ms 0
+zk_p95_read_commitproc_time_ms 0
+zk_p99_read_commitproc_time_ms 0
+zk_p999_read_commitproc_time_ms 0
+zk_avg_updatelatency 0.0
+zk_min_updatelatency 0
+zk_max_updatelatency 0
+zk_cnt_updatelatency 0
+zk_sum_updatelatency 0
+zk_p50_updatelatency 0
+zk_p95_updatelatency 0
+zk_p99_updatelatency 0
+zk_p999_updatelatency 0
+zk_avg_local_write_committed_time_ms 0.0
+zk_min_local_write_committed_time_ms 0
+zk_max_local_write_committed_time_ms 0
+zk_cnt_local_write_committed_time_ms 0
+zk_sum_local_write_committed_time_ms 0
+zk_p50_local_write_committed_time_ms 0
+zk_p95_local_write_committed_time_ms 0
+zk_p99_local_write_committed_time_ms 0
+zk_p999_local_write_committed_time_ms 0
+zk_avg_readlatency 0.0
+zk_min_readlatency 0
+zk_max_readlatency 0
+zk_cnt_readlatency 0
+zk_sum_readlatency 0
+zk_p50_readlatency 0
+zk_p95_readlatency 0
+zk_p99_readlatency 0
+zk_p999_readlatency 0
+zk_avg_quorum_ack_latency 0.0
+zk_min_quorum_ack_latency 0
+zk_max_quorum_ack_latency 0
+zk_cnt_quorum_ack_latency 0
+zk_sum_quorum_ack_latency 0
+zk_p50_quorum_ack_latency 0
+zk_p95_quorum_ack_latency 0
+zk_p99_quorum_ack_latency 0
+zk_p999_quorum_ack_latency 0
+zk_avg_om_commit_process_time_ms 0.0
+zk_min_om_commit_process_time_ms 0
+zk_max_om_commit_process_time_ms 0
+zk_cnt_om_commit_process_time_ms 0
+zk_sum_om_commit_process_time_ms 0
+zk_p50_om_commit_process_time_ms 0
+zk_p95_om_commit_process_time_ms 0
+zk_p99_om_commit_process_time_ms 0
+zk_p999_om_commit_process_time_ms 0
+zk_avg_read_final_proc_time_ms 0.0
+zk_min_read_final_proc_time_ms 0
+zk_max_read_final_proc_time_ms 0
+zk_cnt_read_final_proc_time_ms 0
+zk_sum_read_final_proc_time_ms 0
+zk_p50_read_final_proc_time_ms 0
+zk_p95_read_final_proc_time_ms 0
+zk_p99_read_final_proc_time_ms 0
+zk_p999_read_final_proc_time_ms 0
+zk_avg_commit_propagation_latency 0.0
+zk_min_commit_propagation_latency 0
+zk_max_commit_propagation_latency 0
+zk_cnt_commit_propagation_latency 0
+zk_sum_commit_propagation_latency 0
+zk_p50_commit_propagation_latency 0
+zk_p95_commit_propagation_latency 0
+zk_p99_commit_propagation_latency 0
+zk_p999_commit_propagation_latency 0
+zk_avg_dead_watchers_cleaner_latency 0.0
+zk_min_dead_watchers_cleaner_latency 0
+zk_max_dead_watchers_cleaner_latency 0
+zk_cnt_dead_watchers_cleaner_latency 0
+zk_sum_dead_watchers_cleaner_latency 0
+zk_p50_dead_watchers_cleaner_latency 0
+zk_p95_dead_watchers_cleaner_latency 0
+zk_p99_dead_watchers_cleaner_latency 0
+zk_p999_dead_watchers_cleaner_latency 0
+zk_avg_write_final_proc_time_ms 0.0
+zk_min_write_final_proc_time_ms 0
+zk_max_write_final_proc_time_ms 0
+zk_cnt_write_final_proc_time_ms 0
+zk_sum_write_final_proc_time_ms 0
+zk_p50_write_final_proc_time_ms 0
+zk_p95_write_final_proc_time_ms 0
+zk_p99_write_final_proc_time_ms 0
+zk_p999_write_final_proc_time_ms 0
+zk_avg_proposal_ack_creation_latency 0.0
+zk_min_proposal_ack_creation_latency 0
+zk_max_proposal_ack_creation_latency 0
+zk_cnt_proposal_ack_creation_latency 0
+zk_sum_proposal_ack_creation_latency 0
+zk_p50_proposal_ack_creation_latency 0
+zk_p95_proposal_ack_creation_latency 0
+zk_p99_proposal_ack_creation_latency 0
+zk_p999_proposal_ack_creation_latency 0
+zk_avg_proposal_latency 0.0
+zk_min_proposal_latency 0
+zk_max_proposal_latency 0
+zk_cnt_proposal_latency 0
+zk_sum_proposal_latency 0
+zk_p50_proposal_latency 0
+zk_p95_proposal_latency 0
+zk_p99_proposal_latency 0
+zk_p999_proposal_latency 0
+zk_avg_om_proposal_process_time_ms 0.0
+zk_min_om_proposal_process_time_ms 0
+zk_max_om_proposal_process_time_ms 0
+zk_cnt_om_proposal_process_time_ms 0
+zk_sum_om_proposal_process_time_ms 0
+zk_p50_om_proposal_process_time_ms 0
+zk_p95_om_proposal_process_time_ms 0
+zk_p99_om_proposal_process_time_ms 0
+zk_p999_om_proposal_process_time_ms 0
+zk_avg_sync_processor_queue_and_flush_time_ms 0.0
+zk_min_sync_processor_queue_and_flush_time_ms 0
+zk_max_sync_processor_queue_and_flush_time_ms 0
+zk_cnt_sync_processor_queue_and_flush_time_ms 0
+zk_sum_sync_processor_queue_and_flush_time_ms 0
+zk_p50_sync_processor_queue_and_flush_time_ms 0
+zk_p95_sync_processor_queue_and_flush_time_ms 0
+zk_p99_sync_processor_queue_and_flush_time_ms 0
+zk_p999_sync_processor_queue_and_flush_time_ms 0
+zk_avg_propagation_latency 0.0
+zk_min_propagation_latency 0
+zk_max_propagation_latency 0
+zk_cnt_propagation_latency 0
+zk_sum_propagation_latency 0
+zk_p50_propagation_latency 0
+zk_p95_propagation_latency 0
+zk_p99_propagation_latency 0
+zk_p999_propagation_latency 0
+zk_avg_server_write_committed_time_ms 0.0
+zk_min_server_write_committed_time_ms 0
+zk_max_server_write_committed_time_ms 0
+zk_cnt_server_write_committed_time_ms 0
+zk_sum_server_write_committed_time_ms 0
+zk_p50_server_write_committed_time_ms 0
+zk_p95_server_write_committed_time_ms 0
+zk_p99_server_write_committed_time_ms 0
+zk_p999_server_write_committed_time_ms 0
+zk_avg_sync_processor_queue_time_ms 0.0
+zk_min_sync_processor_queue_time_ms 0
+zk_max_sync_processor_queue_time_ms 0
+zk_cnt_sync_processor_queue_time_ms 0
+zk_sum_sync_processor_queue_time_ms 0
+zk_p50_sync_processor_queue_time_ms 0
+zk_p95_sync_processor_queue_time_ms 0
+zk_p99_sync_processor_queue_time_ms 0
+zk_p999_sync_processor_queue_time_ms 0
+zk_avg_sync_processor_queue_flush_time_ms 0.0
+zk_min_sync_processor_queue_flush_time_ms 0
+zk_max_sync_processor_queue_flush_time_ms 0
+zk_cnt_sync_processor_queue_flush_time_ms 0
+zk_sum_sync_processor_queue_flush_time_ms 0
+zk_p50_sync_processor_queue_flush_time_ms 0
+zk_p95_sync_processor_queue_flush_time_ms 0
+zk_p99_sync_processor_queue_flush_time_ms 0
+zk_p999_sync_processor_queue_flush_time_ms 0
+zk_avg_write_commitproc_time_ms 0.0
+zk_min_write_commitproc_time_ms 0
+zk_max_write_commitproc_time_ms 0
+zk_cnt_write_commitproc_time_ms 0
+zk_sum_write_commitproc_time_ms 0
+zk_p50_write_commitproc_time_ms 0
+zk_p95_write_commitproc_time_ms 0
+zk_p99_write_commitproc_time_ms 0
+zk_p999_write_commitproc_time_ms 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/zookeeper/testdata/mntr_notinwhitelist.txt b/src/go/plugin/go.d/modules/zookeeper/testdata/mntr_notinwhitelist.txt
new file mode 100644
index 000000000..1fd1983b7
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/testdata/mntr_notinwhitelist.txt
@@ -0,0 +1 @@
+mntr is not executed because it is not in the whitelist. \ No newline at end of file
diff --git a/src/go/plugin/go.d/modules/zookeeper/zookeeper.go b/src/go/plugin/go.d/modules/zookeeper/zookeeper.go
new file mode 100644
index 000000000..6d004a405
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/zookeeper.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zookeeper
+
+import (
+ _ "embed"
+ "errors"
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("zookeeper", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Zookeeper {
+ return &Zookeeper{
+ Config: Config{
+ Address: "127.0.0.1:2181",
+ Timeout: web.Duration(time.Second),
+ UseTLS: false,
+ }}
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+ UseTLS bool `yaml:"use_tls,omitempty" json:"use_tls"`
+}
+
+type (
+ Zookeeper struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ fetcher
+ }
+ fetcher interface {
+ fetch(command string) ([]string, error)
+ }
+)
+
+func (z *Zookeeper) Configuration() any {
+ return z.Config
+}
+
+func (z *Zookeeper) Init() error {
+ if err := z.verifyConfig(); err != nil {
+ z.Error(err)
+ return err
+ }
+
+ f, err := z.initZookeeperFetcher()
+ if err != nil {
+ z.Error(err)
+ return err
+ }
+ z.fetcher = f
+
+ return nil
+}
+
+func (z *Zookeeper) Check() error {
+ mx, err := z.collect()
+ if err != nil {
+ z.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (z *Zookeeper) Charts() *Charts {
+ return charts.Copy()
+}
+
+func (z *Zookeeper) Collect() map[string]int64 {
+ mx, err := z.collect()
+ if err != nil {
+ z.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (z *Zookeeper) Cleanup() {}
diff --git a/src/go/plugin/go.d/modules/zookeeper/zookeeper_test.go b/src/go/plugin/go.d/modules/zookeeper/zookeeper_test.go
new file mode 100644
index 000000000..3fc8ad5b4
--- /dev/null
+++ b/src/go/plugin/go.d/modules/zookeeper/zookeeper_test.go
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zookeeper
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/agent/module"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataMntrMetrics, _ = os.ReadFile("testdata/mntr.txt")
+ dataMntrNotInWhiteListResponse, _ = os.ReadFile("testdata/mntr_notinwhitelist.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataMntrMetrics": dataMntrMetrics,
+ "dataMntrNotInWhiteListResponse": dataMntrNotInWhiteListResponse,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestZookeeper_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Zookeeper{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestZookeeper_Init(t *testing.T) {
+ job := New()
+
+ assert.NoError(t, job.Init())
+ assert.NotNil(t, job.fetcher)
+}
+
+func TestZookeeper_InitErrorOnCreatingTLSConfig(t *testing.T) {
+ job := New()
+ job.UseTLS = true
+ job.TLSConfig.TLSCA = "testdata/tls"
+
+ assert.Error(t, job.Init())
+}
+
+func TestZookeeper_Check(t *testing.T) {
+ job := New()
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{data: dataMntrMetrics}
+
+ assert.NoError(t, job.Check())
+}
+
+func TestZookeeper_CheckErrorOnFetch(t *testing.T) {
+ job := New()
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{err: true}
+
+ assert.Error(t, job.Check())
+}
+
+func TestZookeeper_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestZookeeper_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
+
+func TestZookeeper_Collect(t *testing.T) {
+ job := New()
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{data: dataMntrMetrics}
+
+ expected := map[string]int64{
+ "approximate_data_size": 44,
+ "avg_latency": 100,
+ "ephemerals_count": 0,
+ "max_file_descriptor_count": 1048576,
+ "max_latency": 100,
+ "min_latency": 100,
+ "num_alive_connections": 1,
+ "open_file_descriptor_count": 63,
+ "outstanding_requests": 0,
+ "packets_received": 92,
+ "packets_sent": 182,
+ "server_state": 4,
+ "watch_count": 0,
+ "znode_count": 5,
+ }
+
+ collected := job.Collect()
+
+ assert.Equal(t, expected, collected)
+ ensureCollectedHasAllChartsDimsVarsIDs(t, job, collected)
+}
+
+func TestZookeeper_CollectMntrNotInWhiteList(t *testing.T) {
+ job := New()
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{data: dataMntrNotInWhiteListResponse}
+
+ assert.Nil(t, job.Collect())
+}
+
+func TestZookeeper_CollectMntrEmptyResponse(t *testing.T) {
+ job := New()
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{}
+
+ assert.Nil(t, job.Collect())
+}
+
+func TestZookeeper_CollectMntrInvalidData(t *testing.T) {
+ job := New()
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{data: []byte("hello \nand good buy\n")}
+
+ assert.Nil(t, job.Collect())
+}
+
+func TestZookeeper_CollectMntrReceiveError(t *testing.T) {
+ job := New()
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{err: true}
+
+ assert.Nil(t, job.Collect())
+}
+
+func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, zk *Zookeeper, collected map[string]int64) {
+ for _, chart := range *zk.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := collected[dim.ID]
+ assert.Truef(t, ok, "collected metrics has no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ for _, v := range chart.Vars {
+ _, ok := collected[v.ID]
+ assert.Truef(t, ok, "collected metrics has no data for var '%s' chart '%s'", v.ID, chart.ID)
+ }
+ }
+}
+
+type mockZookeeperFetcher struct {
+ data []byte
+ err bool
+}
+
+func (m mockZookeeperFetcher) fetch(_ string) ([]string, error) {
+ if m.err {
+ return nil, errors.New("mock fetch error")
+ }
+
+ var lines []string
+ s := bufio.NewScanner(bytes.NewReader(m.data))
+ for s.Scan() {
+ if !isZKLine(s.Bytes()) || isMntrLineOK(s.Bytes()) {
+ lines = append(lines, s.Text())
+ }
+ }
+ return lines, nil
+}
diff --git a/src/go/plugin/go.d/pkg/README.md b/src/go/plugin/go.d/pkg/README.md
new file mode 100644
index 000000000..34561395f
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/README.md
@@ -0,0 +1,22 @@
+<!--
+title: "Helper Packages"
+custom_edit_url: "/src/go/plugin/go.d/pkg/README.md"
+sidebar_label: "Helper Packages"
+learn_status: "Published"
+learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages"
+-->
+
+# Helper Packages
+
+- if you need to work with IP ranges, consider using [`iprange`](/src/go/plugin/go.d/pkg/iprange).
+- if you parse application log files, then [`logs`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/logs) is handy.
+- if you need filtering, check [`matcher`](/src/go/plugin/go.d/pkg/matcher).
+- if you collect metrics from an HTTP endpoint, use [`web`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/web).
+- if you collect metrics from a Prometheus endpoint, then [`prometheus`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/prometheus) and [`web`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/web) are what you need.
+- [`tlscfg`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/tlscfg) provides TLS support.
+- [`stm`](https://github.com/netdata/netdata/tree/master/src/go/plugin/go.d/pkg/stm) helps you convert any struct to a `map[string]int64`.
diff --git a/src/go/plugin/go.d/pkg/dockerhost/dockerhost.go b/src/go/plugin/go.d/pkg/dockerhost/dockerhost.go
new file mode 100644
index 000000000..eb26b18fa
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/dockerhost/dockerhost.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhost
+
+import (
+ "fmt"
+ "os"
+ "strings"
+)
+
+func FromEnv() string {
+ addr := os.Getenv("DOCKER_HOST")
+ if addr == "" {
+ return ""
+ }
+ if strings.HasPrefix(addr, "tcp://") || strings.HasPrefix(addr, "unix://") {
+ return addr
+ }
+ if strings.HasPrefix(addr, "/") {
+ return fmt.Sprintf("unix://%s", addr)
+ }
+ return fmt.Sprintf("tcp://%s", addr)
+}
diff --git a/src/go/plugin/go.d/pkg/iprange/README.md b/src/go/plugin/go.d/pkg/iprange/README.md
new file mode 100644
index 000000000..ee777989d
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/iprange/README.md
@@ -0,0 +1,37 @@
+<!--
+title: "iprange"
+custom_edit_url: "/src/go/plugin/go.d/pkg/iprange/README.md"
+sidebar_label: "iprange"
+learn_status: "Published"
+learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages"
+-->
+
+# iprange
+
+This package helps you work with IP ranges.
+
+An IP range is a set of IP addresses. Both IPv4 and IPv6 are supported.
+
+IP range interface:
+
+```go
+type Range interface {
+ Family() Family
+ Contains(ip net.IP) bool
+ Size() *big.Int
+ fmt.Stringer
+}
+```
+
+## Supported formats
+
+- `IPv4 address` (192.0.2.1)
+- `IPv4 range` (192.0.2.0-192.0.2.10)
+- `IPv4 CIDR` (192.0.2.0/24)
+- `IPv4 subnet mask` (192.0.2.0/255.255.255.0)
+- `IPv6 address` (2001:db8::1)
+- `IPv6 range` (2001:db8::-2001:db8::10)
+- `IPv6 CIDR` (2001:db8::/64)
+
+An IP range doesn't include the network and broadcast addresses when the format is `IPv4 CIDR`, `IPv4 subnet mask`,
+or `IPv6 CIDR`.
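+
+A minimal usage sketch (assuming the module import path used elsewhere in this repository):
+
+```go
+package main
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/iprange"
+)
+
+func main() {
+	// CIDR and subnet-mask forms exclude the network and broadcast addresses.
+	ranges, err := iprange.ParseRanges("192.0.2.0/24 2001:db8::-2001:db8::10")
+	if err != nil {
+		panic(err)
+	}
+
+	for _, r := range ranges {
+		fmt.Printf("%s size=%s contains(192.0.2.5)=%v\n", r, r.Size(), r.Contains(net.ParseIP("192.0.2.5")))
+	}
+}
+```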
diff --git a/src/go/plugin/go.d/pkg/iprange/parse.go b/src/go/plugin/go.d/pkg/iprange/parse.go
new file mode 100644
index 000000000..3471702a1
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/iprange/parse.go
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package iprange
+
+import (
+ "bytes"
+ "fmt"
+ "net"
+ "regexp"
+ "strings"
+
+ "github.com/apparentlymart/go-cidr/cidr"
+)
+
+// ParseRanges parses s as a space separated list of IP Ranges, returning the result and an error if any.
+// IP Range can be in IPv4 address ("192.0.2.1"), IPv4 range ("192.0.2.0-192.0.2.10")
+// IPv4 CIDR ("192.0.2.0/24"), IPv4 subnet mask ("192.0.2.0/255.255.255.0"),
+// IPv6 address ("2001:db8::1"), IPv6 range ("2001:db8::-2001:db8::10"),
+// or IPv6 CIDR ("2001:db8::/64") form.
+// IPv4 CIDR, IPv4 subnet mask and IPv6 CIDR ranges don't include network and broadcast addresses.
+func ParseRanges(s string) ([]Range, error) {
+ parts := strings.Fields(s)
+ if len(parts) == 0 {
+ return nil, nil
+ }
+
+ var ranges []Range
+ for _, v := range parts {
+ r, err := ParseRange(v)
+ if err != nil {
+ return nil, err
+ }
+
+ if r != nil {
+ ranges = append(ranges, r)
+ }
+ }
+ return ranges, nil
+}
+
+var (
+ reRange = regexp.MustCompile("^[0-9a-f.:-]+$") // addr | addr-addr
+ reCIDR = regexp.MustCompile("^[0-9a-f.:]+/[0-9]{1,3}$") // addr/prefix_length
+ reSubnetMask = regexp.MustCompile("^[0-9.]+/[0-9.]{7,}$") // v4_addr/mask
+)
+
+// ParseRange parses s as an IP Range, returning the result and an error if any.
+// The string s can be in IPv4 address ("192.0.2.1"), IPv4 range ("192.0.2.0-192.0.2.10")
+// IPv4 CIDR ("192.0.2.0/24"), IPv4 subnet mask ("192.0.2.0/255.255.255.0"),
+// IPv6 address ("2001:db8::1"), IPv6 range ("2001:db8::-2001:db8::10"),
+// or IPv6 CIDR ("2001:db8::/64") form.
+// IPv4 CIDR, IPv4 subnet mask and IPv6 CIDR ranges don't include network and broadcast addresses.
+func ParseRange(s string) (Range, error) {
+ s = strings.ToLower(s)
+ if s == "" {
+ return nil, nil
+ }
+
+ var r Range
+ switch {
+ case reRange.MatchString(s):
+ r = parseRange(s)
+ case reCIDR.MatchString(s):
+ r = parseCIDR(s)
+ case reSubnetMask.MatchString(s):
+ r = parseSubnetMask(s)
+ }
+
+ if r == nil {
+ return nil, fmt.Errorf("ip range (%s) invalid syntax", s)
+ }
+ return r, nil
+}
+
+func parseRange(s string) Range {
+ var start, end net.IP
+ if idx := strings.IndexByte(s, '-'); idx != -1 {
+ start, end = net.ParseIP(s[:idx]), net.ParseIP(s[idx+1:])
+ } else {
+ start, end = net.ParseIP(s), net.ParseIP(s)
+ }
+
+ return New(start, end)
+}
+
+func parseCIDR(s string) Range {
+ ip, network, err := net.ParseCIDR(s)
+ if err != nil {
+ return nil
+ }
+
+ start, end := cidr.AddressRange(network)
+ prefixLen, _ := network.Mask.Size()
+
+ if isV4IP(ip) && prefixLen < 31 || isV6IP(ip) && prefixLen < 127 {
+ start = cidr.Inc(start)
+ end = cidr.Dec(end)
+ }
+
+ return parseRange(fmt.Sprintf("%s-%s", start, end))
+}
+
+func parseSubnetMask(s string) Range {
+ idx := strings.LastIndexByte(s, '/')
+ if idx == -1 {
+ return nil
+ }
+
+ address, mask := s[:idx], s[idx+1:]
+
+ ip := net.ParseIP(mask).To4()
+ if ip == nil {
+ return nil
+ }
+
+ prefixLen, bits := net.IPv4Mask(ip[0], ip[1], ip[2], ip[3]).Size()
+ if prefixLen+bits == 0 {
+ return nil
+ }
+
+ return parseCIDR(fmt.Sprintf("%s/%d", address, prefixLen))
+}
+
+func isV4RangeValid(start, end net.IP) bool {
+ return isV4IP(start) && isV4IP(end) && bytes.Compare(end, start) >= 0
+}
+
+func isV6RangeValid(start, end net.IP) bool {
+ return isV6IP(start) && isV6IP(end) && bytes.Compare(end, start) >= 0
+}
+
+func isV4IP(ip net.IP) bool {
+ return ip.To4() != nil
+}
+
+func isV6IP(ip net.IP) bool {
+ return !isV4IP(ip) && ip.To16() != nil
+}
diff --git a/src/go/plugin/go.d/pkg/iprange/parse_test.go b/src/go/plugin/go.d/pkg/iprange/parse_test.go
new file mode 100644
index 000000000..8b4ab96b3
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/iprange/parse_test.go
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package iprange
+
+import (
+ "fmt"
+ "net"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParseRanges(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantRanges []Range
+ wantErr bool
+ }{
+ "single range": {
+ input: "192.0.2.0-192.0.2.10",
+ wantRanges: []Range{
+ prepareRange("192.0.2.0", "192.0.2.10"),
+ },
+ },
+ "multiple ranges": {
+ input: "2001:db8::0 192.0.2.0-192.0.2.10 2001:db8::0/126 192.0.2.0/255.255.255.0",
+ wantRanges: []Range{
+ prepareRange("2001:db8::0", "2001:db8::0"),
+ prepareRange("192.0.2.0", "192.0.2.10"),
+ prepareRange("2001:db8::1", "2001:db8::2"),
+ prepareRange("192.0.2.1", "192.0.2.254"),
+ },
+ },
+ "single invalid syntax": {
+ input: "192.0.2.0-192.0.2.",
+ wantErr: true,
+ },
+ "multiple invalid syntax": {
+ input: "2001:db8::0 192.0.2.0-192.0.2.10 2001:db8::0/999 192.0.2.0/255.255.255.0",
+ wantErr: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rs, err := ParseRanges(test.input)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ assert.Nilf(t, rs, "want: nil, got: %s", rs)
+ } else {
+ assert.NoError(t, err)
+ assert.Equalf(t, test.wantRanges, rs, "want: %s, got: %s", test.wantRanges, rs)
+ }
+ })
+ }
+}
+
+func TestParseRange(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantRange Range
+ wantErr bool
+ }{
+ "v4 IP": {
+ input: "192.0.2.0",
+ wantRange: prepareRange("192.0.2.0", "192.0.2.0"),
+ },
+ "v4 IP: invalid address": {
+ input: "192.0.2.",
+ wantErr: true,
+ },
+ "v4 Range": {
+ input: "192.0.2.0-192.0.2.10",
+ wantRange: prepareRange("192.0.2.0", "192.0.2.10"),
+ },
+ "v4 Range: start == end": {
+ input: "192.0.2.0-192.0.2.0",
+ wantRange: prepareRange("192.0.2.0", "192.0.2.0"),
+ },
+ "v4 Range: start > end": {
+ input: "192.0.2.10-192.0.2.0",
+ wantErr: true,
+ },
+ "v4 Range: invalid start": {
+ input: "192.0.2.-192.0.2.10",
+ wantErr: true,
+ },
+ "v4 Range: invalid end": {
+ input: "192.0.2.0-192.0.2.",
+ wantErr: true,
+ },
+ "v4 Range: v6 start": {
+ input: "2001:db8::0-192.0.2.10",
+ wantErr: true,
+ },
+ "v4 Range: v6 end": {
+ input: "192.0.2.0-2001:db8::0",
+ wantErr: true,
+ },
+ "v4 CIDR: /0": {
+ input: "192.0.2.0/0",
+ wantRange: prepareRange("0.0.0.1", "255.255.255.254"),
+ },
+ "v4 CIDR: /24": {
+ input: "192.0.2.0/24",
+ wantRange: prepareRange("192.0.2.1", "192.0.2.254"),
+ },
+ "v4 CIDR: /30": {
+ input: "192.0.2.0/30",
+ wantRange: prepareRange("192.0.2.1", "192.0.2.2"),
+ },
+ "v4 CIDR: /31": {
+ input: "192.0.2.0/31",
+ wantRange: prepareRange("192.0.2.0", "192.0.2.1"),
+ },
+ "v4 CIDR: /32": {
+ input: "192.0.2.0/32",
+ wantRange: prepareRange("192.0.2.0", "192.0.2.0"),
+ },
+ "v4 CIDR: ip instead of host address": {
+ input: "192.0.2.10/24",
+ wantRange: prepareRange("192.0.2.1", "192.0.2.254"),
+ },
+ "v4 CIDR: missing prefix length": {
+ input: "192.0.2.0/",
+ wantErr: true,
+ },
+ "v4 CIDR: invalid prefix length": {
+ input: "192.0.2.0/99",
+ wantErr: true,
+ },
+ "v4 Mask: /0": {
+ input: "192.0.2.0/0.0.0.0",
+ wantRange: prepareRange("0.0.0.1", "255.255.255.254"),
+ },
+ "v4 Mask: /24": {
+ input: "192.0.2.0/255.255.255.0",
+ wantRange: prepareRange("192.0.2.1", "192.0.2.254"),
+ },
+ "v4 Mask: /30": {
+ input: "192.0.2.0/255.255.255.252",
+ wantRange: prepareRange("192.0.2.1", "192.0.2.2"),
+ },
+ "v4 Mask: /31": {
+ input: "192.0.2.0/255.255.255.254",
+ wantRange: prepareRange("192.0.2.0", "192.0.2.1"),
+ },
+ "v4 Mask: /32": {
+ input: "192.0.2.0/255.255.255.255",
+ wantRange: prepareRange("192.0.2.0", "192.0.2.0"),
+ },
+ "v4 Mask: missing prefix mask": {
+ input: "192.0.2.0/",
+ wantErr: true,
+ },
+ "v4 Mask: invalid mask": {
+ input: "192.0.2.0/mask",
+ wantErr: true,
+ },
+ "v4 Mask: not canonical form mask": {
+ input: "192.0.2.0/255.255.0.254",
+ wantErr: true,
+ },
+ "v4 Mask: v6 address": {
+ input: "2001:db8::/255.255.255.0",
+ wantErr: true,
+ },
+
+ "v6 IP": {
+ input: "2001:db8::0",
+ wantRange: prepareRange("2001:db8::0", "2001:db8::0"),
+ },
+ "v6 IP: invalid address": {
+ input: "2001:db8",
+ wantErr: true,
+ },
+ "v6 Range": {
+ input: "2001:db8::-2001:db8::10",
+ wantRange: prepareRange("2001:db8::", "2001:db8::10"),
+ },
+ "v6 Range: start == end": {
+ input: "2001:db8::-2001:db8::",
+ wantRange: prepareRange("2001:db8::", "2001:db8::"),
+ },
+ "v6 Range: start > end": {
+ input: "2001:db8::10-2001:db8::",
+ wantErr: true,
+ },
+ "v6 Range: invalid start": {
+ input: "2001:db8-2001:db8::10",
+ wantErr: true,
+ },
+ "v6 Range: invalid end": {
+ input: "2001:db8::-2001:db8",
+ wantErr: true,
+ },
+ "v6 Range: v4 start": {
+ input: "192.0.2.0-2001:db8::10",
+ wantErr: true,
+ },
+ "v6 Range: v4 end": {
+ input: "2001:db8::-192.0.2.10",
+ wantErr: true,
+ },
+ "v6 CIDR: /0": {
+ input: "2001:db8::/0",
+ wantRange: prepareRange("::1", "ffff:ffff:ffff:ffff:ffff:ffff:ffff:fffe"),
+ },
+ "v6 CIDR: /64": {
+ input: "2001:db8::/64",
+ wantRange: prepareRange("2001:db8::1", "2001:db8::ffff:ffff:ffff:fffe"),
+ },
+ "v6 CIDR: /126": {
+ input: "2001:db8::/126",
+ wantRange: prepareRange("2001:db8::1", "2001:db8::2"),
+ },
+ "v6 CIDR: /127": {
+ input: "2001:db8::/127",
+ wantRange: prepareRange("2001:db8::", "2001:db8::1"),
+ },
+ "v6 CIDR: /128": {
+ input: "2001:db8::/128",
+ wantRange: prepareRange("2001:db8::", "2001:db8::"),
+ },
+ "v6 CIDR: ip instead of host address": {
+ input: "2001:db8::10/64",
+ wantRange: prepareRange("2001:db8::1", "2001:db8::ffff:ffff:ffff:fffe"),
+ },
+ "v6 CIDR: missing prefix length": {
+ input: "2001:db8::/",
+ wantErr: true,
+ },
+ "v6 CIDR: invalid prefix length": {
+ input: "2001:db8::/999",
+ wantErr: true,
+ },
+ }
+
+ for name, test := range tests {
+ name = fmt.Sprintf("%s (%s)", name, test.input)
+ t.Run(name, func(t *testing.T) {
+ r, err := ParseRange(test.input)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ assert.Nilf(t, r, "want: nil, got: %s", r)
+ } else {
+ assert.NoError(t, err)
+ assert.Equalf(t, test.wantRange, r, "want: %s, got: %s", test.wantRange, r)
+ }
+ })
+ }
+}
+
+func prepareRange(start, end string) Range {
+ return New(net.ParseIP(start), net.ParseIP(end))
+}
diff --git a/src/go/plugin/go.d/pkg/iprange/pool.go b/src/go/plugin/go.d/pkg/iprange/pool.go
new file mode 100644
index 000000000..48ba5689b
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/iprange/pool.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package iprange
+
+import (
+ "math/big"
+ "net"
+ "strings"
+)
+
+// Pool is a collection of IP Ranges.
+type Pool []Range
+
+// String returns the string form of the pool.
+func (p Pool) String() string {
+ var b strings.Builder
+ for _, r := range p {
+ b.WriteString(r.String() + " ")
+ }
+ return strings.TrimSpace(b.String())
+}
+
+// Size reports the number of IP addresses in the pool.
+func (p Pool) Size() *big.Int {
+ size := big.NewInt(0)
+ for _, r := range p {
+ size.Add(size, r.Size())
+ }
+ return size
+}
+
+// Contains reports whether the pool includes IP.
+func (p Pool) Contains(ip net.IP) bool {
+ for _, r := range p {
+ if r.Contains(ip) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/go/plugin/go.d/pkg/iprange/pool_test.go b/src/go/plugin/go.d/pkg/iprange/pool_test.go
new file mode 100644
index 000000000..2864b6711
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/iprange/pool_test.go
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package iprange
+
+import (
+ "fmt"
+ "math/big"
+ "net"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPool_String(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantString string
+ }{
+		"single": {
+ input: "192.0.2.0-192.0.2.10",
+ wantString: "192.0.2.0-192.0.2.10",
+ },
+ "multiple": {
+ input: "192.0.2.0-192.0.2.10 2001:db8::-2001:db8::10",
+ wantString: "192.0.2.0-192.0.2.10 2001:db8::-2001:db8::10",
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rs, err := ParseRanges(test.input)
+ require.NoError(t, err)
+ p := Pool(rs)
+
+ assert.Equal(t, test.wantString, p.String())
+ })
+ }
+}
+
+func TestPool_Size(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantSize *big.Int
+ }{
+		"single": {
+ input: "192.0.2.0-192.0.2.10",
+ wantSize: big.NewInt(11),
+ },
+ "multiple": {
+ input: "192.0.2.0-192.0.2.10 2001:db8::-2001:db8::10",
+ wantSize: big.NewInt(11 + 17),
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ rs, err := ParseRanges(test.input)
+ require.NoError(t, err)
+ p := Pool(rs)
+
+ assert.Equal(t, test.wantSize, p.Size())
+ })
+ }
+}
+
+func TestPool_Contains(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ ip string
+ wantFail bool
+ }{
+ "inside first": {
+ input: "192.0.2.0-192.0.2.10 192.0.2.20-192.0.2.30 2001:db8::-2001:db8::10",
+ ip: "192.0.2.5",
+ },
+ "inside last": {
+ input: "192.0.2.0-192.0.2.10 192.0.2.20-192.0.2.30 2001:db8::-2001:db8::10",
+ ip: "2001:db8::5",
+ },
+ "outside": {
+ input: "192.0.2.0-192.0.2.10 192.0.2.20-192.0.2.30 2001:db8::-2001:db8::10",
+ ip: "192.0.2.100",
+ wantFail: true,
+ },
+ }
+
+ for name, test := range tests {
+ name = fmt.Sprintf("%s (range: %s, ip: %s)", name, test.input, test.ip)
+ t.Run(name, func(t *testing.T) {
+ rs, err := ParseRanges(test.input)
+ require.NoError(t, err)
+ ip := net.ParseIP(test.ip)
+ require.NotNil(t, ip)
+ p := Pool(rs)
+
+ if test.wantFail {
+ assert.False(t, p.Contains(ip))
+ } else {
+ assert.True(t, p.Contains(ip))
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/iprange/range.go b/src/go/plugin/go.d/pkg/iprange/range.go
new file mode 100644
index 000000000..1fe02eace
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/iprange/range.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package iprange
+
+import (
+ "bytes"
+ "fmt"
+ "math/big"
+ "net"
+)
+
+// Family represents IP Range address-family.
+type Family uint8
+
+const (
+ // V4Family is IPv4 address-family.
+ V4Family Family = iota
+ // V6Family is IPv6 address-family.
+ V6Family
+)
+
+// Range represents an IP range.
+type Range interface {
+ Family() Family
+ Contains(ip net.IP) bool
+ Size() *big.Int
+ fmt.Stringer
+}
+
+// New returns new IP Range.
+// If it is not a valid range (start and end IPs have different address-families, or start > end),
+// New returns nil.
+func New(start, end net.IP) Range {
+ if isV4RangeValid(start, end) {
+ return v4Range{start: start, end: end}
+ }
+ if isV6RangeValid(start, end) {
+ return v6Range{start: start, end: end}
+ }
+ return nil
+}
+
+type v4Range struct {
+ start net.IP
+ end net.IP
+}
+
+// String returns the string form of the range.
+func (r v4Range) String() string {
+ return fmt.Sprintf("%s-%s", r.start, r.end)
+}
+
+// Family returns the range address family.
+func (r v4Range) Family() Family {
+ return V4Family
+}
+
+// Contains reports whether the range includes IP.
+func (r v4Range) Contains(ip net.IP) bool {
+ return bytes.Compare(ip, r.start) >= 0 && bytes.Compare(ip, r.end) <= 0
+}
+
+// Size reports the number of IP addresses in the range.
+func (r v4Range) Size() *big.Int {
+ return big.NewInt(v4ToInt(r.end) - v4ToInt(r.start) + 1)
+}
+
+type v6Range struct {
+ start net.IP
+ end net.IP
+}
+
+// String returns the string form of the range.
+func (r v6Range) String() string {
+ return fmt.Sprintf("%s-%s", r.start, r.end)
+}
+
+// Family returns the range address family.
+func (r v6Range) Family() Family {
+ return V6Family
+}
+
+// Contains reports whether the range includes IP.
+func (r v6Range) Contains(ip net.IP) bool {
+ return bytes.Compare(ip, r.start) >= 0 && bytes.Compare(ip, r.end) <= 0
+}
+
+// Size reports the number of IP addresses in the range.
+func (r v6Range) Size() *big.Int {
+ size := big.NewInt(0)
+ size.Add(size, big.NewInt(0).SetBytes(r.end))
+ size.Sub(size, big.NewInt(0).SetBytes(r.start))
+ size.Add(size, big.NewInt(1))
+ return size
+}
+
+func v4ToInt(ip net.IP) int64 {
+ ip = ip.To4()
+ return int64(ip[0])<<24 | int64(ip[1])<<16 | int64(ip[2])<<8 | int64(ip[3])
+}
diff --git a/src/go/plugin/go.d/pkg/iprange/range_test.go b/src/go/plugin/go.d/pkg/iprange/range_test.go
new file mode 100644
index 000000000..631d012e0
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/iprange/range_test.go
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package iprange
+
+import (
+ "fmt"
+ "math/big"
+ "net"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestV4Range_String(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantString string
+ }{
+ "IP": {input: "192.0.2.0", wantString: "192.0.2.0-192.0.2.0"},
+ "Range": {input: "192.0.2.0-192.0.2.10", wantString: "192.0.2.0-192.0.2.10"},
+ "CIDR": {input: "192.0.2.0/24", wantString: "192.0.2.1-192.0.2.254"},
+ "Mask": {input: "192.0.2.0/255.255.255.0", wantString: "192.0.2.1-192.0.2.254"},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ r, err := ParseRange(test.input)
+ require.NoError(t, err)
+
+ assert.Equal(t, test.wantString, r.String())
+ })
+ }
+}
+
+func TestV4Range_Family(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ }{
+ "IP": {input: "192.0.2.0"},
+ "Range": {input: "192.0.2.0-192.0.2.10"},
+ "CIDR": {input: "192.0.2.0/24"},
+ "Mask": {input: "192.0.2.0/255.255.255.0"},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ r, err := ParseRange(test.input)
+ require.NoError(t, err)
+
+ assert.Equal(t, V4Family, r.Family())
+ })
+ }
+}
+
+func TestV4Range_Size(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantSize *big.Int
+ }{
+ "IP": {input: "192.0.2.0", wantSize: big.NewInt(1)},
+ "Range": {input: "192.0.2.0-192.0.2.10", wantSize: big.NewInt(11)},
+ "CIDR": {input: "192.0.2.0/24", wantSize: big.NewInt(254)},
+ "CIDR 31": {input: "192.0.2.0/31", wantSize: big.NewInt(2)},
+ "CIDR 32": {input: "192.0.2.0/32", wantSize: big.NewInt(1)},
+ "Mask": {input: "192.0.2.0/255.255.255.0", wantSize: big.NewInt(254)},
+ "Mask 31": {input: "192.0.2.0/255.255.255.254", wantSize: big.NewInt(2)},
+ "Mask 32": {input: "192.0.2.0/255.255.255.255", wantSize: big.NewInt(1)},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ r, err := ParseRange(test.input)
+ require.NoError(t, err)
+
+ assert.Equal(t, test.wantSize, r.Size())
+ })
+ }
+}
+
+func TestV4Range_Contains(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ ip string
+ wantFail bool
+ }{
+ "inside": {input: "192.0.2.0-192.0.2.10", ip: "192.0.2.5"},
+ "outside": {input: "192.0.2.0-192.0.2.10", ip: "192.0.2.55", wantFail: true},
+ "eq start": {input: "192.0.2.0-192.0.2.10", ip: "192.0.2.0"},
+ "eq end": {input: "192.0.2.0-192.0.2.10", ip: "192.0.2.10"},
+ "v6": {input: "192.0.2.0-192.0.2.10", ip: "2001:db8::", wantFail: true},
+ }
+
+ for name, test := range tests {
+ name = fmt.Sprintf("%s (range: %s, ip: %s)", name, test.input, test.ip)
+ t.Run(name, func(t *testing.T) {
+ r, err := ParseRange(test.input)
+ require.NoError(t, err)
+ ip := net.ParseIP(test.ip)
+ require.NotNil(t, ip)
+
+ if test.wantFail {
+ assert.False(t, r.Contains(ip))
+ } else {
+ assert.True(t, r.Contains(ip))
+ }
+ })
+ }
+}
+
+func TestV6Range_String(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantString string
+ }{
+ "IP": {input: "2001:db8::", wantString: "2001:db8::-2001:db8::"},
+ "Range": {input: "2001:db8::-2001:db8::10", wantString: "2001:db8::-2001:db8::10"},
+ "CIDR": {input: "2001:db8::/126", wantString: "2001:db8::1-2001:db8::2"},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ r, err := ParseRange(test.input)
+ require.NoError(t, err)
+
+ assert.Equal(t, test.wantString, r.String())
+ })
+ }
+}
+
+func TestV6Range_Family(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ }{
+ "IP": {input: "2001:db8::"},
+ "Range": {input: "2001:db8::-2001:db8::10"},
+ "CIDR": {input: "2001:db8::/126"},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ r, err := ParseRange(test.input)
+ require.NoError(t, err)
+
+ assert.Equal(t, V6Family, r.Family())
+ })
+ }
+}
+
+func TestV6Range_Size(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ wantSize *big.Int
+ }{
+ "IP": {input: "2001:db8::", wantSize: big.NewInt(1)},
+ "Range": {input: "2001:db8::-2001:db8::10", wantSize: big.NewInt(17)},
+ "CIDR": {input: "2001:db8::/120", wantSize: big.NewInt(254)},
+ "CIDR 127": {input: "2001:db8::/127", wantSize: big.NewInt(2)},
+ "CIDR 128": {input: "2001:db8::/128", wantSize: big.NewInt(1)},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ r, err := ParseRange(test.input)
+ require.NoError(t, err)
+
+ assert.Equal(t, test.wantSize, r.Size())
+ })
+ }
+}
+
+func TestV6Range_Contains(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ ip string
+ wantFail bool
+ }{
+ "inside": {input: "2001:db8::-2001:db8::10", ip: "2001:db8::5"},
+ "outside": {input: "2001:db8::-2001:db8::10", ip: "2001:db8::ff", wantFail: true},
+ "eq start": {input: "2001:db8::-2001:db8::10", ip: "2001:db8::"},
+ "eq end": {input: "2001:db8::-2001:db8::10", ip: "2001:db8::10"},
+ "v4": {input: "2001:db8::-2001:db8::10", ip: "192.0.2.0", wantFail: true},
+ }
+
+ for name, test := range tests {
+ name = fmt.Sprintf("%s (range: %s, ip: %s)", name, test.input, test.ip)
+ t.Run(name, func(t *testing.T) {
+ r, err := ParseRange(test.input)
+ require.NoError(t, err)
+ ip := net.ParseIP(test.ip)
+ require.NotNil(t, ip)
+
+ if test.wantFail {
+ assert.False(t, r.Contains(ip))
+ } else {
+ assert.True(t, r.Contains(ip))
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/k8sclient/k8sclient.go b/src/go/plugin/go.d/pkg/k8sclient/k8sclient.go
new file mode 100644
index 000000000..079239c1c
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/k8sclient/k8sclient.go
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8sclient
+
+import (
+ "errors"
+ "os"
+ "path/filepath"
+
+ "k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/kubernetes/fake"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/clientcmd"
+
+ _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+)
+
+const (
+ EnvFakeClient = "KUBERNETES_FAKE_CLIENTSET"
+ defaultUserAgent = "Netdata/k8s-client"
+)
+
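+// New returns a Kubernetes client. It returns a fake clientset when the
+// KUBERNETES_FAKE_CLIENTSET environment variable is set, an in-cluster client when the
+// Kubernetes service host/port environment variables are present, and otherwise builds
+// the client from the kubeconfig found in the user's home directory.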
+func New(userAgent string) (kubernetes.Interface, error) {
+ if userAgent == "" {
+ userAgent = defaultUserAgent
+ }
+
+ switch {
+ case os.Getenv(EnvFakeClient) != "":
+ return fake.NewSimpleClientset(), nil
+ case os.Getenv("KUBERNETES_SERVICE_HOST") != "" && os.Getenv("KUBERNETES_SERVICE_PORT") != "":
+ return newInCluster(userAgent)
+ default:
+ return newOutOfCluster(userAgent)
+ }
+}
+
+func newInCluster(userAgent string) (*kubernetes.Clientset, error) {
+ config, err := rest.InClusterConfig()
+ if err != nil {
+ return nil, err
+ }
+
+ config.UserAgent = userAgent
+
+ return kubernetes.NewForConfig(config)
+}
+
+func newOutOfCluster(userAgent string) (*kubernetes.Clientset, error) {
+ home := homeDir()
+ if home == "" {
+ return nil, errors.New("couldn't find home directory")
+ }
+
+ path := filepath.Join(home, ".kube", "config")
+ config, err := clientcmd.BuildConfigFromFlags("", path)
+ if err != nil {
+ return nil, err
+ }
+
+ config.UserAgent = userAgent
+
+ return kubernetes.NewForConfig(config)
+}
+
+func homeDir() string {
+ if h := os.Getenv("HOME"); h != "" {
+ return h
+ }
+ return os.Getenv("USERPROFILE") // windows
+}
diff --git a/src/go/plugin/go.d/pkg/logs/csv.go b/src/go/plugin/go.d/pkg/logs/csv.go
new file mode 100644
index 000000000..4057b8c2f
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/csv.go
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "bytes"
+ "encoding/csv"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+type (
+ CSVConfig struct {
+ FieldsPerRecord int `yaml:"fields_per_record,omitempty" json:"fields_per_record"`
+ Delimiter string `yaml:"delimiter,omitempty" json:"delimiter"`
+ TrimLeadingSpace bool `yaml:"trim_leading_space" json:"trim_leading_space"`
+ Format string `yaml:"format,omitempty" json:"format"`
+ CheckField func(string) (string, int, bool) `yaml:"-" json:"-"`
+ }
+
+ CSVParser struct {
+ Config CSVConfig
+ reader *csv.Reader
+ format *csvFormat
+ }
+
+ csvFormat struct {
+ raw string
+ maxIndex int
+ fields []csvField
+ }
+
+ csvField struct {
+ name string
+ idx int
+ }
+)
+
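+// NewCSVParser returns a CSV parser for the given config and input,
+// or an error if the configured format is empty or cannot be parsed.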
+func NewCSVParser(config CSVConfig, in io.Reader) (*CSVParser, error) {
+ if config.Format == "" {
+ return nil, errors.New("empty csv format")
+ }
+
+ format, err := newCSVFormat(config)
+ if err != nil {
+ return nil, fmt.Errorf("bad csv format '%s': %v", config.Format, err)
+ }
+
+ p := &CSVParser{
+ Config: config,
+ reader: newCSVReader(in, config),
+ format: format,
+ }
+ return p, nil
+}
+
+func (p *CSVParser) ReadLine(line LogLine) error {
+ record, err := p.reader.Read()
+ if err != nil {
+ return handleCSVReaderError(err)
+ }
+ return p.format.parse(record, line)
+}
+
+func (p *CSVParser) Parse(row []byte, line LogLine) error {
+ r := newCSVReader(bytes.NewBuffer(row), p.Config)
+ record, err := r.Read()
+ if err != nil {
+ return handleCSVReaderError(err)
+ }
+ return p.format.parse(record, line)
+}
+
+func (p CSVParser) Info() string {
+ return fmt.Sprintf("csv: %s", p.format.raw)
+}
+
+func (f *csvFormat) parse(record []string, line LogLine) error {
+ if len(record) <= f.maxIndex {
+ return &ParseError{msg: "csv parse: unmatched line"}
+ }
+
+ for _, v := range f.fields {
+ if err := line.Assign(v.name, record[v.idx]); err != nil {
+ return &ParseError{msg: fmt.Sprintf("csv parse: %v", err), err: err}
+ }
+ }
+ return nil
+}
+
+func newCSVReader(in io.Reader, config CSVConfig) *csv.Reader {
+ r := csv.NewReader(in)
+ if config.Delimiter != "" {
+ if d, err := parseCSVDelimiter(config.Delimiter); err == nil {
+ r.Comma = d
+ }
+ }
+ r.TrimLeadingSpace = config.TrimLeadingSpace
+ r.FieldsPerRecord = config.FieldsPerRecord
+ r.ReuseRecord = true
+ return r
+}
+
+func newCSVFormat(config CSVConfig) (*csvFormat, error) {
+ r := csv.NewReader(strings.NewReader(config.Format))
+ if config.Delimiter != "" {
+ if d, err := parseCSVDelimiter(config.Delimiter); err == nil {
+ r.Comma = d
+ }
+ }
+ r.TrimLeadingSpace = config.TrimLeadingSpace
+
+ record, err := r.Read()
+ if err != nil {
+ return nil, err
+ }
+
+ fields, err := createCSVFields(record, config.CheckField)
+ if err != nil {
+ return nil, err
+ }
+
+ if len(fields) == 0 {
+ return nil, errors.New("zero fields")
+ }
+
+ format := &csvFormat{
+ raw: config.Format,
+ maxIndex: fields[len(fields)-1].idx,
+ fields: fields,
+ }
+ return format, nil
+}
+
+func createCSVFields(format []string, check func(string) (string, int, bool)) ([]csvField, error) {
+ if check == nil {
+ check = checkCSVFormatField
+ }
+ var fields []csvField
+ var offset int
+ seen := make(map[string]bool)
+
+ for i, name := range format {
+ name = strings.Trim(name, `"`)
+
+ name, addOffset, valid := check(name)
+ offset += addOffset
+ if !valid {
+ continue
+ }
+ if seen[name] {
+ return nil, fmt.Errorf("duplicate field: %s", name)
+ }
+ seen[name] = true
+
+ idx := i + offset
+ fields = append(fields, csvField{name, idx})
+ }
+ return fields, nil
+}
+
+func handleCSVReaderError(err error) error {
+ if isCSVParseError(err) {
+ return &ParseError{msg: fmt.Sprintf("csv parse: %v", err), err: err}
+ }
+ return err
+}
+
+func isCSVParseError(err error) bool {
+ return errors.Is(err, csv.ErrBareQuote) || errors.Is(err, csv.ErrFieldCount) || errors.Is(err, csv.ErrQuote)
+}
+
+func checkCSVFormatField(name string) (newName string, offset int, valid bool) {
+ if len(name) < 2 || !strings.HasPrefix(name, "$") {
+ return "", 0, false
+ }
+ return name, 0, true
+}
+
+func parseCSVDelimiter(s string) (rune, error) {
+ if isNumber(s) {
+ d, err := strconv.ParseInt(s, 10, 32)
+ if err != nil {
+ return 0, fmt.Errorf("invalid CSV delimiter: %v", err)
+ }
+ return rune(d), nil
+ }
+ if len(s) != 1 {
+ return 0, errors.New("invalid CSV delimiter: must be a single character")
+ }
+ return rune(s[0]), nil
+}
diff --git a/src/go/plugin/go.d/pkg/logs/csv_test.go b/src/go/plugin/go.d/pkg/logs/csv_test.go
new file mode 100644
index 000000000..d7baaa1b5
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/csv_test.go
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var testCSVConfig = CSVConfig{
+ Delimiter: " ",
+ Format: "$A %B",
+}
+
+func TestNewCSVParser(t *testing.T) {
+ tests := []struct {
+ name string
+ format string
+ wantErr bool
+ }{
+ {name: "valid format", format: "$A $B"},
+ {name: "empty format", wantErr: true},
+ {name: "bad format: csv read error", format: "$A $B \"$C", wantErr: true},
+ {name: "bad format: duplicate fields", format: "$A $A", wantErr: true},
+ {name: "bad format: zero fields", format: "!A !B", wantErr: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ c := testCSVConfig
+ c.Format = tt.format
+ p, err := NewCSVParser(c, nil)
+ if tt.wantErr {
+ assert.Error(t, err)
+ assert.Nil(t, p)
+ } else {
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+ }
+ })
+ }
+}
+
+func TestNewCSVFormat(t *testing.T) {
+ tests := []struct {
+ format string
+ wantFormat csvFormat
+ wantErr bool
+ }{
+ {format: "$A $B", wantFormat: csvFormat{maxIndex: 1, fields: []csvField{{"$A", 0}, {"$B", 1}}}},
+ {format: "$A $B !C $E", wantFormat: csvFormat{maxIndex: 3, fields: []csvField{{"$A", 0}, {"$B", 1}, {"$E", 3}}}},
+ {format: "!A !B !C $E", wantFormat: csvFormat{maxIndex: 3, fields: []csvField{{"$E", 3}}}},
+ {format: "$A $OFFSET $B", wantFormat: csvFormat{maxIndex: 3, fields: []csvField{{"$A", 0}, {"$B", 3}}}},
+ {format: "$A $OFFSET $B $OFFSET !A", wantFormat: csvFormat{maxIndex: 3, fields: []csvField{{"$A", 0}, {"$B", 3}}}},
+ {format: "$A $OFFSET $OFFSET $B", wantFormat: csvFormat{maxIndex: 5, fields: []csvField{{"$A", 0}, {"$B", 5}}}},
+ {format: "$OFFSET $A $OFFSET $B", wantFormat: csvFormat{maxIndex: 5, fields: []csvField{{"$A", 2}, {"$B", 5}}}},
+ {format: "$A \"$A", wantErr: true},
+ {format: "$A $A", wantErr: true},
+ {format: "!A !A", wantErr: true},
+ {format: "", wantErr: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.format, func(t *testing.T) {
+ c := testCSVConfig
+ c.Format = tt.format
+ c.CheckField = testCheckCSVFormatField
+ tt.wantFormat.raw = tt.format
+
+ f, err := newCSVFormat(c)
+
+ if tt.wantErr {
+ assert.Error(t, err)
+ assert.Nil(t, f)
+ } else {
+ assert.NoError(t, err)
+ assert.Equal(t, tt.wantFormat, *f)
+ }
+ })
+ }
+}
+
+func TestCSVParser_ReadLine(t *testing.T) {
+ tests := []struct {
+ name string
+ row string
+ format string
+ wantErr bool
+ wantParseErr bool
+ }{
+ {name: "match and no error", row: "1 2 3", format: `$A $B $C`},
+ {name: "match but error on assigning", row: "1 2 3", format: `$A $B $ERR`, wantErr: true, wantParseErr: true},
+ {name: "not match", row: "1 2 3", format: `$A $B $C $d`, wantErr: true, wantParseErr: true},
+ {name: "error on reading csv.Err", row: "1 2\"3", format: `$A $B $C`, wantErr: true, wantParseErr: true},
+ {name: "error on reading EOF", row: "", format: `$A $B $C`, wantErr: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var line logLine
+ r := strings.NewReader(tt.row)
+ c := testCSVConfig
+ c.Format = tt.format
+ p, err := NewCSVParser(c, r)
+ require.NoError(t, err)
+
+ err = p.ReadLine(&line)
+
+ if tt.wantErr {
+ require.Error(t, err)
+ if tt.wantParseErr {
+ assert.True(t, IsParseError(err))
+ } else {
+ assert.False(t, IsParseError(err))
+ }
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestCSVParser_Parse(t *testing.T) {
+ tests := []struct {
+ name string
+ row string
+ format string
+ wantErr bool
+ }{
+ {name: "match and no error", row: "1 2 3", format: `$A $B $C`},
+ {name: "match but error on assigning", row: "1 2 3", format: `$A $B $ERR`, wantErr: true},
+ {name: "not match", row: "1 2 3", format: `$A $B $C $d`, wantErr: true},
+ {name: "error on reading csv.Err", row: "1 2\"3", format: `$A $B $C`, wantErr: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var line logLine
+ r := strings.NewReader(tt.row)
+ c := testCSVConfig
+ c.Format = tt.format
+ p, err := NewCSVParser(c, r)
+ require.NoError(t, err)
+
+ err = p.ReadLine(&line)
+
+ if tt.wantErr {
+ require.Error(t, err)
+ assert.True(t, IsParseError(err))
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+
+}
+
+func TestCSVParser_Info(t *testing.T) {
+ p, err := NewCSVParser(testCSVConfig, nil)
+ require.NoError(t, err)
+ assert.NotZero(t, p.Info())
+}
+
+func testCheckCSVFormatField(name string) (newName string, offset int, valid bool) {
+ if len(name) < 2 || !strings.HasPrefix(name, "$") {
+ return "", 0, false
+ }
+ if name == "$OFFSET" {
+ return "", 1, false
+ }
+ return name, 0, true
+}
diff --git a/src/go/plugin/go.d/pkg/logs/json.go b/src/go/plugin/go.d/pkg/logs/json.go
new file mode 100644
index 000000000..ceb32e272
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/json.go
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+
+ "github.com/valyala/fastjson"
+)
+
+type JSONConfig struct {
+ Mapping map[string]string `yaml:"mapping" json:"mapping"`
+}
+
+type JSONParser struct {
+ reader *bufio.Reader
+ parser fastjson.Parser
+ buf []byte
+ mapping map[string]string
+}
+
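+// NewJSONParser returns a JSON parser that optionally renames keys according to config.Mapping.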
+func NewJSONParser(config JSONConfig, in io.Reader) (*JSONParser, error) {
+ parser := &JSONParser{
+ reader: bufio.NewReader(in),
+ mapping: config.Mapping,
+ buf: make([]byte, 0, 100),
+ }
+ return parser, nil
+}
+
+func (p *JSONParser) ReadLine(line LogLine) error {
+ row, err := p.reader.ReadSlice('\n')
+ if err != nil && len(row) == 0 {
+ return err
+ }
+ if len(row) > 0 && row[len(row)-1] == '\n' {
+ row = row[:len(row)-1]
+ }
+ return p.Parse(row, line)
+}
+
+func (p *JSONParser) Parse(row []byte, line LogLine) error {
+ val, err := p.parser.ParseBytes(row)
+ if err != nil {
+ return err
+ }
+
+ if err := p.parseObject("", val, line); err != nil {
+ return &ParseError{msg: fmt.Sprintf("json parse: %v", err), err: err}
+ }
+
+ return nil
+}
+
+func (p *JSONParser) parseObject(prefix string, val *fastjson.Value, line LogLine) error {
+ obj, err := val.Object()
+ if err != nil {
+ return err
+ }
+
+ obj.Visit(func(key []byte, v *fastjson.Value) {
+ if err != nil {
+ return
+ }
+
+ k := jsonObjKey(prefix, string(key))
+
+ switch v.Type() {
+ case fastjson.TypeString, fastjson.TypeNumber:
+ err = p.parseStringNumber(k, v, line)
+ case fastjson.TypeArray:
+ err = p.parseArray(k, v, line)
+ case fastjson.TypeObject:
+ err = p.parseObject(k, v, line)
+ default:
+ return
+ }
+ })
+
+ return err
+}
+
+func jsonObjKey(prefix, key string) string {
+ if prefix == "" {
+ return key
+ }
+ return prefix + "." + key
+}
+
+func (p *JSONParser) parseArray(key string, val *fastjson.Value, line LogLine) error {
+ arr, err := val.Array()
+ if err != nil {
+ return err
+ }
+
+ for i, v := range arr {
+ k := jsonObjKey(key, strconv.Itoa(i))
+
+ switch v.Type() {
+ case fastjson.TypeString, fastjson.TypeNumber:
+ err = p.parseStringNumber(k, v, line)
+ case fastjson.TypeArray:
+ err = p.parseArray(k, v, line)
+ case fastjson.TypeObject:
+ err = p.parseObject(k, v, line)
+ default:
+ continue
+ }
+
+ if err != nil {
+ return err
+ }
+ }
+
+ return err
+}
+
+func (p *JSONParser) parseStringNumber(key string, val *fastjson.Value, line LogLine) error {
+ if mapped, ok := p.mapping[key]; ok {
+ key = mapped
+ }
+
+ p.buf = p.buf[:0]
+ if p.buf = val.MarshalTo(p.buf); len(p.buf) == 0 {
+ return nil
+ }
+
+ if val.Type() == fastjson.TypeString {
+ // trim "
+ return line.Assign(key, string(p.buf[1:len(p.buf)-1]))
+ }
+ return line.Assign(key, string(p.buf))
+}
+
+func (p *JSONParser) Info() string {
+ return fmt.Sprintf("json: %q", p.mapping)
+}
diff --git a/src/go/plugin/go.d/pkg/logs/json_test.go b/src/go/plugin/go.d/pkg/logs/json_test.go
new file mode 100644
index 000000000..b82850031
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/json_test.go
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewJSONParser(t *testing.T) {
+ tests := map[string]struct {
+ config JSONConfig
+ wantErr bool
+ }{
+ "empty config": {
+ config: JSONConfig{},
+ wantErr: false,
+ },
+ "with mappings": {
+ config: JSONConfig{Mapping: map[string]string{"from_field_1": "to_field_1"}},
+ wantErr: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ p, err := NewJSONParser(test.config, nil)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ assert.Nil(t, p)
+ } else {
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+ }
+ })
+ }
+}
+
+func TestJSONParser_ReadLine(t *testing.T) {
+ tests := map[string]struct {
+ config JSONConfig
+ input string
+ wantAssigned map[string]string
+ wantErr bool
+ }{
+ "string value": {
+ input: `{ "string": "example.com" }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "string": "example.com",
+ },
+ },
+ "int value": {
+ input: `{ "int": 1 }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "int": "1",
+ },
+ },
+ "float value": {
+ input: `{ "float": 1.1 }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "float": "1.1",
+ },
+ },
+ "string, int, float values": {
+ input: `{ "string": "example.com", "int": 1, "float": 1.1 }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "string": "example.com",
+ "int": "1",
+ "float": "1.1",
+ },
+ },
+ "string, int, float values with mappings": {
+ config: JSONConfig{Mapping: map[string]string{
+ "string": "STRING",
+ "int": "INT",
+ "float": "FLOAT",
+ }},
+ input: `{ "string": "example.com", "int": 1, "float": 1.1 }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "STRING": "example.com",
+ "INT": "1",
+ "FLOAT": "1.1",
+ },
+ },
+ "nested": {
+ input: `{"one":{"two":2,"three":{"four":4}},"five":5}`,
+ config: JSONConfig{Mapping: map[string]string{
+ "one.two": "mapped_value",
+ }},
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "mapped_value": "2",
+ "one.three.four": "4",
+ "five": "5",
+ },
+ },
+ "nested with array": {
+ input: `{"one":{"two":[2,22]},"five":5}`,
+ config: JSONConfig{Mapping: map[string]string{
+ "one.two.1": "mapped_value",
+ }},
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "one.two.0": "2",
+ "mapped_value": "22",
+ "five": "5",
+ },
+ },
+ "error on malformed JSON": {
+ input: `{ "host"": unquoted_string}`,
+ wantErr: true,
+ },
+ "error on empty input": {
+ wantErr: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ line := newLogLine()
+ in := strings.NewReader(test.input)
+ p, err := NewJSONParser(test.config, in)
+ require.NoError(t, err)
+ require.NotNil(t, p)
+
+ err = p.ReadLine(line)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ assert.Equal(t, test.wantAssigned, line.assigned)
+ }
+ })
+ }
+}
+
+func TestJSONParser_Parse(t *testing.T) {
+ tests := map[string]struct {
+ config JSONConfig
+ input string
+ wantAssigned map[string]string
+ wantErr bool
+ }{
+ "string value": {
+ input: `{ "string": "example.com" }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "string": "example.com",
+ },
+ },
+ "int value": {
+ input: `{ "int": 1 }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "int": "1",
+ },
+ },
+ "float value": {
+ input: `{ "float": 1.1 }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "float": "1.1",
+ },
+ },
+ "string, int, float values": {
+ input: `{ "string": "example.com", "int": 1, "float": 1.1 }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "string": "example.com",
+ "int": "1",
+ "float": "1.1",
+ },
+ },
+ "string, int, float values with mappings": {
+ config: JSONConfig{Mapping: map[string]string{
+ "string": "STRING",
+ "int": "INT",
+ "float": "FLOAT",
+ }},
+ input: `{ "string": "example.com", "int": 1, "float": 1.1 }`,
+ wantErr: false,
+ wantAssigned: map[string]string{
+ "STRING": "example.com",
+ "INT": "1",
+ "FLOAT": "1.1",
+ },
+ },
+ "error on malformed JSON": {
+ input: `{ "host"": unquoted_string}`,
+ wantErr: true,
+ },
+ "error on empty input": {
+ wantErr: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ line := newLogLine()
+ p, err := NewJSONParser(test.config, nil)
+ require.NoError(t, err)
+ require.NotNil(t, p)
+
+ err = p.Parse([]byte(test.input), line)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ } else {
+ require.NoError(t, err)
+ assert.Equal(t, test.wantAssigned, line.assigned)
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/logs/lastline.go b/src/go/plugin/go.d/pkg/logs/lastline.go
new file mode 100644
index 000000000..911dbf497
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/lastline.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "errors"
+ "os"
+
+ "github.com/clbanning/rfile/v2"
+)
+
+const DefaultMaxLineWidth = 4 * 1024 // assume disk block size is 4K
+
+var ErrTooLongLine = errors.New("too long line")
+
+// ReadLastLine returns the last line of the file and any read error encountered.
+// It expects last line width <= maxLineWidth.
+// If maxLineWidth <= 0, it defaults to DefaultMaxLineWidth.
+func ReadLastLine(filename string, maxLineWidth int64) ([]byte, error) {
+ if maxLineWidth <= 0 {
+ maxLineWidth = DefaultMaxLineWidth
+ }
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = f.Close() }()
+
+ stat, _ := f.Stat()
+ endPos := stat.Size()
+ if endPos == 0 {
+ return []byte{}, nil
+ }
+ startPos := endPos - maxLineWidth
+ if startPos < 0 {
+ startPos = 0
+ }
+ buf := make([]byte, endPos-startPos)
+ n, err := f.ReadAt(buf, startPos)
+ if err != nil {
+ return nil, err
+ }
+ lnPos := 0
+ foundLn := false
+ for i := n - 2; i >= 0; i-- {
+ ch := buf[i]
+ if ch == '\n' {
+ foundLn = true
+ lnPos = i
+ break
+ }
+ }
+ if foundLn {
+ return buf[lnPos+1 : n], nil
+ }
+ if startPos == 0 {
+ return buf[0:n], nil
+ }
+
+ return nil, ErrTooLongLine
+}
+
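+// ReadLastLines returns the last n lines of the file.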
+func ReadLastLines(filename string, n uint) ([]string, error) {
+ return rfile.Tail(filename, int(n))
+}
diff --git a/src/go/plugin/go.d/pkg/logs/lastline_test.go b/src/go/plugin/go.d/pkg/logs/lastline_test.go
new file mode 100644
index 000000000..ea0a75e9e
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/lastline_test.go
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestReadLastLine(t *testing.T) {
+ tests := []struct {
+ name string
+ content string
+ expected string
+ err error
+ }{
+ {"empty", "", "", nil},
+ {"empty-ln", "\n", "\n", nil},
+ {"one-line", "hello", "hello", nil},
+ {"one-line-ln", "hello\n", "hello\n", nil},
+ {"multi-line", "hello\nworld", "world", nil},
+ {"multi-line-ln", "hello\nworld\n", "world\n", nil},
+ {"long-line", "hello hello hello", "", ErrTooLongLine},
+ {"long-line-ln", "hello hello hello\n", "", ErrTooLongLine},
+ }
+ for _, test := range tests {
+ t.Run(test.name, func(t *testing.T) {
+ filename := prepareFile(t, test.content)
+ defer func() { _ = os.Remove(filename) }()
+
+ line, err := ReadLastLine(filename, 10)
+
+ if test.err != nil {
+ require.NotNil(t, err)
+ assert.Contains(t, err.Error(), test.err.Error())
+ } else {
+ assert.Equal(t, test.expected, string(line))
+ }
+ })
+ }
+}
+
+func prepareFile(t *testing.T, content string) string {
+ t.Helper()
+ file, err := os.CreateTemp("", "go-test")
+ require.NoError(t, err)
+ defer func() { _ = file.Close() }()
+
+ _, _ = file.WriteString(content)
+ return file.Name()
+}
diff --git a/src/go/plugin/go.d/pkg/logs/ltsv.go b/src/go/plugin/go.d/pkg/logs/ltsv.go
new file mode 100644
index 000000000..b7fbceb14
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/ltsv.go
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+ "unsafe"
+
+ "github.com/Wing924/ltsv"
+)
+
+type (
+ LTSVConfig struct {
+ FieldDelimiter string `yaml:"field_delimiter" json:"field_delimiter"`
+ ValueDelimiter string `yaml:"value_delimiter" json:"value_delimiter"`
+ Mapping map[string]string `yaml:"mapping" json:"mapping"`
+ }
+
+ LTSVParser struct {
+ r *bufio.Reader
+ parser ltsv.Parser
+ mapping map[string]string
+ }
+)
+
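+// NewLTSVParser returns an LTSV parser; the field and value delimiters fall back to the
+// ltsv package defaults when not set in the config.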
+func NewLTSVParser(config LTSVConfig, in io.Reader) (*LTSVParser, error) {
+ p := ltsv.Parser{
+ FieldDelimiter: ltsv.DefaultParser.FieldDelimiter,
+ ValueDelimiter: ltsv.DefaultParser.ValueDelimiter,
+ StrictMode: false,
+ }
+ if config.FieldDelimiter != "" {
+ if d, err := parseLTSVDelimiter(config.FieldDelimiter); err == nil {
+ p.FieldDelimiter = d
+ }
+ }
+ if config.ValueDelimiter != "" {
+ if d, err := parseLTSVDelimiter(config.ValueDelimiter); err == nil {
+ p.ValueDelimiter = d
+ }
+ }
+ parser := &LTSVParser{
+ r: bufio.NewReader(in),
+ parser: p,
+ mapping: config.Mapping,
+ }
+ return parser, nil
+}
+
+func (p *LTSVParser) ReadLine(line LogLine) error {
+ row, err := p.r.ReadSlice('\n')
+ if err != nil && len(row) == 0 {
+ return err
+ }
+ if len(row) > 0 && row[len(row)-1] == '\n' {
+ row = row[:len(row)-1]
+ }
+ return p.Parse(row, line)
+}
+
+func (p *LTSVParser) Parse(row []byte, line LogLine) error {
+ err := p.parser.ParseLine(row, func(label []byte, value []byte) error {
+		s := *(*string)(unsafe.Pointer(&label)) // no alloc, same as in strings.Builder.String()
+ if v, ok := p.mapping[s]; ok {
+ s = v
+ }
+ return line.Assign(s, string(value))
+ })
+ if err != nil {
+ return &ParseError{msg: fmt.Sprintf("ltsv parse: %v", err), err: err}
+ }
+ return nil
+}
+
+func (p LTSVParser) Info() string {
+ return fmt.Sprintf("ltsv: %q", p.mapping)
+}
+
+func parseLTSVDelimiter(s string) (byte, error) {
+ if isNumber(s) {
+ d, err := strconv.ParseUint(s, 10, 8)
+ if err != nil {
+ return 0, fmt.Errorf("invalid LTSV delimiter: %v", err)
+ }
+ return byte(d), nil
+ }
+ if len(s) != 1 {
+ return 0, errors.New("invalid LTSV delimiter: must be a single character")
+ }
+ return s[0], nil
+}
diff --git a/src/go/plugin/go.d/pkg/logs/ltsv_test.go b/src/go/plugin/go.d/pkg/logs/ltsv_test.go
new file mode 100644
index 000000000..f6d5ec2bd
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/ltsv_test.go
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "strings"
+ "testing"
+
+ "github.com/Wing924/ltsv"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var testLTSVConfig = LTSVConfig{
+ FieldDelimiter: " ",
+ ValueDelimiter: "=",
+ Mapping: map[string]string{"KEY": "key"},
+}
+
+func TestNewLTSVParser(t *testing.T) {
+ tests := []struct {
+ name string
+ config LTSVConfig
+ wantErr bool
+ }{
+ {name: "config", config: testLTSVConfig},
+ {name: "empty config"},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ p, err := NewLTSVParser(tt.config, nil)
+
+ if tt.wantErr {
+ assert.Error(t, err)
+ assert.Nil(t, p)
+ } else {
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+ if tt.config.FieldDelimiter == "" {
+ assert.Equal(t, ltsv.DefaultParser.FieldDelimiter, p.parser.FieldDelimiter)
+ } else {
+ assert.Equal(t, tt.config.FieldDelimiter, string(p.parser.FieldDelimiter))
+ }
+ if tt.config.ValueDelimiter == "" {
+ assert.Equal(t, ltsv.DefaultParser.ValueDelimiter, p.parser.ValueDelimiter)
+ } else {
+ assert.Equal(t, tt.config.ValueDelimiter, string(p.parser.ValueDelimiter))
+ }
+ assert.Equal(t, tt.config.Mapping, p.mapping)
+ }
+ })
+ }
+}
+
+func TestLTSVParser_ReadLine(t *testing.T) {
+ tests := []struct {
+ name string
+ row string
+ wantErr bool
+ wantParseErr bool
+ }{
+ {name: "no error", row: "A=1 B=2 KEY=3"},
+ {name: "error on parsing", row: "NO LABEL", wantErr: true, wantParseErr: true},
+ {name: "error on assigning", row: "A=1 ERR=2", wantErr: true, wantParseErr: true},
+ {name: "error on reading EOF", row: "", wantErr: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var line logLine
+ r := strings.NewReader(tt.row)
+ p, err := NewLTSVParser(testLTSVConfig, r)
+ require.NoError(t, err)
+
+ err = p.ReadLine(&line)
+
+ if tt.wantErr {
+ require.Error(t, err)
+ if tt.wantParseErr {
+ assert.True(t, IsParseError(err))
+ } else {
+ assert.False(t, IsParseError(err))
+ }
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestLTSVParser_Parse(t *testing.T) {
+ tests := []struct {
+ name string
+ row string
+ wantErr bool
+ }{
+ {name: "no error", row: "A=1 B=2"},
+ {name: "error on parsing", row: "NO LABEL", wantErr: true},
+ {name: "error on assigning", row: "A=1 ERR=2", wantErr: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var line logLine
+ p, err := NewLTSVParser(testLTSVConfig, nil)
+ require.NoError(t, err)
+
+ err = p.Parse([]byte(tt.row), &line)
+
+ if tt.wantErr {
+ require.Error(t, err)
+ assert.True(t, IsParseError(err))
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestLTSVParser_Info(t *testing.T) {
+ p, err := NewLTSVParser(testLTSVConfig, nil)
+ require.NoError(t, err)
+ assert.NotZero(t, p.Info())
+}
diff --git a/src/go/plugin/go.d/pkg/logs/parser.go b/src/go/plugin/go.d/pkg/logs/parser.go
new file mode 100644
index 000000000..f22047b0c
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/parser.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+)
+
+type ParseError struct {
+ msg string
+ err error
+}
+
+func (e ParseError) Error() string { return e.msg }
+
+func (e ParseError) Unwrap() error { return e.err }
+
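+// IsParseError reports whether err is (or wraps) a *ParseError.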
+func IsParseError(err error) bool { var v *ParseError; return errors.As(err, &v) }
+
+type (
+ LogLine interface {
+ Assign(name string, value string) error
+ }
+
+ Parser interface {
+ ReadLine(LogLine) error
+ Parse(row []byte, line LogLine) error
+ Info() string
+ }
+)
+
+const (
+ TypeCSV = "csv"
+ TypeLTSV = "ltsv"
+ TypeRegExp = "regexp"
+ TypeJSON = "json"
+)
+
+type ParserConfig struct {
+ LogType string `yaml:"log_type,omitempty" json:"log_type"`
+ CSV CSVConfig `yaml:"csv_config,omitempty" json:"csv_config"`
+ LTSV LTSVConfig `yaml:"ltsv_config,omitempty" json:"ltsv_config"`
+ RegExp RegExpConfig `yaml:"regexp_config,omitempty" json:"regexp_config"`
+ JSON JSONConfig `yaml:"json_config,omitempty" json:"json_config"`
+}
+
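+// NewParser returns a parser for the configured log type (csv, ltsv, regexp or json).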
+func NewParser(config ParserConfig, in io.Reader) (Parser, error) {
+ switch config.LogType {
+ case TypeCSV:
+ return NewCSVParser(config.CSV, in)
+ case TypeLTSV:
+ return NewLTSVParser(config.LTSV, in)
+ case TypeRegExp:
+ return NewRegExpParser(config.RegExp, in)
+ case TypeJSON:
+ return NewJSONParser(config.JSON, in)
+ default:
+ return nil, fmt.Errorf("invalid type: %q", config.LogType)
+ }
+}
+
+func isNumber(s string) bool { _, err := strconv.Atoi(s); return err == nil }
diff --git a/src/go/plugin/go.d/pkg/logs/parser_test.go b/src/go/plugin/go.d/pkg/logs/parser_test.go
new file mode 100644
index 000000000..88ef46c27
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/parser_test.go
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
diff --git a/src/go/plugin/go.d/pkg/logs/reader.go b/src/go/plugin/go.d/pkg/logs/reader.go
new file mode 100644
index 000000000..55f0ee18f
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/reader.go
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "sort"
+
+ "github.com/netdata/netdata/go/plugins/logger"
+)
+
+const (
+ maxEOF = 60
+)
+
+var (
+ ErrNoMatchedFile = errors.New("no matched files")
+)
+
+// Reader is a log-rotation-aware reader.
+// TODO: better reopen algorithm
+// TODO: handle truncate
+type Reader struct {
+ file *os.File
+ path string
+ excludePath string
+ eofCounter int
+ continuousEOF int
+ log *logger.Logger
+}
+
+// Open finds the file matching the shell pattern path (skipping files that match
+// excludePath), opens it, and seeks to the end of the file.
+func Open(path string, excludePath string, log *logger.Logger) (*Reader, error) {
+ var err error
+ if path, err = filepath.Abs(path); err != nil {
+ return nil, err
+ }
+ if _, err = filepath.Match(path, "/"); err != nil {
+ return nil, fmt.Errorf("bad path syntax: %q", path)
+ }
+ if _, err = filepath.Match(excludePath, "/"); err != nil {
+ return nil, fmt.Errorf("bad exclude_path syntax: %q", path)
+ }
+ r := &Reader{
+ path: path,
+ excludePath: excludePath,
+ log: log,
+ }
+
+ if err = r.open(); err != nil {
+ return nil, err
+ }
+ return r, nil
+}
+
+// CurrentFilename returns the name of the currently opened file.
+func (r *Reader) CurrentFilename() string {
+ return r.file.Name()
+}
+
+func (r *Reader) open() error {
+ path := r.findFile()
+ if path == "" {
+ r.log.Debugf("couldn't find log file, used path: '%s', exclude_path: '%s'", r.path, r.excludePath)
+ return ErrNoMatchedFile
+ }
+ r.log.Debug("open log file: ", path)
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ stat, err := file.Stat()
+ if err != nil {
+ return err
+ }
+ if _, err = file.Seek(stat.Size(), io.SeekStart); err != nil {
+ return err
+ }
+ r.file = file
+ return nil
+}
+
+func (r *Reader) Read(p []byte) (n int, err error) {
+ n, err = r.file.Read(p)
+ if err != nil {
+ switch {
+ case err == io.EOF:
+ err = r.handleEOFErr()
+ case errors.Is(err, os.ErrInvalid): // r.file is nil after Close
+ err = r.handleInvalidArgErr()
+ }
+ return
+ }
+ r.continuousEOF = 0
+ return
+}
+
+func (r *Reader) handleEOFErr() (err error) {
+ err = io.EOF
+ r.eofCounter++
+ r.continuousEOF++
+ if r.eofCounter < maxEOF || r.continuousEOF < 2 {
+ return err
+ }
+ if err2 := r.reopen(); err2 != nil {
+ err = err2
+ }
+ return err
+}
+
+func (r *Reader) handleInvalidArgErr() (err error) {
+ err = io.EOF
+ if err2 := r.reopen(); err2 != nil {
+ err = err2
+ }
+ return err
+}
+
+func (r *Reader) Close() (err error) {
+ if r == nil || r.file == nil {
+ return
+ }
+ r.log.Debug("close log file: ", r.file.Name())
+ err = r.file.Close()
+ r.file = nil
+ r.eofCounter = 0
+ return
+}
+
+func (r *Reader) reopen() error {
+ r.log.Debugf("reopen, look for: %s", r.path)
+ _ = r.Close()
+ return r.open()
+}
+
+func (r *Reader) findFile() string {
+ return find(r.path, r.excludePath)
+}
+
+func find(path, exclude string) string {
+ return finder{}.find(path, exclude)
+}
+
+// TODO: tests
+type finder struct{}
+
+func (f finder) find(path, exclude string) string {
+ files, _ := filepath.Glob(path)
+ if len(files) == 0 {
+ return ""
+ }
+
+ files = f.filter(files, exclude)
+ if len(files) == 0 {
+ return ""
+ }
+
+ return f.findLastFile(files)
+}
+
+func (f finder) filter(files []string, exclude string) []string {
+ if exclude == "" {
+ return files
+ }
+
+ fs := make([]string, 0, len(files))
+ for _, file := range files {
+ if ok, _ := filepath.Match(exclude, file); ok {
+ continue
+ }
+ fs = append(fs, file)
+ }
+ return fs
+}
+
+// TODO: the logic is probably wrong
+func (f finder) findLastFile(files []string) string {
+ sort.Strings(files)
+ for i := len(files) - 1; i >= 0; i-- {
+ stat, err := os.Stat(files[i])
+ if err != nil || !stat.Mode().IsRegular() {
+ continue
+ }
+ return files[i]
+ }
+ return ""
+}
diff --git a/src/go/plugin/go.d/pkg/logs/reader_test.go b/src/go/plugin/go.d/pkg/logs/reader_test.go
new file mode 100644
index 000000000..e6ef47fe7
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/reader_test.go
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestReader_Read(t *testing.T) {
+ reader, teardown := prepareTestReader(t)
+ defer teardown()
+
+ r := testReader{bufio.NewReader(reader)}
+ filename := reader.CurrentFilename()
+ numLogs := 5
+ var sum int
+
+ for i := 0; i < 10; i++ {
+ appendLogs(t, filename, time.Millisecond*10, numLogs)
+ n, err := r.readUntilEOF()
+ sum += n
+
+ assert.Equal(t, io.EOF, err)
+ assert.Equal(t, numLogs*(i+1), sum)
+ }
+}
+
+func TestReader_Read_HandleFileRotation(t *testing.T) {
+ reader, teardown := prepareTestReader(t)
+ defer teardown()
+
+ r := testReader{bufio.NewReader(reader)}
+ filename := reader.CurrentFilename()
+ numLogs := 5
+ rotateFile(t, filename)
+ appendLogs(t, filename, time.Millisecond*10, numLogs)
+
+ n, err := r.readUntilEOFTimes(maxEOF)
+ assert.Equal(t, io.EOF, err)
+ assert.Equal(t, 0, n)
+
+ appendLogs(t, filename, time.Millisecond*10, numLogs)
+ n, err = r.readUntilEOF()
+ assert.Equal(t, io.EOF, err)
+ assert.Equal(t, numLogs, n)
+}
+
+func TestReader_Read_HandleFileRotationWithDelay(t *testing.T) {
+ reader, teardown := prepareTestReader(t)
+ defer teardown()
+
+ r := testReader{bufio.NewReader(reader)}
+ filename := reader.CurrentFilename()
+ _ = os.Remove(filename)
+
+ // trigger reopen first time
+ n, err := r.readUntilEOFTimes(maxEOF)
+ assert.Equal(t, ErrNoMatchedFile, err)
+ assert.Equal(t, 0, n)
+
+ f, err := os.Create(filename)
+ require.NoError(t, err)
+ _ = f.Close()
+
+ // trigger reopen 2nd time
+ n, err = r.readUntilEOF()
+ assert.Equal(t, io.EOF, err)
+ assert.Equal(t, 0, n)
+
+ numLogs := 5
+ appendLogs(t, filename, time.Millisecond*10, numLogs)
+ n, err = r.readUntilEOF()
+ assert.Equal(t, io.EOF, err)
+ assert.Equal(t, numLogs, n)
+}
+
+func TestReader_Close(t *testing.T) {
+ reader, teardown := prepareTestReader(t)
+ defer teardown()
+
+ assert.NoError(t, reader.Close())
+ assert.Nil(t, reader.file)
+}
+
+func TestReader_Close_NilFile(t *testing.T) {
+ var r Reader
+ assert.NoError(t, r.Close())
+}
+
+func TestOpen(t *testing.T) {
+ tempFileName1 := prepareTempFile(t, "*-web_log-open-test-1.log")
+ tempFileName2 := prepareTempFile(t, "*-web_log-open-test-2.log")
+ tempFileName3 := prepareTempFile(t, "*-web_log-open-test-3.log")
+ defer func() {
+ _ = os.Remove(tempFileName1)
+ _ = os.Remove(tempFileName2)
+ _ = os.Remove(tempFileName3)
+ }()
+
+ makePath := func(s string) string {
+ return filepath.Join(os.TempDir(), s)
+ }
+
+ tests := []struct {
+ name string
+ path string
+ exclude string
+ err bool
+ }{
+ {
+ name: "match without exclude",
+ path: makePath("*-web_log-open-test-[1-3].log"),
+ },
+ {
+ name: "match with exclude",
+ path: makePath("*-web_log-open-test-[1-3].log"),
+ exclude: makePath("*-web_log-open-test-[2-3].log"),
+ },
+ {
+ name: "exclude everything",
+ path: makePath("*-web_log-open-test-[1-3].log"),
+ exclude: makePath("*"),
+ err: true,
+ },
+ {
+ name: "no match",
+ path: makePath("*-web_log-no-match-test-[1-3].log"),
+ err: true,
+ },
+ {
+ name: "bad path pattern",
+ path: "[qw",
+ err: true,
+ },
+ {
+ name: "bad exclude path pattern",
+ path: "[qw",
+ err: true,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ r, err := Open(tt.path, tt.exclude, nil)
+
+ if tt.err {
+ assert.Error(t, err)
+ } else {
+ assert.NoError(t, err)
+ assert.NotNil(t, r.file)
+ _ = r.Close()
+ }
+ })
+ }
+}
+
+func TestReader_CurrentFilename(t *testing.T) {
+ reader, teardown := prepareTestReader(t)
+ defer teardown()
+
+ assert.Equal(t, reader.file.Name(), reader.CurrentFilename())
+}
+
+type testReader struct {
+ *bufio.Reader
+}
+
+func (r *testReader) readUntilEOF() (n int, err error) {
+ for {
+ _, err = r.ReadBytes('\n')
+ if err != nil {
+ break
+ }
+ n++
+ }
+ return n, err
+}
+
+func (r *testReader) readUntilEOFTimes(times int) (sum int, err error) {
+ var n int
+ for i := 0; i < times; i++ {
+ n, err = r.readUntilEOF()
+ if err != io.EOF {
+ break
+ }
+ sum += n
+ }
+ return sum, err
+}
+
+func prepareTempFile(t *testing.T, pattern string) string {
+ t.Helper()
+ f, err := os.CreateTemp("", pattern)
+ require.NoError(t, err)
+ return f.Name()
+}
+
+func prepareTestReader(t *testing.T) (reader *Reader, teardown func()) {
+ t.Helper()
+ filename := prepareTempFile(t, "*-web_log-test.log")
+ f, err := os.Open(filename)
+ require.NoError(t, err)
+
+ teardown = func() {
+ _ = os.Remove(filename)
+ _ = reader.file.Close()
+ }
+ reader = &Reader{
+ file: f,
+ path: filename,
+ }
+ return reader, teardown
+}
+
+func rotateFile(t *testing.T, filename string) {
+ t.Helper()
+ require.NoError(t, os.Remove(filename))
+ f, err := os.Create(filename)
+ require.NoError(t, err)
+ _ = f.Close()
+}
+
+func appendLogs(t *testing.T, filename string, interval time.Duration, numOfLogs int) {
+ t.Helper()
+ base := filepath.Base(filename)
+ file, err := os.OpenFile(filename, os.O_RDWR|os.O_APPEND, os.ModeAppend)
+ require.NoError(t, err)
+ require.NotNil(t, file)
+ defer func() { _ = file.Close() }()
+
+ for i := 0; i < numOfLogs; i++ {
+ _, err = fmt.Fprintln(file, "line", i, "filename", base)
+ require.NoError(t, err)
+ time.Sleep(interval)
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/logs/regexp.go b/src/go/plugin/go.d/pkg/logs/regexp.go
new file mode 100644
index 000000000..e0dee1d02
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/regexp.go
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "regexp"
+)
+
+type (
+ RegExpConfig struct {
+ Pattern string `yaml:"pattern" json:"pattern"`
+ }
+
+ RegExpParser struct {
+ r *bufio.Reader
+ pattern *regexp.Regexp
+ }
+)
+
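+// NewRegExpParser returns a regexp parser; the pattern must be non-empty, compile,
+// and contain at least one subgroup.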
+func NewRegExpParser(config RegExpConfig, in io.Reader) (*RegExpParser, error) {
+ if config.Pattern == "" {
+ return nil, errors.New("empty pattern")
+ }
+
+ pattern, err := regexp.Compile(config.Pattern)
+ if err != nil {
+ return nil, fmt.Errorf("compile: %w", err)
+ }
+
+ if pattern.NumSubexp() == 0 {
+ return nil, errors.New("pattern has no named subgroups")
+ }
+
+ p := &RegExpParser{
+ r: bufio.NewReader(in),
+ pattern: pattern,
+ }
+ return p, nil
+}
+
+func (p *RegExpParser) ReadLine(line LogLine) error {
+ row, err := p.r.ReadSlice('\n')
+ if err != nil && len(row) == 0 {
+ return err
+ }
+ if len(row) > 0 && row[len(row)-1] == '\n' {
+ row = row[:len(row)-1]
+ }
+ return p.Parse(row, line)
+}
+
+func (p *RegExpParser) Parse(row []byte, line LogLine) error {
+ match := p.pattern.FindSubmatch(row)
+ if len(match) == 0 {
+ return &ParseError{msg: "regexp parse: unmatched line"}
+ }
+
+ for i, name := range p.pattern.SubexpNames() {
+ if name == "" || match[i] == nil {
+ continue
+ }
+ err := line.Assign(name, string(match[i]))
+ if err != nil {
+ return &ParseError{msg: fmt.Sprintf("regexp parse: %v", err), err: err}
+ }
+ }
+ return nil
+}
+
+func (p RegExpParser) Info() string {
+ return fmt.Sprintf("regexp: %s", p.pattern)
+}
diff --git a/src/go/plugin/go.d/pkg/logs/regexp_test.go b/src/go/plugin/go.d/pkg/logs/regexp_test.go
new file mode 100644
index 000000000..fc7bacaa5
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/logs/regexp_test.go
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package logs
+
+import (
+ "errors"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewRegExpParser(t *testing.T) {
+ tests := []struct {
+ name string
+ pattern string
+ wantErr bool
+ }{
+ {name: "valid pattern", pattern: `(?P<A>\d+) (?P<B>\d+)`},
+ {name: "no names subgroups in pattern", pattern: `(?:\d+) (?:\d+)`, wantErr: true},
+ {name: "invalid pattern", pattern: `(((?P<A>\d+) (?P<B>\d+)`, wantErr: true},
+ {name: "empty pattern", wantErr: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ p, err := NewRegExpParser(RegExpConfig{Pattern: tt.pattern}, nil)
+ if tt.wantErr {
+ assert.Error(t, err)
+ assert.Nil(t, p)
+ } else {
+ assert.NoError(t, err)
+ assert.NotNil(t, p)
+ }
+ })
+ }
+}
+
+func TestRegExpParser_ReadLine(t *testing.T) {
+ tests := []struct {
+ name string
+ row string
+ pattern string
+ wantErr bool
+ wantParseErr bool
+ }{
+ {name: "match and no error", row: "1 2", pattern: `(?P<A>\d+) (?P<B>\d+)`},
+ {name: "match but error on assigning", row: "1 2", pattern: `(?P<A>\d+) (?P<ERR>\d+)`, wantErr: true, wantParseErr: true},
+ {name: "not match", row: "A B", pattern: `(?P<A>\d+) (?P<B>\d+)`, wantErr: true, wantParseErr: true},
+ {name: "not match multiline", row: "a b\n3 4", pattern: `(?P<A>\d+) (?P<B>\d+)`, wantErr: true, wantParseErr: true},
+ {name: "error on reading EOF", row: "", pattern: `(?P<A>\d+) (?P<B>\d+)`, wantErr: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var line logLine
+ r := strings.NewReader(tt.row)
+ p, err := NewRegExpParser(RegExpConfig{Pattern: tt.pattern}, r)
+ require.NoError(t, err)
+
+ err = p.ReadLine(&line)
+ if tt.wantErr {
+ require.Error(t, err)
+ if tt.wantParseErr {
+ assert.True(t, IsParseError(err))
+ } else {
+ assert.False(t, IsParseError(err))
+ }
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestRegExpParser_Parse(t *testing.T) {
+ tests := []struct {
+ name string
+ row string
+ pattern string
+ wantErr bool
+ }{
+ {name: "match and no error", row: "1 2", pattern: `(?P<A>\d+) (?P<B>\d+)`},
+ {name: "match but error on assigning", row: "1 2", pattern: `(?P<A>\d+) (?P<ERR>\d+)`, wantErr: true},
+ {name: "not match", row: "A B", pattern: `(?P<A>\d+) (?P<B>\d+)`, wantErr: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var line logLine
+ p, err := NewRegExpParser(RegExpConfig{Pattern: tt.pattern}, nil)
+ require.NoError(t, err)
+
+ err = p.Parse([]byte(tt.row), &line)
+ if tt.wantErr {
+ require.Error(t, err)
+ assert.True(t, IsParseError(err))
+ } else {
+ assert.NoError(t, err)
+ }
+ })
+ }
+}
+
+func TestRegExpParser_Info(t *testing.T) {
+ p, err := NewRegExpParser(RegExpConfig{Pattern: `(?P<A>\d+) (?P<B>\d+)`}, nil)
+ require.NoError(t, err)
+ assert.NotZero(t, p.Info())
+}
+
+type logLine struct {
+ assigned map[string]string
+}
+
+func newLogLine() *logLine {
+ return &logLine{
+ assigned: make(map[string]string),
+ }
+}
+
+func (l *logLine) Assign(name, val string) error {
+ switch name {
+ case "$ERR", "ERR":
+ return errors.New("assign error")
+ }
+ if l.assigned != nil {
+ l.assigned[name] = val
+ }
+ return nil
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/README.md b/src/go/plugin/go.d/pkg/matcher/README.md
new file mode 100644
index 000000000..971774ec2
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/README.md
@@ -0,0 +1,142 @@
+<!--
+title: "matcher"
+custom_edit_url: "/src/go/plugin/go.d/pkg/matcher/README.md"
+sidebar_label: "matcher"
+learn_status: "Published"
+learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages"
+-->
+
+# matcher
+
+## Supported Format
+
+* string
+* glob
+* regexp
+* simple patterns
+
+Depending on the symbol at the start of the string, the `matcher` will use one of the supported formats.
+
+| matcher | short format | long format |
+|-----------------|--------------|-------------------|
+| string | ` =` | `string` |
+| glob | `*` | `glob` |
+| regexp | `~` | `regexp` |
+| simple patterns | | `simple_patterns` |
+
+Example:
+
+- `* pattern`: It will use the `glob` matcher to match the string against `pattern`.
+
+### Syntax
+
+**Tip**: Read `::=` as `is defined as`.
+
+```
+Short Syntax
+ [ <not> ] <format> <space> <expr>
+
+ <not> ::= '!'
+ negative expression
+ <format> ::= [ '=', '~', '*' ]
+ '=' means string match
+ '~' means regexp match
+ '*' means glob match
+ <space> ::= { ' ' | '\t' | '\n' | '\n' | '\r' }
+ <expr> ::= any string
+
+ Long Syntax
+ [ <not> ] <format> <separator> <expr>
+
+ <format> ::= [ 'string' | 'glob' | 'regexp' | 'simple_patterns' ]
+ <not> ::= '!'
+ negative expression
+ <separator> ::= ':'
+ <expr> ::= any string
+```
+
+When using the short syntax, you can enable the glob format by starting the string with a `*`, while the long syntax
+requires you to name the format explicitly. The following two examples are equivalent. `simple_patterns` can be used **only** with
+the long syntax.
+
+Examples:
+
+- Short Syntax: `'* * '`
+- Long Syntax: `'glob:*'`
+
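+For illustration, here is a minimal Go sketch of how such expressions could be parsed and used
+(the import path is assumed from the repository layout):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	// assumed import path for this package
+	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+func main() {
+	// Short syntax: '*' selects the glob format, "web_*" is the pattern.
+	m := matcher.Must(matcher.Parse("* web_*"))
+	fmt.Println(m.MatchString("web_log")) // true
+
+	// Long syntax: the same matcher written explicitly.
+	m = matcher.Must(matcher.Parse("glob:web_*"))
+	fmt.Println(m.MatchString("nginx")) // false
+}
+```
+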
+### String matcher
+
+The string matcher reports whether the given value equals the string.
+
+Examples:
+
+- `'= foo'` matches only if the string is `foo`.
+- `'!= bar'` matches any string that is not `bar`.
+
+The string matcher performs an **exact match** of the `string`. Other common string-matching cases are:
+
+- string has prefix `something`
+- string has suffix `something`
+- string contains `something`
+
+This is achievable using the `glob` matcher:
+
+- `* PREFIX*` matches any string that *starts* with `PREFIX`, e.g. `PREFIXnetdata`
+- `* *SUFFIX` matches any string that *ends* with `SUFFIX`, e.g. `netdataSUFFIX`
+- `* *SUBSTRING*` matches any string that *contains* `SUBSTRING`, e.g. `netdataSUBSTRINGnetdata`
+
+### Glob matcher
+
+The glob matcher reports whether the given value matches the wildcard pattern. It uses the standard Go
+`path` package. See the [Go documentation](https://golang.org/pkg/path/#Match) for the full pattern syntax,
+which you can experiment with before using it in your Netdata configuration.
+
+The pattern syntax is:
+
+```
+ pattern:
+ { term }
+ term:
+ '*' matches any sequence of characters
+ '?' matches any single character
+ '[' [ '^' ] { character-range } ']'
+ character class (must be non-empty)
+ c matches character c (c != '*', '?', '\\', '[')
+ '\\' c matches character c
+
+ character-range:
+ c matches character c (c != '\\', '-', ']')
+ '\\' c matches character c
+ lo '-' hi matches character c for lo <= c <= hi
+```
+
+Examples:
+
+- `* ?` matches any string that is a single character.
+- `'?a'` matches any two-character string whose second character is `a`, like `ba` but not `bb` or `bba`.
+- `'[^abc]'` matches any single character that is NOT `a`, `b`, or `c`. `'[abc]'` matches only `a`, `b`, or `c`.
+- `'*[a-d]'` matches any string (`*`) that ends with a character between `a` and `d` (i.e. `a`, `b`, `c`, or `d`).
+
+### Regexp matcher
+
+The regexp matcher reports whether the given value matches the regular expression pattern (it uses `regexp.Match`).
+
+The RegExp syntax is described at https://golang.org/pkg/regexp/syntax/.
+
+Learn more about regular expressions at [RegexOne](https://regexone.com/).
+
+### Simple patterns matcher
+
+The simple patterns matcher reports whether the given value matches the simple patterns.
+
+Simple patterns are a space-separated list of words. Each word may use any number of wildcards `*`. Simple patterns
+allow negative matches by prefixing a word with `!`.
+
+Examples:
+
+- `!*bad* *` matches anything, except strings that contain the word `bad`.
+- `*foobar* !foo* !*bar *` matches everything containing `foobar`, except strings that start with `foo` or end with `bar`.
+
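+A hypothetical usage sketch (simple patterns are available only via the long syntax; it assumes the
+matcher package from this repository is imported as `matcher`):
+
+```go
+m := matcher.Must(matcher.Parse("simple_patterns:!*bad* *"))
+
+m.MatchString("all good here")      // true
+m.MatchString("something bad here") // false
+```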
+
+
+
diff --git a/src/go/plugin/go.d/pkg/matcher/cache.go b/src/go/plugin/go.d/pkg/matcher/cache.go
new file mode 100644
index 000000000..4594fa06f
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/cache.go
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import "sync"
+
+type (
+ cachedMatcher struct {
+ matcher Matcher
+
+ mux sync.RWMutex
+ cache map[string]bool
+ }
+)
+
+// WithCache adds a result cache to the matcher.
+func WithCache(m Matcher) Matcher {
+ switch m {
+ case TRUE(), FALSE():
+ return m
+ default:
+ return &cachedMatcher{matcher: m, cache: make(map[string]bool)}
+ }
+}
+
+func (m *cachedMatcher) Match(b []byte) bool {
+ s := string(b)
+ if result, ok := m.fetch(s); ok {
+ return result
+ }
+ result := m.matcher.Match(b)
+ m.put(s, result)
+ return result
+}
+
+func (m *cachedMatcher) MatchString(s string) bool {
+ if result, ok := m.fetch(s); ok {
+ return result
+ }
+ result := m.matcher.MatchString(s)
+ m.put(s, result)
+ return result
+}
+
+func (m *cachedMatcher) fetch(key string) (result bool, ok bool) {
+ m.mux.RLock()
+ result, ok = m.cache[key]
+ m.mux.RUnlock()
+ return
+}
+
+func (m *cachedMatcher) put(key string, result bool) {
+ m.mux.Lock()
+ m.cache[key] = result
+ m.mux.Unlock()
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/cache_test.go b/src/go/plugin/go.d/pkg/matcher/cache_test.go
new file mode 100644
index 000000000..a545777b3
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/cache_test.go
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestWithCache(t *testing.T) {
+ regMatcher, _ := NewRegExpMatcher("[0-9]+")
+ cached := WithCache(regMatcher)
+
+ assert.True(t, cached.MatchString("1"))
+ assert.True(t, cached.MatchString("1"))
+ assert.True(t, cached.Match([]byte("2")))
+ assert.True(t, cached.Match([]byte("2")))
+}
+
+func TestWithCache_specialCase(t *testing.T) {
+ assert.Equal(t, TRUE(), WithCache(TRUE()))
+ assert.Equal(t, FALSE(), WithCache(FALSE()))
+}
+func BenchmarkCachedMatcher_MatchString_cache_hit(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ expr string
+ target string
+ }{
+ {"stringFullMatcher", "= abc123", "abc123"},
+ {"stringPrefixMatcher", "~ ^abc123", "abc123456"},
+ {"stringSuffixMatcher", "~ abc123$", "hello abc123"},
+ {"stringSuffixMatcher", "~ abc123", "hello abc123 world"},
+ {"globMatcher", "* abc*def", "abc12345678def"},
+ {"regexp", "~ [0-9]+", "1234567890"},
+ }
+ for _, bm := range benchmarks {
+ m := Must(Parse(bm.expr))
+ b.Run(bm.name+"_raw", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ m.MatchString(bm.target)
+ }
+ })
+ b.Run(bm.name+"_cache", func(b *testing.B) {
+ cached := WithCache(m)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ cached.MatchString(bm.target)
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/doc.go b/src/go/plugin/go.d/pkg/matcher/doc.go
new file mode 100644
index 000000000..33b06988d
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/doc.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+Package matcher implements various formats of string matchers.
+
+Supported Format
+
+ string
+ glob
+ regexp
+ simple patterns
+
+The string matcher reports whether the given value is equal to the string (it uses ==).
+
+The glob matcher reports whether the given value matches the wildcard pattern.
+The pattern syntax is:
+
+ pattern:
+ { term }
+ term:
+ '*' matches any sequence of characters
+ '?' matches any single character
+ '[' [ '^' ] { character-range } ']'
+ character class (must be non-empty)
+ c matches character c (c != '*', '?', '\\', '[')
+ '\\' c matches character c
+
+ character-range:
+ c matches character c (c != '\\', '-', ']')
+ '\\' c matches character c
+ lo '-' hi matches character c for lo <= c <= hi
+
+The regexp matcher reports whether the given value matches the RegExp pattern (it uses regexp.Match).
+The RegExp syntax is described at https://golang.org/pkg/regexp/syntax/.
+
+The simple patterns matcher reports whether the given value matches the simple patterns.
+Simple patterns are a custom format used in Netdata;
+their syntax is described at https://docs.netdata.cloud/libnetdata/simple_pattern/.
+*/
+package matcher
diff --git a/src/go/plugin/go.d/pkg/matcher/doc_test.go b/src/go/plugin/go.d/pkg/matcher/doc_test.go
new file mode 100644
index 000000000..4cc3944df
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/doc_test.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher_test
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+func ExampleNew_string_format() {
+	// create a string matcher, which performs a full (exact) text match
+ m, err := matcher.New(matcher.FmtString, "hello")
+ if err != nil {
+ panic(err)
+ }
+ m.MatchString("hello") // => true
+ m.MatchString("hello world") // => false
+}
+
+func ExampleNew_glob_format() {
+	// create a glob matcher, which performs wildcard match
+	m, err := matcher.New(matcher.FmtGlob, "hello*")
+ if err != nil {
+ panic(err)
+ }
+ m.MatchString("hello") // => true
+ m.MatchString("hello world") // => true
+ m.MatchString("Hello world") // => false
+}
+
+func ExampleNew_simple_patterns_format() {
+	// create a simple patterns matcher, which performs simple patterns match
+ m, err := matcher.New(matcher.FmtSimplePattern, "hello* !*world *")
+ if err != nil {
+ panic(err)
+ }
+ m.MatchString("hello") // => true
+ m.MatchString("hello world") // => true
+ m.MatchString("Hello world") // => false
+	m.MatchString("Hello world!") // => true (matched by the final "*" pattern)
+}
+
+func ExampleNew_regexp_format() {
+	// create a regexp matcher, which performs regexp match
+ m, err := matcher.New(matcher.FmtRegExp, "[0-9]+")
+ if err != nil {
+ panic(err)
+ }
+ m.MatchString("1") // => true
+ m.MatchString("1a") // => true
+ m.MatchString("a") // => false
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/expr.go b/src/go/plugin/go.d/pkg/matcher/expr.go
new file mode 100644
index 000000000..e5ea0cb2e
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/expr.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "errors"
+ "fmt"
+)
+
+type (
+ Expr interface {
+ Parse() (Matcher, error)
+ }
+
+ // SimpleExpr is a simple expression to describe the condition:
+ // (includes[0].Match(v) || includes[1].Match(v) || ...) && !(excludes[0].Match(v) || excludes[1].Match(v) || ...)
+ SimpleExpr struct {
+ Includes []string `yaml:"includes,omitempty" json:"includes"`
+ Excludes []string `yaml:"excludes,omitempty" json:"excludes"`
+ }
+)
+
+var (
+ ErrEmptyExpr = errors.New("empty expression")
+)
+
+// Empty returns true if both Includes and Excludes are empty.
+func (s *SimpleExpr) Empty() bool {
+ return len(s.Includes) == 0 && len(s.Excludes) == 0
+}
+
+// Parse parses the expressions in Includes and Excludes and combines them into a single Matcher.
+func (s *SimpleExpr) Parse() (Matcher, error) {
+ if len(s.Includes) == 0 && len(s.Excludes) == 0 {
+ return nil, ErrEmptyExpr
+ }
+ var (
+ includes = FALSE()
+ excludes = FALSE()
+ )
+ if len(s.Includes) > 0 {
+ for _, item := range s.Includes {
+ m, err := Parse(item)
+ if err != nil {
+ return nil, fmt.Errorf("parse matcher %q error: %v", item, err)
+ }
+ includes = Or(includes, m)
+ }
+ } else {
+ includes = TRUE()
+ }
+
+ for _, item := range s.Excludes {
+ m, err := Parse(item)
+ if err != nil {
+ return nil, fmt.Errorf("parse matcher %q error: %v", item, err)
+ }
+ excludes = Or(excludes, m)
+ }
+
+ return And(includes, Not(excludes)), nil
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/expr_test.go b/src/go/plugin/go.d/pkg/matcher/expr_test.go
new file mode 100644
index 000000000..93a183226
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/expr_test.go
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSimpleExpr_none(t *testing.T) {
+ expr := &SimpleExpr{}
+
+ m, err := expr.Parse()
+ assert.EqualError(t, err, ErrEmptyExpr.Error())
+ assert.Nil(t, m)
+}
+
+func TestSimpleExpr_include(t *testing.T) {
+ expr := &SimpleExpr{
+ Includes: []string{
+ "~ /api/",
+ "~ .php$",
+ },
+ }
+
+ m, err := expr.Parse()
+ assert.NoError(t, err)
+
+ assert.True(t, m.MatchString("/api/a.php"))
+ assert.True(t, m.MatchString("/api/a.php2"))
+ assert.True(t, m.MatchString("/api2/a.php"))
+ assert.True(t, m.MatchString("/api/img.php"))
+ assert.False(t, m.MatchString("/api2/img.php2"))
+}
+
+func TestSimpleExpr_exclude(t *testing.T) {
+ expr := &SimpleExpr{
+ Excludes: []string{
+ "~ /api/img",
+ },
+ }
+
+ m, err := expr.Parse()
+ assert.NoError(t, err)
+
+ assert.True(t, m.MatchString("/api/a.php"))
+ assert.True(t, m.MatchString("/api/a.php2"))
+ assert.True(t, m.MatchString("/api2/a.php"))
+ assert.False(t, m.MatchString("/api/img.php"))
+ assert.True(t, m.MatchString("/api2/img.php2"))
+}
+
+func TestSimpleExpr_both(t *testing.T) {
+ expr := &SimpleExpr{
+ Includes: []string{
+ "~ /api/",
+ "~ .php$",
+ },
+ Excludes: []string{
+ "~ /api/img",
+ },
+ }
+
+ m, err := expr.Parse()
+ assert.NoError(t, err)
+
+ assert.True(t, m.MatchString("/api/a.php"))
+ assert.True(t, m.MatchString("/api/a.php2"))
+ assert.True(t, m.MatchString("/api2/a.php"))
+ assert.False(t, m.MatchString("/api/img.php"))
+ assert.False(t, m.MatchString("/api2/img.php2"))
+}
+
+func TestSimpleExpr_Parse_NG(t *testing.T) {
+ {
+ expr := &SimpleExpr{
+ Includes: []string{
+ "~ (ab",
+ "~ .php$",
+ },
+ }
+
+ m, err := expr.Parse()
+ assert.Error(t, err)
+ assert.Nil(t, m)
+ }
+ {
+ expr := &SimpleExpr{
+ Excludes: []string{
+ "~ (ab",
+ "~ .php$",
+ },
+ }
+
+ m, err := expr.Parse()
+ assert.Error(t, err)
+ assert.Nil(t, m)
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/glob.go b/src/go/plugin/go.d/pkg/matcher/glob.go
new file mode 100644
index 000000000..726c94c45
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/glob.go
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "errors"
+ "path/filepath"
+ "regexp"
+ "unicode/utf8"
+)
+
+// globMatcher implements Matcher using a filepath.Match-style wildcard pattern.
+type globMatcher string
+
+var (
+ errBadGlobPattern = errors.New("bad glob pattern")
+ erGlobPattern = regexp.MustCompile(`(?s)^(?:[*?]|\[\^?([^\\-\]]|\\.|.-.)+\]|\\.|[^\*\?\\\[])*$`)
+)
+
+// NewGlobMatcher creates a new matcher with glob format
+func NewGlobMatcher(expr string) (Matcher, error) {
+ switch expr {
+ case "":
+ return stringFullMatcher(""), nil
+ case "*":
+ return TRUE(), nil
+ }
+
+	// any string that passes this regexp check is a valid pattern
+ if !erGlobPattern.MatchString(expr) {
+ return nil, errBadGlobPattern
+ }
+
+ size := len(expr)
+ chars := []rune(expr)
+ startWith := true
+ endWith := true
+ startIdx := 0
+ endIdx := size - 1
+ if chars[startIdx] == '*' {
+ startWith = false
+ startIdx = 1
+ }
+ if chars[endIdx] == '*' {
+ endWith = false
+ endIdx--
+ }
+
+ unescapedExpr := make([]rune, 0, endIdx-startIdx+1)
+ for i := startIdx; i <= endIdx; i++ {
+ ch := chars[i]
+ if ch == '\\' {
+ nextCh := chars[i+1]
+ unescapedExpr = append(unescapedExpr, nextCh)
+ i++
+ } else if isGlobMeta(ch) {
+ return globMatcher(expr), nil
+ } else {
+ unescapedExpr = append(unescapedExpr, ch)
+ }
+ }
+
+ return NewStringMatcher(string(unescapedExpr), startWith, endWith)
+}
+
+func isGlobMeta(ch rune) bool {
+ switch ch {
+ case '*', '?', '[':
+ return true
+ default:
+ return false
+ }
+}
+
+// Match matches.
+func (m globMatcher) Match(b []byte) bool {
+ return m.MatchString(string(b))
+}
+
+// MatchString matches.
+func (m globMatcher) MatchString(line string) bool {
+ rs, _ := m.globMatch(line)
+ return rs
+}
+
+func (m globMatcher) globMatch(name string) (matched bool, err error) {
+ pattern := string(m)
+Pattern:
+ for len(pattern) > 0 {
+ var star bool
+ var chunk string
+ star, chunk, pattern = scanChunk(pattern)
+ if star && chunk == "" {
+ // Trailing * matches rest of string unless it has a /.
+ // return !strings.Contains(name, string(Separator)), nil
+
+ return true, nil
+ }
+ // Look for match at current position.
+ t, ok, err := matchChunk(chunk, name)
+ // if we're the last chunk, make sure we've exhausted the name
+ // otherwise we'll give a false result even if we could still match
+ // using the star
+ if ok && (len(t) == 0 || len(pattern) > 0) {
+ name = t
+ continue
+ }
+ if err != nil {
+ return false, err
+ }
+ if star {
+ // Look for match skipping i+1 bytes.
+ // Cannot skip /.
+ for i := 0; i < len(name); i++ {
+ //for i := 0; i < len(name) && name[i] != Separator; i++ {
+ t, ok, err := matchChunk(chunk, name[i+1:])
+ if ok {
+ // if we're the last chunk, make sure we exhausted the name
+ if len(pattern) == 0 && len(t) > 0 {
+ continue
+ }
+ name = t
+ continue Pattern
+ }
+ if err != nil {
+ return false, err
+ }
+ }
+ }
+ return false, nil
+ }
+ return len(name) == 0, nil
+}
+
+// scanChunk gets the next segment of pattern, which is a non-star string
+// possibly preceded by a star.
+func scanChunk(pattern string) (star bool, chunk, rest string) {
+ for len(pattern) > 0 && pattern[0] == '*' {
+ pattern = pattern[1:]
+ star = true
+ }
+ inrange := false
+ var i int
+Scan:
+ for i = 0; i < len(pattern); i++ {
+ switch pattern[i] {
+ case '\\':
+ if i+1 < len(pattern) {
+ i++
+ }
+ case '[':
+ inrange = true
+ case ']':
+ inrange = false
+ case '*':
+ if !inrange {
+ break Scan
+ }
+ }
+ }
+ return star, pattern[0:i], pattern[i:]
+}
+
+// matchChunk checks whether chunk matches the beginning of s.
+// If so, it returns the remainder of s (after the match).
+// Chunk is all single-character operators: literals, char classes, and ?.
+func matchChunk(chunk, s string) (rest string, ok bool, err error) {
+ for len(chunk) > 0 {
+ if len(s) == 0 {
+ return
+ }
+ switch chunk[0] {
+ case '[':
+ // character class
+ r, n := utf8.DecodeRuneInString(s)
+ s = s[n:]
+ chunk = chunk[1:]
+ // We can't end right after '[', we're expecting at least
+ // a closing bracket and possibly a caret.
+ if len(chunk) == 0 {
+ err = filepath.ErrBadPattern
+ return
+ }
+ // possibly negated
+ negated := chunk[0] == '^'
+ if negated {
+ chunk = chunk[1:]
+ }
+ // parse all ranges
+ match := false
+ nrange := 0
+ for {
+ if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
+ chunk = chunk[1:]
+ break
+ }
+ var lo, hi rune
+ if lo, chunk, err = getEsc(chunk); err != nil {
+ return
+ }
+ hi = lo
+ if chunk[0] == '-' {
+ if hi, chunk, err = getEsc(chunk[1:]); err != nil {
+ return
+ }
+ }
+ if lo <= r && r <= hi {
+ match = true
+ }
+ nrange++
+ }
+ if match == negated {
+ return
+ }
+
+ case '?':
+ //if s[0] == Separator {
+ // return
+ //}
+ _, n := utf8.DecodeRuneInString(s)
+ s = s[n:]
+ chunk = chunk[1:]
+
+ case '\\':
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ err = filepath.ErrBadPattern
+ return
+ }
+ fallthrough
+
+ default:
+ if chunk[0] != s[0] {
+ return
+ }
+ s = s[1:]
+ chunk = chunk[1:]
+ }
+ }
+ return s, true, nil
+}
+
+// getEsc gets a possibly-escaped character from chunk, for a character class.
+func getEsc(chunk string) (r rune, nchunk string, err error) {
+ if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
+ err = filepath.ErrBadPattern
+ return
+ }
+ if chunk[0] == '\\' {
+ chunk = chunk[1:]
+ if len(chunk) == 0 {
+ err = filepath.ErrBadPattern
+ return
+ }
+ }
+ r, n := utf8.DecodeRuneInString(chunk)
+ if r == utf8.RuneError && n == 1 {
+ err = filepath.ErrBadPattern
+ }
+ nchunk = chunk[n:]
+ if len(nchunk) == 0 {
+ err = filepath.ErrBadPattern
+ }
+ return
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/glob_test.go b/src/go/plugin/go.d/pkg/matcher/glob_test.go
new file mode 100644
index 000000000..09d456105
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/glob_test.go
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewGlobMatcher(t *testing.T) {
+ cases := []struct {
+ expr string
+ matcher Matcher
+ }{
+ {"", stringFullMatcher("")},
+ {"a", stringFullMatcher("a")},
+ {"a*b", globMatcher("a*b")},
+ {`a*\b`, globMatcher(`a*\b`)},
+ {`a\[`, stringFullMatcher(`a[`)},
+ {`ab\`, nil},
+ {`ab[`, nil},
+ {`ab]`, stringFullMatcher("ab]")},
+ }
+ for _, c := range cases {
+ t.Run(c.expr, func(t *testing.T) {
+ m, err := NewGlobMatcher(c.expr)
+ if c.matcher != nil {
+ assert.NoError(t, err)
+ assert.Equal(t, c.matcher, m)
+ } else {
+ assert.Error(t, err)
+ }
+ })
+ }
+}
+
+func TestGlobMatcher_MatchString(t *testing.T) {
+
+ cases := []struct {
+ expected bool
+ expr string
+ line string
+ }{
+ {true, "/a/*/d", "/a/b/c/d"},
+ {true, "foo*", "foo123"},
+ {true, "*foo*", "123foo123"},
+ {true, "*foo", "123foo"},
+ {true, "foo*bar", "foobar"},
+ {true, "foo*bar", "foo baz bar"},
+ {true, "a[bc]d", "abd"},
+ {true, "a[^bc]d", "add"},
+ {true, "a??d", "abcd"},
+ {true, `a\??d`, "a?cd"},
+ {true, "a[b-z]d", "abd"},
+ {false, "/a/*/d", "a/b/c/d"},
+ {false, "/a/*/d", "This will fail!"},
+ }
+
+ for _, c := range cases {
+ t.Run(c.line, func(t *testing.T) {
+ m := globMatcher(c.expr)
+ assert.Equal(t, c.expected, m.Match([]byte(c.line)))
+ assert.Equal(t, c.expected, m.MatchString(c.line))
+ })
+ }
+}
+
+func BenchmarkGlob_MatchString(b *testing.B) {
+ benchmarks := []struct {
+ expr string
+ test string
+ }{
+ {"", ""},
+ {"abc", "abcd"},
+ {"*abc", "abcd"},
+ {"abc*", "abcd"},
+ {"*abc*", "abcd"},
+ {"[a-z]", "abcd"},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.expr+"_raw", func(b *testing.B) {
+ m := globMatcher(bm.expr)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.MatchString(bm.test)
+ }
+ })
+ b.Run(bm.expr+"_optimized", func(b *testing.B) {
+ m, _ := NewGlobMatcher(bm.expr)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.MatchString(bm.test)
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/logical.go b/src/go/plugin/go.d/pkg/matcher/logical.go
new file mode 100644
index 000000000..af07be8f4
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/logical.go
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+type (
+ trueMatcher struct{}
+ falseMatcher struct{}
+ andMatcher struct{ lhs, rhs Matcher }
+ orMatcher struct{ lhs, rhs Matcher }
+ negMatcher struct{ Matcher }
+)
+
+var (
+ matcherT trueMatcher
+ matcherF falseMatcher
+)
+
+// TRUE returns a matcher which always returns true
+func TRUE() Matcher {
+ return matcherT
+}
+
+// FALSE returns a matcher which always returns false
+func FALSE() Matcher {
+ return matcherF
+}
+
+// Not returns a matcher that negates the sub-matcher's result
+func Not(m Matcher) Matcher {
+ switch m {
+ case TRUE():
+ return FALSE()
+ case FALSE():
+ return TRUE()
+ default:
+ return negMatcher{m}
+ }
+}
+
+// And returns a matcher which returns true only if all of its sub-matchers return true
+func And(lhs, rhs Matcher, others ...Matcher) Matcher {
+ var matcher Matcher
+ switch lhs {
+ case TRUE():
+ matcher = rhs
+ case FALSE():
+ matcher = FALSE()
+ default:
+ switch rhs {
+ case TRUE():
+ matcher = lhs
+ case FALSE():
+ matcher = FALSE()
+ default:
+ matcher = andMatcher{lhs, rhs}
+ }
+ }
+ if len(others) > 0 {
+ return And(matcher, others[0], others[1:]...)
+ }
+ return matcher
+}
+
+// Or returns a matcher which returns true if any of its sub-matchers returns true
+func Or(lhs, rhs Matcher, others ...Matcher) Matcher {
+ var matcher Matcher
+ switch lhs {
+ case TRUE():
+ matcher = TRUE()
+ case FALSE():
+ matcher = rhs
+ default:
+ switch rhs {
+ case TRUE():
+ matcher = TRUE()
+ case FALSE():
+ matcher = lhs
+ default:
+ matcher = orMatcher{lhs, rhs}
+ }
+ }
+ if len(others) > 0 {
+ return Or(matcher, others[0], others[1:]...)
+ }
+ return matcher
+}
+
+func (trueMatcher) Match(_ []byte) bool { return true }
+func (trueMatcher) MatchString(_ string) bool { return true }
+
+func (falseMatcher) Match(_ []byte) bool { return false }
+func (falseMatcher) MatchString(_ string) bool { return false }
+
+func (m andMatcher) Match(b []byte) bool { return m.lhs.Match(b) && m.rhs.Match(b) }
+func (m andMatcher) MatchString(s string) bool { return m.lhs.MatchString(s) && m.rhs.MatchString(s) }
+
+func (m orMatcher) Match(b []byte) bool { return m.lhs.Match(b) || m.rhs.Match(b) }
+func (m orMatcher) MatchString(s string) bool { return m.lhs.MatchString(s) || m.rhs.MatchString(s) }
+
+func (m negMatcher) Match(b []byte) bool { return !m.Matcher.Match(b) }
+func (m negMatcher) MatchString(s string) bool { return !m.Matcher.MatchString(s) }
diff --git a/src/go/plugin/go.d/pkg/matcher/logical_test.go b/src/go/plugin/go.d/pkg/matcher/logical_test.go
new file mode 100644
index 000000000..64491f1ad
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/logical_test.go
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestTRUE(t *testing.T) {
+ assert.True(t, TRUE().Match(nil))
+ assert.True(t, TRUE().MatchString(""))
+}
+
+func TestFALSE(t *testing.T) {
+ assert.False(t, FALSE().Match(nil))
+ assert.False(t, FALSE().MatchString(""))
+}
+
+func TestAnd(t *testing.T) {
+ assert.Equal(t,
+ matcherF,
+ And(FALSE(), stringFullMatcher("")))
+ assert.Equal(t,
+ matcherF,
+ And(stringFullMatcher(""), FALSE()))
+
+ assert.Equal(t,
+ stringFullMatcher(""),
+ And(TRUE(), stringFullMatcher("")))
+ assert.Equal(t,
+ stringFullMatcher(""),
+ And(stringFullMatcher(""), TRUE()))
+
+ assert.Equal(t,
+ andMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")},
+ And(stringPartialMatcher("a"), stringPartialMatcher("b")))
+
+ assert.Equal(t,
+ andMatcher{
+ andMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")},
+ stringPartialMatcher("c"),
+ },
+ And(stringPartialMatcher("a"), stringPartialMatcher("b"), stringPartialMatcher("c")))
+}
+
+func TestOr(t *testing.T) {
+ assert.Equal(t,
+ stringFullMatcher(""),
+ Or(FALSE(), stringFullMatcher("")))
+ assert.Equal(t,
+ stringFullMatcher(""),
+ Or(stringFullMatcher(""), FALSE()))
+
+ assert.Equal(t,
+ TRUE(),
+ Or(TRUE(), stringFullMatcher("")))
+ assert.Equal(t,
+ TRUE(),
+ Or(stringFullMatcher(""), TRUE()))
+
+ assert.Equal(t,
+ orMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")},
+ Or(stringPartialMatcher("a"), stringPartialMatcher("b")))
+
+ assert.Equal(t,
+ orMatcher{
+ orMatcher{stringPartialMatcher("a"), stringPartialMatcher("b")},
+ stringPartialMatcher("c"),
+ },
+ Or(stringPartialMatcher("a"), stringPartialMatcher("b"), stringPartialMatcher("c")))
+}
+
+func TestAndMatcher_Match(t *testing.T) {
+ and := andMatcher{
+ stringPrefixMatcher("a"),
+ stringSuffixMatcher("c"),
+ }
+ assert.True(t, and.Match([]byte("abc")))
+ assert.True(t, and.MatchString("abc"))
+}
+
+func TestOrMatcher_Match(t *testing.T) {
+ or := orMatcher{
+ stringPrefixMatcher("a"),
+ stringPrefixMatcher("c"),
+ }
+ assert.True(t, or.Match([]byte("aaa")))
+ assert.True(t, or.MatchString("ccc"))
+}
+
+func TestNegMatcher_Match(t *testing.T) {
+ neg := negMatcher{stringPrefixMatcher("a")}
+ assert.False(t, neg.Match([]byte("aaa")))
+ assert.True(t, neg.MatchString("ccc"))
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/matcher.go b/src/go/plugin/go.d/pkg/matcher/matcher.go
new file mode 100644
index 000000000..76d903325
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/matcher.go
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "errors"
+ "fmt"
+ "regexp"
+)
+
+type (
+	// Matcher is an interface that wraps the Match and MatchString methods.
+ Matcher interface {
+ // Match performs match against given []byte
+ Match(b []byte) bool
+ // MatchString performs match against given string
+ MatchString(string) bool
+ }
+
+ // Format matcher format
+ Format string
+)
+
+const (
+ // FmtString is a string match format.
+ FmtString Format = "string"
+ // FmtGlob is a glob match format.
+ FmtGlob Format = "glob"
+ // FmtRegExp is a regex match format.
+ FmtRegExp Format = "regexp"
+ // FmtSimplePattern is a simple pattern match format
+ // https://docs.netdata.cloud/libnetdata/simple_pattern/
+ FmtSimplePattern Format = "simple_patterns"
+
+ // Separator is a separator between match format and expression.
+ Separator = ":"
+)
+
+const (
+ symString = "="
+ symGlob = "*"
+ symRegExp = "~"
+)
+
+var (
+ reShortSyntax = regexp.MustCompile(`(?s)^(!)?(.)\s*(.*)$`)
+ reLongSyntax = regexp.MustCompile(`(?s)^(!)?([^:]+):(.*)$`)
+
+ errNotShortSyntax = errors.New("not short syntax")
+)
+
+// Must is a helper that wraps a call to a function returning (Matcher, error) and panics if the error is non-nil.
+// It is intended for use in variable initializations such as
+//
+// var m = matcher.Must(matcher.New(matcher.FmtString, "hello world"))
+func Must(m Matcher, err error) Matcher {
+ if err != nil {
+ panic(err)
+ }
+ return m
+}
+
+// New creates a matcher with the given format and expression
+func New(format Format, expr string) (Matcher, error) {
+ switch format {
+ case FmtString:
+ return NewStringMatcher(expr, true, true)
+ case FmtGlob:
+ return NewGlobMatcher(expr)
+ case FmtRegExp:
+ return NewRegExpMatcher(expr)
+ case FmtSimplePattern:
+ return NewSimplePatternsMatcher(expr)
+ default:
+ return nil, fmt.Errorf("unsupported matcher format: '%s'", format)
+ }
+}
+
+// Parse parses the line and returns an appropriate matcher based on the detected format.
+//
+// Short Syntax
+//
+// <line> ::= [ <not> ] <format> <space> <expr>
+// <not> ::= '!'
+// negative expression
+// <format> ::= [ '=', '~', '*' ]
+// '=' means string match
+// '~' means regexp match
+// '*' means glob match
+// <space> ::= { ' ' | '\t' | '\n' | '\n' | '\r' }
+// <expr> ::= any string
+//
+// Long Syntax
+//
+// <line> ::= [ <not> ] <format> <separator> <expr>
+// <format> ::= [ 'string' | 'glob' | 'regexp' | 'simple_patterns' ]
+// <not> ::= '!'
+// negative expression
+// <separator> ::= ':'
+// <expr> ::= any string
+func Parse(line string) (Matcher, error) {
+ matcher, err := parseShortFormat(line)
+ if err == nil {
+ return matcher, nil
+ }
+ return parseLongSyntax(line)
+}
+
+func parseShortFormat(line string) (Matcher, error) {
+ m := reShortSyntax.FindStringSubmatch(line)
+ if m == nil {
+ return nil, errNotShortSyntax
+ }
+ var format Format
+ switch m[2] {
+ case symString:
+ format = FmtString
+ case symGlob:
+ format = FmtGlob
+ case symRegExp:
+ format = FmtRegExp
+ default:
+ return nil, fmt.Errorf("invalid short syntax: unknown symbol '%s'", m[2])
+ }
+ expr := m[3]
+ matcher, err := New(format, expr)
+ if err != nil {
+ return nil, err
+ }
+ if m[1] != "" {
+ matcher = Not(matcher)
+ }
+ return matcher, nil
+}
+
+func parseLongSyntax(line string) (Matcher, error) {
+ m := reLongSyntax.FindStringSubmatch(line)
+ if m == nil {
+ return nil, fmt.Errorf("invalid syntax")
+ }
+ matcher, err := New(Format(m[2]), m[3])
+ if err != nil {
+ return nil, err
+ }
+ if m[1] != "" {
+ matcher = Not(matcher)
+ }
+ return matcher, nil
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/matcher_test.go b/src/go/plugin/go.d/pkg/matcher/matcher_test.go
new file mode 100644
index 000000000..f304d983d
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/matcher_test.go
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "log"
+ "reflect"
+ "regexp"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParse(t *testing.T) {
+ tests := []struct {
+ valid bool
+ line string
+ matcher Matcher
+ }{
+ {false, "", nil},
+ {false, "abc", nil},
+ {false, `~ abc\`, nil},
+ {false, `invalid_fmt:abc`, nil},
+
+ {true, "=", stringFullMatcher("")},
+ {true, "= ", stringFullMatcher("")},
+ {true, "=full", stringFullMatcher("full")},
+ {true, "= full", stringFullMatcher("full")},
+ {true, "= \t\ffull", stringFullMatcher("full")},
+
+ {true, "string:", stringFullMatcher("")},
+ {true, "string:full", stringFullMatcher("full")},
+
+ {true, "!=", Not(stringFullMatcher(""))},
+ {true, "!=full", Not(stringFullMatcher("full"))},
+ {true, "!= full", Not(stringFullMatcher("full"))},
+ {true, "!= \t\ffull", Not(stringFullMatcher("full"))},
+
+ {true, "!string:", Not(stringFullMatcher(""))},
+ {true, "!string:full", Not(stringFullMatcher("full"))},
+
+ {true, "~", TRUE()},
+ {true, "~ ", TRUE()},
+ {true, `~ ^$`, stringFullMatcher("")},
+ {true, "~ partial", stringPartialMatcher("partial")},
+ {true, `~ part\.ial`, stringPartialMatcher("part.ial")},
+ {true, "~ ^prefix", stringPrefixMatcher("prefix")},
+ {true, "~ suffix$", stringSuffixMatcher("suffix")},
+ {true, "~ ^full$", stringFullMatcher("full")},
+ {true, "~ [0-9]+", regexp.MustCompile(`[0-9]+`)},
+ {true, `~ part\s1`, regexp.MustCompile(`part\s1`)},
+
+ {true, "!~", FALSE()},
+ {true, "!~ ", FALSE()},
+ {true, "!~ partial", Not(stringPartialMatcher("partial"))},
+ {true, `!~ part\.ial`, Not(stringPartialMatcher("part.ial"))},
+ {true, "!~ ^prefix", Not(stringPrefixMatcher("prefix"))},
+ {true, "!~ suffix$", Not(stringSuffixMatcher("suffix"))},
+ {true, "!~ ^full$", Not(stringFullMatcher("full"))},
+ {true, "!~ [0-9]+", Not(regexp.MustCompile(`[0-9]+`))},
+
+ {true, `regexp:partial`, stringPartialMatcher("partial")},
+ {true, `!regexp:partial`, Not(stringPartialMatcher("partial"))},
+
+ {true, `*`, stringFullMatcher("")},
+ {true, `* foo`, stringFullMatcher("foo")},
+ {true, `* foo*`, stringPrefixMatcher("foo")},
+ {true, `* *foo`, stringSuffixMatcher("foo")},
+ {true, `* *foo*`, stringPartialMatcher("foo")},
+ {true, `* foo*bar`, globMatcher("foo*bar")},
+ {true, `* *foo*bar`, globMatcher("*foo*bar")},
+ {true, `* foo?bar`, globMatcher("foo?bar")},
+
+ {true, `!*`, Not(stringFullMatcher(""))},
+ {true, `!* foo`, Not(stringFullMatcher("foo"))},
+ {true, `!* foo*`, Not(stringPrefixMatcher("foo"))},
+ {true, `!* *foo`, Not(stringSuffixMatcher("foo"))},
+ {true, `!* *foo*`, Not(stringPartialMatcher("foo"))},
+ {true, `!* foo*bar`, Not(globMatcher("foo*bar"))},
+ {true, `!* *foo*bar`, Not(globMatcher("*foo*bar"))},
+ {true, `!* foo?bar`, Not(globMatcher("foo?bar"))},
+
+ {true, "glob:foo*bar", globMatcher("foo*bar")},
+ {true, "!glob:foo*bar", Not(globMatcher("foo*bar"))},
+
+ {true, `simple_patterns:`, FALSE()},
+ {true, `simple_patterns: `, FALSE()},
+ {true, `simple_patterns: foo`, simplePatternsMatcher{
+ {stringFullMatcher("foo"), true},
+ }},
+ {true, `simple_patterns: !foo`, simplePatternsMatcher{
+ {stringFullMatcher("foo"), false},
+ }},
+ }
+ for _, test := range tests {
+ t.Run(test.line, func(t *testing.T) {
+ m, err := Parse(test.line)
+ if test.valid {
+ require.NoError(t, err)
+ if test.matcher != nil {
+ log.Printf("%s %#v", reflect.TypeOf(m).Name(), m)
+ assert.Equal(t, test.matcher, m)
+ }
+ } else {
+ assert.Error(t, err)
+ }
+ })
+ }
+}
+
+func TestMust(t *testing.T) {
+ assert.NotPanics(t, func() {
+ m := Must(New(FmtRegExp, `[0-9]+`))
+ assert.NotNil(t, m)
+ })
+
+ assert.Panics(t, func() {
+ Must(New(FmtRegExp, `[0-9]+\`))
+ })
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/regexp.go b/src/go/plugin/go.d/pkg/matcher/regexp.go
new file mode 100644
index 000000000..3a297f3b3
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/regexp.go
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import "regexp"
+
+// NewRegExpMatcher creates a new matcher with RegExp format
+func NewRegExpMatcher(expr string) (Matcher, error) {
+ switch expr {
+ case "", "^", "$":
+ return TRUE(), nil
+ case "^$", "$^":
+ return NewStringMatcher("", true, true)
+ }
+ size := len(expr)
+ chars := []rune(expr)
+ var startWith, endWith bool
+ startIdx := 0
+ endIdx := size - 1
+ if chars[startIdx] == '^' {
+ startWith = true
+ startIdx = 1
+ }
+ if chars[endIdx] == '$' {
+ endWith = true
+ endIdx--
+ }
+
+ unescapedExpr := make([]rune, 0, endIdx-startIdx+1)
+ for i := startIdx; i <= endIdx; i++ {
+ ch := chars[i]
+ if ch == '\\' {
+ if i == endIdx { // end with '\' => invalid format
+ return regexp.Compile(expr)
+ }
+ nextCh := chars[i+1]
+		if !isRegExpMeta(nextCh) { // '\' + non-meta char => special meaning
+ return regexp.Compile(expr)
+ }
+ unescapedExpr = append(unescapedExpr, nextCh)
+ i++
+ } else if isRegExpMeta(ch) {
+ return regexp.Compile(expr)
+ } else {
+ unescapedExpr = append(unescapedExpr, ch)
+ }
+ }
+
+ return NewStringMatcher(string(unescapedExpr), startWith, endWith)
+}
+
+// isRegExpMeta reports whether rune b is a regexp metacharacter (i.e. would need escaping by QuoteMeta).
+func isRegExpMeta(b rune) bool {
+ switch b {
+ case '\\', '.', '+', '*', '?', '(', ')', '|', '[', ']', '{', '}', '^', '$':
+ return true
+ default:
+ return false
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/regexp_test.go b/src/go/plugin/go.d/pkg/matcher/regexp_test.go
new file mode 100644
index 000000000..fe644747b
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/regexp_test.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "regexp"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestRegExpMatch_Match(t *testing.T) {
+ m := regexp.MustCompile("[0-9]+")
+
+ cases := []struct {
+ expected bool
+ line string
+ }{
+ {
+ expected: true,
+ line: "2019",
+ },
+ {
+ expected: true,
+ line: "It's over 9000!",
+ },
+ {
+ expected: false,
+ line: "This will never fail!",
+ },
+ }
+
+ for _, c := range cases {
+ assert.Equal(t, c.expected, m.MatchString(c.line))
+ }
+}
+
+func BenchmarkRegExp_MatchString(b *testing.B) {
+ benchmarks := []struct {
+ expr string
+ test string
+ }{
+ {"", ""},
+ {"abc", "abcd"},
+ {"^abc", "abcd"},
+ {"abc$", "abcd"},
+ {"^abc$", "abcd"},
+ {"[a-z]+", "abcd"},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.expr+"_raw", func(b *testing.B) {
+ m := regexp.MustCompile(bm.expr)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.MatchString(bm.test)
+ }
+ })
+ b.Run(bm.expr+"_optimized", func(b *testing.B) {
+ m, _ := NewRegExpMatcher(bm.expr)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ m.MatchString(bm.test)
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/simple_patterns.go b/src/go/plugin/go.d/pkg/matcher/simple_patterns.go
new file mode 100644
index 000000000..91a0a3bbd
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/simple_patterns.go
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "strings"
+)
+
+type (
+ simplePatternTerm struct {
+ matcher Matcher
+ positive bool
+ }
+
+	// simplePatternsMatcher is an ordered list of pattern terms; the first matching term decides the result.
+ simplePatternsMatcher []simplePatternTerm
+)
+
+// NewSimplePatternsMatcher creates a new simple patterns matcher. It returns an error if any pattern has bad syntax.
+func NewSimplePatternsMatcher(expr string) (Matcher, error) {
+ ps := simplePatternsMatcher{}
+
+ for _, pattern := range strings.Fields(expr) {
+ if err := ps.add(pattern); err != nil {
+ return nil, err
+ }
+ }
+ if len(ps) == 0 {
+ return FALSE(), nil
+ }
+ return ps, nil
+}
+
+func (m *simplePatternsMatcher) add(term string) error {
+ p := simplePatternTerm{}
+ if term[0] == '!' {
+ p.positive = false
+ term = term[1:]
+ } else {
+ p.positive = true
+ }
+ matcher, err := NewGlobMatcher(term)
+ if err != nil {
+ return err
+ }
+
+ p.matcher = matcher
+ *m = append(*m, p)
+
+ return nil
+}
+
+func (m simplePatternsMatcher) Match(b []byte) bool {
+ return m.MatchString(string(b))
+}
+
+// MatchString matches.
+func (m simplePatternsMatcher) MatchString(line string) bool {
+ for _, p := range m {
+ if p.matcher.MatchString(line) {
+ return p.positive
+ }
+ }
+ return false
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/simple_patterns_test.go b/src/go/plugin/go.d/pkg/matcher/simple_patterns_test.go
new file mode 100644
index 000000000..016096d57
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/simple_patterns_test.go
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewSimplePatternsMatcher(t *testing.T) {
+ tests := []struct {
+ expr string
+ expected Matcher
+ }{
+ {"", FALSE()},
+ {" ", FALSE()},
+ {"foo", simplePatternsMatcher{
+ {stringFullMatcher("foo"), true},
+ }},
+ {"!foo", simplePatternsMatcher{
+ {stringFullMatcher("foo"), false},
+ }},
+ {"foo bar", simplePatternsMatcher{
+ {stringFullMatcher("foo"), true},
+ {stringFullMatcher("bar"), true},
+ }},
+ {"*foobar* !foo* !*bar *", simplePatternsMatcher{
+ {stringPartialMatcher("foobar"), true},
+ {stringPrefixMatcher("foo"), false},
+ {stringSuffixMatcher("bar"), false},
+ {TRUE(), true},
+ }},
+ {`ab\`, nil},
+ }
+ for _, test := range tests {
+ t.Run(test.expr, func(t *testing.T) {
+ matcher, err := NewSimplePatternsMatcher(test.expr)
+ if test.expected == nil {
+ assert.Error(t, err)
+ } else {
+ assert.Equal(t, test.expected, matcher)
+ }
+ })
+ }
+}
+
+func TestSimplePatterns_Match(t *testing.T) {
+ m, err := NewSimplePatternsMatcher("*foobar* !foo* !*bar *")
+
+ require.NoError(t, err)
+
+ cases := []struct {
+ expected bool
+ line string
+ }{
+ {
+ expected: true,
+ line: "hello world",
+ },
+ {
+ expected: false,
+ line: "hello world bar",
+ },
+ {
+ expected: true,
+ line: "hello world foobar",
+ },
+ }
+
+ for _, c := range cases {
+ t.Run(c.line, func(t *testing.T) {
+ assert.Equal(t, c.expected, m.MatchString(c.line))
+ assert.Equal(t, c.expected, m.Match([]byte(c.line)))
+ })
+ }
+}
+
+func TestSimplePatterns_Match2(t *testing.T) {
+ m, err := NewSimplePatternsMatcher("*foobar")
+
+ require.NoError(t, err)
+
+ assert.True(t, m.MatchString("foobar"))
+ assert.True(t, m.MatchString("foo foobar"))
+ assert.False(t, m.MatchString("foobar baz"))
+}
diff --git a/src/go/plugin/go.d/pkg/matcher/string.go b/src/go/plugin/go.d/pkg/matcher/string.go
new file mode 100644
index 000000000..43ba43eb3
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/string.go
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "bytes"
+ "strings"
+)
+
+type (
+ // stringFullMatcher implements Matcher, it uses "==" to match.
+ stringFullMatcher string
+
+ // stringPartialMatcher implements Matcher, it uses strings.Contains to match.
+ stringPartialMatcher string
+
+ // stringPrefixMatcher implements Matcher, it uses strings.HasPrefix to match.
+ stringPrefixMatcher string
+
+ // stringSuffixMatcher implements Matcher, it uses strings.HasSuffix to match.
+ stringSuffixMatcher string
+)
+
+// NewStringMatcher creates a new matcher with string format
+func NewStringMatcher(s string, startWith, endWith bool) (Matcher, error) {
+ if startWith {
+ if endWith {
+ return stringFullMatcher(s), nil
+ }
+ return stringPrefixMatcher(s), nil
+ }
+ if endWith {
+ return stringSuffixMatcher(s), nil
+ }
+ return stringPartialMatcher(s), nil
+}
+
+func (m stringFullMatcher) Match(b []byte) bool { return string(m) == string(b) }
+func (m stringFullMatcher) MatchString(line string) bool { return string(m) == line }
+
+func (m stringPartialMatcher) Match(b []byte) bool { return bytes.Contains(b, []byte(m)) }
+func (m stringPartialMatcher) MatchString(line string) bool { return strings.Contains(line, string(m)) }
+
+func (m stringPrefixMatcher) Match(b []byte) bool { return bytes.HasPrefix(b, []byte(m)) }
+func (m stringPrefixMatcher) MatchString(line string) bool { return strings.HasPrefix(line, string(m)) }
+
+func (m stringSuffixMatcher) Match(b []byte) bool { return bytes.HasSuffix(b, []byte(m)) }
+func (m stringSuffixMatcher) MatchString(line string) bool { return strings.HasSuffix(line, string(m)) }
diff --git a/src/go/plugin/go.d/pkg/matcher/string_test.go b/src/go/plugin/go.d/pkg/matcher/string_test.go
new file mode 100644
index 000000000..1694efbd0
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/matcher/string_test.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package matcher
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+var stringMatcherTestCases = []struct {
+ line string
+ expr string
+ full, prefix, suffix, partial bool
+}{
+ {"", "", true, true, true, true},
+ {"abc", "", false, true, true, true},
+ {"power", "pow", false, true, false, true},
+ {"netdata", "data", false, false, true, true},
+ {"abc", "def", false, false, false, false},
+ {"soon", "o", false, false, false, true},
+}
+
+func TestStringFullMatcher_MatchString(t *testing.T) {
+ for _, c := range stringMatcherTestCases {
+ t.Run(c.line, func(t *testing.T) {
+ m := stringFullMatcher(c.expr)
+ assert.Equal(t, c.full, m.Match([]byte(c.line)))
+ assert.Equal(t, c.full, m.MatchString(c.line))
+ })
+ }
+}
+
+func TestStringPrefixMatcher_MatchString(t *testing.T) {
+ for _, c := range stringMatcherTestCases {
+ t.Run(c.line, func(t *testing.T) {
+ m := stringPrefixMatcher(c.expr)
+ assert.Equal(t, c.prefix, m.Match([]byte(c.line)))
+ assert.Equal(t, c.prefix, m.MatchString(c.line))
+ })
+ }
+}
+
+func TestStringSuffixMatcher_MatchString(t *testing.T) {
+ for _, c := range stringMatcherTestCases {
+ t.Run(c.line, func(t *testing.T) {
+ m := stringSuffixMatcher(c.expr)
+ assert.Equal(t, c.suffix, m.Match([]byte(c.line)))
+ assert.Equal(t, c.suffix, m.MatchString(c.line))
+ })
+ }
+}
+
+func TestStringPartialMatcher_MatchString(t *testing.T) {
+ for _, c := range stringMatcherTestCases {
+ t.Run(c.line, func(t *testing.T) {
+ m := stringPartialMatcher(c.expr)
+ assert.Equal(t, c.partial, m.Match([]byte(c.line)))
+ assert.Equal(t, c.partial, m.MatchString(c.line))
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/metrics/counter.go b/src/go/plugin/go.d/pkg/metrics/counter.go
new file mode 100644
index 000000000..406bc8792
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/counter.go
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+type (
+	// Counter is a Metric that represents a single numerical value that only ever
+ // goes up. That implies that it cannot be used to count items whose number can
+ // also go down, e.g. the number of currently running goroutines. Those
+ // "counters" are represented by Gauges.
+ //
+ // A Counter is typically used to count requests served, tasks completed, errors
+ // occurred, etc.
+ Counter struct {
+ valInt int64
+ valFloat float64
+ }
+
+	// CounterVec is a Collector that bundles a set of Counters, keyed by name.
+ // This is used if you want to count the same thing partitioned by various dimensions
+ // (e.g. number of HTTP requests, partitioned by response code and method).
+ //
+ // Create instances with NewCounterVec.
+ CounterVec map[string]*Counter
+)
+
+var (
+ _ stm.Value = Counter{}
+ _ stm.Value = CounterVec{}
+)
+
+// WriteTo writes its value into the given map.
+func (c Counter) WriteTo(rv map[string]int64, key string, mul, div int) {
+ rv[key] = int64(c.Value() * float64(mul) / float64(div))
+}
+
+// Value returns the current counter value.
+func (c Counter) Value() float64 {
+ return float64(c.valInt) + c.valFloat
+}
+
+// Inc increments the counter by 1. Use Add to increment it by arbitrary
+// non-negative values.
+func (c *Counter) Inc() {
+ c.valInt++
+}
+
+// Add adds the given value to the counter. It panics if the value is < 0.
+func (c *Counter) Add(v float64) {
+ if v < 0 {
+ panic(errors.New("counter cannot decrease in value"))
+ }
+ val := int64(v)
+ if float64(val) == v {
+ c.valInt += val
+ return
+ }
+ c.valFloat += v
+}
+
+// NewCounterVec creates a new CounterVec
+func NewCounterVec() CounterVec {
+ return CounterVec{}
+}
+
+// WriteTo writes its value into the given map.
+func (c CounterVec) WriteTo(rv map[string]int64, key string, mul, div int) {
+ for name, value := range c {
+ rv[key+"_"+name] = int64(value.Value() * float64(mul) / float64(div))
+ }
+}
+
+// Get gets the counter instance by name, creating it if it does not exist.
+func (c CounterVec) Get(name string) *Counter {
+ item, _ := c.GetP(name)
+ return item
+}
+
+// GetP gets the counter instance by name, creating it if it does not exist; ok reports whether it already existed.
+func (c CounterVec) GetP(name string) (counter *Counter, ok bool) {
+ counter, ok = c[name]
+ if ok {
+ return
+ }
+ counter = &Counter{}
+ c[name] = counter
+ return
+}
diff --git a/src/go/plugin/go.d/pkg/metrics/counter_test.go b/src/go/plugin/go.d/pkg/metrics/counter_test.go
new file mode 100644
index 000000000..61f50501a
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/counter_test.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCounter_WriteTo(t *testing.T) {
+ c := Counter{}
+ c.Inc()
+ c.Inc()
+ c.Inc()
+ c.Add(0.14)
+ m := map[string]int64{}
+ c.WriteTo(m, "pi", 100, 1)
+ assert.Len(t, m, 1)
+ assert.EqualValues(t, 314, m["pi"])
+}
+
+func TestCounterVec_WriteTo(t *testing.T) {
+ c := NewCounterVec()
+ c.Get("foo").Inc()
+ c.Get("foo").Inc()
+ c.Get("bar").Inc()
+ c.Get("bar").Add(0.14)
+
+ m := map[string]int64{}
+ c.WriteTo(m, "pi", 100, 1)
+ assert.Len(t, m, 2)
+ assert.EqualValues(t, 200, m["pi_foo"])
+ assert.EqualValues(t, 114, m["pi_bar"])
+}
+
+func TestCounter_Inc(t *testing.T) {
+ c := Counter{}
+ c.Inc()
+ assert.Equal(t, 1.0, c.Value())
+ c.Inc()
+ assert.Equal(t, 2.0, c.Value())
+}
+
+func TestCounter_Add(t *testing.T) {
+ c := Counter{}
+ c.Add(3.14)
+ assert.InDelta(t, 3.14, c.Value(), 0.0001)
+ c.Add(2)
+ assert.InDelta(t, 5.14, c.Value(), 0.0001)
+ assert.Panics(t, func() {
+ c.Add(-1)
+ })
+}
+
+func BenchmarkCounter_Add(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ value float64
+ }{
+ {"int", 1},
+ {"float", 3.14},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ var c Counter
+ for i := 0; i < b.N; i++ {
+ c.Add(bm.value)
+ }
+ })
+ }
+}
+
+func BenchmarkCounter_Inc(b *testing.B) {
+ var c Counter
+ for i := 0; i < b.N; i++ {
+ c.Inc()
+ }
+}
+
+func BenchmarkCounterVec_Inc(b *testing.B) {
+ c := NewCounterVec()
+ for i := 0; i < b.N; i++ {
+ c.Get("foo").Inc()
+ }
+}
+
+func BenchmarkCounter_Value(b *testing.B) {
+ var c Counter
+ c.Inc()
+ c.Add(3.14)
+ for i := 0; i < b.N; i++ {
+ c.Value()
+ }
+}
+
+func BenchmarkCounter_WriteTo(b *testing.B) {
+ var c Counter
+ c.Inc()
+ c.Add(3.14)
+ m := map[string]int64{}
+ for i := 0; i < b.N; i++ {
+ c.WriteTo(m, "pi", 100, 1)
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/metrics/gauge.go b/src/go/plugin/go.d/pkg/metrics/gauge.go
new file mode 100644
index 000000000..20f0823a8
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/gauge.go
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "time"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+type (
+ // Gauge is a Metric that represents a single numerical value that can
+ // arbitrarily go up and down.
+ //
+ // A Gauge is typically used for measured values like temperatures or current
+ // memory usage, but also "counts" that can go up and down, like the number of
+ // running goroutines.
+ Gauge float64
+
+	// GaugeVec is a Collector that bundles a set of Gauges, keyed by name.
+	// This is used if you want to track the same thing partitioned by various dimensions.
+ //
+ // Create instances with NewGaugeVec.
+ GaugeVec map[string]*Gauge
+)
+
+var (
+ _ stm.Value = Gauge(0)
+ _ stm.Value = GaugeVec{}
+)
+
+// WriteTo writes its value into the given map.
+func (g Gauge) WriteTo(rv map[string]int64, key string, mul, div int) {
+ rv[key] = int64(float64(g) * float64(mul) / float64(div))
+}
+
+// Value returns the current gauge value.
+func (g Gauge) Value() float64 {
+ return float64(g)
+}
+
+// Set sets the gauge to an arbitrary value.
+func (g *Gauge) Set(v float64) {
+ *g = Gauge(v)
+}
+
+// Inc increments the gauge by 1. Use Add to increment it by arbitrary
+// values.
+func (g *Gauge) Inc() {
+ *g++
+}
+
+// Dec decrements the gauge by 1. Use Sub to decrement it by arbitrary
+// values.
+func (g *Gauge) Dec() {
+ *g--
+}
+
+// Add adds the given value to the gauge. (The value can be negative,
+// resulting in a decrease of the gauge.)
+func (g *Gauge) Add(delta float64) {
+ *g += Gauge(delta)
+}
+
+// Sub subtracts the given value from the gauge. (The value can be
+// negative, resulting in an increase of the gauge.)
+func (g *Gauge) Sub(delta float64) {
+ *g -= Gauge(delta)
+}
+
+// SetToCurrentTime sets the gauge to the current Unix time in seconds.
+func (g *Gauge) SetToCurrentTime() {
+ *g = Gauge(time.Now().UnixNano()) / 1e9
+}
+
+// NewGaugeVec creates a new GaugeVec
+func NewGaugeVec() GaugeVec {
+ return GaugeVec{}
+}
+
+// WriteTo writes its value into the given map.
+func (g GaugeVec) WriteTo(rv map[string]int64, key string, mul, div int) {
+ for name, value := range g {
+ rv[key+"_"+name] = int64(value.Value() * float64(mul) / float64(div))
+ }
+}
+
+// Get gets the gauge instance by name, creating it if it does not exist.
+func (g GaugeVec) Get(name string) *Gauge {
+ item, _ := g.GetP(name)
+ return item
+}
+
+// GetP gets the gauge instance by name, creating it if it does not exist; ok reports whether it already existed.
+func (g GaugeVec) GetP(name string) (gauge *Gauge, ok bool) {
+ gauge, ok = g[name]
+ if ok {
+ return
+ }
+ gauge = new(Gauge)
+ g[name] = gauge
+ return
+}
diff --git a/src/go/plugin/go.d/pkg/metrics/gauge_test.go b/src/go/plugin/go.d/pkg/metrics/gauge_test.go
new file mode 100644
index 000000000..8940e330e
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/gauge_test.go
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestGauge_Set(t *testing.T) {
+ var g Gauge
+ assert.Equal(t, 0.0, g.Value())
+ g.Set(100)
+ assert.Equal(t, 100.0, g.Value())
+ g.Set(200)
+ assert.Equal(t, 200.0, g.Value())
+}
+
+func TestGauge_Add(t *testing.T) {
+ var g Gauge
+ assert.Equal(t, 0.0, g.Value())
+ g.Add(100)
+ assert.Equal(t, 100.0, g.Value())
+ g.Add(200)
+ assert.Equal(t, 300.0, g.Value())
+}
+func TestGauge_Sub(t *testing.T) {
+ var g Gauge
+ assert.Equal(t, 0.0, g.Value())
+ g.Sub(100)
+ assert.Equal(t, -100.0, g.Value())
+ g.Sub(200)
+ assert.Equal(t, -300.0, g.Value())
+}
+
+func TestGauge_Inc(t *testing.T) {
+ var g Gauge
+ assert.Equal(t, 0.0, g.Value())
+ g.Inc()
+ assert.Equal(t, 1.0, g.Value())
+}
+
+func TestGauge_Dec(t *testing.T) {
+ var g Gauge
+ assert.Equal(t, 0.0, g.Value())
+ g.Dec()
+ assert.Equal(t, -1.0, g.Value())
+}
+
+func TestGauge_SetToCurrentTime(t *testing.T) {
+ var g Gauge
+ g.SetToCurrentTime()
+ assert.InDelta(t, time.Now().Unix(), g.Value(), 1)
+}
+
+func TestGauge_WriteTo(t *testing.T) {
+ g := Gauge(3.14)
+ m := map[string]int64{}
+ g.WriteTo(m, "pi", 100, 1)
+ assert.Len(t, m, 1)
+ assert.EqualValues(t, 314, m["pi"])
+}
+
+func TestGaugeVec_WriteTo(t *testing.T) {
+ g := NewGaugeVec()
+ g.Get("foo").Inc()
+ g.Get("foo").Inc()
+ g.Get("bar").Inc()
+ g.Get("bar").Add(0.14)
+
+ m := map[string]int64{}
+ g.WriteTo(m, "pi", 100, 1)
+ assert.Len(t, m, 2)
+ assert.EqualValues(t, 200, m["pi_foo"])
+ assert.EqualValues(t, 114, m["pi_bar"])
+}
+
+func BenchmarkGauge_Add(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ value float64
+ }{
+ {"int", 1},
+ {"float", 3.14},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ var c Gauge
+ for i := 0; i < b.N; i++ {
+ c.Add(bm.value)
+ }
+ })
+ }
+}
+
+func BenchmarkGauge_Inc(b *testing.B) {
+ var c Gauge
+ for i := 0; i < b.N; i++ {
+ c.Inc()
+ }
+}
+
+func BenchmarkGauge_Set(b *testing.B) {
+ var c Gauge
+ for i := 0; i < b.N; i++ {
+ c.Set(3.14)
+ }
+}
+
+func BenchmarkGauge_Value(b *testing.B) {
+ var c Gauge
+ c.Inc()
+ c.Add(3.14)
+ for i := 0; i < b.N; i++ {
+ c.Value()
+ }
+}
+
+func BenchmarkGauge_WriteTo(b *testing.B) {
+ var c Gauge
+ c.Inc()
+ c.Add(3.14)
+ m := map[string]int64{}
+ for i := 0; i < b.N; i++ {
+ c.WriteTo(m, "pi", 100, 1)
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/metrics/histogram.go b/src/go/plugin/go.d/pkg/metrics/histogram.go
new file mode 100644
index 000000000..98c2302ca
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/histogram.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+type (
+ // A Histogram counts individual observations from an event or sample stream in
+ // configurable buckets. Similar to a summary, it also provides a sum of
+ // observations and an observation count.
+ //
+ // Note that Histograms, in contrast to Summaries, can be aggregated.
+ // However, Histograms require the user to pre-define suitable
+ // buckets, and they are in general less accurate. The Observe method of a
+ // histogram has a very low performance overhead in comparison with the Observe
+ // method of a summary.
+ //
+ // To create histogram instances, use NewHistogram.
+ Histogram interface {
+ Observer
+ }
+
+ histogram struct {
+ buckets []int64
+ upperBounds []float64
+ sum float64
+ count int64
+ rangeBuckets bool
+ }
+)
+
+var (
+ _ stm.Value = histogram{}
+)
+
+// DefBuckets are the default histogram buckets. The default buckets are
+// tailored to broadly measure the response time (in seconds) of a network
+// service. Most likely, however, you will be required to define buckets
+// customized to your use case.
+var DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
+
+// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest
+// bucket has an upper bound of 'start'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// passed to NewHistogram.
+//
+// The function panics if 'count' is zero or negative.
+func LinearBuckets(start, width float64, count int) []float64 {
+ if count < 1 {
+ panic("LinearBuckets needs a positive count")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start += width
+ }
+ return buckets
+}
+
+// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an
+// upper bound of 'start' and each following bucket's upper bound is 'factor'
+// times the previous bucket's upper bound. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// passed to NewHistogram.
+//
+// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,
+// or if 'factor' is less than or equal to 1.
+func ExponentialBuckets(start, factor float64, count int) []float64 {
+ if count < 1 {
+ panic("ExponentialBuckets needs a positive count")
+ }
+ if start <= 0 {
+ panic("ExponentialBuckets needs a positive start value")
+ }
+ if factor <= 1 {
+ panic("ExponentialBuckets needs a factor greater than 1")
+ }
+ buckets := make([]float64, count)
+ for i := range buckets {
+ buckets[i] = start
+ start *= factor
+ }
+ return buckets
+}
+
+// NewHistogram creates a new Histogram.
+func NewHistogram(buckets []float64) Histogram {
+ if len(buckets) == 0 {
+ buckets = DefBuckets
+ } else {
+ sort.Slice(buckets, func(i, j int) bool { return buckets[i] < buckets[j] })
+ }
+
+ return &histogram{
+ buckets: make([]int64, len(buckets)),
+ upperBounds: buckets,
+ count: 0,
+ sum: 0,
+ }
+}
+
+func NewHistogramWithRangeBuckets(buckets []float64) Histogram {
+ if len(buckets) == 0 {
+ buckets = DefBuckets
+ } else {
+ sort.Slice(buckets, func(i, j int) bool { return buckets[i] < buckets[j] })
+ }
+
+ return &histogram{
+ buckets: make([]int64, len(buckets)),
+ upperBounds: buckets,
+ count: 0,
+ sum: 0,
+ rangeBuckets: true,
+ }
+}
+
+// WriteTo writes its values into the given map.
+// It adds those key-value pairs:
+//
+//	${key}_sum gauge, for the sum of its observed values
+//	${key}_count counter, for the count of its observed values (equals the +Inf bucket)
+// ${key}_bucket_1 counter, for 1st bucket count
+// ${key}_bucket_2 counter, for 2nd bucket count
+// ...
+// ${key}_bucket_N counter, for Nth bucket count
+func (h histogram) WriteTo(rv map[string]int64, key string, mul, div int) {
+ rv[key+"_sum"] = int64(h.sum * float64(mul) / float64(div))
+ rv[key+"_count"] = h.count
+ var conn int64
+ for i, bucket := range h.buckets {
+ name := fmt.Sprintf("%s_bucket_%d", key, i+1)
+ conn += bucket
+ if h.rangeBuckets {
+ rv[name] = bucket
+ } else {
+ rv[name] = conn
+ }
+ }
+ if h.rangeBuckets {
+ name := fmt.Sprintf("%s_bucket_inf", key)
+ rv[name] = h.count - conn
+ }
+}
+
+// Observe observes a value
+func (h *histogram) Observe(v float64) {
+ hotIdx := h.searchBucketIndex(v)
+ if hotIdx < len(h.buckets) {
+ h.buckets[hotIdx]++
+ }
+ h.sum += v
+ h.count++
+}
+
+func (h *histogram) searchBucketIndex(v float64) int {
+ if len(h.upperBounds) < 30 {
+ for i, upper := range h.upperBounds {
+ if upper >= v {
+ return i
+ }
+ }
+ return len(h.upperBounds)
+ }
+ return sort.SearchFloat64s(h.upperBounds, v)
+}
diff --git a/src/go/plugin/go.d/pkg/metrics/histogram_test.go b/src/go/plugin/go.d/pkg/metrics/histogram_test.go
new file mode 100644
index 000000000..91266915c
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/histogram_test.go
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestLinearBuckets(t *testing.T) {
+ buckets := LinearBuckets(0, 1, 10)
+ assert.Len(t, buckets, 10)
+ assert.EqualValues(t, 0, buckets[0])
+ assert.EqualValues(t, 5.0, buckets[5])
+ assert.EqualValues(t, 9.0, buckets[9])
+
+ assert.Panics(t, func() {
+ LinearBuckets(0, 1, 0)
+ })
+}
+
+func TestExponentialBuckets(t *testing.T) {
+ buckets := ExponentialBuckets(1, 2, 10)
+ assert.Len(t, buckets, 10)
+ assert.EqualValues(t, 1, buckets[0])
+ assert.EqualValues(t, 32.0, buckets[5])
+ assert.EqualValues(t, 512.0, buckets[9])
+
+ assert.Panics(t, func() {
+ ExponentialBuckets(1, 2, 0)
+ })
+ assert.Panics(t, func() {
+ ExponentialBuckets(0, 2, 2)
+ })
+
+ assert.Panics(t, func() {
+ ExponentialBuckets(1, 1, 2)
+ })
+}
+
+func TestNewHistogram(t *testing.T) {
+ h := NewHistogram(nil).(*histogram)
+ assert.EqualValues(t, 0, h.count)
+ assert.EqualValues(t, 0.0, h.sum)
+ assert.Equal(t, DefBuckets, h.upperBounds)
+
+ h = NewHistogram([]float64{1, 10, 5}).(*histogram)
+ assert.Equal(t, []float64{1, 5, 10}, h.upperBounds)
+ assert.Len(t, h.buckets, 3)
+}
+
+func TestHistogram_WriteTo(t *testing.T) {
+ h := NewHistogram([]float64{1, 2, 3})
+ m := map[string]int64{}
+ h.WriteTo(m, "pi", 100, 1)
+ assert.Len(t, m, 5)
+ assert.EqualValues(t, 0, m["pi_count"])
+ assert.EqualValues(t, 0, m["pi_sum"])
+ assert.EqualValues(t, 0, m["pi_bucket_1"])
+ assert.EqualValues(t, 0, m["pi_bucket_2"])
+ assert.EqualValues(t, 0, m["pi_bucket_3"])
+
+ h.Observe(0)
+ h.Observe(1.5)
+ h.Observe(3.5)
+ h.WriteTo(m, "pi", 100, 1)
+ assert.Len(t, m, 5)
+ assert.EqualValues(t, 3, m["pi_count"])
+ assert.EqualValues(t, 500, m["pi_sum"])
+ assert.EqualValues(t, 1, m["pi_bucket_1"])
+ assert.EqualValues(t, 2, m["pi_bucket_2"])
+ assert.EqualValues(t, 2, m["pi_bucket_3"])
+}
+
+func TestHistogram_searchBucketIndex(t *testing.T) {
+ h := NewHistogram(LinearBuckets(1, 1, 5)).(*histogram) // [1, 2, ..., 5]
+ assert.Equal(t, 0, h.searchBucketIndex(0.1))
+ assert.Equal(t, 1, h.searchBucketIndex(1.1))
+ assert.Equal(t, 5, h.searchBucketIndex(8.1))
+
+ h = NewHistogram(LinearBuckets(1, 1, 40)).(*histogram) // [1, 2, ..., 40]
+ assert.Equal(t, 0, h.searchBucketIndex(0.1))
+ assert.Equal(t, 1, h.searchBucketIndex(1.1))
+ assert.Equal(t, 5, h.searchBucketIndex(5.1))
+ assert.Equal(t, 7, h.searchBucketIndex(8))
+ assert.Equal(t, 39, h.searchBucketIndex(39.5))
+ assert.Equal(t, 40, h.searchBucketIndex(40.5))
+}
+
+func BenchmarkHistogram_Observe(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ buckets []float64
+ }{
+ {"default", nil},
+ {"len_10", LinearBuckets(0, 0.1, 10)},
+ {"len_20", LinearBuckets(0, 0.1, 20)},
+ {"len_30", LinearBuckets(0, 0.1, 30)},
+ {"len_40", LinearBuckets(0, 0.1, 40)},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ h := NewHistogram(bm.buckets)
+ for i := 0; i < b.N; i++ {
+ h.Observe(2.5)
+ }
+ })
+ }
+}
+
+func BenchmarkHistogram_WriteTo(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ buckets []float64
+ }{
+ {"default", nil},
+ {"len_10", LinearBuckets(0, 0.1, 10)},
+ {"len_20", LinearBuckets(0, 0.1, 20)},
+ {"len_30", LinearBuckets(0, 0.1, 30)},
+ {"len_40", LinearBuckets(0, 0.1, 40)},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ h := NewHistogram(bm.buckets)
+ h.Observe(0.1)
+ h.Observe(0.01)
+ h.Observe(0.5)
+ h.Observe(10)
+ m := map[string]int64{}
+ for i := 0; i < b.N; i++ {
+ h.WriteTo(m, "pi", 100, 1)
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/metrics/metrics.go b/src/go/plugin/go.d/pkg/metrics/metrics.go
new file mode 100644
index 000000000..9f6b7529b
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/metrics.go
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+
+// Observer is an interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ stm.Value
+ Observe(v float64)
+}
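
Since Histogram and Summary both satisfy Observer, collection code can be written once against the interface. A minimal sketch; observeAll is a hypothetical helper, not part of the patch:

package example

import "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"

// observeAll feeds a batch of samples into any Observer,
// e.g. a Histogram or a Summary from this package:
//
//	observeAll(metrics.NewSummary(), samples)
//	observeAll(metrics.NewHistogram(nil), samples)
func observeAll(o metrics.Observer, samples []float64) {
	for _, v := range samples {
		o.Observe(v)
	}
}
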
diff --git a/src/go/plugin/go.d/pkg/metrics/summary.go b/src/go/plugin/go.d/pkg/metrics/summary.go
new file mode 100644
index 000000000..d72d968e6
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/summary.go
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "math"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+type (
+ // A Summary captures individual observations from an event or sample stream and
+ // summarizes them in a manner similar to traditional summary statistics:
+ // sum of observations
+ // observation count
+ // observation average.
+ //
+ // To create summary instances, use NewSummary.
+ Summary interface {
+ Observer
+ Reset()
+ }
+
+ // SummaryVec is a Collector that bundles a set of Summary which have different values for their names.
+ // This is used if you want to count the same thing partitioned by various dimensions
+ // (e.g. number of HTTP response time, partitioned by response code and method).
+ //
+ // Create instances with NewSummaryVec.
+ SummaryVec map[string]Summary
+
+ summary struct {
+ min float64
+ max float64
+ sum float64
+ count int64
+ }
+)
+
+var (
+ _ stm.Value = summary{}
+ _ stm.Value = SummaryVec{}
+)
+
+// NewSummary creates a new Summary.
+func NewSummary() Summary {
+ return &summary{
+ min: math.MaxFloat64,
+ max: -math.MaxFloat64,
+ }
+}
+
+// WriteTo writes its values into the given map.
+// It adds these key-value pairs:
+//
+// ${key}_sum gauge, for the sum of its observed values since the last Reset call
+// ${key}_count counter, for the count of its observed values since the last Reset call
+// ${key}_min gauge, for the min of its observed values since the last Reset call (only exists if count > 0)
+// ${key}_max gauge, for the max of its observed values since the last Reset call (only exists if count > 0)
+// ${key}_avg gauge, for the avg of its observed values since the last Reset call (only exists if count > 0)
+func (s summary) WriteTo(rv map[string]int64, key string, mul, div int) {
+ if s.count > 0 {
+ rv[key+"_min"] = int64(s.min * float64(mul) / float64(div))
+ rv[key+"_max"] = int64(s.max * float64(mul) / float64(div))
+ rv[key+"_sum"] = int64(s.sum * float64(mul) / float64(div))
+ rv[key+"_count"] = s.count
+ rv[key+"_avg"] = int64(s.sum / float64(s.count) * float64(mul) / float64(div))
+ } else {
+ rv[key+"_count"] = 0
+ rv[key+"_sum"] = 0
+ delete(rv, key+"_min")
+ delete(rv, key+"_max")
+ delete(rv, key+"_avg")
+ }
+}
+
+// Reset resets all of its counters.
+// Call it before every scrape loop.
+func (s *summary) Reset() {
+ s.min = math.MaxFloat64
+ s.max = -math.MaxFloat64
+ s.sum = 0
+ s.count = 0
+}
+
+// Observe observes a value
+func (s *summary) Observe(v float64) {
+ if v > s.max {
+ s.max = v
+ }
+ if v < s.min {
+ s.min = v
+ }
+ s.sum += v
+ s.count++
+}
+
+// NewSummaryVec creates a new SummaryVec instance.
+func NewSummaryVec() SummaryVec {
+ return SummaryVec{}
+}
+
+// WriteTo writes its values into the given map.
+func (c SummaryVec) WriteTo(rv map[string]int64, key string, mul, div int) {
+ for name, value := range c {
+ value.WriteTo(rv, key+"_"+name, mul, div)
+ }
+}
+
+// Get gets a summary instance by name, creating it if it does not exist.
+func (c SummaryVec) Get(name string) Summary {
+ item, ok := c[name]
+ if ok {
+ return item
+ }
+ item = NewSummary()
+ c[name] = item
+ return item
+}
+
+// Reset resets all of its summaries.
+func (c SummaryVec) Reset() {
+ for _, value := range c {
+ value.Reset()
+ }
+}
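
A sketch of how Summary and SummaryVec above are intended to be used across scrape loops; the "request_time" key and the HTTP method names are illustrative:

package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
)

func main() {
	// One summary per HTTP method, created lazily by Get.
	vec := metrics.NewSummaryVec()

	vec.Get("get").Observe(0.12)
	vec.Get("get").Observe(0.30)
	vec.Get("post").Observe(1.05)

	mx := map[string]int64{}
	vec.WriteTo(mx, "request_time", 1000, 1) // values scaled to milliseconds
	fmt.Println(mx)                          // request_time_get_{min,max,avg,sum,count}, request_time_post_...

	// Reset between scrape loops so min/max/avg describe a single interval.
	vec.Reset()
}
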
diff --git a/src/go/plugin/go.d/pkg/metrics/summary_test.go b/src/go/plugin/go.d/pkg/metrics/summary_test.go
new file mode 100644
index 000000000..b98218369
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/summary_test.go
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewSummary(t *testing.T) {
+ s := NewSummary().(*summary)
+ assert.EqualValues(t, 0, s.count)
+ assert.Equal(t, 0.0, s.sum)
+ s.Observe(3.14)
+ assert.Equal(t, 3.14, s.min)
+ assert.Equal(t, 3.14, s.max)
+}
+
+func TestSummary_WriteTo(t *testing.T) {
+ s := NewSummary()
+
+ m1 := map[string]int64{}
+ s.WriteTo(m1, "pi", 100, 1)
+ assert.Len(t, m1, 2)
+ assert.Contains(t, m1, "pi_count")
+ assert.Contains(t, m1, "pi_sum")
+ assert.EqualValues(t, 0, m1["pi_count"])
+ assert.EqualValues(t, 0, m1["pi_sum"])
+
+ s.Observe(3.14)
+ s.Observe(2.71)
+ s.Observe(-10)
+
+ m2 := map[string]int64{}
+ s.WriteTo(m1, "pi", 100, 1)
+ s.WriteTo(m2, "pi", 100, 1)
+ assert.Equal(t, m1, m2)
+ assert.Len(t, m1, 5)
+ assert.EqualValues(t, 3, m1["pi_count"])
+ assert.EqualValues(t, -415, m1["pi_sum"])
+ assert.EqualValues(t, -1000, m1["pi_min"])
+ assert.EqualValues(t, 314, m1["pi_max"])
+ assert.EqualValues(t, -138, m1["pi_avg"])
+
+ s.Reset()
+ s.WriteTo(m1, "pi", 100, 1)
+ assert.Len(t, m1, 2)
+ assert.Contains(t, m1, "pi_count")
+ assert.Contains(t, m1, "pi_sum")
+ assert.EqualValues(t, 0, m1["pi_count"])
+ assert.EqualValues(t, 0, m1["pi_sum"])
+}
+
+func TestSummary_Reset(t *testing.T) {
+ s := NewSummary().(*summary)
+ s.Observe(1)
+ s.Reset()
+ assert.EqualValues(t, 0, s.count)
+}
+
+func BenchmarkSummary_Observe(b *testing.B) {
+ s := NewSummary()
+ for i := 0; i < b.N; i++ {
+ s.Observe(2.5)
+ }
+}
+
+func BenchmarkSummary_WriteTo(b *testing.B) {
+ s := NewSummary()
+ s.Observe(2.5)
+ s.Observe(3.5)
+ s.Observe(4.5)
+ m := map[string]int64{}
+ for i := 0; i < b.N; i++ {
+ s.WriteTo(m, "pi", 100, 1)
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/metrics/unique_counter.go b/src/go/plugin/go.d/pkg/metrics/unique_counter.go
new file mode 100644
index 000000000..da80fd3d0
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/unique_counter.go
@@ -0,0 +1,109 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "github.com/axiomhq/hyperloglog"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+)
+
+type (
+ UniqueCounter interface {
+ stm.Value
+ Insert(s string)
+ Value() int
+ Reset()
+ }
+
+ mapUniqueCounter struct {
+ m map[string]bool
+ }
+
+ hyperLogLogUniqueCounter struct {
+ sketch *hyperloglog.Sketch
+ }
+
+ UniqueCounterVec struct {
+ useHyperLogLog bool
+ Items map[string]UniqueCounter
+ }
+)
+
+var (
+ _ stm.Value = mapUniqueCounter{}
+ _ stm.Value = hyperLogLogUniqueCounter{}
+ _ stm.Value = UniqueCounterVec{}
+)
+
+func NewUniqueCounter(useHyperLogLog bool) UniqueCounter {
+ if useHyperLogLog {
+ return &hyperLogLogUniqueCounter{hyperloglog.New()}
+ }
+ return mapUniqueCounter{map[string]bool{}}
+}
+
+func (c mapUniqueCounter) WriteTo(rv map[string]int64, key string, mul, div int) {
+ rv[key] = int64(float64(c.Value()*mul) / float64(div))
+}
+
+func (c mapUniqueCounter) Insert(s string) {
+ c.m[s] = true
+}
+
+func (c mapUniqueCounter) Value() int {
+ return len(c.m)
+}
+
+func (c mapUniqueCounter) Reset() {
+ for key := range c.m {
+ delete(c.m, key)
+ }
+}
+
+// WriteTo writes its value into the given map.
+func (c hyperLogLogUniqueCounter) WriteTo(rv map[string]int64, key string, mul, div int) {
+ rv[key] = int64(float64(c.Value()*mul) / float64(div))
+}
+
+func (c *hyperLogLogUniqueCounter) Insert(s string) {
+ c.sketch.Insert([]byte(s))
+}
+
+func (c *hyperLogLogUniqueCounter) Value() int {
+ return int(c.sketch.Estimate())
+}
+
+func (c *hyperLogLogUniqueCounter) Reset() {
+ c.sketch = hyperloglog.New()
+}
+
+func NewUniqueCounterVec(useHyperLogLog bool) UniqueCounterVec {
+ return UniqueCounterVec{
+ Items: map[string]UniqueCounter{},
+ useHyperLogLog: useHyperLogLog,
+ }
+}
+
+// WriteTo writes its values into the given map.
+func (c UniqueCounterVec) WriteTo(rv map[string]int64, key string, mul, div int) {
+ for name, value := range c.Items {
+ value.WriteTo(rv, key+"_"+name, mul, div)
+ }
+}
+
+// Get gets a UniqueCounter instance by name, creating it if it does not exist.
+func (c UniqueCounterVec) Get(name string) UniqueCounter {
+ item, ok := c.Items[name]
+ if ok {
+ return item
+ }
+ item = NewUniqueCounter(c.useHyperLogLog)
+ c.Items[name] = item
+ return item
+}
+
+func (c UniqueCounterVec) Reset() {
+ for _, value := range c.Items {
+ value.Reset()
+ }
+}
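
A sketch showing the two UniqueCounter backends above (exact map vs HyperLogLog estimate); the "unique_clients" key and IP addresses are illustrative:

package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
)

func main() {
	// true selects the HyperLogLog sketch: approximate counts, bounded memory.
	// false selects the map backend: exact counts, memory grows with cardinality.
	uniqueClients := metrics.NewUniqueCounter(true)

	for _, ip := range []string{"10.0.0.1", "10.0.0.2", "10.0.0.1"} {
		uniqueClients.Insert(ip)
	}

	mx := map[string]int64{}
	uniqueClients.WriteTo(mx, "unique_clients", 1, 1)
	fmt.Println(mx["unique_clients"]) // 2 (an estimate when HyperLogLog is used)
}
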
diff --git a/src/go/plugin/go.d/pkg/metrics/unique_counter_test.go b/src/go/plugin/go.d/pkg/metrics/unique_counter_test.go
new file mode 100644
index 000000000..b9439c9a3
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/metrics/unique_counter_test.go
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package metrics
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestHyperLogLogUniqueCounter_Value(t *testing.T) {
+ for _, useHLL := range []bool{true, false} {
+ t.Run(fmt.Sprintf("HLL=%v", useHLL), func(t *testing.T) {
+ c := NewUniqueCounter(useHLL)
+ assert.Equal(t, 0, c.Value())
+
+ c.Insert("foo")
+ assert.Equal(t, 1, c.Value())
+
+ c.Insert("foo")
+ assert.Equal(t, 1, c.Value())
+
+ c.Insert("bar")
+ assert.Equal(t, 2, c.Value())
+
+ c.Insert("baz")
+ assert.Equal(t, 3, c.Value())
+
+ c.Reset()
+ assert.Equal(t, 0, c.Value())
+
+ c.Insert("foo")
+ assert.Equal(t, 1, c.Value())
+ })
+ }
+}
+
+func TestHyperLogLogUniqueCounter_WriteTo(t *testing.T) {
+ for _, useHLL := range []bool{true, false} {
+ t.Run(fmt.Sprintf("HLL=%v", useHLL), func(t *testing.T) {
+ c := NewUniqueCounterVec(useHLL)
+ c.Get("a").Insert("foo")
+ c.Get("a").Insert("bar")
+ c.Get("b").Insert("foo")
+
+ m := map[string]int64{}
+ c.WriteTo(m, "pi", 100, 1)
+ assert.Len(t, m, 2)
+ assert.EqualValues(t, 200, m["pi_a"])
+ assert.EqualValues(t, 100, m["pi_b"])
+ })
+ }
+}
+
+func TestUniqueCounterVec_Reset(t *testing.T) {
+ for _, useHLL := range []bool{true, false} {
+ t.Run(fmt.Sprintf("HLL=%v", useHLL), func(t *testing.T) {
+ c := NewUniqueCounterVec(useHLL)
+ c.Get("a").Insert("foo")
+ c.Get("a").Insert("bar")
+ c.Get("b").Insert("foo")
+
+ assert.Equal(t, 2, len(c.Items))
+ assert.Equal(t, 2, c.Get("a").Value())
+ assert.Equal(t, 1, c.Get("b").Value())
+
+ c.Reset()
+ assert.Equal(t, 2, len(c.Items))
+ assert.Equal(t, 0, c.Get("a").Value())
+ assert.Equal(t, 0, c.Get("b").Value())
+ })
+ }
+}
+
+func BenchmarkUniqueCounter_Insert(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ same bool
+ hyperloglog bool
+ nop bool
+ }{
+
+ {"map-same", true, false, false},
+ {"hll-same", true, true, false},
+
+ {"nop", false, false, true},
+ {"map-diff", false, false, false},
+ {"hll-diff", false, true, false},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ c := NewUniqueCounter(bm.hyperloglog)
+ if bm.same {
+ for i := 0; i < b.N; i++ {
+ c.Insert("foo")
+ }
+ } else if bm.nop {
+ for i := 0; i < b.N; i++ {
+ strconv.Itoa(i)
+ }
+ } else {
+ for i := 0; i < b.N; i++ {
+ c.Insert(strconv.Itoa(i))
+ }
+ }
+ })
+ }
+}
+
+func BenchmarkUniqueCounterVec_Insert(b *testing.B) {
+ benchmarks := []struct {
+ name string
+ same bool
+ hyperloglog bool
+ nop bool
+ }{
+
+ {"map-same", true, false, false},
+ {"hll-same", true, true, false},
+
+ {"nop", false, false, true},
+ {"map-diff", false, false, false},
+ {"hll-diff", false, true, false},
+ }
+ for _, bm := range benchmarks {
+ b.Run(bm.name, func(b *testing.B) {
+ c := NewUniqueCounterVec(bm.hyperloglog)
+ if bm.same {
+ for i := 0; i < b.N; i++ {
+ c.Get("a").Insert("foo")
+ }
+ } else if bm.nop {
+ for i := 0; i < b.N; i++ {
+ strconv.Itoa(i)
+ }
+ } else {
+ for i := 0; i < b.N; i++ {
+ c.Get("a").Insert(strconv.Itoa(i))
+ }
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/multipath/multipath.go b/src/go/plugin/go.d/pkg/multipath/multipath.go
new file mode 100644
index 000000000..6172def06
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/multipath/multipath.go
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package multipath
+
+import (
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "slices"
+ "strings"
+
+ "github.com/mitchellh/go-homedir"
+)
+
+type ErrNotFound struct{ msg string }
+
+func (e ErrNotFound) Error() string { return e.msg }
+
+// IsNotFound returns a boolean indicating whether the error is ErrNotFound or not.
+func IsNotFound(err error) bool {
+ var errNotFound ErrNotFound
+ return errors.As(err, &errNotFound)
+}
+
+// MultiPath is an ordered list of directories to search.
+type MultiPath []string
+
+// New creates a MultiPath from the given directories, expanding "~" and dropping empty and duplicate entries.
+func New(paths ...string) MultiPath {
+ set := map[string]bool{}
+ mPath := make(MultiPath, 0)
+
+ for _, dir := range paths {
+ if dir == "" {
+ continue
+ }
+ if d, err := homedir.Expand(dir); err == nil {
+ dir = d
+ }
+ if !set[dir] {
+ mPath = append(mPath, dir)
+ set[dir] = true
+ }
+ }
+
+ return mPath
+}
+
+// Find finds a file in the given paths, returning the first match.
+func (p MultiPath) Find(filename string) (string, error) {
+ for _, dir := range p {
+ file := filepath.Join(dir, filename)
+ if _, err := os.Stat(file); !os.IsNotExist(err) {
+ return file, nil
+ }
+ }
+ return "", ErrNotFound{msg: fmt.Sprintf("can't find '%s' in %v", filename, p)}
+}
+
+func (p MultiPath) FindFiles(suffixes ...string) ([]string, error) {
+ set := make(map[string]bool)
+ var files []string
+
+ for _, dir := range p {
+ entries, err := os.ReadDir(dir)
+ if err != nil {
+ continue
+ }
+
+ for _, e := range entries {
+ if !e.Type().IsRegular() {
+ continue
+ }
+
+ ext := filepath.Ext(e.Name())
+ name := strings.TrimSuffix(e.Name(), ext)
+
+ if (len(suffixes) != 0 && !slices.Contains(suffixes, ext)) || set[name] {
+ continue
+ }
+
+ set[name] = true
+ file := filepath.Join(dir, e.Name())
+ files = append(files, file)
+ }
+ }
+
+ return files, nil
+}
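
A sketch of the MultiPath lookup order above; the directory and file names are illustrative:

package main

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/multipath"
)

func main() {
	// Earlier directories win: the user directory shadows the stock one.
	dirs := multipath.New("/etc/netdata/go.d", "/usr/lib/netdata/conf.d/go.d")

	file, err := dirs.Find("nginx.conf")
	if multipath.IsNotFound(err) {
		fmt.Println("nginx.conf not found in any configured directory")
		return
	}
	fmt.Println("using", file)

	// All *.conf files; for duplicate base names the first directory wins.
	confs, _ := dirs.FindFiles(".conf")
	fmt.Println(confs)
}
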
diff --git a/src/go/plugin/go.d/pkg/multipath/multipath_test.go b/src/go/plugin/go.d/pkg/multipath/multipath_test.go
new file mode 100644
index 000000000..cd6c90d95
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/multipath/multipath_test.go
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package multipath
+
+import (
+ "errors"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNew(t *testing.T) {
+ assert.Len(
+ t,
+ New("path1", "path2", "path2", "", "path3"),
+ 3,
+ )
+}
+
+func TestMultiPath_Find(t *testing.T) {
+ m := New("path1", "testdata/data1")
+
+ v, err := m.Find("not exist")
+ assert.Zero(t, v)
+ assert.Error(t, err)
+
+ v, err = m.Find("test-empty.conf")
+ assert.Equal(t, "testdata/data1/test-empty.conf", v)
+ assert.Nil(t, err)
+
+ v, err = m.Find("test.conf")
+ assert.Equal(t, "testdata/data1/test.conf", v)
+ assert.Nil(t, err)
+}
+
+func TestIsNotFound(t *testing.T) {
+ assert.True(t, IsNotFound(ErrNotFound{}))
+ assert.False(t, IsNotFound(errors.New("")))
+}
+
+func TestMultiPath_FindFiles(t *testing.T) {
+ m := New("path1", "testdata/data2", "testdata/data1")
+
+ files, err := m.FindFiles(".conf")
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"testdata/data2/test-empty.conf", "testdata/data2/test.conf"}, files)
+
+ files, err = m.FindFiles()
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"testdata/data2/test-empty.conf", "testdata/data2/test.conf"}, files)
+
+ files, err = m.FindFiles(".not_exist")
+ assert.NoError(t, err)
+ assert.Equal(t, []string(nil), files)
+
+ m = New("path1", "testdata/data1", "testdata/data2")
+ files, err = m.FindFiles(".conf")
+ assert.NoError(t, err)
+ assert.Equal(t, []string{"testdata/data1/test-empty.conf", "testdata/data1/test.conf"}, files)
+}
diff --git a/src/go/plugin/go.d/pkg/multipath/testdata/data1/test-empty.conf b/src/go/plugin/go.d/pkg/multipath/testdata/data1/test-empty.conf
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/multipath/testdata/data1/test-empty.conf
diff --git a/src/go/plugin/go.d/pkg/multipath/testdata/data1/test.conf b/src/go/plugin/go.d/pkg/multipath/testdata/data1/test.conf
new file mode 100644
index 000000000..aebe64730
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/multipath/testdata/data1/test.conf
@@ -0,0 +1 @@
+not empty! \ No newline at end of file
diff --git a/src/go/plugin/go.d/pkg/multipath/testdata/data2/test-empty.conf b/src/go/plugin/go.d/pkg/multipath/testdata/data2/test-empty.conf
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/multipath/testdata/data2/test-empty.conf
diff --git a/src/go/plugin/go.d/pkg/multipath/testdata/data2/test.conf b/src/go/plugin/go.d/pkg/multipath/testdata/data2/test.conf
new file mode 100644
index 000000000..aebe64730
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/multipath/testdata/data2/test.conf
@@ -0,0 +1 @@
+not empty! \ No newline at end of file
diff --git a/src/go/plugin/go.d/pkg/prometheus/client.go b/src/go/plugin/go.d/pkg/prometheus/client.go
new file mode 100644
index 000000000..19d6bcfbc
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/client.go
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+)
+
+type (
+ // Prometheus is a helper for scraping and parsing Prometheus format metrics.
+ Prometheus interface {
+ // ScrapeSeries scrapes and parses Prometheus format metrics, returning them as a sorted Series.
+ ScrapeSeries() (Series, error)
+ Scrape() (MetricFamilies, error)
+ HTTPClient() *http.Client
+ }
+
+ prometheus struct {
+ client *http.Client
+ request web.Request
+ filepath string
+
+ sr selector.Selector
+
+ parser promTextParser
+
+ buf *bytes.Buffer
+ gzipr *gzip.Reader
+ bodyBuf *bufio.Reader
+ }
+)
+
+const (
+ acceptHeader = `text/plain;version=0.0.4;q=1,*/*;q=0.1`
+)
+
+// New creates a Prometheus instance.
+func New(client *http.Client, request web.Request) Prometheus {
+ return &prometheus{
+ client: client,
+ request: request,
+ buf: bytes.NewBuffer(make([]byte, 0, 16000)),
+ }
+}
+
+// NewWithSelector creates a Prometheus instance with the selector.
+func NewWithSelector(client *http.Client, request web.Request, sr selector.Selector) Prometheus {
+ p := &prometheus{
+ client: client,
+ request: request,
+ sr: sr,
+ buf: bytes.NewBuffer(make([]byte, 0, 16000)),
+ parser: promTextParser{sr: sr},
+ }
+
+ if v, err := url.Parse(request.URL); err == nil && v.Scheme == "file" {
+ p.filepath = filepath.Join(v.Host, v.Path)
+ }
+
+ return p
+}
+
+func (p *prometheus) HTTPClient() *http.Client {
+ return p.client
+}
+
+// ScrapeSeries scrapes metrics, then parses and sorts them.
+func (p *prometheus) ScrapeSeries() (Series, error) {
+ p.buf.Reset()
+
+ if err := p.fetch(p.buf); err != nil {
+ return nil, err
+ }
+
+ return p.parser.parseToSeries(p.buf.Bytes())
+}
+
+func (p *prometheus) Scrape() (MetricFamilies, error) {
+ p.buf.Reset()
+
+ if err := p.fetch(p.buf); err != nil {
+ return nil, err
+ }
+
+ return p.parser.parseToMetricFamilies(p.buf.Bytes())
+}
+
+func (p *prometheus) fetch(w io.Writer) error {
+ // TODO: should be a separate text file prom client
+ if p.filepath != "" {
+ f, err := os.Open(p.filepath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ _, err = io.Copy(w, f)
+
+ return err
+ }
+
+ req, err := web.NewHTTPRequest(p.request)
+ if err != nil {
+ return err
+ }
+
+ req.Header.Add("Accept", acceptHeader)
+ req.Header.Add("Accept-Encoding", "gzip")
+
+ resp, err := p.client.Do(req)
+ if err != nil {
+ return err
+ }
+
+ defer func() {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }()
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("server '%s' returned HTTP status code %d (%s)", req.URL, resp.StatusCode, resp.Status)
+ }
+
+ if resp.Header.Get("Content-Encoding") != "gzip" {
+ _, err = io.Copy(w, resp.Body)
+ return err
+ }
+
+ if p.gzipr == nil {
+ p.bodyBuf = bufio.NewReader(resp.Body)
+ p.gzipr, err = gzip.NewReader(p.bodyBuf)
+ if err != nil {
+ return err
+ }
+ } else {
+ p.bodyBuf.Reset(resp.Body)
+ _ = p.gzipr.Reset(p.bodyBuf)
+ }
+
+ _, err = io.Copy(w, p.gzipr)
+ _ = p.gzipr.Close()
+
+ return err
+}
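
A sketch of the scraping client above combined with a selector; the endpoint URL and the "go_*" pattern are illustrative (the tests in this patch use "go_gc*"):

package main

import (
	"fmt"
	"net/http"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
)

func main() {
	// Keep only series whose names match "go_*".
	sr, err := selector.Parse("go_*")
	if err != nil {
		panic(err)
	}

	// A "file://" URL would make the client read the metrics from a local file instead.
	req := web.Request{URL: "http://127.0.0.1:9090/metrics"}
	prom := prometheus.NewWithSelector(http.DefaultClient, req, sr)

	series, err := prom.ScrapeSeries()
	if err != nil {
		fmt.Println("scrape failed:", err)
		return
	}
	fmt.Println("scraped", len(series), "series")
}
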
diff --git a/src/go/plugin/go.d/pkg/prometheus/client_test.go b/src/go/plugin/go.d/pkg/prometheus/client_test.go
new file mode 100644
index 000000000..e6f61b9af
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/client_test.go
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "bytes"
+ "compress/gzip"
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ testData, _ = os.ReadFile("testdata/testdata.txt")
+ testDataNoMeta, _ = os.ReadFile("testdata/testdata.nometa.txt")
+)
+
+func Test_testClientDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "testData": testData,
+ } {
+ require.NotNilf(t, data, name)
+ }
+}
+
+func TestPrometheus404(t *testing.T) {
+ tsMux := http.NewServeMux()
+ tsMux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(404)
+ })
+ ts := httptest.NewServer(tsMux)
+ defer ts.Close()
+
+ req := web.Request{URL: ts.URL + "/metrics"}
+ prom := New(http.DefaultClient, req)
+ res, err := prom.ScrapeSeries()
+
+ assert.Error(t, err)
+ assert.Nil(t, res)
+}
+
+func TestPrometheusPlain(t *testing.T) {
+ tsMux := http.NewServeMux()
+ tsMux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(testData)
+ })
+ ts := httptest.NewServer(tsMux)
+ defer ts.Close()
+
+ req := web.Request{URL: ts.URL + "/metrics"}
+ prom := New(http.DefaultClient, req)
+ res, err := prom.ScrapeSeries()
+
+ assert.NoError(t, err)
+ verifyTestData(t, res)
+}
+
+func TestPrometheusPlainWithSelector(t *testing.T) {
+ tsMux := http.NewServeMux()
+ tsMux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write(testData)
+ })
+ ts := httptest.NewServer(tsMux)
+ defer ts.Close()
+
+ req := web.Request{URL: ts.URL + "/metrics"}
+ sr, err := selector.Parse("go_gc*")
+ require.NoError(t, err)
+ prom := NewWithSelector(http.DefaultClient, req, sr)
+
+ res, err := prom.ScrapeSeries()
+ require.NoError(t, err)
+
+ for _, v := range res {
+ assert.Truef(t, strings.HasPrefix(v.Name(), "go_gc"), v.Name())
+ }
+}
+
+func TestPrometheusGzip(t *testing.T) {
+ counter := 0
+ rawTestData := [][]byte{testData, testDataNoMeta}
+ tsMux := http.NewServeMux()
+ tsMux.HandleFunc("/metrics", func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Encoding", "gzip")
+ w.WriteHeader(200)
+ gz := new(bytes.Buffer)
+ ww := gzip.NewWriter(gz)
+ _, _ = ww.Write(rawTestData[counter])
+ _ = ww.Close()
+ _, _ = gz.WriteTo(w)
+ counter++
+ })
+ ts := httptest.NewServer(tsMux)
+ defer ts.Close()
+
+ req := web.Request{URL: ts.URL + "/metrics"}
+ prom := New(http.DefaultClient, req)
+
+ for i := 0; i < 2; i++ {
+ res, err := prom.ScrapeSeries()
+ assert.NoError(t, err)
+ verifyTestData(t, res)
+ }
+}
+
+func TestPrometheusReadFromFile(t *testing.T) {
+ req := web.Request{URL: "file://testdata/testdata.txt"}
+ prom := NewWithSelector(http.DefaultClient, req, nil)
+
+ for i := 0; i < 2; i++ {
+ res, err := prom.ScrapeSeries()
+ assert.NoError(t, err)
+ verifyTestData(t, res)
+ }
+}
+
+func verifyTestData(t *testing.T, ms Series) {
+ assert.Equal(t, 410, len(ms))
+ assert.Equal(t, "go_gc_duration_seconds", ms[0].Labels.Get("__name__"))
+ assert.Equal(t, "0.25", ms[0].Labels.Get("quantile"))
+ assert.InDelta(t, 4.9351e-05, ms[0].Value, 0.0001)
+
+ notExistYet := ms.FindByName("not_exist_yet")
+ assert.NotNil(t, notExistYet)
+ assert.Len(t, notExistYet, 0)
+
+ targetInterval := ms.FindByName("prometheus_target_interval_length_seconds")
+ assert.Len(t, targetInterval, 5)
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/metric_family.go b/src/go/plugin/go.d/pkg/prometheus/metric_family.go
new file mode 100644
index 000000000..dde08801e
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/metric_family.go
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+type (
+ MetricFamilies map[string]*MetricFamily
+
+ MetricFamily struct {
+ name string
+ help string
+ typ model.MetricType
+ metrics []Metric
+ }
+ Metric struct {
+ labels []labels.Label
+ gauge *Gauge
+ counter *Counter
+ summary *Summary
+ histogram *Histogram
+ untyped *Untyped
+ }
+ Gauge struct {
+ value float64
+ }
+ Counter struct {
+ value float64
+ }
+ Summary struct {
+ sum float64
+ count float64
+ quantiles []Quantile
+ }
+ Quantile struct {
+ quantile float64
+ value float64
+ }
+ Histogram struct {
+ sum float64
+ count float64
+ buckets []Bucket
+ }
+ Bucket struct {
+ upperBound float64
+ cumulativeCount float64
+ }
+ Untyped struct {
+ value float64
+ }
+)
+
+func (mfs MetricFamilies) Len() int {
+ return len(mfs)
+}
+
+func (mfs MetricFamilies) Get(name string) *MetricFamily {
+ return (mfs)[name]
+}
+
+func (mfs MetricFamilies) GetGauge(name string) *MetricFamily {
+ return mfs.get(name, model.MetricTypeGauge)
+}
+
+func (mfs MetricFamilies) GetCounter(name string) *MetricFamily {
+ return mfs.get(name, model.MetricTypeCounter)
+}
+
+func (mfs MetricFamilies) GetSummary(name string) *MetricFamily {
+ return mfs.get(name, model.MetricTypeSummary)
+}
+
+func (mfs MetricFamilies) GetHistogram(name string) *MetricFamily {
+ return mfs.get(name, model.MetricTypeHistogram)
+}
+
+func (mfs MetricFamilies) get(name string, typ model.MetricType) *MetricFamily {
+ mf := mfs.Get(name)
+ if mf == nil || mf.typ != typ {
+ return nil
+ }
+ return mf
+}
+
+func (mf *MetricFamily) Name() string { return mf.name }
+func (mf *MetricFamily) Help() string { return mf.help }
+func (mf *MetricFamily) Type() model.MetricType { return mf.typ }
+func (mf *MetricFamily) Metrics() []Metric { return mf.metrics }
+
+func (m *Metric) Labels() labels.Labels { return m.labels }
+func (m *Metric) Gauge() *Gauge { return m.gauge }
+func (m *Metric) Counter() *Counter { return m.counter }
+func (m *Metric) Summary() *Summary { return m.summary }
+func (m *Metric) Histogram() *Histogram { return m.histogram }
+func (m *Metric) Untyped() *Untyped { return m.untyped }
+
+func (g Gauge) Value() float64 { return g.value }
+func (c Counter) Value() float64 { return c.value }
+func (u Untyped) Value() float64 { return u.value }
+
+func (s Summary) Count() float64 { return s.count }
+func (s Summary) Sum() float64 { return s.sum }
+func (s Summary) Quantiles() []Quantile { return s.quantiles }
+
+func (q Quantile) Quantile() float64 { return q.quantile }
+func (q Quantile) Value() float64 { return q.value }
+
+func (h Histogram) Count() float64 { return h.count }
+func (h Histogram) Sum() float64 { return h.sum }
+func (h Histogram) Buckets() []Bucket { return h.buckets }
+
+func (b Bucket) UpperBound() float64 { return b.upperBound }
+func (b Bucket) CumulativeCount() float64 { return b.cumulativeCount }
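
A sketch of reading a scraped family through the typed accessors above; the family name is illustrative, and printHistogram is a hypothetical helper:

package example

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"
)

// printHistogram walks one histogram family from a Prometheus.Scrape() result.
func printHistogram(mfs prometheus.MetricFamilies) {
	mf := mfs.GetHistogram("http_request_duration_seconds")
	if mf == nil {
		return // absent, or present with a different type
	}
	for _, m := range mf.Metrics() {
		h := m.Histogram()
		fmt.Println(m.Labels(), "count:", h.Count(), "sum:", h.Sum())
		for _, b := range h.Buckets() {
			fmt.Println("  le", b.UpperBound(), "->", b.CumulativeCount())
		}
	}
}
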
diff --git a/src/go/plugin/go.d/pkg/prometheus/metric_family_test.go b/src/go/plugin/go.d/pkg/prometheus/metric_family_test.go
new file mode 100644
index 000000000..f373996da
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/metric_family_test.go
@@ -0,0 +1,356 @@
+package prometheus
+
+import (
+ "testing"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestMetricFamilies_Len(t *testing.T) {
+ tests := map[string]struct {
+ mfs MetricFamilies
+ wantLen int
+ }{
+ "initialized with two elements": {
+ mfs: MetricFamilies{"1": nil, "2": nil},
+ wantLen: 2,
+ },
+ "not initialized": {
+ mfs: nil,
+ wantLen: 0,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.mfs.Len(), test.wantLen)
+ })
+ }
+}
+
+func TestMetricFamilies_Get(t *testing.T) {
+ const n = "metric"
+
+ tests := map[string]struct {
+ mfs MetricFamilies
+ wantMF *MetricFamily
+ }{
+ "etric is found": {
+ mfs: MetricFamilies{n: &MetricFamily{name: n}},
+ wantMF: &MetricFamily{name: n},
+ },
+ "metric is not found": {
+ mfs: MetricFamilies{"!" + n: &MetricFamily{name: n}},
+ wantMF: nil,
+ },
+ "not initialized": {
+ mfs: nil,
+ wantMF: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.mfs.Get(n), test.wantMF)
+ })
+ }
+}
+
+func TestMetricFamilies_GetGauge(t *testing.T) {
+ const n = "metric"
+
+ tests := map[string]struct {
+ mfs MetricFamilies
+ wantMF *MetricFamily
+ }{
+ "metric is found and is Gauge": {
+ mfs: MetricFamilies{n: &MetricFamily{name: n, typ: model.MetricTypeGauge}},
+ wantMF: &MetricFamily{name: n, typ: model.MetricTypeGauge},
+ },
+ "metric is found but it is not Gauge": {
+ mfs: MetricFamilies{n: &MetricFamily{name: n, typ: model.MetricTypeUnknown}},
+ wantMF: nil,
+ },
+ "metric is not found": {
+ mfs: MetricFamilies{"!" + n: &MetricFamily{name: n, typ: model.MetricTypeGauge}},
+ wantMF: nil,
+ },
+ "not initialized": {
+ mfs: nil,
+ wantMF: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.mfs.GetGauge(n), test.wantMF)
+ })
+ }
+}
+
+func TestMetricFamilies_GetCounter(t *testing.T) {
+ const n = "metric"
+
+ tests := map[string]struct {
+ mfs MetricFamilies
+ wantMF *MetricFamily
+ }{
+ "metric is found and is Counter": {
+ mfs: MetricFamilies{n: &MetricFamily{name: n, typ: model.MetricTypeCounter}},
+ wantMF: &MetricFamily{name: n, typ: model.MetricTypeCounter},
+ },
+ "metric is found but it is not Counter": {
+ mfs: MetricFamilies{n: &MetricFamily{name: n, typ: model.MetricTypeGauge}},
+ wantMF: nil,
+ },
+ "metric is not found": {
+ mfs: MetricFamilies{"!" + n: &MetricFamily{name: n, typ: model.MetricTypeGauge}},
+ wantMF: nil,
+ },
+ "not initialized": {
+ mfs: nil,
+ wantMF: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.mfs.GetCounter(n), test.wantMF)
+ })
+ }
+}
+
+func TestMetricFamilies_GetSummary(t *testing.T) {
+ const n = "metric"
+
+ tests := map[string]struct {
+ mfs MetricFamilies
+ wantMF *MetricFamily
+ }{
+ "metric is found and is Summary": {
+ mfs: MetricFamilies{n: &MetricFamily{name: n, typ: model.MetricTypeSummary}},
+ wantMF: &MetricFamily{name: n, typ: model.MetricTypeSummary},
+ },
+ "metric is found but it is not Summary": {
+ mfs: MetricFamilies{n: &MetricFamily{name: n, typ: model.MetricTypeGauge}},
+ wantMF: nil,
+ },
+ "metric is not found": {
+ mfs: MetricFamilies{"!" + n: &MetricFamily{name: n, typ: model.MetricTypeGauge}},
+ wantMF: nil,
+ },
+ "not initialized": {
+ mfs: nil,
+ wantMF: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.mfs.GetSummary(n), test.wantMF)
+ })
+ }
+}
+
+func TestMetricFamilies_GetHistogram(t *testing.T) {
+ const n = "metric"
+
+ tests := map[string]struct {
+ mfs MetricFamilies
+ wantMF *MetricFamily
+ }{
+ "metric is found and is Histogram": {
+ mfs: MetricFamilies{n: &MetricFamily{name: n, typ: model.MetricTypeHistogram}},
+ wantMF: &MetricFamily{name: n, typ: model.MetricTypeHistogram},
+ },
+ "metric is found but it is not Histogram": {
+ mfs: MetricFamilies{n: &MetricFamily{name: n, typ: model.MetricTypeGauge}},
+ wantMF: nil,
+ },
+ "metric is not found": {
+ mfs: MetricFamilies{"!" + n: &MetricFamily{name: n, typ: model.MetricTypeGauge}},
+ wantMF: nil,
+ },
+ "not initialized": {
+ mfs: nil,
+ wantMF: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.mfs.GetHistogram(n), test.wantMF)
+ })
+ }
+}
+
+func TestMetricFamily_Name(t *testing.T) {
+ mf := &MetricFamily{name: "name"}
+ assert.Equal(t, mf.Name(), "name")
+}
+
+func TestMetricFamily_Type(t *testing.T) {
+ mf := &MetricFamily{typ: model.MetricTypeGauge}
+ assert.Equal(t, mf.Type(), model.MetricTypeGauge)
+}
+
+func TestMetricFamily_Help(t *testing.T) {
+ mf := &MetricFamily{help: "help"}
+ assert.Equal(t, mf.Help(), "help")
+}
+
+func TestMetricFamily_Metrics(t *testing.T) {
+ metrics := []Metric{{gauge: &Gauge{value: 1}, counter: &Counter{value: 1}}}
+ mf := &MetricFamily{metrics: metrics}
+ assert.Equal(t, mf.Metrics(), metrics)
+}
+
+func TestMetric_Labels(t *testing.T) {
+ lbs := labels.Labels{{Name: "1", Value: "1"}, {Name: "2", Value: "2"}}
+ m := &Metric{labels: lbs}
+ assert.Equal(t, m.Labels(), lbs)
+}
+
+func TestMetric_Gauge(t *testing.T) {
+ tests := map[string]struct {
+ m *Metric
+ want *Gauge
+ }{
+ "gauge set": {
+ m: &Metric{gauge: &Gauge{value: 1}},
+ want: &Gauge{value: 1},
+ },
+ "gauge not set": {
+ m: &Metric{},
+ want: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.m.Gauge(), test.want)
+ })
+ }
+}
+
+func TestMetric_Counter(t *testing.T) {
+ tests := map[string]struct {
+ m *Metric
+ want *Counter
+ }{
+ "counter set": {
+ m: &Metric{counter: &Counter{value: 1}},
+ want: &Counter{value: 1},
+ },
+ "counter not set": {
+ m: &Metric{},
+ want: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.m.Counter(), test.want)
+ })
+ }
+}
+
+func TestMetric_Summary(t *testing.T) {
+ tests := map[string]struct {
+ m *Metric
+ want *Summary
+ }{
+ "summary set": {
+ m: &Metric{summary: &Summary{sum: 0.1, count: 3}},
+ want: &Summary{sum: 0.1, count: 3},
+ },
+ "summary not set": {
+ m: &Metric{},
+ want: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.m.Summary(), test.want)
+ })
+ }
+}
+
+func TestMetric_Histogram(t *testing.T) {
+ tests := map[string]struct {
+ m *Metric
+ want *Histogram
+ }{
+ "histogram set": {
+ m: &Metric{histogram: &Histogram{sum: 0.1, count: 3}},
+ want: &Histogram{sum: 0.1, count: 3},
+ },
+ "histogram not set": {
+ m: &Metric{},
+ want: nil,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.m.Histogram(), test.want)
+ })
+ }
+}
+
+func TestGauge_Value(t *testing.T) {
+ assert.Equal(t, Gauge{value: 1}.Value(), 1.0)
+}
+
+func TestCounter_Value(t *testing.T) {
+ assert.Equal(t, Counter{value: 1}.Value(), 1.0)
+}
+
+func TestSummary_Sum(t *testing.T) {
+ assert.Equal(t, Summary{sum: 1}.Sum(), 1.0)
+}
+
+func TestSummary_Count(t *testing.T) {
+ assert.Equal(t, Summary{count: 1}.Count(), 1.0)
+}
+
+func TestSummary_Quantiles(t *testing.T) {
+ assert.Equal(t,
+ Summary{quantiles: []Quantile{{quantile: 0.1, value: 1}}}.Quantiles(),
+ []Quantile{{quantile: 0.1, value: 1}},
+ )
+}
+
+func TestQuantile_Value(t *testing.T) {
+ assert.Equal(t, Quantile{value: 1}.Value(), 1.0)
+}
+
+func TestQuantile_Quantile(t *testing.T) {
+ assert.Equal(t, Quantile{quantile: 0.1}.Quantile(), 0.1)
+}
+
+func TestHistogram_Sum(t *testing.T) {
+ assert.Equal(t, Histogram{sum: 1}.Sum(), 1.0)
+}
+
+func TestHistogram_Count(t *testing.T) {
+ assert.Equal(t, Histogram{count: 1}.Count(), 1.0)
+}
+
+func TestHistogram_Buckets(t *testing.T) {
+ assert.Equal(t,
+ Histogram{buckets: []Bucket{{upperBound: 0.1, cumulativeCount: 1}}}.Buckets(),
+ []Bucket{{upperBound: 0.1, cumulativeCount: 1}},
+ )
+}
+
+func TestBucket_UpperBound(t *testing.T) {
+ assert.Equal(t, Bucket{upperBound: 0.1}.UpperBound(), 0.1)
+}
+
+func TestBucket_CumulativeCount(t *testing.T) {
+ assert.Equal(t, Bucket{cumulativeCount: 1}.CumulativeCount(), 1.0)
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/metric_series.go b/src/go/plugin/go.d/pkg/prometheus/metric_series.go
new file mode 100644
index 000000000..31914f4b2
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/metric_series.go
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "sort"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+type (
+ // SeriesSample is a pair of label set and value
+ SeriesSample struct {
+ Labels labels.Labels
+ Value float64
+ }
+
+ // Series is a list of SeriesSample
+ Series []SeriesSample
+)
+
+// Name returns the value of the __name__ label (expected to be the first label).
+func (s SeriesSample) Name() string {
+ return s.Labels[0].Value
+}
+
+// Add appends a metric.
+func (s *Series) Add(kv SeriesSample) {
+ *s = append(*s, kv)
+}
+
+// Reset resets the series to be empty,
+// but it retains the underlying storage for use by future writes.
+func (s *Series) Reset() {
+ *s = (*s)[:0]
+}
+
+// Sort sorts data.
+func (s Series) Sort() {
+ sort.Sort(s)
+}
+
+// Len returns metric length.
+func (s Series) Len() int {
+ return len(s)
+}
+
+// Less reports whether the element with
+// index i should sort before the element with index j.
+func (s Series) Less(i, j int) bool {
+ return s[i].Name() < s[j].Name()
+}
+
+// Swap swaps the elements with indexes i and j.
+func (s Series) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+// FindByName finds metrics whose __name__ label matches the given name.
+// It expects the series to be sorted.
+// Complexity: O(log(N))
+func (s Series) FindByName(name string) Series {
+ from := sort.Search(len(s), func(i int) bool {
+ return s[i].Name() >= name
+ })
+ if from == len(s) || s[from].Name() != name { // not found
+ return Series{}
+ }
+ until := from + 1
+ for until < len(s) && s[until].Name() == name {
+ until++
+ }
+ return s[from:until]
+}
+
+// FindByNames finds metrics whose __name__ label matches any of the given names.
+// It expects the series to be sorted.
+// Complexity: O(log(N))
+func (s Series) FindByNames(names ...string) Series {
+ switch len(names) {
+ case 0:
+ return Series{}
+ case 1:
+ return s.FindByName(names[0])
+ }
+ var result Series
+ for _, name := range names {
+ result = append(result, s.FindByName(name)...)
+ }
+ return result
+}
+
+// Max returns the max value.
+// It does NOT expect the series to be sorted.
+// Complexity: O(N)
+func (s Series) Max() float64 {
+ switch len(s) {
+ case 0:
+ return 0
+ case 1:
+ return s[0].Value
+ }
+ max := s[0].Value
+ for _, kv := range s[1:] {
+ if max < kv.Value {
+ max = kv.Value
+ }
+ }
+ return max
+}
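
A sketch of building and querying a Series directly; the label values are illustrative. Note that Name() reads Labels[0], so __name__ must be the first label:

package example

import (
	"fmt"

	"github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus"

	"github.com/prometheus/prometheus/model/labels"
)

func seriesLookup() {
	var s prometheus.Series

	s.Add(prometheus.SeriesSample{
		Labels: labels.Labels{{Name: "__name__", Value: "up"}, {Name: "job", Value: "node"}},
		Value:  1,
	})
	s.Add(prometheus.SeriesSample{
		Labels: labels.Labels{{Name: "__name__", Value: "up"}, {Name: "job", Value: "nginx"}},
		Value:  0,
	})

	s.Sort() // FindByName/FindByNames require the series to be sorted
	for _, ss := range s.FindByName("up") {
		fmt.Println(ss.Labels.Get("job"), ss.Value)
	}
	fmt.Println("max:", s.Max())
}
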
diff --git a/src/go/plugin/go.d/pkg/prometheus/metric_series_test.go b/src/go/plugin/go.d/pkg/prometheus/metric_series_test.go
new file mode 100644
index 000000000..80c805474
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/metric_series_test.go
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package prometheus
+
+import (
+ "testing"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TODO: write better tests
+
+const (
+ testName1 = "logback_events_total"
+ testName2 = "jvm_threads_peak"
+)
+
+func newTestSeries() Series {
+ return Series{
+ {
+ Value: 10,
+ Labels: labels.Labels{
+ {Name: "__name__", Value: testName1},
+ {Name: "level", Value: "error"},
+ },
+ },
+ {
+ Value: 20,
+ Labels: labels.Labels{
+ {Name: "__name__", Value: testName1},
+ {Name: "level", Value: "warn"},
+ },
+ },
+ {
+ Value: 5,
+ Labels: labels.Labels{
+ {Name: "__name__", Value: testName1},
+ {Name: "level", Value: "info"},
+ },
+ },
+ {
+ Value: 15,
+ Labels: labels.Labels{
+ {Name: "__name__", Value: testName1},
+ {Name: "level", Value: "debug"},
+ },
+ },
+ {
+ Value: 26,
+ Labels: labels.Labels{
+ {Name: "__name__", Value: testName2},
+ },
+ },
+ }
+}
+
+func TestSeries_Name(t *testing.T) {
+ m := newTestSeries()
+
+ assert.Equal(t, testName1, m[0].Name())
+ assert.Equal(t, testName1, m[1].Name())
+
+}
+
+func TestSeries_Add(t *testing.T) {
+ m := newTestSeries()
+
+ require.Len(t, m, 5)
+ m.Add(SeriesSample{})
+ assert.Len(t, m, 6)
+}
+
+func TestSeries_FindByName(t *testing.T) {
+ m := newTestSeries()
+ m.Sort()
+ assert.Len(t, Series{}.FindByName(testName1), 0)
+ assert.Len(t, m.FindByName(testName1), len(m)-1)
+}
+
+func TestSeries_FindByNames(t *testing.T) {
+ m := newTestSeries()
+ m.Sort()
+ assert.Len(t, m.FindByNames(), 0)
+ assert.Len(t, m.FindByNames(testName1), len(m)-1)
+ assert.Len(t, m.FindByNames(testName1, testName2), len(m))
+}
+
+func TestSeries_Len(t *testing.T) {
+ m := newTestSeries()
+
+ assert.Equal(t, len(m), m.Len())
+}
+
+func TestSeries_Less(t *testing.T) {
+ m := newTestSeries()
+
+ assert.False(t, m.Less(0, 1))
+ assert.True(t, m.Less(4, 0))
+}
+
+func TestSeries_Max(t *testing.T) {
+ m := newTestSeries()
+
+ assert.Equal(t, float64(26), m.Max())
+
+}
+
+func TestSeries_Reset(t *testing.T) {
+ m := newTestSeries()
+ m.Reset()
+
+ assert.Len(t, m, 0)
+
+}
+
+func TestSeries_Sort(t *testing.T) {
+ {
+ m := newTestSeries()
+ m.Sort()
+ assert.Equal(t, testName2, m[0].Name())
+ }
+ {
+ m := Series{}
+ assert.Equal(t, 0.0, m.Max())
+ }
+}
+
+func TestSeries_Swap(t *testing.T) {
+ m := newTestSeries()
+
+ m0 := m[0]
+ m1 := m[1]
+
+ m.Swap(0, 1)
+
+ assert.Equal(t, m0, m[1])
+ assert.Equal(t, m1, m[0])
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/parse.go b/src/go/plugin/go.d/pkg/prometheus/parse.go
new file mode 100644
index 000000000..2c7d2eb40
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/parse.go
@@ -0,0 +1,414 @@
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/prometheus/prometheus/model/textparse"
+)
+
+const (
+ quantileLabel = "quantile"
+ bucketLabel = "le"
+)
+
+const (
+ countSuffix = "_count"
+ sumSuffix = "_sum"
+ bucketSuffix = "_bucket"
+)
+
+type promTextParser struct {
+ metrics MetricFamilies
+ series Series
+
+ sr selector.Selector
+
+ currMF *MetricFamily
+ currSeries labels.Labels
+
+ summaries map[uint64]*Summary
+ histograms map[uint64]*Histogram
+
+ isCount bool
+ isSum bool
+ isQuantile bool
+ isBucket bool
+
+ currQuantile float64
+ currBucket float64
+}
+
+func (p *promTextParser) parseToSeries(text []byte) (Series, error) {
+ p.series.Reset()
+
+ parser := textparse.NewPromParser(text)
+ for {
+ entry, err := parser.Next()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if entry == textparse.EntryInvalid && strings.HasPrefix(err.Error(), "invalid metric type") {
+ continue
+ }
+ return nil, fmt.Errorf("failed to parse prometheus metrics: %v", err)
+ }
+
+ switch entry {
+ case textparse.EntrySeries:
+ p.currSeries = p.currSeries[:0]
+
+ parser.Metric(&p.currSeries)
+
+ if p.sr != nil && !p.sr.Matches(p.currSeries) {
+ continue
+ }
+
+ _, _, val := parser.Series()
+ p.series.Add(SeriesSample{Labels: copyLabels(p.currSeries), Value: val})
+ }
+ }
+
+ p.series.Sort()
+
+ return p.series, nil
+}
+
+var reSpace = regexp.MustCompile(`\s+`)
+
+func (p *promTextParser) parseToMetricFamilies(text []byte) (MetricFamilies, error) {
+ p.reset()
+
+ parser := textparse.NewPromParser(text)
+ for {
+ entry, err := parser.Next()
+ if err != nil {
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if entry == textparse.EntryInvalid && strings.HasPrefix(err.Error(), "invalid metric type") {
+ continue
+ }
+ return nil, fmt.Errorf("failed to parse prometheus metrics: %v", err)
+ }
+
+ switch entry {
+ case textparse.EntryHelp:
+ name, help := parser.Help()
+ p.setMetricFamilyByName(string(name))
+ p.currMF.help = string(help)
+ if strings.IndexByte(p.currMF.help, '\n') != -1 {
+ // convert multiline to one line because HELP is used as the chart title.
+ p.currMF.help = reSpace.ReplaceAllString(strings.TrimSpace(p.currMF.help), " ")
+ }
+ case textparse.EntryType:
+ name, typ := parser.Type()
+ p.setMetricFamilyByName(string(name))
+ p.currMF.typ = typ
+ case textparse.EntrySeries:
+ p.currSeries = p.currSeries[:0]
+
+ parser.Metric(&p.currSeries)
+
+ if p.sr != nil && !p.sr.Matches(p.currSeries) {
+ continue
+ }
+
+ p.setMetricFamilyBySeries()
+
+ _, _, value := parser.Series()
+
+ switch p.currMF.typ {
+ case model.MetricTypeGauge:
+ p.addGauge(value)
+ case model.MetricTypeCounter:
+ p.addCounter(value)
+ case model.MetricTypeSummary:
+ p.addSummary(value)
+ case model.MetricTypeHistogram:
+ p.addHistogram(value)
+ case model.MetricTypeUnknown:
+ p.addUnknown(value)
+ }
+ }
+ }
+
+ for k, v := range p.metrics {
+ if len(v.Metrics()) == 0 {
+ delete(p.metrics, k)
+ }
+ }
+
+ return p.metrics, nil
+}
+
+func (p *promTextParser) setMetricFamilyByName(name string) {
+ mf, ok := p.metrics[name]
+ if !ok {
+ mf = &MetricFamily{name: name, typ: model.MetricTypeUnknown}
+ p.metrics[name] = mf
+ }
+ p.currMF = mf
+}
+
+func (p *promTextParser) setMetricFamilyBySeries() {
+ p.isSum, p.isCount, p.isQuantile, p.isBucket = false, false, false, false
+ p.currQuantile, p.currBucket = 0, 0
+
+ name := p.currSeries[0].Value
+
+ if p.currMF != nil && p.currMF.name == name {
+ if p.currMF.typ == model.MetricTypeSummary {
+ p.setQuantile()
+ }
+ return
+ }
+
+ typ := model.MetricTypeUnknown
+
+ switch {
+ case strings.HasSuffix(name, sumSuffix):
+ n := strings.TrimSuffix(name, sumSuffix)
+ if mf, ok := p.metrics[n]; ok && isSummaryOrHistogram(mf.typ) {
+ p.isSum = true
+ p.currSeries[0].Value = n
+ p.currMF = mf
+ return
+ }
+ case strings.HasSuffix(name, countSuffix):
+ n := strings.TrimSuffix(name, countSuffix)
+ if mf, ok := p.metrics[n]; ok && isSummaryOrHistogram(mf.typ) {
+ p.isCount = true
+ p.currSeries[0].Value = n
+ p.currMF = mf
+ return
+ }
+ case strings.HasSuffix(name, bucketSuffix):
+ n := strings.TrimSuffix(name, bucketSuffix)
+ if mf, ok := p.metrics[n]; ok && isSummaryOrHistogram(mf.typ) {
+ p.currSeries[0].Value = n
+ p.setBucket()
+ p.currMF = mf
+ return
+ }
+ if p.currSeries.Has(bucketLabel) {
+ p.currSeries[0].Value = n
+ p.setBucket()
+ name = n
+ typ = model.MetricTypeHistogram
+ }
+ case p.currSeries.Has(quantileLabel):
+ typ = model.MetricTypeSummary
+ p.setQuantile()
+ }
+
+ p.setMetricFamilyByName(name)
+ if p.currMF.typ == "" || p.currMF.typ == model.MetricTypeUnknown {
+ p.currMF.typ = typ
+ }
+}
+
+func (p *promTextParser) setQuantile() {
+ if lbs, v, ok := removeLabel(p.currSeries, quantileLabel); ok {
+ p.isQuantile = true
+ p.currSeries = lbs
+ p.currQuantile, _ = strconv.ParseFloat(v, 64)
+ }
+}
+
+func (p *promTextParser) setBucket() {
+ if lbs, v, ok := removeLabel(p.currSeries, bucketLabel); ok {
+ p.isBucket = true
+ p.currSeries = lbs
+ p.currBucket, _ = strconv.ParseFloat(v, 64)
+ }
+}
+
+func (p *promTextParser) addGauge(value float64) {
+ p.currSeries = p.currSeries[1:] // remove "__name__"
+
+ if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+ p.currMF.metrics = append(p.currMF.metrics, Metric{
+ labels: copyLabels(p.currSeries),
+ gauge: &Gauge{value: value},
+ })
+ } else {
+ p.currMF.metrics = p.currMF.metrics[:v+1]
+ if p.currMF.metrics[v].gauge == nil {
+ p.currMF.metrics[v].gauge = &Gauge{}
+ }
+ p.currMF.metrics[v].gauge.value = value
+ p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+ p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+ }
+}
+
+func (p *promTextParser) addCounter(value float64) {
+ p.currSeries = p.currSeries[1:] // remove "__name__"
+
+ if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+ p.currMF.metrics = append(p.currMF.metrics, Metric{
+ labels: copyLabels(p.currSeries),
+ counter: &Counter{value: value},
+ })
+ } else {
+ p.currMF.metrics = p.currMF.metrics[:v+1]
+ if p.currMF.metrics[v].counter == nil {
+ p.currMF.metrics[v].counter = &Counter{}
+ }
+ p.currMF.metrics[v].counter.value = value
+ p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+ p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+ }
+}
+
+func (p *promTextParser) addUnknown(value float64) {
+ p.currSeries = p.currSeries[1:] // remove "__name__"
+
+ if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+ p.currMF.metrics = append(p.currMF.metrics, Metric{
+ labels: copyLabels(p.currSeries),
+ untyped: &Untyped{value: value},
+ })
+ } else {
+ p.currMF.metrics = p.currMF.metrics[:v+1]
+ if p.currMF.metrics[v].untyped == nil {
+ p.currMF.metrics[v].untyped = &Untyped{}
+ }
+ p.currMF.metrics[v].untyped.value = value
+ p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+ p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+ }
+}
+
+func (p *promTextParser) addSummary(value float64) {
+ hash := p.currSeries.Hash()
+
+ p.currSeries = p.currSeries[1:] // remove "__name__"
+
+ s, ok := p.summaries[hash]
+ if !ok {
+ if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+ s = &Summary{}
+ p.currMF.metrics = append(p.currMF.metrics, Metric{
+ labels: copyLabels(p.currSeries),
+ summary: s,
+ })
+ } else {
+ p.currMF.metrics = p.currMF.metrics[:v+1]
+ if p.currMF.metrics[v].summary == nil {
+ p.currMF.metrics[v].summary = &Summary{}
+ }
+ p.currMF.metrics[v].summary.sum = 0
+ p.currMF.metrics[v].summary.count = 0
+ p.currMF.metrics[v].summary.quantiles = p.currMF.metrics[v].summary.quantiles[:0]
+ p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+ p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+ s = p.currMF.metrics[v].summary
+ }
+
+ p.summaries[hash] = s
+ }
+
+ switch {
+ case p.isQuantile:
+ s.quantiles = append(s.quantiles, Quantile{quantile: p.currQuantile, value: value})
+ case p.isSum:
+ s.sum = value
+ case p.isCount:
+ s.count = value
+ }
+}
+
+func (p *promTextParser) addHistogram(value float64) {
+ hash := p.currSeries.Hash()
+
+ p.currSeries = p.currSeries[1:] // remove "__name__"
+
+ h, ok := p.histograms[hash]
+ if !ok {
+ if v := len(p.currMF.metrics); v == cap(p.currMF.metrics) {
+ h = &Histogram{}
+ p.currMF.metrics = append(p.currMF.metrics, Metric{
+ labels: copyLabels(p.currSeries),
+ histogram: h,
+ })
+ } else {
+ p.currMF.metrics = p.currMF.metrics[:v+1]
+ if p.currMF.metrics[v].histogram == nil {
+ p.currMF.metrics[v].histogram = &Histogram{}
+ }
+ p.currMF.metrics[v].histogram.sum = 0
+ p.currMF.metrics[v].histogram.count = 0
+ p.currMF.metrics[v].histogram.buckets = p.currMF.metrics[v].histogram.buckets[:0]
+ p.currMF.metrics[v].labels = p.currMF.metrics[v].labels[:0]
+ p.currMF.metrics[v].labels = append(p.currMF.metrics[v].labels, p.currSeries...)
+ h = p.currMF.metrics[v].histogram
+ }
+
+ p.histograms[hash] = h
+ }
+
+ switch {
+ case p.isBucket:
+ h.buckets = append(h.buckets, Bucket{upperBound: p.currBucket, cumulativeCount: value})
+ case p.isSum:
+ h.sum = value
+ case p.isCount:
+ h.count = value
+ }
+}
+
+func (p *promTextParser) reset() {
+ p.currMF = nil
+ p.currSeries = p.currSeries[:0]
+
+ if p.metrics == nil {
+ p.metrics = make(MetricFamilies)
+ }
+ for _, mf := range p.metrics {
+ mf.help = ""
+ mf.typ = ""
+ mf.metrics = mf.metrics[:0]
+ }
+
+ if p.summaries == nil {
+ p.summaries = make(map[uint64]*Summary)
+ }
+ for k := range p.summaries {
+ delete(p.summaries, k)
+ }
+
+ if p.histograms == nil {
+ p.histograms = make(map[uint64]*Histogram)
+ }
+ for k := range p.histograms {
+ delete(p.histograms, k)
+ }
+}
+
+func copyLabels(lbs []labels.Label) []labels.Label {
+ return append([]labels.Label(nil), lbs...)
+}
+
+func removeLabel(lbs labels.Labels, name string) (labels.Labels, string, bool) {
+ for i, v := range lbs {
+ if v.Name == name {
+ return append(lbs[:i], lbs[i+1:]...), v.Value, true
+ }
+ }
+ return lbs, "", false
+}
+
+func isSummaryOrHistogram(typ model.MetricType) bool {
+ return typ == model.MetricTypeSummary || typ == model.MetricTypeHistogram
+}
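
promTextParser is unexported, so it is normally exercised only through Prometheus.Scrape/ScrapeSeries above. An in-package sketch (e.g. from a _test.go file in package prometheus, assuming "fmt" is imported) of how a summary's quantile/_sum/_count series are folded back into a single MetricFamily:

func exampleParseSummary() {
	var p promTextParser

	text := []byte("# TYPE rpc_duration_seconds summary\n" +
		"rpc_duration_seconds{quantile=\"0.5\"} 0.05\n" +
		"rpc_duration_seconds_sum 1.2\n" +
		"rpc_duration_seconds_count 7\n")

	mfs, err := p.parseToMetricFamilies(text)
	if err != nil {
		return
	}

	// The three series above end up as one Summary metric.
	s := mfs.GetSummary("rpc_duration_seconds").Metrics()[0].Summary()
	fmt.Println(s.Count(), s.Sum(), s.Quantiles()) // 7 1.2 [{0.5 0.05}]
}
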
diff --git a/src/go/plugin/go.d/pkg/prometheus/parse_test.go b/src/go/plugin/go.d/pkg/prometheus/parse_test.go
new file mode 100644
index 000000000..cb128ffe5
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/parse_test.go
@@ -0,0 +1,1675 @@
+package prometheus
+
+import (
+ "bytes"
+ "fmt"
+ "math"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+
+ "github.com/prometheus/common/model"
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataMultilineHelp, _ = os.ReadFile("testdata/multiline-help.txt")
+
+ dataGaugeMeta, _ = os.ReadFile("testdata/gauge-meta.txt")
+ dataGaugeNoMeta, _ = os.ReadFile("testdata/gauge-no-meta.txt")
+ dataCounterMeta, _ = os.ReadFile("testdata/counter-meta.txt")
+ dataCounterNoMeta, _ = os.ReadFile("testdata/counter-no-meta.txt")
+ dataSummaryMeta, _ = os.ReadFile("testdata/summary-meta.txt")
+ dataSummaryNoMeta, _ = os.ReadFile("testdata/summary-no-meta.txt")
+ dataHistogramMeta, _ = os.ReadFile("testdata/histogram-meta.txt")
+ dataHistogramNoMeta, _ = os.ReadFile("testdata/histogram-no-meta.txt")
+ dataAllTypes = joinData(
+ dataGaugeMeta, dataGaugeNoMeta, dataCounterMeta, dataCounterNoMeta,
+ dataSummaryMeta, dataSummaryNoMeta, dataHistogramMeta, dataHistogramNoMeta,
+ )
+)
+
+func Test_testParseDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataMultilineHelp": dataMultilineHelp,
+ "dataGaugeMeta": dataGaugeMeta,
+ "dataGaugeNoMeta": dataGaugeNoMeta,
+ "dataCounterMeta": dataCounterMeta,
+ "dataCounterNoMeta": dataCounterNoMeta,
+ "dataSummaryMeta": dataSummaryMeta,
+ "dataSummaryNoMeta": dataSummaryNoMeta,
+ "dataHistogramMeta": dataHistogramMeta,
+ "dataHistogramNoMeta": dataHistogramNoMeta,
+ "dataAllTypes": dataAllTypes,
+ } {
+ require.NotNilf(t, data, name)
+ }
+}
+
+func TestPromTextParser_parseToMetricFamilies(t *testing.T) {
+ tests := map[string]struct {
+ input []byte
+ want MetricFamilies
+ }{
+ "Gauge with multiline HELP": {
+ input: dataMultilineHelp,
+ want: MetricFamilies{
+ "test_gauge_metric_1": {
+ name: "test_gauge_metric_1",
+ help: "First line. Second line.",
+ typ: model.MetricTypeGauge,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ gauge: &Gauge{value: 11},
+ },
+ },
+ },
+ },
+ },
+ "Gauge with meta parsed as Gauge": {
+ input: dataGaugeMeta,
+ want: MetricFamilies{
+ "test_gauge_metric_1": {
+ name: "test_gauge_metric_1",
+ help: "Test Gauge Metric 1",
+ typ: model.MetricTypeGauge,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ gauge: &Gauge{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ gauge: &Gauge{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ gauge: &Gauge{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ gauge: &Gauge{value: 14},
+ },
+ },
+ },
+ "test_gauge_metric_2": {
+ name: "test_gauge_metric_2",
+ typ: model.MetricTypeGauge,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ gauge: &Gauge{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ gauge: &Gauge{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ gauge: &Gauge{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ gauge: &Gauge{value: 14},
+ },
+ },
+ },
+ },
+ },
+ "Counter with meta parsed as Counter": {
+ input: dataCounterMeta,
+ want: MetricFamilies{
+ "test_counter_metric_1_total": {
+ name: "test_counter_metric_1_total",
+ help: "Test Counter Metric 1",
+ typ: model.MetricTypeCounter,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ counter: &Counter{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ counter: &Counter{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ counter: &Counter{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ counter: &Counter{value: 14},
+ },
+ },
+ },
+ "test_counter_metric_2_total": {
+ name: "test_counter_metric_2_total",
+ typ: model.MetricTypeCounter,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ counter: &Counter{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ counter: &Counter{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ counter: &Counter{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ counter: &Counter{value: 14},
+ },
+ },
+ },
+ },
+ },
+ "Summary with meta parsed as Summary": {
+ input: dataSummaryMeta,
+ want: MetricFamilies{
+ "test_summary_1_duration_microseconds": {
+ name: "test_summary_1_duration_microseconds",
+ help: "Test Summary Metric 1",
+ typ: model.MetricTypeSummary,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ },
+ },
+ "test_summary_2_duration_microseconds": {
+ name: "test_summary_2_duration_microseconds",
+ typ: model.MetricTypeSummary,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "Histogram with meta parsed as Histogram": {
+ input: dataHistogramMeta,
+ want: MetricFamilies{
+ "test_histogram_1_duration_seconds": {
+ name: "test_histogram_1_duration_seconds",
+ help: "Test Histogram Metric 1",
+ typ: model.MetricTypeHistogram,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ },
+ },
+ "test_histogram_2_duration_seconds": {
+ name: "test_histogram_2_duration_seconds",
+ typ: model.MetricTypeHistogram,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "Gauge no meta parsed as Untyped": {
+ input: dataGaugeNoMeta,
+ want: MetricFamilies{
+ "test_gauge_no_meta_metric_1": {
+ name: "test_gauge_no_meta_metric_1",
+ typ: model.MetricTypeUnknown,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ untyped: &Untyped{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ untyped: &Untyped{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ untyped: &Untyped{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ untyped: &Untyped{value: 14},
+ },
+ },
+ },
+ "test_gauge_no_meta_metric_2": {
+ name: "test_gauge_no_meta_metric_2",
+ typ: model.MetricTypeUnknown,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ untyped: &Untyped{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ untyped: &Untyped{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ untyped: &Untyped{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ untyped: &Untyped{value: 14},
+ },
+ },
+ },
+ },
+ },
+ "Counter no meta parsed as Untyped": {
+ input: dataCounterNoMeta,
+ want: MetricFamilies{
+ "test_counter_no_meta_metric_1_total": {
+ name: "test_counter_no_meta_metric_1_total",
+ typ: model.MetricTypeUnknown,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ untyped: &Untyped{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ untyped: &Untyped{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ untyped: &Untyped{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ untyped: &Untyped{value: 14},
+ },
+ },
+ },
+ "test_counter_no_meta_metric_2_total": {
+ name: "test_counter_no_meta_metric_2_total",
+ typ: model.MetricTypeUnknown,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ untyped: &Untyped{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ untyped: &Untyped{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ untyped: &Untyped{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ untyped: &Untyped{value: 14},
+ },
+ },
+ },
+ },
+ },
+ "Summary no meta parsed as Summary": {
+ input: dataSummaryNoMeta,
+ want: MetricFamilies{
+ "test_summary_no_meta_1_duration_microseconds": {
+ name: "test_summary_no_meta_1_duration_microseconds",
+ typ: model.MetricTypeSummary,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ },
+ },
+ "test_summary_no_meta_2_duration_microseconds": {
+ name: "test_summary_no_meta_2_duration_microseconds",
+ typ: model.MetricTypeSummary,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "Histogram no meta parsed as Histogram": {
+ input: dataHistogramNoMeta,
+ want: MetricFamilies{
+ "test_histogram_no_meta_1_duration_seconds": {
+ name: "test_histogram_no_meta_1_duration_seconds",
+ typ: model.MetricTypeHistogram,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ },
+ },
+ "test_histogram_no_meta_2_duration_seconds": {
+ name: "test_histogram_no_meta_2_duration_seconds",
+ typ: model.MetricTypeHistogram,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "All types": {
+ input: dataAllTypes,
+ want: MetricFamilies{
+ "test_gauge_metric_1": {
+ name: "test_gauge_metric_1",
+ help: "Test Gauge Metric 1",
+ typ: model.MetricTypeGauge,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ gauge: &Gauge{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ gauge: &Gauge{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ gauge: &Gauge{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ gauge: &Gauge{value: 14},
+ },
+ },
+ },
+ "test_gauge_metric_2": {
+ name: "test_gauge_metric_2",
+ typ: model.MetricTypeGauge,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ gauge: &Gauge{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ gauge: &Gauge{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ gauge: &Gauge{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ gauge: &Gauge{value: 14},
+ },
+ },
+ },
+ "test_counter_metric_1_total": {
+ name: "test_counter_metric_1_total",
+ help: "Test Counter Metric 1",
+ typ: model.MetricTypeCounter,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ counter: &Counter{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ counter: &Counter{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ counter: &Counter{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ counter: &Counter{value: 14},
+ },
+ },
+ },
+ "test_counter_metric_2_total": {
+ name: "test_counter_metric_2_total",
+ typ: model.MetricTypeCounter,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ counter: &Counter{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ counter: &Counter{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ counter: &Counter{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ counter: &Counter{value: 14},
+ },
+ },
+ },
+ "test_summary_1_duration_microseconds": {
+ name: "test_summary_1_duration_microseconds",
+ help: "Test Summary Metric 1",
+ typ: model.MetricTypeSummary,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ },
+ },
+ "test_summary_2_duration_microseconds": {
+ name: "test_summary_2_duration_microseconds",
+ typ: model.MetricTypeSummary,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ },
+ },
+ "test_histogram_1_duration_seconds": {
+ name: "test_histogram_1_duration_seconds",
+ help: "Test Histogram Metric 1",
+ typ: model.MetricTypeHistogram,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ },
+ },
+ "test_histogram_2_duration_seconds": {
+ name: "test_histogram_2_duration_seconds",
+ typ: model.MetricTypeHistogram,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ },
+ },
+ "test_gauge_no_meta_metric_1": {
+ name: "test_gauge_no_meta_metric_1",
+ typ: model.MetricTypeUnknown,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ untyped: &Untyped{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ untyped: &Untyped{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ untyped: &Untyped{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ untyped: &Untyped{value: 14},
+ },
+ },
+ },
+ "test_gauge_no_meta_metric_2": {
+ name: "test_gauge_no_meta_metric_2",
+ typ: model.MetricTypeUnknown,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ untyped: &Untyped{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ untyped: &Untyped{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ untyped: &Untyped{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ untyped: &Untyped{value: 14},
+ },
+ },
+ },
+ "test_counter_no_meta_metric_1_total": {
+ name: "test_counter_no_meta_metric_1_total",
+ typ: model.MetricTypeUnknown,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ untyped: &Untyped{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ untyped: &Untyped{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ untyped: &Untyped{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ untyped: &Untyped{value: 14},
+ },
+ },
+ },
+ "test_counter_no_meta_metric_2_total": {
+ name: "test_counter_no_meta_metric_2_total",
+ typ: model.MetricTypeUnknown,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ untyped: &Untyped{value: 11},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ untyped: &Untyped{value: 12},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ untyped: &Untyped{value: 13},
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ untyped: &Untyped{value: 14},
+ },
+ },
+ },
+ "test_summary_no_meta_1_duration_microseconds": {
+ name: "test_summary_no_meta_1_duration_microseconds",
+ typ: model.MetricTypeSummary,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ summary: &Summary{
+ sum: 283201.29,
+ count: 31,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 4931.921},
+ {quantile: 0.9, value: 4932.921},
+ {quantile: 0.99, value: 4933.921},
+ },
+ },
+ },
+ },
+ },
+ "test_summary_no_meta_2_duration_microseconds": {
+ name: "test_summary_no_meta_2_duration_microseconds",
+ typ: model.MetricTypeSummary,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ summary: &Summary{
+ sum: 383201.29,
+ count: 41,
+ quantiles: []Quantile{
+ {quantile: 0.5, value: 5931.921},
+ {quantile: 0.9, value: 5932.921},
+ {quantile: 0.99, value: 5933.921},
+ },
+ },
+ },
+ },
+ },
+ "test_histogram_no_meta_1_duration_seconds": {
+ name: "test_histogram_no_meta_1_duration_seconds",
+ typ: model.MetricTypeHistogram,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ histogram: &Histogram{
+ sum: 0.00147889,
+ count: 6,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 4},
+ {upperBound: 0.5, cumulativeCount: 5},
+ {upperBound: math.Inf(1), cumulativeCount: 6},
+ },
+ },
+ },
+ },
+ },
+ "test_histogram_no_meta_2_duration_seconds": {
+ name: "test_histogram_no_meta_2_duration_seconds",
+ typ: model.MetricTypeHistogram,
+ metrics: []Metric{
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value1"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value2"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value3"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ {
+ labels: labels.Labels{{Name: "label1", Value: "value4"}},
+ histogram: &Histogram{
+ sum: 0.00247889,
+ count: 9,
+ buckets: []Bucket{
+ {upperBound: 0.1, cumulativeCount: 7},
+ {upperBound: 0.5, cumulativeCount: 8},
+ {upperBound: math.Inf(1), cumulativeCount: 9},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ var p promTextParser
+
+ for i := 0; i < 10; i++ {
+ t.Run(fmt.Sprintf("parse num %d", i+1), func(t *testing.T) {
+ mfs, err := p.parseToMetricFamilies(test.input)
+ if len(test.want) > 0 {
+ assert.Equal(t, test.want, mfs)
+ } else {
+ assert.Error(t, err)
+ }
+ })
+ }
+ })
+ }
+}
+
+func TestPromTextParser_parseToMetricFamiliesWithSelector(t *testing.T) {
+ sr, err := selector.Parse(`test_gauge_metric_1{label1="value2"}`)
+ require.NoError(t, err)
+
+ p := promTextParser{sr: sr}
+
+ txt := []byte(`
+test_gauge_metric_1{label1="value1"} 1
+test_gauge_metric_1{label1="value2"} 1
+test_gauge_metric_2{label1="value1"} 1
+test_gauge_metric_2{label1="value2"} 1
+`)
+
+ want := MetricFamilies{
+ "test_gauge_metric_1": &MetricFamily{
+ name: "test_gauge_metric_1",
+ typ: model.MetricTypeUnknown,
+ metrics: []Metric{
+ {labels: labels.Labels{{Name: "label1", Value: "value2"}}, untyped: &Untyped{value: 1}},
+ },
+ },
+ }
+
+ mfs, err := p.parseToMetricFamilies(txt)
+
+ require.NoError(t, err)
+ assert.Equal(t, want, mfs)
+}
+
+func TestPromTextParser_parseToSeries(t *testing.T) {
+ tests := map[string]struct {
+ input []byte
+ want Series
+ }{
+ "All types": {
+ input: []byte(`
+# HELP test_gauge_metric_1 Test Gauge Metric 1
+# TYPE test_gauge_metric_1 gauge
+test_gauge_metric_1{label1="value1"} 11
+test_gauge_no_meta_metric_1{label1="value1"} 11
+# HELP test_counter_metric_1_total Test Counter Metric 1
+# TYPE test_counter_metric_1_total counter
+test_counter_metric_1_total{label1="value1"} 11
+test_counter_no_meta_metric_1_total{label1="value1"} 11
+# HELP test_summary_1_duration_microseconds Test Summary Metric 1
+# TYPE test_summary_1_duration_microseconds summary
+test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921
+test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_1_duration_microseconds_count{label1="value1"} 31
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921
+test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31
+# HELP test_histogram_1_duration_seconds Test Histogram Metric 1
+# TYPE test_histogram_1_duration_seconds histogram
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5
+test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6
+test_histogram_1_duration_seconds_sum{label1="value1"} 0.00147889
+test_histogram_1_duration_seconds_count{label1="value1"} 6
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.1"} 4
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.5"} 5
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6
+test_histogram_no_meta_1_duration_seconds_sum{label1="value1"} 0.00147889
+test_histogram_no_meta_1_duration_seconds_count{label1="value1"} 6
+`),
+ want: Series{
+ // Gauge
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_gauge_metric_1"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 11,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_gauge_no_meta_metric_1"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 11,
+ },
+ // Counter
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_counter_metric_1_total"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 11,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_counter_no_meta_metric_1_total"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 11,
+ },
+ // Summary
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_1_duration_microseconds"},
+ {Name: "label1", Value: "value1"},
+ {Name: "quantile", Value: "0.5"},
+ },
+ Value: 4931.921,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_1_duration_microseconds"},
+ {Name: "label1", Value: "value1"},
+ {Name: "quantile", Value: "0.9"},
+ },
+ Value: 4932.921,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_1_duration_microseconds"},
+ {Name: "label1", Value: "value1"},
+ {Name: "quantile", Value: "0.99"},
+ },
+ Value: 4933.921,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_1_duration_microseconds_sum"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 283201.29,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_1_duration_microseconds_count"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 31,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds"},
+ {Name: "label1", Value: "value1"},
+ {Name: "quantile", Value: "0.5"},
+ },
+ Value: 4931.921,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds"},
+ {Name: "label1", Value: "value1"},
+ {Name: "quantile", Value: "0.9"},
+ },
+ Value: 4932.921,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds"},
+ {Name: "label1", Value: "value1"},
+ {Name: "quantile", Value: "0.99"},
+ },
+ Value: 4933.921,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds_sum"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 283201.29,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_summary_no_meta_1_duration_microseconds_count"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 31,
+ },
+ // Histogram
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_1_duration_seconds_bucket"},
+ {Name: "label1", Value: "value1"},
+ {Name: "le", Value: "0.1"},
+ },
+ Value: 4,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_1_duration_seconds_bucket"},
+ {Name: "label1", Value: "value1"},
+ {Name: "le", Value: "0.5"},
+ },
+ Value: 5,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_1_duration_seconds_bucket"},
+ {Name: "label1", Value: "value1"},
+ {Name: "le", Value: "+Inf"},
+ },
+ Value: 6,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_1_duration_seconds_sum"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 0.00147889,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_1_duration_seconds_count"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 6,
+ },
+
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_bucket"},
+ {Name: "label1", Value: "value1"},
+ {Name: "le", Value: "0.1"},
+ },
+ Value: 4,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_bucket"},
+ {Name: "label1", Value: "value1"},
+ {Name: "le", Value: "0.5"},
+ },
+ Value: 5,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_bucket"},
+ {Name: "label1", Value: "value1"},
+ {Name: "le", Value: "+Inf"},
+ },
+ Value: 6,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_sum"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 0.00147889,
+ },
+ {
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_histogram_no_meta_1_duration_seconds_count"},
+ {Name: "label1", Value: "value1"},
+ },
+ Value: 6,
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ var p promTextParser
+
+ for i := 0; i < 10; i++ {
+ t.Run(fmt.Sprintf("parse num %d", i+1), func(t *testing.T) {
+ series, err := p.parseToSeries(test.input)
+
+ if len(test.want) > 0 {
+ test.want.Sort()
+ assert.Equal(t, test.want, series)
+ } else {
+ assert.Error(t, err)
+ }
+ })
+ }
+ })
+ }
+}
+
+func TestPromTextParser_parseToSeriesWithSelector(t *testing.T) {
+ sr, err := selector.Parse(`test_gauge_metric_1{label1="value2"}`)
+ require.NoError(t, err)
+
+ p := promTextParser{sr: sr}
+
+ txt := []byte(`
+test_gauge_metric_1{label1="value1"} 1
+test_gauge_metric_1{label1="value2"} 1
+test_gauge_metric_2{label1="value1"} 1
+test_gauge_metric_2{label1="value2"} 1
+`)
+
+ want := Series{SeriesSample{
+ Labels: labels.Labels{
+ {Name: "__name__", Value: "test_gauge_metric_1"},
+ {Name: "label1", Value: "value2"},
+ },
+ Value: 1,
+ }}
+
+ series, err := p.parseToSeries(txt)
+
+ require.NoError(t, err)
+ assert.Equal(t, want, series)
+}
+
+func joinData(data ...[]byte) []byte {
+ var buf bytes.Buffer
+ for _, v := range data {
+ _, _ = buf.Write(v)
+ _ = buf.WriteByte('\n')
+ }
+ return buf.Bytes()
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/README.md b/src/go/plugin/go.d/pkg/prometheus/selector/README.md
new file mode 100644
index 000000000..601eb0891
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/selector/README.md
@@ -0,0 +1,102 @@
+<!--
+title: "Time series selector"
+custom_edit_url: "/src/go/plugin/go.d/pkg/prometheus/selector/README.md"
+sidebar_label: "Time series selector"
+learn_status: "Published"
+learn_rel_path: "Developers/External plugins/go.d.plugin/Helper Packages"
+-->
+
+# Time series selector
+
+Selectors allow selecting and filtering a set of time series.
+
+## Simple Selector
+
+In the simplest form, you only need to specify a metric name.
+
+### Syntax
+
+```cmd
+ <line> ::= <metric_name_pattern>
+ <metric_name_pattern> ::= simple pattern
+```
+
+The metric name pattern syntax is [simple pattern](/src/libnetdata/simple_pattern/README.md).
+
+### Examples
+
+This example selects all time series that have the `go_memstats_alloc_bytes` metric name:
+
+```cmd
+go_memstats_alloc_bytes
+```
+
+This example selects all time series with metric names that start with `go_memstats_`:
+
+```cmd
+go_memstats_*
+```
+
+This example selects all time series with metric names that start with `go_`, except those starting with `go_memstats_`:
+
+```cmd
+!go_memstats_* go_*
+```
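+
+The following is a minimal Go sketch, not part of this change, showing how such an expression could be parsed and applied using the `Parse` and `Matches` API of this package (import paths as used elsewhere in this patch):
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+
+    "github.com/prometheus/prometheus/model/labels"
+)
+
+func main() {
+    // Keep every series whose metric name starts with "go_memstats_".
+    sr, err := selector.Parse("go_memstats_*")
+    if err != nil {
+        panic(err)
+    }
+
+    lbs := labels.Labels{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}}
+
+    fmt.Println(sr.Matches(lbs)) // true
+}
+```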
+
+## Advanced Selector
+
+It is possible to filter these time series further by appending a comma-separated list of label matchers in curly braces (`{}`).
+
+### Syntax
+
+```cmd
+ <line> ::= [ <metric_name_pattern> ]{ <list_of_selectors> }
+ <metric_name_pattern> ::= simple pattern
+ <list_of_selectors> ::= a comma separated list <label_name><op><label_value_pattern>
+ <label_name> ::= an exact label name
+ <op> ::= [ '=', '!=', '=~', '!~', '=*', '!*' ]
+ <label_value_pattern> ::= a label value pattern, depends on <op>
+```
+
+The metric name pattern syntax is [simple pattern](/src/libnetdata/simple_pattern/README.md).
+
+Label matching operators:
+
+- `=`: Match labels that are exactly equal to the provided string.
+- `!=`: Match labels that are not equal to the provided string.
+- `=~`: Match labels that [regex-match](https://golang.org/pkg/regexp/syntax/) the provided string.
+- `!~`: Match labels that do not [regex-match](https://golang.org/pkg/regexp/syntax/) the provided string.
+- `=*`: Match labels that [simple-pattern-match](/src/libnetdata/simple_pattern/README.md) the provided string.
+- `!*`: Match labels that do not [simple-pattern-match](/src/libnetdata/simple_pattern/README.md) the provided string.
+
+### Examples
+
+This example selects all time series that:
+
+- have the `node_cooling_device_cur_state` metric name and
+- have a `type` label value not equal to `Fan`:
+
+```cmd
+node_cooling_device_cur_state{type!="Fan"}
+```
+
+This example selects all time series that:
+
+- have the `node_filesystem_size_bytes` metric name and
+- have a `device` label value of either `/dev/nvme0n1p1` or `/dev/nvme0n1p2` and
+- have an `fstype` label value equal to `ext4`:
+
+```cmd
+node_filesystem_size_bytes{device=~"/dev/nvme0n1p1$|/dev/nvme0n1p2$",fstype="ext4"}
+```
+
+Label matchers can also be applied to metric names by matching against the internal `__name__` label.
+
+For example, the expression `node_filesystem_size_bytes` is equivalent to `{__name__="node_filesystem_size_bytes"}`.
+This allows using all operators (other than `=*`) for metric name matching.
+
+The following expression selects all metrics that have a name starting with `node_`:
+
+```cmd
+{__name__=*"node_*"}
+```
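+
+As a rough, hedged illustration (again not part of this change), an advanced selector with a label matcher could be applied from Go code like so, reusing the same `Parse` and `Matches` API:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/prometheus/selector"
+
+    "github.com/prometheus/prometheus/model/labels"
+)
+
+func main() {
+    // Keep node_cooling_device_cur_state series whose "type" label is not "Fan".
+    sr, err := selector.Parse(`node_cooling_device_cur_state{type!="Fan"}`)
+    if err != nil {
+        panic(err)
+    }
+
+    fan := labels.Labels{
+        {Name: labels.MetricName, Value: "node_cooling_device_cur_state"},
+        {Name: "type", Value: "Fan"},
+    }
+    cpu := labels.Labels{
+        {Name: labels.MetricName, Value: "node_cooling_device_cur_state"},
+        {Name: "type", Value: "Processor"},
+    }
+
+    fmt.Println(sr.Matches(fan)) // false: the "type" label equals "Fan"
+    fmt.Println(sr.Matches(cpu)) // true: the name matches and "type" is not "Fan"
+}
+```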
diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/expr.go b/src/go/plugin/go.d/pkg/prometheus/selector/expr.go
new file mode 100644
index 000000000..6f61cf3a5
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/selector/expr.go
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package selector
+
+import "fmt"
+
+type Expr struct {
+ Allow []string `yaml:"allow,omitempty" json:"allow"`
+ Deny []string `yaml:"deny,omitempty" json:"deny"`
+}
+
+func (e Expr) Empty() bool {
+ return len(e.Allow) == 0 && len(e.Deny) == 0
+}
+
+func (e Expr) Parse() (Selector, error) {
+ if e.Empty() {
+ return nil, nil
+ }
+
+ var srs []Selector
+ var allow Selector
+ var deny Selector
+
+ for _, item := range e.Allow {
+ sr, err := Parse(item)
+ if err != nil {
+ return nil, fmt.Errorf("parse selector '%s': %v", item, err)
+ }
+ srs = append(srs, sr)
+ }
+
+ switch len(srs) {
+ case 0:
+ allow = trueSelector{}
+ case 1:
+ allow = srs[0]
+ default:
+ allow = Or(srs[0], srs[1], srs[2:]...)
+ }
+
+ srs = srs[:0]
+ for _, item := range e.Deny {
+ sr, err := Parse(item)
+ if err != nil {
+ return nil, fmt.Errorf("parse selector '%s': %v", item, err)
+ }
+ srs = append(srs, sr)
+ }
+
+ switch len(srs) {
+ case 0:
+ deny = falseSelector{}
+ case 1:
+ deny = srs[0]
+ default:
+ deny = Or(srs[0], srs[1], srs[2:]...)
+ }
+
+ return And(allow, Not(deny)), nil
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/expr_test.go b/src/go/plugin/go.d/pkg/prometheus/selector/expr_test.go
new file mode 100644
index 000000000..598cef9b8
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/selector/expr_test.go
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package selector
+
+import (
+ "testing"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestExpr_Empty(t *testing.T) {
+ tests := map[string]struct {
+ expr Expr
+ expected bool
+ }{
+ "empty: both allow and deny": {
+ expr: Expr{
+ Allow: []string{},
+ Deny: []string{},
+ },
+ expected: true,
+ },
+ "nil: both allow and deny": {
+ expected: true,
+ },
+ "nil, empty: allow, deny": {
+ expr: Expr{
+ Deny: []string{""},
+ },
+ expected: false,
+ },
+ "empty, nil: allow, deny": {
+ expr: Expr{
+ Allow: []string{""},
+ },
+ expected: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if test.expected {
+ assert.True(t, test.expr.Empty())
+ } else {
+ assert.False(t, test.expr.Empty())
+ }
+ })
+ }
+}
+
+func TestExpr_Parse(t *testing.T) {
+ tests := map[string]struct {
+ expr Expr
+ expectedSr Selector
+ expectedErr bool
+ }{
+ "not set: both allow and deny": {
+ expr: Expr{},
+ },
+ "set: both allow and deny": {
+ expr: Expr{
+ Allow: []string{
+ "go_memstats_*",
+ "node_*",
+ },
+ Deny: []string{
+ "go_memstats_frees_total",
+ "node_cooling_*",
+ },
+ },
+ expectedSr: andSelector{
+ lhs: orSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: mustSPName("node_*"),
+ },
+ rhs: Not(orSelector{
+ lhs: mustSPName("go_memstats_frees_total"),
+ rhs: mustSPName("node_cooling_*"),
+ }),
+ },
+ },
+ "set: only includes": {
+ expr: Expr{
+ Allow: []string{
+ "go_memstats_*",
+ "node_*",
+ },
+ },
+ expectedSr: andSelector{
+ lhs: orSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: mustSPName("node_*"),
+ },
+ rhs: Not(falseSelector{}),
+ },
+ },
+ "set: only excludes": {
+ expr: Expr{
+ Deny: []string{
+ "go_memstats_frees_total",
+ "node_cooling_*",
+ },
+ },
+ expectedSr: andSelector{
+ lhs: trueSelector{},
+ rhs: Not(orSelector{
+ lhs: mustSPName("go_memstats_frees_total"),
+ rhs: mustSPName("node_cooling_*"),
+ }),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ m, err := test.expr.Parse()
+
+ if test.expectedErr {
+ assert.Error(t, err)
+ } else {
+ assert.Equal(t, test.expectedSr, m)
+ }
+ })
+ }
+}
+
+func TestExprSelector_Matches(t *testing.T) {
+ tests := map[string]struct {
+ expr Expr
+ lbs labels.Labels
+ expectedMatches bool
+ }{
+ "allow matches: single pattern": {
+ expr: Expr{
+ Allow: []string{"go_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: true,
+ },
+ "allow matches: several patterns": {
+ expr: Expr{
+ Allow: []string{"node_*", "go_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: true,
+ },
+ "allow not matches": {
+ expr: Expr{
+ Allow: []string{"node_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: false,
+ },
+ "deny matches: single pattern": {
+ expr: Expr{
+ Deny: []string{"go_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: false,
+ },
+ "deny matches: several patterns": {
+ expr: Expr{
+ Deny: []string{"node_*", "go_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: false,
+ },
+ "deny not matches": {
+ expr: Expr{
+ Deny: []string{"node_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: true,
+ },
+ "allow and deny matches: single pattern": {
+ expr: Expr{
+ Allow: []string{"go_*"},
+ Deny: []string{"go_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: false,
+ },
+ "allow and deny matches: several patterns": {
+ expr: Expr{
+ Allow: []string{"node_*", "go_*"},
+ Deny: []string{"node_*", "go_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: false,
+ },
+ "allow matches and deny not matches": {
+ expr: Expr{
+ Allow: []string{"go_*"},
+ Deny: []string{"node_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: true,
+ },
+ "allow not matches and deny matches": {
+ expr: Expr{
+ Allow: []string{"node_*"},
+ Deny: []string{"go_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: false,
+ },
+ "allow not matches and deny not matches": {
+ expr: Expr{
+ Allow: []string{"node_*"},
+ Deny: []string{"node_*"},
+ },
+ lbs: []labels.Label{{Name: labels.MetricName, Value: "go_memstats_alloc_bytes"}},
+ expectedMatches: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sr, err := test.expr.Parse()
+ require.NoError(t, err)
+
+ if test.expectedMatches {
+ assert.True(t, sr.Matches(test.lbs))
+ } else {
+ assert.False(t, sr.Matches(test.lbs))
+ }
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/logical.go b/src/go/plugin/go.d/pkg/prometheus/selector/logical.go
new file mode 100644
index 000000000..1556d1715
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/selector/logical.go
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package selector
+
+import (
+ "github.com/prometheus/prometheus/model/labels"
+)
+
+type (
+ trueSelector struct{}
+ falseSelector struct{}
+ negSelector struct{ s Selector }
+ andSelector struct{ lhs, rhs Selector }
+ orSelector struct{ lhs, rhs Selector }
+)
+
+func (trueSelector) Matches(_ labels.Labels) bool { return true }
+func (falseSelector) Matches(_ labels.Labels) bool { return false }
+func (s negSelector) Matches(lbs labels.Labels) bool { return !s.s.Matches(lbs) }
+func (s andSelector) Matches(lbs labels.Labels) bool { return s.lhs.Matches(lbs) && s.rhs.Matches(lbs) }
+func (s orSelector) Matches(lbs labels.Labels) bool { return s.lhs.Matches(lbs) || s.rhs.Matches(lbs) }
+
+// True returns a selector which always returns true
+func True() Selector {
+ return trueSelector{}
+}
+
+// And returns a selector which returns true only if all of its sub-selectors return true
+func And(lhs, rhs Selector, others ...Selector) Selector {
+ s := andSelector{lhs: lhs, rhs: rhs}
+ if len(others) == 0 {
+ return s
+ }
+ return And(s, others[0], others[1:]...)
+}
+
+// Or returns a selector which returns true if any of its sub-selectors return true
+func Or(lhs, rhs Selector, others ...Selector) Selector {
+ s := orSelector{lhs: lhs, rhs: rhs}
+ if len(others) == 0 {
+ return s
+ }
+ return Or(s, others[0], others[1:]...)
+}
+
+// Not returns a selector which returns the negation of the sub-selector's result
+func Not(s Selector) Selector {
+ return negSelector{s}
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/logical_test.go b/src/go/plugin/go.d/pkg/prometheus/selector/logical_test.go
new file mode 100644
index 000000000..239c7f715
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/selector/logical_test.go
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package selector
+
+import (
+ "testing"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestTrueSelector_Matches(t *testing.T) {
+ tests := map[string]struct {
+ sr trueSelector
+ lbs labels.Labels
+ expected bool
+ }{
+ "not empty labels": {
+ lbs: labels.Labels{{Name: labels.MetricName, Value: "name"}},
+ expected: true,
+ },
+ "empty labels": {
+ expected: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if test.expected {
+ assert.True(t, test.sr.Matches(test.lbs))
+ } else {
+ assert.False(t, test.sr.Matches(test.lbs))
+ }
+ })
+ }
+}
+
+func TestFalseSelector_Matches(t *testing.T) {
+ tests := map[string]struct {
+ sr falseSelector
+ lbs labels.Labels
+ expected bool
+ }{
+ "not empty labels": {
+ lbs: labels.Labels{{Name: labels.MetricName, Value: "name"}},
+ expected: false,
+ },
+ "empty labels": {
+ expected: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if test.expected {
+ assert.True(t, test.sr.Matches(test.lbs))
+ } else {
+ assert.False(t, test.sr.Matches(test.lbs))
+ }
+ })
+ }
+}
+
+func TestNegSelector_Matches(t *testing.T) {
+ tests := map[string]struct {
+ sr negSelector
+ lbs labels.Labels
+ expected bool
+ }{
+ "true matcher": {
+ sr: negSelector{trueSelector{}},
+ lbs: labels.Labels{{Name: labels.MetricName, Value: "name"}},
+ expected: false,
+ },
+ "false matcher": {
+ sr: negSelector{falseSelector{}},
+ lbs: labels.Labels{{Name: labels.MetricName, Value: "name"}},
+ expected: true,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ if test.expected {
+ assert.True(t, test.sr.Matches(test.lbs))
+ } else {
+ assert.False(t, test.sr.Matches(test.lbs))
+ }
+ })
+ }
+}
+
+func TestAndSelector_Matches(t *testing.T) {
+ tests := map[string]struct {
+ sr andSelector
+ lbs labels.Labels
+ expected bool
+ }{
+ "true, true": {
+ sr: andSelector{lhs: trueSelector{}, rhs: trueSelector{}},
+ expected: true,
+ },
+ "true, false": {
+ sr: andSelector{lhs: trueSelector{}, rhs: falseSelector{}},
+ expected: false,
+ },
+ "false, true": {
+ sr: andSelector{lhs: falseSelector{}, rhs: trueSelector{}},
+ expected: false,
+ },
+ "false, false": {
+ sr: andSelector{lhs: falseSelector{}, rhs: falseSelector{}},
+ expected: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.expected, test.sr.Matches(test.lbs))
+ })
+ }
+}
+
+func TestOrSelector_Matches(t *testing.T) {
+ tests := map[string]struct {
+ sr orSelector
+ lbs labels.Labels
+ expected bool
+ }{
+ "true, true": {
+ sr: orSelector{lhs: trueSelector{}, rhs: trueSelector{}},
+ expected: true,
+ },
+ "true, false": {
+ sr: orSelector{lhs: trueSelector{}, rhs: falseSelector{}},
+ expected: true,
+ },
+ "false, true": {
+ sr: orSelector{lhs: falseSelector{}, rhs: trueSelector{}},
+ expected: true,
+ },
+ "false, false": {
+ sr: orSelector{lhs: falseSelector{}, rhs: falseSelector{}},
+ expected: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ assert.Equal(t, test.expected, test.sr.Matches(test.lbs))
+ })
+ }
+}
+
+func Test_And(t *testing.T) {
+ tests := map[string]struct {
+ srs []Selector
+ expected Selector
+ }{
+ "2 selectors": {
+ srs: []Selector{trueSelector{}, trueSelector{}},
+ expected: andSelector{
+ lhs: trueSelector{},
+ rhs: trueSelector{},
+ },
+ },
+ "4 selectors": {
+ srs: []Selector{trueSelector{}, trueSelector{}, trueSelector{}, trueSelector{}},
+ expected: andSelector{
+ lhs: andSelector{
+ lhs: andSelector{
+ lhs: trueSelector{},
+ rhs: trueSelector{},
+ },
+ rhs: trueSelector{},
+ },
+ rhs: trueSelector{}},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ require.GreaterOrEqual(t, len(test.srs), 2)
+
+ s := And(test.srs[0], test.srs[1], test.srs[2:]...)
+ assert.Equal(t, test.expected, s)
+ })
+ }
+}
+
+func Test_Or(t *testing.T) {
+ tests := map[string]struct {
+ srs []Selector
+ expected Selector
+ }{
+ "2 selectors": {
+ srs: []Selector{trueSelector{}, trueSelector{}},
+ expected: orSelector{
+ lhs: trueSelector{},
+ rhs: trueSelector{},
+ },
+ },
+ "4 selectors": {
+ srs: []Selector{trueSelector{}, trueSelector{}, trueSelector{}, trueSelector{}},
+ expected: orSelector{
+ lhs: orSelector{
+ lhs: orSelector{
+ lhs: trueSelector{},
+ rhs: trueSelector{},
+ },
+ rhs: trueSelector{},
+ },
+ rhs: trueSelector{}},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ require.GreaterOrEqual(t, len(test.srs), 2)
+
+ s := Or(test.srs[0], test.srs[1], test.srs[2:]...)
+ assert.Equal(t, test.expected, s)
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/parse.go b/src/go/plugin/go.d/pkg/prometheus/selector/parse.go
new file mode 100644
index 000000000..81e970c48
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/selector/parse.go
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package selector
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+)
+
+var (
+ reLV = regexp.MustCompile(`^(?P<label_name>[a-zA-Z0-9_]+)(?P<op>=~|!~|=\*|!\*|=|!=)"(?P<pattern>.+)"$`)
+)
+
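+// Parse parses a selector expression into a Selector.
+// All matchers in the expression are combined with And, so every one of them must match.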
+func Parse(expr string) (Selector, error) {
+ var srs []Selector
+ lvs := strings.Split(unsugarExpr(expr), ",")
+
+ for _, lv := range lvs {
+ sr, err := parseSelector(lv)
+ if err != nil {
+ return nil, err
+ }
+ srs = append(srs, sr)
+ }
+
+ switch len(srs) {
+ case 0:
+ return nil, nil
+ case 1:
+ return srs[0], nil
+ default:
+ return And(srs[0], srs[1], srs[2:]...), nil
+ }
+}
+
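+// parseSelector parses a single name<op>"pattern" matcher; the result is negated
+// when the operator is prefixed with '!'.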
+func parseSelector(line string) (Selector, error) {
+ sub := reLV.FindStringSubmatch(strings.TrimSpace(line))
+ if sub == nil {
+ return nil, fmt.Errorf("invalid selector syntax: '%s'", line)
+ }
+
+ name, op, pattern := sub[1], sub[2], strings.Trim(sub[3], "\"")
+
+ var m matcher.Matcher
+ var err error
+
+ switch op {
+ case OpEqual, OpNegEqual:
+ m, err = matcher.NewStringMatcher(pattern, true, true)
+ case OpRegexp, OpNegRegexp:
+ m, err = matcher.NewRegExpMatcher(pattern)
+ case OpSimplePatterns, OpNegSimplePatterns:
+ m, err = matcher.NewSimplePatternsMatcher(pattern)
+ default:
+ err = fmt.Errorf("unknown matching operator: %s", op)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ sr := labelSelector{
+ name: name,
+ m: m,
+ }
+
+ if neg := strings.HasPrefix(op, "!"); neg {
+ return Not(sr), nil
+ }
+ return sr, nil
+}
+
+func unsugarExpr(expr string) string {
+ // name => __name__=*"name"
+ // name{label="value"} => __name__=*"name",label="value"
+ // {label="value"} => label="value"
+ expr = strings.TrimSpace(expr)
+
+ switch idx := strings.IndexByte(expr, '{'); true {
+ case idx == -1:
+ expr = fmt.Sprintf(`__name__%s"%s"`,
+ OpSimplePatterns,
+ strings.TrimSpace(expr),
+ )
+ case idx == 0:
+ expr = strings.Trim(expr, "{}")
+ default:
+ expr = fmt.Sprintf(`__name__%s"%s",%s`,
+ OpSimplePatterns,
+ strings.TrimSpace(expr[:idx]),
+ strings.Trim(expr[idx:], "{}"),
+ )
+ }
+ return expr
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/parse_test.go b/src/go/plugin/go.d/pkg/prometheus/selector/parse_test.go
new file mode 100644
index 000000000..1a1f8ab79
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/selector/parse_test.go
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package selector
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+ "github.com/prometheus/prometheus/model/labels"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestParse(t *testing.T) {
+ tests := map[string]struct {
+ input string
+ expectedSr Selector
+ expectedErr bool
+ }{
+ "sp op: only metric name": {
+ input: "go_memstats_alloc_bytes !go_memstats_* *",
+ expectedSr: mustSPName("go_memstats_alloc_bytes !go_memstats_* *"),
+ },
+ "string op: metric name with labels": {
+ input: fmt.Sprintf(`go_memstats_*{label%s"value"}`, OpEqual),
+ expectedSr: andSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: mustString("label", "value"),
+ },
+ },
+ "neg string op: metric name with labels": {
+ input: fmt.Sprintf(`go_memstats_*{label%s"value"}`, OpNegEqual),
+ expectedSr: andSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: Not(mustString("label", "value")),
+ },
+ },
+ "regexp op: metric name with labels": {
+ input: fmt.Sprintf(`go_memstats_*{label%s"valu.+"}`, OpRegexp),
+ expectedSr: andSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: mustRegexp("label", "valu.+"),
+ },
+ },
+ "neg regexp op: metric name with labels": {
+ input: fmt.Sprintf(`go_memstats_*{label%s"valu.+"}`, OpNegRegexp),
+ expectedSr: andSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: Not(mustRegexp("label", "valu.+")),
+ },
+ },
+ "sp op: metric name with labels": {
+ input: fmt.Sprintf(`go_memstats_*{label%s"valu*"}`, OpSimplePatterns),
+ expectedSr: andSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: mustSP("label", "valu*"),
+ },
+ },
+ "neg sp op: metric name with labels": {
+ input: fmt.Sprintf(`go_memstats_*{label%s"valu*"}`, OpNegSimplePatterns),
+ expectedSr: andSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: Not(mustSP("label", "valu*")),
+ },
+ },
+ "metric name with several labels": {
+ input: fmt.Sprintf(`go_memstats_*{label1%s"value1",label2%s"value2"}`, OpEqual, OpEqual),
+ expectedSr: andSelector{
+ lhs: andSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: mustString("label1", "value1"),
+ },
+ rhs: mustString("label2", "value2"),
+ },
+ },
+ "only labels (unsugar)": {
+ input: fmt.Sprintf(`{__name__%s"go_memstats_*",label1%s"value1",label2%s"value2"}`,
+ OpSimplePatterns, OpEqual, OpEqual),
+ expectedSr: andSelector{
+ lhs: andSelector{
+ lhs: mustSPName("go_memstats_*"),
+ rhs: mustString("label1", "value1"),
+ },
+ rhs: mustString("label2", "value2"),
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sr, err := Parse(test.input)
+
+ if test.expectedErr {
+ assert.Error(t, err)
+ } else {
+ assert.Equal(t, test.expectedSr, sr)
+ }
+ })
+ }
+}
+
+func mustSPName(pattern string) Selector {
+ return mustSP(labels.MetricName, pattern)
+}
+
+func mustString(name string, pattern string) Selector {
+ return labelSelector{name: name, m: matcher.Must(matcher.NewStringMatcher(pattern, true, true))}
+}
+
+func mustRegexp(name string, pattern string) Selector {
+ return labelSelector{name: name, m: matcher.Must(matcher.NewRegExpMatcher(pattern))}
+}
+
+func mustSP(name string, pattern string) Selector {
+ return labelSelector{name: name, m: matcher.Must(matcher.NewSimplePatternsMatcher(pattern))}
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/selector.go b/src/go/plugin/go.d/pkg/prometheus/selector/selector.go
new file mode 100644
index 000000000..a42b846f2
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/selector/selector.go
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package selector
+
+import (
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/matcher"
+
+ "github.com/prometheus/prometheus/model/labels"
+)
+
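+// Selector checks whether a label set matches the selection criteria.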
+type Selector interface {
+ Matches(lbs labels.Labels) bool
+}
+
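+// Matching operators supported in selector expressions.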
+const (
+ OpEqual = "="
+ OpNegEqual = "!="
+ OpRegexp = "=~"
+ OpNegRegexp = "!~"
+ OpSimplePatterns = "=*"
+ OpNegSimplePatterns = "!*"
+)
+
+type labelSelector struct {
+ name string
+ m matcher.Matcher
+}
+
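+// Matches reports whether lbs is matched by the selector. It expects the
+// metric name (__name__) label to be the first element of lbs.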
+func (s labelSelector) Matches(lbs labels.Labels) bool {
+ if s.name == labels.MetricName {
+ return s.m.MatchString(lbs[0].Value)
+ }
+ if label, ok := lookupLabel(s.name, lbs[1:]); ok {
+ return s.m.MatchString(label.Value)
+ }
+ return false
+}
+
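+// Func is an adapter that allows an ordinary function to be used as a Selector.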
+type Func func(lbs labels.Labels) bool
+
+func (fn Func) Matches(lbs labels.Labels) bool {
+ return fn(lbs)
+}
+
+func lookupLabel(name string, lbs labels.Labels) (labels.Label, bool) {
+ for _, label := range lbs {
+ if label.Name == name {
+ return label, true
+ }
+ }
+ return labels.Label{}, false
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/selector/selector_test.go b/src/go/plugin/go.d/pkg/prometheus/selector/selector_test.go
new file mode 100644
index 000000000..aa3110b03
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/selector/selector_test.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package selector
+
+import (
+ "testing"
+)
+
+func TestLabelMatcher_Matches(t *testing.T) {
+
+}
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/counter-meta.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/counter-meta.txt
new file mode 100644
index 000000000..53eccda63
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/counter-meta.txt
@@ -0,0 +1,11 @@
+# HELP test_counter_metric_1_total Test Counter Metric 1
+# TYPE test_counter_metric_1_total counter
+test_counter_metric_1_total{label1="value1"} 11
+test_counter_metric_1_total{label1="value2"} 12
+test_counter_metric_1_total{label1="value3"} 13
+test_counter_metric_1_total{label1="value4"} 14
+# TYPE test_counter_metric_2_total counter
+test_counter_metric_2_total{label1="value1"} 11
+test_counter_metric_2_total{label1="value2"} 12
+test_counter_metric_2_total{label1="value3"} 13
+test_counter_metric_2_total{label1="value4"} 14
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/counter-no-meta.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/counter-no-meta.txt
new file mode 100644
index 000000000..afb11b9b8
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/counter-no-meta.txt
@@ -0,0 +1,8 @@
+test_counter_no_meta_metric_1_total{label1="value1"} 11
+test_counter_no_meta_metric_1_total{label1="value2"} 12
+test_counter_no_meta_metric_1_total{label1="value3"} 13
+test_counter_no_meta_metric_1_total{label1="value4"} 14
+test_counter_no_meta_metric_2_total{label1="value1"} 11
+test_counter_no_meta_metric_2_total{label1="value2"} 12
+test_counter_no_meta_metric_2_total{label1="value3"} 13
+test_counter_no_meta_metric_2_total{label1="value4"} 14
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/gauge-meta.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/gauge-meta.txt
new file mode 100644
index 000000000..c0773a426
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/gauge-meta.txt
@@ -0,0 +1,11 @@
+# HELP test_gauge_metric_1 Test Gauge Metric 1
+# TYPE test_gauge_metric_1 gauge
+test_gauge_metric_1{label1="value1"} 11
+test_gauge_metric_1{label1="value2"} 12
+test_gauge_metric_1{label1="value3"} 13
+test_gauge_metric_1{label1="value4"} 14
+# TYPE test_gauge_metric_2 gauge
+test_gauge_metric_2{label1="value1"} 11
+test_gauge_metric_2{label1="value2"} 12
+test_gauge_metric_2{label1="value3"} 13
+test_gauge_metric_2{label1="value4"} 14
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/gauge-no-meta.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/gauge-no-meta.txt
new file mode 100644
index 000000000..e89e0e4d9
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/gauge-no-meta.txt
@@ -0,0 +1,8 @@
+test_gauge_no_meta_metric_1{label1="value1"} 11
+test_gauge_no_meta_metric_1{label1="value2"} 12
+test_gauge_no_meta_metric_1{label1="value3"} 13
+test_gauge_no_meta_metric_1{label1="value4"} 14
+test_gauge_no_meta_metric_2{label1="value1"} 11
+test_gauge_no_meta_metric_2{label1="value2"} 12
+test_gauge_no_meta_metric_2{label1="value3"} 13
+test_gauge_no_meta_metric_2{label1="value4"} 14
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/histogram-meta.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/histogram-meta.txt
new file mode 100644
index 000000000..9b4b8a965
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/histogram-meta.txt
@@ -0,0 +1,43 @@
+# HELP test_histogram_1_duration_seconds Test Histogram Metric 1
+# TYPE test_histogram_1_duration_seconds histogram
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.1"} 4
+test_histogram_1_duration_seconds_bucket{label1="value1",le="0.5"} 5
+test_histogram_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6
+test_histogram_1_duration_seconds_sum{label1="value1"} 0.00147889
+test_histogram_1_duration_seconds_count{label1="value1"} 6
+test_histogram_1_duration_seconds_bucket{label1="value2",le="0.1"} 4
+test_histogram_1_duration_seconds_bucket{label1="value2",le="0.5"} 5
+test_histogram_1_duration_seconds_bucket{label1="value2",le="+Inf"} 6
+test_histogram_1_duration_seconds_sum{label1="value2"} 0.00147889
+test_histogram_1_duration_seconds_count{label1="value2"} 6
+test_histogram_1_duration_seconds_bucket{label1="value3",le="0.1"} 4
+test_histogram_1_duration_seconds_bucket{label1="value3",le="0.5"} 5
+test_histogram_1_duration_seconds_bucket{label1="value3",le="+Inf"} 6
+test_histogram_1_duration_seconds_sum{label1="value3"} 0.00147889
+test_histogram_1_duration_seconds_count{label1="value3"} 6
+test_histogram_1_duration_seconds_bucket{label1="value4",le="0.1"} 4
+test_histogram_1_duration_seconds_bucket{label1="value4",le="0.5"} 5
+test_histogram_1_duration_seconds_bucket{label1="value4",le="+Inf"} 6
+test_histogram_1_duration_seconds_sum{label1="value4"} 0.00147889
+test_histogram_1_duration_seconds_count{label1="value4"} 6
+# TYPE test_histogram_2_duration_seconds histogram
+test_histogram_2_duration_seconds_bucket{label1="value1",le="0.1"} 7
+test_histogram_2_duration_seconds_bucket{label1="value1",le="0.5"} 8
+test_histogram_2_duration_seconds_bucket{label1="value1",le="+Inf"} 9
+test_histogram_2_duration_seconds_sum{label1="value1"} 0.00247889
+test_histogram_2_duration_seconds_count{label1="value1"} 9
+test_histogram_2_duration_seconds_bucket{label1="value2",le="0.1"} 7
+test_histogram_2_duration_seconds_bucket{label1="value2",le="0.5"} 8
+test_histogram_2_duration_seconds_bucket{label1="value2",le="+Inf"} 9
+test_histogram_2_duration_seconds_sum{label1="value2"} 0.00247889
+test_histogram_2_duration_seconds_count{label1="value2"} 9
+test_histogram_2_duration_seconds_bucket{label1="value3",le="0.1"} 7
+test_histogram_2_duration_seconds_bucket{label1="value3",le="0.5"} 8
+test_histogram_2_duration_seconds_bucket{label1="value3",le="+Inf"} 9
+test_histogram_2_duration_seconds_sum{label1="value3"} 0.00247889
+test_histogram_2_duration_seconds_count{label1="value3"} 9
+test_histogram_2_duration_seconds_bucket{label1="value4",le="0.1"} 7
+test_histogram_2_duration_seconds_bucket{label1="value4",le="0.5"} 8
+test_histogram_2_duration_seconds_bucket{label1="value4",le="+Inf"} 9
+test_histogram_2_duration_seconds_sum{label1="value4"} 0.00247889
+test_histogram_2_duration_seconds_count{label1="value4"} 9
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/histogram-no-meta.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/histogram-no-meta.txt
new file mode 100644
index 000000000..49def677c
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/histogram-no-meta.txt
@@ -0,0 +1,40 @@
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.1"} 4
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="0.5"} 5
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value1",le="+Inf"} 6
+test_histogram_no_meta_1_duration_seconds_sum{label1="value1"} 0.00147889
+test_histogram_no_meta_1_duration_seconds_count{label1="value1"} 6
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value2",le="0.1"} 4
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value2",le="0.5"} 5
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value2",le="+Inf"} 6
+test_histogram_no_meta_1_duration_seconds_sum{label1="value2"} 0.00147889
+test_histogram_no_meta_1_duration_seconds_count{label1="value2"} 6
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value3",le="0.1"} 4
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value3",le="0.5"} 5
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value3",le="+Inf"} 6
+test_histogram_no_meta_1_duration_seconds_sum{label1="value3"} 0.00147889
+test_histogram_no_meta_1_duration_seconds_count{label1="value3"} 6
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value4",le="0.1"} 4
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value4",le="0.5"} 5
+test_histogram_no_meta_1_duration_seconds_bucket{label1="value4",le="+Inf"} 6
+test_histogram_no_meta_1_duration_seconds_sum{label1="value4"} 0.00147889
+test_histogram_no_meta_1_duration_seconds_count{label1="value4"} 6
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value1",le="0.1"} 7
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value1",le="0.5"} 8
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value1",le="+Inf"} 9
+test_histogram_no_meta_2_duration_seconds_sum{label1="value1"} 0.00247889
+test_histogram_no_meta_2_duration_seconds_count{label1="value1"} 9
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value2",le="0.1"} 7
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value2",le="0.5"} 8
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value2",le="+Inf"} 9
+test_histogram_no_meta_2_duration_seconds_sum{label1="value2"} 0.00247889
+test_histogram_no_meta_2_duration_seconds_count{label1="value2"} 9
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value3",le="0.1"} 7
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value3",le="0.5"} 8
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value3",le="+Inf"} 9
+test_histogram_no_meta_2_duration_seconds_sum{label1="value3"} 0.00247889
+test_histogram_no_meta_2_duration_seconds_count{label1="value3"} 9
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value4",le="0.1"} 7
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value4",le="0.5"} 8
+test_histogram_no_meta_2_duration_seconds_bucket{label1="value4",le="+Inf"} 9
+test_histogram_no_meta_2_duration_seconds_sum{label1="value4"} 0.00247889
+test_histogram_no_meta_2_duration_seconds_count{label1="value4"} 9
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/multiline-help.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/multiline-help.txt
new file mode 100644
index 000000000..f1598fcce
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/multiline-help.txt
@@ -0,0 +1,3 @@
+# HELP test_gauge_metric_1 \n First line.\n Second line.\n
+# TYPE test_gauge_metric_1 gauge
+test_gauge_metric_1{label1="value1"} 11
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/summary-meta.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/summary-meta.txt
new file mode 100644
index 000000000..3056e8076
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/summary-meta.txt
@@ -0,0 +1,43 @@
+# HELP test_summary_1_duration_microseconds Test Summary Metric 1
+# TYPE test_summary_1_duration_microseconds summary
+test_summary_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921
+test_summary_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921
+test_summary_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_1_duration_microseconds_count{label1="value1"} 31
+test_summary_1_duration_microseconds{label1="value2",quantile="0.5"} 4931.921
+test_summary_1_duration_microseconds{label1="value2",quantile="0.9"} 4932.921
+test_summary_1_duration_microseconds{label1="value2",quantile="0.99"} 4933.921
+test_summary_1_duration_microseconds_sum{label1="value2"} 283201.29
+test_summary_1_duration_microseconds_count{label1="value2"} 31
+test_summary_1_duration_microseconds{label1="value3",quantile="0.5"} 4931.921
+test_summary_1_duration_microseconds{label1="value3",quantile="0.9"} 4932.921
+test_summary_1_duration_microseconds{label1="value3",quantile="0.99"} 4933.921
+test_summary_1_duration_microseconds_sum{label1="value3"} 283201.29
+test_summary_1_duration_microseconds_count{label1="value3"} 31
+test_summary_1_duration_microseconds{label1="value4",quantile="0.5"} 4931.921
+test_summary_1_duration_microseconds{label1="value4",quantile="0.9"} 4932.921
+test_summary_1_duration_microseconds{label1="value4",quantile="0.99"} 4933.921
+test_summary_1_duration_microseconds_sum{label1="value4"} 283201.29
+test_summary_1_duration_microseconds_count{label1="value4"} 31
+# TYPE test_summary_2_duration_microseconds summary
+test_summary_2_duration_microseconds{label1="value1",quantile="0.5"} 5931.921
+test_summary_2_duration_microseconds{label1="value1",quantile="0.9"} 5932.921
+test_summary_2_duration_microseconds{label1="value1",quantile="0.99"} 5933.921
+test_summary_2_duration_microseconds_sum{label1="value1"} 383201.29
+test_summary_2_duration_microseconds_count{label1="value1"} 41
+test_summary_2_duration_microseconds{label1="value2",quantile="0.5"} 5931.921
+test_summary_2_duration_microseconds{label1="value2",quantile="0.9"} 5932.921
+test_summary_2_duration_microseconds{label1="value2",quantile="0.99"} 5933.921
+test_summary_2_duration_microseconds_sum{label1="value2"} 383201.29
+test_summary_2_duration_microseconds_count{label1="value2"} 41
+test_summary_2_duration_microseconds{label1="value3",quantile="0.5"} 5931.921
+test_summary_2_duration_microseconds{label1="value3",quantile="0.9"} 5932.921
+test_summary_2_duration_microseconds{label1="value3",quantile="0.99"} 5933.921
+test_summary_2_duration_microseconds_sum{label1="value3"} 383201.29
+test_summary_2_duration_microseconds_count{label1="value3"} 41
+test_summary_2_duration_microseconds{label1="value4",quantile="0.5"} 5931.921
+test_summary_2_duration_microseconds{label1="value4",quantile="0.9"} 5932.921
+test_summary_2_duration_microseconds{label1="value4",quantile="0.99"} 5933.921
+test_summary_2_duration_microseconds_sum{label1="value4"} 383201.29
+test_summary_2_duration_microseconds_count{label1="value4"} 41
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/summary-no-meta.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/summary-no-meta.txt
new file mode 100644
index 000000000..e66564bb7
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/summary-no-meta.txt
@@ -0,0 +1,40 @@
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.5"} 4931.921
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.9"} 4932.921
+test_summary_no_meta_1_duration_microseconds{label1="value1",quantile="0.99"} 4933.921
+test_summary_no_meta_1_duration_microseconds_sum{label1="value1"} 283201.29
+test_summary_no_meta_1_duration_microseconds_count{label1="value1"} 31
+test_summary_no_meta_1_duration_microseconds{label1="value2",quantile="0.5"} 4931.921
+test_summary_no_meta_1_duration_microseconds{label1="value2",quantile="0.9"} 4932.921
+test_summary_no_meta_1_duration_microseconds{label1="value2",quantile="0.99"} 4933.921
+test_summary_no_meta_1_duration_microseconds_sum{label1="value2"} 283201.29
+test_summary_no_meta_1_duration_microseconds_count{label1="value2"} 31
+test_summary_no_meta_1_duration_microseconds{label1="value3",quantile="0.5"} 4931.921
+test_summary_no_meta_1_duration_microseconds{label1="value3",quantile="0.9"} 4932.921
+test_summary_no_meta_1_duration_microseconds{label1="value3",quantile="0.99"} 4933.921
+test_summary_no_meta_1_duration_microseconds_sum{label1="value3"} 283201.29
+test_summary_no_meta_1_duration_microseconds_count{label1="value3"} 31
+test_summary_no_meta_1_duration_microseconds{label1="value4",quantile="0.5"} 4931.921
+test_summary_no_meta_1_duration_microseconds{label1="value4",quantile="0.9"} 4932.921
+test_summary_no_meta_1_duration_microseconds{label1="value4",quantile="0.99"} 4933.921
+test_summary_no_meta_1_duration_microseconds_sum{label1="value4"} 283201.29
+test_summary_no_meta_1_duration_microseconds_count{label1="value4"} 31
+test_summary_no_meta_2_duration_microseconds{label1="value1",quantile="0.5"} 5931.921
+test_summary_no_meta_2_duration_microseconds{label1="value1",quantile="0.9"} 5932.921
+test_summary_no_meta_2_duration_microseconds{label1="value1",quantile="0.99"} 5933.921
+test_summary_no_meta_2_duration_microseconds_sum{label1="value1"} 383201.29
+test_summary_no_meta_2_duration_microseconds_count{label1="value1"} 41
+test_summary_no_meta_2_duration_microseconds{label1="value2",quantile="0.5"} 5931.921
+test_summary_no_meta_2_duration_microseconds{label1="value2",quantile="0.9"} 5932.921
+test_summary_no_meta_2_duration_microseconds{label1="value2",quantile="0.99"} 5933.921
+test_summary_no_meta_2_duration_microseconds_sum{label1="value2"} 383201.29
+test_summary_no_meta_2_duration_microseconds_count{label1="value2"} 41
+test_summary_no_meta_2_duration_microseconds{label1="value3",quantile="0.5"} 5931.921
+test_summary_no_meta_2_duration_microseconds{label1="value3",quantile="0.9"} 5932.921
+test_summary_no_meta_2_duration_microseconds{label1="value3",quantile="0.99"} 5933.921
+test_summary_no_meta_2_duration_microseconds_sum{label1="value3"} 383201.29
+test_summary_no_meta_2_duration_microseconds_count{label1="value3"} 41
+test_summary_no_meta_2_duration_microseconds{label1="value4",quantile="0.5"} 5931.921
+test_summary_no_meta_2_duration_microseconds{label1="value4",quantile="0.9"} 5932.921
+test_summary_no_meta_2_duration_microseconds{label1="value4",quantile="0.99"} 5933.921
+test_summary_no_meta_2_duration_microseconds_sum{label1="value4"} 383201.29
+test_summary_no_meta_2_duration_microseconds_count{label1="value4"} 41
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/testdata.nometa.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/testdata.nometa.txt
new file mode 100644
index 000000000..e760ad268
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/testdata.nometa.txt
@@ -0,0 +1,410 @@
+go_gc_duration_seconds{quantile="0"} 4.9351e-05
+go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
+go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
+go_gc_duration_seconds{quantile="0.75"} 0.000106744
+go_gc_duration_seconds{quantile="1"} 0.002072195
+go_gc_duration_seconds_sum 0.012139815
+go_gc_duration_seconds_count 99
+go_goroutines 33
+go_memstats_alloc_bytes 1.7518624e+07
+go_memstats_alloc_bytes_total 8.3062296e+08
+go_memstats_buck_hash_sys_bytes 1.494637e+06
+go_memstats_frees_total 4.65658e+06
+go_memstats_gc_sys_bytes 1.107968e+06
+go_memstats_heap_alloc_bytes 1.7518624e+07
+go_memstats_heap_idle_bytes 6.668288e+06
+go_memstats_heap_inuse_bytes 1.8956288e+07
+go_memstats_heap_objects 72755
+go_memstats_heap_released_bytes_total 0
+go_memstats_heap_sys_bytes 2.5624576e+07
+go_memstats_last_gc_time_seconds 1.4843955586166437e+09
+go_memstats_lookups_total 2089
+go_memstats_mallocs_total 4.729335e+06
+go_memstats_mcache_inuse_bytes 9600
+go_memstats_mcache_sys_bytes 16384
+go_memstats_mspan_inuse_bytes 211520
+go_memstats_mspan_sys_bytes 245760
+go_memstats_next_gc_bytes 2.033527e+07
+go_memstats_other_sys_bytes 2.077323e+06
+go_memstats_stack_inuse_bytes 1.6384e+06
+go_memstats_stack_sys_bytes 1.6384e+06
+go_memstats_sys_bytes 3.2205048e+07
+http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="alerts"} 0
+http_request_duration_microseconds_count{handler="alerts"} 0
+http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="config"} 0
+http_request_duration_microseconds_count{handler="config"} 0
+http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="consoles"} 0
+http_request_duration_microseconds_count{handler="consoles"} 0
+http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="drop_series"} 0
+http_request_duration_microseconds_count{handler="drop_series"} 0
+http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="federate"} 0
+http_request_duration_microseconds_count{handler="federate"} 0
+http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="flags"} 0
+http_request_duration_microseconds_count{handler="flags"} 0
+http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
+http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
+http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
+http_request_duration_microseconds_sum{handler="graph"} 5803.93
+http_request_duration_microseconds_count{handler="graph"} 3
+http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="heap"} 0
+http_request_duration_microseconds_count{handler="heap"} 0
+http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
+http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
+http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
+http_request_duration_microseconds_sum{handler="label_values"} 3995.574
+http_request_duration_microseconds_count{handler="label_values"} 3
+http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="options"} 0
+http_request_duration_microseconds_count{handler="options"} 0
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
+http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
+http_request_duration_microseconds_count{handler="prometheus"} 462
+http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
+http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
+http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
+http_request_duration_microseconds_sum{handler="query"} 26074.11
+http_request_duration_microseconds_count{handler="query"} 6
+http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="query_range"} 0
+http_request_duration_microseconds_count{handler="query_range"} 0
+http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="rules"} 0
+http_request_duration_microseconds_count{handler="rules"} 0
+http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="series"} 0
+http_request_duration_microseconds_count{handler="series"} 0
+http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
+http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
+http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
+http_request_duration_microseconds_sum{handler="static"} 6458.621
+http_request_duration_microseconds_count{handler="static"} 3
+http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="status"} 0
+http_request_duration_microseconds_count{handler="status"} 0
+http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="targets"} 0
+http_request_duration_microseconds_count{handler="targets"} 0
+http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="version"} 0
+http_request_duration_microseconds_count{handler="version"} 0
+http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
+http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
+http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="alerts"} 0
+http_request_size_bytes_count{handler="alerts"} 0
+http_request_size_bytes{handler="config",quantile="0.5"} NaN
+http_request_size_bytes{handler="config",quantile="0.9"} NaN
+http_request_size_bytes{handler="config",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="config"} 0
+http_request_size_bytes_count{handler="config"} 0
+http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
+http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
+http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="consoles"} 0
+http_request_size_bytes_count{handler="consoles"} 0
+http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
+http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
+http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="drop_series"} 0
+http_request_size_bytes_count{handler="drop_series"} 0
+http_request_size_bytes{handler="federate",quantile="0.5"} NaN
+http_request_size_bytes{handler="federate",quantile="0.9"} NaN
+http_request_size_bytes{handler="federate",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="federate"} 0
+http_request_size_bytes_count{handler="federate"} 0
+http_request_size_bytes{handler="flags",quantile="0.5"} NaN
+http_request_size_bytes{handler="flags",quantile="0.9"} NaN
+http_request_size_bytes{handler="flags",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="flags"} 0
+http_request_size_bytes_count{handler="flags"} 0
+http_request_size_bytes{handler="graph",quantile="0.5"} 367
+http_request_size_bytes{handler="graph",quantile="0.9"} 389
+http_request_size_bytes{handler="graph",quantile="0.99"} 389
+http_request_size_bytes_sum{handler="graph"} 1145
+http_request_size_bytes_count{handler="graph"} 3
+http_request_size_bytes{handler="heap",quantile="0.5"} NaN
+http_request_size_bytes{handler="heap",quantile="0.9"} NaN
+http_request_size_bytes{handler="heap",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="heap"} 0
+http_request_size_bytes_count{handler="heap"} 0
+http_request_size_bytes{handler="label_values",quantile="0.5"} 416
+http_request_size_bytes{handler="label_values",quantile="0.9"} 416
+http_request_size_bytes{handler="label_values",quantile="0.99"} 416
+http_request_size_bytes_sum{handler="label_values"} 1248
+http_request_size_bytes_count{handler="label_values"} 3
+http_request_size_bytes{handler="options",quantile="0.5"} NaN
+http_request_size_bytes{handler="options",quantile="0.9"} NaN
+http_request_size_bytes{handler="options",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="options"} 0
+http_request_size_bytes_count{handler="options"} 0
+http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
+http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
+http_request_size_bytes_sum{handler="prometheus"} 109956
+http_request_size_bytes_count{handler="prometheus"} 462
+http_request_size_bytes{handler="query",quantile="0.5"} 531
+http_request_size_bytes{handler="query",quantile="0.9"} 531
+http_request_size_bytes{handler="query",quantile="0.99"} 531
+http_request_size_bytes_sum{handler="query"} 3186
+http_request_size_bytes_count{handler="query"} 6
+http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
+http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
+http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="query_range"} 0
+http_request_size_bytes_count{handler="query_range"} 0
+http_request_size_bytes{handler="rules",quantile="0.5"} NaN
+http_request_size_bytes{handler="rules",quantile="0.9"} NaN
+http_request_size_bytes{handler="rules",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="rules"} 0
+http_request_size_bytes_count{handler="rules"} 0
+http_request_size_bytes{handler="series",quantile="0.5"} NaN
+http_request_size_bytes{handler="series",quantile="0.9"} NaN
+http_request_size_bytes{handler="series",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="series"} 0
+http_request_size_bytes_count{handler="series"} 0
+http_request_size_bytes{handler="static",quantile="0.5"} 379
+http_request_size_bytes{handler="static",quantile="0.9"} 379
+http_request_size_bytes{handler="static",quantile="0.99"} 379
+http_request_size_bytes_sum{handler="static"} 1137
+http_request_size_bytes_count{handler="static"} 3
+http_request_size_bytes{handler="status",quantile="0.5"} NaN
+http_request_size_bytes{handler="status",quantile="0.9"} NaN
+http_request_size_bytes{handler="status",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="status"} 0
+http_request_size_bytes_count{handler="status"} 0
+http_request_size_bytes{handler="targets",quantile="0.5"} NaN
+http_request_size_bytes{handler="targets",quantile="0.9"} NaN
+http_request_size_bytes{handler="targets",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="targets"} 0
+http_request_size_bytes_count{handler="targets"} 0
+http_request_size_bytes{handler="version",quantile="0.5"} NaN
+http_request_size_bytes{handler="version",quantile="0.9"} NaN
+http_request_size_bytes{handler="version",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="version"} 0
+http_request_size_bytes_count{handler="version"} 0
+http_requests_total{code="200",handler="graph",method="get"} 3
+http_requests_total{code="200",handler="label_values",method="get"} 3
+http_requests_total{code="200",handler="prometheus",method="get"} 462
+http_requests_total{code="200",handler="query",method="get"} 6
+http_requests_total{code="200",handler="static",method="get"} 3
+http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
+http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
+http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="alerts"} 0
+http_response_size_bytes_count{handler="alerts"} 0
+http_response_size_bytes{handler="config",quantile="0.5"} NaN
+http_response_size_bytes{handler="config",quantile="0.9"} NaN
+http_response_size_bytes{handler="config",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="config"} 0
+http_response_size_bytes_count{handler="config"} 0
+http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
+http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
+http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="consoles"} 0
+http_response_size_bytes_count{handler="consoles"} 0
+http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
+http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
+http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="drop_series"} 0
+http_response_size_bytes_count{handler="drop_series"} 0
+http_response_size_bytes{handler="federate",quantile="0.5"} NaN
+http_response_size_bytes{handler="federate",quantile="0.9"} NaN
+http_response_size_bytes{handler="federate",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="federate"} 0
+http_response_size_bytes_count{handler="federate"} 0
+http_response_size_bytes{handler="flags",quantile="0.5"} NaN
+http_response_size_bytes{handler="flags",quantile="0.9"} NaN
+http_response_size_bytes{handler="flags",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="flags"} 0
+http_response_size_bytes_count{handler="flags"} 0
+http_response_size_bytes{handler="graph",quantile="0.5"} 3619
+http_response_size_bytes{handler="graph",quantile="0.9"} 3619
+http_response_size_bytes{handler="graph",quantile="0.99"} 3619
+http_response_size_bytes_sum{handler="graph"} 10857
+http_response_size_bytes_count{handler="graph"} 3
+http_response_size_bytes{handler="heap",quantile="0.5"} NaN
+http_response_size_bytes{handler="heap",quantile="0.9"} NaN
+http_response_size_bytes{handler="heap",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="heap"} 0
+http_response_size_bytes_count{handler="heap"} 0
+http_response_size_bytes{handler="label_values",quantile="0.5"} 642
+http_response_size_bytes{handler="label_values",quantile="0.9"} 642
+http_response_size_bytes{handler="label_values",quantile="0.99"} 642
+http_response_size_bytes_sum{handler="label_values"} 1926
+http_response_size_bytes_count{handler="label_values"} 3
+http_response_size_bytes{handler="options",quantile="0.5"} NaN
+http_response_size_bytes{handler="options",quantile="0.9"} NaN
+http_response_size_bytes{handler="options",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="options"} 0
+http_response_size_bytes_count{handler="options"} 0
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
+http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
+http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
+http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
+http_response_size_bytes_count{handler="prometheus"} 462
+http_response_size_bytes{handler="query",quantile="0.5"} 776
+http_response_size_bytes{handler="query",quantile="0.9"} 781
+http_response_size_bytes{handler="query",quantile="0.99"} 781
+http_response_size_bytes_sum{handler="query"} 4656
+http_response_size_bytes_count{handler="query"} 6
+http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
+http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
+http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="query_range"} 0
+http_response_size_bytes_count{handler="query_range"} 0
+http_response_size_bytes{handler="rules",quantile="0.5"} NaN
+http_response_size_bytes{handler="rules",quantile="0.9"} NaN
+http_response_size_bytes{handler="rules",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="rules"} 0
+http_response_size_bytes_count{handler="rules"} 0
+http_response_size_bytes{handler="series",quantile="0.5"} NaN
+http_response_size_bytes{handler="series",quantile="0.9"} NaN
+http_response_size_bytes{handler="series",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="series"} 0
+http_response_size_bytes_count{handler="series"} 0
+http_response_size_bytes{handler="static",quantile="0.5"} 6316
+http_response_size_bytes{handler="static",quantile="0.9"} 6316
+http_response_size_bytes{handler="static",quantile="0.99"} 6316
+http_response_size_bytes_sum{handler="static"} 18948
+http_response_size_bytes_count{handler="static"} 3
+http_response_size_bytes{handler="status",quantile="0.5"} NaN
+http_response_size_bytes{handler="status",quantile="0.9"} NaN
+http_response_size_bytes{handler="status",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="status"} 0
+http_response_size_bytes_count{handler="status"} 0
+http_response_size_bytes{handler="targets",quantile="0.5"} NaN
+http_response_size_bytes{handler="targets",quantile="0.9"} NaN
+http_response_size_bytes{handler="targets",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="targets"} 0
+http_response_size_bytes_count{handler="targets"} 0
+http_response_size_bytes{handler="version",quantile="0.5"} NaN
+http_response_size_bytes{handler="version",quantile="0.9"} NaN
+http_response_size_bytes{handler="version",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="version"} 0
+http_response_size_bytes_count{handler="version"} 0
+prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
+prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
+prometheus_config_last_reload_successful 1
+prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds_count 1
+prometheus_evaluator_iterations_skipped_total 0
+prometheus_notifications_dropped_total 0
+prometheus_notifications_queue_capacity 10000
+prometheus_notifications_queue_length 0
+prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
+prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_azure_refresh_duration_seconds_sum 0
+prometheus_sd_azure_refresh_duration_seconds_count 0
+prometheus_sd_azure_refresh_failures_total 0
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
+prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
+prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_failures_total 0
+prometheus_sd_dns_lookup_failures_total 0
+prometheus_sd_dns_lookups_total 0
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_ec2_refresh_duration_seconds_sum 0
+prometheus_sd_ec2_refresh_duration_seconds_count 0
+prometheus_sd_ec2_refresh_failures_total 0
+prometheus_sd_file_read_errors_total 0
+prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_file_scan_duration_seconds_sum 0
+prometheus_sd_file_scan_duration_seconds_count 0
+prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
+prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
+prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
+prometheus_sd_gce_refresh_duration_sum 0
+prometheus_sd_gce_refresh_duration_count 0
+prometheus_sd_gce_refresh_failures_total 0
+prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_marathon_refresh_duration_seconds_sum 0
+prometheus_sd_marathon_refresh_duration_seconds_count 0
+prometheus_sd_marathon_refresh_failures_total 0
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
+prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
+prometheus_target_interval_length_seconds_count{interval="50ms"} 685
+prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
+prometheus_target_skipped_scrapes_total 0
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
+prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
+prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
+prometheus_treecache_watcher_goroutines 0
+prometheus_treecache_zookeeper_failures_total 0
diff --git a/src/go/plugin/go.d/pkg/prometheus/testdata/testdata.txt b/src/go/plugin/go.d/pkg/prometheus/testdata/testdata.txt
new file mode 100644
index 000000000..c7f2a7af0
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/prometheus/testdata/testdata.txt
@@ -0,0 +1,528 @@
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 4.9351e-05
+go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
+go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
+go_gc_duration_seconds{quantile="0.75"} 0.000106744
+go_gc_duration_seconds{quantile="1"} 0.002072195
+go_gc_duration_seconds_sum 0.012139815
+go_gc_duration_seconds_count 99
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 33
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 1.7518624e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 8.3062296e+08
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.494637e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 4.65658e+06
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 1.107968e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 1.7518624e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 6.668288e+06
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 1.8956288e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 72755
+# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes_total counter
+go_memstats_heap_released_bytes_total 0
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 2.5624576e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.4843955586166437e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 2089
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 4.729335e+06
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 9600
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16384
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 211520
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 245760
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 2.033527e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 2.077323e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 1.6384e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 1.6384e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 3.2205048e+07
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="alerts"} 0
+http_request_duration_microseconds_count{handler="alerts"} 0
+http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="config"} 0
+http_request_duration_microseconds_count{handler="config"} 0
+http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="consoles"} 0
+http_request_duration_microseconds_count{handler="consoles"} 0
+http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="drop_series"} 0
+http_request_duration_microseconds_count{handler="drop_series"} 0
+http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="federate"} 0
+http_request_duration_microseconds_count{handler="federate"} 0
+http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="flags"} 0
+http_request_duration_microseconds_count{handler="flags"} 0
+http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
+http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
+http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
+http_request_duration_microseconds_sum{handler="graph"} 5803.93
+http_request_duration_microseconds_count{handler="graph"} 3
+http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="heap"} 0
+http_request_duration_microseconds_count{handler="heap"} 0
+http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
+http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
+http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
+http_request_duration_microseconds_sum{handler="label_values"} 3995.574
+http_request_duration_microseconds_count{handler="label_values"} 3
+http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="options"} 0
+http_request_duration_microseconds_count{handler="options"} 0
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
+http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
+http_request_duration_microseconds_count{handler="prometheus"} 462
+http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
+http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
+http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
+http_request_duration_microseconds_sum{handler="query"} 26074.11
+http_request_duration_microseconds_count{handler="query"} 6
+http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="query_range"} 0
+http_request_duration_microseconds_count{handler="query_range"} 0
+http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="rules"} 0
+http_request_duration_microseconds_count{handler="rules"} 0
+http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="series"} 0
+http_request_duration_microseconds_count{handler="series"} 0
+http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
+http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
+http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
+http_request_duration_microseconds_sum{handler="static"} 6458.621
+http_request_duration_microseconds_count{handler="static"} 3
+http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="status"} 0
+http_request_duration_microseconds_count{handler="status"} 0
+http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="targets"} 0
+http_request_duration_microseconds_count{handler="targets"} 0
+http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="version"} 0
+http_request_duration_microseconds_count{handler="version"} 0
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
+http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
+http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="alerts"} 0
+http_request_size_bytes_count{handler="alerts"} 0
+http_request_size_bytes{handler="config",quantile="0.5"} NaN
+http_request_size_bytes{handler="config",quantile="0.9"} NaN
+http_request_size_bytes{handler="config",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="config"} 0
+http_request_size_bytes_count{handler="config"} 0
+http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
+http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
+http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="consoles"} 0
+http_request_size_bytes_count{handler="consoles"} 0
+http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
+http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
+http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="drop_series"} 0
+http_request_size_bytes_count{handler="drop_series"} 0
+http_request_size_bytes{handler="federate",quantile="0.5"} NaN
+http_request_size_bytes{handler="federate",quantile="0.9"} NaN
+http_request_size_bytes{handler="federate",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="federate"} 0
+http_request_size_bytes_count{handler="federate"} 0
+http_request_size_bytes{handler="flags",quantile="0.5"} NaN
+http_request_size_bytes{handler="flags",quantile="0.9"} NaN
+http_request_size_bytes{handler="flags",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="flags"} 0
+http_request_size_bytes_count{handler="flags"} 0
+http_request_size_bytes{handler="graph",quantile="0.5"} 367
+http_request_size_bytes{handler="graph",quantile="0.9"} 389
+http_request_size_bytes{handler="graph",quantile="0.99"} 389
+http_request_size_bytes_sum{handler="graph"} 1145
+http_request_size_bytes_count{handler="graph"} 3
+http_request_size_bytes{handler="heap",quantile="0.5"} NaN
+http_request_size_bytes{handler="heap",quantile="0.9"} NaN
+http_request_size_bytes{handler="heap",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="heap"} 0
+http_request_size_bytes_count{handler="heap"} 0
+http_request_size_bytes{handler="label_values",quantile="0.5"} 416
+http_request_size_bytes{handler="label_values",quantile="0.9"} 416
+http_request_size_bytes{handler="label_values",quantile="0.99"} 416
+http_request_size_bytes_sum{handler="label_values"} 1248
+http_request_size_bytes_count{handler="label_values"} 3
+http_request_size_bytes{handler="options",quantile="0.5"} NaN
+http_request_size_bytes{handler="options",quantile="0.9"} NaN
+http_request_size_bytes{handler="options",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="options"} 0
+http_request_size_bytes_count{handler="options"} 0
+http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
+http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
+http_request_size_bytes_sum{handler="prometheus"} 109956
+http_request_size_bytes_count{handler="prometheus"} 462
+http_request_size_bytes{handler="query",quantile="0.5"} 531
+http_request_size_bytes{handler="query",quantile="0.9"} 531
+http_request_size_bytes{handler="query",quantile="0.99"} 531
+http_request_size_bytes_sum{handler="query"} 3186
+http_request_size_bytes_count{handler="query"} 6
+http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
+http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
+http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="query_range"} 0
+http_request_size_bytes_count{handler="query_range"} 0
+http_request_size_bytes{handler="rules",quantile="0.5"} NaN
+http_request_size_bytes{handler="rules",quantile="0.9"} NaN
+http_request_size_bytes{handler="rules",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="rules"} 0
+http_request_size_bytes_count{handler="rules"} 0
+http_request_size_bytes{handler="series",quantile="0.5"} NaN
+http_request_size_bytes{handler="series",quantile="0.9"} NaN
+http_request_size_bytes{handler="series",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="series"} 0
+http_request_size_bytes_count{handler="series"} 0
+http_request_size_bytes{handler="static",quantile="0.5"} 379
+http_request_size_bytes{handler="static",quantile="0.9"} 379
+http_request_size_bytes{handler="static",quantile="0.99"} 379
+http_request_size_bytes_sum{handler="static"} 1137
+http_request_size_bytes_count{handler="static"} 3
+http_request_size_bytes{handler="status",quantile="0.5"} NaN
+http_request_size_bytes{handler="status",quantile="0.9"} NaN
+http_request_size_bytes{handler="status",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="status"} 0
+http_request_size_bytes_count{handler="status"} 0
+http_request_size_bytes{handler="targets",quantile="0.5"} NaN
+http_request_size_bytes{handler="targets",quantile="0.9"} NaN
+http_request_size_bytes{handler="targets",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="targets"} 0
+http_request_size_bytes_count{handler="targets"} 0
+http_request_size_bytes{handler="version",quantile="0.5"} NaN
+http_request_size_bytes{handler="version",quantile="0.9"} NaN
+http_request_size_bytes{handler="version",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="version"} 0
+http_request_size_bytes_count{handler="version"} 0
+# HELP http_requests_total Total number of HTTP requests made.
+# TYPE http_requests_total counter
+http_requests_total{code="200",handler="graph",method="get"} 3
+http_requests_total{code="200",handler="label_values",method="get"} 3
+http_requests_total{code="200",handler="prometheus",method="get"} 462
+http_requests_total{code="200",handler="query",method="get"} 6
+http_requests_total{code="200",handler="static",method="get"} 3
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
+http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
+http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="alerts"} 0
+http_response_size_bytes_count{handler="alerts"} 0
+http_response_size_bytes{handler="config",quantile="0.5"} NaN
+http_response_size_bytes{handler="config",quantile="0.9"} NaN
+http_response_size_bytes{handler="config",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="config"} 0
+http_response_size_bytes_count{handler="config"} 0
+http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
+http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
+http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="consoles"} 0
+http_response_size_bytes_count{handler="consoles"} 0
+http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
+http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
+http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="drop_series"} 0
+http_response_size_bytes_count{handler="drop_series"} 0
+http_response_size_bytes{handler="federate",quantile="0.5"} NaN
+http_response_size_bytes{handler="federate",quantile="0.9"} NaN
+http_response_size_bytes{handler="federate",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="federate"} 0
+http_response_size_bytes_count{handler="federate"} 0
+http_response_size_bytes{handler="flags",quantile="0.5"} NaN
+http_response_size_bytes{handler="flags",quantile="0.9"} NaN
+http_response_size_bytes{handler="flags",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="flags"} 0
+http_response_size_bytes_count{handler="flags"} 0
+http_response_size_bytes{handler="graph",quantile="0.5"} 3619
+http_response_size_bytes{handler="graph",quantile="0.9"} 3619
+http_response_size_bytes{handler="graph",quantile="0.99"} 3619
+http_response_size_bytes_sum{handler="graph"} 10857
+http_response_size_bytes_count{handler="graph"} 3
+http_response_size_bytes{handler="heap",quantile="0.5"} NaN
+http_response_size_bytes{handler="heap",quantile="0.9"} NaN
+http_response_size_bytes{handler="heap",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="heap"} 0
+http_response_size_bytes_count{handler="heap"} 0
+http_response_size_bytes{handler="label_values",quantile="0.5"} 642
+http_response_size_bytes{handler="label_values",quantile="0.9"} 642
+http_response_size_bytes{handler="label_values",quantile="0.99"} 642
+http_response_size_bytes_sum{handler="label_values"} 1926
+http_response_size_bytes_count{handler="label_values"} 3
+http_response_size_bytes{handler="options",quantile="0.5"} NaN
+http_response_size_bytes{handler="options",quantile="0.9"} NaN
+http_response_size_bytes{handler="options",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="options"} 0
+http_response_size_bytes_count{handler="options"} 0
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
+http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
+http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
+http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
+http_response_size_bytes_count{handler="prometheus"} 462
+http_response_size_bytes{handler="query",quantile="0.5"} 776
+http_response_size_bytes{handler="query",quantile="0.9"} 781
+http_response_size_bytes{handler="query",quantile="0.99"} 781
+http_response_size_bytes_sum{handler="query"} 4656
+http_response_size_bytes_count{handler="query"} 6
+http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
+http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
+http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="query_range"} 0
+http_response_size_bytes_count{handler="query_range"} 0
+http_response_size_bytes{handler="rules",quantile="0.5"} NaN
+http_response_size_bytes{handler="rules",quantile="0.9"} NaN
+http_response_size_bytes{handler="rules",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="rules"} 0
+http_response_size_bytes_count{handler="rules"} 0
+http_response_size_bytes{handler="series",quantile="0.5"} NaN
+http_response_size_bytes{handler="series",quantile="0.9"} NaN
+http_response_size_bytes{handler="series",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="series"} 0
+http_response_size_bytes_count{handler="series"} 0
+http_response_size_bytes{handler="static",quantile="0.5"} 6316
+http_response_size_bytes{handler="static",quantile="0.9"} 6316
+http_response_size_bytes{handler="static",quantile="0.99"} 6316
+http_response_size_bytes_sum{handler="static"} 18948
+http_response_size_bytes_count{handler="static"} 3
+http_response_size_bytes{handler="status",quantile="0.5"} NaN
+http_response_size_bytes{handler="status",quantile="0.9"} NaN
+http_response_size_bytes{handler="status",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="status"} 0
+http_response_size_bytes_count{handler="status"} 0
+http_response_size_bytes{handler="targets",quantile="0.5"} NaN
+http_response_size_bytes{handler="targets",quantile="0.9"} NaN
+http_response_size_bytes{handler="targets",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="targets"} 0
+http_response_size_bytes_count{handler="targets"} 0
+http_response_size_bytes{handler="version",quantile="0.5"} NaN
+http_response_size_bytes{handler="version",quantile="0.9"} NaN
+http_response_size_bytes{handler="version",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="version"} 0
+http_response_size_bytes_count{handler="version"} 0
+# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built.
+# TYPE prometheus_build_info gauge
+prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
+# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
+# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
+prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
+# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
+# TYPE prometheus_config_last_reload_successful gauge
+prometheus_config_last_reload_successful 1
+# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations.
+# TYPE prometheus_evaluator_duration_seconds summary
+prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds_count 1
+# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage.
+# TYPE prometheus_evaluator_iterations_skipped_total counter
+prometheus_evaluator_iterations_skipped_total 0
+# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration.
+# TYPE prometheus_notifications_dropped_total counter
+prometheus_notifications_dropped_total 0
+# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
+# TYPE prometheus_notifications_queue_capacity gauge
+prometheus_notifications_queue_capacity 10000
+# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
+# TYPE prometheus_notifications_queue_length gauge
+prometheus_notifications_queue_length 0
+# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
+# TYPE prometheus_rule_evaluation_failures_total counter
+prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
+prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
+# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds.
+# TYPE prometheus_sd_azure_refresh_duration_seconds summary
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_azure_refresh_duration_seconds_sum 0
+prometheus_sd_azure_refresh_duration_seconds_count 0
+# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures.
+# TYPE prometheus_sd_azure_refresh_failures_total counter
+prometheus_sd_azure_refresh_failures_total 0
+# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds.
+# TYPE prometheus_sd_consul_rpc_duration_seconds summary
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
+prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
+prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
+# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures.
+# TYPE prometheus_sd_consul_rpc_failures_total counter
+prometheus_sd_consul_rpc_failures_total 0
+# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures.
+# TYPE prometheus_sd_dns_lookup_failures_total counter
+prometheus_sd_dns_lookup_failures_total 0
+# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups.
+# TYPE prometheus_sd_dns_lookups_total counter
+prometheus_sd_dns_lookups_total 0
+# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds.
+# TYPE prometheus_sd_ec2_refresh_duration_seconds summary
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_ec2_refresh_duration_seconds_sum 0
+prometheus_sd_ec2_refresh_duration_seconds_count 0
+# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures.
+# TYPE prometheus_sd_ec2_refresh_failures_total counter
+prometheus_sd_ec2_refresh_failures_total 0
+# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors.
+# TYPE prometheus_sd_file_read_errors_total counter
+prometheus_sd_file_read_errors_total 0
+# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds.
+# TYPE prometheus_sd_file_scan_duration_seconds summary
+prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_file_scan_duration_seconds_sum 0
+prometheus_sd_file_scan_duration_seconds_count 0
+# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds.
+# TYPE prometheus_sd_gce_refresh_duration summary
+prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
+prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
+prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
+prometheus_sd_gce_refresh_duration_sum 0
+prometheus_sd_gce_refresh_duration_count 0
+# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures.
+# TYPE prometheus_sd_gce_refresh_failures_total counter
+prometheus_sd_gce_refresh_failures_total 0
+# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled.
+# TYPE prometheus_sd_kubernetes_events_total counter
+prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
+# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds.
+# TYPE prometheus_sd_marathon_refresh_duration_seconds summary
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_marathon_refresh_duration_seconds_sum 0
+prometheus_sd_marathon_refresh_duration_seconds_count 0
+# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures.
+# TYPE prometheus_sd_marathon_refresh_failures_total counter
+prometheus_sd_marathon_refresh_failures_total 0
+# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
+# TYPE prometheus_target_interval_length_seconds summary
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
+prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
+prometheus_target_interval_length_seconds_count{interval="50ms"} 685
+# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool.
+# TYPE prometheus_target_scrape_pool_sync_total counter
+prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
+# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled.
+# TYPE prometheus_target_skipped_scrapes_total counter
+prometheus_target_skipped_scrapes_total 0
+# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool.
+# TYPE prometheus_target_sync_length_seconds summary
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
+prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
+prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
+# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines.
+# TYPE prometheus_treecache_watcher_goroutines gauge
+prometheus_treecache_watcher_goroutines 0
+# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures.
+# TYPE prometheus_treecache_zookeeper_failures_total counter
+prometheus_treecache_zookeeper_failures_total 0 \ No newline at end of file
diff --git a/src/go/plugin/go.d/pkg/socket/client.go b/src/go/plugin/go.d/pkg/socket/client.go
new file mode 100644
index 000000000..26ae1dfa6
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/socket/client.go
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package socket
+
+import (
+ "bufio"
+ "crypto/tls"
+ "errors"
+ "net"
+ "time"
+)
+
+// New returns a pointer to a new socket client given the socket
+// type (IP, TCP, UDP, UNIX), a network address (IP/domain:port),
+// a timeout and a TLS config. It supports both IPv4 and IPv6 addresses
+// and reuses connections where possible.
+func New(config Config) *Socket {
+ return &Socket{
+ Config: config,
+ conn: nil,
+ }
+}
+
+// Socket is the implementation of a socket client.
+type Socket struct {
+ Config
+ conn net.Conn
+}
+
+// Connect connects to the Socket address on the named network.
+// If the address is a domain name, DNS resolution is performed as well.
+// An address like :80 will attempt to connect to localhost.
+// The configured connect timeout and TLS config are used.
+func (s *Socket) Connect() error {
+ network, address := networkType(s.Address)
+ var conn net.Conn
+ var err error
+
+ if s.TLSConf == nil {
+ conn, err = net.DialTimeout(network, address, s.ConnectTimeout)
+ } else {
+ var d net.Dialer
+ d.Timeout = s.ConnectTimeout
+ conn, err = tls.DialWithDialer(&d, network, address, s.TLSConf)
+ }
+ if err != nil {
+ return err
+ }
+
+ s.conn = conn
+
+ return nil
+}
+
+// Disconnect closes the connection.
+// Any in-flight commands will be cancelled and return errors.
+func (s *Socket) Disconnect() (err error) {
+ if s.conn != nil {
+ err = s.conn.Close()
+ s.conn = nil
+ }
+ return err
+}
+
+// Command writes the command string to the connection and passes the
+// response bytes line by line to the process function. It uses the
+// timeout values from the Socket config and returns read, write and
+// timeout errors, if any. If a timeout occurs while processing the
+// responses, this function stops processing and returns a timeout
+// error.
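+//
+// A minimal usage sketch (illustrative; the address and command below are
+// assumptions, not something this package defines):
+//
+//	sock := New(Config{
+//	    Address:        "127.0.0.1:6379",
+//	    ConnectTimeout: time.Second,
+//	    ReadTimeout:    time.Second,
+//	    WriteTimeout:   time.Second,
+//	})
+//	if err := sock.Connect(); err != nil {
+//	    return err
+//	}
+//	defer func() { _ = sock.Disconnect() }()
+//
+//	var lines []string
+//	err := sock.Command("PING\r\n", func(line []byte) bool {
+//	    lines = append(lines, string(line))
+//	    return false // stop reading after the first response line
+//	})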
+func (s *Socket) Command(command string, process Processor) error {
+ if s.conn == nil {
+ return errors.New("cannot send command on nil connection")
+ }
+ if err := write(command, s.conn, s.WriteTimeout); err != nil {
+ return err
+ }
+ return read(s.conn, process, s.ReadTimeout)
+}
+
+func write(command string, writer net.Conn, timeout time.Duration) error {
+ if writer == nil {
+ return errors.New("attempt to write on nil connection")
+ }
+ if err := writer.SetWriteDeadline(time.Now().Add(timeout)); err != nil {
+ return err
+ }
+ _, err := writer.Write([]byte(command))
+ return err
+}
+
+func read(reader net.Conn, process Processor, timeout time.Duration) error {
+ if process == nil {
+ return errors.New("process func is nil")
+ }
+ if reader == nil {
+ return errors.New("attempt to read on nil connection")
+ }
+ if err := reader.SetReadDeadline(time.Now().Add(timeout)); err != nil {
+ return err
+ }
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() && process(scanner.Bytes()) {
+ }
+ return scanner.Err()
+}
diff --git a/src/go/plugin/go.d/pkg/socket/client_test.go b/src/go/plugin/go.d/pkg/socket/client_test.go
new file mode 100644
index 000000000..fa64f4558
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/socket/client_test.go
@@ -0,0 +1,163 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package socket
+
+import (
+ "crypto/tls"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+const (
+ testServerAddress = "127.0.0.1:9999"
+ testUdpServerAddress = "udp://127.0.0.1:9999"
+ testUnixServerAddress = "/tmp/testSocketFD"
+ defaultTimeout = 100 * time.Millisecond
+)
+
+var tcpConfig = Config{
+ Address: testServerAddress,
+ ConnectTimeout: defaultTimeout,
+ ReadTimeout: defaultTimeout,
+ WriteTimeout: defaultTimeout,
+ TLSConf: nil,
+}
+
+var udpConfig = Config{
+ Address: testUdpServerAddress,
+ ConnectTimeout: defaultTimeout,
+ ReadTimeout: defaultTimeout,
+ WriteTimeout: defaultTimeout,
+ TLSConf: nil,
+}
+
+var unixConfig = Config{
+ Address: testUnixServerAddress,
+ ConnectTimeout: defaultTimeout,
+ ReadTimeout: defaultTimeout,
+ WriteTimeout: defaultTimeout,
+ TLSConf: nil,
+}
+
+var tcpTlsConfig = Config{
+ Address: testServerAddress,
+ ConnectTimeout: defaultTimeout,
+ ReadTimeout: defaultTimeout,
+ WriteTimeout: defaultTimeout,
+ TLSConf: &tls.Config{},
+}
+
+func Test_clientCommand(t *testing.T) {
+ srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1}
+ go func() { defer func() { _ = srv.Close() }(); _ = srv.Run() }()
+
+ time.Sleep(time.Millisecond * 100)
+ sock := New(tcpConfig)
+ require.NoError(t, sock.Connect())
+ err := sock.Command("ping\n", func(bytes []byte) bool {
+ assert.Equal(t, "pong", string(bytes))
+ return true
+ })
+ require.NoError(t, sock.Disconnect())
+ require.NoError(t, err)
+}
+
+func Test_clientTimeout(t *testing.T) {
+ srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1}
+ go func() { _ = srv.Run() }()
+
+ time.Sleep(time.Millisecond * 100)
+ sock := New(tcpConfig)
+ require.NoError(t, sock.Connect())
+ sock.ReadTimeout = 0
+ err := sock.Command("ping\n", func(bytes []byte) bool {
+ assert.Equal(t, "pong", string(bytes))
+ return true
+ })
+ require.Error(t, err)
+}
+
+func Test_clientIncompleteSSL(t *testing.T) {
+ srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1}
+ go func() { _ = srv.Run() }()
+
+ time.Sleep(time.Millisecond * 100)
+ sock := New(tcpTlsConfig)
+ err := sock.Connect()
+ require.Error(t, err)
+}
+
+func Test_clientCommandStopProcessing(t *testing.T) {
+ srv := &tcpServer{addr: testServerAddress, rowsNumResp: 2}
+ go func() { _ = srv.Run() }()
+
+ time.Sleep(time.Millisecond * 100)
+ sock := New(tcpConfig)
+ require.NoError(t, sock.Connect())
+ err := sock.Command("ping\n", func(bytes []byte) bool {
+ assert.Equal(t, "pong", string(bytes))
+ return false
+ })
+ require.NoError(t, sock.Disconnect())
+ require.NoError(t, err)
+}
+
+func Test_clientUDPCommand(t *testing.T) {
+ srv := &udpServer{addr: testServerAddress, rowsNumResp: 1}
+ go func() { defer func() { _ = srv.Close() }(); _ = srv.Run() }()
+
+ time.Sleep(time.Millisecond * 100)
+ sock := New(udpConfig)
+ require.NoError(t, sock.Connect())
+ err := sock.Command("ping\n", func(bytes []byte) bool {
+ assert.Equal(t, "pong", string(bytes))
+ return false
+ })
+ require.NoError(t, sock.Disconnect())
+ require.NoError(t, err)
+}
+
+func Test_clientTCPAddress(t *testing.T) {
+ srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1}
+ go func() { _ = srv.Run() }()
+ time.Sleep(time.Millisecond * 100)
+
+ sock := New(tcpConfig)
+ require.NoError(t, sock.Connect())
+
+ tcpConfig.Address = "tcp://" + tcpConfig.Address
+ sock = New(tcpConfig)
+ require.NoError(t, sock.Connect())
+}
+
+func Test_clientUnixCommand(t *testing.T) {
+ srv := &unixServer{addr: testUnixServerAddress, rowsNumResp: 1}
+ // cleanup previous file descriptors
+ _ = srv.Close()
+ go func() { _ = srv.Run() }()
+
+ time.Sleep(time.Millisecond * 200)
+ sock := New(unixConfig)
+ require.NoError(t, sock.Connect())
+ err := sock.Command("ping\n", func(bytes []byte) bool {
+ assert.Equal(t, "pong", string(bytes))
+ return false
+ })
+ require.NoError(t, err)
+ require.NoError(t, sock.Disconnect())
+}
+
+func Test_clientEmptyProcessFunc(t *testing.T) {
+ srv := &tcpServer{addr: testServerAddress, rowsNumResp: 1}
+ go func() { _ = srv.Run() }()
+
+ time.Sleep(time.Millisecond * 100)
+ sock := New(tcpConfig)
+ require.NoError(t, sock.Connect())
+ err := sock.Command("ping\n", nil)
+ require.Error(t, err, "nil process func should return an error")
+}
diff --git a/src/go/plugin/go.d/pkg/socket/servers_test.go b/src/go/plugin/go.d/pkg/socket/servers_test.go
new file mode 100644
index 000000000..d66178162
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/socket/servers_test.go
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package socket
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "net"
+ "os"
+ "strings"
+ "time"
+)
+
+type tcpServer struct {
+ addr string
+ server net.Listener
+ rowsNumResp int
+}
+
+func (t *tcpServer) Run() (err error) {
+ t.server, err = net.Listen("tcp", t.addr)
+ if err != nil {
+ return
+ }
+ return t.handleConnections()
+}
+
+func (t *tcpServer) Close() (err error) {
+ return t.server.Close()
+}
+
+func (t *tcpServer) handleConnections() (err error) {
+ for {
+ conn, err := t.server.Accept()
+ if err != nil || conn == nil {
+ return errors.New("could not accept connection")
+ }
+ t.handleConnection(conn)
+ }
+}
+
+func (t *tcpServer) handleConnection(conn net.Conn) {
+ defer func() { _ = conn.Close() }()
+ _ = conn.SetDeadline(time.Now().Add(time.Millisecond * 100))
+
+ rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
+ _, err := rw.ReadString('\n')
+ if err != nil {
+ _, _ = rw.WriteString("failed to read input")
+ _ = rw.Flush()
+ } else {
+ resp := strings.Repeat("pong\n", t.rowsNumResp)
+ _, _ = rw.WriteString(resp)
+ _ = rw.Flush()
+ }
+}
+
+type udpServer struct {
+ addr string
+ conn *net.UDPConn
+ rowsNumResp int
+}
+
+func (u *udpServer) Run() (err error) {
+ addr, err := net.ResolveUDPAddr("udp", u.addr)
+ if err != nil {
+ return err
+ }
+ u.conn, err = net.ListenUDP("udp", addr)
+ if err != nil {
+ return
+ }
+ u.handleConnections()
+ return nil
+}
+
+func (u *udpServer) Close() (err error) {
+ return u.conn.Close()
+}
+
+func (u *udpServer) handleConnections() {
+ for {
+ var buf [2048]byte
+ _, addr, _ := u.conn.ReadFromUDP(buf[0:])
+ resp := strings.Repeat("pong\n", u.rowsNumResp)
+ _, _ = u.conn.WriteToUDP([]byte(resp), addr)
+ }
+}
+
+type unixServer struct {
+ addr string
+ conn *net.UnixListener
+ rowsNumResp int
+}
+
+func (u *unixServer) Run() (err error) {
+ _, _ = os.CreateTemp("/tmp", "testSocketFD")
+ addr, err := net.ResolveUnixAddr("unix", u.addr)
+ if err != nil {
+ return err
+ }
+ u.conn, err = net.ListenUnix("unix", addr)
+ if err != nil {
+ return
+ }
+ go u.handleConnections()
+ return nil
+}
+
+func (u *unixServer) Close() (err error) {
+ _ = os.Remove(testUnixServerAddress)
+ return u.conn.Close()
+}
+
+func (u *unixServer) handleConnections() {
+ var conn net.Conn
+ var err error
+ conn, err = u.conn.AcceptUnix()
+ if err != nil {
+ panic(fmt.Errorf("could not accept connection: %v", err))
+ }
+ u.handleConnection(conn)
+}
+
+func (u *unixServer) handleConnection(conn net.Conn) {
+ _ = conn.SetDeadline(time.Now().Add(time.Second))
+
+ rw := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
+ _, err := rw.ReadString('\n')
+ if err != nil {
+ _, _ = rw.WriteString("failed to read input")
+ _ = rw.Flush()
+ } else {
+ resp := strings.Repeat("pong\n", u.rowsNumResp)
+ _, _ = rw.WriteString(resp)
+ _ = rw.Flush()
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/socket/types.go b/src/go/plugin/go.d/pkg/socket/types.go
new file mode 100644
index 000000000..693faf5be
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/socket/types.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package socket
+
+import (
+ "crypto/tls"
+ "time"
+)
+
+// Processor is a function passed to Socket.Command by the caller
+// to process a command's response line by line.
+type Processor func([]byte) bool
+
+// Client is the interface that wraps the basic socket client operations
+// and hides the implementation details from the users.
+//
+// Connect should prepare the connection.
+//
+// Disconnect should stop any in-flight connections.
+//
+// Command should send the actual data to the wire and pass
+// any results to the processor function.
+//
+// Implementations should return TCP, UDP or Unix ready sockets.
+type Client interface {
+ Connect() error
+ Disconnect() error
+ Command(command string, process Processor) error
+}
+
+// Config holds the network address (an IPv4/IPv6 address or domain plus port,
+// or a unix socket path), the socket type (tcp, udp, unix), the timeouts and
+// the TLS configuration for a Socket.
+type Config struct {
+ Address string
+ ConnectTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+ TLSConf *tls.Config
+}
diff --git a/src/go/plugin/go.d/pkg/socket/utils.go b/src/go/plugin/go.d/pkg/socket/utils.go
new file mode 100644
index 000000000..dcc48b383
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/socket/utils.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package socket
+
+import "strings"
+
+func IsUnixSocket(address string) bool {
+ return strings.HasPrefix(address, "/") || strings.HasPrefix(address, "unix://")
+}
+
+func IsUdpSocket(address string) bool {
+ return strings.HasPrefix(address, "udp://")
+}
+
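+// networkType maps a configured address to the (network, address) pair expected
+// by net.Dial. For example (illustrative values):
+//
+//	"unix:///run/app.sock"  -> ("unix", "/run/app.sock")
+//	"/run/app.sock"         -> ("unix", "/run/app.sock")
+//	"udp://127.0.0.1:8125"  -> ("udp", "127.0.0.1:8125")
+//	"127.0.0.1:6379"        -> ("tcp", "127.0.0.1:6379")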
+func networkType(address string) (string, string) {
+ switch {
+ case IsUnixSocket(address):
+ address = strings.TrimPrefix(address, "unix://")
+ return "unix", address
+ case IsUdpSocket(address):
+ return "udp", strings.TrimPrefix(address, "udp://")
+ default:
+ return "tcp", strings.TrimPrefix(address, "tcp://")
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/stm/stm.go b/src/go/plugin/go.d/pkg/stm/stm.go
new file mode 100644
index 000000000..7d07ba9a4
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/stm/stm.go
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package stm
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+const (
+ fieldTagName = "stm"
+ structKey = "STMKey"
+)
+
+type (
+ Value interface {
+ WriteTo(rv map[string]int64, key string, mul, div int)
+ }
+)
+
+// ToMap converts a struct to a map[string]int64 based on 'stm' tags.
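+//
+// The tag format is "key[,mul[,div]]": numeric fields are multiplied by mul and
+// divided by div before being stored. A short sketch (field names and values
+// are illustrative):
+//
+//	type stats struct {
+//	    Requests int     `stm:"requests"`
+//	    Latency  float64 `stm:"latency,1000"` // seconds -> milliseconds
+//	}
+//	// ToMap(stats{Requests: 3, Latency: 0.25}) == map[string]int64{"requests": 3, "latency": 250}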
+func ToMap(s ...interface{}) map[string]int64 {
+ rv := map[string]int64{}
+ for _, v := range s {
+ value := reflect.Indirect(reflect.ValueOf(v))
+ toMap(value, rv, "", 1, 1)
+ }
+ return rv
+}
+
+func toMap(value reflect.Value, rv map[string]int64, key string, mul, div int) {
+ if !value.IsValid() {
+ log.Panicf("value is not valid key=%s", key)
+ }
+ if value.CanInterface() {
+ val, ok := value.Interface().(Value)
+ if ok {
+ val.WriteTo(rv, key, mul, div)
+ return
+ }
+ }
+ switch value.Kind() {
+ case reflect.Ptr:
+ convertPtr(value, rv, key, mul, div)
+ case reflect.Struct:
+ convertStruct(value, rv, key)
+ case reflect.Array, reflect.Slice:
+ convertArraySlice(value, rv, key, mul, div)
+ case reflect.Map:
+ convertMap(value, rv, key, mul, div)
+ case reflect.Bool:
+ convertBool(value, rv, key)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ convertInteger(value, rv, key, mul, div)
+ case reflect.Float32, reflect.Float64:
+ convertFloat(value, rv, key, mul, div)
+ case reflect.Interface:
+ convertInterface(value, rv, key, mul, div)
+ default:
+ log.Panicf("unsupported data type: %v", value.Kind())
+ }
+}
+
+func convertPtr(value reflect.Value, rv map[string]int64, key string, mul, div int) {
+ if !value.IsNil() {
+ toMap(value.Elem(), rv, key, mul, div)
+ }
+}
+
+func convertStruct(value reflect.Value, rv map[string]int64, key string) {
+ t := value.Type()
+ k := value.FieldByName(structKey)
+ if k.Kind() == reflect.String {
+ key = joinPrefix(key, k.String())
+ }
+ for i := 0; i < t.NumField(); i++ {
+ ft := t.Field(i)
+ tag, ok := ft.Tag.Lookup(fieldTagName)
+ if !ok || ft.Name == structKey {
+ continue
+ }
+ value := value.Field(i)
+ prefix, mul, div := parseTag(tag)
+ toMap(value, rv, joinPrefix(key, prefix), mul, div)
+ }
+}
+
+func convertMap(value reflect.Value, rv map[string]int64, key string, mul, div int) {
+ if value.IsNil() {
+ log.Panicf("value is nil key=%s", key)
+ }
+ for _, k := range value.MapKeys() {
+ toMap(value.MapIndex(k), rv, joinPrefix(key, k.String()), mul, div)
+ }
+}
+
+func convertArraySlice(value reflect.Value, rv map[string]int64, key string, mul, div int) {
+ for i := 0; i < value.Len(); i++ {
+ toMap(value.Index(i), rv, key, mul, div)
+ }
+}
+
+func convertBool(value reflect.Value, rv map[string]int64, key string) {
+ if _, ok := rv[key]; ok {
+ log.Panic("duplicate key: ", key)
+ }
+ if value.Bool() {
+ rv[key] = 1
+ } else {
+ rv[key] = 0
+ }
+}
+
+func convertInteger(value reflect.Value, rv map[string]int64, key string, mul, div int) {
+ if _, ok := rv[key]; ok {
+ log.Panic("duplicate key: ", key)
+ }
+ intVal := value.Int()
+ rv[key] = intVal * int64(mul) / int64(div)
+}
+
+func convertFloat(value reflect.Value, rv map[string]int64, key string, mul, div int) {
+ if _, ok := rv[key]; ok {
+ log.Panic("duplicate key: ", key)
+ }
+ floatVal := value.Float()
+ rv[key] = int64(floatVal * float64(mul) / float64(div))
+}
+
+func convertInterface(value reflect.Value, rv map[string]int64, key string, mul, div int) {
+ fv := reflect.ValueOf(value.Interface())
+ toMap(fv, rv, key, mul, div)
+}
+
+func joinPrefix(prefix, key string) string {
+ if prefix == "" {
+ return key
+ }
+ if key == "" {
+ return prefix
+ }
+ return prefix + "_" + key
+}
+
+func parseTag(tag string) (prefix string, mul int, div int) {
+ tokens := strings.Split(tag, ",")
+ mul = 1
+ div = 1
+ var err error
+ switch len(tokens) {
+ case 3:
+ div, err = strconv.Atoi(tokens[2])
+ if err != nil {
+ log.Panic(err)
+ }
+ fallthrough
+ case 2:
+ mul, err = strconv.Atoi(tokens[1])
+ if err != nil {
+ log.Panic(err)
+ }
+ fallthrough
+ case 1:
+ prefix = tokens[0]
+ default:
+ log.Panic(fmt.Errorf("invalid tag format: %s", tag))
+ }
+ return
+}
diff --git a/src/go/plugin/go.d/pkg/stm/stm_test.go b/src/go/plugin/go.d/pkg/stm/stm_test.go
new file mode 100644
index 000000000..74ac6f3f1
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/stm/stm_test.go
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package stm_test
+
+import (
+ "testing"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/stm"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/metrics"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestToMap_empty(t *testing.T) {
+ s := struct{}{}
+
+ expected := map[string]int64{}
+
+ assert.EqualValuesf(t, expected, stm.ToMap(s), "value test")
+ assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test")
+}
+
+func TestToMap_metrics(t *testing.T) {
+ s := struct {
+ C metrics.Counter `stm:"c"`
+ G metrics.Gauge `stm:"g,100"`
+ H metrics.Histogram `stm:"h,100"`
+ S metrics.Summary `stm:"s,200,2"`
+ }{}
+ s.C.Inc()
+ s.G.Set(3.14)
+ s.H = metrics.NewHistogram([]float64{1, 5, 10})
+
+ s.H.Observe(3.14)
+ s.H.Observe(6.28)
+ s.H.Observe(20)
+
+ s.S = metrics.NewSummary()
+ s.S.Observe(3.14)
+ s.S.Observe(6.28)
+
+ expected := map[string]int64{
+ "c": 1,
+ "g": 314,
+
+ "h_count": 3,
+ "h_sum": 2942,
+ "h_bucket_1": 0,
+ "h_bucket_2": 1,
+ "h_bucket_3": 2,
+
+ "s_count": 2,
+ "s_sum": 942,
+ "s_min": 314,
+ "s_max": 628,
+ "s_avg": 471,
+ }
+
+ assert.Equal(t, expected, stm.ToMap(s), "value test")
+ assert.Equal(t, expected, stm.ToMap(&s), "ptr test")
+}
+
+func TestToMap_int(t *testing.T) {
+ s := struct {
+ I int `stm:"int"`
+ I8 int8 `stm:"int8"`
+ I16 int16 `stm:"int16"`
+ I32 int32 `stm:"int32"`
+ I64 int64 `stm:"int64"`
+ }{
+ I: 1, I8: 2, I16: 3, I32: 4, I64: 5,
+ }
+
+ expected := map[string]int64{
+ "int": 1, "int8": 2, "int16": 3, "int32": 4, "int64": 5,
+ }
+
+ assert.EqualValuesf(t, expected, stm.ToMap(s), "value test")
+ assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test")
+}
+
+func TestToMap_float(t *testing.T) {
+ s := struct {
+ F32 float32 `stm:"f32,100"`
+ F64 float64 `stm:"f64"`
+ }{
+ 3.14, 628,
+ }
+
+ expected := map[string]int64{
+ "f32": 314, "f64": 628,
+ }
+
+ assert.EqualValuesf(t, expected, stm.ToMap(s), "value test")
+ assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test")
+}
+
+func TestToMap_struct(t *testing.T) {
+ type pair struct {
+ Left int `stm:"left"`
+ Right int `stm:"right"`
+ }
+ s := struct {
+ I int `stm:"int"`
+ Pempty pair `stm:""`
+ Ps pair `stm:"s"`
+ Notag int
+ }{
+ I: 1,
+ Pempty: pair{2, 3},
+ Ps: pair{4, 5},
+ Notag: 6,
+ }
+
+ expected := map[string]int64{
+ "int": 1,
+ "left": 2, "right": 3,
+ "s_left": 4, "s_right": 5,
+ }
+
+ assert.EqualValuesf(t, expected, stm.ToMap(s), "value test")
+ assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test")
+}
+
+func TestToMap_tree(t *testing.T) {
+ type node struct {
+ Value int `stm:"v"`
+ Left *node `stm:"left"`
+ Right *node `stm:"right"`
+ }
+ s := node{1,
+ &node{2, nil, nil},
+ &node{3,
+ &node{4, nil, nil},
+ nil,
+ },
+ }
+ expected := map[string]int64{
+ "v": 1,
+ "left_v": 2,
+ "right_v": 3,
+ "right_left_v": 4,
+ }
+
+ assert.EqualValuesf(t, expected, stm.ToMap(s), "value test")
+ assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test")
+}
+
+func TestToMap_map(t *testing.T) {
+ s := struct {
+ I int `stm:"int"`
+ M map[string]int64 `stm:""`
+ }{
+ I: 1,
+ M: map[string]int64{
+ "a": 2,
+ "b": 3,
+ },
+ }
+
+ expected := map[string]int64{
+ "int": 1,
+ "a": 2,
+ "b": 3,
+ }
+
+ assert.EqualValuesf(t, expected, stm.ToMap(s), "value test")
+ assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test")
+}
+
+func TestToMap_nestMap(t *testing.T) {
+ s := struct {
+ I int `stm:"int"`
+ M map[string]interface{} `stm:""`
+ }{
+ I: 1,
+ M: map[string]interface{}{
+ "a": 2,
+ "b": 3,
+ "m": map[string]interface{}{
+ "c": 4,
+ },
+ },
+ }
+
+ expected := map[string]int64{
+ "int": 1,
+ "a": 2,
+ "b": 3,
+ "m_c": 4,
+ }
+
+ assert.EqualValuesf(t, expected, stm.ToMap(s), "value test")
+ assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test")
+}
+
+func TestToMap_ptr(t *testing.T) {
+ two := 2
+ s := struct {
+ I int `stm:"int"`
+ Ptr *int `stm:"ptr"`
+ Nil *int `stm:"nil"`
+ }{
+ I: 1,
+ Ptr: &two,
+ Nil: nil,
+ }
+
+ expected := map[string]int64{
+ "int": 1,
+ "ptr": 2,
+ }
+
+ assert.EqualValuesf(t, expected, stm.ToMap(s), "value test")
+ assert.EqualValuesf(t, expected, stm.ToMap(&s), "ptr test")
+}
+
+func TestToMap_invalidType(t *testing.T) {
+ s := struct {
+ Str string `stm:"int"`
+ }{
+ Str: "abc",
+ }
+
+ assert.Panics(t, func() {
+ stm.ToMap(s)
+ }, "value test")
+ assert.Panics(t, func() {
+ stm.ToMap(&s)
+ }, "ptr test")
+}
+
+func TestToMap_duplicateKey(t *testing.T) {
+ {
+ s := struct {
+ Key int `stm:"key"`
+ M map[string]int `stm:""`
+ }{
+ Key: 1,
+ M: map[string]int{
+ "key": 2,
+ },
+ }
+
+ assert.Panics(t, func() {
+ stm.ToMap(s)
+ }, "value test")
+ assert.Panics(t, func() {
+ stm.ToMap(&s)
+ }, "ptr test")
+ }
+ {
+ s := struct {
+ Key float64 `stm:"key"`
+ M map[string]float64 `stm:""`
+ }{
+ Key: 1,
+ M: map[string]float64{
+ "key": 2,
+ },
+ }
+
+ assert.Panics(t, func() {
+ stm.ToMap(s)
+ }, "value test")
+ assert.Panics(t, func() {
+ stm.ToMap(&s)
+ }, "ptr test")
+ }
+}
+
+func TestToMap_Variadic(t *testing.T) {
+ s1 := struct {
+ Key1 int `stm:"key1"`
+ }{
+ Key1: 1,
+ }
+ s2 := struct {
+ Key2 int `stm:"key2"`
+ }{
+ Key2: 2,
+ }
+ s3 := struct {
+ Key3 int `stm:"key3"`
+ }{
+ Key3: 3,
+ }
+
+ assert.Equal(
+ t,
+ map[string]int64{
+ "key1": 1,
+ "key2": 2,
+ "key3": 3,
+ },
+ stm.ToMap(s1, s2, s3),
+ )
+}
+
+func TestToMap_badTag(t *testing.T) {
+ assert.Panics(t, func() {
+ s := struct {
+ A int `stm:"a,not_int"`
+ }{1}
+ stm.ToMap(s)
+ })
+ assert.Panics(t, func() {
+ s := struct {
+ A int `stm:"a,1,not_int"`
+ }{1}
+ stm.ToMap(s)
+ })
+ assert.Panics(t, func() {
+ s := struct {
+ A int `stm:"a,not_int,1"`
+ }{1}
+ stm.ToMap(s)
+ })
+ assert.Panics(t, func() {
+ s := struct {
+ A int `stm:"a,1,2,3"`
+ }{1}
+ stm.ToMap(s)
+ })
+}
+
+func TestToMap_nilValue(t *testing.T) {
+ assert.Panics(t, func() {
+ s := struct {
+ a metrics.CounterVec `stm:"a"`
+ }{nil}
+ stm.ToMap(s)
+ })
+}
+
+func TestToMap_bool(t *testing.T) {
+ s := struct {
+ A bool `stm:"a"`
+ B bool `stm:"b"`
+ }{
+ A: true,
+ B: false,
+ }
+ assert.Equal(
+ t,
+ map[string]int64{
+ "a": 1,
+ "b": 0,
+ },
+ stm.ToMap(s),
+ )
+}
+
+func TestToMap_ArraySlice(t *testing.T) {
+ s := [4]interface{}{
+ map[string]int{
+ "B": 1,
+ "C": 2,
+ },
+ struct {
+ D int `stm:"D"`
+ E int `stm:"E"`
+ }{
+ D: 3,
+ E: 4,
+ },
+ struct {
+ STMKey string
+ F int `stm:"F"`
+ G int `stm:"G"`
+ }{
+ F: 5,
+ G: 6,
+ },
+ struct {
+ STMKey string
+ H int `stm:"H"`
+ I int `stm:"I"`
+ }{
+ STMKey: "KEY",
+ H: 7,
+ I: 8,
+ },
+ }
+
+ assert.Equal(
+ t,
+ map[string]int64{
+ "B": 1,
+ "C": 2,
+ "D": 3,
+ "E": 4,
+ "F": 5,
+ "G": 6,
+ "KEY_H": 7,
+ "KEY_I": 8,
+ },
+ stm.ToMap(s),
+ )
+
+ assert.Equal(
+ t,
+ map[string]int64{
+ "B": 1,
+ "C": 2,
+ "D": 3,
+ "E": 4,
+ "F": 5,
+ "G": 6,
+ "KEY_H": 7,
+ "KEY_I": 8,
+ },
+ stm.ToMap(s[:]),
+ )
+}
diff --git a/src/go/plugin/go.d/pkg/tlscfg/config.go b/src/go/plugin/go.d/pkg/tlscfg/config.go
new file mode 100644
index 000000000..7a227c4c8
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/tlscfg/config.go
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tlscfg
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "os"
+)
+
+// TLSConfig represents the standard client TLS configuration.
+type TLSConfig struct {
+ // TLSCA specifies the certificate authority to use when verifying server certificates.
+ TLSCA string `yaml:"tls_ca,omitempty" json:"tls_ca"`
+
+ // TLSCert specifies the TLS certificate file.
+ TLSCert string `yaml:"tls_cert,omitempty" json:"tls_cert"`
+
+ // TLSKey specifies the TLS key file.
+ TLSKey string `yaml:"tls_key,omitempty" json:"tls_key"`
+
+ // InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name.
+ InsecureSkipVerify bool `yaml:"tls_skip_verify,omitempty" json:"tls_skip_verify"`
+}
+
+// NewTLSConfig creates a tls.Config from the given TLSConfig.
+// The returned config may be nil without an error if TLS is not configured.
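+//
+// A minimal sketch (the file paths are illustrative assumptions):
+//
+//	tlsCfg, err := NewTLSConfig(TLSConfig{
+//	    TLSCA:   "/etc/ssl/certs/my-ca.pem",
+//	    TLSCert: "/etc/ssl/certs/client.pem",
+//	    TLSKey:  "/etc/ssl/private/client.key",
+//	})
+//	if err != nil {
+//	    return err
+//	}
+//	_ = tlsCfg // pass to a tls.Dialer, http.Transport, etc.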
+func NewTLSConfig(cfg TLSConfig) (*tls.Config, error) {
+ if cfg.TLSCA == "" && cfg.TLSKey == "" && cfg.TLSCert == "" && !cfg.InsecureSkipVerify {
+ return nil, nil
+ }
+
+ tlsConfig := &tls.Config{
+ InsecureSkipVerify: cfg.InsecureSkipVerify,
+ Renegotiation: tls.RenegotiateNever,
+ }
+
+ if cfg.TLSCA != "" {
+ pool, err := loadCertPool([]string{cfg.TLSCA})
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.RootCAs = pool
+ }
+
+ if cfg.TLSCert != "" && cfg.TLSKey != "" {
+ cert, err := loadCertificate(cfg.TLSCert, cfg.TLSKey)
+ if err != nil {
+ return nil, err
+ }
+ tlsConfig.Certificates = []tls.Certificate{cert}
+ }
+
+ return tlsConfig, nil
+}
+
+func loadCertPool(certFiles []string) (*x509.CertPool, error) {
+ pool := x509.NewCertPool()
+ for _, certFile := range certFiles {
+ pem, err := os.ReadFile(certFile)
+ if err != nil {
+ return nil, fmt.Errorf("could not read certificate %q: %v", certFile, err)
+ }
+ if !pool.AppendCertsFromPEM(pem) {
+ return nil, fmt.Errorf("could not parse any PEM certificates %q: %v", certFile, err)
+ }
+ }
+ return pool, nil
+}
+
+func loadCertificate(certFile, keyFile string) (tls.Certificate, error) {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return tls.Certificate{}, fmt.Errorf("could not load keypair %s:%s: %v", certFile, keyFile, err)
+ }
+ return cert, nil
+}
diff --git a/src/go/plugin/go.d/pkg/tlscfg/config_test.go b/src/go/plugin/go.d/pkg/tlscfg/config_test.go
new file mode 100644
index 000000000..d95fe24bc
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/tlscfg/config_test.go
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package tlscfg
+
+import "testing"
+
+// TODO:
+func TestNewClientTLSConfig(t *testing.T) {
+
+}
diff --git a/src/go/plugin/go.d/pkg/web/client.go b/src/go/plugin/go.d/pkg/web/client.go
new file mode 100644
index 000000000..02dc17de1
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/web/client.go
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "net/http"
+ "net/url"
+
+ "github.com/netdata/netdata/go/plugins/plugin/go.d/pkg/tlscfg"
+)
+
+// ErrRedirectAttempted indicates that a redirect occurred.
+var ErrRedirectAttempted = errors.New("redirect")
+
+// Client is the configuration of the HTTP client.
+// This structure is not intended to be used directly as part of a module's configuration.
+// Supported configuration file formats: YAML.
+type Client struct {
+ // Timeout specifies a time limit for requests made by this Client.
+ // Default (zero value) is no timeout. Must be set before http.Client creation.
+ Timeout Duration `yaml:"timeout,omitempty" json:"timeout"`
+
+ // NotFollowRedirect specifies the policy for handling redirects.
+ // Default (zero value) is std http package default policy (stop after 10 consecutive requests).
+ NotFollowRedirect bool `yaml:"not_follow_redirects,omitempty" json:"not_follow_redirects"`
+
+ // ProxyURL specifies the URL of the proxy to use. An empty string means use the environment variables
+ // HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof) to get the URL.
+ ProxyURL string `yaml:"proxy_url,omitempty" json:"proxy_url"`
+
+ // TLSConfig specifies the TLS configuration.
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+}
+
+// NewHTTPClient returns a new *http.Client given a Client configuration and an error if any.
+func NewHTTPClient(cfg Client) (*http.Client, error) {
+ tlsConfig, err := tlscfg.NewTLSConfig(cfg.TLSConfig)
+ if err != nil {
+ return nil, fmt.Errorf("error on creating TLS config: %v", err)
+ }
+
+ if cfg.ProxyURL != "" {
+ if _, err := url.Parse(cfg.ProxyURL); err != nil {
+ return nil, fmt.Errorf("error on parsing proxy URL '%s': %v", cfg.ProxyURL, err)
+ }
+ }
+
+ d := &net.Dialer{Timeout: cfg.Timeout.Duration()}
+
+ transport := &http.Transport{
+ Proxy: proxyFunc(cfg.ProxyURL),
+ TLSClientConfig: tlsConfig,
+ DialContext: d.DialContext,
+ TLSHandshakeTimeout: cfg.Timeout.Duration(),
+ }
+
+ return &http.Client{
+ Timeout: cfg.Timeout.Duration(),
+ Transport: transport,
+ CheckRedirect: redirectFunc(cfg.NotFollowRedirect),
+ }, nil
+}
+
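+// redirectFunc returns nil when redirects should be followed, which keeps the
+// standard library's default redirect policy; otherwise it returns a
+// CheckRedirect function that stops immediately with ErrRedirectAttempted.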
+func redirectFunc(notFollowRedirect bool) func(req *http.Request, via []*http.Request) error {
+ if follow := !notFollowRedirect; follow {
+ return nil
+ }
+ return func(_ *http.Request, _ []*http.Request) error { return ErrRedirectAttempted }
+}
+
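+// proxyFunc returns http.ProxyFromEnvironment when no proxy URL is configured,
+// otherwise a function that always returns the parsed proxy URL.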
+func proxyFunc(rawProxyURL string) func(r *http.Request) (*url.URL, error) {
+ if rawProxyURL == "" {
+ return http.ProxyFromEnvironment
+ }
+ proxyURL, _ := url.Parse(rawProxyURL)
+ return http.ProxyURL(proxyURL)
+}
diff --git a/src/go/plugin/go.d/pkg/web/client_test.go b/src/go/plugin/go.d/pkg/web/client_test.go
new file mode 100644
index 000000000..ead1486c3
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/web/client_test.go
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+import (
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestNewHTTPClient(t *testing.T) {
+ client, _ := NewHTTPClient(Client{
+ Timeout: Duration(time.Second * 5),
+ NotFollowRedirect: true,
+ ProxyURL: "http://127.0.0.1:3128",
+ })
+
+ assert.IsType(t, (*http.Client)(nil), client)
+ assert.Equal(t, time.Second*5, client.Timeout)
+ assert.NotNil(t, client.CheckRedirect)
+}
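+
+// A follow-up sketch (assumption: an empty Client config yields no TLS or
+// proxy error, so the client falls back to the standard redirect policy and
+// CheckRedirect stays nil).
+func TestNewHTTPClient_followsRedirectsByDefault(t *testing.T) {
+	client, err := NewHTTPClient(Client{})
+
+	assert.NoError(t, err)
+	assert.Nil(t, client.CheckRedirect)
+}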
diff --git a/src/go/plugin/go.d/pkg/web/doc.go b/src/go/plugin/go.d/pkg/web/doc.go
new file mode 100644
index 000000000..4c6d31461
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/web/doc.go
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+/*
+Package web contains HTTP request and client configurations.
+The HTTP structure embeds both of them, and it is the only structure intended to be used as part of a module's configuration.
+Every module that uses HTTP requests to collect metrics should use it.
+It allows all modules to share the same set of user-configurable options.
+*/
+package web
diff --git a/src/go/plugin/go.d/pkg/web/doc_test.go b/src/go/plugin/go.d/pkg/web/doc_test.go
new file mode 100644
index 000000000..137eed207
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/web/doc_test.go
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+func ExampleHTTP_usage() {
+ // Just embed HTTP into your module structure.
+ // It allows you to have both Request and Client fields in the module configuration file.
+ type myModule struct {
+ HTTP `yaml:",inline"`
+ }
+
+ var m myModule
+ _, _ = NewHTTPRequest(m.Request)
+ _, _ = NewHTTPClient(m.Client)
+}
diff --git a/src/go/plugin/go.d/pkg/web/duration.go b/src/go/plugin/go.d/pkg/web/duration.go
new file mode 100644
index 000000000..85d5ef650
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/web/duration.go
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
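+// Duration is a time.Duration that can be (un)marshalled from/to YAML and JSON.
+// Accepted input forms are Go duration strings ("300ms", "1m30s"), integers
+// (interpreted as seconds) and floats (seconds); it marshals back as a float
+// number of seconds. For example, the following (illustrative) YAML values
+// are equivalent:
+//
+//	timeout: 1m30s
+//	timeout: 90
+//	timeout: 90.0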
+type Duration time.Duration
+
+func (d Duration) Duration() time.Duration {
+ return time.Duration(d)
+}
+
+func (d Duration) String() string {
+ return d.Duration().String()
+}
+
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var s string
+
+ if err := unmarshal(&s); err != nil {
+ return err
+ }
+
+ if v, err := time.ParseDuration(s); err == nil {
+ *d = Duration(v)
+ return nil
+ }
+ if v, err := strconv.ParseInt(s, 10, 64); err == nil {
+ *d = Duration(time.Duration(v) * time.Second)
+ return nil
+ }
+ if v, err := strconv.ParseFloat(s, 64); err == nil {
+ *d = Duration(v * float64(time.Second))
+ return nil
+ }
+
+ return fmt.Errorf("unparsable duration format '%s'", s)
+}
+
+func (d Duration) MarshalYAML() (any, error) {
+ seconds := float64(d) / float64(time.Second)
+ return seconds, nil
+}
+
+func (d *Duration) UnmarshalJSON(b []byte) error {
+	// Trim surrounding quotes so JSON strings like "300ms" parse correctly.
+	s := strings.Trim(string(b), "\"")
+
+ if v, err := time.ParseDuration(s); err == nil {
+ *d = Duration(v)
+ return nil
+ }
+ if v, err := strconv.ParseInt(s, 10, 64); err == nil {
+ *d = Duration(time.Duration(v) * time.Second)
+ return nil
+ }
+ if v, err := strconv.ParseFloat(s, 64); err == nil {
+ *d = Duration(v * float64(time.Second))
+ return nil
+ }
+
+ return fmt.Errorf("unparsable duration format '%s'", s)
+}
+
+func (d Duration) MarshalJSON() ([]byte, error) {
+ seconds := float64(d) / float64(time.Second)
+ return json.Marshal(seconds)
+}
diff --git a/src/go/plugin/go.d/pkg/web/duration_test.go b/src/go/plugin/go.d/pkg/web/duration_test.go
new file mode 100644
index 000000000..b45063f13
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/web/duration_test.go
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "gopkg.in/yaml.v2"
+)
+
+func TestDuration_MarshalYAML(t *testing.T) {
+ tests := map[string]struct {
+ d Duration
+ want string
+ }{
+ "1 second": {d: Duration(time.Second), want: "1"},
+ "1.5 seconds": {d: Duration(time.Second + time.Millisecond*500), want: "1.5"},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ bs, err := yaml.Marshal(&test.d)
+ require.NoError(t, err)
+
+ assert.Equal(t, test.want, strings.TrimSpace(string(bs)))
+ })
+ }
+}
+
+func TestDuration_MarshalJSON(t *testing.T) {
+ tests := map[string]struct {
+ d Duration
+ want string
+ }{
+ "1 second": {d: Duration(time.Second), want: "1"},
+ "1.5 seconds": {d: Duration(time.Second + time.Millisecond*500), want: "1.5"},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ bs, err := json.Marshal(&test.d)
+ require.NoError(t, err)
+
+ assert.Equal(t, test.want, strings.TrimSpace(string(bs)))
+ })
+ }
+}
+
+func TestDuration_UnmarshalYAML(t *testing.T) {
+ tests := map[string]struct {
+ input any
+ }{
+ "duration": {input: "300ms"},
+ "string int": {input: "1"},
+ "string float": {input: "1.1"},
+ "int": {input: 2},
+ "float": {input: 2.2},
+ }
+
+ var zero Duration
+
+ for name, test := range tests {
+ name = fmt.Sprintf("%s (%v)", name, test.input)
+ t.Run(name, func(t *testing.T) {
+ data, err := yaml.Marshal(test.input)
+ require.NoError(t, err)
+
+ var d Duration
+ require.NoError(t, yaml.Unmarshal(data, &d))
+ assert.NotEqual(t, zero.String(), d.String())
+ })
+ }
+}
+
+func TestDuration_UnmarshalJSON(t *testing.T) {
+ tests := map[string]struct {
+ input any
+ }{
+ "duration": {input: "300ms"},
+ "string int": {input: "1"},
+ "string float": {input: "1.1"},
+ "int": {input: 2},
+ "float": {input: 2.2},
+ }
+
+ var zero Duration
+
+ type duration struct {
+ D Duration `json:"d"`
+ }
+ type input struct {
+ D any `json:"d"`
+ }
+
+ for name, test := range tests {
+ name = fmt.Sprintf("%s (%v)", name, test.input)
+ t.Run(name, func(t *testing.T) {
+ input := input{D: test.input}
+			data, err := json.Marshal(input)
+ require.NoError(t, err)
+
+ var d duration
+ require.NoError(t, yaml.Unmarshal(data, &d))
+ assert.NotEqual(t, zero.String(), d.D.String())
+ })
+ }
+}
diff --git a/src/go/plugin/go.d/pkg/web/request.go b/src/go/plugin/go.d/pkg/web/request.go
new file mode 100644
index 000000000..20a6ec093
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/web/request.go
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+import (
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strings"
+
+ "github.com/netdata/netdata/go/plugins/pkg/buildinfo"
+ "github.com/netdata/netdata/go/plugins/pkg/executable"
+)
+
+// Request is the configuration of the HTTP request.
+// This structure is not intended to be used directly as part of a module's configuration.
+// Supported configuration file formats: YAML.
+type Request struct {
+ // URL specifies the URL to access.
+ URL string `yaml:"url" json:"url"`
+
+ // Username specifies the username for basic HTTP authentication.
+ Username string `yaml:"username,omitempty" json:"username"`
+
+ // Password specifies the password for basic HTTP authentication.
+ Password string `yaml:"password,omitempty" json:"password"`
+
+ // ProxyUsername specifies the username for basic HTTP authentication.
+ // It is used to authenticate a user agent to a proxy server.
+ ProxyUsername string `yaml:"proxy_username,omitempty" json:"proxy_username"`
+
+ // ProxyPassword specifies the password for basic HTTP authentication.
+ // It is used to authenticate a user agent to a proxy server.
+ ProxyPassword string `yaml:"proxy_password,omitempty" json:"proxy_password"`
+
+ // Method specifies the HTTP method (GET, POST, PUT, etc.). An empty string means GET.
+ Method string `yaml:"method,omitempty" json:"method"`
+
+ // Headers specifies the HTTP request header fields to be sent by the client.
+ Headers map[string]string `yaml:"headers,omitempty" json:"headers"`
+
+ // Body specifies the HTTP request body to be sent by the client.
+ Body string `yaml:"body,omitempty" json:"body"`
+}
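+
+// An illustrative YAML snippet using the struct tags above (all values are
+// placeholders):
+//
+//	url: http://127.0.0.1:19999/api/v1/info
+//	method: GET
+//	username: user
+//	password: pass
+//	headers:
+//	  X-Api-Key: secret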
+
+// Copy makes a full copy of the Request.
+func (r Request) Copy() Request {
+ headers := make(map[string]string, len(r.Headers))
+ for k, v := range r.Headers {
+ headers[k] = v
+ }
+ r.Headers = headers
+ return r
+}
+
+var userAgent = fmt.Sprintf("Netdata %s.plugin/%s", executable.Name, buildinfo.Version)
+
+// NewHTTPRequest returns a new *http.Request built from the given Request configuration, or an error.
+func NewHTTPRequest(cfg Request) (*http.Request, error) {
+ var body io.Reader
+ if cfg.Body != "" {
+ body = strings.NewReader(cfg.Body)
+ }
+
+ req, err := http.NewRequest(cfg.Method, cfg.URL, body)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Set("User-Agent", userAgent)
+
+ if cfg.Username != "" || cfg.Password != "" {
+ req.SetBasicAuth(cfg.Username, cfg.Password)
+ }
+
+ if cfg.ProxyUsername != "" && cfg.ProxyPassword != "" {
+ basicAuth := base64.StdEncoding.EncodeToString([]byte(cfg.ProxyUsername + ":" + cfg.ProxyPassword))
+ req.Header.Set("Proxy-Authorization", "Basic "+basicAuth)
+ }
+
+ for k, v := range cfg.Headers {
+ switch k {
+ case "host", "Host":
+ req.Host = v
+ default:
+ req.Header.Set(k, v)
+ }
+ }
+
+ return req, nil
+}
+
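+// NewHTTPRequestWithPath returns a new *http.Request with urlPath joined onto
+// the configured URL; the passed Request is copied and left unmodified.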
+func NewHTTPRequestWithPath(cfg Request, urlPath string) (*http.Request, error) {
+ cfg = cfg.Copy()
+
+ v, err := url.JoinPath(cfg.URL, urlPath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to join URL path: %v", err)
+ }
+ cfg.URL = v
+
+ return NewHTTPRequest(cfg)
+}
diff --git a/src/go/plugin/go.d/pkg/web/request_test.go b/src/go/plugin/go.d/pkg/web/request_test.go
new file mode 100644
index 000000000..d39f9a36a
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/web/request_test.go
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+import (
+ "encoding/base64"
+ "net/http"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestRequest_Copy(t *testing.T) {
+ tests := map[string]struct {
+ orig Request
+ change func(req *Request)
+ }{
+ "change headers": {
+ orig: Request{
+ URL: "http://127.0.0.1:19999/api/v1/info",
+ Method: "POST",
+ Headers: map[string]string{
+ "X-Api-Key": "secret",
+ },
+ Username: "username",
+ Password: "password",
+ ProxyUsername: "proxy_username",
+ ProxyPassword: "proxy_password",
+ },
+ change: func(req *Request) {
+ req.Headers["header_key"] = "header_value"
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ reqCopy := test.orig.Copy()
+
+ assert.Equal(t, test.orig, reqCopy)
+ test.change(&reqCopy)
+ assert.NotEqual(t, test.orig, reqCopy)
+ })
+ }
+}
+
+func TestNewHTTPRequest(t *testing.T) {
+ tests := map[string]struct {
+ req Request
+ wantErr bool
+ }{
+ "test url": {
+ req: Request{
+ URL: "http://127.0.0.1:19999/api/v1/info",
+ },
+ wantErr: false,
+ },
+ "test body": {
+ req: Request{
+ Body: "content",
+ },
+ wantErr: false,
+ },
+ "test method": {
+ req: Request{
+ Method: "POST",
+ },
+ wantErr: false,
+ },
+ "test headers": {
+ req: Request{
+ Headers: map[string]string{
+ "X-Api-Key": "secret",
+ },
+ },
+ wantErr: false,
+ },
+ "test special headers (host)": {
+ req: Request{
+ Headers: map[string]string{
+ "host": "Host",
+ },
+ },
+ wantErr: false,
+ },
+ "test special headers (Host)": {
+ req: Request{
+ Headers: map[string]string{
+ "Host": "Host",
+ },
+ },
+ wantErr: false,
+ },
+ "test username and password": {
+ req: Request{
+ Username: "username",
+ Password: "password",
+ },
+ wantErr: false,
+ },
+ "test proxy username and proxy password": {
+ req: Request{
+ ProxyUsername: "proxy_username",
+ ProxyPassword: "proxy_password",
+ },
+ wantErr: false,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ httpReq, err := NewHTTPRequest(test.req)
+
+ if test.wantErr {
+ assert.Error(t, err)
+ assert.Nil(t, httpReq)
+ return
+ }
+
+ require.NoError(t, err)
+ require.NotNil(t, httpReq)
+ require.IsType(t, (*http.Request)(nil), httpReq)
+
+ assert.Equal(t, test.req.URL, httpReq.URL.String())
+
+ if test.req.Body != "" {
+ assert.NotNil(t, httpReq.Body)
+ }
+
+ if test.req.Username != "" || test.req.Password != "" {
+ user, pass, ok := httpReq.BasicAuth()
+ assert.True(t, ok)
+ assert.Equal(t, test.req.Username, user)
+ assert.Equal(t, test.req.Password, pass)
+ }
+
+ if test.req.Method != "" {
+ assert.Equal(t, test.req.Method, httpReq.Method)
+ }
+
+ if test.req.ProxyUsername != "" || test.req.ProxyPassword != "" {
+ user, pass, ok := parseBasicAuth(httpReq.Header.Get("Proxy-Authorization"))
+ assert.True(t, ok)
+ assert.Equal(t, test.req.ProxyUsername, user)
+ assert.Equal(t, test.req.ProxyPassword, pass)
+ }
+
+ for k, v := range test.req.Headers {
+ switch k {
+ case "host", "Host":
+ assert.Equal(t, httpReq.Host, v)
+ default:
+ assert.Equal(t, v, httpReq.Header.Get(k))
+ }
+ }
+ })
+ }
+}
+
+func TestNewHTTPRequestWithPath(t *testing.T) {
+ tests := map[string]struct {
+ url string
+ path string
+ wantURL string
+ }{
+ "base url": {
+ url: "http://127.0.0.1:65535",
+ path: "/bar",
+ wantURL: "http://127.0.0.1:65535/bar",
+ },
+ "with path": {
+ url: "http://127.0.0.1:65535/foo/",
+ path: "/bar",
+ wantURL: "http://127.0.0.1:65535/foo/bar",
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ req, err := NewHTTPRequestWithPath(Request{URL: test.url}.Copy(), test.path)
+ require.NoError(t, err)
+
+ assert.Equal(t, test.wantURL, req.URL.String())
+ })
+ }
+}
+
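+// parseBasicAuth decodes a "Basic <base64 user:pass>" value, as sent in the
+// Proxy-Authorization header, for use in test assertions.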
+func parseBasicAuth(auth string) (username, password string, ok bool) {
+ const prefix = "Basic "
+ if len(auth) < len(prefix) || !strings.EqualFold(auth[:len(prefix)], prefix) {
+ return "", "", false
+ }
+
+ decoded, err := base64.StdEncoding.DecodeString(auth[len(prefix):])
+ if err != nil {
+ return "", "", false
+ }
+
+ decodedStr := string(decoded)
+ idx := strings.IndexByte(decodedStr, ':')
+ if idx < 0 {
+ return "", "", false
+ }
+
+ return decodedStr[:idx], decodedStr[idx+1:], true
+}
diff --git a/src/go/plugin/go.d/pkg/web/web.go b/src/go/plugin/go.d/pkg/web/web.go
new file mode 100644
index 000000000..cbda396d4
--- /dev/null
+++ b/src/go/plugin/go.d/pkg/web/web.go
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package web
+
+// HTTP is a struct with embedded Request and Client.
+// This structure is intended to be part of the module configuration.
+// Supported configuration file formats: YAML.
+type HTTP struct {
+ Request `yaml:",inline" json:""`
+ Client `yaml:",inline" json:""`
+}
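+
+// Because both Request and Client are inlined, a module's HTTP section is a
+// flat map of their options. Illustrative YAML (placeholder values):
+//
+//	url: http://127.0.0.1:19999/api/v1/info
+//	timeout: 5s
+//	username: user
+//	password: pass
+//	not_follow_redirects: yes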